def statistical_extraction2(self,
                                trc=None,
                                dt=None,
                                nsamp=None,
                                Type=None):
        nt_wav = int(nsamp / 2)  # length of wavelet in samples
        nfft = 2**11  # length of fft

        # time axis for wavelet
        t_wav = np.arange(nt_wav) * (dt / 1000)
        t_wav = np.concatenate((np.flipud(-t_wav[1:]), t_wav), axis=0)
        trt = signal.hanning(trc.size)
        trc = trt * trc  # apply taper to the trace
        # estimate wavelet spectrum
        wav_est_fft = np.abs(np.fft.fft(trc, nfft))
        fwest = np.fft.fftfreq(nfft, d=dt / 1000)

        # create wavelet in time
        wav = np.real(np.fft.ifft(wav_est_fft)[:nt_wav])
        wav = np.concatenate((np.flipud(wav), wav), axis=0)
        wav = wav / wav.max()
        wcenter = np.argmax(np.abs(wav))
        trt = signal.hanning(wav.size)
        wav = trt * wav
        wav = self.remove_dc(wav)
        # Apply taper to the final wavelet
        trt = signal.hanning(wav.size)
        wav = wav * trt

        return wav, fwest
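A self-contained sketch of the same statistical wavelet estimate, without the class plumbing; remove_dc is replaced by a plain mean subtraction and every name below is illustrative rather than part of the class above.

import numpy as np
from scipy.signal import windows

def estimate_wavelet(trc, dt_ms, nsamp, nfft=2**11):
    """Zero-phase wavelet from the amplitude spectrum of a tapered trace."""
    nt_wav = int(nsamp / 2)
    trc = trc * windows.hann(trc.size)            # taper the trace
    spec = np.abs(np.fft.fft(trc, nfft))          # amplitude spectrum
    freqs = np.fft.fftfreq(nfft, d=dt_ms / 1000)
    wav = np.real(np.fft.ifft(spec))[:nt_wav]     # back to time, one side
    wav = np.concatenate((wav[::-1], wav))        # make it symmetric (zero phase)
    wav = wav / wav.max()
    wav = wav * windows.hann(wav.size)            # taper the wavelet
    return wav - wav.mean(), freqs                # remove DC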
Example #2
def hanningWindow(insize):
    hannWinX = zeros((1, insize[1]))
    hannWinY = zeros((insize[0], 1))
    hannWinX[0, :] = hanning(insize[1], sym=True)
    hannWinY[:, 0] = hanning(insize[0], sym=True)
    hannWin2D = hannWinY.dot(hannWinX)
    return hannWin2D
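For reference, the 2-D taper above is just the outer product of two 1-D Hann windows; a minimal equivalent sketch using the non-deprecated scipy.signal.windows.hann:

import numpy as np
from scipy.signal import windows

rows, cols = 64, 128
win2d = np.outer(windows.hann(rows, sym=True), windows.hann(cols, sym=True))
print(win2d.shape)  # (64, 128), same result as hanningWindow((rows, cols))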
Example #3
def __do_sweep_windowing(inputSweep, timeVecSweep, freqLimits, freqMin,
                         freqMax, window):
    """
    Applies a fade in and fade out that are minimum at the chirp start and end,
    and maximum between the time intervals corresponding to Finf and Fsup.
    """

    # frequencies at time instants: freq(t)
    freqSweep = freqLimits['freqMin'] * (
        (freqLimits['freqMax'] / freqLimits['freqMin'])
        **(1 / max(timeVecSweep)))**timeVecSweep

    # exact sample where the chirp reaches freqMin [Hz]
    freqMinSample = np.where(freqSweep <= freqMin)
    freqMinSample = freqMinSample[-1][-1]

    # exact sample where the chirp reaches freqMax [Hz]
    freqMaxSample = np.where(freqSweep <= freqMax)
    freqMaxSample = len(freqSweep) - freqMaxSample[-1][-1]
    windowStart = ss.hanning(2 * freqMinSample)
    windowEnd = ss.hanning(2 * freqMaxSample)

    # Uses first half of windowStart, last half of windowEnd, and a vector of
    # ones with the remaining length, in between the half windows
    fullWindow = np.concatenate(
        (windowStart[0:freqMinSample],
         np.ones(int(len(freqSweep) - freqMinSample - freqMaxSample + 1)),
         windowEnd[freqMaxSample:-1]))
    newSweep = fullWindow * inputSweep
    return newSweep
def scaled_han(Ni,Nj):
    ''' Using Hanning window'''
    wdwi =  signal.hanning(Ni)
    wdwi =  (Ni/(wdwi**2).sum())*wdwi
    wdwj =  signal.hanning(Nj)
    wdwj =  (Nj/(wdwj**2).sum())*wdwj
    wdw = wdwi[np.newaxis,...]*wdwj[...,np.newaxis]
    return wdw
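Illustrative use of the scaled window to taper a 2-D field before an FFT, assuming numpy and scipy.signal are imported as np/signal as in the snippet; note the output shape is (Nj, Ni) because of the outer-product ordering.

import numpy as np

field = np.random.randn(128, 256)       # (rows, cols)
w2d = scaled_han(Ni=256, Nj=128)        # returns shape (Nj, Ni) = (128, 256)
spec = np.fft.fft2(field * w2d)         # taper, then 2-D FFT
print(w2d.shape, spec.shape)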
Example #5
 def __init__(self, sawtooth_scale=1.0, triangle_scale=3.2, noise_scale=3.1, epsilon=1e-8):
     # Until I can compute a legitimate prior, these will have to do.
     self.sawtooth_scale = sawtooth_scale
     self.triangle_scale = triangle_scale
     self.noise_scale = noise_scale
     self.noise_shape = signal.hanning(25) / np.sum(signal.hanning(25)) * self.noise_scale
     self.epsilon = epsilon
     self.meansq = self.epsilon
     self.smoothing = 0.5
Example #6
    def crop_heads(self, head_ms=1):
        """Crops heads of impulse responses

        Args:
            head_ms: Milliseconds of head room in the beginning before impulse response max which will not be cropped

        Returns:
            None
        """
        if self.fs != self.estimator.fs:
            raise ValueError(
                'Refusing to crop heads because HRIR sampling rate doesn\'t match impulse response '
                'estimator\'s sampling rate.')

        for speaker, pair in self.irs.items():
            # Peaks
            peak_left = pair['left'].peak_index()
            peak_right = pair['right'].peak_index()
            itd = np.abs(peak_left - peak_right) / self.fs

            # Speaker channel delay
            head = head_ms * self.fs // 1000
            delay = int(np.round(SPEAKER_DELAYS[speaker] *
                                 self.fs)) + head  # Channel delay in samples

            if peak_left < peak_right:
                # Delay to left ear is smaller, this must be a left side speaker
                if speaker[1] == 'R':
                    # Speaker name indicates this is right side speaker but delay to left ear is smaller than to right.
                    # There is something wrong with the measurement
                    warnings.warn(
                        f'Warning: {speaker} measurement has lower delay to left ear than to right ear. '
                        f'{speaker} should be at the right side of the head so the sound should arrive first '
                        f'in the right ear. This is usually a problem with the measurement process or the '
                        f'speaker order given is not correct. Detected delay difference is '
                        f'{itd * 1000:.4f} milliseconds.')
                # Crop out silence from the beginning, only required channel delay remains
                # Secondary ear has additional delay for inter aural time difference
                pair['left'].data = pair['left'].data[peak_left - delay:]
                pair['right'].data = pair['right'].data[peak_left - delay:]
            else:
                # Delay to right ear is smaller, this must be a right side speaker
                if speaker[1] == 'L':
                    # Speaker name indicates this is left side speaker but delay to right ear is smaller than to left.
                    # There is something wrong with the measurement
                    warnings.warn(
                        f'Warning: {speaker} measurement has lower delay to right ear than to left ear. '
                        f'{speaker} should be at the left side of the head so the sound should arrive first '
                        f'in the left ear. This is usually a problem with the measurement process or the '
                        f'speaker order given is not correct. Detected delay difference is '
                        f'{itd * 1000:.4f} milliseconds.')
                # Crop out silence from the beginning, only required channel delay remains
                # Secondary ear has additional delay for inter aural time difference
                pair['right'].data = pair['right'].data[peak_right - delay:]
                pair['left'].data = pair['left'].data[peak_right - delay:]

            # Make sure impulse response starts from silence
            window = signal.hanning(head * 2)[:head]
            pair['left'].data[:head] *= window
            pair['right'].data[:head] *= window
Example #7
    def fft(self, segment_duration_sec, bandwidth=None, step=.5,
            **kwargs):

        # Time
        len_seg = int(segment_duration_sec * self[0].stats.sampling_rate)
        len_step = int(np.floor(len_seg * step))
        times = self.times[:1 - len_seg:len_step]
        n_times = len(times)

        # Frequency
        kwargs.setdefault('n', 2 * len_seg - 1)
        n_frequencies = kwargs['n']
        frequencies = np.linspace(
            0, self[0].stats.sampling_rate, n_frequencies)

        # Calculate spectra
        spectra_shape = len(self), n_times, n_frequencies
        spectra = np.zeros(spectra_shape, dtype=complex)
        waitbar = logtable.waitbar('Spectra', len(self))
        for trace_id, trace in enumerate(self):
            waitbar.progress(trace_id)
            tr = trace.data
            for time_id in range(n_times):
                start = time_id * len_step
                end = start + len_seg
                segment = tr[start:end] * hanning(len_seg)
                spectra[trace_id, time_id] = np.fft.fft(segment, **kwargs)

        # Times are extended with last time of traces
        t_end = self.times[-1]
        times = np.hstack((times, t_end))

        return times, frequencies, spectra
Example #8
 def test_window_derivative(self):
     """Test if the derivative of a window function is calculated
     properly."""
     window = hanning(210)
     derivative = derive_window(window)
     ix_win_maxima = np.argmax(window)
     self.assertAlmostEqual(derivative[ix_win_maxima], 0.0, places=3)
Example #9
def main(fn, start, end):
    fn = Path(fn).expanduser()
    # rx_array is loading the last 45% of the waveform from the file
    rx_array = load_bin(fn, start, end)
    # peak_array holds the indexes of each peak in the waveform
    # peak_distance is the smallest distance between each peak
    peak_array, peak_distance = get_peaks(rx_array)
    l = peak_distance - 1
    print("using window: ", l, "\n")
    # remove first peak
    peak_array = peak_array[1:]
    Npulse = len(peak_array) - 1
    print(Npulse, "pulses detected")
    wind = signal.hanning(l)
    Ntone = 2
    Nblockest = 160
    fs = 4e6  # [Hz]
    data = np.empty([Npulse, l])
    # set each row of data to window * (first l samples after each peak)
    for i in range(Npulse):
        data[i, :] = wind * rx_array[peak_array[i]:peak_array[i] + l]

    fb_est, sigma = esprit(data, Ntone, Nblockest, fs)
    print("fb_est", fb_est)
    print("sigma: ", sigma)
    drange = (3e8 * fb_est) / (2e6 / 0.1)
    print("range: ", drange, "\n")
def stochasticModelSynth(stocEnv, H, N):
	"""
	Stochastic synthesis of a sound
	stocEnv: stochastic envelope; H: hop size; N: fft size
	returns y: output sound
	"""

	if not(UF.isPower2(N)):                                 	# raise error if N not a power of two
		raise ValueError("N is not a power of two")
 
	hN = N//2+1                                           		# positive size of fft
	No2 = N//2							# half of N
	L = stocEnv[:,0].size                                    	# number of frames
	ysize = H*(L+3)                                         	# output sound size
	y = np.zeros(ysize)                                     	# initialize output array
	ws = 2*hanning(N)                                        	# synthesis window
	pout = 0                                                 	# output sound pointer
	for l in range(L):                    
		mY = resample(stocEnv[l,:], hN)                        # interpolate to original size
		pY = 2*np.pi*np.random.rand(hN)                        # generate phase random values
		Y = np.zeros(N, dtype = complex)                       # initialize synthesis spectrum
		Y[:hN] = 10**(mY/20) * np.exp(1j*pY)                   # generate positive freq.
		Y[hN:] = 10**(mY[-2:0:-1]/20) * np.exp(-1j*pY[-2:0:-1]) # generate negative freq.
		fftbuffer = np.real(ifft(Y))                           # inverse FFT
		y[pout:pout+N] += ws*fftbuffer                         # overlap-add
		pout += H  
	y = np.delete(y, range(No2))                              # delete half of first window
	y = np.delete(y, range(y.size-No2, y.size))               # delete half of the last window 
	return y
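Illustrative call, assuming the sms-tools helper UF.isPower2 and the hanning/resample/ifft names the function relies on are importable; the envelope rows here are a made-up flat -60 dB spectrum.

import numpy as np

H, N = 256, 512
stocEnv = -60 * np.ones((100, N // 2 + 1))   # 100 frames of a flat -60 dB envelope
y = stochasticModelSynth(stocEnv, H, N)      # noise shaped by that envelope
print(y.size)                                # H * (100 + 3) - N samples after trimming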
Example #11
def axes_fft(block):
    w = hanning(len(block))
    if len(block.shape) == 2:
        ax = range(block.shape[1])
        pairs = set([
            tuple(sort(z)) for z in [[x, y] for x in ax for y in ax if x != y]
        ])

        normalize = lambda x: ((x - x.mean()) / sqrt(average(
            (x - x.mean())**2)))

        features = []
        for p in pairs:
            a = block[:, p[0]]
            b = block[:, p[1]]
            # a = normalize(a)
            # b = normalize(b)
            a = abs(fft(a * w))[:block.shape[0] // 2]
            b = abs(fft(b * w))[:block.shape[0] // 2]
            c = log10(1 + a * b)
            features.append(c)

        v = sum(features, axis=0) / len(features)

        return v - v.mean()
Example #12
def nlfer(signal, pitch, parameters):

    #---------------------------------------------------------------
    # Set parameters.
    #---------------------------------------------------------------
    N_f0_min = int(np.around((parameters['f0_min']*2/float(signal.new_fs))*pitch.nfft))
    N_f0_max = int(np.around((parameters['f0_max']/float(signal.new_fs))*pitch.nfft))

    window = hanning(pitch.frame_size+2)[1:-1]
    data = np.zeros((signal.size))  #Needs other array, otherwise stride and
    data[:] = signal.filtered     #windowing will modify signal.filtered

    #---------------------------------------------------------------
    # Main routine.
    #---------------------------------------------------------------
    samples = np.arange(int(np.fix(float(pitch.frame_size)/2)),
                        signal.size-int(np.fix(float(pitch.frame_size)/2)),
                        pitch.frame_jump)

    data_matrix = np.empty((len(samples), pitch.frame_size))
    data_matrix[:, :] = stride_matrix(data, len(samples),
                                    pitch.frame_size, pitch.frame_jump)
    data_matrix *= window

    specData = np.fft.rfft(data_matrix, pitch.nfft)

    frame_energy = np.abs(specData[:, N_f0_min-1:N_f0_max]).sum(axis=1)
    pitch.set_energy(frame_energy, parameters['nlfer_thresh1'])
    pitch.set_frames_pos(samples)
Example #13
def to_audio(stocEnv, H, N):
    """
    Synthesizes sound from a stochastic model.

    :param stocEnv: stochastic envelope
    :param H: hop size
    :param N: fft size
    :returns: y: output sound
    """

    if not (is_power_of_two(N)):  # raise error if N not a power of two
        raise ValueError("N is not a power of two")

    hN = N // 2 + 1  # positive size of fft
    No2 = N // 2  # half of N
    L = stocEnv.shape[0]  # number of frames
    ysize = H * (L + 3)  # output sound size
    y = np.zeros(ysize)  # initialize output array
    ws = 2 * hanning(N)  # synthesis window
    pout = 0  # output sound pointer
    for l in range(L):
        mY = resample(stocEnv[l, :], hN)  # interpolate to original size
        pY = 2 * np.pi * np.random.rand(hN)  # generate phase random values
        Y = np.zeros(N, dtype=complex)  # initialize synthesis spectrum
        Y[:hN] = from_db_magnitudes(mY) * np.exp(
            1j * pY)  # generate positive freq.
        Y[hN:] = from_db_magnitudes(mY[-2:0:-1]) * np.exp(
            -1j * pY[-2:0:-1])  # generate negative freq.
        fftbuffer = np.real(ifft(Y))  # inverse FFT
        y[pout:pout + N] += ws * fftbuffer  # overlap-add
        pout += H
    y = np.delete(y, range(No2))  # delete half of first window
    y = np.delete(y, range(y.size - No2,
                           y.size))  # delete half of the last window
    return y
Example #14
def peak_freq(data, window=256, fs=400, overlap=0., ignore_dropped=False,
               frequencies=[6, 20]):

    nChan, nSamples = data.shape
    noverlap = int(overlap * window)
    windowVals = hanning(window)

    # get the corresponding indices for custom frequencies
    freqs = np.fft.fftfreq(window, d=1./fs)[:window//2]
    idx_freqs = []
    idx_freqs.append((freqs < frequencies[0]) | (freqs > frequencies[1]))

    ind = list(range(0, nSamples - window + 1, window-noverlap))

    numSlices = len(ind)
    slices = range(numSlices)

    Slices = []
    for iSlice in slices:
        thisSlice = data[:, ind[iSlice]:ind[iSlice] + window]
        if np.sum(np.sum(thisSlice**2, axis=0)>0):
            freqs, thisfft = welch(thisSlice, fs=400, nfft=window//2)
            Slices.append(thisfft.T)
    if len(Slices) > 0:
        Slices = np.array(Slices)
        a = find_peak(Slices, freqs, order=5, max_peak=3)
    else:
        a = np.nan
    return a
Example #15
def get_3dim_spectrum(wav_name, channel_vec, start_point, stop_point, frame, shift, fftl):
    """
    dump_wav : channel_size * speech_size (2dim)
    """
    samples, _ = sf.read(wav_name.replace('{}', str(channel_vec[0])), start=start_point, stop=stop_point, dtype='float32')
    if len(samples) == 0:
        return None,None
    dump_wav = np.zeros((len(channel_vec), len(samples)), dtype=np.float16)
    dump_wav[0, :] = samples.T
    for ii in range(0,len(channel_vec) - 1):
        samples,_ = sf.read(wav_name.replace('{}', str(channel_vec[ii +1 ])), start=start_point, stop=stop_point, dtype='float32')
        dump_wav[ii + 1, :] = samples.T    

    dump_wav = dump_wav / np.max(np.abs(dump_wav)) * 0.7
    window = sg.hanning(fftl + 1, 'periodic')[: - 1]
    multi_window = npm.repmat(window, len(channel_vec), 1)    
    st = 0
    ed = frame
    number_of_frame = int((len(samples) - frame) / shift)
    spectrums = np.zeros((len(channel_vec), number_of_frame, int(fftl / 2) + 1), dtype=np.complex64)
    for ii in range(0, number_of_frame):
        multi_signal_spectrum = fft(dump_wav[:, st:ed], n=fftl, axis=1)[:, 0:int(fftl / 2) + 1] # channel * number_of_bin
        spectrums[:, ii, :] = multi_signal_spectrum
        st = st + shift
        ed = ed + shift
    return spectrums, len(samples)
Example #16
    def crop_tails(self):
        """Crops out tails after every impulse response has decayed to noise floor."""
        if self.fs != self.estimator.fs:
            raise ValueError(
                'Refusing to crop tails because HRIR\'s sampling rate doesn\'t match impulse response '
                'estimator\'s sampling rate.')
        # Find indices after which there is only noise in each track
        tail_indices = []
        lengths = []
        for speaker, pair in self.irs.items():
            for side, ir in pair.items():
                _, tail_ind, _, _ = ir.decay_params()
                tail_indices.append(tail_ind)
                lengths.append(len(ir))

        # Crop all tracks by last tail index
        seconds_per_octave = len(
            self.estimator) / self.estimator.fs / self.estimator.n_octaves
        fade_out = 2 * int(self.fs * seconds_per_octave *
                           (1 / 24))  # Duration of 1/24 octave in the sweep
        window = signal.hanning(fade_out)[fade_out // 2:]
        fft_len = fftpack.next_fast_len(max(tail_indices))
        tail_ind = min(np.min(lengths), fft_len)
        for speaker, pair in self.irs.items():
            for ir in pair.values():
                ir.data = ir.data[:tail_ind]
                ir.data *= np.concatenate(
                    [np.ones(len(ir.data) - len(window)), window])
Example #17
def take_lomb_spec(time,
                   var,
                   w=False,
                   key_periods=[
                       1. / 4., 1. / 3., 1. / 2., 1., 365.25 / 4., 365.25 / 3.,
                       365.25 / 2., 365.25
                   ]):
    if w == True:
        window = signal.hanning(len(var))
        var_mean = np.mean(var)
        var = var - var_mean
        var = var * window
        amp_corr = 1. / (sum(window) / len(window))

    freqs = 1. / np.array(key_periods)

    #take lomb
    fb, mag, ph, fr, fi = lomb_phase_spec.lomb(time, var, freqs)

    if w == True:
        mag = mag * amp_corr

    periods = 1. / freqs

    #CORRECT IMAGINARY COMPONENTS TO FFT EQUIVALENTS
    for i in range(len(fi)):
        if fi[i] < 0:
            fi[i] = fi[i] * -1
        elif fi[i] > 0:
            fi[i] = -fi[i]

    return periods, mag, ph, fr, fi
Example #18
def phase_vocoder(mono, sr, N=2048, tscale=1.0):
    L, H = len(mono), N // 4
    # signal blocks for processing and output
    phi = np.zeros(N)
    out = np.zeros(N, dtype=complex)
    sigout = np.zeros(int(L / tscale) + N)
    # max input amp, window
    amp = max(mono)
    win = sps.hanning(N)
    p = 0
    pp = 0
    while p < L - (N + H):
        if p % 1024 == 0: print('.', end='')
        # take the spectra of two consecutive windows
        p1 = int(p)
        spec1 = np.fft.fft(win * mono[p1:p1 + N])
        spec2 = np.fft.fft(win * mono[p1 + H:p1 + N + H])
        # take their phase difference and integrate
        phi += (np.angle(spec2) - np.angle(spec1))
        # bring the phase back to between pi and -pi
        phi = (phi + np.pi) % (2 * np.pi) - np.pi
        out.real, out.imag = np.cos(phi), np.sin(phi)
        # inverse FFT and overlap-add
        sigout[pp:pp + N] += (win * np.fft.ifft(abs(spec2) * out)).real
        pp += H
        p += H * tscale
    print('')
    return np.array(amp * sigout / max(sigout), dtype='int16')
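Illustrative time-stretch of a synthetic tone, assuming sps is scipy.signal as in the snippet's import alias; tscale below 1 lengthens the output.

import numpy as np

sr = 22050
t = np.arange(sr) / sr
tone = (10000 * np.sin(2 * np.pi * 440 * t)).astype(np.int16)
stretched = phase_vocoder(tone, sr, N=2048, tscale=0.8)
print(len(tone), len(stretched))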
def linsmoothm(x, n):
    le = len(x)
    x = x.reshape((le))
    w = signal.hanning(n)
    w = w / np.sum(w)
    y = np.zeros((le))
    if np.mod(n, 2) == 0:
        l = int(n / 2)
        x = [np.ones(l) * x[0], x, np.ones(l) * x[le - 1]]
        temp = []
        for sublist in x:
            for item in sublist:
                temp.append(item)
        x = np.array(temp)

    else:
        l = int((n - 1) / 2)
        x = [np.ones(l) * x[0], x, np.ones(l + 1) * x[le - 1]]
        temp = []
        for sublist in x:
            for item in sublist:
                temp.append(item)
        x = np.array(temp)

    for k in range(0, le):
        y[k] = np.dot(w, x[k:k + n])
    y = y.flatten()

    return y
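Quick check of the smoother, assuming numpy and scipy.signal are imported as np/signal as above; the output keeps the input length.

import numpy as np

x = np.random.randn(200)
y = linsmoothm(x, 5)        # 5-point Hann-weighted moving average
print(x.shape, y.shape)     # both (200,)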
Example #20
def phase_vocoder(mono, sr, N=2048, tscale= 1.0):
    L, H = len(mono), N // 4
    # signal blocks for processing and output
    phi  = np.zeros(N)
    out = np.zeros(N, dtype=complex)
    sigout = np.zeros(int(L / tscale) + N)
    # max input amp, window
    amp = max(mono)
    win = sps.hanning(N)
    p = 0
    pp = 0    
    while p < L-(N+H):
        if p % 1024 == 0: print('.', end='')
        # take the spectra of two consecutive windows
        p1 = int(p)
        spec1 =  np.fft.fft(win*mono[p1:p1+N])
        spec2 =  np.fft.fft(win*mono[p1+H:p1+N+H])
        # take their phase difference and integrate
        phi += (np.angle(spec2) - np.angle(spec1))
        # bring the phase back to between pi and -pi
        phi = (phi + np.pi) % (2 * np.pi) - np.pi
        out.real, out.imag = np.cos(phi), np.sin(phi)
        # inverse FFT and overlap-add
        sigout[pp:pp+N] += (win*np.fft.ifft(abs(spec2)*out)).real
        pp += H
        p += H*tscale
    print('')
    return np.array(amp*sigout/max(sigout), dtype='int16')
Example #21
def get_seeds_signals(fs: int, fft_size: int = None, noise_length: int = None):
    if fft_size is None:
        fft_size = int(1024 * (2**np.ceil(np.log2(fs / 48000))))
    if noise_length is None:
        noise_length = int(2**np.ceil(np.log2(fs / 2)))
    w = np.arange(fft_size // 2 + 1) * fs / fft_size
    frequency_interval = 3000
    frequency_range = frequency_interval * 2
    upper_limit = 15000
    number_of_aperiodicities = int(2 + np.floor(
        min(upper_limit, fs / 2 - frequency_interval) / frequency_interval))

    pulse = np.zeros((fft_size, number_of_aperiodicities))
    noise = np.zeros((noise_length, number_of_aperiodicities))

    modified_velvet_noise = generate_modified_velvet_noise(noise_length, fs)
    spec_n = fft(modified_velvet_noise, noise_length)

    # Excitation signals in vocal cord vibrations and aperiodic noise were generated

    for i in range(number_of_aperiodicities):
        spec = 0.5 + 0.5 * np.cos(
            ((w - (frequency_interval * i)) / frequency_range) * 2 * np.pi)
        spec[w > (frequency_interval * (i + 1))] = 0
        spec[w < (frequency_interval * (i - 1))] = 0
        if i == number_of_aperiodicities - 1:
            spec[w > (frequency_interval * i)] = 1
        pulse[:, i] = fftshift(ifft(np.r_[spec, spec[-2:0:-1]]).real)
        noise[:, i] = ifft(spec_n * fft(pulse[:, i], noise_length)).real
    h = hanning(fft_size + 2)[1:-1]
    pulse[:, 0] = pulse[:, 0] - np.mean(pulse[:, 0]) * h / np.mean(h)
    return {'pulse': pulse, 'noise': noise}
Example #22
def stochasticAnal(x, H, N, sf):

    hN = N // 2 + 1
    No2 = N // 2
    if (hN * sf < 3):
        raise ValueError("The stochastic decimation factor is too small")

    if (sf > 1):
        raise ValueError("The stochastic decimation factor is above 1")

    if (H <= 0):
        raise ValueError("The hop size is smaller or equal to 0")

    if not (U.isPow2(N)):
        raise ValueError("The FFT size is not a power of 2")

    w = hanning(N)
    x = np.append(np.zeros(No2), x)
    x = np.append(x, np.zeros(No2))
    begin = No2
    end = x.size - No2
    while begin <= end:
        xw = x[begin - No2:begin + No2] * w
        X = fft(xw)
        absX = abs(X[:hN])
        absX[absX < np.finfo(float).eps] = np.finfo(float).eps
        mX = 20 * np.log10(absX)
        mY = resample(np.maximum(-200, mX), int(sf * hN))
        if begin == No2:
            stocEnv = np.array([mY])
        else:
            stocEnv = np.vstack((stocEnv, np.array([mY])))
        begin += H
    return stocEnv
def stochasticModelSynth(stocEnv, H, N):
    """
	Stochastic synthesis of a sound
	stocEnv: stochastic envelope; H: hop size; N: fft size
	returns y: output sound
	"""

    if not (UF.isPower2(N)):  # raise error if N not a power of two
        raise ValueError("N is not a power of two")

    hN = N // 2 + 1  # positive size of fft
    No2 = N // 2  # half of N
    L = stocEnv[:, 0].size  # number of frames
    ysize = H * (L + 3)  # output sound size
    y = np.zeros(ysize)  # initialize output array
    ws = 2 * hanning(N)  # synthesis window
    pout = 0  # output sound pointer
    for l in range(L):
        mY = resample(stocEnv[l, :], hN)  # interpolate to original size
        pY = 2 * np.pi * np.random.rand(hN)  # generate phase random values
        Y = np.zeros(N, dtype=complex)  # initialize synthesis spectrum
        Y[:hN] = 10**(mY / 20) * np.exp(1j * pY)  # generate positive freq.
        Y[hN:] = 10**(mY[-2:0:-1] / 20) * np.exp(
            -1j * pY[-2:0:-1])  # generate negative freq.
        fftbuffer = np.real(ifft(Y))  # inverse FFT
        y[pout:pout + N] += ws * fftbuffer  # overlap-add
        pout += H
    y = np.delete(y, range(No2))  # delete half of first window
    y = np.delete(y, range(y.size - No2,
                           y.size))  # delete half of the last window
    return y
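An illustrative analysis/resynthesis round trip with the two functions above, assuming the sms-tools helpers (U.isPow2 / UF.isPower2) and the hanning, fft, ifft and resample names resolve at module level.

import numpy as np

fs, H, N, sf = 44100, 128, 256, 0.2
x = np.random.randn(fs)                 # one second of noise
env = stochasticAnal(x, H, N, sf)       # frames of decimated dB envelopes
y = stochasticModelSynth(env, H, N)     # noise resynthesized from the envelopes
print(env.shape, y.size)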
Example #24
    def finalize(self):
        discard = self.get_current_value('discard')
        smoothing_window = self.get_current_value('smoothing_window')
        exp_mic_gain = dbi(self.get_current_value('exp_mic_gain'))
        waveform_averages = self.get_current_value('waveform_averages')
        results = self.iface.process(waveform_averages=waveform_averages,
                                     input_gains=exp_mic_gain, discard=discard)

        exp_mic_waveform = results['mic_waveforms'].mean(axis=0)[0]
        exp_mic_psd = db(results['tf'])[0]
        if smoothing_window > 0:
            w = signal.hanning(smoothing_window)
            w /= w.sum()
            exp_mic_psd = np.convolve(exp_mic_psd, w, mode='same')

        speaker_spl = self.calibration.get_spl(results['mic_frequency'],
                                               results['tf'][0])

        results['exp_mic_waveform'] = exp_mic_waveform
        results['exp_mic_psd'] = exp_mic_psd
        results['frequency'] = results['mic_frequency']
        results['speaker_spl'] = speaker_spl

        self.model.update_plots(results, freq_lb=500, freq_ub=50e3)
        self.results = results
        self.result_settings = dict(self.model.paradigm.items())
        self.complete = True
def stochasticModel(x, H, stocf):
	# stochastic analysis/synthesis of a sound, one frame at a time
	# x: input array sound, H: hop size, 
	# stocf: decimation factor of mag spectrum for stochastic analysis
	# returns y: output sound
	N = H*2                                                  # FFT size
	w = hanning(N)                                           # analysis/synthesis window
	x = np.append(np.zeros(H),x)                             # add zeros at beginning to center first window at sample 0
	x = np.append(x,np.zeros(H))                             # add zeros at the end to analyze last sample
	pin = 0                                                  # initialize sound pointer in middle of analysis window       
	pend = x.size-N                                          # last sample to start a frame
	y = np.zeros(x.size)                                     # initialize output array
	while pin<=pend:              
	#-----analysis-----             
		xw = x[pin:pin+N]*w                                    # window the input sound
		X = fft(xw)                                            # compute FFT
		mX = 20 * np.log10(abs(X[:H]))                         # magnitude spectrum of positive frequencies
		mYst = resample(np.maximum(-200, mX), int(mX.size*stocf))   # decimate the mag spectrum
	#-----synthesis-----
		mY = resample(mYst, H)                                 # interpolate to original size
		pY = 2*np.pi*np.random.rand(H)                         # generate phase random values
		Y = np.zeros(N, dtype = complex)
		Y[:H] = 10**(mY/20) * np.exp(1j*pY)                    # generate positive freq.
		Y[H+1:] = 10**(mY[:0:-1]/20) * np.exp(-1j*pY[:0:-1])   # generate negative freq.
		fftbuffer = np.real(ifft(Y))                           # inverse FFT
		y[pin:pin+N] += w*fftbuffer                            # overlap-add
		pin += H  
	y = np.delete(y, range(H))                               # delete half of first window which was added 
	y = np.delete(y, range(y.size-H, y.size))                # delete half of last window which was added
	return y
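Illustrative call on noise, assuming hanning, fft/ifft and resample are imported at module level as the snippet expects; the output keeps the input length once the padded halves are trimmed.

import numpy as np

x = np.random.randn(44100)          # one second of noise at 44.1 kHz
y = stochasticModel(x, H=256, stocf=0.2)
print(x.size, y.size)               # same length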
Example #26
 def test_window_derivative(self):
     """Test if the derivative of a window function is calculated
     properly."""
     window = hanning(210)
     derivative = derive_window(window)
     ix_win_maxima = np.argmax(window)
     self.assertAlmostEqual(derivative[ix_win_maxima], 0.0, places=3)
def get_waveform(excitation_signal, spectrogram, temporal_positions, f0, fs):
    y = np.zeros(len(excitation_signal))
    fft_size = (spectrogram.shape[0] - 1) * 2
    latter_index = np.arange(int(fft_size // 2 + 1), fft_size + 1)
    frame_period_sample = int(
        (temporal_positions[1] - temporal_positions[0]) * fs)
    win_len = frame_period_sample * 2 - 1
    half_win_len = frame_period_sample - 1
    win = hanning(win_len + 2)[1:-1]

    for i in range(2, len(f0) - 1):
        origin = (i - 1) * frame_period_sample - half_win_len
        safe_index = np.minimum(len(y), np.arange(origin, origin + win_len))

        tmp = excitation_signal[safe_index - 1] * win
        spec = spectrogram[:, i - 1]
        periodic_spectrum = np.r_[spec, spec[-2:0:-1]]

        tmp_cepstrum = np.fft.fft(np.log(np.abs(periodic_spectrum)) / 2).real
        tmp_complex_cepstrum = np.zeros(fft_size)
        tmp_complex_cepstrum[latter_index.astype(int) -
                             1] = tmp_cepstrum[latter_index.astype(int) -
                                               1] * 2
        tmp_complex_cepstrum[0] = tmp_cepstrum[0]

        spectrum = np.exp(np.fft.ifft(tmp_complex_cepstrum))
        response = ifft(spectrum * fft(tmp, fft_size)).real

        safe_index = np.minimum(len(y), np.arange(origin, origin + fft_size))
        y[safe_index - 1] += response
    return y
Example #28
def main():
    argvs = sys.argv
    argc = len(argvs)

    if argc < 2:
        sys.exit('Usage: python {} [wav file]'.format(argvs[0]))

    fft_width = 1024
    fft_olap = fft_width * 3 // 4

    wave_filepath = argvs[1]
    wav = read(wave_filepath)

    spec = spectrogram(wav[1],
                       wav[0],
                       window=hanning(fft_width),
                       noverlap=fft_olap,
                       mode='magnitude')

    plt.subplot(2, 1, 1)
    plt.title('original', fontsize=10)
    plt.pcolormesh(spec[1], spec[0], np.log10(spec[2] + 1e-2))

    H, U = nmf(spec[2], 50, 100)
    Y = H @ U

    plt.subplot(2, 1, 2)
    plt.title('reconstructed', fontsize=10)
    plt.pcolormesh(spec[1], spec[0], np.log10(Y + 1e-2))

    plt.tight_layout()
    plt.show()
Example #29
def td_dft(dat, winlen=4096, winoverlap=2048, dftsize=4096, winty='blackman'):
    # calculate the views
    views = view_as_windows(dat,winlen,winlen-winoverlap)

    # generate desired window
    if winty == 'rect':
        win = np.ones(winlen)
    elif winty == 'bartlett':
        win = bartlett(winlen)
    elif winty == 'hann':
        win = hanning(winlen)
    elif winty == 'hamming':
        win = hamming(winlen)
    elif winty == 'blackman':
        win = blackman(winlen)
    else:
        assert False # invalid winty


    # apply window sequence to views
    views = [ v*win for v in views ]

    # computes time aliasing to input sequences if needed
    if winlen > dftsize:
        views = [ time_alias(v,dftsize) for v in views ]

    # apply fft and fftshift to all views
    dfts = [ fftshift(fft(v,dftsize)) for v in views ]

    return np.array(dfts)
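A quick call on a noisy tone, assuming view_as_windows (skimage.util) and the window/FFT names used above are imported at module level.

import numpy as np

fs = 8000
t = np.arange(4 * fs) / fs
sig = np.sin(2 * np.pi * 1000 * t) + 0.1 * np.random.randn(t.size)
dfts = td_dft(sig, winlen=1024, winoverlap=512, dftsize=1024, winty='hann')
print(dfts.shape)   # (number of windows, 1024)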
Example #30
def win_sel(win_str, win_size):
    """
        Function returns a window vector based on window name.
        Note class can only use windows found in scipy.signal library.
    """
    overlap = 0
    if (win_str == 'blackmanharris'):
        win = sig.blackmanharris(win_size)
        overlap = .75
    elif (win_str == 'blackman'):
        win = sig.blackman(win_size)
    elif (win_str == 'bartlett'):
        win = sig.bartlett(win_size)
    elif (win_str == 'hamming'):
        win = sig.hamming(win_size)
    elif (win_str == 'hanning'):
        win = sig.hanning(win_size)
    elif (win_str == 'hann'):
        win = sig.hann(win_size)
    elif (win_str == 'barthann'):
        win = sig.barthann(win_size)
    elif (win_str == 'triang'):
        win = sig.triang(win_size)
    elif (win_str == 'rect' or win_str is None):
        win = np.ones(win_size)
    else:
        print('Invalid Window Defined')
        return -1
    return win, overlap
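Typical use, assuming sig is scipy.signal as in the snippet; the second return value is the suggested overlap fraction (only set for blackmanharris here).

win, overlap = win_sel('blackmanharris', 512)
print(win.shape, overlap)   # (512,) 0.75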
Example #31
def stft(x, fs, framesz, hop):
  framesamp = int(framesz*fs)
  hopsamp = int(hop*fs)
  w = hanning(framesamp)
  X = np.array([np.fft.fft(w*x[i:i+framesamp]) 
  for i in range(0, len(x)-framesamp, hopsamp)])
  return X
def stochasticModel(x, w, N, stocf):
    # x: input array sound, w: analysis window, N: FFT size,
    # stocf: decimation factor of mag spectrum for stochastic analysis
    # y: output sound

    hN = N // 2  # size of positive spectrum
    hM = w.size // 2  # half analysis window size
    pin = hM  # initialize sound pointer in middle of analysis window
    fftbuffer = np.zeros(N)  # initialize buffer for FFT
    yw = np.zeros(w.size)  # initialize output sound frame
    w = w / sum(w)  # normalize analysis window
    ws = hanning(w.size) * 2  # synthesis window

    # -----analysis-----
    xw = x[pin - hM : pin + hM] * w  # window the input sound
    X = fft(xw)  # compute FFT
    mX = 20 * np.log10(abs(X[:hN]))  # magnitude spectrum of positive frequencies
    mXenv = resample(np.maximum(-200, mX), int(mX.size * stocf))  # decimate the mag spectrum
    pX = np.angle(X[:hN])
    # -----synthesis-----
    mY = resample(mXenv, hN)  # interpolate to original size
    pY = 2 * np.pi * np.random.rand(hN)  # generate phase random values
    Y = np.zeros(N, dtype=complex)
    Y[:hN] = 10 ** (mY / 20) * np.exp(1j * pY)  # generate positive freq.
    Y[hN + 1 :] = 10 ** (mY[:0:-1] / 20) * np.exp(-1j * pY[:0:-1])  # generate negative freq.

    fftbuffer = np.real(ifft(Y))  # inverse FFT
    y = ws * fftbuffer * N / 2  # overlap-add

    return mX, pX, mY, pY, y
Example #33
def _lagged_coherence_1freq(x, f, Fs, N_cycles=3, f_step=1):
    """Calculate lagged coherence of x at frequency f using the hanning-taper FFT method"""

    # Determine number of samples to be used in each window to compute lagged coherence
    Nsamp = int(np.ceil(N_cycles * Fs / f))

    # For each N-cycle chunk, calculate the fourier coefficient at the frequency of interest, f
    chunks = _nonoverlapping_chunks(x, Nsamp)
    C = len(chunks)
    hann_window = signal.hanning(Nsamp)
    fourier_f = np.fft.fftfreq(Nsamp, 1 / float(Fs))
    fourier_f_idx = np.argmin(np.abs(fourier_f - f))
    fourier_coefsoi = np.zeros(C, dtype=complex)
    for i2, c in enumerate(chunks):
        fourier_coef = np.fft.fft(c * hann_window)
        fourier_coefsoi[i2] = fourier_coef[fourier_f_idx]

    # Compute the lagged coherence value
    lcs_num = 0
    for i2 in range(C - 1):
        lcs_num += fourier_coefsoi[i2] * np.conj(fourier_coefsoi[i2 + 1])
    lcs_denom = np.sqrt(
        np.sum(np.abs(fourier_coefsoi[:-1])**2) *
        np.sum(np.abs(fourier_coefsoi[1:])**2))
    return np.abs(lcs_num / lcs_denom)
Example #34
def main(fn,start,end):
  fn = Path(fn).expanduser()
  #rx_array is loading the last 45% of the waveform from the file
  rx_array = load_bin(fn, start, end)
  #peak_array holds the indexes of each peak in the waveform
  #peak_distance is the smallest distance between each peak
  peak_array,peak_distance = get_peaks(rx_array)
  l = peak_distance-1
  print('using window: ',l,'\n')
  #remove first peak
  peak_array= peak_array[1:]
  Npulse=len(peak_array)-1
  print(Npulse,'pulses detected')
  wind = signal.hanning(l)
  Ntone = 2
  Nblockest = 160
  fs = 4e6  # [Hz]
  data = np.empty([Npulse,l])
  #set each row of data to window * (first l samples after each peak)
  for i in range(Npulse):
    data[i,:] = wind * rx_array[peak_array[i]:peak_array[i]+l]

  fb_est, sigma = esprit(data, Ntone, Nblockest, fs)
  print ('fb_est',fb_est)
  print ('sigma: ', sigma)
  drange = (3e8*fb_est) /  (2e6/.1)
  print ('range: ',drange,'\n')
    def enframe(self, datas, fs, frame_len, frame_inc, win):
        '''
        ' datas: speech samples
        ' fs: sampling rate
        ' frame_len: frame length in seconds
        ' frame_inc: frame shift in seconds
        ' win: window function
        '''
        datas_len = len(datas)   # total number of samples
        frame_len = int(round(frame_len * fs))   # frame length in samples
        nstep = frame_len - int(round(frame_inc * fs))   # frame step in samples

        if datas_len < frame_len: # if the signal is shorter than one frame, use a single frame
            nf = 1
        else:
            nf = int(np.ceil((1.0*datas_len-frame_len)/nstep)) + 1

        pad_len = int((nf-1)*nstep + frame_len)    # total length covered by all frames
        # pad the extra samples with zeros
        new_datas = np.concatenate((datas, np.zeros(pad_len - datas_len)))

        indices = np.tile(np.arange(0,frame_len),(nf,1))+np.tile(np.arange(0,nf*nstep,nstep),(frame_len,1)).T
        indices = np.array(indices, dtype = np.int32) # cast to int to avoid an indexing type error

        frames = new_datas[indices] # slice out the frames

        # apply the window
        if win == 'hamming':
            win = signal.hamming(frame_len)
        elif win == 'hanning':
            win = signal.hanning(frame_len)
        else:
            win = signal.boxcar(frame_len)

        return frames * np.tile(win, (nf, 1))
Example #36
def hfa_filter(t, x, l=60, cutoff=0.3):
    """
    Filtrates signal for HFA using FIR filter

    :param t: Time sequence (sec)
    :type t: numpy.ndarray

    :param x: Sample sequence
    :type x: numpy.ndarray

    :param l: Length of operator
    :type l: float (sec)

    :param cutoff: Bound (Hz)
    :type cutoff: float

    :returns: numpy.ndarray

    """
    dt = t[1] - t[0]
    h = hanning(int(2 * l / dt))
    half = len(h) // 2
    x[t < l] *= h[:half]
    x[t > (t[-1] - l)] *= h[half:]
    taps = firwin(int(l / dt) + 1, cutoff, pass_zero=False, nyq=1 / dt / 2)
    xf = np.convolve(x, taps, mode='same')
    return (t, xf)
Example #37
def stft(x, chunk_size, hop, w=None):
    """
    Takes the short time fourier transform of x.

    Args:
      x: samples to window and transform.
      chunk_size: size of analysis window.
      hop: hop distance between analysis windows
      w: windowing function to apply. Must be of length chunk_size

    Returns:
      STFT of x (X(t, omega)) hop size apart with windows of size chunk_size.

    Raises:
      ValueError if window w is not of size chunk_size
    """
    if w is None:
        w = hanning(chunk_size)
    else:
        if len(w) != chunk_size:
            raise ValueError(
                "window w is not of the correct length {0}.".format(
                    chunk_size))
    X = array([
        fft(w * x[i:i + chunk_size]) for i in range(0,
                                                    len(x) - chunk_size, hop)
    ]) / np.sqrt(((float(chunk_size) / float(hop)) / 2.0))
    return X
Example #38
def istft(X, chunk_size, hop, w=None):
    """
    Naively inverts the short time fourier transform using an overlap and add
    method. The overlap is defined by hop

    Args:
      X: STFT windows to invert, overlap and add. 
      chunk_size: size of analysis window.
      hop: hop distance between analysis windows
      w: windowing function to apply. Must be of length chunk_size

    Returns:
      ISTFT of X using an overlap and add method. Windowing used to smooth.

    Raises:
      ValueError if window w is not of size chunk_size
    """

    if w is None:
        w = hanning(chunk_size)
    else:
        if len(w) != chunk_size:
            raise ValueError(
                "window w is not of the correct length {0}.".format(
                    chunk_size))

    x = zeros(len(X) * (hop))
    i_p = 0
    for n, i in enumerate(range(0, len(x) - chunk_size, hop)):
        x[i:i + chunk_size] += w * real(ifft(X[n]))
    return x
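A hypothetical round trip with the stft/istft pair above, assuming the bare names they use (fft, ifft, hanning, array, zeros, real) come from the module's numpy/scipy imports; because of the analysis-side normalization this is an overlap-add resynthesis, not an exact reconstruction.

import numpy as np

fs = 8000
x_in = np.random.randn(fs)      # one second of noise
chunk, hop = 1024, 256
X = stft(x_in, chunk, hop)      # windowed FFT frames
x_out = istft(X, chunk, hop)    # overlap-add resynthesis
print(X.shape, x_out.shape)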
Example #39
def spectrum_wwind(array, time, window='hanning'):  # time should be in seconds
    # Size of array
    Nw = array.shape[0]

    # Calculate time step (assumed to be in seconds)
    dt = time[1] - time[0]

    # prefactor
    # print 'dt = ',dt
    prefactor = dt

    # Calculate array of frequencies, shift
    w = np.fft.fftfreq(Nw, dt)
    w0 = np.fft.fftshift(w)

    # make window
    # blackman window
    if window == 'blackman':
        bwin = blackman(Nw)  # pretty good
    if window == 'hanning':
        bwin = hanning(Nw)  # pretty good
    if window == 'hamming':
        bwin = hamming(Nw)  # not as good
    if window == 'bartlett':
        bwin = bartlett(Nw)  # pretty good
    if window == 'kaiser':
        bwin = kaiser(Nw, 6)
    if window == 'None':
        bwin = 1.0

    # Calculate FFT
    aw = prefactor * np.fft.fft(array * bwin)
    aw0 = np.fft.fftshift(aw)

    # Calcuate Phase
    phase = np.angle(aw)
    phase0 = np.fft.fftshift(phase)

    # Adjust arrays if not div by 2
    if not np.mod(Nw, 2):
        w0 = np.append(w0, -w0[0])
        aw0 = np.append(aw0, -aw0[0])
        phase0 = np.append(phase0, -phase0[0])

    # Cut FFTs in half
    Nwi = Nw // 2
    w2 = w0[Nwi:]
    aw2 = aw0[Nwi:]
    phase2 = phase0[Nwi:]

    comp = aw
    pwr = (np.abs(aw2))**2
    pwr2 = (np.abs(aw))**2
    mag = np.sqrt(pwr)
    cos_phase = np.cos(phase2)
    freq = w2
    freq2 = w

    return freq, freq2, comp, pwr, mag, phase2, cos_phase, dt
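Illustrative call on a sampled sine, assuming the window functions (hanning, blackman, ...) and numpy are imported at module level as the snippet expects.

import numpy as np

fs = 1000.0
time = np.arange(2048) / fs
sig = np.sin(2 * np.pi * 60.0 * time)
freq, freq2, comp, pwr, mag, phase2, cos_phase, dt = spectrum_wwind(sig, time, window='hanning')
print(freq[np.argmax(pwr)])   # peaks near 60 Hz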
Example #40
def hpsModelSynth(hfreq, hmag, hphase, mYst, N, H, fs):
	# Synthesis of a sound using the harmonic plus stochastic model
	# hfreq: harmonic frequencies, hmag:harmonic amplitudes, mYst: stochastic envelope
	# Ns: synthesis FFT size, H: hop size, fs: sampling rate 
	# y: output sound, yh: harmonic component, yst: stochastic component
	hN = N//2                                                 # half of FFT size for synthesis
	L = hfreq[:,0].size                                       # number of frames
	nH = hfreq[0,:].size                                      # number of harmonics
	pout = 0                                                  # initialize output sound pointer         
	ysize = H*(L+4)                                           # output sound size
	yhw = np.zeros(N)                                        # initialize output sound frame
	ysw = np.zeros(N)                                        # initialize output sound frame
	yh = np.zeros(ysize)                                      # initialize output array
	yst = np.zeros(ysize)                                     # initialize output array
	sw = np.zeros(N)     
	ow = triang(2*H)                                          # overlapping window
	sw[hN-H:hN+H] = ow      
	bh = blackmanharris(N)                                   # synthesis window
	bh = bh / sum(bh)                                         # normalize synthesis window
	wr = bh                                                   # window for residual
	sw[hN-H:hN+H] = sw[hN-H:hN+H] / bh[hN-H:hN+H]             # synthesis window for harmonic component
	sws = H*hanning(N)/2                                      # synthesis window for stochastic component
	lastyhfreq = hfreq[0,:]                                   # initialize synthesis harmonic frequencies
	yhphase = 2*np.pi*np.random.rand(nH)                      # initialize synthesis harmonic phases     
	for l in range(L):
		yhfreq = hfreq[l,:]                                     # synthesis harmonics frequencies
		yhmag = hmag[l,:]                                       # synthesis harmonic amplitudes
		mYrenv = mYst[l,:]                                      # synthesis residual envelope
		if (hphase.size > 0):
			yhphase = hphase[l,:] 
		else:
			yhphase += (np.pi*(lastyhfreq+yhfreq)/fs)*H             # propagate phases
		lastyhfreq = yhfreq
		Yh = UF.genSpecSines(yhfreq, yhmag, yhphase, N, fs)     # generate spec sines 
		mYs = resample(mYrenv, hN)                              # interpolate to original size
		mYs = 10**(mYs/20)                                      # dB to linear magnitude  
		pYs = 2*np.pi*np.random.rand(hN)                        # generate phase random values
		Ys = np.zeros(N, dtype = complex)
		Ys[:hN] = mYs * np.exp(1j*pYs)                         # generate positive freq.
		Ys[hN+1:] = mYs[:0:-1] * np.exp(-1j*pYs[:0:-1])        # generate negative freq.
		fftbuffer = np.zeros(N)
		fftbuffer = np.real(ifft(Yh))                           # inverse FFT of harm spectrum
		yhw[:hN-1] = fftbuffer[hN+1:]                         # undo zero-phase window
		yhw[hN-1:] = fftbuffer[:hN+1] 
		fftbuffer = np.zeros(N)
		fftbuffer = np.real(ifft(Ys))                           # inverse FFT of stochastic approximation spectrum
		ysw[:hN-1] = fftbuffer[hN+1:]                           # undo zero-phase window
		ysw[hN-1:] = fftbuffer[:hN+1]
		yh[pout:pout+N] += sw*yhw                               # overlap-add for sines
		yst[pout:pout+N] += sws*ysw                             # overlap-add for stoch
		pout += H                                               # advance sound pointer
	y = yh+yst                                                # sum harmonic and stochastic components
	return y, yh, yst
Example #41
def autocorrelation(block):
    w = hanning(len(block))
    if len(block.shape)==1:
        b = block*w
        res = correlate(b, b, mode='full')
        v = res[res.shape[0]//2:]
        return v/max(v)
    elif len(block.shape)==2:
        res = array([correlate(block[:,i]*w, block[:,i]*w, mode='full')
                     for i in range(block.shape[1])])
        v = res[:,res.shape[1]//2:]
        return v/max(v)
def slidingFFT(data, window=256, fs=400, overlap=0., ignore_dropped=False,
                frequencies=None, aggregate=True, phase=False):

    nChan, nSamples = data.shape
    noverlap = int(overlap * window)
    windowVals = hanning(window)

    # get the corresponding indices for custom frequencies
    freqs = np.fft.fftfreq(window, d=1./fs)[:window//2]
    idx_freqs = []
    if frequencies is not None:
        for fr in frequencies:
            tmp = (freqs >= fr[0]) & (freqs < fr[1])
            idx_freqs.append(np.where(tmp)[0])
            numFreqs = len(idx_freqs)
    else:
        numFreqs = len(freqs)
    # get the indices of dropped data
    if ignore_dropped:
        dropped = (np.sum(data**2, 0) == 0)

    ind = list(range(0, nSamples - window + 1, window-noverlap))

    numSlices = len(ind)
    slices = range(numSlices)
    Slices = np.zeros((numSlices, numFreqs, nChan), dtype=np.complex_)
    for iSlice in slices:
        sl = slice(ind[iSlice], ind[iSlice] + window)
        if ignore_dropped:
            if np.sum(dropped[sl]) > 0:
                continue

        thisSlice = data[:, sl]
        thisSlice = windowVals*thisSlice
        thisfft = np.fft.fft(thisSlice).T
        if frequencies is None:
            Slices[iSlice] = thisfft[1:(window//2 + 1)]
        else:
            for fr, idx in enumerate(idx_freqs):
                Slices[iSlice, fr, :] = thisfft[idx].mean(0)

    Slices = Slices.transpose(0, 2, 1)
    if aggregate:
        Slices = np.concatenate(Slices.transpose(1, 2, 0), axis=0)
    else:
        Slices = Slices.transpose(2, 1, 0)

    if phase:
        Slices = np.arctan2(np.imag(Slices), np.real(Slices))
    else:
        Slices = np.abs(Slices)

    return Slices
Example #43
    def smooth(self, n=4):

        win = signal.hanning(n, sym=True)
        win /= np.sum(win)

        K = self.data
        nt, M, N = K.shape
        for t in range(nt):
            for i in range(M):
                K[t, i, :] = np.convolve(K[t, i, :], win, mode='same')
            for j in range(N):
                K[t, :, j] = np.convolve(K[t, :, j], win, mode='same')
Example #44
def axes_correlation(block):
    w = hanning(len(block))
    if len(block.shape)==2:
        ax = range(block.shape[1])
        pairs = set([tuple(sort(z)) for z in
                     [[x,y] for x in ax for y in ax if x != y]])
        cor = []
        for p in pairs:
            cor.append(fftconvolve(block[:,p[0]]*w,
                                   (block[:,p[1]]*w)[::-1],
                                   mode='full'))
        v = reduce(lambda x,y:abs(x)+abs(y), cor)
        return v / v.max()
def plot_spectrogram(samples, sample_rate, title, pdf_file):
    
    window_size_sec = .005
    hop_size_percent = 20
    
    window_size = int(round(window_size_sec * sample_rate))
    window = signal.hanning(window_size, sym=False)
    hop_size = \
        int(round(window_size_sec * hop_size_percent / 100 * sample_rate))
        
    dft_size = 2 * tfa_utils.get_dft_size(window_size)
    
    gram = tfa_utils.compute_spectrogram(samples, window, hop_size, dft_size)
    
    gram = tfa_utils.linear_to_log(gram)
    
    # plot_histogram(gram)
    
    hop_size_sec = window_size_sec * hop_size_percent / 100
    times = np.arange(len(gram)) * hop_size_sec + window_size_sec / 2
    
    num_bins = dft_size // 2 + 1
    bin_size = sample_rate / dft_size
    freqs = np.arange(num_bins) * bin_size
        
    x = gram.transpose()
    
    plt.figure(figsize=(12, 6))
        
    start_time = times[0] - hop_size_sec / 2
    end_time = times[-1] + hop_size_sec / 2
    start_freq = freqs[0]
    end_freq = freqs[-1]
    extent = (start_time, end_time, start_freq, end_freq)
    
    # `vmin` and `vmax` were chosen by looking at histogram of spectrogram
    # values plotted by `plot_histogram` function.
    plt.imshow(
        x, cmap='gray_r', vmin=-25, vmax=125, origin='lower', extent=extent,
        aspect='auto')
    
    plt.title(title)
    plt.xlabel('Time (s)')
    plt.ylabel('Frequency (Hz)')
    # plt.ylim(0, 11000)

    pdf_file.savefig()
    
    plt.close()
def stochasticModel(x, H, N, stocf):
	"""
	Stochastic analysis/synthesis of a sound, one frame at a time
	x: input array sound, H: hop size, N: fft size 
	stocf: decimation factor of mag spectrum for stochastic analysis, bigger than 0, maximum of 1
	returns y: output sound
	"""
	hN = N//2+1                                           		# positive size of fft
	No2 = N//2							# half of N
	if (hN*stocf < 3):                                              # raise exception if decimation factor too small
		raise ValueError("Stochastic decimation factor too small")
		
	if (stocf > 1):                                          # raise exception if decimation factor too big
		raise ValueError("Stochastic decimation factor above 1")
	
	if (H <= 0):                                             # raise error if hop size 0 or negative
		raise ValueError("Hop size (H) smaller or equal to 0")
		
	if not(UF.isPower2(N)):                                  # raise error if N not a power of two
		raise ValueError("FFT size (N) is not a power of 2")
		
	w = hanning(N)                                           # analysis/synthesis window
	x = np.append(np.zeros(No2),x)                           # add zeros at beginning to center first window at sample 0
	x = np.append(x,np.zeros(No2))                           # add zeros at the end to analyze last sample
	pin = No2                                                # initialize sound pointer in middle of analysis window       
	pend = x.size - No2                                      # last sample to start a frame
	y = np.zeros(x.size)                                     # initialize output array
	while pin<=pend:              
	#-----analysis-----             
		xw = x[pin-No2:pin+No2]*w                              # window the input sound
		X = fft(xw)                                            # compute FFT
		mX = 20 * np.log10(abs(X[:hN]))                        # magnitude spectrum of positive frequencies
		stocEnv = resample(np.maximum(-200, mX), int(hN*stocf))     # decimate the mag spectrum
	#-----synthesis-----
		mY = resample(stocEnv, hN)                             # interpolate to original size
		pY = 2*np.pi*np.random.rand(hN)                        # generate phase random values
		Y = np.zeros(N, dtype = complex)
		Y[:hN] = 10**(mY/20) * np.exp(1j*pY)                   # generate positive freq.
		Y[hN:] = 10**(mY[-2:0:-1]/20) * np.exp(-1j*pY[-2:0:-1]) # generate negative freq.
		fftbuffer = np.real(ifft(Y))                           # inverse FFT
		y[pin-No2:pin+No2] += w*fftbuffer                      # overlap-add
		pin += H  					 # advance sound pointer
	y = np.delete(y, range(No2))                              # delete half of first window which was added 
	y = np.delete(y, range(y.size-No2, y.size))               # delete half of last window which was added                                            
	return y
    def PltUp(n):
#        Data = array.array('f', Plotting.read(Rate//10))
        Data = array.array('f', Plotting.read(Rate//10, 
                                              exception_on_overflow=False))
        Data = [_ * SBInAmpF for _ in Data]
        HWindow = signal.hanning(int(len(Data) // (Rate / 1000)))
        F, PxxSp = signal.welch(Data, Rate, HWindow, nperseg=len(HWindow), noverlap=0, 
                                scaling='density')
        
        Start = np.where(F > FreqBand[0])[0][0]-1
        End = np.where(F > FreqBand[1])[0][0]-1
        BinSize = F[1] - F[0]
        RMS = sum(PxxSp[Start:End] * BinSize)**0.5
        dB = 20*(math.log(RMS/MicSens_VPa, 10)) + 94
        print(dB, max(PxxSp))
        
        Plot.set_xdata(F)
        Plot.set_ydata(PxxSp)
        return Plot,
Example #48
def _lagged_coherence_1freq(x, f, Fs, N_cycles=3, f_step=1):
    """Calculate lagged coherence of x at frequency f using the hanning-taper FFT method"""
    Nsamp = int(np.ceil(N_cycles * Fs / f))
    # For each N-cycle chunk, calculate phase
    chunks = _nonoverlapping_chunks(x, Nsamp)
    C = len(chunks)
    hann_window = signal.hanning(Nsamp)
    fourier_f = np.fft.fftfreq(Nsamp, 1 / float(Fs))
    fourier_f_idx = _arg_closest_value(fourier_f, f)
    fourier_coefsoi = np.zeros(C, dtype=complex)
    for i2, c in enumerate(chunks):
        fourier_coef = np.fft.fft(c * hann_window)

        fourier_coefsoi[i2] = fourier_coef[fourier_f_idx]

    lcs_num = 0
    for i2 in range(C - 1):
        lcs_num += fourier_coefsoi[i2] * np.conj(fourier_coefsoi[i2 + 1])
    lcs_denom = np.sqrt(np.sum(
        np.abs(fourier_coefsoi[:-1])**2) * np.sum(np.abs(fourier_coefsoi[1:])**2))
    return np.abs(lcs_num / lcs_denom)
def stochasticModelSynth(mYst, H):
	# stochastic synthesis of a sound
	# mYst: stochastic envelope, H: hop size, 
	# returns y: output sound
	N = H*2                                                  # FFT size    
	L = mYst[:,0].size                                       # number of frames
	y = np.zeros(L*H+2*H)                                    # initialize output array
	ws = hanning(N)                                          # synthesis window
	pout = 0                                                 # output sound pointer
	for l in range(L):                    
		mY = resample(mYst[l,:], H)                            # interpolate to original size
		pY = 2*np.pi*np.random.rand(H)                         # generate phase random values
		Y = np.zeros(N, dtype = complex)
		Y[:H] = 10**(mY/20) * np.exp(1j*pY)                    # generate positive freq.
		Y[H+1:] = 10**(mY[:0:-1]/20) * np.exp(-1j*pY[:0:-1])   # generate negative freq.
		fftbuffer = np.real(ifft(Y))                           #inverse FFT
		y[pout:pout+N] += ws*fftbuffer                         # overlap-add
		pout += H                                              # advance sound pointer
	y = np.delete(y, range(H))                               # delete half of first window which was added
	y = np.delete(y, range(y.size-H, y.size))                # delete half of last window which was added
	return y
Exemple #50
0
def stochastic_model_anal(x, H, N, stocf):
    """
    Stochastic analysis of a sound
    x: input array sound, H: hop size, N: fftsize
    stocf: decimation factor of mag spectrum for stochastic analysis, bigger than 0, maximum of 1
    returns stocEnv: stochastic envelope
    """

    hN = N // 2 + 1  # positive size of fft
    No2 = N // 2  # half of N
    if hN * stocf < 3:  # raise exception if decimation factor too small
        raise ValueError("Stochastic decimation factor too small")

    if stocf > 1:  # raise exception if decimation factor too big
        raise ValueError("Stochastic decimation factor above 1")

    if H <= 0:  # raise error if hop size 0 or negative
        raise ValueError("Hop size (H) smaller or equal to 0")

    if not (UF.isPower2(N)):  # raise error if N not a power of two
        raise ValueError("FFT size (N) is not a power of 2")

    w = hanning(N)  # analysis window
    x = np.append(np.zeros(No2), x)  # add zeros at beginning to center first window at sample 0
    x = np.append(x, np.zeros(No2))  # add zeros at the end to analyze last sample
    pin = No2  # initialize sound pointer in middle of analysis window
    pend = x.size - No2  # last sample to start a frame

    stocEnv = None
    while pin <= pend:
        xw = x[pin - No2:pin + No2] * w  # window the input sound
        X = fft(xw)  # compute FFT
        mX = 20 * np.log10(abs(X[:hN]))  # magnitude spectrum of positive frequencies
        mY = resample(np.maximum(-200, mX), int(stocf * hN))  # decimate the mag spectrum
        if pin == No2:  # first frame
            stocEnv = np.array([mY])
        else:  # rest of frames
            stocEnv = np.vstack((stocEnv, np.array([mY])))
        pin += H  # advance sound pointer
    return stocEnv
def generate_midi(infile, fraction):
    samplerate, data = scipy.io.wavfile.read(infile)

    ws = 0.5/fraction

    ns = s_to_samples(samplerate, ws)
    print(ns)

    w = hanning(ns)

    spectra = []

    for chunk in chunks(data, ns):
        delta = len(w) - len(chunk)
        if delta:
            chunk = np.append(chunk, [0] * delta)

        spectrum = fft(chunk * w)

        spectra.append(spectrum)
        # print(d)

    return spectra
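# s_to_samples and chunks are not defined in this snippet; the functions below are
# hypothetical implementations consistent with how generate_midi uses them.
def s_to_samples(samplerate, seconds):
    """Convert a duration in seconds to a whole number of samples."""
    return int(round(samplerate * seconds))

def chunks(data, n):
    """Yield consecutive length-n slices of data (the last one may be shorter)."""
    for i in range(0, len(data), n):
        yield data[i:i + n]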
def stochasticModelAnal(x, H, stocf):
	# stochastic analysis of a sound
	# x: input array sound, H: hop size, 
	# stocf: decimation factor of mag spectrum for stochastic analysis
	# returns mYst: stochastic envelope
	N = H*2                                                  # FFT size   
	w = hanning(N)                                           # analysis window
	x = np.append(np.zeros(H),x)                             # add zeros at beginning to center first window at sample 0
	x = np.append(x,np.zeros(H))                             # add zeros at the end to analyze last sample
	pin = 0                                                  # initialize sound pointer at the first frame
	pend = x.size-N                                          # last sample to start a frame
	while pin<=pend:                         
		xw = x[pin:pin+N] * w                                  # window the input sound
		X = fft(xw)                                            # compute FFT
		mX = 20 * np.log10(abs(X[:H]))                         # magnitude spectrum of positive frequencies
		mY = resample(np.maximum(-200, mX), int(mX.size*stocf))  # decimate the mag spectrum
		if pin == 0:
			mYst = np.array([mY])
		else:
			mYst = np.vstack((mYst, np.array([mY])))
		pin += H                                               # advance sound pointer
	return mYst
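# Round-trip sketch (illustration only): analyze white noise with stochasticModelAnal and
# resynthesize it with the stochasticModelSynth defined earlier in this file. Assumes the
# module-level imports those functions rely on (fft, ifft, hanning, resample).
import numpy as np

fs = 44100
x = np.random.randn(fs)                  # one second of white noise
H = 128                                  # hop size; the FFT size is N = 2*H
stocf = 0.2                              # keep ~20% of the magnitude-spectrum points
mYst = stochasticModelAnal(x, H, stocf)
y = stochasticModelSynth(mYst, H)
print(mYst.shape, y.size)                # (frames, decimated bins), output length in samples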
Exemple #53
0
def axes_fft(block):
    w = hanning(len(block))
    if len(block.shape)==2:
        ax = range(block.shape[1])
        pairs = set([tuple(sort(z)) for z in
                     [[x,y] for x in ax for y in ax if x != y]])

        normalize = lambda x: ((x - x.mean())
                               / sqrt(average((x-x.mean())**2)))

        features = []
        for p in pairs:
            a = block[:,p[0]]
            b = block[:,p[1]]
            # a = normalize(a)
            # b = normalize(b)
            a = abs(fft(a*w))[:block.shape[0]//2]
            b = abs(fft(b*w))[:block.shape[0]//2]
            c = log10(1+a*b)
            features.append(c)

        v = sum(features,axis=0)/len(features)

        return v - v.mean()  # normalize(v)
    def get_specgram( self, samples, win_size, advance, oversample ):
        '''
        Gets spectrum for samples, returns as a matrix
        [in] samples    - audio samples
        [in] win_size   - analysis window size in samples
        [in] advance    - number of samples to advance between frames
        [in] oversample - how much to oversample by
        [ret] matrix    - nframes x fft_len

        Notes: uses a hanning window ( the scipy.signal version so it's periodic )
        ''' 
        num_frames    = samples.shape[0] // advance
        fft_full_len  = win_size * oversample
        fft_half_len  = fft_full_len // 2 + 1
        hh            = ss.hanning( win_size, False )
        spec_buf      = np.zeros( (num_frames, fft_half_len), dtype=complex )
        for frame_id in range( num_frames ):
            frame_start   = frame_id * advance
            frame_end     = frame_start + win_size
            if frame_end > samples.shape[0]:
                break
            spectrum      = np.fft.fft( hh * samples[frame_start:frame_end,0], n=fft_full_len )
            spec_buf[ frame_id, : ] = spectrum[ :fft_half_len ]
        return spec_buf
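# Hypothetical usage of get_specgram (instance name and values are illustrative): the result
# has one row per frame and win_size*oversample//2 + 1 complex bins per row.
# spec = analyzer.get_specgram(samples, win_size=1024, advance=256, oversample=2)
# power_db = 20 * np.log10(np.abs(spec) + 1e-12)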
Exemple #55
0
    def finalize(self):
        discard = self.get_current_value('discard')
        smoothing_window = self.get_current_value('smoothing_window')
        ref_mic_sens = self.get_current_value('ref_mic_sens')
        ref_mic_gain = dbi(self.get_current_value('ref_mic_gain'))
        exp_mic_gain = dbi(self.get_current_value('exp_mic_gain'))
        waveform_averages = self.get_current_value('waveform_averages')
        results = self.iface.process(waveform_averages=waveform_averages,
                                     input_gains=[exp_mic_gain, ref_mic_gain],
                                     discard=discard)

        exp_mic_waveform, ref_mic_waveform = \
            results['mic_waveforms'].mean(axis=0)

        exp_mic_psd, ref_mic_psd = db(results['tf_psd'])
        exp_mic_phase, ref_mic_phase = results['tf_phase']
        exp_mic_sens = exp_mic_psd+db(ref_mic_sens)-ref_mic_psd
        if smoothing_window > 0:
            w = signal.hanning(smoothing_window)
            w /= w.sum()
            exp_mic_sens = np.convolve(exp_mic_sens, w, mode='same')

        results['exp_mic_waveform'] = exp_mic_waveform
        results['ref_mic_waveform'] = ref_mic_waveform
        results['ref_mic_psd'] = ref_mic_psd
        results['exp_mic_psd'] = exp_mic_psd
        results['ref_mic_phase'] = ref_mic_phase
        results['exp_mic_phase'] = exp_mic_phase
        results['exp_mic_sens'] = exp_mic_sens
        results['speaker_spl'] = ref_mic_psd-db(ref_mic_sens)-db(20e-6)
        results['frequency'] = results['mic_frequency']

        self.model.update_plots(results)
        self.results = results
        self.result_settings = dict(self.model.paradigm.items())
        self.complete = True
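# Standalone sketch of the smoothing step in finalize(): convolve a rough sensitivity curve
# with a unit-area Hanning window so the overall dB level is preserved. The input curve here
# is synthetic, for illustration only (signal.hanning as used throughout this file; newer
# SciPy exposes it as signal.windows.hann).
import numpy as np
from scipy import signal

noisy_sens = 20 * np.log10(np.abs(np.random.randn(500)) + 1e-3)   # fake sensitivity curve in dB
smoothing_window = 25

w = signal.hanning(smoothing_window)
w /= w.sum()                                   # normalize to unit area
smoothed = np.convolve(noisy_sens, w, mode='same')
print(noisy_sens.std(), smoothed.std())        # the smoothed curve fluctuates less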
Exemple #56
0
def test_smooth():
    tr = get_rand_traj()
    assert len(tr.attrs_nstep) > 0
    trs = crys.smooth(tr, hanning(11))
    assert len(trs.attrs_nstep) > 0
    assert_attrs_not_none(trs, attr_lst=tr.attr_lst)
    for name in tr.attrs_nstep:
        a1 = getattr(tr, name)
        a2 = getattr(trs, name)
        assert a1.shape == a2.shape
        assert np.abs(a1 - a2).sum() > 0.0
    assert trs.timestep == tr.timestep
    assert trs.nstep == tr.nstep
    
    # reproduce data with kernel [0,1,0]
    trs = crys.smooth(tr, hanning(3))
    for name in tr.attrs_nstep:
        a1 = getattr(tr, name)
        a2 = getattr(trs, name)
        assert np.allclose(a1, a2)
    
    trs1 = crys.smooth(tr, hanning(3), method=1)
    trs2 = crys.smooth(tr, hanning(3), method=2)
    assert len(trs1.attrs_nstep) > 0
    assert len(trs2.attrs_nstep) > 0
    for name in tr.attrs_nstep:
        a1 = getattr(tr, name)
        a2 = getattr(trs1, name)
        a3 = getattr(trs2, name)
        assert np.allclose(a1, a2)
        assert np.allclose(a1, a3)
    
    trs1 = crys.smooth(tr, hanning(11), method=1)
    trs2 = crys.smooth(tr, hanning(11), method=2)
    assert len(trs1.attrs_nstep) > 0
    assert len(trs2.attrs_nstep) > 0
    for name in trs1.attrs_nstep:
        a1 = getattr(trs1, name)
        a2 = getattr(trs2, name)
        assert np.allclose(a1, a2)
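# Quick check of the "kernel [0,1,0]" comment above: scipy's symmetric hanning(3) has zero
# endpoints, so smoothing with it (after normalization) reproduces the data exactly.
import numpy as np
from scipy.signal import hanning

print(hanning(3))                                        # [0. 1. 0.]
x = np.random.randn(50)
k = hanning(3) / hanning(3).sum()
print(np.allclose(np.convolve(x, k, mode='same'), x))    # True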
Exemple #57
0
def compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
                       tmin=None, tmax=None, fmin=0., fmax=200.,
                       NFFT=2048, overlap=0.5, pick_ori=None, label=None,
                       nave=1, pca=True, verbose=None, pick_normal=None):
    """Compute source power spectrum density (PSD)

    Parameters
    ----------
    raw : instance of Raw
        The raw data
    inverse_operator : dict
        The inverse operator
    lambda2: float
        The regularization parameter
    method: "MNE" | "dSPM" | "sLORETA"
        Use minimum norm, dSPM or sLORETA
    tmin : float | None
        The beginning of the time interval of interest (in seconds). If None
        start from the beginning of the file.
    tmax : float | None
        The end of the time interval of interest (in seconds). If None
        stop at the end of the file.
    fmin : float
        The lower frequency of interest
    fmax : float
        The upper frequency of interest
    NFFT: int
        Window size for the FFT. Should be a power of 2.
    overlap: float
        The overlap fraction between windows. Should be between 0 and 1.
        0 means no overlap.
    pick_ori : None | "normal"
        If "normal", rather than pooling the orientations by taking the norm,
        only the radial component is kept. This is only implemented
        when working with loose orientations.
    label: Label
        Restricts the source estimates to a given label
    nave : int
        The number of averages used to scale the noise covariance matrix.
    pca: bool
        If True, the true dimension of data is estimated before running
        the time frequency transforms. It reduces the computation times
        e.g. with a dataset that was maxfiltered (true dim is 64)
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    stc : SourceEstimate | VolSourceEstimate
        The PSD (in dB) of each of the sources.
    """
    pick_ori = _check_ori(pick_ori, pick_normal)

    logger.info('Considering frequencies %g ... %g Hz' % (fmin, fmax))

    inv = prepare_inverse_operator(inverse_operator, nave, lambda2, method)
    is_free_ori = inverse_operator['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI

    #
    #   Pick the correct channels from the data
    #
    sel = _pick_channels_inverse_operator(raw.ch_names, inv)
    logger.info('Picked %d channels from the data' % len(sel))
    logger.info('Computing inverse...')
    #
    #   Simple matrix multiplication followed by combination of the
    #   three current components
    #
    #   This does all the data transformations to compute the weights for the
    #   eigenleads
    #
    K, noise_norm, vertno = _assemble_kernel(inv, label, method, pick_ori)

    if pca:
        U, s, Vh = linalg.svd(K, full_matrices=False)
        rank = np.sum(s > 1e-8 * s[0])
        K = s[:rank] * U[:, :rank]
        Vh = Vh[:rank]
        logger.info('Reducing data rank to %d' % rank)
    else:
        Vh = None

    start, stop = 0, raw.last_samp + 1 - raw.first_samp
    if tmin is not None:
        start = raw.time_as_index(tmin)[0]
    if tmax is not None:
        stop = raw.time_as_index(tmax)[0] + 1
    NFFT = int(NFFT)
    Fs = raw.info['sfreq']
    window = signal.hanning(NFFT)
    freqs = fftpack.fftfreq(NFFT, 1. / Fs)
    freqs_mask = (freqs >= 0) & (freqs >= fmin) & (freqs <= fmax)
    freqs = freqs[freqs_mask]
    fstep = np.mean(np.diff(freqs))
    psd = np.zeros((K.shape[0], np.sum(freqs_mask)))
    n_windows = 0

    for this_start in np.arange(start, stop, int(NFFT * (1. - overlap))):
        data, _ = raw[sel, this_start:this_start + NFFT]
        if data.shape[1] < NFFT:
            logger.info("Skipping last buffer")
            break

        if Vh is not None:
            data = np.dot(Vh, data)  # reducing data rank

        data *= window[None, :]

        data_fft = fftpack.fft(data)[:, freqs_mask]
        sol = np.dot(K, data_fft)

        if is_free_ori and pick_ori is None:
            sol = combine_xyz(sol, square=True)
        else:
            sol = np.abs(sol) ** 2

        if method != "MNE":
            sol *= noise_norm ** 2

        psd += sol
        n_windows += 1

    psd /= n_windows

    psd = 10 * np.log10(psd)

    subject = _subject_from_inverse(inverse_operator)
    stc = _make_stc(psd, vertices=vertno, tmin=fmin * 1e-3,
                    tstep=fstep * 1e-3, subject=subject)
    return stc
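# Hypothetical usage sketch (file names and parameter values are made up; assumes the
# standard MNE readers normally used to drive this function):
# import mne
# raw = mne.io.read_raw_fif('sample_audvis_raw.fif')
# inv = mne.minimum_norm.read_inverse_operator('sample_audvis-meg-oct-6-meg-inv.fif')
# stc = compute_source_psd(raw, inv, lambda2=1. / 9., method='dSPM',
#                          fmin=0., fmax=40., NFFT=2048, overlap=0.5)
# stc.save('source_psd_dSPM')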