Example #1
 def test_basic(self):
     assert_allclose(windows.hann(6, sym=False),
                     [0, 0.25, 0.75, 1.0, 0.75, 0.25])
     assert_allclose(windows.hann(7, sym=False),
                     [0, 0.1882550990706332, 0.6112604669781572,
                      0.9504844339512095, 0.9504844339512095,
                      0.6112604669781572, 0.1882550990706332])
     assert_allclose(windows.hann(6, True),
                     [0, 0.3454915028125263, 0.9045084971874737,
                      0.9045084971874737, 0.3454915028125263, 0])
     assert_allclose(windows.hann(7),
                     [0, 0.25, 0.75, 1.0, 0.75, 0.25, 0])
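
For reference, the values asserted above follow directly from the window definition: with sym=False scipy returns the periodic Hann window w[n] = 0.5*(1 - cos(2*pi*n/M)), and with sym=True (the default) the symmetric window w[n] = 0.5*(1 - cos(2*pi*n/(M-1))). A minimal check, not part of the test suite:

import numpy as np
from scipy.signal import windows

M = 6
n = np.arange(M)
periodic = 0.5 * (1 - np.cos(2 * np.pi * n / M))         # hann(6, sym=False)
symmetric = 0.5 * (1 - np.cos(2 * np.pi * n / (M - 1)))  # hann(6, sym=True)
print(np.allclose(periodic, windows.hann(M, sym=False)))    # True
print(np.allclose(symmetric, windows.hann(M, sym=True)))    # True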
Example #3
def nlfer(signal, pitch, parameters):

    #---------------------------------------------------------------
    # Set parameters.
    #---------------------------------------------------------------
    N_f0_min = np.around((parameters['f0_min']*2/float(signal.new_fs))*pitch.nfft)
    N_f0_max = np.around((parameters['f0_max']/float(signal.new_fs))*pitch.nfft)

    window = hann(pitch.frame_size+2)[1:-1]
    data = np.zeros((signal.size))  #Needs other array, otherwise stride and
    data[:] = signal.filtered     #windowing will modify signal.filtered

    #---------------------------------------------------------------
    # Main routine.
    #---------------------------------------------------------------
    samples = np.arange(int(np.fix(float(pitch.frame_size)/2)),
                        signal.size-int(np.fix(float(pitch.frame_size)/2)),
                        pitch.frame_jump)

    data_matrix = np.empty((len(samples), pitch.frame_size))
    data_matrix[:, :] = stride_matrix(data, len(samples),
                                    pitch.frame_size, pitch.frame_jump)
    data_matrix *= window

    specData = np.fft.rfft(data_matrix, pitch.nfft)

    frame_energy = np.abs(specData[:, int(N_f0_min-1):int(N_f0_max)]).sum(axis=1)
    pitch.set_energy(frame_energy, parameters['nlfer_thresh1'])
    pitch.set_frames_pos(samples)
Example #4
def nlfer(signal, pitch, parameters):

    #---------------------------------------------------------------
    # Set parameters.
    #---------------------------------------------------------------
    N_f0_min = np.around(
        (parameters['f0_min'] * 2 / float(signal.new_fs)) * pitch.nfft)
    N_f0_max = np.around(
        (parameters['f0_max'] / float(signal.new_fs)) * pitch.nfft)

    window = hann(pitch.frame_size + 2)[1:-1]
    data = np.zeros((signal.size))  #Needs other array, otherwise stride and
    data[:] = signal.filtered  #windowing will modify signal.filtered

    #---------------------------------------------------------------
    # Main routine.
    #---------------------------------------------------------------
    samples = np.arange(int(np.fix(float(pitch.frame_size) / 2)),
                        signal.size - int(np.fix(float(pitch.frame_size) / 2)),
                        pitch.frame_jump)

    data_matrix = np.empty((len(samples), pitch.frame_size))
    data_matrix[:, :] = stride_matrix(data, len(samples), pitch.frame_size,
                                      pitch.frame_jump)
    data_matrix *= window

    specData = np.fft.rfft(data_matrix, pitch.nfft)

    frame_energy = np.abs(specData[:, int(N_f0_min -
                                          1):int(N_f0_max)]).sum(axis=1)
    pitch.set_energy(frame_energy, parameters['nlfer_thresh1'])
    pitch.set_frames_pos(samples)
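
The helper stride_matrix is not defined in these examples. Based on how it is called above, a minimal equivalent using NumPy stride tricks could look like the sketch below (an assumption about its behaviour, not the original implementation): row i is the frame starting at sample i*frame_jump.

import numpy as np

def stride_matrix(vector, n_frames, frame_size, frame_jump):
    # Row i is a view of vector[i*frame_jump : i*frame_jump + frame_size].
    # The caller above copies the result into a fresh array before windowing,
    # so the underlying signal is not modified.
    step = vector.strides[0]
    return np.lib.stride_tricks.as_strided(
        vector, shape=(n_frames, frame_size),
        strides=(step * frame_jump, step))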
Example #5
    def _stft(self, data, inverse: bool = False, length=None):
        """ Single entrypoint for both stft and istft. This computes stft and
        istft with librosa on stereo data. The two channels are processed
        separately and are concatenated together in the result. The expected
        input formats are: (n_samples, 2) for stft and (T, F, 2) for istft.

        :param data:    np.array with either the waveform or the complex
                        spectrogram depending on the parameter inverse
        :param inverse: should a stft or an istft be computed.
        :returns:   Stereo data as numpy array for the transform.
                    The channels are stored in the last dimension.
        """
        assert not (inverse and length is None)
        data = np.asfortranarray(data)
        N = self._params['frame_length']
        H = self._params['frame_step']
        win = hann(N, sym=False)
        fstft = istft if inverse else stft
        win_len_arg = {
            'win_length': None,
            'length': None} if inverse else {'n_fft': N}
        n_channels = data.shape[-1]
        out = []
        for c in range(n_channels):
            d = np.concatenate(
                (np.zeros((N, )), data[:, c], np.zeros((N, )))
                ) if not inverse else data[:, :, c].T
            s = fstft(d, hop_length=H, window=win, center=False, **win_len_arg)
            if inverse:
                s = s[N:N+length]
            s = np.expand_dims(s.T, 2-inverse)
            out.append(s)
        if len(out) == 1:
            return out[0]
        return np.concatenate(out, axis=2-inverse)
Example #6
def _calculate_dow_feature(
    day_of_week_array,
    dow_feature,
):
    """Add day of week and weekend to the features."""
    feature_to_dow_number = {
        constants.MONDAY: 0,
        constants.TUESDAY: 1,
        constants.WEDNESDAY: 2,
        constants.THURSDAY: 3,
        constants.FRIDAY: 4,
        constants.SATURDAY: 5,
        constants.SUNDAY: 6,
    }
    if dow_feature == constants.DAY_OF_WEEK:
        output = day_of_week_array
    elif dow_feature == constants.WEEKEND_DAY:
        output = (day_of_week_array > 4)
    elif dow_feature == constants.DOW_WINDOW:
        # Should help engage more of the GAM kernel at each timestep
        # Keep sym true to have the peak be unique (i.e. 1)
        window = windows.hann(7, sym=True)
        output = np.empty(day_of_week_array.shape, dtype=np.float32)
        for i in range(7):
            output[day_of_week_array == i] = window[i]
    elif dow_feature in feature_to_dow_number:
        output = (day_of_week_array == feature_to_dow_number[dow_feature])
    else:
        raise ValueError(f"{dow_feature} is not a supported DOW feature.")

    return output.astype(np.int32)
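
For reference, the 7-point symmetric Hann window built in the DOW_WINDOW branch assigns the following weight to each day of the week (index 0 = Monday, unique peak of 1.0 at index 3), matching the values asserted in Example #1:

from scipy.signal import windows

print(windows.hann(7, sym=True))
# [0.   0.25 0.75 1.   0.75 0.25 0.  ]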
Example #7
 def stft(self, data, inverse=False, length=None):
     """
     Single entrypoint for both stft and istft. This computes stft and istft with librosa on stereo data. The two
     channels are processed separately and are concatenated together in the result. The expected input formats are:
     (n_samples, 2) for stft and (T, F, 2) for istft.
     :param data: np.array with either the waveform or the complex spectrogram depending on the parameter inverse
     :param inverse: should a stft or an istft be computed.
     :return: Stereo data as numpy array for the transform. The channels are stored in the last dimension
     """
     assert not (inverse and length is None)
     data = np.asfortranarray(data)
     N = self._params["frame_length"]
     H = self._params["frame_step"]
     win = hann(N, sym=False)
     fstft = istft if inverse else stft
     win_len_arg = {
         "win_length": None,
         "length": length
     } if inverse else {
         "n_fft": N
     }
      dl, dr = ((data[:, :, 0].T, data[:, :, 1].T) if inverse
                else (data[:, 0], data[:, 1]))
     s1 = fstft(dl, hop_length=H, window=win, center=False, **win_len_arg)
     s2 = fstft(dr, hop_length=H, window=win, center=False, **win_len_arg)
     s1 = np.expand_dims(s1.T, 2 - inverse)
     s2 = np.expand_dims(s2.T, 2 - inverse)
     return np.concatenate([s1, s2], axis=2 - inverse)
Example #8
def make_spectra(filename,
                 filt=None,
                 n_fft=N_FFT,
                 n_acc=N_ACCUMULATION,
                 skip_n_samples=SKIP_N_SAMPLES,
                 n_sp=N_SPECTRA,
                 window=False):
    filepath = DATA_DIR / filename
    print(f'Process file {filepath}')

    params = parse_parameters(filepath)

    sampling_period = params.sampling_period
    central_freq = params.central_freq
    n_channels = params.n_channels

    n_samples = n_fft * n_acc

    freqs = central_freq + np.fft.fftfreq(n=n_fft, d=sampling_period)
    freqs = np.fft.fftshift(freqs)

    sp = {ch: [] for ch in range(n_channels)}

    generator = take_quadratures(filepath, n_samples, n_channels,
                                 skip_n_samples)
    for i, quads_dict in enumerate(generator):
        for ch, quads in quads_dict.items():
            new_quads = quads.reshape((n_acc, n_fft))
            if filt is not None:
                x, new_quads = filt(new_quads)

                n_dropped = quads.size - new_quads.size
                print(f'[{i+1}/{n_sp}] '
                      f'Number of dropped samples: {n_dropped} '
                      f'({n_dropped * 100 / n_samples:.2f} %)')
            else:
                x = np.arange(n_acc)

            if new_quads.size == 0:
                continue

            if window:
                new_quads *= hann(n_fft)
            fft = np.fft.fftshift(np.fft.fft(new_quads, axis=1))

            pws = calc_power_spectra(x, fft)
            sp[ch].append(pws)
            # plt.plot(freqs, pws)
            # plt.title(f'Channel {ch}')
            # plt.show()

        if i + 1 == n_sp:
            break

    print('Processing finished')

    arr_ch1 = np.array(sp[0])
    arr_ch2 = np.array(sp[1]) if len(sp) > 1 else None

    return params, freqs, arr_ch1, arr_ch2
Example #9
def _stft(data, inverse=False, length=None):
    """
    Single entrypoint for both stft and istft. This computes stft and istft with librosa on stereo data. The two
    channels are processed separately and are concatenated together in the result. The expected input formats are:
    (n_samples, 2) for stft and (T, F, 2) for istft.
    :param data: np.array with either the waveform or the complex spectrogram depending on the parameter inverse
    :param inverse: should a stft or an istft be computed.
    :return: Stereo data as numpy array for the transform. The channels are stored in the last dimension
    """
    assert not (inverse and length is None)
    data = np.asfortranarray(data)
    N = 4096
    H = 1024
    win = hann(N, sym=False)
    fstft = istft if inverse else stft
    win_len_arg = {
        "win_length": None,
        "length": length
    } if inverse else {
        "n_fft": N
    }
    n_channels = data.shape[-1]
    out = []
    for c in range(n_channels):
        d = data[:, :, c].T if inverse else data[:, c]
        s = fstft(d, hop_length=H, window=win, center=False, **win_len_arg)
        s = np.expand_dims(s.T, 2 - inverse)
        out.append(s)
    if len(out) == 1:
        return out[0]
    return np.concatenate(out, axis=2 - inverse)
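
A hypothetical round-trip sketch for the module-level _stft above (it assumes librosa's stft/istft and scipy's hann are imported as in the source; the sample count is made up):

import numpy as np

waveform = np.random.randn(44100, 2).astype(np.float32)     # one second of stereo noise
spec = _stft(waveform)                                       # complex array, shape (T, 2049, 2)
recon = _stft(spec, inverse=True, length=waveform.shape[0])
print(spec.shape, recon.shape)                               # (40, 2049, 2) (44100, 2)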
Example #10
    def test_extremes(self):
        # Test extremes of alpha correspond to boxcar and hann
        tuk0 = windows.tukey(100, 0)
        box0 = windows.boxcar(100)
        assert_array_almost_equal(tuk0, box0)

        tuk1 = windows.tukey(100, 1)
        han1 = windows.hann(100)
        assert_array_almost_equal(tuk1, han1)
Example #12
def smooth(data, window_type='hann', filter_width=11, sigma=2, plot_on=1):
    """
    Smooth 1d data with moving window (uses filtfilt to have zero phase distortion)
    Wrapper for scipy.signal.filtfilt
    To do: consider replacing with sosfiltfilt

    Inputs:
        data: numpy array
        window_type ('hann'): string ('boxcar', 'gaussian', 'hann', 'bartlett', 'blackman')
        filter_width (11): int (wider is smoother); odd is ideal
        sigma (2.): scalar std deviation; only used for gaussian
        plot_on (1): int determines plotting. 0: none, 1: plot signal, 2: also plot filter
    Outputs
        data_smoothed: signal after being smoothed
        filter_window: the window used for smoothing

    Notes:
        Uses Gustafsson's method to handle edge artifacts
        Currently accepted window_type options:
            hann (default) - cosine bump; filter_width is the only param
            blackman - more narrowly peaked bump than hann
            boxcar - flat-top of length filter_width
            bartlett - triangle
            gaussian - sigma determines width

    """
    if window_type == 'boxcar':
        filter_window = windows.boxcar(filter_width)
    elif window_type == 'hann':
        filter_window = windows.hann(filter_width)
    elif window_type == 'bartlett':
        filter_window = windows.bartlett(filter_width)
    elif window_type == 'blackman':
        filter_window = windows.blackman(filter_width)
    elif window_type == 'gaussian':
        filter_window = windows.gaussian(filter_width, sigma)
    else:
        raise ValueError(f"Unsupported window_type: {window_type}")
    filter_window = filter_window / np.sum(filter_window)
    data_smoothed = signal.filtfilt(filter_window, 1, data,
                                    method="gust")  # pad

    if plot_on:
        if plot_on > 1:
            plt.plot(filter_window)
            plt.title(f'{window_type} filter')
        plt.figure('signal', figsize=(10, 5))
        plt.plot(data,
                 color=(0.7, 0.7, 0.7),
                 label='noisy signal',
                 linewidth=1)
        plt.plot(data_smoothed, color='r', label='smoothed signal')
        plt.xlim(0, len(data_smoothed))
        plt.xlabel('sample')
        plt.grid(True)
        plt.legend()

    return data_smoothed, filter_window
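
A hypothetical usage of smooth() above on a noisy sine (assumes numpy is imported as np, as in the source; plot_on=0 skips the plotting branch):

import numpy as np

t = np.linspace(0, 1, 500)
noisy = np.sin(2 * np.pi * 5 * t) + 0.3 * np.random.randn(t.size)
smoothed, win = smooth(noisy, window_type='hann', filter_width=21, plot_on=0)
print(smoothed.shape, round(win.sum(), 6))   # (500,) 1.0 -- the window is normalized to unit sum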
Example #13
def apply_hann_window(data):
    ''' Apply a Hann window to the data; only the first 10% and the last 10%
        of the samples are tapered, the middle 80% is left unchanged.
    '''
    ten_percent = int(data.shape[0] * 0.1)
    win = list(hann(ten_percent * 2))
    win = np.array(win[0:int(len(win) / 2)] + [1] * int(data.shape[0] * 0.8) +
                   win[int(len(win) / 2)::])
    windowed_data = data * win
    return windowed_data
Example #14
def fft_wave_data(samplerate, data, NFFT=16384 * 2):
    w = hann(data.shape[0])
    yf = fft(data * w, NFFT)
    xf = fftfreq(NFFT, 1 / NFFT)[0:NFFT // 2] * samplerate / NFFT
    yf_amplitude = 2. / NFFT * np.abs(yf[0:NFFT // 2])

    p0 = 0.00002
    levels = 20 * np.log10(yf_amplitude / p0)

    return xf, levels
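
A hypothetical call to fft_wave_data() above with a synthetic 1 kHz tone (assumes numpy as np; the sample rate is made up):

import numpy as np

samplerate = 48000
t = np.arange(samplerate) / samplerate      # one second of samples
tone = np.sin(2 * np.pi * 1000 * t)         # 1 kHz test tone
xf, levels = fft_wave_data(samplerate, tone)
print(xf[np.argmax(levels)])                # peak close to 1000 Hz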
Example #15
def signal_ramp(n, percent):
    if percent > 49: percent = 49
    length = int(numpy.floor((n * percent) / 100))
    window = hann(length * 2 + 1)
    window = window - numpy.min(window)
    window = window / numpy.max(window)
    left = window[0:length + 1]
    right = window[length:]
    buffer = numpy.ones(n - 2 * left.size)
    total = numpy.hstack((left, buffer, right))
    return total
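
A quick usage sketch of signal_ramp() above (assuming the numpy and hann imports used by the source); the result is a flat-topped envelope with cosine ramps over roughly percent of the samples at each end:

env = signal_ramp(1000, 10)
print(env.shape, env[0], env.max(), env[500])   # (1000,) 0.0 1.0 1.0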
Example #16
def FFT_para():
    try:
        aa = request.get_data()
        bb = aa.decode('UTF-8')
        cc = json.loads(bb)

        Json_Samp = cc["Samp"]
        Json_Sz = cc["Sz"]
        Json_rows = cc["rows"]
        Json_Overlap = cc["OLR"]

        Norm_rows = json_normalize(Json_rows)
        df_lst = Norm_rows.columns.tolist()
        df_lst.remove('timestamp')
        df_lst.insert(0, 'timestamp')
        tmp_df_1 = Norm_rows[df_lst]
        tmp_df_2 = tmp_df_1.dropna(how='any').dropna(how='all', axis=1)
        df = tmp_df_2.drop('timestamp', axis=1)

        dt = 1 / Json_Samp  # sampling interval
        M = Json_Sz  # FFT size
        win = hann(M)  # window function
        acf = 1 / (sum(win) / M)
        overlap = Json_Overlap  # overlap ratio
        overlap_samples = int(round(M * overlap))  # overlap in samples

        # Build the frequency scale
        freq = fftfreq(M, dt)[1:int(M // 2.56)]

        df_FFT = pd.DataFrame({'freq': freq})
        collist = list(df.columns)

        for colname in collist:
            x = df[colname].values
            # amplitude
            f, t, S = stft(x,
                           fs=Json_Samp,
                           window=win,
                           nperseg=M,
                           noverlap=overlap_samples,
                           padded=False,
                           return_onesided=False,
                           boundary=None)
            SS = acf * np.abs(S)
            avg_SS = np.mean(SS, axis=1)
            SSS = pd.Series(data=avg_SS[1:int(M // 2.56)],
                            name=colname,
                            dtype='float')
            df_FFT = pd.concat([df_FFT, SSS], axis=1)
        FFT = df_FFT.to_json(orient='table')
        return FFT
    except:
        return "error"
Example #17
 def _rms(self, signal):
     window = hann(self.window_size)
     rms_signal = []
     for i in range(0, len(signal) - self.window_size + 1, self.shift):
         subsignal = signal[i:i + self.window_size] * window
         rms_signal.append(np.sqrt(np.mean(subsignal**2)))
     rms_signal = np.array(rms_signal)
     rms_signal = np.interp(
         list(range(len(signal))),
         list(range(0,
                    len(signal) - self.window_size + 1, self.shift)),
         rms_signal)
     return rms_signal
Example #18
def fft_wave(filename):
    ''' The file should be in PCM or IEEE float format; if not, use a tool such
        as Audacity to convert it to PCM first.
    '''
    samplerate, data_1 = wavfile.read(filename)
    NFFT = 16384 * 2
    w = hann(data_1.shape[0])
    yf = fft(data_1 * w, NFFT)
    xf = fftfreq(NFFT, 1 / NFFT)[0:NFFT // 2] * samplerate / NFFT
    yf_amplitude = 2. / NFFT * np.abs(yf[0:NFFT // 2])

    p0 = 0.00002
    levels = 20 * np.log10(yf_amplitude / p0)
    return xf, levels
Example #19
File: decode.py  Project: sys41x4/sstv
    def _peak_fft_freq(self, data):
        """Finds the peak frequency from a section of audio data"""

        windowed_data = data * hann(len(data))
        fft = np.abs(np.fft.rfft(windowed_data))

        # Get index of bin with highest magnitude
        x = np.argmax(fft)
        # Interpolated peak frequency
        peak = barycentric_peak_interp(fft, x)

        # Return frequency in hz
        return peak * self._sample_rate / len(windowed_data)
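
The helper barycentric_peak_interp is not included in this snippet. A minimal sketch of such an interpolation (an assumption, not necessarily the project's implementation) refines the integer bin index using the magnitudes of the peak bin and its neighbours:

def barycentric_peak_interp(bins, x):
    # Weighted-average (barycentric) refinement of peak index x,
    # clamped at the array edges.
    y1 = bins[x - 1] if x > 0 else bins[x]
    y2 = bins[x]
    y3 = bins[x + 1] if x + 1 < len(bins) else bins[x]
    denom = y1 + y2 + y3
    return x if denom == 0 else x + (y3 - y1) / denom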
Example #20
File: LIA.py  Project: edmundsj/liapy
    def modulate(self,
                 data,
                 modulation_frequency,
                 sync_phase_delay=pi,
                 window='hann'):
        """
        Modulates data with a sinusoid of known frequency.
        Returns data with the correct mean, but higher total signal power,
        by about a factor of 1.22. Recommended not to use directly except
        in boxcar mode for this reason.

        :param data: The data which you want modulated
        :param modulation_frequency: The desired frequency at which to modulate the signal (this is the expected signal frequency)
        :param sync_phase_delay: The phase of the synchronization points on a sin(x) signal, from 0-2pi
        """
        if window == 'hann':
            hann_window = hann(len(data))
            hann_mean = np.mean(hann_window)
            hann_normalized = hann_window / hann_mean
            window_data = hann_normalized
        elif window == 'box' or window == 'boxcar':
            window_data = 1
        else:
            raise ValueError(f'Window {window} not implemented. '
                             'Choices are hann and boxcar')

        if isinstance(data, pd.DataFrame):
            sync_indices = np.nonzero(data['Sync'].values)[0]
            times = column_from_unit(data, ureg.s)
            time_phase_delay = times[sync_indices[0]]* \
                modulation_frequency * 2 * np.pi
            total_phase_delay = time_phase_delay - sync_phase_delay

            modulation_signal = sqrt(2) * np.sin(
                2 * pi * modulation_frequency * times - total_phase_delay)

            modulation_signal -= np.mean(modulation_signal)

            # This compensates for the offset of our sample points compared to
            # the maxima of the sinewave - they have less power than they
            # *should* as continuous-time signals
            squared_mean = np.mean(np.square(modulation_signal))
            modulation_signal /= squared_mean
            new_data = data.copy()
            if isinstance(modulation_signal, pint.Quantity):
                modulation_signal = modulation_signal.magnitude
            new_data.iloc[:, 1] *= modulation_signal
            return new_data
        else:
            # THIS DOES NOT CURRENTLY WORK. NEEDS TO BE FIXED.
            raise ValueError(
                'numpy arrays not currently supported. Just mimic the pandas stuff.'
            )
Example #21
def spec_calc(audio_inp, params):
    """
	Calculates the framewise magnitude and phase spectra (STFT) of the audio file.

	Parameters
	----------
	audio_inp : np.array
		Numpy array containing the audio signal, in the time domain
	params : dict
		Parameter dictionary containing the following keys
			- fs : Sampling rate of the audio
			- W : Window size (number of frames)
			- N : FFT size (multiple of 2)
			- H : Hop size
			- t : Threshold for sinusoidal detection in dB

	Returns
	-------
	xmX : np.array
		Framewise magnitude spectrum, rescaled from the dB values returned by stftAnal
	xpX : np.array
		Framewise phase spectrum
	"""

    fs = params['fs']
    W = params['W']
    N = params['N']
    H = params['H']
    t = params['t']

    w = windows.hann(W)

    # Compute the STFT
    xmX, xpX = stftAnal(x=audio_inp, w=w, N=N, H=H)
    # xmX = stft_for_reconstruction(x = audio_inp, fft_size = N, hopsamp = H)
    # Remove the dB normalization done in the above function
    xmX = xmX / 20

    return xmX, xpX
Example #22
def fft_wave_data(samplerate, data, NFFT=16384 * 2):
    ''' the input are as follows: 
        samplerate, data = wavfile.read(filename)
    '''
    # Adding a Hann window may have some effect if the signal is short and impulsive
    w = hann(data.shape[0])
    yf = fft(data * w, NFFT)
    # yf = fft(data,  NFFT)
    xf = fftfreq(NFFT, 1 / NFFT)[0:NFFT // 2] * samplerate / NFFT
    yf_amplitude = 2. / NFFT * np.abs(yf[0:NFFT // 2])

    p0 = 0.00002
    levels = 20 * np.log10(yf_amplitude / p0)

    return xf, levels
Example #23
    def process(self, data):
        data = deepcopy(data)

        winData = data.data
        if self.windowType == FourierTransform.WindowTypes.Hann:
            winData *= windows.hann(len(winData), sym=False)
        elif self.windowType == FourierTransform.WindowTypes.Blackman:
            winData *= windows.blackman(len(winData), sym=False)
        elif self.windowType == FourierTransform.WindowTypes.Flattop:
            winData *= windows.flattop(len(winData), sym=False)
        elif self.windowType == FourierTransform.WindowTypes.Tukey:
            winData *= windows.tukey(len(winData), sym=False, alpha=self.alpha)

        data.data = np.fft.rfft(winData, axis=0, norm='ortho')
        data.axes[0] = np.fft.rfftfreq(len(data.axes[0]),
                                       np.mean(np.diff(data.axes[0])))
        return data
Example #24
def GetLPCresidual(wave,L,shift,order,VUV):

    # ###
    #
    # Use: [res] = GetLPCresidual(wave,L,shift,VUV)
    #
    #
    # L=window length (samples) (typ.25ms)
    # shift=window shift (samples) (typ.5ms)
    # order= LPC order
    # VUV=vector of voicing decisions (=0 if Unvoiced, =1 if Voiced)
    #
    # Written originally by Thomas Drugman, TCTS Lab.
    #
    # Adapted to Python by
    # J. C. Vasquez-Correa
    # Pattern recognition Lab, University of Erlangen-Nuremberg
    # Faculty of Engineering, University of Antioquia,
    # ###

    ## My Bit!!
    # use: L = 25/1000*fs # 25 ms frame length
    #     shift = 5/1000*fs # 5 ms shift
    #     order = 24

    start=0
    stop=int(start+L)
    res=np.zeros(len(wave))
    n=0
    while stop<len(wave):

        if np.sum(VUV[start:stop])==len(VUV[start:stop]): # if it is a voiced segment
            segment=wave[start:stop]
            segment=segment*hann(len(segment))
            try:
                A=pysptk.sptk.lpc(segment, order)
                inv=filtfilt(A,1,segment)
                inv=inv*np.sqrt(np.sum(segment**2)/np.sum(inv**2))
                res[start:stop]=inv
            except:
                print("WARNING: LPCs cannot be extracted for the segment")
        start=int(start+shift)
        stop=int(stop+shift)
        n=n+1
    res=res/max(abs(res))
    return res
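
A hypothetical call to GetLPCresidual() above, following the values suggested in its comment block (assumes numpy as np; the signal is dummy noise and the voicing vector is set to all-voiced purely for illustration):

import numpy as np

fs = 16000
wave = np.random.randn(fs)      # one second of dummy audio
L = int(25 / 1000 * fs)         # 25 ms frame length
shift = int(5 / 1000 * fs)      # 5 ms frame shift
VUV = np.ones(len(wave))        # treat every frame as voiced
res = GetLPCresidual(wave, L, shift, 24, VUV)
print(res.shape)                # (16000,)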
Example #25
def get_stft(signal_, w_, fft_len_, overlap_):
    '''
    signal is a trace
    w_: window length
    fft_len: length of stft
    overlap_: window overlap
    '''
    if fft_len_ / w_ < 2:
        fft_len_ = w_ * 2
        print('change fft length')
    dt = signal_.stats.delta
    fs = 1 / dt
    window_s = int(fs * w_)
    n = np.arange(window_s)
    t_n = np.arange(-int(window_s / 2), int(window_s / 2))
    ovlp = int(overlap_ * window_s)
    dn = window_s - ovlp
    h = hann(window_s)

    fft_npts_ = int(fft_len_ / dt)

    signal = signal_.data

    t_l = len(signal)
    time = np.arange(0, t_l * dt, dt)
    pos = np.arange(0, t_l - window_s, dn)
    time_new = time[pos]
    dt_new = time_new[1]
    pos_l = len(pos)

    freq = np.fft.fftfreq(fft_npts_, dt)
    S = np.zeros([fft_npts_, pos_l], dtype=np.complex64)
    S_del = np.zeros([fft_npts_, pos_l], dtype=np.complex64)

    norm_h = np.linalg.norm(h, ord=2)

    for i, j in enumerate(pos):

        S[window_s + n, i] = signal[j + n] * h / norm_h
        S_del[window_s + n, i] = signal[j + n - 1] * h / norm_h

    F_S = np.fft.fft(S, axis=0)
    F_S_del = np.fft.fft(S_del, axis=0)
    F_S_f_del = np.roll(F_S, 1, axis=0)

    return F_S, F_S_del, F_S_f_del, freq, time_new
Example #26
File: lc.py  Project: soheilbr82/neurodsp
def lagged_coherence_1freq(sig, fs, freq, n_cycles):
    """Compute the lagged coherence of a frequency using the hanning-taper FFT method.

    Parameters
    ----------
    sig : 1d array
        Time series.
    fs : float
        Sampling rate, in Hz.
    freq : float
        The frequency at which to estimate lagged coherence.
    n_cycles : float
        Number of cycles at the examined frequency to use to compute lagged coherence.

    Returns
    -------
    float
        The computed lagged coherence value.
    """

    # Determine number of samples to be used in each window to compute lagged coherence
    n_samps = int(np.ceil(n_cycles * fs / freq))

    # Split the signal into chunks
    chunks = split_signal(sig, n_samps)
    n_chunks = len(chunks)

    # For each chunk, calculate the Fourier coefficients at the frequency of interest
    hann_window = hann(n_samps)
    fft_freqs = np.fft.fftfreq(n_samps, 1 / float(fs))
    fft_freqs_idx = np.argmin(np.abs(fft_freqs - freq))

    fft_coefs = np.zeros(n_chunks, dtype=complex)
    for ind, chunk in enumerate(chunks):
        fourier_coef = np.fft.fft(chunk * hann_window)
        fft_coefs[ind] = fourier_coef[fft_freqs_idx]

    # Compute the lagged coherence value
    lcs_num = 0
    for ind in range(n_chunks - 1):
        lcs_num += fft_coefs[ind] * np.conj(fft_coefs[ind + 1])
    lcs_denom = np.sqrt(
        np.sum(np.abs(fft_coefs[:-1])**2) * np.sum(np.abs(fft_coefs[1:])**2))

    return np.abs(lcs_num / lcs_denom)
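
split_signal comes from the project's utilities and is not shown here. Based on its use above (and on _nonoverlapping_chunks in Example #32), a minimal equivalent splits the signal into non-overlapping chunks of n_samps samples and drops any remainder (a sketch under that assumption):

import numpy as np

def split_signal(sig, n_samps):
    # Keep only complete chunks and reshape to (n_chunks, n_samps)
    n_chunks = len(sig) // n_samps
    return np.reshape(sig[:n_chunks * n_samps], (n_chunks, n_samps))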
Example #27
def one_ping(rcr_range, rcr_depth, rcr_bearing):
    """one second of simulated data, single ping at specified range and bearing
    """
    # time axis of simulated recorded signal
    taxis = np.arange(fs) / fs  # one second of data
    start_i = 300  # small offset for plotting purposes

    # each receiver will have slightly different coordinates based on bearing
    # and distance
    dx = rcr_range + array_position * sin(radians(rcr_bearing))
    dy = array_position * cos(radians(rcr_bearing))
    rr = np.sqrt(dx ** 2 + dy ** 2)

    # simulate first 3 in-plane arrivals
    r_dir = np.sqrt(rr ** 2 + (rcr_depth - source_depth) ** 2)
    r_surf = np.sqrt(rr ** 2 + (rcr_depth + source_depth) ** 2)
    r_bottom = np.sqrt(rr ** 2 + (2 * channel_depth - source_depth) ** 2)

    # time of arrival
    time_dir = r_dir / c
    time_surf = r_surf / c
    time_bottom = r_bottom / c

    # transmitted signal
    tsig = np.arange(np.ceil(T * fs)) / fs
    sig_xmitt = np.sin(2 * pi * fc * tsig)
    sig_xmitt *= hann(tsig.size)
    # create an interpolator, this allows for sub sample timing of arrivals
    sig_ier = interp1d(tsig,
                       sig_xmitt,
                       kind=3,
                       bounds_error=False,
                       fill_value=0.)

    # create time series as a sum of all arrivals
    x_sig = np.zeros((num_channels, taxis.size), dtype=np.float64)
    x_sig += sig_ier(taxis - time_dir[:, None]) / r_dir[:, None]
    x_sig -= sig_ier(taxis - time_surf[:, None]) / r_surf[:, None]
    x_sig -= sig_ier(taxis - time_bottom[:, None]) / r_bottom[:, None]

    # create some noise on all channels
    x_sig += np.random.randn(*x_sig.shape) * np.sqrt(noise_level)
    return x_sig
Example #28
    def stft(self, wave):
        result = []

        # Store in Fortran (column-major) order, i.e. channel-major
        wave = np.asfortranarray(wave)
        window = hann(self.frame_length, sym=False)
        channels = wave.shape[-1]
        for c in range(channels):
            data = wave[..., c]
            spectrogram = stft(data,
                               n_fft=self.frame_length,
                               hop_length=self.frame_step,
                               window=window,
                               center=False)
            spectrogram = np.expand_dims(spectrogram.T, axis=-1)
            result.append(spectrogram)

        result = np.concatenate(result, axis=-1)
        return result
Example #29
def make_cepts2(X, T_pi):
    """Calculate the squared real cepstral coefficents."""
    Y = F.unfold(X, kernel_size=[T_pi, 1], stride=T_pi)
    Y = torch.transpose(Y, 1, 2)

    # Compute the power spectral density
    window = torch.Tensor(hann(Y.shape[-1])[np.newaxis, np.newaxis]).type(Y.dtype)
    Yf = torch.rfft(Y * window, 1, onesided=True)
    spect = Yf[:, :, :, 0]**2 + Yf[:, :, :, 1]**2
    spect = spect.mean(dim=1)
    spect = torch.cat([torch.flip(spect[:, 1:], dims=(1,)), spect], dim=1)

    # Log of the DFT of the autocorrelation
    logspect = torch.log(spect) - np.log(float(Y.shape[-1]))

    # Compute squared cepstral coefs (b_k^2)
    cepts = torch.rfft(logspect, 1, onesided=True) / float(Y.shape[-1])
    cepts = torch.sqrt(cepts[:, :, 0]**2 + cepts[:, :, 1]**2)
    return cepts**2
Example #30
    def istft(self, spectrogram, length):
        result = []

        # Store in Fortran (column-major) order, i.e. channel-major
        spectrogram = np.asfortranarray(spectrogram)
        window = hann(self.frame_length, sym=False)
        channels = spectrogram.shape[-1]
        for c in range(channels):
            data = spectrogram[..., c].T
            wave = istft(data,
                         hop_length=self.frame_step,
                         window=window,
                         center=False,
                         length=length)
            wave = np.expand_dims(wave.T, axis=1)
            result.append(wave)

        result = np.concatenate(result, axis=-1)

        return result
Example #31
def sinc_pulse(timebandwidth, flip_angle, duration, dt, gamma=26747.52):
    """
    Generates an rf pulse with specified tbw, duration, and dt.

    Input:
    timebandwidth: Timebandwidth of desired rf pulse
    flip_angle: Flip angle of desired pulse
    duration: RF pulse duration in seconds.
    dt: dt value for rf samples
    gamma: Gamma value. Defaults to 2 * pi * 4257 (hydrogen default, in radians)

    Output:
    rf: Sinc pulse rf signal
    """
    samples = int(round(duration / dt))
    theta = np.linspace(-timebandwidth/2, timebandwidth/2, samples+2)
    rf = np.sinc(theta[1:-1]) * hann(samples)
    rf = flip_angle * (rf/np.sum(rf))
    rf /= (gamma * dt)
    return rf
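
A hypothetical usage of sinc_pulse() above, e.g. a time-bandwidth-4 pulse of 2 ms duration sampled every 4 microseconds (values chosen only for illustration):

import numpy as np

rf = sinc_pulse(timebandwidth=4, flip_angle=np.pi / 2, duration=2e-3, dt=4e-6)
print(rf.shape)   # (500,) -- duration / dt samples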
Example #32
def _lagged_coherence_1freq(sig, freq, fs, n_cycles=3, warn=False):
    """Calculate lagged coherence of sig at frequency freq using the hanning-taper FFT method"""

    # Determine number of samples to be used in each window to compute lagged coherence
    n_samps = int(np.ceil(n_cycles * fs / freq))

    # For each N-cycle chunk, calculate the fourier coefficient at the frequency of interest, freq
    chunks = _nonoverlapping_chunks(sig, n_samps)
    chunks_len = len(chunks)

    if chunks_len < 2:
        if warn:
            print(
                '_lagged_coherence_1freq warning: need longer signal relative to frequency',
                'n_samps:', n_samps, 'len(sig)', len(sig), 'chunks_len:',
                chunks_len, 'freq:', freq)
        return -1.0

    hann_window = hann(n_samps)
    fourier_f = np.fft.fftfreq(n_samps, 1 / float(fs))
    fourier_f_idx = np.argmin(np.abs(fourier_f - freq))
    fourier_coefsoi = np.zeros(chunks_len, dtype=complex)

    for ind, chunk in enumerate(chunks):
        fourier_coef = np.fft.fft(chunk * hann_window)
        fourier_coefsoi[ind] = fourier_coef[fourier_f_idx]

    # Compute the lagged coherence value
    lcs_num = 0
    for ind in range(chunks_len - 1):
        lcs_num += fourier_coefsoi[ind] * np.conj(fourier_coefsoi[ind + 1])
    lcs_denom = np.sqrt(
        np.sum(np.abs(fourier_coefsoi[:-1])**2) *
        np.sum(np.abs(fourier_coefsoi[1:])**2))

    if lcs_denom <= 0.0:
        if warn: print('_lagged_coherence_1freq warning: lcs_denom <= 0.0')
        return -1.0  # invalid value
    else:
        return np.abs(lcs_num / lcs_denom)  # good values
Example #33
def get_fft_window(window_type, window_length):
    # Generate the window with the right number of points
    window = None
    if window_type == "Bartlett":
        window = windows.bartlett(window_length)
    if window_type == "Blackman":
        window = windows.blackman(window_length)
    if window_type == "Blackman Harris":
        window = windows.blackmanharris(window_length)
    if window_type == "Flat Top":
        window = windows.flattop(window_length)
    if window_type == "Hamming":
        window = windows.hamming(window_length)
    if window_type == "Hanning":
        window = windows.hann(window_length)

    # If no window matched, use a rectangular window
    if window is None:
        window = np.ones(window_length)

    # Return the window
    return window
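
A quick usage sketch (assuming the scipy windows and numpy imports used by the source); any window_type string not matched above falls back to the rectangular (all-ones) window:

hann_win = get_fft_window("Hanning", 1024)
rect_win = get_fft_window("rectangular", 256)
print(hann_win.shape, rect_win.min(), rect_win.max())   # (1024,) 1.0 1.0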