Code Example #1
import numpy as np


def autocorrel(signal, tmax, dt):
    """
    Compute the normalized autocorrelation of a signal.

    Arguments: signal (np.array), tmax and dt (float).
    tmax is the maximum time lag over which the autocorrelation is computed;
    dt is the time sampling step of the signal.
    Returns: autocorrel (np.array), time_shift (np.array) -- the normalized
    autocorrelation function over [0, tmax].
    """
    steps = int(tmax / dt)  # number of time steps to sum over
    signal = (signal - signal.mean()) / signal.std()
    cr = np.correlate(signal[steps:], signal) / steps
    time_shift = np.arange(len(cr)) * dt
    return cr / cr.max(), time_shift
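A minimal usage sketch (the sine-wave signal and parameters below are illustrative, not part of the original snippet):

# 1 Hz sine sampled at dt = 1 ms, autocorrelation computed up to a 2 s lag
dt = 1e-3
t = np.arange(0, 10, dt)
sig = np.sin(2 * np.pi * 1.0 * t) + 0.1 * np.random.randn(len(t))
acf, lags = autocorrel(sig, tmax=2.0, dt=dt)
# the normalized autocorrelation peaks near lags that are multiples of the 1 s period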
Code Example #2
File: data_generator.py  Project: templeblock/latest
def generate_feat_opts(path=None,
                       cfg={
                           'pkg': 'pysp',
                           'type': 'logfbank',
                           'nfilt': 40,
                           'delta': 2
                       },
                       signal=None,
                       rate=16000):
    cfg = dict(cfg)
    if cfg['pkg'] == 'pysp':  # python_speech_features #
        if signal is None:
            rate, signal = wavfile.read(path)

        if cfg['type'] == 'logfbank':
            feat_mat = pyspfeat.base.logfbank(signal,
                                              rate,
                                              nfilt=cfg.get('nfilt', 40))
        elif cfg['type'] == 'mfcc':
            feat_mat = pyspfeat.base.mfcc(signal,
                                          rate,
                                          numcep=cfg.get('nfilt', 26) // 2,
                                          nfilt=cfg.get('nfilt', 26))
        elif cfg['type'] == 'wav':
            feat_mat = pyspfeat.base.sigproc.framesig(
                signal,
                frame_len=cfg.get('frame_len', 400),
                frame_step=cfg.get('frame_step', 160))
        else:
            raise NotImplementedError(
                "feature type {} is not implemented/available".format(
                    cfg['type']))
        # delta #
        comb_feat_mat = [feat_mat]
        delta = cfg['delta']
        if delta > 0:
            delta_feat_mat = pyspfeat.base.delta(feat_mat, 2)
            comb_feat_mat.append(delta_feat_mat)
        if delta > 1:
            delta2_feat_mat = pyspfeat.base.delta(delta_feat_mat, 2)
            comb_feat_mat.append(delta2_feat_mat)
        if delta > 2:
            raise NotImplementedError(
                "max delta is 2, larger than 2 is not normal setting")
        return np.hstack(comb_feat_mat)
    elif cfg['pkg'] == 'rosa':
        if signal is None:
            signal, rate = librosa.core.load(path, sr=cfg['sample_rate'])

        assert rate == cfg['sample_rate'], \
            "sample rate does not match the configured sample rate"

        if cfg.get('preemphasis', None) is not None:
            # signal = np.append(signal[0], signal[1:] - cfg['preemphasis']*signal[:-1])
            signal = signal_util.preemphasis(signal, cfg['preemphasis'])

        if cfg.get('pre', None) == 'meanstd':
            signal = (signal - signal.mean()) / signal.std()
        elif cfg.get('pre', None) == 'norm':
            signal = (signal - signal.min()) / (signal.max() -
                                                signal.min()) * 2 - 1

        # raw feature
        if cfg['type'] == 'wav':
            if cfg.get('post', None) == 'mu':
                signal = linear2mu(signal)

            feat_mat = pyspfeat.base.sigproc.framesig(
                signal,
                frame_len=cfg.get('frame_len', 400),
                frame_step=cfg.get('frame_step', 160))
            return feat_mat
        # spectrogram-based feature
        raw_spec = signal_util.rosa_spectrogram(
            signal,
            n_fft=cfg['nfft'],
            hop_length=cfg.get('winstep', None),
            win_length=cfg.get('winlen', None))[0]
        if cfg['type'] in ['logmelfbank', 'melfbank']:
            mel_spec = signal_util.rosa_spec2mel(raw_spec, nfilt=cfg['nfilt'])
            if cfg['type'] == 'logmelfbank':
                return np.log(mel_spec)
            else:
                return mel_spec
        elif cfg['type'] == 'lograwfbank':
            return np.log(raw_spec)
        elif cfg['type'] == 'rawfbank':
            return raw_spec
        else:
            raise NotImplementedError()
    elif cfg['pkg'] == 'taco':
        # SPECIAL FOR TACOTRON #
        tacohelper = TacotronHelper(cfg)
        if signal is None:
            signal = tacohelper.load_wav(path)

        assert len(signal) != 0, ('file {} is empty'.format(path))

        try:
            if cfg['type'] == 'raw':
                feat = tacohelper.spectrogram(signal).T
            elif cfg['type'] == 'mel':
                feat = tacohelper.melspectrogram(signal).T
            else:
                raise NotImplementedError()
        except:
            import ipdb
            ipdb.set_trace()
            pass
        return feat
    elif cfg['pkg'] == 'world':
        if path is None:
            with tempfile.NamedTemporaryFile() as tmpfile:
                wavfile.write(tmpfile.name, rate, signal)
                logf0, bap, mgc = world_vocoder_util.world_analysis(
                    tmpfile.name, cfg['mcep'])
        else:
            logf0, bap, mgc = world_vocoder_util.world_analysis(
                path, cfg['mcep'])

        vuv, f0, bap, mgc = world_vocoder_util.world2feat(logf0, bap, mgc)

        # ignore delta, avoid curse of dimensionality #
        return vuv, f0, bap, mgc
    else:
        raise NotImplementedError()
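A minimal usage sketch (assumes the module-level imports of the original file are in place, e.g. numpy as np, scipy.io.wavfile as wavfile, and python_speech_features imported as pyspfeat; the file name is illustrative):

# 40-dim log filterbank features with delta and delta-delta appended (120 columns)
feat = generate_feat_opts(path='utterance.wav',
                          cfg={'pkg': 'pysp', 'type': 'logfbank',
                               'nfilt': 40, 'delta': 2})
print(feat.shape)  # (num_frames, 120)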
Code Example #3
def dom_wn(signal, input_smooth, output_smooth, hann_width_z):
    """Calculation of dominant wavelength at every longitude

    INPUT:
        signal (array like):
            Input signal array
        input_smooth (boolean):
            If True, the input signal is filtered to retain only wavenumbers 0-20 before the wavelet analysis
        output_smooth (boolean):
            If True, the dominant wavenumber series is smoothed with a Hann window of width hann_width_z before it is returned
        hann_width_z (integer):
            Number of longitude grid points used for the smoothing (Hann window width).

    OUTPUT:
        dom_wavenumber (array like) :
            Dominant wavenumber in every longitude of the input signal.
    """
    rectify = True  # If we don't use the rectification technique there is a bias toward low wavenumbers.
    kmin = 0  
    kmax = 20
    mother = Morlet(6)          # Morlet mother wavelet with wavenumber=6
    sigma = 0.7
    ap = 3                      # the signal will be appended to itself twice (3 copies in total)
    std = signal.std()                      # Standard deviation
    std2 = std ** 2                      # Variance
    if input_smooth:
        signal = wnedit(signal, kmin, kmax) # filter the signal to keep only wavenumbers in [kmin, kmax]
    var_temp = append(signal,signal,0) 
    var = append(var_temp,signal,0) 
    N = var.size     
    
    # Which scales (wavenumbers) to resolve with CWT
    dx = 0.1
    s0 = 2 * dx                          # Smallest resolvable scale (largest wavenumber)
    dj = 0.02                            # Determines scale resolution
    J = 6 / dj                           # Largest resolvable scale. Determines total number of scales

    wave, scales, freqs, coi, fft, fftfreqs = cwt(var, ap, sigma, dx, dj, s0, J, mother)
    wnumber = 2*pi*freqs
    power = abs(wave) ** 2 / std2            # normalized wavelet power spectrum
    if rectify:
        power = power / (scales[:,None])

    max_wn = zeros(N)
    max_wn_smooth = zeros(N)

    for l in range(N):
        wnu = argmax(power[:, l])       # array index of the dominant wavenumber at this longitude
        max_wn[l] = wnumber[wnu]        # the dominant wavenumber at this longitude

	
    # remove the padding added for the wavelet transform #
    limit1 = int(N / 3)
    limit2 = int(2 * N / 3)
    max_wn = max_wn[limit1:limit2]

    # smooth along the zonal direction with a circular convolution (periodic boundaries) #
    if output_smooth:
        hann_zon = hann(hann_width_z)
        max_wn_smooth = ndimage.convolve(max_wn, hann_zon, mode='wrap') / sum(hann_zon)
        return max_wn_smooth
    else:
        return max_wn
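A minimal usage sketch (assumes the helper functions used above, such as wnedit, cwt, Morlet, hann and ndimage, are importable from the original project, and uses an illustrative 360-point longitude signal with NumPy as np):

# synthetic field dominated by zonal wavenumber 5, plus a little noise
lon = np.linspace(0, 2 * np.pi, 360, endpoint=False)
field = np.cos(5 * lon) + 0.1 * np.random.randn(360)
k_dom = dom_wn(field, input_smooth=True, output_smooth=True, hann_width_z=20)
# k_dom holds one dominant-wavenumber estimate per longitude of the input field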
Code Example #4
def add_noise(self, signal, relative_noise_power=0.05, seed=0):
    # add zero-mean Gaussian noise whose standard deviation is a fixed
    # fraction of the signal's standard deviation, using a reproducible seed
    noise_std = signal.std() * relative_noise_power
    random_state = np.random.RandomState(seed)
    return signal + random_state.randn(*signal.shape) * noise_std
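A usage sketch (the class that owns add_noise is not part of the snippet, so a minimal stand-in class is used here purely for illustration; NumPy assumed imported as np):

class _Augmenter:
    add_noise = add_noise  # reuse the function above as a method

sig = np.sin(np.linspace(0, 10, 1000))
noisy = _Augmenter().add_noise(sig, relative_noise_power=0.1, seed=42)
# the same seed reproduces exactly the same noise realization
assert np.allclose(noisy, _Augmenter().add_noise(sig, relative_noise_power=0.1, seed=42))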
Code Example #5
def main_examples(ntrains):

    alphas, threshes, ks, snrs, freqs, t, base_frequency, \
      train_freq_hists, example_trains, example_signals, waves, peaks = get_results(ntrains)

    # remove the alpha=0.5, doesn't really add anything
    alphas = alphas[1:]
    waves = waves[1:, ...]
    peaks = peaks[1:, ...]
    example_trains = example_trains[1:, ...]
    example_signals = example_signals[1:, ...]
    train_freq_hists = train_freq_hists[1:, ...]

    nalpha, nsnr, ntrains, nthresh, nfreq = peaks.shape
    nks = len(ks)
    log2_freqs = np.log2(freqs)

    np.random.seed(2002171330)
    bootci_kwargs = dict(statfunc=lambda _x: np.nanmean(_x, axis=0),
                         alpha=0.05,
                         n_samples=1000)

    snr_example_idxs = [0]
    for s in [0.1, 0.3, 1, 3]:
        snr_example_idxs.append(np.argmin(np.abs(snrs - s)))

    log2_freq_edges = utils.make_edges(log2_freqs)
    freq_ticks = [1, 2, 4, 8, 16, 32, 64]
    freq_ticklabels = freq_ticks

    wave_kwargs = [
        dict(facecolor=c, edgecolor=c, alpha=0.5, zorder=100 - ci)
        for ci, c in enumerate(['0', '0.3', '0.5'])
    ]
    peak_kwargs = [
        dict(facecolor=c, edgecolor=c, alpha=0.5, zorder=90 - ci) for ci, c in
        enumerate(plt.rcParams['axes.prop_cycle'].by_key()['color'][:nthresh])
    ]

    # axes widths and x-positions (indexed from left)
    left_margin = 0.5
    right_margin = 0.1
    column_margin = 0.15
    width_ratios = [left_margin] + reduce(
        (lambda a, b: a + [column_margin] + b),
        [[1]] * nalpha) + [right_margin]
    xs, ws = ratios_to_pos_and_size(width_ratios)

    # axes heights and y-positions (indexed from bottom)
    top_margin = 0.8
    bottom_margin = 0.6
    height_ratios = [[1.0]] * len(snr_example_idxs) + [[1.5], [0.5], [0.5]]
    height_ratios = [bottom_margin] + reduce(
        (lambda a, b: a + [0.1] + b), height_ratios) + [top_margin]
    height_ratios[-3] = 0.8
    height_ratios[-5] = 0.6
    height_ratios[-7] = 1.1
    ys, hs = ratios_to_pos_and_size(height_ratios)

    fig = plt.figure(figsize=(9, 12))
    fig.text(0.5,
             0.98,
             'Exploration of noise and irregularity',
             fontsize=16,
             ha='center',
             va='center')
    for ai, alpha in enumerate(alphas):

        alpha_value_label = f'{alpha}'.rstrip('0').rstrip('.')
        if alpha > 1:
            sigma_value_label = f'1/{alpha_value_label}'
        else:
            sigma_value_label = f'{1/alpha}'.rstrip('0').rstrip('.')
        print(f'alpha = {alpha_value_label} ({ai+1}/{nalpha})')

        # -----------------------------------------------------------------
        #   1/ISI histogram

        print('  histogram')

        # plot
        ax_isi = fig.add_subplot(position=[xs[ai], ys[-1], ws[ai], hs[-1]])
        ax_isi.bar(log2_freq_edges[:-1],
                   train_freq_hists[ai],
                   width=np.diff(log2_freq_edges),
                   align='edge',
                   color='k')

        # configure axes
        ax_isi.set_xticks([], minor=True)
        ax_isi.set_xticks(np.log2(freq_ticks))
        ax_isi.set_xticklabels(freq_ticklabels)
        ax_isi.set_title(f'$\\sigma$ = {sigma_value_label}')
        for spine in ['left', 'top', 'right']:
            ax_isi.spines[spine].set_visible(False)
        ax_isi.set_yticks([])
        if ai == 0:
            ax_isi.set_ylabel('True\ndistribution', labelpad=10)
        ax_isi.set_xlabel('Frequency (ISI$^{-1}$)')
        ax_isi.set_xlim([log2_freq_edges[0], log2_freq_edges[-1]])

        # -----------------------------------------------------------------
        #   Raster and signal examples

        print('  raster and signal examples')

        # plot raster
        ax_r = fig.add_subplot(position=[xs[ai], ys[-2], ws[ai], hs[-2]])
        for i in range(example_trains.shape[1]):
            train = example_trains[ai, i]
            spikes = t[np.where(train > 0)[0]]
            ax_r.scatter(spikes, [i] * len(spikes), marker='.', color='k', s=1)

        # plot noisy signal
        ax_s = fig.add_subplot(position=[xs[ai], ys[-3], ws[ai], hs[-3]])
        for y, snri in enumerate(snr_example_idxs):
            signal = example_signals[ai, snri]
            signal = (signal - signal.mean()) / (
                0.3 + signal.std())  # weird scaling, purely for visual aesthetics
            ax_s.plot(t, signal + 4 * y, c='k', lw=1)

        # configure axes
        if ai == 0:
            ax_r.set_ylabel('True\nraster', labelpad=10)
            ax_s.set_ylabel('SNR examples')
        for ax in [ax_r, ax_s]:
            ax.set_xlim([0, 1])
            for spine in ['left', 'top', 'right']:
                ax.spines[spine].set_visible(False)
            ax.set_yticks([])
            ax.set_xlabel('Time')
            ax.set_xticks([0, 0.25, 0.5, 0.75, 1.0])
            ax.set_xticklabels([f'{tick:g}' for tick in ax.get_xticks()])
        if ai == 0:
            ax_s.set_yticks(4 * np.arange(len(snr_example_idxs)))
            ax_s.set_yticklabels([f'{s:.2g}' for s in snrs[snr_example_idxs]])
            ax_s.yaxis.set_tick_params(length=0)

        # -----------------------------------------------------------------
        #   Wavelet and peak examples

        print('  wavelet and peak examples')

        for i, snr_idx in enumerate(snr_example_idxs):
            ax = fig.add_subplot(position=[xs[ai], ys[i], ws[ai], hs[i]])

            snr_label = f'{snrs[snr_idx]:.2g}'
            print(
                f'    example snr {snr_label} ({i+1}/{len(snr_example_idxs)})')

            # nalpha, nsnr, ntrains, nthresh, nfreq = peaks.shape
            # nalpha, nsnr, ntrains, nks    , nfreq = waves.shape

            # plot wavelet
            for j in range(nks):
                y = waves[ai, snr_idx, :, j]
                y = y / np.sum(y, axis=-1, keepdims=True)
                ci = np.sqrt(bootci_pi(y, **bootci_kwargs))
                ax.fill_between(log2_freqs,
                                ci[0],
                                ci[1],
                                label=f'Mesaclip ($k$={ks[j]})',
                                **wave_kwargs[j])

            # plot peak
            for j in range(nthresh):
                y = peaks[ai, snr_idx, :, j]
                y = y / np.sum(y, axis=-1, keepdims=True)
                ci = np.sqrt(bootci_pi(y, **bootci_kwargs))
                ax.fill_between(log2_freqs,
                                ci[0],
                                ci[1],
                                label=f'Peak ($\\theta$={threshes[j]})',
                                **peak_kwargs[j])

            # configure axes
            ax.set_yticks([])
            ax.set_xticks([])
            ax.set_xlim(log2_freq_edges[0], log2_freq_edges[-1])
            if ai == 0:
                ax.set_ylabel(f'SNR\n{snr_label}', fontsize=10, labelpad=10)
            if i == 0:
                ax.set_xticks(np.log2(freq_ticks))
                ax.set_xticklabels(freq_ticklabels)
                ax.set_xlabel('Frequency')

        if ai == 0:
            handles, labels = ax.get_legend_handles_labels()
            handles = list(np.array(handles).reshape(2, -1).T.flatten())
            labels = list(np.array(labels).reshape(2, -1).T.flatten())
            ax.legend(handles,
                      labels,
                      loc='upper left',
                      ncol=3,
                      bbox_to_anchor=(-0.05, 1.6))

    fig.savefig('../output/snr_vs_peak_detect_examples.png', dpi=600)
    plt.close(fig)
Code Example #6
File: channing.py  Project: theunissenlab/tlab
def add_signal_to_noise(signal, noise, snr, start):
    # scale the signal so that its standard deviation sits 'snr' dB above the
    # noise standard deviation, zero-pad it to the noise length starting at
    # sample 'start', and add it to the noise
    std_signal = signal.std()
    signal = zero_pad(signal, len(noise), start)
    return signal * ((noise.std() * 10 ** (snr / 20)) / std_signal) + noise
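A usage sketch (assumes NumPy as np and the project's zero_pad helper, which is not shown in this snippet; the waveforms are illustrative):

noise = np.random.randn(16000)                               # 1 s of background noise at 16 kHz
tone = np.sin(2 * np.pi * 440 * np.arange(8000) / 16000)     # 0.5 s, 440 Hz tone
mixed = add_signal_to_noise(tone, noise, snr=10, start=4000)
# the embedded tone sits 10 dB above the noise floor, starting at sample 4000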