Example #1
def bandpass_timefreq(s, frequencies, sample_rate):
    """
        Bandpass filter signal s at the given frequency bands, and then use the Hilbert transform
        to produce a complex-valued time-frequency representation of the bandpass-filtered signal.
    """

    freqs = sorted(frequencies)
    tf_raw = np.zeros([len(frequencies), len(s)], dtype='float')
    tf_freqs = list()

    for k, f in enumerate(freqs):
        #bandpass filter signal
        if k == 0:
            tf_raw[k, :] = lowpass_filter(s, sample_rate, f)
            tf_freqs.append((0.0, f))
        else:
            tf_raw[k, :] = bandpass_filter(s, sample_rate, freqs[k - 1], f)
            tf_freqs.append((freqs[k - 1], f))

    #compute analytic signal
    tf = hilbert(tf_raw, axis=1)
    #print 'tf_raw.shape=',tf_raw.shape
    #print 'tf.shape=',tf.shape

    return np.array(tf_freqs), tf_raw, tf
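The complex array returned above can be reduced to per-band amplitude envelopes and instantaneous phases with np.abs and np.angle. A minimal usage sketch, assuming the same helpers (lowpass_filter, bandpass_filter, hilbert) are in scope; the test signal and band edges are made up for illustration:

# Illustrative sketch only: synthetic signal and band edges are not from the example above.
import numpy as np

sample_rate = 1000.0
t = np.arange(int(sample_rate)) / sample_rate
s = np.sin(2 * np.pi * 10 * t) + 0.5 * np.sin(2 * np.pi * 60 * t)

# Bands become (0, 20), (20, 80), (80, 200) Hz
band_edges, tf_raw, tf = bandpass_timefreq(s, [20.0, 80.0, 200.0], sample_rate)

amp_env = np.abs(tf)    # per-band amplitude envelope, shape (n_bands, len(s))
phase = np.angle(tf)    # per-band instantaneous phase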
Example #2
def get_default_threshold(audio, window_duration=5.0):
    """Compute a default threshold over all windows in a signal

    Uses compute_smart_threshold in every time window for an
    entire audio signal. Returns the median threshold (after
    removing outliers) and std deviation of those thresholds.

    This can be useful as a reference when computing thresholds later on,
    to reject thresholds that seem out of the ordinary.

    Parameters
    ==========
    audio : instance of interfaces.audio.AudioSliceInterface
    window_duration : float
        Width in seconds of each window to compute threshold in
    """

    all_thresholds_list = []

    for t in np.arange(0, len(audio) / audio.sampling_rate, window_duration):
        end_time = min(t + window_duration, len(audio) / audio.sampling_rate)

        sliced = audio.time_slice(t, end_time)
        t_arr = sliced.t
        sig = sliced.data
        sig = sig - np.mean(sig, axis=0)
        sig = bandpass_filter(sig.T, audio.sampling_rate, 1000, 8000).T
        amp_env = get_amplitude_envelope(sig, audio.sampling_rate, highpass=1000, lowpass=8000)
        amp_env = np.mean(amp_env, axis=1)
        threshold = compute_smart_threshold(amp_env, sampling_rate=audio.sampling_rate)
        all_thresholds_list.append(threshold)

    all_thresholds_list = np.array(all_thresholds_list)

    # Remove outlier thresholds
    clf = LocalOutlierFactor(n_neighbors=5, contamination=0.1, algorithm="kd_tree")
    predicted_outlier = clf.fit_predict(all_thresholds_list[:, None])

    # Return median (non-outlier) threshold and std
    return (
        np.median(all_thresholds_list[predicted_outlier == 1]),
        np.std(all_thresholds_list[predicted_outlier == 1])
    )
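A minimal usage sketch, assuming an audio object implementing interfaces.audio.AudioSliceInterface has already been loaded elsewhere (it is not constructed here):

# Illustrative sketch only: `audio` is assumed to implement
# interfaces.audio.AudioSliceInterface and to have been loaded elsewhere.
median_threshold, threshold_std = get_default_threshold(audio, window_duration=5.0)

# A threshold computed later on a single window can be sanity-checked against
# this reference, e.g. rejecting values far from the median:
def threshold_is_reasonable(threshold, n_std=3.0):
    return abs(threshold - median_threshold) < n_std * threshold_std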
Example #3
        time.time() - _t))

    if not args.canary:
        # Split into smaller intervals
        print("Splitting {} intervals into smaller intervals".format(
            len(all_intervals)))
        _t = time.time()
        intervals = []
        for idx, (t1, t2) in enumerate(all_intervals):
            padding = 1.0
            if t1 - padding < 0 or t2 + padding > int(
                    len(audio_signal) / audio_signal.sampling_rate):
                continue
            t_arr, sig = audio_signal.time_slice(t1 - padding, t2 + padding)
            sig = sig - np.mean(sig, axis=0)
            sig = bandpass_filter(sig.T, audio_signal.sampling_rate, 1000,
                                  8000).T

            inner_intervals = split_individual_events(
                sig[:, channel],
                audio_signal.sampling_rate,
                expected_call_max_duration=0.1 if args.canary else 0.5,
                max_tries=10,
                scale_factor=1.25,
                amp_env_mode=amp_env_mode,
            )

            # since we padded the signal above, we need to be careful
            # about not accidentally adding intervals outside our original t1 -> t2
            # slice (they might belong to another detection window)
            for idx1, idx2 in inner_intervals:
                if (idx1 / audio_signal.sampling_rate) < padding:
Example #4
    def test_cross_psd(self):

        np.random.seed(1234567)
        sr = 1000.0
        dur = 1.0
        nt = int(dur * sr)
        t = np.arange(nt) / sr

        # create a simple signal
        freqs = list()
        freqs.extend(np.arange(8, 12))
        freqs.extend(np.arange(60, 71))
        freqs.extend(np.arange(130, 151))

        s1 = np.zeros([nt])
        for f in freqs:
            s1 += np.sin(2 * np.pi * f * t)
        s1 /= s1.max()

        # create a noise-corrupted, bandpass-filtered version of s1
        noise = np.random.randn(nt) * 1e-1
        # s2 = convolve1d(s1, filt, mode='mirror') + noise
        s2 = bandpass_filter(s1, sample_rate=sr, low_freq=40., high_freq=90.)
        s2 /= s2.max()
        s2 += noise

        # compute the signals' power spectra
        welch_freq1, welch_psd1 = welch(s1, fs=sr)
        welch_freq2, welch_psd2 = welch(s2, fs=sr)

        welch_psd_max = max(welch_psd1.max(), welch_psd2.max())
        welch_psd1 /= welch_psd_max
        welch_psd2 /= welch_psd_max

        # compute the auto-correlation functions
        lags = np.arange(-200, 201)
        acf1 = correlation_function(s1, s1, lags, normalize=True)
        acf2 = correlation_function(s2, s2, lags, normalize=True)

        # compute the cross correlation functions
        cf12 = correlation_function(s1, s2, lags, normalize=True)
        coh12 = coherency(s1,
                          s2,
                          lags,
                          window_fraction=0.75,
                          noise_floor_db=100.)

        # FFT-shift the lags (and the window) so that the zero lag comes first; otherwise the
        # FFT of the ACFs does not line up with the power spectrum (the FFT assumes the
        # zero-lag sample is at index 0)
        shift_lags = fftshift(lags)
        if len(lags) % 2 == 1:
            # shift zero from end of shift_lags to beginning
            shift_lags = np.roll(shift_lags, 1)
        acf1_shift = correlation_function(s1, s1, shift_lags)
        acf2_shift = correlation_function(s2, s2, shift_lags)

        # compute the power spectra from the auto-correlation functions
        ps1 = fft(acf1_shift)
        ps1_freq = fftfreq(len(acf1), d=1.0 / sr)
        fi = ps1_freq > 0
        ps1 = ps1[fi]
        assert np.sum(
            np.abs(ps1.imag) > 1e-8
        ) == 0, "Nonzero imaginary part for fft(acf1) (%d)" % np.sum(
            np.abs(ps1.imag) > 1e-8)
        ps1_auto = np.abs(ps1.real)
        ps1_auto_freq = ps1_freq[fi]

        ps2 = fft(acf2_shift)
        ps2_freq = fftfreq(len(acf2), d=1.0 / sr)
        fi = ps2_freq > 0
        ps2 = ps2[fi]
        assert np.sum(np.abs(ps2.imag) > 1e-8
                      ) == 0, "Nonzero imaginary part for fft(acf2)"
        ps2_auto = np.abs(ps2.real)
        ps2_auto_freq = ps2_freq[fi]

        assert np.sum(ps1_auto < 0) == 0, "negatives in ps1_auto"
        assert np.sum(ps2_auto < 0) == 0, "negatives in ps2_auto"

        # compute the cross spectral density from the correlation function
        cf12_shift = correlation_function(s1, s2, shift_lags, normalize=True)
        psd12 = fft(cf12_shift)
        psd12_freq = fftfreq(len(cf12_shift), d=1.0 / sr)
        fi = psd12_freq > 0

        psd12 = np.abs(psd12[fi])
        psd12_freq = psd12_freq[fi]

        # compute the cross spectral density from the power spectra
        psd12_welch = welch_psd1 * welch_psd2
        psd12_welch /= psd12_welch.max()

        # compute the coherence from the cross spectral density
        (cfreq, coherence, coherence_var, phase_coherence, phase_coherence_var,
         coh12_freqspace, coh12_freqspace_t) = coherence_jn(
            s1, s2, sample_rate=sr, window_length=0.100, increment=0.050,
            return_coherency=True)

        coh12_freqspace /= np.abs(coh12_freqspace).max()

        # down-weight the coherence by subtracting its standard deviation
        coherence_std = np.sqrt(coherence_var)
        # cweight = coherence_std / coherence_std.sum()
        # coherence_weighted = (1.0 - cweight)*coherence
        coherence_weighted = coherence - coherence_std
        coherence_weighted[coherence_weighted < 0] = 0

        # compute the coherence from the fft of the coherency
        coherence2 = fft(fftshift(coh12))
        coherence2_freq = fftfreq(len(coherence2), d=1.0 / sr)
        fi = coherence2_freq > 0
        coherence2 = np.abs(coherence2[fi])
        coherence2_freq = coherence2_freq[fi]
        """
        plt.figure()
        ax = plt.subplot(2, 1, 1)
        plt.plot(ps1_auto_freq, ps1_auto*ps2_auto, 'c-', linewidth=2.0, alpha=0.75)
        plt.plot(psd12_freq, psd12, 'g-', linewidth=2.0, alpha=0.9)
        plt.plot(ps1_auto_freq, ps1_auto, 'k-', linewidth=2.0, alpha=0.75)
        plt.plot(ps2_auto_freq, ps2_auto, 'r-', linewidth=2.0, alpha=0.75)
        plt.axis('tight')
        plt.legend(['denom', '12', '1', '2'])

        ax = plt.subplot(2, 1, 2)
        plt.plot(psd12_freq, coherence, 'b-')
        plt.axis('tight')
        plt.show()
        """

        # normalize the cross-spectral density and power spectra
        psd12 /= psd12.max()
        ps_auto_max = max(ps1_auto.max(), ps2_auto.max())
        ps1_auto /= ps_auto_max
        ps2_auto /= ps_auto_max

        # make some plots
        plt.figure()

        nrows = 2
        ncols = 2

        # plot the signals
        ax = plt.subplot(nrows, ncols, 1)
        plt.plot(t, s1, 'k-', linewidth=2.0)
        plt.plot(t, s2, 'r-', alpha=0.75, linewidth=2.0)
        plt.xlabel('Time (s)')
        plt.ylabel('Signal')
        plt.axis('tight')

        # plot the spectra
        ax = plt.subplot(nrows, ncols, 2)
        plt.plot(welch_freq1, welch_psd1, 'k-', linewidth=2.0, alpha=0.85)
        plt.plot(ps1_auto_freq, ps1_auto, 'k--', linewidth=2.0, alpha=0.85)
        plt.plot(welch_freq2, welch_psd2, 'r-', alpha=0.75, linewidth=2.0)
        plt.plot(ps2_auto_freq, ps2_auto, 'r--', linewidth=2.0, alpha=0.75)
        plt.axis('tight')

        plt.xlabel('Frequency (Hz)')
        plt.ylabel('Power')

        # plot the correlation functions
        ax = plt.subplot(nrows, ncols, 3)
        plt.axhline(0, c='k')
        plt.plot(lags, acf1, 'k-', linewidth=2.0)
        plt.plot(lags, acf2, 'r-', alpha=0.75, linewidth=2.0)
        plt.plot(lags, cf12, 'g-', alpha=0.75, linewidth=2.0)
        plt.plot(lags, coh12, 'b-', linewidth=2.0, alpha=0.75)
        plt.plot(coh12_freqspace_t * 1e3,
                 coh12_freqspace,
                 'm-',
                 linewidth=1.0,
                 alpha=0.95)
        plt.xlabel('Lag (ms)')
        plt.ylabel('Correlation Function')
        plt.axis('tight')
        plt.ylim(-0.5, 1.0)
        handles = custom_legend(['k', 'r', 'g', 'b', 'm'],
                                ['acf1', 'acf2', 'cf12', 'coh12', 'coh12_f'])
        plt.legend(handles=handles)

        # plot the cross spectral density
        ax = plt.subplot(nrows, ncols, 4)
        handles = custom_legend(['g', 'k', 'b'],
                                ['CSD', 'Coherence', 'Weighted'])
        plt.axhline(0, c='k')
        plt.axhline(1, c='k')
        plt.plot(psd12_freq, psd12, 'g-', linewidth=3.0)
        plt.errorbar(cfreq,
                     coherence,
                     yerr=np.sqrt(coherence_var),
                     fmt='k-',
                     ecolor='r',
                     linewidth=3.0,
                     elinewidth=5.0,
                     alpha=0.8)
        plt.plot(cfreq, coherence_weighted, 'b-', linewidth=3.0, alpha=0.75)
        plt.xlabel('Frequency (Hz)')
        plt.ylabel('Cross-spectral Density/Coherence')
        plt.legend(handles=handles)
        """
        plt.figure()
        plt.axhline(0, c='k')
        plt.plot(lags, cf12, 'k-', alpha=1, linewidth=2.0)
        plt.plot(lags, coh12, 'b-', linewidth=3.0, alpha=0.75)
        plt.plot(coh12_freqspace_t*1e3, coh12_freqspace, 'r-', linewidth=2.0, alpha=0.95)
        plt.xlabel('Lag (ms)')
        plt.ylabel('Correlation Function')
        plt.axis('tight')
        plt.ylim(-0.5, 1.0)
        handles = custom_legend(['k', 'b', 'r'], ['cf12', 'coh12', 'coh12_f'])
        plt.legend(handles=handles)
        """

        plt.show()
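The fftshift bookkeeping in this test reflects the discrete Wiener-Khinchin relation: the FFT of the autocorrelation, arranged with zero lag first, equals the power spectrum. A numpy-only sketch of that identity (independent of soundsig, written only to illustrate the ordering requirement):

# numpy-only sketch of the Wiener-Khinchin identity relied on above; not part of the test.
import numpy as np
from numpy.fft import fft, ifftshift

rng = np.random.default_rng(0)
x = rng.standard_normal(512)
n = len(x)

# biased linear autocorrelation at lags -(n-1)..(n-1), zero lag in the middle
acf = np.correlate(x, x, mode='full') / n

# rearrange so zero lag comes first, then take the FFT; this matches the periodogram
ps_from_acf = fft(ifftshift(acf)).real
periodogram = np.abs(fft(x, 2 * n - 1)) ** 2 / n
assert np.allclose(ps_from_acf, periodogram)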
Example #5
from soundsig.sound import WavFile
from scipy.signal import filtfilt
import os
import numpy as np
import matplotlib.pyplot as plt
from soundsig.signal import bandpass_filter

os.chdir('/Users/Alicia/ScienceProjects/DynamicalBirdCalls/Data/AdultVocalizations')
if not os.path.exists('filtered_calls'):
    os.makedirs('filtered_calls')
# Find all the wave files 
isound = 0   
low_freq = 250
high_freq = 12000
fs = 44100
for fname in os.listdir('.'):
    if fname.endswith('.wav') and 'DC' in fname:
        isound += 1
        # Read the sound file        
        soundIn = WavFile(file_name=fname).data.astype(float)
        filename, file_extension = os.path.splitext(fname)
        birdname = filename[0:10]
        calltype = filename[18:20]     
        print('Processing sound %d:%s, %s' % (isound, fname, calltype))
        # bandpass filter the signal
        filtered_call = bandpass_filter(soundIn, sample_rate=fs, low_freq=low_freq, high_freq=high_freq)
        np.save("/Users/Alicia/ScienceProjects/DynamicalBirdCalls/Data/ProcessedData/filtered_calls/%s.npy" %fname[:-4], filtered_call)
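The saved arrays can be reloaded later with np.load; a short sketch (the file name below is hypothetical, and the path should match wherever the loop above actually wrote the .npy files):

# Illustrative sketch only: the .npy file name here is hypothetical.
import numpy as np
import matplotlib.pyplot as plt

filtered_call = np.load('filtered_calls/example_call.npy')
t = np.arange(len(filtered_call)) / 44100.0  # sample rate used by the script above
plt.plot(t, filtered_call)
plt.xlabel('Time (s)')
plt.ylabel('Bandpass-filtered amplitude')
plt.show()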
Example #6
def extract_spectrograms(
        audio_signal,
        intervals,
        buffer=0.02,  # buffer around original interval to keep
        spec_buffer=0.04,  # buffer so that everything has the right padding.
):
    """Extract spectrograms from audio_signal denoted by intervals list

    intervals is a list of (t_start, t_end) tuples
    """
    _time = time.time()
    print("Extracting spectrograms from {} intervals".format(len(intervals)))
    # all_calls = []
    all_call_spectrograms = []
    for idx, (t1, t2) in enumerate(intervals):
        print("Working on {}/{} ({:.2f}s elapsed)".format(
            idx + 1, len(intervals),
            time.time() - _time),
              end="\r")

        # Recentered signal with a small buffer of 40ms on either side
        t_arr, sig = audio_signal.time_slice(
            max(0, t1 - buffer), min(audio_signal.t_max, t2 + buffer))
        sig = sig - np.mean(sig, axis=0)
        sig = bandpass_filter(sig.T, audio_signal.sampling_rate, 1000, 8000).T

        amp_env = get_amplitude_envelope(sig,
                                         fs=audio_signal.sampling_rate,
                                         lowpass=8000,
                                         highpass=1000)

        # Compute the temporal center of mass of the signal
        center_of_mass = t1 - buffer + np.sum(
            (t_arr * np.sum(amp_env, axis=1))) / np.sum(amp_env)

        # Recentered signal with a small buffer of 40ms on either side
        t_arr, sig = audio_signal.time_slice(
            max(0, center_of_mass - spec_buffer),
            min(audio_signal.t_max, center_of_mass + spec_buffer))
        sig = sig - np.mean(sig, axis=0)
        sig = bandpass_filter(sig.T, audio_signal.sampling_rate, 1000, 8000).T

        specs = []
        # all_calls.append(sig)
        for ch in range(sig.shape[1]):
            # Slightly lower resolution on the spectrograms can make this go faster
            # Can increase the params to 1000, 50 for a higher resolution spectrogram
            _, _, spec, _ = spectrogram(sig[:, ch],
                                        audio_signal.sampling_rate,
                                        500,
                                        100,
                                        min_freq=1000,
                                        max_freq=8000,
                                        cmplx=False)
            specs.append(spec)

        all_call_spectrograms.append(np.array(specs))

    all_call_spectrograms = np.array(all_call_spectrograms)
    # all_calls = np.array(all_calls)
    return all_call_spectrograms
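A minimal usage sketch, assuming audio_signal and an intervals list (e.g. from threshold_all_events in Example #7) already exist:

# Illustrative sketch only: `audio_signal` and `intervals` are assumed to come
# from elsewhere (e.g. threshold_all_events in Example #7).
specs = extract_spectrograms(audio_signal, intervals, buffer=0.02, spec_buffer=0.04)

# When every slice has the same length, specs has shape
# (n_intervals, n_channels, n_freq_bins, n_time_bins); each entry covers roughly
# 2 * spec_buffer seconds centered on the call's amplitude-weighted center of mass.
print(specs.shape)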
Example #7
def threshold_all_events(
        audio_signal,
        window_size=10.0,
        channel=0,
        t_start=None,
        t_stop=None,
        ignore_width=0.05,
        min_size=0.05,
        fuse_duration=0.5,
        threshold_z=3.0,
        highpass=1000,
        lowpass=8000,
        amp_env_mode="broadband"
    ):
    """Find intervals of potential vocalizations periods (in seconds)

    The last two windows are combined in case the duration is not an
    even multiple of the window_size

    amp_env_mode can be "broadband" or "max_zscore"
    """
    sampling_rate = audio_signal.sampling_rate
    signal_duration = len(audio_signal) / sampling_rate
    if window_size is None:
        window_starts = np.array([0.0 if t_start is None else t_start])
        window_stops = np.array([audio_signal.t_max if t_stop is None else t_stop])

    else:
        window_starts = np.arange(0, signal_duration - window_size, window_size)
        window_stops = window_starts + window_size
        window_stops[-1] = signal_duration

        if t_start:
            mask = window_starts >= t_start
        else:
            mask = np.ones_like(window_starts).astype(bool)
        if t_stop:
            mask = mask.astype(bool) & (window_stops <= t_stop)

        window_starts = window_starts[mask]
        window_stops = window_stops[mask]

    last_interval_to_check = None
    all_intervals = []

    for window_start, window_stop in zip(window_starts, window_stops):
        t_arr, window_signal = audio_signal.time_slice(window_start, window_stop)
        window_signal = window_signal - np.mean(window_signal, axis=0)
        window_signal = bandpass_filter(window_signal.T, sampling_rate, 1000, 8000).T
        amp_env = get_amplitude_envelope(
            window_signal[:, channel],
            sampling_rate,
            highpass=highpass,
            lowpass=lowpass,
            mode=amp_env_mode
        )

        threshold = compute_smart_threshold(
            amp_env,
            sampling_rate=sampling_rate,
            z=threshold_z
        )

        intervals = threshold_events(
            amp_env,
            threshold,
            sampling_rate=sampling_rate,
            ignore_width=ignore_width,
            min_size=min_size,
            fuse_duration=fuse_duration
        )

        # Here begins the code that merges intervals across windows
        if last_interval_to_check is not None:
            if not len(intervals):
                all_intervals.append(last_interval_to_check)
            elif intervals[0][0] < (0.5 * sampling_rate):
                all_intervals.append((
                    last_interval_to_check[0],
                    intervals[0][1] / sampling_rate + window_start
                ))
                intervals = intervals[1:]
            else:
                all_intervals.append(last_interval_to_check)
                all_intervals.append((
                    intervals[0][0] / sampling_rate + window_start,
                    intervals[0][1] / sampling_rate + window_start
                ))
                intervals = intervals[1:]
            last_interval_to_check = None

        for i0, i1 in intervals:
            if i1 == len(window_signal):
                last_interval_to_check = (
                    i0 / sampling_rate + window_start,
                    i1 / sampling_rate + window_start
                )
                break
            all_intervals.append((
                i0 / sampling_rate + window_start,
                i1 / sampling_rate + window_start
            ))

    if last_interval_to_check is not None:
        all_intervals.append(last_interval_to_check)

    return all_intervals
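A minimal usage sketch, assuming audio_signal is loaded elsewhere and implements time_slice(), sampling_rate, and len() as used above:

# Illustrative sketch only: `audio_signal` is assumed to be loaded elsewhere and
# to implement time_slice(), sampling_rate and len() as used above.
intervals = threshold_all_events(
    audio_signal,
    window_size=10.0,
    channel=0,
    threshold_z=3.0,
    amp_env_mode="broadband",
)

# `intervals` is a list of (t_start, t_end) tuples in seconds, suitable as the
# `intervals` argument to extract_spectrograms in Example #6.
print("Detected {} candidate vocal events".format(len(intervals)))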