Example #1
def temporal_envelope(s, sample_rate, cutoff_freq=200.0, resample_rate=None):
    """
        Get the temporal envelope from the sound pressure waveform.

        s: the signal
        sample_rate: the sample rate of the signal
        cutoff_freq: the cutoff frequency of the low pass filter used to create the envelope
        resample_rate: if given, downsample the envelope to this sample rate

        Returns the temporal envelope of the signal at the original sample rate, or, when
        resample_rate is given, a tuple of (downsampled envelope, time axis).
    """

    #rectify
    srect = np.abs(s)
    #low pass filter
    if cutoff_freq is not None:
        srect = lowpass_filter(srect, sample_rate, cutoff_freq, filter_order=4)
        srect[srect < 0] = 0
        
    if resample_rate is not None:
        lensound = len(srect)
        t = np.arange(lensound, dtype=float) / sample_rate
        lenresampled = int(round(float(lensound)*resample_rate/sample_rate))
        (srectresampled, tresampled) = resample(srect, lenresampled, t=t, axis=0, window=None)
        return (srectresampled, tresampled)
    else:   
        return srect
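
A minimal usage sketch for the function above, assuming lowpass_filter comes from lasp.signal (as imported in a later example) and resample from scipy.signal; the test signal is made up for illustration:

import numpy as np
from scipy.signal import resample
from lasp.signal import lowpass_filter

# made-up test signal: one second of an amplitude-modulated 1 kHz tone at 44.1 kHz
sample_rate = 44100.0
t = np.arange(int(sample_rate)) / sample_rate
s = np.sin(2 * np.pi * 1000.0 * t) * (1.0 + 0.5 * np.sin(2 * np.pi * 5.0 * t))

# envelope at the original sample rate
env = temporal_envelope(s, sample_rate, cutoff_freq=200.0)

# envelope downsampled to 1 kHz; an (envelope, time axis) tuple is returned in this case
env_ds, t_ds = temporal_envelope(s, sample_rate, cutoff_freq=200.0, resample_rate=1000.0)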
Example #2
def bandpass_timefreq(s, frequencies, sample_rate):
    """
        Bandpass filter signal s at the given frequency bands, and then use the Hilbert transform
        to produce a complex-valued time-frequency representation of the bandpass filtered signal.
    """

    freqs = sorted(frequencies)
    tf_raw = np.zeros([len(frequencies), len(s)], dtype='float')
    tf_freqs = list()

    for k,f in enumerate(freqs):
        #bandpass filter signal
        if k == 0:
            tf_raw[k, :] = lowpass_filter(s, sample_rate, f)
            tf_freqs.append( (0.0, f) )
        else:
            tf_raw[k, :] = bandpass_filter(s, sample_rate,  freqs[k-1], f)
            tf_freqs.append( (freqs[k-1], f) )

    #compute analytic signal
    tf = hilbert(tf_raw, axis=1)
    #print 'tf_raw.shape=',tf_raw.shape
    #print 'tf.shape=',tf.shape

    return np.array(tf_freqs),tf_raw,tf
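
A short usage sketch for bandpass_timefreq, assuming lowpass_filter and bandpass_filter are both available from lasp.signal and hilbert comes from scipy.signal; band amplitude and phase follow directly from the analytic signal:

import numpy as np
from scipy.signal import hilbert
from lasp.signal import lowpass_filter, bandpass_filter

sample_rate = 1000.0
s = np.random.normal(0, 1, int(2 * sample_rate))  # two seconds of Gaussian noise

# band edges at 100, 200, and 400 Hz -> bands (0-100), (100-200), and (200-400) Hz
band_edges, tf_raw, tf = bandpass_timefreq(s, [100.0, 200.0, 400.0], sample_rate)

amplitude = np.abs(tf)   # per-band envelope over time
phase = np.angle(tf)     # per-band instantaneous phase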
Example #3
def bandpass_timefreq(s, frequencies, sample_rate):
    """
        Bandpass filter signal s at the given frequency bands, and then use the Hilbert transform
        to produce a complex-valued time-frequency representation of the bandpass filtered signal.
    """

    freqs = sorted(frequencies)
    tf_raw = np.zeros([len(frequencies), len(s)], dtype='float')
    tf_freqs = list()

    for k, f in enumerate(freqs):
        #bandpass filter signal
        if k == 0:
            tf_raw[k, :] = lowpass_filter(s, sample_rate, f)
            tf_freqs.append((0.0, f))
        else:
            tf_raw[k, :] = bandpass_filter(s, sample_rate, freqs[k - 1], f)
            tf_freqs.append((freqs[k - 1], f))

    #compute analytic signal
    tf = hilbert(tf_raw, axis=1)
    #print 'tf_raw.shape=',tf_raw.shape
    #print 'tf.shape=',tf.shape

    return np.array(tf_freqs), tf_raw, tf
Example #4
def temporal_envelope(s, sample_rate, cutoff_freq=200.0, resample_rate=None):
    """
        Get the temporal envelope from the sound pressure waveform.

        s: the signal
        sample_rate: the sample rate of the signal
        cutoff_freq: the cutoff frequency of the low pass filter used to create the envelope
        resample_rate: if given, downsample the envelope to this sample rate

        Returns the temporal envelope of the signal at the original sample rate, or, when
        resample_rate is given, a tuple of (downsampled envelope, time axis).
    """

    #rectify
    srect = np.abs(s)
    #low pass filter
    if cutoff_freq is not None:
        srect = lowpass_filter(srect, sample_rate, cutoff_freq, filter_order=4)
        srect[srect < 0] = 0

    if resample_rate is not None:
        lensound = len(srect)
        t = np.arange(lensound, dtype=float) / sample_rate
        lenresampled = int(round(float(lensound)*resample_rate/sample_rate))
        (srectresampled, tresampled) = resample(srect, lenresampled, t=t, axis=0, window=None)
        return (srectresampled, tresampled)
    else:
        return srect
Example #5
def plotSoundSeg(fig, seg):
    # fig: handle to the figure to draw into
    # seg: the segment to plot
    # returns the filtered sound from the louder of the two microphone channels.

    # clear figure
    fig.clear()

    # The sound signal
    soundSnip = seg.analogsignals[1]
    fs = soundSnip.sampling_rate
    tvals = soundSnip.times

    # Calculate the rms of each mic
    rms0 = np.std(soundSnip[:, 0])
    rms1 = np.std(soundSnip[:, 1])

    # Choose the louder channel to plot
    if rms1 > rms0:
        sound = np.asarray(soundSnip[:, 1]).squeeze()
    else:
        sound = np.asarray(soundSnip[:, 0]).squeeze()

    # Calculate envelope and spectrogram
    sound = sound - sound.mean()
    sound_env = lowpass_filter(np.abs(sound), float(fs), 250.0)  # Sound envelope
    to, fo, spect, rms = spectrogram(sound, float(fs), 1000, 50)

    # Plot sonogram and spectrogram
    gs = gridspec.GridSpec(100, 1)

    ax = fig.add_subplot(gs[0:20, 0])

    ax.plot(tvals - tvals[0], sound / sound.max())
    ax.plot(tvals - tvals[0],
            sound_env / sound_env.max(),
            color="red",
            linewidth=2)
    plt.title('%s %d' % (seg.name, seg.index))
    plt.xlim(0.0, to[-1])

    ax = fig.add_subplot(gs[21:, 0])
    plot_spectrogram(to,
                     fo,
                     spect,
                     ax=ax,
                     ticks=True,
                     fmin=250,
                     fmax=8000,
                     colormap=None,
                     colorbar=False,
                     log=True,
                     dBNoise=50)
    plt.ylabel('Frequency')
    plt.tick_params(labelbottom=False)
    plt.xlim(0.0, to[-1])
    plt.show()
    return sound, fs
Example #6
def organize_playbacks(segment, sm, sound_onset, sound_offset, fs_mic=25000):
    # TODO why don't I send this function sound_onset and sound_offset????
    """ 
        Compares times with sound present vs times when playbacks occurred to determine
        which sounds are actual vocalizations.         
        Returns sound_playback, which = -1 if the sound is not accounted for by 
        a playback, and an integer corresponding to the playback id otherwise.        
        Only checks for overlap in time, many playbacks may have vocalizations
        in them that would be discarded and just considered playbacks with this method.
        This is still pretty rough but works for now.
        Also orders the stimuli which were used as sound playback, putting them in 
        an array 'stimuli', with fields 'duration', 'time', and 'name'.        
    """
    stim_time = np.zeros(len(segment.epocharrays))
    stim_duration = np.zeros(len(segment.epocharrays))
    #stimuli_times = list() #used temporarily
    #   stim_id = list()
    stim_name = list()
    stim_env = list()
    i = 0
    for epoch in segment.epocharrays:
        #   stimuli_times.append(epoch.times)
        #       stim_id.append(sm.database.get_annotations(epoch.annotations["stim_id"]))
        stim_name.append(
            sm.database.get_annotations(
                epoch.annotations["stim_id"])['callid'])
        sample_rate = sm.database.get_annotations(
            epoch.annotations["stim_id"])['samplerate']
        s = sm.reconstruct(epoch.annotations["stim_id"])
        sound = np.asarray(s).squeeze()
        sound_env = lowpass_filter(np.abs(sound), float(sample_rate), 250.0)
        stim_env.append(sound_env)
        stim_time[i] = epoch.times  #* 1000 # in s
        stim_duration[i] = epoch.durations  # in s
        i = i + 1
        # to check plotting


#==============================================================================
#     figure
#     hold
#     plot(stim_env[i])
#     plot(mic[round(stim_time[i]*fs_mic):round(stim_time[i]*fs_mic+5000)])
#==============================================================================

    # sort the stimuli names etc. so they are not impossible to work with
    sorted_stim_time_idx = sorted(
        range(len(stim_time)),
        key=lambda x: stim_time[x])  # indices of stim_time, as they aren't in order
    stim_env = [stim_env[i] for i in sorted_stim_time_idx]  # reorder stim_env by stimulus onset time
    stimuli = list()
    dtype = [('time', float), ('duration', float), ('name', 'S10')]
    for i in range(len(stim_time)):
        stimuli.append((stim_time[i], stim_duration[i], stim_name[i]))
    stimuli = np.array(stimuli, dtype=dtype)
    stimuli = np.sort(stimuli, order=['time'])

    # To figure out, roughly, which sounds are due to stimuli
    sound_playback = np.zeros(len(sound_onset), dtype=int) - 1
    for i in range(len(sound_onset)):
        for j in range(len(stimuli)):
            sound_center = ((sound_onset[i] + sound_offset[i]) / 2) / fs_mic
            stim_center = stimuli['time'][j] + (stimuli['duration'][j] / 2)
            sound_half_width = ((sound_offset[i] - sound_onset[i]) / 2) / fs_mic
            stim_half_width = stimuli['duration'][j] / 2
            if np.abs(sound_center - stim_center) < (sound_half_width + stim_half_width):
                # vocalization overlaps with a stimulus
                sound_playback[i] = j

    return stimuli, stim_env, sound_playback
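
A small self-contained sketch of the structured-array bookkeeping used above (one record per stimulus, then sort by onset time), with made-up values:

import numpy as np

dtype = [('time', float), ('duration', float), ('name', 'S10')]
stimuli = np.array([(3.5, 1.2, b'song'), (0.8, 0.5, b'call')], dtype=dtype)
stimuli = np.sort(stimuli, order=['time'])  # records ordered by stimulus onset

print(stimuli['name'])     # -> [b'call' b'song']
print(stimuli['time'][0])  # -> 0.8, the earliest onset comes first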
Example #7
t_mic = np.asarray(mic.times)
too_long = too_long * fs_mic
mic = mic.squeeze()
if normalize_mic:  # because our mic channel has leak current / noise
    i = 0
    previous_i = 0
    while i < len(mic):
        i = i + int(round(fs_mic / 2))  # filter by the half second, not optimized
        if i > len(mic):
            i = len(mic)
        mic[previous_i:i] = mic[previous_i:i] - np.mean(mic[previous_i:i])
        previous_i = i
if len(mic.shape) > 1:
    mic = mic[:, 0]  # discards any other mic channels beyond the first.
vocal_band = lowpass_filter(mic, fs_mic, vb_lowpass)
vocal_band = highpass_filter(vocal_band, fs_mic, vb_highpass)
unfiltered_mic = mic
mic = lowpass_filter(mic, fs_mic, mic_lowpass)
mic = highpass_filter(mic, fs_mic, mic_highpass)
mic_env = lowpass_filter(np.abs(mic), fs_mic, 250.0)
vocal_band = np.abs(vocal_band)
vocal_band = lowpass_filter(vocal_band, fs_mic, vb_low)

# find periods of sound on the mic channel that is likely vocalizations
# sound_onset is in data points, not time
sound_onset, sound_offset = find_vocal_periods(vocal_band, vb_stds, vd_stds,
                                               vd_win, fs_mic, onset_shift)
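
# Aside (not from the original script): the chain above -- low-pass, high-pass, rectify,
# then smooth -- is what produces the detection envelope handed to find_vocal_periods.
# A minimal sketch of that chain as one helper, assuming lowpass_filter and highpass_filter
# come from lasp.signal; the corner frequencies below are placeholders, not the
# vb_lowpass / vb_highpass / vb_low values used above.
from lasp.signal import lowpass_filter, highpass_filter

def vocal_band_envelope(x, fs, band_low=500.0, band_high=4000.0, env_cutoff=20.0):
    band_limited = lowpass_filter(x, fs, band_high)               # drop high-frequency content
    band_limited = highpass_filter(band_limited, fs, band_low)    # drop low-frequency rumble
    return lowpass_filter(np.abs(band_limited), fs, env_cutoff)   # rectify and smooth into an envelope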

if shorten:  # if you're troubleshooting just deal with the beginning of the data
    sound_onset = sound_onset[0:18]
    sound_offset = sound_offset[0:18]
stimulus_h5_filename = os.path.join(
    experiment_dir,
    os.path.basename(experiment_file).replace(".rhd", "_stimuli.h5"))
sm = sound_manager.SoundManager(HDF5Store, stimulus_h5_filename)
# This is the entire microphone recording
mic = [
    asig for asig in block.segments[0].analogsignalarrays
    if asig.name == "Board ADC"
][0]
fs_mic = int(mic.sampling_rate)
t_mic = np.asarray(mic.times)
mic = mic.squeeze()
if normalize_mic:
    # because our mic channel often has 1.652 V of DC leak from somewhere!
    mic = mic - np.mean(mic)
vocal_band = lowpass_filter(mic, fs_mic, low_power)
vocal_band = highpass_filter(vocal_band, fs_mic, high_power)
vocal_band = np.abs(vocal_band)
vocal_band = lowpass_filter(vocal_band, fs_mic, vb_low)

sound_onset, sound_offset = find_vocal_periods(vocal_band, vb_stds, vd_stds,
                                               vd_win, fs_mic, onset_shift)
#==============================================================================
# for i in range(len(sound_onset)):
#     t, freq, timefreq, rms = spectrogram(mic[sound_onset[i]:sound_offset[i]], fs_mic, 1000, 50)
#     plot_spectrogram(t, freq, timefreq, dBNoise=80, colorbar = False)
#     pause(.05)
#==============================================================================

# just a temporary template, ~ 1 motif, maybe even a playback but who cares
# TODO make a template finding function
ielec = 4

# This is the entire neural recording (amp) and microphone recording (mic)
amp = [
    asig for asig in segment.analogsignalarrays if asig.name == "Amplifier"
][0]
mic = [
    asig for asig in segment.analogsignalarrays if asig.name == "Board ADC"
][0]

# High pass the microphone
sample_rate = float(mic.sampling_rate)
micvals = np.asarray(mic).squeeze()
micvals -= micvals.mean()
micfilt = highpass_filter(micvals, sample_rate, 400.0)
mic_env = lowpass_filter(np.abs(micfilt), float(sample_rate), 125.0)
max_mic = np.std(mic_env)

# Find good plot boundaries
sample_rate = float(amp.sampling_rate)
amp_all = np.asarray(amp)[:, ielec]
low_amp = lowpass_filter(amp_all, sample_rate, 400.0)
low_maxabs = np.std(low_amp)
high_amp = highpass_filter(amp_all, sample_rate, 400.0)
high_maxabs = np.std(high_amp)
neural_signal_env = lowpass_filter(np.abs(high_amp), float(sample_rate), 50.0)
maxS = np.max(neural_signal_env)

# Find all the stims
all_stims = []
for epoch in segment.epocharrays:
Example #10
# Testing the coherence

import numpy as np
import matplotlib.pyplot as plt

from lasp.coherence import coherence_jn
from lasp.signal import lowpass_filter

# Make two gaussian signals
sample_rate = 1000.0
tlen = 2.0  # 2 second signal

# Make space for both signals
s1 = np.random.normal(0, 1, int(tlen*sample_rate))
s2 = lowpass_filter(s1, sample_rate, 250.0) + np.random.normal(0, 1, int(tlen*sample_rate))
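# Since s2 shares the low-passed copy of s1 below 250 Hz (plus independent noise),
# coherence should be well above zero below the cutoff and fall toward zero above it.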


freq1, c_amp, c_var_amp, c_phase, c_phase_var, cohe_unbiased, cohe_se = coherence_jn(
    s1, s2, sample_rate, 0.1, 0.05)

plt.figure()

plt.plot(freq1, cohe_unbiased, 'k-', linewidth=2.0, alpha=0.9)
plt.plot(freq1, cohe_unbiased+2*(cohe_se), 'g-', linewidth=2.0, alpha=0.75)
plt.plot(freq1, cohe_unbiased-2*(cohe_se), 'c-', linewidth=2.0, alpha=0.75)

plt.show()
#   and use time slice:
# mic_slice = mic.time_slice(epoch.times-tbefore, epoch.times + epoch.durations + tafter)

# pull the microphone channel, filter it etc
mic = [
    asig for asig in block.segments[0].analogsignalarrays
    if asig.name == "Board ADC"
][0]  # This is the entire microphone recording
fs_mic = int(mic.sampling_rate)
t_mic = np.asarray(mic.times)
mic = mic.squeeze()
if normalize_mic:
    # because our mic channel often has 1.652 V of DC leak from somewhere!
    mic = mic - np.mean(mic)
vocal_band = lowpass_filter(mic, fs_mic, low_power)
vocal_band = highpass_filter(vocal_band, fs_mic, high_power)
vocal_band = np.abs(vocal_band)
vocal_band = lowpass_filter(vocal_band, fs_mic, vb_low)

# find periods of sound on the mic channel that is likely vocalizations
# sound_onset is in data points, not time
sound_onset, sound_offset = find_vocal_periods(vocal_band, vb_stds, vd_stds,
                                               vd_win, fs_mic, onset_shift)
if shorten:
    sound_onset = sound_onset[0:18]
    sound_offset = sound_offset[0:18]
    mic = mic[0:sound_offset[17] + fs_mic]

# get templates (temporary at this point still)
template = list()
Example #12
ielec = 10

# This is the entire neural recording (amp) and microphone recording (mic)
amp = [
    asig for asig in segment.analogsignalarrays if asig.name == "Amplifier"
][0]
mic = [
    asig for asig in segment.analogsignalarrays if asig.name == "Board ADC"
][0]

# High pass the microphone
sample_rate = float(mic.sampling_rate)
micvals = np.asarray(mic).squeeze()
micvals -= micvals.mean()
micfilt = highpass_filter(micvals, sample_rate, 400.0)
mic_env = lowpass_filter(np.abs(micfilt), float(sample_rate), 125.0)
max_mic = np.std(mic_env)

# Find good plot boundaries
sample_rate = float(amp.sampling_rate)
amp_all = np.asarray(amp)[:, ielec]
low_amp = lowpass_filter(amp_all, sample_rate, 400.0)
low_maxabs = np.std(low_amp)
high_amp = highpass_filter(amp_all, sample_rate, 400.0)
high_maxabs = np.std(high_amp)
neural_signal_env = lowpass_filter(np.abs(high_amp), float(sample_rate), 50.0)
maxS = np.max(neural_signal_env)

# Find all the stims
all_stims = []
for epoch in segment.epocharrays:
Example #13
# Testing the coherence

import numpy as np
import matplotlib.pyplot as plt

from lasp.coherence import coherence_jn
from lasp.signal import lowpass_filter

# Make two gaussian signals
sample_rate = 1000.0
tlen = 2.0  # 2 second signal

# Make space for both signals
s1 = np.random.normal(0, 1, int(tlen * sample_rate))
s2 = lowpass_filter(s1, sample_rate, 250.0) + np.random.normal(
    0, 1, int(tlen * sample_rate))

freq1, c_amp, c_var_amp, c_phase, c_phase_var, cohe_unbiased, cohe_se = coherence_jn(
    s1, s2, sample_rate, 0.1, 0.05)

plt.figure()

plt.plot(freq1, cohe_unbiased, 'k-', linewidth=2.0, alpha=0.9)
plt.plot(freq1, cohe_unbiased + 2 * (cohe_se), 'g-', linewidth=2.0, alpha=0.75)
plt.plot(freq1, cohe_unbiased - 2 * (cohe_se), 'c-', linewidth=2.0, alpha=0.75)

plt.show()