import os

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib.pyplot import *  # the examples call figure/title/plot/pause etc. bare, pylab-style
from scipy.io import wavfile
from scipy.signal import argrelextrema, correlate
from scipy.stats import zscore

# Assumed home of the spectral helpers used throughout (not stated in these
# excerpts). Experiment, used in draw_figures, and helpers such as
# plot_zscore_spectrogram and get_symmetric_spect_corr come from the lab's
# own code and are not imported here.
from soundsig.sound import spectrogram, plot_spectrogram, spec_colormap
from soundsig.signal import lowpass_filter, highpass_filter


def get_spect_corr(sound_wav, templ_timefreq, freq_index, fs_mic=25000, spec_sample_rate=1000, freq_spacing=50):
    """
        Calculate cross correlation between two spectrograms over only some frequencies, across time. Expects one wav file and
        one spectrogram and a frequency index. The spectrogram 'timefreq' should already be zscored.

        Returns: 
            correlation: spect_corr
              
        Arguments:
            REQUIRED:
                sound_wav, templ_timefreq, freq_index
                      
    """    
    t, freq, timefreq, rms = spectrogram(sound_wav, fs_mic, spec_sample_rate, freq_spacing)
    timefreq = timefreq[freq_index,:]
    timefreq = zscore(timefreq, axis = None) 
    if timefreq.shape[1] < templ_timefreq.shape[1]:
        zeropad = np.zeros((timefreq.shape[0],templ_timefreq.shape[1]-timefreq.shape[1]))
        timefreq = np.append(timefreq, zeropad, axis = 1)
    spect_corr = np.zeros(timefreq.shape[1])  # offsets past the last full window stay zero
    # sliding time
    for time_win in range(timefreq.shape[1] - templ_timefreq.shape[1]):  # no overlap before or after
        # correlate corresponding frequency bands with each other
        freq_corr = np.zeros(len(freq_index))
        for freq_i in range(len(freq_index)):
            in1 = np.abs(templ_timefreq[freq_i])
            in2 = np.abs(timefreq[freq_i][time_win:time_win + len(in1)])
            freq_corr[freq_i] = correlate(in1, in2, mode='valid')[0]
        spect_corr[time_win] = np.sum(freq_corr)
    return spect_corr  
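#==============================================================================
# Usage sketch for get_spect_corr (an assumption, not from the source): build
# a z-scored template over the bands of interest, then slide it along a
# candidate sound. mic, fs_mic, and the sample indices are hypothetical
# placeholders.
#
# templ_wav = mic[120000:145000]
# _, templ_freq, templ_tf, _ = spectrogram(templ_wav, fs_mic, 1000, 50)
# freq_index = np.where((templ_freq >= 1000) & (templ_freq <= 8000))[0]
# templ_tf = zscore(np.abs(templ_tf[freq_index, :]), axis=None)
# spect_corr = get_spect_corr(mic[200000:400000], templ_tf, freq_index, fs_mic)
# best_frame = np.argmax(spect_corr)  # best alignment, in spectrogram frames
#==============================================================================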
def plot_high_corrs(high_corrs, template, mic, template_corrs, amplitude_corrs,
                    corr_ind, amp_ind):
    """
        Aligns and plots a given template against the microphone channel. Returns alignment points.
        Requires high_corrs, an array of the sounds to align (indices into sound_onset),
        and template (int), the template you are matching to.
        
    """
    alignments = list()
    figure(1)
    title('template')
    plot_zscore_spectrogram(template_t[template], template_freq[template],
                            template_timefreq[template])
    for sound in high_corrs:
        align = corr_ind[template][sound]
        good_align = int(sound_onset[sound]) + align - int(
            np.round(.5 * len(template_corrs[template][sound])))
        sound_align_wav = mic[good_align - fs_mic:good_align + 2 * fs_mic]
        sound_t, sound_freq, sound_timefreq, sound_rms = spectrogram(
            sound_align_wav, fs_mic, spec_sample_rate=1000, freq_spacing=50)
        #==============================================================================
        #         figure(2)
        #         plot_spectrogram(sound_t, sound_freq, sound_timefreq)
        #         pause(.1)
        #         close(2)
        #==============================================================================
        alignments.append(good_align)
    return alignments
def plotSoundSeg(fig, seg):
    """
        fig: the figure to draw into
        seg: a neo segment containing the two-channel microphone signal
        Returns the sound from the louder of the two microphones and its sampling rate.
    """

    # clear figure
    fig.clear()

    # The sound signal
    soundSnip = seg.analogsignals[1]
    fs = soundSnip.sampling_rate
    tvals = soundSnip.times

    # Calculate the rms of each mic
    rms0 = np.std(soundSnip[:, 0])
    rms1 = np.std(soundSnip[:, 1])

    # Choose the loudest to plot
    if rms1 > rms0:
        sound = np.asarray(soundSnip[:, 1]).squeeze()
    else:
        sound = np.asarray(soundSnip[:, 0]).squeeze()

    # Calculate envelope and spectrogram
    sound = sound - sound.mean()
    sound_env = lowpass_filter(np.abs(sound), float(fs),
                               250.0)  # sound envelope
    to, fo, spect, rms = spectrogram(sound, float(fs), 1000, 50)

    # Plot sonogram and spectrogram
    gs = gridspec.GridSpec(100, 1)

    ax = fig.add_subplot(gs[0:20, 0])

    ax.plot(tvals - tvals[0], sound / sound.max())
    ax.plot(tvals - tvals[0],
            sound_env / sound_env.max(),
            color="red",
            linewidth=2)
    plt.title('%s %d' % (seg.name, seg.index))
    plt.xlim(0.0, to[-1])

    ax = fig.add_subplot(gs[21:, 0])
    plot_spectrogram(to,
                     fo,
                     spect,
                     ax=ax,
                     ticks=True,
                     fmin=250,
                     fmax=8000,
                     colormap=None,
                     colorbar=False,
                     log=True,
                     dBNoise=50)
    plt.ylabel('Frequency')
    plt.tick_params(labelbottom=False)
    plt.xlim(0.0, to[-1])
    plt.show()
    return sound, fs
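#==============================================================================
# Hedged usage sketch for plotSoundSeg (assumption, not from the source):
# page through the segments of a neo Block; `block` is a hypothetical
# neo.Block loaded elsewhere whose segments carry the two-channel mic signal.
#
# fig = plt.figure(figsize=(10, 6))
# for seg in block.segments:
#     sound, fs = plotSoundSeg(fig, seg)
#     plt.pause(0.5)  # brief look before moving to the next segment
#==============================================================================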
def user_select_templates(sound_playback, stim_env, stimuli, mic, sound_onset, sound_offset, fs_mic, avoid_stims = 0, sound_length_thresh = 0):
    """ 
        Displays a series of spectrograms based on sound_onset (this is not adaptable yet), 
        asking the user which ones to use as templates and then asks the user to select the 
        area of interest.
        Returns lists of template-associated variables:
            template_wav, template_index, templ_t, templ_freq,
            templ_timefreq, template_starts, template_ends

        Set avoid_stims to 1 to browse only sound onsets that don't correspond to a stimulus.

    """
    global cid
    global coords
    template_wav = list()
    templ_t = list()
    templ_freq = list()
    templ_timefreq = list()
    template_index = list()
    template_starts = list()
    template_ends = list()
    i = -1
    keep_looking = 1
    while keep_looking and i < len(sound_onset) - 2:
        i += 1
        if avoid_stims and sound_playback[i] > 0:
            print('Skipping trial %s, stimulus detected' %i)
        elif sound_offset[i] - sound_onset[i] < sound_length_thresh: # only implementing for long templates at the moment
            print('Skipping trial %s, vocalization too short' %i)
        else:
            plot_stim_and_mic([i], sound_playback, stimuli, sound_onset, sound_offset, close_fig = 1, pause_time = 1) # plot a figure of stimulus and amp wav to help user

            template_temp, templ_t_temp, templ_freq_temp, templ_timefreq_temp = get_temporary_template(sound_onset, sound_offset, fs_mic, i) #plot spectrogram on the figure 
            save_template = input("Enter 1 to save, q to quit looking for templates, anything else to continue: ")
            if save_template == '1':
                print("Click around template of interest")
                # show(spect_figure) # this breaks the onclick stuff, so I can't seem to bring the figure to the forefront
                cid = spect_figure.canvas.mpl_connect('button_press_event', onclick)
                waitforbuttonpress()
                waitforbuttonpress()
                template_start = int(np.floor(sound_onset[i] + (coords[-2][0] * fs_mic)))
                template_end = int(np.floor(sound_onset[i] + (coords[-1][0] * fs_mic)))
                templ_wav = mic[template_start:template_end]
                templ_t_temp, templ_freq_temp, templ_timefreq_temp, templ_rms = spectrogram(templ_wav, fs_mic, spec_sample_rate = 1000, freq_spacing = 50)
                template_wav.append(templ_wav)
                templ_t.append(templ_t_temp)
                templ_freq.append(templ_freq_temp)
                templ_timefreq.append(templ_timefreq_temp)
                template_index.append(i)
                template_starts.append(template_start)
                template_ends.append(template_end)
            elif save_template == 'q':
                keep_looking = 0
    return template_wav, template_index, templ_t, templ_freq, templ_timefreq, template_starts, template_ends    
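#==============================================================================
# Hedged call sketch (assumption): user_select_templates relies on
# module-level state (spect_figure, onclick, coords) being set up by the
# surrounding script; the length threshold below (half a second of samples)
# is a placeholder.
#
# (template_wav, template_index, templ_t, templ_freq, templ_timefreq,
#  template_starts, template_ends) = user_select_templates(
#      sound_playback, stim_env, stimuli, mic, sound_onset, sound_offset,
#      fs_mic, avoid_stims=1, sound_length_thresh=int(0.5 * fs_mic))
#==============================================================================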
def get_temporary_template(i = 17):
    """
        Returns a template and its spectrogram; with i = 17 it's some sort of song.
        Really just a placeholder for a better template algorithm.
        TODO replace this with real code, like user-defined sections of spectrograms.
    """
    template = mic[sound_onset[i]:sound_offset[i]]
    template = template[4800:18000] #shift it, particular to this template
    templ_t, templ_freq, templ_timefreq, templ_rms = spectrogram(template, fs_mic, spec_sample_rate = 1000, freq_spacing = 50)
    figure()    
    plot_spectrogram(templ_t, templ_freq, templ_timefreq, dBNoise=80, colorbar = False)
    return template, templ_t, templ_freq, templ_timefreq
def plot_high_corrs(sound_onset,
                    high_corrs,
                    x,
                    y,
                    mic,
                    fs_mic,
                    template_correlations,
                    amplitude_correlations,
                    corr_ind,
                    amp_ind,
                    plot_figures=0):
    """
        Aligns and plots a given template against the microphone channel. Returns alignment points.
        Requires high_corrs, an array of the sounds to align (indices into sounds),
        and x (int), the template you are matching to.
        
    """
    alignments = list()
    amp_alignments = list()
    figure(1)
    title('template')
    plot_zscore_spectrogram(template_t[x], template_freq[x],
                            template_timefreq[x])
    #    plot_zscore_spectrogram(template_t[y], template_freq[y], template_timefreq[y])

    for sound in high_corrs:
        align = corr_ind[x][sound]
        a_align = amp_ind[x][sound]
        good_align = int(sound_onset[sounds[sound]]) + align - int(
            np.round(.5 * len(template_correlations[x][sound])))
        amp_good_align = int(sound_onset[sounds[sound]]) + a_align - int(
            np.round(.5 * len(amplitude_correlations[x][sound])))
        sound_align_wav = mic[good_align - fs_mic:good_align + 2 * fs_mic]
        amp_align_wav = mic[amp_good_align - fs_mic:amp_good_align +
                            2 * fs_mic]
        if plot_figures:
            sound_t, sound_freq, sound_timefreq, sound_rms = spectrogram(
                np.squeeze(sound_align_wav),
                fs_mic,
                spec_sample_rate=1000,
                freq_spacing=50)

            figure(2)
            plot_spectrogram(sound_t, sound_freq, sound_timefreq)
            pause(.1)
            close(2)
        alignments.append(good_align)
        amp_alignments.append(amp_good_align)
    return alignments, amp_alignments
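#==============================================================================
# Hedged call sketch (assumption): align every sound whose peak correlation
# with template 0 clears a threshold. Assumes max_corrs/corr_ind from the
# correlation pass below, plus the module-level template_t/template_freq/
# template_timefreq and sounds arrays. The 0.4 cutoff mirrors a later fragment.
#
# x = 0
# high_corrs = np.where(max_corrs[x] > 0.4)[0]
# alignments, amp_alignments = plot_high_corrs(
#     sound_onset, high_corrs, x, 1, mic, fs_mic, template_correlations,
#     amplitude_correlations, corr_ind, amp_ind, plot_figures=1)
#==============================================================================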
def append_user_select_templates(sound_playback, stim_env, stimuli, mic,
                                 sound_onset, sound_offset, fs_mic,
                                 avoid_stims, sound_length_thresh,
                                 template_wav, template_index, template_t,
                                 template_freq, template_timefreq,
                                 template_starts, template_ends):
    """ 
       appends more templates to above, useful for saerching for templates towards teh end of the file or something.       
    """
    global cid
    global coords
    i = template_index[-1]  # start where you left off searching
    keep_looking = 1
    while keep_looking and i < len(sound_onset) - 2:
        i += 1
        if stims_present:
            if avoid_stims and sound_playback[i] > 0:
                print('Skipping trial %s, stimulus detected' % i)
                continue
        if sound_offset[i] - sound_onset[
                i] < sound_length_thresh * fs_mic:  # only implementing for long templates at the moment
            print('Skipping trial %s, vocalization too short' % i)
        else:
            if stims_present:
                plot_stim_and_mic(
                    [i],
                    sound_playback,
                    stimuli,
                    sound_onset,
                    sound_offset,
                    close_fig=1,
                    pause_time=1
                )  # plot a figure of stimulus and amp wav to help user
            template_temp, templ_t_temp, templ_freq_temp, templ_timefreq_temp = get_temporary_template(
                sound_onset, sound_offset, fs_mic,
                i)  #plot spectrogram on the figure
            save_template = input(
                "Enter 1 to save, q to quit looking for templates, anything else to continue: "
            )
            if save_template == '1':
                print("Click around template of interest")
                # show(spect_figure) # this breaks the onclick stuff, so I can't seem to bring the figure to the forefront
                cid = spect_figure.canvas.mpl_connect('button_press_event',
                                                      onclick)
                waitforbuttonpress()
                waitforbuttonpress()
                template_start = int(
                    np.floor(sound_onset[i] + (coords[-2][0] * fs_mic)))
                template_end = int(
                    np.floor(sound_onset[i] + (coords[-1][0] * fs_mic)))
                templ_wav = mic[template_start:template_end]
                templ_t_temp, templ_freq_temp, templ_timefreq_temp, templ_rms = spectrogram(
                    templ_wav, fs_mic, spec_sample_rate=1000, freq_spacing=50)
                template_wav.append(templ_wav)
                template_t.append(templ_t_temp)
                template_freq.append(templ_freq_temp)
                template_timefreq.append(templ_timefreq_temp)
                template_index.append(i)
                template_starts.append(template_start)
                template_ends.append(template_end)
            elif save_template == 'q':
                keep_looking = 0
    return template_wav, template_index, template_t, template_freq, template_timefreq, template_starts, template_ends
# assumed guard: the original excerpt was truncated; the warning presumably
# fires when at least one sound period exceeds too_long
if np.any(sound_offset - sound_onset >= too_long):
    print(
        "Warning, some sound periods too long ( > %s s.), not analyzing those sounds"
        % (too_long / fs_mic))
sounds = np.squeeze(np.where(sound_offset - sound_onset < too_long))
template_correlations = list()
template_corr_peak = list()
amplitude_correlations = list()
for template in range(num_templates):  # the number of templates,
    corr_list = list()
    amplitude_list = list()
    peak_list = list()
    longest_corr = 0
    longest_amp_corr = 0
    for i in sounds:
        sound_wav = mic[int(sound_onset[i]):int(sound_offset[i])]
        sound_t, sound_freq, sound_timefreq, rms = spectrogram(
            sound_wav, fs_mic, spec_sample_rate, freq_spacing)
        sound_timefreq = sound_timefreq[freq_index, :]
        sound_timefreq = zscore(np.abs(sound_timefreq), axis=None)
        templ_len = len(template_t[template])
        #       spect_corr = get_symmetric_spect_corr(sound_wav, template_wav[template], freq_index, fs_mic)
        spect_corr, amplitude_corr = get_symmetric_spect_corr(
            sound_timefreq, template_timefreq[template], templ_len, fs_mic)

        #        amp_corr = get_amplitude_corr(sound_timefreq, template_timefreq, templ_len, fs_mic)
        #spect_corr = get_spect_corr(sound_wav, template_timefreq[template], freq_index, fs_mic)
        corr_list.append(spect_corr)
        amplitude_list.append(amplitude_corr)
        peaks = argrelextrema(
            spect_corr, np.greater, order=100
        )  # seems to work OK / except now it maybe doesn't - anyway, this is not max_corrs, it's for tracking multiple renditions in a sound period (not working yet)
        peak_list = np.append(peak_list, peaks)
    # assumed: collect this template's results; the lists declared above were
    # never filled in the original excerpt, yet later fragments index them
    template_correlations.append(corr_list)
    amplitude_correlations.append(amplitude_list)
    template_corr_peak.append(peak_list)
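#==============================================================================
# max_corrs and corr_ind are referenced below but never built in this excerpt;
# one plausible construction from template_correlations (an assumption, not
# the source's code). Whether values land in [0, 1] depends on how
# get_symmetric_spect_corr normalizes, which is not shown here.
#
# max_corrs = np.zeros((num_templates, len(sounds)))
# corr_ind = np.zeros((num_templates, len(sounds)), dtype=int)
# for t_i in range(num_templates):
#     for s_i in range(len(sounds)):
#         c = template_correlations[t_i][s_i]
#         max_corrs[t_i, s_i] = np.max(c)
#         corr_ind[t_i, s_i] = int(np.argmax(c))
#==============================================================================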
sound_onset, sound_offset = find_vocal_periods(vocal_band, vb_stds, vd_stds,
                                               vd_win, fs_mic, onset_shift)
#==============================================================================
# for i in range(len(sound_onset)):
#     t, freq, timefreq, rms = spectrogram(mic[sound_onset[i]:sound_offset[i]], fs_mic, 1000, 50)
#     plot_spectrogram(t, freq, timefreq, dBNoise=80, colorbar = False)
#     pause(.05)
#==============================================================================

# just a temporary template, ~ 1 motif, maybe even a playback but who cares
# TODO make a template finding function
i = 17
template = mic[sound_onset[i]:sound_offset[i]]
template = template[4800:18000]  #shift it, particular to this template
templ_t, templ_freq, templ_timefreq, templ_rms = spectrogram(
    template, fs_mic, spec_sample_rate=1000, freq_spacing=50)
plot_spectrogram(templ_t,
                 templ_freq,
                 templ_timefreq,
                 dBNoise=80,
                 colorbar=False)
# Make xcorr of spectrogram - if i = 17 template is present in sound
# TODO this is the part that will have to loop through for every vocal chunk
# for now it is just a bout that has three motifs in it
t, freq, timefreq, rms = spectrogram(mic[sound_onset[i]:sound_offset[i]],
                                     fs_mic,
                                     spec_sample_rate=1000,
                                     freq_spacing=50)

# find the frequencies of interest
freq_index = [0]
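#==============================================================================
# freq_index = [0] looks like a truncated placeholder; one plausible way to
# pick the bands of interest from the template's power (an assumption, not
# the source's method; the 25% threshold is arbitrary).
#
# band_power = np.mean(np.abs(templ_timefreq), axis=1)
# freq_index = np.where(band_power > 0.25 * band_power.max())[0]
#==============================================================================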
high_amp = np.array([  # reconstructed opening; the original excerpt was truncated here
    highpass_filter(channel, sample_rate, 400.0) for channel in amp_slice.T
]).T
corr_table = np.corrcoef(high_amp.T)
# Generate predictions for each channel based on the other channels
high_amp_noise = np.zeros(high_amp.shape)
high_amp_std = np.std(high_amp, axis=0)
high_amp_clean = np.zeros(high_amp.shape)
high_amp_suppressed = np.zeros(high_amp.shape)
neural_signal_env = np.zeros(high_amp.shape)
neural_noise_env = np.zeros(high_amp.shape)
neural_clean_env = np.zeros(high_amp.shape)
GS = 6  # Gain for the sigmoid
if i == 0:
    # update
    ax = fig_clean.add_subplot(gs[0:29, 0])
    to, fo, spect, rms = spectrogram(mic_slice, sample_rate, 1000, 50)
    plot_spectrogram(to,
                     fo,
                     spect,
                     ax=ax,
                     ticks=False,
                     fmin=250,
                     fmax=8000,
                     colormap=None,
                     colorbar=False,
                     log=True,
                     dBNoise=50)
    ax = fig_raw.add_subplot(gs[0:29, 0])
    to, fo, spect, rms = spectrogram(mic_slice, sample_rate, 1000, 50)
    # remaining arguments assumed to mirror the fig_clean call above
    # (the original excerpt was truncated mid-call)
    plot_spectrogram(to,
                     fo,
                     spect,
                     ax=ax,
                     ticks=False,
                     fmin=250,
                     fmax=8000,
                     colormap=None,
                     colorbar=False,
                     log=True,
                     dBNoise=50)
#   and use time slice:
# mic_slice = mic.time_slice(epoch.times-tbefore, epoch.times + epoch.durations + tafter)

# Now let's identify which vocalizations are actually playbacks!!!
# first I need stimuli in a format I can use: sound_playback = -1 if it's not a playback
sound_playback = organize_playbacks(segment, sm, fs_mic)

# find max_corrs of second template (thuck) that are both very large and are not playbacks
# this is very particular to the grant but the idea is generalizable
nonplayback = np.where(sound_playback > -1)
matches = np.where(max_corrs[1][nonplayback] > 0.4)
nonplayback[0][matches]  # indexes of max_corrs[1] above 0.4
for i in range(len(matches[0])):
    sound = mic[int(sound_onset[nonplayback[0][matches][i]]):
                int(sound_offset[nonplayback[0][matches][i]])]
    t, freq, timefreq, rms = spectrogram(sound, sample_rate, 1000, 50)
    figure()
    plot_spectrogram(t, freq, timefreq, dBNoise=80, colorbar=False)

good_matches = [0, 1, 6, 7, 11, 12, 14]
zoomed = ([.37, .49], [.45, .66], [.4, .5], [.4, .5],
          [.4, .5], [.47, .57], [.54, .64])
for i in range(len(good_matches)):
    sound = mic[sound_onset[nonplayback[0][matches][good_matches[i]]]:
                sound_offset[nonplayback[0][matches][good_matches[i]]]]
    sound = sound[int(fs_mic * zoomed[i][0]):int(fs_mic * zoomed[i][1])]
    t, freq, timefreq, rms = spectrogram(sound, sample_rate, 1000, 50)
    figure()
    plot_spectrogram(t, freq, timefreq, dBNoise=80, colorbar=False)
        sound_onsets[0] = -1 # the case where sound onset happens on the first data point
    if sound_onsets[i] == 1:
        break 


# TODO Next is to iterate through vocal_density and check spectrograms, but I'm running out of time for now
# the idea will be to calculate xcorrs of both amplitude waveform and the spectrogram (row by row). Both will be zscored.
# this should give us 
#for i in range(vocal_density.shape[0]):
#    if vocal_density[i] > density_thresh:
#        t, freq, timefreq, rms = spectrogram(mic[i*(window-overlap):i*(window-overlap)+window], fs_mic, 1000, 50)
#        plot_spectrogram(t, freq, timefreq, dBNoise=80)

# xcorr for envelope and spectrogram, then make it slide, zscore, correlate


plot(vocal_density[0:200])
i = 0
t, freq, timefreq, rms = spectrogram(mic[i*(window-overlap):50*(window-overlap)+window], fs_mic, 1000, 50)
plot_spectrogram(t, freq, timefreq, dBNoise=80)

i = 75
t, freq, timefreq, rms = spectrogram(mic[i*(window-overlap):(i+25)*(window-overlap)+window], fs_mic, 1000, 50)
plot_spectrogram(t, freq, timefreq, dBNoise=80)


# mic[36600000:36750000] has a song
# t, freq, timefreq, rms = spectrogram(mic[36600000:36750000], fs_mic, 1000, 50)
# plot_spectrogram(t, freq, timefreq, dBNoise=80)

def draw_figures(data_dir='/auto/tdrive/mschachter/data', bird='GreBlu9508M', output_dir='/auto/tdrive/mschachter/data/sounds'):

    spec_colormap()

    exp_dir = os.path.join(data_dir, bird)
    exp_file = os.path.join(exp_dir, '%s.h5' % bird)
    stim_file = os.path.join(exp_dir, 'stims.h5')
    
    exp = Experiment.load(exp_file, stim_file)
    
    bird = exp.bird_name
    all_stim_ids = list()
    # iterate through the segments and get the stim ids from each epoch table
    for seg in exp.get_all_segments():
        etable = exp.get_epoch_table(seg)
        stim_ids = etable['id'].unique()
        all_stim_ids.extend(stim_ids)

    stim_ids = np.unique(all_stim_ids)

    stim_info = list()
    for stim_id in stim_ids:
        si = exp.stim_table['id'] == stim_id
        assert si.sum() == 1, "Expected exactly one stimulus for id=%d" % stim_id
        stype = exp.stim_table['type'][si].values[0]
        if stype == 'call':
            stype = exp.stim_table['callid'][si].values[0]

        # get sound pressure waveform
        sound = exp.sound_manager.reconstruct(stim_id)
        waveform = np.array(sound.squeeze())
        sample_rate = float(sound.samplerate)
        stim_dur = len(waveform) / sample_rate

        stim_info.append( (stim_id, stype, sample_rate, waveform, stim_dur))

    durations = np.array([x[-1] for x in stim_info])
    max_dur = durations.max()
    min_dur = durations.min()

    max_fig_size = 15.
    min_fig_size = 5.

    for stim_id,stype,sample_rate,waveform,stim_dur in stim_info:

        fname = os.path.join(output_dir, '%s_stim_%d.wav' % (stype, stim_id))
        print('Writing %s...' % fname)
        wavfile.write(fname, sample_rate, waveform)

        dfrac = (stim_dur - min_dur) / (max_dur - min_dur)
        fig_width = dfrac*(max_fig_size - min_fig_size) + min_fig_size

        spec_t,spec_freq,spec,rms = spectrogram(waveform, sample_rate, sample_rate, 136., min_freq=300, max_freq=8000,
                                                log=True, noise_level_db=80, rectify=True, cmplx=False)

        figsize = (fig_width, 5)
        fig = plt.figure(figsize=figsize)
        plot_spectrogram(spec_t, spec_freq, spec, colormap='SpectroColorMap', colorbar=False)
        plt.title('Stim %d: %s' % (stim_id, stype))

        fname = os.path.join(output_dir, '%s_stim_%d.png' % (stype, stim_id))
        plt.savefig(fname, facecolor='w')
        plt.close('all')
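#==============================================================================
# Hedged invocation sketch: the default paths in draw_figures are specific to
# the original machine, so pass your own (the paths below are placeholders).
#
# if __name__ == '__main__':
#     draw_figures(data_dir='/path/to/data', bird='GreBlu9508M',
#                  output_dir='/path/to/output')
#==============================================================================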