# Convert the "too long" threshold from seconds into samples.
too_long = too_long * fs_mic

mic = mic.squeeze()
if normalize_mic:
    # Our mic channel has leak current / noise, so subtract the local DC
    # offset in half-second chunks rather than one global mean.
    i = 0
    previous_i = 0
    # BUGFIX: np.round returns a float, and a float cannot be used as a
    # slice index (TypeError in Python 3 / modern NumPy) — cast to int once.
    half_sec = int(np.round(fs_mic / 2))
    while i < len(mic):
        i = i + half_sec  # filter by the half second, not optimized
        if i > len(mic):
            i = len(mic)
        mic[previous_i:i] = mic[previous_i:i] - np.mean(mic[previous_i:i])
        previous_i = i

if len(mic.shape) > 1:
    mic = mic[:, 0]  # discards any other mic channels beyond the first.

# Band-limit a copy of the mic signal to the vocalization band.
vocal_band = lowpass_filter(mic, fs_mic, vb_lowpass)
vocal_band = highpass_filter(vocal_band, fs_mic, vb_highpass)

# Keep the raw mic around, then band-limit the working copy and build
# an amplitude envelope from its rectified signal.
unfiltered_mic = mic
mic = lowpass_filter(mic, fs_mic, mic_lowpass)
mic = highpass_filter(mic, fs_mic, mic_highpass)
mic_env = lowpass_filter(np.abs(mic), fs_mic, 250.0)

# Rectify and smooth the vocal band to get its amplitude envelope.
vocal_band = np.abs(vocal_band)
vocal_band = lowpass_filter(vocal_band, fs_mic, vb_low)

# find periods of sound on the mic channel that are likely vocalizations
# sound_onset is in data points, not time
sound_onset, sound_offset = find_vocal_periods(vocal_band, vb_stds, vd_stds,
                                               vd_win, fs_mic, onset_shift)

if shorten:
    # if you're troubleshooting just deal with the beginning of the data
    sound_onset = sound_onset[0:18]
    sound_offset = sound_offset[0:18]
# Electrode index to plot.
ielec = 4

# Pull the full neural recording (amp) and microphone recording (mic)
# out of the segment by their analog-signal names.
amp = [asig for asig in segment.analogsignalarrays
       if asig.name == "Amplifier"][0]
mic = [asig for asig in segment.analogsignalarrays
       if asig.name == "Board ADC"][0]

# High-pass the microphone, then smooth its rectified signal into an
# envelope; max_mic scales later plotting.
sample_rate = float(mic.sampling_rate)
micvals = np.asarray(mic).squeeze()
micvals -= micvals.mean()
micfilt = highpass_filter(micvals, sample_rate, 400.0)
mic_env = lowpass_filter(np.abs(micfilt), float(sample_rate), 125.0)
max_mic = np.std(mic_env)

# Split the chosen electrode into low- and high-frequency parts and
# record their spreads to pick sensible plot boundaries.
sample_rate = float(amp.sampling_rate)
amp_all = np.asarray(amp)[:, ielec]
low_amp = lowpass_filter(amp_all, sample_rate, 400.0)
low_maxabs = np.std(low_amp)
high_amp = highpass_filter(amp_all, sample_rate, 400.0)
high_maxabs = np.std(high_amp)

# Envelope of the high-frequency (spiking-band) activity.
neural_signal_env = lowpass_filter(np.abs(high_amp), float(sample_rate), 50.0)
maxS = np.max(neural_signal_env)

# Collect all the stims.
all_stims = []
os.path.basename(experiment_file).replace(".rhd", "_stimuli.h5")) sm = sound_manager.SoundManager(HDF5Store, stimulus_h5_filename) # This is the entire microphone recording mic = [ asig for asig in block.segments[0].analogsignalarrays if asig.name == "Board ADC" ][0] fs_mic = np.int(mic.sampling_rate) t_mic = np.asarray(mic.times) mic = mic.squeeze() if normalize_mic: mic = mic - np.mean( mic ) # because our mic channel often has 1.652 v of dc leak from somewhere! vocal_band = lowpass_filter(mic, fs_mic, low_power) vocal_band = highpass_filter(vocal_band, fs_mic, high_power) vocal_band = np.abs(vocal_band) vocal_band = lowpass_filter(vocal_band, fs_mic, vb_low) sound_onset, sound_offset = find_vocal_periods(vocal_band, vb_stds, vd_stds, vd_win, fs_mic, onset_shift) #============================================================================== # for i in range(len(sound_onset)): # t, freq, timefreq, rms = spectrogram(mic[sound_onset[i]:sound_offset[i]], fs_mic, 1000, 50) # plot_spectrogram(t, freq, timefreq, dBNoise=80, colorbar = False) # pause(.05) #============================================================================== # just a temporary template, ~ 1 motif, maybe even a playback but who cares # TODO make a template finding function i = 17
# Electrode index to plot.
ielec = 10

# Grab the whole neural recording (amp) and microphone recording (mic)
# from the segment, selecting each analog signal by name.
amp = [asig for asig in segment.analogsignalarrays
       if asig.name == "Amplifier"][0]
mic = [asig for asig in segment.analogsignalarrays
       if asig.name == "Board ADC"][0]

# High-pass the microphone and smooth its rectified trace into an
# envelope; max_mic is used as a plotting scale.
sample_rate = float(mic.sampling_rate)
micvals = np.asarray(mic).squeeze()
micvals -= micvals.mean()
micfilt = highpass_filter(micvals, sample_rate, 400.0)
mic_env = lowpass_filter(np.abs(micfilt), float(sample_rate), 125.0)
max_mic = np.std(mic_env)

# Separate the chosen electrode into low- and high-frequency components
# and note their standard deviations for plot boundaries.
sample_rate = float(amp.sampling_rate)
amp_all = np.asarray(amp)[:, ielec]
low_amp = lowpass_filter(amp_all, sample_rate, 400.0)
low_maxabs = np.std(low_amp)
high_amp = highpass_filter(amp_all, sample_rate, 400.0)
high_maxabs = np.std(high_amp)

# Envelope of the high-frequency (spiking-band) signal.
neural_signal_env = lowpass_filter(np.abs(high_amp), float(sample_rate), 50.0)
maxS = np.max(neural_signal_env)

# Gather all the stims.
all_stims = []