def stim_conditions(angles, onebeep_nb, twobeep_nb, onebeep_tc, twobeep_tc):
    """Take the completed stimuli from the functions above and build all of
    the conditions for the flash-beep task, with three possible locations
    (azimuth angles).
    """
    # ---- make single auditory stim ---------------------------------------
    # conditions_1A = [-30_1A, 0_1A, 30_1A, -30_2A, 0_2A, 30_2A]
    spatials_1a = ('-30', '0', '30')
    beep_combos_1a = ('onebeep_nb', 'twobeep_nb', 'onebeep_tc', 'twobeep_tc')

    # ---- make competing auditory stim ------------------------------------
    # conditions_2A = []
    spatials_2a = ('-30x0', '0x30', '-30x30')
    beep_combos_2a = ('onebeep_nbxonebeep_tc', 'twobeep_nbxonebeep_tc',
                      'onebeep_nbxtwobeep_tc', 'twobeep_nbxtwobeep_tc')

    # flatten the 'x'-separated angle pairs into one unique, sorted list
    all_spatials = [s.split('x') for s in spatials_2a]
    for s in all_spatials[1:]:
        all_spatials[0] += s
    all_spatials = list(np.unique([float(s) for s in all_spatials[0]]))

    # same flattening for the beep combinations (these stay as strings, so
    # no float() conversion here)
    all_combos = [ss.split('x') for ss in beep_combos_2a]
    for ss in all_combos[1:]:
        all_combos[0] += ss
    all_combos = list(np.unique(all_combos[0]))

    # ---- convolve with HRTF at the appropriate angles ---------------------
    # (fs is assumed to be defined at module level; onebeep_nb stands in
    # here for whichever stimulus is being placed)
    move_sig = np.concatenate([convolve_hrtf(onebeep_nb, fs, ang)
                               for ang in all_spatials], axis=1)
    return move_sig
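# A quick check of the flatten-and-deduplicate idiom above (a sketch using
# the same inputs; itertools.chain is an equivalent one-liner):
import itertools

import numpy as np

spatials_2a = ('-30x0', '0x30', '-30x30')
parts = itertools.chain.from_iterable(s.split('x') for s in spatials_2a)
print(list(np.unique([float(s) for s in parts])))  # [-30.0, 0.0, 30.0]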
def test_hrtf_convolution():
    """Test HRTF convolution"""
    data = np.random.randn(2, 10000)
    assert_raises(ValueError, convolve_hrtf, data, 44100, 0)
    data = data[0]
    assert_raises(ValueError, convolve_hrtf, data, 44100, 0.5)  # invalid angle
    out = convolve_hrtf(data, 44100, 0)
    out_2 = convolve_hrtf(data, 24414, 0)
    assert_equal(out.ndim, 2)
    assert_equal(out.shape[0], 2)
    assert_true(out.shape[1] > data.size)
    assert_true(out_2.shape[1] < out.shape[1])
    # ensure that, at least for zero degrees, it's close
    out = convolve_hrtf(data, 44100, 0)[:, 1024:-1024]
    assert_allclose(np.mean(rms(out)), rms(data), rtol=1e-1)
    out = convolve_hrtf(data, 44100, -90)
    rmss = rms(out)
    assert_true(rmss[0] > 4 * rmss[1])
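# For reference, minimal usage of the function under test (a sketch): pass a
# mono signal, a sample rate, and an azimuth angle; the result is a stereo
# (2, n) array, with n > len(mono) because of the HRTF filter padding.
import numpy as np

from expyfun.stimuli import convolve_hrtf

mono = np.random.randn(10000)
stereo = convolve_hrtf(mono, 44100, -45)  # -45 degrees (toward the left)
print(stereo.shape)  # (2, n) with n > 10000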
def test_hrtf_convolution():
    """Test HRTF convolution"""
    data = np.random.randn(2, 10000)
    assert_raises(ValueError, convolve_hrtf, data, 44100, 0)
    data = data[0]
    assert_raises(ValueError, convolve_hrtf, data, 44100, 0.5)  # invalid angle
    for source in ['barb', 'cipic']:
        out = convolve_hrtf(data, 44100, 0, source=source)
        out_2 = convolve_hrtf(data, 24414, 0, source=source)
        assert_equal(out.ndim, 2)
        assert_equal(out.shape[0], 2)
        assert_true(out.shape[1] > data.size)
        assert_true(out_2.shape[1] < out.shape[1])
        # ensure that, at least for zero degrees, it's close
        out = convolve_hrtf(data, 44100, 0, source=source)[:, 1024:-1024]
        assert_allclose(np.mean(rms(out)), rms(data), rtol=1e-1)
        out = convolve_hrtf(data, 44100, -90, source=source)
        rmss = rms(out)
        assert_true(rmss[0] > 4 * rmss[1])
def test_hrtf_convolution():
    """Test HRTF convolution."""
    data = np.random.randn(2, 10000)
    assert_raises(ValueError, convolve_hrtf, data, 44100, 0, interp=False)
    data = data[0]
    assert_raises(ValueError, convolve_hrtf, data, 44100, 0.5,
                  interp=False)  # invalid angle when interp=False
    assert_raises(ValueError, convolve_hrtf, data, 44100, 0, source='foo',
                  interp=False)
    assert_raises(ValueError, convolve_hrtf, data, 44100, 90.5, interp=True)
    assert_raises(ValueError, convolve_hrtf, data, 44100, 0, interp='foo')
    for interp in [True, False]:
        for source in ['barb', 'cipic']:
            if interp and source == 'barb':
                # raise an error when trying to interp with 'barb'
                assert_raises(ValueError, convolve_hrtf, data, 44100, 2.5,
                              source=source, interp=interp)
            else:
                out = convolve_hrtf(data, 44100, 0, source=source,
                                    interp=interp)
                out_2 = convolve_hrtf(data, 24414, 0, source=source,
                                      interp=interp)
                assert_equal(out.ndim, 2)
                assert_equal(out.shape[0], 2)
                assert_true(out.shape[1] > data.size)
                assert_true(out_2.shape[1] < out.shape[1])
                if interp:
                    out_3 = convolve_hrtf(data, 44100, 2.5, source=source,
                                          interp=interp)
                    out_4 = convolve_hrtf(data, 44100, -2.5, source=source,
                                          interp=interp)
                    assert_equal(out_3.ndim, 2)
                    assert_equal(out_4.ndim, 2)
                # ensure that, at least for zero degrees, it's close
                out = convolve_hrtf(data, 44100, 0, source=source,
                                    interp=interp)[:, 1024:-1024]
                assert_allclose(np.mean(rms(out)), rms(data), rtol=1e-1)
                out = convolve_hrtf(data, 44100, -90, source=source,
                                    interp=interp)
                rmss = rms(out)
                assert_true(rmss[0] > 4 * rmss[1])
def test_hrtf_convolution():
    """Test HRTF convolution."""
    data = np.random.randn(2, 10000)
    pytest.raises(ValueError, convolve_hrtf, data, 44100, 0, interp=False)
    data = data[0]
    pytest.raises(ValueError, convolve_hrtf, data, 44100, 0.5,
                  interp=False)  # invalid angle when interp=False
    pytest.raises(ValueError, convolve_hrtf, data, 44100, 0, source='foo',
                  interp=False)
    pytest.raises(ValueError, convolve_hrtf, data, 44100, 90.5, interp=True)
    pytest.raises(ValueError, convolve_hrtf, data, 44100, 0, interp='foo')
    for interp in [True, False]:
        for source in ['barb', 'cipic']:
            if interp and source == 'barb':
                # raise an error when trying to interp with 'barb'
                pytest.raises(ValueError, convolve_hrtf, data, 44100, 2.5,
                              source=source, interp=interp)
            else:
                out = convolve_hrtf(data, 44100, 0, source=source,
                                    interp=interp)
                out_2 = convolve_hrtf(data, 24414, 0, source=source,
                                      interp=interp)
                assert_equal(out.ndim, 2)
                assert_equal(out.shape[0], 2)
                assert out.shape[1] > data.size
                assert out_2.shape[1] < out.shape[1]
                if interp:
                    out_3 = convolve_hrtf(data, 44100, 2.5, source=source,
                                          interp=interp)
                    out_4 = convolve_hrtf(data, 44100, -2.5, source=source,
                                          interp=interp)
                    assert_equal(out_3.ndim, 2)
                    assert_equal(out_4.ndim, 2)
                # ensure that, at least for zero degrees, it's close
                out = convolve_hrtf(data, 44100, 0, source=source,
                                    interp=interp)[:, 1024:-1024]
                assert_allclose(np.mean(rms(out)), rms(data), rtol=1e-1)
                out = convolve_hrtf(data, 44100, -90, source=source,
                                    interp=interp)
                rmss = rms(out)
                assert rmss[0] > 4 * rmss[1]
"""
Generate more advanced auditory stimuli
=======================================

This shows the methods that we provide that facilitate generation
of more advanced stimuli.
"""

import numpy as np

from expyfun.stimuli import convolve_hrtf, play_sound, window_edges

fs = 44100
dur = 0.5
freq = 500.
# let's make a square wave
sig = np.sin(freq * 2 * np.pi * np.arange(dur * fs, dtype=float) / fs)
sig = ((sig > 0) - 0.5) / 5.  # make it reasonably quiet for play_sound
sig = window_edges(sig, fs)
play_sound(sig, norm=False, wait=True)

move_sig = np.concatenate([convolve_hrtf(sig, fs, ang)
                           for ang in range(-90, 91, 15)], axis=1)
play_sound(move_sig, norm=False, wait=True)

import matplotlib.pyplot as mpl
mpl.ion()
t = np.arange(move_sig.shape[1]) / float(fs)
mpl.plot(t, move_sig.T)
mpl.xlabel('Time (sec)')
longest_word_samples = np.max([x.shape[-1] for x in word_wavs.values()])
stream_len = np.max(tr_onset_samp) + longest_word_samples
tr_mono = np.zeros((trials, streams, stream_len), dtype=float)
for tnum in range(trials):
    for snum in range(streams):
        for wnum in range(waves):
            word = tr_words[tnum, snum, wnum]
            samps = word_wavs[word][0]
            onset = tr_onset_samp[tnum, snum, wnum]
            offset = onset + len(samps)
            tr_mono[tnum, snum, onset:offset] += samps
            del word, samps, onset, offset

# HRTF CONVOLUTION
print('Convolving with HRTFs')
stream_len = stim.convolve_hrtf(np.zeros(stream_len), output_fs, 0).shape[-1]
tr_hrtf = np.zeros((trials, streams, 2, stream_len), dtype=float)
for tnum in range(trials):
    for snum in range(streams):
        tr_hrtf[tnum, snum] = stim.convolve_hrtf(tr_mono[tnum, snum],
                                                 output_fs, angles[snum])

# RENORMALIZE
print('Renormalizing')
tr_original_rms = stim.rms(tr_mono)
tr_convolved_rms = np.mean(stim.rms(tr_hrtf), axis=-1)
multiplier = tr_original_rms / tr_convolved_rms
tr_norm = (tr_hrtf.T * multiplier.T).T  # broadcasting
tr_norm_rms = np.mean(stim.rms(tr_norm), axis=-1)  # TODO: test RMS

# COMBINE L & R CHANNELS ACROSS STREAMS
tr_stim = np.sum(tr_norm, axis=1)
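# The transposed-broadcast trick above, checked on toy shapes (a sketch with
# random data, not the real streams): tr_hrtf is (trials, streams, 2, n) and
# multiplier is (trials, streams), so transposing both aligns the trailing
# axes for NumPy broadcasting, and transposing back restores the layout.
import numpy as np

toy_hrtf = np.random.randn(4, 3, 2, 100)  # (trials, streams, 2, n)
toy_mult = np.random.rand(4, 3)           # (trials, streams)
toy_norm = (toy_hrtf.T * toy_mult.T).T    # (n, 2, 3, 4) * (3, 4) broadcasts
assert toy_norm.shape == toy_hrtf.shape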
"""
This shows the methods that we provide that facilitate generation
of more advanced stimuli.
"""

import numpy as np
import matplotlib.pyplot as plt

from expyfun import building_doc
from expyfun.stimuli import convolve_hrtf, play_sound, window_edges

fs = 24414
dur = 0.5
freq = 500.
# let's make a square wave
sig = np.sin(freq * 2 * np.pi * np.arange(dur * fs, dtype=float) / fs)
sig = ((sig > 0) - 0.5) / 5.  # make it reasonably quiet for play_sound
sig = window_edges(sig, fs)
play_sound(sig, fs, norm=False, wait=True)

move_sig = np.concatenate(
    [convolve_hrtf(sig, fs, ang) for ang in range(-90, 91, 15)], axis=1)
if not building_doc:
    play_sound(move_sig, fs, norm=False, wait=True)

t = np.arange(move_sig.shape[1]) / float(fs)
plt.plot(t, move_sig.T)
plt.xlabel('Time (sec)')
plt.show()
all_spatials = [s.split('x') for s in spatials]
for s in all_spatials[1:]:
    all_spatials[0] += s
all_spatials = all_spatials[0]
all_spatials = list(np.unique([float(s) for s in all_spatials]))

letter_dir = op.join(work_dir, 'letters')
wavs = np.zeros((len(talkers), len(letters), len(all_spatials), 2,
                 letter_ns))
for li, letter in enumerate(letters):
    for ti, talker in enumerate(talkers):
        data, fs_in = read_wav(op.join(letter_dir, talker,
                                       '%s.wav' % letter), verbose=False)
        data = resample(data[0], fs, fs_in)
        for si, angle in enumerate(all_spatials):
            dd = convolve_hrtf(data, fs, angle)
            dd *= 0.01 / np.mean(rms(data))
            idx = min(dd.shape[1], letter_ns)
            wavs[ti, li, si, :, :idx] = dd[:, :idx]

##############################################################################
# Randomization

n_trials = n_tpc * len(attns) * run_matrix.sum() * len(gap_durs)
trial_dur = (letter_dur * (n_cue_let + n_targ_let) + cue_targ_gap +
             np.mean(gap_durs) + inter_trial_dur)
exp_dur = trial_dur * n_trials
print('Experiment duration: %s min (%s min/block)'
      % (round(exp_dur / 60., 1), round((exp_dur / 60. / n_blocks), 1)))

# figure out what positions work
finalstim_tc = window_edges(tonecomp, fs, ramp_tone, -1, 'hamming')
# finalstim_tc *= 0.01 * np.sqrt(2)  # set RMS to 0.01
# finalstim_tc = finalstim_tc * toneamp
# check the rms:
# tc_rms = rms(finalstim_tc)

##############################################################################
# First: HRTF tonebeep and noisebeep at each angle and store

# add 50 ms delay between beeps (sample counts must be integers, and the
# stimuli are still mono/1-D at this point, before HRTF convolution):
two_beep_delay = np.zeros(int(round(24414 * delay_beeps)), float)
finalstim_tc_delay = np.append(finalstim_tc, two_beep_delay)
finalstim_nb_delay = np.append(finalstim_nb, two_beep_delay)

# with delay
one_1_noise_delay = convolve_hrtf(finalstim_nb_delay, fs, -30.0)
one_2_noise_delay = convolve_hrtf(finalstim_nb_delay, fs, 0.0)
one_3_noise_delay = convolve_hrtf(finalstim_nb_delay, fs, 30.0)
one_1_tone_delay = convolve_hrtf(finalstim_tc_delay, fs, -30.0)
one_2_tone_delay = convolve_hrtf(finalstim_tc_delay, fs, 0.0)
one_3_tone_delay = convolve_hrtf(finalstim_tc_delay, fs, 30.0)

# without delay
one_1_noise = convolve_hrtf(finalstim_nb, fs, -30.0)
one_2_noise = convolve_hrtf(finalstim_nb, fs, 0.0)
one_3_noise = convolve_hrtf(finalstim_nb, fs, 30.0)
one_1_tone = convolve_hrtf(finalstim_tc, fs, -30.0)
one_2_tone = convolve_hrtf(finalstim_tc, fs, 0.0)
one_3_tone = convolve_hrtf(finalstim_tc, fs, 30.0)
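# The six repeated calls above could equivalently be built in one pass (a
# sketch, assuming the same finalstim_nb/finalstim_tc variables), which makes
# it harder for one hard-coded angle to drift out of sync:
angles = (-30.0, 0.0, 30.0)
noise_by_angle = {ang: convolve_hrtf(finalstim_nb, fs, ang) for ang in angles}
tone_by_angle = {ang: convolve_hrtf(finalstim_tc, fs, ang) for ang in angles}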
"""
Generate more advanced auditory stimuli
=======================================

This shows the methods that we provide that facilitate generation
of more advanced stimuli.
"""

import numpy as np
import matplotlib.pyplot as mpl

from expyfun.stimuli import convolve_hrtf, play_sound, window_edges

fs = 24414
dur = 0.5
freq = 500.
# let's make a square wave
sig = np.sin(freq * 2 * np.pi * np.arange(dur * fs, dtype=float) / fs)
sig = ((sig > 0) - 0.5) / 5.  # make it reasonably quiet for play_sound
sig = window_edges(sig, fs)
play_sound(sig, fs, norm=False, wait=True)

move_sig = np.concatenate([convolve_hrtf(sig, fs, ang)
                           for ang in range(-90, 91, 15)], axis=1)
play_sound(move_sig, fs, norm=False, wait=True)

mpl.ion()
t = np.arange(move_sig.shape[1]) / float(fs)
mpl.plot(t, move_sig.T)
mpl.xlabel('Time (sec)')
genders = ['M', 'F']  # male in left channel, female in right
regions = ['NW', 'CH']
talkers = [str(i) for i in range(5)]
loc = ['C', 'S']
az = [0, -60]
n_talkers = 3

wavs = [[np.zeros((2, 0)) for _ in range(2)] for _ in range(2)]
for ti, t in enumerate(talkers[:n_talkers]):
    # for ri, r in enumerate(regions):
    ri = 0
    r = regions[ri]
    wav, fs = stim.read_wav(join(opath, r + t + '.wav'))
    for gi, g in enumerate(genders):
        for li, (l, a) in enumerate(zip(loc, az)):
            wav_loc = stim.convolve_hrtf(wav[gi], fs, a)
            lens = [wavs[gi][li].shape[-1], wav_loc.shape[-1]]
            dl = lens[0] - lens[1]
            if dl < 0:  # pad the running mix to the new, longer length
                wavs[gi][li] = np.concatenate((wavs[gi][li],
                                               np.zeros((2, -dl))), -1)
            if dl > 0:  # pad the new talker to the running mix's length
                wav_loc = np.concatenate((wav_loc, np.zeros((2, dl))), -1)
            wavs[gi][li] += wav_loc

for gi, g in enumerate(genders):
    for li, (l, a) in enumerate(zip(loc, az)):
        fn = join(spath, '%s_%s_%s.wav' % (r, g, l))
        data = wavs[gi][li] / np.sqrt(n_talkers)  # keep RMS roughly constant
        data = np.minimum(1, np.maximum(-1, data))  # clip to [-1, 1]
        stim.write_wav(fn, data, fs, overwrite=True)
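# Why divide by sqrt(n_talkers): summing n independent signals of roughly
# equal RMS raises the mix's RMS by about sqrt(n), so dividing by sqrt(n)
# restores the per-talker level. A quick check with random noise (a sketch,
# not the actual talker recordings):
import numpy as np

rng = np.random.RandomState(0)
sigs = rng.randn(3, 100000)  # three independent unit-variance signals
mixed = sigs.sum(axis=0)
print(np.std(mixed) / np.std(sigs[0]))  # ~sqrt(3) = 1.73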