def test_tdt_delay(hide_window):
    """Test the tdt_delay parameter."""
    # Valid integer delays should be recorded verbatim in the used params.
    for delay in (0, 1):
        with ExperimentController(
                *std_args,
                audio_controller=dict(TYPE='tdt', TDT_DELAY=delay),
                **std_kwargs) as ec:
            assert_equal(ec._ac._used_params['TDT_DELAY'], delay)
    # Invalid delays must raise the appropriate error type.
    bad_cases = (
        (ValueError, 'foo'),      # non-numeric string
        (OverflowError, np.inf),  # not representable as an int
        (TypeError, np.ones(2)),  # not a scalar
        (ValueError, -1),         # negative delay is rejected
    )
    for err, bad in bad_cases:
        pytest.raises(err, ExperimentController, *std_args,
                      audio_controller=dict(TYPE='tdt', TDT_DELAY=bad),
                      **std_kwargs)
def test_reconstruct(): """Test Tracker objects reconstruction""" # test with one TrackerUD with ExperimentController(*std_args, **std_kwargs) as ec: tr = TrackerUD(ec, 1, 1, 3, 1, 5, np.inf, 3) while not tr.stopped: tr.respond(np.random.rand() < tr.x_current) tracker = reconstruct_tracker(ec.data_fname)[0] assert_true(tracker.stopped) tracker.x_current # test with one TrackerBinom with ExperimentController(*std_args, **std_kwargs) as ec: tr = TrackerBinom(ec, .05, .5, 10) while not tr.stopped: tr.respond(True) tracker = reconstruct_tracker(ec.data_fname)[0] assert_true(tracker.stopped) tracker.x_current # tracker not stopped with ExperimentController(*std_args, **std_kwargs) as ec: tr = TrackerUD(ec, 1, 1, 3, 1, 5, np.inf, 3) tr.respond(np.random.rand() < tr.x_current) assert_true(not tr.stopped) assert_raises(ValueError, reconstruct_tracker, ec.data_fname) # test with dealer with ExperimentController(*std_args, **std_kwargs) as ec: tr = [TrackerUD(ec, 1, 1, 3, 1, 5, np.inf, 3) for _ in range(3)] td = TrackerDealer(ec, tr) for _, x_current in td: td.respond(np.random.rand() < x_current) dealer = reconstruct_dealer(ec.data_fname)[0] assert_true(all(td._x_history == dealer._x_history)) assert_true(all(td._tracker_history == dealer._tracker_history)) assert_true(all(td._response_history == dealer._response_history)) assert_true(td.shape == dealer.shape) assert_true(td.trackers.shape == dealer.trackers.shape) # no tracker/dealer in file with ExperimentController(*std_args, **std_kwargs) as ec: ec.identify_trial(ec_id='one', ttl_id=[0]) ec.start_stimulus() ec.write_data_line('misc', 'trial one') ec.stop() ec.trial_ok() ec.write_data_line('misc', 'end') assert_raises(ValueError, reconstruct_tracker, ec.data_fname) assert_raises(ValueError, reconstruct_dealer, ec.data_fname)
def test_screen_monitor(screen_num, monitor):
    """Test screen and monitor option support."""
    # NOTE(review): a second test_screen_monitor is defined later in this
    # file and will shadow this one at collection time — confirm intent.
    # A valid screen_num / monitor combination constructs cleanly.
    with ExperimentController(*std_args, screen_num=screen_num,
                              monitor=monitor, **std_kwargs):
        pass
    # Invalid monitor arguments are rejected with specific messages.
    bad_monitors = ((1, TypeError, 'must be a dict'),
                    ({}, KeyError, 'is missing required keys'))
    for bad, err, msg in bad_monitors:
        with pytest.raises(err, match=msg):
            ExperimentController(*std_args, monitor=bad, **std_kwargs)
def test_screen_monitor(screen_num, monitor, hide_window):
    """Test screen and monitor option support."""
    # A valid screen_num / monitor combination constructs cleanly.
    with ExperimentController(
            *std_args, screen_num=screen_num, monitor=monitor, **std_kwargs):
        pass
    # full_screen=True with the (small) test window size should be caught
    # as a resolution mismatch at init time.
    full_kwargs = deepcopy(std_kwargs)
    full_kwargs['full_screen'] = True
    with pytest.raises(RuntimeError, match='resolution set incorrectly'):
        ExperimentController(*std_args, **full_kwargs)
    # monitor must be a dict containing the required calibration keys
    with pytest.raises(TypeError, match='must be a dict'):
        ExperimentController(*std_args, monitor=1, **std_kwargs)
    with pytest.raises(KeyError, match='is missing required keys'):
        ExperimentController(*std_args, monitor={}, **std_kwargs)
def test_background_color():
    """Test setting background color.

    After drawing white text on a red background every pixel should be
    either white or red; after a black rectangle on a gray background
    every pixel should be gray or black.
    """
    # NOTE(review): a second test_background_color is defined later in this
    # file and will shadow this one at collection time — confirm intent.
    with ExperimentController(*std_args, participant='foo', session='01',
                              output_dir=None, version='dev') as ec:
        ec.set_background_color('red')
        ec.screen_text('red', color='white')
        ss = ec.screenshot()[:, :, :3]
        # Fix: reduce over the channel axis so each mask is per *pixel*;
        # the original compared per channel, which made the final .all()
        # assertion only coincidentally correct. This matches the
        # .all(axis=-1) idiom used by the later version of this test.
        white_mask = (ss == [255] * 3).all(axis=-1)
        assert_true(white_mask.any())
        red_mask = (ss == [255, 0, 0]).all(axis=-1)
        assert_true(red_mask.any())
        assert_true(np.logical_or(white_mask, red_mask).all())
        ec.flip()
        ec.set_background_color('0.5')
        visual.Rectangle(ec, [0, 0, 1, 1], fill_color='black').draw()
        ss = ec.screenshot()[:, :, :3]
        # allow for rounding of 0.5 gray to either 127 or 128
        gray_mask = ((ss == [127] * 3).all(axis=-1) |
                     (ss == [128] * 3).all(axis=-1))
        assert_true(gray_mask.any())
        black_mask = (ss == [0] * 3).all(axis=-1)
        assert_true(black_mask.any())
        assert_true(np.logical_or(gray_mask, black_mask).all())
def test_button_presses_and_window_size():
    """Test EC window_size=None and button press capture """
    # NOTE(review): later versions of this test exist in this file and will
    # shadow this one at collection time — confirm intent.
    warnings.simplefilter('ignore')
    with ExperimentController(*std_args, audio_controller='pyglet',
                              response_device='keyboard',
                              window_size=None, output_dir=None,
                              full_screen=False, participant='foo',
                              session='01',
                              force_quit='escape',  # esc as quit key
                              version='dev') as ec:
        warnings.simplefilter('always')
        # queue a fake press slightly in the future, then wait for it
        fake_button_press(ec, '1', 0.3)
        assert_equal(
            ec.screen_prompt('press 1', live_keys=['1'], max_wait=1.5), '1')
        ec.screen_text('press 1 again')
        ec.flip()
        fake_button_press(ec, '1', 0.3)
        assert_equal(ec.wait_one_press(1.5, live_keys=[1])[0], '1')
        ec.screen_text('press 1 one last time')
        ec.flip()
        fake_button_press(ec, '1', 0.3)
        out = ec.wait_for_presses(1.5, live_keys=['1'], timestamp=False)
        assert_equal(out[0], '1')
def test_unit_conversions():
    """Test unit conversions """
    # exercise both non-square and square window aspect ratios
    for ws in [(2, 1), (1, 1)]:
        kwargs = deepcopy(std_kwargs)
        kwargs['stim_fs'] = 44100
        kwargs['window_size'] = ws
        with ExperimentController(*std_args, **kwargs) as ec:
            verts = np.random.rand(2, 4)
            # every unit pair must round-trip losslessly
            for to in ['norm', 'pix', 'deg']:
                for fro in ['norm', 'pix', 'deg']:
                    print((ws, to, fro))
                    v2 = ec._convert_units(verts, fro, to)
                    v2 = ec._convert_units(v2, to, fro)
                    assert_allclose(verts, v2)
            # test that degrees yield equiv. pixels in both directions
            verts = np.ones((2, 1))
            v0 = ec._convert_units(verts, 'deg', 'pix')
            verts = np.zeros((2, 1))
            v1 = ec._convert_units(verts, 'deg', 'pix')
            v2 = v0 - v1  # must check deviation from zero position
            assert_allclose(v2[0], v2[1])
            # unknown unit name and wrong input shape must be rejected
            assert_raises(ValueError, ec._convert_units, verts,
                          'deg', 'nothing')
            assert_raises(RuntimeError, ec._convert_units, verts[0],
                          'deg', 'pix')
def test_mouse_clicks(hide_window):
    """Test EC mouse click support."""
    with ExperimentController(*std_args, participant='foo', session='01',
                              output_dir=None, version='dev') as ec:
        rect = visual.Rectangle(ec, [0, 0, 2, 2])
        # click inside the rectangle; result is (button, x, y)
        fake_mouse_click(ec, [1, 2], delay=0.3)
        assert_equal(
            ec.wait_for_click_on(rect, 1.5, timestamp=False)[0],
            ('left', 1, 2))
        # a tuple of objects is not an accepted target type
        pytest.raises(TypeError, ec.wait_for_click_on, (rect, rect), 1.5)
        fake_mouse_click(ec, [2, 1], 'middle', delay=0.3)
        out = ec.wait_one_click(1.5, 0., ['middle'], timestamp=True)
        assert (out[3] < 1.5)  # timestamp is within the wait window
        assert_equal(out[:3], ('middle', 2, 1))
        # two queued clicks should both be collected
        fake_mouse_click(ec, [3, 2], 'left', delay=0.3)
        fake_mouse_click(ec, [4, 5], 'right', delay=0.3)
        out = ec.wait_for_clicks(1.5, timestamp=False)
        assert_equal(len(out), 2)
        assert (any(o == ('left', 3, 2) for o in out))
        assert (any(o == ('right', 4, 5) for o in out))
        # no clicks queued -> empty result
        out = ec.wait_for_clicks(0.1)
        assert_equal(len(out), 0)
def test_background_color(hide_window):
    """Test setting background color"""
    with ExperimentController(*std_args, participant='foo', session='01',
                              output_dir=None, version='dev') as ec:
        print((ec.window.width, ec.window.height))
        # a pure red background -> every pixel red
        ec.set_background_color('red')
        ss = ec.screenshot()[:, :, :3]
        red_mask = (ss == [255, 0, 0]).all(axis=-1)  # per-pixel comparison
        assert (red_mask.all())
        ec.set_background_color('white')
        ss = ec.screenshot()[:, :, :3]
        white_mask = (ss == [255] * 3).all(axis=-1)
        assert (white_mask.all())
        ec.flip()
        # gray background plus a black rectangle: pixels must be one or
        # the other, nothing else
        ec.set_background_color('0.5')
        visual.Rectangle(ec, [0, 0, 1, 1], fill_color='black').draw()
        ss = ec.screenshot()[:, :, :3]
        # allow 0.5 gray to round to either 127 or 128
        gray_mask = ((ss == [127] * 3).all(axis=-1) |
                     (ss == [128] * 3).all(axis=-1))
        assert (gray_mask.any())
        black_mask = (ss == [0] * 3).all(axis=-1)
        assert (black_mask.any())
        assert (np.logical_or(gray_mask, black_mask).all())
def test_parse():
    """Test .tab parsing """
    # NOTE(review): another test_parse is defined later in this file and
    # will shadow this one at collection time — confirm intent.
    # run two trials with between-trial writes so the parser has
    # group boundaries to work with
    with ExperimentController(*std_args, stim_fs=44100, **std_kwargs) as ec:
        ec.identify_trial(ec_id='one', ttl_id=[0])
        ec.start_stimulus()
        ec.write_data_line('misc', 'trial one')
        ec.stop()
        ec.trial_ok()
        ec.write_data_line('misc', 'between trials')
        ec.identify_trial(ec_id='two', ttl_id=[1])
        ec.start_stimulus()
        ec.write_data_line('misc', 'trial two')
        ec.stop()
        ec.trial_ok()
        ec.write_data_line('misc', 'end of experiment')

    # invalid group boundary arguments must be rejected
    assert_raises(ValueError, read_tab, ec.data_fname, group_start='foo')
    assert_raises(ValueError, read_tab, ec.data_fname, group_end='foo')
    assert_raises(ValueError, read_tab, ec.data_fname, group_end='trial_id')
    assert_raises(RuntimeError, read_tab, ec.data_fname, group_end='misc')
    data = read_tab(ec.data_fname)
    keys = list(data[0].keys())
    assert_equal(len(keys), 6)
    for key in ['trial_id', 'flip', 'play', 'stop', 'misc', 'trial_ok']:
        assert_in(key, keys)
    # with the default group_end, between-trial misc lines are excluded
    assert_equal(len(data[0]['misc']), 1)
    assert_equal(len(data[1]['misc']), 1)
    data = read_tab(ec.data_fname, group_end=None)
    assert_equal(len(data[0]['misc']), 2)  # includes between-trials stuff
    assert_equal(len(data[1]['misc']), 2)
def test_logging(ac, tmpdir, hide_window):
    """Test logging to file (Pyglet).

    Runs an ExperimentController in a temp dir and checks that expected
    messages (participant, session, warnings, backend name) appear in the
    log file.
    """
    # NOTE(review): another test_logging is defined in this file and one of
    # them shadows the other at collection time — confirm intent.
    if ac != 'tdt':
        _check_skip_backend(ac)
    orig_dir = os.getcwd()
    os.chdir(str(tmpdir))
    try:
        with ExperimentController(*std_args, audio_controller=ac,
                                  response_device='keyboard',
                                  trigger_controller='dummy',
                                  **std_kwargs) as ec:
            test_name = ec._log_file
            stamp = ec.current_time
            ec.wait_until(stamp)  # wait_until called w/already passed timest.
            with pytest.warns(UserWarning, match='RMS'):
                ec.load_buffer([1., -1., 1., -1., 1., -1.])  # RMS warning

        with open(test_name) as fid:
            data = '\n'.join(fid.readlines())
        # check for various expected log messages (TODO: add more)
        should_have = [
            'Participant: foo', 'Session: 01', 'wait_until was called',
            'Stimulus max RMS ('
        ]
        if ac == 'tdt':
            should_have.append('TDT')
        else:
            should_have.append('sound card')
            # Fix: only dict configs carry a SOUND_CARD_BACKEND entry;
            # the original `ac != 'auto' and ac['SOUND_CARD_BACKEND']`
            # would raise TypeError when ac is any plain string, so guard
            # with isinstance instead.
            if isinstance(ac, dict) and ac['SOUND_CARD_BACKEND'] != 'auto':
                should_have.append(ac['SOUND_CARD_BACKEND'])
        assert_have_all(data, should_have)
    finally:
        # always restore the working directory for subsequent tests
        os.chdir(orig_dir)
def test_logging(ac='pyglet'):
    """Test logging to file (Pyglet)."""
    # NOTE(review): another test_logging is defined in this file and one of
    # them shadows the other at collection time — confirm intent.
    tempdir = _TempDir()
    orig_dir = os.getcwd()
    os.chdir(tempdir)
    try:
        with ExperimentController(*std_args, audio_controller=ac,
                                  response_device='keyboard',
                                  trigger_controller='dummy',
                                  **std_kwargs) as ec:
            test_name = ec._log_file
            stamp = ec.current_time
            ec.wait_until(stamp)  # wait_until called w/already passed timest.
            with warnings.catch_warnings(record=True):
                warnings.simplefilter('always')
                ec.load_buffer([1., -1., 1., -1., 1., -1.])  # RMS warning
        with open(test_name) as fid:
            data = '\n'.join(fid.readlines())
        # check for various expected log messages (TODO: add more)
        should_have = ['Participant: foo', 'Session: 01',
                       'wait_until was called', 'Stimulus max RMS (']
        if ac == 'pyglet':
            should_have.append('Pyglet')
        else:
            should_have.append('TDT')
        for s in should_have:
            if s not in data:
                raise ValueError('Missing data: "{0}" in:\n{1}'
                                 ''.format(s, data))
    finally:
        # always restore the working directory for subsequent tests
        os.chdir(orig_dir)
def test_parse(hide_window):
    """Test .tab parsing."""
    # run two trials with between-trial writes so the parser has
    # group boundaries to work with
    with ExperimentController(*std_args, **std_kwargs) as ec:
        ec.identify_trial(ec_id='one', ttl_id=[0])
        ec.start_stimulus()
        ec.write_data_line('misc', 'trial one')
        ec.stop()
        ec.trial_ok()
        ec.write_data_line('misc', 'between trials')
        ec.identify_trial(ec_id='two', ttl_id=[1])
        ec.start_stimulus()
        ec.write_data_line('misc', 'trial two')
        ec.stop()
        ec.trial_ok()
        ec.write_data_line('misc', 'end of experiment')

    # invalid group boundary arguments must be rejected
    pytest.raises(ValueError, read_tab, ec.data_fname, group_start='foo')
    pytest.raises(ValueError, read_tab, ec.data_fname, group_end='foo')
    pytest.raises(ValueError, read_tab, ec.data_fname, group_end='trial_id')
    pytest.raises(RuntimeError, read_tab, ec.data_fname, group_end='misc')
    data = read_tab(ec.data_fname)
    keys = list(data[0].keys())
    assert_equal(len(keys), 6)
    for key in ['trial_id', 'flip', 'play', 'stop', 'misc', 'trial_ok']:
        assert key in keys
    # with the default group_end, between-trial misc lines are excluded
    assert_equal(len(data[0]['misc']), 1)
    assert_equal(len(data[1]['misc']), 1)
    data, params = read_tab(ec.data_fname, group_end=None, return_params=True)
    assert_equal(len(data[0]['misc']), 2)  # includes between-trials stuff
    assert_equal(len(data[1]['misc']), 2)
    # metadata stored in the header line should round-trip
    assert_equal(params['version'], 'dev')
    assert_equal(params['version_used'], __version__)
    assert (params['file'].endswith('test_parse.py'))
def test_button_presses_and_window_size():
    """Test EC window_size=None and button press capture."""
    # NOTE(review): other versions of this test exist in this file; the
    # last definition wins at collection time — confirm intent.
    warnings.simplefilter('ignore')
    with ExperimentController(*std_args, audio_controller='pyglet',
                              response_device='keyboard', window_size=None,
                              output_dir=None, full_screen=False,
                              session='01', participant='foo',
                              trigger_controller='dummy',
                              force_quit='escape',  # esc as quit key
                              version='dev') as ec:
        warnings.simplefilter('always')
        # empty state: nothing pressed yet
        ec.listen_presses()
        ec.get_presses()
        assert_equal(ec.get_presses(), [])
        fake_button_press(ec, '1', 0.5)
        assert_equal(
            ec.screen_prompt('press 1', live_keys=['1'], max_wait=1.5), '1')
        ec.listen_presses()
        assert_equal(ec.get_presses(), [])
        fake_button_press(ec, '1')
        # without timestamps each press is a 1-tuple
        assert_equal(ec.get_presses(timestamp=False), [('1', )])
        ec.listen_presses()
        fake_button_press(ec, '1')
        # with timestamps each press is (key, time)
        presses = ec.get_presses(timestamp=True, relative_to=0.2)
        assert_equal(len(presses), 1)
        assert_equal(len(presses[0]), 2)
        assert_equal(presses[0][0], '1')
        assert_true(isinstance(presses[0][1], float))
        ec.listen_presses()
        fake_button_press(ec, '1')
        # return_kinds adds the event kind -> (key, time, kind)
        presses = ec.get_presses(timestamp=True, relative_to=0.1,
                                 return_kinds=True)
        assert_equal(len(presses), 1)
        assert_equal(len(presses[0]), 3)
        assert_equal(presses[0][::2], ('1', 'press'))
        assert_true(isinstance(presses[0][1], float))
        ec.listen_presses()
        fake_button_press(ec, '1')
        presses = ec.get_presses(timestamp=False, return_kinds=True)
        assert_equal(presses, [('1', 'press')])
        ec.listen_presses()
        ec.screen_text('press 1 again')
        ec.flip()
        fake_button_press(ec, '1', 0.3)
        assert_equal(ec.wait_one_press(1.5, live_keys=[1])[0], '1')
        ec.screen_text('press 1 one last time')
        ec.flip()
        fake_button_press(ec, '1', 0.3)
        out = ec.wait_for_presses(1.5, live_keys=['1'], timestamp=False)
        assert_equal(out[0], '1')
def test_parse_basic(hide_window, tmpdir):
    """Test .tab parsing."""
    # run two trials with between-trial writes so the parser has
    # group boundaries to work with
    with ExperimentController(*std_args, **std_kwargs) as ec:
        ec.identify_trial(ec_id='one', ttl_id=[0])
        ec.start_stimulus()
        ec.write_data_line('misc', 'trial one')
        ec.stop()
        ec.trial_ok()
        ec.write_data_line('misc', 'between trials')
        ec.identify_trial(ec_id='two', ttl_id=[1])
        ec.start_stimulus()
        ec.write_data_line('misc', 'trial two')
        ec.stop()
        ec.trial_ok()
        ec.write_data_line('misc', 'end of experiment')

    # invalid group boundary arguments must be rejected
    pytest.raises(ValueError, read_tab, ec.data_fname, group_start='foo')
    pytest.raises(ValueError, read_tab, ec.data_fname, group_end='foo')
    pytest.raises(ValueError, read_tab, ec.data_fname, group_end='trial_id')
    pytest.raises(RuntimeError, read_tab, ec.data_fname, group_end='misc')
    data = read_tab(ec.data_fname)
    keys = list(data[0].keys())
    assert_equal(len(keys), 6)
    for key in ['trial_id', 'flip', 'play', 'stop', 'misc', 'trial_ok']:
        assert key in keys
    # with the default group_end, between-trial misc lines are excluded
    assert_equal(len(data[0]['misc']), 1)
    assert_equal(len(data[1]['misc']), 1)
    data, params = read_tab(ec.data_fname, group_end=None, return_params=True)
    assert_equal(len(data[0]['misc']), 2)  # includes between-trials stuff
    assert_equal(len(data[1]['misc']), 2)
    assert_equal(params['version'], 'dev')
    assert_equal(params['version_used'], __version__)
    assert (params['file'].endswith('test_parse.py'))

    # handle old files where the last trial_ok was missing
    bad_fname = str(tmpdir.join('bad.tab'))
    with open(ec.data_fname, 'r') as fid:
        lines = fid.readlines()
    assert 'trial_ok' in lines[-3]
    with open(bad_fname, 'w') as fid:
        # we used to write JSON badly
        fid.write(lines[0].replace('"', "'"))
        # and then sometimes missed the last trial_ok
        for line in lines[1:-3]:
            fid.write(line)
    # without the opt-in flag, the truncated file is an error
    with pytest.raises(RuntimeError, match='bad bounds'):
        read_tab(bad_fname)
    data, params = read_tab(ec.data_fname, return_params=True)
    data_2, params_2 = read_tab(bad_fname, return_params=True,
                                allow_last_missing=True)
    assert params == params_2
    # only the final trial_ok should differ between good and bad files
    t = data[-1].pop('trial_ok')
    t_2 = data_2[-1].pop('trial_ok')
    assert t != t_2
    assert data_2 == data
def test_eyelink_methods():
    """Test EL methods """
    with ExperimentController(*std_args, **std_kwargs) as ec:
        # bad sampling rate is rejected before a controller is created
        assert_raises(ValueError, EyelinkController, ec, fs=999)
        el = EyelinkController(ec)
        assert_raises(RuntimeError, EyelinkController, ec)  # can't have 2 open
        assert_raises(ValueError, el.custom_calibration, ctype='hey')
        el.custom_calibration()
        el._open_file()
        assert_raises(RuntimeError, el._open_file)  # already open
        el._start_recording()
        el.get_eye_position()
        # wait_for_fix needs a 2-element position
        assert_raises(ValueError, el.wait_for_fix, [1])
        # an off-screen target can never be fixated -> returns False
        x = el.wait_for_fix([-10000, -10000], max_wait=0.1)
        assert_true(x is False)
        assert el.eye_used
        print(el.file_list)
        assert_true(len(el.file_list) > 0)
        print(el.fs)
        # run much of the calibration code, but don't *actually* do it
        el._fake_calibration = True
        el.calibrate(beep=False, prompt=False)
        el._fake_calibration = False
        # missing el_id
        assert_raises(KeyError, ec.identify_trial, ec_id='foo', ttl_id=[0])
        ec.identify_trial(ec_id='foo', ttl_id=[0], el_id=[1])
        ec.start_stimulus()
        ec.stop()
        ec.trial_ok()
        ec.identify_trial(ec_id='foo', ttl_id=[0], el_id=[1, 1])
        ec.start_stimulus()
        ec.stop()
        ec.trial_ok()
        # el_id entries must be scalar, limited in count, and list-like
        assert_raises(ValueError, ec.identify_trial, ec_id='foo', ttl_id=[0],
                      el_id=[1, dict()])
        assert_raises(ValueError, ec.identify_trial, ec_id='foo', ttl_id=[0],
                      el_id=[0] * 13)
        assert_raises(TypeError, ec.identify_trial, ec_id='foo', ttl_id=[0],
                      el_id=dict())
        assert_raises(TypeError, el._message, 1)
        el.stop()
        el.transfer_remote_file(el.file_list[0])
        assert_true(not el._closed)
    # ec.close() auto-calls el.close()
    assert_true(el._closed)
def test_joystick(hide_window, monkeypatch):
    """Test joystick support."""
    import pyglet

    # Patch pyglet's joystick discovery to return a single fake device so
    # no hardware is required.
    fake_stick = _FakeJoystick()
    monkeypatch.setattr(pyglet.input, 'get_joysticks',
                        lambda: [fake_stick])
    with ExperimentController(*std_args, joystick=True, **std_kwargs) as ec:
        ec.listen_joystick_button_presses()
        # simulate a button-1 press and check it is captured
        fake_stick.on_joybutton_press(fake_stick, 1)
        captured = ec.get_joystick_button_presses()
        assert len(captured) == 1
        assert captured[0][0] == '1'
        # axis value comes straight from the fake device
        assert ec.get_joystick_value('x') == 0.125
def test_sound_card_triggering(hide_window):
    """Test using the sound card as a trigger controller."""
    # NOTE(review): a longer version of this test exists later in this file
    # and will shadow this one at collection time — confirm intent.
    # zero trigger channels is invalid when triggering via the sound card
    audio_controller = dict(TYPE='sound_card',
                            SOUND_CARD_TRIGGER_CHANNELS='0')
    with pytest.raises(ValueError, match='SOUND_CARD_TRIGGER_CHANNELS is zer'):
        ExperimentController(*std_args, audio_controller=audio_controller,
                             trigger_controller='sound_card',
                             suppress_resamp=True, **std_kwargs)
    audio_controller.update(SOUND_CARD_TRIGGER_CHANNELS='1')
    # Use 1 trigger ch and 1 output ch because this should work on all systems
    with ExperimentController(*std_args, audio_controller=audio_controller,
                              trigger_controller='sound_card', n_channels=1,
                              suppress_resamp=True, **std_kwargs) as ec:
        ec.identify_trial(ttl_id=[1, 0], ec_id='')
        ec.load_buffer([1e-2])
        ec.start_stimulus()
        ec.stop()
def test_crm_response_menu(hide_window):
    """Test the CRM Response menu function."""
    with ExperimentController('crm_menu', **std_kwargs) as ec:
        # no response within max_wait -> (None, None)
        resp = crm_response_menu(ec, max_wait=0.05)
        # smoke-test the supported color/number argument combinations
        for menu_kwargs in (dict(numbers=[0, 1, 2]),
                            dict(colors=['blue']),
                            dict(colors=['r'], numbers=['7'])):
            crm_response_menu(ec, max_wait=0.05, **menu_kwargs)
        assert_equal(resp, (None, None))
        # invalid wait ordering and duplicate colors must be rejected
        pytest.raises(ValueError, crm_response_menu, ec,
                      max_wait=0, min_wait=1)
        pytest.raises(ValueError, crm_response_menu, ec, colors=['g', 'g'])
def test_tdtpy_failure(hide_window):
    """Test that failed TDTpy import raises ImportError."""
    # This test is only meaningful when TDTpy is absent; if the import
    # succeeds there is nothing to verify, so skip.
    try:
        from tdt.util import connect_rpcox  # noqa, analysis:ignore
    except ImportError:
        pass
    else:
        pytest.skip('Cannot test TDT import failure')
    tdt_controller = dict(TYPE='tdt', TDT_MODEL='RP2')
    with pytest.raises(ImportError, match='No module named'):
        ExperimentController(
            *std_args, audio_controller=tdt_controller,
            response_device='keyboard', trigger_controller='tdt',
            stim_fs=100., suppress_resamp=True, **std_kwargs)
def test_data_line():
    """Test writing of data lines """
    # each entry is (event, value[, timestamp]); tabs/backslashes in the
    # value must be escaped on disk
    entries = [['foo'],
               ['bar', 'bar\tbar'],
               ['bar2', r'bar\tbar'],
               ['fb', None, -0.5]]
    # this is what should be written to the file for each one
    goal_vals = ['None', 'bar\\tbar', 'bar\\\\tbar', 'None']
    assert_equal(len(entries), len(goal_vals))
    temp_dir = _TempDir()
    these_kwargs = deepcopy(std_kwargs)
    these_kwargs['output_dir'] = temp_dir
    with ExperimentController(*std_args, stim_fs=44100,
                              **these_kwargs) as ec:
        for ent in entries:
            ec.write_data_line(*ent)
        fname = ec._data_file.name
    with open(fname) as fid:
        lines = fid.readlines()
    # check the header
    assert_equal(len(lines), len(entries) + 4)  # header, colnames, flip, stop
    assert_equal(lines[0][0], '#')  # first line is a comment
    for x in ['timestamp', 'event', 'value']:  # second line is col header
        assert_true(x in lines[1])
    assert_true('flip' in lines[2])  # ec.__init__ ends with a flip
    assert_true('stop' in lines[-1])  # last line is stop (from __exit__)
    outs = lines[1].strip().split('\t')
    assert_true(all(l1 == l2 for l1, l2 in
                    zip(outs, ['timestamp', 'event', 'value'])))
    # check the entries
    ts = []
    for line, ent, gv in zip(lines[3:], entries, goal_vals):
        outs = line.strip().split('\t')
        assert_equal(len(outs), 3)
        # check timestamping: explicit timestamps are written verbatim,
        # otherwise collect the auto-generated ones for the monotonic check
        if len(ent) == 3 and ent[2] is not None:
            assert_equal(outs[0], str(ent[2]))
        else:
            ts.append(float(outs[0]))
        # check events
        assert_equal(outs[1], ent[0])
        # check values
        assert_equal(outs[2], gv)
    # make sure we got monotonically increasing timestamps
    ts = np.array(ts)
    assert_true(np.all(ts[1:] >= ts[:-1]))
def test_validate_audio(hide_window):
    """Test that validate_audio can pass through samples."""
    with ExperimentController(*std_args, suppress_resamp=True,
                              **std_kwargs) as ec:
        ec.set_stim_db(_get_dev_db(ec.audio_type) - 40)  # 0.01 RMS
        # with scaler == 1 no amplitude change should be applied
        assert ec._stim_scaler == 1.
        # mono and channel-major inputs are copied into (n_samples, 2)
        for shape in ((1000, ), (1, 1000), (2, 1000)):
            samples_in = np.zeros(shape)
            samples_out = ec._validate_audio(samples_in)
            assert samples_out.shape == (1000, 2)
            assert samples_out.dtype == np.float32
            assert samples_out is not samples_in
        # already-float32 stereo input should pass through as a view,
        # regardless of memory order
        for order in 'CF':
            samples_in = np.zeros((2, 1000), dtype=np.float32, order=order)
            samples_out = ec._validate_audio(samples_in)
            assert samples_out.shape == samples_in.shape[::-1]
            assert samples_out.dtype == np.float32
            # ensure that we have not made a copy, just a view
            assert samples_out.base is samples_in
def test_video(hide_window):
    """Test EC video methods.

    Exercises load/play/pause/draw/delete plus the scale, position, and
    visibility setters of the video object.
    """
    # Fix: copy the shared kwargs instead of mutating the module-level
    # std_kwargs in place — the original update() leaked enable_video and
    # a small window_size into every test that ran afterwards.
    these_kwargs = deepcopy(std_kwargs)
    these_kwargs.update(dict(enable_video=True, window_size=(640, 480)))
    video_path = fetch_data_file('video/example-video.mp4')
    with ExperimentController('test', **these_kwargs) as ec:
        ec.load_video(video_path)
        ec.video.play()
        # invalid position / scale arguments should be rejected
        pytest.raises(ValueError, ec.video.set_pos, [1, 2, 3])
        pytest.raises(ValueError, ec.video.set_scale, 'foo')
        pytest.raises(ValueError, ec.video.set_scale, -1)
        ec.wait_secs(0.1)
        # toggling visibility should not error while playing
        ec.video.set_visible(False)
        ec.wait_secs(0.1)
        ec.video.set_visible(True)
        # named and numeric-string scales are all accepted
        ec.video.set_scale('fill')
        ec.video.set_scale('fit')
        ec.video.set_scale('0.5')
        ec.video.set_pos(pos=(0.1, 0), units='norm')
        ec.video.pause()
        ec.video.draw()
        ec.delete_video()
def test_tracker_binom():
    """Test TrackerBinom"""
    # construction with a plain callback and with an ExperimentController
    tr = TrackerBinom(callback, 0.05, 0.1, 5)
    with ExperimentController('test', **std_kwargs) as ec:
        tr = TrackerBinom(ec, 0.05, 0.1, 5)
    # stop_early=False: runs all trials even once the outcome is decided
    tr = TrackerBinom(None, 0.05, 0.5, 2, stop_early=False)
    while not tr.stopped:
        tr.respond(False)
    assert(tr.n_trials == 2)
    assert(not tr.success)
    # all-correct responding should terminate well before 1000 trials
    tr = TrackerBinom(None, 0.05, 0.5, 1000)
    while not tr.stopped:
        tr.respond(True)
    # min_trials=100 forces at least 100 trials before stopping
    tr = TrackerBinom(None, 0.05, 0.5, 1000, 100)
    while not tr.stopped:
        tr.respond(True)
    assert(tr.n_trials == 100)
    # smoke-test all public properties for accessibility
    tr.alpha
    tr.chance
    tr.max_trials
    tr.stop_early
    tr.p_val
    tr.min_p_val
    tr.max_p_val
    tr.n_trials
    tr.n_wrong
    tr.n_correct
    tr.pc
    tr.responses
    tr.stopped
    tr.success
    tr.x_current
    tr.x
    tr.stop_rule
""" # Author: Eric Larson <*****@*****.**> # # License: BSD (3-clause) import numpy as np import matplotlib.pyplot as plt from expyfun import ExperimentController, EyelinkController, visual import expyfun.analyze as ea print(__doc__) with ExperimentController('testExp', full_screen=True, participant='foo', session='001', output_dir=None, version='dev') as ec: el = EyelinkController(ec) ec.screen_prompt('Welcome to the experiment!\n\nFirst, we will ' 'perform a screen calibration.\n\nPress a button ' 'to continue.') el.calibrate() # by default this starts recording EyeLink data ec.screen_prompt('Excellent! Now, follow the red circle around the edge ' 'of the big white circle.\n\nPress a button to ' 'continue') # make some circles to be drawn radius = 7.5 # degrees targ_rad = 0.2 # degrees theta = np.linspace(np.pi / 2., 2.5 * np.pi, 200)
def test_sound_card_triggering(hide_window):
    """Test using the sound card as a trigger controller."""
    # zero trigger channels is invalid when triggering via the sound card
    audio_controller = dict(TYPE='sound_card',
                            SOUND_CARD_TRIGGER_CHANNELS='0')
    with pytest.raises(ValueError, match='SOUND_CARD_TRIGGER_CHANNELS is zer'):
        ExperimentController(*std_args, audio_controller=audio_controller,
                             trigger_controller='sound_card',
                             suppress_resamp=True, **std_kwargs)
    audio_controller.update(SOUND_CARD_TRIGGER_CHANNELS='1')
    # Use 1 trigger ch and 1 output ch because this should work on all systems
    with ExperimentController(*std_args, audio_controller=audio_controller,
                              trigger_controller='sound_card', n_channels=1,
                              suppress_resamp=True, **std_kwargs) as ec:
        ec.identify_trial(ttl_id=[1, 0], ec_id='')
        ec.load_buffer([1e-2])
        ec.start_stimulus()
        ec.stop()
    # Test the drift triggers
    # a drift trigger too close to t=0 collides with the onset triggers
    audio_controller.update(SOUND_CARD_DRIFT_TRIGGER=0.001)
    with ExperimentController(*std_args, audio_controller=audio_controller,
                              trigger_controller='sound_card', n_channels=1,
                              **std_kwargs) as ec:
        ec.identify_trial(ttl_id=[1, 0], ec_id='')
        with pytest.warns(UserWarning, match='Drift triggers overlap with '
                          'onset triggers.'):
            ec.load_buffer(np.zeros(ec.stim_fs))
        ec.start_stimulus()
        ec.stop()
    # a drift trigger past the end of the stimulus is dropped with a warning
    audio_controller.update(SOUND_CARD_DRIFT_TRIGGER=[1.1, 0.3, -0.3, 'end'])
    with ExperimentController(*std_args, audio_controller=audio_controller,
                              trigger_controller='sound_card', n_channels=1,
                              **std_kwargs) as ec:
        ec.identify_trial(ttl_id=[1, 0], ec_id='')
        with pytest.warns(UserWarning, match='Drift trigger at 1.1 seconds '
                          'occurs outside stimulus window, not stamping '
                          'trigger.'):
            ec.load_buffer(np.zeros(ec.stim_fs))
        ec.start_stimulus()
        ec.stop()
    # two drift triggers too close together overlap each other
    audio_controller.update(SOUND_CARD_DRIFT_TRIGGER=[0.5, 0.501])
    with ExperimentController(*std_args, audio_controller=audio_controller,
                              trigger_controller='sound_card', n_channels=1,
                              **std_kwargs) as ec:
        ec.identify_trial(ttl_id=[1, 0], ec_id='')
        with pytest.warns(UserWarning, match='Some 2-triggers overlap.*'):
            ec.load_buffer(np.zeros(ec.stim_fs))
        ec.start_stimulus()
        ec.stop()
    # an empty drift-trigger list is valid and produces no warnings
    audio_controller.update(SOUND_CARD_DRIFT_TRIGGER=[])
    with ExperimentController(*std_args, audio_controller=audio_controller,
                              trigger_controller='sound_card', n_channels=1,
                              **std_kwargs) as ec:
        ec.identify_trial(ttl_id=[1, 0], ec_id='')
        ec.load_buffer(np.zeros(ec.stim_fs))
        ec.start_stimulus()
        ec.stop()
    # well-separated drift triggers (including negative = from end) are fine
    audio_controller.update(SOUND_CARD_DRIFT_TRIGGER=[0.2, 0.5, -0.3])
    with ExperimentController(*std_args, audio_controller=audio_controller,
                              trigger_controller='sound_card', n_channels=1,
                              **std_kwargs) as ec:
        ec.identify_trial(ttl_id=[1, 0], ec_id='')
        ec.load_buffer(np.zeros(ec.stim_fs))
        ec.start_stimulus()
        ec.stop()
def test_button_presses_and_window_size(hide_window):
    """Test EC window_size=None and button press capture."""
    with ExperimentController(*std_args, audio_controller='sound_card',
                              response_device='keyboard', window_size=None,
                              output_dir=None, full_screen=False,
                              session='01', participant='foo',
                              trigger_controller='dummy',
                              force_quit='escape',  # esc as quit key
                              version='dev') as ec:
        # empty state: nothing pressed yet
        ec.listen_presses()
        ec.get_presses()
        assert_equal(ec.get_presses(), [])
        fake_button_press(ec, '1', 0.5)
        assert_equal(
            ec.screen_prompt('press 1', live_keys=['1'], max_wait=1.5), '1')
        ec.listen_presses()
        assert_equal(ec.get_presses(), [])
        fake_button_press(ec, '1')
        # without timestamps each press is a 1-tuple
        assert_equal(ec.get_presses(timestamp=False), [('1', )])
        ec.listen_presses()
        fake_button_press(ec, '1')
        # with timestamps each press is (key, time)
        presses = ec.get_presses(timestamp=True, relative_to=0.2)
        assert_equal(len(presses), 1)
        assert_equal(len(presses[0]), 2)
        assert_equal(presses[0][0], '1')
        assert (isinstance(presses[0][1], float))
        ec.listen_presses()
        fake_button_press(ec, '1')
        # return_kinds adds the event kind -> (key, time, kind)
        presses = ec.get_presses(timestamp=True, relative_to=0.1,
                                 return_kinds=True)
        assert_equal(len(presses), 1)
        assert_equal(len(presses[0]), 3)
        assert_equal(presses[0][::2], ('1', 'press'))
        assert (isinstance(presses[0][1], float))
        ec.listen_presses()
        fake_button_press(ec, '1')
        presses = ec.get_presses(timestamp=False, return_kinds=True)
        assert_equal(presses, [('1', 'press')])
        ec.listen_presses()
        ec.screen_text('press 1 again')
        ec.flip()
        fake_button_press(ec, '1', 0.3)
        assert_equal(ec.wait_one_press(1.5, live_keys=[1])[0], '1')
        ec.screen_text('press 1 one last time')
        ec.flip()
        fake_button_press(ec, '1', 0.3)
        out = ec.wait_for_presses(1.5, live_keys=['1'], timestamp=False)
        assert_equal(out[0], '1')
        # text_input: default is all-caps echo of typed characters
        fake_button_press(ec, 'a', 0.3)
        fake_button_press(ec, 'return', 0.5)
        assert ec.text_input() == 'A'
        # type 'a', space, delete the space, a comma, then return
        fake_button_press(ec, 'a', 0.3)
        fake_button_press(ec, 'space', 0.35)
        fake_button_press(ec, 'backspace', 0.4)
        fake_button_press(ec, 'comma', 0.45)
        fake_button_press(ec, 'return', 0.5)
        # XXX this fails on OSX travis for some reason
        new_pyglet = _new_pyglet()
        bad = sys.platform == 'darwin'
        bad |= sys.platform == 'win32' and new_pyglet
        if not bad:
            assert ec.text_input(all_caps=False).strip() == 'a'
def test_ec(ac, hide_window, monkeypatch):
    """Test EC methods.

    Parametrized over audio controller backends (``ac``); the 'tdt' case
    runs in TDT dummy mode.  Exercises construction warnings, buffer
    loading and RMS checking, trigger stamping, and the
    identify_trial -> start_stimulus -> trial_ok state machine.
    """
    if ac == 'tdt':
        # TDT path: response device and triggering also go through the TDT,
        # and the stim sample rate must be a valid TDT rate.
        rd, tc, fs = 'tdt', 'tdt', get_tdt_rates()['25k']
        # Invalid TDT_MODEL should be rejected at construction time.
        pytest.raises(ValueError, ExperimentController, *std_args,
                      audio_controller=dict(TYPE=ac, TDT_MODEL='foo'),
                      **std_kwargs)
    else:
        _check_skip_backend(ac)
        rd, tc, fs = 'keyboard', 'dummy', 44100
    # Constructing with a mismatched stim_fs (100.) should warn about TDT
    # dummy mode exactly once per construction on the TDT path only,
    # regardless of suppress_resamp.
    for suppress in (True, False):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            with ExperimentController(*std_args, audio_controller=ac,
                                      response_device=rd,
                                      trigger_controller=tc,
                                      stim_fs=100.,
                                      suppress_resamp=suppress,
                                      **std_kwargs) as ec:
                pass
        w = [ww for ww in w if 'TDT is in dummy mode' in str(ww.message)]
        assert len(w) == (1 if ac == 'tdt' else 0)
    # Delay used between play/stop calls; see the long NOTE further down.
    SAFE_DELAY = 0.2
    with ExperimentController(*std_args, audio_controller=ac,
                              response_device=rd, trigger_controller=tc,
                              stim_fs=fs, **std_kwargs) as ec:
        # Basic attribute passthrough from the constructor.
        assert (ec.participant == std_kwargs['participant'])
        assert (ec.session == std_kwargs['session'])
        assert (ec.exp_name == std_args[0])
        stamp = ec.current_time
        ec.write_data_line('hello')
        ec.wait_until(stamp + 0.02)
        # screen_prompt accepts str or list of str, and validates its args.
        ec.screen_prompt('test', 0.01, 0, None)
        ec.screen_prompt('test', 0.01, 0, ['1'])
        ec.screen_prompt(['test', 'ing'], 0.01, 0, ['1'])
        ec.screen_prompt('test', 1e-3, click=True)
        pytest.raises(ValueError, ec.screen_prompt, 'foo', np.inf, 0, [])
        pytest.raises(TypeError, ec.screen_prompt, 3, 0.01, 0, None)
        # No presses pending: waits time out empty.
        assert_equal(ec.wait_one_press(0.01), (None, None))
        assert (ec.wait_one_press(0.01, timestamp=False) is None)
        assert_equal(ec.wait_for_presses(0.01), [])
        assert_equal(ec.wait_for_presses(0.01, timestamp=False), [])
        # get_presses before listen_presses is an error.
        pytest.raises(ValueError, ec.get_presses)
        ec.listen_presses()
        assert_equal(ec.get_presses(), [])
        assert_equal(ec.get_presses(kind='presses'), [])
        pytest.raises(ValueError, ec.get_presses, kind='foo')
        if rd == 'tdt':
            # TDT does not have key release events, so should raise an
            # exception if asked for them:
            pytest.raises(RuntimeError, ec.get_presses, kind='releases')
            pytest.raises(RuntimeError, ec.get_presses, kind='both')
        else:
            assert_equal(ec.get_presses(kind='both'), [])
            assert_equal(ec.get_presses(kind='releases'), [])
        ec.set_noise_db(0)
        ec.set_stim_db(20)
        # test buffer data handling
        ec.set_rms_checking(None)
        ec.load_buffer([0, 0, 0, 0, 0, 0])
        ec.load_buffer([])
        # Out-of-range sample value (2) should be rejected.
        pytest.raises(ValueError, ec.load_buffer, [0, 2, 0, 0, 0, 0])
        ec.load_buffer(np.zeros((100, )))
        # 2D buffers must be (n_channels, n_samples); (100, 1)/(100, 2) put
        # channels on the wrong axis and must fail the channel-count check.
        with pytest.raises(ValueError, match='100 did not match .* count 2'):
            ec.load_buffer(np.zeros((100, 1)))
        with pytest.raises(ValueError, match='100 did not match .* count 2'):
            ec.load_buffer(np.zeros((100, 2)))
        ec.load_buffer(np.zeros((1, 100)))
        ec.load_buffer(np.zeros((2, 100)))
        data = np.zeros(int(5e6), np.float32)  # too long for TDT
        if fs == get_tdt_rates()['25k']:
            pytest.raises(RuntimeError, ec.load_buffer, data)
        else:
            ec.load_buffer(data)
        ec.load_buffer(np.zeros(2))
        del data
        # Trigger values must be ints in the valid set; 0 and 3 are invalid
        # here (presumably only powers of two are stampable by default —
        # confirm against stamp_triggers' docs), and 'check' is validated.
        pytest.raises(ValueError, ec.stamp_triggers, 'foo')
        pytest.raises(ValueError, ec.stamp_triggers, 0)
        pytest.raises(ValueError, ec.stamp_triggers, 3)
        pytest.raises(ValueError, ec.stamp_triggers, 1, check='foo')
        print(ec._tc)  # test __repr__
        if tc == 'dummy':
            assert_equal(ec._tc._trigger_list, [])
        # check='int4' permits 3; a list stamps each value in order.
        ec.stamp_triggers(3, check='int4')
        ec.stamp_triggers(2)
        ec.stamp_triggers([2, 4, 8])
        if tc == 'dummy':
            assert_equal(ec._tc._trigger_list, [3, 2, 2, 4, 8])
            ec._tc._trigger_list = list()
        # Too many channels / wrong dimensionality.
        pytest.raises(ValueError, ec.load_buffer, np.zeros((100, 3)))
        pytest.raises(ValueError, ec.load_buffer, np.zeros((3, 100)))
        pytest.raises(ValueError, ec.load_buffer, np.zeros((1, 1, 1)))
        # test RMS checking
        pytest.raises(ValueError, ec.set_rms_checking, 'foo')
        # click: RMS 0.0135, should pass 'fullfile' and fail 'windowed'
        click = np.zeros((int(ec.fs / 4), ))  # 250 ms
        click[len(click) // 2] = 1.
        click[len(click) // 2 + 1] = -1.
        # noise: RMS 0.03, should fail both 'fullfile' and 'windowed'
        noise = np.random.normal(scale=0.03, size=(int(ec.fs / 4), ))
        ec.set_rms_checking(None)
        ec.load_buffer(click)  # should go unchecked
        ec.load_buffer(noise)  # should go unchecked
        ec.set_rms_checking('wholefile')
        ec.load_buffer(click)  # should pass
        with pytest.warns(UserWarning, match='exceeds stated'):
            ec.load_buffer(noise)
        ec.wait_secs(SAFE_DELAY)
        ec.set_rms_checking('windowed')
        with pytest.warns(UserWarning, match='exceeds stated'):
            ec.load_buffer(click)
        ec.wait_secs(SAFE_DELAY)
        with pytest.warns(UserWarning, match='exceeds stated'):
            ec.load_buffer(noise)
        if ac != 'tdt':  # too many samples there
            # Force the slow-load warning by dropping the threshold to 1
            # sample, then restore it.
            monkeypatch.setattr(_experiment_controller, '_SLOW_LIMIT', 1)
            with pytest.warns(UserWarning, match='samples is slow'):
                ec.load_buffer(np.zeros(2, dtype=np.float32))
            monkeypatch.setattr(_experiment_controller, '_SLOW_LIMIT', 1e7)
        ec.stop()
        ec.set_visible()
        ec.set_visible(False)
        ec.call_on_every_flip(partial(dummy_print, 'called start stimuli'))
        ec.wait_secs(SAFE_DELAY)
        # Note: we put some wait_secs in here because otherwise the delay in
        # play start (e.g. for trigdel and onsetdel) can
        # mess things up! So we probably eventually should add
        # some safeguard against stopping too quickly after starting...
        #
        # First: identify_trial
        #
        noise = np.random.normal(scale=0.01, size=(int(ec.fs), ))
        ec.load_buffer(noise)
        pytest.raises(RuntimeError, ec.start_stimulus)  # order violation
        assert (ec._playing is False)
        if tc == 'dummy':
            assert_equal(ec._tc._trigger_list, [])
        ec.start_stimulus(start_of_trial=False)  # should work
        if tc == 'dummy':
            # start_stimulus stamps the play trigger (1).
            assert_equal(ec._tc._trigger_list, [1])
        ec.wait_secs(SAFE_DELAY)
        assert (ec._playing is True)
        pytest.raises(RuntimeError, ec.trial_ok)  # order violation
        ec.stop()
        assert (ec._playing is False)
        # only binary for TTL
        pytest.raises(KeyError, ec.identify_trial, ec_id='foo')  # need ttl_id
        pytest.raises(TypeError, ec.identify_trial, ec_id='foo', ttl_id='bar')
        pytest.raises(ValueError, ec.identify_trial, ec_id='foo', ttl_id=[2])
        assert (ec._playing is False)
        if tc == 'dummy':
            ec._tc._trigger_list = list()
        ec.identify_trial(ec_id='foo', ttl_id=[0, 1])
        assert (ec._playing is False)
        #
        # Second: start_stimuli
        #
        pytest.raises(RuntimeError, ec.identify_trial, ec_id='foo',
                      ttl_id=[0])
        assert (ec._playing is False)
        pytest.raises(RuntimeError, ec.trial_ok)  # order violation
        assert (ec._playing is False)
        ec.start_stimulus(flip=False, when=-1)
        if tc == 'dummy':
            # ttl_id [0, 1] encodes as triggers [4, 8], then play trigger 1.
            assert_equal(ec._tc._trigger_list, [4, 8, 1])
        if ac != 'tdt':
            # dummy TDT version won't do this check properly, as
            # ec._ac._playing -> GetTagVal('playing') always gives False
            pytest.raises(RuntimeError, ec.play)  # already played, must stop
        ec.wait_secs(SAFE_DELAY)
        ec.stop()
        assert (ec._playing is False)
        #
        # Third: trial_ok
        #
        pytest.raises(RuntimeError, ec.start_stimulus)  # order violation
        pytest.raises(RuntimeError, ec.identify_trial)  # order violation
        ec.trial_ok()
        # double-check
        pytest.raises(RuntimeError, ec.start_stimulus)  # order violation
        ec.start_stimulus(start_of_trial=False)  # should work
        pytest.raises(RuntimeError, ec.trial_ok)  # order violation
        ec.wait_secs(SAFE_DELAY)
        ec.stop()
        assert (ec._playing is False)
        ec.flip(-np.inf)
        assert (ec._playing is False)
        ec.estimate_screen_fs()
        assert (ec._playing is False)
        ec.play()
        ec.wait_secs(SAFE_DELAY)
        assert (ec._playing is True)
        ec.call_on_every_flip(None)
        # something funny with the ring buffer in testing on OSX
        if sys.platform != 'darwin':
            ec.call_on_next_flip(ec.start_noise())
        ec.flip()
        ec.wait_secs(SAFE_DELAY)
        ec.stop_noise()
        ec.stop()
        assert (ec._playing is False)
        ec.stop_noise()
        ec.wait_secs(SAFE_DELAY)
        ec.start_stimulus(start_of_trial=False)
        ec.stop()
        ec.start_stimulus(start_of_trial=False)
        # Smoke-test the mouse/cursor API.
        ec.get_mouse_position()
        ec.listen_clicks()
        ec.get_clicks()
        ec.toggle_cursor(False)
        ec.toggle_cursor(True, True)
        ec.move_mouse_to((0, 0))  # center of the window
        ec.wait_secs(0.001)
        # Smoke-test the informational properties via print.
        print(ec.id_types)
        print(ec.stim_db)
        print(ec.noise_db)
        print(ec.on_next_flip_functions)
        print(ec.on_every_flip_functions)
        print(ec.window)
        # we need to monkey-patch for old Pyglet
        try:
            from PIL import Image
            Image.fromstring
        except AttributeError:
            Image.fromstring = None
        data = ec.screenshot()
        # HiDPI: the screenshot may come back at 2x the logical window size.
        sizes = [
            tuple(std_kwargs['window_size']),
            tuple(np.array(std_kwargs['window_size']) * 2)
        ]
        assert data.shape[:2] in sizes
        print(ec.fs)  # test fs support
        wait_secs(0.01)
        test_pix = (11.3, 0.5, 110003)
        print(test_pix)  # test __repr__
        assert all([x in repr(ec) for x in ['foo', '"test"', '01']])
        ec.refocus()  # smoke test for refocusing
    del ec
def test_tracker_ud():
    """Test TrackerUD.

    Covers construction with a callback, an ExperimentController, and None;
    running to the reversal/trial stopping rules with a seeded RNG;
    property access and plotting; and validation of the dynamic step-size
    (``change_indices``) machinery and its error conditions.
    """
    import matplotlib.pyplot as plt
    # Construct with each supported callback type.  The TrackerUD signature
    # is presumably (callback, up, down, step_size_up, step_size_down,
    # stop_reversals, stop_trials, start_value) — confirm against the class.
    tr = TrackerUD(callback, 3, 1, 1, 1, np.inf, 10, 1)
    with ExperimentController('test', **std_kwargs) as ec:
        tr = TrackerUD(ec, 3, 1, 1, 1, np.inf, 10, 1)
    # Stop on reversals (10) with unlimited trials.
    tr = TrackerUD(None, 3, 1, 1, 1, 10, np.inf, 1)
    rand = np.random.RandomState(0)  # seeded for deterministic responses
    while not tr.stopped:
        tr.respond(rand.rand() < tr.x_current)
    assert_equal(tr.n_reversals, tr.stop_reversals)
    # Stop on trials (10) with unlimited reversals.
    tr = TrackerUD(None, 3, 1, 1, 1, np.inf, 10, 1)
    tr.threshold()
    rand = np.random.RandomState(0)
    while not tr.stopped:
        tr.respond(rand.rand() < tr.x_current)
    # test responding after stopped
    assert_raises(RuntimeError, tr.respond, 0)
    # all the properties better work
    tr.up
    tr.down
    tr.step_size_up
    tr.step_size_down
    tr.stop_reversals
    tr.stop_trials
    tr.start_value
    tr.x_min
    tr.x_max
    tr.stopped
    tr.x
    tr.responses
    tr.n_trials
    tr.n_reversals
    tr.reversals
    tr.reversal_inds
    # Plotting smoke tests, with and without a pre-existing axes.
    fig, ax, lines = tr.plot()
    tr.plot_thresh(ax=ax)
    tr.plot_thresh()
    plt.close(fig)
    ax = plt.axes()
    fig, ax, lines = tr.plot(ax)
    plt.close(fig)
    tr.threshold()
    tr.check_valid(2)
    # bad callback type
    assert_raises(TypeError, TrackerUD, 'foo', 3, 1, 1, 1, 10, np.inf, 1)
    # test dynamic step size and error conditions
    tr = TrackerUD(None, 3, 1, [1, 0.5], [1, 0.5], 10, np.inf, 1,
                   change_indices=[2])
    tr.respond(True)
    with warnings.catch_warnings(record=True) as w:
        # Hitting x_min/x_max limits should emit exactly one warning over
        # this response sequence.
        tr = TrackerUD(None, 1, 1, 0.75, 0.75, np.inf, 9, 1, x_min=0,
                       x_max=2)
        responses = [True, True, True, False, False, False, False, True,
                     False]
        for r in responses:  # run long enough to encounter change_indices
            tr.respond(r)
        assert_equal(len(w), 1)
    assert(tr.check_valid(1))  # make sure checking validity is good
    assert(not tr.check_valid(3))
    assert_raises(ValueError, tr.threshold, 1)
    tr.threshold(3)
    assert_equal(tr.n_trials, tr.stop_trials)
    # run tests with ignore too--should generate warnings, but no error
    with warnings.catch_warnings(record=True) as w:
        tr = TrackerUD(None, 1, 1, 0.75, 0.25, np.inf, 8, 1, x_min=0,
                       x_max=2, repeat_limit='ignore')
        responses = [False, True, False, False, True, True, False, True]
        for r in responses:  # run long enough to encounter change_indices
            tr.respond(r)
        assert_equal(len(w), 1)
    tr.threshold(0)
    # bad stop_trials
    assert_raises(ValueError, TrackerUD, None, 3, 1, 1, 1, 10, 'foo', 1)
    # bad stop_reversals
    assert_raises(ValueError, TrackerUD, None, 3, 1, 1, 1, 'foo', 10, 1)
    # change_indices too long
    assert_raises(ValueError, TrackerUD, None, 3, 1, [1, 0.5], [1, 0.5],
                  10, np.inf, 1, change_indices=[1, 2])
    # step_size_up length mismatch
    assert_raises(ValueError, TrackerUD, None, 3, 1, [1], [1, 0.5],
                  10, np.inf, 1, change_indices=[2])
    # step_size_down length mismatch
    assert_raises(ValueError, TrackerUD, None, 3, 1, [1, 0.5], [1],
                  10, np.inf, 1, change_indices=[2])
    # bad change_rule
    assert_raises(ValueError, TrackerUD, None, 3, 1, [1, 0.5], [1, 0.5],
                  10, np.inf, 1, change_indices=[2], change_rule='foo')
    # no change_indices (i.e. change_indices=None)
    assert_raises(ValueError, TrackerUD, None, 3, 1, [1, 0.5], [1, 0.5],
                  10, np.inf, 1)
    # start_value scalar type checking
    assert_raises(TypeError, TrackerUD, None, 3, 1, [1, 0.5], [1, 0.5],
                  10, np.inf, [9, 5], change_indices=[2])
    assert_raises(TypeError, TrackerUD, None, 3, 1, [1, 0.5], [1, 0.5],
                  10, np.inf, None, change_indices=[2])
    # test with multiple change_indices
    tr = TrackerUD(None, 3, 1, [3, 2, 1], [3, 2, 1], 10, np.inf, 1,
                   change_indices=[2, 4], change_rule='reversals')
temptype.append('Oddball') else: templist.append(baselist[i]) temptype.append('Base') templist = templist[:n_images] temptype = temptype[:n_images] paddlist = baselist[len(baselist) - pad_time * base_rate:] paddtype = [] for i in np.arange(0, len(paddlist) / 2): paddtype.append('Base') imagelist = np.concatenate((paddlist[:init_time * base_rate], templist, paddlist[len(paddlist) - init_time * base_rate:])) imtype = np.concatenate((paddtype, temptype, paddtype)) # Start instance of the experiment controller with ExperimentController('ShowImages', full_screen=True) as ec: #write_hdf5(op.splitext(ec.data_fname)[0] + '_trials.hdf5', # dict(imorder_shuf=imorder_shuf, # imtype_shuf=imtype_shuf)) fr = 1 / ec.estimate_screen_fs() # Estimate frame rate realRR = ec.estimate_screen_fs() realRR = round(realRR) adj = fr / 2 # Adjustment factor for accurate flip # Wait to fill the screen ec.set_visible(False) # Set the background color to gray ec.set_background_color(bgcolor) n_frames = round(total_time * realRR) img_frames = int(round(realRR / base_rate))