Example 1
    def setup(self):
        # Stimulus durations
        # T2|M2|ISI durations held constant
        # Note: all durations are in units of refresh_time (16.67ms)
        self.t2_duration = P.refresh_time * 2  # ~33ms
        self.m2_duration = P.refresh_time * 4  # ~67ms
        self.isi = P.refresh_time  # ~17ms

        # T1 duration sets
        self.t1_timings = {
            # 50.01ms | 50.01ms
            EASY: [P.refresh_time * 3, P.refresh_time * 3],
            # 33.33ms | 66.66ms
            MEDIUM: [P.refresh_time * 2, P.refresh_time * 4],
            # 16.67ms | 83.33ms
            HARD: [P.refresh_time, P.refresh_time * 5]
        }

        self.block_conditions = [EASY, MEDIUM, HARD]
        random.shuffle(self.block_conditions)

        # Stimulus sizes
        fix_thickness = deg_to_px(0.1)
        fix_size = deg_to_px(0.6)
        target_size = deg_to_px(0.6)

        # Init drawbjects
        self.fixation = FixationCross(size=fix_size,
                                      thickness=fix_thickness,
                                      fill=BLACK)

        # Experiment messages
        self.anykey_txt = "{0}\nPress any key to continue..."
        self.practice_txt = "{0}\n(PRACTICE ROUND)"
        self.t1_id_request = "What was the first letter? If unsure, make your best guess."
        self.t2_id_request = "What was the second letter? If unsure, make your best guess."

        # Initialize ResponseCollectors
        self.t1_rc = ResponseCollector(uses=RC_KEYPRESS)
        self.t2_rc = ResponseCollector(uses=RC_KEYPRESS)

        # Initialize ResponseCollector Keymaps
        self.keymap = KeyMap('identity_response', letters, letters, [
            sdl2.SDLK_a, sdl2.SDLK_b, sdl2.SDLK_c, sdl2.SDLK_d, sdl2.SDLK_e,
            sdl2.SDLK_f, sdl2.SDLK_g, sdl2.SDLK_h, sdl2.SDLK_j, sdl2.SDLK_k,
            sdl2.SDLK_l, sdl2.SDLK_m, sdl2.SDLK_n, sdl2.SDLK_p, sdl2.SDLK_q,
            sdl2.SDLK_r, sdl2.SDLK_s, sdl2.SDLK_t, sdl2.SDLK_u, sdl2.SDLK_v,
            sdl2.SDLK_w, sdl2.SDLK_x, sdl2.SDLK_y, sdl2.SDLK_z
        ])

        # Insert practice round at beginning (T1 difficulty is mixed during practice)
        if P.run_practice_blocks:
            self.insert_practice_block(1, trial_counts=5)
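A quick arithmetic check of the timing sets above (a standalone sketch that hard-codes a ~16.67 ms refresh interval instead of reading P.refresh_time from a running KLibs session): every difficulty level pairs T1 and M1 durations that sum to six refresh cycles, so difficulty changes how the ~100 ms T1+M1 window is split, not its total length.

# Standalone sketch: confirm that T1 + M1 spans six refresh cycles at every difficulty.
REFRESH = 1000.0 / 60  # assumed 60 Hz display, ~16.67 ms per refresh

t1_timings = {
    "EASY": [REFRESH * 3, REFRESH * 3],    # ~50.0 ms | ~50.0 ms
    "MEDIUM": [REFRESH * 2, REFRESH * 4],  # ~33.3 ms | ~66.7 ms
    "HARD": [REFRESH * 1, REFRESH * 5],    # ~16.7 ms | ~83.3 ms
}

for level, (t1, m1) in t1_timings.items():
    print("{0}: T1={1:.1f} ms, M1={2:.1f} ms, total={3:.1f} ms".format(level, t1, m1, t1 + m1))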
Example 2
    def setup_response_collector(self):
        self.probe_rc = ResponseCollector(uses=RC_KEYPRESS)
        self.prime_rc = ResponseCollector(uses=RC_KEYPRESS)

        self.prime_rc.display_callback = self.present_filled_array
        self.prime_rc.display_kwargs = {'display': 'prime'}
        self.prime_rc.terminate_after = [5000, TK_MS]
        self.prime_rc.keypress_listener.interrupts = True
        self.prime_rc.keypress_listener.key_map = self.keymap

        self.probe_rc.display_callback = self.present_filled_array
        self.probe_rc.display_kwargs = {'display': 'probe'}
        self.probe_rc.terminate_after = [5000, TK_MS]
        self.probe_rc.keypress_listener.interrupts = True
        self.probe_rc.keypress_listener.key_map = self.keymap
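For context, a hedged sketch of how collectors configured this way are usually consumed at trial time; the collect-then-read pattern is taken from the other examples in this file, but this particular trial() body is illustrative rather than copied from the experiment the snippet belongs to.

# Illustrative only: consuming the prime/probe collectors configured above.
# present_filled_array and self.keymap are assumed to be defined elsewhere in the
# same experiment, as in the snippet above.
def trial(self):
    self.prime_rc.collect()  # runs display_callback until a keypress or the 5000 ms timeout
    self.probe_rc.collect()

    prime_response, prime_rt = self.prime_rc.keypress_listener.response()
    probe_response, probe_rt = self.probe_rc.keypress_listener.response()

    return {
        "prime_response": prime_response,
        "prime_rt": prime_rt,
        "probe_response": probe_response,
        "probe_rt": probe_rt
    }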
Example 3
    def __init__(self):
        from klibs.KLAudio import AudioManager
        from klibs.KLResponseCollectors import ResponseCollector
        from klibs.KLTrialFactory import TrialFactory

        super(Experiment, self).__init__()

        self.incomplete = True  # flag for keeping track of session completeness
        self.blocks = None  # blocks of trials for the experiment
        self.tracker_dot = None  # overlay of eye tracker gaze location in devmode

        self.audio = AudioManager()  # initialize audio management for the experiment
        self.rc = ResponseCollector()  # add default response collector
        self.database = self.db  # use database from evm

        self.trial_factory = TrialFactory()
        if P.manual_trial_generation is False:
            self.trial_factory.generate()
        self.event_code_generator = None
Example 4
class MSK_Mixed(klibs.Experiment):

	# Establishes factors held constant throughout experiment
	def setup(self):
		# Stimulus durations
		# T2|M2|ISI durations held constant
		# Note: all durations are in units of refresh_time (16.67ms)
		self.t2_duration = P.refresh_time * 2 # ~33ms
		self.m2_duration = P.refresh_time * 4 # ~67ms
		self.isi = P.refresh_time # ~17ms

		# T1 duration sets
		self.t1_timings = { 
			# 50.01ms | 50.01ms
			EASY:[P.refresh_time * 3, P.refresh_time * 3], 
			# 33.33ms | 66.66ms
			MEDIUM:[P.refresh_time * 2, P.refresh_time * 4], 
			# 16.67ms | 83.33ms
			HARD:[P.refresh_time, P.refresh_time * 5] }

		# Stimulus sizes
		fix_thickness = deg_to_px(0.1)
		fix_size = deg_to_px(0.6)
		target_size = deg_to_px(0.6)

		# Init drawbjects
		self.fixation = FixationCross(size=fix_size, thickness=fix_thickness, fill=BLACK)

		# Experiment messages
		self.anykey_txt = "{0}\nPress any key to continue..."
		self.practice_txt = '{0}\n(PRACTICE ROUND)'
		self.t1_id_request = "What was the first letter? If unsure, make your best guess."
		self.t2_id_request = "What was the second letter? If unsure, make your best guess."

		# Initialize ResponseCollectors
		self.t1_rc = ResponseCollector(uses=RC_KEYPRESS)
		self.t2_rc = ResponseCollector(uses=RC_KEYPRESS)

		# Initialize ResponseCollector Keymaps
		self.keymap = KeyMap(
			'identity_response', letters, letters,
			[sdl2.SDLK_a, sdl2.SDLK_b, sdl2.SDLK_c, sdl2.SDLK_d, sdl2.SDLK_e, sdl2.SDLK_f,
			 sdl2.SDLK_g, sdl2.SDLK_h, sdl2.SDLK_j, sdl2.SDLK_k, sdl2.SDLK_l, sdl2.SDLK_m,
			 sdl2.SDLK_n, sdl2.SDLK_p, sdl2.SDLK_q, sdl2.SDLK_r, sdl2.SDLK_s, sdl2.SDLK_t, 
			 sdl2.SDLK_u, sdl2.SDLK_v, sdl2.SDLK_w, sdl2.SDLK_x, sdl2.SDLK_y, sdl2.SDLK_z]
		)

		# Insert practice block at beginning
		if P.run_practice_blocks:
			self.insert_practice_block(1, trial_counts=30)

	# Establishes block-wise factors
	def block(self):
		# Inform participant as to their progress
		block_txt = "Block {0} of {1}".format(P.block_number, P.blocks_per_experiment)

		if P.practicing: 
			block_txt = self.practice_txt.format(block_txt)

		progress_txt = self.anykey_txt.format(block_txt)

		self.present_txt(progress_txt)

	# Set response collector parameters
	def setup_response_collector(self):
		self.t1_rc.terminate_after = [10, TK_S]					# Timeout after 10s of no response
		self.t1_rc.display_callback = self.identity_callback	# Request identity response
		self.t1_rc.display_kwargs = {'target': 'T1'}			# Specify which response to request
		self.t1_rc.keypress_listener.key_map = self.keymap		# Set which responses are allowed
		self.t1_rc.keypress_listener.interrupts = True			# Abort collection once response made

		self.t2_rc.terminate_after = [10, TK_S]
		self.t2_rc.display_callback = self.identity_callback
		self.t2_rc.display_kwargs = {'target': 'T2'}
		self.t2_rc.keypress_listener.key_map = self.keymap
		self.t2_rc.keypress_listener.interrupts = True

	# Any trial factors that can be established prior to trial onset are set here
	def trial_prep(self):
		# Select target stimuli & durations
		self.t1_identity, self.t2_identity = random.sample(letters,2)
		self.t1_duration, self.m1_duration = self.t1_timings[self.t1_difficulty]
		
		# Init EventManager
		events = [[self.isoa, "T1_on"]]										# Present T1 after some SOA from initiation
		events.append([events[-1][0] + self.t1_duration, 'T1_off'])			# Remove T1 after its duration period
		events.append([events[-1][0] + self.isi, "T1_mask_on"])				# Present M1 shortly after T1 offset
		events.append([events[-1][0] + self.m1_duration, 'T1_mask_off'])	# Remove M1 after its duration period
		events.append([events[-4][0] + self.ttoa, 'T2_on']) 				# TTOA = Time between onset of T1 & T2
		events.append([events[-1][0] + self.t2_duration, 'T2_off'])			# Remove T2 after its duration period
		events.append([events[-1][0] + self.isi, 'T2_mask_on'])				# Present M2 shortly after T2 offset
		events.append([events[-1][0] + self.m2_duration, 'T2_mask_off'])	# Remove M2 after its duration period

		# Register events to EventManager
		for e in events:
			self.evm.register_ticket(ET(e[1],e[0]))

		# Prepare stimulus stream
		self.tmtm_stream = self.prep_stream()

		# Hide mouse cursor during trial
		hide_mouse_cursor()

		# Present fixation and wait for an initiation response before beginning the trial sequence
		self.present_fixation()

	def trial(self):
		while self.evm.before('T1_on', True): ui_request()			# Variable delay following trial initiation

		self.blit_it(self.tmtm_stream['t1_target'])					# Blit T1 to screen
		while self.evm.before('T1_off', True): ui_request()			# Wait (conditional duration)
		self.flip_it()												# Remove T1

		while self.evm.before('T1_mask_on', True): ui_request() 	# Wait (ISI; fixed at ~17ms)

		self.blit_it(self.tmtm_stream['t1_mask'])					# Blit M1 to screen
		while self.evm.before('T1_mask_off', True): ui_request()	# Wait (conditional duration)
		self.flip_it()												# Remove M1

		while self.evm.before('T2_on', True): ui_request()			# Wait (TTOA)

		self.blit_it(self.tmtm_stream['t2_target'])					# Blit T2 to screen
		while self.evm.before('T2_off', True): ui_request()			# Wait (fixed at ~33ms)
		self.flip_it()												# Remove T2

		while self.evm.before('T2_mask_on', True): ui_request()		# Wait (ISI; fixed at ~17ms)

		self.blit_it(self.tmtm_stream['t2_mask'])					# Blit M2 to screen
		while self.evm.before('T2_mask_off', True): ui_request()	# Wait (fixed at ~67ms)
		self.flip_it()												# Remove M2

		self.t1_rc.collect()										# Request T1 identity
		self.t2_rc.collect()										# Request T2 identity

		# Save responses
		t1_response = self.t1_rc.keypress_listener.response(rt=False)
		t2_response = self.t2_rc.keypress_listener.response(rt=False)

		# Clear remaining stimuli from screen
		clear()

		# Log trial factors & responses
		return {
			"block_num": P.block_number,
			"trial_num": P.trial_number,
			"practicing": str(P.practicing),
			"isoa": self.isoa,
			"isi": self.isi,
			"ttoa": self.ttoa,
			"t1_difficulty": self.t1_difficulty,
			"t1_duration": self.t1_duration,
			"m1_duration": self.m1_duration,
			"t2_duration": self.t2_duration,
			"m2_duration": self.m2_duration,
			"t1_identity": self.t1_identity,
			"t2_identity": self.t2_identity,
			"t1_response": t1_response,
			"t2_response": t2_response
		}

	def trial_clean_up(self):
		# Reset response listeners
		self.t1_rc.keypress_listener.reset()
		self.t2_rc.keypress_listener.reset()

	def clean_up(self):
		pass

	# Clears screen when called
	def flip_it(self):
		fill()
		flip()

	# Presents passed 'it' centre-screen
	def blit_it(self, it):
		fill()
		blit(it, registration=5, location=P.screen_c)
		flip()

	# Presents passed txt centre-screen, hangs until keypress
	def present_txt(self, txt):
		msg = message(txt, align='center', blit_txt=False)
		self.blit_it(msg)
		any_key()

	# Presents fix centre-screen, hangs until keypress
	def present_fixation(self):
		self.blit_it(self.fixation)
		any_key()

	# Request identity response from participants
	def identity_callback(self, target):
		# Request appropriate identity
		identity_request_msg = self.t1_id_request if target == "T1" else self.t2_id_request
		
		fill()
		message(identity_request_msg, location=P.screen_c, registration=5, blit_txt=True)
		flip()

	# Prepares stimulus stream
	def prep_stream(self):
		# Prepare unique masks for each target
		self.t1_mask = self.generate_mask()
		self.t2_mask = self.generate_mask()

		stream_items = {
			't1_target': message(self.t1_identity, align='center', blit_txt=False),
			't1_mask': self.t1_mask,
			't2_target': message(self.t2_identity, align='center', blit_txt=False),
			't2_mask': self.t2_mask
		}

		return stream_items

	# Generates target masks
	def generate_mask(self):
		# Set mask size
		canvas_size = deg_to_px(1)
		# Set cell size
		cell_size = canvas_size / 8 # Mask comprised of 64 smaller cells arranged 8x8
		# Each cell has a black outline
		cell_outline_width = deg_to_px(.01)

		# Initialize canvas to be painted w/ mask cells
		canvas = Image.new('RGBA', [canvas_size, canvas_size], (0,0,0,0))
		surface = aggdraw.Draw(canvas)

		# Initialize pen to draw cell outlines
		transparent_pen = aggdraw.Pen((0,0,0),cell_outline_width)

		# Generate cells
		for row in range(8):
			for col in range(8):
				# Randomly select colour for each cell
				cell_fill = random.choice([WHITE, BLACK])
				# Brush to apply colour
				fill_brush = aggdraw.Brush(tuple(cell_fill[:3]))
				# Determine cell boundary coords
				top_left = (row * cell_size, col * cell_size)
				bottom_right = ((row+1) * cell_size, (col+1) * cell_size)
				# Create cell
				surface.rectangle(
					(top_left[0], top_left[1], bottom_right[0], bottom_right[1]),
					transparent_pen,
					fill_brush)
		# Apply cells to mask
		surface.flush()

		return np.asarray(canvas)
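The trial_prep() above builds its event schedule cumulatively: each onset/offset is the previous event's time plus a duration, except T2_on, which is anchored back to T1 onset via the TTOA (hence the events[-4] lookup). A standalone sketch of that arithmetic; isoa and ttoa are per-trial factors defined outside this file, so the values below are placeholders.

# Standalone sketch of the cumulative event schedule from trial_prep().
refresh = 1000.0 / 60                                # ~16.67 ms per refresh
isoa, ttoa = 1000, 300                               # placeholder values (ms)
t1_duration, m1_duration = refresh * 1, refresh * 5  # HARD condition timings
t2_duration, m2_duration, isi = refresh * 2, refresh * 4, refresh

events = [[isoa, "T1_on"]]
events.append([events[-1][0] + t1_duration, "T1_off"])
events.append([events[-1][0] + isi, "T1_mask_on"])
events.append([events[-1][0] + m1_duration, "T1_mask_off"])
events.append([events[-4][0] + ttoa, "T2_on"])       # events[-4] is T1_on
events.append([events[-1][0] + t2_duration, "T2_off"])
events.append([events[-1][0] + isi, "T2_mask_on"])
events.append([events[-1][0] + m2_duration, "T2_mask_off"])

for onset_ms, label in events:
    print("{0:<12} @ {1:7.2f} ms".format(label, onset_ms))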
Example 5
class Experiment(EnvAgent):

    window = None
    paused = False

    def __init__(self):
        from klibs.KLAudio import AudioManager
        from klibs.KLResponseCollectors import ResponseCollector
        from klibs.KLTrialFactory import TrialFactory

        super(Experiment, self).__init__()

        self.incomplete = True  # flag for keeping track of session completeness
        self.blocks = None  # blocks of trials for the experiment
        self.tracker_dot = None  # overlay of eye tracker gaze location in devmode

        self.audio = AudioManager()  # initialize audio management for the experiment
        self.rc = ResponseCollector()  # add default response collector
        self.database = self.db  # use database from evm

        self.trial_factory = TrialFactory()
        if P.manual_trial_generation is False:
            self.trial_factory.generate()
        self.event_code_generator = None

    def __execute_experiment__(self, *args, **kwargs):
        """For internal use, actually runs the blocks/trials of the experiment in sequence.

		"""
        from klibs.KLGraphics import clear

        if self.blocks is None:
            self.blocks = self.trial_factory.export_trials()

        for block in self.blocks:
            P.recycle_count = 0
            P.block_number = self.blocks.i
            P.practicing = block.practice
            self.block()
            P.trial_number = 1
            for trial in block:  # i.e. the list of trials within the block
                try:
                    try:
                        P.trial_id = self.database.last_id_from('trials') + 1
                    except TypeError:
                        P.trial_id = 1
                    self.__trial__(trial, block.practice)
                    P.trial_number += 1
                except TrialException:
                    block.recycle()
                    P.recycle_count += 1
                    clear()  # NOTE: is this actually wanted?
                self.rc.reset()
        self.clean_up()

        self.incomplete = False
        if 'session_info' in self.database.table_schemas.keys():
            where = {'session_number': P.session_number}
            self.database.update('session_info', {'complete': True}, where)

    def __trial__(self, trial, practice):
        """
		Private method; manages a trial.
		"""
        from klibs.KLUtilities import pump, show_mouse_cursor, hide_mouse_cursor

        # At start of every trial, before setup_response_collector or trial_prep are run, retrieve
        # the values of the independent variables (factors) for that trial (as generated earlier by
        # TrialFactory) and set them as attributes of the experiment object.
        factors = list(self.trial_factory.exp_factors.keys())
        for iv in factors:
            iv_value = trial[factors.index(iv)]
            setattr(self, iv, iv_value)

        pump()
        self.setup_response_collector()
        self.trial_prep()
        tx = None
        try:
            if P.development_mode and (P.dm_trial_show_mouse or
                                       (P.eye_tracking
                                        and not P.eye_tracker_available)):
                show_mouse_cursor()
            self.evm.start_clock()
            if P.eye_tracking and not P.manual_eyelink_recording:
                self.el.start(P.trial_number)
            P.in_trial = True
            self.__log_trial__(self.trial())
            P.in_trial = False
            if P.eye_tracking and not P.manual_eyelink_recording:
                self.el.stop()
            if P.development_mode and (P.dm_trial_show_mouse or
                                       (P.eye_tracking
                                        and not P.eye_tracker_available)):
                hide_mouse_cursor()
            self.evm.stop_clock()
            self.trial_clean_up()
        except TrialException as e:
            P.trial_id = False
            self.trial_clean_up()
            self.evm.stop_clock()
            tx = e
        if P.eye_tracking and not P.manual_eyelink_recording:
            # TODO: add a warning here if the recording hasn't been stopped under manual control
            self.el.stop()
        if tx:
            raise tx

    def __log_trial__(self, trial_data):
        """Internal method, logs trial data to database.

		"""
        from klibs.KLDatabase import EntryTemplate

        trial_template = EntryTemplate('trials')
        trial_template.log(P.id_field_name, P.participant_id)
        for attr in trial_data:
            trial_template.log(attr, trial_data[attr])

        return self.database.insert(trial_template)

    ## Define abstract methods to be overridden in experiment.py ##

    @abstractmethod
    def setup(self):
        """The first part of the experiment that gets run. Locations, sizes, stimuli, and
		other experiment resources that stay the same throughout the experiment should be
		initialized and defined here.

		"""
        pass

    @abstractmethod
    def block(self):
        """Run once at the start of every block. Block messages, block-level stimulus generation,
		and similar content should go here.

		"""
        pass

    @abstractmethod
    def setup_response_collector(self):
        """Run immediately before trial_prep during each iteration of the trial loop. If using a
		:obj:`~klibs.KLResponseCollectors.ResponseCollector` that requires configuration at the
		start of each trial, that code should go here.
		
		"""
        pass

    @abstractmethod
    def trial_prep(self):
        """Run immediately before the start of every trial. All trial preparation unrelated to
		response collection should go here.

		"""
        pass

    @abstractmethod
    def trial(self):
        """The core of the experiment. All code related to the presentation of stimuli during a
		given trial, the collection and processing of responses, and the writing out of primary
		data should go here.

		The timing of events in the built-in :obj:`~klibs.KLEventManager.EventManager` instance
		(``self.evm``) are all relative to when this method is called.

		"""
        pass

    @abstractmethod
    def trial_clean_up(self):
        """Run immediately after the end of every trial.

		"""
        pass

    @abstractmethod
    def clean_up(self):
        """Run once at the end of the experiment, after all trials have been completed. Anything
		you want to happen at the very end of the session should go here.

		"""
        pass

    def insert_practice_block(self,
                              block_nums,
                              trial_counts=None,
                              factor_mask=None):
        """
		Adds one or more practice blocks to the experiment. This function must be called during setup(),
		otherwise the trials will have already been exported and this function will no longer have
		any effect. If you want to add a block to the experiment after setup() for whatever reason,
		you can manually generate one using trial_factory.generate() and then insert it using
		self.blocks.insert().
		
		If multiple block indexes are given but only a single integer is given for trial_counts,
		then all inserted practice blocks will be trial_counts trials long. If no trial_counts
		value is provided, the number of trials per practice block defaults to the global
		experiment trials_per_block parameter.

		If multiple block indexes are given but only a single factor mask is provided, the same
		factor mask will be applied to all inserted practice blocks. If no factor mask is provided,
		the function will generate a full set of trials based on all possible combinations of
		factors, and will randomly select trial_counts trials from it for each practice block.

		Args:
			block_nums (:obj:`list` of int): Index numbers at which to insert the blocks.
			trial_counts (:obj:`list` of int, optional): The numbers of trials to insert for each
				of the inserted blocks.
			factor_mask (:obj:`dict` of :obj:`list`, optional): Override values for the variables
				specified in independent_variables.py.

		Raises:
			TrialException: If called after the experiment's :meth:`setup` method has run.

		"""
        if self.blocks:
            # If setup has passed and trial execution has started, blocks have already been exported
            # from trial_factory so this function will no longer work. If it is called after it is no
            # longer useful, we throw a TrialException
            raise TrialException(
                "Practice blocks cannot be inserted after setup() is complete."
            )
        try:
            iter(block_nums)
        except TypeError:
            block_nums = [block_nums]
        try:
            iter(trial_counts)
        except TypeError:
            trial_counts = ([P.trials_per_block] if trial_counts is None else
                            [trial_counts]) * len(block_nums)
        while len(trial_counts) < len(block_nums):
            trial_counts.append(P.trials_per_block)
        for i in range(0, len(block_nums)):
            self.trial_factory.insert_block(block_nums[i], True,
                                            trial_counts[i], factor_mask)
            P.blocks_per_experiment += 1

    def before_flip(self):
        """A method called immediately before every refresh of the screen (i.e. every time
		:func:`~klibs.KLGraphics.flip` is called).
		
		By default, this is used for drawing the current gaze location to the screen when using an
		eye tracker (and ``P.show_gaze_dot`` is True), but can be overridden with a different
		function if desired.

		"""
        from klibs.KLGraphics import blit

        if P.show_gaze_dot and self.el.recording:
            try:
                blit(self.tracker_dot, 5, self.el.gaze())
            except RuntimeError:
                pass

    def quit(self):
        """Safely exits the program, ensuring data has been saved and any connected EyeLink unit's
		recording is stopped. This, not Python's sys.exit(), should be used to exit an experiment.

		"""
        import sdl2
        if P.verbose_mode:
            print_tb(print_stack(), 6)

        err = ''
        try:
            self.database.commit()
            self.database.close()
        except Exception:
            err += "<red>Error encountered closing database connection:</red>\n\n"
            err += full_trace() + "\n\n"
            err += "<red>Some data may not have been saved.</red>\n\n\n"

        if P.eye_tracking and P.eye_tracker_available:
            try:
                self.el.shut_down(incomplete=self.incomplete)
            except Exception:
                err += "<red>Eye tracker encountered error during shutdown:</red>\n\n"
                err += full_trace() + "\n\n"
                err += "<red>You may need to manually stop the tracker from recording.</red>\n\n\n"

        if P.multi_user and P.version_dir:
            newpath = P.version_dir.replace(str(P.random_seed),
                                            str(P.participant_id))
            os.rename(P.version_dir, newpath)

        self.audio.shut_down()
        sdl2.ext.quit()

        if err:
            cso("\n\n" + err +
                "<red>*** Errors encountered during shutdown. ***</red>\n\n")
            os._exit(1)
        cso("\n\n<green>*** '{0}' successfully shut down. ***</green>\n\n".
            format(P.project_name))
        os._exit(1)

    def run(self, *args, **kwargs):
        """The method that gets run by 'klibs run' after the runtime environment is created. Runs
		the actual experiment.

		"""
        from klibs.KLGraphics.KLDraw import Ellipse

        if P.eye_tracking:
            RED = (255, 0, 0)
            WHITE = (255, 255, 255)
            self.tracker_dot = Ellipse(8, stroke=[2, WHITE], fill=RED).render()
            if not P.manual_eyelink_setup:
                self.el.setup()

        self.setup()
        try:
            self.__execute_experiment__(*args, **kwargs)
        except RuntimeError:
            print(full_trace())

        self.quit()

    def show_logo(self):
        from klibs.KLUtilities import flush
        from klibs.KLUserInterface import any_key
        from klibs.KLGraphics import fill, blit, flip
        from klibs.KLGraphics import NumpySurface as NpS
        logo = NpS(P.logo_file_path)
        flush()
        for i in (1, 2):
            fill()
            blit(logo, 5, P.screen_c)
            flip()
        any_key()
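To tie the base class together: run() calls setup() once, then __execute_experiment__() calls block() at the start of each block and, for every trial, setup_response_collector(), trial_prep(), trial() (via __trial__, which also logs the returned dict), and trial_clean_up(), with clean_up() running once after the final block. A minimal subclass skeleton showing those hooks in the order they are called; the class name and the columns returned by trial() are placeholders.

# Minimal sketch of a user-defined KLibs experiment overriding the lifecycle hooks.
import klibs
from klibs import P


class MinimalExperiment(klibs.Experiment):

    def setup(self):                     # once, before the first block
        pass

    def block(self):                     # once, at the start of every block
        pass

    def setup_response_collector(self):  # every trial, before trial_prep()
        pass

    def trial_prep(self):                # every trial, before trial()
        pass

    def trial(self):                     # stimuli + responses; the returned dict is logged to 'trials'
        return {"block_num": P.block_number, "trial_num": P.trial_number}

    def trial_clean_up(self):            # every trial, after trial()
        pass

    def clean_up(self):                  # once, after the last trial of the last block
        pass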
Example 6
    def setup(self):
        # Stimulus sizes
        fix_thickness = deg_to_px(0.1)
        fix_size = deg_to_px(0.6)
        wheel_size = int(P.screen_y * 0.75)
        cursor_size = deg_to_px(1)
        cursor_thickness = deg_to_px(0.3)
        target_size = deg_to_px(2)

        # Make this a variable to be assigned at runtime
        self.item_duration = 0.120

        # Stimulus drawbjects
        self.fixation = Asterisk(fix_size, fix_thickness, fill=WHITE)
        self.t1_wheel = ColorWheel(diameter=wheel_size)
        self.t2_wheel = ColorWheel(diameter=wheel_size)
        self.cursor = Annulus(cursor_size, cursor_thickness, fill=BLACK)

        # Colour ResponseCollector needs to be passed an object whose fill (colour)
        # is that of the target colour. W/n trial_prep(), these dummies will be filled
        # w/ the target colour and then passed to their ResponseCollectors, respectively.
        self.t1_dummy = Ellipse(width=1)
        self.t2_dummy = Ellipse(width=1)

        # Target & distractor text styles
        self.txtm.add_style(label='T1Col', font_size=target_size)
        self.txtm.add_style(label='T2Col', font_size=target_size)
        self.txtm.add_style(label='stream', font_size=target_size)

        # Experiment messages
        self.anykey_txt = "{0}\nPress any key to continue."
        self.t1_id_request = "What number was the first target you saw?\n"
        self.t2_id_request = "What number was the second target you saw?\n"
        self.identity_instruct = "\nIn this block, you will be asked to report which two numbers were presented."
        self.colour_instruct = "\nIn this block, you will be asked to report which two colours were presented."

        # Initialize ResponseCollectors
        self.t1_identity_rc = ResponseCollector(uses=RC_KEYPRESS)
        self.t2_identity_rc = ResponseCollector(uses=RC_KEYPRESS)

        self.t1_colouring_rc = ResponseCollector(uses=RC_COLORSELECT)
        self.t2_colouring_rc = ResponseCollector(uses=RC_COLORSELECT)

        # Initialize ResponseCollector Keymaps
        self.keymap = KeyMap(
            'identity_response', ['1', '2', '3', '4', '5', '6', '7', '8', '9'],
            ['1', '2', '3', '4', '5', '6', '7', '8', '9'], [
                sdl2.SDLK_1, sdl2.SDLK_2, sdl2.SDLK_3, sdl2.SDLK_4,
                sdl2.SDLK_5, sdl2.SDLK_6, sdl2.SDLK_7, sdl2.SDLK_8, sdl2.SDLK_9
            ])

        # Pre-render letters & digits
        self.letters_rendered = {}
        for letter in letters:
            self.letters_rendered[letter] = message(letter,
                                                    style='stream',
                                                    align='center',
                                                    blit_txt=False)

        self.numbers_rendered = {}
        for number in numbers:
            self.numbers_rendered[number] = message(number,
                                                    style='stream',
                                                    align='center',
                                                    blit_txt=False)

        # Insert practice blocks (one for each response type)
        if P.run_practice_blocks:
            self.insert_practice_block(
                block_nums=1, trial_counts=P.trials_per_practice_block)
            self.insert_practice_block(
                block_nums=2, trial_counts=P.trials_per_practice_block)

        self.block_type = IDENTITY
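One implication of the item_duration set above, worth making concrete: with a 120 ms SOA per stream item, the target positions chosen in the full class below (T1 at the 5th-9th position, T2 at T1 + lag, and six trailing items) fix the onset times and total stream length. A standalone sketch for one possible trial; the specific position and lag values are placeholders.

# Standalone sketch: what a 120 ms per-item RSVP stream implies for target timing.
item_duration = 0.120        # seconds per stream item (as in setup() above)
t1_time = 7                  # placeholder T1 position (5th-9th item in the full class)
lag = 3                      # placeholder lag: items between T1 and T2 (1-8)
t2_time = t1_time + lag
stream_length = t2_time + 6  # six items always follow T2

print("T1 onset: {0:.0f} ms after stream start".format(t1_time * item_duration * 1000))
print("T2 onset: {0:.0f} ms after stream start".format(t2_time * item_duration * 1000))
print("Stream: {0} items, {1:.2f} s total".format(stream_length, stream_length * item_duration))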
Example 7
class ABColour_NoSwitch(klibs.Experiment):
    def setup(self):
        # Stimulus sizes
        fix_thickness = deg_to_px(0.1)
        fix_size = deg_to_px(0.6)
        wheel_size = int(P.screen_y * 0.75)
        cursor_size = deg_to_px(1)
        cursor_thickness = deg_to_px(0.3)
        target_size = deg_to_px(2)

        # Make this a variable to be assigned at runtime
        self.item_duration = 0.120

        # Stimulus drawbjects
        self.fixation = Asterisk(fix_size, fix_thickness, fill=WHITE)
        self.t1_wheel = ColorWheel(diameter=wheel_size)
        self.t2_wheel = ColorWheel(diameter=wheel_size)
        self.cursor = Annulus(cursor_size, cursor_thickness, fill=BLACK)

        # Colour ResponseCollector needs to be passed an object whose fill (colour)
        # is that of the target colour. W/n trial_prep(), these dummies will be filled
        # w/ the target colour and then passed to their ResponseCollectors, respectively.
        self.t1_dummy = Ellipse(width=1)
        self.t2_dummy = Ellipse(width=1)

        # Target & distractor text styles
        self.txtm.add_style(label='T1Col', font_size=target_size)
        self.txtm.add_style(label='T2Col', font_size=target_size)
        self.txtm.add_style(label='stream', font_size=target_size)

        # Experiment messages
        self.anykey_txt = "{0}\nPress any key to continue."
        self.t1_id_request = "What number was the first target you saw?\n"
        self.t2_id_request = "What number was the second target you saw?\n"
        self.identity_instruct = "\nIn this block, you will be asked to report which two numbers were presented."
        self.colour_instruct = "\nIn this block, you will be asked to report which two colours were presented."

        # Initialize ResponseCollectors
        self.t1_identity_rc = ResponseCollector(uses=RC_KEYPRESS)
        self.t2_identity_rc = ResponseCollector(uses=RC_KEYPRESS)

        self.t1_colouring_rc = ResponseCollector(uses=RC_COLORSELECT)
        self.t2_colouring_rc = ResponseCollector(uses=RC_COLORSELECT)

        # Initialize ResponseCollector Keymaps
        self.keymap = KeyMap(
            'identity_response', ['1', '2', '3', '4', '5', '6', '7', '8', '9'],
            ['1', '2', '3', '4', '5', '6', '7', '8', '9'], [
                sdl2.SDLK_1, sdl2.SDLK_2, sdl2.SDLK_3, sdl2.SDLK_4,
                sdl2.SDLK_5, sdl2.SDLK_6, sdl2.SDLK_7, sdl2.SDLK_8, sdl2.SDLK_9
            ])

        # Pre-render letters & digits
        self.letters_rendered = {}
        for letter in letters:
            self.letters_rendered[letter] = message(letter,
                                                    style='stream',
                                                    align='center',
                                                    blit_txt=False)

        self.numbers_rendered = {}
        for number in numbers:
            self.numbers_rendered[number] = message(number,
                                                    style='stream',
                                                    align='center',
                                                    blit_txt=False)

        # Insert practice blocks (one for each response type)
        if P.run_practice_blocks:
            self.insert_practice_block(
                block_nums=1, trial_counts=P.trials_per_practice_block)
            self.insert_practice_block(
                block_nums=2, trial_counts=P.trials_per_practice_block)

        self.block_type = IDENTITY

    def block(self):
        # Present block progress
        block_txt = "Block {0} of {1}".format(P.block_number,
                                              P.blocks_per_experiment)
        progress_txt = self.anykey_txt.format(block_txt)

        if P.practicing:
            progress_txt += "\n(This is a practice block)"

        progress_msg = message(progress_txt, align='center', blit_txt=False)

        fill()
        blit(progress_msg, 5, P.screen_c)
        flip()
        any_key()

        # Inform as to block type
        if self.block_type == COLOUR:
            block_type_txt = self.anykey_txt.format(self.colour_instruct)
        else:
            block_type_txt = self.anykey_txt.format(self.identity_instruct)

        block_type_msg = message(block_type_txt,
                                 align='center',
                                 blit_txt=False)

        fill()
        blit(block_type_msg, 5, P.screen_c)
        flip()
        any_key()

    def setup_response_collector(self):
        # Configure identity collector
        self.t1_identity_rc.terminate_after = [10,
                                               TK_S]  # Waits 10s for response
        self.t1_identity_rc.display_callback = self.identity_callback  # Continuously draw images to screen
        self.t1_identity_rc.display_kwargs = {
            'target': "T1"
        }  # Passed as arg when identity_callback() is called
        self.t1_identity_rc.keypress_listener.key_map = self.keymap  # Assign key mappings
        self.t1_identity_rc.keypress_listener.interrupts = True  # Terminates listener after valid response

        self.t2_identity_rc.terminate_after = [10, TK_S]
        self.t2_identity_rc.display_callback = self.identity_callback
        self.t2_identity_rc.display_kwargs = {'target': "T2"}
        self.t2_identity_rc.keypress_listener.key_map = self.keymap
        self.t2_identity_rc.keypress_listener.interrupts = True

        # Configure colour collector
        # Because colours are randomly selected on a trial-by-trial basis,
        # most properties of colouring_rc need to be assigned within trial_prep()
        self.t1_colouring_rc.terminate_after = [10, TK_S]
        self.t2_colouring_rc.terminate_after = [10, TK_S]

    def trial_prep(self):
        # Prepare colour wheels
        self.t1_wheel.rotation = random.randrange(
            0, 360)  # Randomly rotate wheel to prevent location biases
        self.t1_wheel.render()

        self.t2_wheel.rotation = random.randrange(
            0, 360)  # Randomly rotate wheel to prevent location biases
        self.t2_wheel.render()

        # Prepare T1 & T2
        self.t1_identity = random.sample(numbers,
                                         1)[0]  # Select & assign identity
        self.t1_colour = self.t1_wheel.color_from_angle(
            random.randrange(0, 360))  # Select & assign colouring
        self.t1_time = random.sample(
            range(5), 1
        )[0] + 5  # Select T1 stream position, no earlier than the 5th item

        self.t2_identity = random.sample(numbers, 1)[0]
        self.t2_colour = self.t2_wheel.color_from_angle(
            random.randrange(0, 360))
        self.t2_time = self.t1_time + self.lag  # Lag: # of items interspacing the two targets (can be 1-8)

        while self.t1_identity == self.t2_identity:  # Ensure that T1 & T2 identities are unique
            self.t2_identity = random.sample(numbers, 1)[0]

        while self.t1_colour == self.t2_colour:  # Similarly, colouring
            self.t2_colour = self.t2_wheel.color_from_angle(
                random.randrange(0, 360))

        # Dummy objects to serve as reference point when calculating response error
        self.t1_dummy.fill = self.t1_colour
        self.t2_dummy.fill = self.t2_colour

        self.t1_colouring_rc.display_callback = self.wheel_callback
        self.t1_colouring_rc.display_kwargs = {'wheel': self.t1_wheel}

        self.t1_colouring_rc.color_listener.set_wheel(
            self.t1_wheel)  # Set generated wheel as wheel to use
        self.t1_colouring_rc.color_listener.set_target(self.t1_dummy)

        self.t2_colouring_rc.display_callback = self.wheel_callback
        self.t2_colouring_rc.display_kwargs = {'wheel': self.t2_wheel}

        self.t2_colouring_rc.color_listener.set_wheel(self.t2_wheel)
        self.t2_colouring_rc.color_listener.set_target(self.t2_dummy)

        # Prepare stream according to response block (Identity | Colouring)
        self.rsvp_stream = self.prep_stream(self.block_type)

        # Initialize EventManager
        # Stream begins 1000ms after fixation
        self.evm.register_ticket(ET('stream_on', 1000))

    def trial(self):

        # Hide cursor during trial
        hide_mouse_cursor()

        # Present fixation & wait 1s before presenting RSVP stream
        self.present_fixation()
        while self.evm.before('stream_on', True):
            pass

        # Present RSVP stream
        self.present_stream()

        # For 'identity' blocks, request targets' numerical identity
        if self.block_type == IDENTITY:
            # Collect responses
            self.t1_identity_rc.collect()
            self.t2_identity_rc.collect()

            # Assign to return variables
            t1_id_response, t1_id_rt = self.t1_identity_rc.keypress_listener.response(
            )
            t2_id_response, t2_id_rt = self.t2_identity_rc.keypress_listener.response(
            )

            t1_response_err, t1_response_err_rt, t2_response_err, t2_response_err_rt = [
                'NA', 'NA', 'NA', 'NA'
            ]

        # For 'colour' blocks, request targets' colouring
        else:
            self.t1_colouring_rc.collect()
            self.t2_colouring_rc.collect()

            t1_response_err, t1_response_err_rt = self.t1_colouring_rc.color_listener.response(
            )
            t2_response_err, t2_response_err_rt = self.t2_colouring_rc.color_listener.response(
            )

            t1_id_response, t1_id_rt, t2_id_response, t2_id_rt = [
                'NA', 'NA', 'NA', 'NA'
            ]

        # Clear remaining stimuli from screen
        clear()

        return {
            "block_num": P.block_number,
            "trial_num": P.trial_number,
            "block_type": self.block_type,
            "t1_time": self.t1_time,
            "t2_time": self.t2_time,
            "lag": self.lag,
            "t1_identity": self.t1_identity,
            "t2_identity": self.t2_identity,
            "t1_identity_response": t1_id_response,
            "t1_identity_rt": t1_id_rt,
            "t2_identity_response": t2_id_response,
            "t2_identity_rt": t2_id_rt,
            "t1_colour": self.t1_colour,
            "t2_colour": self.t2_colour,
            "t1_ang_err": t1_response_err,
            "t1_ang_err_rt": t1_response_err_rt,
            "t2_ang_err": t2_response_err,
            "t2_ang_err_rt": t2_response_err_rt,
            "t1_wheel_rotation": self.t1_wheel.rotation,
            "t2_wheel_rotation": self.t2_wheel.rotation
        }

    def trial_clean_up(self):
        # Reset ResponseCollectors
        self.t1_colouring_rc.color_listener.reset()
        self.t2_colouring_rc.color_listener.reset()

        self.t1_identity_rc.keypress_listener.reset()
        self.t2_identity_rc.keypress_listener.reset()

        # Switch block type
        if not P.practicing:
            if P.trial_number == P.trials_per_block:
                if P.block_number < P.blocks_per_experiment:
                    if self.block_type == IDENTITY:
                        self.block_type = COLOUR
                    else:
                        self.block_type = IDENTITY
        else:
            if P.trial_number == P.trials_per_practice_block:
                if self.block_type == IDENTITY:
                    self.block_type = COLOUR
                else:
                    self.block_type = IDENTITY

        if P.trial_number == P.trials_per_block:
            break_txt = self.anykey_txt.format("Good work! Take a break")
            break_msg = message(break_txt, align='center', blit_txt=False)

            fill()
            blit(break_msg, registration=5, location=P.screen_c)
            flip()
            any_key()

    def clean_up(self):
        # Inform Ss that they have completed the experiment
        all_done_txt = "Whew! You're all done!\nPlease buzz the researcher to let them know."
        all_done_msg = message(all_done_txt, align="center", blit_txt=False)

        fill()
        blit(all_done_msg, 5, P.screen_c)
        flip()
        any_key()

    def present_fixation(self):
        fill()
        blit(self.fixation, location=P.screen_c, registration=5)
        flip()

    def wheel_callback(self, wheel):
        fill()
        # Hide cursor during selection phase
        hide_mouse_cursor()
        # Present appropriate wheel
        if wheel == self.t1_wheel:
            blit(self.t1_wheel, registration=5, location=P.screen_c)
        else:
            blit(self.t2_wheel, registration=5, location=P.screen_c)
        # Present annulus drawbject as cursor
        blit(self.cursor, registration=5, location=mouse_pos())
        flip()

    def identity_callback(self, target):
        # Request appropriate identity
        identity_request_msg = self.t1_id_request if target == "T1" else self.t2_id_request

        fill()
        message(identity_request_msg,
                location=P.screen_c,
                registration=5,
                blit_txt=True)
        flip()

    def prep_stream(self, block):
        # To be populated & returned
        stream_items = []

        # Set font colouring for targets (only used w/n COLOUR blocks)
        self.txtm.styles['T1Col'].color = self.t1_colour
        self.txtm.styles['T2Col'].color = self.t2_colour

        # For IDENTITY streams, targets=digits & distractors=letters.
        # All are uniformly coloured (gray)
        if block == IDENTITY:
            # Stream length is such that 6 items are always presented subsequent to T2
            for i in range(0, self.t2_time + 6):
                # Insert targets @ their respective positions
                if i == self.t1_time:
                    stream_items.append(
                        message(self.t1_identity,
                                align='center',
                                style='stream',
                                blit_txt=False))
                elif i == self.t2_time:
                    stream_items.append(
                        message(self.t2_identity,
                                align='center',
                                style='stream',
                                blit_txt=False))
                # Populate remaining positions w/ distractors (randomly sampled)
                else:
                    stream_items.append(
                        random.choice(list(self.letters_rendered.values())))
        # For COLOUR streams, targets & distractors are digits.
        # Targets are randomly coloured, distractors are gray
        else:

            for i in range(0, self.t2_time + 6):
                if i == self.t1_time:
                    stream_items.append(
                        message(self.t1_identity,
                                align='center',
                                style='T1Col',
                                blit_txt=False))
                elif i == self.t2_time:
                    stream_items.append(
                        message(self.t2_identity,
                                align='center',
                                style='T2Col',
                                blit_txt=False))
                else:
                    stream_items.append(
                        random.choice(list(self.numbers_rendered.values())))

        # Return stream

        return stream_items

    def present_stream(self):
        # Each stream item presented for a pre-specified duration
        cd = CountDown(self.item_duration)
        sw = Stopwatch()
        for item in self.rsvp_stream:
            cd.reset()
            sw.reset()
            fill()
            blit(item, registration=5, location=P.screen_c)
            flip()

            #print(cd.elapsed)
            while cd.counting():
                ui_request()
            print(sw.elapsed())
            sw.reset()
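A note on the angular-error columns logged by the colour blocks above: the colour report is scored as a deviation on the colour wheel rather than as correct/incorrect. A hedged sketch of the standard signed circular difference (this is the usual definition, not necessarily KLibs' internal computation):

# Illustrative only: signed angular difference on a 360-degree colour wheel,
# wrapped into the range (-180, 180].
def signed_angular_error(response_deg, target_deg):
    diff = (response_deg - target_deg) % 360.0
    if diff > 180.0:
        diff -= 360.0
    return diff

print(signed_angular_error(350.0, 10.0))  # -20.0
print(signed_angular_error(30.0, 10.0))   #  20.0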
Example 8
    def setup(self):
        # Stimulus sizes
        fix_thickness = deg_to_px(0.1)
        fix_size = deg_to_px(0.6)
        wheel_size = int(P.screen_y * 0.75)
        cursor_size = deg_to_px(1)
        cursor_thickness = deg_to_px(0.3)
        target_size = deg_to_px(0.8)

        # Initialize drawbjects
        self.fixation = FixationCross(size=fix_size,
                                      thickness=fix_thickness,
                                      fill=WHITE)
        self.t1_wheel = ColorWheel(diameter=wheel_size)
        self.t2_wheel = ColorWheel(diameter=wheel_size)
        self.cursor = Annulus(diameter=cursor_size,
                              thickness=cursor_thickness,
                              fill=BLACK)

        # Create text styles to store target colouring
        self.txtm.add_style(label="T1", font_size=target_size)
        self.txtm.add_style(label="T2", font_size=target_size)

        # Stimulus presentation durations (multiples of the ~16.7ms refresh interval)
        self.id_target_duration = P.refresh_time * 5  # 83.3ms
        self.id_mask_duration = P.refresh_time
        self.col_target_duration = P.refresh_time * 5  # 83.3ms
        self.col_mask_duration = P.refresh_time
        self.isi = P.refresh_time  # ISI = inter-stimulus interval (target offset -> mask onset)

        # Colour ResponseCollector needs to be passed an object whose fill (colour)
        # is that of the target colour. W/n trial_prep(), these dummies will be filled
        # w/ the target colour and then passed to their ResponseCollectors, respectively.
        self.t1_dummy = Ellipse(width=1)
        self.t2_dummy = Ellipse(width=1)

        # Experiment messages
        self.anykey_txt = "{0}\nPress any key to continue."
        self.t1_id_request = "What was the first number?"
        self.t2_id_request = "What was the second number?"
        self.t1_col_request = "What was the first colour?"
        self.t2_col_request = "What was the second colour?"
        self.prac_identity_instruct = "\nIn this block, you will be asked to report what number was presented.\nIf you're unsure, make your best guess."
        self.prac_colour_instruct = "\nIn this block, you will be asked to report what colour was presented.\nIf you're unsure, make your best guess."
        self.test_identity_instruct = "\nIn this block, you will be asked to report which two numbers were presented.\nIf you're unsure, make your best guess."
        self.test_colour_instruct = "\nIn this block, you will be asked to report which two colours were presented.\nIf you're unsure, make your best guess."

        # Initialize ResponseCollectors
        self.t1_identity_rc = ResponseCollector(uses=RC_KEYPRESS)
        self.t2_identity_rc = ResponseCollector(uses=RC_KEYPRESS)

        self.t1_colouring_rc = ResponseCollector(uses=RC_COLORSELECT)
        self.t2_colouring_rc = ResponseCollector(uses=RC_COLORSELECT)

        # Initialize ResponseCollector Keymaps
        self.keymap = KeyMap(
            'identity_response', ['1', '2', '3', '4', '5', '6', '7', '8', '9'],
            ['1', '2', '3', '4', '5', '6', '7', '8', '9'], [
                sdl2.SDLK_1, sdl2.SDLK_2, sdl2.SDLK_3, sdl2.SDLK_4,
                sdl2.SDLK_5, sdl2.SDLK_6, sdl2.SDLK_7, sdl2.SDLK_8, sdl2.SDLK_9
            ])

        # Inserting practice blocks requires a pre-defined trial count, but here practice blocks
        # have no fixed length: they last until participants reach a performance threshold. So each
        # practice block initially contains a single trial, and further trials are inserted at
        # runtime based on participant performance.
        if P.run_practice_blocks:
            self.insert_practice_block([1, 3], trial_counts=1)

        # Randomly select starting condition
        self.block_type = random.choice([IDENTITY, COLOUR])
Example 9
    def setup(self):

        # Bandit Variables

        self.high_payout_baseline = 12
        self.low_payout_baseline = 8
        self.total_score = None
        self.penalty = -5

        # Stimulus Sizes

        thick_rect_border = deg_to_px(0.5)
        thin_rect_border = deg_to_px(0.1)
        star_size = deg_to_px(0.6)
        star_thickness = deg_to_px(0.1)
        square_size = deg_to_px(3)
        text_size = deg_to_px(0.65)
        large_text_size = deg_to_px(0.85)

        # Generate bandit colours from colour wheel

        self.bandit_colour_combos = []
        if P.blocks_per_experiment > 4:
            msg = (
                "Only 4 sets of colours available, experiment script must be modified if more"
                "than 4 blocks total are wanted.")
            raise RuntimeError(msg)
        for angle in [0, 45, 90, 135]:
            combo = [const_lum[angle], const_lum[angle + 180]]
            self.bandit_colour_combos.append(combo)
        random.shuffle(self.bandit_colour_combos)

        # Stimulus Drawbjects

        self.thick_rect = Rectangle(
            square_size, stroke=[thick_rect_border, WHITE, STROKE_CENTER])
        self.thin_rect = Rectangle(
            square_size, stroke=[thin_rect_border, WHITE, STROKE_CENTER])
        self.left_bandit = Rectangle(
            square_size, stroke=[thin_rect_border, WHITE, STROKE_CENTER])
        self.right_bandit = Rectangle(
            square_size, stroke=[thin_rect_border, WHITE, STROKE_CENTER])
        self.neutral_box = self.thin_rect.render()
        self.star = Asterisk(star_size, star_thickness, fill=WHITE)
        self.star_cueback = Asterisk(star_size * 2,
                                     star_thickness * 2,
                                     fill=WHITE)
        self.star_muted = Asterisk(star_size, star_thickness, fill=GREY)
        self.probe = Ellipse(int(0.75 * square_size), fill=WHITE).render()

        # Layout

        box_offset = deg_to_px(8.0)
        self.left_box_loc = (P.screen_c[0] - box_offset, P.screen_c[1])
        self.right_box_loc = (P.screen_c[0] + box_offset, P.screen_c[1])

        # Timing

        # Note: cotoa = cue-offset target-onset asynchrony
        self.cotoa_min = 700  # ms
        self.cotoa_max = 1000  # ms
        self.feedback_exposure_period = 1.25  # sec

        # EyeLink Boundaries

        fix_bounds = [P.screen_c, square_size / 2]
        self.el.add_boundary('fixation', fix_bounds, CIRCLE_BOUNDARY)

        # Experiment Messages

        self.txtm.styles['default'].font_size = text_size  # re-define default font size in degrees
        self.txtm.add_style("score up", large_text_size, PASTEL_GREEN)
        self.txtm.add_style("score down", large_text_size, PASTEL_RED)
        self.txtm.add_style("timeout", large_text_size, WHITE)

        err_txt = "{0}\n\nPress any key to continue."
        lost_fixation_txt = err_txt.format(
            "Eyes moved! Please keep your eyes on the asterisk.")
        too_soon_txt = err_txt.format(
            "Responded too soon! Please wait until the 'go' signal to "
            "make a response.")
        probe_timeout_txt = err_txt.format(
            "No response detected! Please answer louder or faster.")
        bandit_timeout_txt = err_txt.format("Bandit selection timed out!")
        wrong_response_txt = err_txt.format(
            "Wrong response type!\nPlease make vocal responses "
            "to probes and keypress responses to bandits.")

        self.err_msgs = {
            'fixation':
            message(lost_fixation_txt, align='center', blit_txt=False),
            'too_soon':
            message(too_soon_txt, align='center', blit_txt=False),
            'probe_timeout':
            message(probe_timeout_txt,
                    'timeout',
                    align='center',
                    blit_txt=False),
            'bandit_timeout':
            message(bandit_timeout_txt,
                    'timeout',
                    align='center',
                    blit_txt=False),
            'wrong_response':
            message(wrong_response_txt, align='center', blit_txt=False)
        }

        # Initialize separate ResponseCollectors for probe and bandit responses

        self.probe_rc = ResponseCollector(uses=[RC_AUDIO, RC_KEYPRESS])
        self.bandit_rc = ResponseCollector(uses=[RC_AUDIO, RC_KEYPRESS])

        # Initialize ResponseCollector keymap

        self.keymap = KeyMap(
            'bandit_response',  # Name
            ['z', '/'],  # UI labels
            ["left", "right"],  # Data labels
            [sdl2.SDLK_z, sdl2.SDLK_SLASH]  # SDL2 Keysyms
        )

        # Add practice block of 20 trials to start of experiment

        if P.run_practice_blocks:
            self.insert_practice_block(1, trial_counts=20)
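The err_msgs dict above pre-renders each error message so feedback can be blitted without a per-trial message() call. A hedged sketch of how such a message is typically paired with the TrialException/recycle machinery shown in Example 5; the helper name and the point at which it would be called are assumptions, not code from this experiment.

# Illustrative helper (hypothetical name): show a pre-rendered error message, wait for
# a keypress, then raise TrialException so the base class recycles the trial.
def show_error(self, err_type):
    fill()
    blit(self.err_msgs[err_type], registration=5, location=P.screen_c)
    flip()
    any_key()
    raise TrialException(err_type)

# e.g. inside trial(), after detecting a fixation break:
#     self.show_error('fixation')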
Example 10
class IOR_Reward(klibs.Experiment):

    #TODO: Add checking for audio responses on bandit trials (keyboard might make this difficult?)

    def __init__(self, *args, **kwargs):
        super(IOR_Reward, self).__init__(*args, **kwargs)

    def setup(self):

        # Bandit Variables

        self.high_payout_baseline = 12
        self.low_payout_baseline = 8
        self.total_score = None
        self.penalty = -5

        # Stimulus Sizes

        thick_rect_border = deg_to_px(0.5)
        thin_rect_border = deg_to_px(0.1)
        star_size = deg_to_px(0.6)
        star_thickness = deg_to_px(0.1)
        square_size = deg_to_px(3)
        text_size = deg_to_px(0.65)
        large_text_size = deg_to_px(0.85)

        # Generate bandit colours from colour wheel

        self.bandit_colour_combos = []
        if P.blocks_per_experiment > 4:
            msg = (
                "Only 4 sets of colours available, experiment script must be modified if more"
                "than 4 blocks total are wanted.")
            raise RuntimeError(msg)
        for angle in [0, 45, 90, 135]:
            combo = [const_lum[angle], const_lum[angle + 180]]
            self.bandit_colour_combos.append(combo)
        random.shuffle(self.bandit_colour_combos)

        # Stimulus Drawbjects

        self.thick_rect = Rectangle(
            square_size, stroke=[thick_rect_border, WHITE, STROKE_CENTER])
        self.thin_rect = Rectangle(
            square_size, stroke=[thin_rect_border, WHITE, STROKE_CENTER])
        self.left_bandit = Rectangle(
            square_size, stroke=[thin_rect_border, WHITE, STROKE_CENTER])
        self.right_bandit = Rectangle(
            square_size, stroke=[thin_rect_border, WHITE, STROKE_CENTER])
        self.neutral_box = self.thin_rect.render()
        self.star = Asterisk(star_size, star_thickness, fill=WHITE)
        self.star_cueback = Asterisk(star_size * 2,
                                     star_thickness * 2,
                                     fill=WHITE)
        self.star_muted = Asterisk(star_size, star_thickness, fill=GREY)
        self.probe = Ellipse(int(0.75 * square_size), fill=WHITE).render()

        # Layout

        box_offset = deg_to_px(8.0)
        self.left_box_loc = (P.screen_c[0] - box_offset, P.screen_c[1])
        self.right_box_loc = (P.screen_c[0] + box_offset, P.screen_c[1])

        # Timing

        # Note: cotoa = cue-offset target-onset asynchrony
        self.cotoa_min = 700  # ms
        self.cotoa_max = 1000  # ms
        self.feedback_exposure_period = 1.25  # sec

        # EyeLink Boundaries

        fix_bounds = [P.screen_c, square_size / 2]
        self.el.add_boundary('fixation', fix_bounds, CIRCLE_BOUNDARY)

        # Experiment Messages

        self.txtm.styles['default'].font_size = text_size  # re-define default font size in degrees
        self.txtm.add_style("score up", large_text_size, PASTEL_GREEN)
        self.txtm.add_style("score down", large_text_size, PASTEL_RED)
        self.txtm.add_style("timeout", large_text_size, WHITE)

        err_txt = "{0}\n\nPress any key to continue."
        lost_fixation_txt = err_txt.format(
            "Eyes moved! Please keep your eyes on the asterisk.")
        too_soon_txt = err_txt.format(
            "Responded too soon! Please wait until the 'go' signal to "
            "make a response.")
        probe_timeout_txt = err_txt.format(
            "No response detected! Please answer louder or faster.")
        bandit_timeout_txt = err_txt.format("Bandit selection timed out!")
        wrong_response_txt = err_txt.format(
            "Wrong response type!\nPlease make vocal responses "
            "to probes and keypress responses to bandits.")

        self.err_msgs = {
            'fixation':
            message(lost_fixation_txt, align='center', blit_txt=False),
            'too_soon':
            message(too_soon_txt, align='center', blit_txt=False),
            'probe_timeout':
            message(probe_timeout_txt,
                    'timeout',
                    align='center',
                    blit_txt=False),
            'bandit_timeout':
            message(bandit_timeout_txt,
                    'timeout',
                    align='center',
                    blit_txt=False),
            'wrong_response':
            message(wrong_response_txt, align='center', blit_txt=False)
        }

        # Initialize separate ResponseCollectors for probe and bandit responses

        self.probe_rc = ResponseCollector(uses=[RC_AUDIO, RC_KEYPRESS])
        self.bandit_rc = ResponseCollector(uses=[RC_AUDIO, RC_KEYPRESS])

        # Initialize ResponseCollector keymap

        self.keymap = KeyMap(
            'bandit_response',  # Name
            ['z', '/'],  # UI labels
            ["left", "right"],  # Data labels
            [sdl2.SDLK_z, sdl2.SDLK_SLASH]  # SDL2 Keysyms
        )

        # Add practice block of 20 trials to start of experiment

        if P.run_practice_blocks:
            self.insert_practice_block(1, trial_counts=20)

    def block(self):

        if self.total_score:
            fill()
            score_txt = "Total block score: {0} points!".format(
                self.total_score)
            msg = message(score_txt, 'timeout', blit_txt=False)
            blit(msg, 5, P.screen_c)
            flip()
            any_key()
        self.total_score = 0  # reset total bandit score each block

        # Change bandit colours between blocks
        if P.practicing:
            greys = [(0, 0, 0, 255), (96, 96, 96, 255)]
            random.shuffle(greys)
            self.high_value_color = greys[0]
            self.low_value_color = greys[1]
        else:
            bandit_colours = self.bandit_colour_combos.pop()
            random.shuffle(bandit_colours)
            self.high_value_color = bandit_colours[0]
            self.low_value_color = bandit_colours[1]

        # Calibrate microphone for audio responses (people get quieter over time)
        threshold = self.audio.calibrate()
        self.probe_rc.audio_listener.threshold = threshold
        self.bandit_rc.audio_listener.threshold = threshold

    def setup_response_collector(self):

        # Configure probe response collector
        self.probe_rc.terminate_after = [2000, TK_MS]
        self.probe_rc.display_callback = self.probe_callback
        self.probe_rc.display_args = [self.trial_type == BOTH]
        self.probe_rc.flip = True
        self.probe_rc.keypress_listener.key_map = self.keymap
        self.probe_rc.keypress_listener.interrupts = True
        self.probe_rc.audio_listener.interrupts = True

        # Configure bandit response collector
        self.bandit_rc.terminate_after = [2000, TK_MS]
        self.bandit_rc.display_callback = self.bandit_callback
        self.bandit_rc.flip = True
        self.bandit_rc.keypress_listener.key_map = self.keymap
        self.bandit_rc.keypress_listener.interrupts = True
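        # A vocal response on a bandit-only trial counts as a wrong response
        # type (flagged in trial() as 'vocal_on_bandit'), so only then is the
        # audio listener allowed to interrupt collection.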
        if self.trial_type == BANDIT and not P.ignore_vocal_for_bandits:
            self.bandit_rc.audio_listener.interrupts = True
        else:
            self.bandit_rc.audio_listener.interrupts = False

    def trial_prep(self):

        # Reset error flag
        self.targets_shown = False
        self.err = None

        # If probed trial, establish location of probe (default: left box)
        self.probe_loc = self.right_box_loc if self.probe_location == RIGHT else self.left_box_loc

        if self.high_value_location == LEFT:
            self.left_bandit.fill = self.high_value_color
            self.right_bandit.fill = self.low_value_color
            self.low_value_location = RIGHT
        else:
            self.left_bandit.fill = self.low_value_color
            self.right_bandit.fill = self.high_value_color
            self.low_value_location = LEFT
        self.left_bandit.render()
        self.right_bandit.render()

        # Randomly choose the cue-offset target-onset asynchrony (COTOA) for this trial
        self.cotoa = self.random_interval(self.cotoa_min, self.cotoa_max)

        # Add timecourse of events to EventManager
        events = [[1000, 'cue_on']]
        events.append([events[-1][0] + 200, 'cue_off'])
        events.append([events[-1][0] + 200, 'cueback_off'])
        events.append([events[-2][0] + self.cotoa,
                       'target_on'])  # either probe or bandits
        if self.trial_type in [BANDIT, BOTH]:
            events.append([events[-1][0] + 500,
                           'nogo_end'])  # should reduce to 500 or less
        for e in events:
            self.evm.register_ticket(ET(e[1], e[0]))
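        # Onsets accumulate from trial start: cue_on at 1000 ms, cue_off at
        # 1200 ms, cueback_off at 1400 ms, target_on at 1200 ms + cotoa
        # (i.e. measured from cue offset), and, on bandit trials, nogo_end
        # 500 ms after target onset.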

        # Perform drift correct on EyeLink before trial start
        self.el.drift_correct()

    def trial(self):

        if P.development_mode:
            trial_info = (
                "\ntrial_type: '{0}', high_val_loc: '{1}', probe_loc: '{2}', "
                "cue_loc: '{3}', winning_bandit: '{4}'")
            print(
                trial_info.format(self.trial_type, self.high_value_location,
                                  self.probe_location, self.cue_location,
                                  self.winning_bandit))

        while self.evm.before('target_on', True) and not self.err:

            self.confirm_fixation()
            self.present_neutral_boxes()

            if self.evm.between('cue_on', 'cue_off'):
                if self.cue_location in [LEFT, DOUBLE]:
                    blit(self.thick_rect, 5, self.left_box_loc)
                if self.cue_location in [RIGHT, DOUBLE]:
                    blit(self.thick_rect, 5, self.right_box_loc)
            elif self.evm.between('cue_off', 'cueback_off'):
                blit(self.star_cueback, 5, P.screen_c)

            flip()

        self.targets_shown = True  # after bandits or probe shown, don't recycle trial on user error
        if self.trial_type in [BANDIT, BOTH] and not self.err:
            while self.evm.before('nogo_end') and not self.err:
                if key_pressed():
                    self.show_error_message('too_soon')
                    self.err = "early_response"
                    break
                self.confirm_fixation()
                self.bandit_callback(before_go=True)
                flip()

        #  PROBE RESPONSE PERIOD
        if self.trial_type in [PROBE, BOTH] and not self.err:
            self.probe_rc.collect()
            if not self.err:
                if len(self.probe_rc.keypress_listener.responses):
                    self.show_error_message('wrong_response')
                    self.err = 'keypress_on_probe'
                elif len(self.probe_rc.audio_listener.responses) == 0:
                    self.show_error_message('probe_timeout')
                    if self.probe_rc.audio_listener.stream_error:
                        self.err = 'microphone_error'
                    else:
                        self.err = 'probe_timeout'

        #  BANDIT RESPONSE PERIOD
        if self.trial_type in [BANDIT, BOTH] and not self.err:
            self.bandit_rc.collect()
            if self.trial_type == BANDIT and not P.ignore_vocal_for_bandits:
                if len(self.bandit_rc.audio_listener.responses):
                    self.show_error_message('wrong_response')
                    self.err = 'vocal_on_bandit'

        # Retrieve collected response data before logging to database
        if self.err:
            bandit_choice, bandit_rt, reward = ['NA', 'NA', 'NA']
            probe_rt = 'NA'
        else:
            self.err = 'NA'
            # Retrieve responses from ResponseCollector(s) and record data
            if self.trial_type in [BANDIT, BOTH]:
                bandit_choice = self.bandit_rc.keypress_listener.response(
                    value=True, rt=False)
                bandit_rt = self.bandit_rc.keypress_listener.response(
                    value=False, rt=True)
                if bandit_rt == TIMEOUT:
                    self.show_error_message('bandit_timeout')
                    reward = 'NA'
                else:
                    # determine bandit payout (reward) and display feedback to participant
                    reward = self.feedback(bandit_choice)
            else:
                bandit_choice, bandit_rt, reward = ['NA', 'NA', 'NA']

            if self.trial_type in [PROBE, BOTH]:
                probe_rt = self.probe_rc.audio_listener.response(value=False,
                                                                 rt=True)
            else:
                probe_rt = 'NA'

        # Clear any remaining stimuli from screen before trial end
        clear()

        return {
            "block_num":
            P.block_number,
            "trial_num":
            P.trial_number,
            "trial_type":
            self.trial_type,
            "cue_loc":
            self.cue_location,
            "cotoa":
            self.cotoa,
            "high_value_col":
            self.high_value_color[:3] if self.trial_type != PROBE else "NA",
            "low_value_col":
            self.low_value_color[:3] if self.trial_type != PROBE else "NA",
            "high_value_loc":
            self.high_value_location if self.trial_type != PROBE else "NA",
            "winning_bandit":
            self.winning_bandit if self.trial_type != PROBE else "NA",
            "bandit_choice":
            bandit_choice,
            "bandit_rt":
            bandit_rt,
            "reward":
            reward,
            "probe_loc":
            self.probe_location if self.trial_type != BANDIT else "NA",
            "probe_rt":
            probe_rt,
            "err":
            self.err
        }

    def trial_clean_up(self):

        # Clear responses from response collectors before next trial
        self.probe_rc.audio_listener.reset()
        self.probe_rc.keypress_listener.reset()
        self.bandit_rc.audio_listener.reset()
        self.bandit_rc.keypress_listener.reset()

    def clean_up(self):
        pass

    def feedback(self, response):
        if self.winning_bandit == HIGH:
            winning_bandit_loc = self.high_value_location
        else:
            winning_bandit_loc = self.low_value_location

        if response == winning_bandit_loc:
            points = self.bandit_payout(value=self.winning_bandit)
            msg = message("You won {0} points!".format(points),
                          "score up",
                          blit_txt=False)
        else:
            points = self.penalty  # -5
            msg = message("You lost 5 points!", "score down", blit_txt=False)
        self.total_score += points
        feedback = [points, msg]

        feedback_exposure = CountDown(self.feedback_exposure_period)
        while feedback_exposure.counting():
            ui_request()
            fill()
            blit(feedback[1], location=P.screen_c, registration=5)
            flip()

        return feedback[0]

    def bandit_payout(self, value):
        mean = self.high_payout_baseline if value == HIGH else self.low_payout_baseline
        # sample from normal distribution with sd of 1 and round to nearest int
        return int(random.gauss(mean, 1) + 0.5)
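        # (int(x + 0.5) rounds half-up to the nearest integer for non-negative
        #  samples; with these means and sd = 1, negative draws are vanishingly
        #  rare, so this behaves like ordinary rounding.)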

    def confirm_fixation(self):
        if not self.el.within_boundary('fixation', EL_GAZE_POS):
            self.show_error_message('fixation')
            if self.targets_shown:
                self.err = 'left_fixation'
            else:
                raise TrialException('gaze left fixation')  # recycle trial

    def show_error_message(self, msg_key):
        fill()
        blit(self.err_msgs[msg_key], location=P.screen_c, registration=5)
        flip()
        any_key()

    def random_interval(self, lower, upper):
        # utility function to generate random time intervals with a given range
        # that are multiples of the current refresh rate (e.g. 16.7ms for a 60Hz monitor)
        min_flips = int(round(lower / P.refresh_time))
        max_flips = int(round(upper / P.refresh_time))
        return random.choice(range(min_flips, max_flips + 1)) * P.refresh_time

    def present_neutral_boxes(self):
        fill()
        blit(self.star, 5, P.screen_c)
        blit(self.neutral_box, 5, self.left_box_loc)
        blit(self.neutral_box, 5, self.right_box_loc)

    def bandit_callback(self, before_go=False):
        fill()
        blit(self.star if before_go else self.star_muted, 5, P.screen_c)
        blit(self.left_bandit, 5, self.left_box_loc)
        blit(self.right_bandit, 5, self.right_box_loc)

    def probe_callback(self, mixed=False):
        self.confirm_fixation()
        if mixed:
            self.bandit_callback(True)
        else:
            self.present_neutral_boxes()

        probe_loc = self.right_box_loc if self.probe_location == RIGHT else self.left_box_loc
        blit(self.probe, 5, probe_loc)
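
As a quick illustration of the refresh-locking idea behind random_interval() above, here is a minimal standalone sketch. It assumes a 60 Hz display (one refresh is roughly 16.67 ms) rather than reading P.refresh_time from KLibs, and the function name is invented for the example:

import random

REFRESH_MS = 1000.0 / 60.0  # assumed 60 Hz display; the experiment uses P.refresh_time instead


def refresh_locked_interval(lower_ms, upper_ms):
    # Round both bounds to a whole number of refreshes, pick one count at
    # random, and convert back to ms so the delay lands on a frame boundary.
    min_flips = int(round(lower_ms / REFRESH_MS))
    max_flips = int(round(upper_ms / REFRESH_MS))
    return random.randint(min_flips, max_flips) * REFRESH_MS


# e.g. refresh_locked_interval(700, 1000) returns one of
# 42 * 16.67 ms (~700 ms) ... 60 * 16.67 ms (~1000 ms)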
Example 11
    def setup(self):

        # Stimulus sizes
        thick_rect_border = deg_to_px(0.5)
        thin_rect_border = deg_to_px(0.1)
        star_size = deg_to_px(0.6)
        star_thickness = deg_to_px(0.1)
        square_size = deg_to_px(3)
        large_text_size = 0.65

        # Stimulus drawbjects
        self.thick_rect = Rectangle(
            square_size, stroke=[thick_rect_border, WHITE, STROKE_CENTER])
        self.thin_rect = Rectangle(
            square_size, stroke=[thin_rect_border, WHITE, STROKE_CENTER])
        self.neutral_box = self.thin_rect.render()

        self.star = Asterisk(star_size, star_thickness, fill=WHITE)
        self.star_cueback = Asterisk(star_size * 2,
                                     star_thickness * 2,
                                     fill=WHITE)

        self.go = FixationCross(star_size, star_thickness, fill=BLACK)
        self.go.render()
        self.nogo = FixationCross(star_size,
                                  star_thickness,
                                  fill=BLACK,
                                  rotation=45)
        self.nogo.render()

        self.left_bandit = Ellipse(int(0.75 * square_size))
        self.right_bandit = Ellipse(int(0.75 * square_size))
        self.probe = Ellipse(int(0.75 * square_size))

        # Layout
        box_offset = deg_to_px(8.0)
        self.left_box_loc = (P.screen_c[0] - box_offset, P.screen_c[1])
        self.right_box_loc = (P.screen_c[0] + box_offset, P.screen_c[1])

        # Set cotoa
        self.cotoa = 800  # ms

        self.feedback_exposure_period = 1.25  # sec

        # Bandit payout variables
        self.high_payout_baseline = 12
        self.low_payout_baseline = 8
        self.total_score = None
        self.penalty = -5

        # Generate colours from colour wheel
        self.target_colours = [const_lum[0], const_lum[120], const_lum[240]]
        random.shuffle(self.target_colours)

        # Assign to bandits & neutral probe
        self.high_value_colour = self.target_colours[0]
        self.low_value_colour = self.target_colours[1]
        self.neutral_value_colour = self.target_colours[2]

        # EyeLink Boundaries
        fix_bounds = [P.screen_c, square_size / 2]
        self.el.add_boundary('fixation', fix_bounds, CIRCLE_BOUNDARY)

        # Initialize response collectors
        self.probe_rc = ResponseCollector(uses=RC_KEYPRESS)
        self.bandit_rc = ResponseCollector(uses=RC_KEYPRESS)

        # Initialize ResponseCollector keymaps
        self.bandit_keymap = KeyMap(
            'bandit_response',  # Name
            ['z', '/'],  # UI labels
            ["left", "right"],  # Data labels
            [sdl2.SDLK_z, sdl2.SDLK_SLASH]  # SDL2 Keysyms
        )
        self.probe_keymap = KeyMap('probe_response', ['spacebar'], ["pressed"],
                                   [sdl2.SDLK_SPACE])

        # Experiment Messages
        self.txtm.add_style("payout", large_text_size, WHITE)
        self.txtm.add_style("timeout", large_text_size, WHITE)

        err_txt = "{0}\n\nPress any key to continue."
        lost_fixation_txt = err_txt.format(
            "Eyes moved! Please keep your eyes on the asterisk.")
        probe_timeout_txt = err_txt.format(
            "No response detected! Please respond as fast and as accurately as possible."
        )
        bandit_timeout_txt = err_txt.format("Bandit selection timed out!")
        response_on_nogo_txt = err_txt.format(
            "\'nogo\' signal (x) presented\nPlease only respond when you see "
            "the \'go\' signal (+).")

        self.err_msgs = {
            'fixation':
            message(lost_fixation_txt, align='center', blit_txt=False),
            'probe_timeout':
            message(probe_timeout_txt,
                    'timeout',
                    align='center',
                    blit_txt=False),
            'bandit_timeout':
            message(bandit_timeout_txt,
                    'timeout',
                    align='center',
                    blit_txt=False),
            'response_on_nogo':
            message(response_on_nogo_txt, align='center', blit_txt=False)
        }

        self.rest_break_txt = err_txt.format(
            "Whew! That was tricky, eh? Go ahead and take a break before continuing."
        )
        self.end_of_block_txt = "You're done with the first task! Please buzz the researcher to let them know!"

        # Insert bandit block
        if P.run_practice_blocks:
            self.insert_practice_block(1, trial_counts=P.trials_bandit_block)
Example 12
class IOR_Reward_V2(klibs.Experiment):
    def setup(self):

        # Stimulus sizes
        thick_rect_border = deg_to_px(0.5)
        thin_rect_border = deg_to_px(0.1)
        star_size = deg_to_px(0.6)
        star_thickness = deg_to_px(0.1)
        square_size = deg_to_px(3)
        large_text_size = 0.65

        # Stimulus drawbjects
        self.thick_rect = Rectangle(
            square_size, stroke=[thick_rect_border, WHITE, STROKE_CENTER])
        self.thin_rect = Rectangle(
            square_size, stroke=[thin_rect_border, WHITE, STROKE_CENTER])
        self.neutral_box = self.thin_rect.render()

        self.star = Asterisk(star_size, star_thickness, fill=WHITE)
        self.star_cueback = Asterisk(star_size * 2,
                                     star_thickness * 2,
                                     fill=WHITE)

        self.go = FixationCross(star_size, star_thickness, fill=BLACK)
        self.go.render()
        self.nogo = FixationCross(star_size,
                                  star_thickness,
                                  fill=BLACK,
                                  rotation=45)
        self.nogo.render()

        self.left_bandit = Ellipse(int(0.75 * square_size))
        self.right_bandit = Ellipse(int(0.75 * square_size))
        self.probe = Ellipse(int(0.75 * square_size))

        # Layout
        box_offset = deg_to_px(8.0)
        self.left_box_loc = (P.screen_c[0] - box_offset, P.screen_c[1])
        self.right_box_loc = (P.screen_c[0] + box_offset, P.screen_c[1])

        # Set cotoa
        self.cotoa = 800  # ms

        self.feedback_exposure_period = 1.25  # sec

        # Bandit payout variables
        self.high_payout_baseline = 12
        self.low_payout_baseline = 8
        self.total_score = None
        self.penalty = -5

        # Generate colours from colour wheel
        self.target_colours = [const_lum[0], const_lum[120], const_lum[240]]
        random.shuffle(self.target_colours)

        # Assign to bandits & neutral probe
        self.high_value_colour = self.target_colours[0]
        self.low_value_colour = self.target_colours[1]
        self.neutral_value_colour = self.target_colours[2]

        # EyeLink Boundaries
        fix_bounds = [P.screen_c, square_size / 2]
        self.el.add_boundary('fixation', fix_bounds, CIRCLE_BOUNDARY)

        # Initialize response collectors
        self.probe_rc = ResponseCollector(uses=RC_KEYPRESS)
        self.bandit_rc = ResponseCollector(uses=RC_KEYPRESS)

        # Initialize ResponseCollector keymaps
        self.bandit_keymap = KeyMap(
            'bandit_response',  # Name
            ['z', '/'],  # UI labels
            ["left", "right"],  # Data labels
            [sdl2.SDLK_z, sdl2.SDLK_SLASH]  # SDL2 Keysyms
        )
        self.probe_keymap = KeyMap('probe_response', ['spacebar'], ["pressed"],
                                   [sdl2.SDLK_SPACE])

        # Experiment Messages
        self.txtm.add_style("payout", large_text_size, WHITE)
        self.txtm.add_style("timeout", large_text_size, WHITE)

        err_txt = "{0}\n\nPress any key to continue."
        lost_fixation_txt = err_txt.format(
            "Eyes moved! Please keep your eyes on the asterisk.")
        probe_timeout_txt = err_txt.format(
            "No response detected! Please respond as fast and as accurately as possible."
        )
        bandit_timeout_txt = err_txt.format("Bandit selection timed out!")
        response_on_nogo_txt = err_txt.format(
            "\'nogo\' signal (x) presented\nPlease only respond when you see "
            "the \'go\' signal (+).")

        self.err_msgs = {
            'fixation':
            message(lost_fixation_txt, align='center', blit_txt=False),
            'probe_timeout':
            message(probe_timeout_txt,
                    'timeout',
                    align='center',
                    blit_txt=False),
            'bandit_timeout':
            message(bandit_timeout_txt,
                    'timeout',
                    align='center',
                    blit_txt=False),
            'response_on_nogo':
            message(response_on_nogo_txt, align='center', blit_txt=False)
        }

        self.rest_break_txt = err_txt.format(
            "Whew! That was tricky, eh? Go ahead and take a break before continuing."
        )
        self.end_of_block_txt = "You're done with the first task! Please buzz the researcher to let them know!"

        # Insert bandit block
        if P.run_practice_blocks:
            self.insert_practice_block(1, trial_counts=P.trials_bandit_block)

    def block(self):

        # Block type defaults to probe trials, overridden in practice block(s)
        self.block_type = PROBE

        # Show total score following completion of bandit task
        if self.total_score:
            fill()
            score_txt = "Total block score: {0} points!".format(
                self.total_score)
            msg = message(score_txt, 'timeout', blit_txt=False)
            blit(msg, 5, P.screen_c)
            flip()
            any_key()

        self.total_score = 0  # Reset score once presented

        # Bandit task
        if P.practicing:
            self.block_type = BANDIT
            # Initialize selection counters
            self.times_selected_high = 0
            self.times_selected_low = 0

        # End of block messaging
        if not P.practicing:
            self.block_type = PROBE
            fill()
            msg = message(self.end_of_block_txt, blit_txt=False)
            blit(msg, 5, P.screen_c)
            flip()
            any_key()

    def setup_response_collector(self):

        # Configure probe response collector
        self.probe_rc.terminate_after = [1500, TK_MS]
        self.probe_rc.display_callback = self.probe_callback
        self.probe_rc.flip = True
        self.probe_rc.keypress_listener.key_map = self.probe_keymap
        self.probe_rc.keypress_listener.interrupts = True

        # Configure bandit response collector
        self.bandit_rc.terminate_after = [1500, TK_MS]
        self.bandit_rc.display_callback = self.bandit_callback
        self.bandit_rc.flip = True
        self.bandit_rc.keypress_listener.key_map = self.bandit_keymap
        self.bandit_rc.keypress_listener.interrupts = True

    def trial_prep(self):
        # Reset error flag
        self.targets_shown = False
        self.err = None

        # BANDIT PROPERTIES
        if P.practicing:
            self.cotoa = 'NA'
            # Establish location & colour of bandits

            if self.high_value_location == LEFT:
                self.left_bandit.fill = self.high_value_colour
                self.right_bandit.fill = self.low_value_colour
                self.low_value_location = RIGHT
            else:
                self.left_bandit.fill = self.low_value_colour
                self.right_bandit.fill = self.high_value_colour
                self.low_value_location = LEFT
            self.left_bandit.render()
            self.right_bandit.render()

        # PROBE PROPERTIES
        else:
            # Rest breaks
            if P.trial_number % (P.trials_per_block / P.breaks_per_block) == 0:
                if P.trial_number < P.trials_per_block:
                    fill()
                    msg = message(self.rest_break_txt,
                                  'timeout',
                                  blit_txt=False)
                    blit(msg, 5, P.screen_c)
                    flip()
                    any_key()

            # Establish & assign probe location
            self.probe_loc = self.right_box_loc if self.probe_location == RIGHT else self.left_box_loc
            # go/nogo signal always presented w/probe
            self.go_nogo_loc = self.probe_loc

            # Establish & assign probe colour
            if self.probe_colour == HIGH:
                self.probe.fill = self.high_value_colour
            elif self.probe_colour == LOW:
                self.probe.fill = self.low_value_colour
            else:
                self.probe.fill = self.neutral_value_colour
            self.probe.render()

        # Add timecourse of events to EventManager
        if P.practicing:  # Bandit trials
            events = [[1000, 'target_on']]
        else:  # Probe trials
            events = [[1000, 'cue_on']]
            events.append([events[-1][0] + 200, 'cue_off'])
            events.append([events[-1][0] + 200, 'cueback_off'])
            events.append([events[-2][0] + 800, 'target_on'])
        for e in events:
            self.evm.register_ticket(ET(e[1], e[0]))

        # Perform drift correct on Eyelink before trial start
        self.el.drift_correct()

    def trial(self):

        # BANDIT TRIAL
        if P.practicing:
            cotoa, probe_rt = ['NA', 'NA']  # Don't occur in bandit blocks

            # Present placeholders
            while self.evm.before('target_on', True) and not self.err:
                self.confirm_fixation()
                self.present_neutral_boxes()
                flip()

            # BANDIT RESPONSE PERIOD
            self.targets_shown = True  # After bandits shown, don't recycle trial

            # Present bandits and listen for response
            self.bandit_rc.collect()

            # If wrong response made
            if self.err:
                bandit_choice, bandit_rt, reward = ['NA', 'NA', 'NA']
            else:
                self.err = 'NA'
                # Retrieve responses from ResponseCollector(s) & record data
                bandit_choice = self.bandit_rc.keypress_listener.response(
                    value=True, rt=False)
                bandit_rt = self.bandit_rc.keypress_listener.response(
                    value=False, rt=True)

                if bandit_rt == TIMEOUT:
                    self.show_error_message('bandit_timeout')
                    reward = 'NA'
                else:
                    # Determine bandit payout & display
                    reward = self.feedback(bandit_choice)

        # PROBE TRIAL
        else:
            bandit_choice, bandit_rt, reward = ['NA', 'NA', 'NA']  # Don't occur in probe trials

            # Present placeholders & confirm fixation
            while self.evm.before('target_on', True):
                self.confirm_fixation()
                self.present_neutral_boxes()

                # Present cue
                if self.evm.between('cue_on', 'cue_off'):
                    if self.cue_location == LEFT:
                        blit(self.thick_rect, 5, self.left_box_loc)
                    else:
                        blit(self.thick_rect, 5, self.right_box_loc)
                # Present cueback
                elif self.evm.between('cue_off', 'cueback_off'):
                    blit(self.star_cueback, 5, P.screen_c)

                flip()

            # PROBE RESPONSE PERIOD
            self.targets_shown = True  # After probe shown, don't recycle trial
            # Present probes & listen for response
            self.probe_rc.collect()

            # If 'go' trial, check for response
            if self.go_no_go == GO:
                # If wrong response made
                if self.err:
                    probe_rt = 'NA'
                # If correct response OR timeout
                else:
                    self.err = 'NA'
                    probe_rt = self.probe_rc.keypress_listener.response(
                        value=False, rt=True)
                    if probe_rt == TIMEOUT:
                        self.show_error_message('probe_timeout')
                        probe_rt = 'NA'
            # Similarly, for 'nogo' trials
            else:
                probe_rt = 'NA'
                # If response made, penalize
                if len(self.probe_rc.keypress_listener.responses):
                    self.show_error_message('response_on_nogo')
                    self.err = 'response_on_nogo'
                # If no response, continue as normal
                else:
                    self.err = 'NA'

        # Clear remaining stimuli from screen before trial end
        clear()

        # Return trial data
        return {
            "block_num": P.block_number,
            "trial_num": P.trial_number,
            "block_type": "BANDIT" if P.practicing else "PROBE",
            "high_value_col":
            self.high_value_colour[:3] if P.practicing else 'NA',
            "high_value_loc":
            self.high_value_location if P.practicing else 'NA',
            "low_value_col":
            self.low_value_colour[:3] if P.practicing else 'NA',
            "low_value_loc": self.low_value_location if P.practicing else 'NA',
            "winning_trial": self.winning_trial if P.practicing else 'NA',
            "bandit_selected": self.bandit_selected if P.practicing else 'NA',
            "bandit_rt": bandit_rt,
            "reward": reward,
            "cue_loc": self.cue_location if not P.practicing else 'NA',
            "cotoa": self.cotoa if not P.practicing else 'NA',
            "probe_loc": self.probe_location if not P.practicing else 'NA',
            "probe_col": self.probe_colour if not P.practicing else 'NA',
            "go_no_go": self.go_no_go if not P.practicing else 'NA',
            "probe_rt": probe_rt,
            "err": self.err
        }

    def trial_clean_up(self):
        # Clear responses from responses collectors before next trial
        self.probe_rc.keypress_listener.reset()
        self.bandit_rc.keypress_listener.reset()

    def clean_up(self):
        # Let Ss know when experiment is over
        self.all_done_text = "You're all done! Now I get to take a break.\nPlease buzz the researcher to let them know you're done!"
        fill()
        msg = message(self.all_done_text, 'timeout', blit_txt=False)
        blit(msg, 5, P.screen_c)
        flip()
        any_key()

    # Determines & presents feedback
    def feedback(self, response):

        # Keep count of bandit choices
        if response == self.high_value_location:
            self.bandit_selected = HIGH
            self.times_selected_high += 1
            # Occasionally probe participant learning
            if self.times_selected_high in [5, 10, 15]:
                self.query_learning(HIGH)
        else:
            self.bandit_selected = LOW
            self.times_selected_low += 1
            if self.times_selected_low in [5, 10, 15]:
                self.query_learning(LOW)

        # Determine payout
        if self.winning_trial == YES:
            points = self.bandit_payout(value=self.bandit_selected)
            msg = message("You won {0} points!".format(points),
                          "payout",
                          blit_txt=False)
        else:
            points = self.penalty  # -5
            msg = message("You lost 5 points!", "payout", blit_txt=False)

        # Running point total
        self.total_score += points
        feedback = [points, msg]

        # Present payout
        feedback_exposure = CountDown(self.feedback_exposure_period)
        while feedback_exposure.counting():
            ui_request()
            fill()
            blit(feedback[1], location=P.screen_c, registration=5)
            flip()

        return feedback[0]

    # Calculates bandit payout
    def bandit_payout(self, value):
        mean = self.high_payout_baseline if value == HIGH else self.low_payout_baseline
        # sample from normal distribution with sd of 1 and round to nearest int
        return int(random.gauss(mean, 1) + 0.5)

    # Confirms whether Ss are fixating
    def confirm_fixation(self):
        if not self.el.within_boundary('fixation', EL_GAZE_POS):
            self.show_error_message('fixation')
            if self.targets_shown:
                self.err = 'left_fixation'
            else:
                raise TrialException('gaze left fixation')  # recycle trial

    # Presents error messages
    def show_error_message(self, msg_key):
        fill()
        blit(self.err_msgs[msg_key], location=P.screen_c, registration=5)
        flip()
        any_key()

    # Utility function to generate random time intervals with a given range
    # that are multiples of the current refresh rate (e.g. 16.7ms for a 60Hz monitor)
    def random_interval(self, lower, upper):
        min_flips = int(round(lower / P.refresh_time))
        max_flips = int(round(upper / P.refresh_time))
        return random.choice(range(min_flips, max_flips + 1)) * P.refresh_time

    # Presents the neutral placeholder boxes
    def present_neutral_boxes(self):
        fill()
        blit(self.star, 5, P.screen_c)
        blit(self.neutral_box, 5, self.left_box_loc)
        blit(self.neutral_box, 5, self.right_box_loc)

    # Presents bandits
    def bandit_callback(self, before_go=False):
        self.confirm_fixation()
        self.present_neutral_boxes()

        blit(self.left_bandit, 5, self.left_box_loc)
        blit(self.right_bandit, 5, self.right_box_loc)

    # Presents probes
    def probe_callback(self):
        self.confirm_fixation()
        self.present_neutral_boxes()

        # Present probe & go/nogo stimulus
        if self.go_no_go == GO:
            blit(self.probe, 5, self.probe_loc)
            blit(self.go, 5, self.probe_loc)
        else:
            blit(self.probe, 5, self.probe_loc)
            blit(self.nogo, 5, self.probe_loc)

    # Assesses learning by asking Ss their anticipated trial earnings
    def query_learning(self, bandit):
        if bandit == HIGH:
            anticipated_reward_high = query(user_queries.experimental[0])
            anticipated_reward_survey = {
                'participant_id': P.participant_id,
                'anticipated_reward_high': anticipated_reward_high,
                'anticipated_reward_low': "NA"
            }
        else:
            anticipated_reward_low = query(user_queries.experimental[1])
            anticipated_reward_survey = {
                'participant_id': P.participant_id,
                'anticipated_reward_high': "NA",
                'anticipated_reward_low': anticipated_reward_low
            }

        self.db.insert(anticipated_reward_survey, table='surveys')
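
The rest-break check in trial_prep() above pauses the probe block every (P.trials_per_block / P.breaks_per_block) trials, skipping the break that would fall after the block's final trial. A small sketch of that cadence with hypothetical parameter values (the real ones live in the project's params file):

# Hypothetical parameters, for illustration only.
trials_per_block = 120
breaks_per_block = 3

break_trials = []
for trial_number in range(1, trials_per_block + 1):
    # Same test as trial_prep(): a break every trials_per_block / breaks_per_block
    # trials, except at the very end of the block.
    if trial_number % (trials_per_block / breaks_per_block) == 0:
        if trial_number < trials_per_block:
            break_trials.append(trial_number)

print(break_trials)  # -> [40, 80]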
Example 13
    def setup(self):
        # ---------------------------------- #
        # 		  Setup Stimuli      		 #
        # ---------------------------------- #

        # Set stimulus sizes
        line_length = deg_to_px(2)
        line_thickness = deg_to_px(0.5)
        thick_rect_border = deg_to_px(0.7)
        thin_rect_border = deg_to_px(0.3)
        fix_size = deg_to_px(0.6)
        fix_thickness = deg_to_px(0.1)
        square_size = deg_to_px(3)
        large_text_size = deg_to_px(0.65)

        # Stimulus layout
        box_offset = deg_to_px(8.0)
        self.left_box_loc = (P.screen_c[0] - box_offset, P.screen_c[1])
        self.right_box_loc = (P.screen_c[0] + box_offset, P.screen_c[1])

        # Generate target colouring
        # Select target colours from randomly rotated colourwheel
        # ensuring those selected are unique and equidistant
        self.color_selecter = kld.ColorWheel(diameter=1,
                                             rotation=random.randrange(0, 360))
        self.target_colours = []
        for i in (0, 120, 240):
            self.target_colours.append(self.color_selecter.color_from_angle(i))

        # Assign colours to payout valences
        random.shuffle(self.target_colours)
        self.high_value_colour = self.target_colours[0]
        self.low_value_colour = self.target_colours[1]
        self.neutral_value_colour = self.target_colours[2]

        # Initialize drawbjects
        self.thick_rect = kld.Rectangle(
            square_size, stroke=[thick_rect_border, WHITE, STROKE_CENTER])
        self.thin_rect = kld.Rectangle(
            square_size, stroke=[thin_rect_border, WHITE, STROKE_CENTER])

        self.high_val_rect = kld.Rectangle(
            square_size,
            stroke=[thin_rect_border, self.high_value_colour, STROKE_CENTER])
        self.low_val_rect = kld.Rectangle(
            square_size,
            stroke=[thin_rect_border, self.low_value_colour, STROKE_CENTER])

        self.fixation = kld.Asterisk(fix_size, fix_thickness, fill=WHITE)
        self.fix_cueback = kld.Asterisk(fix_size * 2,
                                        fix_thickness * 2,
                                        fill=WHITE)

        self.go = kld.FixationCross(fix_size, fix_thickness, fill=BLACK)
        self.nogo = kld.FixationCross(fix_size,
                                      fix_thickness,
                                      fill=BLACK,
                                      rotation=45)

        self.flat_line = kld.Rectangle(line_length, line_thickness, fill=BLACK)
        self.tilt_line = kld.Rectangle(line_length,
                                       line_thickness,
                                       fill=BLACK,
                                       rotation=45)

        self.probe = kld.Ellipse(int(0.75 * square_size))

        # ---------------------------------- #
        #   Setup other experiment factors   #
        # ---------------------------------- #

        # COTOA = Cue-Offset|Target-Onset Asynchrony
        self.cotoa = 800  # ms
        self.feedback_exposure_period = 1.25  # sec

        # Training block payout variables
        self.high_payout_baseline = 12
        self.low_payout_baseline = 8
        self.total_score = None
        self.penalty = -5

        # ---------------------------------- #
        #     Setup Response Collectors      #
        # ---------------------------------- #

        # Initialize response collectors
        self.probe_rc = ResponseCollector(uses=RC_KEYPRESS)
        self.training_rc = ResponseCollector(uses=RC_KEYPRESS)

        # Initialize ResponseCollector keymaps
        self.training_keymap = KeyMap(
            'training_response',  # Name
            ['z', '/'],  # UI labels
            ["left", "right"],  # Data labels
            [sdl2.SDLK_z, sdl2.SDLK_SLASH]  # SDL2 Keysyms
        )
        self.probe_keymap = KeyMap('probe_response', ['spacebar'], ["pressed"],
                                   [sdl2.SDLK_SPACE])

        # --------------------------------- #
        #     Setup Experiment Messages     #
        # --------------------------------- #

        # Make default font size larger
        self.txtm.add_style('myText', large_text_size, WHITE)

        err_txt = "{0}\n\nPress any key to continue."
        lost_fixation_txt = err_txt.format(
            "Eyes moved! Please keep your eyes on the asterisk.")
        probe_timeout_txt = err_txt.format(
            "No response detected! Please respond as fast and as accurately as possible."
        )
        training_timeout_txt = err_txt.format("Line response timed out!")
        response_on_nogo_txt = err_txt.format(
            "\'nogo\' signal (x) presented\nPlease only respond when you see "
            "the \'go\' signal (+).")

        self.err_msgs = {
            'fixation':
            message(lost_fixation_txt,
                    'myText',
                    align='center',
                    blit_txt=False),
            'probe_timeout':
            message(probe_timeout_txt,
                    'myText',
                    align='center',
                    blit_txt=False),
            'training_timeout':
            message(training_timeout_txt,
                    'myText',
                    align='center',
                    blit_txt=False),
            'response_on_nogo':
            message(response_on_nogo_txt,
                    'myText',
                    align='center',
                    blit_txt=False)
        }

        self.rest_break_txt = err_txt.format(
            "Whew! That was tricky, eh? Go ahead and take a break before continuing."
        )
        self.end_of_block_txt = "You're done with the first task! Please buzz the researcher to let them know!"

        # -------------------------------- #
        #     Setup Eyelink boundaries     #
        # -------------------------------- #
        fix_bounds = [P.screen_c, square_size / 2]
        self.el.add_boundary('fixation', fix_bounds, CIRCLE_BOUNDARY)

        # --------------------------------- #
        # Insert training block (line task) #
        # --------------------------------- #
        if P.run_practice_blocks:
            self.insert_practice_block(1, trial_counts=P.trials_training_block)
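
setup() above draws its three target colours from a randomly rotated colour wheel at angles of 0, 120 and 240 degrees, so the high-value, low-value and neutral colours are maximally separated in hue but unpredictable across participants. kld.ColorWheel handles the wheel inside KLibs; the snippet below is only a rough standalone sketch of the same idea using plain HSV hues via colorsys (no constant-luminance correction), not the library's implementation:

import random
import colorsys


def equidistant_colours(n=3):
    # Rotate the wheel by a random offset, then sample n hues spaced
    # 360 / n degrees apart and convert each to an RGBA tuple.
    rotation = random.randrange(0, 360)
    colours = []
    for i in range(n):
        hue = ((rotation + i * 360.0 / n) % 360) / 360.0
        r, g, b = colorsys.hsv_to_rgb(hue, 1.0, 1.0)
        colours.append((int(r * 255), int(g * 255), int(b * 255), 255))
    return colours


high_value, low_value, neutral = equidistant_colours()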
Example 14
class IOReward_E2(klibs.Experiment):
    def setup(self):
        # ---------------------------------- #
        # 		  Setup Stimuli      		 #
        # ---------------------------------- #

        # Set stimulus sizes
        line_length = deg_to_px(2)
        line_thickness = deg_to_px(0.5)
        thick_rect_border = deg_to_px(0.7)
        thin_rect_border = deg_to_px(0.3)
        fix_size = deg_to_px(0.6)
        fix_thickness = deg_to_px(0.1)
        square_size = deg_to_px(3)
        large_text_size = deg_to_px(0.65)

        # Stimulus layout
        box_offset = deg_to_px(8.0)
        self.left_box_loc = (P.screen_c[0] - box_offset, P.screen_c[1])
        self.right_box_loc = (P.screen_c[0] + box_offset, P.screen_c[1])

        # Generate target colouring
        # Select target colours from randomly rotated colourwheel
        # ensuring those selected are unique and equidistant
        self.color_selecter = kld.ColorWheel(diameter=1,
                                             rotation=random.randrange(0, 360))
        self.target_colours = []
        for i in (0, 120, 240):
            self.target_colours.append(self.color_selecter.color_from_angle(i))

        # Assign colours to payout valences
        random.shuffle(self.target_colours)
        self.high_value_colour = self.target_colours[0]
        self.low_value_colour = self.target_colours[1]
        self.neutral_value_colour = self.target_colours[2]

        # Initialize drawbjects
        self.thick_rect = kld.Rectangle(
            square_size, stroke=[thick_rect_border, WHITE, STROKE_CENTER])
        self.thin_rect = kld.Rectangle(
            square_size, stroke=[thin_rect_border, WHITE, STROKE_CENTER])

        self.high_val_rect = kld.Rectangle(
            square_size,
            stroke=[thin_rect_border, self.high_value_colour, STROKE_CENTER])
        self.low_val_rect = kld.Rectangle(
            square_size,
            stroke=[thin_rect_border, self.low_value_colour, STROKE_CENTER])

        self.fixation = kld.Asterisk(fix_size, fix_thickness, fill=WHITE)
        self.fix_cueback = kld.Asterisk(fix_size * 2,
                                        fix_thickness * 2,
                                        fill=WHITE)

        self.go = kld.FixationCross(fix_size, fix_thickness, fill=BLACK)
        self.nogo = kld.FixationCross(fix_size,
                                      fix_thickness,
                                      fill=BLACK,
                                      rotation=45)

        self.flat_line = kld.Rectangle(line_length, line_thickness, fill=BLACK)
        self.tilt_line = kld.Rectangle(line_length,
                                       line_thickness,
                                       fill=BLACK,
                                       rotation=45)

        self.probe = kld.Ellipse(int(0.75 * square_size))

        # ---------------------------------- #
        #   Setup other experiment factors   #
        # ---------------------------------- #

        # COTOA = Cue-Offset|Target-Onset Asynchrony
        self.cotoa = 800  # ms
        self.feedback_exposure_period = 1.25  # sec

        # Training block payout variables
        self.high_payout_baseline = 12
        self.low_payout_baseline = 8
        self.total_score = None
        self.penalty = -5

        # ---------------------------------- #
        #     Setup Response Collectors      #
        # ---------------------------------- #

        # Initialize response collectors
        self.probe_rc = ResponseCollector(uses=RC_KEYPRESS)
        self.training_rc = ResponseCollector(uses=RC_KEYPRESS)

        # Initialize ResponseCollector keymaps
        self.training_keymap = KeyMap(
            'training_response',  # Name
            ['z', '/'],  # UI labels
            ["left", "right"],  # Data labels
            [sdl2.SDLK_z, sdl2.SDLK_SLASH]  # SDL2 Keysyms
        )
        self.probe_keymap = KeyMap('probe_response', ['spacebar'], ["pressed"],
                                   [sdl2.SDLK_SPACE])

        # --------------------------------- #
        #     Setup Experiment Messages     #
        # --------------------------------- #

        # Make default font size larger
        self.txtm.add_style('myText', large_text_size, WHITE)

        err_txt = "{0}\n\nPress any key to continue."
        lost_fixation_txt = err_txt.format(
            "Eyes moved! Please keep your eyes on the asterisk.")
        probe_timeout_txt = err_txt.format(
            "No response detected! Please respond as fast and as accurately as possible."
        )
        training_timeout_txt = err_txt.format("Line response timed out!")
        response_on_nogo_txt = err_txt.format(
            "\'nogo\' signal (x) presented\nPlease only respond when you see "
            "the \'go\' signal (+).")

        self.err_msgs = {
            'fixation':
            message(lost_fixation_txt,
                    'myText',
                    align='center',
                    blit_txt=False),
            'probe_timeout':
            message(probe_timeout_txt,
                    'myText',
                    align='center',
                    blit_txt=False),
            'training_timeout':
            message(training_timeout_txt,
                    'myText',
                    align='center',
                    blit_txt=False),
            'response_on_nogo':
            message(response_on_nogo_txt,
                    'myText',
                    align='center',
                    blit_txt=False)
        }

        self.rest_break_txt = err_txt.format(
            "Whew! That was tricky, eh? Go ahead and take a break before continuing."
        )
        self.end_of_block_txt = "You're done with the first task! Please buzz the researcher to let them know!"

        # -------------------------------- #
        #     Setup Eyelink boundaries     #
        # -------------------------------- #
        fix_bounds = [P.screen_c, square_size / 2]
        self.el.add_boundary('fixation', fix_bounds, CIRCLE_BOUNDARY)

        # --------------------------------- #
        # Insert training block (line task) #
        # --------------------------------- #
        if P.run_practice_blocks:
            self.insert_practice_block(1, trial_counts=P.trials_training_block)

    def block(self):
        # Show total score following completion of training task
        if self.total_score:
            fill()
            score_txt = "Total block score: {0} points!".format(
                self.total_score)
            msg = message(score_txt, 'myText', blit_txt=False)
            blit(msg, 5, P.screen_c)
            flip()
            any_key()

        self.total_score = 0  # Reset score once presented

        # Training task
        if P.practicing:
            self.block_type = TRAINING
            # Initialize selection counters
            self.high_value_trial_count = 0
            self.low_value_trial_count = 0

        # End of block messaging
        if not P.practicing:
            self.block_type = PROBE
            fill()
            msg = message(self.end_of_block_txt, 'myText', blit_txt=False)
            blit(msg, 5, P.screen_c)
            flip()
            any_key()

    def setup_response_collector(self):
        # Configure probe response collector
        self.probe_rc.terminate_after = [1500,
                                         TK_MS]  # Waits 1.5s for response
        self.probe_rc.display_callback = self.probe_callback  # Continuously called while the collection loop runs
        self.probe_rc.flip = True
        self.probe_rc.keypress_listener.key_map = self.probe_keymap
        self.probe_rc.keypress_listener.interrupts = True  # Abort collection loop once response made

        # Configure training response collector
        self.training_rc.terminate_after = [1500, TK_MS]
        self.training_rc.display_callback = self.training_callback
        self.training_rc.flip = True
        self.training_rc.keypress_listener.key_map = self.training_keymap
        self.training_rc.keypress_listener.interrupts = True

    def trial_prep(self):
        # Reset error flag
        self.targets_shown = False
        self.err = None

        # TRAINING PROPERTIES
        if P.practicing:
            self.cotoa = 'NA'  # No cue, so no COTOA
            # Establish location of target line
            if self.tilt_line_location == LEFT:
                self.tilt_line_loc = self.left_box_loc
                self.flat_line_loc = self.right_box_loc
            else:
                self.tilt_line_loc = self.right_box_loc
                self.flat_line_loc = self.left_box_loc

        # PROBE PROPERTIES
        else:
            # Rest breaks
            if P.trial_number % (P.trials_per_block / P.breaks_per_block) == 0:
                if P.trial_number < P.trials_per_block:
                    fill()
                    msg = message(self.rest_break_txt,
                                  'myText',
                                  blit_txt=False)
                    blit(msg, 5, P.screen_c)
                    flip()
                    any_key()

            # Establish & assign probe location
            self.probe_loc = self.right_box_loc if self.probe_location == RIGHT else self.left_box_loc
            # go/nogo signal always presented w/probe
            self.go_nogo_loc = self.probe_loc

            # Establish & assign probe colour
            if self.probe_colour == HIGH:
                self.probe.fill = self.high_value_colour
            elif self.probe_colour == LOW:
                self.probe.fill = self.low_value_colour
            else:
                self.probe.fill = self.neutral_value_colour

        # Add timecourse of events to EventManager
        if P.practicing:  # training trials
            events = [[1000, 'target_on']]
        else:  # Probe trials
            events = [[1000, 'cue_on']]
            events.append([events[-1][0] + 200, 'cue_off'])
            events.append([events[-1][0] + 200, 'cueback_off'])
            events.append([events[-2][0] + 800, 'target_on'])
        for e in events:
            self.evm.register_ticket(ET(e[1], e[0]))

        # Perform drift correct on Eyelink before trial start
        self.el.drift_correct()

    def trial(self):
        # TRAINING TRIAL
        if P.practicing:
            cotoa, probe_rt = ['NA', 'NA']  # Don't occur in training blocks

            # Present placeholders
            while self.evm.before('target_on', True) and not self.err:
                self.confirm_fixation()
                self.present_boxes()
                flip()

            # TRAINING RESPONSE PERIOD
            self.targets_shown = True  # After trainings shown, don't recycle trial
            self.training_rc.collect()  # Present the line stimuli and listen for response

            # If wrong response made
            if self.err:
                line_response, line_rt, reward = ['NA', 'NA', 'NA']
            else:
                self.err = 'NA'
                # Retrieve responses from ResponseCollector & record data
                line_response, line_rt = self.training_rc.keypress_listener.response()

                if line_rt == TIMEOUT:
                    reward = 'NA'
                    self.show_error_message('training_timeout')
                else:
                    reward = self.feedback(
                        line_response)  # Determine training payout & display

        # PROBE TRIAL
        else:
            line_response, line_rt, reward = ['NA', 'NA', 'NA']  # Don't occur in probe trials

            # Present placeholders & confirm fixation
            while self.evm.before('target_on', True):
                self.confirm_fixation()
                self.present_boxes()

                # Present cue
                if self.evm.between('cue_on', 'cue_off'):
                    if self.cue_location == LEFT:
                        blit(self.thick_rect, 5, self.left_box_loc)
                    else:
                        blit(self.thick_rect, 5, self.right_box_loc)
                # Present cueback
                elif self.evm.between('cue_off', 'cueback_off'):
                    blit(self.fix_cueback, 5, P.screen_c)

                flip()

            # PROBE RESPONSE PERIOD
            self.targets_shown = True  # After probe shown, don't recycle trial
            self.probe_rc.collect()  # Present probes & listen for response

            # If 'go' trial, check for response
            if self.go_no_go == GO:

                if self.err:  # If wrong response made
                    probe_rt = 'NA'

                else:  # If correct response OR timeout
                    self.err = 'NA'
                    probe_rt = self.probe_rc.keypress_listener.response(
                        value=False, rt=True)

                    if probe_rt == TIMEOUT:
                        probe_rt = 'NA'
                        self.show_error_message('probe_timeout')

            # Similarly, for 'nogo' trials
            else:
                probe_rt = 'NA'
                # If response made, penalize
                if len(self.probe_rc.keypress_listener.responses):
                    self.show_error_message('response_on_nogo')
                    self.err = 'response_on_nogo'
                # If no response, continue as normal
                else:
                    self.err = 'NA'

        # Clear remaining stimuli from screen before trial end
        clear()

        # Return trial data
        return {
            "block_num": P.block_number,
            "trial_num": P.trial_number,
            "block_type": "training" if P.practicing else "probe",
            "high_value_col":
            self.high_value_colour[:3] if P.practicing else 'NA',
            "tilt_line_loc": self.tilt_line_loc if P.practicing else 'NA',
            "low_value_col":
            self.low_value_colour[:3] if P.practicing else 'NA',
            "flat_line_loc": self.flat_line_loc if P.practicing else 'NA',
            "winning_trial": self.winning_trial if P.practicing else 'NA',
            "line_response": line_response,
            "line_rt": line_rt,
            "reward": reward,
            "cue_loc": self.cue_location if not P.practicing else 'NA',
            "cotoa": self.cotoa if not P.practicing else 'NA',
            "probe_loc": self.probe_location if not P.practicing else 'NA',
            "probe_col": self.probe_colour if not P.practicing else 'NA',
            "go_no_go": self.go_no_go if not P.practicing else 'NA',
            "probe_rt": probe_rt,
            "err": self.err
        }
        # Clear remaining stimuli from screen
        clear()

    def trial_clean_up(self):
        # Clear responses from responses collectors before next trial
        self.probe_rc.keypress_listener.reset()
        self.training_rc.keypress_listener.reset()

    def clean_up(self):
        # Let Ss know when experiment is over
        self.all_done_text = "You're all done! Now I get to take a break.\nPlease buzz the researcher to let them know you're done!"
        fill()
        msg = message(self.all_done_text, 'myText', blit_txt=False)
        blit(msg, 5, P.screen_c)
        flip()
        any_key()

    # ------------------------------------ #
    # Experiment specific helper functions #
    # ------------------------------------ #

    def feedback(self, response):
        correct_response = response == self.tilt_line_location

        # Every 5 trials of a particular payoff, ask anticipated earnings
        if self.potential_payoff == HIGH:
            self.high_value_trial_count += 1
            if self.high_value_trial_count in [5, 10, 15]:
                self.query_learning(HIGH)
        else:
            self.low_value_trial_count += 1
            if self.low_value_trial_count in [5, 10, 15]:
                self.query_learning(LOW)

        # Determine payout for trial
        if correct_response and (self.winning_trial == YES):
            points = self.payout()
            msg = message("You won {0} points!".format(points),
                          'myText',
                          blit_txt=False)
        else:
            points = self.penalty
            msg = message("You lost 5 points!", 'myText', blit_txt=False)
        # Keep tally of score
        self.total_score += points
        feedback = [points, msg]

        # Present score
        feedback_exposure = CountDown(self.feedback_exposure_period)
        fill()
        blit(feedback[1], location=P.screen_c, registration=5)
        flip()
        while feedback_exposure.counting():
            ui_request()

        return feedback[0]

    def payout(self):  # Calculates payout
        mean = self.high_payout_baseline if self.potential_payoff == HIGH else self.low_payout_baseline
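        # random.gauss(mean, 1) draws from a normal distribution with SD = 1 point;
        # adding 0.5 before int() rounds the (positive) sample to the nearest integer.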

        return int(random.gauss(mean, 1) + 0.5)

    def confirm_fixation(self):
        if not self.el.within_boundary('fixation', EL_GAZE_POS):
            self.show_error_message('fixation')
            if self.targets_shown:
                self.err = 'left_fixation'
            else:
                raise TrialException('gaze left fixation')  # recycle trial

    def show_error_message(self, msg_key):
        fill()
        blit(self.err_msgs[msg_key], location=P.screen_c, registration=5)
        flip()
        any_key()

    def present_boxes(self):
        fill()
        blit(self.fixation, 5, P.screen_c)
        if P.practicing:  # During training, box colour indicates potential payout
            if self.potential_payoff == HIGH:
                blit(self.high_val_rect, 5, self.left_box_loc)
                blit(self.high_val_rect, 5, self.right_box_loc)
            else:
                blit(self.low_val_rect, 5, self.left_box_loc)
                blit(self.low_val_rect, 5, self.right_box_loc)
        else:  # Probe trials, where boxes are white.
            blit(self.thin_rect, 5, self.left_box_loc)
            blit(self.thin_rect, 5, self.right_box_loc)

    # Presents the target (tilted) & non-target (flat) lines during training trials
    def training_callback(self):
        self.confirm_fixation()
        self.present_boxes()

        blit(self.tilt_line, 5, self.tilt_line_loc)
        blit(self.flat_line, 5, self.flat_line_loc)

    # Presents probes & go/no-go signal
    def probe_callback(self):
        self.confirm_fixation()
        self.present_boxes()

        # Present probe & go/nogo stimulus
        if self.go_no_go == GO:
            blit(self.probe, 5, self.probe_loc)
            blit(self.go, 5, self.probe_loc)
        else:
            blit(self.probe, 5, self.probe_loc)
            blit(self.nogo, 5, self.probe_loc)

    # Learning probe. Asks participants their anticipated earnings
    def query_learning(self, potential_payoff):
        if potential_payoff == HIGH:
            anticipated_reward_high = query(user_queries.experimental[0])
            anticipated_reward_survey = {
                'participant_id': P.participant_id,
                'anticipated_reward_high': anticipated_reward_high,
                'anticipated_reward_low': "NA"
            }
        else:
            anticipated_reward_low = query(user_queries.experimental[1])
            anticipated_reward_survey = {
                'participant_id': P.participant_id,
                'anticipated_reward_high': "NA",
                'anticipated_reward_low': anticipated_reward_low
            }

        self.db.insert(anticipated_reward_survey, table='surveys')
Example No. 15
class TOJ_Motion(klibs.Experiment):

    def __init__(self, *args, **kwargs):
        super(TOJ_Motion, self).__init__(*args, **kwargs)

    def setup(self):
        
        # Stimulus Sizes
        
        target_size = deg_to_px(3.0)
        diamond_size = sqrt(target_size**2/2.0)
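        # side length of a square whose diagonal equals target_size, so the 45-degree
        # rotated diamond spans target_size from corner to corner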
        probe_diameter = deg_to_px(1.0)
        wheel_diameter = deg_to_px(16.0)
        
        # Stimulus Drawbjects
        
        self.line_a = kld.Rectangle(width=P.screen_x/2, height=2, fill=WHITE)
        self.line_b = kld.Rectangle(width=P.screen_x/2, height=2, fill=BLACK)
        self.diamond_a = kld.Rectangle(diamond_size, fill=WHITE, rotation=45)
        self.diamond_b = kld.Rectangle(diamond_size, fill=BLACK, rotation=45)
        self.probe = kld.Ellipse(probe_diameter, fill=None)
        self.wheel = kld.ColorWheel(wheel_diameter)
        
        self.line_a.render()
        self.line_b.render()
        self.diamond_a.render()
        self.diamond_b.render()
        
        # Layout
        
        self.left_x = P.screen_x/4
        self.right_x = 3*P.screen_x/4
        self.probe_positions = {
            "left": (self.left_x, P.screen_c[1]),
            "right": (self.right_x, P.screen_c[1])
        }
    
        self.start_baseline = P.screen_y/4
        self.end_offset = deg_to_px(5.0)
        self.left_start = [self.left_x, P.screen_y/4]
        self.right_start = [self.right_x, 3*P.screen_y/4]
        self.left_end = [self.left_x, P.screen_c[1]+self.end_offset]
        self.right_end = [self.right_x, P.screen_c[1]-self.end_offset]
        
        # Timing
        
        self.motion_duration = 1.5 # seconds
        
        # Experiment Messages
        
        if not P.condition:
            P.condition = P.default_condition
            
        toj_string = "Which shape {0} {1}?\n(White = 8   Black = 2)"
        stationary_string = toj_string.format("appeared", P.condition)
        motion_string = toj_string.format("touched the line", P.condition)
        self.toj_prompts = {
            'stationary': message(stationary_string, align="center", blit_txt=False),
            'motion': message(motion_string, align="center", blit_txt=False)
        }
        
        # Initialize ResponseCollector keymaps

        if P.use_numpad:
            keysyms = [sdl2.SDLK_KP_8, sdl2.SDLK_KP_2]
        else:
            keysyms = [sdl2.SDLK_8, sdl2.SDLK_2]

        self.toj_keymap = KeyMap(
            "toj_responses", # Name
            ['8', '2'], # UI labels
            ['white', 'black'], # Data labels
            keysyms # SDL2 Keysyms
        )

        # Initialize second ResponseCollector object for colour wheel responses

        self.wheel_rc = ResponseCollector()
        
        # Generate practice blocks
        
        default_soas = self.trial_factory.exp_factors['t1_t2_soa']
        toj_soas = [soa for soa in default_soas if soa!=0.0]
        toj_only = {"t1_t2_soa": toj_soas}
        probe_only = {"t1_t2_soa": [0.0]}
        
        if P.run_practice_blocks:
            num = P.trials_per_practice_block
            self.insert_practice_block(1, trial_counts=num, factor_mask=toj_only)
            self.insert_practice_block((2,4), trial_counts=num, factor_mask=probe_only)
        self.trial_factory.dump()


    def block(self):
        
        # Determine probe bias for block and generate list of probe locs accordingly
        
        if P.block_number > 3:
            self.probe_bias = "left"
            nonbiased_loc = "right"
        else:
            self.probe_bias = "right"
            nonbiased_loc = "left"
        loc_list = [self.probe_bias]*4 + [nonbiased_loc]
        self.probe_locs = loc_list * int(P.trials_per_block/float(len(loc_list))+1)
        random.shuffle(self.probe_locs)
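        # (a 4:1 bias toward the biased side; the list is over-generated beyond
        #  trials_per_block and popped from in trial_prep, so leftover entries go unused)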
        
        # At the start of each block, display a start message (block progress if an experimental
        # block, a practice message if a practice block). After a brief delay, a keypress starts
        # the first trial.
        
        probe_msg = (
            "During this block, the colour target will appear more often on the "
            "{0} and less often on the {1}.".format(self.probe_bias, nonbiased_loc)
        )
        header = "Block {0} of {1}".format(P.block_number, P.blocks_per_experiment)
        if P.practicing:
            header = "This is a practice block. ({0})".format(header)
        if P.block_number > 1:
            msg = message(header+"\n"+probe_msg, align="center", blit_txt=False)
        else:
            msg = message(header, blit_txt=False)

        message_interval = CountDown(1)
        while message_interval.counting():
            ui_request() # Allow quitting during loop
            fill()
            blit(msg, 8, (P.screen_c[0], P.screen_y*0.4))
            flip()
        flush()
        
        fill()
        blit(msg, 8, (P.screen_c[0], P.screen_y*0.4))
        message("Press any key to start.", registration=5, location=[P.screen_c[0], P.screen_y*0.6])
        flip()
        any_key()

        # When running participants, send a halfway-point notification to the researcher via Slack

        if not P.development_mode:
            if P.block_number == 3: # If participant is halfway done
                slack_message("Halfway done ({0}/{1})".format(P.block_number, P.blocks_per_experiment))
        
        
    def setup_response_collector(self):
        
        # Determine what type of trial it is before setting up response collector
        
        self.probe_trial = self.t1_t2_soa == 0
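        # (an SOA of 0 marks a colour-probe trial; non-zero SOAs are TOJ trials,
        #  consistent with the probe_only / toj_only practice masks defined in setup())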
        
        # Set up Response Collector to get keypress responses

        self.rc.uses(RC_KEYPRESS)
        self.rc.terminate_after = [3500, TK_MS] # response period times out after 3500ms
        self.rc.keypress_listener.interrupts = True

        if self.probe_trial:
            self.wheel_rc.uses(RC_COLORSELECT)
            self.wheel_rc.terminate_after = [10, TK_S]
            self.wheel_rc.display_callback = self.wheel_callback
            self.wheel_rc.color_listener.interrupts = True
            self.wheel_rc.color_listener.color_response = True
            self.wheel_rc.color_listener.set_wheel(self.wheel)
            self.wheel_rc.color_listener.set_target(self.probe)
        else:
            self.rc.keypress_listener.key_map = self.toj_keymap
            self.rc.display_callback = None


    def trial_prep(self):
        
        # Determine the starting locations of the two target shapes
    
        if self.t1_location == "left":
            t1_x = self.left_x
            t2_x = self.right_x
        else:
            t1_x = self.right_x
            t2_x = self.left_x
            
        # Set shapes for t1 and t2
        
        if self.t1_shape == "a":
            self.t1 = self.diamond_a
            self.t2 = self.diamond_b
            self.t1_line = self.line_b
            self.t2_line = self.line_a
        else:
            self.t1 = self.diamond_b
            self.t2 = self.diamond_a
            self.t1_line = self.line_a
            self.t2_line = self.line_b
        
        self.t1_pos = (t1_x, P.screen_c[1])
        self.t2_pos = (t2_x, P.screen_c[1])

        # Initialize start/end positions and animation paths
        
        if self.toj_type == "motion":
            self.start_offset = P.screen_y/4 + deg_to_px(random.uniform(-2, 2))
            end_offset = deg_to_px(5.0)
            
            if self.upper_target == "t2":
                self.start_offset *= -1
                end_offset *= -1
                self.t1_reg = 8
                self.t2_reg = 2
            else:
                self.t1_reg = 2
                self.t2_reg = 8
                
            t1_start = (t1_x, P.screen_c[1]-self.start_offset)
            t1_end = (t1_x, P.screen_c[1]+end_offset)
            self.t1_path = Animation(t1_start, t1_end, self.motion_duration)
            
            t2_offset = self.t1_path.motion_per_ms[1] * self.t1_t2_soa
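            # Shifting t2's start & end points by t2_offset keeps its speed equal to t1's
            # but makes it reach its line one SOA after t1 does (assuming a positive SOA).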
            t2_start = (t2_x, P.screen_c[1]+self.start_offset+t2_offset)
            t2_end = (t2_x, P.screen_c[1]-end_offset+t2_offset)
            self.t2_path = Animation(t2_start, t2_end, self.motion_duration)
            
            # Debug output: log the computed motion parameters & animation endpoints
            print(self.upper_target, self.t1_location, self.t1_t2_soa, self.start_offset, t2_offset)
            print("t1 start: {0} end: {1}".format(t1_start, t1_end))
            print("t2 start: {0} end: {1}".format(t2_start, t2_end))

        # Set up colour probe and colour wheel

        self.wheel.rotation = random.randrange(0, 360, 1)
        self.wheel.render()
        self.probe.fill = self.wheel.color_from_angle(random.randrange(0, 360, 1))
        self.probe.render()
        
        # Determine the probe location for the trial
        
        self.probe_location = self.probe_locs.pop()
        self.probe_pos = self.probe_positions[self.probe_location]
        
        # Calculate when t1 onset and t2 off are going to be based on motion
        
        if self.toj_type == "motion":
            self.t1_on = (1/self.t1_path.motion_per_ms[1])*self.start_offset
            self.t2_off = (1/self.t1_path.motion_per_ms[1])*(self.start_offset+end_offset)
        else:
            self.t1_on = self.random_interval(700, 1200)
            self.t2_off = self.t1_on + self.t1_t2_soa-1 + 300
        
        # Add timecourse of events to EventManager
        
        events = []
        events.append([self.t1_on, 't1_on'])
        events.append([events[-1][0] + 200, 'probe_off'])
        events.append([events[-2][0] + self.t1_t2_soa-1, 't2_on'])
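        # (t2_on is timed relative to t1_on, which is events[-2] at this point,
        #  rather than to probe_off)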
        events.append([self.t2_off, 't2_off'])
        for e in events:
            self.evm.register_ticket(ET(e[1], e[0]))

    def trial(self):
        
        # Display the stimuli in sequence (which stimuli and in which sequence is
        # determined above in trial_prep).
        
        while self.evm.before('t2_off'):
            ui_request()
            
            fill()
            blit(self.t1_line, 5, self.t1_pos)
            blit(self.t2_line, 5, self.t2_pos)
            
            if self.toj_type == "motion":
                blit(self.t1, self.t1_reg, self.t1_path.position)
                blit(self.t2, self.t2_reg, self.t2_path.position)
            else:
                if self.evm.after('t1_on'):
                    blit(self.t1, 5, self.t1_pos)
                if self.evm.after('t2_on'):
                    blit(self.t2, 5, self.t2_pos)
                
            if self.probe_trial and self.evm.between('t1_on', 'probe_off'):
                blit(self.probe, 5, self.probe_pos)
                
            flip()
        
        # After 2nd target is off, collect either TOJ response or colour wheel response
        # depending on trial type.
        
        if self.probe_trial:
            self.wheel_rc.collect()
        else:
            self.toj_callback()
            self.rc.collect()
        
        # Parse collected response data before writing to the database
        
        if not self.probe_trial:
            toj_response = self.rc.keypress_listener.response(rt=False)
            toj_rt = self.rc.keypress_listener.response(value=False)
            if toj_response == 'NO_RESPONSE':
                toj_response, toj_rt = ['NA', 'timeout']
            response_col, angle_err, wheel_rt = ['NA', 'NA', 'NA']
        else:
            try:
                angle_err, response_col = self.wheel_rc.color_listener.response(rt=False)
                wheel_rt = self.wheel_rc.color_listener.response(value=False)
                response_col = list(response_col) # to be consistent with probe_col
            except ValueError:
                # if no response made (timeout), only one value will be returned
                angle_err, response_col, wheel_rt = ['NA', 'NA', 'timeout']
            toj_response, toj_rt = ['NA', 'NA']

        return {
            "block_num": P.block_number,
            "trial_num": P.trial_number,
            "toj_condition": P.condition,
            "trial_type": 'probe' if self.probe_trial else 'toj',
            "target_type": self.toj_type,
            "t1_location": self.t1_location,
            "t1_type": "white" if self.t1_shape == "a" else "black",
            "upper_target": self.upper_target if self.toj_type == "motion" else 'NA',
            "t1_t2_soa": self.t1_t2_soa,
            "toj_response": toj_response,
            "toj_rt": toj_rt,
            "probe_loc": self.probe_location if self.probe_trial else 'NA',
            "probe_col": str(self.probe.fill_color[:3]) if self.probe_trial else 'NA',
            "response_col": str(response_col[:3]),
            "angle_err": angle_err,
            "wheel_rt": wheel_rt
        }

    def trial_clean_up(self):
        self.wheel_rc.reset()

    def clean_up(self):
        pass
    
    
    def toj_callback(self):
        fill()
        blit(self.toj_prompts[self.toj_type], 5, P.screen_c)
        flip()
    
    def wheel_callback(self):
        fill()
        blit(self.wheel, location=P.screen_c, registration=5)
        flip()
        
    def random_interval(self, lower, upper, refresh=None):

        # utility function to generate random interval respecting the refresh rate of the monitor,
        # since stimuli can only be changed at refreshes. Converts upper/lower bounds in ms to
        # flips per the refresh rate, selects random number of flips, then converts flips back to ms.

        if not refresh:
            refresh = P.refresh_rate
        time_per_flip = 1000.0/refresh
        min_flips = int(round(lower/time_per_flip))
        max_flips = int(round(upper/time_per_flip))
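        # e.g. on a 60 Hz display (time_per_flip ~= 16.67 ms), lower=700 and upper=1200
        # give min_flips=42 and max_flips=72, so the returned interval is one of
        # 700.0, 716.7, ..., 1183.3 ms (the upper bound is exclusive)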
        return random.choice(range(min_flips, max_flips, 1)) * time_per_flip
Example No. 16
class NP_IOR(klibs.Experiment):
    def setup(self):

        box_size = deg_to_px(1.8)
        box_thick = deg_to_px(0.05)
        stim_size = deg_to_px(0.95)
        stim_thick = deg_to_px(0.1)
        fix_size = deg_to_px(1)
        fix_offset = deg_to_px(2.8)

        box_stroke = [box_thick, WHITE, STROKE_CENTER]

        self.txtm.add_style(label='greentext', color=GREEN)

        self.target = kld.Annulus(diameter=stim_size,
                                  thickness=stim_thick,
                                  fill=WHITE)
        self.distractor = kld.FixationCross(size=stim_size,
                                            thickness=stim_thick,
                                            fill=WHITE)

        # Set the rotation of placeholder boxes & fixation to match that of the display (diamond, square)
        rotate = 45 if P.condition == 'diamond' else 0
        self.placeholder = kld.Rectangle(width=box_size,
                                         stroke=box_stroke,
                                         rotation=rotate)
        self.fixation = kld.Asterisk(size=fix_size,
                                     thickness=stim_thick,
                                     fill=WHITE,
                                     rotation=rotate,
                                     spokes=8)

        # Which locations are labelled 'far' or 'near' depends on the arrangement of the display;
        # 'near' locations are the midpoints between adjacent 'far' locations
        if P.condition == 'square':
            self.far_locs = {
                1: [
                    point_pos(P.screen_c,
                              amplitude=fix_offset,
                              angle=315,
                              clockwise=True), 'NorthEast'
                ],
                2: [
                    point_pos(P.screen_c,
                              amplitude=fix_offset,
                              angle=45,
                              clockwise=True), 'SouthEast'
                ],
                3: [
                    point_pos(P.screen_c,
                              amplitude=fix_offset,
                              angle=135,
                              clockwise=True), 'SouthWest'
                ],
                4: [
                    point_pos(P.screen_c,
                              amplitude=fix_offset,
                              angle=225,
                              clockwise=True), 'NorthWest'
                ]
            }

            self.near_locs = {
                5: [midpoint(self.far_locs[4][0], self.far_locs[1][0]), 'North'],
                6: [midpoint(self.far_locs[1][0], self.far_locs[2][0]), 'East'],
                7: [midpoint(self.far_locs[3][0], self.far_locs[2][0]), 'South'],
                8: [midpoint(self.far_locs[3][0], self.far_locs[4][0]), 'West']
            }

        else:  # if P.condition == 'diamond'
            self.far_locs = {
                1: [
                    point_pos(P.screen_c,
                              amplitude=fix_offset,
                              angle=270,
                              clockwise=True), 'North'
                ],
                2: [
                    point_pos(P.screen_c,
                              amplitude=fix_offset,
                              angle=0,
                              clockwise=True), 'East'
                ],
                3: [
                    point_pos(P.screen_c,
                              amplitude=fix_offset,
                              angle=90,
                              clockwise=True), 'South'
                ],
                4: [
                    point_pos(P.screen_c,
                              amplitude=fix_offset,
                              angle=180,
                              clockwise=True), 'West'
                ]
            }

            self.near_locs = {
                5: [
                    midpoint(self.far_locs[4][0], self.far_locs[1][0]),
                    'NorthWest'
                ],
                6: [
                    midpoint(self.far_locs[1][0], self.far_locs[2][0]),
                    'NorthEast'
                ],
                7: [
                    midpoint(self.far_locs[3][0], self.far_locs[2][0]),
                    'SouthEast'
                ],
                8: [
                    midpoint(self.far_locs[3][0], self.far_locs[4][0]),
                    'SouthWest'
                ]
            }

        if not P.development_mode:
            self.keymap = KeyMap('directional_response', [
                'North', 'NorthEast', 'East', 'SouthEast', 'South',
                'SouthWest', 'West', 'NorthWest'
            ], [
                'North', 'NorthEast', 'East', 'SouthEast', 'South',
                'SouthWest', 'West', 'NorthWest'
            ], [
                sdl2.SDLK_KP_8, sdl2.SDLK_KP_9, sdl2.SDLK_KP_6, sdl2.SDLK_KP_3,
                sdl2.SDLK_KP_2, sdl2.SDLK_KP_1, sdl2.SDLK_KP_4, sdl2.SDLK_KP_7
            ])

        else:  # Don't have a numpad myself, so I need an alternative when developing
            self.keymap = KeyMap('directional_response', [
                'North', 'NorthEast', 'East', 'SouthEast', 'South',
                'SouthWest', 'West', 'NorthWest'
            ], [
                'North', 'NorthEast', 'East', 'SouthEast', 'South',
                'SouthWest', 'West', 'NorthWest'
            ], [
                sdl2.SDLK_i, sdl2.SDLK_o, sdl2.SDLK_l, sdl2.SDLK_PERIOD,
                sdl2.SDLK_COMMA, sdl2.SDLK_m, sdl2.SDLK_j, sdl2.SDLK_u
            ])

        # Prime items always presented in far locations
        self.prime_locs = self.far_locs.copy()
        # Probe items can be far or near, determined conditionally
        self.probe_locs = dict(list(self.near_locs.items()) + list(self.far_locs.items()))

        # So, to get a practice block of 25, we first need to generate the full set of 2048
        # possible permutations, trim that down to the 288 legitimate permutations,
        # then trim that down to 25...
        self.insert_practice_block(1, 2048)

        # Because KLibs auto-generates trials for every combination of factors in ind_vars.py,
        # many 'vestigial' trials we don't want are generated; this sorts through the trial
        # list and removes them.
        for ind_b, block in enumerate(self.trial_factory.blocks):
            for trial in block:

                # targets & distractors cannot overlap within a given display
                if trial[1] == trial[2] or trial[3] == trial[4]:

                    self.trial_factory.blocks[ind_b].remove(trial)
                    block.i -= 1
                    block.length = len(block.trials)
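                    # (presumably, decrementing the block's index & recomputing its length
                    #  keeps the block's internal trial iterator in sync after the removal)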
                    continue
                # For 'near' trials, Ts & Ds cannot appear at 'far' locations
                if trial[0] == 'near':
                    if trial[3] < 5 or trial[4] < 5:

                        self.trial_factory.blocks[ind_b].remove(trial)
                        block.i -= 1
                        block.length = len(block.trials)

                # Conversely, cannot appear at 'near' locations on 'far' trials
                else:
                    if trial[3] > 4 or trial[4] > 4:

                        self.trial_factory.blocks[ind_b].remove(trial)
                        block.i -= 1
                        block.length = len(block.trials)

            # We only want 25 trials for practice, so this trims the block
            # to the appropriate length
            if ind_b == 0:
                for trial in block:
                    self.trial_factory.blocks[ind_b].remove(trial)
                    block.i -= 1
                    block.length = len(block.trials)

                    if block.length == 25:
                        break

        # Set to True once instructions are provided
        self.instructed = False

    def block(self):
        # Only present instructions the first time.
        if not self.instructed:
            self.instructed = True
            self.give_instructions()

        # Inform as to block progress
        if P.practicing:
            msg = message("PRACTICE ROUND\n\nPress '5' to begin...",
                          blit_txt=False)

        else:
            msg = message("TESTING ROUND\n\nPress '5' to begin...",
                          blit_txt=False)

        fill()
        blit(msg, location=P.screen_c, registration=5)
        flip()

        # Hangs until '5' key
        self.continue_on()

    def setup_response_collector(self):
        self.probe_rc = ResponseCollector(uses=RC_KEYPRESS)
        self.prime_rc = ResponseCollector(uses=RC_KEYPRESS)

        self.prime_rc.display_callback = self.present_filled_array
        self.prime_rc.display_kwargs = {'display': 'prime'}
        self.prime_rc.terminate_after = [5000, TK_MS]
        self.prime_rc.keypress_listener.interrupts = True
        self.prime_rc.keypress_listener.key_map = self.keymap

        self.probe_rc.display_callback = self.present_filled_array
        self.probe_rc.display_kwargs = {'display': 'probe'}
        self.probe_rc.terminate_after = [5000, TK_MS]
        self.probe_rc.keypress_listener.interrupts = True
        self.probe_rc.keypress_listener.key_map = self.keymap

    def trial_prep(self):
        # Grab locations (and their cardinal labels) for each T & D
        self.T_prime_loc = list(self.prime_locs[self.prime_target])
        self.D_prime_loc = list(self.prime_locs[self.prime_distractor])
        self.T_probe_loc = list(self.probe_locs[self.probe_target])
        self.D_probe_loc = list(self.probe_locs[self.probe_distractor])

        # Grab distance between each item pair
        self.T_prime_to_T_probe = line_segment_len(self.T_prime_loc[0],
                                                   self.T_probe_loc[0])
        self.T_prime_to_D_probe = line_segment_len(self.T_prime_loc[0],
                                                   self.D_probe_loc[0])
        self.D_prime_to_T_probe = line_segment_len(self.D_prime_loc[0],
                                                   self.T_probe_loc[0])
        self.D_prime_to_D_probe = line_segment_len(self.D_prime_loc[0],
                                                   self.D_probe_loc[0])

        # Once locations selected, determine which trial type this trial would fall under.
        self.trial_type = self.determine_trial_type()

        # Hide mouse cursor throughout trial
        hide_mouse_cursor()

        # Present fixation & start trial
        self.present_fixation()

    def trial(self):
        hide_mouse_cursor()

        # Begin with empty array...
        self.present_empty_array()

        smart_sleep(500)

        # 500ms later present prime array & record response
        self.prime_rc.collect()

        # If response, log, otherwise NA
        response_prime, rt_prime = 'NA', 'NA'

        if len(self.prime_rc.keypress_listener.response()):
            response_prime, rt_prime = self.prime_rc.keypress_listener.response()

        # Reset to empty array following response
        self.present_empty_array()

        smart_sleep(300)

        # 300ms later present probe array
        self.probe_rc.collect()

        response_probe, rt_probe = 'NA', 'NA'

        if len(self.probe_rc.keypress_listener.response()):
            response_probe, rt_probe = self.probe_rc.keypress_listener.response()

        # Determine accuracy of responses (i.e., whether target selected)
        prime_correct = response_prime == self.T_prime_loc[1]
        probe_correct = response_probe == self.T_probe_loc[1]

        # Present feedback on performance (mean RT for correct, 'WRONG' for incorrect)
        self.present_feedback(prime_correct, rt_prime, probe_correct, rt_probe)

        prime_choice, probe_choice = 'NA', 'NA'

        if response_prime == self.T_prime_loc[1]:
            prime_choice = 'target'
        elif response_prime == self.D_prime_loc[1]:
            prime_choice = 'distractor'
        else:
            prime_choice = "empty_cell"

        if response_probe == self.T_probe_loc[1]:
            probe_choice = 'target'
        elif response_probe == self.D_probe_loc[1]:
            probe_choice = 'distractor'
        else:
            probe_choice = "empty_cell"

        return {
            "block_num": P.block_number,
            "trial_num": P.trial_number,
            "practicing": str(P.practicing),
            "far_near": self.far_or_near,
            'trial_type': self.trial_type,
            'prime_rt': rt_prime,
            'probe_rt': rt_probe,
            'prime_correct': str(prime_correct),
            'probe_correct': str(probe_correct),
            't_prime_to_t_probe': self.T_prime_to_T_probe,
            't_prime_to_d_probe': self.T_prime_to_D_probe,
            'd_prime_to_t_probe': self.D_prime_to_T_probe,
            'd_prime_to_d_probe': self.D_prime_to_D_probe,
            'prime_choice': prime_choice,
            'probe_choice': probe_choice,
            'prime_response': response_prime,
            'probe_response': response_probe,
            't_prime_loc': self.T_prime_loc[1],
            'd_prime_loc': self.D_prime_loc[1],
            't_probe_loc': self.T_probe_loc[1],
            'd_probe_loc': self.D_probe_loc[1]
        }

    def trial_clean_up(self):

        # Provide break 1/2 through experimental block
        if P.trial_number == P.trials_per_block // 2:
            txt = "You're 1/2 through, take a break if you like\nand press '5' when you're ready to continue"
            msg = message(txt, blit_txt=False)

            fill()
            blit(msg, location=P.screen_c, registration=5)
            flip()

            while True:
                if key_pressed(key=sdl2.SDLK_KP_5):
                    break

    # When called, hangs until appropriate key is depressed
    def continue_on(self):
        while True:
            if not P.development_mode:
                if key_pressed(key=sdl2.SDLK_KP_5):
                    break

            else:
                if key_pressed(key=sdl2.SDLK_k):
                    break

    def present_fixation(self):
        fill()
        blit(self.fixation, location=P.screen_c, registration=5)
        flip()

        self.continue_on()

    def present_feedback(self, prime_correct, prime_rt, probe_correct,
                         probe_rt):

        prime_fb = int(prime_rt) if prime_correct else 'WRONG'
        probe_fb = int(probe_rt) if probe_correct else 'WRONG'

        fb_txt = "{0}\n{1}".format(prime_fb, probe_fb)

        fb = message(fb_txt, align='center', blit_txt=False)

        fill()
        blit(fb, location=P.screen_c, registration=5)
        flip()

        while True:
            if key_pressed(key=sdl2.SDLK_SPACE):
                break

    def present_empty_array(self):
        fill()
        for value in self.probe_locs.values():
            blit(self.placeholder, registration=5, location=value[0])
        blit(self.fixation, location=P.screen_c, registration=5)
        flip()

    def present_filled_array(self, display):
        fill()
        for value in self.probe_locs.values():
            blit(self.placeholder, registration=5, location=value[0])

        if display == 'prime':
            blit(self.target, registration=5, location=self.T_prime_loc[0])
            blit(self.distractor, registration=5, location=self.D_prime_loc[0])
        else:
            blit(self.target, registration=5, location=self.T_probe_loc[0])
            blit(self.distractor, registration=5, location=self.D_probe_loc[0])

        blit(self.fixation, location=P.screen_c, registration=5)
        flip()

    def determine_trial_type(self):

        if self.far_or_near == 'far':

            if (self.T_prime_loc, self.D_prime_loc) == (self.T_probe_loc,
                                                        self.D_probe_loc):
                return 'repeat'

            elif (self.T_prime_loc, self.D_prime_loc) == (self.D_probe_loc,
                                                          self.T_probe_loc):
                return 'switch'

            elif (self.T_prime_loc != self.T_probe_loc) and (self.D_prime_loc != self.D_probe_loc) and \
                 (self.T_prime_loc != self.D_probe_loc) and (self.D_prime_loc != self.T_probe_loc):
                return 'control-far'

            elif (self.T_prime_loc == self.T_probe_loc) and (
                    self.D_prime_loc != self.D_probe_loc):
                return 'T.to.T-far'

            elif (self.D_prime_loc == self.D_probe_loc) and (
                    self.T_prime_loc != self.T_probe_loc):
                return 'D.to.D-far'

            elif (self.D_probe_loc == self.T_prime_loc) and (
                    self.T_probe_loc != self.D_prime_loc):
                return 'D.to.T-far'

            elif (self.T_probe_loc == self.D_prime_loc) and (
                    self.D_probe_loc != self.T_prime_loc):
                return 'T.to.D-far'

            else:
                print "[FAR] - unanticipated display arrangement / trial type for trial {0}".format(
                    P.trial_number)
                return 'erroneous'

        else:

            if self.T_probe_loc[0] == midpoint(self.T_prime_loc[0],
                                               self.D_prime_loc[0]):
                return 'T.at.CoG-near'

            elif self.D_probe_loc[0] == midpoint(self.T_prime_loc[0],
                                                 self.D_prime_loc[0]):
                return 'D.at.CoG-near'

            else:
                return 'control-near'

    def give_instructions(self):
        button_map = {
            'North': message("8", align='center', blit_txt=False, style='greentext'),
            'East': message("6", align='center', blit_txt=False, style='greentext'),
            'South': message("2", align='center', blit_txt=False, style='greentext'),
            'West': message("4", align='center', blit_txt=False, style='greentext'),
            'NorthEast': message("9", align='center', blit_txt=False, style='greentext'),
            'NorthWest': message("7", align='center', blit_txt=False, style='greentext'),
            'SouthWest': message("1", align='center', blit_txt=False, style='greentext'),
            'SouthEast': message("3", align='center', blit_txt=False, style='greentext')
        }

        hide_mouse_cursor()

        txt = (
            "In this experiment, your task is to indicate the location of the target 'o'\n"
            "while ignoring the distractor '+'."
            "\n\n(press the '5' on the numpad to continue past each message)")

        instruction_msg = message(txt, align='center', blit_txt=False)

        fill()
        blit(instruction_msg, location=P.screen_c, registration=5)
        flip()

        self.continue_on()

        txt = (
            "Each trial will begin with a fixation cross, when you see this\n"
            "you may begin the trial by pressing the '5' key on the numpad.\n"
            "Shortly after which an array will appear")

        instruction_msg = message(txt, align='center', blit_txt=False)

        fill()
        blit(instruction_msg,
             location=(P.screen_c[0], int(P.screen_c[1] * 0.3)),
             registration=5)
        blit(self.fixation, location=P.screen_c, registration=5)
        flip()

        self.continue_on()

        fill()
        blit(instruction_msg,
             location=(P.screen_c[0], int(P.screen_c[1] * 0.3)),
             registration=5)
        for value in self.probe_locs.values():
            blit(self.placeholder, registration=5, location=value[0])
        blit(self.fixation, location=P.screen_c, registration=5)
        flip()

        self.continue_on()

        txt = (
            "Shortly after the array appears, both the target 'o' and distractor '+'\n"
            "will appear in random locations within the array...")

        instruction_msg = message(txt, align='center', blit_txt=False)

        t_loc = self.prime_locs[1][0]
        d_loc = self.prime_locs[3][0]

        fill()
        blit(instruction_msg,
             location=(P.screen_c[0], int(P.screen_c[1] * 0.3)),
             registration=5)
        for value in self.probe_locs.values():
            blit(self.placeholder, registration=5, location=value[0])
        blit(self.target, location=t_loc, registration=5)
        blit(self.distractor, location=d_loc, registration=5)
        blit(self.fixation, location=P.screen_c, registration=5)
        flip()

        self.continue_on()

        txt = (
            "Once they appear, please indicate the location of the 'o' as quickly and\n"
            "accurately as possible, using the numpad ('8' for North, '9' for Northeast, etc.,)\n"
            "Each trial will actually consist of two displays, each requiring their own response,\n"
            "one after the other")

        instruction_msg = message(txt, align='center', blit_txt=False)

        fill()
        blit(instruction_msg,
             location=(P.screen_c[0], int(P.screen_c[1] * 0.3)),
             registration=5)
        for value in self.probe_locs.values():
            blit(self.placeholder, registration=5, location=value[0])
            blit(button_map[value[1]], registration=5, location=value[0])
        blit(self.target, location=t_loc, registration=5)
        blit(self.distractor, location=d_loc, registration=5)
        blit(self.fixation, location=P.screen_c, registration=5)
        flip()

        self.continue_on()

        txt = (
            "Once you have made both responses, you will be provided with feedback,\n"
            "the upper and lower line referring to your performance\n"
            "in the first and second display, respectively.\n"
            "Please press spacebar to skip past the feedback display")

        instruction_msg = message(txt, align='center', blit_txt=False)

        fill()
        blit(instruction_msg, location=P.screen_c, registration=5)
        flip()

        self.continue_on()

        txt = "For correct responses, your reaction time will be provided to you."
        fb_txt = "360\n412"

        instruction_msg = message(txt, align='center', blit_txt=False)
        fb_msg = message(fb_txt, align='center', blit_txt=False)

        fill()
        blit(instruction_msg,
             location=(P.screen_c[0], int(P.screen_c[1] * 0.3)),
             registration=5)
        blit(fb_msg, location=P.screen_c, registration=5)
        flip()

        self.continue_on()

        txt = "For incorrect responses, your reaction time will be replaced by the word WRONG."
        fb_txt = "323\nWRONG"

        instruction_msg = message(txt, align='center', blit_txt=False)
        fb_msg = message(fb_txt, align='center', blit_txt=False)

        fill()
        blit(instruction_msg,
             location=(P.screen_c[0], int(P.screen_c[1] * 0.3)),
             registration=5)
        blit(fb_msg, location=P.screen_c, registration=5)
        flip()

        self.continue_on()

        continue_txt = (
            "Throughout the task, please keep your fingers rested on the numpad,\n"
            "with your middle finger resting on the '5' key\n\n"
            "The experiment will begin with a short practice round to familiarize you with the task\n\n"
            "When you're ready, press the '5' key to begin...")

        continue_msg = message(continue_txt, align='center', blit_txt=False)

        fill()
        blit(continue_msg, location=P.screen_c, registration=5)
        flip()

        self.continue_on()

    def clean_up(self):
        pass
Example No. 18
class ABColour_TMTM(klibs.Experiment):
    def setup(self):
        # Stimulus sizes
        fix_thickness = deg_to_px(0.1)
        fix_size = deg_to_px(0.6)
        wheel_size = int(P.screen_y * 0.75)
        cursor_size = deg_to_px(1)
        cursor_thickness = deg_to_px(0.3)
        target_size = deg_to_px(0.8)

        # Initialize drawbjects
        self.fixation = FixationCross(size=fix_size,
                                      thickness=fix_thickness,
                                      fill=WHITE)
        self.t1_wheel = ColorWheel(diameter=wheel_size)
        self.t2_wheel = ColorWheel(diameter=wheel_size)
        self.cursor = Annulus(diameter=cursor_size,
                              thickness=cursor_thickness,
                              fill=BLACK)

        # Create text styles to store target colouring
        self.txtm.add_style(label="T1", font_size=target_size)
        self.txtm.add_style(label="T2", font_size=target_size)

        # Stimulus presentation durations (in multiples of the ~16.7ms refresh interval)
        self.id_target_duration = P.refresh_time * 5  # 83.3ms
        self.id_mask_duration = P.refresh_time  # 16.7ms
        self.col_target_duration = P.refresh_time * 5  # 83.3ms
        self.col_mask_duration = P.refresh_time  # 16.7ms
        self.isi = P.refresh_time  # ISI = inter-stimulus interval (target offset -> mask onset)

        # The colour ResponseCollectors need to be passed an object whose fill (colour)
        # matches the target colour. Within trial_prep(), these dummies are filled with
        # the target colours and then passed to their respective ResponseCollectors.
        self.t1_dummy = Ellipse(width=1)
        self.t2_dummy = Ellipse(width=1)

        # Experiment messages
        self.anykey_txt = "{0}\nPress any key to continue."
        self.t1_id_request = "What was the first number?"
        self.t2_id_request = "What was the second number?"
        self.t1_col_request = "What was the first colour?"
        self.t2_col_request = "What was the second colour?"
        self.prac_identity_instruct = "\nIn this block, you will be asked to report what number was presented.\nIf you're unsure, make your best guess."
        self.prac_colour_instruct = "\nIn this block, you will be asked to report what colour was presented.\nIf you're unsure, make your best guess."
        self.test_identity_instruct = "\nIn this block, you will be asked to report which two numbers were presented.\nIf you're unsure, make your best guess."
        self.test_colour_instruct = "\nIn this block, you will be asked to report which two colours were presented.\nIf you're unsure, make your best guess."

        # Initialize ResponseCollectors
        self.t1_identity_rc = ResponseCollector(uses=RC_KEYPRESS)
        self.t2_identity_rc = ResponseCollector(uses=RC_KEYPRESS)

        self.t1_colouring_rc = ResponseCollector(uses=RC_COLORSELECT)
        self.t2_colouring_rc = ResponseCollector(uses=RC_COLORSELECT)

        # Initialize ResponseCollector Keymaps
        self.keymap = KeyMap(
            'identity_response', ['1', '2', '3', '4', '5', '6', '7', '8', '9'],
            ['1', '2', '3', '4', '5', '6', '7', '8', '9'], [
                sdl2.SDLK_1, sdl2.SDLK_2, sdl2.SDLK_3, sdl2.SDLK_4,
                sdl2.SDLK_5, sdl2.SDLK_6, sdl2.SDLK_7, sdl2.SDLK_8, sdl2.SDLK_9
            ])

        # Inserting practice blocks requires a pre-defined trial count, but here their length
        # is undefined: they last for as long as it takes participants to reach a performance
        # threshold. So they are initialized with a length of 1, and trials are inserted
        # later on depending on participant performance.
        if P.run_practice_blocks:
            self.insert_practice_block([1, 3], trial_counts=1)

        # Randomly select starting condition
        self.block_type = random.choice([IDENTITY, COLOUR])

    def block(self):
        if not P.practicing:
            if P.trial_number % 60 == 0:
                rest_txt = "Whew, go ahead a take a break!\nPress any key when you're ready to continue."
                rest_msg = message(rest_txt, align='center', blit_txt=False)
                fill()
                blit(rest_msg, 5, P.screen_c)
                flip()
                any_key()

        self.t1_performance = 0

        # Present block progress
        block_txt = "Block {0} of {1}".format(P.block_number,
                                              P.blocks_per_experiment)
        progress_txt = self.anykey_txt.format(block_txt)

        if P.practicing:
            progress_txt += "\n(This is a practice block)"

        progress_msg = message(progress_txt, align='center', blit_txt=False)

        fill()
        blit(progress_msg, 5, P.screen_c)
        flip()
        any_key()

        # Inform as to block type
        if self.block_type == COLOUR:
            if P.practicing:
                block_type_txt = self.anykey_txt.format(
                    self.prac_colour_instruct)
            else:
                block_type_txt = self.anykey_txt.format(
                    self.test_colour_instruct)
        else:
            if P.practicing:
                block_type_txt = self.anykey_txt.format(
                    self.prac_identity_instruct)
            else:
                block_type_txt = self.anykey_txt.format(
                    self.test_identity_instruct)

        block_type_msg = message(block_type_txt,
                                 align='center',
                                 blit_txt=False)

        fill()
        blit(block_type_msg, 5, P.screen_c)
        flip()
        any_key()

        # Pre-run: First 10 practice trials, no performance adjustments
        self.pre_run_complete = False
        # Practice: Subsequent practice trials wherein performance is adjusted
        self.practice_complete = False
        self.practice_trial_num = 1
        # Reset T1 performance each practice block
        self.t1_performance = 0

        # The following block manually inserts trials one at a time
        # during which performance is checked and adjusted for.
        if P.practicing:
            while P.practicing:
                self.itoa = random.choice([100, 200, 300])
                self.ttoa = random.choice([120, 240, 360, 480, 600])

                self.setup_response_collector()
                self.trial_prep()
                self.evm.start_clock()

                try:
                    self.trial()
                except TrialException:
                    pass

                self.evm.stop_clock()
                self.trial_clean_up()
                # Once practice is complete, the loop is exited
                if self.practice_complete:
                    P.practicing = False

    def setup_response_collector(self):
        # Configure identity collector
        self.t1_identity_rc.terminate_after = [10, TK_S]  # Waits 10s for response
        self.t1_identity_rc.display_callback = self.identity_callback  # Continuously draw images to screen
        self.t1_identity_rc.display_kwargs = {'target': "T1"}  # Passed as arg when identity_callback() is called
        self.t1_identity_rc.keypress_listener.key_map = self.keymap  # Assign key mappings
        self.t1_identity_rc.keypress_listener.interrupts = True  # Terminates listener after valid response

        self.t2_identity_rc.terminate_after = [10, TK_S]
        self.t2_identity_rc.display_callback = self.identity_callback
        self.t2_identity_rc.display_kwargs = {'target': "T2"}
        self.t2_identity_rc.keypress_listener.key_map = self.keymap
        self.t2_identity_rc.keypress_listener.interrupts = True

        # Configure colour collector
        # Because colours are randomly selected on a trial-by-trial basis,
        # most properties of the colouring RCs need to be assigned within trial_prep()
        self.t1_colouring_rc.terminate_after = [10, TK_S]
        self.t2_colouring_rc.terminate_after = [10, TK_S]

    def trial_prep(self):
        # Prepare colour wheels
        self.t1_wheel.rotation = random.randrange(0, 360)  # Randomly rotate wheel to prevent location biases
        self.t2_wheel.rotation = random.randrange(0, 360)

        while self.t1_wheel.rotation == self.t2_wheel.rotation:  # Ensure unique rotation values
            self.t2_wheel.rotation = random.randrange(0, 360)

        self.t1_wheel.render()
        self.t2_wheel.render()

        # Select target identities
        self.t1_identity = random.sample(numbers, 1)[0]  # Select & assign identity
        self.t2_identity = random.sample(numbers, 1)[0]

        while self.t1_identity == self.t2_identity:  # Ensure that T1 & T2 identities are unique
            self.t2_identity = random.sample(numbers, 1)[0]

        # Select target angles (for selecting colour from wheel)
        self.t1_angle = random.randrange(0, 360)
        self.t2_angle = random.randrange(0, 360)

        while self.t1_angle == self.t2_angle:
            self.t2_angle = random.randrange(0, 360)

        self.t1_colour = self.t1_wheel.color_from_angle(self.t1_angle)  # Assign colouring
        self.t2_colour = self.t2_wheel.color_from_angle(self.t2_angle)

        # Dummy objects to serve as reference point when calculating response error
        self.t1_dummy.fill = self.t1_colour
        self.t2_dummy.fill = self.t2_colour

        self.t1_colouring_rc.display_callback = self.wheel_callback
        self.t1_colouring_rc.display_kwargs = {'wheel': self.t1_wheel}

        self.t1_colouring_rc.color_listener.set_wheel(self.t1_wheel)  # Set generated wheel as wheel to use
        self.t1_colouring_rc.color_listener.set_target(self.t1_dummy)  # Set dummy as target reference point

        self.t2_colouring_rc.display_callback = self.wheel_callback
        self.t2_colouring_rc.display_kwargs = {'wheel': self.t2_wheel}  # Passed as arg when wheel_callback() is called

        self.t2_colouring_rc.color_listener.set_wheel(self.t2_wheel)
        self.t2_colouring_rc.color_listener.set_target(self.t2_dummy)

        if self.block_type == IDENTITY:
            self.target_duration = self.id_target_duration
            self.mask_duration = self.id_mask_duration
        else:
            self.target_duration = self.col_target_duration
            self.mask_duration = self.col_mask_duration

        # Initialize EventManager
        if P.practicing:  # T2 not present during practice blocks
            events = [[self.itoa, "T1_on"]]
            events.append([events[-1][0] + self.target_duration, 'T1_off'])
            events.append([events[-1][0] + self.isi, 'T1_mask_on'])
            events.append([events[-1][0] + self.mask_duration, 'T1_mask_off'])
            events.append([events[-1][0] + 300, 'response_foreperiod'])
        else:
            events = [[self.itoa, 'T1_on']]
            events.append([events[-1][0] + self.target_duration, 'T1_off'])
            events.append([events[-1][0] + self.isi, 'T1_mask_on'])
            events.append([events[-1][0] + self.mask_duration, 'T1_mask_off'])
            events.append([events[-4][0] + self.ttoa, 'T2_on'])  # TTOA: T1 onset (events[-4]) to T2 onset
            events.append([events[-1][0] + self.target_duration, 'T2_off'])
            events.append([events[-1][0] + self.isi, 'T2_mask_on'])
            events.append([events[-1][0] + self.mask_duration, 'T2_mask_off'])
            events.append([events[-1][0] + 300, 'response_foreperiod'])
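        # Worked example (hypothetical values): with itoa = 1000ms, target_duration = 50ms,
        # isi = 17ms, mask_duration = 67ms and ttoa = 500ms, the cumulative schedule is
        # T1_on = 1000, T1_off = 1050, T1_mask_on = 1067, T1_mask_off = 1134, T2_on = 1500,
        # T2_off = 1550, T2_mask_on = 1567, T2_mask_off = 1634, response_foreperiod = 1934
        # (all times in ms relative to trial start).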

        # Register the event schedule with the EventManager (stream begins 1000ms after fixation)
        for e in events:
            self.evm.register_ticket(ET(e[1], e[0]))

        # Prepare stream
        self.tmtm_stream = self.prep_stream()

        # Present fixation & wait for initiation
        self.present_fixation()

    def trial(self):
        # Hide cursor during trial
        hide_mouse_cursor()

        # Wait some foreperiod before presenting T1
        while self.evm.before('T1_on', True):
            ui_request()

        # Present T1
        fill()
        blit(self.tmtm_stream['t1_target'],
             registration=5,
             location=P.screen_c)
        flip()

        self.t1_sw = Stopwatch()
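        # t1_sw tracks the actual on-screen T1 duration; it (and the ISI/mask stopwatches
        # below) is logged with the trial data for timing verification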

        # Don't do anything during T1 presentation
        while self.evm.before('T1_off', True):
            ui_request()

        self.t1_isi_sw = Stopwatch()
        # Remove T1
        fill()
        flip()
        self.t1_sw.pause()
        print("T1 duration actual: " + str(self.t1_sw.elapsed()))
        fill()
        flip()

        while self.evm.before('T1_mask_on', True):
            ui_request()

        self.t1_isi_sw.pause()

        print("ISI actual: " + str(self.t1_isi_sw.elapsed()))

        # After one refresh interval (the time it takes to clear T1), present the mask
        fill()
        blit(self.tmtm_stream['t1_mask'], registration=5, location=P.screen_c)
        flip()

        self.t1_mask_sw = Stopwatch()

        # Don't do anything during presentation
        while self.evm.before('T1_mask_off', True):
            ui_request()

        # Remove mask
        fill()
        flip()
        self.t1_mask_sw.pause()
        print("T1 mask duration actual: " + str(self.t1_mask_sw.elapsed()))

        # If not practicing, present T2
        if not P.practicing:

            # After TTOA is up, present T2
            while self.evm.before('T2_on', True):
                ui_request()

            fill()
            blit(self.tmtm_stream['t2_target'],
                 registration=5,
                 location=P.screen_c)
            flip()

            self.t2_sw = Stopwatch()

            # Don't do anything during presentation
            while self.evm.before('T2_off', True):
                ui_request()

            self.t2_isi_sw = Stopwatch()

            # Remove T2
            fill()
            flip()

            self.t2_sw.pause()

            fill()
            flip()

            while self.evm.before('T2_mask_on', True):
                ui_request()

            self.t2_isi_sw.pause()

            # After one refresh interval, present the mask
            fill()
            blit(self.tmtm_stream['t2_mask'],
                 registration=5,
                 location=P.screen_c)
            flip()

            self.t2_mask_sw = Stopwatch()

            # Don't do anything during presentation
            while self.evm.before('T2_mask_off', True):
                ui_request()

            # Remove mask
            fill()
            flip()

            self.t2_mask_sw.pause()

        # Wait 300ms before requesting responses
        while self.evm.before('response_foreperiod', True):
            ui_request()

        # Request & record responses
        if self.block_type == IDENTITY:
            # Colour measures are not recorded on identity trials
            t1_response_err, t1_response_err_rt = 'NA', 'NA'
            t2_response_err, t2_response_err_rt = 'NA', 'NA'

            # Collect identity responses
            self.t1_identity_rc.collect()

            if not P.practicing:
                self.t2_identity_rc.collect()

            # Assign to variables returned
            t1_id_response, t1_id_rt = self.t1_identity_rc.keypress_listener.response()

            # No T2 present during practice
            if not P.practicing:
                t2_id_response, t2_id_rt = self.t2_identity_rc.keypress_listener.response()
            else:
                t2_id_response, t2_id_rt = ['NA', 'NA']

            # During practice, keep a tally of T1 performance
            if P.practicing:
                if t1_id_response == self.t1_identity:
                    self.t1_performance += 1

        else:  # Colour block
            # Identity measures are not recorded on colour trials
            t1_id_response, t1_id_rt = 'NA', 'NA'
            t2_id_response, t2_id_rt = 'NA', 'NA'

            # Collect colour responses
            self.t1_colouring_rc.collect()

            if not P.practicing:
                self.t2_colouring_rc.collect()

            # Assign to variables returned
            t1_response_err, t1_response_err_rt = self.t1_colouring_rc.color_listener.response()

            # T2 only presented during test blocks
            if not P.practicing:
                t2_response_err, t2_response_err_rt = self.t2_colouring_rc.color_listener.response()
            else:
                t2_response_err, t2_response_err_rt = ['NA', 'NA']

            if P.practicing:
                # As the numeric identities have 9 possible values, the colour wheel is treated
                # as having 9 corresponding 'bins' (each 40° wide). A colour response is scored
                # as correct if its angular error is within ±20° of the target colour.
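                # e.g. (hypothetical values): an error of -17° falls within ±20° and is scored
                # correct, whereas an error of +23° is not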

                if abs(t1_response_err) <= 20:
                    self.t1_performance += 1

        clear()

        print("Target duration: " + str(self.target_duration))
        print("Mask duration: " + str(self.mask_duration))

        return {
            "practicing": str(P.practicing),
            "block_num": P.block_number,
            "trial_num": P.trial_number,
            "block_type": self.block_type,
            "itoa": self.itoa,
            "ttoa": self.ttoa,
            "target_duration": self.target_duration,
            "mask_duration": self.mask_duration,
            "t1_identity": self.t1_identity,
            "t2_identity": self.t2_identity if not P.practicing else 'NA',
            "t1_identity_response": t1_id_response,
            "t1_identity_rt": t1_id_rt,
            "t2_identity_response": t2_id_response,
            "t2_identity_rt": t2_id_rt,
            "t1_colour": self.t1_colour,
            "t1_angle": self.t1_angle,
            "t1_wheel_rotation": self.t1_wheel.rotation,
            "t2_colour": self.t2_colour if not P.practicing else 'NA',
            "t2_angle": self.t2_angle if not P.practicing else 'NA',
            "t2_wheel_rotation": self.t2_wheel.rotation if not P.practicing else 'NA',
            "t1_ang_err": t1_response_err,
            "t1_colour_rt": t1_response_err_rt,
            "t2_ang_err": t2_response_err,
            "t2_colour_rt": t2_response_err_rt,
            "t1_performance_practice": self.t1_performance if P.practicing else 'NA',
            "t1_duration_actual": self.t1_sw.elapsed(),
            "t1_isi_actual": self.t1_isi_sw.elapsed(),
            "t1_mask_duration_actual": self.t1_mask_sw.elapsed(),
            "t2_duration_actual": self.t2_sw.elapsed() if not P.practicing else 'NA',
            "t2_isi_actual": self.t2_isi_sw.elapsed() if not P.practicing else 'NA',
            "t2_mask_duration_actual": self.t2_mask_sw.elapsed() if not P.practicing else 'NA'
        }

    def trial_clean_up(self):
        # Reset response listeners
        self.t1_identity_rc.keypress_listener.reset()
        self.t2_identity_rc.keypress_listener.reset()

        self.t1_colouring_rc.color_listener.reset()
        self.t2_colouring_rc.color_listener.reset()

        # Performance checks during practice
        if P.practicing:
            if not self.practice_complete:
                # The first 10 trials are an 'introductory' run, during which no performance check is made
                if not self.pre_run_complete:
                    if self.practice_trial_num == 10:
                        self.t1_performance = 0
                        self.pre_run_complete = True

                else:
                    # Every 10 trials, check performance & adjust difficulty as necessary
                    if self.practice_trial_num % 10 == 0:

                        # If accuracy exceeds 80% (more than 8 of the last 10 trials correct)
                        if self.t1_performance > 8:

                            # Make task harder by adjusting target & mask durations
                            if self.block_type == IDENTITY:
                                if self.id_target_duration > P.refresh_time:
                                    self.id_target_duration -= P.refresh_time
                                    self.id_mask_duration += P.refresh_time

                            else:
                                if self.col_target_duration > P.refresh_time:
                                    self.col_target_duration -= P.refresh_time
                                    self.col_mask_duration += P.refresh_time

                            self.t1_performance = 0

                        # Conversely, if accuracy is at or below 20% (2 or fewer of the last 10 correct), make the task easier
                        elif self.t1_performance <= 2:
                            if self.block_type == IDENTITY:
                                self.id_target_duration += P.refresh_time
                                self.id_mask_duration -= P.refresh_time

                            else:
                                self.col_target_duration += P.refresh_time
                                self.col_mask_duration -= P.refresh_time
                            self.t1_performance = 0

                        else:
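                            # Performance is within the acceptable range: record it and end the practice run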
                            self.t1_identity_performance = self.t1_performance
                            self.practice_complete = True

                self.practice_trial_num += 1
        else:
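            # At the end of block 2, switch the task (identity <-> colour) for the remaining blocks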
            if P.trial_number == P.trials_per_block and P.block_number == 2:
                self.block_type = COLOUR if self.block_type == IDENTITY else IDENTITY

    def clean_up(self):
        pass

    # --------------------------------- #
    # Project specific helper functions
    # --------------------------------- #

    def present_fixation(self):
        fill()
        blit(self.fixation, location=P.screen_c, registration=5)
        flip()

        any_key()

    def prep_stream(self):
        # Dynamically assign target colouring
        self.txtm.styles['T1'].color = self.t1_colour
        self.txtm.styles['T2'].color = self.t2_colour

        # Generate unique masks for each target
        self.t1_mask = self.generate_mask()
        self.t2_mask = self.generate_mask()

        stream_items = {
            't1_target':
            message(self.t1_identity,
                    align='center',
                    style='T1',
                    blit_txt=False),
            't1_mask':
            self.t1_mask,
            't2_target':
            message(self.t2_identity,
                    align='center',
                    style='T2',
                    blit_txt=False),
            't2_mask':
            self.t2_mask
        }

        return stream_items

    def wheel_callback(self, wheel):
        # Hide cursor during selection phase
        hide_mouse_cursor()

        # Response request msg
        colour_request_msg = self.t1_col_request if wheel == self.t1_wheel else self.t2_col_request
        message_offset = deg_to_px(1.5)
        message_loc = (P.screen_c[0], (P.screen_c[1] - message_offset))

        fill()

        # Present the appropriate wheel (the one passed in as 'wheel')
        blit(wheel, registration=5, location=P.screen_c)
        # Present response request
        message(colour_request_msg,
                location=message_loc,
                registration=5,
                blit_txt=True)
        # Present annulus drawbject as cursor
        blit(self.cursor, registration=5, location=mouse_pos())

        flip()

    def identity_callback(self, target):
        # Request appropriate identity
        identity_request_msg = self.t1_id_request if target == "T1" else self.t2_id_request

        fill()
        message(identity_request_msg,
                location=P.screen_c,
                registration=5,
                blit_txt=True)
        flip()

    def generate_mask(self):
        # Set mask size
        canvas_size = deg_to_px(1)
        # Set cell size
        cell_size = canvas_size / 5  # Mask is comprised of 25 smaller cells arranged in a 5x5 grid
        # Each cell has a black outline
        cell_outline_width = deg_to_px(.05)

        # Initialize canvas to be painted w/ mask cells
        canvas = Image.new('RGBA', [canvas_size, canvas_size], (0, 0, 0, 0))

        surface = aggdraw.Draw(canvas)

        # Initialize pen to draw cell outlines
        outline_pen = aggdraw.Pen((0, 0, 0), cell_outline_width)

        # Generate cells, arranged in a 5x5 array
        for row in range(5):
            for col in range(5):
                # Randomly select colour for each cell
                cell_colour = const_lum[random.randrange(0, 360)]
                # Brush to apply colour
                colour_brush = aggdraw.Brush(tuple(cell_colour[:3]))
                # Determine cell boundary coords
                top_left = (row * cell_size, col * cell_size)
                bottom_right = ((row + 1) * cell_size, (col + 1) * cell_size)
                # Create cell
                surface.rectangle(
                    (top_left[0], top_left[1], bottom_right[0], bottom_right[1]),
                    outline_pen, colour_brush)
        # Apply cells to mask
        surface.flush()

        return np.asarray(canvas)
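
A minimal, klibs-free sketch of how the cumulative event schedule in trial_prep() above is built. The build_schedule() helper and the durations used in the example are hypothetical (they are not part of klibs or of the experiment code); the sketch only illustrates how each onset is derived from the previous event's onset, and how T2_on is anchored to T1 onset (events[-4]) via the target-to-target onset asynchrony (ttoa).

def build_schedule(itoa, target_duration, isi, mask_duration, ttoa, practicing=False):
    """Return a list of (label, onset_ms) pairs mirroring the logic in trial_prep()."""
    events = [("T1_on", itoa)]
    events.append(("T1_off", events[-1][1] + target_duration))
    events.append(("T1_mask_on", events[-1][1] + isi))
    events.append(("T1_mask_off", events[-1][1] + mask_duration))
    if not practicing:
        # T2 onset is anchored to T1 onset (events[-4]), not to the preceding event
        events.append(("T2_on", events[-4][1] + ttoa))
        events.append(("T2_off", events[-1][1] + target_duration))
        events.append(("T2_mask_on", events[-1][1] + isi))
        events.append(("T2_mask_off", events[-1][1] + mask_duration))
    events.append(("response_foreperiod", events[-1][1] + 300))
    return events


if __name__ == "__main__":
    # Hypothetical values; real durations come from the experiment's params and refresh rate
    for label, onset in build_schedule(1000, 50, 17, 67, 500):
        print(label, onset)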