def get_peak_during(self, period, msg=None):
    """Determines the peak loudness value recorded over a given period.

    Displays a visual callback that shows the current input volume and the
    loudest peak encountered during the interval so far.

    Args:
        period (numeric): the number of seconds to record input for.
        msg (str, optional): a message to render and display in the top-left
            corner of the screen during the sampling loop.
            NOTE(review): the original docstring described this as an
            already-rendered NumpySurface, but the code below passes it
            through message() to render it — confirm which contract callers
            rely on.

    Returns:
        int: the loudest peak of all samples recorded during the period.

    """
    local_peak = 0
    last_sample = 0  # previous sample, used to smooth the displayed volume
    if msg:
        msg = message(msg, blit_txt=False)

    # Discard any pending input events before sampling starts
    flush()
    self.stream.start()
    # Extra 50 ms compensates for the warm-up samples skipped below
    sample_period = CountDown(period + 0.05)
    while sample_period.counting():
        ui_request()  # allow quit/calibrate hotkeys during the loop
        sample = self.stream.sample().peak
        if sample_period.elapsed() < 0.05:
            # Sometimes 1st or 2nd peaks are extremely high for no reason,
            # so ignore the first 50ms of samples
            continue
        if sample > local_peak:
            local_peak = sample
        # Average of current & previous sample, to reduce visual jitter
        sample_avg = (sample + last_sample) / 2
        # Circle diameters scale with loudness (16-bit peak -> 80% of screen
        # height); peak() enforces a 5 px minimum so the dot stays visible
        peak_circle = peak(5, int((local_peak / 32767.0) * P.screen_y * 0.8))
        sample_circle = peak(5, int((sample_avg / 32767.0) * P.screen_y * 0.8))
        last_sample = sample

        fill()
        # Orange circle: loudest peak so far; purple circle: current volume
        blit(Ellipse(peak_circle, fill=[255, 145, 0]), location=P.screen_c, registration=5)
        blit(Ellipse(sample_circle, fill=[84, 60, 182]), location=P.screen_c, registration=5)
        if msg:
            blit(msg, location=[25, 25], registration=7)
        flip()

    self.stream.stop()
    return local_peak
def __init__(self, import_path=None, animate_time=5000.0, manufacture=None):
    """Initializes a figure, either imported from file, manufactured from
    provided points/segments, or randomly generated from the parameters in P.

    Args:
        import_path (str, optional): path to a previously saved figure to load.
        animate_time (float, optional): target duration (ms) for figure animation.
        manufacture (dict, optional): pre-built figure data with 'points' and
            'segments' keys, used instead of random generation.

    """
    self.animate_target_time = animate_time
    self.seg_count = None
    # Allowed segment counts per quadrant/figure: mean +/- variance from params
    self.min_spq = P.avg_seg_per_q[0] - P.avg_seg_per_q[1]
    self.max_spq = P.avg_seg_per_q[0] + P.avg_seg_per_q[1]
    self.min_spf = P.avg_seg_per_f[0] - P.avg_seg_per_f[1]
    self.max_spf = P.avg_seg_per_f[0] + P.avg_seg_per_f[1]

    # segment generation controls
    self.min_lin_ang_width = P.min_linear_acuteness * 180
    # NOTE: these next two almost certainly don't work as intended, since we sometimes
    # get straight lines & near-straight curves despite minimum curve slope being 0.25
    self.curve_min_slope = int(math.floor(90.0 - P.slope_magnitude[0] * 90.0))
    self.curve_max_slope = int(math.ceil(90.0 + P.slope_magnitude[0] * 90.0))

    # NOTE(review): this chained comparison reads as
    # "min_spf > 4*min_spq AND 4*min_spq > max_spf", which is likely not the
    # intended sanity check (probably meant to verify that 4 quadrants' worth of
    # segments can fit within the per-figure min/max) — confirm against spec.
    if self.min_spf > 4 * self.min_spq > self.max_spf:
        raise ValueError("Impossible min/max values chosen for points per figure/quadrant.")
    if P.outer_margin_h + P.inner_margin_h // 2 > P.screen_c[0]:
        raise ValueError("Margins too large; no drawable area remains.")
    if P.outer_margin_v + P.inner_margin_v // 2 > P.screen_c[1]:
        raise ValueError("Margins too large; no drawable area remains.")

    self.quad_ranges = None
    self.r_dot = Ellipse(5, fill=(255, 45, 45)).render()  # red dot used when tracing
    self.total_spf = 0
    self.points = []
    self.raw_segments = []
    self.a_frames = []  # interpolated frames tracing figure at given duration / fps
    self.trial_a_frames = []  # a_frames plus frame onset times for previous animation
    self.screen_res = [P.screen_x, P.screen_y]
    self.avg_velocity = None  # last call to animate only
    self.animate_time = None  # last call to animate only
    self.rendered = False

    # Three construction modes: import from disk, build from provided data,
    # or generate a new random figure
    if import_path:
        self.__import_figure(import_path)
    elif manufacture:
        self.points = manufacture['points']
        self.seg_count = len(self.points)
        self.raw_segments = manufacture['segments']
    else:
        self.__generate_null_points()
        self.__gen_quad_intersects()
        self.__gen_real_points(not P.generate_quadrant_intersections)
        self.__gen_segments()

    # pre-render animation frames at slowest rate
    # NOTE(review): duration is hard-coded to 5000.0 rather than using the
    # animate_time argument — confirm whether that is intentional.
    self.prepare_animation(duration=5000.0)
def __init__(self, y_pos, bar_length, bar_height, handle_radius, bar_fill, handle_fill):
    """Bar-with-handle response widget with an OK button and a prompt message.

    Args:
        y_pos (int): vertical screen position of the bar, in pixels.
        bar_length (int): width of the bar, in pixels.
        bar_height (int): height of the bar, in pixels.
        handle_radius (int): radius of the draggable handle, in pixels.
        bar_fill: fill colour of the bar.
        handle_fill: fill colour of the handle.

    """
    BoundaryInspector.__init__(self)
    EnvAgent.__init__(self)
    self.boundaries = {}

    # Bar geometry: horizontally centred on screen, top edge at y_pos
    self.pos = (P.screen_c[0] - bar_length // 2, y_pos)  # upper-left
    self.message_pos = (P.screen_c[0], y_pos - 50)
    self.__handle_pos = (self.pos[0], self.pos[1] + bar_height // 2)

    # Handle & bar appearance
    self.handle_color = handle_fill
    self.handle_radius = handle_radius
    self.handle_stroke = None
    self.handle_boundary = None
    self.bar = None
    self.bar_size = (bar_length, bar_height)
    self.bar_color = bar_fill
    self.bar_stroke = None

    # Increment / tick configuration (mostly set up later by callers)
    self.show_increment_ticks = True
    self.show_increment_text = False
    self.increment_count = None
    self.increments = []
    self.increment_by = 1
    self.increment_surfs = {}
    self.lower_bound = None
    self.upper_bound = None

    # Build drawables and the circular clickable region around the handle
    self.handle = Ellipse(self.handle_radius * 2, fill=self.handle_color, stroke=self.handle_stroke)
    self.bar = Rectangle(self.bar_size[0], self.bar_size[1], fill=self.bar_color, stroke=self.bar_stroke)
    handle_centre = (self.pos[0] + self.handle_radius, self.pos[1])
    self.add_boundary("handle", [handle_centre, self.handle_radius], CIRCLE_BOUNDARY)

    # Prompt & OK-button surfaces
    self.msg = message("How many corners did the dot traverse?", "default", blit_txt=False)
    self.lb_msg = None
    self.ub_msg = None
    self.ok_text = message("OK", blit_txt=False)
    button_stroke = (1, (255, 255, 255))
    self.ok_inactive_button = Rectangle(100, 50, stroke=button_stroke, fill=(125, 125, 125)).render()
    self.ok_active_button = Rectangle(100, 50, stroke=button_stroke, fill=(5, 175, 45)).render()
    self.button_active = False

    # OK button: 100 x 50 px rect centred below the bar, with clickable bounds
    self.button_pos = (P.screen_c[0], y_pos + bar_height + 50)
    button_top_left = (self.button_pos[0] - 50, self.button_pos[1] - 25)
    button_bottom_right = (self.button_pos[0] + 50, self.button_pos[1] + 25)
    self.add_boundary("button", (button_top_left, button_bottom_right), RECT_BOUNDARY)
    self.response = None
def __init__(self, data):
    """Builds a renderable asset (text, shape, image, or audio) from a
    key-frame asset description.

    Args:
        data: asset descriptor with optional 'text', 'drawbject', or 'file'
            attributes describing what to load/render.

    """
    self.media_type = IMAGE_FILE  # default; overridden below for file assets
    self.height = None
    self.width = None
    self.duration = None

    if data.text:
        # todo: make style optional
        self.contents = message(data.text.string, data.text.style, align="center", blit_txt=False)
    elif data.drawbject:
        # Render the described shape to a texture
        d = data.drawbject
        if d.shape == "rectangle":
            self.contents = Rectangle(d.width, d.height, d.stroke, d.fill).render()
        if d.shape == "ellipse":
            self.contents = Ellipse(d.width, d.height, d.stroke, d.fill).render()
        if d.shape == "annulus":
            self.contents = Annulus(d.diameter, d.ring_width, d.stroke, d.fill).render()
    else:
        self.media_type = data.file.media_type
        if self.is_audio:
            # NOTE(review): assigning the whole file object to duration looks
            # suspicious — probably meant data.file.duration; confirm.
            self.duration = data.file
            self.contents = AudioClip(join(P.resources_dir, "audio", data.file.filename))
        else:
            # If asset is image file, import and scale for current screen size
            # (animations originally hard-coded at 1920x1080 so we scale
            # relative to that)
            img = Image.open(join(P.image_dir, data.file.filename))
            target_size = (P.screen_x, (P.screen_x / 16.0) * 9)  # corrected for aspect ratio
            scaled_size = scale(img.size, (1920, 1080), target_size, center=False)
            self.contents = np.asarray(img.resize(scaled_size, Image.BILINEAR))

    # Record dimensions: NumpySurface-like contents expose height/width,
    # numpy arrays expose .shape, audio clips expose neither
    try:
        self.height = self.contents.height
        self.width = self.contents.width
    except AttributeError:
        try:
            self.height = self.contents.shape[0]
            self.width = self.contents.shape[1]
        except AttributeError:
            pass  # ie. audio file
def run(self, *args, **kwargs):
    """The method that gets run by 'klibs run' after the runtime environment
    is created. Runs the actual experiment.

    Args:
        *args: forwarded to __execute_experiment__.
        **kwargs: forwarded to __execute_experiment__.

    """
    from klibs.KLGraphics.KLDraw import Ellipse

    if P.eye_tracking:
        RED = (255, 0, 0)
        WHITE = (255, 255, 255)
        # Dot drawn at the current gaze position while eye tracking
        self.tracker_dot = Ellipse(8, stroke=[2, WHITE], fill=RED).render()
        if not P.manual_eyelink_setup:
            self.el.setup()

    self.setup()
    try:
        self.__execute_experiment__(*args, **kwargs)
    except RuntimeError:
        # Print the full traceback rather than letting it propagate, so quit()
        # below can still tear the runtime environment down cleanly
        print(full_trace())
    # NOTE(review): quit() placed after the try so it runs on both success and
    # handled failure — confirm against the original (pre-mangling) indentation.
    self.quit()
def setup(self):
    """One-time experiment setup: text styles, stimuli, session init, UI
    scaling, response buttons, instructions, and figure imports."""
    # Set up custom text styles for the experiment
    self.txtm.add_style('instructions', 18, [255, 255, 255, 255])
    self.txtm.add_style('error', 18, [255, 0, 0, 255])
    self.txtm.add_style('tiny', 12, [255, 255, 255, 255])
    self.txtm.add_style('small', 14, [255, 255, 255, 255])

    # Pre-render shape stimuli
    dot_stroke = [P.dot_stroke, P.dot_stroke_col, STROKE_OUTER] if P.dot_stroke > 0 else None
    self.tracker_dot = Ellipse(P.dot_size, stroke=dot_stroke, fill=P.dot_color).render()
    self.origin_active = Ellipse(P.origin_size, fill=self.origin_active_color).render()
    self.origin_inactive = Ellipse(P.origin_size, fill=self.origin_inactive_color).render()

    # If capture figures mode, generate, view, and optionally save some figures
    if P.capture_figures_mode:
        self.fig_dir = os.path.join(P.resources_dir, "figures")
        self.capture_figures()
        self.quit()

    # Initialize participant ID and session options, reloading ID if it already exists
    self.session = TraceLabSession()
    self.user_id = self.session.user_id

    # Once session initialized, show loading screen and finish setup
    self.loading_msg = message("Loading...", "default", blit_txt=False)
    fill()
    blit(self.loading_msg, 5, P.screen_c)
    flip()

    # Scale UI size variables to current screen resolution
    # (layout constants were authored against a 1920x1080 display)
    P.btn_s_pad = scale((P.btn_s_pad, 0), (1920, 1080))[0]
    P.y_pad = scale((0, P.y_pad), (1920, 1080))[1]

    # Initialize messages and response buttons for control trials
    control_fail_txt = "Please keep your finger on the start area for the complete duration."
    self.control_fail_msg = message(control_fail_txt, 'error', blit_txt=False)
    ctrl_buttons = ["1", "2", "3", "4", "5"]
    self.control_bar = ButtonBar(
        buttons=[(i, P.btn_size, None) for i in ctrl_buttons],
        button_size=P.btn_size,
        screen_margins=P.btn_s_pad,
        y_offset=P.y_pad,
        message_txt=P.control_q)

    # Initialize 'next trial' button (placed on the participant's dominant side)
    button_x = 250 if self.handedness == LEFT_HANDED else P.screen_x - 250
    button_y = P.screen_y - 100
    self.next_trial_msg = message(P.next_trial_message, 'default', blit_txt=False)
    self.next_trial_box = Rectangle(300, 75, stroke=(2, (255, 255, 255), STROKE_OUTER))
    self.next_trial_button_loc = (button_x, button_y)
    bounds = [(button_x - 150, button_y - 38), (button_x + 150, button_y + 38)]
    self.add_boundary("next trial button", bounds, RECT_BOUNDARY)

    # Initialize instructions and practice button bar for each condition
    self.instruction_files = {
        PHYS: {'text': "physical_group_instructions.txt", 'frames': "physical_key_frames"},
        MOTR: {'text': "imagery_group_instructions.txt", 'frames': "imagery_key_frames"},
        CTRL: {'text': "control_group_instructions.txt", 'frames': "control_key_frames"}
    }
    self.practice_instructions = message(P.practice_instructions, "instructions",
                                         align="center", blit_txt=False)
    practice_buttons = [('Replay', [200, 100], self.practice),
                        ('Practice', [200, 100], self.__practice__),
                        ('Begin', [200, 100], any_key)]
    self.practice_button_bar = ButtonBar(practice_buttons, [200, 100], P.btn_s_pad, P.y_pad,
                                         finish_button=False)

    # Import all pre-generated figures needed for the current session
    figures = list(set(self.trial_factory.exp_factors["figure_name"]))
    figures.append(P.practice_figure)
    for f in figures:
        if f != "random":
            ui_request()  # keep UI responsive during (potentially slow) imports
            fig_path = os.path.join(P.resources_dir, "figures", f)
            self.test_figures[f] = TraceLabFigure(fig_path)
def setup(self):
    """One-time setup: bandit payout variables, stimuli, layout, timing,
    eye-tracking boundaries, messages, and response collectors."""
    # Bandit Variables
    self.high_payout_baseline = 12
    self.low_payout_baseline = 8
    self.total_score = None
    self.penalty = -5

    # Stimulus Sizes (degrees of visual angle converted to pixels)
    thick_rect_border = deg_to_px(0.5)
    thin_rect_border = deg_to_px(0.1)
    star_size = deg_to_px(0.6)
    star_thickness = deg_to_px(0.1)
    square_size = deg_to_px(3)
    text_size = deg_to_px(0.65)
    large_text_size = deg_to_px(0.85)

    # Generate bandit colours from colour wheel (4 complementary pairs,
    # one pair per block, shuffled across participants)
    self.bandit_colour_combos = []
    if P.blocks_per_experiment > 4:
        msg = ("Only 4 sets of colours available, experiment script must be modified if more"
               "than 4 blocks total are wanted.")
        raise RuntimeError(msg)
    for angle in [0, 45, 90, 135]:
        combo = [const_lum[angle], const_lum[angle + 180]]
        self.bandit_colour_combos.append(combo)
    random.shuffle(self.bandit_colour_combos)

    # Stimulus Drawbjects
    self.thick_rect = Rectangle(square_size, stroke=[thick_rect_border, WHITE, STROKE_CENTER])
    self.thin_rect = Rectangle(square_size, stroke=[thin_rect_border, WHITE, STROKE_CENTER])
    self.left_bandit = Rectangle(square_size, stroke=[thin_rect_border, WHITE, STROKE_CENTER])
    self.right_bandit = Rectangle(square_size, stroke=[thin_rect_border, WHITE, STROKE_CENTER])
    self.neutral_box = self.thin_rect.render()
    self.star = Asterisk(star_size, star_thickness, fill=WHITE)
    self.star_cueback = Asterisk(star_size * 2, star_thickness * 2, fill=WHITE)
    self.star_muted = Asterisk(star_size, star_thickness, fill=GREY)
    self.probe = Ellipse(int(0.75 * square_size), fill=WHITE).render()

    # Layout: placeholder boxes 8 deg left/right of screen centre
    box_offset = deg_to_px(8.0)
    self.left_box_loc = (P.screen_c[0] - box_offset, P.screen_c[1])
    self.right_box_loc = (P.screen_c[0] + box_offset, P.screen_c[1])

    # Timing
    # Note: cotoa = cue-offset target-onset asynchrony
    self.cotoa_min = 700  # ms
    self.cotoa_max = 1000  # ms
    self.feedback_exposure_period = 1.25  # sec

    # EyeLink Boundaries
    fix_bounds = [P.screen_c, square_size / 2]
    self.el.add_boundary('fixation', fix_bounds, CIRCLE_BOUNDARY)

    # Experiment Messages
    self.txtm.styles['default'].font_size = text_size  # re-define default font size in degrees
    self.txtm.add_style("score up", large_text_size, PASTEL_GREEN)
    self.txtm.add_style("score down", large_text_size, PASTEL_RED)
    self.txtm.add_style("timeout", large_text_size, WHITE)

    err_txt = "{0}\n\nPress any key to continue."
    lost_fixation_txt = err_txt.format("Eyes moved! Please keep your eyes on the asterisk.")
    too_soon_txt = err_txt.format("Responded too soon! Please wait until the 'go' signal to "
                                  "make a response.")
    probe_timeout_txt = err_txt.format("No response detected! Please answer louder or faster.")
    bandit_timeout_txt = err_txt.format("Bandit selection timed out!")
    wrong_response_txt = err_txt.format("Wrong response type!\nPlease make vocal responses "
                                        "to probes and keypress responses to bandits.")
    self.err_msgs = {
        'fixation': message(lost_fixation_txt, align='center', blit_txt=False),
        'too_soon': message(too_soon_txt, align='center', blit_txt=False),
        'probe_timeout': message(probe_timeout_txt, 'timeout', align='center', blit_txt=False),
        'bandit_timeout': message(bandit_timeout_txt, 'timeout', align='center', blit_txt=False),
        'wrong_response': message(wrong_response_txt, align='center', blit_txt=False)
    }

    # Initialize separate ResponseCollectors for probe and bandit responses
    self.probe_rc = ResponseCollector(uses=[RC_AUDIO, RC_KEYPRESS])
    self.bandit_rc = ResponseCollector(uses=[RC_AUDIO, RC_KEYPRESS])

    # Initialize ResponseCollector keymap
    self.keymap = KeyMap(
        'bandit_response',  # Name
        ['z', '/'],  # UI labels
        ["left", "right"],  # Data labels
        [sdl2.SDLK_z, sdl2.SDLK_SLASH]  # SDL2 Keysyms
    )

    # Add practice block of 20 trials to start of experiment
    if P.run_practice_blocks:
        self.insert_practice_block(1, trial_counts=20)
def setup(self):
    """One-time setup: stimuli, layout, timing, bandit payout variables,
    eye-tracking boundaries, response collectors, and messages."""
    # Stimulus sizes (degrees of visual angle converted to pixels)
    thick_rect_border = deg_to_px(0.5)
    thin_rect_border = deg_to_px(0.1)
    star_size = deg_to_px(0.6)
    star_thickness = deg_to_px(0.1)
    square_size = deg_to_px(3)
    # NOTE(review): unlike the other sizes this is not passed through
    # deg_to_px(), so the 'payout'/'timeout' styles get a 0.65 pt font —
    # confirm whether deg_to_px(0.65) was intended.
    large_text_size = 0.65

    # Stimulus drawbjects
    self.thick_rect = Rectangle(square_size, stroke=[thick_rect_border, WHITE, STROKE_CENTER])
    self.thin_rect = Rectangle(square_size, stroke=[thin_rect_border, WHITE, STROKE_CENTER])
    self.neutral_box = self.thin_rect.render()
    self.star = Asterisk(star_size, star_thickness, fill=WHITE)
    self.star_cueback = Asterisk(star_size * 2, star_thickness * 2, fill=WHITE)
    self.go = FixationCross(star_size, star_thickness, fill=BLACK)  # '+' = go signal
    self.go.render()
    self.nogo = FixationCross(star_size, star_thickness, fill=BLACK, rotation=45)  # 'x' = nogo
    self.nogo.render()
    self.left_bandit = Ellipse(int(0.75 * square_size))
    self.right_bandit = Ellipse(int(0.75 * square_size))
    self.probe = Ellipse(int(0.75 * square_size))

    # Layout: placeholder boxes 8 deg left/right of screen centre
    box_offset = deg_to_px(8.0)
    self.left_box_loc = (P.screen_c[0] - box_offset, P.screen_c[1])
    self.right_box_loc = (P.screen_c[0] + box_offset, P.screen_c[1])

    # Set cotoa (cue-offset target-onset asynchrony)
    self.cotoa = 800  # ms
    self.feedback_exposure_period = 1.25  # sec

    # Bandit payout variables
    self.high_payout_baseline = 12
    self.low_payout_baseline = 8
    self.total_score = None
    self.penalty = -5

    # Generate colours from colour wheel (3 equidistant, equiluminant hues)
    self.target_colours = [const_lum[0], const_lum[120], const_lum[240]]
    random.shuffle(self.target_colours)

    # Assign to bandits & neutral probe
    self.high_value_colour = self.target_colours[0]
    self.low_value_colour = self.target_colours[1]
    self.neutral_value_colour = self.target_colours[2]

    # EyeLink Boundaries
    fix_bounds = [P.screen_c, square_size / 2]
    self.el.add_boundary('fixation', fix_bounds, CIRCLE_BOUNDARY)

    # Initialize response collectors
    self.probe_rc = ResponseCollector(uses=RC_KEYPRESS)
    self.bandit_rc = ResponseCollector(uses=RC_KEYPRESS)

    # Initialize ResponseCollector keymaps
    self.bandit_keymap = KeyMap(
        'bandit_response',  # Name
        ['z', '/'],  # UI labels
        ["left", "right"],  # Data labels
        [sdl2.SDLK_z, sdl2.SDLK_SLASH]  # SDL2 Keysyms
    )
    self.probe_keymap = KeyMap('probe_response', ['spacebar'], ["pressed"], [sdl2.SDLK_SPACE])

    # Experiment Messages
    self.txtm.add_style("payout", large_text_size, WHITE)
    self.txtm.add_style("timeout", large_text_size, WHITE)

    err_txt = "{0}\n\nPress any key to continue."
    lost_fixation_txt = err_txt.format("Eyes moved! Please keep your eyes on the asterisk.")
    probe_timeout_txt = err_txt.format(
        "No response detected! Please respond as fast and as accurately as possible.")
    bandit_timeout_txt = err_txt.format("Bandit selection timed out!")
    response_on_nogo_txt = err_txt.format(
        "\'nogo\' signal (x) presented\nPlease only respond when you see "
        "the \'go\' signal (+).")
    self.err_msgs = {
        'fixation': message(lost_fixation_txt, align='center', blit_txt=False),
        'probe_timeout': message(probe_timeout_txt, 'timeout', align='center', blit_txt=False),
        'bandit_timeout': message(bandit_timeout_txt, 'timeout', align='center', blit_txt=False),
        'response_on_nogo': message(response_on_nogo_txt, align='center', blit_txt=False)
    }
    self.rest_break_txt = err_txt.format(
        "Whew! that was tricky eh? Go ahead and take a break before continuing.")
    self.end_of_block_txt = "You're done the first task! Please buzz the researcher to let them know!"

    # Insert bandit block
    if P.run_practice_blocks:
        self.insert_practice_block(1, trial_counts=P.trials_bandit_block)
class IOR_Reward_V2(klibs.Experiment):
    """IOR/reward experiment: a practice 'bandit' (reward-learning) block with
    keypress bandit selections, followed by cue/probe (inhibition-of-return)
    blocks with go/nogo keypress responses.

    Fixes relative to the previous revision:
      * block(): 'self.block_type == BANDIT' / '== PROBE' were no-op
        comparisons; they are now assignments as clearly intended, so the
        recorded block type is correct during practice blocks.
      * trial(): clear() was unreachable (placed after the return statement);
        it now runs before the trial data is returned.
    """

    def setup(self):
        """One-time setup: stimuli, layout, timing, bandit payout variables,
        eye-tracking boundaries, response collectors, and messages."""
        # Stimulus sizes (degrees of visual angle converted to pixels)
        thick_rect_border = deg_to_px(0.5)
        thin_rect_border = deg_to_px(0.1)
        star_size = deg_to_px(0.6)
        star_thickness = deg_to_px(0.1)
        square_size = deg_to_px(3)
        # NOTE(review): not passed through deg_to_px() unlike the other sizes;
        # left as-is to preserve behaviour — confirm whether deg_to_px(0.65)
        # was intended.
        large_text_size = 0.65

        # Stimulus drawbjects
        self.thick_rect = Rectangle(square_size, stroke=[thick_rect_border, WHITE, STROKE_CENTER])
        self.thin_rect = Rectangle(square_size, stroke=[thin_rect_border, WHITE, STROKE_CENTER])
        self.neutral_box = self.thin_rect.render()
        self.star = Asterisk(star_size, star_thickness, fill=WHITE)
        self.star_cueback = Asterisk(star_size * 2, star_thickness * 2, fill=WHITE)
        self.go = FixationCross(star_size, star_thickness, fill=BLACK)  # '+' = go signal
        self.go.render()
        self.nogo = FixationCross(star_size, star_thickness, fill=BLACK, rotation=45)  # 'x' = nogo
        self.nogo.render()
        self.left_bandit = Ellipse(int(0.75 * square_size))
        self.right_bandit = Ellipse(int(0.75 * square_size))
        self.probe = Ellipse(int(0.75 * square_size))

        # Layout: placeholder boxes 8 deg left/right of screen centre
        box_offset = deg_to_px(8.0)
        self.left_box_loc = (P.screen_c[0] - box_offset, P.screen_c[1])
        self.right_box_loc = (P.screen_c[0] + box_offset, P.screen_c[1])

        # Set cotoa (cue-offset target-onset asynchrony)
        self.cotoa = 800  # ms
        self.feedback_exposure_period = 1.25  # sec

        # Bandit payout variables
        self.high_payout_baseline = 12
        self.low_payout_baseline = 8
        self.total_score = None
        self.penalty = -5

        # Generate colours from colour wheel (3 equidistant, equiluminant hues)
        self.target_colours = [const_lum[0], const_lum[120], const_lum[240]]
        random.shuffle(self.target_colours)

        # Assign to bandits & neutral probe
        self.high_value_colour = self.target_colours[0]
        self.low_value_colour = self.target_colours[1]
        self.neutral_value_colour = self.target_colours[2]

        # EyeLink Boundaries
        fix_bounds = [P.screen_c, square_size / 2]
        self.el.add_boundary('fixation', fix_bounds, CIRCLE_BOUNDARY)

        # Initialize response collectors
        self.probe_rc = ResponseCollector(uses=RC_KEYPRESS)
        self.bandit_rc = ResponseCollector(uses=RC_KEYPRESS)

        # Initialize ResponseCollector keymaps
        self.bandit_keymap = KeyMap(
            'bandit_response',  # Name
            ['z', '/'],  # UI labels
            ["left", "right"],  # Data labels
            [sdl2.SDLK_z, sdl2.SDLK_SLASH]  # SDL2 Keysyms
        )
        self.probe_keymap = KeyMap('probe_response', ['spacebar'], ["pressed"], [sdl2.SDLK_SPACE])

        # Experiment Messages
        self.txtm.add_style("payout", large_text_size, WHITE)
        self.txtm.add_style("timeout", large_text_size, WHITE)

        err_txt = "{0}\n\nPress any key to continue."
        lost_fixation_txt = err_txt.format("Eyes moved! Please keep your eyes on the asterisk.")
        probe_timeout_txt = err_txt.format(
            "No response detected! Please respond as fast and as accurately as possible.")
        bandit_timeout_txt = err_txt.format("Bandit selection timed out!")
        response_on_nogo_txt = err_txt.format(
            "\'nogo\' signal (x) presented\nPlease only respond when you see "
            "the \'go\' signal (+).")
        self.err_msgs = {
            'fixation': message(lost_fixation_txt, align='center', blit_txt=False),
            'probe_timeout': message(probe_timeout_txt, 'timeout', align='center', blit_txt=False),
            'bandit_timeout': message(bandit_timeout_txt, 'timeout', align='center', blit_txt=False),
            'response_on_nogo': message(response_on_nogo_txt, align='center', blit_txt=False)
        }
        self.rest_break_txt = err_txt.format(
            "Whew! that was tricky eh? Go ahead and take a break before continuing.")
        self.end_of_block_txt = "You're done the first task! Please buzz the researcher to let them know!"

        # Insert bandit block
        if P.run_practice_blocks:
            self.insert_practice_block(1, trial_counts=P.trials_bandit_block)

    def block(self):
        """Per-block setup: sets the block type, shows the running score after
        the bandit task, and shows end-of-block messaging."""
        # Block type defaults to probe trials, overridden in practice block(s)
        self.block_type = PROBE

        # Show total score following completion of bandit task
        if self.total_score:
            fill()
            score_txt = "Total block score: {0} points!".format(self.total_score)
            msg = message(score_txt, 'timeout', blit_txt=False)
            blit(msg, 5, P.screen_c)
            flip()
            any_key()
        self.total_score = 0  # Reset score once presented

        # Bandit task
        if P.practicing:
            # FIX: was 'self.block_type == BANDIT' (a no-op comparison), which
            # left practice blocks mislabelled as PROBE.
            self.block_type = BANDIT
            # Initialize selection counters
            self.times_selected_high = 0
            self.time_selected_low = 0

        # End of block messaging
        if not P.practicing:
            self.block_type = PROBE  # FIX: was '==' (no-op comparison)
            fill()
            msg = message(self.end_of_block_txt, blit_txt=False)
            blit(msg, 5, P.screen_c)
            flip()
            any_key()

    def setup_response_collector(self):
        """Configures the probe and bandit response collectors before each trial."""
        # Configure probe response collector
        self.probe_rc.terminate_after = [1500, TK_MS]
        self.probe_rc.display_callback = self.probe_callback
        self.probe_rc.flip = True
        self.probe_rc.keypress_listener.key_map = self.probe_keymap
        self.probe_rc.keypress_listener.interrupts = True

        # Configure bandit response collector
        self.bandit_rc.terminate_after = [1500, TK_MS]
        self.bandit_rc.display_callback = self.bandit_callback
        self.bandit_rc.flip = True
        self.bandit_rc.keypress_listener.key_map = self.bandit_keymap
        self.bandit_rc.keypress_listener.interrupts = True

    def trial_prep(self):
        """Per-trial preparation: stimulus colours/locations, rest breaks,
        event timecourse, and drift correction."""
        # Reset error flag
        self.targets_shown = False
        self.err = None

        # BANDIT PROPERTIES
        if P.practicing:
            self.cotoa = 'NA'
            # Establish location & colour of bandits
            if self.high_value_location == LEFT:
                self.left_bandit.fill = self.high_value_colour
                self.right_bandit.fill = self.low_value_colour
                self.low_value_location = RIGHT
            else:
                self.left_bandit.fill = self.low_value_colour
                self.right_bandit.fill = self.high_value_colour
                self.low_value_location = LEFT
            self.left_bandit.render()
            self.right_bandit.render()

        # PROBE PROPERTIES
        else:
            # Rest breaks (evenly spaced within the block, skipping the very end)
            if P.trial_number % (P.trials_per_block / P.breaks_per_block) == 0:
                if P.trial_number < P.trials_per_block:
                    fill()
                    msg = message(self.rest_break_txt, 'timeout', blit_txt=False)
                    blit(msg, 5, P.screen_c)
                    flip()
                    any_key()

            # Establish & assign probe location
            self.probe_loc = self.right_box_loc if self.probe_location == RIGHT else self.left_box_loc
            # go/nogo signal always presented w/probe
            self.go_nogo_loc = self.probe_loc

            # Establish & assign probe colour
            if self.probe_colour == HIGH:
                self.probe.fill = self.high_value_colour
            elif self.probe_colour == LOW:
                self.probe.fill = self.low_value_colour
            else:
                self.probe.fill = self.neutral_value_colour
            self.probe.render()

        # Add timecourse of events to EventManager
        if P.practicing:  # Bandit trials
            events = [[1000, 'target_on']]
        else:  # Probe trials
            events = [[1000, 'cue_on']]
            events.append([events[-1][0] + 200, 'cue_off'])
            events.append([events[-1][0] + 200, 'cueback_off'])
            events.append([events[-2][0] + 800, 'target_on'])
        for e in events:
            self.evm.register_ticket(ET(e[1], e[0]))

        # Perform drift correct on Eyelink before trial start
        self.el.drift_correct()

    def trial(self):
        """Runs a single trial (bandit during practice, cue/probe otherwise)
        and returns the trial data."""
        # BANDIT TRIAL
        if P.practicing:
            cotoa, probe_rt = ['NA', 'NA']  # Don't occur in bandit blocks

            # Present placeholders
            while self.evm.before('target_on', True) and not self.err:
                self.confirm_fixation()
                self.present_neutral_boxes()
                flip()

            # BANDIT RESPONSE PERIOD
            self.targets_shown = True  # After bandits shown, don't recycle trial
            # Present bandits and listen for response
            self.bandit_rc.collect()

            # If wrong response made
            if self.err:
                bandit_choice, bandit_rt, reward = ['NA', 'NA', 'NA']
            else:
                self.err = 'NA'
                # Retrieve responses from ResponseCollector(s) & record data
                bandit_choice = self.bandit_rc.keypress_listener.response(value=True, rt=False)
                bandit_rt = self.bandit_rc.keypress_listener.response(value=False, rt=True)
                if bandit_rt == TIMEOUT:
                    self.show_error_message('bandit_timeout')
                    reward = 'NA'
                else:
                    # Determine bandit payout & display
                    reward = self.feedback(bandit_choice)

        # PROBE TRIAL
        else:
            bandit_choice, bandit_rt, reward = ['NA', 'NA', 'NA']  # Don't occur in probe trials

            # Present placeholders & confirm fixation
            while self.evm.before('target_on', True):
                self.confirm_fixation()
                self.present_neutral_boxes()

                # Present cue
                if self.evm.between('cue_on', 'cue_off'):
                    if self.cue_location == LEFT:
                        blit(self.thick_rect, 5, self.left_box_loc)
                    else:
                        blit(self.thick_rect, 5, self.right_box_loc)
                # Present cueback
                elif self.evm.between('cue_off', 'cueback_off'):
                    blit(self.star_cueback, 5, P.screen_c)

                flip()

            # PROBE RESPONSE PERIOD
            self.targets_shown = True  # After probe shown, don't recycle trial
            # Present probes & listen for response
            self.probe_rc.collect()

            # If 'go' trial, check for response
            if self.go_no_go == GO:
                # If wrong response made
                if self.err:
                    probe_rt = 'NA'
                # If correct response OR timeout
                else:
                    self.err = 'NA'
                    probe_rt = self.probe_rc.keypress_listener.response(value=False, rt=True)
                    if probe_rt == TIMEOUT:
                        self.show_error_message('probe_timeout')
                        probe_rt = 'NA'
            # Similarly, for 'nogo' trials
            else:
                probe_rt = 'NA'
                # If response made, penalize
                if len(self.probe_rc.keypress_listener.responses):
                    self.show_error_message('response_on_nogo')
                    self.err = 'response_on_nogo'
                # If no response, continue as normal
                else:
                    self.err = 'NA'

        # Clear remaining stimuli from screen
        # FIX: previously placed after the return statement and thus unreachable.
        clear()

        # Return trial data
        return {
            "block_num": P.block_number,
            "trial_num": P.trial_number,
            "block_type": "BANDIT" if P.practicing else "PROBE",
            "high_value_col": self.high_value_colour[:3] if P.practicing else 'NA',
            "high_value_loc": self.high_value_location if P.practicing else 'NA',
            "low_value_col": self.low_value_colour[:3] if P.practicing else 'NA',
            "low_value_loc": self.low_value_location if P.practicing else 'NA',
            "winning_trial": self.winning_trial if P.practicing else 'NA',
            "bandit_selected": self.bandit_selected if P.practicing else 'NA',
            "bandit_rt": bandit_rt,
            "reward": reward,
            "cue_loc": self.cue_location if not P.practicing else 'NA',
            "cotoa": self.cotoa if not P.practicing else 'NA',
            "probe_loc": self.probe_location if not P.practicing else 'NA',
            "probe_col": self.probe_colour if not P.practicing else 'NA',
            "go_no_go": self.go_no_go if not P.practicing else 'NA',
            "probe_rt": probe_rt,
            "err": self.err
        }

    def trial_clean_up(self):
        # Clear responses from response collectors before next trial
        self.probe_rc.keypress_listener.reset()
        self.bandit_rc.keypress_listener.reset()

    def clean_up(self):
        # Let Ss know when experiment is over
        self.all_done_text = "You're all done! Now I get to take a break.\nPlease buzz the researcher to let them know you're done!"
        fill()
        msg = message(self.all_done_text, 'timeout', blit_txt=False)
        blit(msg, 5, P.screen_c)
        flip()
        any_key()

    def feedback(self, response):
        """Determines & presents reward feedback for a bandit selection.

        Args:
            response: the side chosen ("left"/"right" data label).

        Returns:
            int: the points awarded (or deducted) this trial.
        """
        # Keep count of bandit choices
        if response == self.high_value_location:
            self.bandit_selected = HIGH
            self.times_selected_high = self.times_selected_high + 1
            # Occasionally probe participant learning
            if self.times_selected_high in [5, 10, 15]:
                self.query_learning(HIGH)
        else:
            self.bandit_selected = LOW
            self.time_selected_low = self.time_selected_low + 1
            if self.time_selected_low in [5, 10, 15]:
                self.query_learning(LOW)

        # Determine payout
        if self.winning_trial == YES:
            points = self.bandit_payout(value=self.bandit_selected)
            msg = message("You won {0} points!".format(points), "payout", blit_txt=False)
        else:
            points = self.penalty  # -5
            msg = message("You lost 5 points!", "payout", blit_txt=False)

        # Running point total
        self.total_score += points
        feedback = [points, msg]

        # Present payout
        feedback_exposure = CountDown(self.feedback_exposure_period)
        while feedback_exposure.counting():
            ui_request()
            fill()
            blit(feedback[1], location=P.screen_c, registration=5)
            flip()

        return feedback[0]

    def bandit_payout(self, value):
        """Calculates a bandit payout for the given bandit value (HIGH/LOW)."""
        mean = self.high_payout_baseline if value == HIGH else self.low_payout_baseline
        # sample from normal distribution with sd of 1 and round to nearest int
        return int(random.gauss(mean, 1) + 0.5)

    def confirm_fixation(self):
        """Confirms whether Ss are fixating; flags or recycles the trial if not."""
        if not self.el.within_boundary('fixation', EL_GAZE_POS):
            self.show_error_message('fixation')
            if self.targets_shown:
                self.err = 'left_fixation'
            else:
                raise TrialException('gaze left fixation')  # recycle trial

    def show_error_message(self, msg_key):
        """Presents the error message for msg_key and waits for a keypress."""
        fill()
        blit(self.err_msgs[msg_key], location=P.screen_c, registration=5)
        flip()
        any_key()

    def random_interval(self, lower, upper):
        """Generates a random interval within [lower, upper] that is a multiple
        of the current refresh rate (e.g. 16.7ms for a 60Hz monitor)."""
        min_flips = int(round(lower / P.refresh_time))
        max_flips = int(round(upper / P.refresh_time))
        return random.choice(range(min_flips, max_flips + 1, 1)) * P.refresh_time

    def present_neutral_boxes(self):
        """Presents the fixation asterisk and the two neutral placeholder boxes."""
        fill()
        blit(self.star, 5, P.screen_c)
        blit(self.neutral_box, 5, self.left_box_loc)
        blit(self.neutral_box, 5, self.right_box_loc)

    def bandit_callback(self, before_go=False):
        """Display callback for the bandit response period: placeholders plus
        both bandits."""
        self.confirm_fixation()
        self.present_neutral_boxes()
        blit(self.left_bandit, 5, self.left_box_loc)
        blit(self.right_bandit, 5, self.right_box_loc)

    def probe_callback(self):
        """Display callback for the probe response period: placeholders plus
        the probe and its go/nogo signal."""
        self.confirm_fixation()
        self.present_neutral_boxes()

        # Present probe & go/nogo stimulus
        if self.go_no_go == GO:
            blit(self.probe, 5, self.probe_loc)
            blit(self.go, 5, self.probe_loc)
        else:
            blit(self.probe, 5, self.probe_loc)
            blit(self.nogo, 5, self.probe_loc)

    def query_learning(self, bandit):
        """Assesses learning by asking Ss their anticipated trial earnings for
        the given bandit, and records the answer to the surveys table."""
        if bandit == HIGH:
            anticipated_reward_high = query(user_queries.experimental[0])
            anticipated_reward_survey = {
                'participant_id': P.participant_id,
                'anticipated_reward_high': anticipated_reward_high,
                'anticipated_reward_low': "NA"
            }
        else:
            anticipated_reward_low = query(user_queries.experimental[1])
            anticipated_reward_survey = {
                'participant_id': P.participant_id,
                'anticipated_reward_high': "NA",
                'anticipated_reward_low': anticipated_reward_low
            }

        self.db.insert(anticipated_reward_survey, table='surveys')