def update_task_state(self, image_path, task_height, rect_color, window_size, is_word):
    """Updates task state of Icon to Icon/Word Matching Task by changing the
    image or text displayed at the top of the screen. Also updates rectangle
    size.

    Args:
        image_path(str): the path to the image to be displayed (when is_word
            is True, this is the word text itself)
        task_height(int): the height of the task image
        rect_color(str): the color of the rectangle
        window_size(tuple): The size of the window
        is_word(bool): Whether or not this is an icon to word matching task
    """
    if is_word:
        # Display text at top of screen if we are matching icons to words.
        # An empty word is replaced by a single space so TextStim has content.
        txt = image_path if len(image_path) > 0 else ' '
        # Throwaway TextStim used only to measure the rendered text width
        # (boundingBox is in pixels).
        tmp2 = visual.TextStim(win=self.win, font=self.task.font, text=txt)
        # Map pixel width into norm units ([-1, 1], left edge at -1).
        # NOTE(review): the 2.2 factor presumably adds padding beyond the
        # exact 2x pixel-to-norm conversion — confirm intent.
        x_pos_task = (tmp2.boundingBox[0] * 2.2) / self.win.size[0] - 1
        self.pos_task = (x_pos_task, self.pos_task[1])
        self.update_task(text=txt, color_list=['white'], pos=self.pos_task)
    else:
        # Otherwise, display an image at the top of the screen
        self.task.image = image_path
        image_width, image_height = resize_image(image_path, window_size, task_height)
        self.target_text.pos = (self.pos_sti[0] - image_width - 0.5, self.pos_sti[1])
        self.task.pos=(self.pos_task[0] + image_width * 2, self.pos_task[1] - image_width/2)
        self.task.size = (image_width * 2, image_height * 2)
        # Rectangle sizing depends on image_width/image_height, which are
        # only bound on this (image) branch; scale the feedback rectangle
        # relative to the stimulus height.
        self.rect_drawn_frames = 0
        self.rect.width = image_width/task_height * self.sti_height
        self.rect.height = image_height/task_height * self.sti_height
        self.rect.lineColor = rect_color
def _construct_stimulus(self, stimulus, pos):
    """Construct a PsychoPy stimulus for a single item.

    Args:
        stimulus(str): either a path to a .png image or the text to render
        pos(tuple): (x, y) position for the stimulus

    Returns:
        visual.ImageStim for image paths, otherwise visual.TextStim.

    Side effects:
        For image stimuli, resizes the feedback rectangle (self.rect) to
        match the image, makes it fully opaque, and recolors its outline.
    """
    # Identify image stimuli by file suffix, consistent with do_sequence's
    # endswith('.png') check. (A plain substring test would misclassify
    # text that merely contains '.png'.)
    if stimulus.endswith('.png'):
        image_stim = visual.ImageStim(win=self.display,
                                      image=stimulus,
                                      mask=None,
                                      pos=pos,
                                      ori=0.0)
        image_stim.size = resize_image(stimulus, self.display.size,
                                       self.height_stim)

        # Fit the feedback rectangle around the resized image
        self.rect.width = image_stim.size[0]
        self.rect.height = image_stim.size[1]
        self.rect.opacity = 1
        self.rect.lineColor = self.message_color
        return image_stim

    return visual.TextStim(win=self.display,
                           font=self.font_stim,
                           text=stimulus,
                           height=self.height_stim,
                           pos=pos)
def wait_screen(self, message, color):
    """Wait Screen.

    Draws a centered wait message (plus the BCI logo when it can be
    loaded) and flips the window.

    Args:
        message(string): message to be displayed while waiting
        color(string): PsychoPy color for the message text
    """
    # Construct the wait message
    wait_message = visual.TextStim(win=self.win,
                                   font=self.font_stim,
                                   text=message,
                                   height=.1,
                                   color=color,
                                   pos=(0, -.5),
                                   wrapWidth=2,
                                   colorSpace='rgb',
                                   opacity=1,
                                   depth=-6.0)

    # Try adding our BCI logo. A missing/broken image is best-effort only
    # and must not prevent the wait screen from showing.
    try:
        wait_logo = visual.ImageStim(
            self.win,
            image='bcipy/static/images/gui_images/bci_cas_logo.png',
            pos=(0, .5),
            mask=None,
            ori=0.0)
        wait_logo.size = resize_image(
            'bcipy/static/images/gui_images/bci_cas_logo.png',
            self.win.size, 1)
        wait_logo.draw()
    except Exception:
        self.logger.debug("Cannot load logo image")

    # Draw and flip the screen.
    wait_message.draw()
    self.win.flip()
def _calibration_trigger(experiment_clock: core.Clock,
                         trigger_type: str = 'sound',
                         trigger_name: str = 'calibration_trigger',
                         display=None,
                         on_trigger=None) -> List[tuple]:
    """Calibration Trigger.

    Outputs triggers for the purpose of calibrating data and stimuli.
    This is an ongoing difficulty between OS, DAQ devices and stimuli
    type. This code aims to operationalize the approach to finding the
    correct DAQ samples in relation to our trigger code.

    PARAMETERS
    ---------
        experiment_clock(clock): clock with getTime() method, which is
            used in the code to report timing of stimuli
        trigger_type(string): type of trigger that is desired (sound,
            image, etc)
        trigger_name(string): name recorded alongside the trigger timing
        display(DisplayWindow): a window that can display stimuli.
            Currently, a Psychopy window.
        on_trigger(function): optional callback; if present gets called
            when the calibration trigger is fired; accepts a single
            parameter for the timing information.

    Return:
        timing(array): timing values for the calibration triggers to be
            written to trigger file or used to calculate offsets.

    Raises:
        Exception: for an unimplemented trigger type, or an image trigger
            requested without a display.
    """
    trigger_callback = TriggerCallback()

    # If sound trigger is selected, output calibration tones
    if trigger_type == SOUND_TYPE:
        play_sound(
            sound_file_path='bcipy/static/sounds/1k_800mV_20ms_stereo.wav',
            dtype='float32',
            track_timing=True,
            sound_callback=trigger_callback,
            sound_load_buffer_time=0.5,
            experiment_clock=experiment_clock,
            # Pass the caller-supplied name; it was previously hard-coded
            # to 'calibration_trigger', silently ignoring the parameter.
            trigger_name=trigger_name)

    elif trigger_type == IMAGE_TYPE:
        if display:
            calibration_box = visual.ImageStim(
                win=display,
                image='bcipy/static/images/testing_images/white.png',
                pos=(-.5, -.5),
                mask=None,
                ori=0.0)
            calibration_box.size = resize_image(
                'bcipy/static/images/testing_images/white.png',
                display.size, 0.75)

            # Record the trigger timing on the flip that first shows the box
            display.callOnFlip(trigger_callback.callback, experiment_clock,
                               trigger_name)
            if on_trigger is not None:
                display.callOnFlip(on_trigger, trigger_name)

            # Present for roughly one second's worth of frames
            presentation_time = int(1 * display.getActualFrameRate())
            for _frame in range(presentation_time):
                calibration_box.draw()
                display.flip()
        else:
            raise Exception(
                'Display object required for calibration with images!')
    else:
        raise Exception('Trigger type not implemented for Calibration yet!')

    return trigger_callback.timing
def do_sequence(self):
    """Do Sequence.

    Animates a sequence of flashing letters to achieve RSVP. Presents
    each stimulus in self.stim_sequence for its configured duration,
    registering trigger/marker callbacks on the first flip of each
    stimulus.

    Returns:
        timing(list): trigger timing entries, one per presented stimulus
            (plus the calibration trigger timing on the first run).
    """
    # init an array for timing information
    timing = []

    if self.first_run:
        # play a sequence start sound to help orient triggers
        stim_timing = _calibration_trigger(
            self.experiment_clock,
            trigger_type=self.trigger_type,
            display=self.win,
            on_trigger=self.marker_writer.push_marker)

        timing.append(stim_timing)
        self.first_stim_time = stim_timing[-1]
        self.first_run = False

    # do the sequence
    for idx in range(len(self.stim_sequence)):

        # set a static period to do all our stim setting.
        # will warn if ISI value is violated.
        self.staticPeriod.start(self.static_period_time)

        # turn ms timing into frames! Much more accurate!
        self.time_to_present = int(self.time_list_sti[idx] * self.refresh_rate)

        # check if stimulus needs to use a non-default size
        if self.size_list_sti:
            this_stimuli_size = self.size_list_sti[idx]
        else:
            this_stimuli_size = self.height_stim

        # Set the Stimuli attrs
        if self.stim_sequence[idx].endswith('.png'):
            self.sti = self.create_stimulus(mode='image',
                                            height_int=this_stimuli_size)
            self.sti.image = self.stim_sequence[idx]
            self.sti.size = resize_image(self.sti.image, self.sti.win.size,
                                         this_stimuli_size)
            # label the trigger with the image's base filename
            sti_label = self.stim_sequence[idx].split('/')[-1].split('.')[0]
        else:
            # text stimulus
            self.sti = self.create_stimulus(mode='text',
                                            height_int=this_stimuli_size)
            txt = self.stim_sequence[idx]
            # customize presentation of space char.
            self.sti.text = txt if txt != SPACE_CHAR else self.space_char
            self.sti.color = self.color_list_sti[idx]
            sti_label = txt

            # test whether the word will be too big for the screen
            text_width = self.sti.boundingBox[0]
            if text_width > self.win.size[0]:
                info = get_system_info()
                text_height = self.sti.boundingBox[1]
                # If we are in full-screen, text size in Psychopy norm units
                # is monitor width/monitor height
                if self.win.size[0] == info['RESOLUTION'][0]:
                    new_text_width = info['RESOLUTION'][0] / info['RESOLUTION'][1]
                else:
                    # If not, text width is calculated relative to both
                    # monitor size and window size
                    new_text_width = (
                        self.win.size[1] / info['RESOLUTION'][1]) * (
                            info['RESOLUTION'][0] / info['RESOLUTION'][1])
                new_text_height = (text_height * new_text_width) / text_width
                self.sti.height = new_text_height

        # End static period
        self.staticPeriod.complete()

        # Reset the timing clock to start presenting
        self.win.callOnFlip(self.trigger_callback.callback,
                            self.experiment_clock,
                            sti_label)
        self.win.callOnFlip(self.marker_writer.push_marker, sti_label)

        if idx == 0 and callable(self.first_stim_callback):
            self.first_stim_callback(self.sti)

        # Draw stimulus for n frames
        for _n_frames in range(self.time_to_present):
            self.sti.draw()
            self.draw_static()
            self.win.flip()

        # append timing information. (Both branches of the former
        # is_txt_sti conditional were identical, so it was removed.)
        timing.append(self.trigger_callback.timing)

        self.trigger_callback.reset()

    # draw in static and flip once more
    self.draw_static()
    self.win.flip()

    return timing