예제 #1
0
 def _construct_stimulus(self, stimulus, pos, line_color, fill_color,
                         stimuli_type):
     """Build the PsychoPy stimulus object for the given feedback type.

     Returns an ImageStim, TextStim, or Rect depending on stimuli_type;
     any unrecognized type falls through and returns None.
     """
     if stimuli_type == FeedbackType.IMAGE:
         stim = visual.ImageStim(
             win=self.display,
             image=stimulus,
             mask=None,
             pos=pos,
             ori=0.0)
         # Scale the image relative to the display so it matches the
         # configured stimulus height.
         stim.size = resize_image(
             stimulus, self.display.size, self.height_stim)
         return stim
     if stimuli_type == FeedbackType.TEXT:
         return visual.TextStim(
             win=self.display,
             font=self.font_stim,
             text=stimulus,
             height=self.height_stim,
             pos=pos,
             color=fill_color)
     if stimuli_type == FeedbackType.SHAPE:
         # NOTE: the shape uses the configured self.pos_stim, not the
         # pos argument passed in.
         return visual.Rect(
             win=self.display,
             width=self.width_stim,
             height=self.height_stim,
             fillColor=fill_color,
             lineColor=line_color,
             pos=self.pos_stim,
             lineWidth=self.feedback_line_width,
             ori=0.0)
예제 #2
0
파일: display.py 프로젝트: CAMBI-tech/BciPy
    def _generate_inquiry(self):
        """Generate inquiry.

        Build the stimuli (with frame timing and labels) for the next
        RSVP inquiry.
        """
        inquiry_stimuli = []
        for idx, stimulus in enumerate(self.stimuli_inquiry):
            props = {}

            # turn ms timing into frames! Much more accurate!
            props['time_to_present'] = int(
                self.stimuli_timing[idx] * self.refresh_rate)

            # Use a per-stimulus size when configured; otherwise fall back
            # to the default height.
            stim_height = (self.size_list_sti[idx]
                           if self.size_list_sti else self.stimuli_height)

            if stimulus.endswith('.png'):
                # Image stimulus: load the image and rescale it.
                sti = self.create_stimulus(
                    mode='image', height_int=stim_height)
                sti.image = stimulus
                sti.size = resize_image(sti.image, sti.win.size, stim_height)
                props['sti'] = sti
                props['sti_label'] = path.splitext(
                    path.basename(stimulus))[0]
            else:
                # Text stimulus; customize presentation of the space char.
                sti = self.create_stimulus(
                    mode='text', height_int=stim_height)
                sti.text = (stimulus if stimulus != SPACE_CHAR
                            else self.space_char)
                sti.color = self.stimuli_colors[idx]
                props['sti'] = sti
                props['sti_label'] = stimulus

                # Shrink text that would be too wide for the window.
                text_width = sti.boundingBox[0]
                if text_width > self.window.size[0]:
                    monitor_width, monitor_height = get_screen_resolution()
                    text_height = sti.boundingBox[1]
                    if self.window.size[0] == monitor_width:
                        # Full-screen: text size in Psychopy norm units is
                        # monitor width / monitor height.
                        new_text_width = monitor_width / monitor_height
                    else:
                        # Windowed: text width is calculated relative to
                        # both monitor size and window size.
                        new_text_width = (
                            self.window.size[1] / monitor_height
                        ) * (monitor_width / monitor_height)
                    sti.height = (
                        text_height * new_text_width) / text_width
            inquiry_stimuli.append(props)
        return inquiry_stimuli
예제 #3
0
    def update_task_state(
            self,
            image_path,
            task_height,
            rect_color,
            window_size,
            is_word):
        """ Updates task state of Icon to Icon/Word Matching Task by changing the
        image or text displayed at the top of the screen.
        Also updates rectangle size.
            Args:
                image_path(str): the path to the image to be displayed
                    (or the word text when is_word is True)
                task_height(int): the height of the task image
                rect_color(str): the color of the rectangle
                window_size(tuple): The size of the window
                is_word(bool): word matching task
        """
        if is_word:
            # Word-matching: display text at the top of the screen.
            text = image_path if len(image_path) > 0 else ' '
            probe = visual.TextStim(
                win=self.window, font=self.task.font, text=text)
            # Position the task text based on its rendered pixel width.
            x_task_pos = (
                probe.boundingBox[0] * 2.2) / self.window.size[0] - 1
            self.task_pos = (x_task_pos, self.task_pos[1])
            self.update_task(
                text=text, color_list=['white'], pos=self.task_pos)
            return

        # Icon-matching: display an image at the top of the screen.
        self.task.image = image_path
        image_width, image_height = resize_image(
            image_path, window_size, task_height)

        self.target_text.pos = (
            self.stim_pos[0] - image_width - 0.5,
            self.stim_pos[1])

        self.task.pos = (
            self.task_pos[0] + image_width * 2,
            self.task_pos[1] - image_width / 2)
        self.task.size = (image_width * 2, image_height * 2)

        # Reset the highlight rectangle and rescale it to the image's
        # aspect ratio.
        self.rect_drawn_frames = 0
        self.rect.width = image_width / task_height * self.stim_height
        self.rect.height = image_height / task_height * self.stim_height
        self.rect.lineColor = rect_color
예제 #4
0
    def wait_screen(self, message, color):
        """Wait Screen.

        Displays a wait message (and the BCI logo, when it can be loaded)
        until the next screen flip.

        Args:
            message(string): message to be displayed while waiting
            color(string): PsychoPy color of the message text
        """
        # Construct the wait message
        wait_message = visual.TextStim(win=self.window,
                                       font=self.stimuli_font,
                                       text=message,
                                       height=.1,
                                       color=color,
                                       pos=(0, -.5),
                                       wrapWidth=2,
                                       colorSpace='rgb',
                                       opacity=1,
                                       depth=-6.0)

        # Try adding our BCI logo. Skip it if the image cannot be loaded;
        # a missing logo should not block the wait screen.
        logo_path = 'bcipy/static/images/gui_images/bci_cas_logo.png'
        try:
            wait_logo = visual.ImageStim(
                self.window,
                image=logo_path,
                pos=(0, .5),
                mask=None,
                ori=0.0)
            wait_logo.size = resize_image(logo_path, self.window.size, 1)
            wait_logo.draw()
        except Exception:
            self.logger.debug('Cannot load logo image')

        # Draw and flip the screen.
        wait_message.draw()
        self.window.flip()
예제 #5
0
def _calibration_trigger(experiment_clock: core.Clock,
                         trigger_type: str = 'sound',
                         trigger_name: str = 'calibration_trigger',
                         display=None,
                         on_trigger=None) -> List[tuple]:
    """Calibration Trigger.

        Outputs triggers for the purpose of calibrating data and stimuli.
        This is an ongoing difficulty between OS, DAQ devices and stimuli type. This
        code aims to operationalize the approach to finding the correct DAQ samples in
        relation to our trigger code.

    PARAMETERS
    ---------
        experiment_clock(clock): clock with getTime() method, which is used in the code
            to report timing of stimuli
        trigger_type(string): type of trigger that is desired (sound, image, etc)
        trigger_name(string): label recorded with the trigger timing
        display(DisplayWindow): a window that can display stimuli. Currently, a Psychopy window.
        on_trigger(function): optional callback; if present gets called
                 when the calibration trigger is fired; accepts a single
                 parameter for the timing information.
        Return:
            timing(array): timing values for the calibration triggers to be written to trigger file or
                    used to calculate offsets.

    Raises:
        Exception: when trigger_type is IMAGE_TYPE but no display is
            provided, or when trigger_type is not implemented.
    """
    trigger_callback = TriggerCallback()

    # If sound trigger is selected, output calibration tones
    if trigger_type == SOUND_TYPE:
        play_sound(
            sound_file_path='bcipy/static/sounds/1k_800mV_20ms_stereo.wav',
            dtype='float32',
            track_timing=True,
            sound_callback=trigger_callback,
            sound_load_buffer_time=0.5,
            experiment_clock=experiment_clock,
            # Use the caller-supplied name (previously hardcoded, which
            # ignored a non-default trigger_name).
            trigger_name=trigger_name)

    elif trigger_type == IMAGE_TYPE:
        if display:
            calibration_box = visual.ImageStim(
                win=display,
                image='bcipy/static/images/testing_images/white.png',
                pos=(-.5, -.5),
                mask=None,
                ori=0.0)
            calibration_box.size = resize_image(
                'bcipy/static/images/testing_images/white.png',
                display.size, 0.75)

            # Record timing (and fire the optional callback) on the flip
            # that first presents the calibration box.
            display.callOnFlip(
                trigger_callback.callback, experiment_clock, trigger_name)
            if on_trigger is not None:
                display.callOnFlip(on_trigger, trigger_name)

            # Present the box for roughly one second of frames.
            presentation_time = int(1 * display.getActualFrameRate())
            for _frame in range(presentation_time):
                calibration_box.draw()
                display.flip()

        else:
            raise Exception(
                'Display object required for calibration with images!')

    else:
        raise Exception('Trigger type not implemented for Calibration yet!')

    return trigger_callback.timing
예제 #6
0
    def do_sequence(self):
        """Do Sequence.

        Animates a sequence of flashing letters to achieve RSVP.

        Returns:
            timing(list): trigger timing entries, one per stimulus
                presented (preceded by the calibration trigger timing on
                the first run).
        """

        # init an array for timing information
        timing = []

        if self.first_run:
            # play a sequence start sound to help orient triggers
            first_stim_timing = _calibration_trigger(
                self.experiment_clock,
                trigger_type=self.trigger_type, display=self.window,
                on_trigger=self.marker_writer.push_marker)

            timing.append(first_stim_timing)

            self.first_stim_time = first_stim_timing[-1]
            self.first_run = False

        # do the sequence
        for idx in range(len(self.stimuli_sequence)):

            # set a static period to do all our stim setting.
            #   will warn if ISI value is violated.
            self.staticPeriod.start(self.static_time)

            # turn ms timing into frames! Much more accurate!
            self.time_to_present = int(
                self.stimuli_timing[idx] * self.refresh_rate)

            # check if stimulus needs to use a non-default size
            if self.size_list_sti:
                this_stimuli_size = self.size_list_sti[idx]
            else:
                this_stimuli_size = self.stimuli_height

            # Set the Stimuli attrs
            if self.stimuli_sequence[idx].endswith('.png'):
                self.sti = self.create_stimulus(
                    mode='image', height_int=this_stimuli_size)
                self.sti.image = self.stimuli_sequence[idx]
                self.sti.size = resize_image(
                    self.sti.image, self.sti.win.size, this_stimuli_size)
                sti_label = path.splitext(
                    path.basename(self.stimuli_sequence[idx]))[0]
            else:
                # text stimulus
                self.sti = self.create_stimulus(
                    mode='text', height_int=this_stimuli_size)
                txt = self.stimuli_sequence[idx]
                # customize presentation of space char.
                self.sti.text = txt if txt != SPACE_CHAR else self.space_char
                self.sti.color = self.stimuli_colors[idx]
                sti_label = txt

                # test whether the word will be too big for the screen
                text_width = self.sti.boundingBox[0]
                if text_width > self.window.size[0]:
                    info = get_system_info()
                    text_height = self.sti.boundingBox[1]
                    # If we are in full-screen, text size in Psychopy norm units
                    # is monitor width/monitor height
                    if self.window.size[0] == info['RESOLUTION'][0]:
                        new_text_width = (info['RESOLUTION'][0] /
                                          info['RESOLUTION'][1])
                    else:
                        # If not, text width is calculated relative to both
                        # monitor size and window size
                        new_text_width = (
                            self.window.size[1] / info['RESOLUTION'][1]) * (
                                info['RESOLUTION'][0] / info['RESOLUTION'][1])
                    new_text_height = (
                        text_height * new_text_width) / text_width
                    self.sti.height = new_text_height

            # End static period
            self.staticPeriod.complete()

            # Reset the timing clock to start presenting
            self.window.callOnFlip(
                self.trigger_callback.callback, self.experiment_clock,
                sti_label)
            self.window.callOnFlip(self.marker_writer.push_marker, sti_label)

            if idx == 0 and callable(self.first_stim_callback):
                self.first_stim_callback(self.sti)

            # Draw stimulus for n frames
            for _n_frames in range(self.time_to_present):
                self.sti.draw()
                self.draw_static()
                self.window.flip()

            # Append timing information. (The original branched on
            # self.is_txt_stim here, but both branches were identical.)
            timing.append(self.trigger_callback.timing)

            self.trigger_callback.reset()

        # draw in static and flip once more
        self.draw_static()
        self.window.flip()

        return timing