Example #1
class RSVPAlertToneCalibrationTask(Task):
    """RSVP Calibration Task that uses alert tones to maintain user focus.

    The calibration task presents an RSVP stimulus sequence to elicit an ERP.
    Parameters control how many stimuli are shown and how long each is
    presented, as well as their color, the text / image inputs, and the alert
    sounds.

    This task uses the 'alert_sounds_path' parameter to determine which sounds
    to play when a letter is presented to the participant. Sounds are read in
    from the configured directory and cycled through in alphabetical order.
    Sounds with the word 'blank' in the file name will not be played, allowing
    experiments to be set up in which some letters do not have alerts.

    Input:
        win (PsychoPy Display Object)
        daq (Data Acquisition Object)
        parameters (Dictionary)
        file_save (String)

    Output:
        file_save (String)
    """
    TASK_NAME = 'RSVP Alert Tone Calibration Task'

    def __init__(self, win, daq, parameters, file_save):
        super(RSVPAlertToneCalibrationTask, self).__init__()
        self._task = RSVPCalibrationTask(win, daq, parameters, file_save)

        # Delay between sound and letter presentation; default is 400 ms.
        # see: Testing the Efficiency and Independence of Attentional Networks
        # by Jin Fan, Bruce D. McCandliss, Tobias Sommer, Amir Raz, and
        # Michael I. Posner
        sound_delay = parameters.get('alert_sound_delay', 0.4)
        alerts = soundfiles(parameters['alert_sounds_path'])

        def play_sound_callback(_sti):
            sound_path = next(alerts)
            if "blank" in sound_path:
                return
            play_sound(sound_path,
                       sound_load_buffer_time=0,
                       sound_post_buffer_time=sound_delay)

        self._task.rsvp.first_stim_callback = play_sound_callback

    def execute(self):
        self.logger.debug(f'Starting {self.name()}!')
        self._task.execute()

    @classmethod
    def label(cls):
        return RSVPAlertToneCalibrationTask.TASK_NAME

    def name(self):
        return RSVPAlertToneCalibrationTask.TASK_NAME
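
A minimal sketch of an iterator with the behavior the docstring describes for soundfiles (read the directory's sound files and cycle through them in alphabetical order). This is an illustration only, not the library's actual implementation; the helper name and the .wav extension are assumptions, and 'blank' files are still yielded so the callback above can skip them.

from itertools import cycle
from pathlib import Path

def cycling_soundfiles(directory: str):
    # Illustrative sketch only: list the directory's sound files, sort them
    # alphabetically, and cycle through them forever.
    paths = sorted(str(path) for path in Path(directory).glob('*.wav'))
    return cycle(paths)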
Example #2
def make_task(display_window,
              daq,
              exp_type,
              parameters,
              file_save,
              signal_model=None,
              language_model=None,
              fake=True,
              auc_filename=None) -> Task:
    """Creates a Task based on the provided parameters.

    Parameters:
    -----------
        display_window: psychopy Window
        daq: DataAcquisitionClient
        exp_type: ExperimentType
        parameters: dict
        file_save: str - path to file in which to save data
        signal_model
        language_model - language model
        fake: boolean - True if the EEG stream is randomly generated
        auc_filename: str
    Returns:
    --------
        Task instance
    """

    # NORMAL RSVP MODES
    if exp_type is ExperimentType.RSVP_CALIBRATION:
        return RSVPCalibrationTask(display_window, daq, parameters, file_save)

    if exp_type is ExperimentType.RSVP_COPY_PHRASE:
        return RSVPCopyPhraseTask(display_window,
                                  daq,
                                  parameters,
                                  file_save,
                                  signal_model,
                                  language_model,
                                  fake=fake)

    # ICON TASKS
    if exp_type is ExperimentType.RSVP_ICON_TO_ICON:
        return RSVPIconToIconTask(display_window, daq, parameters, file_save,
                                  signal_model, language_model, fake, False,
                                  auc_filename)

    if exp_type is ExperimentType.RSVP_ICON_TO_WORD:
        # pylint: disable=fixme
        # TODO: consider a new class for this scenario.
        return RSVPIconToIconTask(display_window, daq, parameters, file_save,
                                  signal_model, language_model, fake, True,
                                  auc_filename)

    # CALIBRATION FEEDBACK TASKS
    if exp_type is ExperimentType.RSVP_ALERT_TONE_CALIBRATION:
        return RSVPAlertToneCalibrationTask(display_window, daq, parameters,
                                            file_save)

    if exp_type is ExperimentType.RSVP_INTER_SEQUENCE_FEEDBACK_CALIBRATION:
        return RSVPInterSequenceFeedbackCalibration(display_window, daq,
                                                    parameters, file_save)

    raise TaskRegistryException(
        'The provided experiment type is not registered.')
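
A brief usage sketch of the factory for the plain calibration case. The wrapper below and its arguments (window, DAQ client, parameters dictionary, save path) are placeholders supplied by the caller, not part of the module.

def run_rsvp_calibration(win, daq_client, parameters, save_folder):
    # Hypothetical helper: build the standard RSVP calibration task via the
    # factory above and run it. All arguments are provided by the caller.
    task = make_task(win, daq_client, ExperimentType.RSVP_CALIBRATION,
                     parameters, save_folder)
    task.execute()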
Example #3
class RSVPTimingVerificationCalibration(Task):
    """RSVP Calibration Task that for verifying timing.

    Input:
        win (PsychoPy Display Object)
        daq (Data Acquisition Object)
        parameters (Dictionary)
        file_save (String)

    Output:
        file_save (String)
    """
    TASK_NAME = 'RSVP Timing Verification Task'

    def __init__(self, win, daq, parameters, file_save):
        super(RSVPTimingVerificationCalibration, self).__init__()
        parameters['stim_height'] = 0.8
        parameters['stim_pos_y'] = 0.2
        self._task = RSVPCalibrationTask(win, daq, parameters, file_save)
        self._task.generate_stimuli = self.generate_stimuli

    def generate_stimuli(self):
        """Generates the inquiries to be presented.
        Returns:
        --------
            tuple(
                samples (list[list[str]]): list of inquiries
                timing (list[list[float]]): list of timings
                color (list[list[str]]): list of colors)
        """
        samples, times, colors = [], [], []

        solid_box = '\u25A0'
        empty_box = '\u25A1'

        target = 'x'  # alternatives: solid_box, 'X'
        fixation = '\u25CB'  # circle

        # alternate between solid and empty boxes
        letters = cycle([solid_box, empty_box])
        time_target, time_fixation, time_stim = self._task.timing
        color_target, color_fixation, color_stim = self._task.color

        inq_len = self._task.stim_length
        inq_stim = [target, fixation, *[next(letters) for _ in range(inq_len)]]
        inq_times = [time_target, time_fixation, *[time_stim for _ in range(inq_len)]]
        inq_colors = [color_target, color_fixation, *[color_stim for _ in range(inq_len)]]

        for _ in range(self._task.stim_number):
            samples.append(inq_stim)
            times.append(inq_times)
            colors.append(inq_colors)

        return (samples, times, colors)

    def execute(self):
        self.logger.debug(f'Starting {self.name()}!')
        self._task.execute()

    @classmethod
    def label(cls):
        return RSVPTimingVerificationCalibration.TASK_NAME

    def name(self):
        return RSVPTimingVerificationCalibration.TASK_NAME
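
To make the generated structure concrete, here is a small worked example of a single inquiry using the same symbols as generate_stimuli; the stim_length of 4 is an assumed value.

from itertools import cycle

letters = cycle(['\u25A0', '\u25A1'])  # solid box, empty box
inquiry = ['x', '\u25CB', *[next(letters) for _ in range(4)]]
# inquiry == ['x', '\u25CB', '\u25A0', '\u25A1', '\u25A0', '\u25A1']
# Each of the task's stim_number inquiries repeats this same pattern, paired
# with timing and color lists of equal length.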
Example #4
    def __init__(self, win, daq, parameters, file_save):
        super(RSVPInterInquiryFeedbackCalibration, self).__init__()
        self._task = RSVPCalibrationTask(win, daq, parameters, file_save)

        self.daq = daq
        self.fs = self.daq.device_info.fs
        self.alp = self._task.alp
        self.rsvp = self._task.rsvp
        self.parameters = parameters
        self.file_save = file_save
        self.enable_breaks = self._task.enable_breaks
        self.window = self._task.window
        self.stim_number = self._task.stim_number
        self.stim_length = self._task.stim_length
        self.is_txt_stim = self.rsvp.is_txt_stim
        self.stimuli_height = self._task.stimuli_height
        self.color = self._task.color
        self.timing = self._task.timing
        self.wait_screen_message = self._task.wait_screen_message
        self.wait_screen_message_color = self._task.wait_screen_message_color

        self.visual_feedback = LevelFeedback(display=self.window,
                                             parameters=self.parameters,
                                             clock=self._task.experiment_clock)

        self.static_offset = self.parameters['static_trigger_offset']
        self.nonletters = ['+', 'PLUS', 'calibration_trigger']
        self.valid_targets = set(self.alp)

        self.time_flash = self.parameters['time_flash']

        self.downsample_rate = self.parameters['down_sampling_rate']
        self.filtered_sampling_rate = self.fs / self.downsample_rate

        self.device_name = self.daq.device_info.name
        self.channel_map = analysis_channels(self.daq.device_info.channels,
                                             self.device_name)

        # EDIT ME FOR FEEDBACK CONFIGURATION

        self.feedback_buffer_time = self.parameters['feedback_buffer_time']
        self.feedback_line_color = self.parameters['feedback_line_color']

        self.psd_method = PSD_TYPE.WELCH

        # The channel used to calculate the PSD from the RSVP inquiry.
        self.psd_channel_index = self.PSD_CHANNEL_INDEX

        # filter parameters
        self.filter_low = self.parameters['filter_low']
        self.filter_high = self.parameters['filter_high']
        self.filter_order = self.parameters['filter_order']
        self.notch_filter_frequency = self.parameters['notch_filter_frequency']

        # get the feedback band of interest
        self.psd_lower_limit = self.parameters['feedback_band_lower_limit']
        self.psd_upper_limit = self.parameters['feedback_band_upper_limit']

        # PSD band of interest to use for feedback (low, high)
        self.psd_export_band = (self.psd_lower_limit, self.psd_upper_limit)

        # length of time to use for PSD calculation
        self.trial_length = self.time_flash * self.stim_length

        self.lvl_5_threshold = self.parameters['feedback_level_5_threshold']
        self.lvl_4_threshold = self.parameters['feedback_level_4_threshold']
        self.lvl_3_threshold = self.parameters['feedback_level_3_threshold']
        self.lvl_2_threshold = self.parameters['feedback_level_2_threshold']

        # True/False; level order is descending from 5 -> 1
        self.feedback_descending = self.parameters['feedback_level_descending']
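
As a rough sketch of the band-power computation these settings configure (Welch's method over one inquiry's data, restricted to the feedback band), the helper below is illustrative only and not the library's internal routine; the function name and nperseg default are assumptions.

import numpy as np
from scipy.signal import welch

def welch_band_power(data, fs, band, nperseg=256):
    # Illustrative sketch: estimate power in the (low, high) Hz band from a
    # 1-D EEG segment sampled at `fs`, using Welch's method as configured above.
    freqs, psd = welch(data, fs=fs, nperseg=min(len(data), nperseg))
    low, high = band
    mask = (freqs >= low) & (freqs <= high)
    # Integrate the PSD over the band (Welch returns uniformly spaced freqs).
    return np.sum(psd[mask]) * (freqs[1] - freqs[0])

# e.g. power = welch_band_power(segment, self.filtered_sampling_rate, self.psd_export_band)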