Code example #1
File: nback.py  Project: noanutke/fMRI_E_Project
    def save_trial_data(self, key, rt, counter):
        target_type = self.get_target_type(counter)
        response_to_target, is_success = self.get_response_to_target(
            target_type, key)

        self.exp.data.add([str(datetime.datetime.now()), self.digit, self.position_text, target_type, \
                           key, rt, response_to_target, is_success, self.n, self.order, self.is_practice])

        utils.push_sample_current_time(self.outlet, [
            "trialResultDescription_task_nBack_resultType_" +
            response_to_target
        ])

        expected_response = "1"
        actual_response = "1"

        if target_type == "Dual":
            expected_response = "2"
        elif target_type is None:
            expected_response = "0"

        if key == self.dual_target:
            actual_response = "2"
        elif key is None:
            actual_response = "0"
        utils.push_sample_current_time(self.outlet, ["trialResult_task_nBack_expected_" + expected_response + \
                                 "_actual_" + actual_response])
Code example #2
    def __init__(self, screen_height, screen_width, exp, subNumber, outlet):
        self.screen_height = screen_height
        self.screen_width = screen_width
        self.subNumber = subNumber
        self.exp = exp
        self.fixationTimes = [6,6,3,9]  # Array of fixation time for each block - the average of the array is 6
        random.shuffle(self.fixationTimes)

        self.current_block_order_number = ""  # block order (1-4)
        self.blocks_order = []  # blocks array
        self.trials_locations_array = []    #arrow locations lists for all blocks
        self.trials_directions_array = []   #arrow direction lists for all blocks

        self.outlet = outlet # LSL stream

        # init stroop class
        self.experiment = stroop(self.exp, self.outlet, self.subNumber)

        # create stress and load self reports files
        current_hour = str(datetime.datetime.now().hour)
        current_min = str(datetime.datetime.now().minute)
        self.stress_evaluation_log = WriteToExcel("stress_evaluation_stroop" + self.subNumber + "_" + current_hour + "_" + current_min, "stress")
        self.cognitive_load_log = WriteToExcel("cognitive_load_evaluation_stroop" + self.subNumber + "_" + current_hour + "_" + current_min, "load")

        self.start_again = True  # whether the condition choice (practice/test) should be shown again
        utils.push_sample_current_time(self.outlet, ["startTask_task_stroop"] )
Code example #3
    def __init__(self, screen_height, screen_width, exp, subNumber, outlet):
        self.n = ""
        # nLevel
        self.outlet = outlet
        # LSL stream
        self.stimuli_type = ""  # practice type: auditory(a) / visual(b) / both
        self.subNumber = subNumber  # subject number
        self.exp = exp
        self.screen_height = screen_height
        self.screen_width = screen_width
        self.is_practice = False

        self.fixationTimes = [6, 6, 9, 9, 9, 3, 3, 3]  # fixation times array
        random.shuffle(self.fixationTimes)

        self.is_baseline = False  # is this a baseline block (nLevel = 0)
        self.current_block_order_number = ""  # block order (1-4)
        self.blocks_order = []  # blocks array
        self.letters_lists = [""]  #letters lists for all blocks
        self.locations_lists = [""]  #locations list for all blocks

        # init Nback class
        self.experiment = Nback(self.exp, self.outlet, self.subNumber)

        # create stress and load self reports files
        current_hour = str(datetime.datetime.now().hour)
        current_min = str(datetime.datetime.now().minute)
        self.stress_evaluation_log = WriteToExcel(self.stress_evaluation_file_prefix + \
                                                  self.subNumber + "_" + current_hour + "_" + current_min, "stress")
        self.cognitive_load_log = WriteToExcel(self.load_evaluation_file_prefix + \
                                               self.subNumber + "_" + current_hour + "_" + current_min, "load")

        utils.push_sample_current_time(
            self.outlet,
            ["startTask_task_nBack"])  #send start task trigger to LSL
Code example #4
    def __init__(self, exp, screen_height, screen_width, type, cognitive_load_log, outlet, subNumber, \
                 order, task):

        self.cognitive_load_log = cognitive_load_log
        self.task = task
        self.subNumber = subNumber
        self.order = order
        self.screen_height = screen_height - 300
        self.screen_width = screen_width
        self.outlet = outlet  #LSL stream

        self.line_length = self.screen_width * 0.5
        self.line_start = 0 - self.line_length / 2
        self.line_end = 0 + self.line_length / 2
        self.exp = exp

        self.text_array = []
        self.edges_text = []
        self.line_positions_y = []
        self.old_marks_positions = []
        self.new_marks_positions = []

        self.highest_height = 0 + self.screen_height / 2

        if type == "stress":
            self.text_array = self.stressTitleText
            self.edges_text = self.stressEdgesText
            self.wait_for_miliseconds = 9000

        else:
            self.text_array = self.loadTitleText
            self.edges_text = self.loadEdgesText
            self.wait_for_miliseconds = 20000  # wait only 20 seconds because, due to various delays,
            # the actual duration comes out to about 21 seconds

        self.number_of_lines = len(self.text_array)

        # create arrays holding all line positions, evenly spaced over the screen height
        spaces = self.screen_height / (self.number_of_lines + 1)
        for index in range(1, self.number_of_lines + 1):
            position_y = self.highest_height - spaces * index
            self.line_positions_y.append(position_y)
            self.new_marks_positions.append(0)
            self.old_marks_positions.append(0)

        self.paint_all_lines()

        # wait for responses
        utils.push_sample_current_time(
            self.outlet, ["eval_task_" + task + "_start_1_" + type + "_1"])
        self.wait_for_marks()
        utils.push_sample_current_time(
            self.outlet, ["eval_task_" + task + "_end_1_" + type + "_1"])

        self.save_current_evaluation_to_file()
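The loop in this constructor spreads the rating lines evenly over the usable screen height (expyriment uses a centered coordinate system, so y = 0 is the middle of the screen). A quick worked example with made-up numbers, assuming a 600-pixel usable height and three lines:

# Illustrative values only, not taken from the project.
screen_height = 600                              # usable height after the 300 px subtracted above
highest_height = screen_height / 2               # 300.0, top of the drawing area
number_of_lines = 3
spaces = screen_height / (number_of_lines + 1)   # 150.0 between neighbouring lines
line_positions_y = [highest_height - spaces * i for i in range(1, number_of_lines + 1)]
print(line_positions_y)                          # [150.0, 0.0, -150.0]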
Code example #5
File: stroop.py  Project: noanutke/fMRI_E_Project
    def save_trial_data(self, key, rt, trial_index):
        is_success = self.is_success(self.directions[trial_index], key)
        utils.push_sample_current_time(self.outlet, \
                                       ["trialResult_task_stroop_success_" + ("1" if is_success else "0")])
        self.exp.data.add([str(datetime.datetime.now()),
                           self.locations[trial_index], self.directions[trial_index], \
                           key, rt, is_success, self.is_practice, \
                           self.block, self.order])
Code example #6
File: stroop.py  Project: noanutke/fMRI_E_Project
    def run(self, block, is_practice, locations, directions, order, block_index=0):
        self.blockIndex = block_index
        self.correct_trials = 0
        self.order = order
        self.block = block
        self.is_practice = is_practice
        self.up_key = self.practice_up_key if self.is_practice else self.test_up_key
        self.down_key = self.practice_down_key if self.is_practice else self.test_down_key
        self.locations = locations
        self.directions = directions

        # send start stroop block LSL trigger
        utils.push_sample_current_time(self.outlet, ["startBlock_task_stroop_practice_" + ("1" if self.is_practice else "0") \
                                 + "_incong_" + ("1" if "incong" in self.block else "0") \
                                 + "_order_" + self.order \
                                 + "_subNumber_" + self.subNumber + "_blockIndex_" + str(block_index+1)])

        self.run_experiment()

        # send end stroop block LSL trigger
        utils.push_sample_current_time(self.outlet,["endBlock_task_stroop"])
Code example #7
File: nback.py  Project: noanutke/fMRI_E_Project
    def run(self, n, letters_list, locations_list, is_practice, stimuli_type="both",\
            isBaseline=False, order = "", block_index=0):
        self.blockIndex = block_index
        self.stimuli_type = stimuli_type
        self.with_audio_stimuli = stimuli_type != "v"  # auditory stimuli unless this is a visual-only block
        self.with_visual_stimuli = stimuli_type != "a"  # visual stimuli unless this is an auditory-only block
        self.correct_trials = 0
        self.order = order
        self.trials_number = 0
        self.hit_trials = 0
        self.digit_list = []
        self.positions_list_text = []
        self.positions_list_numbers = []

        self.n = int(n)
        self.digit = None
        self.position = None
        self.position_text = None

        self.is_practice = is_practice
        self.is_dual_practice = is_practice and stimuli_type == 'both'
        self.single_target = self.practice_single_target if self.is_practice else self.test_single_target
        self.dual_target = self.practice_dual_target if self.is_practice else self.test_dual_target

        self.is_baseline = isBaseline

        # send start nBack block LSL trigger
        utils.push_sample_current_time(self.outlet, ["startBlock_task_nBack_practice_" + ("1" if self.is_practice else "0") \
                                 + "_baseline_" + ("1" if self.is_baseline else "0") \
                                 + "_level_" + str(self.n) + "_order_" + self.order \
                                 + "_subNumber_" + self.subNumber + "_blockIndex_" + str(block_index+1)])

        self.init_stimuli(letters_list, locations_list)

        self.last_trial_error = False
        self.run_experiment()

        # send end nBack block LSL trigger
        utils.push_sample_current_time(self.outlet, ["endBlock_task_nBack"])
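The startBlock trigger above packs all block parameters into a single marker string. With illustrative values that are not taken from the project (is_practice=False, isBaseline=False, n=2, order="1", subNumber="05", block_index=0), the pushed marker would read:

startBlock_task_nBack_practice_0_baseline_0_level_2_order_1_subNumber_05_blockIndex_1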
Code example #8
File: restBlock.py  Project: noanutke/fMRI_E_Project
    def start_rest(self):

        utils.push_sample_current_time(self.lsl_stream, ["instructions_start_1_task_" + self.task])
        cont = self.write_anticipation_text()

        utils.push_sample_current_time(self.lsl_stream, ["instructions_end_1_task_" + self.task])
        if not cont:
            return False
        if self.is_practice:
            return True

        utils.push_sample_current_time(self.lsl_stream, ["fixation_start_1_task_" + self.task])
        cont = self.paint_cross(self.exp)
        utils.push_sample_current_time(self.lsl_stream, ["fixation_end_1_task_" + self.task])
        return bool(cont)
Code example #9
File: stroop.py  Project: noanutke/fMRI_E_Project
    def run_experiment(self):
        # define headers for the expyriment log file output table
        # (currently unused, but kept so it can be enabled later)
        self.exp.data_variable_names = ["time", "location", "direction", "trialType", "response", "rt",
                                        "is success", "is practice", "blockType", "order"]

        currentTrialsNumber = self.test_trials_number
        if self.is_practice:
            currentTrialsNumber = self.practice_trials_number
        for trial_index in range(0, currentTrialsNumber):
            canvas = stimuli.BlankScreen()

            time_delay = 0

            # plot cross on canvas
            cross = stimuli.FixCross((50, 50), (0, 0), 5)
            time_delay += cross.plot(canvas)

            # plot arrow on canvas
            picture_arrow = stimuli.Picture(self.direction_file_converter[self.directions[trial_index]] \
                                            , position=self.locations_converter[self.locations[trial_index]])
            time_delay += picture_arrow.preload()
            time_delay += picture_arrow.plot(canvas)

            # show canvas
            time_delay += canvas.present()

            # send trigger to LSL with arrow details
            utils.push_sample_current_time(self.outlet, ["stimulus_task_stroop_type_arrow_location_" + ("u" if "up" in self.locations[trial_index] else "d") \
                                    + "_direction_" + ("u" if "up" in self.directions[trial_index] else "d")])

            # wait for subject's response. Wait only for "duration" time
            key, rt = self.game_controller.wait_press(self.possible_joystick_buttons, self.duration,\
                                                      process_control_events=False)

            # we get here if the subject responded or if the stimulus duration ended
            if key is not None:  # subject responded and stimulus duration hasn't ended
                utils.push_sample_current_time(self.outlet, ["keyPressed_task_stroop_key_" + str(key)])
                self.exp.clock.wait(self.duration - rt) # wait the rest of the stimulus duration before going on

                # we get here when stimulus duration has ended (and subject responded)
                time_delay += self.paint_cross()
                self.exp.clock.wait(
                    self.isi - time_delay)  # wait for the ISI before going on

            else:
                # we get here if the subject hasn't responded but the stimulus duration ended - so we clear the screen
                time_delay += self.paint_cross()

                # wait for the subject to respond (but only for the ISI duration)
                key, rt = self.game_controller.wait_press(self.possible_joystick_buttons, self.isi - time_delay,
                                                          process_control_events=False)
                if key is not None:  # subject responded - wait out the rest of the ISI
                    self.exp.clock.wait(
                        self.isi - rt - time_delay)  # wait the rest of the ISI before going on

                utils.push_sample_current_time(self.outlet, ["keyPressed_task_stroop_key_" + str(key)])
            self.save_trial_data(key, rt, trial_index)

        self.show_feedback_if_needed()
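Throughout this method, the return values of expyriment's plot/present calls (each returns the number of milliseconds it took) are accumulated into time_delay and subtracted from the nominal waits, so the trial schedule is not stretched by drawing overhead. A stripped-down sketch of that pattern, using an assumed fixation-and-wait helper that is not part of the project:

from expyriment import stimuli

def present_fixation_and_wait(exp, isi):
    # Illustrative sketch of the delay-compensation pattern, not project code.
    canvas = stimuli.BlankScreen()
    cross = stimuli.FixCross((50, 50), (0, 0), 5)
    time_delay = 0
    time_delay += cross.plot(canvas)   # expyriment drawing calls return the ms they consumed
    time_delay += canvas.present()
    exp.clock.wait(isi - time_delay)   # wait only the remainder of the inter-stimulus interval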
Code example #10
File: nback.py  Project: noanutke/fMRI_E_Project
    def run_experiment(self):
        game1 = io.GamePad(0, True, True)  #init joystick

        # define headers for the expyriment log file output table
        # (currently unused, but kept so it can be enabled later)
        self.exp.data_variable_names = ["time", "digit", "position", "targetType", "response", "rt",
                                        "responseType", "is success", "n", "order", "is practice"]

        self.trials_number = len(self.positions_list_text) if len(
            self.positions_list_text) > 0 else len(self.digit_list)

        # create grid
        grid = Grid(len(self.positions_list_text))

        for trial in range(self.trials_number):
            target = None
            if self.with_audio_stimuli:  # we have auditory stimuli in this block
                # so we initialize the auditory stimulus
                self.digit = self.digit_list[trial]
                audio = stimuli.Audio(self.sounds_folder +
                                      str(int(self.digit)) +
                                      self.sound_files_suffix)

            canvas = stimuli.BlankScreen()
            time_delay_for_isi = 0
            if self.with_visual_stimuli:  # we have visual stimuli in this block
                # so we initialize the visual stimulus
                self.position_text = self.positions_list_text[trial]
                self.position = Grid.positions_locations[self.position_text]
                target = stimuli.Rectangle((30, 30), misc.constants.C_BLACK, 0,
                                           None, None, self.position)
                time_delay_for_isi += target.preload()
                time_delay_for_isi += target.plot(canvas)

            #prepare grid on canvas
            time_delay_for_isi += grid.paint_grid(canvas)
            #show canvas
            time_delay_for_isi += canvas.present()

            if self.with_audio_stimuli:
                audio.play()  # we have auditory stimuli, so play the letter now
                utils.push_sample_current_time(self.outlet,\
                                               ["stimulus_task_nBack_type_letter_letter_" + str(self.digit)])

            if self.with_visual_stimuli:
                utils.push_sample_current_time(self.outlet, \
                                               ["stimulus_task_nBack_type_vis_location_" + self.position_text])

            # wait for subject's response. Wait only for "duration" time
            key, rt = game1.wait_press(self.possible_joystick_buttons,
                                       self.stimuli_duration,
                                       process_control_events=False)
            if key is None:
                # we have now waited stimuliDuration so we can remove stimulus
                canvas = stimuli.BlankScreen()
                time_delay_for_isi += grid.paint_grid(canvas)
                time_delay_for_isi += canvas.present()

                time_delay_after_stimuli = 0

                time_delay_after_stimuli += grid.paint_grid(canvas)

                time_delay_after_stimuli += canvas.present()
                if target is not None:
                    time_delay_after_stimuli += target.unload()

                # we wait for subject to respond (but only for the ISI duration)
                key, rt = game1.wait_press(self.possible_joystick_buttons, self.ISI\
                                                 - time_delay_after_stimuli- time_delay_for_isi)
                if key is not None:  # subject responded - wait out the rest of the ISI
                    utils.push_sample_current_time(
                        self.outlet, ["keyPressed_task_nBack_key_" + str(key)])

                    # wait the rest of the ISI before going on
                    self.exp.clock.wait(
                        self.ISI - rt - time_delay_for_isi -
                        time_delay_after_stimuli
                    )  # wait the rest of the ISI before going on
                    rt = rt + self.stimuli_duration + time_delay_after_stimuli
            else:  # subject responded and stimulus duration hasn't ended
                utils.push_sample_current_time(
                    self.outlet, ["keyPressed_task_nBack_key_" + str(key)])

                # wait the rest of the stimulus duration before going on
                self.exp.clock.wait(
                    self.stimuli_duration -
                    rt)  # wait the rest of the stimuliDuration before removing
                # we have now waited stimuliDuration so we can remove stimulus
                canvas = stimuli.BlankScreen()
                time_delay_for_isi += grid.paint_grid(canvas)
                time_delay_for_isi += canvas.present()
                if target is not None:
                    time_delay_for_isi += target.unload()

                # wait for the ISI before going on
                self.exp.clock.wait(self.ISI - time_delay_for_isi)

            self.save_trial_data(key, rt, trial)

        self.show_feedback_if_needed()
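For context, the block manager from example #3 keeps an instance of this class in self.experiment and would start a block roughly as follows; every argument value below is an invented placeholder, not a value taken from the project:

# Illustrative call only; the letter and location lists are made-up placeholders.
self.experiment.run(n="2",
                    letters_list=[3, 7, 3, 9],
                    locations_list=["a", "b", "a", "c"],
                    is_practice=False,
                    stimuli_type="both",
                    isBaseline=False,
                    order="1",
                    block_index=0)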