Example #1
def epoch_data(eeg_indexes, raw_data, trial_starts, trial_stops, trim=False):
    """
    Takes raw data of shape [sample, channel] and returns epoched data of shape [epoch, sample channel].
    The epoches are taken according to the indexing of the start and stop values in the eeg indexes

    :param eeg_indexes: epoch index for each sample in raw data (eeg index or time)
    :param raw_data: data shape [sample, channel]
    :param trial_starts: lst of trial start values (eeg index or time)
    :param trial_stops: lst of trial start values (eeg index or time)
    :param trim: if trim, we will cut the end of one axis to make the concat work.
    :return:
    """
    AV.assert_equal(len(eeg_indexes), raw_data.shape[0])
    AV.assert_equal(len(trial_starts), len(trial_stops))
    epoched_data = None
    for start_stop_index in xrange(len(trial_starts)):
        start_packet_index = bisect.bisect_right(eeg_indexes, trial_starts[start_stop_index])
        end_packet_index = bisect.bisect_left(eeg_indexes, trial_stops[start_stop_index])
        AV.assert_less(start_packet_index, end_packet_index)
        trial_epoched_data = np.expand_dims(raw_data[start_packet_index:end_packet_index], axis=0)
        try:
            epoched_data = trial_epoched_data if epoched_data is None else np.concatenate((epoched_data, trial_epoched_data), axis=0)
        except ValueError:
            if trim:
                min_shape = min(epoched_data.shape[1], trial_epoched_data.shape[1])
                epoched_data = epoched_data[:, :min_shape, :]
                trial_epoched_data = trial_epoched_data[:, :min_shape, :]
                epoched_data = np.concatenate((epoched_data, trial_epoched_data), axis=0)
            else:
                raise ValueError('Epoched data shape', epoched_data.shape, 'Trial Epoched data shape', trial_epoched_data.shape)
    AV.assert_equal(len(trial_stops), epoched_data.shape[0])
    return epoched_data
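
A minimal usage sketch for epoch_data, assuming its module-level dependencies (numpy as np, bisect, and the AV assertion helpers) are available; the synthetic data, sampling rate, and trial times below are hypothetical:

import numpy as np

fs = 100                                            # assumed sampling rate (Hz)
eeg_indexes = [i / float(fs) for i in range(1000)]  # one timestamp per sample
raw_data = np.random.randn(1000, 2)                 # [sample, channel]
trial_starts = [1.0, 5.0]                           # trial start times (seconds)
trial_stops = [3.0, 7.0]                            # trial stop times (seconds)

epochs = epoch_data(eeg_indexes, raw_data, trial_starts, trial_stops, trim=True)
print(epochs.shape)  # (2, 199, 2): [epoch, sample, channel]
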
Example #2
def main(data_folder):
    # Set up the TMS Machine
    tms = None
    if FIRE_TMS:
        tms = TMS.TMS()
        tms.tms_arm()
    # sampling rate
    fs = SystemsInfo.get_eeg_sampling_rate(EEGConstants.NAME)
    # manage data storage / Start EEG threads
    subject_num, subject_data_folder_path = FileParser.manage_storage(data_storage_location=data_folder,
                                                                      take_init=TAKE_INIT)
    condition, experiment_num = 0, 0
    # for BrainAmp
    live_channels = ['Oz']

    if TAKE_INIT:
        experiment_num = int(raw_input('TMS Group Experiment Number Tracker:'))
        condition = int(raw_input('Enter condition (int):'))
        tms_low = int(raw_input('Enter TMS Low intensity (integer between 1 and 100):'))
        tms_high = int(raw_input('Enter TMS High intensity (integer between 1 and 100):'))
        # check intensity range
        Assert.assert_less(tms_low, tms_high)
        Assert.assert_less(tms_high, 100)
        Assert.assert_greater(tms_low, 0)
    else:
        tms_high, tms_low = 80, 50

    # Create Arduino
    if RUN_ARDUINO:
        ard = Arduino(com_port=ARDUINO_COMPORT)
    else:
        ard = None
    # Load our reference list
    ref_list = FileParser.load_yaml_file('ref/Condition%d.yaml' % condition)
    verbose_info(VERBOSE, ref_list)
    # Set up our EEG system
    if RUN_EEG:
        eeg = EEG.start_eeg(EEGConstants.NAME, live_channels, subject_data_folder_path, subject_num)
    else:
        eeg, data_save_queue = None, Queue.Queue()
    # out buffer queue
    out_buffer_queue = eeg.out_buffer_queue if eeg is not None else None
    # Start our Logging
    log_file_save_location = subject_data_folder_path + 'Subject%s_log.txt' % subject_num
    logger = Log.Log(subject_log_file_path=log_file_save_location)
    verbose_info(VERBOSE, "Saving Log File: " + log_file_save_location)

    # graphics
    bgm = start_graphics()
    # Meta contains all the meta info about the experiment, such as the condition number and the subject number.
    meta = {'condition': condition, 'subject_id': subject_num, 'high_tms': tms_high, 'low_tms': tms_low,
            'experiment_num': experiment_num, 'subject_num': subject_num, 'data_path': subject_data_folder_path}
    logger.info(str(meta))  # Save our meta info to our log file.

    # Start
    run_main_logic(ref_list=ref_list, bgm=bgm, ard=ard, logger=logger, eeg=eeg, fs=fs,
                   out_buffer_queue=out_buffer_queue, tms=tms, tms_low=tms_low, tms_high=tms_high)
Example #3
def fire(high_flag, bgm, tms, high_intensity, low_intensity):
    """
    Fire the TMS at the high intensity if high_flag, else we'll fire it at the low intensity.
    """
    Assert.assert_less(low_intensity, high_intensity)
    # flash red crosshair
    fire_time = time.time()
    bgm.flash_red()
    if high_flag:
        tms.tms_fire(i=high_intensity)
    else:
        tms.tms_fire(i=low_intensity)
    return fire_time
Example #4
def fire_twice(c1_turn, c2_turn, bgm, tms, high_intensity, low_intensity, attempt):
    """
    Fires the TMS twice if fire_tms_flag.  If not fire_tms_flag, we'll just flash the crosshairs red.
    
    Fires according to:
        TMS- High threshold if cX_turn is True (we need to rotate the piece)

    Fire High if Rotate.  Fire low if Not Rotate.

    :param c1_turn: True if we need to rotate the piece as according to c1, false otherwise
    :param c2_turn: True if we need to rotate the piece as according to c2, false otherwise
    :param bgm: Block game manager (so we can flash the cross hairs)
    :param tms: the TMS object
    :param high_intensity: the high intensity to fire TMS
    :param low_intensity: the low intensity to fire TMS
    :param attempt: show the round index (1 or 2)
    """
    # set up
    Assert.assert_less(low_intensity, high_intensity)
    verbose_info(VERBOSE, "Firing TMS: %s, %s" % ('High' if c1_turn else "Low", 'High' if c2_turn else "Low"))
    fire_times = []
    for i, cx_turn in enumerate([c1_turn, c2_turn]):
        # show prompt
        prompt_screen(bgm, attempt + 1, i + 1)
        # fire
        if FIRE_TMS:
            # Set the intensity first so the actual fire call happens sooner.
            if cx_turn:
                tms.set_intensity(intensity=high_intensity)
            else:
                tms.set_intensity(intensity=low_intensity)
            fire_times.append(fire(cx_turn, bgm=bgm, tms=tms, high_intensity=high_intensity, low_intensity=low_intensity))
        # Only sleep after c1_turn
        if i == 0:  # We need to sleep between firings for safety reasons.
            time.sleep(Constants.SLEEP_BETWEEN_FIRINGS)

    if not FIRE_TMS:
        fire_times = [None, None]

    return fire_times
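
The rotate-to-intensity rule that fire_twice applies can be sketched standalone. The helper below is hypothetical (not part of the original module); its default 80/50 intensities simply mirror the non-TAKE_INIT defaults in main above:

def planned_intensities(c1_turn, c2_turn, high_intensity=80, low_intensity=50):
    # Mirror fire_twice's rule: fire high when the piece must be rotated
    # (cX_turn is True), fire low otherwise; c1 is handled first, then c2.
    return [high_intensity if turn else low_intensity
            for turn in (c1_turn, c2_turn)]

print(planned_intensities(True, False))  # [80, 50]
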
Example #5
    def start_buffer(self):
        """
         Starts the buffer, reading from buffer_queue and writing to out_buffer_queue
         once the buffer reaches moving_window_size.  Once the buffer reaches moving_window_size and is placed on the
         queue, the buffer is cleared.
        """
        # These are our lead and trailing storage indexes.  The data in our buffer (as seen by the client)
        # is self.buffer[trail_buffer_storage_index:lead_buffer_storage_index, :]
        lead_buffer_storage_index = -1  # Set to -1 so the first update makes it 0. This avoids a fence-post problem.
        trail_buffer_storage_index = lead_buffer_storage_index - self.moving_window_size
        sample_index = 0
        while True:
            # We follow this algorithm:
            # 1. Get a new sample and update the indexes
            # 2. Check if we need to adjust the buffer bounds
            # 3. Add the sample to the buffer at the lead index
            # 4. Check if we need to put our buffer on the sample queue
            #    (the buffer is formatted so this can be done easily)

            ###############################################
            #  Step 1 - Get new sample and update indexes #
            ###############################################
            sample_arr = self.buffer_queue.get()  # A blocking call
            if sample_arr == 'stop':
                # Reset our indexes.
                lead_buffer_storage_index = -1  # Set to -1 so the first update makes it 0. This avoids a fence-post problem.
                trail_buffer_storage_index = lead_buffer_storage_index - self.moving_window_size
                sample_index = 0
                self.handle_stop()


            # The sample we just collected is our nth sample (samples are zero-indexed).
            sample_index += 1
            lead_buffer_storage_index += 1
            trail_buffer_storage_index += 1


            ############################################################
            #  Step 2  - Check if we need to adjust the buffer bounds  #
            ############################################################
            # Check if we're going to go over the buffer limit.
            if lead_buffer_storage_index == self.internal_buffer_size:
                # Fix the buffer by copying data over.  We use self.moving_window_size - 1 because we have not yet
                # inserted the new data.
                self.buffer[0:self.moving_window_size - 1, :] = self.buffer[trail_buffer_storage_index: lead_buffer_storage_index - 1]
                # Reset our indexes.
                lead_buffer_storage_index = self.moving_window_size
                trail_buffer_storage_index = 0

            # Run some assertions
            AV.assert_less(lead_buffer_storage_index, self.internal_buffer_size)
            AV.assert_equal(lead_buffer_storage_index - trail_buffer_storage_index, self.moving_window_size)

            ############################################
            #  Step 3  - Add the sample to the buffer  #
            ############################################
            self.buffer[lead_buffer_storage_index, :] = sample_arr


            #####################################################################
            #  Step 4 - Check if we need to put our buffer on the sample queue  #
            #####################################################################
            # Check if we need to put the buffer on the queue.
            if sample_index == self.update_interval:
                sub_buffer_to_send = self.buffer[trail_buffer_storage_index:lead_buffer_storage_index, :]
                AV.assert_equal(sub_buffer_to_send.shape[0], self.moving_window_size, message="Internal Error - buffer is incorrectly formatted")
                # Add our buffer to the queue.
                self.out_queue.put(sub_buffer_to_send)
                # Reset our sample index
                sample_index = 0
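
The idea behind this buffer (keep a fixed-size moving window of samples and emit it every update_interval samples) can be sketched more simply with a deque. This is a hypothetical illustration of the concept, not this class's implementation:

from collections import deque

import numpy as np

def moving_window_stream(samples, moving_window_size=4, update_interval=2):
    # Keep only the newest moving_window_size samples and yield a copy of the
    # window every update_interval new samples, once the window is full.
    window = deque(maxlen=moving_window_size)
    since_last_emit = 0
    for sample in samples:
        window.append(sample)
        since_last_emit += 1
        if len(window) == moving_window_size and since_last_emit >= update_interval:
            yield np.asarray(window)
            since_last_emit = 0

for w in moving_window_stream(range(1, 11)):
    print(w)  # emits [1 2 3 4], then [3 4 5 6], [5 6 7 8], [7 8 9 10]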