def run_leave_one_out_cv(features, labels, classifier=LinearDiscriminantAnalysis()):
    """
    Runs leave-one-out CV.

    :param features: Features of shape (epoch, feature)
    :param labels: list of labels of length num epochs
    :param classifier: Sklearn classifier (Defaults to LDA)
    :return: A list of cross validation scores. Use np.average on the result to find the average score.
    """
    loo = LeaveOneOut()
    scores = []
    for train_indexes, test_indexes in loo.split(features, labels):
        # Assert our split covers every epoch exactly once
        CCDLAssert.assert_equal(len(train_indexes) + len(test_indexes), features.shape[0])
        X_train, X_test = features[train_indexes, :], features[test_indexes, :]
        Y_train, Y_test = np.asarray(labels)[train_indexes], np.asarray(labels)[test_indexes]
        # Assert our X_train and X_test have the same number of features
        CCDLAssert.assert_equal(X_train.shape[1], X_test.shape[1])
        # Fit our classifier to the training split and score it on the held-out epoch
        classifier.fit(X_train, Y_train)
        score = classifier.score(X_test, Y_test)
        scores.append(score)
    return scores
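# A minimal usage sketch for run_leave_one_out_cv (not part of the original source). It assumes
# the function above plus numpy and scikit-learn are in scope; the feature values and labels are
# made up purely for illustration.
import numpy as np
from sklearn.svm import SVC

example_features = np.random.rand(6, 4)  # 6 epochs x 4 features of synthetic data
example_labels = [0, 1, 0, 1, 0, 1]      # one binary label per epoch

# Default LDA classifier:
fold_scores = run_leave_one_out_cv(example_features, example_labels)
print('Mean LOO accuracy (LDA):', np.average(fold_scores))

# Any sklearn-style classifier can be swapped in:
svm_scores = run_leave_one_out_cv(example_features, example_labels, classifier=SVC())
print('Mean LOO accuracy (SVM):', np.average(svm_scores))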
def extract_min_difference_between_lists(lst_large, lst_small):
    """
    Takes two lists of equal length and returns the minimum element-wise difference between them.

    For example
    [3, 5, 125, 23543]
    [1, 4, 120, 2354]
    Would return 1
    """
    AV.assert_equal(len(lst_large), len(lst_small))
    return min([a_i - b_i for a_i, b_i in zip(lst_large, lst_small)])
def convert_ununiform_start_stop_lists_to_uniform_start_stop_lists(start_lst, stop_lst):
    """
    Takes a list of start indexes and a list of end indexes and returns a new end index list
    such that trial_dur = stop_lst[i] - start_lst[i] and trial_dur is the same for every trial
    (set to the minimum trial duration contained in the lists).

    :param start_lst: list of floats denoting starts of trials
    :param stop_lst: list of floats denoting ends of trials
    :return: new stop list
    """
    AV.assert_equal(len(start_lst), len(stop_lst))
    dur = extract_min_difference_between_lists(stop_lst, start_lst)
    return [start_lst_val + dur for start_lst_val in start_lst]
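# A small usage sketch for the two helpers above (not part of the original source). The start/stop
# values are invented for illustration; any pair of equal-length lists works.
starts = [0.0, 50.0, 100.0]
stops = [10.0, 60.0, 109.0]

# The shortest trial lasts 9, so every stop is clamped to its start + 9.
uniform_stops = convert_ununiform_start_stop_lists_to_uniform_start_stop_lists(starts, stops)
assert uniform_stops == [9.0, 59.0, 109.0]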
def convert_start_end_index_lists_to_single_duration_trials(start_trial_index, end_trial_index):
    """
    Takes two lists of start and end indexes and returns a new end trial index list that ensures
    that all epochs will be the same duration. The duration used is the minimum (end - start)
    across corresponding entries in the lists.

    Example:
    start_trial_index = [0, 50, 100]
    end_trial_index = [10, 60, 109]
    returns -> [9, 59, 109]

    :param start_trial_index: Indexes marking the start of trials
    :param end_trial_index: Indexes marking the end of trials.
    :return: List of new end trial indexes equal to the start index list + the minimum duration
    """
    AV.assert_equal(len(start_trial_index), len(end_trial_index))
    return CCDLArrayParser.convert_ununiform_start_stop_lists_to_uniform_start_stop_lists(
        start_lst=start_trial_index, stop_lst=end_trial_index)
def trim_freqs(freqs, density, high=None, low=None):
    """
    Takes freqs and density and trims them according to the high and low values.

    If freqs = [10. 11. 12. 13. 14. 15. 16.] and trim_freqs is called on this list with high=15
    and low=10, the result would be [10. 11. 12. 13. 14.]

    :param freqs: freqs (numpy array)
    :param density: density -- Shape: (epoch, sample, channel) OR (epoch, sample)
    :param high: removes all freqs above and equal to this val. Cast to int if passed a float.
    :param low: removes all freqs below this val. Cast to int if passed a float.
    :return: freqs, density. Both are trimmed. Length of freqs is equal to the size of the
        second axis of density (samples).
    """
    original_num_samples = density.shape[1]
    if high is None and low is None:
        raise ValueError('At least one of high or low must be provided')
    if high is not None:
        high = int(high)
        index_of_high = bisect.bisect_left(a=freqs, x=high)
        freqs = freqs[:index_of_high]
        try:
            density = density[:, :index_of_high, :]
        except IndexError:
            # density is 2D: (epoch, sample)
            density = density[:, :index_of_high]
    if low is not None:
        low = int(low)
        index_of_low = bisect.bisect_left(a=freqs, x=low)
        freqs = freqs[index_of_low:]
        try:
            density = density[:, index_of_low:, :]
        except IndexError:
            # density is 2D: (epoch, sample)
            density = density[:, index_of_low:]
    # Assure we trimmed something
    AV.assert_not_equal(original_num_samples, density.shape[1])
    # Ensure each density pos has a corresponding freq.
    AV.assert_equal(len(freqs), density.shape[1])
    # Return the trimmed freqs and trimmed density.
    return freqs, density
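# A usage sketch for trim_freqs (not part of the original source). The PSD values are random
# placeholders; in practice freqs/density would come from a spectral estimate such as
# scipy.signal.welch, and the AV assertions above are assumed to be in scope.
import numpy as np

freqs = np.arange(0.0, 64.0, 1.0)           # 0-63 Hz in 1 Hz bins
density = np.random.rand(5, len(freqs), 8)  # (epoch, sample, channel)

# Keep roughly the alpha band, 8-13 Hz: drops bins below 8 and at or above 13.
alpha_freqs, alpha_density = trim_freqs(freqs, density, high=13, low=8)
assert list(alpha_freqs) == [8.0, 9.0, 10.0, 11.0, 12.0]
assert alpha_density.shape == (5, 5, 8)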
def epoch_data(eeg_indexes, raw_data, trial_starts, trial_stops, trim=False):
    """
    Takes raw data of shape [sample, channel] and returns epoched data of shape
    [epoch, sample, channel]. The epochs are taken according to the indexing of the start and
    stop values in the eeg indexes.

    :param eeg_indexes: epoch index for each sample in raw data (eeg index or time)
    :param raw_data: data shape [sample, channel]
    :param trial_starts: lst of trial start values (eeg index or time)
    :param trial_stops: lst of trial stop values (eeg index or time)
    :param trim: if trim, we will cut the end of one axis to make the concat work.
    :return: epoched data of shape [epoch, sample, channel]
    """
    AV.assert_equal(len(eeg_indexes), raw_data.shape[0])
    AV.assert_equal(len(trial_starts), len(trial_stops))
    epoched_data = None
    for start_stop_index in xrange(len(trial_starts)):
        start_packet_index = bisect.bisect_right(eeg_indexes, trial_starts[start_stop_index])
        end_packet_index = bisect.bisect_left(eeg_indexes, trial_stops[start_stop_index])
        AV.assert_less(start_packet_index, end_packet_index)
        trial_epoched_data = np.expand_dims(raw_data[start_packet_index:end_packet_index], axis=0)
        try:
            epoched_data = trial_epoched_data if epoched_data is None else np.concatenate(
                (epoched_data, trial_epoched_data), axis=0)
        except ValueError:
            if trim:
                # Trials can differ in length by a few samples; cut both down to the shorter one.
                min_shape = min(epoched_data.shape[1], trial_epoched_data.shape[1])
                epoched_data = epoched_data[:, :min_shape, :]
                trial_epoched_data = trial_epoched_data[:, :min_shape, :]
                epoched_data = np.concatenate((epoched_data, trial_epoched_data), axis=0)
            else:
                raise ValueError('Epoched data shape', epoched_data.shape,
                                 'Trial Epoched data shape', trial_epoched_data.shape)
    AV.assert_equal(len(trial_stops), epoched_data.shape[0])
    return epoched_data
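# A usage sketch for epoch_data (not part of the original source). The signal, timestamps and
# trial boundaries below are synthetic; real values would come from the EEG stream.
import numpy as np

fs = 100.0                                # assumed sample rate (Hz)
eeg_indexes = list(np.arange(1000) / fs)  # one timestamp per sample, 10 s of data
raw_data = np.random.rand(1000, 8)        # (sample, channel)

trial_starts = [1.0, 4.0, 7.0]            # trial onsets in seconds
trial_stops = [3.0, 6.0, 9.0]             # trial offsets in seconds

epoched = epoch_data(eeg_indexes, raw_data, trial_starts, trial_stops, trim=True)
# Each 2 s trial spans roughly 200 samples, so epoched is about (3, 200, 8).
print(epoched.shape)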
def average_density_over_epochs(density):
    """
    Takes our density of shape (epoch, sample, channel) and averages over all trials.

    :param density: Spectral density of the form (epoch, sample, channel)
    :return: New density of the form (1, sample, channel), where the sample dimension is
        averaged over epoch
    """
    # Assert density is of the form (epoch, sample, channel)
    assert len(density.shape) == 3
    num_samples = density.shape[1]
    num_channels = density.shape[2]
    averaged = np.average(density, axis=0)
    # Restore our epoch dim.
    averaged = np.expand_dims(averaged, axis=0)
    # Ensure we didn't change any unwanted dims. Only num_epochs (dim 0) should change.
    AV.assert_equal(num_samples, averaged.shape[1])
    AV.assert_equal(num_channels, averaged.shape[2])
    return averaged
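# A short usage sketch for average_density_over_epochs (not part of the original source); the
# input is a random placeholder for a per-epoch spectral density.
import numpy as np

density = np.random.rand(12, 64, 8)       # (epoch, sample, channel)
mean_density = average_density_over_epochs(density)
assert mean_density.shape == (1, 64, 8)   # epochs collapsed, samples/channels preserved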
def start_buffer(self):
    """
    Starts the buffer, reading samples from buffer_queue. Every update_interval samples, the
    most recent moving_window_size samples are placed on out_queue and the sample counter is
    reset.
    """
    # These are our lead and trailing storage indexes. This means the data in our buffer (as seen by the client)
    # is self.buffer[trail_buffer_storage_index:lead_buffer_storage_index, :]
    lead_buffer_storage_index = -1  # Set to -1 so first update will make it 0. This avoids a fence-post problem.
    trail_buffer_storage_index = lead_buffer_storage_index - self.moving_window_size
    sample_index = 0
    while True:
        # We follow the algorithm
        #   1. Get new sample and update indexes
        #   2. Check if we need to adjust the buffer bounds
        #   3. Add sample to the buffer at sample_index
        #   4. Check if we need to put our buffer on the sample queue
        # The buffer should be formatted so this can be done easily

        ###############################################
        #  Step 1 - Get new sample and update indexes #
        ###############################################
        sample_arr = self.buffer_queue.get()  # A blocking call
        if sample_arr == 'stop':
            # Reset our indexes.
            lead_buffer_storage_index = -1  # Set to -1 so first update will make it 0. This avoids a fence-post problem.
            trail_buffer_storage_index = lead_buffer_storage_index - self.moving_window_size
            sample_index = 0
            self.handle_stop()
            # The 'stop' sentinel carries no data, so skip straight to the next sample.
            continue
        # The one we just collected is our nth sample.
        sample_index += 1
        lead_buffer_storage_index += 1
        trail_buffer_storage_index += 1

        ############################################################
        #  Step 2 - Check if we need to adjust the buffer bounds   #
        ############################################################
        # Check if we're going to go over the buffer limit.
        if lead_buffer_storage_index == self.internal_buffer_size:
            # Fix buffer by copying over data. We use self.moving_window_size - 1 because we have not yet
            # inserted the new data.
            self.buffer[0:self.moving_window_size - 1, :] = self.buffer[
                trail_buffer_storage_index:lead_buffer_storage_index - 1]
            # Reset our indexes.
            lead_buffer_storage_index = self.moving_window_size
            trail_buffer_storage_index = 0
        # Run some assertions
        AV.assert_less(lead_buffer_storage_index, self.internal_buffer_size)
        AV.assert_equal(lead_buffer_storage_index - trail_buffer_storage_index,
                        self.moving_window_size)

        ############################################
        #  Step 3 - Add sample to the buffer       #
        ############################################
        self.buffer[lead_buffer_storage_index, :] = sample_arr

        #####################################################################
        #  Step 4 - Check if we need to put our buffer on the sample queue  #
        #####################################################################
        # Check if we need to put the buffer on the queue.
        if sample_index == self.update_interval:
            sub_buffer_to_send = self.buffer[trail_buffer_storage_index:lead_buffer_storage_index, :]
            AV.assert_equal(sub_buffer_to_send.shape[0], self.moving_window_size,
                            message="Internal Error - buffer is incorrectly formatted")
            # Add our buffer to the queue.
            self.out_queue.put(sub_buffer_to_send)
            # Reset our sample index
            sample_index = 0
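# A hedged usage sketch for the buffer above (not part of the original source). The class name
# DataBuffer, its constructor arguments, and the sample values are assumptions made for
# illustration; only buffer_queue, out_queue and start_buffer come from the method above.
import threading
import numpy as np

buf = DataBuffer(moving_window_size=250, update_interval=25, num_channels=8)  # hypothetical ctor

# start_buffer blocks on buffer_queue.get(), so it runs on its own thread.
worker = threading.Thread(target=buf.start_buffer)
worker.daemon = True
worker.start()

# Producer: push one 8-channel sample at a time onto the input queue.
for _ in range(500):
    buf.buffer_queue.put(np.random.rand(8))

# Consumer: once enough samples have arrived, each item on out_queue is a
# (moving_window_size, channel) window of the most recent data.
window = buf.out_queue.get()
print(window.shape)  # expected (250, 8) once the window has filled

# Send the stop sentinel to reset the buffer's internal indexes.
buf.buffer_queue.put('stop')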
CURSOR_TASK_SIZE = (1920, 1080)

# the distance of steps taken in the task
STEP = 50

# results
STOP_EARLY = 'stop_early'
STOP_LATE = 'stop_late'
YES = 'yes'
NO = 'no'

# some constants
CELL_SIZE = 100  # Size of the cells
NUM_COLS = 12  # Number of columns for Tetris pieces on the board (Affects Size)
NUM_ROWS = 10  # Number of rows for Tetris pieces in the board
BOARD_WIDTH = CELL_SIZE * NUM_COLS
BOARD_HEIGHT = CELL_SIZE * NUM_ROWS

# We want our number of columns to be divisible by 3
Assert.assert_equal(NUM_COLS % 3, 0)

""" SSVEP Related """
EEG_COLLECT_TIME_SECONDS = 15
WINDOW_SIZE_SECONDS = 2
RECEIVER_Q = 'Did you see a phosphene?'
SENDER_Q = 'Should the piece be turned?'

""" Communication Related """
PORT = 9999

# Guthrie
# 128.95.226.122
# 69.91.185.63
# 173.250.200.83