trial_times = trials.stimOn_times[incl_trials]
probability_left = trials.probabilityLeft[incl_trials]
trial_blocks = (trials.probabilityLeft[incl_trials] == 0.2).astype(int)

# Get clusters in this brain region
region_clusters = combine_layers_cortex(clusters[PROBE]['acronym'])
clusters_in_region = clusters[PROBE].metrics.cluster_id[region_clusters == REGION]

# Select spikes and clusters
spks_region = spikes[PROBE].times[np.isin(spikes[PROBE].clusters, clusters_in_region)]
clus_region = spikes[PROBE].clusters[np.isin(spikes[PROBE].clusters, clusters_in_region)]

# Decode block identity
decode_block = decode(spks_region, clus_region, trial_times, trial_blocks,
                      pre_time=PRE_TIME, post_time=POST_TIME, classifier=DECODER,
                      cross_validation='kfold-interleaved', num_splits=5)
shuffle_block = decode(spks_region, clus_region, trial_times, trial_blocks,
                       pre_time=PRE_TIME, post_time=POST_TIME, classifier=DECODER,
                       cross_validation='kfold-interleaved', num_splits=5,
                       shuffle=True, iterations=ITERATIONS)
pseudo_block = decode(spks_region, clus_region, trial_times, trial_blocks,
                      pre_time=PRE_TIME, post_time=POST_TIME, classifier=DECODER,
                      cross_validation='kfold-interleaved', num_splits=5,
                      pseudo_blocks=True, iterations=ITERATIONS)

# %%
figure_style()
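# The pseudo-block null used above (pseudo_blocks=True) is handled inside decode();
# the sketch below only illustrates the idea, assuming IBL-style blocks whose lengths
# follow a truncated geometric distribution. The helper name and the mean/min/max block
# lengths are illustrative assumptions, not part of the decode() API.
import numpy as np


def generate_pseudo_block_labels(n_trials, mean_len=60, min_len=20, max_len=100, rng=None):
    """Build a pseudo block-identity vector (0/1) with realistic block lengths
    that is statistically like the task but decoupled from the recorded activity."""
    rng = np.random.default_rng() if rng is None else rng
    labels = np.empty(n_trials, dtype=int)
    block = rng.integers(0, 2)            # start randomly in a left or right block
    i = 0
    while i < n_trials:
        block_len = int(np.clip(rng.geometric(1 / mean_len), min_len, max_len))
        labels[i:i + block_len] = block
        block = 1 - block                 # blocks alternate identity
        i += block_len
    return labels


# One pseudo session; repeating this ITERATIONS times and decoding each label vector
# would build the null distribution that the real decoding accuracy is compared to.
pseudo_labels = generate_pseudo_block_labels(trial_blocks.shape[0])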
print('%d out of %d drift neurons detected'
      % (drift_neurons.shape[0], np.unique(clus_region).shape[0]))
spks_region = spks_region[~np.isin(clus_region, drift_neurons)]
clus_region = clus_region[~np.isin(clus_region, drift_neurons)]

# Check if there are enough neurons in this brain region
if np.unique(clus_region).shape[0] < MIN_NEURONS:
    continue

# Decode
decode_result = decode(spks_region, clus_region, trial_times, trial_ids,
                       pre_time=PRE_TIME, post_time=POST_TIME, classifier=clf,
                       cross_validation=VALIDATION, num_splits=NUM_SPLITS)

# Estimate chance level
if CHANCE_LEVEL == 'phase-rand':
    decode_chance = decode(spks_region, clus_region, trial_times, trial_ids,
                           pre_time=PRE_TIME, post_time=POST_TIME, classifier=clf,
                           cross_validation=VALIDATION,
# Select spikes and clusters
spks_region = spikes[probe].times[np.isin(spikes[probe].clusters, clusters_in_region)]
clus_region = spikes[probe].clusters[np.isin(spikes[probe].clusters, clusters_in_region)]
if len(spks_region) == 0:
    continue

# Decode block identity
decode_block = decode(spks_region, clus_region, trial_times, trial_blocks,
                      pre_time=PRE_TIME, post_time=POST_TIME, classifier=DECODER,
                      cross_validation=VALIDATION, num_splits=5)
pseudo_block = decode(spks_region, clus_region, trial_times, trial_blocks,
                      pre_time=PRE_TIME, post_time=POST_TIME, classifier=DECODER,
                      cross_validation=VALIDATION, num_splits=5, pseudo_blocks=True,
spks_region = spikes[probe].times[np.isin(spikes[probe].clusters, region_clusters)]
clus_region = spikes[probe].clusters[np.isin(spikes[probe].clusters, region_clusters)]
if len(region_clusters) <= N_NEURONS:
    continue

# Decode over time
decode_time = pd.DataFrame()
shuffle_time = pd.DataFrame()
for j, win_center in enumerate(WIN_CENTERS):
    print('Decoding window [%d of %d]' % (j + 1, WIN_CENTERS.shape[0]))
    decode_result = decode(spks_region, clus_region, stimon_times, stimon_blocks,
                           pre_time=-win_center + (WIN_SIZE / 2),
                           post_time=win_center + (WIN_SIZE / 2),
                           classifier='bayes', cross_validation='kfold',
                           n_neurons=N_NEURONS, iterations=ITERATIONS)
    decode_time = decode_time.append(pd.DataFrame({
        'f1': decode_result['f1'],
        'accuracy': decode_result['accuracy'] * 100,
        'auroc': decode_result['auroc'],
        'win_center': win_center,
        'session': '%s_%s' % (ses[i]['subject'], ses[i]['start_time'][:10])
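# The pre_time/post_time arithmetic above is easy to misread: assuming decode() counts
# spikes in [event - pre_time, event + post_time], it places a window of width WIN_SIZE
# centred on win_center relative to stimulus onset. The values below are illustrative
# only; WIN_SIZE and WIN_CENTERS are defined elsewhere in the script.
import numpy as np

win_size_example = 0.2                            # window width in seconds (assumed)
win_centers_example = np.arange(-0.5, 1.01, 0.1)  # centres relative to stimulus onset

for win_center in win_centers_example:
    pre_time = -win_center + (win_size_example / 2)
    post_time = win_center + (win_size_example / 2)
    print('centre %+.2f s -> window [%+.2f, %+.2f] s around stimulus onset'
          % (win_center, -pre_time, post_time))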
spikes.times = spikes.times[np.isin(
    spikes.clusters, clusters.metrics.cluster_id[clusters_to_use])]
spikes.clusters = spikes.clusters[np.isin(
    spikes.clusters, clusters.metrics.cluster_id[clusters_to_use])]
cluster_ids = clusters.metrics.cluster_id[clusters_to_use]

# %% Do decoding
print('Decoding whether the stimulus was on the left or the right..')
stim_times = trials.goCue_times
stim_sides = np.isnan(trials.contrastLeft).astype(int)

# Decode left vs right stimulus from a 1 second window after stimulus onset using
# default settings: Naive Bayes classifier with 5-fold cross-validation
decode_result = decode(spikes.times, spikes.clusters, stim_times, stim_sides,
                       pre_time=0, post_time=1)

# Get the accuracy over chance
print('\nNaive Bayes with 5-fold cross-validation')
print('Performance: %.2f%% correct [chance level: %.2f%%]'
      % (decode_result['accuracy'] * 100,
         (stim_sides.sum() / stim_sides.shape[0]) * 100))

# Decode stimulus side using a subset of 50 random neurons drawn 300 times
decode_result = decode(spikes.times, spikes.clusters, stim_times, stim_sides, pre_time=0,
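# decode() itself is a wrapper whose implementation is not shown here. Under the default
# settings used above (naive Bayes, 5-fold cross-validation) it presumably does something
# like the sketch below; the binning helper and the scikit-learn choices are assumptions
# for illustration, not the wrapper's actual code.
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import KFold, cross_val_score


def bin_spikes_per_trial(spike_times, spike_clusters, event_times, pre_time, post_time):
    """Assumed preprocessing: one spike count per (trial, neuron)."""
    cluster_ids = np.unique(spike_clusters)
    counts = np.zeros((event_times.shape[0], cluster_ids.shape[0]))
    for t, event in enumerate(event_times):
        in_win = (spike_times >= event - pre_time) & (spike_times <= event + post_time)
        clus_win, n_spikes = np.unique(spike_clusters[in_win], return_counts=True)
        counts[t, np.isin(cluster_ids, clus_win)] = n_spikes
    return counts


X = bin_spikes_per_trial(spikes.times, spikes.clusters, stim_times,
                         pre_time=0, post_time=1)
cv_accuracy = cross_val_score(GaussianNB(), X, stim_sides,
                              cv=KFold(n_splits=5, shuffle=True)).mean()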
region_clusters = combine_layers_cortex(clusters[PROBE]['acronym'])
clusters_in_region = clusters[PROBE].metrics.cluster_id[region_clusters == REGION]

# Select spikes and clusters
spks_region = spikes[PROBE].times[np.isin(spikes[PROBE].clusters, clusters_in_region)]
clus_region = spikes[PROBE].clusters[np.isin(spikes[PROBE].clusters, clusters_in_region)]

# Decode block identity
decode_5fold = decode(spks_region, clus_region, trial_times, trial_blocks,
                      pre_time=PRE_TIME, post_time=POST_TIME, classifier=DECODER,
                      cross_validation='kfold', num_splits=5)
shuffle_5fold = decode(spks_region, clus_region, trial_times, trial_blocks,
                       pre_time=PRE_TIME, post_time=POST_TIME, classifier=DECODER,
                       cross_validation='kfold', num_splits=5, shuffle=True,
spikes.clusters = spikes.clusters[np.isin(
    spikes.clusters, clusters.metrics.cluster_id[clusters.metrics.ks2_label == 'good'])]
clusters.channels = clusters.channels[clusters.metrics.ks2_label == 'good']
clusters.depths = clusters.depths[clusters.metrics.ks2_label == 'good']
cluster_ids = clusters.metrics.cluster_id[clusters.metrics.ks2_label == 'good']

# Get trial vectors
incl_trials = (trials.probabilityLeft > 0.55) | (trials.probabilityLeft < 0.45)
trial_times = trials.goCue_times[incl_trials]
probability_left = trials.probabilityLeft[incl_trials]
trial_blocks = (trials.probabilityLeft[incl_trials] > 0.55).astype(int)

# Decode block identity
bayes_kfold = decode(spikes.times, spikes.clusters, trial_times, trial_blocks,
                     pre_time=PRE_TIME, post_time=POST_TIME,
                     classifier='bayes', cross_validation='kfold')
bayes_block = decode(spikes.times, spikes.clusters, trial_times, trial_blocks,
                     pre_time=PRE_TIME, post_time=POST_TIME,
                     classifier='bayes', cross_validation='block',
                     prob_left=probability_left)
bayes_loo = decode(spikes.times, spikes.clusters, trial_times, trial_blocks,
                   pre_time=PRE_TIME, post_time=POST_TIME,
                   classifier='bayes', cross_validation='leave-one-out')
forest_kfold = decode(spikes.times, spikes.clusters, trial_times, trial_blocks,
                      pre_time=PRE_TIME, post_time=POST_TIME,
                      classifier='forest', cross_validation='kfold')
forest_block = decode(spikes.times, spikes.clusters, trial_times, trial_blocks,
                      pre_time=PRE_TIME, post_time=POST_TIME,
                      classifier='forest', cross_validation='block',
                      prob_left=probability_left)
forest_loo = decode(spikes.times, spikes.clusters, trial_times, trial_blocks,
                    pre_time=PRE_TIME, post_time=POST_TIME,
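# The three cross_validation options compared above differ in how trials are held out.
# The sketch below shows plausible scikit-learn equivalents, assuming a trials-by-neurons
# matrix X; how decode() actually builds its folds (especially the 'block' scheme) is an
# assumption here, not taken from its source.
import numpy as np
from sklearn.model_selection import KFold, LeaveOneOut, LeaveOneGroupOut

# 'kfold': split trials into 5 folds, each used once as the test set
kfold_cv = KFold(n_splits=5, shuffle=True)

# 'leave-one-out': every single trial serves as the test set once
loo_cv = LeaveOneOut()

# 'block': hold out one whole block of consecutive same-probability trials, so the
# classifier is never tested on trials from a block it was trained on. Block boundaries
# are taken to be where probability_left changes value.
block_id = np.cumsum(np.concatenate(([0], np.diff(probability_left) != 0)))
block_cv = LeaveOneGroupOut()
# e.g. folds = block_cv.split(X, trial_blocks, groups=block_id)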
for i, n_neurons in enumerate(N_NEURONS):
    print('Decoding from groups of %d neurons [%d of %d]'
          % (n_neurons, i + 1, len(N_NEURONS)))
    for j in range(N_NEURON_PICK):

        # Subselect neurons
        use_neurons = np.random.choice(clusters_in_region, n_neurons, replace=False)

        # Decode
        decode_result = decode(spks_region[np.isin(clus_region, use_neurons)],
                               clus_region[np.isin(clus_region, use_neurons)],
                               trial_times, trial_ids,
                               pre_time=PRE_TIME, post_time=POST_TIME,
                               classifier=DECODER, cross_validation=VALIDATION,
                               num_splits=NUM_SPLITS)
        decode_subselects = decode_subselects.append(
            pd.DataFrame(index=[decode_subselects.shape[0] + 1], data={
                'accuracy': decode_result['accuracy'],
                'null': 'Original',
                'n_neurons': n_neurons}))

        if CHANCE_LEVEL == 'shuffle':
            decode_chance = decode(spks_region[np.isin(clus_region, use_neurons)],
                                   clus_region[np.isin(clus_region,
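# For CHANCE_LEVEL == 'shuffle' the chance estimate presumably comes from decoding with
# randomly permuted trial labels. decode(..., shuffle=True, iterations=...) handles this
# internally; the explicit loop below is only a sketch of the idea.
import numpy as np

rng = np.random.default_rng()
null_accuracy = np.empty(ITERATIONS)
for it in range(ITERATIONS):
    # Permuting the labels destroys any real relationship between activity and trial id
    shuffled_ids = rng.permutation(trial_ids)
    null_result = decode(spks_region[np.isin(clus_region, use_neurons)],
                         clus_region[np.isin(clus_region, use_neurons)],
                         trial_times, shuffled_ids,
                         pre_time=PRE_TIME, post_time=POST_TIME,
                         classifier=DECODER, cross_validation=VALIDATION,
                         num_splits=NUM_SPLITS)
    null_accuracy[it] = null_result['accuracy']

# The accuracy obtained with the real labels can then be compared to the distribution of
# null_accuracy values, e.g. as a percentile or an empirical p-value.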