# Decode the real session, then a pseudo session to estimate chance level.
# VALIDATION presumably looks like 'kfold-interleaved' / 'kfold-blocks';
# the [6:] slice strips the 'kfold-' prefix — TODO confirm against the
# constant's definition. 'interleaved' shuffles trials across CV folds.
interleaved = VALIDATION[6:] == 'interleaved'
y_pred, y_probs = decode(this_pop_vector, trial_ids, NUM_SPLITS, interleaved)

# Calculate performance metrics and confusion matrix
decode_result.loc[k, 'accuracy'] = accuracy_score(trial_ids, y_pred)
decode_result.loc[k, 'f1'] = f1_score(trial_ids, y_pred)
# AUROC is only defined for trials with a valid probability estimate,
# so NaN entries of y_probs (and their labels) are dropped first.
decode_result.loc[k, 'auroc'] = roc_auc_score(
    trial_ids[~np.isnan(y_probs)], y_probs[~np.isnan(y_probs)])

# Decode pseudo session (null distribution for the chance level)
pseudo_trials = generate_pseudo_session(trials)
incl_pseudo_trials = balanced_trial_set(pseudo_trials)

# Select activity matrix and trial ids for this iteration
this_pseudo_pop_vector = pop_vector[incl_pseudo_trials]
# BUG FIX: the labels for the pseudo session must come from the generated
# pseudo_trials, not from the real trials table — using the real
# probabilityLeft here would decode the true block labels again and the
# resulting "chance level" would not be a proper null.
pseudo_trial_ids = (pseudo_trials.probabilityLeft[incl_pseudo_trials]
                    == 0.2).astype(int)
y_pred, y_probs = decode(this_pseudo_pop_vector, pseudo_trial_ids,
                         NUM_SPLITS, interleaved)
# NOTE(review): this physical line is a whitespace-mangled paste of what was
# originally many source lines (newlines were stripped, so the inline
# '# Estimate chance level' comment now swallows the rest of the line).
# It regresses the model-derived prior from population activity, records
# Pearson r for test and training predictions, then loops ITERATIONS times
# over generated pseudo sessions to build a chance-level distribution.
# NOTE(review): the final statement is truncated mid-call
# ('regress( population_activity,' has no closing arguments here) — the
# remainder lives past the end of this chunk, so the code is left
# byte-identical rather than reconstructed by guesswork.
these_priors = priors[j][:trials.shape[0]][incl_trials] pred_prior, pred_prior_train = regress( population_activity, these_priors, cross_validation=cv, return_training=True) r_prior[n] = pearsonr(these_priors, pred_prior)[0] r_prior_train[n] = pearsonr(these_priors, pred_prior_train)[0] # Estimate chance level r_pseudo, r_train_pseudo = np.empty(ITERATIONS), np.empty( ITERATIONS) for k in range(ITERATIONS): if 'prior-stimside' in TARGET: pseudo_trials = generate_pseudo_session( trials, generate_choices=False) pseudo_trials['choice'] = np.nan elif 'prior-prevaction' in TARGET: pseudo_trials = generate_pseudo_session( trials, generate_choices=True) stim_side, stimuli, actions, prob_left = utils.format_data( pseudo_trials) p_priors = model.compute_prior( np.array(actions), np.array(stimuli), np.array(stim_side), parameter_type='posterior_mean')[0] # Decode prior p_pred_prior, p_pred_prior_train = regress( population_activity,