Example #1
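# The examples below assume the imports listed here. The first group are standard
# libraries the code clearly uses; the second group are project-specific helper
# modules whose names and contents are inferred from usage and are therefore assumptions.
import numpy as np
import scipy.linalg
import matplotlib.pyplot as plt
import seaborn as sns
from collections import OrderedDict
from sklearn.linear_model import LinearRegression
from tqdm import tqdm

# Assumed project-specific helpers (not shown in these examples):
# import regression_function as reg_f   # assumed to provide regression_code(y, X) -> t-statistics
# import regressions as re              # assumed to provide _CPD(X, y), predictors_pokes(session), etc.
#                                        # (note: this project module shadows the stdlib `re`)
# import value_guided as vg             # assumed to provide rew_prev_behaviour(...) and task_ind(...)
# from palettable import wesanderson as wes  # assumed source of the colour palettes used for plotting
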
def sequence_rewards_errors_regression(data, perm=True):

    dm = data['DM'][0]
    firing = data['Data'][0]
    C = []
    cpd = []

    for s, sess in enumerate(dm):
        runs_list = [0]  # (unused)
        DM = dm[s]
        firing_rates = firing[s]
        n_trials, n_neurons, n_timepoints = firing_rates.shape

        choices = DM[:, 1]
        reward = DM[:, 2]
        state = DM[:, 0]

        cum_error = []

        err = 0
        for r in reward:
            if r == 0:
                err += 1
            else:
                err = 0
            cum_error.append(err)

        cum_reward = []
        rew = 0  # run length of consecutive rewarded trials (separate from the error counter above)
        for r in reward:
            if r == 1:
                rew += 1
            else:
                rew = 0
            cum_reward.append(rew)

        ones = np.ones(len(reward))

        predictors_all = OrderedDict([('Reward', reward), ('Choice', choices),
                                      ('State', state), ('Errors', cum_error),
                                      ('Rewards', cum_reward), ('ones', ones)])

        X = np.vstack(
            predictors_all.values()).T[:len(choices), :].astype(float)
        n_predictors = X.shape[1]
        y = firing_rates.reshape(
            [len(firing_rates),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y, X)

        C.append(tstats.reshape(n_predictors, n_neurons,
                                n_timepoints))  # Predictor loadings
        cpd.append(
            re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))

    cpd = np.nanmean(np.concatenate(cpd, 0), axis=0)
    C = np.concatenate(C, 1)

    high_loadings_rewards = np.where(abs(np.mean(C[4, :, :20], 1)) > 2.5)[0]  # 'Rewards' (cumulative reward) predictor
    high_loadings_errors = np.where(abs(np.mean(C[3, :, :20], 1)) > 2.5)[0]  # 'Errors' (cumulative error) predictor

    return high_loadings_errors, high_loadings_rewards
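
# re._CPD is used throughout these examples but is not defined in them. The function
# below is a minimal sketch of what it is assumed to compute: the coefficient of
# partial determination (CPD) of every predictor for every target column of y.
# The project's own implementation may differ in detail.
def _CPD_sketch(X, y):
    """Return CPDs with shape [y.shape[1], X.shape[1]]: the extra fraction of variance
    of each column of y explained by each predictor over the remaining predictors."""
    ols = LinearRegression()
    ols.fit(X, y)
    sse_full = np.sum((ols.predict(X) - y) ** 2, axis=0)  # SSE of the full model
    cpd = np.zeros([y.shape[1], X.shape[1]])
    for i in range(X.shape[1]):
        X_reduced = np.delete(X, i, axis=1)  # refit without predictor i
        ols.fit(X_reduced, y)
        sse_reduced = np.sum((ols.predict(X_reduced) - y) ** 2, axis=0)
        cpd[:, i] = (sse_reduced - sse_full) / sse_reduced
    return cpd
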
def within_taks_codes(data, area='HP', perm=5):

    dm = data['DM'][0]
    firing = data['Data'][0]
    C = []
    cpd = []
    cpd_perm_p = []

    for s, sess in enumerate(dm):

        cpd_perm = [[] for i in range(perm)
                    ]  # To store permuted predictor loadings for each session.

        runs_list = [0]  # (unused)
        DM = dm[s]
        firing_rates = firing[s]
        n_trials, n_neurons, n_timepoints = firing_rates.shape

        state = DM[:, 0]
        choices = DM[:, 1]

        reward = DM[:, 2]

        reward_2_ago = 0.5 - reward[1:-2]
        reward_3_ago = 0.5 - reward[:-3]
        reward_prev = 0.5 - reward[2:-1]
        reward_current = reward[3:]

        # reward_o_1_ago = np.asarray(reward_prev)
        # reward_o_2_ago = np.asarray(reward_2_ago)
        # reward_o_3_ago = np.asarray(reward_3_ago)

        firing_rates = firing_rates[3:]

        choices_2_ago = 0.5 - choices[1:-2]
        choices_3_ago = 0.5 - choices[:-3]
        choices_prev = 0.5 - choices[2:-1]
        choices_current = choices[3:]
        state = state[3:]

        cum_reward_orth = np.vstack(
            [reward_current, np.ones(len(reward_current))]).T
        xt = np.linalg.pinv(cum_reward_orth)
        identity = np.identity(len(reward_current))
        id_x = (identity - np.matmul(cum_reward_orth, xt))
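        # id_x = I - X(X'X)^-1 X' for X = [current reward, intercept]: multiplying a
        # regressor by id_x removes the part of it that is explained by the current
        # reward (used by the orthogonalised choice regressors commented out below).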

        # choice_o_1_ago = np.matmul(id_x, np.asarray(choices_prev))
        # choice_o_2_ago = np.matmul(id_x, np.asarray(choices_2_ago))
        # choice_o_3_ago = np.matmul(id_x, np.asarray(choices_3_ago))

        ch_rew_int_1 = choices_prev * reward_prev
        ch_rew_int_2 = choices_2_ago * reward_2_ago
        ch_rew_int_3 = choices_3_ago * reward_3_ago

        ones = np.ones(len(choices_3_ago))

        predictors_all = OrderedDict([('Reward', reward_current),
                                      ('Choice', choices_current),
                                      ('1 ago Outcome', reward_prev),
                                      ('2 ago Outcome', reward_2_ago),
                                      ('3 ago Outcome', reward_3_ago),
                                      ('1 ago Choice', choices_prev),
                                      ('2 ago Choice', choices_2_ago),
                                      ('3 ago Choice', choices_3_ago),
                                      ('1 Rew x Choice', ch_rew_int_1),
                                      ('2 Rew x Choice', ch_rew_int_2),
                                      ('3 Rew x Choice', ch_rew_int_3),
                                      ('ones', ones)])

        X = np.vstack(
            predictors_all.values()).T[:len(choices_current), :].astype(float)
        n_predictors = X.shape[1]
        rank = np.linalg.matrix_rank(X)
        print('Design matrix rank: {} / {} predictors'.format(rank, n_predictors))
        y = firing_rates.reshape(
            [len(firing_rates),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y, X)
        C.append(tstats.reshape(n_predictors, n_neurons,
                                n_timepoints))  # Predictor loadings
        cpd.append(
            re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))
        if perm:
            for i in range(perm):
                y_perm = np.roll(y, np.random.randint(len(y)), axis=0)
                cpd_temp = re._CPD(X, y_perm).reshape(n_neurons, n_timepoints,
                                                      n_predictors)
                cpd_perm[i].append(np.nanmean(cpd_temp, axis=0))

            # 95th percentile across permutations for this session.
            cpd_perm_p.append(np.percentile(cpd_perm, 95, axis=0))
    if perm:  # Evaluate P values.
        cpd_perm_pval = np.mean(cpd_perm_p, 0)[0]
        #cpd_perm_p = np.percentile(cpd_perm,95, axis = 0)

    C = np.concatenate(C, 1)
    cpd = np.nanmean(np.concatenate(cpd, 0), axis=0)

    plt.figure()
    pred = list(predictors_all.keys())

    array_pvals = np.ones((cpd.shape[0], cpd.shape[1]))

    for i in range(cpd.shape[1]):
        array_pvals[(np.where(cpd[:, i] > cpd_perm_pval[:, i])[0]), i] = 0.05

    ymax = np.max(cpd[:, 2:-1].T)
    t = np.arange(cpd.shape[0])  # one value per time bin
    c = wes.Darjeeling2_5.mpl_colors + wes.Mendl_4.mpl_colors + wes.GrandBudapest1_4.mpl_colors + wes.Moonrise1_5.mpl_colors

    for i in np.arange(cpd.shape[1]):
        if i > 1 and i < cpd.shape[1] - 1:
            plt.plot(cpd[:, i], label=pred[i], color=c[i])
            y = ymax * (1 + 0.04 * i)
            p_vals = array_pvals[:, i]
            t05 = t[p_vals == 0.05]
            plt.plot(t05,
                     np.ones(t05.shape) * y,
                     '.',
                     markersize=5,
                     color=c[i])

    plt.legend()
    plt.ylabel('CPD')
    plt.xlabel('Time in Trial')
    plt.xticks([24, 35, 42], ['I', 'C', 'R'])
    sns.despine()
    plt.title(area)
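
# reg_f.regression_code is another project-specific helper. The sketch below shows what
# it is assumed to return: ordinary-least-squares t-statistics for every predictor and
# every target column of y, with shape [X.shape[1], y.shape[1]] (predictors first), which
# is the layout the reshape calls above rely on. The real implementation may differ.
def regression_code_sketch(y, X):
    n_obs, n_predictors = X.shape
    coef = np.linalg.pinv(X) @ y                    # [n_predictors, n_y]
    residuals = y - X @ coef
    dof = n_obs - n_predictors
    mse = np.sum(residuals ** 2, axis=0) / dof      # residual variance per target column
    var_diag = np.diag(np.linalg.pinv(X.T @ X))     # per-predictor variance factor
    se = np.sqrt(np.outer(var_diag, mse))           # [n_predictors, n_y]
    return coef / se                                # t-statistics
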
Example #3
def chosen_value_reg(data, area='PFC', n=10, perm=True):

    if perm:
        dm = data[0]
        firing = data[1]

    else:
        dm = data['DM'][0]
        firing = data['Data'][0]

    C_1 = []
    C_2 = []
    C_3 = []
    cpd_1 = []
    cpd_2 = []
    cpd_3 = []
    average = vg.rew_prev_behaviour(data, n=n, perm=perm)

    for s, sess in enumerate(dm):

        DM = dm[s]
        firing_rates = firing[s]
        # firing_rates = scipy.stats.zscore(firing_rates,0)
        #firing_rates = firing_rates - np.mean(firing_rates,0)

        n_trials, n_neurons, n_timepoints = firing_rates.shape

        choices = DM[:, 1]
        reward = DM[:, 2]

        task = DM[:, 5]
        a_pokes = DM[:, 6]
        b_pokes = DM[:, 7]

        taskid = vg.task_ind(task, a_pokes, b_pokes)

        task_1 = np.where(taskid == 1)[0]
        task_2 = np.where(taskid == 2)[0]
        task_3 = np.where(taskid == 3)[0]

        reward_current = reward
        choices_current = choices - 0.5

        rewards_1 = reward_current[task_1]
        choices_1 = choices_current[task_1]

        previous_rewards_1 = scipy.linalg.toeplitz(rewards_1, np.zeros(
            (1, n)))[n - 1:-1]

        previous_choices_1 = scipy.linalg.toeplitz(0.5 - choices_1,
                                                   np.zeros((1, n)))[n - 1:-1]

        interactions_1 = scipy.linalg.toeplitz(
            (((0.5 - choices_1) * (rewards_1 - 0.5)) * 2), np.zeros(
                (1, n)))[n - 1:-1]
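        # scipy.linalg.toeplitz(x, np.zeros((1, n))) builds a lag matrix whose row i is
        # [x[i], x[i-1], ..., x[i-n+1]], zero-padded before the first trial. The
        # [n - 1:-1] slice keeps the rows that give each target trial t = n, ..., n_trials-1
        # its full n-trial history, matching firing_rates[task_1][n:] below.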

        ones = np.ones(len(interactions_1)).reshape(len(interactions_1), 1)

        X_1 = np.hstack(
            [previous_rewards_1, previous_choices_1, interactions_1, ones])
        #average_val_ex_ch = np.concatenate([average[n].reshape(1),average[n*2:]])
        #X_exl_1 = np.concatenate([X_1[:,n].reshape(len(X_1),1),X_1[:,n*2:]],1)
        #value = np.matmul(X[:,n*2:], average[n*2:])
        value_1 = np.matmul(X_1, average)
        #value_1 =np.matmul(X_exl_1, average_val_ex_ch)

        rewards_1 = rewards_1[n:]
        choices_1 = choices_1[n:]

        value_1_choice_1 = value_1 * choices_1
        ones_1 = np.ones(len(choices_1))
        trials_1 = len(choices_1)
        prev_ch_reward_1 = choices_1 * rewards_1

        firing_rates_1 = firing_rates[task_1][n:]

        predictors_all = OrderedDict([
            ('Choice', choices_1),
            ('Reward', rewards_1),
            ('Value', value_1),
            ('Value Choice', value_1_choice_1),
            ('Prev Rew Ch', prev_ch_reward_1),

            #      ('Prev Rew', prev_reward_1),
            #  ('Prev Ch', prev_choice_1),
            ('ones', ones_1)
        ])

        X_1 = np.vstack(predictors_all.values()).T[:trials_1, :].astype(float)

        n_predictors = X_1.shape[1]
        y_1 = firing_rates_1.reshape(
            [len(firing_rates_1),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        # tstats,x = regression_code_session(y_1, X_1)
        #tstats =  reg_f.regression_code(y_1, X_1)
        ols = LinearRegression()
        ols.fit(X_1, y_1)
        C_1.append(ols.coef_.reshape(n_neurons, n_timepoints,
                                     n_predictors))  # Predictor loadings
        #C_1.append(tstats.reshape(n_predictors,n_neurons,n_timepoints)) # Predictor loadings
        cpd_1.append(
            re._CPD(X_1, y_1).reshape(n_neurons, n_timepoints, n_predictors))

        rewards_2 = reward_current[task_2]
        choices_2 = choices_current[task_2]

        previous_rewards_2 = scipy.linalg.toeplitz(rewards_2, np.zeros(
            (1, n)))[n - 1:-1]

        previous_choices_2 = scipy.linalg.toeplitz(0.5 - choices_2,
                                                   np.zeros((1, n)))[n - 1:-1]

        interactions_2 = scipy.linalg.toeplitz(
            (((0.5 - choices_2) * (rewards_2 - 0.5)) * 2), np.zeros(
                (1, n)))[n - 1:-1]

        ones = np.ones(len(interactions_2)).reshape(len(interactions_2), 1)

        X_2 = np.hstack(
            [previous_rewards_2, previous_choices_2, interactions_2, ones])
        # average_val_ex_ch = np.concatenate([average[n].reshape(1),average[n*2:]])
        # X_exl_2 = np.concatenate([X_2[:,n].reshape(len(X_2),1),X_2[:,n*2:]],1)
        # value = np.matmul(X[:,n*2:], average[n*2:])
        value_2 = np.matmul(X_2, average)
        #value_2 =np.matmul(X_exl_2, average_val_ex_ch)

        rewards_2 = rewards_2[n:]
        choices_2 = choices_2[n:]

        value_2_choice_2 = choices_2 * value_2

        ones_2 = np.ones(len(choices_2))
        trials_2 = len(choices_2)

        firing_rates_2 = firing_rates[task_2][n:]

        prev_ch_reward_2 = choices_2 * rewards_2

        predictors_all = OrderedDict([
            ('Choice', choices_2),
            ('Reward', rewards_2),
            ('Value', value_2),
            ('Value Choice', value_2_choice_2),
            ('Prev Rew Ch', prev_ch_reward_2),
            #
            #    ('Prev Rew', prev_reward_2),
            # ('Prev Ch', prev_choice_2),
            ('ones', ones_2)
        ])

        X_2 = np.vstack(predictors_all.values()).T[:trials_2, :].astype(float)

        n_predictors = X_2.shape[1]
        y_2 = firing_rates_2.reshape(
            [len(firing_rates_2),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        # tstats,x = regression_code_session(y_2, X_2)
        ols = LinearRegression()
        ols.fit(X_2, y_2)
        C_2.append(ols.coef_.reshape(n_neurons, n_timepoints,
                                     n_predictors))  # Predictor loadings
        cpd_2.append(
            re._CPD(X_2, y_2).reshape(n_neurons, n_timepoints, n_predictors))

        rewards_3 = reward_current[task_3]
        choices_3 = choices_current[task_3]

        previous_rewards_3 = scipy.linalg.toeplitz(rewards_3, np.zeros(
            (1, n)))[n - 1:-1]

        previous_choices_3 = scipy.linalg.toeplitz(0.5 - choices_3,
                                                   np.zeros((1, n)))[n - 1:-1]

        interactions_3 = scipy.linalg.toeplitz(
            (((0.5 - choices_3) * (rewards_3 - 0.5)) * 2), np.zeros(
                (1, n)))[n - 1:-1]

        ones = np.ones(len(interactions_3)).reshape(len(interactions_3), 1)

        X_3 = np.hstack(
            [previous_rewards_3, previous_choices_3, interactions_3, ones])
        #  average_val_ex_ch = np.concatenate([average[n].reshape(1),average[n*2:]])
        #  X_exl_3 = np.concatenate([X_3[:,n].reshape(len(X_3),1),X_3[:,n*2:]],1)
        # # value = np.matmul(X[:,n*2:], average[n*2:])
        value_3 = np.matmul(X_3, average)
        # value_3 =np.matmul(X_exl_3, average_val_ex_ch)

        rewards_3 = rewards_3[n:]
        choices_3 = choices_3[n:]
        value_3_choice_3 = choices_3 * value_3

        prev_ch_reward_3 = choices_3 * rewards_3
        ones_3 = np.ones(len(choices_3))
        trials_3 = len(choices_3)

        firing_rates_3 = firing_rates[task_3][n:]

        predictors_all = OrderedDict([
            ('Ch', choices_3),
            ('Rew', rewards_3),
            ('Value', value_3),
            ('Value Choice', value_3_choice_3),
            #
            ('Prev Rew Ch', prev_ch_reward_3),
            #    ('Prev Rew', prev_reward_3),
            # ('Prev Ch', prev_choice_3),
            ('ones', ones_3)
        ])

        X_3 = np.vstack(predictors_all.values()).T[:trials_3, :].astype(float)
        n_predictors = X_3.shape[1]
        y_3 = firing_rates_3.reshape(
            [len(firing_rates_3),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        #tstats,x = regression_code_session(y_3, X_3)
        ols = LinearRegression()
        ols.fit(X_3, y_3)
        C_3.append(ols.coef_.reshape(n_neurons, n_timepoints,
                                     n_predictors))  # Predictor loadings

        # C_3.append(tstats.reshape(n_predictors,n_neurons,n_timepoints)) # Predictor loadings
        cpd_3.append(
            re._CPD(X_3, y_3).reshape(n_neurons, n_timepoints, n_predictors))

    C_1 = np.concatenate(C_1, 0)

    C_2 = np.concatenate(C_2, 0)

    C_3 = np.concatenate(C_3, 0)

    C_2_inf = [~np.isinf(C_2[0]).any(axis=1)]
    C_2_nan = [~np.isnan(C_2[0]).any(axis=1)]
    C_3_inf = [~np.isinf(C_3[0]).any(axis=1)]
    C_3_nan = [~np.isnan(C_3[0]).any(axis=1)]
    C_1_inf = [~np.isinf(C_1[0]).any(axis=1)]
    C_1_nan = [~np.isnan(C_1[0]).any(axis=1)]
    nans = np.asarray(C_1_inf) & np.asarray(C_1_nan) & np.asarray(
        C_3_inf) & np.asarray(C_3_nan) & np.asarray(C_2_inf) & np.asarray(
            C_2_nan)
    C_1 = np.transpose(C_1[:, nans[0], :], [2, 0, 1])
    C_2 = np.transpose(C_2[:, nans[0], :], [2, 0, 1])
    C_3 = np.transpose(C_3[:, nans[0], :], [2, 0, 1])

    return C_1, C_2, C_3
Example #4
def regression_on_Q_values(experiment,experiment_sim_Q1_value_a,experiment_sim_Q1_value_b, experiment_sim_Q4_values):
    
    C_1 = []
    cpd = []
    C_sq = []

    # Finding correlation coefficients for task 1 
    for s,session in enumerate(experiment):
        aligned_spikes= session.aligned_rates[:]
        if aligned_spikes.shape[1] > 0: # sessions with neurons? 
            n_trials, n_neurons, n_timepoints = aligned_spikes.shape 
            
            Q_1_a = np.asarray(experiment_sim_Q1_value_a[s])
            Q_1_b = np.asarray(experiment_sim_Q1_value_b[s])

            Q_4 = np.asarray(experiment_sim_Q4_values[s])
            Q_1_a = Q_1_a[:-1]
            Q_1_b = Q_1_b[:-1]
            Q_4 = Q_4[:-1]

            # Getting out task indicies
            forced_trials = session.trial_data['forced_trial']
            outcomes = session.trial_data['outcomes']

            choices = session.trial_data['choices']
            non_forced_array = np.where(forced_trials == 0)[0]
            
            Q_1_a = Q_1_a[non_forced_array]
            Q_1_b = Q_1_b[non_forced_array]

            Q_4 = Q_4[non_forced_array]
            choices = choices[non_forced_array]
            aligned_spikes = aligned_spikes[:len(choices),:,:]
            outcomes = outcomes[non_forced_array]
            # Getting out task indicies

            ones = np.ones(len(choices))
            
            predictors = OrderedDict([('Q1_a', Q_1_a),
                                      ('Q1_b', Q_1_b),
                                      ('Q_4', Q_4), 
                                      ('choice', choices),
                                      ('reward', outcomes),
                                      ('ones', ones)])
        
           
            X = np.vstack(predictors.values()).T[:len(choices),:].astype(float)
            n_predictors = X.shape[1]
            y = aligned_spikes.reshape([len(aligned_spikes),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]
            ols = LinearRegression(copy_X = True,fit_intercept= True)
            ols.fit(X,y)
            C_1.append(ols.coef_.reshape(n_neurons,n_timepoints, n_predictors)) # Predictor loadings
            C_sq.append((ols.coef_.reshape(n_neurons,n_timepoints, n_predictors)**2))

            cpd.append(re._CPD(X,y).reshape(n_neurons,n_timepoints, n_predictors))


    cpd = np.nanmean(np.concatenate(cpd,0), axis = 0) # Population CPD is mean over neurons.
    C_sq = np.nanmean(np.concatenate(C_sq,0), axis = 0) # Mean squared coefficient over neurons.

    return cpd, predictors,C_sq, C_1
def regression_latent_state(experiment, experiment_sim_Q4_values):  
    
    C_1 = []
    C_coef = []
    cpd_1 = []
    
    # Finding correlation coefficients for task 1 
    for s,session in enumerate(experiment):
        aligned_spikes= session.aligned_rates[:]
        if aligned_spikes.shape[1] > 0: # sessions with neurons? 
            n_trials, n_neurons, n_timepoints = aligned_spikes.shape 
            
            #aligned_spikes = np.mean(aligned_spikes, axis =  2) 
            
            # Getting out task indicies   
            task = session.trial_data['task']
            forced_trials = session.trial_data['forced_trial']
            non_forced_array = np.where(forced_trials == 0)[0]
            task_non_forced = task[non_forced_array]
            task_1 = np.where(task_non_forced == 1)[0]
            task_2 = np.where(task_non_forced == 2)[0]    
            predictor_A_Task_1, predictor_A_Task_2, predictor_A_Task_3,\
            predictor_B_Task_1, predictor_B_Task_2, predictor_B_Task_3, reward,\
            predictor_a_good_task_1,predictor_a_good_task_2, predictor_a_good_task_3 = re.predictors_pokes(session)    
            # Getting out task indicies
            Q4 = experiment_sim_Q4_values[s]
            forced_trials = session.trial_data['forced_trial']
            outcomes = session.trial_data['outcomes']

            choices = session.trial_data['choices']
            non_forced_array = np.where(forced_trials == 0)[0]
                       
            choices = choices[non_forced_array]
            Q4 = Q4[non_forced_array]
            aligned_spikes = aligned_spikes[:len(choices),:]
            outcomes = outcomes[non_forced_array]
            # Getting out task indicies
            
            ones = np.ones(len(choices))
            choices = choices[:len(task_1)]
            outcomes = outcomes[:len(task_1)]
            latent_state = np.ones(len(task_1))
            latent_state[predictor_a_good_task_1] = -1
            ones = ones[:len(task_1)]
            aligned_spikes = aligned_spikes[:len(task_1)]
            Q4 = Q4[:len(task_1)]
            choice_Q4 = choices*Q4


            predictors = OrderedDict([#('latent_state',latent_state), 
                                      ('choice', choices),
                                      ('reward', outcomes),
                                      ('Q4', Q4),
                                      ('choice_Q4',choice_Q4),
                                      ('ones', ones)])
        
           
            X = np.vstack(predictors.values()).T[:len(choices),:].astype(float)
            n_predictors = X.shape[1]
            y = aligned_spikes.reshape([len(aligned_spikes),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]
            tstats = reg_f.regression_code(y, X)
            ols = LinearRegression(copy_X = True,fit_intercept= True)
            ols.fit(X,y)
            C_coef.append(ols.coef_.reshape(n_neurons, n_timepoints, n_predictors)) # Predictor loadings
            C_1.append(tstats.reshape(n_predictors,n_neurons,n_timepoints)) # Predictor loadings
            cpd_1.append(re._CPD(X,y).reshape(n_neurons,n_timepoints, n_predictors))

    C_1 = np.concatenate(C_1, axis = 1) # T-statistics concatenated over neurons.
    C_coef = np.concatenate(C_coef, axis = 0) # OLS coefficients concatenated over neurons.
    cpd_1 = np.nanmean(np.concatenate(cpd_1,0), axis = 0) # Population CPD is mean over neurons.

    C_2 = []
    C_coef_2 = []
    cpd_2 = []

    # Finding correlation coefficients for task 2
    for s,session in enumerate(experiment):
        aligned_spikes= session.aligned_rates[:]
        if aligned_spikes.shape[1] > 0: # sessions with neurons? 
            n_trials, n_neurons, n_timepoints = aligned_spikes.shape 
            #aligned_spikes = np.mean(aligned_spikes, axis =  2) 
            Q4 = experiment_sim_Q4_values[s]

            # Getting out task indicies   
            task = session.trial_data['task']
            forced_trials = session.trial_data['forced_trial']
            non_forced_array = np.where(forced_trials == 0)[0]
            task_non_forced = task[non_forced_array]
            task_1 = np.where(task_non_forced == 1)[0]
            task_2 = np.where(task_non_forced == 2)[0]    
            
            predictor_A_Task_1, predictor_A_Task_2, predictor_A_Task_3,\
            predictor_B_Task_1, predictor_B_Task_2, predictor_B_Task_3, reward,\
            predictor_a_good_task_1,predictor_a_good_task_2, predictor_a_good_task_3 = re.predictors_pokes(session)    

            # Getting out task indicies
            forced_trials = session.trial_data['forced_trial']
            outcomes = session.trial_data['outcomes']

            choices = session.trial_data['choices']
            non_forced_array = np.where(forced_trials == 0)[0]
            Q4 = Q4[non_forced_array]

            
            choices = choices[non_forced_array]
            aligned_spikes = aligned_spikes[:len(choices),:]
            outcomes = outcomes[non_forced_array]
            # Getting out task indicies

            ones = np.ones(len(choices))
            
            choices = choices[len(task_1):len(task_1)+len(task_2)]
            latent_state = np.ones(len(choices))
            latent_state[predictor_a_good_task_2] = -1
            
            outcomes = outcomes[len(task_1):len(task_1)+len(task_2)]
            ones = ones[len(task_1):len(task_1)+len(task_2)]
            aligned_spikes = aligned_spikes[len(task_1):len(task_1)+len(task_2)]
            Q4 = Q4[len(task_1):len(task_1)+len(task_2)]
            choice_Q4 = choices*Q4

            predictors = OrderedDict([#('latent_state',latent_state),
                                      ('choice', choices),
                                      ('reward', outcomes),
                                      ('Q4',Q4),
                                      ('choice_Q4',choice_Q4),
                                      ('ones', ones)])
        
           
            X = np.vstack(predictors.values()).T[:len(choices),:].astype(float)
            n_predictors = X.shape[1]
            y = aligned_spikes.reshape([len(aligned_spikes),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]
            tstats = reg_f.regression_code(y, X)
            C_2.append(tstats.reshape(n_predictors,n_neurons,n_timepoints)) # Predictor loadings
            
            ols = LinearRegression(copy_X = True,fit_intercept= True)
            ols.fit(X,y)
            C_coef_2.append(ols.coef_.reshape(n_neurons, n_timepoints, n_predictors)) # Predictor loadings
            cpd_2.append(re._CPD(X,y).reshape(n_neurons,n_timepoints, n_predictors))


    C_2 = np.concatenate(C_2, axis = 1) # T-statistics concatenated over neurons.
    C_coef_2 = np.concatenate(C_coef_2, axis = 0) # OLS coefficients concatenated over neurons.
    cpd_2 = np.nanmean(np.concatenate(cpd_2,0), axis = 0) # Population CPD is mean over neurons.

    C_3 = []
    C_coef_3 = []
    cpd_3 = []
    # Finding correlation coefficients for task 3
    for s,session in enumerate(experiment):
        aligned_spikes= session.aligned_rates[:]
        if aligned_spikes.shape[1] > 0: # sessions with neurons? 
            n_trials, n_neurons, n_timepoints = aligned_spikes.shape 
            #aligned_spikes = np.mean(aligned_spikes, axis =  2) 

            
            # Getting out task indicies   
            task = session.trial_data['task']
            forced_trials = session.trial_data['forced_trial']
            non_forced_array = np.where(forced_trials == 0)[0]
            task_non_forced = task[non_forced_array]
            task_1 = np.where(task_non_forced == 1)[0]
            task_2 = np.where(task_non_forced == 2)[0]    
            Q4 = experiment_sim_Q4_values[s]

            predictor_A_Task_1, predictor_A_Task_2, predictor_A_Task_3,\
            predictor_B_Task_1, predictor_B_Task_2, predictor_B_Task_3, reward,\
            predictor_a_good_task_1,predictor_a_good_task_2, predictor_a_good_task_3 = re.predictors_pokes(session)    


            # Getting out task indicies
            forced_trials = session.trial_data['forced_trial']
            outcomes = session.trial_data['outcomes']

            choices = session.trial_data['choices']
            non_forced_array = np.where(forced_trials == 0)[0]
            
            Q4 = Q4[non_forced_array]
            choices = choices[non_forced_array]
            aligned_spikes = aligned_spikes[:len(choices),:]
            outcomes = outcomes[non_forced_array]
            # Getting out task indicies

            ones = np.ones(len(choices))
  
            choices = choices[len(task_1)+len(task_2):]
            latent_state = np.ones(len(choices))
            latent_state[predictor_a_good_task_3] = -1
            
            outcomes = outcomes[len(task_1)+len(task_2):]
            ones = ones[len(task_1)+len(task_2):]
            Q4 = Q4[len(task_1)+len(task_2):]
            choice_Q4 = choices*Q4
            aligned_spikes = aligned_spikes[len(task_1)+len(task_2):]
            
            predictors = OrderedDict([#('latent_state', latent_state),
                                      ('choice', choices),
                                      ('reward', outcomes),
                                      ('Q4', Q4),
                                      ('choice_Q4',choice_Q4),
                                      ('ones', ones)])
        
           
            X = np.vstack(predictors.values()).T[:len(choices),:].astype(float)
            n_predictors = X.shape[1]
            y = aligned_spikes.reshape([len(aligned_spikes),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]
            tstats = reg_f.regression_code(y, X)

            C_3.append(tstats.reshape(n_predictors,n_neurons,n_timepoints)) # Predictor loadings
            
            ols = LinearRegression(copy_X = True,fit_intercept= True)
            ols.fit(X,y)
            C_coef_3.append(ols.coef_.reshape(n_neurons,n_timepoints, n_predictors)) # Predictor loadings
            cpd_3.append(re._CPD(X,y).reshape(n_neurons,n_timepoints, n_predictors))


    C_3 = np.concatenate(C_3, axis = 1) # T-statistics concatenated over neurons.
    C_coef_3 = np.concatenate(C_coef_3, axis = 0) # OLS coefficients concatenated over neurons.
    cpd_3 = np.nanmean(np.concatenate(cpd_3,0), axis = 0) # Population CPD is mean over neurons.
    
    return C_1, C_2, C_3, C_coef,C_coef_2,C_coef_3,cpd_1,cpd_2,cpd_3,predictors
def time_in_block_corr(PFC, area = 'PFC', n = 11, plot_a = False, plot_b = False):
   
   
    all_subjects = [PFC['DM'][0][:9], PFC['DM'][0][9:25],PFC['DM'][0][25:39],PFC['DM'][0][39:]]
    all_firing = [PFC['Data'][0][:9], PFC['Data'][0][9:25],PFC['Data'][0][25:39],PFC['Data'][0][39:]]
   #  all_subjects = [HP['DM'][0][:16], HP['DM'][0][16:24],HP['DM'][0][24:],PFC['DM'][0][:9], PFC['DM'][0][9:25],PFC['DM'][0][25:39],PFC['DM'][0][39:]]
    # all_firing = [HP['Data'][0][:16], HP['Data'][0][16:24],HP['Data'][0][24:],PFC['Data'][0][:9], PFC['Data'][0][9:25],PFC['Data'][0][25:39],PFC['Data'][0][39:]]
    # all_subjects = [HP['DM'][0][:16], HP['DM'][0][16:24],HP['DM'][0][24:]]
    # all_firing = [HP['Data'][0][:16], HP['Data'][0][16:24],HP['Data'][0][24:]]
    
    C_1_all = []; C_2_all = []; C_3_all = []
    
    cpd_1 = []; cpd_2 = []; cpd_3 = []
    
    average = vg.rew_prev_behaviour(PFC, n = n, perm = False)
    # average = vg.rew_prev_behaviour(HP, n = n, perm = False)

    for d,dd in enumerate(all_subjects):
        C_1 = []; C_2 = []; C_3 = []

        dm = all_subjects[d]
        firing = all_firing[d]

        for  s, sess in enumerate(dm):
            # if d <3:
            #     average=average_2
            # else:
            #     average=average_1
 
            
           
            DM = dm[s]
            firing_rates = firing[s]
           
            n_trials, n_neurons, n_timepoints = firing_rates.shape
            
            choices = DM[:,1]
            reward = DM[:,2]  
            task =  DM[:,5]
            a_pokes = DM[:,6]
            b_pokes = DM[:,7]
            
            taskid = vg.task_ind(task, a_pokes, b_pokes)
          
            
            task_1 = np.where(taskid == 1)[0]
            task_2 = np.where(taskid == 2)[0]
            task_3 = np.where(taskid == 3)[0]
    
            reward_current = reward
            choices_current = choices-0.5
    
           
            rewards_1 = reward_current[task_1]
            choices_1 = choices_current[task_1]
            
            previous_rewards_1 = scipy.linalg.toeplitz(rewards_1, np.zeros((1,n)))[n-1:-1]
             
            previous_choices_1 = scipy.linalg.toeplitz(0.5-choices_1, np.zeros((1,n)))[n-1:-1]
             
            interactions_1 = scipy.linalg.toeplitz((((0.5-choices_1)*(rewards_1-0.5))*2),np.zeros((1,n)))[n-1:-1]
             
    
            ones = np.ones(len(interactions_1)).reshape(len(interactions_1),1)
             
            X_1 = np.hstack([previous_rewards_1,previous_choices_1,interactions_1,ones])
            #average_val_ex_ch = np.concatenate([average[n].reshape(1),average[n*2:]])
            #X_exl_1 = np.concatenate([X_1[:,n].reshape(len(X_1),1),X_1[:,n*2:]],1)
            #value = np.matmul(X[:,n*2:], average[n*2:])
            value_1 =np.matmul(X_1, average)
            #value_1 =np.matmul(X_exl_1, average_val_ex_ch)
    
            rewards_1 = rewards_1[n:]
            choices_1 = choices_1[n:]
              
            
            ones_1 = np.ones(len(choices_1))
            trials_1 = len(choices_1)
            value_1_choice_1 = choices_1*value_1
           
          
            firing_rates_1 = firing_rates[task_1][n:]
            
            a_1 = np.where(choices_1 == 0.5)[0]
            b_1 = np.where(choices_1 == -0.5)[0]
            
            if plot_a == True:
                rewards_1 = rewards_1[a_1] 
                choices_1 = choices_1[a_1]    
                value_1 = value_1[a_1]
                ones_1  = ones_1[a_1]
                firing_rates_1 = firing_rates_1[a_1]
               # rewards_1 = scipy.stats.zscore(rewards_1)
              #  value_1 = scipy.stats.zscore(value_1)
    
              
            elif plot_b == True:
                
                rewards_1 = rewards_1[b_1] 
                choices_1 = choices_1[b_1]
                value_1 = value_1[b_1]
                ones_1  = ones_1[b_1]
                firing_rates_1 = firing_rates_1[b_1]
              #  rewards_1 = scipy.stats.zscore(rewards_1)
              #  value_1 = scipy.stats.zscore(value_1)
    
            predictors_all = OrderedDict([
                                        ('Choice', choices_1),
                                        ('Reward', rewards_1),
                                        ('Value',value_1), 
                                        ('Value Choice',value_1_choice_1), 
                                   #     ('Prev Rew Ch', prev_ch_reward_1),
    
                               #      ('Prev Rew', prev_reward_1),
                                     #  ('Prev Ch', prev_choice_1),
                                       ('ones', ones_1)
                                        ])
            
            X_1 = np.vstack(predictors_all.values()).T[:trials_1,:].astype(float)
            
            n_predictors = X_1.shape[1]
            y_1 = firing_rates_1.reshape([len(firing_rates_1),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]
           # tstats,x = regression_code_session(y_1, X_1)
            #tstats =  reg_f.regression_code(y_1, X_1)
            ols = LinearRegression()
            ols.fit(X_1,y_1)
            C_1.append(ols.coef_.reshape(n_neurons, n_timepoints, n_predictors)) # Predictor loadings
            #C_1.append(tstats.reshape(n_predictors,n_neurons,n_timepoints)) # Predictor loadings
            cpd_1.append(re._CPD(X_1,y_1).reshape(n_neurons, n_timepoints, n_predictors))
            
            
            rewards_2 = reward_current[task_2]
            choices_2 = choices_current[task_2]
            
            previous_rewards_2 = scipy.linalg.toeplitz(rewards_2, np.zeros((1,n)))[n-1:-1]
             
            previous_choices_2 = scipy.linalg.toeplitz(0.5-choices_2, np.zeros((1,n)))[n-1:-1]
             
            interactions_2 = scipy.linalg.toeplitz((((0.5-choices_2)*(rewards_2-0.5))*2),np.zeros((1,n)))[n-1:-1]
             
    
            ones = np.ones(len(interactions_2)).reshape(len(interactions_2),1)
             
            X_2 = np.hstack([previous_rewards_2,previous_choices_2,interactions_2,ones])
            # average_val_ex_ch = np.concatenate([average[n].reshape(1),average[n*2:]])
            # X_exl_2 = np.concatenate([X_2[:,n].reshape(len(X_2),1),X_2[:,n*2:]],1)
            # value = np.matmul(X[:,n*2:], average[n*2:])
            value_2 =np.matmul(X_2, average)
            #value_2 =np.matmul(X_exl_2, average_val_ex_ch)
    
            rewards_2 = rewards_2[n:]
            choices_2 = choices_2[n:]
              
            
            ones_2 = np.ones(len(choices_2))
            trials_2 = len(choices_2)
    
            firing_rates_2 = firing_rates[task_2][n:]
            
            value_2_choice_2 = choices_2*value_2

            a_2 = np.where(choices_2 == 0.5)[0]
            b_2 = np.where(choices_2 == -0.5)[0]
            
            if plot_a == True:
                rewards_2 = rewards_2[a_2] 
                choices_2 = choices_2[a_2]
                value_2 = value_2[a_2]
                ones_2  = ones_2[a_2]
                firing_rates_2 = firing_rates_2[a_2]
               # rewards_2 = scipy.stats.zscore(rewards_2)
               # value_2 = scipy.stats.zscore(value_2)
    
            elif plot_b == True:
                
                rewards_2 = rewards_2[b_2] 
                choices_2 = choices_2[b_2]
                value_2 = value_2[b_2]
                ones_2  = ones_2[b_2]
                firing_rates_2 = firing_rates_2[b_2]
               # rewards_2 = scipy.stats.zscore(rewards_2)
               # value_2 = scipy.stats.zscore(value_2)
    
            predictors_all = OrderedDict([
                                         ('Choice', choices_2),
                                        ('Reward', rewards_2),
                                        ('Value',value_2), 
                                         ('Value Choice',value_2_choice_2), 
                                     #   ('Prev Rew Ch', prev_ch_reward_2),
    #
                                   #    ('Prev Rew', prev_reward_2),
                                      # ('Prev Ch', prev_choice_2),
                                        ('ones', ones_2)
                                        ])
            
            X_2 = np.vstack(predictors_all.values()).T[:trials_2,:].astype(float)
            
            n_predictors = X_2.shape[1]
            y_2 = firing_rates_2.reshape([len(firing_rates_2),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]
           # tstats,x = regression_code_session(y_2, X_2)
            ols = LinearRegression()
            ols.fit(X_2,y_2)
            C_2.append(ols.coef_.reshape(n_neurons, n_timepoints, n_predictors)) # Predictor loadings
            cpd_2.append(re._CPD(X_2,y_2).reshape(n_neurons, n_timepoints, n_predictors))
      
        
            
            rewards_3 = reward_current[task_3]
            choices_3 = choices_current[task_3]
            
            previous_rewards_3 = scipy.linalg.toeplitz(rewards_3, np.zeros((1,n)))[n-1:-1]
             
            previous_choices_3 = scipy.linalg.toeplitz(0.5-choices_3, np.zeros((1,n)))[n-1:-1]
             
            interactions_3 = scipy.linalg.toeplitz((((0.5-choices_3)*(rewards_3-0.5))*2),np.zeros((1,n)))[n-1:-1]
             
    
            ones = np.ones(len(interactions_3)).reshape(len(interactions_3),1)
             
            X_3 = np.hstack([previous_rewards_3,previous_choices_3,interactions_3,ones])
           #  average_val_ex_ch = np.concatenate([average[n].reshape(1),average[n*2:]])
           #  X_exl_3 = np.concatenate([X_3[:,n].reshape(len(X_3),1),X_3[:,n*2:]],1)
           # # value = np.matmul(X[:,n*2:], average[n*2:])
            value_3 =np.matmul(X_3, average)
            # value_3 =np.matmul(X_exl_3, average_val_ex_ch)
    
            rewards_3 = rewards_3[n:]
            choices_3 = choices_3[n:]
              
            
            ones_3 = np.ones(len(choices_3))
            trials_3 = len(choices_3)
    
            firing_rates_3 = firing_rates[task_3][n:]
            
            value_3_choice_3 = choices_3*value_3

            a_3 = np.where(choices_3 == 0.5)[0]
            b_3 = np.where(choices_3 == -0.5)[0]
            
            if plot_a == True:
                rewards_3 = rewards_3[a_3] 
                choices_3 = choices_3[a_3]
                value_3 = value_3[a_3]
                ones_3  = ones_3[a_3]
    
                firing_rates_3 = firing_rates_3[a_3]
              #  rewards_3 = scipy.stats.zscore(rewards_3)
              #  value_3 = scipy.stats.zscore(value_3)
    
               
            elif plot_b == True:
                rewards_3 = rewards_3[b_3] 
                choices_3 = choices_3[b_3]
              
                value_3 = value_3[b_3]
                ones_3  = ones_3[b_3]
    
                firing_rates_3 = firing_rates_3[b_3]
             #   rewards_3 = scipy.stats.zscore(rewards_3)
             #   value_3 = scipy.stats.zscore(value_3)
    
               
              
      
            predictors_all = OrderedDict([
                                        ('Ch', choices_3),
                                        ('Rew', rewards_3),
                                        ('Value',value_3), 
                                        ('Value Choice',value_3_choice_3), 
    #                                   
                                     #   ('Prev Rew Ch', prev_ch_reward_3),
                                  #    ('Prev Rew', prev_reward_3),
                                      # ('Prev Ch', prev_choice_3),
                                        ('ones', ones_3)
                                        ])
            
            X_3 = np.vstack(predictors_all.values()).T[:trials_3,:].astype(float)
            n_predictors = X_3.shape[1]
            rank = np.linalg.matrix_rank(X_3)
            print('Design matrix rank: {} / {} predictors'.format(rank, n_predictors))
            y_3 = firing_rates_3.reshape([len(firing_rates_3),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]
            #tstats,x = regression_code_session(y_3, X_3)
            ols = LinearRegression()
            ols.fit(X_3,y_3)
            C_3.append(ols.coef_.reshape(n_neurons, n_timepoints, n_predictors)) # Predictor loadings
    
           # C_3.append(tstats.reshape(n_predictors,n_neurons,n_timepoints)) # Predictor loadings
            cpd_3.append(re._CPD(X_3,y_3).reshape(n_neurons, n_timepoints, n_predictors))
            
        C_1_all.append(np.concatenate(C_1,0)); C_2_all.append(np.concatenate(C_2,0)); C_3_all.append(np.concatenate(C_3,0))
     
    return C_1_all,C_2_all,C_3_all
    
Example #7
def regression_RPE(data, perm=True):

    C = []
    cpd = []

    dm = data['DM']
    firing = data['Data']

    if perm:
        C_perm = [[] for i in range(perm)
                  ]  # To store permuted predictor loadings for each session.
        cpd_perm = [[] for i in range(perm)
                    ]  # To store permuted cpd for each session.

    for s, sess in enumerate(dm):
        DM = dm[s]
        firing_rates = firing[s]
        n_trials, n_neurons, n_timepoints = firing_rates.shape
        choices = DM[:, 1]
        reward = DM[:, 2]
        # state = DM[:,0]
        rpe = DM[:, 14]
        q1 = DM[:, 9]
        q4 = DM[:, 10]
        rand = np.random.normal(np.mean(q4), np.std(q4), len(q4))
        ones = np.ones(len(rpe))
        trials = len(ones)

        predictors_all = OrderedDict([
            ('Reward', reward),
            ('Choice', choices),
            #('State', state),
            ('RPE', rpe),
            #('Q4', q4),
            #('Q1', q1),
            #('Noise', rand),
            ('ones', ones)
        ])

        X = np.vstack(
            predictors_all.values()).T[:len(choices), :].astype(float)
        n_predictors = X.shape[1]
        y = firing_rates.reshape(
            [len(firing_rates),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y, X)

        C.append(tstats.reshape(n_predictors, n_neurons,
                                n_timepoints))  # Predictor loadings
        cpd.append(
            re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))

        if perm:
            for i in range(perm):
                X_perm = np.roll(X, np.random.randint(trials), axis=0)
                tstats = reg_f.regression_code(y, X_perm)

                C_perm[i].append(
                    tstats.reshape(n_predictors, n_neurons,
                                   n_timepoints))  # Predictor loadings
                cpd_perm[i].append(
                    re._CPD(X_perm, y).reshape(n_neurons, n_timepoints,
                                               n_predictors))

    if perm:  # Evaluate P values.
        cpd_perm = np.stack(
            [np.nanmean(np.concatenate(cpd_i, 0), 0) for cpd_i in cpd_perm], 0)
        p = np.percentile(cpd_perm, 95, axis=0)

    cpd = np.nanmean(np.concatenate(cpd, 0), axis=0)
    C = np.concatenate(C, 1)

    return cpd_perm, p, cpd, C, predictors_all
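
# A hedged usage sketch (the variable names here are illustrative, not from the source):
# p returned above is the 95th percentile, across permutations, of the population-mean
# CPD (mean over neurons) obtained after random circular shifts of the design matrix, so
# comparing it with the real CPD flags time bins where a predictor explains more
# variance than expected by chance.
# cpd_perm, p, cpd, C, predictors = regression_RPE(data, perm=500)
# significant = cpd > p  # boolean array [n_timepoints, n_predictors]
# for j, name in enumerate(predictors):
#     print(name, 'significant in', int(significant[:, j].sum()), 'time bins')
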
Example #8
def regression_past_choice(data, perm=True):

    C = []
    cpd = []

    dm = data['DM'][0]
    firing = data['Data'][0]

    if perm:
        C_perm = [[] for i in range(perm)
                  ]  # To store permuted predictor loadings for each session.
        cpd_perm = [[] for i in range(perm)
                    ]  # To store permuted cpd for each session.

    for s, sess in tqdm(enumerate(dm)):
        DM = dm[s]
        firing_rates = firing[s]
        n_trials, n_neurons, n_timepoints = firing_rates.shape
        choices = DM[:, 1]
        reward = DM[:, 2]
        block = DM[:, 4]
        block_df = np.diff(block)
        task = DM[:, 5]
        task_id = np.where(np.diff(task))[0]

        ones = np.ones(len(block))
        trials = len(ones)
        stay = []
        for c, ch in enumerate(choices):
            if c > 0:
                if choices[c - 1] == choices[c]:
                    stay.append(1)
                else:
                    stay.append(0)
            else:
                stay.append(0)

        a_side = 0
        a_side_l = []
        for r, rew in enumerate(reward):
            if r in task_id:
                a_side = 0
            elif reward[r] == 1 and choices[r] == 1:
                a_side += 1
            a_side_l.append(a_side)

        b_side = 0
        b_side_l = []
        for r, rew in enumerate(reward):
            if r in task_id:
                b_side = 0
            elif reward[r] == 1 and choices[r] == 0:
                b_side += 1
            b_side_l.append(b_side)

        stay_ch = np.asarray(stay) * choices
        predictors_all = OrderedDict([('Reward', reward), ('Choice', choices),
                                      ('Stay', stay),
                                      ('Stay x Choice', stay_ch),
                                      ('Reward Cum A', a_side_l),
                                      ('Reward Cum B', b_side_l),
                                      ('ones', ones)])

        X = np.vstack(
            predictors_all.values()).T[:len(choices), :].astype(float)
        n_predictors = X.shape[1]
        y = firing_rates.reshape(
            [len(firing_rates),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y, X)

        C.append(tstats.reshape(n_predictors, n_neurons,
                                n_timepoints))  # Predictor loadings
        cpd.append(
            re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))

        if perm:
            for i in range(perm):
                X_perm = np.roll(X, np.random.randint(trials), axis=0)
                tstats = reg_f.regression_code(y, X_perm)

                C_perm[i].append(
                    tstats.reshape(n_predictors, n_neurons,
                                   n_timepoints))  # Predictor loadings
                cpd_perm[i].append(
                    re._CPD(X_perm, y).reshape(n_neurons, n_timepoints,
                                               n_predictors))

    if perm:  # Evaluate P values.
        cpd_perm = np.stack(
            [np.nanmean(np.concatenate(cpd_i, 0), 0) for cpd_i in cpd_perm], 0)
        p = np.percentile(cpd_perm, 95, axis=0)

    cpd = np.nanmean(np.concatenate(cpd, 0), axis=0)
    C = np.concatenate(C, 1)

    return cpd_perm, p, cpd, C, predictors_all
Example #9
def regression_bayes(experiment, bayes_prior, bayes_posterior):
    C = []
    cpd = []
    C_sq = []

    # Finding correlation coefficients for task 1
    for s, session in enumerate(experiment):
        aligned_spikes = session.aligned_rates[:]
        if aligned_spikes.shape[1] > 0:  # sessions with neurons?
            n_trials, n_neurons, n_timepoints = aligned_spikes.shape

            prior = np.asarray(bayes_prior[s][:, 0])
            posterior = np.asarray(bayes_posterior[s][:, 0])

            # Getting out task indicies
            forced_trials = session.trial_data['forced_trial']
            outcomes = session.trial_data['outcomes']

            choices = session.trial_data['choices']
            non_forced_array = np.where(forced_trials == 0)[0]

            choices = choices[non_forced_array]
            aligned_spikes = aligned_spikes[1:len(choices), :, :]
            outcomes = outcomes[non_forced_array]
            outcomes = outcomes[1:]
            choices = choices[1:]
            # Getting out task indicies

            ones = np.ones(len(choices))

            predictors = OrderedDict([('Prior', prior),
                                      ('Posterior', posterior),
                                      ('choice', choices),
                                      ('reward', outcomes), ('ones', ones)])

            X = np.vstack(
                predictors.values()).T[:len(choices), :].astype(float)
            n_predictors = X.shape[1]
            y = aligned_spikes.reshape(
                [len(aligned_spikes),
                 -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
            ols = LinearRegression(copy_X=True, fit_intercept=False)
            ols.fit(X, y)
            C.append(ols.coef_.reshape(n_neurons, n_timepoints,
                                       n_predictors))  # Predictor loadings
            sq = ols.coef_.reshape(n_neurons, n_timepoints, n_predictors)
            sq = sq**2
            C_sq.append(sq)
            cpd.append(
                re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))

    cpd = np.nanmean(np.concatenate(cpd, 0),
                     axis=0)  # Population CPD is mean over neurons.
    C_sq_all = np.concatenate(C, 0)  # Raw (unsquared) coefficients concatenated over neurons.

    C = np.nanmean(np.concatenate(C, 0), axis=0)

    C_sq = np.nanmean(np.concatenate(C_sq, 0),
                      axis=0)  # Mean squared coefficient over neurons.

    return cpd, predictors, C_sq, C_sq_all
Example #10
def regression_Q_values_choices(experiment):
    C_1_choice = []
    cpd_1_choice = []
    C_2_choice = []
    cpd_2_choice= []
    C_3_choice = []
    cpd_3_choice  = []

    # Finding correlation coefficients for task 1 
    for s,session in enumerate(experiment):
        aligned_spikes= session.aligned_rates[:]
        if aligned_spikes.shape[1] > 0: # sessions with neurons? 
            
            predictor_A_Task_1, predictor_A_Task_2, predictor_A_Task_3,\
            predictor_B_Task_1, predictor_B_Task_2, predictor_B_Task_3, reward,\
            predictor_a_good_task_1,predictor_a_good_task_2, predictor_a_good_task_3,\
            reward_previous,previous_trial_task_1,previous_trial_task_2,previous_trial_task_3,\
            same_outcome_task_1, same_outcome_task_2, same_outcome_task_3,different_outcome_task_1,\
            different_outcome_task_2, different_outcome_task_3 = re.predictors_include_previous_trial(session)     
            
            n_trials, n_neurons, n_timepoints = aligned_spikes.shape 
            
            # Task indicies 
            task = session.trial_data['task']
            forced_trials = session.trial_data['forced_trial']

            non_forced_array = np.where(forced_trials == 0)[0]
            task_non_forced = task[non_forced_array]
            task_1 = np.where(task_non_forced == 1)[0]
            task_2 = np.where(task_non_forced == 2)[0]        
            
            # (1) Better correlated with the *previous* choice, or the *current choice*
            predictor_A_Task_1 = predictor_A_Task_1[1:len(task_1)]
            previous_trial_task_1 = previous_trial_task_1[1:len(task_1)]
            aligned_spikes_task_1 = aligned_spikes[1:len(task_1)]
            
            ones = np.ones(len(predictor_A_Task_1))
            
            # Task 1 
            predictors = OrderedDict([('Current Choice', predictor_A_Task_1),
                                      ('Previous Choice', previous_trial_task_1),             
                                      ('ones', ones)])
        
           
            X = np.vstack(predictors.values()).T[:len(predictor_A_Task_1),:].astype(float)
            n_predictors = X.shape[1]
            y = aligned_spikes_task_1.reshape([len(aligned_spikes_task_1),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]
            ols = LinearRegression(copy_X = True,fit_intercept= True)
            ols.fit(X,y)
            C_1_choice.append(ols.coef_.reshape(n_neurons,n_timepoints, n_predictors)) # Predictor loadings
            cpd_1_choice.append(re._CPD(X,y).reshape(n_neurons,n_timepoints, n_predictors))
            
            # Task 2 
            # (1) Better correlated with the *previous* choice, or the *current choice*
            predictor_A_Task_2 = predictor_A_Task_2[len(task_1)+1:len(task_1)+len(task_2)]
            previous_trial_task_2 = previous_trial_task_2[len(task_1)+1:len(task_1)+len(task_2)]
            aligned_spikes_task_2 = aligned_spikes[len(task_1)+1:len(task_1)+len(task_2)]
            ones = np.ones(len(predictor_A_Task_2))

            predictors = OrderedDict([('Current Choice', predictor_A_Task_2),
                                      ('Previous Choice', previous_trial_task_2),             
                                      ('ones', ones)])
        
           
            X = np.vstack(predictors.values()).T[:len(predictor_A_Task_2),:].astype(float)
            n_predictors = X.shape[1]
            y = aligned_spikes_task_2.reshape([len(aligned_spikes_task_2),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]
            ols = LinearRegression(copy_X = True,fit_intercept= True)
            ols.fit(X,y)
            C_2_choice.append(ols.coef_.reshape(n_neurons,n_timepoints, n_predictors)) # Predictor loadings
            cpd_2_choice.append(re._CPD(X,y).reshape(n_neurons,n_timepoints, n_predictors))
            
            # Task 3
            # (1) Better correlated with the *previous* choice, or the *current choice*
            predictor_A_Task_3 = predictor_A_Task_3[len(task_1)+len(task_2)+1:]
            previous_trial_task_3 = previous_trial_task_3[len(task_1)+len(task_2)+1:]
            aligned_spikes_task_3 = aligned_spikes[len(task_1)+len(task_2)+1:]
            aligned_spikes_task_3 = aligned_spikes_task_3[:len(predictor_A_Task_3)]
            ones = np.ones(len(predictor_A_Task_3))

            predictors = OrderedDict([('Current Choice', predictor_A_Task_3),
                                      ('Previous Choice', previous_trial_task_3),             
                                      ('ones', ones)])
        
           
            X = np.vstack(predictors.values()).T[:len(predictor_A_Task_3),:].astype(float)
            n_predictors = X.shape[1]
            y = aligned_spikes_task_3.reshape([len(aligned_spikes_task_3),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]
            ols = LinearRegression(copy_X = True,fit_intercept= True)
            ols.fit(X,y)
            C_3_choice.append(ols.coef_.reshape(n_neurons,n_timepoints, n_predictors)) # Predictor loadings
            cpd_3_choice.append(re._CPD(X,y).reshape(n_neurons,n_timepoints, n_predictors))
            

           

    cpd_1_choice = np.nanmean(np.concatenate(cpd_1_choice,0), axis = 0) # Population CPD is mean over neurons.
    cpd_2_choice = np.nanmean(np.concatenate(cpd_2_choice,0), axis = 0) # Population CPD is mean over neurons.
    cpd_3_choice = np.nanmean(np.concatenate(cpd_3_choice,0), axis = 0) # Population CPD is mean over neurons.


    return C_1_choice, C_2_choice, C_3_choice, cpd_1_choice, cpd_2_choice, cpd_3_choice, predictors
Example #11
def between_tasks(Data, DM, PFC=True):

    cpd_true = []
    C_true = []
    cpd_aligned = []
    C_aligned = []

    C_sq_true = []
    C_sq_aligned = []

    if PFC == True:
        ind_1 = np.arange(0, 26)
        ind_2 = np.arange(1, 27)
        ind_a = np.hstack((ind_1, ind_2))
        ind_b = np.hstack((ind_2, ind_1))

    else:
        ind_1 = np.arange(0, 15)
        ind_2 = np.arange(1, 16)
        ind_a = np.hstack((ind_1, ind_2))
        ind_b = np.hstack((ind_2, ind_1))

    all_sessions_firing, all_session_dm = select_trials(Data, DM, 10)

    for a, b in zip(ind_a, ind_b):
        aligned_a, aligned_b, original_a, original_b, task_1_aligned = procrustes(
            a, b, all_sessions_firing, all_session_dm)

        target = np.transpose(task_1_aligned, [1, 0, 2])
        source = np.transpose(original_a, [1, 0, 2])[40:80, :, :]

        #  Session 1 Task 2
        dm_test = all_session_dm[a]

        trials, n_neurons, n_timepoints = source.shape

        reward_test = dm_test[:, 2][40:80]
        state_test = dm_test[:, 0][40:80]
        choices_test = dm_test[:, 1][40:80]
        ones_test = np.ones(len(choices_test))

        reward_choice = choices_test * reward_test

        # Aligned Task 2 from Task 1
        predictors_train = OrderedDict([('State', state_test),
                                        ('Reward', reward_test),
                                        ('Choice', choices_test),
                                        ('Reward Choice Int', reward_choice),
                                        ('ones', ones_test)])

        X = np.vstack(predictors_train.values()).T[:trials, :].astype(float)
        n_predictors = X.shape[1]
        y = source.reshape(
            [len(source),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y, X)
        C_true.append(tstats.reshape(n_predictors, n_neurons,
                                     n_timepoints))  # Predictor loadings
        C_sq_true.append(
            tstats.reshape(n_predictors, n_neurons, n_timepoints)**2)
        cpd_true.append(
            re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))

        # Aligned Using Neurons
        n_predictors = X.shape[1]
        y = target.reshape(
            [len(target),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y, X)
        C_aligned.append(tstats.reshape(n_predictors, n_neurons,
                                        n_timepoints))  # Predictor loadings
        C_sq_aligned.append(
            tstats.reshape(n_predictors, n_neurons, n_timepoints)**2)
        cpd_aligned.append(
            re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))

    cpd_true = np.nanmean(np.concatenate(cpd_true, 0), axis=0)
    C_sq_true = np.concatenate(C_sq_true, 1)

    cpd_aligned = np.nanmean(np.concatenate(cpd_aligned, 0), axis=0)
    C_sq_aligned = np.concatenate(C_sq_aligned, 1)


    c = ['violet', 'black', 'green', 'blue', 'turquoise', 'grey', 'yellow', 'pink',
         'purple', 'darkblue', 'darkred', 'darkgreen', 'gold', 'lightgreen']
    p = [*predictors_train]

    plt.figure()

    for i in np.arange(cpd_true.shape[1] - 1):
        plt.plot(cpd_true[:, i], label=p[i] + ' Real', color=c[i])
        plt.plot(cpd_aligned[:, i],
                 label=p[i] + ' Aligned',
                 color=c[i],
                 linestyle='--')

    plt.legend()
    plt.ylabel('CPD')
    plt.xlabel('Time in Trial')
    plt.xticks([25, 35, 42], ['I', 'C', 'R'])

    return cpd_aligned, cpd_true
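# A minimal usage sketch (added; `Data` and `DM` are assumed to be the same structures
# passed to the other functions in this file):
#
#     cpd_aligned, cpd_true = between_tasks(Data, DM, PFC=True)
#
# Both outputs are [n_timepoints, n_predictors] arrays of CPDs averaged over neurons,
# so cpd_true[:, 2] is, e.g., the 'Choice' CPD time course in the recorded activity
# and cpd_aligned[:, 2] the same quantity for the Procrustes-aligned activity.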
Example #12
def regression(Data, DM, PFC=True):

    cpd_true = []
    C_true = []
    cpd_misaligned = []
    C_misaligned = []
    cpd_aligned = []
    C_aligned = []

    C_sq_true = []
    C_sq_misaligned = []
    C_sq_aligned = []

    if PFC == True:
        ind_1 = np.arange(0, 26)
        ind_2 = np.arange(1, 27)
        ind_a = np.hstack((ind_1, ind_2))
        ind_b = np.hstack((ind_2, ind_1))

    else:
        ind_1 = np.arange(0, 15)
        ind_2 = np.arange(1, 16)
        ind_a = np.hstack((ind_1, ind_2))
        ind_b = np.hstack((ind_2, ind_1))

    misaligned_list = []
    aligned_list = []

    for a, b in zip(ind_a, ind_b):

        all_sessions, all_session_dm, aligned_by_trial, original_a, original_b = in_progress(
            a, b, Data, DM, misaligned_list, aligned_list)
        # plt.figure()

        # for i in np.arange(aligned_by_trial.shape[0]):
        #     plt.plot(np.mean(aligned_by_trial,1)[i,:])

        # plt.figure()
        # for i in np.arange(original_b.shape[0]):
        #     plt.plot(np.mean(original_b,1)[i,:])

        #session_a = all_sessions[ind_a]

        dm_test = all_session_dm[a]

        session_training = np.transpose(original_a, [1, 0, 2])
        session_misaligned = np.transpose(original_b, [1, 0, 2])
        session_aligned = np.transpose(aligned_by_trial, [1, 0, 2])

        trials, n_neurons, n_timepoints = session_training.shape

        reward_test = dm_test[:, 2]
        state_test = dm_test[:, 0]
        choices_test = dm_test[:, 1]
        ones_test = np.ones(len(choices_test))

        trials_since_block = np.arange(0, 10)
        trials_since_block = np.tile(trials_since_block, 12)

        reward_choice = choices_test * reward_test
        trial_sq = (np.asarray(trials_since_block) - 0.5)**2
        choice_trials_sq = choices_test * trial_sq
        interaction_trials_choice = trials_since_block * choices_test

        # Original Using Neurons
        predictors_train = OrderedDict([
            ('State', state_test),
            ('Reward', reward_test),
            ('Choice', choices_test),
            #('Reward Choice Int', reward_choice),
            #('Trials in Block', trials_since_block),
            #('Squared Time in Block', trial_sq),
            #('Trials x Choice', interaction_trials_choice),
            #('Trials x Choice Sq',choice_trials_sq),
            ('ones', ones_test)
        ])

        X = np.vstack(predictors_train.values()).T[:trials, :].astype(float)
        n_predictors = X.shape[1]
        y = session_training.reshape(
            [len(session_training),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y, X)
        C_true.append(tstats.reshape(n_predictors, n_neurons,
                                     n_timepoints))  # Predictor loadings
        C_sq_true.append(
            tstats.reshape(n_predictors, n_neurons, n_timepoints)**2)
        cpd_true.append(
            re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))

        trials, n_neurons, n_timepoints = session_misaligned.shape

        # Misaligned Using Neurons

        y = session_misaligned.reshape(
            [len(session_misaligned),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y, X)
        C_misaligned.append(
            tstats.reshape(n_predictors, n_neurons,
                           n_timepoints))  # Predictor loadings
        C_sq_misaligned.append(
            tstats.reshape(n_predictors, n_neurons, n_timepoints)**2)
        cpd_misaligned.append(
            re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))

        # Aligned Using Neurons

        n_predictors = X.shape[1]
        y = session_aligned.reshape(
            [len(session_aligned),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y, X)
        C_aligned.append(tstats.reshape(n_predictors, n_neurons,
                                        n_timepoints))  # Predictor loadings
        C_sq_aligned.append(
            tstats.reshape(n_predictors, n_neurons, n_timepoints)**2)
        cpd_aligned.append(
            re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))

    cpd_true = np.nanmean(np.concatenate(cpd_true, 0), axis=0)
    C_true = np.concatenate(C_true, 1)

    cpd_misaligned = np.nanmean(np.concatenate(cpd_misaligned, 0), axis=0)
    C_misaligned = np.concatenate(C_misaligned, 1)

    cpd_aligned = np.nanmean(np.concatenate(cpd_aligned, 0), axis=0)
    C_aligned = np.concatenate(C_aligned, 1)

    C_sq_true = np.concatenate(C_sq_true, 1)
    C_sq_misaligned = np.concatenate(C_sq_misaligned, 1)
    C_sq_aligned = np.concatenate(C_sq_aligned, 1)

    # Replace non-finite coefficients with NaN before averaging over neurons
    # (mirrored for the misaligned and aligned coefficients).
    C_sq_true[~np.isfinite(C_sq_true)] = np.nan
    C_sq_misaligned[~np.isfinite(C_sq_misaligned)] = np.nan
    C_sq_aligned[~np.isfinite(C_sq_aligned)] = np.nan
    C_sq_true = np.nanmean(C_sq_true, 1)[:-1, :]
    C_sq_misaligned = np.nanmean(C_sq_misaligned, 1)[:-1, :]
    C_sq_aligned = np.nanmean(C_sq_aligned, 1)[:-1, :]

    c = ['violet', 'black', 'green', 'blue', 'turquoise', 'grey', 'yellow', 'pink',
         'purple', 'darkblue', 'darkred', 'darkgreen', 'gold', 'lightgreen']
    p = [*predictors_train]

    plt.figure()

    for i in np.arange(C_sq_true.shape[0]):
        plt.plot(C_sq_true[i, :], label=p[i] + ' Real', color=c[i])
        plt.plot(C_sq_misaligned[i, :],
                 label=p[i] + ' Misaligned',
                 color=c[i],
                 linestyle='--')
        plt.plot(C_sq_aligned[i, :],
                 label=p[i] + ' Aligned',
                 color=c[i],
                 linestyle=':')

    plt.legend()
    plt.ylabel('Coef Sq')
    plt.xlabel('Time in Trial')
    plt.xticks([25, 35, 42], ['I', 'C', 'R'])

    cpd_true = cpd_true[:, :-1]
    cpd_misaligned = cpd_misaligned[:, :-1]
    cpd_aligned = cpd_aligned[:, :-1]

    plt.figure()

    for i in np.arange(cpd_true.shape[1]):
        plt.plot(cpd_true[:, i], label=p[i] + ' Real', color=c[i])
        plt.plot(cpd_misaligned[:, i],
                 label=p[i] + ' Misaligned',
                 color=c[i],
                 linestyle='--')
        plt.plot(cpd_aligned[:, i],
                 label=p[i] + ' Aligned',
                 color=c[i],
                 linestyle=':')

    plt.legend()
    plt.ylabel('CPD')
    plt.xlabel('Time in Trial')
    plt.xticks([25, 35, 42], ['I', 'C', 'R'])

    return misaligned_list, aligned_list
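# A minimal usage sketch (added; keyword values are assumptions):
#
#     misaligned_list, aligned_list = regression(Data, DM, PFC=False)
#
# In addition to the returned lists, the call draws two figures: squared coefficients
# and CPDs over the trial for the real (solid), misaligned (dashed) and aligned
# (dotted) activity, one colour per predictor.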
Example #13
def regression_prev_choice(data, perm=True):

    C = []
    cpd = []

    dm = data['DM'][0]
    firing = data['Data'][0]

    if perm:
        C_perm = [[] for i in range(perm)
                  ]  # To store permuted predictor loadings for each session.
        cpd_perm = [[] for i in range(perm)
                    ]  # To store permuted cpd for each session.

    for s, sess in tqdm(enumerate(dm)):
        DM = dm[s]
        firing_rates = firing[s][1:, :, :]
        n_trials, n_neurons, n_timepoints = firing_rates.shape
        choices = DM[:, 1]
        reward = DM[:, 2]
        task = DM[:, 5]
        task_id = np.where(np.diff(task))[0]
        state = DM[:, 0]

        stay = choices[0:-1] == choices[1:]
        stay = stay * 1
        stay = np.insert(stay, 0, 0)
        lastreward = reward[0:-1]
        lastreward = np.insert(lastreward, 0, 0)

        rl = np.zeros(len(stay))
        rl[0] = 1

        rl_right = np.zeros(len(stay))
        rl_right[0] = choices[0] == state[0]
        choice_rr_start = -100

        rl_wrong = np.zeros(len(stay))
        rl_wrong[0] = choices[0] != state[0]
        choice_rw_start = -100

        for tr in range(len(stay)):
            if tr > 0:
                if stay[tr] == 1:
                    rl[tr] = rl[tr - 1] + 1
                else:
                    rl[tr] = 1

                if ((choices[tr] == choice_rr_start) &
                    (choices[tr] == state[tr])):
                    rl_right[tr] = rl_right[tr - 1] + 1

                elif (choices[tr] == state[tr]):

                    rl_right[tr] = 1
                    choice_rr_start = choices[tr]
                else:
                    rl_right[tr] = 0
                    choice_rr_start = -100
                    #If he made the wrong choice it can't be part of a correct run.

                if ((choices[tr] == choice_rw_start) &
                    (choices[tr] != state[tr])):
                    rl_wrong[tr] = rl_wrong[tr - 1] + 1

                elif choices[tr] != state[tr]:
                    rl_wrong[tr] = 1
                    choice_rw_start = choices[tr]
                else:
                    rl_wrong[tr] = 0
                    choice_rw_start = -100  #If he made the right choice it can't be part of a wrong run.

        trials = len(reward)
        rl_wrong = rl_wrong[1:]
        rl_right = rl_right[1:]
        rl = rl[1:]
        prev_choice = DM[:-1, 1]
        choices = choices[1:]
        reward = reward[1:]
        task = task[1:]
        state = state[1:]
        ones = np.ones(len(reward))
        int_repeat = choices * rl
        int_repeat_corr = state * rl_right
        int_repeat_incorr = state * rl_wrong
        error_count = []
        err_count = 0
        for r, rew in enumerate(reward):
            if rew == 0:
                # Count consecutive unrewarded trials; guard r > 0 so the first trial
                # does not compare against reward[-1] (the last trial) via wrap-around.
                if r > 0 and reward[r] == reward[r - 1]:
                    err_count += 1
            else:
                err_count = 0
            error_count.append(err_count)

        predictors_all = OrderedDict([
            ('Reward', reward),
            ('Choice', choices),
            ('State', state),
            ('Previous Choice 1', prev_choice),
            ('Repeat', rl),
            #('Error Count',error_count),
            #('Repeat Incorrect', rl_wrong),
            #('Repeat Correct', rl_right),
            # ('Repeat Int', int_repeat),
            #('Repeat Corr Int', int_repeat_corr),
            #('Repeat Incorr Int', int_repeat_incorr),
            ('ones', ones)
        ])

        X = np.vstack(
            predictors_all.values()).T[:len(choices), :].astype(float)
        n_predictors = X.shape[1]
        y = firing_rates.reshape(
            [len(firing_rates),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y, X)

        C.append(tstats.reshape(n_predictors, n_neurons,
                                n_timepoints))  # Predictor loadings
        cpd.append(
            re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))

        if perm:
            for i in range(perm):
                X_perm = np.roll(X, np.random.randint(trials), axis=0)
                tstats = reg_f.regression_code(y, X_perm)

                C_perm[i].append(
                    tstats.reshape(n_predictors, n_neurons,
                                   n_timepoints))  # Predictor loadings
                cpd_perm[i].append(
                    re._CPD(X_perm, y).reshape(n_neurons, n_timepoints,
                                               n_predictors))

    if perm:  # Evaluate P values.
        cpd_perm = np.stack(
            [np.nanmean(np.concatenate(cpd_i, 0), 0) for cpd_i in cpd_perm], 0)
        p = np.percentile(cpd_perm, 95, axis=0)

    cpd = np.nanmean(np.concatenate(cpd, 0), axis=0)
    C = np.concatenate(C, 1)

    return cpd_perm, p, cpd, C, predictors_all
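# A minimal usage sketch (added; note that `perm` is used as an integer number of
# permutations, so passing a count rather than the default `True` is advisable):
#
#     cpd_perm, p, cpd, C, predictors_all = regression_prev_choice(data, perm=500)
#     significant = cpd > p   # CPDs exceeding the 95th percentile of the shuffles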
def sequence_rewards_errors_regression_generalisation_rew(
        data,
        area='HP_',
        c_to_plot=1,
        c_to_proj=2,
        c_to_proj_3=3,
        ind_rew=1,
        plot_a=False,
        plot_b=False):

    dm = data['DM'][0]
    firing = data['Data'][0]
    C_1 = []
    C_2 = []
    C_3 = []
    cpd_1 = []
    cpd_2 = []
    cpd_3 = []

    for s, sess in enumerate(dm):

        runs_list = []
        runs_list.append(0)
        DM = dm[s]
        firing_rates = firing[s]

        # firing_rates = firing_rates[:,:,:63]
        n_trials, n_neurons, n_timepoints = firing_rates.shape

        state = DM[:, 0]
        choices = DM[:, 1] - 0.5
        reward = DM[:, 2] - 0.5

        task = DM[:, 5]
        a_pokes = DM[:, 6]
        b_pokes = DM[:, 7]

        taskid = task_ind(task, a_pokes, b_pokes)

        trial_to_start = 7

        taskid = taskid[trial_to_start:]

        task_1 = np.where(taskid == 1)[0]
        task_2 = np.where(taskid == 2)[0]
        task_3 = np.where(taskid == 3)[0]

        # reward_2_ago = reward[1:-2]
        # reward_3_ago = reward[:-3]
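        # The 'N ago' regressors below average pairs of lags rather than entering each
        # lag separately: 2-3 trials back, 4-5 trials back, and 6-7 trials back.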

        reward_2_ago = np.mean(
            [reward[trial_to_start - 2:-2], reward[trial_to_start - 3:-3]], 0)
        reward_3_ago = np.mean(
            [reward[trial_to_start - 4:-4], reward[trial_to_start - 5:-5]], 0)
        reward_4_ago = np.mean([reward[trial_to_start - 6:-6], reward[:-7]], 0)

        reward_prev = reward[trial_to_start - 1:-1]
        reward_current = reward[trial_to_start:]

        firing_rates = firing_rates[trial_to_start:]

        choices_2_ago = np.mean(
            [choices[trial_to_start - 2:-2], choices[trial_to_start - 3:-3]],
            0)
        choices_3_ago = np.mean(
            [choices[trial_to_start - 4:-4], choices[trial_to_start - 5:-5]],
            0)
        choices_4_ago = np.mean([choices[trial_to_start - 6:-6], choices[:-7]],
                                0)

        choices_prev = choices[trial_to_start - 1:-1]
        choices_current = choices[trial_to_start:]
        state = state[trial_to_start:]

        cum_reward_orth = np.vstack(
            [reward_current, np.ones(len(reward_current))]).T
        xt = np.linalg.pinv(cum_reward_orth)
        identity = np.identity(len(reward_current))
        id_x = (identity - np.matmul(cum_reward_orth, xt))
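        # id_x = I - X @ pinv(X) is a residual-maker matrix for X = [current reward, intercept];
        # multiplying the lagged regressors by it below removes any component that the
        # current outcome (or a constant offset) could explain.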

        reward_o_1_ago = np.matmul(id_x, np.asarray(reward_prev))
        reward_o_2_ago = np.matmul(id_x, np.asarray(reward_2_ago))
        reward_o_3_ago = np.matmul(id_x, np.asarray(reward_3_ago))
        reward_o_4_ago = np.matmul(id_x, np.asarray(reward_4_ago))

        cum_ch_orth = np.vstack(
            [choices_current, np.ones(len(choices_current))]).T
        xt = np.linalg.pinv(cum_ch_orth)
        identity = np.identity(len(choices_current))
        id_x = (identity - np.matmul(cum_ch_orth, xt))

        choice_o_1_ago = np.matmul(id_x, np.asarray(choices_prev))
        choice_o_2_ago = np.matmul(id_x, np.asarray(choices_2_ago))
        choice_o_3_ago = np.matmul(id_x, np.asarray(choices_3_ago))
        choice_o_4_ago = np.matmul(id_x, np.asarray(choices_4_ago))

        ones = np.ones(len(reward_current))
        reward_1 = reward_current[task_1]
        choices_1 = choices_current[task_1]

        _1_reward_1 = reward_o_1_ago[task_1]
        _2_reward_1 = reward_o_2_ago[task_1]
        _3_reward_1 = reward_o_3_ago[task_1]
        _4_reward_1 = reward_o_4_ago[task_1]

        _1_choices_1 = choice_o_1_ago[task_1]
        _2_choices_1 = choice_o_2_ago[task_1]
        _3_choices_1 = choice_o_3_ago[task_1]
        _4_choices_1 = choice_o_4_ago[task_1]

        _1_choices_1_for_int = choices_prev[task_1]
        _2_choices_1_for_int = choices_2_ago[task_1]
        _3_choices_1_for_int = choices_3_ago[task_1]
        _4_choices_1_for_int = choices_4_ago[task_1]

        _1_reward_1_ch = (_1_reward_1 * _1_choices_1_for_int)
        _2_reward_1_ch = (_2_reward_1 * _2_choices_1_for_int)
        _3_reward_1_ch = (_3_reward_1 * _3_choices_1_for_int)
        _4_reward_1_ch = (_4_reward_1 * _4_choices_1_for_int)

        ones_1 = ones[task_1]
        firing_rates_1 = firing_rates[task_1]
        a_1 = np.where(choices_1 == 0.5)[0]
        b_1 = np.where(choices_1 == -0.5)[0]

        if plot_a == True:
            reward_1 = reward_1[a_1]

            firing_rates_1 = firing_rates_1[a_1]
            _1_reward_1 = _1_reward_1[a_1]
            _2_reward_1 = _2_reward_1[a_1]
            _3_reward_1 = _3_reward_1[a_1]
            _4_reward_1 = _4_reward_1[a_1]

            _1_choices_1 = _1_choices_1[a_1]
            _2_choices_1 = _2_choices_1[a_1]
            _3_choices_1 = _3_choices_1[a_1]
            _4_choices_1 = _4_choices_1[a_1]

            _1_reward_1_ch = _1_reward_1_ch[a_1]
            _2_reward_1_ch = _2_reward_1_ch[a_1]
            _3_reward_1_ch = _3_reward_1_ch[a_1]
            _4_reward_1_ch = _4_reward_1_ch[a_1]

            ones_1 = ones_1[a_1]

        elif plot_b == True:

            reward_1 = reward_1[b_1]

            firing_rates_1 = firing_rates_1[b_1]
            _1_reward_1 = _1_reward_1[b_1]
            _2_reward_1 = _2_reward_1[b_1]
            _3_reward_1 = _3_reward_1[b_1]
            _4_reward_1 = _4_reward_1[b_1]

            _1_choices_1 = _1_choices_1[b_1]
            _2_choices_1 = _2_choices_1[b_1]
            _3_choices_1 = _3_choices_1[b_1]
            _4_choices_1 = _4_choices_1[b_1]

            _1_reward_1_ch = _1_reward_1_ch[b_1]
            _2_reward_1_ch = _2_reward_1_ch[b_1]
            _3_reward_1_ch = _3_reward_1_ch[b_1]
            _4_reward_1_ch = _4_reward_1_ch[b_1]

            ones_1 = ones_1[b_1]

        predictors_all = OrderedDict([('Reward', reward_1),
                                      ('Choice', choices_1),
                                      ('1 ago Outcome', _1_reward_1),
                                      ('2 ago Outcome', _2_reward_1),
                                      ('3 ago Outcome', _3_reward_1),
                                      ('4 ago Outcome', _4_reward_1),
                                      ('1 ago Choice', _1_choices_1),
                                      ('2 ago Choice', _2_choices_1),
                                      ('3 ago Choice', _3_choices_1),
                                      ('4 ago Choice', _4_choices_1),
                                      ('Prev Rew by Ch ', _1_reward_1_ch),
                                      ('2 Rew ago by Ch ', _2_reward_1_ch),
                                      ('3 Rew ago by Ch ', _3_reward_1_ch),
                                      ('4 Rew ago by Ch ', _4_reward_1_ch),
                                      ('ones', ones_1)])

        X_1 = np.vstack(
            predictors_all.values()).T[:len(choices_1), :].astype(float)
        rank = np.linalg.matrix_rank(X_1)

        n_predictors = X_1.shape[1]
        y_1 = firing_rates_1.reshape(
            [len(firing_rates_1),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        #tstats = reg_f.regression_code(y_1, X_1)
        tstats, cope = regression_code_session(y_1, X_1)
        C_1.append(tstats.reshape(n_predictors, n_neurons,
                                  n_timepoints))  # Predictor loadings
        cpd_1.append(
            re._CPD(X_1, y_1).reshape(n_neurons, n_timepoints, n_predictors))

        reward_2 = reward_current[task_2]
        choices_2 = choices_current[task_2]
        _1_reward_2 = reward_o_1_ago[task_2]
        _2_reward_2 = reward_o_2_ago[task_2]
        _3_reward_2 = reward_o_3_ago[task_2]
        _4_reward_2 = reward_o_4_ago[task_2]

        _1_choices_2 = choice_o_1_ago[task_2]
        _2_choices_2 = choice_o_2_ago[task_2]
        _3_choices_2 = choice_o_3_ago[task_2]
        _4_choices_2 = choice_o_4_ago[task_2]

        _1_choices_2_for_int = choices_prev[task_2]
        _2_choices_2_for_int = choices_2_ago[task_2]
        _3_choices_2_for_int = choices_3_ago[task_2]
        _4_choices_2_for_int = choices_4_ago[task_2]

        _1_reward_2_ch = (_1_reward_2 * _1_choices_2_for_int)
        _2_reward_2_ch = (_2_reward_2 * _2_choices_2_for_int)
        _3_reward_2_ch = (_3_reward_2 * _3_choices_2_for_int)
        _4_reward_2_ch = (_4_reward_2 * _4_choices_2_for_int)

        ones_2 = ones[task_2]
        firing_rates_2 = firing_rates[task_2]
        a_2 = np.where(choices_2 == 0.5)[0]
        b_2 = np.where(choices_2 == -0.5)[0]

        if plot_a == True:
            reward_2 = reward_2[a_2]

            firing_rates_2 = firing_rates_2[a_2]
            _1_reward_2 = _1_reward_2[a_2]
            _2_reward_2 = _2_reward_2[a_2]
            _3_reward_2 = _3_reward_2[a_2]
            _4_reward_2 = _4_reward_2[a_2]

            _1_choices_2 = _1_choices_2[a_2]
            _2_choices_2 = _2_choices_2[a_2]
            _3_choices_2 = _3_choices_2[a_2]
            _4_choices_2 = _4_choices_2[a_2]

            _1_reward_2_ch = _1_reward_2_ch[a_2]
            _2_reward_2_ch = _2_reward_2_ch[a_2]
            _3_reward_2_ch = _3_reward_2_ch[a_2]
            _4_reward_2_ch = _4_reward_2_ch[a_2]

            ones_2 = ones_2[a_2]

        elif plot_b == True:
            reward_2 = reward_2[b_2]
            firing_rates_2 = firing_rates_2[b_2]
            _1_reward_2 = _1_reward_2[b_2]
            _2_reward_2 = _2_reward_2[b_2]
            _3_reward_2 = _3_reward_2[b_2]
            _4_reward_2 = _4_reward_2[b_2]

            _1_choices_2 = _1_choices_2[b_2]
            _2_choices_2 = _2_choices_2[b_2]
            _3_choices_2 = _3_choices_2[b_2]
            _4_choices_2 = _4_choices_2[b_2]

            _1_reward_2_ch = _1_reward_2_ch[b_2]
            _2_reward_2_ch = _2_reward_2_ch[b_2]
            _3_reward_2_ch = _3_reward_2_ch[b_2]
            _4_reward_2_ch = _4_reward_2_ch[b_2]

            ones_2 = ones_2[b_2]

        predictors_all = OrderedDict([('Reward', reward_2),
                                      ('Choice', choices_2),
                                      ('1 ago Outcome', _1_reward_2),
                                      ('2 ago Outcome', _2_reward_2),
                                      ('3 ago Outcome', _3_reward_2),
                                      ('4 ago Outcome', _4_reward_2),
                                      ('1 ago Choice', _1_choices_2),
                                      ('2 ago Choice', _2_choices_2),
                                      ('3 ago Choice', _3_choices_2),
                                      ('4 ago Choice', _4_choices_2),
                                      (' Prev Rew by Ch ', _1_reward_2_ch),
                                      (' 2 Rew ago by Ch ', _2_reward_2_ch),
                                      (' 3 Rew ago by Ch ', _3_reward_2_ch),
                                      (' 4 Rew ago by Ch ', _4_reward_2_ch),
                                      ('ones', ones_2)])

        X_2 = np.vstack(
            predictors_all.values()).T[:len(choices_2), :].astype(float)
        y_2 = firing_rates_2.reshape(
            [len(firing_rates_2),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        #tstats = reg_f.regression_code(y_2, X_2)
        tstats, cope = regression_code_session(y_2, X_2)
        C_2.append(tstats.reshape(n_predictors, n_neurons,
                                  n_timepoints))  # Predictor loadings
        cpd_2.append(
            re._CPD(X_2, y_2).reshape(n_neurons, n_timepoints, n_predictors))

        reward_3 = reward_current[task_3]
        choices_3 = choices_current[task_3]

        _1_reward_3 = reward_o_1_ago[task_3]
        _2_reward_3 = reward_o_2_ago[task_3]
        _3_reward_3 = reward_o_3_ago[task_3]
        _4_reward_3 = reward_o_4_ago[task_3]

        _1_choices_3 = choice_o_1_ago[task_3]
        _2_choices_3 = choice_o_2_ago[task_3]
        _3_choices_3 = choice_o_3_ago[task_3]
        _4_choices_3 = choice_o_4_ago[task_3]

        _1_choices_3_for_int = choices_prev[task_3]
        _2_choices_3_for_int = choices_2_ago[task_3]
        _3_choices_3_for_int = choices_3_ago[task_3]
        _4_choices_3_for_int = choices_4_ago[task_3]

        _1_reward_3_ch = (_1_reward_3 * _1_choices_3_for_int)
        _2_reward_3_ch = (_2_reward_3 * _2_choices_3_for_int)
        _3_reward_3_ch = (_3_reward_3 * _3_choices_3_for_int)
        _4_reward_3_ch = (_4_reward_3 * _4_choices_3_for_int)

        ones_3 = ones[task_3]

        firing_rates_3 = firing_rates[task_3]
        a_3 = np.where(choices_3 == 0.5)[0]
        b_3 = np.where(choices_3 == -0.5)[0]

        if plot_a == True:
            reward_3 = reward_3[a_3]
            firing_rates_3 = firing_rates_3[a_3]
            _1_reward_3 = _1_reward_3[a_3]
            _2_reward_3 = _2_reward_3[a_3]
            _3_reward_3 = _3_reward_3[a_3]
            _4_reward_3 = _4_reward_3[a_3]

            _1_choices_3 = _1_choices_3[a_3]
            _2_choices_3 = _2_choices_3[a_3]
            _3_choices_3 = _3_choices_3[a_3]
            _4_choices_3 = _4_choices_3[a_3]

            _1_reward_3_ch = _1_reward_3_ch[a_3]
            _2_reward_3_ch = _2_reward_3_ch[a_3]
            _3_reward_3_ch = _3_reward_3_ch[a_3]
            _4_reward_3_ch = _4_reward_3_ch[a_3]

            ones_3 = ones_3[a_3]

        elif plot_b == True:

            reward_3 = reward_3[b_3]
            firing_rates_3 = firing_rates_3[b_3]
            _1_reward_3 = _1_reward_3[b_3]
            _2_reward_3 = _2_reward_3[b_3]
            _3_reward_3 = _3_reward_3[b_3]
            _4_reward_3 = _4_reward_3[b_3]

            _1_choices_3 = _1_choices_3[b_3]
            _2_choices_3 = _2_choices_3[b_3]
            _3_choices_3 = _3_choices_3[b_3]
            _4_choices_3 = _4_choices_3[b_3]

            _1_reward_3_ch = _1_reward_3_ch[b_3]
            _2_reward_3_ch = _2_reward_3_ch[b_3]
            _3_reward_3_ch = _3_reward_3_ch[b_3]
            _4_reward_3_ch = _4_reward_3_ch[b_3]

            ones_3 = ones_3[b_3]

        predictors_all = OrderedDict([('Reward', reward_3),
                                      ('Choice', choices_3),
                                      ('1 ago Outcome', _1_reward_3),
                                      ('2 ago Outcome', _2_reward_3),
                                      ('3 ago Outcome', _3_reward_3),
                                      ('4 ago Outcome', _4_reward_3),
                                      ('1 ago Choice', _1_choices_3),
                                      ('2 ago Choice', _2_choices_3),
                                      ('3 ago Choice', _3_choices_3),
                                      ('4 ago Choice', _4_choices_3),
                                      (' Prev Rew by Ch', _1_reward_3_ch),
                                      (' 2 Rew ago by Ch', _2_reward_3_ch),
                                      (' 3 Rew ago by Ch', _3_reward_3_ch),
                                      (' 4 Rew ago by Ch', _4_reward_3_ch),
                                      ('ones', ones_3)])

        X_3 = np.vstack(
            predictors_all.values()).T[:len(choices_3), :].astype(float)
        y_3 = firing_rates_3.reshape(
            [len(firing_rates_3),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        #tstats = reg_f.regression_code(y_3, X_3)
        tstats, cope = regression_code_session(y_3, X_3)

        C_3.append(tstats.reshape(n_predictors, n_neurons,
                                  n_timepoints))  # Predictor loadings
        cpd_3.append(
            re._CPD(X_3, y_3).reshape(n_neurons, n_timepoints, n_predictors))

    C_1 = np.concatenate(C_1, 1)

    C_2 = np.concatenate(C_2, 1)

    C_3 = np.concatenate(C_3, 1)

    cpd_1 = np.nanmean(np.concatenate(cpd_1, 0), axis=0)
    cpd_2 = np.nanmean(np.concatenate(cpd_2, 0), axis=0)
    cpd_3 = np.nanmean(np.concatenate(cpd_3, 0), axis=0)
    cpd = np.mean([cpd_1, cpd_2, cpd_3], 0)

    # Keep only neurons whose coefficients are finite in all three tasks.
    finite = (~np.isinf(C_1[0]).any(axis=1) & ~np.isnan(C_1[0]).any(axis=1) &
              ~np.isinf(C_2[0]).any(axis=1) & ~np.isnan(C_2[0]).any(axis=1) &
              ~np.isinf(C_3[0]).any(axis=1) & ~np.isnan(C_3[0]).any(axis=1))

    C_1 = C_1[:, finite, :]
    C_2 = C_2[:, finite, :]
    C_3 = C_3[:, finite, :]

    c = wes.Darjeeling2_5.mpl_colors + wes.Mendl_4.mpl_colors + wes.GrandBudapest1_4.mpl_colors + wes.Moonrise1_5.mpl_colors

    j = 0
    plt.figure()
    pred = list(predictors_all.keys())
    pred = pred[:-1]
    for ii, i in enumerate(cpd.T[:-1]):
        plt.plot(i, color=c[j], label=pred[j])

        j += 1
    plt.legend()
    sns.despine()

    plt.title(area + ' CPDs')

    cell_id_cum_reward = np.where(np.mean(abs(C_1[1, :, :20]), 1) > 1.5)[0]
    cell_id_cum_error = np.where(np.mean(abs(C_1[2, :, :20]), 1) > 1.5)[0]
    cell_id_prev_ch = np.where(np.mean(abs(C_1[3, :, :20]), 1) > 1.5)[0]

    C_1_rew = C_1[ind_rew]
    C_2_rew = C_2[ind_rew]
    C_3_rew = C_3[ind_rew]
    C_1_rew_count = C_1[c_to_plot]
    C_2_rew_count = C_2[c_to_plot]
    C_3_rew_count = C_3[c_to_plot]

    reward_times_to_choose = np.asarray([20, 24, 35, 41])
    #reward_times_to_choose = np.arange(0,63,10)
    # reward_times_to_choose = np.arange(0,80,10)
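    # These indices pick trial-aligned time bins; the loop below averages coefficients
    # over a pre-initiation window (bins 0-20), ~10-bin windows around bins 24 and 35,
    # and a short post-41 window, roughly matching the 'I', 'C', 'R' tick positions
    # (25, 35, 42) used in the plots elsewhere in this file.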

    ones = np.ones(len(C_1_rew))
    C_1_rew_proj = np.ones(
        (C_1_rew.shape[0], reward_times_to_choose.shape[0] + 1))
    C_2_rew_proj = np.ones(
        (C_1_rew.shape[0], reward_times_to_choose.shape[0] + 1))
    C_3_rew_proj = np.ones(
        (C_1_rew.shape[0], reward_times_to_choose.shape[0] + 1))

    j = 0
    for i in reward_times_to_choose:
        if i == reward_times_to_choose[0]:
            C_1_rew_proj[:, j] = np.mean(C_1_rew[:, i - 20:i], 1)
            C_2_rew_proj[:, j] = np.mean(C_2_rew[:, i - 20:i], 1)
            C_3_rew_proj[:, j] = np.mean(C_3_rew[:, i - 20:i], 1)
        elif i == reward_times_to_choose[1] or i == reward_times_to_choose[2]:
            C_1_rew_proj[:, j] = np.mean(C_1_rew[:, i - 5:i + 5], 1)
            C_2_rew_proj[:, j] = np.mean(C_2_rew[:, i - 5:i + 5], 1)
            C_3_rew_proj[:, j] = np.mean(C_3_rew[:, i - 5:i + 5], 1)
        elif i == reward_times_to_choose[3]:
            C_1_rew_proj[:, j] = np.mean(C_1_rew[:, i:i + 5], 1)
            C_2_rew_proj[:, j] = np.mean(C_2_rew[:, i:i + 5], 1)
            C_3_rew_proj[:, j] = np.mean(C_3_rew[:, i:i + 5], 1)

        j += 1

    C_1_rew_count_proj = np.ones(
        (C_1_rew.shape[0], reward_times_to_choose.shape[0] + 1))
    C_2_rew_count_proj = np.ones(
        (C_1_rew.shape[0], reward_times_to_choose.shape[0] + 1))
    C_3_rew_count_proj = np.ones(
        (C_1_rew.shape[0], reward_times_to_choose.shape[0] + 1))
    j = 0
    for i in reward_times_to_choose:
        if i == reward_times_to_choose[0]:
            C_1_rew_count_proj[:, j] = np.mean(C_1_rew_count[:, i - 20:i], 1)
            C_2_rew_count_proj[:, j] = np.mean(C_2_rew_count[:, i - 20:i], 1)
            C_3_rew_count_proj[:, j] = np.mean(C_3_rew_count[:, i - 20:i], 1)
        elif i == reward_times_to_choose[1] or i == reward_times_to_choose[2]:
            C_1_rew_count_proj[:, j] = np.mean(C_1_rew_count[:, i - 5:i + 5],
                                               1)
            C_2_rew_count_proj[:, j] = np.mean(C_2_rew_count[:, i - 5:i + 5],
                                               1)
            C_3_rew_count_proj[:, j] = np.mean(C_3_rew_count[:, i - 5:i + 5],
                                               1)
        elif i == reward_times_to_choose[3]:
            C_1_rew_count_proj[:, j] = np.mean(C_1_rew_count[:, i:i + 5], 1)
            C_2_rew_count_proj[:, j] = np.mean(C_2_rew_count[:, i:i + 5], 1)
            C_3_rew_count_proj[:, j] = np.mean(C_3_rew_count[:, i:i + 5], 1)

        j += 1

    cpd_1_2_rew, cpd_1_2_rew_var = regression_code_session(
        C_2_rew_count, C_1_rew_proj)
    cpd_1_3_rew, cpd_1_3_rew_var = regression_code_session(
        C_3_rew_count, C_1_rew_proj)
    cpd_2_3_rew, cpd_2_3_rew_var = regression_code_session(
        C_3_rew_count, C_2_rew_proj)
    rew_to_count_cpd = (cpd_1_2_rew + cpd_1_3_rew + cpd_2_3_rew) / np.sqrt(
        (cpd_1_2_rew_var + cpd_1_3_rew_var + cpd_2_3_rew_var))

    # cpd_1_2_rew = re._CPD(C_1_rew_proj,C_2_rew_count)
    # cpd_1_3_rew = re._CPD(C_1_rew_proj,C_3_rew_count)
    # cpd_2_3_rew = re._CPD(C_2_rew_proj,C_3_rew_count)

    # rew_to_count_cpd = np.mean([cpd_1_2_rew, cpd_1_3_rew, cpd_2_3_rew],0)

    cpd_1_rew, cpd_1_rew_var = regression_code_session(C_1_rew_count,
                                                       C_1_rew_proj)
    cpd_2_rew, cpd_2_rew_var = regression_code_session(C_2_rew_count,
                                                       C_2_rew_proj)
    cpd_3_rew, cpd_3_rew_var = regression_code_session(C_3_rew_count,
                                                       C_3_rew_proj)

    cpd_1_2_rew_count, cpd_1_2_rew_count_var = regression_code_session(
        C_2_rew_count, C_1_rew_count_proj)
    cpd_1_3_rew_count, cpd_1_3_rew_count_var = regression_code_session(
        C_3_rew_count, C_1_rew_count_proj)
    cpd_2_3_rew_count, cpd_2_3_rew_count_var = regression_code_session(
        C_3_rew_count, C_2_rew_count_proj)

    within_cpd = (cpd_1_rew + cpd_2_rew + cpd_3_rew) / np.sqrt(
        (cpd_1_rew_var + cpd_2_rew_var + cpd_3_rew_var))

    count_to_count_cpd = (cpd_1_2_rew_count + cpd_1_3_rew_count +
                          cpd_2_3_rew_count) / np.sqrt(
                              (cpd_1_2_rew_count_var + cpd_1_3_rew_count_var +
                               cpd_2_3_rew_count_var))
    df = 2
    count_to_count_p = 1 - stats.t.cdf(abs(count_to_count_cpd), df=df)

    # cpd_1_rew = re._CPD(C_1_rew_proj, C_1_rew_count)
    # cpd_2_rew = re._CPD(C_2_rew_proj, C_2_rew_count)
    # cpd_3_rew= re._CPD(C_3_rew_proj,C_3_rew_count)

    # cpd_1_2_rew_count = re._CPD(C_1_rew_count_proj, C_2_rew_count);
    # cpd_1_3_rew_count = re._CPD(C_1_rew_count_proj, C_3_rew_count);
    # cpd_2_3_rew_count = re._CPD(C_2_rew_count_proj, C_3_rew_count)

    # within_cpd =np.mean([cpd_1_rew, cpd_2_rew, cpd_3_rew],0)

    # count_to_count_cpd =  np.mean([cpd_1_2_rew_count + cpd_1_3_rew_count + cpd_2_3_rew_count],0)

    cpd_1_rew_bias, cpd_1_rew_bias_var = regression_code_session(
        C_1_rew, C_1_rew_proj)
    cpd_2_rew_bias, cpd_2_rew_bias_var = regression_code_session(
        C_2_rew, C_2_rew_proj)
    cpd_3_rew_bias, cpd_3_rew_bias_var = regression_code_session(
        C_3_rew, C_3_rew_proj)

    bias_cpd = (cpd_1_rew_bias + cpd_2_rew_bias + cpd_3_rew_bias) / np.sqrt(
        (cpd_1_rew_bias_var + cpd_2_rew_bias_var + cpd_3_rew_bias_var))

    # cpd_1_rew_bias = re._CPD(C_1_rew_proj, C_1_rew)
    # cpd_2_rew_bias = re._CPD(C_2_rew_proj, C_2_rew)
    # cpd_3_rew_bias = re._CPD(C_3_rew_proj, C_3_rew)

    # bias_cpd = np.mean([cpd_1_rew_bias, cpd_2_rew_bias, cpd_3_rew_bias], 0)

    c = wes.Darjeeling2_5.mpl_colors + wes.Mendl_4.mpl_colors + wes.GrandBudapest1_4.mpl_colors + wes.Moonrise1_5.mpl_colors + wes.Moonrise6_5.mpl_colors
    plt.figure(figsize=(20, 3))

    plt.subplot(2, 4, 1)

    j = 0
    for i in bias_cpd[:-1]:
        plt.plot(i, color=c[j], label=str(j))
        j += 1
    plt.legend()
    sns.despine()
    plt.title('Vectors within Task Rewards to Rewards Biased')
    plt.ylabel(' T-stats')
    # plt.ylabel('cpd')

    plt.subplot(2, 4, 2)

    j = 0
    for i in within_cpd[:-1]:
        plt.plot(i, color=c[j], label=str(j))
        j += 1
    plt.legend()
    sns.despine()
    plt.title(' Vectors within Task Rewards to Reward Counts ')
    plt.ylabel(' T-stats')
    # plt.ylabel('cpd')

    plt.subplot(2, 4, 3)

    j = 0
    for i in count_to_count_cpd[:-1]:
        plt.plot(i, color=c[j], label=str(j))
        j += 1
    plt.legend()
    plt.title(
        str(list(predictors_all.keys())[c_to_plot]) + ' ' + 'between tasks')
    sns.despine()
    plt.ylabel(' T-stats')
    # plt.ylabel('cpd')

    plt.subplot(2, 4, 4)

    j = 0
    for i in rew_to_count_cpd[:-1]:
        plt.plot(i, color=c[j], label=str(j))
        j += 1
    plt.legend()
    plt.title('Vectors from Rewards to Reward Counts Tasks ')
    sns.despine()
    # plt.ylabel('cpd')

    plt.ylabel(' T-stats')

    cpd_1_2_rew_rev, cpd_1_2_rew_rev_var = regression_code_session(
        C_2_rew, C_1_rew_count_proj)

    cpd_1_3_rew_rev, cpd_1_3_rew_rev_var = regression_code_session(
        C_3_rew, C_1_rew_count_proj)

    cpd_2_3_rew_rev, cpd_2_3_rew_rev_var = regression_code_session(
        C_3_rew, C_2_rew_count_proj)

    count_to_rew_cpd = (
        cpd_1_2_rew_rev + cpd_1_3_rew_rev + cpd_2_3_rew_rev) / np.sqrt(
            (cpd_1_2_rew_rev_var + cpd_1_3_rew_rev_var + cpd_2_3_rew_rev_var))

    # cpd_1_2_rew_rev = re._CPD(C_1_rew_count_proj, C_2_rew)

    # cpd_1_3_rew_rev = re._CPD(C_1_rew_count_proj, C_3_rew)

    # cpd_2_3_rew_rev  = re._CPD(C_2_rew_count_proj, C_3_rew)

    # count_to_rew_cpd = np.mean([cpd_1_2_rew_rev, cpd_1_3_rew_rev, cpd_2_3_rew_rev],0)

    cpd_1_rew, cpd_1_rew_var = regression_code_session(C_1_rew,
                                                       C_1_rew_count_proj)
    cpd_2_rew, cpd_2_rew_var = regression_code_session(C_2_rew,
                                                       C_2_rew_count_proj)
    cpd_3_rew, cpd_3_rew_var = regression_code_session(C_3_rew,
                                                       C_3_rew_count_proj)

    cpd_1_2_rew_within, cpd_1_2_rew_within_var = regression_code_session(
        C_2_rew, C_1_rew_proj)
    cpd_1_3_rew_within, cpd_1_3_rew_within_var = regression_code_session(
        C_3_rew, C_1_rew_proj)
    cpd_2_3_rew_within, cpd_2_3_rew_within_var = regression_code_session(
        C_3_rew, C_2_rew_proj)

    within_cpd_rev = (cpd_1_rew + cpd_2_rew + cpd_3_rew) / np.sqrt(
        (cpd_1_rew_var + cpd_2_rew_var + cpd_3_rew_var))
    rew_to_count_rew = (cpd_1_2_rew_within + cpd_1_3_rew_within +
                        cpd_2_3_rew_within) / np.sqrt(
                            (cpd_1_2_rew_within_var + cpd_1_3_rew_within_var +
                             cpd_2_3_rew_within_var))

    # cpd_1_rew = re._CPD(C_1_rew_count_proj,C_1_rew)
    # cpd_2_rew= re._CPD(C_2_rew_count_proj, C_2_rew)
    # cpd_3_rew = re._CPD(C_3_rew_count_proj, C_3_rew)

    # cpd_1_2_rew_within = re._CPD(C_1_rew_proj, C_2_rew)
    # cpd_1_3_rew_within = re._CPD(C_1_rew_proj, C_3_rew)
    # cpd_2_3_rew_within = re._CPD(C_2_rew_proj, C_3_rew)

    # within_cpd_rev = np.mean([cpd_1_rew, cpd_2_rew , cpd_3_rew],0)
    # rew_to_count_rew = np.mean([cpd_1_2_rew_within,cpd_1_3_rew_within, cpd_2_3_rew_within],0)

    cpd_1_rew_bias, cpd_1_rew_bias_var = regression_code_session(
        C_1_rew_count, C_1_rew_count_proj)
    cpd_2_rew_bias, cpd_2_rew_bias_var = regression_code_session(
        C_2_rew_count, C_2_rew_count_proj)
    cpd_3_rew_bias, cpd_3_rew_bias_var = regression_code_session(
        C_3_rew_count, C_3_rew_count_proj)

    bias_cpd_rev = (
        cpd_1_rew_bias + cpd_2_rew_bias + cpd_3_rew_bias) / np.sqrt(
            (cpd_1_rew_bias_var + cpd_2_rew_bias_var + cpd_3_rew_bias_var))

    # cpd_1_rew_bias = re._CPD(C_1_rew_count_proj, C_1_rew_count)
    # cpd_2_rew_bias = re._CPD(C_2_rew_count_proj, C_2_rew_count)
    # cpd_3_rew_bias = re._CPD(C_3_rew_count_proj, C_3_rew_count)

    # bias_cpd_rev = np.mean([cpd_1_rew_bias,cpd_2_rew_bias,cpd_3_rew_bias],0)
    c = wes.Darjeeling2_5.mpl_colors + wes.Mendl_4.mpl_colors + wes.GrandBudapest1_4.mpl_colors + wes.Moonrise1_5.mpl_colors + wes.Moonrise6_5.mpl_colors

    plt.subplot(2, 4, 5)

    j = 0
    for i in bias_cpd_rev[:-1]:
        plt.plot(i, color=c[j], label=str(j))
        j += 1
    plt.legend()
    sns.despine()
    plt.title('Vectors within Task Reward Counts to Counts Biased Rev')
    plt.ylabel(' T-stats')
    #plt.ylabel('cpd')

    plt.subplot(2, 4, 6)

    j = 0
    for i in within_cpd_rev[:-1]:
        plt.plot(i, color=c[j], label=str(j))
        j += 1
    plt.legend()
    sns.despine()
    plt.title(' Vectors within Task Rewards Counts to Reward  ')
    plt.ylabel(' T-stats')
    #plt.ylabel('cpd')

    plt.subplot(2, 4, 7)

    j = 0
    for i in rew_to_count_rew[:-1]:
        plt.plot(i, color=c[j], label=str(j))
        j += 1
    plt.legend()
    plt.title('Vectors between Tasks ')
    sns.despine()
    plt.ylabel(' T-stats')
    #plt.ylabel('cpd')

    plt.subplot(2, 4, 8)

    j = 0
    for i in count_to_rew_cpd[:-1]:
        plt.plot(i, color=c[j], label=str(j))
        j += 1
    plt.legend()
    plt.title('Vectors from Rewards Counts to Reward Tasks ')
    sns.despine()
    plt.ylabel(' T-stats')
    #plt.ylabel('cpd')
    plt.tight_layout()

    C_1_rew_2 = C_1[c_to_proj]
    C_2_rew_2 = C_2[c_to_proj]
    C_3_rew_2 = C_3[c_to_proj]
    C_1_rew_3 = C_1[c_to_proj_3]
    C_2_rew_3 = C_2[c_to_proj_3]
    C_3_rew_3 = C_3[c_to_proj_3]

    cpd_1_prev_1_2, cpd_1_prev_1_2_var = regression_code_session(
        C_1_rew_2, C_1_rew_count_proj)
    cpd_2_prev_1_2, cpd_2_prev_1_2_var = regression_code_session(
        C_2_rew_2, C_2_rew_count_proj)
    cpd_3_prev_1_2, cpd_3_prev_1_2_var = regression_code_session(
        C_3_rew_2, C_3_rew_count_proj)

    prev_rew_1_2 = (
        cpd_1_prev_1_2 + cpd_2_prev_1_2 + cpd_3_prev_1_2) / np.sqrt(
            (cpd_1_prev_1_2_var + cpd_2_prev_1_2_var + cpd_3_prev_1_2_var))

    cpd_1_prev_1_3, cpd_1_prev_1_3_var = regression_code_session(
        C_1_rew_3, C_1_rew_count_proj)
    cpd_2_prev_1_3, cpd_2_prev_1_3_var = regression_code_session(
        C_2_rew_3, C_2_rew_count_proj)
    cpd_3_prev_1_3, cpd_3_prev_1_3_var = regression_code_session(
        C_3_rew_3, C_3_rew_count_proj)

    prev_rew_1_3 = (
        cpd_1_prev_1_3 + cpd_2_prev_1_3 + cpd_3_prev_1_3) / np.sqrt(
            (cpd_1_prev_1_3_var + cpd_2_prev_1_3_var + cpd_3_prev_1_3_var))

    # plt.figure(figsize = (15,2))

    # plt.subplot(1,3,1)

    # j = 0
    # for i in count_to_count_cpd[:-1]:
    #     plt.plot(i, color = c[j], label = str(j))
    #     j+=1
    # plt.legend()
    # plt.title(str(list(predictors_all.keys())[c_to_plot]) + ' ' + 'between tasks')
    # sns.despine()
    # plt.ylabel(' T-stats')
    # # plt.ylabel('cpd')

    # plt.subplot(1,3,2)
    # j = 0
    # for i in prev_rew_1_3[:-1]:
    #     plt.plot(i, color = c[j], label = str(j))
    #     j+=1
    # plt.legend()
    # plt.title('from' + ' '+ str(list(predictors_all.keys())[c_to_plot]) + ' '+ 'to' + ' ' +str(list(predictors_all.keys())[c_to_proj_3]))
    # sns.despine()
    # plt.ylabel(' T-stats')
    # #plt.ylabel('cpd')
    # plt.tight_layout()

    # plt.subplot(1,3,3)
    # j = 0
    # for i in prev_rew_1_2[:-1]:
    #     plt.plot(i, color = c[j], label = str(j))
    #     j+=1
    # plt.legend()
    # plt.title('from' + ' '+ str(list(predictors_all.keys())[c_to_plot]) + ' '+ 'to' + ' ' +str(list(predictors_all.keys())[c_to_proj]))
    # sns.despine()
    # plt.ylabel(' T-stats')
    # #plt.ylabel('cpd')
    # plt.tight_layout()

    return cell_id_cum_reward, cell_id_cum_error, cell_id_prev_ch
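# Added illustration (a sketch, not part of the original analyses): the regressions above
# orthogonalise lagged reward/choice regressors against the current trial by applying the
# residual-maker matrix M = I - X @ pinv(X). The helper name below is hypothetical.
def _orthogonalisation_demo(n_trials=200, seed=0):
    import numpy as np
    rng = np.random.default_rng(seed)
    current = rng.integers(0, 2, n_trials).astype(float) - 0.5   # current-trial outcome
    X = np.vstack([current, np.ones(n_trials)]).T                # outcome + intercept
    M = np.identity(n_trials) - X @ np.linalg.pinv(X)            # residual-maker matrix
    prev = np.roll(current, 1)                                   # lagged copy of the outcome
    prev_orth = M @ prev                                         # orthogonalised regressor
    # The residual is orthogonal to every column of X, so it carries no information
    # about the current outcome (or a constant offset).
    assert np.allclose(X.T @ prev_orth, 0, atol=1e-8)
    return prev_orth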
def sequence_rewards_errors_regression_generalisation(data,
                                                      perm=True,
                                                      area='HP_',
                                                      interactions=True,
                                                      a=True):

    dm = data['DM'][0]
    firing = data['Data'][0]
    C_1 = []
    cpd_1 = []

    C_2 = []
    cpd_2 = []

    C_3 = []
    cpd_3 = []
    if perm:

        C_1_perm = [[] for i in range(perm)]

        C_2_perm = [[] for i in range(perm)]

        C_3_perm = [[] for i in range(perm)]

        cpd_perm_all = [[] for i in range(perm)]

    for s, sess in enumerate(dm):
        runs_list = []
        runs_list.append(0)
        DM = dm[s]
        firing_rates = firing[s]
        n_trials, n_neurons, n_timepoints = firing_rates.shape

        choices = DM[:, 1]
        # choices[np.where(choices ==0)[0]] = -1

        reward = DM[:, 2]

        task = DM[:, 5]
        task_1 = np.where(task == 1)[0]
        task_2 = np.where(task == 2)[0]
        task_3 = np.where(task == 3)[0]
        state = DM[:, 0]

        correct = np.where(state == choices)[0]
        incorrect = np.where(state != choices)[0]

        cum_error = []
        runs_list_corr = []
        runs_list_incorr = []
        err = 0
        for r in reward:
            if r == 0:
                err += 1
            else:
                err = 0
            cum_error.append(err)

        cum_reward = []
        rew_run = 0  # Use a fresh counter; do not reuse `err` from the error loop above.
        for r in reward:
            if r == 1:
                rew_run += 1
            else:
                rew_run = 0
            cum_reward.append(rew_run)

        run = 0
        for c, ch in enumerate(choices):
            if c > 0:
                if choices[c] == choices[c - 1]:
                    run += 1
                elif choices[c] != choices[c - 1]:
                    run = 0
                runs_list.append(run)

        corr_run = 0
        run_ind_c = []
        for c, ch in enumerate(choices):
            if c > 0 and c in correct:
                if choices[c] == choices[c - 1]:
                    if corr_run == 0:
                        run_ind_c.append(c)
                    corr_run += 1
                elif choices[c] != choices[c - 1]:
                    corr_run = 0
            else:
                corr_run = 0
            runs_list_corr.append(corr_run)

        incorr_run = 0
        run_ind_inc = []
        for c, ch in enumerate(choices):
            if c > 0 and c in incorrect:
                if choices[c] == choices[c - 1]:
                    if incorr_run == 0:
                        run_ind_inc.append(c)
                    incorr_run += 1
                elif choices[c] != choices[c - 1]:
                    incorr_run = 0
            else:
                incorr_run = 0

            runs_list_incorr.append(incorr_run)

        choices_a = np.where(choices == 1)[0]
        choices_b = np.where(choices == 0)[0]

        a_cum_rew = np.array(cum_reward)
        b_cum_rew = np.array(cum_reward)

        a_cum_rew[choices_b] = 0
        b_cum_rew[choices_a] = 0

        a_cum_error = np.array(cum_error)
        b_cum_error = np.array(cum_error)

        a_cum_error[choices_b] = 0
        b_cum_error[choices_a] = 0

        ones = np.ones(len(reward))
        reward_1 = reward[task_1]
        choices_1 = choices[task_1]
        cum_error_1 = np.asarray(cum_error)[task_1]
        cum_reward_1 = np.asarray(cum_reward)[task_1]

        cum_error_1_a = np.asarray(a_cum_error)[task_1]
        cum_error_1_b = np.asarray(b_cum_error)[task_1]

        cum_reward_1_a = np.asarray(a_cum_rew)[task_1]
        cum_reward_1_b = np.asarray(b_cum_rew)[task_1]

        ones_1 = ones[task_1]
        cum_error_1_ch = cum_error_1 * choices_1
        cum_rew_1_ch = cum_reward_1 * choices_1
        firing_rates_1 = firing_rates[task_1]

        int_rew_ch_1 = reward_1 * choices_1
        if interactions == True:
            predictors_all = OrderedDict([
                ('Reward', reward_1), ('Choice', choices_1),
                ('Errors', cum_error_1), ('Rewards', cum_reward_1),
                ('Choice x Cum Error', cum_error_1_ch),
                ('Choice x Cum Reward', cum_rew_1_ch),
                ('Choice x Reward', int_rew_ch_1), ('ones', ones_1)
            ])
        else:
            predictors_all = OrderedDict([('Reward', reward_1),
                                          ('Choice', choices_1),
                                          ('Errors A', cum_error_1_a),
                                          ('Errors B', cum_error_1_b),
                                          ('Rewards A', cum_reward_1_a),
                                          ('Rewards B', cum_reward_1_b),
                                          ('Choice x Reward', int_rew_ch_1),
                                          ('ones', ones_1)])

        X_1 = np.vstack(
            predictors_all.values()).T[:len(choices_1), :].astype(float)
        rank = np.linalg.matrix_rank(X_1)
        # print(rank)
        # print(X_1.shape[1])
        n_predictors = X_1.shape[1]
        y_1 = firing_rates_1.reshape(
            [len(firing_rates_1),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y_1, X_1)

        C_1.append(tstats.reshape(n_predictors, n_neurons,
                                  n_timepoints))  # Predictor loadings
        cpd_1.append(
            re._CPD(X_1, y_1).reshape(n_neurons, n_timepoints, n_predictors))

        reward_2 = reward[task_2]
        choices_2 = choices[task_2]
        cum_error_2 = np.asarray(cum_error)[task_2]
        cum_reward_2 = np.asarray(cum_reward)[task_2]
        ones_2 = ones[task_2]
        cum_error_2_ch = cum_error_2 * choices_2
        cum_rew_2_ch = cum_reward_2 * choices_2
        int_rew_ch_2 = reward_2 * choices_2
        cum_error_2_a = np.asarray(a_cum_error)[task_2]
        cum_error_2_b = np.asarray(b_cum_error)[task_2]

        cum_reward_2_a = np.asarray(a_cum_rew)[task_2]
        cum_reward_2_b = np.asarray(b_cum_rew)[task_2]

        firing_rates_2 = firing_rates[task_2]
        if interactions == True:

            predictors_all = OrderedDict([
                ('Reward', reward_2), ('Choice', choices_2),
                ('Errors', cum_error_2), ('Rewards', cum_reward_2),
                ('Choice x Cum Error', cum_error_2_ch),
                ('Choice x Cum Reward', cum_rew_2_ch),
                ('Choice x Reward', int_rew_ch_2), ('ones', ones_2)
            ])
        else:
            predictors_all = OrderedDict([('Reward', reward_2),
                                          ('Choice', choices_2),
                                          ('Errors A', cum_error_2_a),
                                          ('Errors B', cum_error_2_b),
                                          ('Rewards A', cum_reward_2_a),
                                          ('Rewards B', cum_reward_2_b),
                                          ('Choice x Reward', int_rew_ch_2),
                                          ('ones', ones_2)])

        X_2 = np.vstack(
            predictors_all.values()).T[:len(choices_2), :].astype(float)
        y_2 = firing_rates_2.reshape(
            [len(firing_rates_2),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y_2, X_2)

        C_2.append(tstats.reshape(n_predictors, n_neurons,
                                  n_timepoints))  # Predictor loadings
        cpd_2.append(
            re._CPD(X_2, y_2).reshape(n_neurons, n_timepoints, n_predictors))

        reward_3 = reward[task_3]
        choices_3 = choices[task_3]
        cum_error_3 = np.asarray(cum_error)[task_3]
        cum_reward_3 = np.asarray(cum_reward)[task_3]
        ones_3 = ones[task_3]
        cum_error_3_ch = cum_error_3 * choices_3
        cum_rew_3_ch = cum_reward_3 * choices_3
        int_rew_ch_3 = reward_3 * choices_3
        cum_error_3_a = np.asarray(a_cum_error)[task_3]
        cum_error_3_b = np.asarray(b_cum_error)[task_3]

        cum_reward_3_a = np.asarray(a_cum_rew)[task_3]
        cum_reward_3_b = np.asarray(b_cum_rew)[task_3]

        firing_rates_3 = firing_rates[task_3]
        if interactions == True:

            predictors_all = OrderedDict([
                ('Reward', reward_3), ('Choice', choices_3),
                ('Errors', cum_error_3), ('Rewards', cum_reward_3),
                ('Choice x Cum Error', cum_error_3_ch),
                ('Choice x Cum Reward', cum_rew_3_ch),
                ('Choice x Reward', int_rew_ch_3), ('ones', ones_3)
            ])

        else:
            predictors_all = OrderedDict([('Reward', reward_3),
                                          ('Choice', choices_3),
                                          ('Errors A', cum_error_3_a),
                                          ('Errors B', cum_error_3_b),
                                          ('Rewards A', cum_reward_3_a),
                                          ('Rewards B', cum_reward_3_b),
                                          ('Choice x Reward', int_rew_ch_3),
                                          ('ones', ones_3)])

        X_3 = np.vstack(
            predictors_all.values()).T[:len(choices_3), :].astype(float)
        y_3 = firing_rates_3.reshape(
            [len(firing_rates_3),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y_3, X_3)

        C_3.append(tstats.reshape(n_predictors, n_neurons,
                                  n_timepoints))  # Predictor loadings
        cpd_3.append(
            re._CPD(X_3, y_3).reshape(n_neurons, n_timepoints, n_predictors))

        if perm:
            for i in range(perm):
                X_perm_1 = np.roll(X_1, np.random.randint(len(X_1)), axis=0)
                tstats = reg_f.regression_code(y_1, X_perm_1)
                C_1_perm[i].append(
                    tstats.reshape(n_predictors, n_neurons,
                                   n_timepoints))  # Predictor loadings
                cpd_perm_1 = re._CPD(X_perm_1,
                                     y_1).reshape(n_neurons, n_timepoints,
                                                  n_predictors)

                X_perm_2 = np.roll(X_2, np.random.randint(len(X_2)), axis=0)

                tstats = reg_f.regression_code(y_2, X_perm_2)
                C_2_perm[i].append(
                    tstats.reshape(n_predictors, n_neurons,
                                   n_timepoints))  # Predictor loadings
                cpd_perm_2 = re._CPD(X_perm_2,
                                     y_2).reshape(n_neurons, n_timepoints,
                                                  n_predictors)

                X_perm_3 = np.roll(X_3, np.random.randint(len(X_3)), axis=0)

                tstats = reg_f.regression_code(y_3, X_perm_3)
                C_3_perm[i].append(
                    tstats.reshape(n_predictors, n_neurons,
                                   n_timepoints))  # Predictor loadings
                cpd_perm_3 = re._CPD(X_perm_3,
                                     y_3).reshape(n_neurons, n_timepoints,
                                                  n_predictors)

                cpd_perm_all[i].append(
                    np.nanmean([cpd_perm_1, cpd_perm_2, cpd_perm_3], 0))

    cpd_perm_all = np.stack(
        [np.mean(np.concatenate(cpd_i, 0), 0) for cpd_i in cpd_perm_all], 0)
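    # cpd_perm_all now has shape [n_perms, n_timepoints, n_predictors]; the 95th percentile
    # across permutations (taken further below, followed by a max over time) gives one
    # permutation-based threshold per predictor, plotted as dotted horizontal lines.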

    cpd_1 = np.nanmean(np.concatenate(cpd_1, 0), axis=0)
    C_1 = np.concatenate(C_1, 1)

    cpd_2 = np.nanmean(np.concatenate(cpd_2, 0), axis=0)
    C_2 = np.concatenate(C_2, 1)

    cpd_3 = np.nanmean(np.concatenate(cpd_3, 0), axis=0)
    C_3 = np.concatenate(C_3, 1)

    cpds_true = np.mean([cpd_1, cpd_2, cpd_3], 0)

    pal_c = sns.cubehelix_palette(8, start=2, rot=0, dark=0, light=.95)
    c = wes.Darjeeling2_5.mpl_colors + wes.Mendl_4.mpl_colors + wes.GrandBudapest1_4.mpl_colors + wes.Moonrise1_5.mpl_colors

    # Significance threshold per predictor: 95th percentile of the permuted
    # CPDs across permutations, then the maximum over time points.
    cpd_perm_all = np.max(np.percentile(cpd_perm_all, 95, axis=0), 0)

    # Plot the true CPD time courses against the permutation threshold
    # (dotted line) for each predictor except the intercept.
    plt.figure()
    for j, i in enumerate(cpds_true.T[:-1]):
        plt.plot(i, color=c[j], label=list(predictors_all.keys())[j])
        plt.hlines(cpd_perm_all[j], xmin=0, xmax=63, color=c[j], linestyle=':')
    plt.legend()
    sns.despine()

    plt.title(area + ' CPDs')

    if interactions == True:
        (X_nan, firing_rates_1_inf_rew, firing_rates_2_inf_rew,
         firing_rates_3_inf_rew, firing_rates_1_inf_error,
         firing_rates_2_inf_error, firing_rates_3_inf_error,
         firing_rates_1_inf_rew_int, firing_rates_2_inf_rew_int,
         firing_rates_3_inf_rew_int, firing_rates_1_inf_error_int,
         firing_rates_2_inf_error_int, firing_rates_3_inf_error_int
         ) = find_coefficients(C_1, C_2, C_3, interactions=interactions, a=a)

        X_1_rew = X_nan[:, :5]
        X_1_rew_int = X_nan[:, 5:10]

        X_1_error = X_nan[:, 10:15]
        X_1_error_int = X_nan[:, 15:20]

        X_2_rew = X_nan[:, 20:25]
        X_2_rew_int = X_nan[:, 25:30]

        X_2_error = X_nan[:, 30:35]
        X_2_error_int = X_nan[:, 35:40]

        X_3_rew = X_nan[:, 40:45]
        X_3_rew_int = X_nan[:, 45:50]

        X_3_error = X_nan[:, 50:55]
        X_3_error_int = X_nan[:, 55:60]

        # plt.figure()

        # for i in range(4):
        #     plt.subplot(2,2,i+1)
        #     sns.regplot(X_1_rew[:,i],X_2_rew[:,i], color = 'red',label = 'Reward')
        #     #sns.regplot(X_1_rew[:,i],X_3_rew[:,i], color = 'black')
        #     #sns.regplot(X_3_rew[:,i],X_2_rew[:,i], color = 'grey')
        #     corr = np.mean([np.corrcoef(X_1_rew[:,i],X_2_rew[:,i])[0,1],np.corrcoef(X_1_rew[:,i],X_3_rew[:,i])[0,1],np.corrcoef(X_2_rew[:,i],X_3_rew[:,i])[0,1]])
        #     plt.annotate(np.round(corr,2),(1,1.5))
        # plt.legend()

        # sns.despine()

        # plt.figure()

        # for i in range(4):
        #     plt.subplot(2,2,i+1)
        #     sns.regplot(X_1_rew_int[:,i],X_2_rew_int[:,i], color = 'grey', label = 'Reward Interaction')

        #     corr = np.mean([np.corrcoef(X_1_rew_int[:,i],X_2_rew_int[:,i])[0,1],np.corrcoef(X_1_rew_int[:,i],X_3_rew_int[:,i])[0,1],np.corrcoef(X_2_rew_int[:,i],X_3_rew_int[:,i])[0,1]])
        #     plt.annotate(np.round(corr,2),(1,1.5))
        # plt.legend()
        # sns.despine()

        # plt.figure()

        # for i in range(4):
        #     plt.subplot(2,2,i+1)
        #     sns.regplot(X_1_error[:,i],X_2_error[:,i], color = 'purple', label = 'Error')

        #     corr = np.mean([np.corrcoef(X_1_error[:,i],X_2_error[:,i])[0,1],np.corrcoef(X_1_error[:,i],X_3_error[:,i])[0,1],np.corrcoef(X_2_error[:,i],X_3_error[:,i])[0,1]])
        #     plt.annotate(np.round(corr,2),(1,1.5))

        # plt.figure()

        # for i in range(4):
        #     plt.subplot(2,2,i+1)
        #     sns.regplot(X_1_error_int[:,i],X_2_error_int[:,i], color = 'purple', label = 'Error')

        #     corr = np.mean([np.corrcoef(X_1_error_int[:,i],X_2_error_int[:,i])[0,1],np.corrcoef(X_1_error_int[:,i],X_3_error_int[:,i])[0,1],np.corrcoef(X_2_error_int[:,i],X_3_error_int[:,i])[0,1]])
        #     plt.annotate(np.round(corr,2),(1,1.5))

        # plt.legend()
        # sns.despine()

        cpd_1_2_rew = re._CPD(X_1_rew, firing_rates_2_inf_rew)
        cpd_1_3_rew = re._CPD(X_1_rew, firing_rates_3_inf_rew)

        cpd_2_1_rew = re._CPD(X_2_rew, firing_rates_1_inf_rew)
        cpd_2_3_rew = re._CPD(X_2_rew, firing_rates_3_inf_rew)

        cpd_3_1_rew = re._CPD(X_3_rew, firing_rates_1_inf_rew)
        cpd_3_2_rew = re._CPD(X_3_rew, firing_rates_2_inf_rew)

        cpd_1_2_error = re._CPD(X_1_error, firing_rates_2_inf_error)
        cpd_1_3_error = re._CPD(X_1_error, firing_rates_3_inf_error)

        cpd_2_1_error = re._CPD(X_2_error, firing_rates_1_inf_error)
        cpd_2_3_error = re._CPD(X_2_error, firing_rates_3_inf_error)

        cpd_3_1_error = re._CPD(X_3_error, firing_rates_1_inf_error)
        cpd_3_2_error = re._CPD(X_3_error, firing_rates_2_inf_error)

        cpd_1_2_rew_int = re._CPD(X_1_rew_int, firing_rates_2_inf_rew_int)
        cpd_1_3_rew_int = re._CPD(X_1_rew_int, firing_rates_3_inf_rew_int)

        cpd_2_1_rew_int = re._CPD(X_2_rew_int, firing_rates_1_inf_rew_int)
        cpd_2_3_rew_int = re._CPD(X_2_rew_int, firing_rates_3_inf_rew_int)

        cpd_3_1_rew_int = re._CPD(X_3_rew_int, firing_rates_1_inf_rew_int)
        cpd_3_2_rew_int = re._CPD(X_3_rew_int, firing_rates_2_inf_rew_int)

        cpd_1_2_error_int = re._CPD(X_1_error_int,
                                    firing_rates_2_inf_error_int)
        cpd_1_3_error_int = re._CPD(X_1_error_int,
                                    firing_rates_3_inf_error_int)

        cpd_2_1_error_int = re._CPD(X_2_error_int,
                                    firing_rates_1_inf_error_int)
        cpd_2_3_error_int = re._CPD(X_2_error_int,
                                    firing_rates_3_inf_error_int)

        cpd_3_1_error_int = re._CPD(X_3_error_int,
                                    firing_rates_1_inf_error_int)
        cpd_3_2_error_int = re._CPD(X_3_error_int,
                                    firing_rates_2_inf_error_int)

        cpd_rew_int = np.nanmean([
            cpd_1_2_rew_int, cpd_1_3_rew_int, cpd_2_1_rew_int, cpd_2_3_rew_int,
            cpd_3_1_rew_int, cpd_3_2_rew_int
        ], 0)
        cpd_error_int = np.nanmean([
            cpd_1_2_error_int, cpd_1_3_error_int, cpd_2_1_error_int,
            cpd_2_3_error_int, cpd_3_1_error_int, cpd_3_2_error_int
        ], 0)

    else:

        (X_nan, firing_rates_1_inf_rew, firing_rates_2_inf_rew,
         firing_rates_3_inf_rew, firing_rates_1_inf_error,
         firing_rates_2_inf_error, firing_rates_3_inf_error
         ) = find_coefficients(C_1, C_2, C_3, interactions=interactions, a=a)

        X_1_rew = X_nan[:, :5]

        X_1_error = X_nan[:, 5:10]

        X_2_rew = X_nan[:, 10:15]

        X_2_error = X_nan[:, 15:20]

        X_3_rew = X_nan[:, 20:25]

        X_3_error = X_nan[:, 25:30]

        # plt.figure()

        # for i in range(4):
        #     plt.subplot(2,2,i+1)
        #     sns.regplot(X_1_rew[:,i],X_2_rew[:,i], color = 'red',label = 'Reward')
        #     #sns.regplot(X_1_rew[:,i],X_3_rew[:,i], color = 'black')
        #     #sns.regplot(X_3_rew[:,i],X_2_rew[:,i], color = 'grey')
        #     corr = np.mean([np.corrcoef(X_1_rew[:,i],X_2_rew[:,i])[0,1],np.corrcoef(X_1_rew[:,i],X_3_rew[:,i])[0,1],np.corrcoef(X_2_rew[:,i],X_3_rew[:,i])[0,1]])
        #     plt.annotate(np.round(corr,2),(1,1.5))
        # plt.legend()

        # plt.figure()

        # for i in range(4):
        #     plt.subplot(2,2,i+1)
        #     sns.regplot(X_1_error[:,i],X_2_error[:,i], color = 'purple', label = 'Error')

        #     corr = np.mean([np.corrcoef(X_1_error[:,i],X_2_error[:,i])[0,1],np.corrcoef(X_1_error[:,i],X_3_error[:,i])[0,1],np.corrcoef(X_2_error[:,i],X_3_error[:,i])[0,1]])
        #     plt.annotate(np.round(corr,2),(1,1.5))

        cpd_1_2_rew = re._CPD(X_1_rew, firing_rates_2_inf_rew)
        cpd_1_3_rew = re._CPD(X_1_rew, firing_rates_3_inf_rew)

        cpd_2_1_rew = re._CPD(X_2_rew, firing_rates_1_inf_rew)
        cpd_2_3_rew = re._CPD(X_2_rew, firing_rates_3_inf_rew)

        cpd_3_1_rew = re._CPD(X_3_rew, firing_rates_1_inf_rew)
        cpd_3_2_rew = re._CPD(X_3_rew, firing_rates_2_inf_rew)

        cpd_1_2_error = re._CPD(X_1_error, firing_rates_2_inf_error)
        cpd_1_3_error = re._CPD(X_1_error, firing_rates_3_inf_error)

        cpd_2_1_error = re._CPD(X_2_error, firing_rates_1_inf_error)
        cpd_2_3_error = re._CPD(X_2_error, firing_rates_3_inf_error)

        cpd_3_1_error = re._CPD(X_3_error, firing_rates_1_inf_error)
        cpd_3_2_error = re._CPD(X_3_error, firing_rates_2_inf_error)

    cpd_rew = np.nanmean([
        cpd_1_2_rew, cpd_1_3_rew, cpd_2_1_rew, cpd_2_3_rew, cpd_3_1_rew,
        cpd_3_2_rew
    ], 0)
    cpd_error = np.nanmean([
        cpd_1_2_error, cpd_1_3_error, cpd_2_1_error, cpd_2_3_error,
        cpd_3_1_error, cpd_3_2_error
    ], 0)

    pal = sns.cubehelix_palette(8)
    pal_c = sns.cubehelix_palette(8, start=2, rot=0, dark=0, light=.95)
    if interactions == True:
        sub = 2
    else:
        sub = 1
    plt.figure()
    plt.subplot(2, sub, 1)
    plt.plot(cpd_rew[:, 0], color=pal[0], label='Pre Init Period')
    plt.plot(cpd_rew[:, 1], color=pal[2], label='Init Period')
    plt.plot(cpd_rew[:, 2], color=pal[4], label='Choice Period')
    plt.plot(cpd_rew[:, 3], color=pal[6], label='Reward Period')
    plt.title(area + ' Between Tasks Reward Runs')
    plt.legend()
    sns.despine()
    # plt.ylim(0,0.18)

    plt.subplot(2, sub, 2)

    plt.plot(cpd_error[:, 0], color=pal_c[0], label='Pre Init Period')
    plt.plot(cpd_error[:, 1], color=pal_c[2], label='Init Period')
    plt.plot(cpd_error[:, 2], color=pal_c[4], label='Choice Period')
    plt.plot(cpd_error[:, 3], color=pal_c[6], label='Reward Period')
    plt.title(area + ' Between Tasks Error Runs')
    # plt.ylim(0,0.06)
    if interactions == True:
        plt.subplot(2, sub, 3)
        plt.plot(cpd_rew_int[:, 0], color=pal[0], label='Pre Init Period')
        plt.plot(cpd_rew_int[:, 1], color=pal[2], label='Init Period')
        plt.plot(cpd_rew_int[:, 2], color=pal[4], label='Choice Period')
        plt.plot(cpd_rew_int[:, 3], color=pal[6], label='Reward Period')
        plt.title(area + ' Between Tasks Reward Runs Interactions with Choice')
        plt.legend()
        sns.despine()
        # plt.ylim(0,0.18)

        plt.subplot(2, sub, 4)

        plt.plot(cpd_error_int[:, 0], color=pal_c[0], label='Pre Init Period')
        plt.plot(cpd_error_int[:, 1], color=pal_c[2], label='Init Period')
        plt.plot(cpd_error_int[:, 2], color=pal_c[4], label='Choice Period')
        plt.plot(cpd_error_int[:, 3], color=pal_c[6], label='Reward Period')
        plt.title(area + ' Between Tasks Error Runs Interactions with Choice')
        # plt.ylim(0,0.06)

    plt.legend()
    sns.despine()

    cpd_rew_perm = []
    cpd_error_perm = []
    cpd_rew_int_perm = []
    cpd_error_int_perm = []

    # Rebuild the cross-task statistics for every permutation to form the null
    # distribution (the permuted loadings reuse the C_1/C_2/C_3 names).
    for i in range(len(C_3_perm)):

        C_1 = np.concatenate(C_1_perm[i], 1)

        C_2 = np.concatenate(C_2_perm[i], 1)

        C_3 = np.concatenate(C_3_perm[i], 1)

        if interactions == True:
            (X_nan, firing_rates_1_inf_rew, firing_rates_2_inf_rew,
             firing_rates_3_inf_rew, firing_rates_1_inf_error,
             firing_rates_2_inf_error, firing_rates_3_inf_error,
             firing_rates_1_inf_rew_int, firing_rates_2_inf_rew_int,
             firing_rates_3_inf_rew_int, firing_rates_1_inf_error_int,
             firing_rates_2_inf_error_int, firing_rates_3_inf_error_int
             ) = find_coefficients(C_1, C_2, C_3, interactions=interactions, a=a)

            X_1_rew = X_nan[:, :5]
            X_1_rew_int = X_nan[:, 5:10]

            X_1_error = X_nan[:, 10:15]
            X_1_error_int = X_nan[:, 15:20]

            X_2_rew = X_nan[:, 20:25]
            X_2_rew_int = X_nan[:, 25:30]

            X_2_error = X_nan[:, 30:35]
            X_2_error_int = X_nan[:, 35:40]

            X_3_rew = X_nan[:, 40:45]
            X_3_rew_int = X_nan[:, 45:50]

            X_3_error = X_nan[:, 50:55]
            X_3_error_int = X_nan[:, 55:60]

            cpd_1_2_rew = re._CPD(X_1_rew, firing_rates_2_inf_rew)
            cpd_1_3_rew = re._CPD(X_1_rew, firing_rates_3_inf_rew)

            cpd_2_1_rew = re._CPD(X_2_rew, firing_rates_1_inf_rew)
            cpd_2_3_rew = re._CPD(X_2_rew, firing_rates_3_inf_rew)

            cpd_3_1_rew = re._CPD(X_3_rew, firing_rates_1_inf_rew)
            cpd_3_2_rew = re._CPD(X_3_rew, firing_rates_2_inf_rew)

            cpd_1_2_error = re._CPD(X_1_error, firing_rates_2_inf_error)
            cpd_1_3_error = re._CPD(X_1_error, firing_rates_3_inf_error)

            cpd_2_1_error = re._CPD(X_2_error, firing_rates_1_inf_error)
            cpd_2_3_error = re._CPD(X_2_error, firing_rates_3_inf_error)

            cpd_3_1_error = re._CPD(X_3_error, firing_rates_1_inf_error)
            cpd_3_2_error = re._CPD(X_3_error, firing_rates_2_inf_error)

            cpd_1_2_rew_int = re._CPD(X_1_rew_int, firing_rates_2_inf_rew_int)
            cpd_1_3_rew_int = re._CPD(X_1_rew_int, firing_rates_3_inf_rew_int)

            cpd_2_1_rew_int = re._CPD(X_2_rew_int, firing_rates_1_inf_rew_int)
            cpd_2_3_rew_int = re._CPD(X_2_rew_int, firing_rates_3_inf_rew_int)

            cpd_3_1_rew_int = re._CPD(X_3_rew_int, firing_rates_1_inf_rew_int)
            cpd_3_2_rew_int = re._CPD(X_3_rew_int, firing_rates_2_inf_rew_int)

            cpd_1_2_error_int = re._CPD(X_1_error_int,
                                        firing_rates_2_inf_error_int)
            cpd_1_3_error_int = re._CPD(X_1_error_int,
                                        firing_rates_3_inf_error_int)

            cpd_2_1_error_int = re._CPD(X_2_error_int,
                                        firing_rates_1_inf_error_int)
            cpd_2_3_error_int = re._CPD(X_2_error_int,
                                        firing_rates_3_inf_error_int)

            cpd_3_1_error_int = re._CPD(X_3_error_int,
                                        firing_rates_1_inf_error_int)
            cpd_3_2_error_int = re._CPD(X_3_error_int,
                                        firing_rates_2_inf_error_int)

            cpd_rew_int_perm.append(
                np.nanmean([
                    cpd_1_2_rew_int, cpd_1_3_rew_int, cpd_2_1_rew_int,
                    cpd_2_3_rew_int, cpd_3_1_rew_int, cpd_3_2_rew_int
                ], 0))
            cpd_error_int_perm.append(
                np.nanmean([
                    cpd_1_2_error_int, cpd_1_3_error_int, cpd_2_1_error_int,
                    cpd_2_3_error_int, cpd_3_1_error_int, cpd_3_2_error_int
                ], 0))

        else:

            (X_nan, firing_rates_1_inf_rew, firing_rates_2_inf_rew,
             firing_rates_3_inf_rew, firing_rates_1_inf_error,
             firing_rates_2_inf_error, firing_rates_3_inf_error
             ) = find_coefficients(C_1, C_2, C_3, interactions=interactions, a=a)

            X_1_rew = X_nan[:, :5]

            X_1_error = X_nan[:, 5:10]

            X_2_rew = X_nan[:, 10:15]

            X_2_error = X_nan[:, 15:20]

            X_3_rew = X_nan[:, 20:25]

            X_3_error = X_nan[:, 25:30]

            cpd_1_2_rew = re._CPD(X_1_rew, firing_rates_2_inf_rew)
            cpd_1_3_rew = re._CPD(X_1_rew, firing_rates_3_inf_rew)

            cpd_2_1_rew = re._CPD(X_2_rew, firing_rates_1_inf_rew)
            cpd_2_3_rew = re._CPD(X_2_rew, firing_rates_3_inf_rew)

            cpd_3_1_rew = re._CPD(X_3_rew, firing_rates_1_inf_rew)
            cpd_3_2_rew = re._CPD(X_3_rew, firing_rates_2_inf_rew)

            cpd_1_2_error = re._CPD(X_1_error, firing_rates_2_inf_error)
            cpd_1_3_error = re._CPD(X_1_error, firing_rates_3_inf_error)

            cpd_2_1_error = re._CPD(X_2_error, firing_rates_1_inf_error)
            cpd_2_3_error = re._CPD(X_2_error, firing_rates_3_inf_error)

            cpd_3_1_error = re._CPD(X_3_error, firing_rates_1_inf_error)
            cpd_3_2_error = re._CPD(X_3_error, firing_rates_2_inf_error)

        cpd_rew_perm.append(
            np.nanmean([
                cpd_1_2_rew, cpd_1_3_rew, cpd_2_1_rew, cpd_2_3_rew,
                cpd_3_1_rew, cpd_3_2_rew
            ], 0))
        cpd_error_perm.append(
            np.nanmean([
                cpd_1_2_error, cpd_1_3_error, cpd_2_1_error, cpd_2_3_error,
                cpd_3_1_error, cpd_3_2_error
            ], 0))

    cpd_rew_perm = np.max(np.percentile(cpd_rew_perm, 95, axis=0), 0)
    cpd_error_perm = np.max(np.percentile(cpd_error_perm, 95, axis=0), 0)

    plt.subplot(2, sub, 1)
    plt.hlines(cpd_rew_perm[0],
               xmin=0,
               xmax=63,
               color=pal[0],
               label='Pre Init Period',
               linestyle=':')
    plt.hlines(cpd_rew_perm[1],
               xmin=0,
               xmax=63,
               color=pal[2],
               label='Init Period',
               linestyle=':')
    plt.hlines(cpd_rew_perm[2],
               xmin=0,
               xmax=63,
               color=pal[4],
               label='Choice Period',
               linestyle=':')
    plt.hlines(cpd_rew_perm[3],
               xmin=0,
               xmax=63,
               color=pal[6],
               label='Reward Period',
               linestyle=':')
    plt.legend()
    sns.despine()

    plt.subplot(2, sub, 2)

    plt.hlines(cpd_error_perm[0],
               xmin=0,
               xmax=63,
               color=pal_c[0],
               label='Pre Init Period',
               linestyle=':')
    plt.hlines(cpd_error_perm[1],
               xmin=0,
               xmax=63,
               color=pal_c[2],
               label='Init Period',
               linestyle=':')
    plt.hlines(cpd_error_perm[2],
               xmin=0,
               xmax=63,
               color=pal_c[4],
               label='Choice Period',
               linestyle=':')
    plt.hlines(cpd_error_perm[3],
               xmin=0,
               xmax=63,
               color=pal_c[6],
               label='Reward Period',
               linestyle=':')
    plt.legend()
    sns.despine()

    if interactions == True:

        cpd_rew_int_perm = np.max(np.percentile(cpd_rew_int_perm, 95, axis=0),
                                  0)
        cpd_error_int_perm = np.max(
            np.percentile(cpd_error_int_perm, 95, axis=0), 0)

        plt.subplot(2, sub, 3)
        plt.hlines(cpd_rew_int_perm[0],
                   xmin=0,
                   xmax=63,
                   color=pal[0],
                   label='Pre Init Period',
                   linestyle=':')
        plt.hlines(cpd_rew_int_perm[1],
                   xmin=0,
                   xmax=63,
                   color=pal[2],
                   label='Init Period',
                   linestyle=':')
        plt.hlines(cpd_rew_int_perm[2],
                   xmin=0,
                   xmax=63,
                   color=pal[4],
                   label='Choice Period',
                   linestyle=':')
        plt.hlines(cpd_rew_int_perm[3],
                   xmin=0,
                   xmax=63,
                   color=pal[6],
                   label='Reward Period',
                   linestyle=':')
        plt.legend()
        sns.despine()
        plt.subplot(2, sub, 4)

        plt.hlines(cpd_error_int_perm[0],
                   xmin=0,
                   xmax=63,
                   color=pal_c[0],
                   label='Pre Init Period',
                   linestyle=':')
        plt.hlines(cpd_error_int_perm[1],
                   xmin=0,
                   xmax=63,
                   color=pal_c[2],
                   label='Init Period',
                   linestyle=':')
        plt.hlines(cpd_error_int_perm[2],
                   xmin=0,
                   xmax=63,
                   color=pal_c[4],
                   label='Choice Period',
                   linestyle=':')
        plt.hlines(cpd_error_int_perm[3],
                   xmin=0,
                   xmax=63,
                   color=pal_c[6],
                   label='Reward Period',
                   linestyle=':')
        plt.legend()
        sns.despine()
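The dotted significance lines above come from a circular-shift permutation test: each permuted design matrix is built with np.roll, the CPDs are recomputed, and the threshold per predictor is the 95th percentile across permutations, maximised over time points. A minimal, self-contained sketch of that thresholding step (the helper name and toy shapes are illustrative, not the routine used above):

import numpy as np


def permutation_cpd_threshold(cpd_true, cpd_perm, percentile=95):
    # cpd_true: (n_timepoints, n_predictors) true CPDs.
    # cpd_perm: (n_perm, n_timepoints, n_predictors) CPDs recomputed after
    #           circularly shifting (np.roll) the design matrix.
    # Threshold per predictor: the chosen percentile across permutations,
    # maximised over time points.
    thresh = np.max(np.percentile(cpd_perm, percentile, axis=0), axis=0)
    significant = cpd_true > thresh[None, :]  # (n_timepoints, n_predictors)
    return thresh, significant


# Toy usage with random numbers (shapes only, not real data).
rng = np.random.default_rng(0)
thresh, significant = permutation_cpd_threshold(
    rng.random((63, 9)), rng.random((500, 63, 9)))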
Example #16
0
def time_in_block(data, area='PFC'):

    dm = data['DM'][0]
    firing = data['Data'][0]
    C_1 = []
    C_2 = []
    C_3 = []
    cpd_1 = []
    cpd_2 = []
    cpd_3 = []
    # cpd_perm_p_1 = []; cpd_perm_p_2 = []; cpd_perm_p_3 = []

    for s, sess in enumerate(dm):

        DM = dm[s]
        firing_rates = firing[s][1:]

        # firing_rates = firing_rates[:,:,:63]
        n_trials, n_neurons, n_timepoints = firing_rates.shape

        choices = DM[:, 1] - 0.5
        reward = DM[:, 2] - 0.5

        task = DM[:, 5][1:]
        a_pokes = DM[:, 6][1:]
        b_pokes = DM[:, 7][1:]

        reward_prev = reward[:-1]
        reward = reward[1:]

        choices_prev = choices[:-1]
        choices = choices[1:]

        taskid = task_ind(task, a_pokes, b_pokes)

        task_1 = np.where(taskid == 1)[0]
        task_2 = np.where(taskid == 2)[0]
        task_3 = np.where(taskid == 3)[0]
        # Switch/repeat coding: +0.5 if the outcome differs from the previous
        # trial, -0.5 if it repeats.
        reward_PE = np.zeros(len(task))
        for r in range(len(reward)):
            if reward[r] != reward[r - 1]:
                reward_PE[r] = 0.5
            else:
                reward_PE[r] = -0.5

        # Same switch/repeat coding for choices.
        choice_PE = np.zeros(len(task))
        for r in range(len(choices)):
            if choices[r] != choices[r - 1]:
                choice_PE[r] = 0.5
            else:
                choice_PE[r] = -0.5

        reward_PE_1 = reward_PE[task_1]
        choice_PE_1 = choice_PE[task_1]

        rewards_1 = reward[task_1]
        choices_1 = choices[task_1]
        ones_1 = np.ones(len(choices_1))
        trials_1 = len(choices_1)
        prev_reward_1 = reward_prev[task_1]
        prev_choice_1 = choices_prev[task_1]
        #prev_choice_1 = choices_1*choice_PE_1
        choice_PE_1_reward_current = choice_PE_1 * rewards_1
        choice_PE_1_reward_prev = choice_PE_1 * prev_reward_1

        rew_ch_1 = choices_1 * rewards_1
        prev_choice_1_lr = prev_choice_1 * prev_reward_1

        firing_rates_1 = firing_rates[task_1]
        predictors_all = OrderedDict([
            ('Choice', choices_1), ('Reward', rewards_1),
            ('Reward Repeat/Switch', reward_PE_1),
            ('Choice Repeat/Switch', choice_PE_1),
            ('Repeat/Switch Current Reward', choice_PE_1_reward_current),
            ('Repeat/Switch Prev Reward', choice_PE_1_reward_prev),
            ('Choice x Reward ', rew_ch_1),
            ('Prev Ch x Last Reward', prev_choice_1_lr),
            ('Prev Rew', prev_reward_1), ('Prev Ch', prev_choice_1),
            ('ones', ones_1)
        ])

        X_1 = np.vstack(predictors_all.values()).T[:trials_1, :].astype(float)

        n_predictors = X_1.shape[1]
        y_1 = firing_rates_1.reshape(
            [len(firing_rates_1),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y_1, X_1)
        C_1.append(tstats.reshape(n_predictors, n_neurons,
                                  n_timepoints))  # Predictor loadings
        cpd_1.append(
            re._CPD(X_1, y_1).reshape(n_neurons, n_timepoints, n_predictors))

        choices_2 = choices[task_2]
        rewards_2 = reward[task_2]
        ones_2 = np.ones(len(choices_2))
        reward_PE_2 = reward_PE[task_2]
        choice_PE_2 = choice_PE[task_2]

        prev_reward_2 = reward_prev[task_2]
        prev_choice_2 = choices_prev[task_2]
        #prev_choice_2 = choices_2*choice_PE_2

        choice_PE_2_reward_current = choice_PE_2 * rewards_2
        choice_PE_2_reward_prev = choice_PE_2 * prev_reward_2

        trials_2 = len(choices_2)
        rew_ch_2 = choices_2 * rewards_2
        prev_choice_2_lr = prev_choice_2 * prev_reward_2

        firing_rates_2 = firing_rates[task_2]

        predictors_all = OrderedDict([
            ('Choice', choices_2), ('Reward', rewards_2),
            ('Reward Repeat/Switch', reward_PE_2),
            ('Choice Repeat/Switch', choice_PE_2),
            ('Repeat/Switch Current Reward', choice_PE_2_reward_current),
            ('Repeat/Switch Prev Reward', choice_PE_2_reward_prev),
            ('Choice x Reward ', rew_ch_2),
            (' Prev Ch x Last Reward', prev_choice_2_lr),
            ('Prev Rew', prev_reward_2), ('Prev Ch', prev_choice_2),
            ('ones', ones_2)
        ])

        X_2 = np.vstack(predictors_all.values()).T[:trials_2, :].astype(float)

        n_predictors = X_2.shape[1]
        y_2 = firing_rates_2.reshape(
            [len(firing_rates_2),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y_2, X_2)
        C_2.append(tstats.reshape(n_predictors, n_neurons,
                                  n_timepoints))  # Predictor loadings
        cpd_2.append(
            re._CPD(X_2, y_2).reshape(n_neurons, n_timepoints, n_predictors))

        choices_3 = choices[task_3]
        rewards_3 = reward[task_3]
        ones_3 = np.ones(len(choices_3))
        trials_3 = len(choices_3)
        prev_reward_3 = reward_prev[task_3]
        choice_PE_3 = choice_PE[task_3]
        reward_PE_3 = reward_PE[task_3]
        choice_PE_3_reward_current = choice_PE_3 * rewards_3
        choice_PE_3_reward_prev = choice_PE_3 * prev_reward_3
        prev_choice_3 = choices_prev[task_3]

        #prev_choice_3 = choices_3*choice_PE_3

        rew_ch_3 = choices_3 * rewards_3
        prev_choice_3_lr = prev_choice_3 * prev_reward_3

        firing_rates_3 = firing_rates[task_3]

        predictors_all = OrderedDict([
            ('Ch', choices_3), ('Rew', rewards_3), ('Rew Stay', reward_PE_3),
            ('Ch Stay', choice_PE_3),
            ('Stay Cur Rew', choice_PE_3_reward_current),
            ('Stay Prev Rew', choice_PE_3_reward_prev),
            ('Ch x Rew ', rew_ch_3), (' Prev Ch x Prev Rew', prev_choice_3_lr),
            ('Prev Rew', prev_reward_3), ('Prev Ch', prev_choice_3),
            ('ones', ones_3)
        ])

        X_3 = np.vstack(predictors_all.values()).T[:trials_3, :].astype(float)
        # Sanity check: the design matrix should be full rank
        # (rank equal to the number of predictors).
        rank = np.linalg.matrix_rank(X_3)
        n_predictors = X_3.shape[1]
        print('X_3 rank:', rank, 'n_predictors:', n_predictors)
        y_3 = firing_rates_3.reshape(
            [len(firing_rates_3),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y_3, X_3)
        C_3.append(tstats.reshape(n_predictors, n_neurons,
                                  n_timepoints))  # Predictor loadings
        cpd_3.append(
            re._CPD(X_3, y_3).reshape(n_neurons, n_timepoints, n_predictors))

    C_1 = np.concatenate(C_1, 1)

    C_2 = np.concatenate(C_2, 1)

    C_3 = np.concatenate(C_3, 1)

    cpd_1 = np.nanmean(np.concatenate(cpd_1, 0), axis=0)
    cpd_2 = np.nanmean(np.concatenate(cpd_2, 0), axis=0)
    cpd_3 = np.nanmean(np.concatenate(cpd_3, 0), axis=0)
    cpd = np.mean([cpd_1, cpd_2, cpd_3], 0)

    c = wes.Darjeeling2_5.mpl_colors + wes.Mendl_4.mpl_colors + wes.GrandBudapest1_4.mpl_colors + wes.Moonrise1_5.mpl_colors

    plt.figure()
    pred = list(predictors_all.keys())[2:-1]
    for j, i in enumerate(cpd.T[2:-1]):
        plt.plot(i, color=c[j], label=pred[j])
    plt.legend()
    sns.despine()

    # Keep only cells whose loadings (for the first predictor) are finite in
    # all three tasks.
    good = (~np.isinf(C_1[0]).any(axis=1) & ~np.isnan(C_1[0]).any(axis=1)
            & ~np.isinf(C_2[0]).any(axis=1) & ~np.isnan(C_2[0]).any(axis=1)
            & ~np.isinf(C_3[0]).any(axis=1) & ~np.isnan(C_3[0]).any(axis=1))

    C_1 = C_1[:, good, :]
    C_2 = C_2[:, good, :]
    C_3 = C_3[:, good, :]

    preds = np.arange(len(list(predictors_all.keys())))
    # Every ordered pair of predictors: coef_1 cycles through the predictors
    # while coef_2 is shifted by 0..10, so each predictor is paired with every
    # predictor (including itself).
    coef_1 = np.tile(preds, 11)
    coef_2 = np.concatenate([np.roll(preds, k) for k in range(11)])

    m = 0
    l = 0
    plt.figure(figsize=(10, 10))
    for c_1, c_2 in zip(coef_1, coef_2):
        title = list(predictors_all.keys())[c_1] + ' ' + 'on' + ' ' + list(
            predictors_all.keys())[c_2]

        m += 1
        l += 1
        if m == 11:  # The 5 x 2 page is full: save it and start a new figure.
            plt.savefig('/Users/veronikasamborska/Desktop/runs/within' + area +
                        str(l) + '.png')
            plt.figure(figsize=(10, 10))
            m = 1

        C_1_rew = C_1[c_1]
        C_2_rew = C_2[c_1]
        C_3_rew = C_3[c_1]
        C_1_rew_count = C_1[c_2]
        C_2_rew_count = C_2[c_2]
        C_3_rew_count = C_3[c_2]

        # Time points around which loadings are averaged, roughly matching the
        # pre-init, init, choice and reward periods used elsewhere.
        reward_times_to_choose = np.asarray([20, 24, 35, 41])

        # Epoch-averaged loadings per cell; the extra last column is left as
        # ones to serve as an intercept in the regression below.
        C_1_rew_proj = np.ones(
            (C_1_rew.shape[0], reward_times_to_choose.shape[0] + 1))
        C_2_rew_proj = np.ones(
            (C_1_rew.shape[0], reward_times_to_choose.shape[0] + 1))
        C_3_rew_proj = np.ones(
            (C_1_rew.shape[0], reward_times_to_choose.shape[0] + 1))

        j = 0
        for i in reward_times_to_choose:
            if i == reward_times_to_choose[0]:
                C_1_rew_proj[:, j] = np.mean(C_1_rew[:, i - 20:i], 1)
                C_2_rew_proj[:, j] = np.mean(C_2_rew[:, i - 20:i], 1)
                C_3_rew_proj[:, j] = np.mean(C_3_rew[:, i - 20:i], 1)
            elif i == reward_times_to_choose[1] or i == reward_times_to_choose[2]:
                C_1_rew_proj[:, j] = np.mean(C_1_rew[:, i - 5:i + 5], 1)
                C_2_rew_proj[:, j] = np.mean(C_2_rew[:, i - 5:i + 5], 1)
                C_3_rew_proj[:, j] = np.mean(C_3_rew[:, i - 5:i + 5], 1)
            elif i == reward_times_to_choose[3]:
                C_1_rew_proj[:, j] = np.mean(C_1_rew[:, i:i + 5], 1)
                C_2_rew_proj[:, j] = np.mean(C_2_rew[:, i:i + 5], 1)
                C_3_rew_proj[:, j] = np.mean(C_3_rew[:, i:i + 5], 1)

            j += 1

        C_1_rew_count_proj = np.ones(
            (C_1_rew.shape[0], reward_times_to_choose.shape[0] + 1))
        C_2_rew_count_proj = np.ones(
            (C_1_rew.shape[0], reward_times_to_choose.shape[0] + 1))
        C_3_rew_count_proj = np.ones(
            (C_1_rew.shape[0], reward_times_to_choose.shape[0] + 1))
        j = 0
        for i in reward_times_to_choose:
            if i == reward_times_to_choose[0]:
                C_1_rew_count_proj[:, j] = np.mean(C_1_rew_count[:, i - 20:i], 1)
                C_2_rew_count_proj[:, j] = np.mean(C_2_rew_count[:, i - 20:i], 1)
                C_3_rew_count_proj[:, j] = np.mean(C_3_rew_count[:, i - 20:i], 1)
            elif i == reward_times_to_choose[1] or i == reward_times_to_choose[2]:
                C_1_rew_count_proj[:, j] = np.mean(C_1_rew_count[:, i - 5:i + 5], 1)
                C_2_rew_count_proj[:, j] = np.mean(C_2_rew_count[:, i - 5:i + 5], 1)
                C_3_rew_count_proj[:, j] = np.mean(C_3_rew_count[:, i - 5:i + 5], 1)
            elif i == reward_times_to_choose[3]:
                C_1_rew_count_proj[:, j] = np.mean(C_1_rew_count[:, i:i + 5], 1)
                C_2_rew_count_proj[:, j] = np.mean(C_2_rew_count[:, i:i + 5], 1)
                C_3_rew_count_proj[:, j] = np.mean(C_3_rew_count[:, i:i + 5], 1)

            j += 1

        # For each task, relate the epoch-averaged loadings of one predictor
        # to the other predictor's loading time course (regression_code_session),
        # then combine the three tasks into a t-like ratio: summed effects over
        # the square root of the summed variances.
        cpd_1_2_rew, cpd_1_2_rew_var = regression_code_session(
            C_1_rew_count, C_1_rew_proj)
        cpd_1_3_rew, cpd_1_3_rew_var = regression_code_session(
            C_2_rew_count, C_2_rew_proj)
        cpd_2_3_rew, cpd_2_3_rew_var = regression_code_session(
            C_3_rew_count, C_3_rew_proj)

        rew_to_count_cpd = (cpd_1_2_rew + cpd_1_3_rew + cpd_2_3_rew) / np.sqrt(
            (cpd_1_2_rew_var + cpd_1_3_rew_var + cpd_2_3_rew_var))

        plt.subplot(5, 2, m)
        for j, i in enumerate(rew_to_count_cpd[:-1]):
            plt.plot(i, color=c[j], label=str(j))
        # plt.legend()
        plt.title(area + ' ' + str(title))
        sns.despine()

        plt.tight_layout()
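re._CPD appears throughout as the measure of how much variance each predictor uniquely explains. Its implementation is not shown in this listing; the sketch below follows the standard definition of the coefficient of partial determination (drop one predictor, compare residual sums of squares) that these calls are assumed to use:

import numpy as np


def cpd_sketch(X, y):
    # X: design matrix (n_trials, n_predictors); y: activity (n_trials, n_signals).
    # Returns (n_signals, n_predictors): the proportional increase in residual
    # variance when each predictor is removed from the full model.
    def sse(design):
        beta, _, _, _ = np.linalg.lstsq(design, y, rcond=None)
        residual = y - design @ beta
        return np.sum(residual ** 2, axis=0)

    sse_full = sse(X)
    cpd = np.zeros((y.shape[1], X.shape[1]))
    for p in range(X.shape[1]):
        sse_reduced = sse(np.delete(X, p, axis=1))  # model without predictor p
        cpd[:, p] = (sse_reduced - sse_full) / sse_reduced
    return cpd


# Toy usage (random placeholders): 100 trials, 4 predictors, 6 signals.
rng = np.random.default_rng(1)
values = cpd_sketch(rng.random((100, 4)), rng.random((100, 6)))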
def regression_general(data):

    C = []
    cpd = []

    C_1 = []
    C_2 = []
    C_3 = []

    cpd_1_2 = []
    cpd_2_3 = []

    dm = data['DM']
    #dm = dm[:-1]
    firing = data['Data']
    #firing = firing[:-1]

    for s, sess in enumerate(dm):
        DM = dm[s]
        firing_rates = firing[s]
        n_trials, n_neurons, n_timepoints = firing_rates.shape

        if n_neurons > 10:
            session_trials_since_block = []

            state = DM[:, 0]
            choices = DM[:, 1]
            reward = DM[:, 2]
            b_pokes = DM[:, 7]
            a_pokes = DM[:, 6]
            task = DM[:, 5]
            block = DM[:, 4]
            block_df = np.diff(block)
            taskid = rc.task_ind(task, a_pokes, b_pokes)

            correct_choice = np.where(choices == state)[0]
            correct = np.zeros(len(choices))
            correct[correct_choice] = 1

            a_since_block = []
            trials_since_block = []
            t = 0

            # Bug in the state?
            # Trials since the start of a block: the counter resets whenever
            # the latent state changes.
            for st in range(len(block)):
                if state[st - 1] != state[st]:
                    t = 0
                else:
                    t += 1
                trials_since_block.append(t)

            session_trials_since_block.append(trials_since_block)

            # A choices since the start of a block, reset on state changes.
            t = 0
            for st, c in enumerate(choices):
                if state[st - 1] != state[st]:
                    t = 0
                    a_since_block.append(t)

                elif c == 1:
                    t += 1
                    a_since_block.append(t)
                else:
                    a_since_block.append(0)

            # Running error count within a block: increments on unrewarded
            # trials, decrements on rewarded ones, and resets at block
            # transitions (block_df != 0 marks the last trial of a block).
            negative_reward_count = []
            rew = 0
            block_df = np.append(block_df, 0)
            for r, b in zip(reward, block_df):

                if r == 0:
                    rew += 1
                    negative_reward_count.append(rew)
                elif r == 1:
                    rew -= 1
                    negative_reward_count.append(rew)
                if b != 0:
                    rew = 0

            # Running reward count within a block, reset at block transitions.
            positive_reward_count = []
            rew = 0
            for r, b in zip(reward, block_df):

                if r == 1:
                    rew += 1
                    positive_reward_count.append(rew)
                elif r == 0:
                    rew += 0
                    positive_reward_count.append(rew)
                if b != 0:
                    rew = 0

            positive_reward_count = np.asarray(positive_reward_count)
            negative_reward_count = np.asarray(negative_reward_count)
            choices_int = np.ones(len(reward))

            choices_int[np.where(choices == 0)] = -1
            reward_choice_int = choices_int * reward
            interaction_trial_latent = trials_since_block * state
            interaction_a_latent = a_since_block * state
            int_a_reward = a_since_block * reward

            interaction_trial_choice = trials_since_block * choices_int
            reward_trial_in_block = trials_since_block * positive_reward_count
            negative_reward_count_st = negative_reward_count * correct
            positive_reward_count_st = positive_reward_count * correct
            negative_reward_count_ch = negative_reward_count * choices
            positive_reward_count_ch = positive_reward_count * choices
            ones = np.ones(len(choices))

            predictors_all = OrderedDict([
                ('Reward', reward),
                ('Choice', choices),
                #('Correct', correct),
                #('A in Block', a_since_block),
                #('A in Block x Reward', int_a_reward),
                ('State', state),
                ('Trial in Block', trials_since_block),
                #('Interaction State x Trial in Block', interaction_trial_latent),
                #('Interaction State x A count', interaction_a_latent),
                ('Choice x Trials in Block', interaction_trial_choice),
                ('Reward x Choice', reward_choice_int),
                # ('No Reward Count in a Block', negative_reward_count),
                # ('No Reward x Correct', negative_reward_count_st),
                # ('Reward Count in a Block', positive_reward_count),
                # ('Reward Count x Correct', positive_reward_count_st),
                # ('No reward Count x Choice',negative_reward_count_ch),
                # ('Reward Count x Choice',positive_reward_count_ch),
                # ('Reward x Trial in Block',reward_trial_in_block),
                ('ones', ones)
            ])

            X = np.vstack(
                predictors_all.values()).T[:len(choices), :].astype(float)
            n_predictors = X.shape[1]
            y = firing_rates.reshape(
                [len(firing_rates),
                 -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
            tstats = reg_f.regression_code(y, X)

            C.append(tstats.reshape(n_predictors, n_neurons,
                                    n_timepoints))  # Predictor loadings
            cpd.append(
                re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))

            task_1 = np.where(taskid == 1)[0]
            task_2 = np.where(taskid == 2)[0]
            task_3 = np.where(taskid == 3)[0]

            # Task 1
            reward_t1 = reward[task_1]
            choices_t1 = choices[task_1]
            correct_t1 = correct[task_1]

            a_since_block_t1 = np.asarray(a_since_block)[task_1]
            int_a_reward_t1 = int_a_reward[task_1]
            state_t1 = state[task_1]
            trials_since_block_t1 = np.asarray(trials_since_block)[task_1]
            interaction_trial_latent_t1 = interaction_trial_latent[task_1]
            interaction_a_latent_t1 = interaction_a_latent[task_1]
            interaction_trial_choice_t1 = interaction_trial_choice[task_1]
            reward_choice_int_t1 = reward_choice_int[task_1]
            negative_reward_count_t1 = negative_reward_count[task_1]
            negative_reward_count_st_t1 = negative_reward_count_st[task_1]
            positive_reward_count_t1 = positive_reward_count[task_1]
            positive_reward_count_st_t1 = positive_reward_count_st[task_1]
            negative_reward_count_ch_t1 = negative_reward_count_ch[task_1]
            positive_reward_count_ch_t1 = positive_reward_count_ch[task_1]
            reward_trial_in_block_t1 = reward_trial_in_block[task_1]

            firing_rates_t1 = firing_rates[task_1]
            ones = np.ones(len(choices_t1))

            predictors = OrderedDict([
                ('Reward', reward_t1), ('Choice', choices_t1),
                ('Correct', correct_t1), ('A in Block', a_since_block_t1),
                ('A in Block x Reward', int_a_reward_t1), ('State', state_t1),
                ('Trial in Block', trials_since_block_t1),
                ('Interaction State x Trial in Block',
                 interaction_trial_latent_t1),
                ('Interaction State x A count', interaction_a_latent_t1),
                ('Choice x Trials in Block', interaction_trial_choice_t1),
                ('Reward x Choice', reward_choice_int_t1),
                ('No Reward Count in a Block', negative_reward_count_t1),
                ('No Reward x Correct', negative_reward_count_st_t1),
                ('Reward Count in a Block', positive_reward_count_t1),
                ('Reward Count x Correct', positive_reward_count_st_t1),
                ('No reward Count x Choice', negative_reward_count_ch_t1),
                ('Reward Count x Choice', positive_reward_count_ch_t1),
                ('Reward x Trial in Block', reward_trial_in_block_t1),
                ('ones', ones)
            ])

            X_1 = np.vstack(
                predictors.values()).T[:len(choices_t1), :].astype(float)
            n_predictors = X_1.shape[1]
            y_1 = firing_rates_t1.reshape(
                [len(firing_rates_t1),
                 -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
            tstats = reg_f.regression_code(y_1, X_1)

            C_1.append(tstats.reshape(n_predictors, n_neurons,
                                      n_timepoints))  # Predictor loadings

            # Task 2
            reward_t2 = reward[task_2]
            choices_t2 = choices[task_2]
            correct_t2 = correct[task_2]

            a_since_block_t2 = np.asarray(a_since_block)[task_2]
            int_a_reward_t2 = int_a_reward[task_2]
            state_t2 = state[task_2]
            trials_since_block_t2 = np.asarray(trials_since_block)[task_2]
            interaction_trial_latent_t2 = interaction_trial_latent[task_2]
            interaction_a_latent_t2 = interaction_a_latent[task_2]
            interaction_trial_choice_t2 = interaction_trial_choice[task_2]
            reward_choice_int_t2 = reward_choice_int[task_2]
            negative_reward_count_t2 = negative_reward_count[task_2]
            negative_reward_count_st_t2 = negative_reward_count_st[task_2]
            positive_reward_count_t2 = positive_reward_count[task_2]
            positive_reward_count_st_t2 = positive_reward_count_st[task_2]
            negative_reward_count_ch_t2 = negative_reward_count_ch[task_2]
            positive_reward_count_ch_t2 = positive_reward_count_ch[task_2]
            reward_trial_in_block_t2 = reward_trial_in_block[task_2]

            firing_rates_t2 = firing_rates[task_2]
            ones = np.ones(len(choices_t2))

            predictors = OrderedDict([
                ('Reward', reward_t2), ('Choice', choices_t2),
                ('Correct', correct_t2), ('A in Block', a_since_block_t2),
                ('A in Block x Reward', int_a_reward_t2), ('State', state_t2),
                ('Trial in Block', trials_since_block_t2),
                ('Interaction State x Trial in Block',
                 interaction_trial_latent_t2),
                ('Interaction State x A count', interaction_a_latent_t2),
                ('Choice x Trials in Block', interaction_trial_choice_t2),
                ('Reward x Choice', reward_choice_int_t2),
                ('No Reward Count in a Block', negative_reward_count_t2),
                ('No Reward x Correct', negative_reward_count_st_t2),
                ('Reward Count in a Block', positive_reward_count_t2),
                ('Reward Count x Correct', positive_reward_count_st_t2),
                ('No reward Count x Choice', negative_reward_count_ch_t2),
                ('Reward Count x Choice', positive_reward_count_ch_t2),
                ('Reward x Trial in Block', reward_trial_in_block_t2),
                ('ones', ones)
            ])

            X_2 = np.vstack(
                predictors.values()).T[:len(choices_t2), :].astype(float)
            n_predictors = X_2.shape[1]
            y_2 = firing_rates_t2.reshape(
                [len(firing_rates_t2),
                 -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
            tstats = reg_f.regression_code(y_2, X_2)

            C_2.append(tstats.reshape(n_predictors, n_neurons,
                                      n_timepoints))  # Predictor loadings

            # Task 3
            reward_t3 = reward[task_3]
            choices_t3 = choices[task_3]
            correct_t3 = correct[task_3]

            a_since_block_t3 = np.asarray(a_since_block)[task_3]
            int_a_reward_t3 = int_a_reward[task_3]
            state_t3 = state[task_3]
            trials_since_block_t3 = np.asarray(trials_since_block)[task_3]
            interaction_trial_latent_t3 = interaction_trial_latent[task_3]
            interaction_a_latent_t3 = interaction_a_latent[task_3]
            interaction_trial_choice_t3 = interaction_trial_choice[task_3]
            reward_choice_int_t3 = reward_choice_int[task_3]
            negative_reward_count_t3 = negative_reward_count[task_3]
            negative_reward_count_st_t3 = negative_reward_count_st[task_3]
            positive_reward_count_t3 = positive_reward_count[task_3]
            positive_reward_count_st_t3 = positive_reward_count_st[task_3]
            negative_reward_count_ch_t3 = negative_reward_count_ch[task_3]
            positive_reward_count_ch_t3 = positive_reward_count_ch[task_3]
            reward_trial_in_block_t3 = reward_trial_in_block[task_3]

            firing_rates_t3 = firing_rates[task_3]
            ones = np.ones(len(choices_t3))

            predictors = OrderedDict([
                ('Reward', reward_t3), ('Choice', choices_t3),
                ('Correct', correct_t3), ('A in Block', a_since_block_t3),
                ('A in Block x Reward', int_a_reward_t3), ('State', state_t3),
                ('Trial in Block', trials_since_block_t3),
                ('Interaction State x Trial in Block',
                 interaction_trial_latent_t3),
                ('Interaction State x A count', interaction_a_latent_t3),
                ('Choice x Trials in Block', interaction_trial_choice_t3),
                ('Reward x Choice', reward_choice_int_t3),
                ('No Reward Count in a Block', negative_reward_count_t3),
                ('No Reward x Correct', negative_reward_count_st_t3),
                ('Reward Count in a Block', positive_reward_count_t3),
                ('Reward Count x Correct', positive_reward_count_st_t3),
                ('No reward Count x Choice', negative_reward_count_ch_t3),
                ('Reward Count x Choice', positive_reward_count_ch_t3),
                ('Reward x Trial in Block', reward_trial_in_block_t3),
                ('ones', ones)
            ])

            X_3 = np.vstack(
                predictors.values()).T[:len(choices_t3), :].astype(float)
            n_predictors = X_3.shape[1]
            y_3 = firing_rates_t3.reshape(
                [len(firing_rates_t3),
                 -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
            tstats = reg_f.regression_code(y_3, X_3)

            C_3.append(tstats.reshape(n_predictors, n_neurons,
                                      n_timepoints))  # Predictor loadings

            cpd_1_2.append(
                _CPD_cross_task(X_1, X_2, y_1,
                                y_2).reshape(n_neurons, n_timepoints,
                                             n_predictors))

            cpd_2_3.append(
                _CPD_cross_task(X_2, X_3, y_2,
                                y_3).reshape(n_neurons, n_timepoints,
                                             n_predictors))

            print('n_neurons:', n_neurons)

    cpd = np.nanmean(np.concatenate(cpd, 0), axis=0)
    C = np.concatenate(C, 1)

    C_1 = np.concatenate(C_1, 1)

    C_2 = np.concatenate(C_2, 1)

    C_3 = np.concatenate(C_3, 1)

    cpd_1_2 = np.nanmean(np.concatenate(cpd_1_2, 0), axis=0)
    cpd_2_3 = np.nanmean(np.concatenate(cpd_2_3, 0), axis=0)

    return C, cpd, C_1, C_2, C_3, cpd_1_2, cpd_2_3, predictors_all, session_trials_since_block
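For reference, the data dictionary these functions expect pairs a per-session list of design matrices under 'DM' with matching (trials, neurons, timepoints) firing-rate arrays under 'Data'; the column indices used above are 0 state, 1 choice, 2 reward, 4 block, 5 task, 6 A poke, 7 B poke. Note that regression_general indexes data['DM'] directly, whereas the earlier functions use data['DM'][0]. A hypothetical single-session example of that layout (dummy values only; the call is left commented because it needs the reg_f, re and rc modules and trials from all three task configurations):

import numpy as np

n_trials, n_neurons, n_timepoints = 200, 12, 63
DM = np.zeros((n_trials, 8))
DM[:, 0] = np.random.randint(0, 2, n_trials)   # state
DM[:, 1] = np.random.randint(0, 2, n_trials)   # choice
DM[:, 2] = np.random.randint(0, 2, n_trials)   # reward
DM[:, 4] = np.repeat(np.arange(10), 20)        # block index
DM[:, 5] = 1                                   # task identity
DM[:, 6] = 1                                   # A poke
DM[:, 7] = 2                                   # B poke
firing = np.random.rand(n_trials, n_neurons, n_timepoints)

data = {'DM': [DM], 'Data': [firing]}
# C, cpd, C_1, C_2, C_3, cpd_1_2, cpd_2_3, predictors_all, tsb = regression_general(data)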