def SR_reg(data):
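    """Session-wise OLS of firing rates on reward and choice regressors.

    Assumes `data` holds per-session design matrices in data['DM'][0]
    (trials x variables) and firing rates in data['Data'][0]
    (trials x neurons x timepoints). Returns t-statistics stacked as
    [n_predictors, total_neurons, n_timepoints].
    """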
    
    C = []

    dm = data['DM'][0]
    firing = data['Data'][0]
        
    for s, DM in enumerate(dm):
        firing_rates = firing[s]
        n_trials, n_neurons, n_timepoints = firing_rates.shape
        choices = DM[:,1]
        reward = DM[:,2]    
        ones = np.ones(len(reward))

        predictors_all = OrderedDict([('Reward', reward),
                                      ('Choice', choices),
                                      ('ones', ones)])
            
               
        X = np.vstack(predictors_all.values()).T[:len(choices),:].astype(float)
        n_predictors = X.shape[1]

        y = firing_rates.reshape([len(firing_rates),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y, X)
        C.append(tstats.reshape(n_predictors,n_neurons,n_timepoints)) # Predictor loadings
    C = np.concatenate(C,1)
    
    return C
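

# The helpers reg_f.regression_code, re._CPD and task_ind come from the
# authors' regression modules and are not defined in this file. Below are
# minimal sketches of what the first two are assumed to compute (OLS
# t-statistics and the coefficient of partial determination), for reference
# only; numpy as np is assumed to be imported at the top of this module.

def _regression_code_sketch(y, X):
    # OLS of every column of y on X; returns t-statistics shaped
    # [n_predictors, n_targets] (assumed to mirror reg_f.regression_code).
    beta, _, _, _ = np.linalg.lstsq(X, y, rcond=None)
    resid = y - X @ beta
    sigma2 = np.sum(resid ** 2, axis=0) / (X.shape[0] - X.shape[1])
    var_beta = np.diag(np.linalg.inv(X.T @ X))
    se = np.sqrt(var_beta[:, None] * sigma2[None, :])
    return beta / se


def _cpd_sketch(X, y):
    # Coefficient of partial determination: the fractional increase in the
    # residual sum of squares when each predictor is removed in turn
    # (assumed to mirror re._CPD, which returns [n_targets, n_predictors]).
    def sse(X_, y_):
        b, _, _, _ = np.linalg.lstsq(X_, y_, rcond=None)
        return np.sum((y_ - X_ @ b) ** 2, axis=0)

    sse_full = sse(X, y)
    cpd = np.zeros((y.shape[1], X.shape[1]))
    for j in range(X.shape[1]):
        sse_red = sse(np.delete(X, j, axis=1), y)
        cpd[:, j] = (sse_red - sse_full) / sse_red
    return cpd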
def time_in_block(data, area='PFC'):
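    """Within-task regressions on switch/stay (repeat) regressors for
    choices and rewards, fit separately for the three task identities.

    Assumes the same data layout as SR_reg. Plots CPD time courses and
    projections of predictor loadings between trial epochs; figures are
    saved to a hardcoded path, tagged by `area`.
    """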

    dm = data['DM'][0]
    firing = data['Data'][0]
    C_1 = []
    C_2 = []
    C_3 = []
    cpd_1 = []
    cpd_2 = []
    cpd_3 = []
    # cpd_perm_p_1 = []; cpd_perm_p_2 = []; cpd_perm_p_3 = []

    for s, DM in enumerate(dm):

        firing_rates = firing[s][1:]

        # firing_rates = firing_rates[:,:,:63]
        n_trials, n_neurons, n_timepoints = firing_rates.shape

        choices = DM[:, 1] - 0.5
        reward = DM[:, 2] - 0.5

        task = DM[:, 5][1:]
        a_pokes = DM[:, 6][1:]
        b_pokes = DM[:, 7][1:]

        reward_prev = reward[:-1]
        reward = reward[1:]

        choices_prev = choices[:-1]
        choices = choices[1:]

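        # task_ind (external helper) is assumed to label each trial with a
        # task identity (1, 2 or 3) from the task number and poke layout.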
        taskid = task_ind(task, a_pokes, b_pokes)

        task_1 = np.where(taskid == 1)[0]
        task_2 = np.where(taskid == 2)[0]
        task_3 = np.where(taskid == 3)[0]
        # Switch/repeat coding of outcomes: +0.5 when the outcome differs
        # from the previous trial's, -0.5 when it repeats. Note that at
        # r == 0 the comparison wraps to the last trial via negative
        # indexing.
        reward_PE = np.zeros(len(task))
        for r in range(len(reward)):
            if reward[r] != reward[r - 1]:
                reward_PE[r] = 0.5
            else:
                reward_PE[r] = -0.5

        # Same switch/repeat coding for choices.
        choice_PE = np.zeros(len(task))
        for r in range(len(choices)):
            if choices[r] != choices[r - 1]:
                choice_PE[r] = 0.5
            else:
                choice_PE[r] = -0.5

        reward_PE_1 = reward_PE[task_1]
        choice_PE_1 = choice_PE[task_1]

        rewards_1 = reward[task_1]
        choices_1 = choices[task_1]
        ones_1 = np.ones(len(choices_1))
        trials_1 = len(choices_1)
        prev_reward_1 = reward_prev[task_1]
        prev_choice_1 = choices_prev[task_1]
        choice_PE_1_reward_current = choice_PE_1 * rewards_1
        choice_PE_1_reward_prev = choice_PE_1 * prev_reward_1

        rew_ch_1 = choices_1 * rewards_1
        prev_choice_1_lr = prev_choice_1 * prev_reward_1

        firing_rates_1 = firing_rates[task_1]
        predictors_all = OrderedDict([
            ('Choice', choices_1), ('Reward', rewards_1),
            ('Reward Repeat/Switch', reward_PE_1),
            ('Choice Repeat/Switch', choice_PE_1),
            ('Repeat/Switch Current Reward', choice_PE_1_reward_current),
            ('Repeat/Switch Prev Reward', choice_PE_1_reward_prev),
            ('Choice x Reward', rew_ch_1),
            ('Prev Ch x Last Reward', prev_choice_1_lr),
            ('Prev Rew', prev_reward_1), ('Prev Ch', prev_choice_1),
            ('ones', ones_1)
        ])

        X_1 = np.vstack(predictors_all.values()).T[:trials_1, :].astype(float)

        n_predictors = X_1.shape[1]
        y_1 = firing_rates_1.reshape(
            [len(firing_rates_1),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y_1, X_1)
        C_1.append(tstats.reshape(n_predictors, n_neurons,
                                  n_timepoints))  # Predictor loadings
        cpd_1.append(
            re._CPD(X_1, y_1).reshape(n_neurons, n_timepoints, n_predictors))

        choices_2 = choices[task_2]
        rewards_2 = reward[task_2]
        ones_2 = np.ones(len(choices_2))
        reward_PE_2 = reward_PE[task_2]
        choice_PE_2 = choice_PE[task_2]

        prev_reward_2 = reward_prev[task_2]
        prev_choice_2 = choices_prev[task_2]

        choice_PE_2_reward_current = choice_PE_2 * rewards_2
        choice_PE_2_reward_prev = choice_PE_2 * prev_reward_2

        trials_2 = len(choices_2)
        rew_ch_2 = choices_2 * rewards_2
        prev_choice_2_lr = prev_choice_2 * prev_reward_2

        firing_rates_2 = firing_rates[task_2]

        predictors_all = OrderedDict([
            ('Choice', choices_2), ('Reward', rewards_2),
            ('Reward Repeat/Switch', reward_PE_2),
            ('Choice Repeat/Switch', choice_PE_2),
            ('Repeat/Switch Current Reward', choice_PE_2_reward_current),
            ('Repeat/Switch Prev Reward', choice_PE_2_reward_prev),
            ('Choice x Reward', rew_ch_2),
            ('Prev Ch x Last Reward', prev_choice_2_lr),
            ('Prev Rew', prev_reward_2), ('Prev Ch', prev_choice_2),
            ('ones', ones_2)
        ])

        X_2 = np.vstack(predictors_all.values()).T[:trials_2, :].astype(float)

        n_predictors = X_2.shape[1]
        y_2 = firing_rates_2.reshape(
            [len(firing_rates_2),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y_2, X_2)
        C_2.append(tstats.reshape(n_predictors, n_neurons,
                                  n_timepoints))  # Predictor loadings
        cpd_2.append(
            re._CPD(X_2, y_2).reshape(n_neurons, n_timepoints, n_predictors))

        choices_3 = choices[task_3]
        rewards_3 = reward[task_3]
        ones_3 = np.ones(len(choices_3))
        trials_3 = len(choices_3)
        prev_reward_3 = reward_prev[task_3]
        choice_PE_3 = choice_PE[task_3]
        reward_PE_3 = reward_PE[task_3]
        choice_PE_3_reward_current = choice_PE_3 * rewards_3
        choice_PE_3_reward_prev = choice_PE_3 * prev_reward_3
        prev_choice_3 = choices_prev[task_3]

        rew_ch_3 = choices_3 * rewards_3
        prev_choice_3_lr = prev_choice_3 * prev_reward_3

        firing_rates_3 = firing_rates[task_3]

        predictors_all = OrderedDict([
            ('Choice', choices_3), ('Reward', rewards_3),
            ('Reward Repeat/Switch', reward_PE_3),
            ('Choice Repeat/Switch', choice_PE_3),
            ('Repeat/Switch Current Reward', choice_PE_3_reward_current),
            ('Repeat/Switch Prev Reward', choice_PE_3_reward_prev),
            ('Choice x Reward', rew_ch_3),
            ('Prev Ch x Last Reward', prev_choice_3_lr),
            ('Prev Rew', prev_reward_3), ('Prev Ch', prev_choice_3),
            ('ones', ones_3)
        ])

        X_3 = np.vstack(predictors_all.values()).T[:trials_3, :].astype(float)
        rank = np.linalg.matrix_rank(X_3)
        n_predictors = X_3.shape[1]
        if rank < n_predictors:
            print('Design matrix rank deficient: rank', rank, 'of', n_predictors)
        y_3 = firing_rates_3.reshape(
            [len(firing_rates_3),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y_3, X_3)
        C_3.append(tstats.reshape(n_predictors, n_neurons,
                                  n_timepoints))  # Predictor loadings
        cpd_3.append(
            re._CPD(X_3, y_3).reshape(n_neurons, n_timepoints, n_predictors))

    C_1 = np.concatenate(C_1, 1)

    C_2 = np.concatenate(C_2, 1)

    C_3 = np.concatenate(C_3, 1)

    cpd_1 = np.nanmean(np.concatenate(cpd_1, 0), axis=0)
    cpd_2 = np.nanmean(np.concatenate(cpd_2, 0), axis=0)
    cpd_3 = np.nanmean(np.concatenate(cpd_3, 0), axis=0)
    cpd = np.mean([cpd_1, cpd_2, cpd_3], 0)

    c = wes.Darjeeling2_5.mpl_colors + wes.Mendl_4.mpl_colors + wes.GrandBudapest1_4.mpl_colors + wes.Moonrise1_5.mpl_colors

    plt.figure()
    pred = list(predictors_all.keys())[2:-1]
    for j, trace in enumerate(cpd.T[2:-1]):
        plt.plot(trace, color=c[j], label=pred[j])
    plt.legend()
    sns.despine()

    # Keep only neurons whose loadings are finite in all three tasks.
    finite_1 = ~np.isinf(C_1[0]).any(axis=1) & ~np.isnan(C_1[0]).any(axis=1)
    finite_2 = ~np.isinf(C_2[0]).any(axis=1) & ~np.isnan(C_2[0]).any(axis=1)
    finite_3 = ~np.isinf(C_3[0]).any(axis=1) & ~np.isnan(C_3[0]).any(axis=1)
    keep = finite_1 & finite_2 & finite_3

    C_1 = C_1[:, keep, :]
    C_2 = C_2[:, keep, :]
    C_3 = C_3[:, keep, :]

    # All ordered (row, column) predictor index pairs via rolled arrays.
    preds = np.arange(len(predictors_all))
    coef_1 = np.tile(preds, len(preds))
    coef_2 = np.concatenate([np.roll(preds, k) for k in range(len(preds))])

    m = 0
    l = 0
    plt.figure(figsize=(10, 10))
    for c_1, c_2 in zip(coef_1, coef_2):
        title = list(predictors_all.keys())[c_1] + ' on ' + list(
            predictors_all.keys())[c_2]

        m += 1
        l += 1
        if m == 10:
            plt.savefig('/Users/veronikasamborska/Desktop/runs/within' + area +
                        str(l) + '.png')
            plt.figure(figsize=(10, 10))
            m -= 9

        C_1_rew = C_1[c_1]
        C_2_rew = C_2[c_1]
        C_3_rew = C_3[c_1]
        C_1_rew_count = C_1[c_2]
        C_2_rew_count = C_2[c_2]
        C_3_rew_count = C_3[c_2]

        # Epoch anchor indices (pre-init, init, choice, reward periods).
        reward_times_to_choose = np.asarray([20, 24, 35, 41])

        C_1_rew_proj = np.ones(
            (C_1_rew.shape[0], reward_times_to_choose.shape[0] + 1))
        C_2_rew_proj = np.ones(
            (C_1_rew.shape[0], reward_times_to_choose.shape[0] + 1))
        C_3_rew_proj = np.ones(
            (C_1_rew.shape[0], reward_times_to_choose.shape[0] + 1))

        for j, i in enumerate(reward_times_to_choose):
            if i == reward_times_to_choose[0]:
                C_1_rew_proj[:, j] = np.mean(C_1_rew[:, i - 20:i], 1)
                C_2_rew_proj[:, j] = np.mean(C_2_rew[:, i - 20:i], 1)
                C_3_rew_proj[:, j] = np.mean(C_3_rew[:, i - 20:i], 1)
            elif i in (reward_times_to_choose[1], reward_times_to_choose[2]):
                C_1_rew_proj[:, j] = np.mean(C_1_rew[:, i - 5:i + 5], 1)
                C_2_rew_proj[:, j] = np.mean(C_2_rew[:, i - 5:i + 5], 1)
                C_3_rew_proj[:, j] = np.mean(C_3_rew[:, i - 5:i + 5], 1)
            elif i == reward_times_to_choose[3]:
                C_1_rew_proj[:, j] = np.mean(C_1_rew[:, i:i + 5], 1)
                C_2_rew_proj[:, j] = np.mean(C_2_rew[:, i:i + 5], 1)
                C_3_rew_proj[:, j] = np.mean(C_3_rew[:, i:i + 5], 1)

        C_1_rew_count_proj = np.ones(
            (C_1_rew.shape[0], reward_times_to_choose.shape[0] + 1))
        C_2_rew_count_proj = np.ones(
            (C_1_rew.shape[0], reward_times_to_choose.shape[0] + 1))
        C_3_rew_count_proj = np.ones(
            (C_1_rew.shape[0], reward_times_to_choose.shape[0] + 1))
        for j, i in enumerate(reward_times_to_choose):
            if i == reward_times_to_choose[0]:
                C_1_rew_count_proj[:, j] = np.mean(C_1_rew_count[:, i - 20:i], 1)
                C_2_rew_count_proj[:, j] = np.mean(C_2_rew_count[:, i - 20:i], 1)
                C_3_rew_count_proj[:, j] = np.mean(C_3_rew_count[:, i - 20:i], 1)
            elif i in (reward_times_to_choose[1], reward_times_to_choose[2]):
                C_1_rew_count_proj[:, j] = np.mean(C_1_rew_count[:, i - 5:i + 5], 1)
                C_2_rew_count_proj[:, j] = np.mean(C_2_rew_count[:, i - 5:i + 5], 1)
                C_3_rew_count_proj[:, j] = np.mean(C_3_rew_count[:, i - 5:i + 5], 1)
            elif i == reward_times_to_choose[3]:
                C_1_rew_count_proj[:, j] = np.mean(C_1_rew_count[:, i:i + 5], 1)
                C_2_rew_count_proj[:, j] = np.mean(C_2_rew_count[:, i:i + 5], 1)
                C_3_rew_count_proj[:, j] = np.mean(C_3_rew_count[:, i:i + 5], 1)

        cpd_1_rew, cpd_1_rew_var = regression_code_session(
            C_1_rew_count, C_1_rew_proj)
        cpd_2_rew, cpd_2_rew_var = regression_code_session(
            C_2_rew_count, C_2_rew_proj)
        cpd_3_rew, cpd_3_rew_var = regression_code_session(
            C_3_rew_count, C_3_rew_proj)

        rew_to_count_cpd = (cpd_1_rew + cpd_2_rew + cpd_3_rew) / np.sqrt(
            cpd_1_rew_var + cpd_2_rew_var + cpd_3_rew_var)

        plt.subplot(5, 2, m)
        for j, trace in enumerate(rew_to_count_cpd[:-1]):
            plt.plot(trace, color=c[j], label=str(j))
        plt.title(area + ' ' + str(title))
        sns.despine()

        plt.tight_layout()
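

# regression_code_session is used above but not defined in this file. A
# minimal sketch of what it is assumed to return (OLS coefficients of one
# set of loadings on another, plus their sampling variances):

def _regression_code_session_sketch(y, X):
    # y: targets [n_observations, n_targets]; X: design [n_observations,
    # n_predictors]. Returns per-predictor coefficients and variance
    # estimates, both shaped [n_predictors, n_targets].
    beta, _, _, _ = np.linalg.lstsq(X, y, rcond=None)
    resid = y - X @ beta
    sigma2 = np.sum(resid ** 2, axis=0) / (X.shape[0] - X.shape[1])
    var_beta = np.diag(np.linalg.inv(X.T @ X))[:, None] * sigma2[None, :]
    return beta, var_beta
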
def within_task_codes(data, area='HP', perm=5):
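    """Regression of firing on current and lagged (1-3 back) outcomes,
    choices and their interactions, with a circular-shift permutation test
    on the CPDs.

    Assumes the same data layout as SR_reg; `perm` is the number of
    permutations used to build the null distribution.
    """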

    dm = data['DM'][0]
    firing = data['Data'][0]
    C = []
    cpd = []
    cpd_perm_p = []

    for s, DM in enumerate(dm):

        # Per-permutation CPDs for this session.
        cpd_perm = [[] for i in range(perm)]

        firing_rates = firing[s]
        n_trials, n_neurons, n_timepoints = firing_rates.shape

        state = DM[:, 0]
        choices = DM[:, 1]

        reward = DM[:, 2]

        reward_2_ago = 0.5 - reward[1:-2]
        reward_3_ago = 0.5 - reward[:-3]
        reward_prev = 0.5 - reward[2:-1]
        reward_current = reward[3:]

        # reward_o_1_ago = np.asarray(reward_prev)
        # reward_o_2_ago = np.asarray(reward_2_ago)
        # reward_o_3_ago = np.asarray(reward_3_ago)

        firing_rates = firing_rates[3:]

        choices_2_ago = 0.5 - choices[1:-2]
        choices_3_ago = 0.5 - choices[:-3]
        choices_prev = 0.5 - choices[2:-1]
        choices_current = choices[3:]
        state = state[3:]

        # Projector orthogonal to current reward (currently unused: the
        # orthogonalised choice regressors below are commented out and the
        # raw lagged choices enter the model instead).
        cum_reward_orth = np.vstack(
            [reward_current, np.ones(len(reward_current))]).T
        xt = np.linalg.pinv(cum_reward_orth)
        identity = np.identity(len(reward_current))
        id_x = identity - np.matmul(cum_reward_orth, xt)

        # choice_o_1_ago = np.matmul(id_x, np.asarray(choices_prev))
        # choice_o_2_ago = np.matmul(id_x, np.asarray(choices_2_ago))
        # choice_o_3_ago = np.matmul(id_x, np.asarray(choices_3_ago))

        ch_rew_int_1 = choices_prev * reward_prev
        ch_rew_int_2 = choices_2_ago * reward_2_ago
        ch_rew_int_3 = choices_3_ago * reward_3_ago

        ones = np.ones(len(choices_3_ago))

        predictors_all = OrderedDict([('Reward', reward_current),
                                      ('Choice', choices_current),
                                      ('1 ago Outcome', reward_prev),
                                      ('2 ago Outcome', reward_2_ago),
                                      ('3 ago Outcome', reward_3_ago),
                                      ('1 ago Choice', choices_prev),
                                      ('2 ago Choice', choices_2_ago),
                                      ('3 ago Choice', choices_3_ago),
                                      ('1 Rew x Choice', ch_rew_int_1),
                                      ('2 Rew x Choice', ch_rew_int_2),
                                      ('3 Rew x Choice', ch_rew_int_3),
                                      ('ones', ones)])

        X = np.vstack(
            predictors_all.values()).T[:len(choices_current), :].astype(float)
        rank = np.linalg.matrix_rank(X)
        n_predictors = X.shape[1]
        if rank < n_predictors:
            print('Design matrix rank deficient: rank', rank, 'of', n_predictors)
        y = firing_rates.reshape(
            [len(firing_rates),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y, X)
        C.append(tstats.reshape(n_predictors, n_neurons,
                                n_timepoints))  # Predictor loadings
        cpd.append(
            re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))
        if perm:
            for i in range(perm):
                # Circularly shift trials to generate a null CPD.
                y_perm = np.roll(y, np.random.randint(len(y)), axis=0)
                cpd_temp = re._CPD(X, y_perm).reshape(n_neurons, n_timepoints,
                                                      n_predictors)
                cpd_perm[i].append(np.nanmean(cpd_temp, axis=0))

            cpd_perm_p.append(np.percentile(cpd_perm, 95, axis=0))

    if perm:  # Evaluate p-values.
        cpd_perm_pval = np.mean(cpd_perm_p, 0)[0]

    C = np.concatenate(C, 1)
    cpd = np.nanmean(np.concatenate(cpd, 0), axis=0)

    plt.figure()
    pred = list(predictors_all.keys())

    array_pvals = np.ones((cpd.shape[0], cpd.shape[1]))

    for i in range(cpd.shape[1]):
        array_pvals[(np.where(cpd[:, i] > cpd_perm_pval[:, i])[0]), i] = 0.05

    ymax = np.max(cpd[:, 2:-1].T)
    t = np.arange(cpd.shape[0])  # Timepoint axis.
    c = wes.Darjeeling2_5.mpl_colors + wes.Mendl_4.mpl_colors + wes.GrandBudapest1_4.mpl_colors + wes.Moonrise1_5.mpl_colors

    for i in range(2, cpd.shape[1] - 1):
        plt.plot(cpd[:, i], label=pred[i], color=c[i])
        y = ymax * (1 + 0.04 * i)
        p_vals = array_pvals[:, i]
        t05 = t[p_vals == 0.05]
        plt.plot(t05, np.ones(t05.shape) * y, '.', markersize=5, color=c[i])

    plt.legend()
    plt.ylabel('CPD')
    plt.xlabel('Time in Trial')
    plt.xticks([24, 35, 42], ['I', 'C', 'R'])
    sns.despine()
    plt.title(area)
def sequence_rewards_errors_regression(data, perm=True):
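    """Regression of firing on reward, choice, state and cumulative
    error/reward run lengths. Returns indices of neurons with high average
    loadings (|t| > 2.5 over the first 20 timepoints) on the cumulative
    error and reward regressors. `perm` is currently unused.
    """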

    dm = data['DM'][0]
    firing = data['Data'][0]
    C = []
    cpd = []

    for s, DM in enumerate(dm):
        firing_rates = firing[s]
        n_trials, n_neurons, n_timepoints = firing_rates.shape

        choices = DM[:, 1]
        reward = DM[:, 2]
        state = DM[:, 0]

        # Run length of consecutive errors (resets on reward).
        cum_error = []
        err = 0
        for r in reward:
            if r == 0:
                err += 1
            else:
                err = 0
            cum_error.append(err)

        # Run length of consecutive rewards (resets on error).
        cum_reward = []
        rew = 0
        for r in reward:
            if r == 1:
                rew += 1
            else:
                rew = 0
            cum_reward.append(rew)
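        # e.g. reward [0, 0, 1, 1, 0] -> cum_error  [1, 2, 0, 0, 1],
        #                                cum_reward [0, 0, 1, 2, 0]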

        ones = np.ones(len(reward))

        predictors_all = OrderedDict([('Reward', reward), ('Choice', choices),
                                      ('State', state), ('Errors', cum_error),
                                      ('Rewards', cum_reward), ('ones', ones)])

        X = np.vstack(
            predictors_all.values()).T[:len(choices), :].astype(float)
        n_predictors = X.shape[1]
        y = firing_rates.reshape(
            [len(firing_rates),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y, X)

        C.append(tstats.reshape(n_predictors, n_neurons,
                                n_timepoints))  # Predictor loadings
        cpd.append(
            re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))

    cpd = np.nanmean(np.concatenate(cpd, 0), axis=0)
    C = np.concatenate(C, 1)

    # Neurons whose mean |t| over the first 20 timepoints exceeds 2.5.
    high_loadings_rewards = np.where(abs(np.mean(C[4, :, :20], 1)) > 2.5)[0]
    high_loadings_errors = np.where(abs(np.mean(C[3, :, :20], 1)) > 2.5)[0]

    return high_loadings_errors, high_loadings_rewards
def regression_latent_state(experiment, experiment_sim_Q4_values):  
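    """Per-task regressions of aligned firing on choice, reward, simulated
    Q4 values and their interaction.

    Assumes `experiment` is a list of session objects exposing
    aligned_rates and trial_data, and `experiment_sim_Q4_values` holds
    per-session simulated Q4 value arrays. Returns t-statistics, OLS
    coefficients and CPDs for each of the three tasks.
    """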
    
    C_1 = []
    C_coef = []
    cpd_1 = []
    
    # Regression coefficients for task 1.
    for s, session in enumerate(experiment):
        aligned_spikes = session.aligned_rates[:]
        if aligned_spikes.shape[1] > 0:  # Only sessions with neurons.
            n_trials, n_neurons, n_timepoints = aligned_spikes.shape 
            
            #aligned_spikes = np.mean(aligned_spikes, axis =  2) 
            
            # Task indices.
            task = session.trial_data['task']
            forced_trials = session.trial_data['forced_trial']
            non_forced_array = np.where(forced_trials == 0)[0]
            task_non_forced = task[non_forced_array]
            task_1 = np.where(task_non_forced == 1)[0]
            task_2 = np.where(task_non_forced == 2)[0]    
            predictor_A_Task_1, predictor_A_Task_2, predictor_A_Task_3,\
            predictor_B_Task_1, predictor_B_Task_2, predictor_B_Task_3, reward,\
            predictor_a_good_task_1,predictor_a_good_task_2, predictor_a_good_task_3 = re.predictors_pokes(session)    
            # Behavioural variables and simulated Q4 values.
            Q4 = experiment_sim_Q4_values[s]
            forced_trials = session.trial_data['forced_trial']
            outcomes = session.trial_data['outcomes']

            choices = session.trial_data['choices']
            non_forced_array = np.where(forced_trials == 0)[0]
                       
            choices = choices[non_forced_array]
            Q4 = Q4[non_forced_array]
            aligned_spikes = aligned_spikes[:len(choices),:]
            outcomes = outcomes[non_forced_array]

            # Restrict everything to task 1 trials; latent_state codes
            # which side is currently good (-1 when A is good).
            ones = np.ones(len(choices))
            choices = choices[:len(task_1)]
            outcomes = outcomes[:len(task_1)]
            latent_state = np.ones(len(task_1))
            latent_state[predictor_a_good_task_1] = -1
            ones = ones[:len(task_1)]
            aligned_spikes = aligned_spikes[:len(task_1)]
            Q4 = Q4[:len(task_1)]
            choice_Q4 = choices*Q4


            predictors = OrderedDict([#('latent_state',latent_state), 
                                      ('choice', choices),
                                      ('reward', outcomes),
                                      ('Q4', Q4),
                                      ('choice_Q4',choice_Q4),
                                      ('ones', ones)])
        
           
            X = np.vstack(predictors.values()).T[:len(choices),:].astype(float)
            n_predictors = X.shape[1]
            y = aligned_spikes.reshape([len(aligned_spikes),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]
            tstats = reg_f.regression_code(y, X)
            ols = LinearRegression(copy_X=True, fit_intercept=True)
            ols.fit(X, y)
            # ols.coef_ is (n_neurons*n_timepoints, n_predictors), so time
            # comes before predictors in the reshape.
            C_coef.append(ols.coef_.reshape(n_neurons, n_timepoints, n_predictors))  # Predictor loadings
            C_1.append(tstats.reshape(n_predictors, n_neurons, n_timepoints))  # Predictor loadings
            cpd_1.append(re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))

    C_1 = np.concatenate(C_1, axis=1)  # Stack neurons across sessions.
    C_coef = np.concatenate(C_coef, axis=0)
    cpd_1 = np.nanmean(np.concatenate(cpd_1, 0), axis=0)  # Population CPD: mean over neurons.

    C_2 = []
    C_coef_2 = []
    cpd_2 = []

    # Regression coefficients for task 2.
    for s, session in enumerate(experiment):
        aligned_spikes = session.aligned_rates[:]
        if aligned_spikes.shape[1] > 0:  # Only sessions with neurons.
            n_trials, n_neurons, n_timepoints = aligned_spikes.shape 
            #aligned_spikes = np.mean(aligned_spikes, axis =  2) 
            Q4 = experiment_sim_Q4_values[s]

            # Task indices.
            task = session.trial_data['task']
            forced_trials = session.trial_data['forced_trial']
            non_forced_array = np.where(forced_trials == 0)[0]
            task_non_forced = task[non_forced_array]
            task_1 = np.where(task_non_forced == 1)[0]
            task_2 = np.where(task_non_forced == 2)[0]    
            
            predictor_A_Task_1, predictor_A_Task_2, predictor_A_Task_3,\
            predictor_B_Task_1, predictor_B_Task_2, predictor_B_Task_3, reward,\
            predictor_a_good_task_1,predictor_a_good_task_2, predictor_a_good_task_3 = re.predictors_pokes(session)    

            # Behavioural variables.
            forced_trials = session.trial_data['forced_trial']
            outcomes = session.trial_data['outcomes']

            choices = session.trial_data['choices']
            non_forced_array = np.where(forced_trials == 0)[0]
            Q4 = Q4[non_forced_array]

            
            choices = choices[non_forced_array]
            aligned_spikes = aligned_spikes[:len(choices),:]
            outcomes = outcomes[non_forced_array]

            ones = np.ones(len(choices))

            # Restrict everything to task 2 trials.
            choices = choices[len(task_1):len(task_1) + len(task_2)]
            latent_state = np.ones(len(choices))
            latent_state[predictor_a_good_task_2] = -1
            
            outcomes = outcomes[len(task_1):len(task_1)+len(task_2)]
            ones = ones[len(task_1):len(task_1)+len(task_2)]
            aligned_spikes = aligned_spikes[len(task_1):len(task_1)+len(task_2)]
            Q4 = Q4[len(task_1):len(task_1)+len(task_2)]
            choice_Q4 = choices*Q4

            predictors = OrderedDict([#('latent_state',latent_state),
                                      ('choice', choices),
                                      ('reward', outcomes),
                                      ('Q4',Q4),
                                      ('choice_Q4',choice_Q4),
                                      ('ones', ones)])
        
           
            X = np.vstack(predictors.values()).T[:len(choices),:].astype(float)
            n_predictors = X.shape[1]
            y = aligned_spikes.reshape([len(aligned_spikes),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]
            tstats = reg_f.regression_code(y, X)
            C_2.append(tstats.reshape(n_predictors,n_neurons,n_timepoints)) # Predictor loadings
            
            ols = LinearRegression(copy_X=True, fit_intercept=True)
            ols.fit(X, y)
            # ols.coef_ is (n_neurons*n_timepoints, n_predictors).
            C_coef_2.append(ols.coef_.reshape(n_neurons, n_timepoints, n_predictors))  # Predictor loadings
            cpd_2.append(re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))


    C_2 = np.concatenate(C_2, axis=1)  # Stack neurons across sessions.
    C_coef_2 = np.concatenate(C_coef_2, axis=0)
    cpd_2 = np.nanmean(np.concatenate(cpd_2, 0), axis=0)  # Population CPD: mean over neurons.

    C_3 = []
    C_coef_3 = []
    cpd_3 = []
    # Regression coefficients for task 3.
    for s, session in enumerate(experiment):
        aligned_spikes = session.aligned_rates[:]
        if aligned_spikes.shape[1] > 0:  # Only sessions with neurons.
            n_trials, n_neurons, n_timepoints = aligned_spikes.shape 
            #aligned_spikes = np.mean(aligned_spikes, axis =  2) 

            
            # Task indices.
            task = session.trial_data['task']
            forced_trials = session.trial_data['forced_trial']
            non_forced_array = np.where(forced_trials == 0)[0]
            task_non_forced = task[non_forced_array]
            task_1 = np.where(task_non_forced == 1)[0]
            task_2 = np.where(task_non_forced == 2)[0]    
            Q4 = experiment_sim_Q4_values[s]

            predictor_A_Task_1, predictor_A_Task_2, predictor_A_Task_3,\
            predictor_B_Task_1, predictor_B_Task_2, predictor_B_Task_3, reward,\
            predictor_a_good_task_1,predictor_a_good_task_2, predictor_a_good_task_3 = re.predictors_pokes(session)    


            # Behavioural variables.
            forced_trials = session.trial_data['forced_trial']
            outcomes = session.trial_data['outcomes']

            choices = session.trial_data['choices']
            non_forced_array = np.where(forced_trials == 0)[0]
            
            Q4 = Q4[non_forced_array]
            choices = choices[non_forced_array]
            aligned_spikes = aligned_spikes[:len(choices),:]
            outcomes = outcomes[non_forced_array]

            ones = np.ones(len(choices))

            # Restrict everything to task 3 trials.
            choices = choices[len(task_1) + len(task_2):]
            latent_state = np.ones(len(choices))
            latent_state[predictor_a_good_task_3] = -1
            
            outcomes = outcomes[len(task_1)+len(task_2):]
            ones = ones[len(task_1)+len(task_2):]
            Q4 = Q4[len(task_1)+len(task_2):]
            choice_Q4 = choices*Q4
            aligned_spikes = aligned_spikes[len(task_1)+len(task_2):]
            
            predictors = OrderedDict([#('latent_state', latent_state),
                                      ('choice', choices),
                                      ('reward', outcomes),
                                      ('Q4', Q4),
                                      ('choice_Q4',choice_Q4),
                                      ('ones', ones)])
        
           
            X = np.vstack(predictors.values()).T[:len(choices),:].astype(float)
            n_predictors = X.shape[1]
            y = aligned_spikes.reshape([len(aligned_spikes),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]
            tstats = reg_f.regression_code(y, X)

            C_3.append(tstats.reshape(n_predictors,n_neurons,n_timepoints)) # Predictor loadings
            
            ols = LinearRegression(copy_X=True, fit_intercept=True)
            ols.fit(X, y)
            C_coef_3.append(ols.coef_.reshape(n_neurons, n_timepoints, n_predictors))  # Predictor loadings
            cpd_3.append(re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))


    C_3 = np.concatenate(C_3, axis=1)  # Stack neurons across sessions.
    C_coef_3 = np.concatenate(C_coef_3, axis=0)
    cpd_3 = np.nanmean(np.concatenate(cpd_3, 0), axis=0)  # Population CPD: mean over neurons.
    
    return C_1, C_2, C_3, C_coef, C_coef_2, C_coef_3, cpd_1, cpd_2, cpd_3, predictors
def sequence_rewards_errors_regression_generalisation(data,
                                                      perm=True,
                                                      area='HP_',
                                                      interactions=True,
                                                      a=True):
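
    """Between-task generalisation of cumulative reward/error run coding.

    Fits within-task regressions on cumulative reward and error run
    lengths, then asks how well epoch-averaged coefficients from one task
    predict loadings in the other two, against a circular-shift
    permutation null. Note that `perm` is used as the number of
    permutations via range(perm), so the default True yields a single
    permutation.
    """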

    dm = data['DM'][0]
    firing = data['Data'][0]
    C_1 = []
    cpd_1 = []

    C_2 = []
    cpd_2 = []

    C_3 = []
    cpd_3 = []
    if perm:

        C_1_perm = [[] for i in range(perm)]

        C_2_perm = [[] for i in range(perm)]

        C_3_perm = [[] for i in range(perm)]

        cpd_perm_all = [[] for i in range(perm)]

    for s, DM in enumerate(dm):
        runs_list = [0]
        firing_rates = firing[s]
        n_trials, n_neurons, n_timepoints = firing_rates.shape

        choices = DM[:, 1]
        # choices[np.where(choices ==0)[0]] = -1

        reward = DM[:, 2]

        task = DM[:, 5]
        task_1 = np.where(task == 1)[0]
        task_2 = np.where(task == 2)[0]
        task_3 = np.where(task == 3)[0]
        state = DM[:, 0]

        correct = np.where(state == choices)[0]
        incorrect = np.where(state != choices)[0]

        cum_error = []
        runs_list_corr = []
        runs_list_incorr = []

        # Run length of consecutive errors (resets on reward).
        err = 0
        for r in reward:
            if r == 0:
                err += 1
            else:
                err = 0
            cum_error.append(err)

        # Run length of consecutive rewards (resets on error).
        cum_reward = []
        rew = 0
        for r in reward:
            if r == 1:
                rew += 1
            else:
                rew = 0
            cum_reward.append(rew)

        # Run length of consecutive same-side choices.
        run = 0
        for c in range(1, len(choices)):
            if choices[c] == choices[c - 1]:
                run += 1
            else:
                run = 0
            runs_list.append(run)

        # Run lengths of repeated choices on correct trials.
        corr_run = 0
        run_ind_c = []
        for c in range(len(choices)):
            if c > 0 and c in correct:
                if choices[c] == choices[c - 1]:
                    if corr_run == 0:
                        run_ind_c.append(c)
                    corr_run += 1
                else:
                    corr_run = 0
            else:
                corr_run = 0
            runs_list_corr.append(corr_run)

        # Run lengths of repeated choices on incorrect trials.
        incorr_run = 0
        run_ind_inc = []
        for c in range(len(choices)):
            if c > 0 and c in incorrect:
                if choices[c] == choices[c - 1]:
                    if incorr_run == 0:
                        run_ind_inc.append(c)
                    incorr_run += 1
                else:
                    incorr_run = 0
            else:
                incorr_run = 0
            runs_list_incorr.append(incorr_run)

        choices_a = np.where(choices == 1)[0]
        choices_b = np.where(choices == 0)[0]

        # Choice-specific cumulative counts: zero out the other side's trials.
        a_cum_rew = np.asarray(cum_reward).copy()
        b_cum_rew = np.asarray(cum_reward).copy()
        a_cum_rew[choices_b] = 0
        b_cum_rew[choices_a] = 0

        a_cum_error = np.asarray(cum_error).copy()
        b_cum_error = np.asarray(cum_error).copy()
        a_cum_error[choices_b] = 0
        b_cum_error[choices_a] = 0

        ones = np.ones(len(reward))
        reward_1 = reward[task_1]
        choices_1 = choices[task_1]
        cum_error_1 = np.asarray(cum_error)[task_1]
        cum_reward_1 = np.asarray(cum_reward)[task_1]

        cum_error_1_a = np.asarray(a_cum_error)[task_1]
        cum_error_1_b = np.asarray(b_cum_error)[task_1]

        cum_reward_1_a = np.asarray(a_cum_rew)[task_1]
        cum_reward_1_b = np.asarray(b_cum_rew)[task_1]

        ones_1 = ones[task_1]
        cum_error_1_ch = cum_error_1 * choices_1
        cum_rew_1_ch = cum_reward_1 * choices_1
        firing_rates_1 = firing_rates[task_1]

        int_rew_ch_1 = reward_1 * choices_1
        if interactions == True:
            predictors_all = OrderedDict([
                ('Reward', reward_1), ('Choice', choices_1),
                ('Errors', cum_error_1), ('Rewards', cum_reward_1),
                ('Choice x Cum Error', cum_error_1_ch),
                ('Choice x Cum Reward', cum_rew_1_ch),
                ('Choice x Reward', int_rew_ch_1), ('ones', ones_1)
            ])
        else:
            predictors_all = OrderedDict([('Reward', reward_1),
                                          ('Choice', choices_1),
                                          ('Errors A', cum_error_1_a),
                                          ('Errors B', cum_error_1_b),
                                          ('Rewards A', cum_reward_1_a),
                                          ('Rewards B', cum_reward_1_b),
                                          ('Choice x Reward', int_rew_ch_1),
                                          ('ones', ones_1)])

        X_1 = np.vstack(
            predictors_all.values()).T[:len(choices_1), :].astype(float)
        rank = np.linalg.matrix_rank(X_1)
        n_predictors = X_1.shape[1]
        if rank < n_predictors:
            print('Design matrix rank deficient: rank', rank, 'of', n_predictors)
        y_1 = firing_rates_1.reshape(
            [len(firing_rates_1),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y_1, X_1)

        C_1.append(tstats.reshape(n_predictors, n_neurons,
                                  n_timepoints))  # Predictor loadings
        cpd_1.append(
            re._CPD(X_1, y_1).reshape(n_neurons, n_timepoints, n_predictors))

        reward_2 = reward[task_2]
        choices_2 = choices[task_2]
        cum_error_2 = np.asarray(cum_error)[task_2]
        cum_reward_2 = np.asarray(cum_reward)[task_2]
        ones_2 = ones[task_2]
        cum_error_2_ch = cum_error_2 * choices_2
        cum_rew_2_ch = cum_reward_2 * choices_2
        int_rew_ch_2 = reward_2 * choices_2
        cum_error_2_a = np.asarray(a_cum_error)[task_2]
        cum_error_2_b = np.asarray(b_cum_error)[task_2]

        cum_reward_2_a = np.asarray(a_cum_rew)[task_2]
        cum_reward_2_b = np.asarray(b_cum_rew)[task_2]

        firing_rates_2 = firing_rates[task_2]
        if interactions == True:

            predictors_all = OrderedDict([
                ('Reward', reward_2), ('Choice', choices_2),
                ('Errors', cum_error_2), ('Rewards', cum_reward_2),
                ('Choice x Cum Error', cum_error_2_ch),
                ('Choice x Cum Reward', cum_rew_2_ch),
                ('Choice x Reward', int_rew_ch_2), ('ones', ones_2)
            ])
        else:
            predictors_all = OrderedDict([('Reward', reward_2),
                                          ('Choice', choices_2),
                                          ('Errors A', cum_error_2_a),
                                          ('Errors B', cum_error_2_b),
                                          ('Rewards A', cum_reward_2_a),
                                          ('Rewards B', cum_reward_2_b),
                                          ('Choice x Reward', int_rew_ch_2),
                                          ('ones', ones_2)])

        X_2 = np.vstack(
            predictors_all.values()).T[:len(choices_2), :].astype(float)
        y_2 = firing_rates_2.reshape(
            [len(firing_rates_2),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y_2, X_2)

        C_2.append(tstats.reshape(n_predictors, n_neurons,
                                  n_timepoints))  # Predictor loadings
        cpd_2.append(
            re._CPD(X_2, y_2).reshape(n_neurons, n_timepoints, n_predictors))

        reward_3 = reward[task_3]
        choices_3 = choices[task_3]
        cum_error_3 = np.asarray(cum_error)[task_3]
        cum_reward_3 = np.asarray(cum_reward)[task_3]
        ones_3 = ones[task_3]
        cum_error_3_ch = cum_error_3 * choices_3
        cum_rew_3_ch = cum_reward_3 * choices_3
        int_rew_ch_3 = reward_3 * choices_3
        cum_error_3_a = np.asarray(a_cum_error)[task_3]
        cum_error_3_b = np.asarray(b_cum_error)[task_3]

        cum_reward_3_a = np.asarray(a_cum_rew)[task_3]
        cum_reward_3_b = np.asarray(b_cum_rew)[task_3]

        firing_rates_3 = firing_rates[task_3]
        if interactions == True:

            predictors_all = OrderedDict([
                ('Reward', reward_3), ('Choice', choices_3),
                ('Errors', cum_error_3), ('Rewards', cum_reward_3),
                ('Choice x Cum Error', cum_error_3_ch),
                ('Choice x Cum Reward', cum_rew_3_ch),
                ('Choice x Reward', int_rew_ch_3), ('ones', ones_3)
            ])

        else:
            predictors_all = OrderedDict([('Reward', reward_3),
                                          ('Choice', choices_3),
                                          ('Errors A', cum_error_3_a),
                                          ('Errors B', cum_error_3_b),
                                          ('Rewards A', cum_reward_3_a),
                                          ('Rewards B', cum_reward_3_b),
                                          ('Choice x Reward', int_rew_ch_3),
                                          ('ones', ones_3)])

        X_3 = np.vstack(
            predictors_all.values()).T[:len(choices_3), :].astype(float)
        y_3 = firing_rates_3.reshape(
            [len(firing_rates_3),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y_3, X_3)

        C_3.append(tstats.reshape(n_predictors, n_neurons,
                                  n_timepoints))  # Predictor loadings
        cpd_3.append(
            re._CPD(X_3, y_3).reshape(n_neurons, n_timepoints, n_predictors))

        if perm:
            for i in range(perm):
                X_perm_1 = np.roll(X_1, np.random.randint(len(X_1)), axis=0)
                tstats = reg_f.regression_code(y_1, X_perm_1)
                C_1_perm[i].append(
                    tstats.reshape(n_predictors, n_neurons,
                                   n_timepoints))  # Predictor loadings
                cpd_perm_1 = re._CPD(X_perm_1,
                                     y_1).reshape(n_neurons, n_timepoints,
                                                  n_predictors)

                X_perm_2 = np.roll(X_2, np.random.randint(len(X_2)), axis=0)

                tstats = reg_f.regression_code(y_2, X_perm_2)
                C_2_perm[i].append(
                    tstats.reshape(n_predictors, n_neurons,
                                   n_timepoints))  # Predictor loadings
                cpd_perm_2 = re._CPD(X_perm_2,
                                     y_2).reshape(n_neurons, n_timepoints,
                                                  n_predictors)

                X_perm_3 = np.roll(X_3, np.random.randint(len(X_3)), axis=0)

                tstats = reg_f.regression_code(y_3, X_perm_3)
                C_3_perm[i].append(
                    tstats.reshape(n_predictors, n_neurons,
                                   n_timepoints))  # Predictor loadings
                cpd_perm_3 = re._CPD(X_perm_3,
                                     y_3).reshape(n_neurons, n_timepoints,
                                                  n_predictors)

                cpd_perm_all[i].append(
                    np.nanmean([cpd_perm_1, cpd_perm_2, cpd_perm_3], 0))

    cpd_perm_all = np.stack(
        [np.mean(np.concatenate(cpd_i, 0), 0) for cpd_i in cpd_perm_all], 0)

    cpd_1 = np.nanmean(np.concatenate(cpd_1, 0), axis=0)
    C_1 = np.concatenate(C_1, 1)

    cpd_2 = np.nanmean(np.concatenate(cpd_2, 0), axis=0)
    C_2 = np.concatenate(C_2, 1)

    cpd_3 = np.nanmean(np.concatenate(cpd_3, 0), axis=0)
    C_3 = np.concatenate(C_3, 1)

    cpds_true = np.mean([cpd_1, cpd_2, cpd_3], 0)

    pal_c = sns.cubehelix_palette(8, start=2, rot=0, dark=0, light=.95)
    c = wes.Darjeeling2_5.mpl_colors + wes.Mendl_4.mpl_colors + wes.GrandBudapest1_4.mpl_colors + wes.Moonrise1_5.mpl_colors

    cpd_perm_all = np.max(np.percentile(cpd_perm_all, 95, axis=0), 0)
    plt.figure()
    for j, trace in enumerate(cpds_true.T[:-1]):
        plt.plot(trace, color=c[j], label=list(predictors_all.keys())[j])
        plt.hlines(cpd_perm_all[j], xmin=0, xmax=63, color=c[j], linestyle=':')
    plt.legend()
    sns.despine()

    plt.title(area + ' CPDs')

    if interactions == True:
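        # find_coefficients (external helper) is assumed to return a design
        # matrix of epoch-averaged loadings plus per-task firing-rate
        # projections for the reward/error regressors.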
        X_nan, firing_rates_1_inf_rew,firing_rates_2_inf_rew,firing_rates_3_inf_rew,firing_rates_1_inf_error,firing_rates_2_inf_error,\
        firing_rates_3_inf_error,firing_rates_1_inf_rew_int,firing_rates_2_inf_rew_int,firing_rates_3_inf_rew_int,firing_rates_1_inf_error_int,\
           firing_rates_2_inf_error_int,firing_rates_3_inf_error_int = find_coefficients(C_1,C_2,C_3,interactions = interactions, a = a)

        X_1_rew = X_nan[:, :5]
        X_1_rew_int = X_nan[:, 5:10]

        X_1_error = X_nan[:, 10:15]
        X_1_error_int = X_nan[:, 15:20]

        X_2_rew = X_nan[:, 20:25]
        X_2_rew_int = X_nan[:, 25:30]

        X_2_error = X_nan[:, 30:35]
        X_2_error_int = X_nan[:, 35:40]

        X_3_rew = X_nan[:, 40:45]
        X_3_rew_int = X_nan[:, 45:50]

        X_3_error = X_nan[:, 50:55]
        X_3_error_int = X_nan[:, 55:60]

        cpd_1_2_rew = re._CPD(X_1_rew, firing_rates_2_inf_rew)
        cpd_1_3_rew = re._CPD(X_1_rew, firing_rates_3_inf_rew)

        cpd_2_1_rew = re._CPD(X_2_rew, firing_rates_1_inf_rew)
        cpd_2_3_rew = re._CPD(X_2_rew, firing_rates_3_inf_rew)

        cpd_3_1_rew = re._CPD(X_3_rew, firing_rates_1_inf_rew)
        cpd_3_2_rew = re._CPD(X_3_rew, firing_rates_2_inf_rew)

        cpd_1_2_error = re._CPD(X_1_error, firing_rates_2_inf_error)
        cpd_1_3_error = re._CPD(X_1_error, firing_rates_3_inf_error)

        cpd_2_1_error = re._CPD(X_2_error, firing_rates_1_inf_error)
        cpd_2_3_error = re._CPD(X_2_error, firing_rates_3_inf_error)

        cpd_3_1_error = re._CPD(X_3_error, firing_rates_1_inf_error)
        cpd_3_2_error = re._CPD(X_3_error, firing_rates_2_inf_error)

        cpd_1_2_rew_int = re._CPD(X_1_rew_int, firing_rates_2_inf_rew_int)
        cpd_1_3_rew_int = re._CPD(X_1_rew_int, firing_rates_3_inf_rew_int)

        cpd_2_1_rew_int = re._CPD(X_2_rew_int, firing_rates_1_inf_rew_int)
        cpd_2_3_rew_int = re._CPD(X_2_rew_int, firing_rates_3_inf_rew_int)

        cpd_3_1_rew_int = re._CPD(X_3_rew_int, firing_rates_1_inf_rew_int)
        cpd_3_2_rew_int = re._CPD(X_3_rew_int, firing_rates_2_inf_rew_int)

        cpd_1_2_error_int = re._CPD(X_1_error_int,
                                    firing_rates_2_inf_error_int)
        cpd_1_3_error_int = re._CPD(X_1_error_int,
                                    firing_rates_3_inf_error_int)

        cpd_2_1_error_int = re._CPD(X_2_error_int,
                                    firing_rates_1_inf_error_int)
        cpd_2_3_error_int = re._CPD(X_2_error_int,
                                    firing_rates_3_inf_error_int)

        cpd_3_1_error_int = re._CPD(X_3_error_int,
                                    firing_rates_1_inf_error_int)
        cpd_3_2_error_int = re._CPD(X_3_error_int,
                                    firing_rates_2_inf_error_int)

        cpd_rew_int = np.nanmean([
            cpd_1_2_rew_int, cpd_1_3_rew_int, cpd_2_1_rew_int, cpd_2_3_rew_int,
            cpd_3_1_rew_int, cpd_3_2_rew_int
        ], 0)
        cpd_error_int = np.nanmean([
            cpd_1_2_error_int, cpd_1_3_error_int, cpd_2_1_error_int,
            cpd_2_3_error_int, cpd_3_1_error_int, cpd_3_2_error_int
        ], 0)

    else:

        X_nan, firing_rates_1_inf_rew,firing_rates_2_inf_rew,firing_rates_3_inf_rew,firing_rates_1_inf_error,firing_rates_2_inf_error,\
        firing_rates_3_inf_error  = find_coefficients(C_1,C_2,C_3,interactions = interactions, a = a)

        X_1_rew = X_nan[:, :5]

        X_1_error = X_nan[:, 5:10]

        X_2_rew = X_nan[:, 10:15]

        X_2_error = X_nan[:, 15:20]

        X_3_rew = X_nan[:, 20:25]

        X_3_error = X_nan[:, 25:30]

        cpd_1_2_rew = re._CPD(X_1_rew, firing_rates_2_inf_rew)
        cpd_1_3_rew = re._CPD(X_1_rew, firing_rates_3_inf_rew)

        cpd_2_1_rew = re._CPD(X_2_rew, firing_rates_1_inf_rew)
        cpd_2_3_rew = re._CPD(X_2_rew, firing_rates_3_inf_rew)

        cpd_3_1_rew = re._CPD(X_3_rew, firing_rates_1_inf_rew)
        cpd_3_2_rew = re._CPD(X_3_rew, firing_rates_2_inf_rew)

        cpd_1_2_error = re._CPD(X_1_error, firing_rates_2_inf_error)
        cpd_1_3_error = re._CPD(X_1_error, firing_rates_3_inf_error)

        cpd_2_1_error = re._CPD(X_2_error, firing_rates_1_inf_error)
        cpd_2_3_error = re._CPD(X_2_error, firing_rates_3_inf_error)

        cpd_3_1_error = re._CPD(X_3_error, firing_rates_1_inf_error)
        cpd_3_2_error = re._CPD(X_3_error, firing_rates_2_inf_error)

    cpd_rew = np.nanmean([
        cpd_1_2_rew, cpd_1_3_rew, cpd_2_1_rew, cpd_2_3_rew, cpd_3_1_rew,
        cpd_3_2_rew
    ], 0)
    cpd_error = np.nanmean([
        cpd_1_2_error, cpd_1_3_error, cpd_2_1_error, cpd_2_3_error,
        cpd_3_1_error, cpd_3_2_error
    ], 0)

    pal = sns.cubehelix_palette(8)
    pal_c = sns.cubehelix_palette(8, start=2, rot=0, dark=0, light=.95)
    if interactions == True:
        sub = 2
    else:
        sub = 1
    plt.figure()
    plt.subplot(2, sub, 1)
    plt.plot(cpd_rew[:, 0], color=pal[0], label='Pre Init Period')
    plt.plot(cpd_rew[:, 1], color=pal[2], label='Init Period')
    plt.plot(cpd_rew[:, 2], color=pal[4], label='Choice Period')
    plt.plot(cpd_rew[:, 3], color=pal[6], label='Reward Period')
    plt.title(area + ' Between Tasks Reward Runs')
    plt.legend()
    sns.despine()
    # plt.ylim(0,0.18)

    plt.subplot(2, sub, 2)

    plt.plot(cpd_error[:, 0], color=pal_c[0], label='Pre Init Period')
    plt.plot(cpd_error[:, 1], color=pal_c[2], label='Init Period')
    plt.plot(cpd_error[:, 2], color=pal_c[4], label='Choice Period')
    plt.plot(cpd_error[:, 3], color=pal_c[6], label='Reward Period')
    plt.title(area + ' Between Tasks Error Runs')
    # plt.ylim(0,0.06)
    if interactions == True:
        plt.subplot(2, sub, 3)
        plt.plot(cpd_rew_int[:, 0], color=pal[0], label='Pre Init Period')
        plt.plot(cpd_rew_int[:, 1], color=pal[2], label='Init Period')
        plt.plot(cpd_rew_int[:, 2], color=pal[4], label='Choice Period')
        plt.plot(cpd_rew_int[:, 3], color=pal[6], label='Reward Period')
        plt.title(area + ' Between Tasks Reward Runs Interactions with Choice')
        plt.legend()
        sns.despine()
        # plt.ylim(0,0.18)

        plt.subplot(2, sub, 4)

        plt.plot(cpd_error_int[:, 0], color=pal_c[0], label='Pre Init Period')
        plt.plot(cpd_error_int[:, 1], color=pal_c[2], label='Init Period')
        plt.plot(cpd_error_int[:, 2], color=pal_c[4], label='Choice Period')
        plt.plot(cpd_error_int[:, 3], color=pal_c[6], label='Reward Period')
        plt.title(area + ' Between Tasks Error Runs Interactions with Choice')
        # plt.ylim(0,0.06)

    plt.legend()
    sns.despine()

    cpd_rew_perm = []
    cpd_error_perm = []
    cpd_rew_int_perm = []
    cpd_error_int_perm = []

    for i in range(len(C_3_perm)):

        C_1 = np.concatenate(C_1_perm[i], 1)
        C_2 = np.concatenate(C_2_perm[i], 1)
        C_3 = np.concatenate(C_3_perm[i], 1)

        if interactions == True:
            X_nan, firing_rates_1_inf_rew,firing_rates_2_inf_rew,firing_rates_3_inf_rew,firing_rates_1_inf_error,firing_rates_2_inf_error,\
            firing_rates_3_inf_error,firing_rates_1_inf_rew_int,firing_rates_2_inf_rew_int,firing_rates_3_inf_rew_int,firing_rates_1_inf_error_int,\
               firing_rates_2_inf_error_int,firing_rates_3_inf_error_int = find_coefficients(C_1,C_2,C_3,interactions = interactions,  a = a)

            X_1_rew = X_nan[:, :5]
            X_1_rew_int = X_nan[:, 5:10]

            X_1_error = X_nan[:, 10:15]
            X_1_error_int = X_nan[:, 15:20]

            X_2_rew = X_nan[:, 20:25]
            X_2_rew_int = X_nan[:, 25:30]

            X_2_error = X_nan[:, 30:35]
            X_2_error_int = X_nan[:, 35:40]

            X_3_rew = X_nan[:, 40:45]
            X_3_rew_int = X_nan[:, 45:50]

            X_3_error = X_nan[:, 50:55]
            X_3_error_int = X_nan[:, 55:60]

            cpd_1_2_rew = re._CPD(X_1_rew, firing_rates_2_inf_rew)
            cpd_1_3_rew = re._CPD(X_1_rew, firing_rates_3_inf_rew)

            cpd_2_1_rew = re._CPD(X_2_rew, firing_rates_1_inf_rew)
            cpd_2_3_rew = re._CPD(X_2_rew, firing_rates_3_inf_rew)

            cpd_3_1_rew = re._CPD(X_3_rew, firing_rates_1_inf_rew)
            cpd_3_2_rew = re._CPD(X_3_rew, firing_rates_2_inf_rew)

            cpd_1_2_error = re._CPD(X_1_error, firing_rates_2_inf_error)
            cpd_1_3_error = re._CPD(X_1_error, firing_rates_3_inf_error)

            cpd_2_1_error = re._CPD(X_2_error, firing_rates_1_inf_error)
            cpd_2_3_error = re._CPD(X_2_error, firing_rates_3_inf_error)

            cpd_3_1_error = re._CPD(X_3_error, firing_rates_1_inf_error)
            cpd_3_2_error = re._CPD(X_3_error, firing_rates_2_inf_error)

            cpd_1_2_rew_int = re._CPD(X_1_rew_int, firing_rates_2_inf_rew_int)
            cpd_1_3_rew_int = re._CPD(X_1_rew_int, firing_rates_3_inf_rew_int)

            cpd_2_1_rew_int = re._CPD(X_2_rew_int, firing_rates_1_inf_rew_int)
            cpd_2_3_rew_int = re._CPD(X_2_rew_int, firing_rates_3_inf_rew_int)

            cpd_3_1_rew_int = re._CPD(X_3_rew_int, firing_rates_1_inf_rew_int)
            cpd_3_2_rew_int = re._CPD(X_3_rew_int, firing_rates_2_inf_rew_int)

            cpd_1_2_error_int = re._CPD(X_1_error_int,
                                        firing_rates_2_inf_error_int)
            cpd_1_3_error_int = re._CPD(X_1_error_int,
                                        firing_rates_3_inf_error_int)

            cpd_2_1_error_int = re._CPD(X_2_error_int,
                                        firing_rates_1_inf_error_int)
            cpd_2_3_error_int = re._CPD(X_2_error_int,
                                        firing_rates_3_inf_error_int)

            cpd_3_1_error_int = re._CPD(X_3_error_int,
                                        firing_rates_1_inf_error_int)
            cpd_3_2_error_int = re._CPD(X_3_error_int,
                                        firing_rates_2_inf_error_int)

            cpd_rew_int_perm.append(
                np.nanmean([
                    cpd_1_2_rew_int, cpd_1_3_rew_int, cpd_2_1_rew_int,
                    cpd_2_3_rew_int, cpd_3_1_rew_int, cpd_3_2_rew_int
                ], 0))
            cpd_error_int_perm.append(
                np.nanmean([
                    cpd_1_2_error_int, cpd_1_3_error_int, cpd_2_1_error_int,
                    cpd_2_3_error_int, cpd_3_1_error_int, cpd_3_2_error_int
                ], 0))

        else:

            X_nan, firing_rates_1_inf_rew,firing_rates_2_inf_rew,firing_rates_3_inf_rew,firing_rates_1_inf_error,firing_rates_2_inf_error,\
            firing_rates_3_inf_error  = find_coefficients(C_1,C_2,C_3,interactions = interactions,  a = a)

            X_1_rew = X_nan[:, :5]

            X_1_error = X_nan[:, 5:10]

            X_2_rew = X_nan[:, 10:15]

            X_2_error = X_nan[:, 15:20]

            X_3_rew = X_nan[:, 20:25]

            X_3_error = X_nan[:, 25:30]

            cpd_1_2_rew = re._CPD(X_1_rew, firing_rates_2_inf_rew)
            cpd_1_3_rew = re._CPD(X_1_rew, firing_rates_3_inf_rew)

            cpd_2_1_rew = re._CPD(X_2_rew, firing_rates_1_inf_rew)
            cpd_2_3_rew = re._CPD(X_2_rew, firing_rates_3_inf_rew)

            cpd_3_1_rew = re._CPD(X_3_rew, firing_rates_1_inf_rew)
            cpd_3_2_rew = re._CPD(X_3_rew, firing_rates_2_inf_rew)

            cpd_1_2_error = re._CPD(X_1_error, firing_rates_2_inf_error)
            cpd_1_3_error = re._CPD(X_1_error, firing_rates_3_inf_error)

            cpd_2_1_error = re._CPD(X_2_error, firing_rates_1_inf_error)
            cpd_2_3_error = re._CPD(X_2_error, firing_rates_3_inf_error)

            cpd_3_1_error = re._CPD(X_3_error, firing_rates_1_inf_error)
            cpd_3_2_error = re._CPD(X_3_error, firing_rates_2_inf_error)

        cpd_rew_perm.append(
            np.nanmean([
                cpd_1_2_rew, cpd_1_3_rew, cpd_2_1_rew, cpd_2_3_rew,
                cpd_3_1_rew, cpd_3_2_rew
            ], 0))
        cpd_error_perm.append(
            np.nanmean([
                cpd_1_2_error, cpd_1_3_error, cpd_2_1_error, cpd_2_3_error,
                cpd_3_1_error, cpd_3_2_error
            ], 0))

    cpd_rew_perm = np.max(np.percentile(cpd_rew_perm, 95, axis=0), 0)  # 95th percentile across permutations,
    cpd_error_perm = np.max(np.percentile(cpd_error_perm, 95, axis=0), 0)  # then max over the remaining axis: one threshold per trial period

    plt.subplot(2, sub, 1)
    plt.hlines(cpd_rew_perm[0],
               xmin=0,
               xmax=63,
               color=pal[0],
               label='Pre Init Period',
               linestyle=':')
    plt.hlines(cpd_rew_perm[1],
               xmin=0,
               xmax=63,
               color=pal[2],
               label='Init Period',
               linestyle=':')
    plt.hlines(cpd_rew_perm[2],
               xmin=0,
               xmax=63,
               color=pal[4],
               label='Choice Period',
               linestyle=':')
    plt.hlines(cpd_rew_perm[3],
               xmin=0,
               xmax=63,
               color=pal[6],
               label='Reward Period',
               linestyle=':')
    plt.legend()
    sns.despine()

    plt.subplot(2, sub, 2)

    plt.hlines(cpd_error_perm[0],
               xmin=0,
               xmax=63,
               color=pal_c[0],
               label='Pre Init Period',
               linestyle=':')
    plt.hlines(cpd_error_perm[1],
               xmin=0,
               xmax=63,
               color=pal_c[2],
               label='Init Period',
               linestyle=':')
    plt.hlines(cpd_error_perm[2],
               xmin=0,
               xmax=63,
               color=pal_c[4],
               label='Choice Period',
               linestyle=':')
    plt.hlines(cpd_error_perm[3],
               xmin=0,
               xmax=63,
               color=pal_c[6],
               label='Reward Period',
               linestyle=':')
    plt.legend()
    sns.despine()

    if interactions:

        cpd_rew_int_perm = np.max(np.percentile(cpd_rew_int_perm, 95, axis=0),
                                  0)
        cpd_error_int_perm = np.max(
            np.percentile(cpd_error_int_perm, 95, axis=0), 0)

        plt.subplot(2, sub, 3)
        plt.hlines(cpd_rew_int_perm[0],
                   xmin=0,
                   xmax=63,
                   color=pal[0],
                   label='Pre Init Period',
                   linestyle=':')
        plt.hlines(cpd_rew_int_perm[1],
                   xmin=0,
                   xmax=63,
                   color=pal[2],
                   label='Init Period',
                   linestyle=':')
        plt.hlines(cpd_rew_int_perm[2],
                   xmin=0,
                   xmax=63,
                   color=pal[4],
                   label='Choice Period',
                   linestyle=':')
        plt.hlines(cpd_rew_int_perm[3],
                   xmin=0,
                   xmax=63,
                   color=pal[6],
                   label='Reward Period',
                   linestyle=':')
        plt.legend()
        sns.despine()
        plt.subplot(2, sub, 4)

        plt.hlines(cpd_error_int_perm[0],
                   xmin=0,
                   xmax=63,
                   color=pal_c[0],
                   label='Pre Init Period',
                   linestyle=':')
        plt.hlines(cpd_error_int_perm[1],
                   xmin=0,
                   xmax=63,
                   color=pal_c[2],
                   label='Init Period',
                   linestyle=':')
        plt.hlines(cpd_error_int_perm[2],
                   xmin=0,
                   xmax=63,
                   color=pal_c[4],
                   label='Choice Period',
                   linestyle=':')
        plt.hlines(cpd_error_int_perm[3],
                   xmin=0,
                   xmax=63,
                   color=pal_c[6],
                   label='Reward Period',
                   linestyle=':')
        plt.legend()
        sns.despine()
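# For reference, a minimal sketch of what a coefficient-of-partial-determination
# helper like re._CPD (used throughout) might compute. This is an assumption
# about its behaviour, not the actual module: the CPD of predictor i is the
# fractional drop in residual sum of squares when column i is added back into
# the regression.
import numpy as np

def _CPD_sketch(X, y):
    # X: [n_trials, n_predictors]; y: [n_trials, n_outputs]
    # (n_outputs = n_neurons * n_timepoints in the functions above).
    # Returns CPD with shape [n_outputs, n_predictors].
    beta_full, _, _, _ = np.linalg.lstsq(X, y, rcond=None)
    sse_full = np.sum((y - X @ beta_full) ** 2, axis=0)
    cpd = np.zeros((y.shape[1], X.shape[1]))
    for i in range(X.shape[1]):
        X_reduced = np.delete(X, i, axis=1)  # model without predictor i
        beta_red, _, _, _ = np.linalg.lstsq(X_reduced, y, rcond=None)
        sse_red = np.sum((y - X_reduced @ beta_red) ** 2, axis=0)
        cpd[:, i] = (sse_red - sse_full) / sse_red
    return cpd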
Example #7
def regression_RPE(data, perm=1000):  # perm: number of circular-shift permutations; set to 0/False to skip

    C = []
    cpd = []

    dm = data['DM']
    firing = data['Data']

    if perm:
        C_perm = [[] for i in range(perm)]  # To store permuted predictor loadings for each session.
        cpd_perm = [[] for i in range(perm)]  # To store permuted cpd for each session.

    for s, sess in enumerate(dm):
        DM = dm[s]
        firing_rates = firing[s]
        n_trials, n_neurons, n_timepoints = firing_rates.shape
        choices = DM[:, 1]
        reward = DM[:, 2]
        # state = DM[:,0]
        rpe = DM[:, 14]
        q1 = DM[:, 9]
        q4 = DM[:, 10]
        rand = np.random.normal(np.mean(q4), np.std(q4), len(q4))
        ones = np.ones(len(rpe))
        trials = len(ones)

        predictors_all = OrderedDict([
            ('Reward', reward),
            ('Choice', choices),
            #('State', state),
            ('RPE', rpe),
            #('Q4', q4),
            #('Q1', q1),
            #('Noise', rand),
            ('ones', ones)
        ])

        X = np.vstack(
            predictors_all.values()).T[:len(choices), :].astype(float)
        n_predictors = X.shape[1]
        y = firing_rates.reshape(
            [len(firing_rates),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y, X)

        C.append(tstats.reshape(n_predictors, n_neurons,
                                n_timepoints))  # Predictor loadings
        cpd.append(
            re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))

        if perm:
            for i in range(perm):
                X_perm = np.roll(X, np.random.randint(trials), axis=0)
                tstats = reg_f.regression_code(y, X_perm)

                C_perm[i].append(
                    tstats.reshape(n_predictors, n_neurons,
                                   n_timepoints))  # Predictor loadings
                cpd_perm[i].append(
                    re._CPD(X_perm, y).reshape(n_neurons, n_timepoints,
                                               n_predictors))

    if perm:  # Evaluate P values.
        cpd_perm = np.stack(
            [np.nanmean(np.concatenate(cpd_i, 0), 0) for cpd_i in cpd_perm], 0)
        p = np.percentile(cpd_perm, 95, axis=0)
    else:
        cpd_perm, p = None, None  # avoid returning undefined names when perm is falsy

    cpd = np.nanmean(np.concatenate(cpd, 0), axis=0)
    C = np.concatenate(C, 1)

    return cpd_perm, p, cpd, C, predictors_all
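# Hypothetical usage of the circular-shift permutation test above (names and
# data layout assumed): rolling X by a random number of trials preserves each
# predictor's autocorrelation while breaking its alignment with firing, so CPD
# values above the 95th percentile of the permuted CPDs are unlikely under the
# null.
# cpd_perm, p, cpd, C, predictors = regression_RPE(data, perm=500)
# significant = cpd > p  # [n_timepoints, n_predictors] boolean mask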
def regression_time_choices_rewards_b_blocks_diff_tasks(data_HP, data_PFC, experiment_aligned_HP, experiment_aligned_PFC, start=0, end=20, HP=True):
    
    C_1 = []
    C_2 = []
    C_3 = []


    a_a_matrix_t_1_list, b_b_matrix_t_1_list,\
    a_a_matrix_t_2_list, b_b_matrix_t_2_list,\
    a_a_matrix_t_3_list, b_b_matrix_t_3_list,\
    a_a_matrix_t_1_list_rewards, b_b_matrix_t_1_list_rewards,\
    a_a_matrix_t_2_list_rewards, b_b_matrix_t_2_list_rewards,\
    a_a_matrix_t_3_list_rewards, b_b_matrix_t_3_list_rewards,\
    a_a_matrix_t_1_list_choices, b_b_matrix_t_1_list_choices,\
    a_a_matrix_t_2_list_choices, b_b_matrix_t_2_list_choices,\
    a_a_matrix_t_3_list_choices, b_b_matrix_t_3_list_choices = ch_rew_align.hieararchies_extract_rewards_choices(data_HP, data_PFC,experiment_aligned_HP, experiment_aligned_PFC,start = start, end = end, HP = HP)
    
    for s, session in enumerate(b_b_matrix_t_1_list):
        
        firing_rates_1 = b_b_matrix_t_1_list[s]
        firing_rates_2 = b_b_matrix_t_2_list[s]
        firing_rates_3 = b_b_matrix_t_3_list[s]
        n_neurons = firing_rates_1.shape[0]
        
        rewards_b_1 = b_b_matrix_t_1_list_rewards[s]
        rewards_b_2 = b_b_matrix_t_2_list_rewards[s]
        rewards_b_3 = b_b_matrix_t_3_list_rewards[s]
        #rewards = np.hstack([rewards_a_1,rewards_a_2,rewards_a_3])

        choices_b_1 = b_b_matrix_t_1_list_choices[s]
        choices_b_2 = b_b_matrix_t_2_list_choices[s]
        choices_b_3 = b_b_matrix_t_3_list_choices[s]
        #choices = np.hstack([choices_a_1,choices_a_2,choices_a_3])

        block_length = np.tile(np.arange(session.shape[1] // 2), 2)  # integer division so arange gets an int length
       # trial_number = np.tile(block_length,3)
        ones_1 = np.ones(len(choices_b_1))
        ones_2 = np.ones(len(choices_b_2))
        ones_3 = np.ones(len(choices_b_3))

        # Task 1 
        
        predictors = OrderedDict([('Reward', rewards_b_1),
                                      ('Choice', choices_b_1),
                                      ('Trial Number',block_length),
                                      ('Constant', ones_1)])
                
        
        X = np.vstack(predictors.values()).T[:len(choices_b_1),:].astype(float)
        n_predictors = X.shape[1]
        y = firing_rates_1.reshape([len(firing_rates_1),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]

        tstats = reg_f.regression_code(y.T, X)
        
        C_1.append(tstats.reshape(n_predictors,n_neurons)) # Predictor loadings
        
        # Task 2
        predictors = OrderedDict([('Reward', rewards_b_2),
                                      ('Choice', choices_b_2),
                                      ('Trial Number',block_length),
                                      ('Constant', ones_2)])
                
        
        X = np.vstack(predictors.values()).T[:len(choices_b_2),:].astype(float)
        n_predictors = X.shape[1]
        y = firing_rates_2.reshape([len(firing_rates_2),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]

        tstats = reg_f.regression_code(y.T, X)
        
        C_2.append(tstats.reshape(n_predictors,n_neurons)) # Predictor loadings
       
        
        # Task 3
        predictors = OrderedDict([('Reward', rewards_b_3),
                                      ('Choice', choices_b_3),
                                      ('Trial Number',block_length),
                                      ('Constant', ones_3)])
                
        
        X = np.vstack(predictors.values()).T[:len(choices_b_3),:].astype(float)
        n_predictors = X.shape[1]
        y = firing_rates_3.reshape([len(firing_rates_3),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]

        tstats = reg_f.regression_code(y.T, X)
        
        C_3.append(tstats.reshape(n_predictors,n_neurons)) # Predictor loadings
       
     
    C_1 = np.concatenate(C_1,1)
    C_2 = np.concatenate(C_2,1)
    C_3 = np.concatenate(C_3,1)

    return C_1,C_2,C_3
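# Hypothetical follow-up (names assumed): with per-task loading matrices of
# shape [n_predictors, n_neurons], the stability of, e.g., choice coding
# across tasks can be read off the correlation of the corresponding rows:
# C_1, C_2, C_3 = regression_time_choices_rewards_b_blocks_diff_tasks(...)
# choice_stability_12 = np.corrcoef(C_1[1], C_2[1])[0, 1]  # row 1 = 'Choice'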
Example #9
def crosscorr_weighted(all_sessions, HP, Data, DM, subj='m484'):
    
    all_session_b1, all_session_a1, all_session_i1, all_session_b2, all_session_a2,\
    all_session_i2, all_session_b3, all_session_a3, all_session_i3 = a_b_i_coding(Data,DM)

    if subj == 'm484':  
        all_session_b1 = all_session_b1[:16]
        all_session_a1 = all_session_a1[:16]
        all_session_i1 = all_session_i1[:16]
        all_session_b2 = all_session_b2[:16]
        all_session_a2 = all_session_a2[:16]
        all_session_i2 = all_session_i2[:16]
        all_session_b3 = all_session_b3[:16]
        all_session_a3 = all_session_a3[:16]
        all_session_i3 = all_session_i3[:16]
        
    elif subj == 'm479': 
        all_session_b1 = all_session_b1[16:24]
        all_session_a1 = all_session_a1[16:24]
        all_session_i1 = all_session_i1[16:24]
        all_session_b2 = all_session_b2[16:24]
        all_session_a2 = all_session_a2[16:24]
        all_session_i2 = all_session_i2[16:24]
        all_session_b3 = all_session_b3[16:24]
        all_session_a3 = all_session_a3[16:24]
        all_session_i3 = all_session_i3[16:24]
      
    elif subj == 'm483': 
        all_session_b1 = all_session_b1[24:]
        all_session_a1 = all_session_a1[24:]
        all_session_i1 = all_session_i1[24:]
        all_session_b2 = all_session_b2[24:]
        all_session_a2 = all_session_a2[24:]
        all_session_i2 = all_session_i2[24:]
        all_session_b3 = all_session_b3[24:]
        all_session_a3 = all_session_a3[24:]
        all_session_i3 = all_session_i3[24:]
        
    session_mean = []
    
    for s,session in enumerate(all_sessions):
        
        
        a1_ind, b1_ind,a2_ind, b2_ind,a3_ind, b3_ind, taskid, task = plot_tuning_curves(HP,subj,s)
        
        all_session_b1_s = all_session_b1[s]
        all_session_a1_s = all_session_a1[s]
        all_session_i1_s = all_session_i1[s]
        all_session_b2_s = all_session_b2[s]
        all_session_a2_s = all_session_a2[s]
        all_session_i2_s = all_session_i2[s]
        all_session_b3_s = all_session_b3[s]
        all_session_a3_s = all_session_a3[s]
        all_session_i3_s = all_session_i3[s]
        
        task_id_1 = task[(np.where(taskid == 1)[0])][0]
        task_id_2 = task[(np.where(taskid == 2)[0])][0]
        task_id_3 = task[(np.where(taskid == 3)[0])][0]
        task = str(task_id_1) + '_' + str(task_id_2)  + '_'+str(task_id_3)
        colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)
        by_hsv = [*colors]
        by_hsv = [color for color in by_hsv if 'dark' in color or 'medium' in color]
        by_hsv = [color for color in by_hsv if not 'grey' in color]
        fig = plt.figure()

        for n in range(a1_ind.shape[1]):
            fig.add_subplot(3,5, n+1)
            plt.plot(np.mean(b2_ind[:,n,:],  axis = 0),color=by_hsv[n], linestyle = '--', label = 'B2')
            plt.plot(np.mean(b1_ind[:,n,:],  axis = 0),color=by_hsv[n],linestyle = ':', label = 'B1')
            plt.plot(np.mean(b3_ind[:,n,:],  axis = 0),color=by_hsv[n],linestyle = '-.', label = 'B3')

            plt.vlines(25,  ymin = 0,  ymax = np.max([np.mean(b2_ind[:,n,:], axis = 0),np.mean(b1_ind[:,n,:],  axis = 0)]), color = 'grey', alpha = 0.5)
            plt.vlines(36,  ymin = 0, ymax = np.max([np.mean(b2_ind[:,n,:],  axis = 0),np.mean(b1_ind[:,n,:],  axis = 0)]), color = 'pink', alpha = 0.5)
      
        plt.legend()  
        plt.title(s)

        round_session = np.round(session)
        x_max = 200
        smooth_sd_ms = 0.5
        bin_width_ms = 1
        bin_edges_trial = np.arange(0,x_max, bin_width_ms)
        hist_array = np.zeros((session.shape[0],session.shape[1],len(bin_edges_trial)-1))
        ticks_label = []
            
        for n,neuron in enumerate(round_session):
            ticks_label.append(str(n+1))

            for r,ripple in enumerate(neuron):
                hist,edges = np.histogram(ripple, bins= bin_edges_trial)
                smoothed = gaussian_filter1d(hist.astype(float), smooth_sd_ms)         
                hist_array[n,r,:] = smoothed
                
        hist_mean = np.mean(hist_array, axis = 1)
        figav = plt.figure()
        
        for i, ii in enumerate(hist_mean):
            figav.add_subplot(5, 4, i + 1)
            plt.plot(ii, color='black')
        
       
        combinations = list(itertools.combinations(range(hist_array.shape[0]), 2))
        
        pos_1 = []
        pos_2 = []
        
        for i in combinations:
            pos_1.append(i[0])
            pos_2.append(i[1])

        for i in combinations:
            pos_1.append(i[1])
            pos_2.append(i[0])

        hist_array_cut = hist_array[:,:50,:]
        c = []
        corr_n_r = []
        for i1,i2 in zip(pos_1,pos_2):
            i = [i1,i2]
            p = []
            corr_n = []
            for r,rr in zip(hist_array_cut[i1],hist_array_cut[i2]):
                ripple = []
                corr = []
                correlation = np.correlate(r,rr, 'full')
                for lag in range(1,50):
                    Design = np.ones((3,len(r)))
                    Design[1,:]  = r
                    Design[2,:] = rr
                    model = LinearRegression().fit(Design[:,:-lag].T, rr[lag:])
                    corr.append(correlation[1])
                    ripple.append(model.coef_[1])
                p.append(ripple)
                corr_n.append(correlation)
            if i == [1, 0] or i == [0, 1]:
                plt.plot(np.mean(corr_n, axis=0))
            c.append(p)
            corr_n_r.append(corr_n)
        c = np.asarray(c)
        mean_c = np.nanmean(c, axis = 1)
        
        session_mean.append(mean_c)
       
        BA = []
        AB = []
        AI = []
        IA = []
        BI = []
        IB = []

        BA_t2 = []
        AB_t2 = []
        AI_t2 = []
        IA_t2 = []
        BI_t2 = []
        IB_t2 = []
        
        BA_t3 = []
        AB_t3 = []
        AI_t3 = []
        IA_t3 = []
        BI_t3 = []
        IB_t3 = []

        for i1,i2 in zip(pos_1,pos_2):

            BA.append(all_session_b1_s[i1]*all_session_a1_s[i2])
            AB.append(all_session_a1_s[i1]*all_session_b1_s[i2])
            AI.append(all_session_a1_s[i1]*all_session_i1_s[i2])
            IA.append(all_session_i1_s[i1]*all_session_a1_s[i2])
            BI.append(all_session_b1_s[i1]*all_session_i1_s[i2])
            IB.append(all_session_i1_s[i1]*all_session_b1_s[i2])

            BA_t2.append(all_session_b2_s[i1]*all_session_a2_s[i2])
            AB_t2.append(all_session_a2_s[i1]*all_session_b2_s[i2])
            AI_t2.append(all_session_a2_s[i1]*all_session_i2_s[i2])
            IA_t2.append(all_session_i2_s[i1]*all_session_a2_s[i2])
            BI_t2.append(all_session_b2_s[i1]*all_session_i2_s[i2])
            IB_t2.append(all_session_i2_s[i1]*all_session_b2_s[i2])

            BA_t3.append(all_session_b3_s[i1]*all_session_a3_s[i2])
            AB_t3.append(all_session_a3_s[i1]*all_session_b3_s[i2])
            AI_t3.append(all_session_a3_s[i1]*all_session_i3_s[i2])
            IA_t3.append(all_session_i3_s[i1]*all_session_a3_s[i2])
            BI_t3.append(all_session_b3_s[i1]*all_session_i3_s[i2])
            IB_t3.append(all_session_i3_s[i1]*all_session_b3_s[i2])
        
        Design_ports = np.asarray([np.ones(len(BA_t2)),BA_t2,AB_t2,AI_t2,IA_t2,BI_t2,IB_t2])
        coefs = re.regression_code(mean_c,Design_ports.T)

    plt.figure()
    plt.plot(coefs[1], label = 'BA')
    plt.plot(coefs[2], label = 'AB')

    plt.plot(coefs[3], label = 'AI')
    plt.plot(coefs[4], label = 'IA')

    plt.plot(coefs[5], label = 'BI')
    plt.plot(coefs[6], label = 'IB')

    plt.legend()
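# A self-contained sketch of the lagged-coupling measure computed in the inner
# loop of crosscorr_weighted: for each lag, the partner histogram rr shifted
# forward by `lag` bins is regressed on a constant, r, and rr, and the weight
# on r is kept. The function name and max_lag default are illustrative
# assumptions.
import numpy as np
from sklearn.linear_model import LinearRegression

def lagged_coupling(r, rr, max_lag=50):
    # r, rr: 1-D smoothed spike-count histograms of two neurons.
    coefs = []
    for lag in range(1, max_lag):
        design = np.vstack([np.ones(len(r)), r, rr])[:, :-lag].T
        model = LinearRegression().fit(design, rr[lag:])
        coefs.append(model.coef_[1])  # coefficient on the partner neuron r
    return np.asarray(coefs)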
Example #10
def between_tasks(Data, DM, PFC=True):

    cpd_true = []
    C_true = []
    cpd_aligned = []
    C_aligned = []

    C_sq_true = []
    C_sq_aligned = []

    if PFC:
        ind_1 = np.arange(0, 26)
        ind_2 = np.arange(1, 27)
        ind_a = np.hstack((ind_1, ind_2))
        ind_b = np.hstack((ind_2, ind_1))

    else:
        ind_1 = np.arange(0, 15)
        ind_2 = np.arange(1, 16)
        ind_a = np.hstack((ind_1, ind_2))
        ind_b = np.hstack((ind_2, ind_1))

    all_sessions_firing, all_session_dm = select_trials(Data, DM, 10)

    for a, b in zip(ind_a, ind_b):
        aligned_a, aligned_b, original_a, original_b, task_1_aligned = procrustes(
            a, b, all_sessions_firing, all_session_dm)

        target = np.transpose(task_1_aligned, [1, 0, 2])
        source = np.transpose(original_a, [1, 0, 2])[40:80, :, :]

        #  Session 1 Task 2
        dm_test = all_session_dm[a]

        trials, n_neurons, n_timepoints = source.shape

        reward_test = dm_test[:, 2][40:80]
        state_test = dm_test[:, 0][40:80]
        choices_test = dm_test[:, 1][40:80]
        ones_test = np.ones(len(choices_test))

        reward_choice = choices_test * reward_test

        # Aligned Task 2 from Task 1
        predictors_train = OrderedDict([('State', state_test),
                                        ('Reward', reward_test),
                                        ('Choice', choices_test),
                                        ('Reward Choice Int', reward_choice),
                                        ('ones', ones_test)])

        X = np.vstack(predictors_train.values()).T[:trials, :].astype(float)
        n_predictors = X.shape[1]
        y = source.reshape(
            [len(source),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y, X)
        C_true.append(tstats.reshape(n_predictors, n_neurons,
                                     n_timepoints))  # Predictor loadings
        C_sq_true.append(
            tstats.reshape(n_predictors, n_neurons, n_timepoints)**2)
        cpd_true.append(
            re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))

        # Aligned Using Neurons
        n_predictors = X.shape[1]
        y = target.reshape(
            [len(target),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y, X)
        C_aligned.append(tstats.reshape(n_predictors, n_neurons,
                                        n_timepoints))  # Predictor loadings
        C_sq_aligned.append(
            tstats.reshape(n_predictors, n_neurons, n_timepoints)**2)
        cpd_aligned.append(
            re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))

    cpd_true = np.nanmean(np.concatenate(cpd_true, 0), axis=0)
    C_sq_true = np.concatenate(C_sq_true, 1)

    cpd_aligned = np.nanmean(np.concatenate(cpd_aligned, 0), axis=0)
    C_sq_aligned = np.concatenate(C_sq_aligned, 1)


    c =  ['violet', 'black', 'green', 'blue', 'turquoise', 'grey', 'yellow', 'pink',\
          'purple', 'darkblue', 'darkred', 'darkgreen', 'gold', 'lightgreen']  # 'darkyellow' is not a valid matplotlib colour
    p = [*predictors_train]

    plt.figure()

    for i in np.arange(cpd_true.shape[1] - 1):
        plt.plot(cpd_true[:, i], label=p[i] + ' Real', color=c[i])
        plt.plot(cpd_aligned[:, i],
                 label=p[i] + ' Aligned',
                 color=c[i],
                 linestyle='--')

    plt.legend()
    plt.ylabel('CPD')
    plt.xlabel('Time in Trial')
    plt.xticks([25, 35, 42], ['I', 'C', 'R'])

    return cpd_aligned, cpd_true
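# Hypothetical usage (argument names assumed):
# cpd_aligned, cpd_true = between_tasks(Data, DM, PFC=True)
# Note the plotting loop above runs to cpd_true.shape[1] - 1, dropping the
# constant ('ones') regressor from the figure.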
Example #11
def regression(Data, DM, PFC=True):

    cpd_true = []
    C_true = []
    cpd_misaligned = []
    C_misaligned = []
    cpd_aligned = []
    C_aligned = []

    C_sq_true = []
    C_sq_misaligned = []
    C_sq_aligned = []

    if PFC:
        ind_1 = np.arange(0, 26)
        ind_2 = np.arange(1, 27)
        ind_a = np.hstack((ind_1, ind_2))
        ind_b = np.hstack((ind_2, ind_1))

    else:
        ind_1 = np.arange(0, 15)
        ind_2 = np.arange(1, 16)
        ind_a = np.hstack((ind_1, ind_2))
        ind_b = np.hstack((ind_2, ind_1))

    misaligned_list = []
    aligned_list = []

    for a, b in zip(ind_a, ind_b):

        all_sessions, all_session_dm, aligned_by_trial, original_a, original_b = in_progress(
            a, b, Data, DM, misaligned_list, aligned_list)
        # plt.figure()

        # for i in np.arange(aligned_by_trial.shape[0]):
        #     plt.plot(np.mean(aligned_by_trial,1)[i,:])

        # plt.figure()
        # for i in np.arange(original_b.shape[0]):
        #     plt.plot(np.mean(original_b,1)[i,:])

        #session_a = all_sessions[ind_a]

        dm_test = all_session_dm[a]

        session_training = np.transpose(original_a, [1, 0, 2])
        session_misaligned = np.transpose(original_b, [1, 0, 2])
        session_aligned = np.transpose(aligned_by_trial, [1, 0, 2])

        trials, n_neurons, n_timepoints = session_training.shape

        reward_test = dm_test[:, 2]
        state_test = dm_test[:, 0]
        choices_test = dm_test[:, 1]
        ones_test = np.ones(len(choices_test))

        trials_since_block = np.arange(0, 10)
        trials_since_block = np.tile(trials_since_block, 12)

        reward_choice = choices_test * reward_test
        trial_sq = (np.asarray(trials_since_block) - 0.5)**2
        choice_trials_sq = choices_test * trial_sq
        interaction_trials_choice = trials_since_block * choices_test

        # Original Using Neurons
        predictors_train = OrderedDict([
            ('State', state_test),
            ('Reward', reward_test),
            ('Choice', choices_test),
            #('Reward Choice Int', reward_choice),
            #('Trials in Block', trials_since_block),
            #('Squared Time in Block', trial_sq),
            #('Trials x Choice', interaction_trials_choice),
            #('Trials x Choice Sq',choice_trials_sq),
            ('ones', ones_test)
        ])

        X = np.vstack(predictors_train.values()).T[:trials, :].astype(float)
        n_predictors = X.shape[1]
        y = session_training.reshape(
            [len(session_training),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y, X)
        C_true.append(tstats.reshape(n_predictors, n_neurons,
                                     n_timepoints))  # Predictor loadings
        C_sq_true.append(
            tstats.reshape(n_predictors, n_neurons, n_timepoints)**2)
        cpd_true.append(
            re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))

        trials, n_neurons, n_timepoints = session_misaligned.shape

        # Misaligned Using Neurons

        y = session_misaligned.reshape(
            [len(session_misaligned),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y, X)
        C_misaligned.append(
            tstats.reshape(n_predictors, n_neurons,
                           n_timepoints))  # Predictor loadings
        C_sq_misaligned.append(
            tstats.reshape(n_predictors, n_neurons, n_timepoints)**2)
        cpd_misaligned.append(
            re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))

        # Aligned Using Neurons

        n_predictors = X.shape[1]
        y = session_aligned.reshape(
            [len(session_aligned),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y, X)
        C_aligned.append(tstats.reshape(n_predictors, n_neurons,
                                        n_timepoints))  # Predictor loadings
        C_sq_aligned.append(
            tstats.reshape(n_predictors, n_neurons, n_timepoints)**2)
        cpd_aligned.append(
            re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))

    cpd_true = np.nanmean(np.concatenate(cpd_true, 0), axis=0)
    C_true = np.concatenate(C_true, 1)

    cpd_misaligned = np.nanmean(np.concatenate(cpd_misaligned, 0), axis=0)
    C_misaligned = np.concatenate(C_misaligned, 1)

    cpd_aligned = np.nanmean(np.concatenate(cpd_aligned, 0), axis=0)
    C_aligned = np.concatenate(C_aligned, 1)

    C_sq_true = np.concatenate(C_sq_true, 1)
    C_sq_misaligned = np.concatenate(C_sq_misaligned, 1)
    C_sq_aligned = np.concatenate(C_sq_aligned, 1)

    C_sq_true[~np.isfinite(C_sq_true)] = np.nan
    C_sq_true = np.nanmean(C_sq_true, 1)[:-1, :]
    C_sq_misaligned = np.nanmean(C_sq_misaligned, 1)[:-1, :]
    C_sq_aligned = np.nanmean(C_sq_aligned, 1)[:-1, :]

    c =  ['violet', 'black', 'green', 'blue', 'turquoise', 'grey', 'yellow', 'pink',\
          'purple', 'darkblue', 'darkred', 'darkgreen', 'gold', 'lightgreen']  # 'darkyellow' is not a valid matplotlib colour
    p = [*predictors_train]

    plt.figure()

    for i in np.arange(C_sq_true.shape[0]):
        plt.plot(C_sq_true[i, :], label=p[i] + ' Real', color=c[i])
        plt.plot(C_sq_misaligned[i, :],
                 label=p[i] + ' Misaligned',
                 color=c[i],
                 linestyle='--')
        plt.plot(C_sq_aligned[i, :],
                 label=p[i] + ' Aligned',
                 color=c[i],
                 linestyle=':')

    plt.legend()
    plt.ylabel('Coef Sq')
    plt.xlabel('Time in Trial')
    plt.xticks([25, 35, 42], ['I', 'C', 'R'])

    cpd_true = cpd_true[:, :-1]
    cpd_misaligned = cpd_misaligned[:, :-1]
    cpd_aligned = cpd_aligned[:, :-1]

    plt.figure()

    for i in np.arange(cpd_true.shape[1]):
        plt.plot(cpd_true[:, i], label=p[i] + ' Real', color=c[i])
        plt.plot(cpd_misaligned[:, i],
                 label=p[i] + ' Misaligned',
                 color=c[i],
                 linestyle='--')
        plt.plot(cpd_aligned[:, i],
                 label=p[i] + ' Aligned',
                 color=c[i],
                 linestyle=':')

    plt.legend()
    plt.ylabel('CPD')
    plt.xlabel('Time in Trial')
    plt.xticks([25, 35, 42], ['I', 'C', 'R'])

    return misaligned_list, aligned_list
Example #12
def regression_prev_choice(data, perm=1000):  # perm: number of circular-shift permutations; set to 0/False to skip

    C = []
    cpd = []

    dm = data['DM'][0]
    firing = data['Data'][0]

    if perm:
        C_perm = [[] for i in range(perm)]  # To store permuted predictor loadings for each session.
        cpd_perm = [[] for i in range(perm)]  # To store permuted cpd for each session.

    for s, sess in tqdm(enumerate(dm)):
        DM = dm[s]
        firing_rates = firing[s][1:, :, :]
        n_trials, n_neurons, n_timepoints = firing_rates.shape
        choices = DM[:, 1]
        reward = DM[:, 2]
        task = DM[:, 5]
        task_id = np.where(np.diff(task))[0]
        state = DM[:, 0]

        stay = choices[0:-1] == choices[1:]
        stay = stay * 1
        stay = np.insert(stay, 0, 0)
        lastreward = reward[0:-1]
        lastreward = np.insert(lastreward, 0, 0)

        rl = np.zeros(len(stay))
        rl[0] = 1

        rl_right = np.zeros(len(stay))
        rl_right[0] = choices[0] == state[0]
        choice_rr_start = -100

        rl_wrong = np.zeros(len(stay))
        rl_wrong[0] = choices[0] != state[0]
        choice_rw_start = -100

        for tr in range(len(stay)):
            if tr > 0:
                if stay[tr] == 1:
                    rl[tr] = rl[tr - 1] + 1
                else:
                    rl[tr] = 1

                if ((choices[tr] == choice_rr_start) &
                    (choices[tr] == state[tr])):
                    rl_right[tr] = rl_right[tr - 1] + 1

                elif (choices[tr] == state[tr]):

                    rl_right[tr] = 1
                    choice_rr_start = choices[tr]
                else:
                    rl_right[tr] = 0
                    choice_rr_start = -100
                    #If he made the wrong choice it can't be part of a correct run.

                if ((choices[tr] == choice_rw_start) &
                    (choices[tr] != state[tr])):
                    rl_wrong[tr] = rl_wrong[tr - 1] + 1

                elif choices[tr] != state[tr]:
                    rl_wrong[tr] = 1
                    choice_rw_start = choices[tr]
                else:
                    rl_wrong[tr] = 0
                    choice_rw_start = -100  #If he made the right choice it can't be part of a wrong run.

        trials = len(reward)
        rl_wrong = rl_wrong[1:]
        rl_right = rl_right[1:]
        rl = rl[1:]
        prev_choice = DM[:-1, 1]
        choices = choices[1:]
        reward = reward[1:]
        task = task[1:]
        state = state[1:]
        ones = np.ones(len(reward))
        int_repeat = choices * rl
        int_repeat_corr = state * rl_right
        int_repeat_incorr = state * rl_wrong
        error_count = []
        err_count = 0
        for r, rew in enumerate(reward):
            if rew == 0:
                if reward[r] == reward[r - 1]:
                    err_count += 1
            else:
                err_count = 0
            error_count.append(err_count)

        predictors_all = OrderedDict([
            ('Reward', reward),
            ('Choice', choices),
            ('State', state),
            ('Previous Choice 1', prev_choice),
            ('Repeat', rl),
            #('Error Count',error_count),
            #('Repeat Incorrect', rl_wrong),
            #('Repeat Correct', rl_right),
            # ('Repeat Int', int_repeat),
            #('Repeat Corr Int', int_repeat_corr),
            #('Repeat Incorr Int', int_repeat_incorr),
            ('ones', ones)
        ])

        X = np.vstack(
            predictors_all.values()).T[:len(choices), :].astype(float)
        n_predictors = X.shape[1]
        y = firing_rates.reshape(
            [len(firing_rates),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y, X)

        C.append(tstats.reshape(n_predictors, n_neurons,
                                n_timepoints))  # Predictor loadings
        cpd.append(
            re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))

        if perm:
            for i in range(perm):
                X_perm = np.roll(X, np.random.randint(trials), axis=0)
                tstats = reg_f.regression_code(y, X_perm)

                C_perm[i].append(
                    tstats.reshape(n_predictors, n_neurons,
                                   n_timepoints))  # Predictor loadings
                cpd_perm[i].append(
                    re._CPD(X_perm, y).reshape(n_neurons, n_timepoints,
                                               n_predictors))

    if perm:  # Evaluate P values.
        cpd_perm = np.stack(
            [np.nanmean(np.concatenate(cpd_i, 0), 0) for cpd_i in cpd_perm], 0)
        p = np.percentile(cpd_perm, 95, axis=0)
    else:
        cpd_perm, p = None, None  # avoid returning undefined names when perm is falsy

    cpd = np.nanmean(np.concatenate(cpd, 0), axis=0)
    C = np.concatenate(C, 1)

    return cpd_perm, p, cpd, C, predictors_all
Example #13
def regression_past_choice(data, perm=1000):  # perm: number of circular-shift permutations; set to 0/False to skip

    C = []
    cpd = []

    dm = data['DM'][0]
    firing = data['Data'][0]

    if perm:
        C_perm = [[] for i in range(perm)]  # To store permuted predictor loadings for each session.
        cpd_perm = [[] for i in range(perm)]  # To store permuted cpd for each session.

    for s, sess in tqdm(enumerate(dm)):
        DM = dm[s]
        firing_rates = firing[s]
        n_trials, n_neurons, n_timepoints = firing_rates.shape
        choices = DM[:, 1]
        reward = DM[:, 2]
        block = DM[:, 4]
        block_df = np.diff(block)
        task = DM[:, 5]
        task_id = np.where(np.diff(task))[0]

        ones = np.ones(len(block))
        trials = len(ones)
        stay = []
        for c, ch in enumerate(choices):
            if c > 0:
                if choices[c - 1] == choices[c]:
                    stay.append(1)
                else:
                    stay.append(0)
            else:
                stay.append(0)

        a_side = 0
        a_side_l = []
        for r, rew in enumerate(reward):
            if r in task_id:
                a_side = 0
            elif reward[r] == 1 and choices[r] == 1:
                a_side += 1
            a_side_l.append(a_side)

        b_side = 0
        b_side_l = []
        for r, rew in enumerate(reward):
            if r in task_id:
                b_side = 0
            elif reward[r] == 1 and choices[r] == 0:
                b_side += 1
            b_side_l.append(b_side)

        stay_ch = np.asarray(stay) * choices
        predictors_all = OrderedDict([('Reward', reward), ('Choice', choices),
                                      ('Stay', stay),
                                      ('Stay x Choice', stay_ch),
                                      ('Reward Cum A', a_side_l),
                                      ('Reward Cum B', b_side_l),
                                      ('ones', ones)])

        X = np.vstack(
            predictors_all.values()).T[:len(choices), :].astype(float)
        n_predictors = X.shape[1]
        y = firing_rates.reshape(
            [len(firing_rates),
             -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
        tstats = reg_f.regression_code(y, X)

        C.append(tstats.reshape(n_predictors, n_neurons,
                                n_timepoints))  # Predictor loadings
        cpd.append(
            re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))

        if perm:
            for i in range(perm):
                X_perm = np.roll(X, np.random.randint(trials), axis=0)
                tstats = reg_f.regression_code(y, X_perm)

                C_perm[i].append(
                    tstats.reshape(n_predictors, n_neurons,
                                   n_timepoints))  # Predictor loadings
                cpd_perm[i].append(
                    re._CPD(X_perm, y).reshape(n_neurons, n_timepoints,
                                               n_predictors))

    if perm:  # Evaluate P values.
        cpd_perm = np.stack(
            [np.nanmean(np.concatenate(cpd_i, 0), 0) for cpd_i in cpd_perm], 0)
        p = np.percentile(cpd_perm, 95, axis=0)
    else:
        cpd_perm, p = None, None  # avoid returning undefined names when perm is falsy

    cpd = np.nanmean(np.concatenate(cpd, 0), axis=0)
    C = np.concatenate(C, 1)

    return cpd_perm, p, cpd, C, predictors_all
def regression_general(data):

    C = []
    cpd = []

    C_1 = []
    C_2 = []
    C_3 = []

    cpd_1_2 = []
    cpd_2_3 = []

    dm = data['DM']
    #dm = dm[:-1]
    firing = data['Data']
    #firing = firing[:-1]

    for s, sess in enumerate(dm):
        DM = dm[s]
        firing_rates = firing[s]
        n_trials, n_neurons, n_timepoints = firing_rates.shape

        if n_neurons > 10:
            session_trials_since_block = []

            state = DM[:, 0]
            choices = DM[:, 1]
            reward = DM[:, 2]
            b_pokes = DM[:, 7]
            a_pokes = DM[:, 6]
            task = DM[:, 5]
            block = DM[:, 4]
            block_df = np.diff(block)
            taskid = rc.task_ind(task, a_pokes, b_pokes)

            correct_choice = np.where(choices == state)[0]
            correct = np.zeros(len(choices))
            correct[correct_choice] = 1

            a_since_block = []
            trials_since_block = []
            t = 0

            #Bug in the state?
            for st in range(len(block)):
                if state[st - 1] != state[st]:
                    t = 0
                else:
                    t += 1
                trials_since_block.append(t)

            session_trials_since_block.append(trials_since_block)

            t = 0
            for st, c in enumerate(choices):
                if state[st - 1] != state[st]:
                    t = 0
                    a_since_block.append(t)

                elif c == 1:
                    t += 1
                    a_since_block.append(t)
                else:
                    a_since_block.append(0)

            negative_reward_count = []
            rew = 0
            block_df = np.append(block_df, 0)
            for r, b in zip(reward, block_df):

                if r == 0:
                    rew += 1
                    negative_reward_count.append(rew)
                elif r == 1:
                    rew -= 1
                    negative_reward_count.append(rew)
                if b != 0:
                    rew = 0

            positive_reward_count = []
            rew = 0
            for r, b in zip(reward, block_df):

                if r == 1:
                    rew += 1
                    positive_reward_count.append(rew)
                elif r == 0:
                    rew += 0
                    positive_reward_count.append(rew)
                if b != 0:
                    rew = 0

            positive_reward_count = np.asarray(positive_reward_count)
            negative_reward_count = np.asarray(negative_reward_count)
            choices_int = np.ones(len(reward))

            choices_int[np.where(choices == 0)] = -1
            reward_choice_int = choices_int * reward
            interaction_trial_latent = trials_since_block * state
            interaction_a_latent = a_since_block * state
            int_a_reward = a_since_block * reward

            interaction_trial_choice = trials_since_block * choices_int
            reward_trial_in_block = trials_since_block * positive_reward_count
            negative_reward_count_st = negative_reward_count * correct
            positive_reward_count_st = positive_reward_count * correct
            negative_reward_count_ch = negative_reward_count * choices
            positive_reward_count_ch = positive_reward_count * choices
            ones = np.ones(len(choices))

            predictors_all = OrderedDict([
                ('Reward', reward),
                ('Choice', choices),
                #('Correct', correct),
                #('A in Block', a_since_block),
                #('A in Block x Reward', int_a_reward),
                ('State', state),
                ('Trial in Block', trials_since_block),
                #('Interaction State x Trial in Block', interaction_trial_latent),
                #('Interaction State x A count', interaction_a_latent),
                ('Choice x Trials in Block', interaction_trial_choice),
                ('Reward x Choice', reward_choice_int),
                # ('No Reward Count in a Block', negative_reward_count),
                # ('No Reward x Correct', negative_reward_count_st),
                # ('Reward Count in a Block', positive_reward_count),
                # ('Reward Count x Correct', positive_reward_count_st),
                # ('No reward Count x Choice',negative_reward_count_ch),
                # ('Reward Count x Choice',positive_reward_count_ch),
                # ('Reward x Trial in Block',reward_trial_in_block),
                ('ones', ones)
            ])

            X = np.vstack(
                predictors_all.values()).T[:len(choices), :].astype(float)
            n_predictors = X.shape[1]
            y = firing_rates.reshape(
                [len(firing_rates),
                 -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
            tstats = reg_f.regression_code(y, X)

            C.append(tstats.reshape(n_predictors, n_neurons,
                                    n_timepoints))  # Predictor loadings
            cpd.append(
                re._CPD(X, y).reshape(n_neurons, n_timepoints, n_predictors))

            task_1 = np.where(taskid == 1)[0]
            task_2 = np.where(taskid == 2)[0]
            task_3 = np.where(taskid == 3)[0]

            # Task 1
            reward_t1 = reward[task_1]
            choices_t1 = choices[task_1]
            correct_t1 = correct[task_1]

            a_since_block_t1 = np.asarray(a_since_block)[task_1]
            int_a_reward_t1 = int_a_reward[task_1]
            state_t1 = state[task_1]
            trials_since_block_t1 = np.asarray(trials_since_block)[task_1]
            interaction_trial_latent_t1 = interaction_trial_latent[task_1]
            interaction_a_latent_t1 = interaction_a_latent[task_1]
            interaction_trial_choice_t1 = interaction_trial_choice[task_1]
            reward_choice_int_t1 = reward_choice_int[task_1]
            negative_reward_count_t1 = negative_reward_count[task_1]
            negative_reward_count_st_t1 = negative_reward_count_st[task_1]
            positive_reward_count_t1 = positive_reward_count[task_1]
            positive_reward_count_st_t1 = positive_reward_count_st[task_1]
            negative_reward_count_ch_t1 = negative_reward_count_ch[task_1]
            positive_reward_count_ch_t1 = positive_reward_count_ch[task_1]
            reward_trial_in_block_t1 = reward_trial_in_block[task_1]

            firing_rates_t1 = firing_rates[task_1]
            ones = np.ones(len(choices_t1))

            predictors = OrderedDict([
                ('Reward', reward_t1), ('Choice', choices_t1),
                ('Correct', correct_t1), ('A in Block', a_since_block_t1),
                ('A in Block x Reward', int_a_reward_t1), ('State', state_t1),
                ('Trial in Block', trials_since_block_t1),
                ('Interaction State x Trial in Block',
                 interaction_trial_latent_t1),
                ('Interaction State x A count', interaction_a_latent_t1),
                ('Choice x Trials in Block', interaction_trial_choice_t1),
                ('Reward x Choice', reward_choice_int_t1),
                ('No Reward Count in a Block', negative_reward_count_t1),
                ('No Reward x Correct', negative_reward_count_st_t1),
                ('Reward Count in a Block', positive_reward_count_t1),
                ('Reward Count x Correct', positive_reward_count_st_t1),
                ('No reward Count x Choice', negative_reward_count_ch_t1),
                ('Reward Count x Choice', positive_reward_count_ch_t1),
                ('Reward x Trial in Block', reward_trial_in_block_t1),
                ('ones', ones)
            ])

            X_1 = np.vstack(
                predictors.values()).T[:len(choices_t1), :].astype(float)
            n_predictors = X_1.shape[1]
            y_1 = firing_rates_t1.reshape(
                [len(firing_rates_t1),
                 -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
            tstats = reg_f.regression_code(y_1, X_1)

            C_1.append(tstats.reshape(n_predictors, n_neurons,
                                      n_timepoints))  # Predictor loadings

            # Task 2
            reward_t2 = reward[task_2]
            choices_t2 = choices[task_2]
            correct_t2 = correct[task_2]

            a_since_block_t2 = np.asarray(a_since_block)[task_2]
            int_a_reward_t2 = int_a_reward[task_2]
            state_t2 = state[task_2]
            trials_since_block_t2 = np.asarray(trials_since_block)[task_2]
            interaction_trial_latent_t2 = interaction_trial_latent[task_2]
            interaction_a_latent_t2 = interaction_a_latent[task_2]
            interaction_trial_choice_t2 = interaction_trial_choice[task_2]
            reward_choice_int_t2 = reward_choice_int[task_2]
            negative_reward_count_t2 = negative_reward_count[task_2]
            negative_reward_count_st_t2 = negative_reward_count_st[task_2]
            positive_reward_count_t2 = positive_reward_count[task_2]
            positive_reward_count_st_t2 = positive_reward_count_st[task_2]
            negative_reward_count_ch_t2 = negative_reward_count_ch[task_2]
            positive_reward_count_ch_t2 = positive_reward_count_ch[task_2]
            reward_trial_in_block_t2 = reward_trial_in_block[task_2]

            firing_rates_t2 = firing_rates[task_2]
            ones = np.ones(len(choices_t2))

            predictors = OrderedDict([
                ('Reward', reward_t2), ('Choice', choices_t2),
                ('Correct', correct_t2), ('A in Block', a_since_block_t2),
                ('A in Block x Reward', int_a_reward_t2), ('State', state_t2),
                ('Trial in Block', trials_since_block_t2),
                ('Interaction State x Trial in Block',
                 interaction_trial_latent_t2),
                ('Interaction State x A count', interaction_a_latent_t2),
                ('Choice x Trials in Block', interaction_trial_choice_t2),
                ('Reward x Choice', reward_choice_int_t2),
                ('No Reward Count in a Block', negative_reward_count_t2),
                ('No Reward x Correct', negative_reward_count_st_t2),
                ('Reward Count in a Block', positive_reward_count_t2),
                ('Reward Count x Correct', positive_reward_count_st_t2),
                ('No reward Count x Choice', negative_reward_count_ch_t2),
                ('Reward Count x Choice', positive_reward_count_ch_t2),
                ('Reward x Trial in Block', reward_trial_in_block_t2),
                ('ones', ones)
            ])

            X_2 = np.vstack(
                predictors.values()).T[:len(choices_t2), :].astype(float)
            n_predictors = X_2.shape[1]
            y_2 = firing_rates_t2.reshape(
                [len(firing_rates_t2),
                 -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
            tstats = reg_f.regression_code(y_2, X_2)

            C_2.append(tstats.reshape(n_predictors, n_neurons,
                                      n_timepoints))  # Predictor loadings

            # Task 3
            reward_t3 = reward[task_3]
            choices_t3 = choices[task_3]
            correct_t3 = correct[task_3]

            a_since_block_t3 = np.asarray(a_since_block)[task_3]
            int_a_reward_t3 = int_a_reward[task_3]
            state_t3 = state[task_3]
            trials_since_block_t3 = np.asarray(trials_since_block)[task_3]
            interaction_trial_latent_t3 = interaction_trial_latent[task_3]
            interaction_a_latent_t3 = interaction_a_latent[task_3]
            interaction_trial_choice_t3 = interaction_trial_choice[task_3]
            reward_choice_int_t3 = reward_choice_int[task_3]
            negative_reward_count_t3 = negative_reward_count[task_3]
            negative_reward_count_st_t3 = negative_reward_count_st[task_3]
            positive_reward_count_t3 = positive_reward_count[task_3]
            positive_reward_count_st_t3 = positive_reward_count_st[task_3]
            negative_reward_count_ch_t3 = negative_reward_count_ch[task_3]
            positive_reward_count_ch_t3 = positive_reward_count_ch[task_3]
            reward_trial_in_block_t3 = reward_trial_in_block[task_3]

            firing_rates_t3 = firing_rates[task_3]
            ones = np.ones(len(choices_t3))

            predictors = OrderedDict([
                ('Reward', reward_t3), ('Choice', choices_t3),
                ('Correct', correct_t3), ('A in Block', a_since_block_t3),
                ('A in Block x Reward', int_a_reward_t3), ('State', state_t3),
                ('Trial in Block', trials_since_block_t3),
                ('Interaction State x Trial in Block',
                 interaction_trial_latent_t3),
                ('Interaction State x A count', interaction_a_latent_t3),
                ('Choice x Trials in Block', interaction_trial_choice_t3),
                ('Reward x Choice', reward_choice_int_t3),
                ('No Reward Count in a Block', negative_reward_count_t3),
                ('No Reward x Correct', negative_reward_count_st_t3),
                ('Reward Count in a Block', positive_reward_count_t3),
                ('Reward Count x Correct', positive_reward_count_st_t3),
                ('No reward Count x Choice', negative_reward_count_ch_t3),
                ('Reward Count x Choice', positive_reward_count_ch_t3),
                ('Reward x Trial in Block', reward_trial_in_block_t3),
                ('ones', ones)
            ])

            X_3 = np.vstack(
                predictors.values()).T[:len(choices_t3), :].astype(float)
            n_predictors = X_3.shape[1]
            y_3 = firing_rates_t3.reshape(
                [len(firing_rates_t3),
                 -1])  # Activity matrix [n_trials, n_neurons*n_timepoints]
            tstats = reg_f.regression_code(y_3, X_3)

            C_3.append(tstats.reshape(n_predictors, n_neurons,
                                      n_timepoints))  # Predictor loadings

            cpd_1_2.append(
                _CPD_cross_task(X_1, X_2, y_1,
                                y_2).reshape(n_neurons, n_timepoints,
                                             n_predictors))

            cpd_2_3.append(
                _CPD_cross_task(X_2, X_3, y_2,
                                y_3).reshape(n_neurons, n_timepoints,
                                             n_predictors))

            print(n_neurons)

    cpd = np.nanmean(np.concatenate(cpd, 0), axis=0)
    C = np.concatenate(C, 1)

    C_1 = np.concatenate(C_1, 1)

    C_2 = np.concatenate(C_2, 1)

    C_3 = np.concatenate(C_3, 1)

    cpd_1_2 = np.nanmean(np.concatenate(cpd_1_2, 0), axis=0)
    cpd_2_3 = np.nanmean(np.concatenate(cpd_2_3, 0), axis=0)

    return C, cpd, C_1, C_2, C_3, cpd_1_2, cpd_2_3, predictors_all, session_trials_since_block
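# A sketch of what a cross-task CPD helper like _CPD_cross_task (used above,
# not shown here) might compute; this is an assumption, not the actual code:
# coefficients are fit on task A and the partial determination of each
# predictor is then evaluated on held-out task B.
import numpy as np

def _CPD_cross_task_sketch(X_a, X_b, y_a, y_b):
    # X_a/y_a: training-task design matrix and activity;
    # X_b/y_b: held-out-task design matrix and activity.
    beta_full, _, _, _ = np.linalg.lstsq(X_a, y_a, rcond=None)
    sse_full = np.sum((y_b - X_b @ beta_full) ** 2, axis=0)
    cpd = np.zeros((y_b.shape[1], X_a.shape[1]))
    for i in range(X_a.shape[1]):
        X_a_red = np.delete(X_a, i, axis=1)  # drop predictor i in both tasks
        X_b_red = np.delete(X_b, i, axis=1)
        beta_red, _, _, _ = np.linalg.lstsq(X_a_red, y_a, rcond=None)
        sse_red = np.sum((y_b - X_b_red @ beta_red) ** 2, axis=0)
        cpd[:, i] = (sse_red - sse_full) / sse_red
    return cpd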