Example #1
def choice_mov_ave(session, plot_pos = [], show_TO = True):
    'Plot of choice moving average and reward block structure for single session.'
    setup_axis(plot_pos)
    choices, transitions, second_steps, outcomes = ut.CTSO_unpack(session.CTSO, dtype = bool)
    second_steps = second_steps * 1.1 - 0.05  # Map 0/1 to -0.05/1.05 so symbols sit just outside the [0, 1] axis.
    mov_ave = ut.exp_mov_ave(choices)

    p.plot(mov_ave,'k.-', markersize = 3)

    if hasattr(session, 'blocks'):
        transitions = transitions == session.blocks['trial_trans_state'] # Convert transitions A/B to transitions common/rare.
        for i in range(len(session.blocks['start_trials'])):
            y = [0.1,0.5,0.9][session.blocks['reward_states'][i]]  # y position corresponding to reward state.
            x = [session.blocks['start_trials'][i], session.blocks['end_trials'][i]]
            if session.blocks['transition_states'][i]:
                p.plot(x, [y,y], 'orange', linewidth = 2)
            else:
                y = 1 - y  # Invert y position if transition is inverted.
                p.plot(x, [y,y], 'purple', linewidth = 2)    
    if show_TO:
        symplot(second_steps,  transitions &  outcomes,'ob' )
        symplot(second_steps,  transitions & ~outcomes,'xb')
        symplot(second_steps, ~transitions &  outcomes,'og')
        symplot(second_steps, ~transitions & ~outcomes,'xg')  
    p.plot([0,len(choices)],[0.75,0.75],'--k')
    p.plot([0,len(choices)],[0.25,0.25],'--k')

    p.xlabel('Trial Number')
    p.yticks([0,0.5,1])
    p.ylim(-0.1, 1.1)
    p.xlim(0,len(choices))
    p.ylabel('Choice moving average')
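# `choice_mov_ave` above, and every example below, leans on `ut.exp_mov_ave`,
# whose source is not included here. A minimal sketch of a causal exponential
# moving average consistent with the calls in these examples (the signature
# and defaults are assumptions, not the repo's actual implementation):
import numpy as np

def exp_mov_ave(data, tau=8., initValue=0., alpha=None):
    'Exponential moving average of data with time constant tau (in trials).'
    if alpha is None:
        alpha = 1. - np.exp(-1. / tau)  # Per-trial decay rate derived from tau.
    mov_ave = np.zeros(len(data))
    value = initValue
    for i, x in enumerate(data):
        value = (1. - alpha) * value + alpha * x  # Leaky integration of the data.
        mov_ave[i] = value
    return mov_ave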
Example #2
def session_plot_moving_average(session, fig_no=1, is_subplot=False):
    'Plot reward probabilities and moving average of choices for a single session.'
    block = session.trial_data['block']
    if not is_subplot: plt.figure(fig_no, figsize=[7.5, 1.8]).clf()
    block_transitions = block[1:] - block[:-1]
    choices = session.trial_data['choices']
    #threshold = block_transitions(session.trial_data['pre-reversal trials']) # If you want threshold crossed on the plot
    index_block = np.where(block_transitions == 1)[0]  # Trials on which the block changes.
    for i in index_block:
        plt.axvline(x=i, color='g', linestyle='-', lw=0.6)
    #for i in threshold: # If you want threshold crossed on the plot
    #    plt.axvline(x=i, color='k', linestyle='--', lw=0.6)
    plt.axhline(y=0.25, color='r', lw=0.1)
    plt.axhline(y=0.75, color='r', lw=0.1)

    exp_average = ut.exp_mov_ave(choices, initValue=0.5, tau=8)
    plt.plot(exp_average, '--')
    plt.ylim(0, 1)
    plt.xlim(1, session.trial_data['n_trials'])
    if not is_subplot:
        plt.ylabel('Exp moving average')
        plt.xlabel('Trials')
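# Hypothetical usage of session_plot_moving_average, assuming only the
# trial_data fields the function reads ('choices', 'block', 'n_trials');
# the FakeSession class below is an illustration, not part of the repo:
import numpy as np
import matplotlib.pyplot as plt

class FakeSession:
    def __init__(self, n_trials=200, seed=0):
        rng = np.random.default_rng(seed)
        self.trial_data = {
            'choices': rng.integers(0, 2, n_trials),          # Random 0/1 choices.
            'block': np.repeat(np.arange(4), n_trials // 4),  # Four equal-length blocks.
            'n_trials': n_trials,
        }

session_plot_moving_average(FakeSession(), fig_no=1)
plt.show()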
Example #3
def raw_data_time_warp_beh(data, experiment_aligned_data):
    'Select trials on which the correct-choice moving average exceeds threshold in each of 12 blocks; return their firing rates and block/task info per session.'
    # Note: experiment_aligned_data is accepted but not used in this function.

    dm = data['DM'][0]
    firing = data['Data'][0]

    res_list = []
    list_block_changes = []
    trials_since_block_list = []
    state_list = []
    task_list = []
   
    for s, DM in enumerate(dm):
        state = DM[:,0]
        choices = DM[:,1]
     
        task = DM[:,5]
        task_ind = np.where(np.diff(task)!=0)[0]
        
        firing_rates = firing[s] 
        block = DM[:,4]
        block_df = np.diff(block)
        ind_block = np.where(block_df != 0)[0]

        if len(ind_block) >= 12:
         
            # The moving average resets at task boundaries, so calculate correct/incorrect separately for each task.
            
            # task_ind marks the last trial of each task, so each boundary trial
            # falls into the next segment; the three segments are contiguous and
            # together cover every trial.
            task_1_state = state[:task_ind[0]]
            task_2_state = state[task_ind[0]:task_ind[1]]
            task_3_state = state[task_ind[1]:]
            task_1_choice = choices[:task_ind[0]]
            task_2_choice = choices[task_ind[0]:task_ind[1]]
            task_3_choice = choices[task_ind[1]:]
            correct_ind_task_1 = np.where(task_1_state == task_1_choice)
            correct_ind_task_2 = np.where(task_2_state == task_2_choice)
            correct_ind_task_3 = np.where(task_3_state == task_3_choice)

            correct_task_1 = np.zeros(len(task_1_state))
            correct_task_1[correct_ind_task_1] = 1
            correct_task_2 = np.zeros(len(task_2_state))
            correct_task_2[correct_ind_task_2] = 1
            correct_task_3 = np.zeros(len(task_3_state))
            correct_task_3[correct_ind_task_3] = 1

            # Calculate moving average to determine behavioural switches.
            mov_av_task_1 = ut.exp_mov_ave(correct_task_1, initValue = 0.5, tau = 8)
            mov_av_task_2 = ut.exp_mov_ave(correct_task_2, initValue = 0.5, tau = 8)
            mov_av_task_3 = ut.exp_mov_ave(correct_task_3, initValue = 0.5, tau = 8)
            mov_av = np.concatenate((mov_av_task_1, mov_av_task_2, mov_av_task_3))
            above_threshold = np.where(mov_av > 0.63)[0]  # Trials on which performance is reliably above chance.
            
 
            # For each of the 12 blocks, keep the above-threshold trials and
            # record each kept trial's position within its block.
            per_block = [np.intersect1d(above_threshold, np.where(block == b)[0])
                         for b in range(12)]

            all_ind_triggered_on_beh = np.concatenate(per_block)
            ind_blocks = np.median([len(b) for b in per_block])
            trials_since_block = np.hstack([np.arange(len(b)) for b in per_block])
            firing_rates = firing_rates[all_ind_triggered_on_beh]
            state = state[all_ind_triggered_on_beh]
            task = task[all_ind_triggered_on_beh]

            res_list.append(firing_rates)
            trials_since_block_list.append(trials_since_block)
            list_block_changes.append(ind_blocks)
            state_list.append(state)
            task_list.append(task)
           
    return res_list, list_block_changes, trials_since_block_list, state_list, task_list
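# The task and block boundaries above come from np.diff on a label vector;
# a self-contained check of that idiom (toy labels, nothing from the data):
import numpy as np

task = np.array([1, 1, 1, 2, 2, 3, 3, 3])
task_ind = np.where(np.diff(task) != 0)[0]   # Last index of each run: [2, 4].
segments = np.split(task, task_ind + 1)      # [1,1,1], [2,2], [3,3,3].
print(task_ind, [seg.tolist() for seg in segments])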
Example #4
def regression_residuals_blocks_aligned_on_beh(data, experiment_aligned_data):
    'As raw_data_time_warp_beh, but regress reward and choice out of the firing rates within each task and return the residuals.'

    dm = data['DM'][0]
    firing = data['Data'][0]

    res_list = []
    list_block_changes = []
    trials_since_block_list = []
    state_list = []
    task_list = []
    for s, DM in enumerate(dm):
        state = DM[:,0]
        choices = DM[:,1]
        reward = DM[:,2]
        task = DM[:,5]
        task_ind = np.where(np.diff(task)!=0)[0]
        
        firing_rates = firing[s] 
        block = DM[:,4]
        block_df = np.diff(block)
        ind_block = np.where(block_df != 0)[0]

        if len(ind_block) >= 12:
         
            # The moving average resets at task boundaries, so calculate correct/incorrect separately for each task.
            
            task_1_state = state[:task_ind[0]]
            task_2_state = state[task_ind[0]:task_ind[1]]
            task_3_state = state[task_ind[1]:]
            task_1_choice = choices[:task_ind[0]]
            task_2_choice = choices[task_ind[0]:task_ind[1]]
            task_3_choice = choices[task_ind[1]:]
            correct_ind_task_1 = np.where(task_1_state == task_1_choice)
            correct_ind_task_2 = np.where(task_2_state == task_2_choice)
            correct_ind_task_3 = np.where(task_3_state == task_3_choice)

            correct_task_1 = np.zeros(len(task_1_state))
            correct_task_1[correct_ind_task_1] = 1
            correct_task_2 = np.zeros(len(task_2_state))
            correct_task_2[correct_ind_task_2] = 1
            correct_task_3 = np.zeros(len(task_3_state))
            correct_task_3[correct_ind_task_3] = 1

            # Calculate moving average to determine behavioural switches.
            mov_av_task_1 = ut.exp_mov_ave(correct_task_1, initValue = 0.5, tau = 8)
            mov_av_task_2 = ut.exp_mov_ave(correct_task_2, initValue = 0.5, tau = 8)
            mov_av_task_3 = ut.exp_mov_ave(correct_task_3, initValue = 0.5, tau = 8)
            mov_av = np.concatenate((mov_av_task_1, mov_av_task_2, mov_av_task_3))
            above_threshold = np.where(mov_av > 0.63)[0]  # Trials on which performance is reliably above chance.
            
 
            # For each of the 12 blocks, keep the above-threshold trials and
            # record each kept trial's position within its block.
            per_block = [np.intersect1d(above_threshold, np.where(block == b)[0])
                         for b in range(12)]

            all_ind_triggered_on_beh = np.concatenate(per_block)
            ind_blocks = np.median([len(b) for b in per_block])
            trials_since_block = np.hstack([np.arange(len(b)) for b in per_block])
            firing_rates = firing_rates[all_ind_triggered_on_beh]
            state = state[all_ind_triggered_on_beh]
            choices = choices[all_ind_triggered_on_beh]
            reward = reward[all_ind_triggered_on_beh]
            task = task[all_ind_triggered_on_beh]
            task_1 = np.where(task == 1)[0]
            task_2 = np.where(task == 2)[0]
            task_3 = np.where(task == 3)[0]

           
            ## Task 1
            firing_rates_1 = firing_rates[task_1]
            choices_1 = choices[task_1]
            reward_1 = reward[task_1]
            reward_choice_1 = choices_1 * reward_1
            ones_1 = np.ones(len(reward_choice_1))

            n_trials, n_neurons, n_timepoints = firing_rates_1.shape
            predictors_all = OrderedDict([#('Time', trials_since_block),
                                             ('Reward', reward_1),
                                             ('Choice', choices_1),
                                            # ('Reward x Choice',reward_choice_1),
                                             ('ones', ones_1)])

            X = np.vstack(list(predictors_all.values())).T.astype(float)  # Design matrix [n_trials, n_predictors].
            y = firing_rates_1.reshape([len(firing_rates_1),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]
              
            pdes = np.linalg.pinv(X)
            pe = np.matmul(pdes,y)
            res = y - np.matmul(X,pe)
            res_list_task_1 = res.reshape(n_trials, n_neurons, n_timepoints)
           
           
            ## Task 2
            firing_rates_2 = firing_rates[task_2]
            choices_2 = choices[task_2]
            reward_2 = reward[task_2]
            reward_choice_2 = choices_2 * reward_2
            ones_2 = np.ones(len(reward_choice_2))

            n_trials, n_neurons, n_timepoints = firing_rates_2.shape
            predictors_all = OrderedDict([#('Time', trials_since_block),
                                             ('Reward', reward_2),
                                             ('Choice', choices_2),
                                            #  ('Reward x Choice',reward_choice_2),
                                             ('ones', ones_2)])

            X = np.vstack(list(predictors_all.values())).T.astype(float)  # Design matrix [n_trials, n_predictors].
            y = firing_rates_2.reshape([len(firing_rates_2),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]
             
            pdes = np.linalg.pinv(X)
            pe = np.matmul(pdes,y)
            res = y - np.matmul(X,pe)
            res_list_task_2 = res.reshape(n_trials, n_neurons, n_timepoints)
           
            ## Task 3
            firing_rates_3 = firing_rates[task_3]
            choices_3 = choices[task_3]
            reward_3 = reward[task_3]
            reward_choice_3 = choices_3 * reward_3
            ones_3 = np.ones(len(reward_choice_3))

            n_trials, n_neurons, n_timepoints = firing_rates_3.shape
            predictors_all = OrderedDict([#('Time', trials_since_block),
                                             ('Reward', reward_3),
                                             ('Choice', choices_3),
                                            #  ('Reward x Choice',reward_choice_3),
                                             ('ones', ones_3)])

            X = np.vstack(list(predictors_all.values())).T.astype(float)  # Design matrix [n_trials, n_predictors].
            y = firing_rates_3.reshape([len(firing_rates_3),-1]) # Activity matrix [n_trials, n_neurons*n_timepoints]
             
            pdes = np.linalg.pinv(X)
            pe = np.matmul(pdes,y)
            res = y - np.matmul(X,pe)
            res_list_task_3 = res.reshape(n_trials, n_neurons, n_timepoints)
    
    
    
            res_list.append(np.concatenate((res_list_task_1,res_list_task_2,res_list_task_3),0))
            trials_since_block_list.append(trials_since_block)
            list_block_changes.append(ind_blocks)
            state_list.append(state)
            task_list.append(task)

    return res_list, list_block_changes, trials_since_block_list, state_list, task_list
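# The per-task residual step above is ordinary least squares via the
# Moore-Penrose pseudoinverse; the same computation in isolation, on toy
# data (names and sizes here are illustrative only):
import numpy as np

rng = np.random.default_rng(0)
X = np.column_stack([rng.integers(0, 2, 50),   # Reward regressor (toy).
                     rng.integers(0, 2, 50),   # Choice regressor (toy).
                     np.ones(50)])             # Intercept ('ones').
y = rng.standard_normal((50, 7))               # Activity matrix [n_trials, n_neurons*n_timepoints].

pe = np.matmul(np.linalg.pinv(X), y)           # Fitted coefficients.
res = y - np.matmul(X, pe)                     # Residual activity.
assert np.allclose(X.T @ res, 0, atol=1e-8)    # Residuals are orthogonal to the predictors.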
Example #5
def select_trials(Data, DM, max_number_per_block, ind_time=np.arange(0, 63)):
    'Select correct-performance trials in each block and time-warp them to max_number_per_block trials per block.'

    all_sessions = []
    for data, dm in zip(Data, DM):

        trials, neurons, time = data.shape
        choices = dm[:, 1]
        block = dm[:, 4]
        task = dm[:, 5]
        state = dm[:, 0]

        data = np.mean(data[:, :, ind_time], axis=2)

        b_pokes = dm[:, 7]
        a_pokes = dm[:, 6]
        taskid = rc.task_ind(task, a_pokes, b_pokes)

        task_1 = np.where(taskid == 1)
        task_2 = np.where(taskid == 2)
        task_3 = np.where(taskid == 3)

        # Correct = chose A in state A (correct_a) or chose B in state B (correct_b).
        correct_a = choices.astype(bool) & state.astype(bool)
        correct_b = ~choices.astype(bool) & ~state.astype(bool)
        correct = (correct_a | correct_b).astype(int)
        exp_choices = ut.exp_mov_ave(correct, tau=8, initValue=0.5, alpha=None)
        ind_choosing_correct = np.where(exp_choices > 0.65)[0]

        state_change = np.where(np.diff(block) != 0)[0] + 1
        state_change = np.append(state_change, 0)
        state_change = np.sort(state_change)

        choice_a_state_a = np.where((choices == 1) & (state == 1))[0]
        choice_b_state_b = np.where((choices == 0) & (state == 0))[0]
        if len(state_change) > 12:
            block_12_ind = state_change[12]
            state_change = state_change[:12]
            data = data[:block_12_ind]  # Truncate at the start of the 13th block.

        if len(state_change) > 11:

            # First above-threshold trial within each of the 12 blocks.
            state_correct = [np.intersect1d(ind_choosing_correct, np.where(block == b))
                             for b in range(12)]
            change = np.asarray([s_c[0] for s_c in state_correct])

            block_ch = np.zeros(12)
            ch = np.zeros(12)
            # Reorder block/behavioural change points so tasks appear in the order 1, 2, 3;
            # the last trial index of each task gives the task order within the session.
            t1_end, t2_end, t3_end = task_1[0][-1], task_2[0][-1], task_3[0][-1]
            if t1_end < t2_end < t3_end:
                block_ch[:] = state_change
                ch[:] = change

            elif t1_end < t3_end < t2_end:
                block_ch[:4] = state_change[:4]
                block_ch[4:8] = state_change[8:]
                block_ch[8:12] = state_change[4:8]
                ch[:4] = change[:4]
                ch[4:8] = change[8:]
                ch[8:12] = change[4:8]

            elif t3_end < t2_end < t1_end:
                block_ch[:4] = state_change[8:]
                block_ch[4:8] = state_change[4:8]
                block_ch[8:12] = state_change[:4]
                ch[:4] = change[8:]
                ch[4:8] = change[4:8]
                ch[8:12] = change[:4]

            elif t3_end < t1_end < t2_end:
                block_ch[:4] = state_change[8:]
                block_ch[4:8] = state_change[:4]
                block_ch[8:12] = state_change[4:8]
                ch[:4] = change[8:]
                ch[4:8] = change[:4]
                ch[8:12] = change[4:8]

            elif t2_end < t3_end < t1_end:
                block_ch[:4] = state_change[4:8]
                block_ch[4:8] = state_change[8:]
                block_ch[8:12] = state_change[:4]
                ch[:4] = change[4:8]
                ch[4:8] = change[8:]
                ch[8:12] = change[:4]

            elif t2_end < t1_end < t3_end:
                block_ch[:4] = state_change[4:8]
                block_ch[4:8] = state_change[:4]
                block_ch[8:12] = state_change[8:]
                ch[:4] = change[4:8]
                ch[4:8] = change[:4]
                ch[8:12] = change[8:]

            state_change_t1 = ch[:4]
            state_change_t2 = ch[4:8]
            state_change_t3 = ch[8:]


            state_change_t1_1_ind,state_change_t1_2_ind, state_change_t1_3_ind,state_change_t1_4_ind,\
            state_change_t2_1_ind,state_change_t2_2_ind, state_change_t2_3_ind,state_change_t2_4_ind,\
            state_change_t3_1_ind,state_change_t3_2_ind, state_change_t3_3_ind,state_change_t3_4_ind = state_behaviour_ind(state_change_t1,state_change_t2,state_change_t3, change, data)


            t1_a_state_1, t1_a_state_2 ,t1_b_state_1, t1_b_state_2 ,t2_a_state_1,t2_a_state_2, t2_b_state_1,t2_b_state_2,t3_a_state_1,t3_a_state_2,\
            t3_b_state_1,t3_b_state_2 = choose_a_a_b_b(choice_a_state_a, choice_b_state_b,state_change_t1_1_ind,state_change_t1_2_ind, state_change_t1_3_ind,state_change_t1_4_ind,\
            state_change_t2_1_ind,state_change_t2_2_ind, state_change_t2_3_ind,state_change_t2_4_ind,\
            state_change_t3_1_ind,state_change_t3_2_ind, state_change_t3_3_ind,state_change_t3_4_ind, block, block_ch)

            data_t1_1 = data[t1_a_state_1, :]
            data_t1_2 = data[t1_a_state_2, :]
            data_t1_3 = data[t1_b_state_1, :]
            data_t1_4 = data[t1_b_state_2, :]

            data_t2_1 = data[t2_a_state_1, :]
            data_t2_2 = data[t2_a_state_2, :]
            data_t2_3 = data[t2_b_state_1, :]
            data_t2_4 = data[t2_b_state_2, :]

            data_t3_1 = data[t3_a_state_1, :]
            data_t3_2 = data[t3_a_state_2, :]
            data_t3_3 = data[t3_b_state_1, :]
            data_t3_4 = data[t3_b_state_2, :]


            dict_names = {'data_t1_1':data_t1_1,'data_t1_2':data_t1_2,'data_t1_3':data_t1_3,'data_t1_4':data_t1_4,\
                         'data_t2_1':data_t2_1,'data_t2_2':data_t2_2,'data_t2_3':data_t2_3,'data_t2_4':data_t2_4,\
                         'data_t3_1':data_t3_1,'data_t3_2':data_t3_2,'data_t3_3':data_t3_3,'data_t3_4':data_t3_4}

            all_dict = {}
            for i in dict_names.keys():
                # One [n_neurons, max_number_per_block] array per trial type.
                data_dict = {i: np.full((dict_names[i].shape[1], max_number_per_block), np.nan)}
                for n in range(dict_names[i].shape[1]):
                    x = np.arange(dict_names[i][:, n].shape[0])
                    y = dict_names[i][:, n]
                    f = interpolate.interp1d(x, y)
                    # Resample this block's trial course to a fixed number of points.
                    xnew = np.arange(0, dict_names[i][:, n].shape[0] - 1,
                                     (dict_names[i][:, n].shape[0] - 1) / max_number_per_block)
                    ynew = f(xnew)  # Use interpolation function returned by `interp1d`.
                    ynew = gaussian_filter1d(ynew, 10)
                    data_dict[i][n, :] = ynew[:max_number_per_block]

                all_dict.update(data_dict)
            all_sessions.append(all_dict)

    session_list = []
    for s in all_sessions:
        neuron_list = []
        for i in s.keys():  # The twelve trial-type arrays defined above.
            neuron_list.append(s[i])
        session_list.append(np.asarray(neuron_list))
    session_list = np.concatenate(session_list, 1)

    return session_list
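# The time-warp inside select_trials resamples each block's trial course to a
# fixed length, then smooths it; the same two calls in isolation on toy data:
import numpy as np
from scipy import interpolate
from scipy.ndimage import gaussian_filter1d

max_number_per_block = 20
y = np.random.default_rng(2).random(37)        # One neuron's values across one block (toy).
x = np.arange(len(y))
f = interpolate.interp1d(x, y)
xnew = np.arange(0, len(y) - 1, (len(y) - 1) / max_number_per_block)
ynew = gaussian_filter1d(f(xnew), 10)[:max_number_per_block]
print(ynew.shape)                              # (20,) regardless of the block's raw length.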
Example #6
    def _get_session_predictors(self, session):
        '''Calculate and return values of predictor variables for all trials in session.'''
        # Note: this is a class method (see `self`); the enclosing class is not shown in this excerpt.

        # Evaluate base (non-lagged) predictors from session events.

        choices, transitions_AB, second_steps, outcomes = ut.CTSO_unpack(session.CTSO, dtype = bool)
        trans_state = session.blocks['trial_trans_state']    # Trial by trial state of the transition matrix (A vs B).

        if self.mov_ave_CR:
            trans_mov_ave = np.zeros(len(choices))
            trans_mov_ave[1:] = (5./3.) * ut.exp_mov_ave(transitions_AB - 0.5, self.tau, 0.)[:-1] # Average of 0.5 for constant 0.8 transition prob.
            transitions_CR = 2 * (transitions_AB - 0.5) * trans_mov_ave
            transition_CR_x_outcome = 2. * transitions_CR * (outcomes - 0.5) 
            choices_0_mean = 2 * (choices - 0.5)
        else:  
            transitions_CR = transitions_AB == trans_state
            transition_CR_x_outcome = transitions_CR == outcomes 

        bp_values = {} 

        for p in self.base_predictors:

            if p == 'correct':  # 0.5, 0, -0.5 for high poke being correct, neutral, incorrect option.
                bp_values[p] = 0.5 * (session.blocks['trial_rew_state'] - 1) * \
                              (2 * session.blocks['trial_trans_state'] - 1)  
      
            elif p == 'side': # 0.5, -0.5 for left, right side reached at second step. 
                bp_values[p] = second_steps - 0.5

            elif p == 'side_x_out': # 0.5, -0.5. Side predictor inverted by trial outcome.
                bp_values[p] = (second_steps == outcomes) - 0.5

            # The following predictors all predict stay probability rather than high vs low.
            # e.g. the outcome predictor represents the effect of outcome on stay probability.
            # This is implemented by inverting the predictor dependent on the choice made on the trial.

            elif p ==  'choice': # 0.5, - 0.5 for choices high, low.
                bp_values[p] = choices - 0.5

            elif p == 'good_side': # 0.5, 0, -0.5 for reaching good, neutral, bad second link state.
                bp_values[p] = 0.5 * (session.blocks['trial_rew_state'] - 1) * (2 * (second_steps == choices) - 1)
                    
            elif p ==  'outcome': # 0.5 , -0.5 for  rewarded , not rewarded.
                bp_values[p] = (outcomes == choices) - 0.5

            elif p ==  'block':     # 0.5, -0.5 for A , B blocks.
                bp_values[p] = (trans_state == choices) - 0.5

            elif p == 'block_x_out': # 0.5, -0.5 for A , B blocks inverted by trial outcome.
                bp_values[p] = ((outcomes == trans_state) == choices) - 0.5

            elif p ==  'trans_CR': # 0.5, -0.5 for common, rare transitions.     
                if self.mov_ave_CR:            
                    bp_values[p] = transitions_CR * choices_0_mean 
                else: 
                    bp_values[p] = ((transitions_CR) == choices)  - 0.5

            elif p == 'trCR_x_out': # 0.5, -0.5 for common, rare transitions inverted by trial outcome.
                if self.mov_ave_CR: 
                    bp_values[p] = transition_CR_x_outcome * choices_0_mean 
                else:
                    bp_values[p] = (transition_CR_x_outcome  == choices) - 0.5

            elif p == 'trans_CR_rew': # 0.5, -0.5, for common, rare transitions on rewarded trials, otherwise 0.
                if self.mov_ave_CR:
                    bp_values[p] = transitions_CR * choices_0_mean * outcomes
                else:
                    bp_values[p] = (((transitions_CR) == choices) - 0.5) * outcomes

            elif p == 'trans_CR_non_rew': # 0.5, -0.5, for common, rare transitions on non-rewarded trials, otherwise 0.
                if self.mov_ave_CR:
                    bp_values[p] = transitions_CR * choices_0_mean * ~outcomes
                else:
                    bp_values[p] = (((transitions_CR) == choices) - 0.5) * ~outcomes

        # predictor orthogonalization.

        if self.orth: 
            for A, B in self.orth: # Remove component of predictor A that is parallel to predictor B.
                bp_values[A] = bp_values[A] - ut.projection(bp_values[B], bp_values[A])

        # predictor normalization.
        if self.norm:
            for p in self.base_predictors:
                bp_values[p] = bp_values[p] * 0.5 / np.mean(np.abs(bp_values[p]))

        # Generate lagged predictors from base predictors.

        predictors = np.zeros([session.n_trials, self.n_predictors])

        for i,p in enumerate(self.predictors):  
            if '-' in p: # Get lag from predictor name.
                lag = int(p.split('-')[1]) 
                bp_name = p.split('-')[0]
            else:        # Use default lag.
                lag = 1
                bp_name = p
            predictors[lag:, i] = bp_values[bp_name][:-lag]

        return predictors
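# The final loop builds lagged predictors by shifting each base predictor
# forward in time; a minimal check of that shift:
import numpy as np

bp = np.array([1., 2., 3., 4., 5.])  # Base predictor value on each trial.
lag = 2
lagged = np.zeros(len(bp))
lagged[lag:] = bp[:-lag]             # Trial t is predicted by the value from trial t - lag.
print(lagged)                        # [0. 0. 1. 2. 3.]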