import numpy as np
import pandas as pd

from ibl_pipeline import subject, behavior
from ibl_pipeline.analyses import behavior as behavior_analysis


def psy_by_mouse(unique_signed_contrasts):
    """Proportion of rightward choices at each signed contrast, one row per trained mouse."""
    # all mice that have reached the "trained" criterion
    mice = pd.DataFrame.from_dict(
        ((subject.Subject()) * (subject.SubjectLab()) *
         (behavior_analysis.SessionTrainingStatus()
          & 'training_status="trained" ')).fetch(as_dict=True))

    psy_df = pd.DataFrame(columns=unique_signed_contrasts)
    for row, mouse in enumerate(mice.subject_nickname.unique()):
        # sessions of this mouse with more than 100 trials, while trained
        key = ((subject.Subject() & 'subject_nickname = "{}"'.format(mouse)) *
               (behavior.TrialSet() & 'n_trials > 100') *
               (subject.SubjectLab()) *
               (behavior_analysis.SessionTrainingStatus()
                & 'training_status="trained" ')).fetch('KEY')
        choice, cont_left, cont_right = (behavior.TrialSet.Trial & key).fetch(
            'trial_response_choice', 'trial_stim_contrast_left',
            'trial_stim_contrast_right')

        signed_contrasts = cont_right - cont_left
        right_choices = choice == 'CCW'

        # proportion of rightward choices at each signed contrast
        total_trials = []
        right_trials = []
        for cont in unique_signed_contrasts:
            matching = (signed_contrasts == cont)
            total_trials.append(np.sum(matching))
            right_trials.append(np.sum(right_choices[matching]))
        prop_right_trials = np.divide(right_trials, total_trials)
        psy_df.loc[row, :] = prop_right_trials

    return psy_df
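# Hypothetical usage sketch (not part of the original code): the contrast grid
# below is an assumption based on the nine signed contrasts used in the task,
# and the plotting calls only illustrate how the returned dataframe might be used.
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    unique_signed_contrasts = np.array([-1., -0.25, -0.125, -0.0625, 0.,
                                        0.0625, 0.125, 0.25, 1.])
    psy_df = psy_by_mouse(unique_signed_contrasts)
    # each row is a mouse, each column a signed contrast; plot the grand average
    psy_df.astype(float).mean(axis=0).plot(marker='o')
    plt.xlabel('Signed contrast')
    plt.ylabel('Proportion rightward choices')
    plt.show()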
import pandas as pd

from ibl_pipeline import acquisition, behavior, subject
from dj_tools import *  # provides dj2pandas

# ================================= #
# GRAB ALL DATA FROM DATAJOINT
# ================================= #

sess = (acquisition.Session & 'task_protocol LIKE "%biased%"')
s = pd.DataFrame.from_dict(sess.fetch(as_dict=True))
labs = list(s['session_lab'].unique())
labs.append('zadorlab')
print(labs)

# hack to get around SQL limit: fetch the trials one lab at a time
for lidx, lab in enumerate(labs):
    print(lab)
    b = (behavior.TrialSet.Trial & (subject.SubjectLab() & 'lab_name="%s"' % lab)) \
        * sess.proj('session_uuid', 'task_protocol') \
        * subject.SubjectLab.proj('lab_name') \
        * subject.Subject() & 'subject_line IS NULL OR subject_line="C57BL/6J"'
    # * subject.Subject() & 'subject_birth_date between "2018-09-01" and "2019-02-01"' & 'subject_line IS NULL OR subject_line="C57BL/6J"'
    bdat = pd.DataFrame(
        b.fetch(order_by='subject_nickname, session_start_time, trial_id'))
    print(bdat['subject_nickname'].unique())

    if lidx == 0:
        behav = bdat.copy()
    else:
        behav = behav.append(bdat.copy(), sort=False, ignore_index=True)

behav = dj2pandas(behav)
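# Sanity check (not part of the original code): assuming dj2pandas keeps the
# lab_name and subject_nickname columns, confirm that every lab contributed
# mice to the concatenated dataframe.
print(behav.groupby('lab_name')['subject_nickname'].nunique())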
# requires Alex glm module
import matplotlib.pyplot as plt
import pandas as pd

# connect to DataJoint
import datajoint as dj
dj.config['database.host'] = 'datajoint.internationalbrainlab.org'

from ibl_pipeline.analyses import behavior as behavior_analysis
from ibl_pipeline import reference, subject, behavior
from alexfigs_datajoint_functions import *  # this has all plotting functions
import seaborn as sns
from glm import *

key = ((subject.Subject() & 'sex!="U"') *
       (behavior.TrialSet() & 'n_trials > 100') *
       (subject.SubjectLab()) *
       (behavior_analysis.SessionTrainingStatus()
        & 'training_status="ready for ephys" ')).fetch('KEY')

trials_ibl = pd.DataFrame.from_dict(
    (subject.Subject() * behavior.TrialSet.Trial & key).fetch(as_dict=True))
trials_ibl['signed_contrasts'] = (trials_ibl['trial_stim_contrast_right'] -
                                  trials_ibl['trial_stim_contrast_left'])

# rename columns for the GLM function
trials_ibl = trials_ibl.rename(index=str, columns={
    "session_start_time": "ses",
    "subject_uuid": "mouse_name",
    "trial_feedback_type": "feedbackType",
    "trial_response_choice": "choice"})
import pandas as pd

from ibl_pipeline import acquisition, behavior, subject
from dj_tools import *  # provides dj2pandas

# Set paths for data
"""
You will need to modify the paths below if running this code on your own computer
"""
path_training = '/Users/alex/Documents/Postdoc/GLM_behavior_paper/Data_by_animal/training/'
path_biased = '/Users/alex/Documents/Postdoc/GLM_behavior_paper/Data_by_animal/biased/'
path_model_analysis = '/Users/alex/Documents/PYTHON/trialhistory_frund/analysis.py'
path_model2array = '/Users/alex/Documents/PYTHON/analysis/paper-behavior/model2array.py'

# Import data from all the animals in both stages of training
use_subjects = (subject.Subject * subject.SubjectLab * subject.SubjectProject
                & 'subject_project="ibl_neuropixel_brainwide_01"')
sess = (acquisition.Session
        & (behavior.TrialSet.Trial()
           & 'ABS(trial_stim_contrast_left-0)<0.0001'
           & 'ABS(trial_stim_contrast_right-0)<0.0001')
        & 'task_protocol like "%trainingChoiceWorld%"') * use_subjects
b = (behavior.TrialSet.Trial & sess) * subject.Subject() * subject.SubjectLab()
bdat = pd.DataFrame(
    b.fetch(order_by='subject_nickname, session_start_time, trial_id'))
behav = dj2pandas(bdat)

# Import data from biased choice world
sess = (acquisition.Session
        & (behavior.TrialSet.Trial()
           & 'ABS(trial_stim_contrast_left-0)<0.0001'
           & 'ABS(trial_stim_contrast_right-0)<0.0001')
        & 'task_protocol like "%biasedChoiceWorld%"') * use_subjects
b = (behavior.TrialSet.Trial & sess) * subject.Subject() * subject.SubjectLab()
bdat = pd.DataFrame(
    b.fetch(order_by='subject_nickname, session_start_time, trial_id'))
behav_biased = dj2pandas(bdat)

# Store lab identities for later
mousenlabid = behav[['subject_uuid', 'lab_name']]
mousenlabid = mousenlabid.drop_duplicates()
import os

import pandas as pd
import seaborn as sns

# import wrappers etc
from ibl_pipeline import reference, subject, action, acquisition, data, behavior
from ibl_pipeline.utils import psychofit as psy
from dj_tools import *

sns.set_style("darkgrid")
sns.set_context(context="poster")

figpath = os.path.join(os.path.expanduser('~'), 'Data/Figures_IBL')

# ================================= #
# GET DATA
# ================================= #

b = (behavior.TrialSet) \
    * (acquisition.Session.proj(session_date='DATE(session_start_time)')
       & 'session_start_time > "2019-03-01"') \
    * (subject.Subject() & 'subject_birth_date between "2018-09-01" and "2019-02-01"') \
    * subject.SubjectLab() \
    * action.Weighing.proj(weighing_date='DATE(weighing_time)')
bdat = pd.DataFrame(b.fetch(order_by='subject_nickname, session_start_time'))
print(bdat['subject_nickname'].unique())

# express the session date in seconds for the regression x-axis
bdat['date_march'] = pd.to_timedelta(
    pd.to_datetime(bdat.session_date)).dt.total_seconds().astype(int)

g = sns.lmplot(x="date_march", y="n_trials", hue="sex", col="lab_name",
               units="subject_nickname", col_wrap=4, lowess=True, data=bdat)
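# Hypothetical follow-up (not part of the original code): write the grid of
# regression plots to the figure directory defined above; the file name is an
# assumption.
g.savefig(os.path.join(figpath, 'n_trials_over_time.pdf'))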
import os

import pandas as pd

# import wrappers etc
from ibl_pipeline import reference, subject, action, acquisition, data, behavior
from ibl_pipeline.utils import psychofit as psy
from ibl_pipeline.analyses import behavior as behavioral_analyses
from dj_tools import *

figpath = os.path.join(os.path.expanduser('~'), 'Data/Figures_IBL')

# ================================= #
# GRAB ALL DATA FROM DATAJOINT
# ================================= #

sess = ((acquisition.Session & 'task_protocol LIKE "%trainingchoice%"') *
        (behavioral_analyses.SessionTrainingStatus() & 'training_status="trained"')) \
    * subject.SubjectLab()
s = pd.DataFrame.from_dict(sess.fetch(as_dict=True))
labs = list(s['lab_name'].unique())
print(labs)

# hack to get around SQL limit: fetch the trials one lab at a time
for lidx, lab in enumerate(labs):
    print(lab)
    b = (behavior.TrialSet.Trial & (subject.SubjectLab() & 'lab_name="%s"' % lab)) \
        * sess.proj('session_uuid', 'task_protocol') \
        * subject.SubjectLab.proj('lab_name') \
        * subject.Subject() \
        & 'subject_birth_date between "2018-09-01" and "2019-02-01"' \
        & 'subject_line IS NULL OR subject_line="C57BL/6J"'
    bdat = pd.DataFrame(
        b.fetch(order_by='subject_nickname, session_start_time, trial_id'))