Example #1
def psy_by_mouse(unique_signed_contrasts):

    # all mice that have reached "trained" status
    mice = pd.DataFrame.from_dict(
        ((subject.Subject()) * (subject.SubjectLab()) *
         (behavior_analysis.SessionTrainingStatus()
          & 'training_status="trained"')).fetch(as_dict=True))

    psy_df = pd.DataFrame(columns=unique_signed_contrasts)

    for row, mouse in enumerate(mice.subject_nickname.unique()):

        key = ((subject.Subject() & 'subject_nickname = "{}"'.format(mouse)) *
               (behavior.TrialSet() & 'n_trials > 100') *
               (subject.SubjectLab()) *
               (behavior_analysis.SessionTrainingStatus()
                & 'training_status="trained"  ')).fetch('KEY')

        choice, cont_left, cont_right = (behavior.TrialSet.Trial & key).fetch(
            'trial_response_choice', 'trial_stim_contrast_left',
            'trial_stim_contrast_right')
        signed_contrasts = cont_right - cont_left
        right_choices = choice == 'CCW'

        total_trials = []
        right_trials = []

        for cont in unique_signed_contrasts:
            matching = (signed_contrasts == cont)
            total_trials.append(np.sum(matching))
            right_trials.append(np.sum(right_choices[matching]))

        prop_right_trials = np.divide(right_trials, total_trials)
        psy_df.loc[row, :] = prop_right_trials

    return psy_df
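
# A minimal usage sketch for psy_by_mouse (the contrast set below is an
# assumption; substitute the signed contrasts actually present in your data):
import numpy as np
import matplotlib.pyplot as plt

unique_signed_contrasts = np.array(
    [-1., -0.25, -0.125, -0.0625, 0., 0.0625, 0.125, 0.25, 1.])
psy_df = psy_by_mouse(unique_signed_contrasts)

# grand-average psychometric curve across mice
plt.plot(unique_signed_contrasts, psy_df.astype(float).mean(axis=0), 'o-')
plt.xlabel('Signed contrast')
plt.ylabel('P(choose right)')
plt.show()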
Example #2
    def make(self, key):

        # get nickname
        subject_nickname = (subject.Subject() & key).fetch1('subject_nickname')
        key['subject_nickname'] = subject_nickname
        subject_number = [
            int(s) for s in re.findall(r'\d+', subject_nickname)
        ][0]

        # load data file
        data_file = pd.read_csv('~/Downloads/sfn_data_ibl.csv')

        # grab only the row that contains what we need
        thisdat = data_file.loc[data_file.subject_nickname ==
                                subject_number, :]

        if not thisdat.empty:
            # now insert
            if not thisdat.sex.isnull().values.any():
                key['sex'] = thisdat.sex.item()
            if not thisdat.age.isnull().values.any():
                key['age'] = int(thisdat.age.item())
            if not thisdat.knowledge.isnull().values.any():
                key['task_knowledge'] = thisdat.knowledge.item()
            if not thisdat.AQ.isnull().values.any():
                key['aq_score'] = thisdat.AQ.item()
        print(key)
        self.insert1(key)
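
# For context, a computed-table definition compatible with the make() above
# could look like this sketch: the secondary attributes mirror the keys filled
# in above, while the table name, parent table, and types are assumptions.
import datajoint as dj
from ibl_pipeline import subject


class SubjectSurvey(dj.Computed):  # hypothetical name
    definition = """
    -> subject.Subject
    ---
    subject_nickname          : varchar(64)
    sex = null                : varchar(8)
    age = null                : int
    task_knowledge = null     : varchar(255)
    aq_score = null           : float
    """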
Example #3
    def make(self, key):

        # grab all trials for this subject
        trials = (subject.Subject() & key) * behavior.TrialSet.Trial()
        stim_left, stim_right, resp, feedback = trials.fetch(
            'trial_stim_contrast_left', 'trial_stim_contrast_right',
            'trial_response_choice', 'trial_feedback_type')

        # TODO: retrieve days to add dayLength

        # convert to psytrack format (hyperOpt comes from the psytrack package)
        D = {'y': pd.DataFrame(resp)[0].replace(
            {'CCW': 2, 'No Go': np.nan, 'CW': 1}).values}
        # psytrack expects 2-D input arrays (trials x regressors)
        D.update({'inputs': {'contrast_left': np.array(stim_left)[:, None],
                             'contrast_right': np.array(stim_right)[:, None]}})

        # TODO: specify the weights correctly
        weights = {'bias': 1,             # a special key
                   'contrast_left': 1,    # use only the first column of contrast_left from inputs
                   'contrast_right': 1}   # use only the first column of contrast_right from inputs

        hyper = {'sigInit': 2**4.,
                 'sigma': [2**-4.] * len(weights),  # each weight has its own sigma, all initialized the same
                 'sigDay': None}                    # not necessary to specify as None, but keeps things general

        optList = ['sigma']

        # FIT THE ACTUAL MODEL
        hyp, evd, wMode, hess = hyperOpt(D, hyper, weights, optList)

        # TODO: make sure to save the primary key for each trial to link it back up later

        glm_weights, transition_matrix, posterior_probs = run_zoe_func(
            some_inputs_from_dj)
        # glm_weights is num_of_states x 3 array; transition matrix is NumStates x NumStates array
        # posterior_probs is array of size NumTrials x NumStates

        glmhmm_result = dict(**key,
                             glm_weights=glm_weights,
                             transition_matrix=transition_matrix)
        self.insert1(glmhmm_result)

        session_start_times, trial_ids = trials.fetch('session_start_time',
                                                      'trial_id')
        trials_results = [
            dict(**key,
                 session_start_time=session_start_time,
                 trial_id=trial_id,
                 posterior_probs=prob)
            for prob, session_start_time, trial_id in zip(
                posterior_probs, session_start_times, trial_ids)
        ]

        self.Trial.insert(trials_results)

        class Trial(dj.Part):
            definition = """
Example #4
# ================================= #

sess = (acquisition.Session & 'task_protocol LIKE "%biased%"')
s = pd.DataFrame.from_dict(sess.fetch(as_dict=True))
labs = list(s['session_lab'].unique())
labs.append('zadorlab')
print(labs)

# hack to get around SQL limit
for lidx, lab in enumerate(labs):

    print(lab)
    b = (behavior.TrialSet.Trial & (subject.SubjectLab() & 'lab_name="%s"'%lab)) \
     * sess.proj('session_uuid','task_protocol') \
     * subject.SubjectLab.proj('lab_name') \
     * subject.Subject() & 'subject_line IS NULL OR subject_line="C57BL/6J"'
    #* subject.Subject() & 'subject_birth_date between "2018-09-01" and "2019-02-01"' & 'subject_line IS NULL OR subject_line="C57BL/6J"'

    bdat = pd.DataFrame(
        b.fetch(order_by='subject_nickname, session_start_time, trial_id'))
    print(bdat['subject_nickname'].unique())

    if lidx == 0:
        behav = bdat.copy()
    else:
        behav = behav.append(bdat.copy(), sort=False, ignore_index=True)

behav = dj2pandas(behav)

# ================================= #
# choice history
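
# A sketch of building the previous-trial regressors suggested by the comment
# above. It assumes dj2pandas leaves 'subject_nickname', 'session_start_time',
# 'trial_id', 'choice', and 'trial_feedback_type' columns in behav (column
# names are assumptions).
behav = behav.sort_values(['subject_nickname', 'session_start_time', 'trial_id'])
grouped = behav.groupby(['subject_nickname', 'session_start_time'])
behav['previous_choice'] = grouped['choice'].shift(1)
behav['previous_outcome'] = grouped['trial_feedback_type'].shift(1)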
Example #5
# ================================================================== #
# COMPARE DIFFERENT CONTRAST SETS
# ================================================================== #

labs = ['churchlandlab', 'mainenlab']
# hack to get around SQL limit
for lidx, lab in enumerate(labs):

	print(lab)

	if 'churchlandlab' in lab:

		# old mice, latest sessions (to check different contrast sets)
		b = (behavior.TrialSet.Trial & (subject.SubjectLab() & 'lab_name="%s"'%lab)) \
			* (acquisition.Session() & 'session_start_time > "2019-03-01"'  & 'task_protocol LIKE "%biased%"') \
			* (subject.Subject() & 'subject_birth_date < "2018-08-01"') * subject.SubjectLab.proj('lab_name')
	elif 'mainenlab' in lab:
		b = (behavior.TrialSet.Trial & (subject.SubjectLab() & 'lab_name="%s"'%lab)) \
			* (acquisition.Session() & 'session_start_time > "2019-04-01"'  & 'task_protocol LIKE "%biased%"') \
			* (subject.Subject() & 'subject_nickname in ("ZM_1369", "ZM_1371", "ZM_1372")') * subject.SubjectLab.proj('lab_name')

	bdat = pd.DataFrame(b.fetch(order_by='subject_nickname, session_start_time, trial_id'))
	print(bdat['subject_nickname'].unique())

	if lidx == 0:
		behav = bdat.copy()
	else:
		behav = behav.append(bdat.copy(), sort=False, ignore_index=True)

# recode
behav = dj2pandas(behav)
Example #6
# requires Alex's glm module

import matplotlib.pyplot as plt
import pandas as pd
## CONNECT TO datajoint
import datajoint as dj
dj.config['database.host'] = 'datajoint.internationalbrainlab.org'
from ibl_pipeline.analyses import behavior as behavior_analysis
from ibl_pipeline import reference, subject, behavior
from alexfigs_datajoint_functions import *  # this has all plotting functions
import seaborn as sns
from glm import *

key = ((subject.Subject() & 'sex!="U"') *
       (behavior.TrialSet() & 'n_trials > 100') * (subject.SubjectLab()) *
       (behavior_analysis.SessionTrainingStatus()
        & 'training_status="ready for ephys"  ')).fetch('KEY')
trials_ibl = pd.DataFrame.from_dict(
    (subject.Subject() * behavior.TrialSet.Trial & key).fetch(as_dict=True))

trials_ibl['signed_contrasts'] = trials_ibl[
    'trial_stim_contrast_right'] - trials_ibl['trial_stim_contrast_left']

##Rename for GLM function
trials_ibl = trials_ibl.rename(index=str,
                               columns={
                                   "session_start_time": "ses",
                                   "subject_uuid": "mouse_name",
                                   "trial_feedback_type": "feedbackType",
                                   "trial_response_choice": "choice"
                               })
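
# The project's glm module is not shown here; as a stand-in, a minimal
# logistic-regression sketch with statsmodels on the renamed dataframe
# (rightward choices are coded as 'CCW', as in the other snippets).
import statsmodels.api as sm

fit_df = trials_ibl[trials_ibl['choice'] != 'No Go'].copy()
fit_df['right_choice'] = (fit_df['choice'] == 'CCW').astype(int)

X = sm.add_constant(fit_df['signed_contrasts'].astype(float))
logit_fit = sm.Logit(fit_df['right_choice'], X).fit()
print(logit_fit.summary())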
Example #7
from scipy import stats

## CONNECT TO datajoint

import datajoint as dj
dj.config['database.host'] = 'datajoint.internationalbrainlab.org'

from ibl_pipeline.analyses import behavior as behavior_analysis
from ibl_pipeline import reference, subject, action, acquisition, data, behavior
from load_mouse_data_datajoint import *  # this has all plotting functions


# Collect all alyx data
allsubjects = pd.DataFrame.from_dict(
    (subject.Subject() * subject.SubjectLab & 'sex!="U"'
     & 'subject_birth_date>"2018-10-15"').fetch(
         as_dict=True, order_by=['subject_nickname']))

allsubjects = pd.DataFrame.from_dict(
    ((subject.Subject - subject.Death) * subject.SubjectLab & 'sex!="U"'
     & action.Weighing & action.WaterAdministration).fetch(
         as_dict=True, order_by=['lab_name', 'subject_nickname']))

if allsubjects.empty:
    raise ValueError('DataJoint seems to be down, please try again later')
# Drop duplicate entries
allsubjects = allsubjects.drop_duplicates('subject_nickname')
# Drop transgenics: keep only wild-type (C57BL/6J or unspecified) lines
allsubjects = allsubjects.loc[(allsubjects['subject_line'] == 'C57BL/6J') |
                              (allsubjects['subject_line'].isnull())]
Example #8
# loading and plotting functions
from define_paths import fig_path
from behavior_plots import *
from load_mouse_data_datajoint import *  # this has all plotting functions
import psychofit as psy  # https://github.com/cortex-lab/psychofit

# folder to save plots, from DataJoint
path = '/Snapshot_DataJoint_shortcut/'

# ============================================= #
# START BIG OVERVIEW PLOT
# ============================================= #

subjects = pd.DataFrame.from_dict(
    ((subject.Subject() - subject.Death() & 'sex!="U"')
     & action.Weighing() & action.WaterAdministration()
     & behavior.TrialSet()).fetch(as_dict=True,
                                  order_by=['lab_name', 'subject_nickname']))
users = subjects['lab_name'].unique()
print(users)

for lidx, lab in enumerate(users):

    subjects = pd.DataFrame.from_dict(
        ((subject.Subject() - subject.Death() & 'subject_nickname="IBL_47"'
          & 'sex!="U"' & 'lab_name="%s"' % lab)
         & action.Weighing() & action.WaterAdministration()
         & behavior.TrialSet()).fetch(as_dict=True,
                                      order_by=['subject_nickname']))
    # group by batches: mice that were born on the same day
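    # One way the batching could be done (a sketch; how batches feed the
    # downstream plots is not shown in this snippet):
    for birth_date, batch in subjects.groupby('subject_birth_date'):
        print(birth_date, batch['subject_nickname'].tolist())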
Example #9
# Set path for data
"""
You will need to modify the paths below  if running this code on your own computer
"""

path_training = '/Users/alex/Documents/Postdoc/GLM_behavior_paper/Data_by_animal/training/'
path_biased = '/Users/alex/Documents/Postdoc/GLM_behavior_paper/Data_by_animal/biased/'
path_model_analysis = '/Users/alex/Documents/PYTHON/trialhistory_frund/analysis.py'
path_model2array = '/Users/alex/Documents/PYTHON/analysis/paper-behavior/model2array.py'

#Import data from all the animals in both stages of training
use_subjects = subject.Subject * subject.SubjectLab * subject.SubjectProject & 'subject_project="ibl_neuropixel_brainwide_01"'
sess = (acquisition.Session & (behavior.TrialSet.Trial() & 'ABS(trial_stim_contrast_left-0)<0.0001' \
 & 'ABS(trial_stim_contrast_right-0)<0.0001') & 'task_protocol like "%trainingChoiceWorld%"') \
 * use_subjects
b = (behavior.TrialSet.Trial & sess) * subject.Subject() * subject.SubjectLab()
bdat = pd.DataFrame(
    b.fetch(order_by='subject_nickname, session_start_time, trial_id'))
behav = dj2pandas(bdat)

#Import data from biased choice world
sess = (acquisition.Session & (behavior.TrialSet.Trial() & 'ABS(trial_stim_contrast_left-0)<0.0001' \
 & 'ABS(trial_stim_contrast_right-0)<0.0001') & 'task_protocol like "%biasedChoiceWorld%"') * use_subjects
b = (behavior.TrialSet.Trial & sess) * subject.Subject() * subject.SubjectLab()
bdat = pd.DataFrame(
    b.fetch(order_by='subject_nickname, session_start_time, trial_id'))
behav_biased = dj2pandas(bdat)

#Store lab identities for later
mousenlabid = behav[['subject_uuid', 'lab_name']]
mousenlabid = mousenlabid.drop_duplicates()
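
# Later on, the stored identities can be attached to summaries that no longer
# carry lab_name, e.g. a per-mouse trial count (a sketch):
per_mouse = behav_biased.groupby('subject_uuid').size().reset_index(name='n_trials')
per_mouse = per_mouse.merge(mousenlabid, on='subject_uuid', how='left')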
Example #10
sns.set(style="ticks", context="paper", font_scale=1.4)

# import wrappers etc
from ibl_pipeline import reference, subject, action, acquisition, data, behavior
from ibl_pipeline.utils import psychofit as psy
from ibl_pipeline.analyses import behavior as behavioral_analyses
from dj_tools import *
# import training_criteria_schemas as criteria_urai

figpath = os.path.join(os.path.expanduser('~'), 'Data/Figures_IBL')

# ================================= #
# GRAB ALL DATA FROM DATAJOINT
# ================================= #

use_subjects = (subject.Subject()
                & 'subject_birth_date between "2018-09-01" and "2019-02-01"'
                & 'subject_line IS NULL OR subject_line="C57BL/6J"')
# criterion = criteria_urai.SessionTrainingStatus_v1()
criterion = behavioral_analyses.SessionTrainingStatus()
sess = ((acquisition.Session & 'task_protocol LIKE "%biased%"') * \
 (criterion & 'training_status="ready for ephys"')) \
 * subject.SubjectLab * use_subjects

s = pd.DataFrame.from_dict(sess.fetch(as_dict=True))
labs = list(s['lab_name'].unique())
labs = ['zadorlab']
print(labs)

# hack to get around SQL limit
for lidx, lab in enumerate(labs):

    print(lab)
Example #11
sns.set_style("darkgrid")
sns.set_context(context="poster")

# import wrappers etc
from ibl_pipeline import reference, subject, action, acquisition, data, behavior
from ibl_pipeline.utils import psychofit as psy
from dj_tools import *
figpath = os.path.join(os.path.expanduser('~'), 'Data/Figures_IBL')

# ================================= #
##  GET DATA
# ================================= #

b = (behavior.TrialSet) \
 * (acquisition.Session.proj(session_date='DATE(session_start_time)') & 'session_start_time > "2019-03-01"') \
 * (subject.Subject() & 'subject_birth_date between "2018-09-01" and "2019-02-01"') * subject.SubjectLab() \
 * action.Weighing.proj(weighing_date='DATE(weighing_time)')
bdat = pd.DataFrame(b.fetch(order_by='subject_nickname, session_start_time'))

print(bdat['subject_nickname'].unique())
bdat['date_march'] = pd.to_timedelta(pd.to_datetime(
    bdat.session_date)).dt.total_seconds().astype(int)

g = sns.lmplot(x="date_march",
               y="n_trials",
               hue="sex",
               col="lab_name",
               units="subject_nickname",
               col_wrap=4,
               lowess=True,
               data=bdat)
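
# figpath is defined above but unused in this snippet; the lmplot figure could
# be saved there like this (file name is an assumption):
g.savefig(os.path.join(figpath, 'n_trials_over_time.pdf'))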
Example #12
import datajoint as dj
from ibl_pipeline import reference, subject, action, acquisition, data, behavior

# loading and plotting functions
from behavior_plots import *
from load_mouse_data_datajoint import * # this has all plotting functions
import psychofit as psy # https://github.com/cortex-lab/psychofit

# folder to save plots, from DataJoint
path = '/Figures_DataJoint_shortcuts/'

# ============================================= #
# START BIG OVERVIEW PLOT
# ============================================= #

allsubjects = pd.DataFrame.from_dict(((subject.Subject() - subject.Death()) & 'sex!="U"'
                                   & action.Weighing() & action.WaterAdministration() & behavior.TrialSet()
                                   ).fetch(as_dict=True, order_by=['lab_name', 'subject_nickname']))
users = allsubjects['lab_name'].unique()
print(users)

# from guido: make sure max 5 mice are plotted on a single figure
sub_batch_size = 5


for lidx, lab in enumerate(users):

	# take mice from this lab only
	subjects = pd.DataFrame.from_dict(((subject.Subject() - subject.Death() & 'sex!="U"' & 'lab_name="%s"'%lab)
                                   & action.Weighing() & action.WaterAdministration() & behavior.TrialSet()
                                   ).fetch(as_dict=True, order_by=['subject_nickname']))
Example #13
from ibl_pipeline.analyses import behavior as behavior_analysis
from ibl_pipeline.analyses import psychofit as psy  # https://github.com/cortex-lab/psychofit

# loading and plotting functions
from behavior_plots import *
from load_mouse_data_datajoint import *  # this has all plotting functions

# folder to save plots, from DataJoint
path = '/Figures_DataJoint_shortcuts/'

# ============================================= #
# START BIG OVERVIEW PLOT
# ============================================= #

allsubjects = pd.DataFrame.from_dict(
    ((subject.Subject() - subject.Death()) & 'sex!="U"'
     & action.Weighing() & action.WaterAdministration()).fetch(
         as_dict=True, order_by=['lab_name', 'subject_nickname']))
if allsubjects.empty:
    raise ValueError('DataJoint seems to be down, please try again later')

users = allsubjects['lab_name'].unique()
print(users)

# from guido: make sure max 5 mice are plotted on a single figure
sub_batch_size = 5

# keep track of when each mouse is trained
training_review = pd.DataFrame([])

for lidx, lab in enumerate(users):
Example #14
# loading and plotting functions
from define_paths import fig_path
from behavior_plots import *
from load_mouse_data_datajoint import * # this has all plotting functions
import psychofit as psy # https://github.com/cortex-lab/psychofit

# folder to save plots, from DataJoint
path = '/Snapshot_DataJoint_shortcut/'

# ============================================= #
# START BIG OVERVIEW PLOT
# ============================================= #

# all mice that are alive, without those with undefined sex (i.e. example mice)
# restrict to animals that have trial data, weights and water logged
subjects = pd.DataFrame.from_dict(((subject.Subject() - subject.Death() & 'sex!="U"')
                                   & action.Weighing() & action.WaterAdministration() & behavior.TrialSet()
                                   ).fetch(as_dict=True, order_by=['lab_name', 'subject_nickname']))
print(subjects['subject_nickname'].unique())

for i, mouse in enumerate(subjects['subject_nickname']):

    # get all this mouse's data
    print(mouse)
    weight_water, baseline = get_water_weight(mouse)
    behav = get_behavior(mouse)

    if weight_water.empty or behav.empty:
        continue

    # ============================================= #
Example #15
sess = ((acquisition.Session & 'task_protocol LIKE "%trainingchoice%"') * \
 (behavioral_analyses.SessionTrainingStatus() & 'training_status="trained"')) * \
 subject.SubjectLab()

s = pd.DataFrame.from_dict(sess.fetch(as_dict=True))
labs = list(s['lab_name'].unique())
print(labs)

# hack to get around SQL limit
for lidx, lab in enumerate(labs):

	print(lab)
	b = (behavior.TrialSet.Trial & (subject.SubjectLab() & 'lab_name="%s"'%lab)) \
		* sess.proj('session_uuid','task_protocol') \
		* subject.SubjectLab.proj('lab_name') \
		* subject.Subject() & 'subject_birth_date between "2018-09-01" and "2019-02-01"' & 'subject_line IS NULL OR subject_line="C57BL/6J"'

	bdat = pd.DataFrame(b.fetch(order_by='subject_nickname, session_start_time, trial_id'))
	print(bdat['subject_nickname'].unique())

	if lidx == 0:
		behav = bdat.copy()
	else:
		behav = behav.append(bdat.copy(), sort=False, ignore_index=True)

# ================================= #
# for now, manually add the cortexlab matlab animals
# ================================= #

ucl_mice = ['KS001', 'MW003', 'MW001', 'MW002', 'LEW008', 'LEW009', 'LEW010']
ucl_trained_dates = ['2019-02-25', '2018-12-10', '2019-02-11', '2019-01-14', '2018-10-04', '2018-10-04', 'LEW010']
Example #16
from scipy import stats

## CONNECT TO datajoint

import datajoint as dj
dj.config['database.host'] = 'datajoint.internationalbrainlab.org'

from ibl_pipeline.analyses import behavior as behavior_analysis
from ibl_pipeline import reference, subject, action, acquisition, data, behavior
from load_mouse_data_datajoint import *  # this has all plotting functions

#Collect all alyx data
allsubjects = pd.DataFrame.from_dict(
    (subject.Subject() * subject.SubjectLab & 'sex!="U"'
     & 'subject_birth_date>"2018-10-15"').fetch(as_dict=True,
                                                order_by=['subject_nickname']))

allsubjects = pd.DataFrame.from_dict(
    ((subject.Subject - subject.Death) * subject.SubjectLab & 'sex!="U"'
     & action.Weighing & action.WaterAdministration).fetch(
         as_dict=True, order_by=['lab_name', 'subject_nickname']))

if allsubjects.empty:
    raise ValueError('DataJoint seems to be down, please try again later')
# Drop duplicate entries
allsubjects = allsubjects.drop_duplicates('subject_nickname')
# Drop transgenics: keep only wild-type (C57BL/6J or unspecified) lines
allsubjects = allsubjects.loc[(allsubjects['subject_line'] == 'C57BL/6J') |
                              (allsubjects['subject_line'].isnull())]