def typical_resp(data_path='~/ni_data/ofM.dr/', l1_dir='l1', workflow_name='composite'):
    """Run second-level common-effect models for two "best responders" subject selections.

    Parameters
    ----------
    data_path : str, optional
        Root data directory containing the first-level results.
    l1_dir : str, optional
        Name of the first-level results directory under `data_path`.
    workflow_name : str, optional
        Name of the first-level workflow whose outputs are modelled.
    """
    # Imported locally for consistency with the sibling `typical_resp` variants,
    # which also import `glm` inside the function body.
    from samri.pipelines import glm

    l1_results = path.join(data_path, l1_dir, workflow_name)
    mask = "~/ni_data/templates/DSURQEc_200micron_mask.nii.gz"
    # NOTE(review): this variant filters on 'trial' while the BIDS variants
    # filter on 'task' — confirm which key the targeted data layout uses.
    glm.l2_common_effect(
        l1_results,
        workflow_name="best_responders_old",
        include={
            'subject': ["5689", "5690", "5691", "5700", "6262", "6255", "5694", "5706"],
            'trial': ["CogB"],
        },
        groupby="session",
        keep_work=True,
        mask=mask,
    )
    glm.l2_common_effect(
        l1_results,
        workflow_name="best_responders",
        include={
            'subject': ["5699", "5687", "5691", "5694", "4005", "6255", "5706"],
            'trial': ["CogB"],
        },
        groupby="session",
        keep_work=True,
        mask=mask,
    )
def typical_resp(data_path='~/ni_data/ofM.dr/bids/', l1_dir='l1', workflow_name='generic'):
    """Run the second-level common-effect model over the "best responders" subjects.

    Parameters
    ----------
    data_path : str, optional
        Root BIDS data directory containing the first-level results.
    l1_dir : str, optional
        Name of the first-level results directory under `data_path`.
    workflow_name : str, optional
        Name of the first-level workflow whose outputs are modelled.
    """
    # Only `glm` is used here; the previously co-imported `preprocess` was unused.
    from samri.pipelines import glm

    glm.l2_common_effect(
        path.join(data_path, l1_dir, workflow_name),
        workflow_name="best_responders",
        include={
            'subject': [
                "6262", "6255", "5694", "5706", "5704", "6455", "6459", "5691",
                "5689", "5690", "5700", "6451", "6460", "6456", "6461", "6462",
            ],
            'task': ["CogB"],
        },
        groupby="session",
        keep_work=True,
        mask="/usr/share/mouse-brain-atlases/dsurqec_200micron_mask.nii",
    )
def typical_resp(data_path='~/ni_data/ofM.dr/bids/', l1_dir='l1', workflow_name='generic'):
    """Run the second-level common-effect model over the "best responders" subjects.

    Parameters
    ----------
    data_path : str, optional
        Root BIDS data directory containing the first-level results.
    l1_dir : str, optional
        Name of the first-level results directory under `data_path`.
    workflow_name : str, optional
        Name of the first-level workflow whose outputs are modelled.
    """
    # Only `glm` is used here; the previously co-imported `preprocess` was unused.
    from samri.pipelines import glm

    glm.l2_common_effect(
        path.join(data_path, l1_dir, workflow_name),
        workflow_name="best_responders",
        include={
            'subject': [
                "6262", "6255", "5694", "5706", "5704", "6455", "6459", "5691",
                "5689", "5690", "5700", "6451", "6460", "6456", "6461", "6462",
            ],
            'task': ["CogB"],
        },
        groupby="session",
        keep_work=True,
        mask="/usr/share/mouse-brain-atlases/dsurqec_200micron_mask.nii",
    )
def dr_cont():
    """Determine dose-response "responder" animals from existing first-level results.

    Responders are determined both by per-scan statistical significance
    (summarized to a CSV) and by an a priori subject selection (modelled with a
    second-level common-effect workflow).
    """
    # `animal_multiselect` and `bruker` are only referenced by the disabled
    # preprocessing steps below; they are kept so the code can be re-enabled as-is.
    from labbookdb.report.development import animal_multiselect
    from samri.pipelines import glm
    from samri.pipelines.preprocess import bruker
    from samri.report.snr import iter_significant_signal
    from samri.utilities import bids_autofind

    # Assuming data converted to BIDS
    bids_base = '~/ni_data/ofM.dr/bids'

    # Preprocess
    #animal_list = animal_multiselect(cage_treatments=['cFluDW','cFluDW_','cFluIP'])
    # Animal list selection needs fixing in LabbookDB database, so we add the following animals manually
    #animal_list.extend(['4001','4002','4003','4004','4005','4006','4007','4008','4009','4011','4012','4013','6557'])

    # Determining Responders by Significance
    _, substitutions = bids_autofind(
        '~/ni_data/ofM.dr/l1/generic/',
        path_template="{bids_dir}/sub-{{subject}}/ses-{{session}}/sub-{{subject}}_ses-{{session}}_task-{{task}}_acq-{{acquisition}}_cbv_pfstat.nii.gz",
        # Raw string: `\.` is not a valid escape sequence in a plain literal.
        match_regex=r'.+/sub-(?P<sub>.+)/ses-(?P<ses>.+)/.*?_task-(?P<task>.+).*?_acq-(?P<acquisition>.+)_cbv_pfstat\.nii.gz',
    )
    print(substitutions)
    iter_significant_signal(
        '~/ni_data/ofM.dr/l1/generic/sub-{subject}/ses-{session}/sub-{subject}_ses-{session}_task-{task}_acq-{{acquisition}}_cbv_pfstat.nii.gz',
        substitutions=substitutions,
        mask_path='/usr/share/mouse-brain-atlases/dsurqec_200micron_mask.nii',
        save_as='~/ni_data/ofM.dr/l1/generic/total_significance.csv',
    )

    # Determining Responders by a priori pattern
    glm.l2_common_effect(
        '~/ni_data/ofM.dr/l1/',
        workflow_name="a_priori_responders",
        include={
            'subject': [
                '4001', '4005', '4006', '4007', '4008', '4009', '4011',
                '4012', '4013'
            ],
        },
        groupby="session",
        keep_work=True,
        mask="/usr/share/mouse-brain-atlases/dsurqec_200micron_mask.nii",
    )
def cbv_composite(
        data_path="~/ni_data/ofM.dr/",
        workflow_name='composite',
        preprocessing_dir="preprocessing",
        l1_dir="l1",
    ):
    """Run the subject-wise second-level model over composite-registered CBV data.

    The preprocessing and first-level steps are currently disabled; they are
    kept below in commented form for provenance. `preprocessing_dir` is only
    referenced by the disabled first-level step.
    """
    #preprocessing.bruker(data_path,
    #	#exclude_measurements=['20151027_121613_4013_1_1'],
    #	functional_match={'trial':['CogB','JogB']},
    #	structural_match={'acquisition':['TurboRARE','TurboRARElowcov']},
    #	#subjects=["4007","4008","4011","4012","5687","5688","5695","5689","5690","5691","5703","5704","5706"],
    #	#subjects=["4007","4008","5687","5688","5704",
    #	#	"5692","6262","5694","5700","6255","5699"],
    #	#subjects=["4001","4011","5703","5706",],
    #	workflow_name=workflow_name,
    #	lowpass_sigma=2,
    #	highpass_sigma=225,
    #	very_nasty_bruker_delay_hack=True,
    #	negative_contrast_agent=True,
    #	functional_blur_xy=.4,
    #	functional_registration_method="composite",
    #	keep_work=True,
    #	template="~/ni_data/templates/DSURQEc_200micron_average.nii",
    #	registration_mask="~/ni_data/templates/DSURQEc_200micron_mask.nii.gz",
    #	actual_size=True,
    #	verbose=True,
    #	)
    #glm.l1(path.join(data_path,preprocessing_dir,workflow_name),
    #	workflow_name=workflow_name,
    #	# include={"subjects":["5689","5690","5691"]},
    #	habituation="confound",
    #	mask="~/ni_data/templates/DSURQEc_200micron_mask.nii.gz",
    #	keep_work=True,
    #	)
    l1_results = path.join(data_path, l1_dir, workflow_name)
    glm.l2_common_effect(
        l1_results,
        workflow_name="composite_subjects",
        groupby="subject",
        keep_work=True,
        mask="~/ni_data/templates/DSURQEc_200micron_mask.nii.gz",
    )
def dr_composite():
    """Dose-response composite-registration pipeline.

    Runs preprocessing, two first-level models (whole-brain and DR-masked),
    and three second-level common-effect models (subject-wise and two
    session-wise variants with different subject exclusions).
    """
    from samri.pipelines import glm, preprocess

    # BUG FIX: this was called as `preprocessing.bruker`, but the module is
    # imported above under the name `preprocess` — the old name raised NameError.
    preprocess.bruker(
        "~/ni_data/ofM.dr/",
        exclude_measurements=['20151027_121613_4013_1_1'],
        workflow_name="composite",
        very_nasty_bruker_delay_hack=True,
        negative_contrast_agent=True,
        # NOTE(review): sibling pipelines use functional_blur_xy=.4 — confirm
        # that 4 (not 0.4) is intended here.
        functional_blur_xy=4,
        functional_registration_method="composite",
    )
    # Subjects 4001–4009 plus 4011 and 4012.
    subjects = list(range(4001, 4010)) + [4011, 4012]
    glm.l1(
        "~/ni_data/ofM.dr/preprocessing/composite",
        workflow_name="composite",
        include={"subjects": subjects},
        habituation="confound",
        mask="~/ni_data/templates/ds_QBI_chr_bin.nii.gz",
        keep_work=True,
    )
    glm.l1(
        "~/ni_data/ofM.dr/preprocessing/composite",
        workflow_name="composite_dr",
        include={"subjects": list(subjects)},
        habituation="confound",
        mask="~/ni_data/templates/roi/f_dr_chr_bin.nii.gz",
    )
    glm.l2_common_effect(
        "~/ni_data/ofM.dr/l1/composite",
        workflow_name="subjectwise_composite",
        groupby="subject",
    )
    glm.l2_common_effect(
        "~/ni_data/ofM.dr/l1/composite",
        workflow_name="sessionwise_composite",
        groupby="session",
        exclude={
            "subjects": [
                "4001", "4002", "4003", "4004", "4005", "4006", "4009",
                "4011", "4013"
            ]
        },
    )
    glm.l2_common_effect(
        "~/ni_data/ofM.dr/l1/composite",
        workflow_name="sessionwise_composite_w4011",
        groupby="session",
        exclude={
            "subjects": [
                "4001", "4002", "4003", "4004", "4005", "4006", "4009", "4013"
            ]
        },
    )
preprocess_base = '{}/l1/'.format(scratch_dir)
masks = {
    'generic': '/usr/share/mouse-brain-atlases/dsurqec_200micron_mask.nii',
    'masked': '/usr/share/mouse-brain-atlases/dsurqec_200micron_mask.nii',
}
for key in masks:
    # We filter by run, since the primary contrast is replaced by the statistic contrast in level2.
    # We exclude animal 4006, as its slice positioning significantly diminishes statistic coverage.
    # One model per run, in run order, matching the previous explicit calls.
    for run in ('0', '1'):
        glm.l2_common_effect(
            path.join(preprocess_base, key),
            workflow_name=key,
            mask=masks[key],
            groupby='none',
            keep_work=False,
            n_jobs_percentage=.33,
            exclude={'subject': ['4006']},
            include={'run': [run]},
            out_base='{}/l2'.format(scratch_dir),
        )
def dr_full():
    """Full dose-response pipeline on BIDS-organized data.

    Preprocesses the CBV scans, fits first-level models, and determines
    "responder" animals both by per-scan statistical significance and by an
    a priori subject selection.
    """
    # `bruker` is only referenced by the disabled BOLD preprocessing step
    # below; it is kept so the code can be re-enabled as-is.
    from labbookdb.report.development import animal_multiselect
    from samri.pipelines import glm
    from samri.pipelines.preprocess import bruker
    from samri.report.snr import iter_significant_signal
    from samri.utilities import bids_autofind

    # Assuming data converted to BIDS
    bids_base = '~/ni_data/ofM.dr/bids'

    # Preprocess
    animal_list = animal_multiselect(
        cage_treatments=['cFluDW', 'cFluDW_', 'cFluIP'])
    # Animal list selection needs fixing in LabbookDB database, so we add the following animals manually
    animal_list.extend([
        '4001', '4002', '4003', '4004', '4005', '4006', '4007', '4008', '4009',
        '4011', '4012', '4013', '6557'
    ])
    full_prep(
        bids_base,
        "~/ni_data/templates/dsurqec_200micron.nii",
        registration_mask="~/ni_data/templates/dsurqec_200micron_mask.nii",
        functional_match={
            'type': [
                'cbv',
            ],
        },
        structural_match={
            'acquisition': ['TurboRARE', 'TurboRARElowcov'],
        },
        subjects=animal_list,
        actual_size=True,
        functional_registration_method="composite",
        negative_contrast_agent=True,
        out_dir='~/ni_data/ofM.dr/preprocessing',
    )
    # BOLD preprocessing, currently disabled:
    #bruker(bids_base, "~/ni_data/templates/dsurqec_200micron.nii",
    #	registration_mask="~/ni_data/templates/dsurqec_200micron_mask.nii",
    #	functional_match={'type':['bold',],},
    #	structural_match={'acquisition':['TurboRARE','TurboRARElowcov'],},
    #	subjects=animal_list,
    #	actual_size=True,
    #	functional_registration_method="composite",
    #	negative_contrast_agent=False,
    #	out_dir='~/ni_data/ofM.dr/preprocessing',
    #	)

    # Model fitting
    glm.l1(
        '~/ni_data/ofM.dr/preprocessing/generic',
        out_dir='~/ni_data/ofM.dr/l1',
        workflow_name='generic',
        habituation="confound",
        mask="/usr/share/mouse-brain-atlases/dsurqec_200micron_mask.nii",
        # We need the workdir to extract the betas
        keep_work=True,
    )

    # Determining Responders by Significance
    # BUG FIX: `bids_autofind` returns a (paths, substitutions) pair — as
    # unpacked in `dr_cont` — so only the second element is passed on.
    _, substitutions = bids_autofind(
        '~/ni_data/ofM.dr/l1/generic/',
        path_template="{bids_dir}/sub-{{subject}}/ses-{{session}}/sub-{{subject}}_ses-{{session}}_task-{{task}}_acq-{{acquisition}}_cbv_pfstat.nii.gz",
        # Raw string: `\.` is not a valid escape sequence in a plain literal.
        match_regex=r'.+/sub-(?P<sub>.+)/ses-(?P<ses>.+)/.*?_task-(?P<task>.+).*?_acq-(?P<acquisition>.+)_cbv_pfstat\.nii.gz',
    )
    iter_significant_signal(
        '~/ni_data/ofM.dr/l1/generic/sub-{subject}/ses-{session}/sub-{subject}_ses-{session}_task-{task}_acq-{{acquisition}}_cbv_pfstat.nii.gz',
        substitutions=substitutions,
        mask_path='/usr/share/mouse-brain-atlases/dsurqec_200micron_mask.nii',
        save_as='~/ni_data/ofM.dr/l1/generic/total_significance.csv',
    )

    # Determining Responders by a priori pattern
    glm.l2_common_effect(
        '~/ni_data/ofM.dr/l1/',
        workflow_name="a_priori_responders",
        include={
            'subject': [
                '4001', '4005', '4006', '4007', '4008', '4009', '4011',
                '4012', '4013'
            ],
        },
        groupby="session",
        keep_work=True,
        mask="/usr/share/mouse-brain-atlases/dsurqec_200micron_mask.nii",
    )
def cbv_composite(
        data_path,
        workflow_name,
        preprocessing_dir="preprocessing",
        l1_dir="l1",
    ):
    """Preprocess CBV scans, fit first-level models, and run two session-wise
    second-level models over responder subject subsets.
    """
    mask = "/home/chymera/ni_data/templates/DSURQEc_200micron_mask.nii.gz"
    l1_results = path.join(data_path, l1_dir, workflow_name)
    # Subjects excluded from the session-wise models; the "best responders"
    # model additionally drops 4005 and 5687.
    nonresponders = [
        "4001", "4002", "4003", "4004", "4006", "4008", "4009",
        "5674", "5703", "5704", "5706",
    ]
    # NOTE(review): `preprocessing` is not imported inside this function — it is
    # presumably bound at module level; confirm (a sibling function imports the
    # module under the name `preprocess`).
    preprocessing.bruker(
        data_path,
        #exclude_measurements=['20151027_121613_4013_1_1'],
        functional_scan_types=["EPI_CBV_chr_longSOA", "EPI_CBV_jb_long"],
        #subjects=["4007","4008","4011","4012","5687","5688","5695","5689","5690","5691","5703","5704","5706"],
        subjects=["4007", "4008", "4011", "5687", "5688", "5704"],
        #subjects=["4007","4008","4009","4011","4012","5689","5690","5691","5703","5704","5706"],
        workflow_name=workflow_name,
        lowpass_sigma=2,
        highpass_sigma=225,
        very_nasty_bruker_delay_hack=True,
        negative_contrast_agent=True,
        functional_blur_xy=.4,
        functional_registration_method="composite",
        keep_work=True,
        template="~/ni_data/templates/DSURQEc_200micron_average.nii",
        registration_mask="~/ni_data/templates/DSURQEc_200micron_mask.nii.gz",
        actual_size=True,
    )
    glm.l1(
        path.join(data_path, preprocessing_dir, workflow_name),
        workflow_name=workflow_name,
        # include={"subjects":["5689","5690","5691"]},
        habituation="confound",
        mask=mask,
        keep_work=True,
    )
    glm.l2_common_effect(
        l1_results,
        workflow_name="composite_sessions_best_responders",
        exclude={
            "scans": ["EPI_BOLD_"],
            "subjects": nonresponders + ["4005", "5687"],
        },
        groupby="session",
        keep_work=True,
        mask=mask,
    )
    glm.l2_common_effect(
        l1_results,
        workflow_name="composite_sessions_responders",
        exclude={
            "scans": ["EPI_BOLD_"],
            "subjects": list(nonresponders),
        },
        groupby="session",
        keep_work=True,
        mask=mask,
    )
def abi_connectivity_map(
        identifier,
        exclude_experiments=None,
        keep_work=False,
        mask='/usr/share/mouse-brain-atlases/dsurqec_200micron_mask.nii',
        prepare_root='/var/tmp/{user}/samri/abi_connectivity/',
        prepare_subdirs='sub-{experiment}/ses-1/anat/sub-{experiment}_ses-1_desc-cope.nii.gz',
        save_as_cope='',
        save_as_zstat='',
        save_as_tstat='',
        tmp_dir='/var/tmp/{user}/samri/abi_connectivity/l2',
        abi_data_root='/usr/share/ABI-connectivity-data/',
        invert_lr_experiments=None,
    ):
    """Create statistical summary maps (any subset of COPE, t-statistics, and z-statistics) for an ABI connectivity identifier.

    Parameters
    ----------
    identifier : str
        Experiment set identifier which corresponds to the data set paths from the ABI-connectivity-data package.
    exclude_experiments : list of str, optional
        List of strings, each string 9 characters long, identifying which experiments should be excluded from the modelling.
        Defaults to no exclusions.
    keep_work : bool, optional
        Whether to keep the work files (including the prepared ABI experiment data, the work directory, results directory,
        and crash directory of the model).
    mask : str, optional
        Path to a NIfTI file containing ones and zeroes and specifying the mask for the modelling workflow.
        It is important that this data is in the same template space as the ABI-connectivity-data package,
        which is the DSURQEC space [1]_ .
    prepare_root : str, optional
        Python-formattable string, under which the prepared (e.g. flipped) data from ABI experiments is to be saved.
        Generally this should be a temporal and new path, containing either "{user}" or located under the user's home
        directory, in order to avoid race conditions between users. It will be deleted without request for confirmation
        if this function is executed with `keep_work=False` (which is the default).
    prepare_subdirs : str, optional
        Python-formattable string, containing "{experiment}" according to which the prepared (e.g. flipped) data from
        ABI experiments is to be organized inside the `prepare_root` directory.
    save_as_cope : str, optional
        Path under which to save the COPE result of the modelling.
    save_as_tstat : str, optional
        Path under which to save the t-statistic result of the modelling.
    save_as_zstat : str, optional
        Path under which to save the z-statistic result of the modelling.
    tmp_dir : str, optional
        Temporary directory inside which to execute the modelling workflow.
        Generally this should be a temporal and new path, containing either "{user}" or located under the user's home
        directory, in order to avoid race conditions between users.
    abi_data_root : str, optional
        Root path for the ABI-connectivity-data package installation on the current machine.
    invert_lr_experiments : list of str, optional
        List of strings, each string 9 characters long, identifying which experiments need to be inverted with respect
        to the left-right orientation. Defaults to no inversions.

    Notes
    -----
    If neither of the `save_as_cope`, `save_as_tstat`, and `save_as_zstat` parameters are specified, all of the results
    are saved in the current work directory as `<identifier>_<statistic>.nii.gz`.

    References
    ----------
    .. [1] H. I. Ioanas and M. Marks and M. F. Yanik and M. Rudin
       "An Optimized Registration Workflow and Standard Geometric Space for Small Animal Brain Imaging"
       https://doi.org/10.1101/619650
    """
    # Avoid shared mutable default arguments (backward-compatible: `None` maps
    # to the previous empty-list behavior).
    exclude_experiments = [] if exclude_experiments is None else exclude_experiments
    invert_lr_experiments = [] if invert_lr_experiments is None else invert_lr_experiments
    # Prepend user name to SAMRI temp directories to prevent users from overwriting each other's work.
    current_user = getpass.getuser()
    prepare_root = prepare_root.format(user=current_user)
    tmp_dir = tmp_dir.format(user=current_user)
    reposit_path = path.join(prepare_root, '{identifier}', prepare_subdirs)
    prepare_abi_connectivity_maps(
        identifier,
        abi_data_root=abi_data_root,
        reposit_path=reposit_path,
        invert_lr_experiments=invert_lr_experiments,
    )
    # NOTE(review): with the default `prepare_root` this `.format()` is a no-op
    # (the string contains no "{identifier}" placeholder), so the model runs
    # over the whole prepare root rather than the identifier subdirectory —
    # confirm whether `path.join(prepare_root, identifier)` was intended.
    prepare_root = prepare_root.format(identifier=identifier)
    glm.l2_common_effect(
        prepare_root,
        workflow_name=identifier,
        mask=mask,
        n_jobs_percentage=.33,
        out_base=tmp_dir,
        exclude={'subject': exclude_experiments},
        run_mode='fe',
        keep_work=keep_work,
    )
    # The l2 workflow deposits its results under `<tmp_dir>/<identifier>/`.
    if save_as_zstat:
        zstat_path = path.join(tmp_dir, identifier, '_zstat.nii.gz')
        shutil.copyfile(zstat_path, save_as_zstat)
    if save_as_tstat:
        tstat_path = path.join(tmp_dir, identifier, '_tstat.nii.gz')
        shutil.copyfile(tstat_path, save_as_tstat)
    if save_as_cope:
        cope_path = path.join(tmp_dir, identifier, '_cope.nii.gz')
        shutil.copyfile(cope_path, save_as_cope)
    if not any([save_as_zstat, save_as_tstat, save_as_cope]):
        # BUG FIX: the fallback destinations were the literal strings
        # '{identifier}_zstat.nii.gz' etc. — never formatted, so every call
        # wrote to the same literally-named files. Substitute the actual
        # identifier, as promised in the docstring.
        zstat_path = path.join(tmp_dir, identifier, '_zstat.nii.gz')
        shutil.copyfile(zstat_path, '{}_zstat.nii.gz'.format(identifier))
        tstat_path = path.join(tmp_dir, identifier, '_tstat.nii.gz')
        shutil.copyfile(tstat_path, '{}_tstat.nii.gz'.format(identifier))
        cope_path = path.join(tmp_dir, identifier, '_cope.nii.gz')
        shutil.copyfile(cope_path, '{}_cope.nii.gz'.format(identifier))
    if not keep_work:
        results_dir = path.join(tmp_dir, identifier)
        shutil.rmtree(results_dir)
        shutil.rmtree(prepare_root)