_, matrix, names = check_design_matrix(design_matrix) contrasts = {} n_columns = len(names) contrast_matrix = np.eye(n_columns) for i in xrange(2): contrasts[names[2 * i]] = contrast_matrix[2 * i] # more interesting contrasts contrasts['faces-scrambled'] = contrasts['faces'] - contrasts['scrambled'] contrasts['scrambled-faces'] = -contrasts['faces-scrambled'] contrasts['effects_of_interest'] = contrasts['faces'] + contrasts['scrambled'] # fit GLM print('Fitting a GLM (this takes time)...') fmri_glm = FirstLevelGLM().fit( [nibabel.concat_images(x) for x in subject_data.func], design_matrices) # save computed mask mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz") print("Saving mask image %s" % mask_path) nibabel.save(fmri_glm.masker_.mask_img_, mask_path) mask_images.append(mask_path) # compute contrast maps z_maps = {} effects_maps = {} for contrast_id, contrast_val in contrasts.items(): print("\tcontrast id: %s" % contrast_id) z_map, t_map, effects_map, var_map = fmri_glm.transform( [contrast_val] * 2, contrast_name=contrast_id, output_z=True,
def group_one_sample_t_test(masks, effects_maps, contrasts, output_dir,
                            start_time=base_reporter.pretty_time(),
                            **kwargs):
    """Run a one-sample t-test procedure for group (second-level) analysis.

    For each experimental condition we are only interested in refuting the
    null hypothesis H0: "The average effect across the subjects is zero!"

    Parameters
    ----------
    masks: list of strings or nibabel image objects
        subject masks, one per subject
    effects_maps: list of dicts of lists
        effects maps from subject-level GLM; each entry is a dictionary;
        each entry (indexed by condition id) of this dictionary is the
        filename (or corresponding nibabel image object) for the effects
        map for that condition (aka contrast), for that subject
    contrasts: dictionary of array_likes
        contrast vectors, indexed by condition id
    output_dir: string
        directory under which group-level maps and the report are written
    start_time: string, optional
        timestamp shown in the generated report.  NOTE(review): the default
        is evaluated once at import time, not per call — kept as-is for
        interface compatibility.
    kwargs: dict_like
        kwargs for the generate_subject_stats_report API

    Returns
    -------
    dict
        mapping from contrast id to the path of its group-level z-map.
    """
    # make output directory
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    assert len(masks) == len(effects_maps), (len(masks), len(effects_maps))

    # compute group mask
    group_mask = intersect_masks(masks)

    # construct design matrix (only one covariate, namely the "mean effect")
    design_matrix = np.ones(len(effects_maps)
                            )[:, np.newaxis]  # only the intercept

    group_level_z_maps = {}
    group_level_t_maps = {}
    for contrast_id in contrasts:
        print("\tcontrast id: %s" % contrast_id)

        # effects maps will be the input to the second level GLM
        first_level_image = nibabel.concat_images(
            [x[contrast_id] for x in effects_maps])

        # fit 2nd level GLM for given contrast
        group_model = FirstLevelGLM(first_level_image,
                                    design_matrix, group_mask)
        group_model.fit(do_scaling=False, model='ols')

        # specify and estimate the contrast
        contrast_val = np.array(([[1.]])
                                )  # the only possible contrast !
        z_map, t_map = group_model.contrast(
            contrast_val, con_id='one_sample %s' % contrast_id, output_z=True,
            output_stat=True)

        # save maps
        for map_type, map_img in zip(["z", "t"], [z_map, t_map]):
            map_dir = os.path.join(output_dir, '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, 'group_level_%s.nii.gz' % (
                contrast_id))
            print("\t\tWriting %s ..." % map_path)
            nibabel.save(map_img, map_path)
            if map_type == "z":
                group_level_z_maps[contrast_id] = map_path
            elif map_type == "t":
                # BUG FIX: t-map paths were previously stored in
                # group_level_z_maps, clobbering the z-map path for every
                # contrast (and leaving group_level_t_maps always empty)
                group_level_t_maps[contrast_id] = map_path

    # do stats report
    stats_report_filename = os.path.join(output_dir, "report_stats.html")
    generate_subject_stats_report(stats_report_filename, contrasts,
                                  group_level_z_maps, group_mask,
                                  start_time=start_time, **kwargs)

    print("\r\nStatistic report written to %s\r\n" % (
        stats_report_filename))

    return group_level_z_maps
dmat_outfile = os.path.join(subject_data.output_dir, 'design_matrix.png') plt.savefig(dmat_outfile, bbox_inches="tight", dpi=200) # specify contrasts contrasts = {} _, matrix, names = check_design_matrix(design_matrix) contrast_matrix = np.eye(len(names)) for i in range(len(names)): contrasts[names[i]] = contrast_matrix[i] # more interesting contrasts""" contrasts = {'active-rest': contrasts['active'] - contrasts['rest']} # fit GLM print('\r\nFitting a GLM (this takes time) ..') fmri_glm = FirstLevelGLM(noise_model='ar1', standardize=False).fit( [nibabel.concat_images(subject_data.func[0])], design_matrix) # save computed mask mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz") print("Saving mask image %s" % mask_path) nibabel.save(fmri_glm.masker_.mask_img_, mask_path) # compute bg unto which activation will be projected anat_img = nibabel.load(subject_data.anat) print("Computing contrasts ..") z_maps = {} effects_maps = {} for contrast_id, contrast_val in contrasts.items(): print("\tcontrast id: %s" % contrast_id) z_map, t_map, eff_map, var_map = fmri_glm.transform(
"""specify contrasts""" _, matrix, names = check_design_matrix(design_matrix) contrasts = {} n_columns = len(names) I = np.eye(len(names)) for i in xrange(2): contrasts['%s' % names[2 * i]] = I[2 * i] """more interesting contrasts""" contrasts['EV1>EV2'] = contrasts['EV1'] - contrasts['EV2'] contrasts['EV2>EV1'] = contrasts['EV2'] - contrasts['EV1'] contrasts['effects_of_interest'] = contrasts['EV1'] + contrasts['EV2'] """fit GLM""" print('\r\nFitting a GLM (this takes time) ..') fmri_glm = FirstLevelGLM() fmri_glm.fit(fmri_files, design_matrix) """save computed mask""" mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz") print("Saving mask image %s" % mask_path) nibabel.save(fmri_glm.masker_.mask_img_, mask_path) # compute bg unto which activation will be projected mean_fmri_files = compute_mean_3D_image(fmri_files) print("Computing contrasts ..") z_maps = {} for contrast_id, contrast_val in contrasts.items(): print("\tcontrast id: %s" % contrast_id) z_map, t_map, eff_map, var_map = fmri_glm.transform( con_vals=contrasts[contrast_id],
def group_one_sample_t_test(masks, effects_maps, contrasts, output_dir,
                            start_time=base_reporter.pretty_time(),
                            **kwargs):
    """Run a one-sample t-test procedure for group (second-level) analysis.

    For each experimental condition we are only interested in refuting the
    null hypothesis H0: "The average effect across the subjects is zero!"

    Parameters
    ----------
    masks: list of strings or nibabel image objects
        subject masks, one per subject
    effects_maps: list of dicts of lists
        effects maps from subject-level GLM; each entry is a dictionary;
        each entry (indexed by condition id) of this dictionary is the
        filename (or corresponding nibabel image object) for the effects
        map for that condition (aka contrast), for that subject
    contrasts: dictionary of array_likes
        contrast vectors, indexed by condition id
    output_dir: string
        directory under which group-level maps and the report are written
    start_time: string, optional
        timestamp shown in the generated report.  NOTE(review): the default
        is evaluated once at import time, not per call — kept as-is for
        interface compatibility.
    kwargs: dict_like
        kwargs for the generate_subject_stats_report API

    Returns
    -------
    dict
        mapping from contrast id to the path of its group-level z-map.
    """
    # make output directory
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    assert len(masks) == len(effects_maps), (len(masks), len(effects_maps))

    # compute group mask
    group_mask = intersect_masks(masks)

    # construct design matrix (only one covariate, namely the "mean effect")
    design_matrix = np.ones(
        len(effects_maps))[:, np.newaxis]  # only the intercept

    group_level_z_maps = {}
    group_level_t_maps = {}
    for contrast_id in contrasts:
        print("\tcontrast id: %s" % contrast_id)

        # effects maps will be the input to the second level GLM
        first_level_image = nibabel.concat_images(
            [x[contrast_id] for x in effects_maps])

        # fit 2nd level GLM for given contrast
        group_model = FirstLevelGLM(first_level_image,
                                    design_matrix, group_mask)
        group_model.fit(do_scaling=False, model='ols')

        # specify and estimate the contrast
        contrast_val = np.array(([[1.]]))  # the only possible contrast !
        z_map, t_map = group_model.contrast(contrast_val,
                                            con_id='one_sample %s'
                                            % contrast_id,
                                            output_z=True, output_stat=True)

        # save maps
        for map_type, map_img in zip(["z", "t"], [z_map, t_map]):
            map_dir = os.path.join(output_dir, '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir,
                                    'group_level_%s.nii.gz' % (contrast_id))
            print("\t\tWriting %s ..." % map_path)
            nibabel.save(map_img, map_path)
            if map_type == "z":
                group_level_z_maps[contrast_id] = map_path
            elif map_type == "t":
                # BUG FIX: t-map paths were previously stored in
                # group_level_z_maps, clobbering the z-map path for every
                # contrast (and leaving group_level_t_maps always empty)
                group_level_t_maps[contrast_id] = map_path

    # do stats report
    stats_report_filename = os.path.join(output_dir, "report_stats.html")
    generate_subject_stats_report(stats_report_filename, contrasts,
                                  group_level_z_maps, group_mask,
                                  start_time=start_time, **kwargs)

    print("\r\nStatistic report written to %s\r\n" % stats_report_filename)

    return group_level_z_maps
"""specify contrasts""" _, matrix, names = check_design_matrix(design_matrix) contrasts = {} n_columns = len(names) I = np.eye(len(names)) for i in xrange(2): contrasts['%s' % names[2 * i]] = I[2 * i] """more interesting contrasts""" contrasts['EV1>EV2'] = contrasts['EV1'] - contrasts['EV2'] contrasts['EV2>EV1'] = contrasts['EV2'] - contrasts['EV1'] contrasts['effects_of_interest'] = contrasts['EV1'] + contrasts['EV2'] """fit GLM""" print('\r\nFitting a GLM (this takes time) ..') fmri_glm = FirstLevelGLM() fmri_glm.fit(fmri_files, design_matrix) """save computed mask""" mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz") print "Saving mask image %s" % mask_path nibabel.save(fmri_glm.masker_.mask_img_, mask_path) # compute bg unto which activation will be projected mean_fmri_files = compute_mean_3D_image(fmri_files) print "Computing contrasts .." z_maps = {} for contrast_id, contrast_val in contrasts.iteritems(): print "\tcontrast id: %s" % contrast_id z_map, t_map, eff_map, var_map = fmri_glm.transform( con_vals=contrasts[contrast_id],
def first_level(subject_id):
    """Launch the first-level GLM analysis for one subject.

    The ROOTDIR environment variable must be set by the user and points to
    where the data are stored.
    See: https://github.com/neurospin/pypreprocess/
    Usage: python first_level.py

    Parameters
    ----------
    subject_id : str
        Name of the subject (BIDS-style subject directory name).

    Returns
    -------
    dict
        mapping from contrast id to the path of its z-map on disk.
    """
    # Configure paths (input data and per-subject stats output directory)
    data_dir = os.path.join(os.environ["ROOTDIR"], "dataset",
                            "bids_dataset", subject_id)
    output_dir = os.path.join(os.environ["ROOTDIR"], "processed_data",
                              subject_id)
    subject_session_output_dir = os.path.join(output_dir, 'res_stats')
    if not os.path.exists(subject_session_output_dir):
        os.makedirs(subject_session_output_dir)

    # Experimental paradigm meta-params
    stats_start_time = time.ctime()
    tr = 2.4  # repetition time, in seconds
    drift_model = 'blank'
    # hrf_model = 'canonical'  # older name for the hemodynamic response model
    hrf_model = 'spm'  # hemodynamic response function model
    hfcut = 128.  # high-pass filter cutoff (period_cut), in seconds
    n_scans = 128

    # Preparation of paradigm: pick the single events file for this task
    events_file = glob.glob(
        os.path.join(data_dir, 'func/*_task-standartloc_events.tsv'))[0]
    print events_file
    paradigm = paradigm_contrasts.localizer_paradigm(events_file)

    # Build design matrix over the scan acquisition times
    frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
    design_matrix = make_design_matrix(
        frametimes,
        paradigm,
        hrf_model=hrf_model,
        drift_model=drift_model,
        period_cut=hfcut,
        )

    # Specify contrasts (project-defined localizer contrasts)
    contrasts = paradigm_contrasts.localizer_contrasts(design_matrix)

    # Fit GLM on the preprocessed (normalized/realigned) BOLD image
    fmri_file = glob.glob(
        os.path.join(output_dir,
                     'func/wra*_task-standartloc_bold.nii.gz'))[0]
    print 'Fitting a GLM (this takes time)...'
    # fmri_glm = FirstLevelGLM(noise_model='ar1',
    #                          standardize=False).fit(fmri_files[0],
    #     [design_matrix for design_matrix in design_matrices]
    #     )
    # fmri_glm = FirstLevelGLM(noise_model='ar1',
    #                          standardize=False).fit(fmri_file,
    #     [design_matrix for design_matrix in design_matrices]
    #     )
    fmri_glm = FirstLevelGLM(noise_model='ar1',
                             standardize=False).fit(fmri_file,
                                                    design_matrix)

    # Save computed mask
    mask_images = []
    mask_path = os.path.join(subject_session_output_dir, "mask.nii.gz")
    nibabel.save(fmri_glm.masker_.mask_img_, mask_path)
    mask_images.append(mask_path)

    # Compute contrast maps; NOTE(review): transform presumably returns
    # (z, t, effects, variance) maps in that order — confirm against the
    # FirstLevelGLM API
    z_maps = {}
    effects_maps = {}
    for contrast_id, contrast_val in contrasts.iteritems():
        print "\tcontrast id: %s" % contrast_id
        z_map, t_map, effects_map, var_map = fmri_glm.transform(
            [contrast_val] * 1,  # one contrast vector per (single) session
            contrast_name=contrast_id,
            output_z=True,
            output_stat=True,
            output_effects=True,
            output_variance=True)

        # Store stat maps to disk, one sub-directory per map type
        for map_type, out_map in zip(['z', 't', 'effects', 'variance'],
                                     [z_map, t_map, effects_map,
                                      var_map]):
            map_dir = os.path.join(subject_session_output_dir,
                                   '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, '%s%s.nii.gz'
                                    % (subject_id, contrast_id))
            print "\t\tWriting %s ..." % map_path
            nibabel.save(out_map, map_path)

            # collect zmaps for contrasts we're interested in
            if map_type == 'z':
                z_maps[contrast_id] = map_path
            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    # Do stats report against the subject's normalized anatomical image
    anat_file = glob.glob(os.path.join(output_dir,
                                       'anat/w*_T1w.nii.gz'))[0]
    anat_img = nibabel.load(anat_file)
    stats_report_filename = os.path.join(subject_session_output_dir,
                                         "report_stats.html")

    generate_subject_stats_report(
        stats_report_filename,
        contrasts,
        z_maps,
        fmri_glm.masker_.mask_img_,
        threshold=2.3,
        cluster_th=15,
        anat=anat_img,
        anat_affine=anat_img.get_affine(),
        design_matrices=[design_matrix],
        paradigm=paradigm,
        subject_id=subject_id,
        start_time=stats_start_time,
        title="GLM for subject %s" % subject_id,

        # additional ``kwargs`` for more informative report
        TR=tr,
        n_scans=n_scans,
        hfcut=hfcut,
        frametimes=frametimes,
        drift_model=drift_model,
        hrf_model=hrf_model,
        )

    ProgressReport().finish_dir(subject_session_output_dir)
    print "Statistic report written to %s\r\n" % stats_report_filename
    return z_maps
def execute_glm(doc, out_dir, contrast_definitions=None,
                outputs=None, glm_model="ar1"):
    """Execute the GLM for one subject -- and perhaps multiple sessions
    thereof.

    Parameters
    ----------
    doc : dict
        subject document; the code reads the keys "subject", "mask",
        "study", "TR", "n_scans" and "n_sessions" from it
    out_dir : str
        root output directory; per-subject results go to out_dir/<subject>
    contrast_definitions : optional
        if given, passed to make_contrasts to (re)define the contrasts
    outputs : dict, optional
        export flags for _export; "maps" is always forced to False here
    glm_model : str, optional
        noise model passed to FirstLevelGLM (default "ar1")
    """
    stats_start_time = time.ctime()

    # study_dir = os.path.join(out_dir, doc['study'])

    if outputs is None:
        outputs = {"maps": False, "data": False, "mask": True,
                   "model": True}
    else:
        # map export is handled explicitly below, never by _export
        outputs["maps"] = False

    subject_id = doc["subject"]
    subject_output_dir = os.path.join(out_dir, subject_id)

    _export(doc, subject_output_dir, outputs=outputs)

    params = load_glm_params(doc)

    # instantiate GLM
    fmri_glm = FirstLevelGLM(noise_model=glm_model,
                             mask=doc["mask"]).fit(params["data"],
                                                   params["design_matrices"])

    # save beta-maps to disk, one text file per session GLM
    beta_map_dir = os.path.join(subject_output_dir, "beta_maps")
    if not os.path.exists(beta_map_dir):
        os.makedirs(beta_map_dir)
    for j, glm in zip(range(len(fmri_glm.glms)), fmri_glm.glms):
        # XXX save array in some compressed format
        np.savetxt(
            os.path.join(beta_map_dir, "beta_map_%i.txt" % j),
            glm.get_beta(),  # array has shape (n_conditions, n_voxels)
            )

    # define contrasts
    if contrast_definitions is not None:
        params["contrasts"] = make_contrasts(params, contrast_definitions)
    # sorted contrast ids, taken from the first session's contrast dict
    contrasts = sorted(params["contrasts"][0].keys())

    _contrasts = {}
    z_maps = {}

    # compute stats maps
    for index, contrast_id in enumerate(contrasts):
        print " study[%s] subject[%s] contrast [%s]: %i/%i" % (
            doc["study"], doc["subject"], contrast_id, index + 1,
            len(contrasts),
            )

        # one contrast vector per session
        contrast = [c[contrast_id] for c in params["contrasts"]]
        contrast_name = contrast_id.replace(" ", "_")

        # NOTE(review): transform presumably returns (z, t, effects,
        # variance) maps in that order — confirm against FirstLevelGLM
        z_map, t_map, c_map, var_map = fmri_glm.transform(
            contrast, con_name=contrast_id, output_z=True,
            output_stat=True, output_effects=True, output_variance=True
            )

        # store each map type in its own sub-directory
        for dtype, out_map in zip(["z", "t", "c", "variance"],
                                  [z_map, t_map, c_map, var_map]):
            map_dir = os.path.join(subject_output_dir, "%s_maps" % dtype)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, "%s.nii.gz" % contrast_name)
            nb.save(out_map, map_path)

            # collect z map
            if dtype == "z":
                _contrasts[contrast_name] = contrast
                z_maps[contrast_name] = map_path

    # invoke a single API to handle plotting and html business for you
    subject_stats_report_filename = os.path.join(subject_output_dir,
                                                 "report_stats.html")
    glm_reporter.generate_subject_stats_report(
        subject_stats_report_filename,
        _contrasts,
        z_maps,
        doc["mask"],
        design_matrices=list(params["design_matrices"]),
        subject_id=doc["subject"],
        cluster_th=15,  # 15 voxels
        start_time=stats_start_time,
        TR=doc["TR"],
        n_scans=doc["n_scans"],
        n_sessions=doc["n_sessions"],
        model=glm_model,
        )

    print "Report for subject %s written to %s" % (
        doc["subject"], subject_stats_report_filename)
# specify contrasts _, matrix, names = check_design_matrix(design_matrix) contrasts = {} n_columns = len(names) contrast_matrix = np.eye(n_columns) for i in range(2): contrasts[names[2 * i]] = contrast_matrix[2 * i] # more interesting contrasts contrasts['faces-scrambled'] = contrasts['faces'] - contrasts['scrambled'] contrasts['scrambled-faces'] = -contrasts['faces-scrambled'] contrasts['effects_of_interest'] = contrasts['faces'] + contrasts['scrambled'] # fit GLM print('Fitting a GLM (this takes time)...') fmri_glm = FirstLevelGLM().fit( [nibabel.concat_images(x) for x in subject_data.func], design_matrices) # save computed mask mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz") print("Saving mask image %s" % mask_path) nibabel.save(fmri_glm.masker_.mask_img_, mask_path) mask_images.append(mask_path) # compute contrast maps z_maps = {} effects_maps = {} for contrast_id, contrast_val in contrasts.items(): print("\tcontrast id: %s" % contrast_id) z_map, t_map, effects_map, var_map = fmri_glm.transform( [contrast_val] * 2, contrast_name=contrast_id,
def execute_glm(doc, out_dir, contrast_definitions=None,
                outputs=None, glm_model='ar1',
                ):
    """Execute the GLM for one subject -- and perhaps multiple sessions
    thereof.

    Parameters
    ----------
    doc : dict
        subject document; the code reads the keys 'subject', 'mask',
        'study', 'TR', 'n_scans' and 'n_sessions' from it
    out_dir : str
        root output directory; per-subject results go to out_dir/<subject>
    contrast_definitions : optional
        if given, passed to make_contrasts to (re)define the contrasts
    outputs : dict, optional
        export flags for _export; 'maps' is always forced to False here
    glm_model : str, optional
        noise model passed to FirstLevelGLM (default 'ar1')
    """
    stats_start_time = time.ctime()

    # study_dir = os.path.join(out_dir, doc['study'])

    if outputs is None:
        outputs = {'maps': False,
                   'data': False,
                   'mask': True,
                   'model': True,
                   }
    else:
        # map export is handled explicitly below, never by _export
        outputs['maps'] = False

    subject_id = doc['subject']
    subject_output_dir = os.path.join(
        out_dir, subject_id)

    _export(doc, subject_output_dir, outputs=outputs)

    params = load_glm_params(doc)

    # instantiate GLM
    fmri_glm = FirstLevelGLM(noise_model=glm_model, mask=doc['mask']).fit(
        params['data'], params['design_matrices'])

    # save beta-maps to disk, one text file per session GLM
    beta_map_dir = os.path.join(subject_output_dir, 'beta_maps')
    if not os.path.exists(beta_map_dir):
        os.makedirs(beta_map_dir)
    for j, glm in zip(range(len(fmri_glm.glms)), fmri_glm.glms):
        # XXX save array in some compressed format
        np.savetxt(os.path.join(beta_map_dir, "beta_map_%i.txt" % j),
                   glm.get_beta(),  # array has shape (n_conditions,
                                    # n_voxels)
                   )

    # define contrasts
    if contrast_definitions is not None:
        params['contrasts'] = make_contrasts(params, contrast_definitions)
    # sorted contrast ids, taken from the first session's contrast dict
    contrasts = sorted(params['contrasts'][0].keys())

    _contrasts = {}
    z_maps = {}

    # compute stats maps
    for index, contrast_id in enumerate(contrasts):
        print ' study[%s] subject[%s] contrast [%s]: %i/%i' % (
            doc['study'], doc['subject'], contrast_id, index + 1,
            len(contrasts)
            )

        # one contrast vector per session
        contrast = [c[contrast_id] for c in params['contrasts']]
        contrast_name = contrast_id.replace(' ', '_')

        # NOTE(review): transform presumably returns (z, t, effects,
        # variance) maps in that order — confirm against FirstLevelGLM
        z_map, t_map, c_map, var_map = fmri_glm.transform(
            contrast, con_name=contrast_id, output_z=True,
            output_stat=True, output_effects=True, output_variance=True,)

        # store each map type in its own sub-directory
        for dtype, out_map in zip(['z', 't', 'c', 'variance'],
                                  [z_map, t_map, c_map, var_map]):
            map_dir = os.path.join(subject_output_dir, '%s_maps' % dtype)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_name)
            nb.save(out_map, map_path)

            # collect z map
            if dtype == 'z':
                _contrasts[contrast_name] = contrast
                z_maps[contrast_name] = map_path

    # invoke a single API to handle plotting and html business for you
    subject_stats_report_filename = os.path.join(
        subject_output_dir, "report_stats.html")
    glm_reporter.generate_subject_stats_report(
        subject_stats_report_filename,
        _contrasts,
        z_maps,
        doc['mask'],
        design_matrices=list(params['design_matrices']),
        subject_id=doc['subject'],
        cluster_th=15,  # 15 voxels
        start_time=stats_start_time,
        TR=doc['TR'],
        n_scans=doc['n_scans'],
        n_sessions=doc['n_sessions'],
        model=glm_model,
        )

    print "Report for subject %s written to %s" % (
        doc['subject'], subject_stats_report_filename)