Example #1
File: io.py Project: hanke/gumpdata
    def get_dti(self, subj, instance=1):
        """Returns DTI image and b-values plus b-vectors

        Parameters
        ----------
        subj : int or str
          Subject identifier (without 'sub' prefix).
        instance : int
          ID of the DTI dataset.

        Returns
        -------
        NiBabel Nifti1Image, array, array
          The first returned array is the vector of b-values for each volume
          in the image file. The second array contains the 3D b-vectors for
          each volume in the image file.
        """
        import nibabel as nb

        path = _opj(self._basedir, _sub2id(subj), "dti")
        return (
            nb.load(_opj(path, "dti%.3i.nii.gz" % instance)),
            np.loadtxt(_opj(path, "dti%.3i.bvals" % instance)),
            np.loadtxt(_opj(path, "dti%.3i.bvecs" % instance)).T,
        )
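A minimal usage sketch for the accessor above. The class providing get_dti() is not shown in the excerpt, so the instance construction below is an assumption ('GumpData' is a hypothetical name); only the call signature and return layout come from the docstring.

# hypothetical: 'gd' is an instance of the (unshown) gumpdata accessor class
gd = GumpData('/path/to/dataset')  # assumed constructor
dti_img, bvals, bvecs = gd.get_dti(subj=1, instance=1)
# one b-value per volume, one 3D b-vector per volume (transposed above)
print dti_img.shape, bvals.shape, bvecs.shape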
def run_searchlight(op, subjectdir, conf, output_dir, TR=2):
	mask_name = conf.mask_name
	conditions = conf.conditions_to_compare
	flavor = conf.flavor
	study_path = op.study_dir()
	subcode = subjectdir.subcode()

	# skip the subject entirely only if results exist for *every* condition;
	# the flag must be initialized before the loop, not reset per iteration
	did_run = True
	for condition in conditions:
		output = _opj(output_dir, '*{}*'.format(conf.get_cond_prefix(condition)))
		if conf.num_of_permutations > 0:
			output = "{}_perm{}".format(output, conf.num_of_permutations)
		if len(glob(output)) == 0:
			did_run = False
	if did_run:
		print "already ran all sl for {}".format(output_dir)
		return

	fds = conf.get_ds(study_path, subcode, conf, mask_name, flavor, TR)
	print fds.summary()
	warp = glob(_opj(study_path,'sub{:0>3d}'.format(subcode), '**', conf.mvpa_tasks[0], 'reg', 'example_func2standard_warp.nii.gz'))[0]

	if not os.path.exists(output_dir):
		os.makedirs(output_dir)
	for pair in conditions:
		permute = AttributePermutator('condition', limit='chunks')
		print conf.num_of_permutations+1
		for j in xrange(conf.num_of_permutations+1):
			prefix = conf.get_cond_prefix(pair)
			cond_ds = fds[np.array([c in pair for c in fds.sa['condition']])]
			if j > 0:
				cond_ds = permute(cond_ds)
				prefix = "{}_perm{}".format(prefix,j)
			print prefix
			output_basename = os.path.join(output_dir, prefix)
			if len(glob(output_basename + "*")) > 0:
				print "sl already ran {}".format(j)
				continue

			kwa = {'voxel_indices': conf.get_neighbourhood_strategy(cond_ds)}
			qe = IndexQueryEngine(**kwa)
			# init the searchlight with the queryengine
			sl = Searchlight(conf.get_sl_measure(), queryengine=qe, roi_ids=None,
			                 enable_ca=['roi_sizes', 'roi_feature_ids'])
			print "starting sl {}".format(datetime.now())
			sl_map = sl(cond_ds)
			print "finished sl {}".format(datetime.now())

			pickle.dump(sl_map, open("{}_sl_map.p".format(output_basename), "wb"))
			acc_results = map2nifti(sl_map, imghdr=fds.a.imghdr)
			acc_nii_filename = '{}-acc.nii.gz'.format(output_basename)
			acc_results.to_filename(acc_nii_filename)
			#do_searchlight(cond_ds,k,os.path.join(output_dir, prefix))

			out_filename = acc_nii_filename.replace('.nii.gz', '_mni.nii.gz')
			apply_warp(acc_nii_filename, warp, out_filename)
Example #3
def perm_hist(subj):
	conf = AnalysisConfiguration()
	data_dir = os.environ.get('DATA_DIR') or '/home/user/data'
	sub_dir = _opj(data_dir,conf.study_name,'sub{:0>3d}'.format(subj))
	directory = _opj(data_dir,'LP/sub{:0>3d}/results/'.format(subj))
	print conf.dir_name()
	for pair in conf.conditions_to_compare:
			#print _opj(directory,conf.dir_name(),'{}*{}{}*.p'.format(conf.mask_name,pair[0],pair[1]))
			files = sorted(glob(_opj(directory,conf.dir_name(),'{}*{}{}*.p'.format(conf.mask_name,pair[0],pair[1]))))
			plt.figure()
			plt.subplot(211)
			plt.title('sub{:0>3d}-{}{}'.format(subj,pair[0],pair[1]))
			print pair, " ", len(files)
			all_maps = []
			for f in files[:-1]:
				f_h = open(f, 'r')
				m = pickle.load(f_h)
				all_maps.append(m)
				if 'perm' in f:
					color = 'black'
					line_width = 1
				else:
					color = 'crimson'
					line_width = 2
				plt.hist(np.transpose(m),bins=20,histtype='step',color=[color], lw = line_width)
			perms = vstack(all_maps)
			real_f = files[-1]
			f_h = open(real_f, 'r')
			real_map = pickle.load(f_h)
			color = 'crimson'
			line_width = 2
			plt.hist(np.transpose(real_map),bins=20,histtype='step',color=[color], lw = line_width)
			percentiles = np.zeros((1, len(real_map.samples[0])))
			for i, vox in enumerate(real_map.samples[0]):
				percentiles[0, i] = percentileofscore(perms[:, i].samples.flat, vox)
			plt.subplot(212)
			print len(percentiles[0])
			plt.hist(percentiles[0],bins=20,histtype='step')
			real_map.samples=percentiles
			nii = real_f.replace("_sl_map.p", "-acc.nii.gz")
			nii_file = nib.load(nii)
			perc_results = map2nifti(real_map, imghdr=nii_file.header)
			perc_nii_filename = real_f.replace("_sl_map.p", "-percentiles_sub{:0>3d}.nii.gz".format(subj))
			perc_results.to_filename(perc_nii_filename)
			thr_prc_filename = perc_nii_filename.replace(".nii.gz","_p0.01.nii.gz")
			thr = fsl.maths.Threshold(in_file=perc_nii_filename, thresh=100,
						  out_file=thr_prc_filename)
			thr.run()
			mni_thr_filename = thr_prc_filename.replace(".nii.gz","_mni.nii.gz")
			apply_warp(sub_dir, thr_prc_filename, mni_thr_filename)

			
	plt.show()
	#plt.savefig('/tmp/sub{:0>3d}_{}{}'.format(subj,pair[0],pair[1]))
	raw_input()
Example #4
def process_files(prefix, output_dir, thr, all_file, avg_file):
    from scipy import ndimage
    data = avg_file.get_data()
    cluster_map, n_clusters = ndimage.label(data > thr)
    output_file = _opj(output_dir, "{}_thr_{}.nii.gz".format(prefix, thr))
    nib.save(nib.Nifti1Image(cluster_map, None, avg_file.header), output_file)
    data = all_file.get_data()
    thr_data = data > thr
    res = np.sum(thr_data, 3)
    output_file = _opj(output_dir, "{}_sum_thr_{}.nii.gz".format(prefix, thr))
    nib.save(nib.Nifti1Image(res, None, avg_file.header), output_file)

    return output_file
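process_files hinges on scipy.ndimage.label, which turns a boolean map into connected-component labels. A self-contained toy illustration (the data here is made up, not from the study):

import numpy as np
from scipy import ndimage

toy = np.zeros((5, 5))
toy[1:3, 1:3] = 1.0   # a 2x2 blob
toy[4, 4] = 1.0       # a single disconnected voxel
cluster_map, n_clusters = ndimage.label(toy > 0.5)
# cluster_map holds integer labels 1..n_clusters, 0 marks background
assert n_clusters == 2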
Example #5
def main():
	conf = AnalysisConfiguration()

	data_dir = os.environ.get('DATA_DIR') or '/home/user/data'

	op = OpenFMRIData(data_dir, conf.study_name)
	analyzer = OpenFMRIAnalyzer(op, conf)
	all_subject_dirs = op.all_subjects_dirs_with_raw()

	for subject in all_subject_dirs:
		analyzer.extract_brain(subject)

	for subject in all_subject_dirs:
		analyzer.anatomical_registration(subject)

	for subject in all_subject_dirs:
		#for task in conf.mvpa_tasks:
			#subject.remove_volumes_from_model(1, "", task, conf.num_of_volumes_to_delete)

		analyzer.motion_correction(subject)
		analyzer.functional_registration(subject)

		if conf.func_seg:
			analyzer.functional_segmentation(subject)
		else:
			analyzer.segmentation(subject)
			analyzer.generate_functional_gm_masks(subject)
		#analyzer.warp_standard_mask(subject)


	for subject in all_subject_dirs:
		# DO SL
		out_dir = _opj(subject.path(),'results',conf.dir_name())
		if not os.path.exists(out_dir):
			os.makedirs(out_dir)
		run_searchlight(op, subject, conf, out_dir)
#		run_searchlight(op.study_dir(), subject.subcode(), mask_name, k, [['G1','G4']], out_dir,flavor)


	#Group Level
	output_dir = _opj(op.study_dir(), 'results', "{}".format(conf.dir_name()))

	if not os.path.exists(output_dir):
		os.makedirs(output_dir)

	files = glob(_opj(op.study_dir(), "**", 'results', conf.dir_name(), '*acc_mni.nii.gz'))
	print files
	generate_group_level_map( files, output_dir)
Example #6
def calc_summary_niis(in_files, output_dir, prefix):
    all_file = _opj(output_dir, '{}_all.nii.gz'.format(prefix))
    avg_file = _opj(output_dir, '{}_avg.nii.gz'.format(prefix))
    merge = fsl.Merge(in_files=in_files,
                      dimension='t',
                      merged_file=all_file)
    merge.run()
    mean = fsl.maths.MeanImage(in_file=all_file, dimension='T', out_file=avg_file)
    mean.run()
    all_nii = nib.load(all_file)
    avg_nii = nib.load(avg_file)
    all_data = all_nii.get_data()
    med_data = np.median(all_data,3)
    output_file = _opj(output_dir, "{}_median.nii.gz".format(prefix))
    nib.save(nib.Nifti1Image(med_data, None, avg_nii.header), output_file)
    return all_nii, avg_nii
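calc_summary_niis and process_files (Example #4) look designed to chain: merge per-subject maps, then threshold. A hedged sketch of that glue; the file pattern and the 0.6 threshold are invented for illustration.

# hypothetical glue code: merge accuracy maps, then cluster-threshold
in_files = sorted(glob(_opj(output_dir, '*acc_mni.nii.gz')))
all_nii, avg_nii = calc_summary_niis(in_files, output_dir, 'acc')
out = process_files('acc', output_dir, 0.6, all_nii, avg_nii)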
Example #7
File: io.py Project: proska/gumpdata
    def get_run_fmri(self, subj, task, run, flavor='dico'):
        """Returns a NiBabel image instance for fMRI of a particular run

        Parameters
        ----------
        subj : int or str
          Subject identifier (without 'sub' prefix).
        task : int
          Task ID (see task_key.txt)
        run : int
          Run ID.
        flavor : ('', 'dico', 'dico7Tad2grpbold7Tad', 'dico7Tad2grpbold7Tad_nl')
          fMRI data flavor to access (see dataset description)

        Returns
        -------
        NiBabel Nifti1Image
        """
        import nibabel as nb

        if flavor == '':
            fname = 'bold.nii.gz'
        elif flavor == 'dico':
            fname = 'bold_dico.nii.gz'
        else:
            fname = 'bold_%s.nii.gz' % flavor
        fname = _opj(self._basedir, _sub2id(subj), 'BOLD', _taskrun(task, run),
                     fname)
        return nb.load(fname)
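A flavor-selection sketch based on the docstring above (again with the assumed 'gd' accessor instance from the sketch under Example #1):

bold = gd.get_run_fmri(1, task=1, run=1)        # default flavor: bold_dico.nii.gz
bold_raw = gd.get_run_fmri(1, 1, 1, flavor='')  # plain bold.nii.gz
bold_nl = gd.get_run_fmri(1, 1, 1, flavor='dico7Tad2grpbold7Tad_nl')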
Example #8
File: io.py Project: proska/gumpdata
    def get_run_physio_data(self, subj, task, run, sensors=None):
        """Returns the physiological recording for a particular run

        Parameters
        ----------
        subj : int or str
          Subject identifier (without 'sub' prefix).
        task : int
          Task ID (see task_key.txt)
        run : int
          Run ID.
        sensors : None or tuple({'trigger', 'respiratory', 'cardiac', 'oxygen'})
          Selection and order of values to return.

        Returns
        -------
        array
          Array of floats -- one row per sample (100 Hz). If ``sensors`` is
          None, 4 columns are returned (trigger track, respiratory trace,
          cardiac trace, oxygen saturation). If ``sensors`` is specified, the
          order of columns matches the order of the ``sensors`` sequence.
        """
        fname = _opj(self._basedir, _sub2id(subj), 'physio',
                     _taskrun(task, run), 'physio.txt.gz')
        sensor_map = {
            'trigger': 0,
            'respiratory': 1,
            'cardiac': 2,
            'oxygen': 3
        }
        if sensors is not None:
            sensors = [sensor_map[s] for s in sensors]
        data = np.loadtxt(fname, usecols=sensors)
        return data
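A usage sketch for the sensor selection described in the docstring (the instance name 'gd' is the same assumed accessor as in the sketch under Example #1):

# all four columns: trigger, respiratory, cardiac, oxygen
physio = gd.get_run_physio_data(subj=1, task=1, run=1)
# two columns only, in the requested order
cardio_resp = gd.get_run_physio_data(1, 1, 1, sensors=('cardiac', 'respiratory'))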
Example #9
File: openfmri.py Project: Marphy/PyMVPA
    def get_model_conditions(self, model):
        """Returns a description of all conditions for a given model

        Parameters
        ----------
        model : int
          Model identifier.

        Returns
        -------
        list(dict)
          A list of a model conditions is returned, where each item is a
          dictionary with keys ``id`` (numerical condition ID), ``task``
          (numerical task ID for the task containing this condition), and
          ``name`` (the literal condition name). This information is
          returned in a list (instead of a dictionary), because the openfmri
          specification of model conditions contains no unique condition
          identifier. Conditions are only uniquely described by the combination
          of task and condition ID.
        """
        def_fname = _opj(self._basedir, 'models', _model2id(model),
                         'condition_key.txt')
        def_data = np.recfromtxt(def_fname)
        conds = []
        # load model meta data
        for dd in def_data:
            cond = {}
            cond['task'] = _id2int(dd[0])
            cond['id'] = _id2int(dd[1])
            cond['name'] = dd[2]
            conds.append(cond)
        return conds
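For illustration, iterating the returned list (assuming 'of' is an OpenFMRIDataset handler, as constructed in Example #47):

conds = of.get_model_conditions(1)
for cond in conds:
    # each item carries 'task', 'id', and 'name', as built above
    print 'task %(task)i, condition %(id)i: %(name)s' % cond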
Example #10
File: openfmri.py Project: Marphy/PyMVPA
    def get_bold_run_image(self, subj, task, run, flavor=None):
        """Returns a NiBabel image instance for the BOLD data of a 
        particular subject/task/run combination.

        Parameters
        ----------
        subj : int
          Subject identifier.
        task : int
          Task ID (see task_key.txt)
        run : int
          Run ID.
        flavor : None or str
          BOLD data flavor to access (see dataset description)

        Returns
        -------
        NiBabel Nifti1Image
        """
        import nibabel as nb

        if flavor is None:
            flavor = ''
        else:
            flavor = '_' + flavor
        fname = 'bold%s.nii.gz' % flavor
        fname = _opj(self._basedir, _sub2id(subj),
                     'BOLD', _taskrun(task, run),
                     fname)
        return nb.load(fname)
Example #11
File: io.py Project: hanke/gumpdata
    def get_run_fmri(self, subj, task, run, flavor="dico"):
        """Returns a NiBabel image instance for fMRI of a particular run

        Parameters
        ----------
        subj : int or str
          Subject identifier (without 'sub' prefix).
        task : int
          Task ID (see task_key.txt)
        run : int
          Run ID.
        flavor : ('', 'dico', 'dico7Tad2grpbold7Tad', 'dico7Tad2grpbold7Tad_nl')
          fMRI data flavor to access (see dataset description)

        Returns
        -------
        NiBabel Nifti1Image
        """
        import nibabel as nb

        if flavor == "":
            fname = "bold.nii.gz"
        elif flavor == "dico":
            fname = "bold_dico.nii.gz"
        else:
            fname = "bold_%s.nii.gz" % flavor
        fname = _opj(self._basedir, _sub2id(subj), "BOLD", _taskrun(task, run), fname)
        return nb.load(fname)
Example #12
File: io.py Project: hanke/gumpdata
    def get_run_physio_data(self, subj, task, run, sensors=None):
        """Returns the physiological recording for a particular run

        Parameters
        ----------
        subj : int or str
          Subject identifier (without 'sub' prefix).
        task : int
          Task ID (see task_key.txt)
        run : int
          Run ID.
        sensors : None or tuple({'trigger', 'respiratory', 'cardiac', 'oxygen'})
          Selection and order of values to return.

        Returns
        -------
        array
          Array of floats -- one row per sample (100 Hz). If ``sensors`` is
          None, 4 columns are returned (trigger track, respiratory trace,
          cardiac trace, oxygen saturation). If ``sensors`` is specified, the
          order of columns matches the order of the ``sensors`` sequence.
        """
        fname = _opj(self._basedir, _sub2id(subj), "physio", _taskrun(task, run), "physio.txt.gz")
        sensor_map = {"trigger": 0, "respiratory": 1, "cardiac": 2, "oxygen": 3}
        if sensors is not None:
            sensors = [sensor_map[s] for s in sensors]
        data = np.loadtxt(fname, usecols=sensors)
        return data
Example #13
File: io.py Project: proska/gumpdata
    def get_t2(self, subj, instance=1):
        """Returns T2-weighted scan.

        Parameters
        ----------
        subj : int or str
          Subject identifier (without 'sub' prefix).
        instance : int
          ID of the T2 dataset.

        Returns
        -------
        NiBabel Nifti1Image
        """
        import nibabel as nb
        path = _opj(self._basedir, _sub2id(subj), 'anatomy', 'other')
        return nb.load(_opj(path, 't2w%.3i.nii.gz' % instance))
Example #14
File: io.py Project: proska/gumpdata
    def get_angio(self, subj, instance=1):
        """Returns angiography scan.

        Parameters
        ----------
        subj : int or str
          Subject identifier (without 'sub' prefix).
        instance : int
          ID of the angio dataset.

        Returns
        -------
        NiBabel Nifti1Image
        """
        import nibabel as nb
        path = _opj(self._basedir, _sub2id(subj), 'angio')
        return nb.load(_opj(path, 'angio%.3i.nii.gz' % instance))
Example #15
def _subdirs2ids(path, prefix, **kwargs):
    ids = []
    if not os.path.exists(path):
        return ids
    for item in os.listdir(path):
        if item.startswith(prefix) and os.path.isdir(_opj(path, item)):
            ids.append(_id2int(item, **kwargs))
    return sorted(ids)
Example #16
File: openfmri.py Project: Guenx/PyMVPA
    def get_task_descriptions(self):
        """Returns a dictionary with the tasks defined in the dataset

        Dictionary keys are integer task IDs, values are task description
        strings.
        """
        fname = _opj(self._basedir, 'task_key.txt')
        return _get_description_dict(fname, xfm_key=_id2int)
Example #17
File: openfmri.py Project: dinga92/PyMVPA
def _subdirs2ids(path, prefix, **kwargs):
    ids = []
    if not os.path.exists(path):
        return ids
    for item in os.listdir(path):
        if item.startswith(prefix) and os.path.isdir(_opj(path, item)):
                ids.append(_id2int(item, **kwargs))
    return sorted(ids)
Example #18
    def get_task_descriptions(self):
        """Return a dictionary with the tasks defined in the dataset

        Dictionary keys are integer task IDs, values are task description
        strings.
        """
        fname = _opj(self.basedir, 'task_key.txt')
        return _get_description_dict(fname, xfm_key=_id2int)
Example #19
File: io.py Project: hanke/gumpdata
    def get_angio(self, subj, instance=1):
        """Returns angiography scan.

        Parameters
        ----------
        subj : int or str
          Subject identifier (without 'sub' prefix).
        instance : int
          ID of the angio dataset.

        Returns
        -------
        NiBabel Nifti1Image
        """
        import nibabel as nb

        path = _opj(self._basedir, _sub2id(subj), "angio")
        return nb.load(_opj(path, "angio%.3i.nii.gz" % instance))
Example #20
File: io.py Project: hanke/gumpdata
    def get_t2(self, subj, instance=1):
        """Returns T2-weighted scan.

        Parameters
        ----------
        subj : int or str
          Subject identifier (without 'sub' prefix).
        instance : int
          ID of the T2 dataset.

        Returns
        -------
        NiBabel Nifti1Image
        """
        import nibabel as nb

        path = _opj(self._basedir, _sub2id(subj), "anatomy", "other")
        return nb.load(_opj(path, "t2w%.3i.nii.gz" % instance))
Example #21
File: io.py Project: proska/gumpdata
    def get_fieldmap(self, subj, instance=1):
        """Returns fieldmap scan.

        Parameters
        ----------
        subj : int or str
          Subject identifier (without 'sub' prefix).
        instance : int
          ID of the fieldmap dataset.

        Returns
        -------
        NiBabel Nifti1Image, Nifti1Image
          Magnitude and phase image.
        """
        import nibabel as nb
        path = _opj(self._basedir, _sub2id(subj), 'fieldmap')
        return nb.load(_opj(path, 'fieldmap%.3i_mag.nii.gz' % instance)), \
               nb.load(_opj(path, 'fieldmap%.3i_pha.nii.gz' % instance))
Example #22
File: io.py Project: proska/gumpdata
    def get_swi(self, subj, instance=1):
        """Returns susceptibility-weighted scan.

        Parameters
        ----------
        subj : int or str
          Subject identifier (without 'sub' prefix).
        instance : int
          ID of the SWI dataset.

        Returns
        -------
        NiBabel Nifti1Image, Nifti1Image
          Magnitude and phase image.
        """
        import nibabel as nb
        path = _opj(self._basedir, _sub2id(subj), 'anatomy', 'other')
        return nb.load(_opj(path, 'swi%.3i_mag.nii.gz' % instance)), \
               nb.load(_opj(path, 'swi%.3i_pha.nii.gz' % instance))
Example #23
def _subdirs2ids(path, prefix, **kwargs):
    # num_ids to separate sorting of numeric and literal ids
    ids, num_ids = [], []
    if not os.path.exists(path):
        return ids
    for item in os.listdir(path):
        if item.startswith(prefix) and os.path.isdir(_opj(path, item)):
            id_ = _id2int(item, **kwargs)
            (num_ids if isinstance(id_, (np.integer, int)) else ids).append(id_)
    return sorted(num_ids) + sorted(ids)
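The split keeps numeric and literal IDs apart so that sorted() never has to compare int with str. The effect in isolation, with toy values:

num_ids, ids = [10, 2, 1], ['pilot']
print sorted(num_ids) + sorted(ids)   # -> [1, 2, 10, 'pilot']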
Example #24
File: openfmri.py Project: Guenx/PyMVPA
    def get_model_descriptions(self):
        """Returns a dictionary with the models described in the dataset

        Dictionary keys are integer model IDs, values are description strings.

        Note that the returned dictionary is not necessarily comprehensive. It
        only reflects the models described in ``model_key.txt``. If a dataset
        is inconsistently described, ``get_model_ids()`` may actually discover
        more or fewer models than there are available model descriptions.
        """
        fname = _opj(self._basedir, 'model_key.txt')
        return _get_description_dict(fname, xfm_key=_id2int)
Example #25
def apply_warp(sub_dir, in_file, out_file):
#    warp_file = _opj(sub_dir,'anatomy','reg','highres2standard_warp.nii.gz')
#    pre_mat_file = _opj(sub_dir,'BOLD','task001','reg','example_func2highres.mat')
    warp_file = _opj(sub_dir,'BOLD','task001','reg','example_func2standard_warp.nii.gz')
    standard_image = fsl.Info.standard_image('MNI152_T1_2mm_brain.nii.gz')
    apply_warp = fsl.preprocess.ApplyWarp(ref_file=standard_image,
                                          in_file=in_file,
                                          field_file=warp_file,
                                          #premat=pre_mat_file,
                                          interp='trilinear',
                                          out_file=out_file)
    apply_warp.run()
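A hedged usage sketch; the paths are placeholders, and the call pattern matches how perm_hist (Example #3) invokes this helper:

# warp a subject-space accuracy map into MNI152 2mm space
apply_warp('/data/LP/sub001',
           'sub001-acc.nii.gz',       # input, subject space
           'sub001-acc_mni.nii.gz')   # output, standard space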
Example #26
File: io.py Project: hanke/gumpdata
    def get_scene_boundaries(self):
        """Returns the boundaries between scenes in movie time

        Returns
        -------
        list(float)
          Timestamps are given in seconds.
        """
        fname = _opj(self._basedir, "stimulus", "task001", "annotations", "scenes.csv")
        cr = csv.reader(open(fname))
        ts = [float(line[0]) for line in cr]
        return ts
Example #27
    def get_model_descriptions(self):
        """Return a dictionary with the models described in the dataset

        Dictionary keys are integer model IDs, values are description strings.

        Note that the returned dictionary is not necessarily comprehensive. It
        only reflects the models described in ``model_key.txt``. If a dataset
        is inconsistently described, ``get_model_ids()`` may actually discover
        more or fewer models than there are available model descriptions.
        """
        fname = _opj(self.basedir, 'model_key.txt')
        return _get_description_dict(fname, xfm_key=_id2int)
Example #28
File: io.py Project: proska/gumpdata
    def get_dti(self, subj, instance=1):
        """Returns DTI image and b-values plus b-vectors

        Parameters
        ----------
        subj : int or str
          Subject identifier (without 'sub' prefix).
        instance : int
          ID of the DTI dataset.

        Returns
        -------
        NiBabel Nifti1Image, array, array
          The first returned array is the vector of b-values for each volume
          in the image file. The second array contains the 3D b-vectors for
          each volume in the image file.
        """
        import nibabel as nb
        path = _opj(self._basedir, _sub2id(subj), 'dti')
        return nb.load(_opj(path, 'dti%.3i.nii.gz' % instance)), \
               np.loadtxt(_opj(path, 'dti%.3i.bvals' % instance)), \
               np.loadtxt(_opj(path, 'dti%.3i.bvecs' % instance)).T
Example #29
File: io.py Project: hanke/gumpdata
    def get_swi(self, subj, instance=1):
        """Returns susceptibility-weighted scan.

        Parameters
        ----------
        subj : int or str
          Subject identifier (without 'sub' prefix).
        instance : int
          ID of the SWI dataset.

        Returns
        -------
        NiBabel Nifti1Image, Nifti1Image
          Magnitude and phase image.
        """
        import nibabel as nb

        path = _opj(self._basedir, _sub2id(subj), "anatomy", "other")
        return (
            nb.load(_opj(path, "swi%.3i_mag.nii.gz" % instance)),
            nb.load(_opj(path, "swi%.3i_pha.nii.gz" % instance)),
        )
Example #30
File: io.py Project: proska/gumpdata
    def get_scene_boundaries(self):
        """Returns the boundaries between scenes in movie time

        Returns
        -------
        list(float)
          Timestamps are given in seconds.
        """
        fname = _opj(self._basedir, 'stimulus', 'task001', 'annotations',
                     'scenes.csv')
        cr = csv.reader(open(fname))
        ts = [float(line[0]) for line in cr]
        return ts
Example #31
File: io.py Project: hanke/gumpdata
    def get_fieldmap(self, subj, instance=1):
        """Returns fieldmap scan.

        Parameters
        ----------
        subj : int or str
          Subject identifier (without 'sub' prefix).
        instance : int
          ID of the fieldmap dataset.

        Returns
        -------
        NiBabel Nifti1Image, Nifti1Image
          Magnitude and phase image.
        """
        import nibabel as nb

        path = _opj(self._basedir, _sub2id(subj), "fieldmap")
        return (
            nb.load(_opj(path, "fieldmap%.3i_mag.nii.gz" % instance)),
            nb.load(_opj(path, "fieldmap%.3i_pha.nii.gz" % instance)),
        )
Example #32
def make_ds(sub, datapath, flavor):
    of = OpenFMRIDataset(datapath)
    ds = of.get_model_bold_dataset(
        model_id=1,
        subj_id=sub,
        flavor=flavor,
        mask=_opj(datapath, 'sub%.3i' % sub, 'masks', 'task001_run001',
                  'grey.nii.gz'),
        #preproc_img=smooth,
        preproc_ds=detrend,
        modelfx=fit_event_hrf_model,
        time_attr='time_coords',
        condition_attr='condition')
    ds14 = ds[np.array([c in ['G1', 'G4'] for c in ds.sa['condition']])]
    ds23 = ds[np.array([c in ['G2', 'G3'] for c in ds.sa['condition']])]

    result_dir = _opj(datapath, 'mvpa', 'ds', flavor)
    if not os.path.isdir(result_dir):
        os.makedirs(result_dir)
    print "{:0>3d}-ds14 {},{}".format(sub, ds14.shape, ds14.sa.condition)
    print "{:0>3d}-ds23 {},{}".format(sub, ds23.shape, ds23.sa.condition)
    h5save(_opj(result_dir, 'sub%.3i_14_hrf.hdf5' % sub), ds14)
    h5save(_opj(result_dir, 'sub%.3i_23_hrf.hdf5' % sub), ds23)
Example #33
def create_betas_per_trial_with_pymvpa_roni(study_path, subj, conf, mask_name, flavor, TR):
    dhandle = OpenFMRIDataset(study_path)
    model = 1
    task = 1
    # Do this for other tasks as well. not only the first
    mask_fname = _opj(study_path, "sub{:0>3d}".format(subj), "masks", conf.mvpa_tasks[0], "{}.nii.gz".format(mask_name))
    print mask_fname
    run_datasets = []
    for run_id in dhandle.get_task_bold_run_ids(task)[subj]:
        if isinstance(run_id, str):
            continue

        # all_events = dhandle.get_bold_run_model(model, subj, run_id)
        all_events = get_bold_run_model(dhandle, 2, subj, run_id)
        run_events = []
        i = 0
        for event in all_events:
            if event["task"] == task:
                event["condition"] = "{}-{}".format(event["condition"], event["id"])
                run_events.append(event)
                i += 1

        # load BOLD data for this run (with masking); add 0-based chunk ID
        run_ds = dhandle.get_bold_run_dataset(subj, task, run_id, flavor=flavor, chunks=run_id - 1, mask=mask_fname)
        # convert event info into a sample attribute and assign as 'targets'
        run_ds.sa.time_coords = run_ds.sa.time_indices * TR
        run_ds.sa["targets"] = events2sample_attr(run_events, run_ds.sa.time_coords, noinfolabel="rest")
        # additional time series preprocessing can go here
        poly_detrend(run_ds, polyord=1, chunks_attr="chunks")
        zscore(run_ds, chunks_attr="chunks", param_est=("targets", ["rest"]), dtype="float32")
        glm_dataset = fit_event_hrf_model(run_ds, run_events, time_attr="time_coords", condition_attr="condition")
        glm_dataset.sa["targets"] = [x[: x.find("-")] for x in glm_dataset.sa.condition]
        glm_dataset.sa["id"] = [x[x.find("-") + 1 :] for x in glm_dataset.sa.condition]
        glm_dataset.sa.condition = glm_dataset.sa["targets"]
        glm_dataset.sa["chunks"] = [run_id - 1] * len(glm_dataset.samples)

        # If a trial was dropped (the subject pressed a button), then the
        # counterpart trial from the other condition should also be dropped
        for pair in conf.conditions_to_compare:
            cond_bool = np.array([c in pair for c in glm_dataset.sa["condition"]])
            sub_dataset = glm_dataset[cond_bool]
            c = Counter(sub_dataset.sa.id)
            for value in c:
                if c[value] < 2:
                    id_bool = np.array([value in cond_id for cond_id in glm_dataset.sa["id"]])
                    glm_dataset = glm_dataset[np.bitwise_not(np.logical_and(id_bool, cond_bool))]

        run_datasets.append(glm_dataset)

    return vstack(run_datasets, 0)
Example #34
File: make_ds.py Project: danielrez/FSL
def make_ds(sub, datapath, flavor):
    of = OpenFMRIDataset(datapath)
    ds = of.get_model_bold_dataset(
        model_id=1,
        subj_id=sub,
        flavor=flavor,
        mask=_opj(datapath, "sub%.3i" % sub, "masks", "task001_run001", "grey.nii.gz"),
        # preproc_img=smooth,
        preproc_ds=detrend,
        modelfx=fit_event_hrf_model,
        time_attr="time_coords",
        condition_attr="condition",
    )
    ds14 = ds[np.array([c in ["G1", "G4"] for c in ds.sa["condition"]])]
    ds23 = ds[np.array([c in ["G2", "G3"] for c in ds.sa["condition"]])]

    result_dir = _opj(datapath, "mvpa", "ds", flavor)
    if not os.path.isdir(result_dir):
        os.makedirs(result_dir)
    print "{:0>3d}-ds14 {},{}".format(sub, ds14.shape, ds14.sa.condition)
    print "{:0>3d}-ds23 {},{}".format(sub, ds23.shape, ds23.sa.condition)
    h5save(_opj(result_dir, "sub%.3i_14_hrf.hdf5" % sub), ds14)
    h5save(_opj(result_dir, "sub%.3i_23_hrf.hdf5" % sub), ds23)
Example #35
def create_betas_per_run_with_pymvpa(study_path, subj, conf, mask_name, flavor):
    of = OpenFMRIDataset(study_path)
    mask_fname = _opj(study_path, "sub{:0>3d}".format(subj), "masks", conf.mvpa_tasks[0], "{}.nii.gz".format(mask_name))
    ds = of.get_model_bold_dataset(
        model_id=1,
        subj_id=subj,
        flavor=flavor,
        mask=mask_fname,
        # preproc_img=smooth,
        preproc_ds=detrend,
        modelfx=fit_event_hrf_model,
        time_attr="time_coords",
        condition_attr="condition",
    )
    return ds
Example #36
File: openfmri.py Project: Marphy/PyMVPA
    def get_bold_run_model(self, model, subj, run):
        """Returns the stimulation design for a particular subject/task/run.

        Parameters
        ----------
        model : int
          Model identifier.
        subj : int
          Subject identifier.
        run : int
          Run ID.

        Returns
        -------
        list(dict)
          One dictionary per event of the model. Each item has the keys
          'task' (task description string), 'condition' (condition name),
          'run' (run ID), plus the event fields 'onset', 'duration', and
          'intensity'. This matches how the code below assembles ``events``.
        """

        conditions = self.get_model_conditions(model)
        events = []
        ev_fields = ('onset', 'duration', 'intensity')
        # get onset info for specific subject/task/run combo
        for cond in conditions:
            task_id = cond['task']
            task_descr = self.get_task_descriptions()[task_id]
            stim_fname = _opj(self._basedir, _sub2id(subj), 'model',
                              _model2id(model), 'onsets',
                              _taskrun(task_id, run),
                              '%s.txt' % _cond2id(cond['id']))
            try:
                evdata = np.atleast_1d(
                       np.recfromtxt(stim_fname, names=ev_fields))
            except IOError:
                warning("onset definition file '%s' not found; no information "
                        "about condition '%s' for run %i"
                        % (stim_fname, cond['name'], run))
                continue
            for ev in evdata:
                evdict = dict(zip(ev_fields,
                                  [ev[field] for field in ev_fields]))
                evdict['task'] = task_descr
                evdict['condition'] = cond['name']
                evdict['run'] = run
                events.append(evdict)
        return events
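Illustrative use of the flat event list (assuming the 'of' handler instance from Example #47):

events = of.get_bold_run_model(1, subj=1, run=1)
# each item: {'task': ..., 'condition': ..., 'run': ...,
#             'onset': ..., 'duration': ..., 'intensity': ...}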
Example #37
File: openfmri.py Project: Guenx/PyMVPA
    def get_bold_run_ids(self, subj, task):
        """Returns (sorted) list of run IDs for a given subject and task

        Typically, run IDs are integer values, but string IDs are supported
        as well.

        Parameters
        ----------
        subj : int or str
          Subject ID
        task : int or str
          Task ID
        """
        task_prefix = _prefix('task', task)
        return _subdirs2ids(_opj(self._basedir, _sub2id(subj), 'BOLD'),
                            '%s_' % (task_prefix,),
                            strip=len(task_prefix) + 4)
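Illustrative call ('of' again being the dataset handler):

run_ids = of.get_bold_run_ids(1, 1)   # e.g. [1, 2, 3, ...] for sub001/task001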
Example #38
    def get_bold_run_ids(self, subj, task):
        """Return (sorted) list of run IDs for a given subject and task

        Typically, run IDs are integer values, but string IDs are supported
        as well.

        Parameters
        ----------
        subj : int or str
          Subject ID
        task : int or str
          Task ID
        """
        task_prefix = _prefix('task', task)
        return _subdirs2ids(_opj(self.basedir, _sub2id(subj), 'BOLD'),
                            '%s_' % (task_prefix, ),
                            strip=len(task_prefix) + 4)
Example #39
def make_ds(sub, datapath, flavor):
	of = OpenFMRIDataset(datapath)
	#ds = of.get_bold_run_dataset(1, 1, 1,
	ds = of.get_model_bold_dataset(
	    model_id=1, subj_id=sub,
	    flavor=flavor,
	    mask=_opj(
		datapath, 'sub%.3i' % sub, 'masks', 'task001_run001',
		'grey.nii.gz'),
	    #preproc_img=smooth,
	    #preproc_ds = detrend, 
	    #modelfx=fit_event_hrf_model,
	    time_attr='time_coords',
	    condition_attr='condition')
	for i in np.unique(ds.chunks)[5:]:
		# NB: PyMVPA dataset slicing returns a copy, so this detrends a
		# throwaway copy; the result would need to be reassigned to stick
		detrend(ds[ds.chunks == i])
Example #40
File: io.py Project: hanke/gumpdata
    def get_german_audiodescription_transcript(self):
        """Returns the transcript with star and end timestamps

        Returns
        -------
        array(float, float), list(str)
          The first return value is a 2-column array with start and end timestamp
          of each narration sequence. The second return value is a list with the
          corresponding transcripts in UTF8 encoding.

        """
        fname = _opj(self._basedir, "stimulus", "task001", "annotations", "german_audio_description.csv")
        cr = csv.reader(open(fname))
        transcripts = []
        ts = []
        for line in cr:
            ts.append([float(i) for i in line[:2]])
            transcripts.append(line[2])
        return np.array(ts), transcripts
Example #41
File: io.py Project: proska/gumpdata
    def get_german_audiodescription_transcript(self):
        """Returns the transcript with star and end timestamps

        Returns
        -------
        array(float, float), list(str)
          The first return value is a 2-column array with start and end timestamp
          of each narration sequence. The second return value is a list with the
          corresponding transcripts in UTF8 encoding.

        """
        fname = _opj(self._basedir, 'stimulus', 'task001', 'annotations',
                     'german_audio_description.csv')
        cr = csv.reader(open(fname))
        transcripts = []
        ts = []
        for line in cr:
            ts.append([float(i) for i in line[:2]])
            transcripts.append(line[2])
        return np.array(ts), transcripts
Example #42
File: io.py Project: hanke/gumpdata
    def get_run_motion_estimates(self, subj, task, run):
        """Returns the motion correction estimates for a particular run

        Parameters
        ----------
        subj : int or str
          Subject identifier (without 'sub' prefix).
        task : int
          Task ID (see task_key.txt)
        run : int
          Run ID.

        Returns
        -------
        array
          Array of floats -- one row per fMRI volume, 6 columns (first three:
          translation X, Y, Z in mm, last three: rotation in deg)
        """
        fname = _opj(self._basedir, _sub2id(subj), "BOLD", _taskrun(task, run), "bold_dico_moco.txt")
        data = np.loadtxt(fname)
        return data
Example #43
File: io.py Project: proska/gumpdata
    def get_run_motion_estimates(self, subj, task, run):
        """Returns the motion correction estimates for a particular run

        Parameters
        ----------
        subj : int or str
          Subject identifier (without 'sub' prefix).
        task : int
          Task ID (see task_key.txt)
        run : int
          Run ID.

        Returns
        -------
        array
          Array of floats -- one row per fMRI volume, 6 columns (first three:
          translation X, Y, Z in mm, last three: rotation in deg)
        """
        fname = _opj(self._basedir, _sub2id(subj), 'BOLD', _taskrun(task, run),
                     'bold_dico_moco.txt')
        data = np.loadtxt(fname)
        return data
Example #44
def create_betas_per_trial_with_pymvpa(study_path, subj, conf, mask_name, flavor, TR):
    dhandle = OpenFMRIDataset(study_path)
    model = 1
    task = 1
    # Do this for other tasks as well. not only the first
    mask_fname = _opj(study_path, "sub{:0>3d}".format(subj), "masks", conf.mvpa_tasks[0], "{}.nii.gz".format(mask_name))
    print mask_fname
    run_datasets = []
    for run_id in dhandle.get_task_bold_run_ids(task)[subj]:
        if isinstance(run_id, str):
            continue
        all_events = dhandle.get_bold_run_model(model, subj, run_id)
        run_events = []
        i = 0
        for event in all_events:
            if event["task"] == task:
                event["condition"] = "{}-{}".format(event["condition"], i)
                run_events.append(event)
                i += 1

        # load BOLD data for this run (with masking); add 0-based chunk ID
        run_ds = dhandle.get_bold_run_dataset(subj, task, run_id, flavor=flavor, chunks=run_id - 1, mask=mask_fname)
        # convert event info into a sample attribute and assign as 'targets'
        run_ds.sa.time_coords = run_ds.sa.time_indices * TR
        print run_id

        run_ds.sa["targets"] = events2sample_attr(run_events, run_ds.sa.time_coords, noinfolabel="rest")
        # additional time series preprocessing can go here
        poly_detrend(run_ds, polyord=1, chunks_attr="chunks")
        zscore(run_ds, chunks_attr="chunks", param_est=("targets", ["rest"]), dtype="float32")
        glm_dataset = fit_event_hrf_model(run_ds, run_events, time_attr="time_coords", condition_attr="condition")
        glm_dataset.sa["targets"] = [x[: x.find("-")] for x in glm_dataset.sa.condition]
        glm_dataset.sa.condition = glm_dataset.sa["targets"]
        glm_dataset.sa["chunks"] = [run_id - 1] * len(glm_dataset.samples)
        run_datasets.append(glm_dataset)
    return vstack(run_datasets, 0)
Example #45
def main():
	conf = AnalysisConfiguration()

	data_dir = os.environ.get('DATA_DIR') or '/home/daniel/fsl-analysis/data'

	op = OpenFMRIData(data_dir, conf.study_name)
	analyzer = OpenFMRIAnalyzer(op, conf)
	all_subject_dirs = op.all_subjects_dirs_with_raw()

	for subject in all_subject_dirs:
		analyzer.extract_brain(subject)

	for subject in all_subject_dirs:
		analyzer.anatomical_registration(subject)

	for subject in all_subject_dirs:
		#for task in conf.mvpa_tasks:
			#subject.remove_volumes_from_model(1, "", task, conf.num_of_volumes_to_delete)

		analyzer.motion_correction(subject)
		analyzer.functional_registration(subject)

		if conf.func_seg:
			analyzer.functional_segmentation(subject)
		else:
			analyzer.segmentation(subject)
			analyzer.generate_functional_gm_masks(subject)
		#analyzer.warp_standard_mask(subject)


	for subject in all_subject_dirs:
		# DO SL
		out_dir = _opj(subject.path(),'results',conf.dir_name())
		if not os.path.exists(out_dir):
			os.makedirs(out_dir)
		run_searchlight(op, subject, conf, out_dir)
Example #46
import time
from os.path import join as _opj

import numpy as np

from mvpa2.base.hdf5 import h5load, h5save
from mvpa2.datasets import Dataset
from mvpa2.datasets.mri import map2nifti
from mvpa2.algorithms.group_clusterthr import GroupClusterThreshold

orig = []
perms = []
perm_count = []

subj_ids = range(1, 21)

# load all subjs
print 'Loading subjs', time.asctime()
for subj in subj_ids:
    print 'Loading subj %i' % subj
    sds = h5load(_opj('grp_results', 'grpspace_sub%.3i.hdf5' % subj))
    orig.append(sds.samples[0])
    perms.append(sds.samples[1:])
    perm_count.append(len(sds) - 1)

print 'Merge data', time.asctime()
orig_ds = Dataset(orig, sa=dict(subj=subj_ids), fa=sds.fa, a=sds.a)
perm_ds = Dataset(np.vstack(perms),
                  sa=dict(subj=np.repeat(subj_ids, perm_count)),
                  fa=sds.fa,
                  a=sds.a)
# some magic to drop the memory demand
del orig
del perms

print 'Train thresholder', time.asctime()
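The excerpt stops just before the thresholder is actually trained. A sketch of the presumable continuation; the parameter values are placeholders, not taken from the original script:

# assumed continuation: bootstrap a null distribution from the
# permutation maps, then threshold the observed group map
clthr = GroupClusterThreshold(n_bootstrap=10000,
                              feature_thresh_prob=0.001,
                              chunk_attr='subj')   # matches the 'subj' sa above
clthr.train(perm_ds)
res = clthr(orig_ds)   # thresholded group map with cluster statistics
print 'Done', time.asctime()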
Example #47
import os
from os.path import join as _opj
from mvpa2.datasets.sources import OpenFMRIDataset

datapath = "/home/data/psyinf/forrest_gump/anondata"
logpath = _opj(os.path.dirname(datapath), 'condor_logs')

# dataset handler
of = OpenFMRIDataset(datapath)

# preamble for a condor submit file
condor_submit_preamble = """
universe = vanilla
output = %(logpath)s/$(CLUSTER).$(PROCESS).out
error = %(logpath)s/$(CLUSTER).$(PROCESS).err
log = %(logpath)s/$(CLUSTER).$(PROCESS).log
initialdir = %(datapath)s
getenv = True
should_transfer_files = NO
transfer_executable = False
""" % dict(datapath=datapath, logpath=logpath)
Example #48
def main():
    sub = int(sys.argv[1])
    data_dir = os.environ.get('DATA_DIR') or '/home/user/data'
    study_name = os.environ.get('STUDY_NAME') or 'LP'
    flavor = 'mcf'
    make_ds(sub, _opj(data_dir, study_name), flavor)
datapath = 'BASEDIR'
of = OpenFMRIDataset(datapath)

sub = int(sys.argv[1]) + 1


def smooth(img):
    # we need to preserve the original header because the smoothing function
    # mangles the TR
    nimg = smooth_img(img, fwhm=2.0)
    return nb.Nifti1Image(nimg.get_data(),
                          img.get_affine(),
                          header=img.get_header())


ds = of.get_model_bold_dataset(
    model_id=1,
    subj_id=sub,
    flavor='dico_bold7Tp1_to_subjbold7Tp1',
    # full brain
    mask=_opj(datapath, 'sub%.3i' % sub, 'templates', 'bold7Tp1', 'qa',
              'jointfgbrainmask_bold7Tp1_to_subjbold7Tp1.nii.gz'),
    preproc_img=smooth,
    # HP filtering is done by NiPy's GLM
    modelfx=fit_event_hrf_model,
    time_attr='time_coords',
    condition_attr='condition')

h5save(_opj('data', 'sub%.3i_2.0mm_hrf.hdf5' % sub), ds)
Example #50
    def get_model_ids(self):
        """Return a sorted list of integer IDs for all available models"""
        return _subdirs2ids(_opj(self.basedir, 'models'), 'model')
Example #51
    def get_bold_run_dataset(self,
                             subj,
                             task,
                             run,
                             flavor=None,
                             preproc_img=None,
                             add_sa=None,
                             **kwargs):
        """Return a dataset instance for the BOLD data of a particular
        subject/task/run combination.

        This method supports the same functionality as fmri_dataset(), while
        wrapping get_bold_run_image() to access the input fMRI data. Additional
        attributes, such as subject ID, task ID, and run ID are automatically
        stored as dataset sample attributes.

        Parameters
        ----------
        subj : int
          Subject identifier.
        task : int
          Task ID (see task_key.txt)
        run : int
          Run ID.
        flavor : None or str
          BOLD data flavor to access (see dataset description). If ``flavor``
          corresponds to an existing file in the respective task/run directory,
          it is assumed to be a stored dataset in HDF5 format and loaded via
          ``h5load()`` -- otherwise datasets are constructed from NIfTI images.
        preproc_img : callable or None
          If not None, this callable will be called with the loaded source BOLD
          image instance as an argument before fmri_dataset() is executed.
          The callable must return an image instance.
        add_sa : str or tuple(str)
          Single or sequence of names of files in the respective BOLD
          directory containing additional samples attributes. At this time
          all formats supported by NumPy's loadtxt() are supported.
          The number of lines in such a file needs to match the number of
          BOLD volumes. Each column is converted into a separate dataset
          sample attribute. The file name with a column index suffix is used
          to determine the attribute name.
        **kwargs:
          All additional arguments are passed on to fmri_dataset()

        Returns
        -------
        Dataset
        """
        from mvpa2.datasets.mri import fmri_dataset

        # check whether flavor corresponds to a particular file
        if flavor is not None:
            path = _opj(self.basedir, _sub2id(subj), 'BOLD',
                        _taskrun(task, run), flavor)
        if flavor is not None and os.path.exists(path):
            from mvpa2.base.hdf5 import h5load
            ds = h5load(path)
        else:
            bold_img = self.get_bold_run_image(subj, task, run, flavor=flavor)
            if preproc_img is not None:
                bold_img = preproc_img(bold_img)
            # load (and mask) data
            ds = fmri_dataset(bold_img, **kwargs)

        # inject sample attributes
        for name, var in (('subj', subj), ('task', task), ('run', run)):
            ds.sa[name] = np.repeat(var, len(ds))

        if add_sa is None:
            return ds

        if isinstance(add_sa, str):
            add_sa = (add_sa, )
        for sa in add_sa:
            # TODO: come up with a fancy way of detecting what kind of thing
            # we are accessing -- in any case: first axis needs to match
            # nsamples
            attrs = self._load_bold_task_run_data(subj, task, run, [sa],
                                                  np.loadtxt)
            if len(attrs.shape) == 1:
                ds.sa[sa] = attrs
            else:
                for col in range(attrs.shape[1]):
                    ds.sa['%s_%i' % (sa, col)] = attrs[:, col]
        return ds
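An illustrative call combining the documented parameters. The values are examples only; bold_dico_moco.txt is the motion-estimate file shown in Examples #42/#43, which would yield six extra sample attributes, one per column:

ds = of.get_bold_run_dataset(1, 1, 1,
                             flavor='dico',
                             add_sa='bold_dico_moco.txt',
                             mask=mask_fname)   # mask_fname as built in Example #33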
Example #52
    def _load_data(self, path, loadfx):
        # little helper to access stuff in datasets
        path = _opj(self.basedir, *path)
        return loadfx(path)
Example #53
    def get_scan_properties(self):
        """Return a dictionary with the scan properties listed in scan_key.txt
        """
        fname = _opj(self.basedir, 'scan_key.txt')
        return _get_description_dict(fname)
Example #54
File: openfmri.py Project: Guenx/PyMVPA
    def get_model_ids(self):
        """Returns a sorted list of integer IDs for all available models"""
        return _subdirs2ids(_opj(self._basedir, 'models'), 'model')

import os
import sys
from glob import glob
from os.path import join as _opj
from tempfile import mkdtemp

import numpy as np
import nibabel as nb
from mvpa2.base.hdf5 import h5load, h5save
from mvpa2.datasets.mri import map2nifti, fmri_dataset

# +1 to be compatible with condor submission
subj = int(sys.argv[1]) + 1

res_dir = 'grp_results'
if not os.path.exists(res_dir):
    os.makedirs(res_dir)

# because the stored SL results have no proper imghdr
subjtmpl = nb.load('BASEDIR/sub%.3i/templates/bold7Tp1/head.nii.gz' % subj)

# orig results
ds = h5load(_opj('results', 'sub%.3i_2.0mm_hrf_sl_orig.hdf5' % subj))
# load permutations and merge with orig results
data = np.vstack([ds.samples[0]] + [
    np.load(fname) for fname in sorted(
        glob(_opj('results', 'sub%.3i_2.0mm_hrf_sl_perm*.npy' % subj)))
])
#data = ds.samples

# write out as NIfTI
tdir = mkdtemp()
print tdir
orig_fname = _opj(tdir, 'data_in_orig.nii.gz')
group_fname = _opj(tdir, 'data_in_group.nii.gz')
nb.save(map2nifti(ds, data, imghdr=subjtmpl.get_header()), orig_fname)

# project into group space