Example #1
# Assumed context for these snippets: ``nl`` is the ``neural`` helper package
# (``import neural as nl``); the rest are standard-library modules. Note that
# the code targets Python 2 (it uses ``basestring`` and ``xrange``).
import copy
import os
import re
import shutil
import tempfile

import neural as nl


def align_epi_anat(anatomy, epi_dsets, skull_strip_anat=True):
    ''' aligns epis to anatomy using ``align_epi_anat.py`` script
    
    :epi_dsets:       a string or a list of strings naming the EPI child datasets
    :skull_strip_anat:     if ``True``, ``anatomy`` will be skull-stripped using the default method
    
    The default output suffix is "_al"
    '''

    # accept a single dataset name as well as a list
    if isinstance(epi_dsets, basestring):
        epi_dsets = [epi_dsets]

    if len(epi_dsets) == 0:
        nl.notify('Warning: no epi alignment datasets given for anatomy %s!' %
                  anatomy,
                  level=nl.level.warning)
        return

    # nothing to do if every EPI already has an aligned version
    if all(os.path.exists(nl.suffix(x, '_al')) for x in epi_dsets):
        return

    anatomy_use = anatomy

    if skull_strip_anat:
        nl.skull_strip(anatomy, '_ns')
        anatomy_use = nl.suffix(anatomy, '_ns')

    inputs = [anatomy_use] + epi_dsets
    dset_products = lambda dset: [
        nl.suffix(dset, '_al'),
        nl.prefix(dset) + '_al_mat.aff12.1D',
        nl.prefix(dset) + '_tsh_vr_motion.1D'
    ]
    products = nl.flatten([dset_products(dset) for dset in epi_dsets])
    with nl.run_in_tmp(inputs, products):
        if nl.is_nifti(anatomy_use):
            anatomy_use = nl.afni_copy(anatomy_use)
        epi_dsets_use = []
        for dset in epi_dsets:
            if nl.is_nifti(dset):
                epi_dsets_use.append(nl.afni_copy(dset))
            else:
                epi_dsets_use.append(dset)
        cmd = [
            "align_epi_anat.py", "-epi2anat", "-anat_has_skull", "no",
            "-epi_strip", "3dAutomask", "-anat", anatomy_use, "-epi_base", "5",
            "-epi", epi_dsets_use[0]
        ]
        if len(epi_dsets_use) > 1:
            cmd += ['-child_epi'] + epi_dsets_use[1:]
        # run the alignment for the base EPI (and any child EPIs)
        nl.run(cmd)

        # convert AFNI-format outputs back to NIfTI, gzipping if the source was gzipped
        for dset in epi_dsets:
            if nl.is_nifti(dset):
                dset_nifti = nl.nifti_copy(nl.prefix(dset) + '_al+orig')
                if dset_nifti and os.path.exists(
                        dset_nifti) and dset_nifti.endswith(
                            '.nii') and dset.endswith('.gz'):
                    nl.run(['gzip', dset_nifti])
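
A minimal usage sketch (the dataset names are hypothetical; ``nl`` is the assumed ``neural`` helper package noted above):

    # align two EPI runs to an anatomical dataset; with the default
    # skull_strip_anat=True this skull-strips the anatomy first, then
    # writes run1_al+orig, run2_al+orig, and the alignment/motion 1D files
    align_epi_anat('anat+orig', ['run1+orig', 'run2+orig'])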
Example #2
def subjects(experiment=None,label=None,tags=None,only_included=True):
    '''returns a list of subject objects for all subjects with valid JSON files
    
    :experiment:    only return subjects that have a scan for the given experiment
    :label:         only return subjects who have datasets with that label
    :tags:          only return subjects whose sessions, taken together, include
                    every given tag (a string or a list of strings)
    
    :only_included: if True (the default), will exclude any subjects with ``subject.include``
                    set to False
    
    Using this list, it is easy to filter again to find a specific subset of subjects. 
    
    For example, for all the subjects for the experiment ``Exp1``::
    
        subjects = padre.subjects('Exp1')
    
    To find all subjects who contain a run called ``rest``::
    
        subjects = padre.subjects(label='rest')
    
    To get a list of all subjects who have had a left ATL surgery::

        subjects = [s for s in padre.subjects() if s.meta['clinical']['surgery_type']=='left atl']
    
    '''
    if not _indexed_and_loaded_all_subjects:
        _index_all_subjects(True)
    if experiment is None and p._global_experiment:
        experiment = p._global_experiment
    all_subjs = _all_subjects.values()
    if label:
        all_subjs = [x for x in all_subjs if len(x.dsets(label))]
    if experiment:
        all_subjs = [x for x in all_subjs
                     if experiment in [x._sessions[y]['experiment']
                                       for y in x._sessions
                                       if 'experiment' in x._sessions[y]]]
    if tags:
        if isinstance(tags,basestring):
            tags = [tags]
        all_subjs = [x for x in all_subjs
                     if all(tag in nl.flatten([x._sessions[y]['tags']
                                               for y in x._sessions
                                               if 'tags' in x._sessions[y]])
                            for tag in tags)]
    if only_included and not p._include_all:
        all_subjs = [x for x in all_subjs if x.include]
    
    return all_subjs
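
The filters can be combined; a sketch with hypothetical experiment and tag names:

    # subjects scanned for Exp1 whose sessions are tagged both 'pilot' and 'usable'
    pilot_subjects = padre.subjects(experiment='Exp1', tags=['pilot', 'usable'])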
Example #3
    def partial(self,start=0,end=None,run=0):
        '''chops the stimulus to include only time points ``start`` through ``end`` (in reps, inclusive; ``None`` = until the end)

        If using a stim_times-style stimulus, this changes only the ``run``'th run; if a column, it just chops the column'''
        self.read_file()
        decon_stim = copy.copy(self)
        if start<0:
            start = 0
        if self.type()=="column":
            decon_stim.column_file = None
            if end is not None and end >= len(decon_stim.column):
                end = None
            if end is None:
                decon_stim.column = decon_stim.column[start:]
            else:
                decon_stim.column = decon_stim.column[start:end+1]
            if len(decon_stim.column)==0:
                return None
        if self.type()=="times":
            if self.TR is None:
                nl.notify('Error: cannot get partial segment of a stim_times stimulus without a TR',level=nl.level.error)
                return None
            def time_in(a):
                # True if stim time ``a`` (in seconds), converted to reps, falls between ``start`` and ``end``
                first_number = r'^(\d+(\.\d+)?)'
                if isinstance(a,basestring):
                    m = re.match(first_number,a)
                    if m:
                        a = m.group(1)
                    else:
                        nl.notify('Warning: cannot interpret a number from the stim_time: "%s"' % a,level=nl.level.warning)
                        return False
                a = float(a)/self.TR
                if a >= start and (end is None or a <= end):
                    return True
                return False

            decon_stim.times_file = None
            # a single run given as a flat list is wrapped into a list of runs
            if len(decon_stim.times)==0 or '__iter__' not in dir(decon_stim.times[0]):
                decon_stim.times = [decon_stim.times]
            decon_stim.times[run] = [x for x in decon_stim.times[run] if time_in(x)]
            if len(nl.flatten(decon_stim.times))==0:
                return None
        return decon_stim
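
For example, assuming ``stim`` is an instance of this stimulus class (hypothetical name):

    # keep only reps 0 through 99 of the first run; returns None if nothing survives
    stim_early = stim.partial(start=0, end=99)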
Example #4
def stack_decon_stims(stim_list):
    '''takes a ``list`` (in run order) of ``dict``s mapping stim_name to DeconStim and stacks them together,
    returning a single ``dict`` mapping stim_name to the concatenated DeconStim

    As in, takes:
    [
        # Run 1
        { "stim1": decon_stim1a, "stim2": decon_stim2a },
        # Run 2
        { "stim1": decon_stim1b, "stim2": decon_stim2b, "stim3": decon_stim3 }
    ]

    And makes:
        { "stim1": decon_stim1, "stim2": decon_stim2, "stim3": decon_stim3 }

    If a stimulus is not present in a run, it will fill that run with an empty stimulus
    '''
    stim_names = list(set(nl.flatten([stims.keys() for stims in stim_list])))

    stim_dict = {}
    for stim_name in stim_names:
        types = list(set([stims[stim_name].type() for stims in stim_list if stim_name in stims]))
        if len(types)>1:
            nl.notify('Error: Trying to stack stimuli of different types! (%s)' % stim_name,level=nl.level.error)
            return None
        stim_type = types[0]

        stim_stack = []
        for i in xrange(len(stim_list)):
            if stim_name in stim_list[i]:
                stim_stack.append(stim_list[i][stim_name])
            else:
                stim_stack.append(stim_list[i].values()[0].blank_stim(type=stim_type))
        stim_dict[stim_name] = copy.copy(stim_stack[0])
        for stim in stim_stack[1:]:
            stim_dict[stim_name] = stim_dict[stim_name].concat_stim(stim)
    return stim_dict
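
A usage sketch mirroring the docstring above (the ``decon_stim*`` names stand in for hypothetical DeconStim objects):

    stims = stack_decon_stims([
        {'stim1': decon_stim1a, 'stim2': decon_stim2a},                        # run 1
        {'stim1': decon_stim1b, 'stim2': decon_stim2b, 'stim3': decon_stim3},  # run 2
    ])
    # stims now maps each name to a single stimulus spanning both runs;
    # 'stim3' is padded with a blank stimulus for run 1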
Example #5
def dset_in_dict(fname, l):
    '''returns ``True`` if ``fname`` occurs more than once among the dataset records
    (dicts with a ``'filename'`` key) in the values of ``l``'''
    return len([x for x in nl.flatten(l.values()) if fname == x['filename']]) > 1
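
For example, with a hypothetical dict mapping session names to lists of dataset records:

    sessions = {'sess1': [{'filename': 'run1.nii.gz'}],
                'sess2': [{'filename': 'run1.nii.gz'}]}
    dset_in_dict('run1.nii.gz', sessions)   # True: the filename appears twice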
Example #6
def smooth_decon_to_fwhm(decon,fwhm,cache=True):
    '''takes an input :class:`Decon` object and uses ``3dBlurToFWHM`` to smooth the input datasets
    as close as possible to ``fwhm`` (in mm) before running the deconvolution. If ``cache`` is ``True``,
    will save the blurred input files (and reuse them in the future)'''
    if os.path.exists(decon.prefix):
        return
    blur_dset = lambda dset: nl.suffix(dset,'_smooth_to_%.2f' % fwhm)

    with nl.notify('Running smooth_decon_to_fwhm analysis (with %.2fmm blur)' % fwhm):
        tmpdir = tempfile.mkdtemp()
        try:
            cwd = os.getcwd()
            # collect every filename referenced by the Decon object, stripping any [sub-brick] selectors
            random_files = [re.sub(r'\[\d+\]$','',str(x)) for x in nl.flatten(
                [x for x in decon.__dict__.values() if isinstance(x,basestring) or isinstance(x,list)] +
                [x.values() for x in decon.__dict__.values() if isinstance(x,dict)])]
            files_to_copy = [x for x in random_files if os.path.exists(x) and x[0]!='/']
            files_to_copy += [blur_dset(dset) for dset in decon.input_dsets if os.path.exists(blur_dset(dset))]
            # copy supporting files (and directories) into the temp directory
            for fname in files_to_copy:
                try:
                    shutil.copytree(fname, os.path.join(tmpdir, os.path.basename(fname)))
                except OSError:
                    shutil.copy(fname, tmpdir)

            copyback_files = [decon.prefix,decon.errts]
            with nl.run_in(tmpdir):
                if os.path.exists(decon.prefix):
                    os.remove(decon.prefix)

                # Create the blurred inputs (or load from cache)
                if cache and all([os.path.exists(os.path.join(cwd,blur_dset(dset))) for dset in decon.input_dsets]):
                    # Everything is already cached...
                    nl.notify('Using cached blurred datasets')
                else:
                    # Need to make them from scratch
                    with nl.notify('Creating blurred datasets'):
                        old_errts = decon.errts
                        decon.errts = 'residual.nii.gz'
                        decon.prefix = os.path.basename(decon.prefix)
                        # Run once in place to get the residual dataset
                        decon.run()
                        running_reps = 0
                        for dset in decon.input_dsets:
                            info = nl.dset_info(dset)
                            # pull this run's span of volumes out of the concatenated residual
                            residual_dset = nl.suffix(dset,'_residual')
                            nl.run(['3dbucket','-prefix',residual_dset,
                                    '%s[%d..%d]' % (decon.errts,running_reps,running_reps+info.reps-1)],
                                   products=residual_dset)
                            # blur this run toward the target FWHM, using its residual as the blurmaster
                            cmd = ['3dBlurToFWHM','-quiet','-input',dset,'-blurmaster',residual_dset,
                                   '-prefix',blur_dset(dset),'-FWHM',fwhm]
                            if decon.mask:
                                if decon.mask=='auto':
                                    cmd += ['-automask']
                                else:
                                    cmd += ['-mask',decon.mask]
                            nl.run(cmd,products=blur_dset(dset))
                            running_reps += info.reps
                            if cache:
                                copyback_files.append(blur_dset(dset))
                    decon.errts = old_errts
                decon.input_dsets = [blur_dset(dset) for dset in decon.input_dsets]
                for d in [decon.prefix,decon.errts]:
                    if os.path.exists(d):
                        try:
                            os.remove(d)
                        except OSError:
                            pass
                decon.run()
                for copyfile in copyback_files:
                    if os.path.exists(copyfile):
                        shutil.copy(copyfile,cwd)
                    else:
                        nl.notify('Warning: deconvolve did not produce expected file %s' % copyfile,level=nl.level.warning)
        finally:
            shutil.rmtree(tmpdir,True)
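
A hedged usage sketch, assuming ``decon`` is an already-configured :class:`Decon` object:

    # blur the inputs toward 6 mm FWHM (caching the blurred copies for reuse),
    # then run the deconvolution on the smoothed datasets
    smooth_decon_to_fwhm(decon, 6.0, cache=True)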