Example #1
0
def atlas_overlap(dset, atlas=None):
    '''aligns ``dset`` to the TT_N27 atlas and returns ``(cost,overlap)``

    Aligns the first sub-brick of ``dset`` to ``atlas`` with ``3dAllineate``,
    parses the final alignment cost from the program output, then measures the
    percentage overlap between the thresholded aligned image and the atlas.

    :dset:  filename of the dataset to check
    :atlas: atlas dataset (resolved through ``find_atlas``)

    Returns ``None`` if no atlas can be found, otherwise ``(cost, overlap)``.
    ``cost`` is ``None`` if it could not be parsed from the 3dAllineate output
    (previously this raised a ``NameError``).
    '''
    atlas = find_atlas(atlas)
    if atlas is None:
        return None

    cost_func = 'crM'
    infile = os.path.abspath(dset)
    tmpdir = tempfile.mkdtemp()
    cost = None  # default in case the cost line is missing from the output
    try:
        with nl.run_in(tmpdir):
            o = nl.run([
                '3dAllineate', '-verb', '-base', atlas, '-source', infile + '[0]',
                '-NN', '-final', 'NN', '-cost', cost_func, '-nmatch', '20%',
                '-onepass', '-fineblur', '2', '-cmass', '-prefix', 'test.nii.gz'
            ])
            m = re.search(r'Final\s+cost = ([\d.]+) ;', o.output)
            if m:
                cost = float(m.group(1))
            # use a quarter of the atlas-masked mean as the data threshold
            o = nl.run(['3dmaskave', '-mask', atlas, '-q', 'test.nii.gz'],
                       stderr=None)
            data_thresh = float(o.output) / 4
            i = nl.dset_info('test.nii.gz')
            # count voxels where (atlas > 10) agrees with (data > threshold)
            o = nl.run([
                '3dmaskave', '-q', '-mask', 'SELF', '-sum',
                nl.calc([atlas, 'test.nii.gz'],
                        'equals(step(a-10),step(b-%.2f))' % data_thresh)
            ],
                       stderr=None)
            overlap = 100 * float(
                o.output) / (i.voxel_dims[0] * i.voxel_dims[1] * i.voxel_dims[2])
    finally:
        # always remove the scratch directory, even if a command failed
        shutil.rmtree(tmpdir, ignore_errors=True)
    return (cost, overlap)
Example #2
0
def cdf(dset,p):
    '''converts p-value ``p`` into the equivalent statistic for ``dset``

    Reads the statistic type and parameters of the first sub-brick and asks
    AFNI's ``cdf`` program (``-p2t``) for the matching statistic threshold.
    Returns ``None`` if the dataset header can't be read.
    '''
    info = nl.dset_info(dset)
    if info is None:
        nl.notify('Error: Could not get info for dset %s'%dset, level=nl.level.error)
        return None
    stat_type = info.subbricks[0]['stat']
    stat_params = info.subbricks[0]['params']
    cmd = ['cdf','-p2t',stat_type,str(p)] + stat_params
    # third whitespace-separated token of cdf's output is the value
    return float(subprocess.check_output(cmd).split()[2])
Example #3
0
def skullstrip_template(dset, template, prefix=None, suffix=None, dilate=0):
    '''Takes the raw anatomy ``dset``, aligns it to a template brain, and applies a templated skullstrip. Should produce fairly reliable skullstrips as long
    as there is a decent amount of normal brain and the overall shape of the brain is normal-ish'''
    if suffix is None:
        suffix = '_sstemplate'
    if prefix is None:
        prefix = nl.suffix(dset, suffix)
    if os.path.exists(prefix):
        # output already present; nothing to do
        return
    with nl.notify('Running template-based skull-strip on %s' % dset):
        dset = os.path.abspath(dset)
        template = os.path.abspath(template)
        scratch = tempfile.mkdtemp()
        orig_dir = os.getcwd()
        with nl.run_in(scratch):
            # affine-align the template to the subject first...
            nl.affine_align(template,
                            dset,
                            skull_strip=None,
                            cost='mi',
                            opts=['-nmatch', '100%'])
            # ...then refine nonlinearly with 3dQwarp
            warped = nl.suffix(template, '_qwarp')
            nl.run(['3dQwarp', '-minpatch', '20', '-penfac', '10',
                    '-noweight', '-source',
                    nl.suffix(template, '_aff'), '-base', dset,
                    '-prefix', warped],
                   products=warped)
            # mask the anatomy wherever the warped template exceeds 5% of its max
            max_value = nl.dset_info(warped).subbricks[0]['max']
            nl.calc([dset, warped], 'a*step(b-%f*0.05)' % max_value, prefix)
            shutil.move(prefix, orig_dir)
        shutil.rmtree(scratch)
Example #4
0
def roi_stats(mask,dset):
    '''computes per-ROI statistics of ``dset`` using AFNI's ``3dROIstats``

    Returns a dict of the form ``{roi_number: {stat_name: value}}``.
    ``3dROIstats`` is invoked twice (paired ``values``/``options`` lists)
    because the two option sets produce different output columns. If the
    grids of ``mask`` and ``dset`` differ, ``mask`` is first resampled onto
    ``dset``'s grid with ``3dfractionize`` and cached on disk under a
    grid-derived suffix.
    '''
    out_dict = {}
    # maps from the column-name prefixes 3dROIstats prints (e.g. "NZMean_3")
    # to our stat names; one dict per invocation, paired with ``options``
    values = [{'Med': 'median', 'Min': 'min', 'Max': 'max',
               'NZMean': 'nzmean', 'NZSum': 'nzsum', 'NZSigma': 'nzsigma',
               'Mean': 'mean', 'Sigma': 'sigma', 'Mod': 'mode','NZcount':'nzvoxels'},
              {'NZMod': 'nzmode', 'NZMed': 'nzmedian', 'NZMax': 'nzmax', 'NZMin': 'nzmin','Mean':'mean'}]
    options = [['-nzmean','-nzsum','-nzvoxels','-minmax','-sigma','-nzsigma','-median','-mode'],
               ['-nzminmax','-nzmedian','-nzmode']]
    if not nl.dset_grids_equal((mask,dset)):
        # resample the mask onto the dset's grid; cache by voxel size/dims
        i = nl.dset_info(dset)
        grid_hash = '_' + '_'.join([str(x) for x in (i.voxel_size + i.voxel_dims)])
        new_mask = nl.suffix(mask,grid_hash)
        if not os.path.exists(new_mask):
            nl.run(["3dfractionize","-template",dset,"-input",nl.calc(mask,"a",datum="short"),"-prefix",new_mask,"-preserve","-clip","0.2"])
        mask = new_mask
    for i in xrange(len(values)):
        cmd = ['3dROIstats','-1Dformat','-nobriklab','-mask',mask] + options[i] + [dset]
        out = subprocess.check_output(cmd).split('\n')
        # line 1 is the header: column names like "NZMean_7" -> (stat, roi#)
        header = [(values[i][x.split('_')[0]],int(x.split('_')[1])) for x in out[1].split()[1:]]
        # data rows appear on every other line after the header
        for j in xrange(len(out)/2-1):
            stats = [float(x) for x in out[(j+1)*2+1][1:].split()]
            for s in xrange(len(stats)):
                roi = header[s][1]
                stat_name = header[s][0]
                stat = stats[s]
                if roi not in out_dict:
                    out_dict[roi] = {}
                out_dict[roi][stat_name] = stat
    return out_dict
Example #5
0
def cdf(dset, p):
    '''translates p-value ``p`` to the equivalent statistic value for ``dset``

    Uses the statistic type and parameters of the first sub-brick with
    AFNI's ``cdf`` program in ``-p2t`` mode. Returns ``None`` when the
    dataset info can't be read.
    '''
    info = nl.dset_info(dset)
    if info is None:
        nl.notify('Error: Could not get info for dset %s' % dset,
                  level=nl.level.error)
        return None
    first = info.subbricks[0]
    cmd = ['cdf', '-p2t', first['stat'], str(p)] + first['params']
    output_tokens = subprocess.check_output(cmd).split()
    return float(output_tokens[2])
Example #6
0
 def __getattribute__(self,name):
     '''lazily computes and caches dataset info on first access of ``.info``

     All other attribute lookups fall through to the default behavior.
     '''
     if name=='info':
         if self._info==None:
             try:
                 self._info = nl.dset_info(self.__abspath__())
             except:
                 # best-effort: if reading the header fails, leave
                 # self._info unchanged (it will be returned as-is below)
                 pass
         return self._info
     else:
         return object.__getattribute__(self,name)
Example #7
0
def qwarp_apply(dset_from,
                dset_warp,
                affine=None,
                warp_suffix='_warp',
                master='WARP',
                interp=None,
                prefix=None):
    '''applies the transform from a previous qwarp

    Takes the warp parameters stored in ``dset_warp`` (usually a dataset
    whose name ends in ``_WARP``) and applies them to ``dset_from``. A
    ``.1D`` file passed as ``affine`` is applied simultaneously with the
    nonlinear warp.

    ``interp``, when given, selects the interpolation method; otherwise
    3dNwarpApply's default (currently wsinc5) is used.

    The output dataset gets ``warp_suffix`` appended to its name unless an
    explicit ``prefix`` is supplied.
    '''
    out_dset = prefix
    if out_dset is None:
        out_dset = os.path.split(nl.suffix(dset_from, warp_suffix))[1]
    from_info = nl.dset_info(dset_from)
    warp_info = nl.dset_info(dset_warp)
    if from_info.orient != warp_info.orient:
        # If the datasets are different orientations, the transform won't
        # be applied correctly -- resample the source into the warp's
        # orientation first
        reoriented = nl.suffix(dset_from, '_reorient')
        nl.run(['3dresample', '-orient', warp_info.orient,
                '-prefix', reoriented, '-inset', dset_from],
               products=reoriented)
        dset_from = reoriented
    nwarp = str(dset_warp)
    if affine:
        nwarp = nwarp + ' ' + affine
    cmd = ['3dNwarpApply', '-nwarp', nwarp,
           '-source', dset_from, '-master', master, '-prefix', out_dset]
    if interp:
        cmd += ['-interp', interp]
    nl.run(cmd, products=out_dset)
Example #8
0
def auto_qc(dset, inside_perc=60, atlas=None, p=0.001):
    '''returns ``False`` if ``dset`` fails minimum checks, or returns a float from ``0.0`` to ``100.0`` describing data quality

    :inside_perc: minimum percentage of significant voxels inside the brain
                  for statistical datasets
    :atlas:       atlas passed through to :meth:`inside_brain`
    :p:           p-value threshold passed through to :meth:`inside_brain`
    '''
    with nl.notify('Running quality check on %s:' % dset):
        if not os.path.exists(dset):
            nl.notify('Error: cannot find the file!', level=nl.level.error)
            return False

        info = nl.dset_info(dset)
        if not info:
            nl.notify('Error: could not read the dataset!',
                      level=nl.level.error)
            # bug fix: previously fell through and crashed on info.subbricks
            return False

        if any(['stat' in x for x in info.subbricks]):
            with nl.notify('Statistical results detected...'):
                inside = inside_brain(dset, atlas=atlas, p=p)
                # bug fix: the format string previously had no "%" argument
                nl.notify('%.1f significant voxels inside brain' % inside)
                if inside < inside_perc:
                    nl.notify('Warning: below quality threshold!',
                              level=nl.level.warning)
#                    return False
                nl.notify('Looks ok')
                return inside

        if len(info.subbricks) > 1:
            with nl.notify('Time-series detected...'):
                return_val = True
                # neutral default so min() below is defined even when the
                # outlier check (only run for >5 reps) is skipped
                perc_outliers = 1.0
                (cost, overlap) = atlas_overlap(dset)
                if cost > 0.15 or overlap < 80:
                    nl.notify(
                        'Warning: does not appear to conform to brain dimensions',
                        level=nl.level.warning)
                    return_val = False
                if len(info.subbricks) > 5:
                    (oc, perc_outliers) = outcount(dset)
                    if perc_outliers > 0.1:
                        nl.notify(
                            'Warning: large amount of outlier time points',
                            level=nl.level.warning)
                        return_val = False
            if return_val:
                nl.notify('Looks ok')
                # NOTE(review): "100 * perc_outliers" rewards datasets with
                # MORE outliers; possibly intended 100*(1-perc_outliers).
                # Preserved as-is -- confirm against callers.
                return min(100 * (1 - cost), overlap, 100 * perc_outliers)
            return False

        with nl.notify('Single brain image detected...'):
            (cost, overlap) = atlas_overlap(dset)
            # Be more lenient if it's not an EPI, cuz who knows what else is in this image
            if cost > 0.45 or overlap < 70:
                nl.notify(
                    'Warning: does not appear to conform to brain dimensions',
                    level=nl.level.warning)
                return False
            nl.notify('Looks ok')
            return min(100 * (1 - cost), overlap)
Example #9
0
def roi_stats(mask, dset):
    '''computes per-ROI statistics of ``dset`` using AFNI's ``3dROIstats``

    Returns a dict of the form ``{roi_number: {stat_name: value}}``.
    ``3dROIstats`` is run twice (paired ``values``/``options`` lists) because
    the two option sets produce different output columns. If ``mask`` and
    ``dset`` are on different grids, ``mask`` is resampled onto ``dset``'s
    grid with ``3dfractionize`` and cached under a grid-derived suffix.
    '''
    out_dict = {}
    # maps from the column-name prefixes 3dROIstats prints (e.g. "NZMean_3")
    # to our stat names; one dict per invocation, paired with ``options``
    values = [{
        'Med': 'median',
        'Min': 'min',
        'Max': 'max',
        'NZMean': 'nzmean',
        'NZSum': 'nzsum',
        'NZSigma': 'nzsigma',
        'Mean': 'mean',
        'Sigma': 'sigma',
        'Mod': 'mode',
        'NZcount': 'nzvoxels'
    }, {
        'NZMod': 'nzmode',
        'NZMed': 'nzmedian',
        'NZMax': 'nzmax',
        'NZMin': 'nzmin',
        'Mean': 'mean'
    }]
    options = [[
        '-nzmean', '-nzsum', '-nzvoxels', '-minmax', '-sigma', '-nzsigma',
        '-median', '-mode'
    ], ['-nzminmax', '-nzmedian', '-nzmode']]
    if not nl.dset_grids_equal((mask, dset)):
        # resample the mask onto the dset's grid; cache by voxel size/dims
        i = nl.dset_info(dset)
        grid_hash = '_' + '_'.join(
            [str(x) for x in (i.voxel_size + i.voxel_dims)])
        new_mask = nl.suffix(mask, grid_hash)
        if not os.path.exists(new_mask):
            nl.run([
                "3dfractionize", "-template", dset, "-input",
                nl.calc(mask, "a", datum="short"), "-prefix", new_mask,
                "-preserve", "-clip", "0.2"
            ])
        mask = new_mask
    for i in xrange(len(values)):
        cmd = ['3dROIstats', '-1Dformat', '-nobriklab', '-mask', mask
               ] + options[i] + [dset]
        out = subprocess.check_output(cmd).split('\n')
        # line 1 is the header: column names like "NZMean_7" -> (stat, roi#)
        header = [(values[i][x.split('_')[0]], int(x.split('_')[1]))
                  for x in out[1].split()[1:]]
        # data rows appear on every other line after the header
        for j in xrange(len(out) / 2 - 1):
            stats = [float(x) for x in out[(j + 1) * 2 + 1][1:].split()]
            for s in xrange(len(stats)):
                roi = header[s][1]
                stat_name = header[s][0]
                stat = stats[s]
                if roi not in out_dict:
                    out_dict[roi] = {}
                out_dict[roi][stat_name] = stat
    return out_dict
Example #10
0
def outcount(dset, fraction=0.1):
    '''gets outlier count and returns ``(list of proportion of outliers by timepoint,total percentage of outlier time points)'''
    polort = nl.auto_polort(dset)
    info = nl.dset_info(dset)
    result = nl.run(
        ['3dToutcount', '-fraction', '-automask', '-polort', polort, dset],
        stderr=None,
        quiet=None)
    if result.return_code != 0 or not result.output:
        # command failed or produced nothing -- no result
        return None
    # one outlier proportion per line of output
    oc = [float(line) for line in result.output.split('\n') if line.strip() != '']
    good_timepoints = sum(1 for x in oc if x < fraction)
    perc_outliers = 1 - (good_timepoints / float(info.reps))
    return (oc, perc_outliers)
Example #11
0
def qwarp_apply(dset_from,dset_warp,affine=None,warp_suffix='_warp',master='WARP',interp=None,prefix=None):
    '''applies the transform from a previous qwarp

    Uses the warp parameters from the dataset listed in
    ``dset_warp`` (usually the dataset name ends in ``_WARP``)
    to the dataset ``dset_from``. If a ``.1D`` file is given
    in the ``affine`` parameter, it will be applied simultaneously
    with the qwarp.

    If the parameter ``interp`` is given, will use as interpolation method,
    otherwise it will just use the default (currently wsinc5)

    Output dataset with have the ``warp_suffix`` suffix added to its name
    '''
    out_dset = prefix
    if out_dset==None:
        # default output name: basename of the source plus the warp suffix
        out_dset = os.path.split(nl.suffix(dset_from,warp_suffix))[1]
    dset_from_info = nl.dset_info(dset_from)
    dset_warp_info = nl.dset_info(dset_warp)
    if(dset_from_info.orient!=dset_warp_info.orient):
        # If the datasets are different orientations, the transform won't be applied correctly
        nl.run(['3dresample','-orient',dset_warp_info.orient,'-prefix',nl.suffix(dset_from,'_reorient'),'-inset',dset_from],products=nl.suffix(dset_from,'_reorient'))
        dset_from = nl.suffix(dset_from,'_reorient')
    # 3dNwarpApply accepts multiple space-separated transforms in -nwarp
    warp_opt = str(dset_warp)
    if affine:
        warp_opt += ' ' + affine
    cmd = [
        '3dNwarpApply',
        '-nwarp', warp_opt]
    cmd += [
        '-source', dset_from,
        '-master',master,
        '-prefix', out_dset
    ]

    if interp:
        cmd += ['-interp',interp]

    nl.run(cmd,products=out_dset)
Example #12
0
def ijk_to_xyz(dset,ijk):
    '''convert the dset indices ``ijk`` to RAI coordinates ``xyz``'''
    i = nl.dset_info(dset)
    # @AfniOrient2RAImap returns signed 1-based axis codes mapping the
    # dataset's orientation onto RAI order
    orient_codes = [int(x) for x in nl.run(['@AfniOrient2RAImap',i.orient]).output.split()]
    # convert to 0-based axis indices (sign carries direction)
    orient_is = [abs(x)-1 for x in orient_codes]
    rai = []
    for rai_i in xrange(3):
         ijk_i = orient_is[rai_i]
         if orient_codes[rai_i] > 0:
             # axis runs in the same direction as RAI: offset from the start
             rai.append(ijk[ijk_i]*i.voxel_size[rai_i] + i.spatial_from[rai_i])
         else:
             # axis is flipped: offset back from the far edge
             rai.append(i.spatial_to[rai_i] - ijk[ijk_i]*i.voxel_size[rai_i])
    return rai
Example #13
0
def qwarp_epi(dset, align_subbrick=5, suffix='_qwal', prefix=None):
    '''aligns an EPI time-series using 3dQwarp
    
    Very expensive and not efficient at all, but it can produce pretty impressive alignment for EPI time-series with significant
    distortions due to motion'''
    info = nl.dset_info(dset)
    if info == None:
        nl.notify('Error reading dataset "%s"' % (dset), level=nl.level.error)
        return False
    if prefix == None:
        prefix = nl.suffix(dset, suffix)
    # per-sub-brick temporary filename
    dset_sub = lambda x: '_tmp_qwarp_epi-%s_%d.nii.gz' % (nl.prefix(dset), x)
    try:
        # extract the reference sub-brick all others are warped to
        align_dset = nl.suffix(dset_sub(align_subbrick), '_warp')
        nl.calc('%s[%d]' % (dset, align_subbrick),
                expr='a',
                prefix=align_dset,
                datum='float')
        # nonlinearly warp every other sub-brick onto the reference
        for i in xrange(info.reps):
            if i != align_subbrick:
                nl.calc('%s[%d]' % (dset, i),
                        expr='a',
                        prefix=dset_sub(i),
                        datum='float')
                nl.run([
                    '3dQwarp', '-nowarp', '-workhard', '-superhard',
                    '-minpatch', '9', '-blur', '0', '-pear', '-nopenalty',
                    '-base', align_dset, '-source',
                    dset_sub(i), '-prefix',
                    nl.suffix(dset_sub(i), '_warp')
                ],
                       quiet=True)
        # reassemble the warped sub-bricks, preserving timing info
        cmd = ['3dTcat', '-prefix', prefix]
        if info.TR:
            cmd += ['-tr', info.TR]
        if info.slice_timing:
            cmd += ['-tpattern', info.slice_timing]
        cmd += [nl.suffix(dset_sub(i), '_warp') for i in xrange(info.reps)]
        nl.run(cmd, quiet=True)
    except Exception as e:
        raise e
    finally:
        # best-effort cleanup of the per-sub-brick temporary files
        # NOTE(review): warped files were created with suffix '_warp' but this
        # cleans suffix 'warp' (no underscore) -- verify nl.suffix semantics;
        # the warped temporaries may be left behind
        for i in xrange(info.reps):
            for suffix in ['', 'warp']:
                try:
                    os.remove(nl.suffix(dset_sub(i), suffix))
                except:
                    pass
Example #14
0
def ijk_to_xyz(dset, ijk):
    '''convert the dset indices ``ijk`` to RAI coordinates ``xyz``'''
    info = nl.dset_info(dset)
    # signed 1-based axis codes mapping the dataset orientation to RAI order
    raw_codes = nl.run(['@AfniOrient2RAImap', info.orient]).output.split()
    codes = [int(c) for c in raw_codes]
    # 0-based axis indices; the sign of the code carries the direction
    axis_for = [abs(c) - 1 for c in codes]
    xyz = []
    for axis in xrange(3):
        idx = ijk[axis_for[axis]]
        if codes[axis] > 0:
            # axis runs in RAI direction: offset forward from the start
            coord = idx * info.voxel_size[axis] + info.spatial_from[axis]
        else:
            # flipped axis: offset backward from the far edge
            coord = info.spatial_to[axis] - idx * info.voxel_size[axis]
        xyz.append(coord)
    return xyz
Example #15
0
def create_censor_file(input_dset,out_prefix=None,fraction=0.1,clip_to=0.1,max_exclude=0.3,motion_file=None,motion_exclude=1.0):
    '''create a binary censor file using 3dToutcount

    :input_dset:        the input dataset
    :prefix:            output 1D file (default: ``prefix(input_dset)`` + ``.1D``)
    :fraction:          censor a timepoint if proportional of outliers in this
                        time point is greater than given value
    :clip_to:           keep the number of time points censored under this proportion
                        of total reps. If more time points would be censored,
                        it will only pick the top ``clip_to*reps`` points
    :max_exclude:       if more time points than the given proportion of reps are excluded for the
                        entire run, throw an exception -- something is probably wrong
    :motion_file:       optional filename of a "motion" file with multiple columns and rows corresponding to reps.
                        It doesn't really matter what the values are, as long as they are appropriate relative to ``motion_exclude``
    :motion_exclude:    Will exclude any reps that have a value greater than this in any column of ``motion_file``
    '''
    # NOTE: ``outcount`` here shadows nl.qc.outcount, and ``perc_outliers``
    # (the float) is immediately rebound as a helper lambda below -- the
    # tuple's second element is intentionally discarded
    (outcount,perc_outliers) = nl.qc.outcount(input_dset,fraction)
    info = nl.dset_info(input_dset)
    # binarize: True means "keep" (value below the threshold f)
    binarize = lambda o,f: [oo<f for oo in o]
    # proportion of reps NOT kept by a binary keep-list
    perc_outliers = lambda o: 1.-(sum(o)/float(info.reps))

    if motion_file:
        with open(motion_file,'Ur') as f:
            # per-rep maximum across all motion columns, skipping comments
            motion = [max([float(y) for y in x.strip().split()]) for x in f.read().split('\n') if len(x.strip())>0 and x.strip()[0]!='#']
            motion_1D = [x for x in binarize(motion,motion_exclude)]
            if perc_outliers(motion_1D) > max_exclude:
                nl.notify('Error: Too many points excluded because of motion (%.2f) in dset %s' % (perc_outliers(motion_1D),input_dset),level=nl.level.error)
                return False
            # force motion-excluded reps to look like 100% outliers
            outcount = [outcount[i] if motion_1D[i] else 1. for i in range(len(outcount))]

    binary_outcount = binarize(outcount,fraction)

    if max_exclude and perc_outliers(binary_outcount) > max_exclude:
        nl.notify('Error: Found %.1f%% outliers in dset %s' % (100*perc_outliers(outcount),input_dset),level=nl.level.error)
        return False
    if clip_to:
        # un-censor the least-bad censored timepoints until under the clip
        while perc_outliers(binary_outcount) > clip_to:
            best_outlier = min([(outcount[i],i) for i in range(len(outcount)) if not binary_outcount[i]])
            binary_outcount[best_outlier[1]] = True
    if not out_prefix:
        out_prefix = nl.prefix(input_dset) + '.1D'
    # 1 = keep the timepoint, 0 = censor it (AFNI censor-file convention)
    with open(out_prefix,'w') as f:
        f.write('\n'.join([str(int(x)) for x in binary_outcount]))
    return True
Example #16
0
def subbrick(dset,
             label,
             coef=False,
             tstat=False,
             fstat=False,
             rstat=False,
             number_only=False):
    ''' returns a string referencing the given subbrick within a dset

    This method reads the header of the dataset ``dset``, finds the subbrick whose
    label matches ``label`` and returns a string of type ``dataset[X]``, which can
    be used by most AFNI programs to refer to a subbrick within a file

    The options coef, tstat, fstat, and rstat will add the suffix that is
    appended to the label by 3dDeconvolve

    :coef:  "#0_Coef"
    :tstat: "#0_Tstat"
    :fstat: "_Fstat"
    :rstat: "_R^2"

    If ``coef`` or ``tstat`` are set to a number, it will use that parameter number
    (instead of 0), for models that use multiple parameters (e.g., "TENT").

    if ``number_only`` is set to ``True``, will only return the subbrick number instead of a string
    '''

    # ``coef``/``tstat`` accept ``True`` (parameter #0) or an explicit
    # integer parameter number -- consistent with the other ``subbrick``
    # implementation in this library
    if coef is not False:
        if coef is True:
            coef = 0
        label += "#%d_Coef" % coef
    elif tstat is not False:
        if tstat is True:
            tstat = 0
        label += "#%d_Tstat" % tstat
    elif fstat:
        label += "_Fstat"
    elif rstat:
        label += "_R^2"

    info = nl.dset_info(dset)
    if info is None:
        nl.notify('Error: Couldn\'t get info from dset "%s"' % dset,
                  level=nl.level.error)
        return None
    i = info.subbrick_labeled(label)
    if number_only:
        return i
    return '%s[%d]' % (dset, i)
Example #17
0
def subbrick(dset,label,coef=False,tstat=False,fstat=False,rstat=False,number_only=False):
    ''' returns a string referencing the given subbrick within a dset

    This method reads the header of the dataset ``dset``, finds the subbrick whose
    label matches ``label`` and returns a string of type ``dataset[X]``, which can
    be used by most AFNI programs to refer to a subbrick within a file

    The options coef, tstat, fstat, and rstat will add the suffix that is
    appended to the label by 3dDeconvolve

    :coef:  "#0_Coef"
    :tstat: "#0_Tstat"
    :fstat: "_Fstat"
    :rstat: "_R^2"

    If ``coef`` or ``tstat`` are set to a number, it will use that parameter number
    (instead of 0), for models that use multiple parameters (e.g., "TENT").

    if ``number_only`` is set to ``True``, will only return the subbrick number instead of a string
    '''

    if coef is not False:
        if coef is True:
            coef = 0
        label += "#%d_Coef" % coef
    # bug fix: was ``tstat != False``, which silently ignored ``tstat=0``
    # (0 != False is False) in contradiction with the docstring; use the
    # same identity test as the ``coef`` branch
    elif tstat is not False:
        if tstat is True:
            tstat = 0
        label += "#%d_Tstat" % tstat
    elif fstat:
        label += "_Fstat"
    elif rstat:
        label += "_R^2"

    info = nl.dset_info(dset)
    if info is None:
        nl.notify('Error: Couldn\'t get info from dset "%s"'%dset,level=nl.level.error)
        return None
    i = info.subbrick_labeled(label)
    if number_only:
        return i
    return '%s[%d]' % (dset,i)
Example #18
0
def qwarp_epi(dset,align_subbrick=5,suffix='_qwal',prefix=None):
    '''aligns an EPI time-series using 3dQwarp

    Very expensive and not efficient at all, but it can produce pretty impressive alignment for EPI time-series with significant
    distortions due to motion'''
    info = nl.dset_info(dset)
    if info==None:
        nl.notify('Error reading dataset "%s"' % (dset),level=nl.level.error)
        return False
    if prefix==None:
        prefix = nl.suffix(dset,suffix)
    # per-sub-brick temporary filename
    dset_sub = lambda x: '_tmp_qwarp_epi-%s_%d.nii.gz' % (nl.prefix(dset),x)
    try:
        # extract the reference sub-brick that all others are warped to
        align_dset = nl.suffix(dset_sub(align_subbrick),'_warp')
        nl.calc('%s[%d]' % (dset,align_subbrick),expr='a',prefix=align_dset,datum='float')
        # nonlinearly warp every other sub-brick onto the reference
        for i in xrange(info.reps):
            if i != align_subbrick:
                nl.calc('%s[%d]' % (dset,i),expr='a',prefix=dset_sub(i),datum='float')
                nl.run([
                    '3dQwarp', '-nowarp',
                    '-workhard', '-superhard', '-minpatch', '9', '-blur', '0',
                    '-pear', '-nopenalty',
                    '-base', align_dset,
                    '-source', dset_sub(i),
                    '-prefix', nl.suffix(dset_sub(i),'_warp')
                ],quiet=True)
        # reassemble the warped sub-bricks, preserving timing info
        cmd = ['3dTcat','-prefix',prefix]
        if info.TR:
            cmd += ['-tr',info.TR]
        if info.slice_timing:
            cmd += ['-tpattern',info.slice_timing]
        cmd += [nl.suffix(dset_sub(i),'_warp') for i in xrange(info.reps)]
        nl.run(cmd,quiet=True)
    except Exception as e:
        raise e
    finally:
        # best-effort cleanup of the per-sub-brick temporary files
        # NOTE(review): warped files were created with suffix '_warp' but
        # this cleans suffix 'warp' (no underscore) -- verify nl.suffix
        # semantics; the warped temporaries may be left behind
        for i in xrange(info.reps):
            for suffix in ['','warp']:
                try:
                    os.remove(nl.suffix(dset_sub(i),suffix))
                except:
                    pass
Example #19
0
def align_epi(anatomy, epis, suffix='_al', base=3, skull_strip=True):
    '''[[currently in progress]]: a simple replacement for the ``align_epi_anat.py`` script, because I've found it to be unreliable, in my usage'''
    # all alignments target sub-brick ``base`` of the first EPI
    reference = '%s[%d]' % (epis[0], base)
    # slice-time correct each EPI, then affine-align it to the reference
    for epi in epis:
        nl.tshift(epi, suffix='_tshift')
        nl.affine_align(nl.suffix(epi, '_tshift'),
                        reference,
                        skull_strip=False,
                        epi=True,
                        cost='crM',
                        resample='wsinc5',
                        grid_size=nl.dset_info(epi).voxel_size[0],
                        suffix='_al')
    # finally align the anatomy to the same EPI reference
    ss = [anatomy] if skull_strip else False
    nl.affine_align(anatomy,
                    reference,
                    skull_strip=ss,
                    cost='hel',
                    grid_size=1,
                    opts=['-interp', 'cubic'],
                    suffix='_al-to-EPI')
Example #20
0
def skullstrip_template(dset,template,prefix=None,suffix=None,dilate=0):
    '''Takes the raw anatomy ``dset``, aligns it to a template brain, and applies a templated skullstrip. Should produce fairly reliable skullstrips as long
    as there is a decent amount of normal brain and the overall shape of the brain is normal-ish'''
    if suffix==None:
        suffix = '_sstemplate'
    if prefix==None:
        prefix = nl.suffix(dset,suffix)
    # skip entirely if the output already exists
    if not os.path.exists(prefix):
        with nl.notify('Running template-based skull-strip on %s' % dset):
            dset = os.path.abspath(dset)
            template = os.path.abspath(template)
            tmp_dir = tempfile.mkdtemp()
            cwd = os.getcwd()
            with nl.run_in(tmp_dir):
                # affine-align the template to the subject first...
                nl.affine_align(template,dset,skull_strip=None,cost='mi',opts=['-nmatch','100%'])
                # ...then refine nonlinearly with 3dQwarp
                nl.run(['3dQwarp','-minpatch','20','-penfac','10','-noweight','-source',nl.suffix(template,'_aff'),'-base',dset,'-prefix',nl.suffix(template,'_qwarp')],products=nl.suffix(template,'_qwarp'))
                info = nl.dset_info(nl.suffix(template,'_qwarp'))
                max_value = info.subbricks[0]['max']
                # mask the anatomy wherever the warped template exceeds 5% of its max
                nl.calc([dset,nl.suffix(template,'_qwarp')],'a*step(b-%f*0.05)'%max_value,prefix)
                shutil.move(prefix,cwd)
            shutil.rmtree(tmp_dir)
Example #21
0
def skull_strip(dset, suffix='_ns', prefix=None, unifize=True):
    ''' use bet to strip skull from given anatomy '''
    # should add options to use betsurf and T1/T2 in the future
    # Since BET fails on weirdly distributed datasets, I added 3dUnifize in... I realize this makes this dependent on AFNI. Sorry, :)
    if prefix == None:
        prefix = nl.suffix(dset, suffix)
    unifize_dset = nl.suffix(dset, '_u')
    # ``bet2`` is presumably a module-level configurable path; fall back to
    # the bare command name -- TODO confirm where bet2/fsl_dir are defined
    cmd = bet2 if bet2 else 'bet2'
    if unifize:
        info = nl.dset_info(dset)
        if info == None:
            nl.notify('Error: could not read info for dset %s' % dset,
                      level=nl.level.error)
            return False
        # NOTE(review): fsl_dir is only prepended on this (unifize) branch;
        # with unifize=False the bare command is used -- confirm intended
        cmd = os.path.join(fsl_dir, cmd) if fsl_dir else cmd
        # zero out background below 5% of the dataset max before unifizing
        cutoff_value = nl.max(dset) * 0.05
        nl.run([
            '3dUnifize', '-prefix', unifize_dset,
            nl.calc(dset, 'step(a-%f)*a' % cutoff_value)
        ],
               products=unifize_dset)
    else:
        unifize_dset = dset
    # NOTE(review): 0.5 is passed as a float in the argv list -- presumably
    # nl.run stringifies arguments; verify
    nl.run([cmd, unifize_dset, prefix, '-w', 0.5], products=prefix)
Example #22
0
def smooth_decon_to_fwhm(decon,fwhm,cache=True):
    '''takes an input :class:`Decon` object and uses ``3dBlurToFWHM`` to make the output as close as possible to ``fwhm``
    returns the final measured fwhm. If ``cache`` is ``True``, will save the blurred input file (and use it again in the future)'''
    if os.path.exists(decon.prefix):
        return
    # name of a blurred copy of a given input dataset
    blur_dset = lambda dset: nl.suffix(dset,'_smooth_to_%.2f' % fwhm)

    with nl.notify('Running smooth_decon_to_fwhm analysis (with %.2fmm blur)' % fwhm):
        tmpdir = tempfile.mkdtemp()
        try:
            cwd = os.getcwd()
            # gather every string/list/dict value on the decon object that
            # might be a filename (stripping any "[subbrick]" selector)
            random_files = [re.sub(r'\[\d+\]$','',str(x)) for x in nl.flatten([x for x in decon.__dict__.values() if isinstance(x,basestring) or isinstance(x,list)]+[x.values() for x in decon.__dict__.values() if isinstance(x,dict)])]
            files_to_copy = [x for x in random_files if os.path.exists(x) and x[0]!='/']
            files_to_copy += [blur_dset(dset) for dset in decon.input_dsets if os.path.exists(blur_dset(dset))]
            # copy inputs into the temp dir: copytree for directories,
            # plain copy for files. (bug fix: a stray unconditional
            # shutil.copy after this try/except re-copied every entry and
            # raised for directories already handled by copytree)
            for fname in files_to_copy:
                try:
                    shutil.copytree(fname,tmpdir)
                except OSError:
                    shutil.copy(fname,tmpdir)

            copyback_files = [decon.prefix,decon.errts]
            with nl.run_in(tmpdir):
                if os.path.exists(decon.prefix):
                    os.remove(decon.prefix)

                # Create the blurred inputs (or load from cache)
                if cache and all([os.path.exists(os.path.join(cwd,blur_dset(dset))) for dset in decon.input_dsets]):
                    # Everything is already cached...
                    nl.notify('Using cache\'d blurred datasets')
                else:
                    # Need to make them from scratch
                    with nl.notify('Creating blurred datasets'):
                        old_errts = decon.errts
                        decon.errts = 'residual.nii.gz'
                        decon.prefix = os.path.basename(decon.prefix)
                        # Run once in place to get the residual dataset
                        decon.run()
                        running_reps = 0
                        for dset in decon.input_dsets:
                            info = nl.dset_info(dset)
                            # carve this run's reps out of the concatenated residual
                            residual_dset = nl.suffix(dset,'_residual')
                            nl.run(['3dbucket','-prefix',residual_dset,'%s[%d..%d]'%(decon.errts,running_reps,running_reps+info.reps-1)],products=residual_dset)
                            cmd = ['3dBlurToFWHM','-quiet','-input',dset,'-blurmaster',residual_dset,'-prefix',blur_dset(dset),'-FWHM',fwhm]
                            if decon.mask:
                                if decon.mask=='auto':
                                    cmd += ['-automask']
                                else:
                                    cmd += ['-mask',decon.mask]
                            nl.run(cmd,products=blur_dset(dset))
                            running_reps += info.reps
                            if cache:
                                copyback_files.append(blur_dset(dset))
                    decon.errts = old_errts
                # re-run the deconvolve on the blurred inputs
                decon.input_dsets = [blur_dset(dset) for dset in decon.input_dsets]
                for d in [decon.prefix,decon.errts]:
                    if os.path.exists(d):
                        try:
                            os.remove(d)
                        except:
                            pass
                decon.run()
                for copyfile in copyback_files:
                    if os.path.exists(copyfile):
                        shutil.copy(copyfile,cwd)
                    else:
                        nl.notify('Warning: deconvolve did not produce expected file %s' % decon.prefix,level=nl.level.warning)
        finally:
            # best-effort removal of the scratch directory
            shutil.rmtree(tmpdir,True)
Example #23
0
def auto_polort(dset):
    '''a copy of 3dDeconvolve's ``-polort A`` option'''
    # one extra baseline polynomial order per 150 reps, plus linear
    reps = nl.dset_info(dset).reps
    return 1 + round(reps / 150.0)
Example #24
0
def auto_polort(dset):
    '''a copy of 3dDeconvolve's ``-polort A`` option'''
    num_reps = nl.dset_info(dset).reps
    # baseline order grows by one for every 150 repetitions, as in -polort A
    return 1 + round(num_reps / 150.0)
Example #25
0
def align_epi(anatomy,epis,suffix='_al',base=3,skull_strip=True):
    '''[[currently in progress]]: a simple replacement for the ``align_epi_anat.py`` script, because I've found it to be unreliable, in my usage'''
    # everything is registered to sub-brick ``base`` of the first EPI run
    base_dset = '%s[%d]' % (epis[0], base)
    for epi in epis:
        nl.tshift(epi, suffix='_tshift')
        epi_grid = nl.dset_info(epi).voxel_size[0]
        nl.affine_align(
            nl.suffix(epi, '_tshift'),
            base_dset,
            skull_strip=False,
            epi=True,
            cost='crM',
            resample='wsinc5',
            grid_size=epi_grid,
            suffix='_al')
    # skull-strip only the anatomy (if requested), then align it to the EPI base
    anat_ss = [anatomy] if skull_strip else False
    nl.affine_align(
        anatomy,
        base_dset,
        skull_strip=anat_ss,
        cost='lpa',
        grid_size=1,
        opts=['-interp', 'cubic'],
        suffix='_al-to-EPI')
Example #26
0
def create_censor_file(input_dset,
                       out_prefix=None,
                       fraction=0.1,
                       clip_to=0.1,
                       max_exclude=0.3,
                       motion_file=None,
                       motion_exclude=1.0):
    '''create a binary censor file using 3dToutcount

    Writes a 1D file with one row per rep: ``1`` = keep, ``0`` = censor.

    :input_dset:        the input dataset
    :out_prefix:        output 1D file (default: ``prefix(input_dset)`` + ``.1D``)
    :fraction:          censor a timepoint if proportional of outliers in this
                        time point is greater than given value
    :clip_to:           keep the number of time points censored under this proportion
                        of total reps. If more time points would be censored,
                        it will only pick the top ``clip_to*reps`` points
    :max_exclude:       if more time points than the given proportion of reps are excluded for the
                        entire run, throw an exception -- something is probably wrong
    :motion_file:       optional filename of a "motion" file with multiple columns and rows corresponding to reps.
                        It doesn't really matter what the values are, as long as they are appropriate relative to ``motion_exclude``
    :motion_exclude:    Will exclude any reps that have a value greater than this in any column of ``motion_file``

    Returns ``True`` on success, ``False`` if too many reps would be censored.
    '''
    # second tuple element is discarded: the outlier percentage is recomputed
    # below from the binarized vector (the lambda would otherwise shadow it)
    (outcount, _) = nl.qc.outcount(input_dset, fraction)
    info = nl.dset_info(input_dset)
    # True (=1, keep) when the value is below the threshold ``f``
    binarize = lambda o, f: [oo < f for oo in o]
    # proportion of reps censored (i.e., entries that are 0/False)
    perc_outliers = lambda o: 1. - (sum(o) / float(info.reps))

    if motion_file:
        # 'r' instead of legacy 'Ur': the 'U' flag was removed in Python 3.11,
        # and any stray '\r' is handled by x.strip() below
        with open(motion_file, 'r') as f:
            # per-rep worst-case motion value (max across columns), skipping
            # blank lines and '#' comment lines
            motion = [
                max([float(y) for y in x.strip().split()])
                for x in f.read().split('\n')
                if len(x.strip()) > 0 and x.strip()[0] != '#'
            ]
            motion_1D = [x for x in binarize(motion, motion_exclude)]
            if perc_outliers(motion_1D) > max_exclude:
                nl.notify(
                    'Error: Too many points excluded because of motion (%.2f) in dset %s'
                    % (perc_outliers(motion_1D), input_dset),
                    level=nl.level.error)
                return False
            # force motion-censored reps to look like 100% outliers
            outcount = [
                outcount[i] if motion_1D[i] else 1.
                for i in range(len(outcount))
            ]

    binary_outcount = binarize(outcount, fraction)

    if max_exclude and perc_outliers(binary_outcount) > max_exclude:
        # report the same (binarized) percentage that the guard above tested;
        # applying perc_outliers to the raw float fractions was a bug
        nl.notify('Error: Found %.1f%% outliers in dset %s' %
                  (100 * perc_outliers(binary_outcount), input_dset),
                  level=nl.level.error)
        return False
    if clip_to:
        # un-censor the least-bad censored reps until we're under the clip limit
        while perc_outliers(binary_outcount) > clip_to:
            best_outlier = min([(outcount[i], i) for i in range(len(outcount))
                                if not binary_outcount[i]])
            binary_outcount[best_outlier[1]] = True
    if not out_prefix:
        out_prefix = nl.prefix(input_dset) + '.1D'
    with open(out_prefix, 'w') as f:
        f.write('\n'.join([str(int(x)) for x in binary_outcount]))
    return True
Example #27
0
def smooth_decon_to_fwhm(decon,fwhm,cache=True):
    '''takes an input :class:`Decon` object and uses ``3dBlurToFWHM`` to make the output as close as possible to ``fwhm``
    returns the final measured fwhm. If ``cache`` is ``True``, will save the blurred input file (and use it again in the future)'''
    # nothing to do if the final output already exists
    if os.path.exists(decon.prefix):
        return
    blur_dset = lambda dset: nl.suffix(dset,'_smooth_to_%.2f' % fwhm)

    with nl.notify('Running smooth_decon_to_fwhm analysis (with %.2fmm blur)' % fwhm):
        tmpdir = tempfile.mkdtemp()
        try:
            cwd = os.getcwd()
            # Gather every string that might be a filename referenced by the Decon
            # object (plain attrs, lists, dict values), stripping AFNI sub-brick
            # selectors like "[0]" so relative paths resolve inside tmpdir
            random_files = [re.sub(r'\[\d+\]$','',str(x)) for x in nl.flatten([x for x in decon.__dict__.values() if isinstance(x,basestring) or isinstance(x,list)]+[x.values() for x in decon.__dict__.values() if isinstance(x,dict)])]
            files_to_copy = [x for x in random_files if os.path.exists(x) and x[0]!='/']
            files_to_copy += [blur_dset(dset) for dset in decon.input_dsets if os.path.exists(blur_dset(dset))]
            # copy inputs into the temp dir: directories via copytree, plain
            # files via copy (copytree raises OSError on a plain file).
            # NOTE: the previous version unconditionally re-copied after this
            # try/except, which was redundant for files and raised for dirs.
            for fname in files_to_copy:
                try:
                    shutil.copytree(fname,tmpdir)
                except OSError:
                    shutil.copy(fname,tmpdir)

            copyback_files = [decon.prefix,decon.errts]
            with nl.run_in(tmpdir):
                if os.path.exists(decon.prefix):
                    os.remove(decon.prefix)

                # Create the blurred inputs (or load from cache)
                if cache and all([os.path.exists(os.path.join(cwd,blur_dset(dset))) for dset in decon.input_dsets]):
                    # Everything is already cached...
                    nl.notify('Using cache\'d blurred datasets')
                else:
                    # Need to make them from scratch
                    with nl.notify('Creating blurred datasets'):
                        old_errts = decon.errts
                        decon.errts = 'residual.nii.gz'
                        decon.prefix = os.path.basename(decon.prefix)
                        # Run once in place to get the residual dataset
                        decon.run()
                        running_reps = 0
                        for dset in decon.input_dsets:
                            info = nl.dset_info(dset)
                            # slice this run's reps out of the concatenated errts
                            residual_dset = nl.suffix(dset,'_residual')
                            nl.run(['3dbucket','-prefix',residual_dset,'%s[%d..%d]'%(decon.errts,running_reps,running_reps+info.reps-1)],products=residual_dset)
                            # blur the run using its own residual as the blurmaster
                            cmd = ['3dBlurToFWHM','-quiet','-input',dset,'-blurmaster',residual_dset,'-prefix',blur_dset(dset),'-FWHM',fwhm]
                            if decon.mask:
                                if decon.mask=='auto':
                                    cmd += ['-automask']
                                else:
                                    cmd += ['-mask',decon.mask]
                            nl.run(cmd,products=blur_dset(dset))
                            running_reps += info.reps
                            if cache:
                                copyback_files.append(blur_dset(dset))
                    decon.errts = old_errts
                # re-run the deconvolve on the blurred inputs
                decon.input_dsets = [blur_dset(dset) for dset in decon.input_dsets]
                for d in [decon.prefix,decon.errts]:
                    if os.path.exists(d):
                        try:
                            os.remove(d)
                        except OSError:
                            pass
                decon.run()
                for copyfile in copyback_files:
                    if os.path.exists(copyfile):
                        shutil.copy(copyfile,cwd)
                    else:
                        # report the file that is actually missing (was
                        # incorrectly interpolating decon.prefix before)
                        nl.notify('Warning: deconvolve did not produce expected file %s' % copyfile,level=nl.level.warning)
        finally:
            # best-effort cleanup of the scratch directory
            shutil.rmtree(tmpdir,True)