def afni_copy(filename):
    '''Create a ``+orig`` (AFNI-format) copy of the given dataset via ``3dcalc``
    and return the new dataset name as a string.

    Returns ``None`` when the AFNI package is not available.
    '''
    if not nl.pkg_available('afni', True):
        return
    afni_filename = "%s+orig" % nl.prefix(filename)
    # only create the copy if it doesn't already exist
    if not os.path.exists(afni_filename + ".HEAD"):
        nl.calc(filename, 'a', prefix=nl.prefix(filename))
    return afni_filename
def afni_copy(filename):
    '''creates a ``+orig`` copy of the given dataset and returns the filename as a string'''
    if nl.pkg_available('afni', True):
        out_dset = "%s+orig" % nl.prefix(filename)
        head_missing = not os.path.exists(out_dset + ".HEAD")
        if head_missing:
            # materialize the AFNI-format copy with an identity 3dcalc
            nl.calc(filename, 'a', prefix=nl.prefix(filename))
        return out_dset
def align_epi_anat(anatomy, epi_dsets, skull_strip_anat=True):
    '''aligns epis to anatomy using ``align_epi_anat.py`` script

    :epi_dsets:        either a string or a list of strings of the epi child datasets
    :skull_strip_anat: if ``True``, ``anatomy`` will be skull-stripped using the
                       default method

    The default output suffix is "_al"
    '''
    if isinstance(epi_dsets, basestring):
        epi_dsets = [epi_dsets]

    if len(epi_dsets) == 0:
        nl.notify('Warning: no epi alignment datasets given for anatomy %s!' %
                  anatomy, level=nl.level.warning)
        return

    # nothing to do when every aligned output already exists
    if all(os.path.exists(nl.suffix(x, '_al')) for x in epi_dsets):
        return

    anatomy_use = anatomy
    if skull_strip_anat:
        nl.skull_strip(anatomy, '_ns')
        anatomy_use = nl.suffix(anatomy, '_ns')

    inputs = [anatomy_use] + epi_dsets

    def dset_products(dset):
        # files align_epi_anat.py is expected to produce for one epi
        return [nl.suffix(dset, '_al'),
                nl.prefix(dset) + '_al_mat.aff12.1D',
                nl.prefix(dset) + '_tsh_vr_motion.1D']

    products = nl.flatten([dset_products(dset) for dset in epi_dsets])
    with nl.run_in_tmp(inputs, products):
        # align_epi_anat.py wants AFNI-format datasets
        if nl.is_nifti(anatomy_use):
            anatomy_use = nl.afni_copy(anatomy_use)
        epi_dsets_use = [nl.afni_copy(d) if nl.is_nifti(d) else d
                         for d in epi_dsets]

        cmd = ["align_epi_anat.py", "-epi2anat", "-anat_has_skull", "no",
               "-epi_strip", "3dAutomask", "-anat", anatomy_use,
               "-epi_base", "5", "-epi", epi_dsets_use[0]]
        if len(epi_dsets_use) > 1:
            cmd += ['-child_epi'] + epi_dsets_use[1:]
        out = nl.run(cmd)

        # convert outputs back to (optionally gzipped) NIFTI to match the inputs
        for dset in epi_dsets:
            if not nl.is_nifti(dset):
                continue
            dset_nifti = nl.nifti_copy(nl.prefix(dset) + '_al+orig')
            if (dset_nifti and os.path.exists(dset_nifti)
                    and dset_nifti.endswith('.nii') and dset.endswith('.gz')):
                nl.run(['gzip', dset_nifti])
def affine_align(dset_from, dset_to, skull_strip=True, mask=None, suffix='_aff',
                 prefix=None, cost=None, epi=False, resample='wsinc5',
                 grid_size=None, opts=None):
    '''interface to 3dAllineate to align anatomies and EPIs

    :skull_strip: if ``True`` both datasets are skull-stripped; if equal to one of
                  the dataset names, only that dataset is stripped
    :mask:        passed to 3dAllineate as ``-emask`` (resampled first when
                  ``dset_to`` was skull-stripped)
    :prefix:      explicit output dataset name (default: ``dset_from`` + ``suffix``)
    :opts:        extra arguments appended to the 3dAllineate command
    '''
    if opts is None:
        # avoid a shared mutable default argument
        opts = []
    dset_ss = lambda dset: os.path.split(nl.suffix(dset, '_ns'))[1]

    def dset_source(dset):
        # use the skull-stripped copy when stripping applies to this dataset
        if skull_strip == True or skull_strip == dset:
            return dset_ss(dset)
        else:
            return dset

    dset_affine = prefix
    if dset_affine == None:
        dset_affine = os.path.split(nl.suffix(dset_from, suffix))[1]
    dset_affine_mat_1D = nl.prefix(dset_affine) + '_matrix.1D'
    dset_affine_par_1D = nl.prefix(dset_affine) + '_params.1D'

    if os.path.exists(dset_affine):
        # final product already exists
        return

    for dset in [dset_from, dset_to]:
        if skull_strip == True or skull_strip == dset:
            nl.skull_strip(dset, '_ns')

    mask_use = mask
    if mask:
        # the mask was probably made in the space of the original dset_to anatomy,
        # which has now been cropped from the skull stripping. So the lesion mask
        # needs to be resampled to match the corresponding mask
        if skull_strip == True or skull_strip == dset_to:
            # BUG FIX: this previously called ``dset_u(dset_ss(dset))`` --
            # ``dset_u`` is not defined in this function (NameError at runtime)
            # and ``dset`` leaked from the loop above; resample against the
            # skull-stripped target explicitly instead
            nl.run(['3dresample', '-master', dset_ss(dset_to), '-inset', mask,
                    '-prefix', nl.suffix(mask, '_resam')],
                   products=nl.suffix(mask, '_resam'))
            mask_use = nl.suffix(mask, '_resam')

    all_cmd = ['3dAllineate',
               '-prefix', dset_affine,
               '-base', dset_source(dset_to),
               '-source', dset_source(dset_from),
               '-source_automask',
               '-1Dmatrix_save', dset_affine_mat_1D,
               '-1Dparam_save', dset_affine_par_1D,
               '-autoweight',
               '-final', resample,
               '-cmass'] + opts
    if grid_size:
        all_cmd += ['-newgrid', grid_size]
    if cost:
        all_cmd += ['-cost', cost]
    if epi:
        all_cmd += ['-EPI']
    if mask:
        all_cmd += ['-emask', mask_use]
    nl.run(all_cmd, products=dset_affine)
def align_epi_anat(anatomy, epi_dsets, skull_strip_anat=True):
    '''aligns epis to anatomy using ``align_epi_anat.py`` script

    :epi_dsets: can be either a string or list of strings of the epi child datasets
    :skull_strip_anat: if ``True``, ``anatomy`` will be skull-stripped using the default method

    The default output suffix is "_al"
    '''
    if isinstance(epi_dsets, basestring):
        epi_dsets = [epi_dsets]
    if len(epi_dsets) == 0:
        nl.notify('Warning: no epi alignment datasets given for anatomy %s!' % anatomy, level=nl.level.warning)
        return
    already_done = all(os.path.exists(nl.suffix(x, '_al')) for x in epi_dsets)
    if already_done:
        return
    anatomy_use = anatomy
    if skull_strip_anat:
        nl.skull_strip(anatomy, '_ns')
        anatomy_use = nl.suffix(anatomy, '_ns')
    inputs = [anatomy_use] + epi_dsets
    # outputs the script produces for each epi dataset
    dset_products = lambda d: [nl.suffix(d, '_al'),
                               '%s_al_mat.aff12.1D' % nl.prefix(d),
                               '%s_tsh_vr_motion.1D' % nl.prefix(d)]
    products = nl.flatten([dset_products(d) for d in epi_dsets])
    with nl.run_in_tmp(inputs, products):
        if nl.is_nifti(anatomy_use):
            anatomy_use = nl.afni_copy(anatomy_use)
        epi_dsets_use = []
        for dset in epi_dsets:
            epi_dsets_use.append(nl.afni_copy(dset) if nl.is_nifti(dset) else dset)
        cmd = ["align_epi_anat.py", "-epi2anat", "-anat_has_skull", "no",
               "-epi_strip", "3dAutomask", "-anat", anatomy_use,
               "-epi_base", "5", "-epi", epi_dsets_use[0]]
        if len(epi_dsets_use) > 1:
            cmd += ['-child_epi'] + epi_dsets_use[1:]
        out = nl.run(cmd)
        for dset in epi_dsets:
            if nl.is_nifti(dset):
                dset_nifti = nl.nifti_copy(nl.prefix(dset) + '_al+orig')
                # gzip the aligned NIFTI when the input itself was gzipped
                if dset_nifti and os.path.exists(dset_nifti) \
                        and dset_nifti.endswith('.nii') and dset.endswith('.gz'):
                    nl.run(['gzip', dset_nifti])
def mgz_to_nifti(filename, prefix=None, gzip=True):
    '''Convert ``filename`` to a NIFTI file using ``mri_convert``'''
    setup_freesurfer()
    out_name = prefix
    if out_name == None:
        out_name = nl.prefix(filename) + '.nii'
    if gzip and not out_name.endswith('.gz'):
        out_name += '.gz'
    mri_convert = os.path.join(freesurfer_home, 'bin', 'mri_convert')
    nl.run([mri_convert, filename, out_name], products=out_name)
def mgz_to_nifti(filename, prefix=None, gzip=True):
    '''Convert ``filename`` to a NIFTI file using ``mri_convert``'''
    setup_freesurfer()
    if prefix is None:
        prefix = '%s.nii' % nl.prefix(filename)
    needs_gz = gzip and not prefix.endswith('.gz')
    if needs_gz:
        prefix = prefix + '.gz'
    cmd = [os.path.join(freesurfer_home, 'bin', 'mri_convert'), filename, prefix]
    nl.run(cmd, products=prefix)
def volreg(dset, suffix='_volreg', base=3, tshift=3, dfile_suffix='_volreg.1D'):
    '''simple interface to 3dvolreg

    :suffix:       suffix to add to ``dset`` for volreg'ed file
    :base:         either a number or ``dset[#]`` of the base image to register to
    :tshift:       if a number, then tshift ignoring that many images, if ``None``
                   then don't tshift
    :dfile_suffix: suffix to add to ``dset`` to save the motion parameters to
    '''
    out_dset = nl.suffix(dset, suffix)
    motion_file = nl.prefix(dset) + dfile_suffix
    cmd = ['3dvolreg', '-prefix', out_dset, '-base', base, '-dfile', motion_file]
    if tshift:
        cmd.extend(['-tshift', tshift])
    cmd.append(dset)
    nl.run(cmd, products=out_dset)
def qwarp_epi(dset, align_subbrick=5, suffix='_qwal', prefix=None):
    '''aligns an EPI time-series using 3dQwarp

    Very expensive and not efficient at all, but it can produce pretty
    impressive alignment for EPI time-series with significant distortions
    due to motion

    :align_subbrick: sub-brick index all other reps are warped to
    :suffix:         suffix for the output dataset (ignored if ``prefix`` given)
    :prefix:         explicit output dataset name

    Returns ``False`` when ``dset`` cannot be read.
    '''
    info = nl.dset_info(dset)
    if info == None:
        nl.notify('Error reading dataset "%s"' % (dset), level=nl.level.error)
        return False
    if prefix == None:
        prefix = nl.suffix(dset, suffix)
    # naming scheme for per-rep temporary datasets
    dset_sub = lambda x: '_tmp_qwarp_epi-%s_%d.nii.gz' % (nl.prefix(dset), x)
    try:
        align_dset = nl.suffix(dset_sub(align_subbrick), '_warp')
        nl.calc('%s[%d]' % (dset, align_subbrick), expr='a', prefix=align_dset,
                datum='float')
        for i in xrange(info.reps):
            if i != align_subbrick:
                nl.calc('%s[%d]' % (dset, i), expr='a', prefix=dset_sub(i),
                        datum='float')
                nl.run(['3dQwarp', '-nowarp', '-workhard', '-superhard',
                        '-minpatch', '9', '-blur', '0', '-pear', '-nopenalty',
                        '-base', align_dset, '-source', dset_sub(i),
                        '-prefix', nl.suffix(dset_sub(i), '_warp')],
                       quiet=True)
        cmd = ['3dTcat', '-prefix', prefix]
        if info.TR:
            cmd += ['-tr', info.TR]
        if info.slice_timing:
            cmd += ['-tpattern', info.slice_timing]
        cmd += [nl.suffix(dset_sub(i), '_warp') for i in xrange(info.reps)]
        nl.run(cmd, quiet=True)
    finally:
        # BUG FIX: the cleanup loop previously iterated over ['', 'warp'], but
        # the warped temporaries are named with the '_warp' suffix, so they
        # were never deleted; also renamed the loop variable so it no longer
        # shadows the ``suffix`` parameter
        for i in xrange(info.reps):
            for tmp_suffix in ['', '_warp']:
                try:
                    os.remove(nl.suffix(dset_sub(i), tmp_suffix))
                except:
                    pass
def create_censor_file(input_dset,out_prefix=None,fraction=0.1,clip_to=0.1,max_exclude=0.3,motion_file=None,motion_exclude=1.0):
    '''create a binary censor file using 3dToutcount

    :input_dset:        the input dataset
    :prefix:            output 1D file (default: ``prefix(input_dset)`` + ``.1D``)
    :fraction:          censor a timepoint if proportional of outliers in this
                        time point is greater than given value
    :clip_to:           keep the number of time points censored under this
                        proportion of total reps. If more time points would be
                        censored, it will only pick the top ``clip_to*reps`` points
    :max_exclude:       if more time points than the given proportion of reps are
                        excluded for the entire run, throw an exception -- something
                        is probably wrong
    :motion_file:       optional filename of a "motion" file with multiple columns
                        and rows corresponding to reps. It doesn't really matter
                        what the values are, as long as they are appropriate
                        relative to ``motion_exclude``
    :motion_exclude:    Will exclude any reps that have a value greater than this
                        in any column of ``motion_file``

    Writes ``out_prefix`` with one 0/1 per rep (1 = keep) and returns ``True``,
    or returns ``False`` when too many points would be excluded.
    '''
    # per-rep outlier fractions from 3dToutcount; the second tuple element is
    # immediately shadowed by the ``perc_outliers`` lambda below
    (outcount,perc_outliers) = nl.qc.outcount(input_dset,fraction)
    info = nl.dset_info(input_dset)
    # binarize: True (keep) when the value is below threshold ``f``
    binarize = lambda o,f: [oo<f for oo in o]
    # fraction of reps marked for censoring in a binary keep-vector
    perc_outliers = lambda o: 1.-(sum(o)/float(info.reps))
    if motion_file:
        with open(motion_file,'Ur') as f:
            # one value per rep: the max across all motion columns, skipping
            # blank lines and '#' comments
            motion = [max([float(y) for y in x.strip().split()]) for x in f.read().split('\n') if len(x.strip())>0 and x.strip()[0]!='#']
            motion_1D = [x for x in binarize(motion,motion_exclude)]
            if perc_outliers(motion_1D) > max_exclude:
                nl.notify('Error: Too many points excluded because of motion (%.2f) in dset %s' % (perc_outliers(motion_1D),input_dset),level=nl.level.error)
                return False
            # force motion-censored reps to look like 100% outliers so they are
            # censored by the outcount threshold below
            outcount = [outcount[i] if motion_1D[i] else 1. for i in range(len(outcount))]
    binary_outcount = binarize(outcount,fraction)
    if max_exclude and perc_outliers(binary_outcount) > max_exclude:
        nl.notify('Error: Found %.1f%% outliers in dset %s' % (100*perc_outliers(outcount),input_dset),level=nl.level.error)
        return False
    if clip_to:
        # un-censor the least-bad censored reps until within the clip_to budget
        while perc_outliers(binary_outcount) > clip_to:
            best_outlier = min([(outcount[i],i) for i in range(len(outcount)) if not binary_outcount[i]])
            binary_outcount[best_outlier[1]] = True
    if not out_prefix:
        out_prefix = nl.prefix(input_dset) + '.1D'
    with open(out_prefix,'w') as f:
        f.write('\n'.join([str(int(x)) for x in binary_outcount]))
    return True
def qwarp_epi(dset, align_subbrick=5, suffix='_qwal', prefix=None):
    '''aligns an EPI time-series using 3dQwarp

    Very expensive and not efficient at all, but it can produce pretty
    impressive alignment for EPI time-series with significant distortions
    due to motion

    :align_subbrick: sub-brick index used as the warp target for all other reps
    :suffix:         output suffix (used when ``prefix`` is not given)
    :prefix:         explicit output dataset name

    Returns ``False`` when ``dset`` cannot be read.
    '''
    info = nl.dset_info(dset)
    if info == None:
        nl.notify('Error reading dataset "%s"' % (dset), level=nl.level.error)
        return False
    if prefix == None:
        prefix = nl.suffix(dset, suffix)
    # per-rep temporary filenames
    dset_sub = lambda x: '_tmp_qwarp_epi-%s_%d.nii.gz' % (nl.prefix(dset), x)
    try:
        align_dset = nl.suffix(dset_sub(align_subbrick), '_warp')
        nl.calc('%s[%d]' % (dset, align_subbrick), expr='a',
                prefix=align_dset, datum='float')
        for i in xrange(info.reps):
            if i != align_subbrick:
                nl.calc('%s[%d]' % (dset, i), expr='a', prefix=dset_sub(i),
                        datum='float')
                nl.run(['3dQwarp', '-nowarp', '-workhard', '-superhard',
                        '-minpatch', '9', '-blur', '0', '-pear', '-nopenalty',
                        '-base', align_dset, '-source', dset_sub(i),
                        '-prefix', nl.suffix(dset_sub(i), '_warp')],
                       quiet=True)
        cmd = ['3dTcat', '-prefix', prefix]
        if info.TR:
            cmd += ['-tr', info.TR]
        if info.slice_timing:
            cmd += ['-tpattern', info.slice_timing]
        cmd += [nl.suffix(dset_sub(i), '_warp') for i in xrange(info.reps)]
        nl.run(cmd, quiet=True)
    finally:
        # BUG FIX: cleanup previously used the suffix 'warp' (no underscore),
        # which never matches the '_warp' temporaries, leaking them on disk;
        # the loop variable is also renamed so the ``suffix`` parameter is not
        # shadowed
        for i in xrange(info.reps):
            for tmp_suffix in ['', '_warp']:
                try:
                    os.remove(nl.suffix(dset_sub(i), tmp_suffix))
                except:
                    pass
def volreg(dset, suffix='_volreg', base=3, tshift=3, dfile_suffix='_volreg.1D'):
    '''simple interface to 3dvolreg

    :suffix:       suffix to add to ``dset`` for volreg'ed file
    :base:         either a number or ``dset[#]`` of the base image to register to
    :tshift:       if a number, then tshift ignoring that many images, if ``None``
                   then don't tshift
    :dfile_suffix: suffix to add to ``dset`` to save the motion parameters to
    '''
    registered = nl.suffix(dset, suffix)
    cmd = ['3dvolreg',
           '-prefix', registered,
           '-base', base,
           '-dfile', '%s%s' % (nl.prefix(dset), dfile_suffix)]
    if tshift:
        cmd += ['-tshift', tshift]
    nl.run(cmd + [dset], products=registered)
def _create_dset_dicom(directory,slice_order='alt+z',sort_order=None,force_slices=None):
    # Build a NIFTI dataset from a directory of DICOM files, using Dimon to
    # sort the images.
    # NOTE(review): this block appears truncated in this view -- the outer
    # ``try:`` below has no visible except/finally, and ``cwd``/``sorted_dir``
    # are never restored/removed here; confirm against the full file.
    # DICOM (group, element) tags consulted below; presumably standard DICOM
    # attributes plus a Siemens private tag -- verify against the DICOM standard
    tags = {
        'num_rows': (0x0028,0x0010),
        'num_reps': (0x0020,0x0105),
        'num_frames': (0x0028,0x0008),
        'acq_time': (0x0008,0x0032),
        'siemens_slices': (0x0019, 0x100a),
        'TR': (0x0018,0x0080)
    }
    with nl.notify('Trying to create datasets from %s' % directory):
        directory = os.path.abspath(directory)
        if not os.path.exists(directory):
            nl.notify('Error: could not find %s' % directory,level=nl.level.error)
            return False
        # output name is derived from the directory name
        out_file = '%s.nii.gz' % nl.prefix(os.path.basename(directory))
        if os.path.exists(out_file):
            nl.notify('Error: file "%s" already exists!' % out_file,level=nl.level.error)
            return False
        cwd = os.getcwd()
        # scratch directory where Dimon sorts the images
        sorted_dir = tempfile.mkdtemp()
        try:
            with nl.run_in(sorted_dir):
                file_list = glob.glob(directory + '/*')
                num_reps = None
                new_file_list = []
                for f in file_list:
                    try:
                        if len(info_for_tags(f,tags['num_rows']))>0:
                            # Only include DICOMs that actually have image information
                            new_file_list.append(f)
                    except:
                        pass
                file_list = new_file_list
                if len(file_list)==0:
                    nl.notify('Error: Couldn\'t find any valid DICOM images',level=nl.level.error)
                    return False
                with open('file_list.txt','w') as f:
                    f.write('\n'.join(file_list))
                # let AFNI's Dimon determine the correct image ordering
                try:
                    subprocess.check_output([
                        'Dimon',
                        '-infile_list','file_list.txt',
                        '-dicom_org',
                        '-save_details','details',
                        '-max_images','100000',
                        '-fast','-no_wait',
                        '-quit'],stderr=subprocess.STDOUT)
                except subprocess.CalledProcessError, e:
                    nl.notify('Warning: Dimon returned an error while sorting images',level=nl.level.warning)
                else:
                    # Dimon writes its sorted file list to details.2.final_list.txt
                    if os.path.exists('details.2.final_list.txt'):
                        with open('details.2.final_list.txt') as f:
                            details = [x.strip().split() for x in f.readlines() if x[0]!='#']
                        file_list = [x[0] for x in details]
                    else:
                        nl.notify('Warning: Dimon didn\'t return expected output, unable to sort images',level=nl.level.warning)
def affine_align(dset_from, dset_to, skull_strip=True, mask=None, suffix='_aff',
                 prefix=None, cost=None, epi=False, resample='wsinc5',
                 grid_size=None, opts=None):
    '''interface to 3dAllineate to align anatomies and EPIs

    :skull_strip: if ``True`` both datasets are skull-stripped; if equal to one
                  of the dataset names, only that dataset is stripped
    :mask:        passed to 3dAllineate as ``-emask`` (resampled first when
                  ``dset_to`` was skull-stripped)
    :prefix:      explicit output dataset name (default: ``dset_from`` + ``suffix``)
    :opts:        extra arguments appended to the 3dAllineate command
    '''
    if opts is None:
        # avoid a shared mutable default argument
        opts = []
    dset_ss = lambda dset: os.path.split(nl.suffix(dset, '_ns'))[1]

    def dset_source(dset):
        # use the skull-stripped copy when stripping applies to this dataset
        if skull_strip == True or skull_strip == dset:
            return dset_ss(dset)
        else:
            return dset

    dset_affine = prefix
    if dset_affine == None:
        dset_affine = os.path.split(nl.suffix(dset_from, suffix))[1]
    dset_affine_mat_1D = nl.prefix(dset_affine) + '_matrix.1D'
    dset_affine_par_1D = nl.prefix(dset_affine) + '_params.1D'

    if os.path.exists(dset_affine):
        # final product already exists
        return

    for dset in [dset_from, dset_to]:
        if skull_strip == True or skull_strip == dset:
            nl.skull_strip(dset, '_ns')

    mask_use = mask
    if mask:
        # the mask was probably made in the space of the original dset_to anatomy,
        # which has now been cropped from the skull stripping. So the lesion mask
        # needs to be resampled to match the corresponding mask
        if skull_strip == True or skull_strip == dset_to:
            # BUG FIX: this previously called ``dset_u(dset_ss(dset))`` --
            # ``dset_u`` is not defined in this function (NameError at runtime)
            # and ``dset`` leaked from the loop above; resample against the
            # skull-stripped target explicitly instead
            nl.run(['3dresample', '-master', dset_ss(dset_to), '-inset', mask,
                    '-prefix', nl.suffix(mask, '_resam')],
                   products=nl.suffix(mask, '_resam'))
            mask_use = nl.suffix(mask, '_resam')

    # NOTE(review): the other definition of this function in this file also
    # passes '-source_automask'; confirm which variant is intended
    all_cmd = ['3dAllineate',
               '-prefix', dset_affine,
               '-base', dset_source(dset_to),
               '-source', dset_source(dset_from),
               '-1Dmatrix_save', dset_affine_mat_1D,
               '-1Dparam_save', dset_affine_par_1D,
               '-autoweight',
               '-final', resample,
               '-cmass'] + opts
    if grid_size:
        all_cmd += ['-newgrid', grid_size]
    if cost:
        all_cmd += ['-cost', cost]
    if epi:
        all_cmd += ['-EPI']
    if mask:
        all_cmd += ['-emask', mask_use]
    nl.run(all_cmd, products=dset_affine)
def qwarp_align(dset_from, dset_to, skull_strip=True, mask=None,
                affine_suffix='_aff', suffix='_qwarp', prefix=None):
    '''aligns ``dset_from`` to ``dset_to`` using 3dQwarp

    Will run ``3dSkullStrip`` (unless ``skull_strip`` is ``False``), ``3dUnifize``,
    ``3dAllineate``, and then ``3dQwarp``. This method will add suffixes to the input
    dataset for the intermediate files (e.g., ``_ss``, ``_u``). If those files already
    exist, it will assume they were intelligently named, and use them as is

    :skull_strip:   If True/False, turns skull-stripping of both datasets on/off.
                    If a string matching ``dset_from`` or ``dset_to``, will only
                    skull-strip the given dataset
    :mask:          Applies the given mask to the alignment. Because of the nature
                    of the alignment algorithms, the mask is **always** applied to
                    the ``dset_to``. If this isn't what you want, you need to
                    reverse the transform and re-apply it (e.g., using
                    :meth:`qwarp_invert` and :meth:`qwarp_apply`). If the
                    ``dset_to`` dataset is skull-stripped, the mask will also be
                    resampled to match the ``dset_to`` grid.
    :affine_suffix: Suffix applied to ``dset_from`` to name the new dataset, as
                    well as the ``.1D`` file.
    :suffix:        Suffix applied to the final ``dset_from`` dataset. An
                    additional file with the additional suffix ``_WARP`` will be
                    created containing the parameters (e.g., with the default
                    ``_qwarp`` suffix, the parameters will be in a file with the
                    suffix ``_qwarp_WARP``)
    :prefix:        Alternatively to ``suffix``, explicitly give the full output
                    filename

    If ``skull_strip`` is a string instead of ``True``/``False``, it will only
    skull strip the given dataset instead of both of them

    # TODO: currently does not work with +tlrc datasets because the filenames
    # get mangled
    '''
    dset_ss = lambda dset: os.path.split(nl.suffix(dset, '_ns'))[1]
    dset_u = lambda dset: os.path.split(nl.suffix(dset, '_u'))[1]

    def dset_source(dset):
        # pick the skull-stripped copy when stripping applies to this dataset
        if skull_strip == True or skull_strip == dset:
            return dset_ss(dset)
        else:
            return dset

    dset_affine = os.path.split(nl.suffix(dset_from, affine_suffix))[1]
    dset_qwarp = prefix
    if dset_qwarp == None:
        dset_qwarp = os.path.split(nl.suffix(dset_from, suffix))[1]
    if os.path.exists(dset_qwarp):
        # final product already exists
        return

    # affine pre-alignment (also does the skull stripping)
    affine_align(dset_from, dset_to, skull_strip, mask, affine_suffix)

    for dset in [dset_from, dset_to]:
        nl.run(['3dUnifize',
                '-prefix', dset_u(dset_source(dset)),
                '-input', dset_source(dset)],
               products=[dset_u(dset_source(dset))])

    mask_use = mask
    if mask:
        # the mask was probably made in the space of the original dset_to anatomy,
        # which has now been cropped from the skull stripping. So the lesion mask
        # needs to be resampled to match the corresponding mask
        if skull_strip == True or skull_strip == dset_to:
            # BUG FIX: previously used ``dset`` leaked from the loop above
            # (incidentally equal to dset_to); make the master grid explicit
            nl.run(['3dresample', '-master', dset_u(dset_ss(dset_to)),
                    '-inset', mask, '-prefix', nl.suffix(mask, '_resam')],
                   products=nl.suffix(mask, '_resam'))
            mask_use = nl.suffix(mask, '_resam')

    warp_cmd = ['3dQwarp', '-prefix', dset_qwarp, '-duplo', '-useweight',
                '-blur', '0', '3', '-iwarp',
                '-base', dset_u(dset_source(dset_to)),
                '-source', dset_affine]
    if mask:
        warp_cmd += ['-emask', mask_use]
    nl.run(warp_cmd, products=dset_qwarp)
def qwarp_align(dset_from, dset_to, skull_strip=True, mask=None,
                affine_suffix='_aff', suffix='_qwarp', prefix=None):
    '''aligns ``dset_from`` to ``dset_to`` using 3dQwarp

    Will run ``3dSkullStrip`` (unless ``skull_strip`` is ``False``), ``3dUnifize``,
    ``3dAllineate``, and then ``3dQwarp``. This method will add suffixes to the input
    dataset for the intermediate files (e.g., ``_ss``, ``_u``). If those files already
    exist, it will assume they were intelligently named, and use them as is

    :skull_strip:   If True/False, turns skull-stripping of both datasets on/off.
                    If a string matching ``dset_from`` or ``dset_to``, will only
                    skull-strip the given dataset
    :mask:          Applies the given mask to the alignment. Because of the nature
                    of the alignment algorithms, the mask is **always** applied to
                    the ``dset_to``. If this isn't what you want, you need to
                    reverse the transform and re-apply it (e.g., using
                    :meth:`qwarp_invert` and :meth:`qwarp_apply`). If the
                    ``dset_to`` dataset is skull-stripped, the mask will also be
                    resampled to match the ``dset_to`` grid.
    :affine_suffix: Suffix applied to ``dset_from`` to name the new dataset, as
                    well as the ``.1D`` file.
    :suffix:        Suffix applied to the final ``dset_from`` dataset. An
                    additional file with the additional suffix ``_WARP`` will be
                    created containing the parameters (e.g., with the default
                    ``_qwarp`` suffix, the parameters will be in a file with the
                    suffix ``_qwarp_WARP``)
    :prefix:        Alternatively to ``suffix``, explicitly give the full output
                    filename

    If ``skull_strip`` is a string instead of ``True``/``False``, it will only
    skull strip the given dataset instead of both of them

    # TODO: currently does not work with +tlrc datasets because the filenames
    # get mangled
    '''
    dset_ss = lambda dset: os.path.split(nl.suffix(dset, '_ns'))[1]
    dset_u = lambda dset: os.path.split(nl.suffix(dset, '_u'))[1]

    def dset_source(dset):
        # pick the skull-stripped copy when stripping applies to this dataset
        if skull_strip == True or skull_strip == dset:
            return dset_ss(dset)
        else:
            return dset

    dset_affine = os.path.split(nl.suffix(dset_from, affine_suffix))[1]
    dset_qwarp = prefix
    if dset_qwarp == None:
        dset_qwarp = os.path.split(nl.suffix(dset_from, suffix))[1]
    if os.path.exists(dset_qwarp):
        # final product already exists
        return

    # affine pre-alignment (also does the skull stripping)
    affine_align(dset_from, dset_to, skull_strip, mask, affine_suffix)

    for dset in [dset_from, dset_to]:
        nl.run(['3dUnifize',
                '-prefix', dset_u(dset_source(dset)),
                '-input', dset_source(dset)],
               products=[dset_u(dset_source(dset))])

    mask_use = mask
    if mask:
        # the mask was probably made in the space of the original dset_to anatomy,
        # which has now been cropped from the skull stripping. So the lesion mask
        # needs to be resampled to match the corresponding mask
        if skull_strip == True or skull_strip == dset_to:
            # BUG FIX: previously used ``dset`` leaked from the loop above
            # (incidentally equal to dset_to); make the master grid explicit
            nl.run(['3dresample', '-master', dset_u(dset_ss(dset_to)),
                    '-inset', mask, '-prefix', nl.suffix(mask, '_resam')],
                   products=nl.suffix(mask, '_resam'))
            mask_use = nl.suffix(mask, '_resam')

    warp_cmd = ['3dQwarp', '-prefix', dset_qwarp, '-duplo', '-useweight',
                '-blur', '0', '3', '-iwarp',
                '-base', dset_u(dset_source(dset_to)),
                '-source', dset_affine]
    if mask:
        warp_cmd += ['-emask', mask_use]
    nl.run(warp_cmd, products=dset_qwarp)
def _create_dset_dicom(directory, slice_order='alt+z', sort_order=None, force_slices=None):
    # Build a NIFTI dataset from a directory of DICOM files, using Dimon to
    # sort the images.
    # NOTE(review): this block appears truncated in this view -- the outer
    # ``try:`` below has no visible except/finally, and ``cwd``/``sorted_dir``
    # are never restored/removed here; confirm against the full file.
    # DICOM (group, element) tags consulted below; presumably standard DICOM
    # attributes plus a Siemens private tag -- verify against the DICOM standard
    tags = {
        'num_rows': (0x0028, 0x0010),
        'num_reps': (0x0020, 0x0105),
        'num_frames': (0x0028, 0x0008),
        'acq_time': (0x0008, 0x0032),
        'siemens_slices': (0x0019, 0x100a),
        'TR': (0x0018, 0x0080)
    }
    with nl.notify('Trying to create datasets from %s' % directory):
        directory = os.path.abspath(directory)
        if not os.path.exists(directory):
            nl.notify('Error: could not find %s' % directory, level=nl.level.error)
            return False
        # output name is derived from the directory name
        out_file = '%s.nii.gz' % nl.prefix(os.path.basename(directory))
        if os.path.exists(out_file):
            nl.notify('Error: file "%s" already exists!' % out_file, level=nl.level.error)
            return False
        cwd = os.getcwd()
        # scratch directory where Dimon sorts the images
        sorted_dir = tempfile.mkdtemp()
        try:
            with nl.run_in(sorted_dir):
                file_list = glob.glob(directory + '/*')
                num_reps = None
                new_file_list = []
                for f in file_list:
                    try:
                        if len(info_for_tags(f, tags['num_rows'])) > 0:
                            # Only include DICOMs that actually have image information
                            new_file_list.append(f)
                    except:
                        pass
                file_list = new_file_list
                if len(file_list) == 0:
                    nl.notify('Error: Couldn\'t find any valid DICOM images', level=nl.level.error)
                    return False
                with open('file_list.txt', 'w') as f:
                    f.write('\n'.join(file_list))
                # let AFNI's Dimon determine the correct image ordering
                try:
                    subprocess.check_output([
                        'Dimon', '-infile_list', 'file_list.txt', '-dicom_org',
                        '-save_details', 'details', '-max_images', '100000',
                        '-fast', '-no_wait', '-quit'
                    ], stderr=subprocess.STDOUT)
                except subprocess.CalledProcessError, e:
                    nl.notify(
                        'Warning: Dimon returned an error while sorting images',
                        level=nl.level.warning)
                else:
                    # Dimon writes its sorted file list to details.2.final_list.txt
                    if os.path.exists('details.2.final_list.txt'):
                        with open('details.2.final_list.txt') as f:
                            details = [
                                x.strip().split() for x in f.readlines()
                                if x[0] != '#'
                            ]
                        file_list = [x[0] for x in details]
                    else:
                        nl.notify(
                            'Warning: Dimon didn\'t return expected output, unable to sort images',
                            level=nl.level.warning)
def create_censor_file(input_dset, out_prefix=None, fraction=0.1, clip_to=0.1, max_exclude=0.3, motion_file=None, motion_exclude=1.0):
    '''create a binary censor file using 3dToutcount

    :input_dset:        the input dataset
    :prefix:            output 1D file (default: ``prefix(input_dset)`` + ``.1D``)
    :fraction:          censor a timepoint if proportional of outliers in this
                        time point is greater than given value
    :clip_to:           keep the number of time points censored under this
                        proportion of total reps. If more time points would be
                        censored, it will only pick the top ``clip_to*reps`` points
    :max_exclude:       if more time points than the given proportion of reps are
                        excluded for the entire run, throw an exception -- something
                        is probably wrong
    :motion_file:       optional filename of a "motion" file with multiple columns
                        and rows corresponding to reps. It doesn't really matter
                        what the values are, as long as they are appropriate
                        relative to ``motion_exclude``
    :motion_exclude:    Will exclude any reps that have a value greater than this
                        in any column of ``motion_file``

    Writes ``out_prefix`` with one 0/1 per rep (1 = keep) and returns ``True``,
    or returns ``False`` when too many points would be excluded.
    '''
    # per-rep outlier fractions from 3dToutcount; the second tuple element is
    # immediately shadowed by the ``perc_outliers`` lambda below
    (outcount, perc_outliers) = nl.qc.outcount(input_dset, fraction)
    info = nl.dset_info(input_dset)
    # binarize: True (keep) when the value is below threshold ``f``
    binarize = lambda o, f: [oo < f for oo in o]
    # fraction of reps marked for censoring in a binary keep-vector
    perc_outliers = lambda o: 1. - (sum(o) / float(info.reps))
    if motion_file:
        with open(motion_file, 'Ur') as f:
            # one value per rep: the max across all motion columns, skipping
            # blank lines and '#' comments
            motion = [
                max([float(y) for y in x.strip().split()])
                for x in f.read().split('\n')
                if len(x.strip()) > 0 and x.strip()[0] != '#'
            ]
            motion_1D = [x for x in binarize(motion, motion_exclude)]
            if perc_outliers(motion_1D) > max_exclude:
                nl.notify(
                    'Error: Too many points excluded because of motion (%.2f) in dset %s'
                    % (perc_outliers(motion_1D), input_dset),
                    level=nl.level.error)
                return False
            # force motion-censored reps to look like 100% outliers so they are
            # censored by the outcount threshold below
            outcount = [
                outcount[i] if motion_1D[i] else 1.
                for i in range(len(outcount))
            ]
    binary_outcount = binarize(outcount, fraction)
    if max_exclude and perc_outliers(binary_outcount) > max_exclude:
        nl.notify('Error: Found %.1f%% outliers in dset %s' %
                  (100 * perc_outliers(outcount), input_dset),
                  level=nl.level.error)
        return False
    if clip_to:
        # un-censor the least-bad censored reps until within the clip_to budget
        while perc_outliers(binary_outcount) > clip_to:
            best_outlier = min([(outcount[i], i)
                                for i in range(len(outcount))
                                if not binary_outcount[i]])
            binary_outcount[best_outlier[1]] = True
    if not out_prefix:
        out_prefix = nl.prefix(input_dset) + '.1D'
    with open(out_prefix, 'w') as f:
        f.write('\n'.join([str(int(x)) for x in binary_outcount]))
    return True