def afni_copy(filename):
    ''' creates a ``+orig`` copy of the given dataset and returns the filename as a string '''
    if nl.pkg_available('afni', True):
        afni_filename = "%s+orig" % nl.prefix(filename)
        if not os.path.exists(afni_filename + ".HEAD"):
            nl.calc(filename, 'a', prefix=nl.prefix(filename))
        return afni_filename
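
# Usage sketch (hypothetical filename; assumes AFNI is installed and on the path):
#   orig_copy = afni_copy('anat.nii.gz')
#   # returns something like 'anat+orig'; the BRIK/HEAD copy is only written if it doesn't already exist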
def skullstrip_template(dset, template, prefix=None, suffix=None, dilate=0):
    '''Takes the raw anatomy ``dset``, aligns it to a template brain, and applies a templated
    skullstrip. Should produce fairly reliable skullstrips as long as there is a decent amount
    of normal brain and the overall shape of the brain is normal-ish'''
    if suffix is None:
        suffix = '_sstemplate'
    if prefix is None:
        prefix = nl.suffix(dset, suffix)
    if not os.path.exists(prefix):
        with nl.notify('Running template-based skull-strip on %s' % dset):
            dset = os.path.abspath(dset)
            template = os.path.abspath(template)
            tmp_dir = tempfile.mkdtemp()
            cwd = os.getcwd()
            with nl.run_in(tmp_dir):
                nl.affine_align(template, dset, skull_strip=None, cost='mi', opts=['-nmatch', '100%'])
                nl.run(['3dQwarp', '-minpatch', '20', '-penfac', '10', '-noweight',
                        '-source', nl.suffix(template, '_aff'), '-base', dset,
                        '-prefix', nl.suffix(template, '_qwarp')],
                       products=nl.suffix(template, '_qwarp'))
                info = nl.dset_info(nl.suffix(template, '_qwarp'))
                max_value = info.subbricks[0]['max']
                nl.calc([dset, nl.suffix(template, '_qwarp')], 'a*step(b-%f*0.05)' % max_value, prefix)
                shutil.move(prefix, cwd)
            shutil.rmtree(tmp_dir)
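
# Usage sketch (hypothetical filenames; ``template`` is presumably an already skull-stripped
# template brain, so that masking by the warped template removes the skull):
#   skullstrip_template('anat.nii.gz', 'template_brain+tlrc', prefix='anat_ns.nii.gz')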
def qwarp_epi(dset, align_subbrick=5, suffix='_qwal', prefix=None):
    '''aligns an EPI time-series using 3dQwarp

    Very expensive and not efficient at all, but it can produce pretty impressive alignment for
    EPI time-series with significant distortions due to motion'''
    info = nl.dset_info(dset)
    if info is None:
        nl.notify('Error reading dataset "%s"' % (dset), level=nl.level.error)
        return False
    if prefix is None:
        prefix = nl.suffix(dset, suffix)
    dset_sub = lambda x: '_tmp_qwarp_epi-%s_%d.nii.gz' % (nl.prefix(dset), x)
    try:
        align_dset = nl.suffix(dset_sub(align_subbrick), '_warp')
        nl.calc('%s[%d]' % (dset, align_subbrick), expr='a', prefix=align_dset, datum='float')
        for i in xrange(info.reps):
            if i != align_subbrick:
                nl.calc('%s[%d]' % (dset, i), expr='a', prefix=dset_sub(i), datum='float')
                nl.run(['3dQwarp', '-nowarp', '-workhard', '-superhard', '-minpatch', '9',
                        '-blur', '0', '-pear', '-nopenalty',
                        '-base', align_dset,
                        '-source', dset_sub(i),
                        '-prefix', nl.suffix(dset_sub(i), '_warp')], quiet=True)
        cmd = ['3dTcat', '-prefix', prefix]
        if info.TR:
            cmd += ['-tr', info.TR]
        if info.slice_timing:
            cmd += ['-tpattern', info.slice_timing]
        cmd += [nl.suffix(dset_sub(i), '_warp') for i in xrange(info.reps)]
        nl.run(cmd, quiet=True)
    except Exception as e:
        raise e
    finally:
        # clean up the per-rep temporary files (both the raw and '_warp' copies)
        for i in xrange(info.reps):
            for s in ['', '_warp']:
                try:
                    os.remove(nl.suffix(dset_sub(i), s))
                except:
                    pass
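
# Usage sketch (hypothetical filename): warp every rep of an EPI run to match sub-brick 5
#   qwarp_epi('epi_run1.nii.gz', align_subbrick=5)
#   # output defaults to the input name with a '_qwal' suffix (via nl.suffix)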
def roi_stats(mask, dset):
    '''computes a battery of ROI statistics of ``dset`` within each ROI of ``mask`` using 3dROIstats,
    returned as a dict indexed by ROI value (resampling ``mask`` onto the grid of ``dset`` if needed)'''
    out_dict = {}
    values = [
        {'Med': 'median', 'Min': 'min', 'Max': 'max', 'NZMean': 'nzmean', 'NZSum': 'nzsum',
         'NZSigma': 'nzsigma', 'Mean': 'mean', 'Sigma': 'sigma', 'Mod': 'mode', 'NZcount': 'nzvoxels'},
        {'NZMod': 'nzmode', 'NZMed': 'nzmedian', 'NZMax': 'nzmax', 'NZMin': 'nzmin', 'Mean': 'mean'}
    ]
    options = [
        ['-nzmean', '-nzsum', '-nzvoxels', '-minmax', '-sigma', '-nzsigma', '-median', '-mode'],
        ['-nzminmax', '-nzmedian', '-nzmode']
    ]
    if not nl.dset_grids_equal((mask, dset)):
        i = nl.dset_info(dset)
        grid_hash = '_' + '_'.join([str(x) for x in (i.voxel_size + i.voxel_dims)])
        new_mask = nl.suffix(mask, grid_hash)
        if not os.path.exists(new_mask):
            nl.run(["3dfractionize", "-template", dset, "-input", nl.calc(mask, "a", datum="short"),
                    "-prefix", new_mask, "-preserve", "-clip", "0.2"])
        mask = new_mask
    for i in xrange(len(values)):
        cmd = ['3dROIstats', '-1Dformat', '-nobriklab', '-mask', mask] + options[i] + [dset]
        out = subprocess.check_output(cmd).split('\n')
        header = [(values[i][x.split('_')[0]], int(x.split('_')[1])) for x in out[1].split()[1:]]
        for j in xrange(len(out) / 2 - 1):
            stats = [float(x) for x in out[(j + 1) * 2 + 1][1:].split()]
            for s in xrange(len(stats)):
                roi = header[s][1]
                stat_name = header[s][0]
                stat = stats[s]
                if roi not in out_dict:
                    out_dict[roi] = {}
                out_dict[roi][stat_name] = stat
    return out_dict
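
# Usage sketch (hypothetical filenames): per-ROI statistics of a dataset within an ROI mask
#   stats = roi_stats('rois.nii.gz', 'beta_map.nii.gz')
#   # stats is a dict keyed by ROI value, e.g. stats[17]['nzmean'], stats[17]['median']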
def atlas_overlap(dset, atlas=None):
    '''aligns ``dset`` to the TT_N27 atlas and returns ``(cost,overlap)``'''
    atlas = find_atlas(atlas)
    if atlas is None:
        return None
    cost_func = 'crM'
    infile = os.path.abspath(dset)
    tmpdir = tempfile.mkdtemp()
    with nl.run_in(tmpdir):
        o = nl.run(['3dAllineate', '-verb', '-base', atlas, '-source', infile + '[0]',
                    '-NN', '-final', 'NN', '-cost', cost_func, '-nmatch', '20%',
                    '-onepass', '-fineblur', '2', '-cmass', '-prefix', 'test.nii.gz'])
        m = re.search(r'Final\s+cost = ([\d.]+) ;', o.output)
        if m:
            cost = float(m.group(1))
        o = nl.run(['3dmaskave', '-mask', atlas, '-q', 'test.nii.gz'], stderr=None)
        data_thresh = float(o.output) / 4
        i = nl.dset_info('test.nii.gz')
        o = nl.run(['3dmaskave', '-q', '-mask', 'SELF', '-sum',
                    nl.calc([atlas, 'test.nii.gz'], 'equals(step(a-10),step(b-%.2f))' % data_thresh)],
                   stderr=None)
        overlap = 100 * float(o.output) / (i.voxel_dims[0] * i.voxel_dims[1] * i.voxel_dims[2])
    try:
        shutil.rmtree(tmpdir)
    except:
        pass
    return (cost, overlap)
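
# Usage sketch (hypothetical filename; requires find_atlas to locate the TT_N27 atlas):
#   result = atlas_overlap('epi_run1.nii.gz')
#   if result:
#       cost, overlap = result  # alignment cost and percent overlap with the atlas brain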
def temporal_snr(signal_dset, noise_dset, mask=None, prefix='temporal_snr.nii.gz'):
    '''Calculates temporal SNR by dividing the average signal of ``signal_dset`` by the SD of ``noise_dset``.

    ``signal_dset`` should be a dataset that contains the average signal value (i.e., nothing that has
    been detrended by removing the mean), and ``noise_dset`` should be a dataset that has all possible
    known signal fluctuations (e.g., task-related effects) removed from it (the residual dataset from a
    deconvolve works well)'''
    for d in [('mean', signal_dset), ('stdev', noise_dset)]:
        new_d = nl.suffix(d[1], '_%s' % d[0])
        cmd = ['3dTstat', '-%s' % d[0], '-prefix', new_d]
        if mask:
            cmd += ['-mask', mask]
        cmd += [d[1]]
        nl.run(cmd, products=new_d)
    nl.calc([nl.suffix(signal_dset, '_mean'), nl.suffix(noise_dset, '_stdev')], 'a/b', prefix=prefix)
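
# Usage sketch (hypothetical filenames): mean of the raw EPI divided by the SD of the deconvolve residuals
#   temporal_snr('epi_run1.nii.gz', 'deconvolve_residuals.nii.gz', mask='brain_mask.nii.gz', prefix='tsnr.nii.gz')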
def voxel_count(dset, p=None, positive_only=False, mask=None, ROI=None):
    '''returns the number of non-zero voxels

    :p:             threshold the dataset at the given *p*-value, then count
    :positive_only: only count positive values
    :mask:          count within the given mask
    :ROI:           only use the ROI with the given value (or list of values) within the mask;
                    if ROI is 'all' then return the voxel count of each ROI as a dictionary
    '''
    if p:
        dset = nl.thresh(dset, p, positive_only)
    else:
        if positive_only:
            dset = nl.calc(dset, 'step(a)')
    count = 0
    count_dict = None
    devnull = open(os.devnull, "w")
    if mask:
        cmd = ['3dROIstats', '-1Dformat', '-nomeanout', '-nobriklab', '-nzvoxels']
        cmd += ['-mask', str(mask), str(dset)]
        out = subprocess.check_output(cmd, stderr=devnull).split('\n')
        if len(out) < 4:
            return 0
        rois = [int(x.replace('NZcount_', '')) for x in out[1].strip()[1:].split()]
        counts = [int(x.replace('NZcount_', '')) for x in out[3].strip().split()]
        if ROI is None:
            ROI = rois
        if ROI == 'all':
            count_dict = {}
            ROI = rois
        else:
            if not isinstance(ROI, list):
                ROI = [ROI]
        for r in ROI:
            if r in rois:
                roi_count = counts[rois.index(r)]
                if count_dict is not None:
                    count_dict[r] = roi_count
                else:
                    count += roi_count
    else:
        cmd = ['3dBrickStat', '-slow', '-count', '-non-zero', str(dset)]
        count = int(subprocess.check_output(cmd, stderr=devnull).strip())
    if count_dict:
        return count_dict
    return count
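
# Usage sketches (hypothetical filenames):
#   n = voxel_count('stats.nii.gz', p=0.01, positive_only=True)            # suprathreshold positive voxels
#   per_roi = voxel_count('stats.nii.gz', mask='rois.nii.gz', ROI='all')   # dict of counts keyed by ROI value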
def inside_brain(stat_dset, atlas=None, p=0.001):
    '''calculates the percentage of voxels above a statistical threshold inside a brain mask vs. outside it

    if ``atlas`` is ``None``, it will try to find ``TT_N27``'''
    atlas = find_atlas(atlas)
    if atlas is None:
        return None
    mask_dset = nl.suffix(stat_dset, '_atlasfrac')
    nl.run(['3dfractionize', '-template', nl.strip_subbrick(stat_dset),
            '-input', nl.calc([atlas], '1+step(a-100)', datum='short'),
            '-preserve', '-clip', '0.2', '-prefix', mask_dset],
           products=mask_dset, quiet=True, stderr=None)
    s = nl.roi_stats(mask_dset, nl.thresh(stat_dset, p))
    return 100.0 * s[2]['nzvoxels'] / (s[1]['nzvoxels'] + s[2]['nzvoxels'])
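
# Usage sketch (hypothetical filename; requires find_atlas to locate the TT_N27 atlas):
#   pct_inside = inside_brain('stat_map.nii.gz', p=0.001)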
def skull_strip(dset, suffix='_ns', prefix=None, unifize=True):
    ''' use bet to strip skull from given anatomy '''
    # should add options to use betsurf and T1/T2 in the future
    # Since BET fails on weirdly distributed datasets, I added 3dUnifize in... I realize this makes
    # this dependent on AFNI. Sorry, :)
    if prefix is None:
        prefix = nl.suffix(dset, suffix)
    unifize_dset = nl.suffix(dset, '_u')
    cmd = bet2 if bet2 else 'bet2'
    if unifize:
        info = nl.dset_info(dset)
        if info is None:
            nl.notify('Error: could not read info for dset %s' % dset, level=nl.level.error)
            return False
        cmd = os.path.join(fsl_dir, cmd) if fsl_dir else cmd
        cutoff_value = nl.max(dset) * 0.05
        nl.run(['3dUnifize', '-prefix', unifize_dset, nl.calc(dset, 'step(a-%f)*a' % cutoff_value)],
               products=unifize_dset)
    else:
        unifize_dset = dset
    nl.run([cmd, unifize_dset, prefix, '-w', 0.5], products=prefix)
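
# Usage sketch (hypothetical filename; assumes FSL's bet2 and AFNI's 3dUnifize are installed):
#   skull_strip('anat.nii.gz')   # writes the skull-stripped output with the default '_ns' suffix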