Example #1
def dset_info(dset):
    '''returns a :class:`DsetInfo` object containing the meta-data from ``dset``'''
    if nl.pkg_available('afni'):
        return _dset_info_afni(dset)
    nl.notify('Error: no packages available to get dset info',
              level=nl.level.error)
    return None
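A minimal usage sketch (not from the source project), assuming the package is imported as ``nl`` (e.g., ``import neural as nl``) and that the returned :class:`DsetInfo` exposes the ``reps`` and ``TR`` attributes used in the other examples below; the dataset name is a hypothetical placeholder:

import neural as nl  # assumed import alias; the snippets here all refer to the package as ``nl``

info = nl.dset_info('epi_run1+orig')  # hypothetical AFNI dataset
if info:
    nl.notify('%d reps, TR = %s' % (info.reps, info.TR))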
Example #2
def unpack_new_archives(pi):
    new_files = []
    with nl.notify('Scanning PI %s for new archives' % pi):
        for root, dirs, files in os.walk(os.path.join(import_location, pi)):
            for fname in files + dirs:
                full_file = os.path.join(root, fname)
                padre_dir = full_file.replace(import_location, "PADRE/Import")
                if padre_dir not in import_log:
                    # Add in a check for the modification date
                    if any(
                        [fnmatch.fnmatch(fname, m) for m in c.archive_masks]):
                        with nl.notify('Found new archive "%s"' % full_file):
                            subject_guess = os.path.basename(
                                os.path.dirname(full_file))
                            nl.notify('guessing the subject number is %s' %
                                      subject_guess)
                            dsets = import_archive(full_file, subject_guess)
                            new_files.append({
                                # str.lstrip strips a *set of characters*, not a
                                # prefix, so remove the leading directory with
                                # replace() instead
                                'dir': full_file.replace(
                                    os.path.join(import_location, pi),
                                    '', 1).lstrip('/'),
                                'subj': subject_guess,
                                'fname': fname,
                                'dsets': dsets,
                            })
    return new_files
Example #3
File: afni.py  Project: azraq27/neural
def cdf(dset,p):
    info = nl.dset_info(dset)
    if info==None:
        nl.notify('Error: Could not get info for dset %s'%dset, level=nl.level.error)
        return None
    command = ['cdf','-p2t',info.subbricks[0]['stat'],str(p)] + info.subbricks[0]['params']
    return float(subprocess.check_output(command).split()[2])
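A hedged usage sketch: assuming the first subbrick of the given dataset is a statistic (as ``cdf`` requires), this converts a p-value into the matching statistic threshold. The filename is a hypothetical placeholder:

t_thresh = cdf('stats_subj+tlrc', 0.005)  # hypothetical stats dataset
if t_thresh is not None:
    nl.notify('p<0.005 corresponds to a statistic threshold of %.3f' % t_thresh)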
Example #4
def rename(subject_id, new_subject_id):
    with commit_wrap():
        subj = p.load(subject_id)
        if subj:
            try:
                os.rename(p.subject_dir(subject_id),
                          p.subject_dir(new_subject_id))
            except OSError:
                nl.notify('Error: filesystem reported error moving %s to %s' %
                          (subject_id, new_subject_id),
                          level=nl.level.error)
            else:
                subj._subject_id = new_subject_id
                subj.save()
                if os.path.exists(p.subject_json(subj)):
                    try:
                        os.remove(
                            os.path.join(
                                p.subject_dir(subj),
                                os.path.basename(p.subject_json(subject_id))))
                    except OSError:
                        pass
                    try:
                        del (p.subject._all_subjects[str(subject_id)])
                    except KeyError:
                        pass
    p.subject._index_one_subject(new_subject_id)
Example #5
def max_diff(dset1, dset2):
    '''calculates maximal voxel-wise difference in datasets (in %)
    
    Useful for checking if datasets have the same data. For example, if the maximum difference is
    < 1.0%, they're probably the same dataset'''
    for dset in [dset1, dset2]:
        if not os.path.exists(dset):
            nl.notify('Error: Could not find file: %s' % dset,
                      level=nl.level.error)
            return float('inf')
    try:
        dset1_d = nib.load(dset1)
        dset2_d = nib.load(dset2)
        dset1_data = dset1_d.get_data()
        dset2_data = dset2_d.get_data()
    except IOError:
        nl.notify('Error: Could not read files %s and %s' % (dset1, dset2),
                  level=nl.level.error)
        return float('inf')
    try:
        old_err = np.seterr(divide='ignore', invalid='ignore')
        max_val = 100 * np.max(
            np.ma.masked_invalid(
                np.double(dset1_data - dset2_data) /
                ((dset1_data + dset2_data) / 2)))
        np.seterr(**old_err)
        return max_val
    except ValueError:
        return float('inf')
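A short usage sketch with hypothetical filenames; per the docstring, a maximal difference under 1% suggests the two files hold the same data:

diff = max_diff('run1_copyA.nii.gz', 'run1_copyB.nii.gz')  # hypothetical files
if diff < 1.0:
    nl.notify('files appear to contain the same data (max diff %.2f%%)' % diff)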
Example #6
File: common.py  Project: azraq27/neural
def pkg_available(pkg_name,required=False):
    '''tests if analysis package is available on this machine (e.g., "afni" or "fsl"), and prints an error if ``required``'''
    if pkg_name in pkgs:
        return True
    if required:
        nl.notify('Error: could not find required analysis package %s' % pkg_name,level=nl.level.error)
    return False
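A minimal sketch of guarding a package-specific step behind ``pkg_available``, mirroring how ``dset_info`` above checks for AFNI; the dataset name is a placeholder:

if nl.pkg_available('afni', required=True):
    info = nl.dset_info('anat+orig')  # hypothetical dataset; only runs if AFNI was found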
Example #7
def align_epi_anat(anatomy, epi_dsets, skull_strip_anat=True):
    ''' aligns epis to anatomy using ``align_epi_anat.py`` script
    
    :epi_dsets:       can be either a string or list of strings of the epi child datasets
    :skull_strip_anat:     if ``True``, ``anatomy`` will be skull-stripped using the default method
    
    The default output suffix is "_al"
    '''

    if isinstance(epi_dsets, basestring):
        epi_dsets = [epi_dsets]

    if len(epi_dsets) == 0:
        nl.notify('Warning: no epi alignment datasets given for anatomy %s!' %
                  anatomy,
                  level=nl.level.warning)
        return

    if all(os.path.exists(nl.suffix(x, '_al')) for x in epi_dsets):
        return

    anatomy_use = anatomy

    if skull_strip_anat:
        nl.skull_strip(anatomy, '_ns')
        anatomy_use = nl.suffix(anatomy, '_ns')

    inputs = [anatomy_use] + epi_dsets
    dset_products = lambda dset: [
        nl.suffix(dset, '_al'),
        nl.prefix(dset) + '_al_mat.aff12.1D',
        nl.prefix(dset) + '_tsh_vr_motion.1D'
    ]
    products = nl.flatten([dset_products(dset) for dset in epi_dsets])
    with nl.run_in_tmp(inputs, products):
        if nl.is_nifti(anatomy_use):
            anatomy_use = nl.afni_copy(anatomy_use)
        epi_dsets_use = []
        for dset in epi_dsets:
            if nl.is_nifti(dset):
                epi_dsets_use.append(nl.afni_copy(dset))
            else:
                epi_dsets_use.append(dset)
        cmd = [
            "align_epi_anat.py", "-epi2anat", "-anat_has_skull", "no",
            "-epi_strip", "3dAutomask", "-anat", anatomy_use, "-epi_base", "5",
            "-epi", epi_dsets_use[0]
        ]
        if len(epi_dsets_use) > 1:
            cmd += ['-child_epi'] + epi_dsets_use[1:]
            out = nl.run(cmd)

        for dset in epi_dsets:
            if nl.is_nifti(dset):
                dset_nifti = nl.nifti_copy(nl.prefix(dset) + '_al+orig')
                if dset_nifti and os.path.exists(
                        dset_nifti) and dset_nifti.endswith(
                            '.nii') and dset.endswith('.gz'):
                    nl.run(['gzip', dset_nifti])
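A hedged usage sketch with hypothetical dataset names: the anatomy is skull-stripped first (the default), then both EPI runs are aligned, with outputs written under the default ``_al`` suffix:

align_epi_anat('anat+orig', ['epi_run1+orig', 'epi_run2+orig'])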
Example #8
def display(name):
    try:
        user = getpass.getuser()
    except:
        # Sometimes this seems to fail for no apparent reason
        user = "******"
    if compress(user) in faces:
        nl.notify(decompress(faces[compress(user)][name]))
Example #9
def pkg_available(pkg_name, required=False):
    '''tests if analysis package is available on this machine (e.g., "afni" or "fsl"), and prints an error if ``required``'''
    if pkg_name in pkgs:
        return True
    if required:
        nl.notify('Error: could not find required analysis package %s' %
                  pkg_name,
                  level=nl.level.error)
    return False
Example #10
def organize_dir(orig_dir):
    '''scans through the given directory and organizes DICOMs that look similar into subdirectories
    
    output directory is the ``orig_dir`` with ``-sorted`` appended to the end'''

    tags = [
        (0x10, 0x20),  # Subj ID
        (0x8, 0x21),  # Date
        (0x8, 0x31),  # Time
        (0x8, 0x103e)  # Descr
    ]
    orig_dir = orig_dir.rstrip('/')
    files = scan_dir(orig_dir, tags=tags, md5_hash=True)
    dups = find_dups(files)
    for dup in dups:
        nl.notify('Found duplicates of %s...' % dup[0])
        for each_dup in dup[1:]:
            nl.notify('\tdeleting %s' % each_dup)
            try:
                os.remove(each_dup)
            except IOError:
                nl.notify('\t[failed]')
            del (files[each_dup])

    clustered = cluster_files(files)
    output_dir = '%s-sorted' % orig_dir
    for key in clustered:
        if (0x8, 0x31) in clustered[key]['info']:
            clustered[key]['info'][(0x8, 0x31)] = str(
                int(float(clustered[key]['info'][(0x8, 0x31)])))
        for t in tags:
            if t not in clustered[key]['info']:
                clustered[key]['info'][t] = '_'
        run_name = '-'.join(
            [scrub_fname(str(clustered[key]['info'][x]))
             for x in tags]) + '-%d_images' % len(clustered[key]['files'])
        run_dir = os.path.join(output_dir, run_name)
        nl.notify('Moving files into %s' % run_dir)
        try:
            if not os.path.exists(run_dir):
                os.makedirs(run_dir)
        except IOError:
            nl.notify('Error: failed to create directory %s' % run_dir)
        else:
            for f in clustered[key]['files']:
                try:
                    dset_fname = os.path.split(f)[1]
                    if dset_fname[0] == '.':
                        dset_fname = '_' + dset_fname[1:]
                    os.rename(f, os.path.join(run_dir, dset_fname))
                except (IOError, OSError):
                    pass
    for r, ds, fs in os.walk(output_dir, topdown=False):
        for d in ds:
            dname = os.path.join(r, d)
            if len(os.listdir(dname)) == 0:
                os.rmdir(dname)  # os.rmdir, since dname is a (now-empty) directory
Example #11
def cdf(dset, p):
    info = nl.dset_info(dset)
    if info == None:
        nl.notify('Error: Could not get info for dset %s' % dset,
                  level=nl.level.error)
        return None
    command = ['cdf', '-p2t', info.subbricks[0]['stat'],
               str(p)] + info.subbricks[0]['params']
    return float(subprocess.check_output(command).split()[2])
Example #12
File: common.py  Project: azraq27/neural
def available_method(method_name):
    '''returns the method from the earliest available package in ``pkg_prefs`` (based on :meth:`pkg_available`)'''
    pkg_prefs_copy = list(pkg_prefs)
    if method_name in method_prefs:
        pkg_prefs_copy = [method_prefs[method_name]] + pkg_prefs_copy
    for pkg in pkg_prefs_copy:
        if pkg in pkgs:
            if method_name in dir(pkgs[pkg]):
                return getattr(pkgs[pkg],method_name)
    nl.notify('Error: Could not find implementation of %s on this computer' % (method_name),level=nl.level.error)
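A minimal sketch: asking for whichever installed package implements a given method. ``skull_strip`` is a method name used elsewhere in these examples; the dataset name is a placeholder:

skull_strip = available_method('skull_strip')
if skull_strip:  # None if no installed package implements it
    skull_strip('anat+orig')  # hypothetical dataset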
Example #13
File: maint.py  Project: azraq27/padre
def delete_subject(subject_id):
    if not os.path.exists(p.trash_dir):
        os.makedirs(p.trash_dir)
    new_dir = os.path.join(p.trash_dir,'%s-%s' % (subject_id,datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')))
    while os.path.exists(new_dir):
        new_dir += '_1'
    try:
        shutil.move(p.subject_dir(subject_id),new_dir)
    except (OSError, IOError):  # the old "except A,B:" form only caught OSError
        nl.notify('Error moving subject directory %s to the trash' % subject_id,level=nl.level.error)
Example #14
File: dicom.py  Project: azraq27/neural
def organize_dir(orig_dir):
    '''scans through the given directory and organizes DICOMs that look similar into subdirectories

    output directory is the ``orig_dir`` with ``-sorted`` appended to the end'''

    tags = [
        (0x10,0x20),    # Subj ID
        (0x8,0x21),     # Date
        (0x8,0x31),     # Time
        (0x8,0x103e)    # Descr
    ]
    orig_dir = orig_dir.rstrip('/')
    files = scan_dir(orig_dir,tags=tags,md5_hash=True)
    dups = find_dups(files)
    for dup in dups:
        nl.notify('Found duplicates of %s...' % dup[0])
        for each_dup in dup[1:]:
            nl.notify('\tdeleting %s' % each_dup)
            try:
                os.remove(each_dup)
            except IOError:
                nl.notify('\t[failed]')
            del(files[each_dup])

    clustered = cluster_files(files)
    output_dir = '%s-sorted' % orig_dir
    for key in clustered:
        if (0x8,0x31) in clustered[key]['info']:
            clustered[key]['info'][(0x8,0x31)] = str(int(float(clustered[key]['info'][(0x8,0x31)])))
        for t in tags:
            if t not in clustered[key]['info']:
                clustered[key]['info'][t] = '_'
        run_name = '-'.join([scrub_fname(str(clustered[key]['info'][x])) for x in tags])+'-%d_images' %len(clustered[key]['files'])
        run_dir = os.path.join(output_dir,run_name)
        nl.notify('Moving files into %s' % run_dir)
        try:
            if not os.path.exists(run_dir):
                os.makedirs(run_dir)
        except IOError:
            nl.notify('Error: failed to create directory %s' % run_dir)
        else:
            for f in clustered[key]['files']:
                try:
                    dset_fname = os.path.split(f)[1]
                    if dset_fname[0]=='.':
                        dset_fname = '_' + dset_fname[1:]
                    os.rename(f,os.path.join(run_dir,dset_fname))
                except (IOError, OSError):
                    pass
    for r,ds,fs in os.walk(output_dir,topdown=False):
        for d in ds:
            dname = os.path.join(r,d)
            if len(os.listdir(dname))==0:
                os.rmdir(dname)  # os.rmdir, since dname is a (now-empty) directory
Example #15
def available_method(method_name):
    '''returns the method from the earliest available package in ``pkg_prefs`` (based on :meth:`pkg_available`)'''
    pkg_prefs_copy = list(pkg_prefs)
    if method_name in method_prefs:
        pkg_prefs_copy = [method_prefs[method_name]] + pkg_prefs_copy
    for pkg in pkg_prefs_copy:
        if pkg in pkgs:
            if method_name in dir(pkgs[pkg]):
                return getattr(pkgs[pkg], method_name)
    nl.notify('Error: Could not find implementation of %s on this computer' %
              (method_name),
              level=nl.level.error)
Example #16
def error(msg, miss=None):
    nl.notify(msg + '\n', level=nl.level.error)
    if miss:
        try:
            import urllib, urllib2
            data = {'miss': miss}
            data_enc = urllib.urlencode(data)
            urllib2.urlopen('http://wolflion.org/cgi-bin/report.py?%s' %
                            data_enc)
        except:
            pass
    sys.exit()
Example #17
File: dsets.py  Project: azraq27/neural
def dset_copy(dset,to_dir):
    '''robust way to copy a dataset (including AFNI briks)'''
    if nl.is_afni(dset):
        dset_strip = re.sub(r'\.(HEAD|BRIK)?(\.(gz|bz))?','',dset)
        for dset_file in [dset_strip + '.HEAD'] + glob.glob(dset_strip + '.BRIK*'):
            if os.path.exists(dset_file):
                shutil.copy(dset_file,to_dir)
    else:
        if os.path.exists(dset):
            shutil.copy(dset,to_dir)
        else:
            nl.notify('Warning: couldn\'t find file %s to copy to %s' %(dset,to_dir),level=nl.level.warning)
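A short usage sketch with hypothetical paths; for an AFNI dataset this copies the ``.HEAD`` file plus any matching ``.BRIK*`` files in one call:

dset_copy('anat+orig.HEAD', '/data/backup')  # hypothetical source and destination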
Example #18
File: decon.py  Project: azraq27/neural
    def time_in(a):
        first_number = r'^(\d+(\.\d+)?)'
        if isinstance(a,basestring):
            m = re.match(first_number,a)
            if m:
                a = m.group(1)
            else:
                nl.notify('Warning: cannot interpret a number from the stim_time: "%s"' % a,level=nl.level.warning)
                return False
        a = float(a)/self.TR
        if a>=start and (end==None or a<=end):
            return True
        return False
Example #20
File: maint.py  Project: azraq27/padre
def merge(subject_id_from,subject_id_into):
    nl.notify('Trying to merge %s into %s' % (subject_id_from,subject_id_into))
    with commit_wrap():
        subj_from = p.load(subject_id_from)
        subj_to = p.load(subject_id_into)
        if subj_from and subj_to:
            merge_attr(subj_from.include,subj_to.include)
            merge_attr(subj_from.notes,subj_to.notes)
            subj_to.meta = dict(subj_from.meta.items() + subj_to.meta.items())
            sess_keys = subj_from._sessions.keys()
            for sess in sess_keys:
                merge_session(subj_from,subj_to,sess)
            subj_to.save()
            delete_subject(subj_from)
Example #21
def merge(subject_id_from, subject_id_into):
    nl.notify('Trying to merge %s into %s' %
              (subject_id_from, subject_id_into))
    with commit_wrap():
        subj_from = p.load(subject_id_from)
        subj_to = p.load(subject_id_into)
        if subj_from and subj_to:
            merge_attr(subj_from.include, subj_to.include)
            merge_attr(subj_from.notes, subj_to.notes)
            subj_to.meta = dict(subj_from.meta.items() + subj_to.meta.items())
            sess_keys = subj_from._sessions.keys()
            for sess in sess_keys:
                merge_session(subj_from, subj_to, sess)
            subj_to.save()
            delete_subject(subj_from)
Example #22
File: dsets.py  Project: azraq27/neural
def nifti_copy(filename,prefix=None,gzip=True):
    ''' creates a ``.nii`` copy of the given dataset and returns the filename as a string'''
    # I know, my argument ``prefix`` clobbers the global method... but it makes my arguments look nice and clean
    if prefix==None:
        prefix = filename
    nifti_filename = globals()['prefix'](prefix) + ".nii"
    if gzip:
        nifti_filename += '.gz'
    if not os.path.exists(nifti_filename):
        try:
            subprocess.check_call(['3dAFNItoNIFTI','-prefix',nifti_filename,str(filename)])
        except subprocess.CalledProcessError:
            nl.notify('Error: could not convert "%s" to NIFTI dset!' % filename,level=nl.level.error)
            return None
    return nifti_filename
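A hedged usage sketch (hypothetical dataset name): converting an AFNI dataset to NIfTI, reusing any copy that already exists on disk:

nifti_fname = nifti_copy('anat+orig')              # should return "anat.nii.gz" (gzip is the default)
plain_fname = nifti_copy('anat+orig', gzip=False)  # should return "anat.nii"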
Example #23
def delete_subject(subject_id):
    if not os.path.exists(p.trash_dir):
        os.makedirs(p.trash_dir)
    new_dir = os.path.join(
        p.trash_dir, '%s-%s' %
        (subject_id,
         datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')))
    while os.path.exists(new_dir):
        new_dir += '_1'
    try:
        shutil.move(p.subject_dir(subject_id), new_dir)
    except (OSError, IOError):  # the old "except A,B:" form only caught OSError
        nl.notify('Error moving subject directory %s to the trash' %
                  subject_id,
                  level=nl.level.error)
Example #24
def dset_copy(dset, to_dir):
    '''robust way to copy a dataset (including AFNI briks)'''
    if nl.is_afni(dset):
        dset_strip = re.sub(r'\.(HEAD|BRIK)?(\.(gz|bz))?', '', dset)
        for dset_file in [dset_strip + '.HEAD'
                          ] + glob.glob(dset_strip + '.BRIK*'):
            if os.path.exists(dset_file):
                shutil.copy(dset_file, to_dir)
    else:
        if os.path.exists(dset):
            shutil.copy(dset, to_dir)
        else:
            nl.notify('Warning: couldn\'t find file %s to copy to %s' %
                      (dset, to_dir),
                      level=nl.level.warning)
Example #25
def qwarp_epi(dset, align_subbrick=5, suffix='_qwal', prefix=None):
    '''aligns an EPI time-series using 3dQwarp
    
    Very expensive and not efficient at all, but it can produce pretty impressive alignment for EPI time-series with significant
    distortions due to motion'''
    info = nl.dset_info(dset)
    if info == None:
        nl.notify('Error reading dataset "%s"' % (dset), level=nl.level.error)
        return False
    if prefix == None:
        prefix = nl.suffix(dset, suffix)
    dset_sub = lambda x: '_tmp_qwarp_epi-%s_%d.nii.gz' % (nl.prefix(dset), x)
    try:
        align_dset = nl.suffix(dset_sub(align_subbrick), '_warp')
        nl.calc('%s[%d]' % (dset, align_subbrick),
                expr='a',
                prefix=align_dset,
                datum='float')
        for i in xrange(info.reps):
            if i != align_subbrick:
                nl.calc('%s[%d]' % (dset, i),
                        expr='a',
                        prefix=dset_sub(i),
                        datum='float')
                nl.run([
                    '3dQwarp', '-nowarp', '-workhard', '-superhard',
                    '-minpatch', '9', '-blur', '0', '-pear', '-nopenalty',
                    '-base', align_dset, '-source',
                    dset_sub(i), '-prefix',
                    nl.suffix(dset_sub(i), '_warp')
                ],
                       quiet=True)
        cmd = ['3dTcat', '-prefix', prefix]
        if info.TR:
            cmd += ['-tr', info.TR]
        if info.slice_timing:
            cmd += ['-tpattern', info.slice_timing]
        cmd += [nl.suffix(dset_sub(i), '_warp') for i in xrange(info.reps)]
        nl.run(cmd, quiet=True)
    except Exception as e:
        raise e
    finally:
        for i in xrange(info.reps):
            for suffix in ['', '_warp']:  # '_warp' must match the suffix used when creating the temp files
                try:
                    os.remove(nl.suffix(dset_sub(i), suffix))
                except:
                    pass
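A minimal usage sketch (hypothetical filename): warping every rep of an EPI run to subbrick 5 and writing the result with the default ``_qwal`` suffix:

qwarp_epi('epi_run1.nii.gz')  # output name defaults to nl.suffix(dset, '_qwal')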
Example #26
def skullstrip_template(dset, template, prefix=None, suffix=None, dilate=0):
    '''Takes the raw anatomy ``dset``, aligns it to a template brain, and applies a templated skullstrip. Should produce fairly reliable skullstrips as long
    as there is a decent amount of normal brain and the overall shape of the brain is normal-ish'''
    if suffix == None:
        suffix = '_sstemplate'
    if prefix == None:
        prefix = nl.suffix(dset, suffix)
    if not os.path.exists(prefix):
        with nl.notify('Running template-based skull-strip on %s' % dset):
            dset = os.path.abspath(dset)
            template = os.path.abspath(template)
            tmp_dir = tempfile.mkdtemp()
            cwd = os.getcwd()
            with nl.run_in(tmp_dir):
                nl.affine_align(template,
                                dset,
                                skull_strip=None,
                                cost='mi',
                                opts=['-nmatch', '100%'])
                nl.run([
                    '3dQwarp', '-minpatch', '20', '-penfac', '10', '-noweight',
                    '-source',
                    nl.suffix(template, '_aff'), '-base', dset, '-prefix',
                    nl.suffix(template, '_qwarp')
                ],
                       products=nl.suffix(template, '_qwarp'))
                info = nl.dset_info(nl.suffix(template, '_qwarp'))
                max_value = info.subbricks[0]['max']
                nl.calc([dset, nl.suffix(template, '_qwarp')],
                        'a*step(b-%f*0.05)' % max_value, prefix)
                shutil.move(prefix, cwd)
            shutil.rmtree(tmp_dir)
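A hedged usage sketch: the anatomy filename is a placeholder, and ``TT_N27+tlrc`` stands in for whichever template brain is available locally:

skullstrip_template('anat+orig', 'TT_N27+tlrc')  # output defaults to the "_sstemplate" suffix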
Example #27
def convert_coord(coord_from,matrix_file,base_to_aligned=True):
    '''Takes an XYZ array (in DICOM coordinates) and uses the matrix file produced by 3dAllineate to transform it. By default, the 3dAllineate
    matrix transforms from base to aligned space; to get the inverse transform set ``base_to_aligned`` to ``False``'''
    with open(matrix_file) as f:
        try:
            values = [float(y) for y in ' '.join([x for x in f.readlines() if x.strip() and x.strip()[0]!='#']).strip().split()]
        except:
            nl.notify('Error reading values from matrix file %s' % matrix_file, level=nl.level.error)
            return False
    if len(values)!=12:
        nl.notify('Error: found %d values in matrix file %s (expecting 12)' % (len(values),matrix_file), level=nl.level.error)
        return False
    matrix = np.vstack((np.array(values).reshape((3,-1)),[0,0,0,1]))
    if not base_to_aligned:
        matrix = np.linalg.inv(matrix)
    return np.dot(matrix,list(coord_from) + [1])[:3]
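A short sketch using a hypothetical matrix filename of the kind ``align_epi_anat`` produces above (``*_al_mat.aff12.1D``): transforming a coordinate into aligned space and back again:

xyz_aligned = convert_coord([12.0, -30.5, 44.0], 'epi_run1_al_mat.aff12.1D')
xyz_base = convert_coord(xyz_aligned, 'epi_run1_al_mat.aff12.1D', base_to_aligned=False)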
Example #28
def align_epi_anat(anatomy,epi_dsets,skull_strip_anat=True):
    ''' aligns epis to anatomy using ``align_epi_anat.py`` script

    :epi_dsets:       can be either a string or list of strings of the epi child datasets
    :skull_strip_anat:     if ``True``, ``anatomy`` will be skull-stripped using the default method

    The default output suffix is "_al"
    '''

    if isinstance(epi_dsets,basestring):
        epi_dsets = [epi_dsets]

    if len(epi_dsets)==0:
        nl.notify('Warning: no epi alignment datasets given for anatomy %s!' % anatomy,level=nl.level.warning)
        return

    if all(os.path.exists(nl.suffix(x,'_al')) for x in epi_dsets):
        return

    anatomy_use = anatomy

    if skull_strip_anat:
        nl.skull_strip(anatomy,'_ns')
        anatomy_use = nl.suffix(anatomy,'_ns')

    inputs = [anatomy_use] + epi_dsets
    dset_products = lambda dset: [nl.suffix(dset,'_al'), nl.prefix(dset)+'_al_mat.aff12.1D', nl.prefix(dset)+'_tsh_vr_motion.1D']
    products = nl.flatten([dset_products(dset) for dset in epi_dsets])
    with nl.run_in_tmp(inputs,products):
        if nl.is_nifti(anatomy_use):
            anatomy_use = nl.afni_copy(anatomy_use)
        epi_dsets_use = []
        for dset in epi_dsets:
            if nl.is_nifti(dset):
                epi_dsets_use.append(nl.afni_copy(dset))
            else:
                epi_dsets_use.append(dset)
        cmd = ["align_epi_anat.py", "-epi2anat", "-anat_has_skull", "no", "-epi_strip", "3dAutomask","-anat", anatomy_use, "-epi_base", "5", "-epi", epi_dsets_use[0]]
        if len(epi_dsets_use)>1:
            cmd += ['-child_epi'] + epi_dsets_use[1:]
            out = nl.run(cmd)

        for dset in epi_dsets:
            if nl.is_nifti(dset):
                dset_nifti = nl.nifti_copy(nl.prefix(dset)+'_al+orig')
                if dset_nifti and os.path.exists(dset_nifti) and dset_nifti.endswith('.nii') and dset.endswith('.gz'):
                    nl.run(['gzip',dset_nifti])
Example #29
def new_object(args):
    args = [
        x for x in args
        if 'concept' not in dir(x[0]) or x[0].concept.name != 'new'
    ]
    concept_names = [x[0].concept.name for x in args if 'concept' in dir(x[0])]
    if 'session_do' in concept_names:
        other_args = [x for x in args if isinstance(x, basestring)]
        if len(other_args) > 1:
            nl.notify(
                'Error: You gave me multiple names (%s), don\'t know which you want to be the session name!'
                % ','.join(other_args),
                level=nl.level.error)
            return False
        new_session = other_args[0]
        if 'subject' not in concept_names:
            nl.notify(
                'Error: I don\'t know what subject to create session %s in!' %
                new_session,
                level=nl.level.error)
            return False
        subject_id = [
            x[0].item for x in args
            if 'concept' in dir(x[0]) and x[0].concept.name == 'subject'
        ][0]
        with nl.notify('Creating session %s in subject %s' %
                       (new_session, subject_id)):
            subj = p.load(subject_id)
            if not subj:
                nl.notify('Error: couldn\'t load data for subject "%s"' %
                          subject_id,
                          level=nl.level.error)
                return False
            p.maint.new_session(subj, new_session)
            return True
    # Otherwise assume you want to make a subject
    other_args = [x for x in args if isinstance(x, basestring)]
    if len(other_args) > 1:
        nl.notify(
            'Error: You gave me multiple names (%s), don\'t know which you want to be the subject name!'
            % ','.join(other_args),
            level=nl.level.error)
        return False
    new_subject = other_args[0]
    with nl.notify('Creating new subject %s' % (new_subject)):
        p.maint.create_subject(new_subject)
        return True
Example #30
def dsets_identical(dset1, dset2):
    '''Tests if given datasets are identical'''
    max_tolerance = 1.0

    with nl.notify('Comparing %s with %s' % (dset1, dset2)):
        #        info = [nl.afni.dset_info(dset) for dset in [dset1,dset2]]
        #        for param in ['reps','voxel_size','voxel_dims']:
        #            if getattr(info[0],param) != getattr(info[1],param):
        #                nl.notify('Datasets differ in at least %s (%s vs. %s)' % (param,getattr(info[0],param),getattr(info[1],param)),level=nl.level.warning)
        #                return False
        max_diff = nl.max_diff(dset1, dset2)
        if max_diff > max_tolerance:
            nl.notify(
                'Datasets have a maximal difference >%.1f (max_diff = %.1f)'
                % (max_tolerance, max_diff),
                level=nl.level.warning)
            return False

        if max_diff == 0:
            nl.notify('Datasets appear to be identical')
            return True

        nl.notify('Datasets are minimally different (max_diff = %.1f)' %
                  max_diff)
        return True
Example #31
File: dicom.py  Project: azraq27/neural
def reconstruct_files(input_dir):
    '''sorts ``input_dir`` and tries to reconstruct the subdirectories found'''
    input_dir = input_dir.rstrip('/')
    with nl.notify('Attempting to organize/reconstruct directory'):
        # Some datasets start with a ".", which confuses many programs
        for r,ds,fs in os.walk(input_dir):
            for f in fs:
                if f[0]=='.':
                    shutil.move(os.path.join(r,f),os.path.join(r,'i'+f))
        nl.dicom.organize_dir(input_dir)
        output_dir = '%s-sorted' % input_dir
        if os.path.exists(output_dir):
            with nl.run_in(output_dir):
                for dset_dir in os.listdir('.'):
                    with nl.notify('creating dataset from %s' % dset_dir):
                        nl.dicom.create_dset(dset_dir)
        else:
            nl.notify('Warning: failed to auto-organize directory %s' % input_dir,level=nl.level.warning)
Example #32
def create_censor_file(input_dset,out_prefix=None,fraction=0.1,clip_to=0.1,max_exclude=0.3,motion_file=None,motion_exclude=1.0):
    '''create a binary censor file using 3dToutcount

    :input_dset:        the input dataset
    :out_prefix:        output 1D file (default: ``prefix(input_dset)`` + ``.1D``)
    :fraction:          censor a time point if the proportion of outliers at that
                        time point is greater than the given value
    :clip_to:           keep the number of time points censored under this proportion
                        of total reps. If more time points would be censored,
                        it will only pick the top ``clip_to*reps`` points
    :max_exclude:       if more time points than the given proportion of reps are excluded for the
                        entire run, throw an exception -- something is probably wrong
    :motion_file:       optional filename of a "motion" file with multiple columns and rows corresponding to reps.
                        It doesn't really matter what the values are, as long as they are appropriate relative to ``motion_exclude``
    :motion_exclude:    Will exclude any reps that have a value greater than this in any column of ``motion_file``
    '''
    (outcount,perc_outliers) = nl.qc.outcount(input_dset,fraction)
    info = nl.dset_info(input_dset)
    binarize = lambda o,f: [oo<f for oo in o]
    perc_outliers = lambda o: 1.-(sum(o)/float(info.reps))

    if motion_file:
        with open(motion_file,'Ur') as f:
            motion = [max([float(y) for y in x.strip().split()]) for x in f.read().split('\n') if len(x.strip())>0 and x.strip()[0]!='#']
            motion_1D = [x for x in binarize(motion,motion_exclude)]
            if perc_outliers(motion_1D) > max_exclude:
                nl.notify('Error: Too many points excluded because of motion (%.2f) in dset %s' % (perc_outliers(motion_1D),input_dset),level=nl.level.error)
                return False
            outcount = [outcount[i] if motion_1D[i] else 1. for i in range(len(outcount))]

    binary_outcount = binarize(outcount,fraction)

    if max_exclude and perc_outliers(binary_outcount) > max_exclude:
        nl.notify('Error: Found %.1f%% outliers in dset %s' % (100*perc_outliers(outcount),input_dset),level=nl.level.error)
        return False
    if clip_to:
        while perc_outliers(binary_outcount) > clip_to:
            best_outlier = min([(outcount[i],i) for i in range(len(outcount)) if not binary_outcount[i]])
            binary_outcount[best_outlier[1]] = True
    if not out_prefix:
        out_prefix = nl.prefix(input_dset) + '.1D'
    with open(out_prefix,'w') as f:
        f.write('\n'.join([str(int(x)) for x in binary_outcount]))
    return True
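A hedged usage sketch with hypothetical filenames; the motion file follows the ``*_tsh_vr_motion.1D`` naming seen in ``align_epi_anat`` above:

ok = create_censor_file('epi_run1+orig', out_prefix='epi_run1_censor.1D',
                        motion_file='epi_run1_tsh_vr_motion.1D')
if not ok:
    nl.notify('too many time points excluded; inspect this run', level=nl.level.error)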
Example #33
def nifti_copy(filename, prefix=None, gzip=True):
    ''' creates a ``.nii`` copy of the given dataset and returns the filename as a string'''
    # I know, my argument ``prefix`` clobbers the global method... but it makes my arguments look nice and clean
    if prefix == None:
        prefix = filename
    nifti_filename = globals()['prefix'](prefix) + ".nii"
    if gzip:
        nifti_filename += '.gz'
    if not os.path.exists(nifti_filename):
        try:
            subprocess.check_call(
                ['3dAFNItoNIFTI', '-prefix', nifti_filename,
                 str(filename)])
        except subprocess.CalledProcessError:
            nl.notify('Error: could not convert "%s" to NIFTI dset!' %
                      filename,
                      level=nl.level.error)
            return None
    return nifti_filename
Example #34
def subbrick(dset,
             label,
             coef=False,
             tstat=False,
             fstat=False,
             rstat=False,
             number_only=False):
    ''' returns a string referencing the given subbrick within a dset

    This method reads the header of the dataset ``dset``, finds the subbrick whose
    label matches ``label`` and returns a string of type ``dataset[X]``, which can
    be used by most AFNI programs to refer to a subbrick within a file

    The options coef, tstat, fstat, and rstat will add the suffix that is
    appended to the label by 3dDeconvolve

    :coef:  "#0_Coef"
    :tstat: "#0_Tstat"
    :fstat: "_Fstat"
    :rstat: "_R^2"

    if ``number_only`` is set to ``True``, will only return the subbrick number instead of a string
    '''

    if coef:
        label += "#0_Coef"
    elif tstat:
        label += "#0_Tstat"
    elif fstat:
        label += "_Fstat"
    elif rstat:
        label += "_R^2"

    info = nl.dset_info(dset)
    if info == None:
        nl.notify('Error: Couldn\'t get info from dset "%s"' % dset,
                  level=nl.level.error)
        return None
    i = info.subbrick_labeled(label)
    if number_only:
        return i
    return '%s[%d]' % (dset, i)
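A minimal sketch (hypothetical dataset and label): building a subbrick selector for the t-stat of a regressor, usable directly on an AFNI command line:

face_tstat = subbrick('stats_subj+tlrc', 'faces', tstat=True)
# would yield something like "stats_subj+tlrc[7]", depending on the dset's labels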
Example #35
    def partial(self,start=0,end=None,run=0):
        '''chops the stimulus by only including time points ``start`` through ``end`` (in reps, inclusive; ``None``=until the end)
        if using a stim_times-style stimulus, will change the ``run``'th run. If a column, will just chop the column'''
        self.read_file()
        decon_stim = copy.copy(self)
        if start<0:
            start = 0
        if self.type()=="column":
            decon_stim.column_file = None
            if end>=len(decon_stim.column):
                end = None
            if end==None:
                decon_stim.column = decon_stim.column[start:]
            else:
                decon_stim.column = decon_stim.column[start:end+1]
            if len(decon_stim.column)==0:
                return None
        if self.type()=="times":
            if self.TR==None:
                nl.notify('Error: cannot get partial segment of a stim_times stimulus without a TR',level=nl.level.error)
                return None
            def time_in(a):
                first_number = r'^(\d+(\.\d+)?)'
                if isinstance(a,basestring):
                    m = re.match(first_number,a)
                    if m:
                        a = m.group(1)
                    else:
                        nl.notify('Warning: cannot interpret a number from the stim_time: "%s"' % a,level=nl.level.warning)
                        return False
                a = float(a)/self.TR
                if a>=start and (end==None or a<=end):
                    return True
                return False

            decon_stim.times_file = None
            if len(decon_stim.times)==0 or '__iter__' not in dir(decon_stim.times[0]):
                decon_stim.times = [decon_stim.times]
            decon_stim.times[run] = [x for x in decon_stim.times[run] if time_in(x)]
            if len(nl.flatten(decon_stim.times))==0:
                return None
        return decon_stim
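A hedged sketch, where ``stim`` is a hypothetical instance of this stimulus class: trimming the first run down to reps 0 through 99:

first_chunk = stim.partial(start=0, end=99, run=0)  # ``stim`` is a hypothetical instance
if first_chunk is None:
    nl.notify('no events fell inside the requested window', level=nl.level.warning)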
Example #36
File: dsets.py  Project: azraq27/neural
def subbrick(dset,label,coef=False,tstat=False,fstat=False,rstat=False,number_only=False):
    ''' returns a string referencing the given subbrick within a dset

    This method reads the header of the dataset ``dset``, finds the subbrick whose
    label matches ``label`` and returns a string of type ``dataset[X]``, which can
    be used by most AFNI programs to refer to a subbrick within a file

    The options coef, tstat, fstat, and rstat will add the suffix that is
    appended to the label by 3dDeconvolve

    :coef:  "#0_Coef"
    :tstat: "#0_Tstat"
    :fstat: "_Fstat"
    :rstat: "_R^2"

    If ``coef`` or ``tstat`` are set to a number, it will use that parameter number
    (instead of 0), for models that use multiple parameters (e.g., "TENT").

    if ``number_only`` is set to ``True``, will only return the subbrick number instead of a string
    '''

    if coef is not False:
        if coef is True:
            coef = 0
        label += "#%d_Coef" % coef
    elif tstat != False:
        if tstat==True:
            tstat = 0
        label += "#%d_Tstat" % tstat
    elif fstat:
        label += "_Fstat"
    elif rstat:
        label += "_R^2"

    info = nl.dset_info(dset)
    if info==None:
        nl.notify('Error: Couldn\'t get info from dset "%s"'%dset,level=nl.level.error)
        return None
    i = info.subbrick_labeled(label)
    if number_only:
        return i
    return '%s[%d]' % (dset,i)
Example #37
def qwarp_epi(dset,align_subbrick=5,suffix='_qwal',prefix=None):
    '''aligns an EPI time-series using 3dQwarp

    Very expensive and not efficient at all, but it can produce pretty impressive alignment for EPI time-series with significant
    distortions due to motion'''
    info = nl.dset_info(dset)
    if info==None:
        nl.notify('Error reading dataset "%s"' % (dset),level=nl.level.error)
        return False
    if prefix==None:
        prefix = nl.suffix(dset,suffix)
    dset_sub = lambda x: '_tmp_qwarp_epi-%s_%d.nii.gz' % (nl.prefix(dset),x)
    try:
        align_dset = nl.suffix(dset_sub(align_subbrick),'_warp')
        nl.calc('%s[%d]' % (dset,align_subbrick),expr='a',prefix=align_dset,datum='float')
        for i in xrange(info.reps):
            if i != align_subbrick:
                nl.calc('%s[%d]' % (dset,i),expr='a',prefix=dset_sub(i),datum='float')
                nl.run([
                    '3dQwarp', '-nowarp',
                    '-workhard', '-superhard', '-minpatch', '9', '-blur', '0',
                    '-pear', '-nopenalty',
                    '-base', align_dset,
                    '-source', dset_sub(i),
                    '-prefix', nl.suffix(dset_sub(i),'_warp')
                ],quiet=True)
        cmd = ['3dTcat','-prefix',prefix]
        if info.TR:
            cmd += ['-tr',info.TR]
        if info.slice_timing:
            cmd += ['-tpattern',info.slice_timing]
        cmd += [nl.suffix(dset_sub(i),'_warp') for i in xrange(info.reps)]
        nl.run(cmd,quiet=True)
    except Exception as e:
        raise e
    finally:
        for i in xrange(info.reps):
            for suffix in ['','_warp']:  # '_warp' must match the suffix used when creating the temp files
                try:
                    os.remove(nl.suffix(dset_sub(i),suffix))
                except:
                    pass
Example #38
File: decon.py  Project: azraq27/neural
    def partial(self,start=0,end=None,run=0):
        '''chops the stimulus by only including time points ``start`` through ``end`` (in reps, inclusive; ``None``=until the end)
        if using a stim_times-style stimulus, will change the ``run``'th run. If a column, will just chop the column'''
        self.read_file()
        decon_stim = copy.copy(self)
        if start<0:
            start = 0
        if self.type()=="column":
            decon_stim.column_file = None
            if end>=len(decon_stim.column):
                end = None
            if end==None:
                decon_stim.column = decon_stim.column[start:]
            else:
                decon_stim.column = decon_stim.column[start:end+1]
            if len(decon_stim.column)==0:
                return None
        if self.type()=="times":
            if self.TR==None:
                nl.notify('Error: cannot get partial segment of a stim_times stimulus without a TR',level=nl.level.error)
                return None
            def time_in(a):
                first_number = r'^(\d+(\.\d+)?)'
                if isinstance(a,basestring):
                    m = re.match(first_number,a)
                    if m:
                        a = m.group(1)
                    else:
                        nl.notify('Warning: cannot interpret a number from the stim_time: "%s"' % a,level=nl.level.warning)
                        return False
                a = float(a)/self.TR
                if a>=start and (end==None or a<=end):
                    return True
                return False

            decon_stim.times_file = None
            if len(decon_stim.times)==0 or '__iter__' not in dir(decon_stim.times[0]):
                decon_stim.times = [decon_stim.times]
            decon_stim.times[run] = [x for x in decon_stim.times[run] if time_in(x)]
            if len(nl.flatten(decon_stim.times))==0:
                return None
        return decon_stim
Example #39
def reconstruct_files(input_dir):
    '''sorts ``input_dir`` and tries to reconstruct the subdirectories found'''
    input_dir = input_dir.rstrip('/')
    with nl.notify('Attempting to organize/reconstruct directory'):
        # Some datasets start with a ".", which confuses many programs
        for r, ds, fs in os.walk(input_dir):
            for f in fs:
                if f[0] == '.':
                    shutil.move(os.path.join(r, f), os.path.join(r, 'i' + f))
        nl.dicom.organize_dir(input_dir)
        output_dir = '%s-sorted' % input_dir
        if os.path.exists(output_dir):
            with nl.run_in(output_dir):
                for dset_dir in os.listdir('.'):
                    with nl.notify('creating dataset from %s' % dset_dir):
                        nl.dicom.create_dset(dset_dir)
        else:
            nl.notify('Warning: failed to auto-organize directory %s' %
                      input_dir,
                      level=nl.level.warning)
Example #40
File: decon.py  Project: azraq27/neural
    def blank_stim(self,type=None,fill=0):
        '''Makes a blank version of the stim. If a type is not given, the blank is returned as the same type as the current stim.
        If a column stim, will fill in the blanks with ``fill``'''
        blank = copy.copy(self)
        blank.name = 'Blank'
        if type==None:
            type = self.type()
        if type=="column":
            num_reps = self.reps
            if num_reps==None:
                if self.type()=="column":
                    self.read_file()
                    num_reps = len(self.column)
                else:
                    nl.notify('Error: requested to return a blank column, but I can\'t figure out how many reps to make it!',level=nl.level.error)
                    return None  # without a rep count the column can't be built
            blank.column = [fill]*num_reps
            return blank
        if type=="times":
            blank.times = []
            return blank
Example #42
File: maint.py  Project: azraq27/padre
def sessions_identical(subj1,sess1,subj2,sess2):
    '''Tests the given sessions to make sure the datasets are the same'''
    dsets1 = [os.path.basename(str(x)) for x in subj1.dsets(session=sess1)]
    dsets2 = [os.path.basename(str(x)) for x in subj2.dsets(session=sess2)]
    dsets = list(set(dsets1+dsets2))
    return_val = True
    with nl.notify('Comparing sessions %s.%s and %s.%s:' % (subj1,sess1,subj2,sess2)):
        for dset in dsets:
            if not dsets_identical(os.path.join(p.sessions_dir(subj1),sess1,dset),os.path.join(p.sessions_dir(subj2),sess2,dset)):
                return_val = False
                continue
    return return_val
Example #43
File: maint.py  Project: azraq27/padre
def rename(subject_id,new_subject_id):
    with commit_wrap():
        subj = p.load(subject_id)
        if subj:
            try:
                os.rename(p.subject_dir(subject_id),p.subject_dir(new_subject_id))
            except OSError:
                nl.notify('Error: filesystem reported error moving %s to %s' % (subject_id,new_subject_id),level=nl.level.error)
            else:
                subj._subject_id = new_subject_id
                subj.save()
                if os.path.exists(p.subject_json(subj)):
                    try:
                        os.remove(os.path.join(p.subject_dir(subj),os.path.basename(p.subject_json(subject_id))))
                    except OSError:
                        pass
                    try:
                        del(p.subject._all_subjects[str(subject_id)])
                    except KeyError:
                        pass
    p.subject._index_one_subject(new_subject_id)
Example #44
def filter_subjs(subjects=None, string=None, matches=None, require_match=True):
    '''takes ``list`` of subjects and filters based on the :class:`ConceptMatch`s given in ``matches``. If ``matches``
    is not given, will parse the string ``string`` instead. If ``require_match``, will return ``None`` if it fails to find
    any constraints, otherwise it returns ``subjects``'''
    match_concepts = ['subject', 'session', 'label', 'experiment', 'tag']
    if matches == None:
        if string == None:
            if subjects == None:
                return p.subjects()
            else:
                return subjects
        matches = bottle.parse_string(string)

    if subjects == None:
        if 'all' in [x[0].concept.name for x in matches]:
            nl.notify('Including incomplete sessions')
            subjects = p.subjects(only_included=False)
        else:
            subjects = p.subjects()
    running_exclusions = {}
    if not any([[x in sub_matches for x in match_concepts]
                for sub_matches in matches]):
        if require_match:
            nl.notify('Warning: Could not find any constraints to use',
                      level=nl.level.warning)
            return None
        else:
            nl.notify('Using no constraints')
            return subjects
    with nl.notify('Using constraints:'):
        subjects_total = set(subjects)
        for match_options in matches:
            if not isinstance(match_options, list):
                continue
            match_options = [x for x in match_options if x in match_concepts]
            if len(match_options) == 0:
                continue
            subjects_options = set()
            for match in match_options:
                nl.notify('%s = %s' %
                          (match.concept.name, repr(match.examples)))
                subjects_options = logic.or_sets(
                    subjects_options, set(filter_by_match(match, subjects)))
            subjects_total = logic.and_sets(subjects_options, subjects_total)
    return subjects_total
Example #45
def link_dsets(args):
    with nl.notify('Trying to link the following datasets...'):
        subjects = filter_subjs(p.subjects(), args)
        dsets_to_link = []

        for subj in subjects:
            with nl.notify(str(subj)):
                for dset in dsets_with(subj, args):
                    nl.notify(dset.__str__(False))
                    dsets_to_link.append(dset)
        nl.notify('Does that all look good? (y/n)')
        i = raw_input()
        if i.lower() == 'y':
            nl.notify('Ok, linking them')
            for dset in dsets_to_link:
                try:
                    os.symlink(str(dset), os.path.basename(str(dset)))
                except OSError:
                    pass
        else:
            nl.notify('Nope, that didn\'t look like a "y"...',
                      level=nl.level.warning)
Example #46
File: matching.py  Project: azraq27/padre
def bottle_help():
    nl.notify('''This program uses freeform text fuzzy matching. It will try to correct for misspellings and
    synonyms as it can.
    
    General syntax:
    [action] [objects]
    
    Currently supported actions are:
        list    print something to the screen (add quiet to do it less loud)
        add     add on something to an existing subject
        new     create a new subject
        link    create symbolic links to datasets in current directory
    
    Examples:
        list subjects
        list subjects ExperimentName
        get dsets SubjectName
        get subjects LabelName Tag1
    
    Just try stuff...

    ''')
Example #47
File: decon.py  Project: azraq27/neural
    def concat_stim(self,decon_stim):
        '''concatenate this to another :class:`DeconStim` of the same "type"'''
        if self.type()!=decon_stim.type():
            nl.notify('Error: Trying to concatenate stimuli of different types! %s (%s) with %s (%s)' % (self.name,self.type(),decon_stim.name,decon_stim.type()),level=nl.level.error)
            return None
        concat_stim = copy.copy(self)
        if self.name=='Blank':
            concat_stim = copy.copy(decon_stim)

        self.read_file()
        if self.type()=="column":
            # if an explicit # of reps is given, concat to that
            reps = [x.reps if x.reps else len(x.column) for x in [self,decon_stim]]
            concat_stim.column = self.column[:reps[0]] + decon_stim.column[:reps[1]]
            return concat_stim
        if self.type()=="times":
            if len(self.times)==0 or '__iter__' not in dir(self.times[0]):
                self.times = [self.times]
            if len(decon_stim.times)==0 or '__iter__' not in dir(decon_stim.times[0]):
                decon_stim.times = [decon_stim.times]
            concat_stim.times = self.times + decon_stim.times
            return concat_stim
        return None
Example #48
def convert_coord(coord_from, matrix_file, base_to_aligned=True):
    '''Takes an XYZ array (in DICOM coordinates) and uses the matrix file produced by 3dAllineate to transform it. By default, the 3dAllineate
    matrix transforms from base to aligned space; to get the inverse transform set ``base_to_aligned`` to ``False``'''
    with open(matrix_file) as f:
        try:
            values = [
                float(y) for y in ' '.join(
                    [x for x in f.readlines()
                     if x.strip() and x.strip()[0] != '#']).strip().split()
            ]
        except:
            nl.notify('Error reading values from matrix file %s' % matrix_file,
                      level=nl.level.error)
            return False
    if len(values) != 12:
        nl.notify('Error: found %d values in matrix file %s (expecting 12)' %
                  (len(values), matrix_file),
                  level=nl.level.error)
        return False
    matrix = np.vstack((np.array(values).reshape((3, -1)), [0, 0, 0, 1]))
    if not base_to_aligned:
        matrix = np.linalg.inv(matrix)
    return np.dot(matrix, list(coord_from) + [1])[:3]
Example #49
    def concat_stim(self,decon_stim):
        '''concatenate this to another :class:`DeconStim` of the same "type"'''
        if self.type()!=decon_stim.type():
            nl.notify('Error: Trying to concatenate stimuli of different types! %s (%s) with %s (%s)' % (self.name,self.type(),decon_stim.name,decon_stim.type()),level=nl.level.error)
            return None
        concat_stim = copy.copy(self)
        if self.name=='Blank':
            concat_stim = copy.copy(decon_stim)

        self.read_file()
        if self.type()=="column":
            # if an explicit # of reps is given, concat to that
            reps = [x.reps if x.reps else len(x.column) for x in [self,decon_stim]]
            concat_stim.column = self.column[:reps[0]] + decon_stim.column[:reps[1]]
            return concat_stim
        if self.type()=="times":
            if len(self.times)==0 or '__iter__' not in dir(self.times[0]):
                self.times = [self.times]
            if len(decon_stim.times)==0 or '__iter__' not in dir(decon_stim.times[0]):
                decon_stim.times = [decon_stim.times]
            concat_stim.times = self.times + decon_stim.times
            return concat_stim
        return None
Example #50
def bottle_help():
    nl.notify(
        '''This program uses freeform text fuzzy matching. It will try to correct for misspellings and
    synonyms as it can.
    
    General syntax:
    [action] [objects]
    
    Currently supported actions are:
        list    print something to the screen (add quiet to do it less loud)
        add     add on something to an existing subject
        new     create a new subject
        link    create symbolic links to datasets in current directory
    
    Examples:
        list subjects
        list subjects ExperimentName
        get dsets SubjectName
        get subjects LabelName Tag1
    
    Just try stuff...

    ''')
Example #51
File: decon.py  Project: azraq27/neural
def stack_decon_stims(stim_list):
    '''take a ``list`` (in order of runs) of ``dict``s of stim_name:DeconStim and stack them together. Builds
    a single ``dict`` of stim_name:decon_stim and returns its values

    As in, takes:
    [
        # Run 1
        { "stim1": decon_stim1a, "stim2": decon_stim2a },
        # Run 2
        { "stim1": decon_stim1b, "stim2": decon_stim2b, "stim3": decon_stim3 }
    ]

    And makes:
        { "stim1": decon_stim1, "stim2": decon_stim2, "stim3": decon_stim3 }

    If a stimulus is not present in a run, it will fill that run with an empty stimulus
    '''
    stim_names = list(set(nl.flatten([stims.keys() for stims in stim_list])))

    stim_dict = {}
    for stim_name in stim_names:
        types = list(set([stims[stim_name].type() for stims in stim_list if stim_name in stims]))
        if len(types)>1:
            nl.notify('Error: Trying to stack stimuli of different types! (%s)' % stim_name,level=nl.level.error)
            return None
        type = types[0]

        stim_stack = []
        for i in xrange(len(stim_list)):
            if stim_name in stim_list[i]:
                stim_stack.append(stim_list[i][stim_name])
            else:
                stim_stack.append(stim_list[i].values()[0].blank_stim(type=type))
        stim_dict[stim_name] = copy.copy(stim_stack[0])
        for stim in stim_stack[1:]:
            stim_dict[stim_name] = stim_dict[stim_name].concat_stim(stim)
    return stim_dict.values()
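A minimal sketch mirroring the docstring above, where the ``decon_stim*`` names are hypothetical :class:`DeconStim` objects:

stacked = stack_decon_stims([
    {'stim1': decon_stim1a, 'stim2': decon_stim2a},                        # run 1
    {'stim1': decon_stim1b, 'stim2': decon_stim2b, 'stim3': decon_stim3},  # run 2
])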
Example #53
File: matching.py Project: azraq27/padre
def filter_subjs(subjects=None,string=None,matches=None,require_match=True):
    '''takes a ``list`` of subjects and filters it based on the :class:`ConceptMatch` objects given in ``matches``.
    If ``matches`` is not given, the string ``string`` will be parsed instead. If ``require_match`` is ``True``,
    returns ``None`` when no usable constraints are found; otherwise it returns ``subjects`` unchanged'''
    match_concepts = ['subject','session','label','experiment','tag']
    if matches==None:
        if string==None:
            if subjects==None:
                return p.subjects()
            else:
                return subjects
        matches = bottle.parse_string(string)
    
    if subjects==None:
        if 'all' in [x[0].concept.name for x in matches]:
            nl.notify('Including incomplete sessions')
            subjects = p.subjects(only_included=False)
        else:
            subjects = p.subjects()
    running_exclusions = {}
    # true only if at least one group of matches contains a usable constraint concept
    # (the original nested list comprehension was always truthy for non-empty matches)
    if not any(x in sub_matches for sub_matches in matches for x in match_concepts):
        if require_match:
            nl.notify('Warning: Could not find any constraints to use',level=nl.level.warning)
            return None
        else:
            nl.notify('Using no constraints')
            return subjects
    with nl.notify('Using constraints:'):
        subjects_total = set(subjects)
        for match_options in matches:
            if not isinstance(match_options,list):
                continue
            match_options = [x for x in match_options if x in match_concepts]
            if len(match_options)==0:
                continue
            subjects_options = set()
            for match in match_options:
                nl.notify('%s = %s' % (match.concept.name,repr(match.examples)))
                subjects_options = logic.or_sets(subjects_options,set(filter_by_match(match,subjects)))
            subjects_total = logic.and_sets(subjects_options,subjects_total)
    return subjects_total
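The set arithmetic above ORs alternative values within one constraint group and ANDs the groups together. A toy version with plain sets, assuming logic.or_sets and logic.and_sets are simple union and intersection (the subject names are made up):

all_subjects = {'s1', 's2', 's3', 's4', 's5'}
groups = [
    [{'s1', 's2', 's3'}, {'s4'}],  # alternatives within a group: union (OR)
    [{'s2', 's4'}],                # each additional group: intersection (AND)
]
total = set(all_subjects)
for group in groups:
    within = set()
    for subj_set in group:
        within |= subj_set
    total &= within
print(total)  # {'s2', 's4'}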
Example #54
File: dicom.py Project: azraq27/neural
def max_diff(dset1,dset2):
    '''calculates maximal voxel-wise difference in datasets (in %)

    Useful for checking if datasets have the same data. For example, if the maximum difference is
    < 1.0%, they're probably the same dataset'''
    for dset in [dset1,dset2]:
        if not os.path.exists(dset):
            nl.notify('Error: Could not find file: %s' % dset,level=nl.level.error)
            return float('inf')
    try:
        dset1_d = nib.load(dset1)
        dset2_d = nib.load(dset2)
        dset1_data = dset1_d.get_data()
        dset2_data = dset2_d.get_data()
    except IOError:
        nl.notify('Error: Could not read files %s and %s' % (dset1,dset2),level=nl.level.error)
        return float('inf')
    try:
        old_err = np.seterr(divide='ignore',invalid='ignore')
        max_val = 100*np.max(np.ma.masked_invalid(np.double(dset1_data - dset2_data) / ((dset1_data+dset2_data)/2)))
        np.seterr(**old_err)
        return max_val
    except ValueError:
        return float('inf')
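The difference measure is symmetric: each voxel's difference is scaled by the mean of the two values, so swapping the datasets only flips the sign. A minimal self-contained check of the same numpy expression, with made-up values:

import numpy as np

a = np.array([101.0, 200.0, 0.0])
b = np.array([100.0, 200.0, 0.0])

with np.errstate(divide='ignore', invalid='ignore'):
    pct = 100 * np.ma.masked_invalid((a - b) / ((a + b) / 2))
print(pct.max())  # ~0.995 -- under a 1% "probably the same data" threshold; 0/0 voxels are masked out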
Example #55
def skull_strip(dset, suffix='_ns', prefix=None, unifize=True):
    '''use FSL's bet2 to strip the skull from the given anatomy'''
    # should add options to use betsurf and T1/T2 in the future
    # Since BET fails on oddly-distributed datasets, 3dUnifize is run first... which makes this dependent on AFNI. Sorry :)
    if prefix == None:
        prefix = nl.suffix(dset, suffix)
    unifize_dset = nl.suffix(dset, '_u')
    cmd = bet2 if bet2 else 'bet2'
    # apply the FSL directory prefix regardless of whether we unifize first
    cmd = os.path.join(fsl_dir, cmd) if fsl_dir else cmd
    if unifize:
        info = nl.dset_info(dset)
        if info == None:
            nl.notify('Error: could not read info for dset %s' % dset,
                      level=nl.level.error)
            return False
        cutoff_value = nl.max(dset) * 0.05
        nl.run([
            '3dUnifize', '-prefix', unifize_dset,
            nl.calc(dset, 'step(a-%f)*a' % cutoff_value)
        ],
               products=unifize_dset)
    else:
        unifize_dset = dset
    nl.run([cmd, unifize_dset, prefix, '-w', 0.5], products=prefix)
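Hypothetical usage (the filenames are made up, and the default output name assumes nl.suffix inserts the suffix before the extension):

skull_strip('anat.nii.gz')                 # unifize first, then bet2 -> anat_ns.nii.gz
skull_strip('anat.nii.gz', unifize=False)  # run bet2 directly on the raw anatomy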
Example #56
def skullstrip_template(dset,template,prefix=None,suffix=None,dilate=0):
    '''Takes the raw anatomy ``dset``, warps a template brain to it, and applies a template-derived skullstrip. Should produce
    fairly reliable skullstrips as long as there is a decent amount of normal brain and the overall shape of the brain is normal-ish'''
    if suffix==None:
        suffix = '_sstemplate'
    if prefix==None:
        prefix = nl.suffix(dset,suffix)
    if not os.path.exists(prefix):
        with nl.notify('Running template-based skull-strip on %s' % dset):
            dset = os.path.abspath(dset)
            template = os.path.abspath(template)
            tmp_dir = tempfile.mkdtemp()
            cwd = os.getcwd()
            with nl.run_in(tmp_dir):
                nl.affine_align(template,dset,skull_strip=None,cost='mi',opts=['-nmatch','100%'])
                nl.run(['3dQwarp','-minpatch','20','-penfac','10','-noweight','-source',nl.suffix(template,'_aff'),'-base',dset,'-prefix',nl.suffix(template,'_qwarp')],products=nl.suffix(template,'_qwarp'))
                info = nl.dset_info(nl.suffix(template,'_qwarp'))
                max_value = info.subbricks[0]['max']
                nl.calc([dset,nl.suffix(template,'_qwarp')],'a*step(b-%f*0.05)'%max_value,prefix)
                shutil.move(prefix,cwd)
            shutil.rmtree(tmp_dir)
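Hypothetical usage (both filenames are made up; any reasonably normal-shaped template brain should work):

skullstrip_template('subj_anat.nii.gz', 'TT_N27.nii.gz')
# writes subj_anat_sstemplate.nii.gz (the default suffix) into the current directory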
Example #57
File: maint.py Project: azraq27/padre
def dsets_identical(dset1,dset2):
    '''Tests whether the given datasets contain effectively identical data'''
    max_tolerance = 1.0

    with nl.notify('Comparing %s with %s' % (dset1,dset2)):
        max_diff = nl.max_diff(dset1,dset2)
        if max_diff > max_tolerance:
            nl.notify('Datasets have a maximal difference >%.1f%% (max_diff = %.1f%%)' % (max_tolerance, max_diff),level=nl.level.warning)
            return False

        if max_diff==0:
            nl.notify('Datasets appear to be identical')
            return True

        nl.notify('Datasets are minimally different (max_diff = %.1f%%)' % max_diff)
        return True
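Hypothetical usage, e.g. when deduplicating imported data (the filenames are made up):

if dsets_identical('run1.nii.gz', 'run1_reimport.nii.gz'):
    print('duplicate import; safe to remove the copy')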
Example #58
File: dicom.py Project: azraq27/neural
def unpack_archive(fname,out_dir):
    '''unpacks the archive file ``fname`` and reconstructs datasets into ``out_dir``

    Datasets are reconstructed and auto-named using :meth:`create_dset`. The raw directories
    that produced the datasets are archived with the dataset name suffixed by ``tgz``, and any other
    files found in the archive are put into ``other_files.tgz``'''
    with nl.notify('Unpacking archive %s' % fname):
        tmp_dir = tempfile.mkdtemp()
        tmp_unpack = os.path.join(tmp_dir,'unpack')
        os.makedirs(tmp_unpack)
        nl.utils.unarchive(fname,tmp_unpack)
        reconstruct_files(tmp_unpack)
        out_dir = os.path.abspath(out_dir)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        if not os.path.exists(tmp_unpack+'-sorted'):
            # nothing was reconstructed; clean up the temp directory and bail out
            shutil.rmtree(tmp_dir)
            return
        with nl.run_in(tmp_unpack+'-sorted'):
            for fname in glob.glob('*.nii'):
                nl.run(['gzip',fname])
            for fname in glob.glob('*.nii.gz'):
                new_file = os.path.join(out_dir,fname)
                if not os.path.exists(new_file):
                    shutil.move(fname,new_file)
            raw_out = os.path.join(out_dir,'raw')
            if not os.path.exists(raw_out):
                os.makedirs(raw_out)
            for rawdir in os.listdir('.'):
                rawdir_tgz = os.path.join(raw_out,rawdir+'.tgz')
                if not os.path.exists(rawdir_tgz):
                    with tarfile.open(rawdir_tgz,'w:gz') as tgz:
                        tgz.add(rawdir)
        if len(os.listdir(tmp_unpack))!=0:
            # There are still raw files left
            with tarfile.open(os.path.join(raw_out,'other_files.tgz'),'w:gz') as tgz:
                tgz.add(tmp_unpack)
    shutil.rmtree(tmp_dir)
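Hypothetical usage (the paths are made up): reconstructed datasets land in out_dir, the raw DICOM directories are re-archived under out_dir/raw/, and anything unrecognized goes into other_files.tgz:

unpack_archive('incoming/subj001_scan.tgz', 'PADRE/Import/subj001')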
Example #59
File: dicom.py Project: azraq27/neural
def create_dset_to3d(prefix,file_list,file_order='zt',num_slices=None,num_reps=None,TR=None,slice_order='alt+z',only_dicoms=True,sort_filenames=False):
    '''manually create dataset by specifying everything (not recommended, but necessary when autocreation fails)

    If `num_slices` or `num_reps` is omitted, it will be inferred from the number of images. If both are omitted,
    it assumes that this is not a time-dependent dataset

    :only_dicoms:       filter the given list by readable DICOM images
    :sort_filenames:    sort the given files by filename using the right-most number in the filename'''

    tags = {
        'num_rows': (0x0028,0x0010),
        'num_reps': (0x0020,0x0105),
        'TR': (0x0018,0x0080)
    }
    with nl.notify('Trying to create dataset %s' % prefix):
        if os.path.exists(prefix):
            nl.notify('Error: file "%s" already exists!' % prefix,level=nl.level.error)
            return False

        tagvals = {}
        for f in file_list:
            try:
                tagvals[f] = info_for_tags(f,tags.values())
            except Exception:
                # unreadable or non-DICOM file; skip it
                pass
        if only_dicoms:
            new_file_list = []
            for f in file_list:
                if f in tagvals and len(tagvals[f][tags['num_rows']])>0:
                    # Only include DICOMs that actually have image information
                    new_file_list.append(f)
            file_list = new_file_list

        if sort_filenames:
            def file_num(fname):
                '''sort key: the right-most number in the filename, else the name itself'''
                try:
                    nums = [x.strip('.') for x in re.findall(r'[\d.]+',fname) if x.strip('.')!='']
                    return float(nums[-1])
                except (IndexError, ValueError):
                    return fname
            file_list = sorted(file_list,key=file_num)

        if len(file_list)==0:
            nl.notify('Error: Couldn\'t find any valid DICOM images',level=nl.level.error)
            return False


        cmd = ['to3d','-skip_outliers','-quit_on_err','-prefix',prefix]

        if num_slices!=None or num_reps!=None:
            # Time-based dataset
            if num_slices==None:
                if len(file_list)%num_reps!=0:
                    nl.notify('Error: trying to guess # of slices, but %d (number of files) doesn\'t divide evenly into %d (number of reps)' % (len(file_list),num_reps),level=nl.level.error)
                    return False
                num_slices = len(file_list)/num_reps
            if num_reps==None:
                if len(file_list)%num_slices==0:
                    num_reps = len(file_list)/num_slices
                elif len(file_list)==1 and tags['num_reps'] in tagvals[file_list[0]]:
                    num_reps = tagvals[file_list[0]][tags['num_reps']]
                else:
                    nl.notify('Error: trying to guess # of reps, but %d (number of files) doesn\'t divide evenly into %d (number of slices)' % (len(file_list),num_slices),level=nl.level.error)
                    return False

            if TR==None:
                TR = tagvals[file_list[0]][tags['TR']]
            cmd += ['-time:%s'%file_order]
            if file_order=='zt':
                cmd += [num_slices,num_reps]
            else:
                cmd += [num_reps,num_slices]
            cmd += [TR,slice_order]
        cmd += ['-@']
        cmd = [str(x) for x in cmd]
        out = None
        try:
            p = subprocess.Popen(cmd,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
            out = p.communicate('\n'.join(file_list))
            if p.returncode!=0:
                raise Exception('to3d returned %d' % p.returncode)
        except Exception:
            with nl.notify('Error: to3d returned an error',level=nl.level.error):
                if out:
                    nl.notify('stdout:\n' + out[0] + '\nstderr:\n' + out[1],level=nl.level.error)
            return False
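For a concrete sense of what gets assembled: with file_order='zt', num_slices=30, num_reps=100, and a TR of 2000 (ms, as stored in DICOM tag 0018,0080), the command built above would look like the following, with the file list then fed to to3d on stdin (all values are illustrative):

cmd = ['to3d', '-skip_outliers', '-quit_on_err', '-prefix', 'epi_run1',
       '-time:zt', '30', '100', '2000', 'alt+z', '-@']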
Example #60
File: utils.py Project: azraq27/neural
def run(command,products=None,working_directory='.',force_local=False,stderr=True,quiet=False):
    '''wrapper to run external programs

    :command:           list containing command and parameters
                        (formatted the same as subprocess; must contain only strings)
    :products:          string or list of files that are the products of this command
                        if all products exist, the command will not be run, and False returned
    :working_directory: will chdir to this directory
    :force_local:       when used with `neural.scheduler`, setting to ``True`` will disable
                        all job distribution functions
    :stderr:            forward ``stderr`` into the output
                        ``True`` will combine ``stderr`` and ``stdout``
                        ``False`` will return ``stdout`` and let ``stderr`` print to the console
                        ``None`` will return ``stdout`` and suppress ``stderr``
    :quiet:             ``False`` (default) will print friendly messages
                        ``True`` will suppress everything but errors
                        ``None`` will suppress all output

    Returns result in form of :class:`RunResult`
    '''
    with run_in(working_directory):
        if products:
            if isinstance(products,basestring):
                products = [products]
            if all([os.path.exists(x) for x in products]):
                return False

        command = flatten(command)
        command = [str(x) for x in command]
        # notify quietly unless quiet was explicitly False
        quiet_option = (quiet != False)
        with nl.notify('Running %s...' % command[0],level=nl.level.debug,quiet=quiet_option):
            out = None
            returncode = 0
            try:
                if stderr:
                    # include STDERR in STDOUT output
                    out = subprocess.check_output(command,stderr=subprocess.STDOUT)
            elif stderr==None:
                # discard STDERR entirely (passing PIPE without reading it can deadlock)
                with open(os.devnull,'w') as devnull:
                    out = subprocess.check_output(command,stderr=devnull)
                else:
                    # let STDERR show through to the console
                    out = subprocess.check_output(command)
            except subprocess.CalledProcessError as e:
                if quiet!=None:
                    nl.notify('''ERROR: %s returned a non-zero status

    ----COMMAND------------
    %s
    -----------------------


    ----OUTPUT-------------
    %s
    -----------------------
    Return code: %d
    ''' % (command[0],' '.join(command),e.output,e.returncode),level=nl.level.error)
                out = e.output
                returncode = e.returncode
            result = RunResult(out,returncode)
            if products and returncode==0:
                result.output_filename = products[0]
            return result
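Hypothetical usage (the filenames are made up); because of the products check, the command is skipped and False returned when the output already exists:

result = run(['gzip', '-k', 'dset.nii'], products='dset.nii.gz')
if result is False:
    print('dset.nii.gz already exists; nothing to do')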