Example #1
def extract_stats_fsl(data, mask, gmmask, threshold=0.3):
    """ uses fsl tools to extract data values in mask,
    masks 'mask' with gmmask thresholded at 'threshold' (default 0.3)
    returns mean, std, nvoxels
    NOTE: generates some tmp files in tempdir, but also removes them"""
    tmpdir = tempfile.mkdtemp()
    startdir = os.getcwd()
    os.chdir(tmpdir)
    # first mask mask with thresholded gmmask
    pth, nme = os.path.split(mask)
    outfile = fname_presuffix(mask, prefix='gmask_', newpath=tmpdir)
    c1 = CommandLine('fslmaths %s -thr %2.2f -nan -mul %s %s' %
                     (gmmask, threshold, mask, outfile)).run()
    if not c1.runtime.returncode == 0:
        print('gm masking of mask failed for %s' % (mask))
        print('tmp dir', tmpdir)
        print(c1.runtime.stderr)
        return None, None, None
    # then mask the data with the gm-masked mask
    cmd = 'fslmaths %s -nan -mas %s masked_data' % (data, outfile)
    mask_out = CommandLine(cmd).run()
    if not mask_out.runtime.returncode == 0:
        print('masking failed for %s' % (data))
        return None, None, None
    masked = find_single_file('masked*')
    # get stats
    mean_out = CommandLine('fslstats %s -M' % (masked)).run()
    mean = mean_out.runtime.stdout.strip('\n').strip()
    std_out = CommandLine('fslstats %s -S' % (masked)).run()
    std = std_out.runtime.stdout.strip('\n').strip()
    vox_out = CommandLine('fslstats %s -V' % (masked)).run()
    vox = vox_out.runtime.stdout.split()[0]
    os.chdir(startdir)
    rmtree(tmpdir)
    return mean, std, vox
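
A minimal usage sketch for extract_stats_fsl (the file paths below are hypothetical placeholders; FSL and the helper functions used above are assumed to be available):

# Hypothetical paths for illustration only
pet_data = '/data/sub01/pet.nii.gz'        # image to sample
roi_mask = '/data/sub01/roi_mask.nii.gz'   # binary ROI mask
gm_mask = '/data/sub01/gm_prob.nii.gz'     # grey-matter probability map

mean, std, nvox = extract_stats_fsl(pet_data, roi_mask, gm_mask, threshold=0.3)
if mean is not None:
    print('ROI mean=%s std=%s nvoxels=%s' % (mean, std, nvox))
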
def make_cerebellum(aseg):
    cwd = os.getcwd()
    pth, nme = os.path.split(aseg)
    os.chdir(pth)
    # FreeSurfer aseg labels: 47 = Right-Cerebellum-Cortex, 8 = Left-Cerebellum-Cortex
    cl = CommandLine('fslmaths %s -thr 47 -uthr 47 right_cerebellum' % (aseg))
    cout = cl.run()
    if not cout.runtime.returncode == 0:
        os.chdir(cwd)
        print('Unable to create right cerebellum for %s' % (aseg))
        return None

    cl2 = CommandLine('fslmaths %s -thr 8 -uthr 8 left_cerebellum' % (aseg))
    cout2 = cl2.run()
    if not cout2.runtime.returncode == 0:
        os.chdir(cwd)
        print('Unable to create left cerebellum for %s' % (aseg))
        return None

    cl3 = CommandLine('fslmaths left_cerebellum -add right_cerebellum -bin grey_cerebellum')
    cout3 = cl3.run()
    if not cout3.runtime.returncode == 0:
        os.chdir(cwd)
        print('Unable to create whole cerebellum for %s' % (aseg))
        print(cout3.runtime.stderr)
        print(cout3.runtime.stdout)
        return None

    cmd = 'rm right_cerebellum.* left_cerebellum.*'
    cl4 = CommandLine(cmd)
    cout4 = cl4.run()
    os.chdir(cwd)
    cerebellum = glob('%s/grey_cerebellum.*' % (pth))
    return cerebellum[0]
def export_graph(graph_in, base_dir=None, show=False, use_execgraph=False,
                 show_connectinfo=False, dotfilename='graph.dot', format='png',
                 simple_form=True):
    """ Displays the graph layout of the pipeline

    This function requires that pygraphviz and matplotlib are available on
    the system.

    Parameters
    ----------

    show : boolean
        Indicate whether to generate pygraphviz output from
        networkx. default [False]

    use_execgraph : boolean
        Indicates whether to use the specification graph or the
        execution graph. default [False]

    show_connectinfo : boolean
        Indicates whether to show the edge data on the graph. This
        makes the graph rather cluttered. default [False]
    """
    graph = deepcopy(graph_in)
    if use_execgraph:
        graph = generate_expanded_graph(graph)
        logger.debug('using execgraph')
    else:
        logger.debug('using input graph')
    if base_dir is None:
        base_dir = os.getcwd()
    if not os.path.exists(base_dir):
        os.makedirs(base_dir)
    outfname = fname_presuffix(dotfilename,
                               suffix='_detailed.dot',
                               use_ext=False,
                               newpath=base_dir)
    logger.info('Creating detailed dot file: %s' % outfname)
    _write_detailed_dot(graph, outfname)
    cmd = 'dot -T%s -O %s' % (format, outfname)
    res = CommandLine(cmd).run()
    if res.runtime.returncode:
        logger.warn('dot2png: %s', res.runtime.stderr)
    pklgraph = _create_dot_graph(graph, show_connectinfo, simple_form)
    outfname = fname_presuffix(dotfilename,
                               suffix='.dot',
                               use_ext=False,
                               newpath=base_dir)
    nx.write_dot(pklgraph, outfname)
    logger.info('Creating dot file: %s' % outfname)
    cmd = 'dot -T%s -O %s' % (format, outfname)
    res = CommandLine(cmd).run()
    if res.runtime.returncode:
        logger.warn('dot2png: %s', res.runtime.stderr)
    if show:
        pos = nx.graphviz_layout(pklgraph, prog='dot')
        nx.draw(pklgraph, pos)
        if show_connectinfo:
            nx.draw_networkx_edge_labels(pklgraph, pos)
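
In nipype this function is normally reached through Workflow.write_graph rather than called directly. A minimal sketch (assuming nipype and Graphviz's dot are installed; the workflow and node names are illustrative only):

import nipype.pipeline.engine as pe
from nipype.interfaces.utility import IdentityInterface

# Toy two-node workflow; 'demo_wf' and the node names are placeholders.
wf = pe.Workflow(name='demo_wf', base_dir='/tmp/demo_wf')
inputnode = pe.Node(IdentityInterface(fields=['x']), name='inputnode')
outputnode = pe.Node(IdentityInterface(fields=['x']), name='outputnode')
wf.connect(inputnode, 'x', outputnode, 'x')

# Writes graph.dot / graph_detailed.dot and renders them with dot,
# much as export_graph does above.
wf.write_graph(graph2use='exec', dotfilename='graph.dot', format='png')
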
Example #4
def fs_extract_label_rois(subdir, pet, dat, labels):
    """
    Uses freesurfer tools to extract

    Parameters
    -----------
    subdir : subjects freesurfer directory

    pet : filename of subjects PET volume coreg'd to mri space

    dat : filename of dat generated by tkregister mapping pet to mri

    labels : filename of subjects aparc+aseg.mgz

    Returns
    -------
    stats_file : file that contains roi stats

    label_file : file of volume with label rois in pet space
               you can check dat with ...
               'tkmedit %s T1.mgz -overlay %s -overlay-reg %s
               -fthresh 0.5 -fmid1'%(subject, pet, dat)
                 
    """
    pth, nme, ext = split_filename(pet)
    pth_lbl, nme_lbl, ext_lbl = split_filename(labels)

    stats_file = os.path.join(pth, '%s_%s_stats' % (nme, nme_lbl))
    label_file = os.path.join(pth, '%s_%s_.nii.gz' % (nme, nme_lbl))

    # Gen label file
    cmd = [
        'mri_label2vol',
        '--seg %s/mri/%s' % (subdir, labels),
        '--temp %s' % (pet),
        '--reg' % (dat),
        '--o %s' % (label_file)
    ]
    cmd = ' '.join(cmd)
    cout = CommandLine(cmd).run()
    if not cout.runtime.returncode == 0:
        print 'mri_label2vol failed for %s' % (pet)
        return None, None
    ## Get stats
    cmd = [
        'mri_segstats',
        '--seg %s' % (label_file),
        '--sum %s' % (stats_file),
        '--in %s' % (pet), '--nonempty --ctab',
        '/usr/local/freesurfer_x86_64-4.5.0/FreeSurferColorLUT.txt'
    ]
    cmd = ' '.join(cmd)
    cout = CommandLine(cmd).run()
    if not cout.runtime.returncode == 0:
        print 'mri_segstats failed for %s' % (pet)
        return None, None
    return stats_file, label_file
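
A hypothetical call to the function above (paths and subject directory are placeholders; the FreeSurfer command-line tools are assumed to be installed):

# Placeholders for illustration only
stats_file, label_file = fs_extract_label_rois(
    subdir='/data/freesurfer/sub01',
    pet='/data/sub01/pet_coreg.nii.gz',
    dat='/data/sub01/pet_coreg_2_FS.dat',
    labels='aparc+aseg.mgz')
if stats_file is not None:
    print('ROI stats written to', stats_file)
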
Example #5
def dicom2nrrd(dicomdir, experiment_dir, out_prefix, out_suffix):
    import os
    import shutil

    dirnames = os.listdir(dicomdir)
    for d_i in range(len(dirnames)):
        fileName, fileExtension = os.path.splitext(dirnames[d_i])
        if fileExtension == '.gz':
            os.remove(os.path.join(dicomdir, dirnames[d_i]))
        if fileExtension == '.bval':
            os.remove(os.path.join(dicomdir, dirnames[d_i]))
        if fileExtension == '.bvec':
            os.remove(os.path.join(dicomdir, dirnames[d_i]))

    from nipype.interfaces.base import CommandLine
    basename = experiment_dir + '/' + out_prefix + '/' + out_prefix + out_suffix
    cmd = CommandLine(
        '/Users/eija/Documents/osx/dcm2nii -a Y -d N -e N -i N -p N -o %s %s' %
        (basename, dicomdir))
    print "DICOM->NII:" + cmd.cmd
    cmd.run()

    dirnames = os.listdir(dicomdir)
    filename_nii = ''
    filename_bvec = ''
    filename_bval = ''
    for d_i in range(len(dirnames)):
        fileName, fileExtension = os.path.splitext(dirnames[d_i])
        if fileExtension == '.gz':
            if len(filename_nii) > 0:
                raise ValueError("multiple copies of .nii.gz were found")
            filename_nii = fileName
        if fileExtension == '.bval':
            if len(filename_bval) > 0:
                raise ValueError("multiple copies of .bval were found")
            filename_bval = fileName
        if fileExtension == '.bvec':
            if len(filename_bvec) > 0:
                raise ValueError("multiple copies of .bvec were found")
            filename_bvec = fileName

    move_to_results((dicomdir + '/' + filename_nii + '.gz'), experiment_dir,
                    out_prefix)
    move_to_results((dicomdir + '/' + filename_bval + '.bval'), experiment_dir,
                    out_prefix)
    move_to_results((dicomdir + '/' + filename_bvec + '.bvec'), experiment_dir,
                    out_prefix)

    cmd = CommandLine(
        'DWIConvert --inputVolume %s.nii.gz --outputVolume %s.nrrd --conversionMode FSLToNrrd --inputBValues %s.bval --inputBVectors %s.bvec'
        % (basename, basename, basename, basename))
    print "NII->NRRD:" + cmd.cmd
    cmd.run()
    return os.path.abspath('%s.nrrd' % (basename))
def remove_files(files):
    """removes files"""
    # accept either a single filename or an iterable of filenames
    if isinstance(files, str):
        files = [files]
    for f in files:
        cl = CommandLine('rm %s' % f)
        out = cl.run()
        if not out.runtime.returncode == 0:
            print('failed to delete %s' % f)
            print(out.runtime.stderr)
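
remove_files shells out to rm through CommandLine; a pure-Python sketch of the same idea (not the author's code) uses os.remove and avoids spawning a subprocess per file:

import os

def remove_files_py(files):
    """Sketch: delete files with os.remove instead of 'rm'."""
    if isinstance(files, str):
        files = [files]
    for f in files:
        try:
            os.remove(f)
        except OSError as err:
            print('failed to delete %s: %s' % (f, err))
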
def geodesic_depth(command, surface_file):
    """
    Measure "travel depth" of each vertex in a surface mesh.
    (Calls Joachim Giard's C++ code)

    Parameters
    ----------
    command : string
        travel depth C++ executable command
    surface_file : string
        vtk file with surface mesh

    Returns
    -------
    depth_file: string
        vtk file with geodesic depth per vertex of mesh

    """
    import os
    from nipype.interfaces.base import CommandLine

    depth_file = os.path.join(
        os.getcwd(),
        os.path.splitext(os.path.basename(surface_file))[0] +
        '.geodesic_depth.vtk')
    cli = CommandLine(command=command)
    cli.inputs.args = ' '.join([surface_file, depth_file])
    cli.cmdline
    cli.run()

    if not os.path.exists(depth_file):
        raise (IOError(depth_file + " not found"))

    return depth_file
def area(command, surface_file):
    """
    Measure area of each vertex in a surface mesh.
    (Calls Joachim Giard's C++ code)

    Parameters
    ----------
    command : string
        Voronoi-based surface area C++ executable command
    surface_file : string
        vtk file with surface mesh

    Returns
    -------
    area_file: string
        vtk file with surface area per vertex of mesh

    """
    import os
    from nipype.interfaces.base import CommandLine

    area_file = os.path.join(
        os.getcwd(),
        os.path.splitext(os.path.basename(surface_file))[0] + '.area.vtk')
    cli = CommandLine(command=command)
    cli.inputs.args = ' '.join([surface_file, area_file])
    cli.cmdline
    cli.run()

    if not os.path.exists(area_file):
        raise (IOError(area_file + " not found"))

    return area_file
Example #9
def fs_generate_dat(pet, subdir):
    """ use freesurfer tkregister to generate a dat file used in
    extracting PET counts with a labelled mri mask in freesurfer

    Parameters
    ----------
    pet : pet file that is registered to the subjects mri

    subdir : subjects freesurfer directory

    Returns
    -------
    dat : dat file generated, or None if it fails
    you can check dat with ...
               'tkmedit %s T1.mgz -overlay %s -overlay-reg %s
               -fthresh 0.5 -fmid1'%(subject, pet, dat)
                 
    """
    pth, nme, ext = split_filename(pet)
    dat = os.path.join(pth, '%s_2_FS.dat' % (nme))
    cmd = 'tkregister2 --mov %s --s %s --regheader --reg %s --noedit' % (
        pet, subdir, dat)
    cout = CommandLine(cmd).run()
    if not cout.runtime.returncode == 0:
        print('tkregister failed for %s' % (pet))
        return None
    return dat
Example #10
def dicom2mhd(dicomdir, experiment_dir, out_prefix):
    from nipype.utils.filemanip import split_filename
    from nipype.interfaces.base import CommandLine
    import os
    _, name, _ = split_filename(dicomdir)
    outfile_mhd = experiment_dir + '/' + out_prefix + '/' + name + '_tmp/' + 'output' + '.mhd'
    outfile_raw = experiment_dir + '/' + out_prefix + '/' + name + '_tmp/' + 'output' + '.raw'
    outfile_txt = experiment_dir + '/' + out_prefix + '/' + name + '_tmp/' + 'output' + '_info.txt'
    outdir = experiment_dir + '/' + out_prefix + '/' + name + '_tmp'
    cmd = CommandLine((
        mcverter_basedir +
        'mcverter %s -r -f meta -o %s -F-PatientName-SeriesDate-SeriesDescription-StudyId-SeriesNumber'
        % (dicomdir, outdir)))
    print "DICOM->NII:" + cmd.cmd
    cmd.run()
    # Move to results folder
    outfile_mhd = move_to_results(outfile_mhd, experiment_dir, out_prefix)
    outfile_raw = move_to_results(outfile_raw, experiment_dir, out_prefix)
    outfile_txt = move_to_results(outfile_txt, experiment_dir, out_prefix)
    os.rmdir(outdir)
    # Rename basename, and reference in mhd header
    outfile_mhd = rename_basename_to(outfile_mhd, name)
    outfile_raw = rename_basename_to(outfile_raw, name)
    outfile_txt = rename_basename_to(outfile_txt, name)
    replace_inplace(outfile_mhd, ('output.raw'), (name + '.raw'))

    return outfile_mhd, outfile_raw, outfile_txt
Example #11
def get_doc(cmd, opt_map, help_flag=None, trap_error=True):
    """Get the docstring from our command and options map.

    Parameters
    ----------
    cmd : string
        The command whose documentation we are fetching
    opt_map : dict
        Dictionary of flags and option attributes.
    help_flag : string
        Provide additional help flag. e.g., -h
    trap_error : boolean
        Override if underlying command returns a non-zero returncode

    Returns
    -------
    doc : string
        The formatted docstring

    """
    res = CommandLine('which %s' % cmd.split(' ')[0],
                      terminal_output='allatonce').run()
    cmd_path = res.runtime.stdout.strip()
    if cmd_path == '':
        raise Exception('Command %s not found' % cmd.split(' ')[0])
    if help_flag:
        cmd = ' '.join((cmd, help_flag))
    doc = grab_doc(cmd, trap_error)
    opts = reverse_opt_map(opt_map)
    return build_doc(doc, opts)
Example #12
def get_params_from_doc(cmd, style='--', help_flag=None, trap_error=True):
    """Auto-generate option map from command line help

    Parameters
    ----------
    cmd : string
        The command whose documentation we are fetching
    style : string default ['--']
        The help command style (--, -). Multiple styles can be provided in a
        list e.g. ['--','-'].
    help_flag : string
        Provide additional help flag. e.g., -h
    trap_error : boolean
        Override if underlying command returns a non-zero returncode

    Returns
    -------
    optmap : dict
        Contains a mapping from input to command line variables

    """
    res = CommandLine('which %s' % cmd.split(' ')[0],
                      terminal_output='allatonce').run()
    cmd_path = res.runtime.stdout.strip()
    if cmd_path == '':
        raise Exception('Command %s not found' % cmd.split(' ')[0])
    if help_flag:
        cmd = ' '.join((cmd, help_flag))
    doc = grab_doc(cmd, trap_error)
    return _parse_doc(doc, style)
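
A hypothetical call, assuming the helper functions used above (grab_doc, _parse_doc) are defined in the same module and that FSL's bet is on the PATH:

# 'bet' prints its usage with single-dash flags, hence style='-'
optmap = get_params_from_doc('bet', style='-')
for key, value in sorted(optmap.items()):
    print(key, ':', value)
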
Example #13
 def _grab_xml(self, module):
     cmd = CommandLine(command="Slicer3", args="--launch %s --xml" % module)
     ret = cmd.run()
     if ret.runtime.returncode == 0:
         return xml.dom.minidom.parseString(ret.runtime.stdout)
     else:
         raise Exception(cmd.cmdline + " failed:\n%s" % ret.runtime.stderr)
Example #14
    def version():
        """Check for RATS version on system

        Parameters
        ----------
        None

        Returns
        -------
        version : str
           Version number as string or None if RATS not found

        """
        try:
            clout = CommandLine(command='RATS_MM --version',
                                resource_monitor=False).run()
        except IOError:
            # If RATS_MM is not present, return None
            LOGGER.warn('RATS_MM executable not found.')
            return None

        version_stamp = clout.runtime.stdout.split('\n')[1].split(
            'version ')[0]
        if version_stamp.startswith('RATS_MM'):
            version_stamp = version_stamp.split('version: ')[1]
        else:
            return None

        return version_stamp
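
An illustration (not from the source) of the version-string parsing above, assuming RATS_MM prints something like 'RATS_MM, version: 1.0' on the second line of its output:

sample_line = 'RATS_MM, version: 1.0'          # assumed output format
stamp = sample_line.split('version ')[0]       # no 'version ' substring, so unchanged
if stamp.startswith('RATS_MM'):
    stamp = stamp.split('version: ')[1]
print(stamp)                                   # -> '1.0'
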
Example #15
def run_palm(cope_file,
             design_file,
             contrast_file,
             group_file,
             mask_file,
             cluster_threshold=3.09):
    import os
    from glob import glob
    from nipype.interfaces.base import CommandLine
    #cmd = ("palm -i {cope_file} -m {mask_file} -d {design_file} -t {contrast_file} -eb {group_file} -T "
    #       "-C {cluster_threshold} -Cstat extent -fdr -noniiclass -twotail -logp -zstat")
    #cl = CommandLine(cmd.format(cope_file=cope_file, mask_file=mask_file, design_file=design_file,
    #                            contrast_file=contrast_file,
    #                            group_file=group_file, cluster_threshold=cluster_threshold))

    # XXX: ideally we should make this fancier, but since we're only doing
    # 1-sample t-tests we need to omit the design, contrast, and group files,
    # as per PALM's FAQ
    cmd = (
        "palm -i {cope_file} -m {mask_file} -T "
        "-C {cluster_threshold} -Cstat extent -fdr -noniiclass -twotail -logp -zstat"
    )
    cl = CommandLine(
        cmd.format(cope_file=cope_file,
                   mask_file=mask_file,
                   cluster_threshold=cluster_threshold))
    results = cl.run(terminal_output='file')
    return [os.path.join(os.getcwd(), val) for val in sorted(glob('palm*'))]
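
A hypothetical invocation of run_palm (paths are placeholders and PALM is assumed to be on the PATH; the design/contrast/group arguments are accepted but unused in the 1-sample variant above):

palm_outputs = run_palm(cope_file='/data/group/cope1.nii.gz',
                        design_file=None,
                        contrast_file=None,
                        group_file=None,
                        mask_file='/data/group/mask.nii.gz',
                        cluster_threshold=3.09)
print(palm_outputs)
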
def DICOM2animatedGIF_sidebyside(dcmdir_l, dcmdir_r, outputpath, slice_i, suffix):

    dcmio = DicomIO.DicomIO()
    dwidcm_l = dcmio.ReadDICOM_frames(dcmdir_l)
    dwidcm_r = dcmio.ReadDICOM_frames(dcmdir_r)
    # Write all frames of slice into set of png files
    for frame_i in range(len(dwidcm_l)):
        slice_l = dwidcm_l[frame_i][slice_i].pixel_array.T
        slice_r = dwidcm_r[frame_i][slice_i].pixel_array.T
        dimx = slice_l.shape[0]
        dimy = slice_l.shape[1]
        newImg1 = PIL.Image.new('L', (dimx*2, dimy))
        pixels1 = newImg1.load()
        for i in range (0, dimx):
            for j in range (0, dimy):
                pixels1[i, j] = float(slice_l[i, j])
                pixels1[dimx+i, j] = float(slice_r[i, j])
        #pixels1[i, j] = float(slice[i, j]) * dwidcm[frame_i][slice_i].RescaleSlope + dwidcm[frame_i][slice_i].RescaleIntercept
        newImg1.save((outputpath + '_' + ('%02d' % frame_i) + '.png'),'PNG')
    cmd = CommandLine('convert -delay 25 -loop 0 %s_*.png %s_%s.gif' % (outputpath, outputpath, suffix))
    cmd.run()
    for frame_i in range(len(dwidcm_l)):
        os.remove((outputpath + '_' + ('%02d' % frame_i) + '.png'))
    print "convert (ImageMagick):" + cmd.cmd
    return (outputpath + '_' + suffix + '.gif')
Example #17
 def _submit_batchtask(self, scriptfile, node):
     cmd = CommandLine('qsub', environ=os.environ.data,
                       terminal_output='allatonce')
     path = os.path.dirname(scriptfile)
     qsubargs = ''
     if self._qsub_args:
         qsubargs = self._qsub_args
     if 'qsub_args' in node.plugin_args:
         if 'overwrite' in node.plugin_args and \
                 node.plugin_args['overwrite']:
             qsubargs = node.plugin_args['qsub_args']
         else:
             qsubargs += (" " + node.plugin_args['qsub_args'])
     if '-o' not in qsubargs:
         qsubargs = '%s -o %s' % (qsubargs, path)
     if '-e' not in qsubargs:
         qsubargs = '%s -e %s' % (qsubargs, path)
     if node._hierarchy:
         jobname = '.'.join((os.environ.data['LOGNAME'],
                             node._hierarchy,
                             node._id))
     else:
         jobname = '.'.join((os.environ.data['LOGNAME'],
                             node._id))
     jobnameitems = jobname.split('.')
     jobnameitems.reverse()
     jobname = '.'.join(jobnameitems)
     jobname = qsub_sanitize_job_name(jobname)
     cmd.inputs.args = '%s -N %s %s' % (qsubargs,
                                        jobname,
                                        scriptfile)
     oldlevel = iflogger.level
     iflogger.setLevel(logging.getLevelName('CRITICAL'))
     tries = 0
     result = list()
     while True:
         try:
             result = cmd.run()
         except Exception as e:
             if tries < self._max_tries:
                 tries += 1
                 time.sleep(
                     self._retry_timeout)  # sleep 2 seconds and try again.
             else:
                 iflogger.setLevel(oldlevel)
                 raise RuntimeError('\n'.join((('Could not submit sge task'
                                                ' for node %s') % node._id,
                                               str(e))))
         else:
             break
     iflogger.setLevel(oldlevel)
     # retrieve sge taskid
     lines = [line for line in result.runtime.stdout.split('\n') if line]
     taskid = int(re.match("Your job ([0-9]*) .* has been submitted",
                           lines[-1]).groups()[0])
     self._pending[taskid] = node.output_dir()
     self._refQstatSubstitute.add_startup_job(taskid, cmd.cmdline)
     logger.debug('submitted sge task: %d for node %s with %s' %
                  (taskid, node._id, cmd.cmdline))
     return taskid
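
A quick illustration (not from the source) of the qsub taskid parsing used above, with a made-up submission message:

import re

sample_stdout = 'Your job 123456 ("demo.job") has been submitted'   # made-up qsub output
taskid = int(re.match("Your job ([0-9]*) .* has been submitted",
                      sample_stdout).groups()[0])
assert taskid == 123456
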
Example #18
def run_rand(cope_file,
             design_file,
             contrast_file,
             group_file,
             mask_file,
             cluster_threshold=2.3,
             n=10000):

    import os
    from glob import glob
    from nipype.interfaces.base import CommandLine

    cmd = (
        "randomise -i {cope_file} -m {mask_file} -d {design_file} -t {contrast_file} -e {group_file} -T "
        "-c {cluster_threshold} -x -n {n}")

    cl = CommandLine(
        cmd.format(cope_file=cope_file,
                   mask_file=mask_file,
                   design_file=design_file,
                   contrast_file=contrast_file,
                   group_file=group_file,
                   cluster_threshold=cluster_threshold,
                   n=n))
    results = cl.run(terminal_output='file')
    return [os.path.join(os.getcwd(), val) for val in sorted(glob('rand*'))]
Example #19
 def _submit_batchtask(self, scriptfile, node):
     cmd = CommandLine('bsub',
                       environ=os.environ.data,
                       terminal_output='allatonce')
     path = os.path.dirname(scriptfile)
     bsubargs = ''
     if self._bsub_args:
         bsubargs = self._bsub_args
     if 'bsub_args' in node.plugin_args:
         if 'overwrite' in node.plugin_args and\
            node.plugin_args['overwrite']:
             bsubargs = node.plugin_args['bsub_args']
         else:
             bsubargs += (" " + node.plugin_args['bsub_args'])
     if '-o' not in bsubargs:  # -o outfile
         bsubargs = '%s -o %s' % (bsubargs, scriptfile + ".log")
     if '-e' not in bsubargs:
         bsubargs = '%s -e %s' % (bsubargs, scriptfile + ".log"
                                  )  # -e error file
     if node._hierarchy:
         jobname = '.'.join(
             (os.environ.data['LOGNAME'], node._hierarchy, node._id))
     else:
         jobname = '.'.join((os.environ.data['LOGNAME'], node._id))
     jobnameitems = jobname.split('.')
     jobnameitems.reverse()
     jobname = '.'.join(jobnameitems)
     cmd.inputs.args = '%s -J %s sh %s' % (bsubargs, jobname, scriptfile
                                           )  # -J job_name_spec
     logger.debug('bsub ' + cmd.inputs.args)
     oldlevel = iflogger.level
     iflogger.setLevel(logging.getLevelName('CRITICAL'))
     tries = 0
     while True:
         try:
             result = cmd.run()
         except Exception as e:
             if tries < self._max_tries:
                 tries += 1
                 sleep(
                     self._retry_timeout)  # sleep 2 seconds and try again.
             else:
                 iflogger.setLevel(oldlevel)
                 raise RuntimeError('\n'.join(
                     (('Could not submit lsf task'
                       ' for node %s') % node._id, str(e))))
         else:
             break
     iflogger.setLevel(oldlevel)
     # retrieve lsf taskid
     match = re.search('<(\d*)>', result.runtime.stdout)
     if match:
         taskid = int(match.groups()[0])
     else:
         raise ScriptError("Can't parse submission job output id: %s" %
                           result.runtime.stdout)
     self._pending[taskid] = node.output_dir()
     logger.debug('submitted lsf task: %d for node %s' % (taskid, node._id))
     return taskid
Example #20
def area(command, surface_file, verbose=False):
    """
    Measure area of each vertex in a surface mesh.
    (Calls Joachim Giard's C++ code)

    Parameters
    ----------
    command : string
        Voronoi-based surface area C++ executable command
    surface_file : string
        vtk file with surface mesh
    verbose : bool
        print statements?

    Returns
    -------
    area_file: string
        vtk file with surface area per vertex of mesh

    Examples
    --------
    >>> import os
    >>> import numpy as np
    >>> from mindboggle.shapes.surface_shapes import area
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> surface_file = fetch_data(urls['left_pial'], '', '.vtk')
    >>> verbose = False
    >>> ccode_path = os.environ['vtk_cpp_tools']
    >>> command = os.path.join(ccode_path, 'area', 'PointAreaMain')
    >>> area_file = area(command, surface_file, verbose)
    >>>
    >>> scalars, name = read_scalars(area_file)
    >>> np.allclose(scalars[0:8],
    ...             [0.48270401731, 0.39661528543, 0.57813454792, 0.70574099571,
    ...              0.84318527207, 0.57642554119, 0.66942016035, 0.70629953593])
    True

    """
    import os
    from nipype.interfaces.base import CommandLine

    basename = os.path.splitext(os.path.basename(surface_file))[0]
    area_file = os.path.join(os.getcwd(), basename + '.area.vtk')
    args = ' '.join([surface_file, area_file])

    if verbose:
        print("{0} {1}".format(command, args))

    cli = CommandLine(command=command)
    cli.inputs.args = args
    cli.terminal_output = 'file'
    cli.run()

    if not os.path.exists(area_file):
        raise IOError(area_file + " not found")

    return area_file
Example #21
def dicom2nrrd(dicomdir, out_prefix):
    import os
    from nipype.interfaces.base import CommandLine
    cmd = CommandLine(
        'DWIConvert --inputDicomDirectory %s --outputVolume %s.nrrd' %
        (dicomdir, out_prefix))
    cmd.run()
    return os.path.abspath('%s.nrrd' % out_prefix)
Example #22
def test_provenance():
    ps = ProvStore()
    from nipype.interfaces.base import CommandLine
    results = CommandLine('echo hello').run()
    ps.add_results(results)
    provn = ps.g.get_provn()
    prov_json = ps.g.serialize(format='json')
    yield assert_true, 'echo hello' in provn
Example #23
def test_provenance(tmpdir):
    from nipype.interfaces.base import CommandLine
    tmpdir.chdir()
    ps = ProvStore()
    results = CommandLine('echo hello').run()
    ps.add_results(results)
    provn = ps.g.get_provn()
    assert 'echo hello' in provn
def convert_audio_file(old_file,
                       new_file,
                       command='ffmpeg',
                       input_args='-i',
                       output_args='-ac 2'):
    """
    Convert audio file to new format.

    Parameters
    ----------
    old_file : string
        full path to the input file
    new_file : string
        full path to the output file
    command : string
        executable command without arguments
    input_args : string
        arguments preceding input file name in command
    output_args : string
        arguments preceding output file name in command

    Returns
    -------
    new_file : string
        full path to the output file

    Examples
    --------
    >>> from mhealthx.xio import convert_audio_file
    >>> old_file = '/Users/arno/mhealthx_cache/mhealthx/feature_files/test.m4a'
    >>> new_file = 'test.wav'
    >>> command = 'ffmpeg'
    >>> input_args = '-y -i'
    >>> output_args = '-ac 2'
    >>> new_file = convert_audio_file(old_file, new_file, command, input_args, output_args)

    """
    import os
    from nipype.interfaces.base import CommandLine

    if not os.path.isfile(old_file):
        raise IOError("{0} does not exist.".format(old_file))
    else:
        input_args = ' '.join([input_args, old_file, output_args, new_file])
        try:
            # Nipype command line wrapper:
            cli = CommandLine(command=command)
            cli.inputs.args = input_args
            cli.cmdline
            cli.run()
        except Exception:
            import traceback
            traceback.print_exc()
            print("'{0} {1}' unsuccessful".format(command, input_args))
            new_file = None

    return new_file
Example #25
    def _submit_batchtask(self, scriptfile, node):
        """
        This is more or less the _submit_batchtask from sge.py with flipped variable
        names, different command line switches, and different output formatting/processing
        """
        cmd = CommandLine('sbatch',
                          environ=os.environ.data,
                          terminal_output='allatonce')
        path = os.path.dirname(scriptfile)

        sbatch_args = ''
        if self._sbatch_args:
            sbatch_args = self._sbatch_args
        if 'sbatch_args' in node.plugin_args:
            if 'overwrite' in node.plugin_args and\
               node.plugin_args['overwrite']:
                sbatch_args = node.plugin_args['sbatch_args']
            else:
                sbatch_args += (" " + node.plugin_args['sbatch_args'])
        if '-o' not in sbatch_args:
            sbatch_args = '%s -o %s' % (sbatch_args,
                                        os.path.join(path, 'slurm-%j.out'))
        if '-e' not in sbatch_args:
            sbatch_args = '%s -e %s' % (sbatch_args,
                                        os.path.join(path, 'slurm-%j.out'))
        if '-p' not in sbatch_args:
            sbatch_args = '%s -p normal' % (sbatch_args)
        if '-n' not in sbatch_args:
            sbatch_args = '%s -n 16' % (sbatch_args)
        if '-t' not in sbatch_args:
            sbatch_args = '%s -t 1:00:00' % (sbatch_args)
        if node._hierarchy:
            jobname = '.'.join(
                (os.environ.data['LOGNAME'], node._hierarchy, node._id))
        else:
            jobname = '.'.join((os.environ.data['LOGNAME'], node._id))
        jobnameitems = jobname.split('.')
        jobnameitems.reverse()
        jobname = '.'.join(jobnameitems)
        cmd.inputs.args = '%s -J %s %s' % (sbatch_args, jobname, scriptfile)
        oldlevel = iflogger.level
        iflogger.setLevel(logging.getLevelName('CRITICAL'))
        tries = 0
        while True:
            try:
                result = cmd.run()
            except Exception as e:
                if tries < self._max_tries:
                    tries += 1
                    sleep(
                        self._retry_timeout)  # sleep 2 seconds and try again.
                else:
                    iflogger.setLevel(oldlevel)
                    raise RuntimeError('\n'.join(
                        (('Could not submit sbatch task'
                          ' for node %s') % node._id, str(e))))
            else:
                break
Example #26
def travel_depth(command, surface_file, verbose=False):
    """
    Measure "travel depth" of each vertex in a surface mesh.
    (Calls Joachim Giard's C++ code)

    Parameters
    ----------
    command : string
        travel depth C++ executable command
    surface_file : string
        vtk file
    verbose : bool
        print statements?

    Returns
    -------
    depth_file: string
        vtk file with travel depth per vertex of mesh

    Examples
    --------
    >>> import os
    >>> import numpy as np
    >>> from mindboggle.shapes.surface_shapes import travel_depth
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> surface_file = fetch_data(urls['left_pial'], '', '.vtk')
    >>> verbose = False
    >>> ccode_path = os.environ['vtk_cpp_tools']
    >>> command = os.path.join(ccode_path, 'travel_depth', 'TravelDepthMain')
    >>> depth_file = travel_depth(command, surface_file, verbose)
    >>> scalars, name = read_scalars(depth_file)
    >>> print(np.array_str(np.array(scalars[0:8]), precision=5,
    ...     suppress_small=True))
    [ 0.02026  0.06009  0.12859  0.04564  0.00774  0.05284  0.05354  0.01316]

    """
    import os
    from nipype.interfaces.base import CommandLine

    basename = os.path.splitext(os.path.basename(surface_file))[0]
    depth_file = os.path.join(os.getcwd(), basename + '.travel_depth.vtk')
    args = ' '.join([surface_file, depth_file])

    if verbose:
        print("{0} {1}".format(command, args))

    cli = CommandLine(command=command)
    cli.inputs.args = args
    cli.cmdline
    cli.run()

    if not os.path.exists(depth_file):
        raise IOError(depth_file + " not found")

    return depth_file
def tar_cmd(infile):
    """ given a ipped tar archive, untars"""
    cwd = os.getcwd()
    pth, nme = os.path.split(infile)
    os.chdir(pth)
    cl = CommandLine('tar xfvz %s'%(infile))
    cout = cl.run()
    os.chdir(cwd)
    return pth
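
tar_cmd shells out to tar; a pure-Python sketch of the same operation (not the original code) using the standard-library tarfile module:

import os
import tarfile

def tar_cmd_py(infile):
    """Sketch: extract a gzipped tar archive into its own directory."""
    pth, nme = os.path.split(infile)
    with tarfile.open(infile, 'r:gz') as tf:
        tf.extractall(path=pth)
    return pth
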
Example #28
def unzip(infile):
    gunzipfile, gz = os.path.splitext(infile)
    if 'gz' not in gz:
        # file is not gzipped, nothing to do
        return infile
    else:
        c3 = CommandLine('gunzip %s' % (infile))
        c3.run()
        return gunzipfile
Example #29
def travel_depth(command, surface_file, verbose=False):
    """
    Measure "travel depth" of each vertex in a surface mesh.
    (Calls Joachim Giard's C++ code)

    Parameters
    ----------
    command : string
        travel depth C++ executable command
    surface_file : string
        vtk file
    verbose : bool
        print statements?

    Returns
    -------
    depth_file: string
        vtk file with travel depth per vertex of mesh

    Examples
    --------
    >>> import os
    >>> import numpy as np
    >>> from mindboggle.shapes.surface_shapes import travel_depth
    >>> from mindboggle.mio.vtks import read_scalars
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> surface_file = fetch_data(urls['left_pial'], '', '.vtk')
    >>> verbose = False
    >>> ccode_path = os.environ['vtk_cpp_tools']
    >>> command = os.path.join(ccode_path, 'travel_depth', 'TravelDepthMain')
    >>> depth_file = travel_depth(command, surface_file, verbose)
    >>> scalars, name = read_scalars(depth_file)
    >>> np.allclose(scalars[0:8], [0.020259869839, 0.06009166489, 0.12858575442, 0.045639221313, 0.007742772964, 0.052839111255, 0.053538904296, 0.013158746337])
    True

    """
    import os
    from nipype.interfaces.base import CommandLine

    basename = os.path.splitext(os.path.basename(surface_file))[0]
    depth_file = os.path.join(os.getcwd(), basename + '.travel_depth.vtk')
    args = ' '.join([surface_file, depth_file])

    if verbose:
        print("{0} {1}".format(command, args))

    cli = CommandLine(command=command)
    cli.inputs.args = args
    cli.terminal_output = 'file'
    cli.run()

    if not os.path.exists(depth_file):
        raise IOError(depth_file + " not found")

    return depth_file
Example #30
def dicom2nii(dicomdir, experiment_dir, out_prefix, out_suffix):
    import os
    import shutil
    from nipype.interfaces.base import CommandLine

    dirnames = os.listdir(dicomdir)
    for d_i in range(len(dirnames)):
        fileName, fileExtension = os.path.splitext(dirnames[d_i])
        if fileExtension == '.gz':
            os.remove(os.path.join(dicomdir, dirnames[d_i]))
        if fileExtension == '.bval':
            os.remove(os.path.join(dicomdir, dirnames[d_i]))
        if fileExtension == '.bvec':
            os.remove(os.path.join(dicomdir, dirnames[d_i]))

    basename = experiment_dir + '/' + out_prefix + '/' + out_prefix + out_suffix
    cmd = CommandLine(
        '/Users/eija/Documents/osx/dcm2nii -a Y -d N -e N -i N -p N -o %s %s' %
        (basename, dicomdir))
    print "DICOM->NII:" + cmd.cmd
    cmd.run()

    dirnames = os.listdir(dicomdir)
    filename_nii = ''
    filename_bvec = ''
    filename_bval = ''
    for d_i in range(len(dirnames)):
        fileName, fileExtension = os.path.splitext(dirnames[d_i])
        if fileExtension == '.gz':
            if len(filename_nii) > 0:
                raise ValueError("multiple copies of .nii.gz were found")
            filename_nii = fileName
        if fileExtension == '.bval':
            if len(filename_bval) > 0:
                raise ValueError("multiple copies of .bval were found")
            filename_bval = fileName
        if fileExtension == '.bvec':
            if len(filename_bvec) > 0:
                raise ValueError("multiple copies of .bvec were found")
            filename_bvec = fileName

    outfile = move_to_results((dicomdir + '/' + filename_nii + '.gz'),
                              experiment_dir, out_prefix)
    outfile_bval = ''
    outfile_bvec = ''
    if len(filename_bval) > 0:
        outfile_bval = move_to_results(
            (dicomdir + '/' + filename_bval + '.bval'), experiment_dir,
            out_prefix)
    if len(filename_bvec) > 0:
        outfile_bvec = move_to_results(
            (dicomdir + '/' + filename_bvec + '.bvec'), experiment_dir,
            out_prefix)

    return outfile, outfile_bval, outfile_bvec