Code Example #1
File: __init__.py  Project: vferat/nipype
def check_latest_version(raise_exception=False):
    """Check for the latest version of the library

    Parameters
    ----------
    raise_exception : bool
        Raise a RuntimeError if a bad version is being used
    """
    import etelemetry
    import logging
    # LooseVersion and __version__ come from module-level imports in the original file.

    logger = logging.getLogger('nipype.utils')

    INIT_MSG = "Running {packname} version {version} (latest: {latest})".format

    latest = {"version": 'Unknown', "bad_versions": []}
    result = None
    try:
        result = etelemetry.get_project("nipy/nipype")
    except Exception as e:
        logger.warning("Could not check for version updates: \n%s", e)
    finally:
        if result:
            latest.update(**result)
            if LooseVersion(__version__) != LooseVersion(latest["version"]):
                logger.info(INIT_MSG(packname='nipype',
                                     version=__version__,
                                     latest=latest["version"]))
            if latest["bad_versions"] and \
                    any([LooseVersion(__version__) == LooseVersion(ver)
                         for ver in latest["bad_versions"]]):
                message = ('You are using a version of Nipype with a critical '
                           'bug. Please use a different version.')
                if raise_exception:
                    raise RuntimeError(message)
                else:
                    logger.critical(message)
    return latest
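
The snippet above distills into a pattern that recurs in every example on this page: query etelemetry for the project's latest release, swallow any failure of the network call, and compare the result against the running version. Below is a minimal sketch of that core; the function name, the version string, and the use of packaging.version in place of the deprecated LooseVersion are placeholders, not code from any of these projects.

import logging

import etelemetry
from packaging.version import Version

__version__ = "1.0.0"  # placeholder; normally the package's own version string
logger = logging.getLogger(__name__)


def check_for_updates(repo="nipy/nipype"):
    """Return the latest-release info for *repo*, or None if the check fails."""
    try:
        latest = etelemetry.get_project(repo)
    except Exception as exc:
        # A failed version check must never break the program.
        logger.warning("Could not check for version updates: %s", exc)
        return None
    if latest and "version" in latest and Version(__version__) < Version(latest["version"]):
        logger.info("A newer release is available: %s", latest["version"])
    return latest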
Code Example #2
def main(args=None):
    """Main program function."""
    if args is None:
        namespace = parse_args(sys.argv[1:])
    else:
        namespace = parse_args(args)

    if namespace.verbosity is not None:
        utils.set_log_level(namespace.verbosity)

    logger.debug(vars(namespace))

    try:
        latest = etelemetry.get_project("kaczmarj/neurodocker")
    except RuntimeError as e:
        print("# Could not check for version updates: ", e)
    else:
        if latest and 'version' in latest:
            print("# Your version: {0} Latest version: {1}".format(__version__,
                                                                 latest["version"]))

    subparser_functions = {
        'docker': generate,
        'singularity': generate,
        'trace': reprozip_trace,
        'merge': reprozip_merge,
    }

    subparser_functions[namespace.subsubparser_name](namespace)
Code Example #3
File: __init__.py  Project: r03ert0/pydra
def check_latest_version(raise_exception=False):
    """
    Check for the latest version of the library.

    Parameters
    ----------
    raise_exception: :obj:`bool`
        Raise a RuntimeError if a bad version is being used

    """
    import etelemetry
    import logging
    from pkg_resources import parse_version

    logger = logging.getLogger("pydra")

    INIT_MSG = "Running {packname} version {version} (latest: {latest})".format

    latest = {"version": "Unknown", "bad_versions": []}
    result = None
    try:
        result = etelemetry.get_project("nipype/pydra")
    except Exception as e:
        logger.warning("Could not check for version updates: \n%s", e)
    finally:
        if result:
            latest.update(**result)
            if parse_version(__version__) != parse_version(latest["version"]):
                logger.info(
                    INIT_MSG(
                        packname="pydra", version=__version__, latest=latest["version"]
                    )
                )
            if latest["bad_versions"] and any(
                [
                    parse_version(__version__) == parse_version(ver)
                    for ver in latest["bad_versions"]
                ]
            ):
                message = (
                    "You are using a version of Pydra with a critical "
                    "bug. Please use a different version."
                )
                if raise_exception:
                    raise RuntimeError(message)
                else:
                    logger.critical(message)
    return latest
Code Example #4
File: cli.py  Project: neuronets/ams
def predict(*, infile, outprefix, batch_size, threshold, model_file):
    """Segment meningiomas in a 3D T1-weighted contrast-enhanced MRI using a trained deep neural network.

    CAUTION: this tool is not a medical product and is only intended for research purposes.

    The predictions are saved to OUTPREFIX_* with the same extension as the input file.

    If you encounter out-of-memory issues, use a lower batch size value.
    """

    msg = "CAUTION: this tool is not a medical product and is only intended for research purposes."
    msg = '\n' + '*' * len(msg) + '\n' + msg + '\n' + '*' * len(msg) + '\n'
    click.echo(click.style(msg, fg='red'))

    try:
        latest = etelemetry.get_project("neuronets/ams")
    except RuntimeError as e:
        print("Could not check for version updates: ", e)
    else:
        if latest and 'version' in latest:
            print(f"Your version: {__version__}")
            print(f"Latest version: {latest['version']}\n")

    _orig_infile = infile

    if infile.lower().endswith('.nii.gz'):
        outfile_ext = '.nii.gz'
    else:
        outfile_ext = Path(infile).suffix

    outfile = "{}{}".format(outprefix, outfile_ext)
    outfile_orig = "{}_orig{}".format(outprefix, outfile_ext)

    img = nib.load(infile)
    ndim = len(img.shape)
    if ndim != 3:
        raise ValueError("Input volume must have three dimensions but got {}.".format(ndim))
    if img.shape != _REQUIRED_SHAPE:
        tmp = tempfile.NamedTemporaryFile(suffix='.nii.gz')
        print("++ Conforming volume to 1mm^3 voxels and size 256x256x256.")
        _conform(infile, tmp.name)
        infile = tmp.name
    else:
        tmp = None

    # Load and preprocess MRI.
    img = nib.load(infile)
    x = nobrainer.io.read_volume(infile, dtype='float32')
    x = nobrainer.volume.standardize_numpy(x)
    x = nobrainer.volume.to_blocks_numpy(x, _BLOCK_SHAPE)
    x = x[..., None]  # Add grayscale channel.

    # Run forward pass of model.
    model = tf.keras.models.load_model(model_file, compile=False)
    y_ = model.predict(x, batch_size=batch_size, verbose=1)
    y_ = np.squeeze(y_, axis=-1)

    # Binarize probabilities and combine into volume.
    y_ = (y_ > threshold).astype(np.uint8)
    y_ = nobrainer.volume.from_blocks_numpy(y_, _REQUIRED_SHAPE)
    y_img = nib.Nifti1Image(y_, affine=img.affine, header=img.header)
    y_img.header.set_data_dtype(np.uint8)

    nib.save(y_img, outfile)

    # Reslice only if we conformed.
    if tmp is not None:
        print("++ Reslicing into original volume space.")
        _reslice(input=outfile, output=outfile_orig, reference=_orig_infile, labels=True)
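
The shape handling above hinges on nobrainer's block utilities: the conformed 256x256x256 volume is cut into 32x32x32 blocks for the forward pass and reassembled afterwards. A small, self-contained sketch of that round trip, assuming only that nobrainer and numpy are installed (the shapes mirror the example's _REQUIRED_SHAPE and _BLOCK_SHAPE):

import numpy as np
import nobrainer.volume

required_shape = (256, 256, 256)
block_shape = (32, 32, 32)

x = np.random.rand(*required_shape).astype("float32")

# Cut the volume into non-overlapping 32x32x32 blocks.
blocks = nobrainer.volume.to_blocks_numpy(x, block_shape)

# ... a model's forward pass would run on `blocks` here ...

# Reassemble the blocks into the original 256x256x256 volume.
y = nobrainer.volume.from_blocks_numpy(blocks, required_shape)
assert y.shape == required_shape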
Code Example #5
def predict(*, infile, outprefix, model, n_samples, batch_size, save_variance,
            save_entropy):
    """Predict labels from features using a trained model.

    The predictions are saved to OUTPREFIX_* with the same extension as the input file.

    If you encounter out-of-memory issues, use a lower batch size value.
    """
    try:
        latest = etelemetry.get_project("neuronets/kwyk")
    except RuntimeError as e:
        print("Could not check for version updates: ", e)
    else:
        if latest and 'version' in latest:
            print("Your version: {0} Latest version: {1}".format(
                __version__, latest["version"]))

    _orig_infile = infile

    # Are there other neuroimaging file extensions with multiple periods?
    if infile.lower().endswith('.nii.gz'):
        outfile_ext = '.nii.gz'
    else:
        outfile_ext = Path(infile).suffix
    outfile_stem = outprefix

    outfile_means = "{}_means{}".format(outfile_stem, outfile_ext)
    outfile_variance = "{}_variance{}".format(outfile_stem, outfile_ext)
    outfile_entropy = "{}_entropy{}".format(outfile_stem, outfile_ext)
    outfile_uncertainty = "{}_uncertainty{}".format(outfile_stem, '.json')

    for ff in [
            outfile_means, outfile_variance, outfile_entropy,
            outfile_uncertainty
    ]:
        if Path(ff).exists():
            raise FileExistsError("file exists: {}".format(ff))

    required_shape = (256, 256, 256)
    block_shape = (32, 32, 32)

    img = nib.load(infile)
    ndim = len(img.shape)
    if ndim != 3:
        raise ValueError(
            "Input volume must have three dimensions but got {}.".format(ndim))
    if img.shape != required_shape:
        tmp = tempfile.NamedTemporaryFile(suffix='.nii.gz')
        print("++ Conforming volume to 1mm^3 voxels and size 256x256x256.")
        _conform(infile, tmp.name)
        infile = tmp.name
    else:
        tmp = None

    savedmodel_path = _models[model]

    print("++ Running forward pass of model.")
    predictor = _get_predictor(savedmodel_path)
    outputs = predict_from_filepath(infile,
                                    predictor=predictor,
                                    block_shape=block_shape,
                                    return_variance=True,
                                    return_entropy=True,
                                    n_samples=n_samples,
                                    batch_size=batch_size,
                                    normalizer=zscore)

    # Delete temporary file.
    if tmp is not None:
        tmp.close()

    if n_samples > 1:
        means, variance, entropy = outputs
    else:
        means, entropy = outputs
        variance = None

    outfile_means_orig = "{}_means_orig{}".format(outfile_stem, outfile_ext)
    outfile_variance_orig = "{}_variance_orig{}".format(
        outfile_stem, outfile_ext)
    outfile_entropy_orig = "{}_entropy_orig{}".format(outfile_stem,
                                                      outfile_ext)

    print("++ Saving results.")
    data = np.round(means.get_fdata()).astype(np.uint8)
    means = nib.Nifti1Image(data, header=means.header, affine=means.affine)
    means.header.set_data_dtype(np.uint8)
    nib.save(means, outfile_means)
    _reslice(outfile_means, outfile_means_orig, _orig_infile, True)
    if save_variance and variance is not None:
        nib.save(variance, outfile_variance)
        _reslice(outfile_variance, outfile_variance_orig, _orig_infile)
    if save_entropy:
        nib.save(entropy, outfile_entropy)
        _reslice(outfile_entropy, outfile_entropy_orig, _orig_infile)
        uncertainty = np.mean(
            np.ma.masked_where(data == 0, entropy.get_fdata()))
        average_uncertainty = {"uncertainty": uncertainty}
        with open(outfile_uncertainty, "w") as fp:
            json.dump(average_uncertainty, fp, indent=4)
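
The "uncertainty" value written to the JSON file above is simply the mean entropy over voxels that received a nonzero label. A toy numpy illustration of that masked average (the numbers are made up):

import numpy as np

labels = np.array([0, 0, 1, 2, 1])              # predicted labels; 0 is background
entropy = np.array([0.9, 0.8, 0.2, 0.4, 0.3])   # per-voxel entropy

# Mask out background voxels and average the rest.
uncertainty = np.mean(np.ma.masked_where(labels == 0, entropy))
print(uncertainty)  # (0.2 + 0.4 + 0.3) / 3 = 0.3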
Code Example #6
def workflow(*, dicom_dir_template=None, files=None, subjs=None,
             converter='dcm2niix', outdir='.', locator=None, conv_outdir=None,
             anon_cmd=None, heuristic=None, with_prov=False, session=None,
             bids_options=None, overwrite=False, datalad=False, debug=False,
             command=None, grouping='studyUID', minmeta=False,
             random_seed=None, dcmconfig=None, queue=None, queue_args=None):
    """Run the HeuDiConv conversion workflow.

    Parameters
    ----------
    dicom_dir_template : str or None, optional
        Location of dicomdir that can be indexed with subject id
        {subject} and session {session}. Tarballs (can be compressed)
        are supported in addition to directory. All matching tarballs
        for a subject are extracted and their content processed in a
        single pass. If multiple tarballs are found, each is assumed to
        be a separate session and the 'session' argument is ignored.
        Mutually exclusive with 'files'. Default is None.
    files : list or None, optional
        Files (tarballs, dicoms) or directories containing files to
        process. Mutually exclusive with 'dicom_dir_template'. Default is None.
    subjs : list or None, optional
        List of subjects - required for the dicom template. If not
        provided, DICOMs will first be "sorted" and subject IDs
        deduced by the heuristic. Default is None.
    converter : {'dcm2niix', None}, optional
        Tool to use for DICOM conversion. Setting to None disables
        the actual conversion step -- useful for testing heuristics.
        Default is 'dcm2niix'.
    outdir : str, optional
        Output directory for conversion setup (for further
        customization and future reference). This directory will refer
        to non-anonymized subject IDs.
        Default is '.' (current working directory).
    locator : str or 'unknown' or None, optional
        Study path under outdir. If provided, it overloads the value
        provided by the heuristic. If 'datalad=True', every
        directory within locator becomes a super-dataset thus
        establishing a hierarchy. Setting to "unknown" will skip that
        dataset. Default is None.
    conv_outdir : str or None, optional
        Output directory for converted files. By default this is
        identical to --outdir. This option is most useful in
        combination with 'anon_cmd'. Default is None.
    anon_cmd : str or None, optional
        Command to run to convert subject IDs used for DICOMs to
        anonymized IDs. Such command must take a single argument and
        return a single anonymized ID. Also see 'conv_outdir'. Default is None.
    heuristic : str or None, optional
        Name of a known heuristic or path to the Python script containing
        heuristic. Default is None.
    with_prov : bool, optional
        Store additional provenance information. Requires python-rdflib.
        Default is False.
    session : str or None, optional
        Session for longitudinal study_sessions. Default is None.
    bids_options : str or None, optional
        Flag for output into BIDS structure. Can also take BIDS-
        specific options, e.g., --bids notop. The only currently
        supported option is "notop", which skips creation of
        top-level BIDS files. This is useful when running in batch
        mode to prevent possible race conditions. Default is None.
    overwrite : bool, optional
        Overwrite existing converted files. Default is False.
    datalad : bool, optional
        Store the entire collection as DataLad dataset(s). Small files
        will be committed directly to git, while large files go to annex.
        New version (6) annex repositories will be used in "thin" mode,
        so the result looks like any other regular directory (i.e. no
        symlinks into .git/annex). For now this applies only to BIDS
        mode. Default is False.
    debug : bool, optional
        Do not catch exceptions and show exception traceback. Default is False.
    command : {'heuristics', 'heuristic-info', 'ls', 'populate-templates',
               'sanitize-jsons', 'treat-jsons', None}, optional
        Custom action to be performed on provided files instead of regular
        operation. Default is None.
    grouping : {'studyUID', 'accession_number', 'all', 'custom'}, optional
        How to group dicoms. Default is 'studyUID'.
    minmeta : bool, optional
        Exclude dcmstack meta information in sidecar jsons. Default is False.
    random_seed : int or None, optional
        Random seed to initialize RNG. Default is None.
    dcmconfig : str or None, optional
        JSON file for additional dcm2niix configuration. Default is None.
    queue : {'SLURM', None}, optional
        Batch system to submit jobs in parallel. If set, the conversion
        will be scheduled on the batch system and the function returns
        without performing any further action. Default is None.
    queue_args : str or None, optional
        Additional queue arguments passed as single string of space-separated
        Argument=Value pairs. Default is None.

    Notes
    -----
    All parameters in this function must be called as keyword arguments.
    """

    # To be done asap so anything random is deterministic
    if random_seed is not None:
        import random
        random.seed(random_seed)
        import numpy
        numpy.random.seed(random_seed)
    # Ensure only supported bids options are passed
    if debug:
        lgr.setLevel(logging.DEBUG)
    # Should be possible but only with a single subject -- will be used to
    # override subject deduced from the DICOMs
    if files and subjs and len(subjs) > 1:
        raise ValueError(
            "Unable to processes multiple `--subjects` with files"
        )

    if debug:
        setup_exceptionhook()

    # Deal with provided files or templates
    # pre-process provided list of files and possibly sort into groups/sessions
    # Group files per each study/sid/session

    outdir = op.abspath(outdir)

    try:
        import etelemetry
        latest = etelemetry.get_project("nipy/heudiconv")
    except Exception as e:
        lgr.warning("Could not check for version updates: %s", str(e))
        latest = {"version": 'Unknown'}

    lgr.info(INIT_MSG(packname=__packagename__,
                      version=__version__,
                      latest=latest["version"]))

    if command:
        process_extra_commands(outdir, command, files, dicom_dir_template,
                               heuristic, session, subjs, grouping)
        return
    #
    # Load heuristic -- better do it asap to make sure it loads correctly
    #
    if not heuristic:
        raise RuntimeError("No heuristic specified - add to arguments and rerun")

    if queue:
        lgr.info("Queuing %s conversion", queue)
        iterarg, iterables = ("files", len(files)) if files else \
                             ("subjects", len(subjs))
        queue_conversion(queue, iterarg, iterables, queue_args)
        return

    heuristic = load_heuristic(heuristic)

    study_sessions = get_study_sessions(dicom_dir_template, files,
                                        heuristic, outdir, session,
                                        subjs, grouping=grouping)

    # extract tarballs, and replace their entries with expanded lists of files
    # TODO: we might need to sort so sessions are ordered???
    lgr.info("Need to process %d study sessions", len(study_sessions))

    # processed_studydirs = set()

    locator_manual, session_manual = locator, session
    for (locator, session, sid), files_or_seqinfo in study_sessions.items():

        # Allow for session to be overloaded from command line
        if session_manual is not None:
            session = session_manual
        if locator_manual is not None:
            locator = locator_manual
        if not len(files_or_seqinfo):
            raise ValueError("nothing to process?")
        # that is how life is ATM :-/ since we don't do sorting if subj
        # template is provided
        if isinstance(files_or_seqinfo, dict):
            assert(isinstance(list(files_or_seqinfo.keys())[0], SeqInfo))
            dicoms = None
            seqinfo = files_or_seqinfo
        else:
            dicoms = files_or_seqinfo
            seqinfo = None

        if locator == 'unknown':
            lgr.warning("Skipping unknown locator dataset")
            continue

        anon_sid = anonymize_sid(sid, anon_cmd) if anon_cmd else None
        if anon_cmd:
            lgr.info('Anonymized {} to {}'.format(sid, anon_sid))

        study_outdir = op.join(outdir, locator or '')
        anon_outdir = conv_outdir or outdir
        anon_study_outdir = op.join(anon_outdir, locator or '')

        if datalad:
            from .external.dlad import prepare_datalad
            dlad_sid = sid if not anon_sid else anon_sid
            dl_msg = prepare_datalad(anon_study_outdir, anon_outdir, dlad_sid,
                                     session, seqinfo, dicoms,
                                     bids_options)

        lgr.info("PROCESSING STARTS: {0}".format(
            str(dict(subject=sid, outdir=study_outdir, session=session))))

        prep_conversion(sid,
                        dicoms,
                        study_outdir,
                        heuristic,
                        converter=converter,
                        anon_sid=anon_sid,
                        anon_outdir=anon_study_outdir,
                        with_prov=with_prov,
                        ses=session,
                        bids_options=bids_options,
                        seqinfo=seqinfo,
                        min_meta=minmeta,
                        overwrite=overwrite,
                        dcmconfig=dcmconfig,
                        grouping=grouping,)

        lgr.info("PROCESSING DONE: {0}".format(
            str(dict(subject=sid, outdir=study_outdir, session=session))))

        if datalad:
            from .external.dlad import add_to_datalad
            msg = "Converted subject %s" % dl_msg
            # TODO:  whenever propagate to supers work -- do just
            # ds.save(msg=msg)
            #  also in batch mode might fail since we have no locking ATM
            #  and theoretically no need actually to save entire study
            #  we just need that
            add_to_datalad(outdir, study_outdir, msg, bids_options)
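
Every parameter of workflow is keyword-only (note the bare * in the signature), so each argument must be named at the call site. A hypothetical invocation, with illustrative paths and heuristic name, might look like:

workflow(
    files=["/data/dicoms/sub-01.tgz"],  # illustrative path to a tarball or DICOM directory
    outdir="/data/output",              # illustrative output directory
    heuristic="reproin",                # name of a known heuristic or a path to a Python script
    converter="dcm2niix",
    overwrite=True,
)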
Code Example #7
def process_args(args):
    """Given a structure of arguments from the parser perform computation"""

    # Deal with provided files or templates
    # pre-process provided list of files and possibly sort into groups/sessions
    # Group files per each study/sid/session

    outdir = op.abspath(args.outdir)

    try:
        import etelemetry
        latest = etelemetry.get_project("nipy/heudiconv")
    except Exception as e:
        lgr.warning("Could not check for version updates: %s", str(e))
        latest = {"version": 'Unknown'}

    lgr.info(
        INIT_MSG(packname=__packagename__,
                 version=__version__,
                 latest=latest["version"]))

    if args.command:
        process_extra_commands(outdir, args)
        return
    #
    # Load heuristic -- better do it asap to make sure it loads correctly
    #
    if not args.heuristic:
        raise RuntimeError(
            "No heuristic specified - add to arguments and rerun")

    if args.queue:
        lgr.info("Queuing %s conversion", args.queue)
        iterarg, iterables = ("files", len(args.files)) if args.files else \
                             ("subjects", len(args.subjs))
        queue_conversion(args.queue, iterarg, iterables, args.queue_args)
        sys.exit(0)

    heuristic = load_heuristic(args.heuristic)

    study_sessions = get_study_sessions(args.dicom_dir_template,
                                        args.files,
                                        heuristic,
                                        outdir,
                                        args.session,
                                        args.subjs,
                                        grouping=args.grouping)

    # extract tarballs, and replace their entries with expanded lists of files
    # TODO: we might need to sort so sessions are ordered???
    lgr.info("Need to process %d study sessions", len(study_sessions))

    # processed_studydirs = set()

    for (locator, session, sid), files_or_seqinfo in study_sessions.items():

        # Allow for session to be overloaded from command line
        if args.session is not None:
            session = args.session
        if args.locator is not None:
            locator = args.locator
        if not len(files_or_seqinfo):
            raise ValueError("nothing to process?")
        # that is how life is ATM :-/ since we don't do sorting if subj
        # template is provided
        if isinstance(files_or_seqinfo, dict):
            assert (isinstance(list(files_or_seqinfo.keys())[0], SeqInfo))
            dicoms = None
            seqinfo = files_or_seqinfo
        else:
            dicoms = files_or_seqinfo
            seqinfo = None

        if locator == 'unknown':
            lgr.warning("Skipping unknown locator dataset")
            continue

        anon_sid = anonymize_sid(sid, args.anon_cmd) if args.anon_cmd else None
        if args.anon_cmd:
            lgr.info('Anonymized {} to {}'.format(sid, anon_sid))

        study_outdir = op.join(outdir, locator or '')
        anon_outdir = args.conv_outdir or outdir
        anon_study_outdir = op.join(anon_outdir, locator or '')

        # TODO: --datalad  cmdline option, which would take care about initiating
        # the outdir -> study_outdir datasets if not yet there
        if args.datalad:
            from ..external.dlad import prepare_datalad
            dlad_sid = sid if not anon_sid else anon_sid
            dl_msg = prepare_datalad(anon_study_outdir, anon_outdir, dlad_sid,
                                     session, seqinfo, dicoms,
                                     args.bids_options)

        lgr.info("PROCESSING STARTS: {0}".format(
            str(dict(subject=sid, outdir=study_outdir, session=session))))

        prep_conversion(
            sid,
            dicoms,
            study_outdir,
            heuristic,
            converter=args.converter,
            anon_sid=anon_sid,
            anon_outdir=anon_study_outdir,
            with_prov=args.with_prov,
            ses=session,
            bids_options=args.bids_options,
            seqinfo=seqinfo,
            min_meta=args.minmeta,
            overwrite=args.overwrite,
            dcmconfig=args.dcmconfig,
            grouping=args.grouping,
        )

        lgr.info("PROCESSING DONE: {0}".format(
            str(dict(subject=sid, outdir=study_outdir, session=session))))

        if args.datalad:
            from ..external.dlad import add_to_datalad
            msg = "Converted subject %s" % dl_msg
            # TODO:  whenever propagate to supers work -- do just
            # ds.save(msg=msg)
            #  also in batch mode might fail since we have no locking ATM
            #  and theoretically no need actually to save entire study
            #  we just need that
            add_to_datalad(outdir, study_outdir, msg, args.bids_options)
Code Example #8
File: __init__.py  Project: rzlim08/mindboggle
#__doc__ += """
#"""

# Set up package information function
try:
    from .pkg_info import get_pkg_info as _get_pkg_info
except:
    get_info = lambda: ""
else:
    get_info = lambda : _get_pkg_info(os.path.dirname(__file__))

# module imports
#from . import blah as blah
# object imports
#from .blah import blah, blah

INIT_MSG = "Running {packname} version {version} (latest: {latest})".format
latest = {"version": 'Unknown'}
try:
    from .version import __version__
    import etelemetry
    latest = etelemetry.get_project("nipy/mindboggle")
except Exception as e:
    print("Could not check for version updates: ", e)
finally:
    print(INIT_MSG(packname='mindboggle',
                   version=__version__,
                   latest=latest["version"]))