Example No. 1
 def _wrapper(func):
     """Wrapper that registers a function if it satisfies requirements."""
     try:
         auto_d = auto_discover
         require_module(requires, min_version=min_version, mandatory=True)
         AVAILABLE_LOADERS[name] = dict(func=func, auto_discover=auto_d)
     except (ImportError, AssertionError):
         pass
     SUPPORTED_LOADERS[name] = (requires, min_version)
     return func
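
The `_wrapper` shown above is the inner function of a decorator factory: a loader is added to `AVAILABLE_LOADERS` only when `require_module` can satisfy its requirements, while `SUPPORTED_LOADERS` always records what would be needed. A minimal sketch of how such a factory might be wired up (the `register_loader` name and its default arguments are illustrative assumptions, not the NiftyNet API):

from niftynet.utilities.util_import import require_module

AVAILABLE_LOADERS = {}
SUPPORTED_LOADERS = {}

def register_loader(name, requires, min_version=None, auto_discover=True):
    """Register an image loader only if its optional dependency is importable."""
    def _wrapper(func):
        try:
            require_module(requires, min_version=min_version, mandatory=True)
            AVAILABLE_LOADERS[name] = dict(func=func, auto_discover=auto_discover)
        except (ImportError, AssertionError):
            pass
        SUPPORTED_LOADERS[name] = (requires, min_version)
        return func
    return _wrapper

# Usage sketch:
# @register_loader('opencv', requires='cv2')
# def imread_opencv(filename):
#     ...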
Example No. 2
    def _normalise_image(self, image):
        """
        Normalises a 2D RGB image, performing any type casting
        and reshaping operations that are necessary.
        :param image: a 2D RGB image, possibly given as a 5D tensor
        :return: the normalised image in its original shape
        """

        # cast floating-point inputs to float32; rescale unsigned integer
        # inputs (e.g. uint8) to float32 values in [0, 1]
        if np.issubdtype(image.dtype, np.floating) and image.dtype != np.float32:
            image = image.astype(np.float32)
        elif np.issubdtype(image.dtype, np.unsignedinteger):
            image = image.astype(np.float32)/255

        orig_shape = list(image.shape)
        if len(orig_shape) == 5 and (orig_shape[2] > 1 or orig_shape[3] > 1):
            raise ValueError('Can only process 2D images.')

        if len(image.shape) != 3:
            image = image.reshape(orig_shape[:2] + [orig_shape[-1]])

        image = image[...,::-1]

        cv2 = require_module('cv2')
        yuv_image = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)
        intensity = (255*yuv_image[...,0]).astype(np.uint8)
        yuv_image[...,0] = cv2.equalizeHist(intensity).astype(np.float32)/255

        return cv2.cvtColor(yuv_image, cv2.COLOR_YUV2BGR)[...,::-1]\
                  .reshape(orig_shape)
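
The method above equalises only the luma (Y) channel in YUV space, so relative colour information is preserved while contrast is stretched. A self-contained sketch of the same technique on a plain float RGB array (assuming cv2 and numpy are available; names and shapes here are illustrative):

import numpy as np
import cv2  # OpenCV is assumed to be installed

def equalise_rgb(rgb):
    """Histogram-equalise the luma channel of a float32 RGB image in [0, 1]."""
    bgr = rgb[..., ::-1].astype(np.float32)      # OpenCV expects BGR channel order
    yuv = cv2.cvtColor(bgr, cv2.COLOR_BGR2YUV)
    luma = (255 * yuv[..., 0]).astype(np.uint8)  # equalizeHist needs an 8-bit image
    yuv[..., 0] = cv2.equalizeHist(luma).astype(np.float32) / 255
    return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR)[..., ::-1]

# Usage sketch:
# image = np.random.rand(64, 64, 3).astype(np.float32)
# equalised = equalise_rgb(image)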
Example No. 3
def imread_opencv(filename):
    """OpenCV image loader with identity 2D affine."""
    cv2 = require_module('cv2')
    img = cv2.imread(filename, flags=-1)
    if img is None:
        raise IOError(filename)
    return image2nibabel(img[..., ::-1])
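
`image2nibabel` is used by every loader on this page but is not shown here. A rough sketch of the kind of wrapping it presumably performs, assuming nibabel is available (the identity-affine default and the return type are assumptions, not the NiftyNet implementation):

import numpy as np
import nibabel as nib  # assumed to be installed

def image2nibabel_sketch(img, affine=None):
    """Wrap a numpy image in a NIfTI object, defaulting to an identity affine."""
    if affine is None:
        affine = np.eye(4)
    return nib.Nifti1Image(np.asarray(img), affine)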
Example No. 4
def _image3_animated_gif(tag, ims):
    PIL = require_module('PIL')
    from PIL.GifImagePlugin import Image as GIF

    # x=numpy.random.randint(0,256,[10,10,10],numpy.uint8)
    ims = [
        np.asarray((ims[i, :, :]).astype(np.uint8))
        for i in range(ims.shape[0])
    ]
    ims = [GIF.fromarray(im) for im in ims]
    img_str = b''
    for b_data in PIL.GifImagePlugin.getheader(ims[0])[0]:
        img_str += b_data
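    # GIF89a application extension block ('NETSCAPE2.0'): loop the animation indefinitely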
    img_str += b'\x21\xFF\x0B\x4E\x45\x54\x53\x43\x41\x50' \
         b'\x45\x32\x2E\x30\x03\x01\x00\x00\x00'
    for i in ims:
        for b_data in PIL.GifImagePlugin.getdata(i):
            img_str += b_data
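    # 0x3B is the GIF trailer byte that terminates the stream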
    img_str += b'\x3B'
    if IS_PYTHON2:
        img_str = str(img_str)
    summary_image_str = summary_pb2.Summary.Image(
        height=10, width=10, colorspace=1, encoded_image_string=img_str)
    image_summary = summary_pb2.Summary.Value(tag=tag, image=summary_image_str)
    return [summary_pb2.Summary(value=[image_summary]).SerializeToString()]
Example No. 5
def imread_sitk(filename):
    """SimpleITK requires two function calls to retrieve a numpy array."""
    sitk = require_module('SimpleITK')
    try:
        simg = sitk.ReadImage(filename)
    except RuntimeError:
        raise IOError(filename)
    img = sitk.GetArrayFromImage(simg)
    return image2nibabel(img, affine=make_affine_from_sitk(simg))
Example No. 6
def imread_sitk(filename):
    """SimpleITK requires two function calls to retrieve a numpy array."""
    sitk = require_module('SimpleITK')
    try:
        simg = sitk.ReadImage(filename)
    except RuntimeError:
        raise IOError(filename)
    img = sitk.GetArrayFromImage(simg)
    if simg.GetDimension() > 2:
        img = img.transpose()
    return image2nibabel(img, affine=make_affine_from_sitk(simg))
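
`make_affine_from_sitk` is referenced but not shown; it converts the SimpleITK direction, spacing and origin metadata into a 4x4 affine. A minimal sketch of that construction (the LPS-to-RAS sign flip is an assumption about the coordinate convention, not a copy of the NiftyNet helper):

import numpy as np
import SimpleITK as sitk  # assumed to be installed

def make_affine_from_sitk_sketch(simg):
    """Build a 4x4 affine from a SimpleITK image's direction, spacing and origin."""
    ndim = simg.GetDimension()
    direction = np.asarray(simg.GetDirection()).reshape(ndim, ndim)
    spacing = np.asarray(simg.GetSpacing())
    origin = np.asarray(simg.GetOrigin())
    affine = np.eye(4)
    affine[:ndim, :ndim] = direction * spacing  # scale direction cosines by voxel size
    affine[:ndim, 3] = origin
    # SimpleITK reports LPS coordinates; NIfTI affines are usually RAS,
    # so flip the first two rows, including their translation components.
    affine[:2] *= -1
    return affine

# Usage sketch:
# simg = sitk.ReadImage(filename)
# affine = make_affine_from_sitk_sketch(simg)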
Example No. 7
    def test_equilisation(self):
        cv2 = require_module('cv2', mandatory=False)

        if cv2 is None:
            self.skipTest('requires cv2 module')
            return

        def _get_histogram(img):
            inten = cv2.cvtColor(img[::-1], cv2.COLOR_BGR2YUV)[..., 0] * 255

            return np.histogram(inten, 32, [0, 256])[0]

        hist_before = _get_histogram(IMAGE_DATA)

        layer = RGBHistogramEquilisationLayer(image_name='image')
        orig_shape = list(IMAGE_DATA.shape)
        input_shape = orig_shape[:2] + [1] * 2 + [3]
        img, _ = layer(IMAGE_DATA.reshape(input_shape))

        hist_after = _get_histogram(img.reshape(orig_shape))

        self.assertGreater(
            hist_before.astype(np.float32).std(),
            hist_after.astype(np.float32).std())

        img, _ = layer({'image': IMAGE_DATA.reshape(input_shape)})

        hist_after = _get_histogram(img['image'].reshape(orig_shape))

        self.assertGreater(
            hist_before.astype(np.float32).std(),
            hist_after.astype(np.float32).std())

        img = (255 * IMAGE_DATA).astype(np.uint8)
        img, _ = layer({'image': img.reshape(input_shape)})

        hist_after = _get_histogram(img['image'].reshape(orig_shape))

        self.assertGreater(
            hist_before.astype(np.float32).std(),
            hist_after.astype(np.float32).std())
Example No. 8
def _image3_animated_gif(tag, ims):
    PIL = require_module('PIL')
    from PIL.GifImagePlugin import Image as GIF

    # x=numpy.random.randint(0,256,[10,10,10],numpy.uint8)
    ims = [np.asarray((ims[i, :, :]).astype(np.uint8))
           for i in range(ims.shape[0])]
    ims = [GIF.fromarray(im) for im in ims]
    s = b''
    for b in PIL.GifImagePlugin.getheader(ims[0])[0]:
        s += b
    s += b'\x21\xFF\x0B\x4E\x45\x54\x53\x43\x41\x50' \
         b'\x45\x32\x2E\x30\x03\x01\x00\x00\x00'
    for i in ims:
        for b in PIL.GifImagePlugin.getdata(i):
            s += b
    s += b'\x3B'
    if IS_PYTHON2:
        s = str(s)
    summary_image_str = summary_pb2.Summary.Image(
        height=10, width=10, colorspace=1, encoded_image_string=s)
    image_summary = summary_pb2.Summary.Value(
        tag=tag, image=summary_image_str)
    return [summary_pb2.Summary(value=[image_summary]).SerializeToString()]
Example No. 9
 def test_no_package(self):
     with self.assertRaisesRegexp(ImportError, ''):
         require_module('foobar_wrong_case')
Example No. 10
 def test_installed_min_version(self):
     require_module('tensorflow', 1.0)
Example No. 11
def imread_pillow(filename):
    """PIL (Pillow) image loader with an identity affine matrix."""
    pil = require_module('PIL.Image')
    img = np.asarray(pil.open(filename))
    return image2nibabel(img)
Example No. 12
 def test_installed(self):
     require_module('tensorflow')
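
Taken together, the tests on this page pin down the contract of `require_module`: it imports and returns the named module, optionally checks a minimum version, raises ImportError or AssertionError on failure, and returns None when `mandatory=False`. A rough sketch of an implementation with those semantics (the True default for `mandatory` is inferred from the tests; this illustrates the contract only and is not the NiftyNet source):

import importlib

def require_module_sketch(name, min_version=None, mandatory=True, descriptor=''):
    """Import an optional dependency, optionally enforcing a minimum version."""
    try:
        if not name:
            raise ImportError('no module name given')
        module = importlib.import_module(name)
        if min_version is not None and hasattr(module, '__version__'):
            # crude numeric comparison on the leading 'major.minor' components
            installed = float('.'.join(module.__version__.split('.')[:2]))
            assert installed >= float(min_version), \
                '{} {} needs version {}+'.format(descriptor, name, min_version)
        return module
    except (ImportError, AssertionError):
        if mandatory:
            raise
        return None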
Example No. 13
def add_network_args(parser):
    """
    keywords defined for network specification

    :param parser:
    :return:
    """
    import niftynet.layer.binary_masking
    import niftynet.layer.activation
    import niftynet.utilities.histogram_standardisation as hist_std_module

    parser.add_argument(
        "--name",
        help="Choose a net from NiftyNet/niftynet/network/ or from "
             "user specified module string",
        metavar='')

    parser.add_argument(
        "--activation_function",
        help="Specify activation function types",
        choices=list(niftynet.layer.activation.SUPPORTED_OP),
        metavar='TYPE_STR',
        default='relu')

    parser.add_argument(
        "--batch_size",
        metavar='',
        help="Set batch size of the net",
        type=int,
        default=2)

    parser.add_argument(
        "--decay",
        help="[Training only] Set weight decay",
        type=float,
        default=0.0)

    parser.add_argument(
        "--reg_type",
        metavar='TYPE_STR',
        help="[Training only] Specify regulariser type_str",
        type=str,
        default='L2')

    parser.add_argument(
        "--volume_padding_size",
        metavar='',
        help="Set padding size of each volume (in all dimensions)",
        type=spatialnumarray,
        default=(0, 0, 0))

    parser.add_argument(
        "--window_sampling",
        metavar='TYPE_STR',
        help="How to sample patches from each loaded image:"
             " 'uniform': fixed size uniformly distributed,"
             " 'resize': resize image to the patch size.",
        choices=['uniform', 'resize', 'balanced', 'weighted'],
        default='uniform')

    parser.add_argument(
        "--queue_length",
        help="Set size of preprocessing buffer queue",
        metavar='',
        type=int,
        default=5)

    parser.add_argument(
        "--multimod_foreground_type",
        choices=list(
            niftynet.layer.binary_masking.SUPPORTED_MULTIMOD_MASK_TYPES),
        help="Way of combining the foreground masks from different "
             "modalities. 'and' is the intersection, 'or' is the union "
             "and 'multi' permits each modality to use its own mask.",
        default='and')

    parser.add_argument(
        "--histogram_ref_file",
        metavar='',
        type=str,
        help="A reference file of histogram for intensity normalisation",
        default=DEFAULT_HISTOGRAM_REF_FILE)

    parser.add_argument(
        "--norm_type",
        help="Type of normalisation to perform",
        type=str,
        default='percentile',
        choices=list(hist_std_module.SUPPORTED_CUTPOINTS))

    parser.add_argument(
        "--cutoff",
        help="Cutoff values for the normalisation process",
        type=float_array,
        default=(0.01, 0.99))

    parser.add_argument(
        "--foreground_type",
        choices=list(
            niftynet.layer.binary_masking.SUPPORTED_MASK_TYPES),
        help="type_str of foreground masking strategy used",
        default='otsu_plus')

    parser.add_argument(
        "--normalisation",
        help="Indicates if the normalisation must be performed",
        type=str2boolean,
        default=False)

    parser.add_argument(
        "--whitening",
        help="Indicates if the whitening of the data should be applied",
        type=str2boolean,
        default=False)

    parser.add_argument(
        "--normalise_foreground_only",
        help="Indicates whether a foreground mask should be applied when"
             " normalising volumes",
        type=str2boolean,
        default=False)

    parser.add_argument(
        "--weight_initializer",
        help="Set the initializer for the weight parameters",
        type=str,
        default='he_normal')

    parser.add_argument(
        "--bias_initializer",
        help="Set the initializer for the bias parameters",
        type=str,
        default='zeros')

    yaml = require_module('yaml', mandatory=False)
    if yaml:
        parser.add_argument(
            "--weight_initializer_args",
            help="Pass arguments to the initializer for the weight parameters",
            type=yaml.load,
            default={})
        parser.add_argument(
            "--bias_initializer_args",
            help="Pass arguments to the initializer for the bias parameters",
            type=yaml.load,
            default={})

    return parser
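
The tail of `add_network_args` demonstrates the pattern behind most examples on this page: probe an optional dependency with `require_module(..., mandatory=False)` and only register the features that depend on it. The same pattern in isolation, as a minimal sketch (the argument name is taken from the function above; `yaml.safe_load` is used here as a stand-in for `yaml.load`, which requires an explicit Loader in recent PyYAML releases):

import argparse

from niftynet.utilities.util_import import require_module

parser = argparse.ArgumentParser()

# Probe the optional dependency; None is returned because mandatory=False.
yaml = require_module('yaml', mandatory=False)
if yaml:
    parser.add_argument('--weight_initializer_args',
                        type=yaml.safe_load,
                        default={})

# Usage sketch (only meaningful when PyYAML is installed):
# args = parser.parse_args(['--weight_initializer_args', '{seed: 42}'])
# args.weight_initializer_args == {'seed': 42}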
Example No. 14
 def test_no_version_info(self):
     require_module('importlib', 0)
Example No. 15
 def test_wrong_version(self):
     with self.assertRaisesRegexp(AssertionError, ''):
         require_module('tensorflow', 100, mandatory=True)
Example No. 16
 def test_self_version(self):
     require_module('importlib')
Example No. 17
 def test_no_package(self):
     with self.assertRaisesRegexp(ImportError, ''):
         require_module('foobar_wrong_case', mandatory=True)
Example No. 18
Data augmentation using elastic deformations as used by:
Milletari,F., Navab, N., & Ahmadi, S. A. (2016) V-net:
Fully convolutional neural networks for volumetric medical
image segmentation
"""

from __future__ import absolute_import, print_function

import warnings

import numpy as np

from niftynet.layer.base_layer import RandomisedLayer
from niftynet.utilities.util_import import require_module

sitk = require_module('SimpleITK')

warnings.simplefilter("ignore", UserWarning)
warnings.simplefilter("ignore", RuntimeWarning)


class RandomElasticDeformationLayer(RandomisedLayer):
    """
    generate randomised elastic deformations
    along each dim for data augmentation
    """

    def __init__(self,
                 num_controlpoints=4,
                 std_deformation_sigma=15,
                 proportion_to_augment=0.5,
Example No. 19
 def test_wrong_version(self):
     with self.assertRaisesRegexp(AssertionError, ''):
         require_module('tensorflow', 100)
Example No. 20
def add_network_args(parser):
    """
    keywords defined for network specification

    :param parser:
    :return:
    """
    import niftynet.layer.binary_masking
    import niftynet.layer.activation
    import niftynet.utilities.histogram_standardisation as hist_std_module

    parser.add_argument(
        "--name",
        help="Choose a net from NiftyNet/niftynet/network/ or from "
        "user specified module string",
        metavar='')

    parser.add_argument("--activation_function",
                        help="Specify activation function types",
                        choices=list(niftynet.layer.activation.SUPPORTED_OP),
                        metavar='TYPE_STR',
                        default='relu')

    parser.add_argument("--batch_size",
                        metavar='',
                        help="Set batch size of the net",
                        type=int,
                        default=2)

    parser.add_argument("--smaller_final_batch_mode",
                        metavar='TYPE_STR',
                        help="If True, allow the final batch to be smaller "
                        "if there are insufficient items left in the queue, "
                        "and the batch size will be undetermined during "
                        "graph construction.",
                        choices=list(SMALLER_FINAL_BATCH_MODE),
                        default='pad')

    parser.add_argument("--decay",
                        help="[Training only] Set weight decay",
                        type=float,
                        default=0.0)

    parser.add_argument("--reg_type",
                        metavar='TYPE_STR',
                        help="[Training only] Specify regulariser type_str",
                        type=str,
                        default='L2')

    parser.add_argument(
        "--volume_padding_size",
        metavar='',
        help="Set padding size of each volume (in all dimensions)",
        type=spatialnumarray,
        default=(0, 0, 0))

    parser.add_argument("--volume_padding_mode",
                        metavar='',
                        help="Set which type of numpy padding to do, see "
                        "https://docs.scipy.org/doc/numpy-1.14.0/"
                        "reference/generated/numpy.pad.html "
                        "for details",
                        type=str,
                        default='minimum')

    parser.add_argument(
        "--volume_padding_to_size",
        help="Choose size to pad all input volumes to. Any dimensions "
        "that exceed the desired size will be kept the same. Default: "
        "(0, ) which indicates not to use this mode. ",
        type=spatialnumarray,
        default=(0, ))

    parser.add_argument(
        "--window_sampling",
        metavar='TYPE_STR',
        help="How to sample patches from each loaded image:"
        " 'uniform': fixed size uniformly distributed,"
        " 'resize': resize image to the patch size.",
        choices=['uniform', 'resize', 'balanced', 'weighted', 'patch'],
        default='uniform')

    parser.add_argument(
        "--force_output_identity_resizing",
        type=str2boolean,
        help="Forces the shape of the inferred output to match the "
        "input label shape rather than be resized to input image shape.",
        default=False)

    parser.add_argument("--queue_length",
                        help="Set size of preprocessing buffer queue",
                        metavar='',
                        type=int,
                        default=5)

    parser.add_argument(
        "--multimod_foreground_type",
        choices=list(
            niftynet.layer.binary_masking.SUPPORTED_MULTIMOD_MASK_TYPES),
        help="Way of combining the foreground masks from different "
        "modalities. 'and' is the intersection, 'or' is the union "
        "and 'multi' permits each modality to use its own mask.",
        default='and')

    parser.add_argument(
        "--histogram_ref_file",
        metavar='',
        type=str,
        help="A reference file of histogram for intensity normalisation",
        default=DEFAULT_HISTOGRAM_REF_FILE)

    parser.add_argument("--norm_type",
                        help="Type of normalisation to perform",
                        type=str,
                        default='percentile',
                        choices=list(hist_std_module.SUPPORTED_CUTPOINTS))

    parser.add_argument("--cutoff",
                        help="Cutoff values for the normalisation process",
                        type=float_array,
                        default=(0.01, 0.99))

    parser.add_argument(
        "--foreground_type",
        choices=list(niftynet.layer.binary_masking.SUPPORTED_MASK_TYPES),
        help="type_str of foreground masking strategy used",
        default='otsu_plus')

    parser.add_argument(
        "--normalisation",
        help="Indicates if the normalisation must be performed",
        type=str2boolean,
        default=False)

    parser.add_argument(
        "--rgb_normalisation",
        help="Indicates if RGB histogram equilisation should be performed",
        type=str2boolean,
        default=False)

    parser.add_argument(
        "--whitening",
        help="Indicates if the whitening of the data should be applied",
        type=str2boolean,
        default=False)

    parser.add_argument(
        "--normalise_foreground_only",
        help="Indicates whether a foreground mask should be applied when"
        " normalising volumes",
        type=str2boolean,
        default=False)

    parser.add_argument("--weight_initializer",
                        help="Set the initializer for the weight parameters",
                        type=str,
                        default='he_normal')

    parser.add_argument("--bias_initializer",
                        help="Set the initializer for the bias parameters",
                        type=str,
                        default='zeros')

    parser.add_argument("--keep_prob",
                        help="Probability that each element is kept "
                        "if dropout is supported by the network",
                        type=float,
                        default=1.0)

    yaml = require_module('yaml', mandatory=False)
    if yaml:
        parser.add_argument(
            "--weight_initializer_args",
            help="Pass arguments to the initializer for the weight parameters",
            type=yaml.load,
            default={})
        parser.add_argument(
            "--bias_initializer_args",
            help="Pass arguments to the initializer for the bias parameters",
            type=yaml.load,
            default={})

    return parser
Example No. 21
 def test_no_input(self):
     with self.assertRaisesRegexp(ImportError, ''):
         require_module([])
     with self.assertRaisesRegexp(ImportError, ''):
         require_module(None)
Example No. 22
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from tensorflow.python.util import deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
try:
    from tensorflow.python.util import module_wrapper as deprecation
except ImportError:
    from tensorflow.python.util import deprecation_wrapper as deprecation
deprecation._PER_MODULE_WARNING_LIMIT = 0

from niftynet.io.misc_io import set_logger, close_logger

set_logger()

from niftynet.utilities.util_import import require_module

require_module('blinker', descriptor='New dependency', mandatory=True)

from tensorflow.python.util import deprecation

deprecation._PRINT_DEPRECATION_WARNINGS = False

from niftynet.engine.signal import TRAIN, INFER, EVAL, EXPORT
import niftynet.utilities.util_common as util
import niftynet.utilities.user_parameters_parser as user_parameters_parser
from niftynet.engine.application_driver import ApplicationDriver
from niftynet.evaluation.evaluation_application_driver import \
    EvaluationApplicationDriver
from niftynet.io.misc_io import touch_folder
from niftynet.io.misc_io import resolve_module_dir
from niftynet.io.misc_io import to_absolute_path
Example No. 23
def imread_skimage(filename):
    """Scikit-image loader with an identity affine matrix."""
    skio = require_module('skimage.io')
    img = skio.imread(filename)
    return image2nibabel(img)
Example No. 24
def add_network_args(parser):
    """
    keywords defined for network specification

    :param parser:
    :return:
    """
    import niftynet.layer.binary_masking
    import niftynet.layer.activation
    import niftynet.utilities.histogram_standardisation as hist_std_module

    parser.add_argument(
        "--name",
        help="Choose a net from NiftyNet/niftynet/network/ or from "
        "user specified module string",
        metavar='')

    parser.add_argument("--activation_function",
                        help="Specify activation function types",
                        choices=list(niftynet.layer.activation.SUPPORTED_OP),
                        metavar='TYPE_STR',
                        default='relu')

    parser.add_argument("--batch_size",
                        metavar='',
                        help="Set batch size of the net",
                        type=int,
                        default=2)

    parser.add_argument("--decay",
                        help="[Training only] Set weight decay",
                        type=float,
                        default=0.0)

    parser.add_argument("--reg_type",
                        metavar='TYPE_STR',
                        help="[Training only] Specify regulariser type_str",
                        type=str,
                        default='L2')

    parser.add_argument(
        "--volume_padding_size",
        metavar='',
        help="Set padding size of each volume (in all dimensions)",
        type=spatialnumarray,
        default=(0, 0, 0))

    parser.add_argument("--window_sampling",
                        metavar='TYPE_STR',
                        help="How to sample patches from each loaded image:"
                        " 'uniform': fixed size uniformly distributed,"
                        " 'resize': resize image to the patch size.",
                        choices=['uniform', 'resize', 'balanced', 'weighted'],
                        default='uniform')

    parser.add_argument("--queue_length",
                        help="Set size of preprocessing buffer queue",
                        metavar='',
                        type=int,
                        default=5)

    parser.add_argument(
        "--multimod_foreground_type",
        choices=list(
            niftynet.layer.binary_masking.SUPPORTED_MULTIMOD_MASK_TYPES),
        help="Way of combining the foreground masks from different "
        "modalities. 'and' is the intersection, 'or' is the union "
        "and 'multi' permits each modality to use its own mask.",
        default='and')

    parser.add_argument(
        "--histogram_ref_file",
        metavar='',
        type=str,
        help="A reference file of histogram for intensity normalisation",
        default=DEFAULT_HISTOGRAM_REF_FILE)

    parser.add_argument("--norm_type",
                        help="Type of normalisation to perform",
                        type=str,
                        default='percentile',
                        choices=list(hist_std_module.SUPPORTED_CUTPOINTS))

    parser.add_argument("--cutoff",
                        help="Cutoff values for the normalisation process",
                        type=float_array,
                        default=(0.01, 0.99))

    parser.add_argument(
        "--foreground_type",
        choices=list(niftynet.layer.binary_masking.SUPPORTED_MASK_TYPES),
        help="type_str of foreground masking strategy used",
        default='otsu_plus')

    parser.add_argument(
        "--normalisation",
        help="Indicates if the normalisation must be performed",
        type=str2boolean,
        default=False)

    parser.add_argument(
        "--whitening",
        help="Indicates if the whitening of the data should be applied",
        type=str2boolean,
        default=False)

    parser.add_argument(
        "--normalise_foreground_only",
        help="Indicates whether a foreground mask should be applied when"
        " normalising volumes",
        type=str2boolean,
        default=False)

    parser.add_argument("--weight_initializer",
                        help="Set the initializer for the weight parameters",
                        type=str,
                        default='he_normal')

    parser.add_argument("--bias_initializer",
                        help="Set the initializer for the bias parameters",
                        type=str,
                        default='zeros')

    yaml = require_module('yaml', mandatory=False)
    if yaml:
        parser.add_argument(
            "--weight_initializer_args",
            help="Pass arguments to the initializer for the weight parameters",
            type=yaml.load,
            default={})
        parser.add_argument(
            "--bias_initializer_args",
            help="Pass arguments to the initializer for the bias parameters",
            type=yaml.load,
            default={})

    return parser
Example No. 25
 def test_no_input(self):
     with self.assertRaisesRegexp(ImportError, ''):
         require_module([], mandatory=True)
     with self.assertRaisesRegexp(ImportError, ''):
         require_module(None, mandatory=True)
Example No. 26
except AttributeError:
    pass

from niftynet.utilities.versioning import get_niftynet_version_string

__version__ = get_niftynet_version_string()

import os

from niftynet.io.misc_io import set_logger

set_logger()

from niftynet.utilities.util_import import require_module

require_module('blinker', descriptor='New dependency', mandatory=True)

import niftynet.utilities.util_common as util
import niftynet.utilities.user_parameters_parser as user_parameters_parser
from niftynet.engine.application_driver import ApplicationDriver
from niftynet.evaluation.evaluation_application_driver import \
    EvaluationApplicationDriver
from niftynet.io.misc_io import touch_folder
from niftynet.io.misc_io import resolve_module_dir
from niftynet.io.misc_io import to_absolute_path


def main():
    system_param, input_data_param = user_parameters_parser.run()
    if util.has_bad_inputs(system_param):
        return -1