Example #1
    def _run_composite_model(self, model, recalculate, model_names):
        with mot.configuration.config_context(
                RuntimeConfigurationAction(cl_environments=self._cl_envs,
                                           load_balancer=self._load_balancer)):
            with per_model_logging_context(
                    os.path.join(self._output_folder, model.name)):
                self._logger.info('Using MDT version {}'.format(__version__))
                self._logger.info('Preparing for model {0}'.format(model.name))
                self._logger.info('Current cascade: {0}'.format(model_names))

                optimizer = self._optimizer or get_optimizer_for_model(
                    model_names)

                if self._cl_device_indices is not None:
                    all_devices = get_cl_devices()
                    optimizer.cl_environments = [
                        all_devices[ind] for ind in self._cl_device_indices
                    ]
                    optimizer.load_balancer = EvenDistribution()

                processing_strategy = get_processing_strategy(
                    'optimization', model_names=model_names)
                processing_strategy.set_tmp_dir(self._tmp_results_dir)

                fitter = SingleModelFit(model,
                                        self._problem_data,
                                        self._output_folder,
                                        optimizer,
                                        processing_strategy,
                                        recalculate=recalculate)
                results = fitter.run()

        return results
Example #2
    def load(self, value):
        if 'cl_device_ind' in value:
            if value['cl_device_ind'] is not None:
                from mdt.utils import get_cl_devices
                devices = get_cl_devices(value['cl_device_ind'])

                if devices:
                    mot.configuration.set_cl_environments(devices)
Example #3
def get_cl_devices():
    """Get a list of all CL devices in the system.

    The indices of the devices can be used in the model fitting/sample functions for 'cl_device_ind'.

    Returns:
        A list of CLEnvironments, one for each device in the system.
    """
    from mdt.utils import get_cl_devices
    return get_cl_devices()
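
A minimal usage sketch tying this device list to the 'cl_device_ind' arguments used throughout these examples. It assumes the wrapper above is exposed on the top-level mdt package (as suggested by the generated script in Example #4); the file paths and model name are placeholders.

import mdt

# The position of a device in this list is its 'cl_device_ind'.
for ind, env in enumerate(mdt.get_cl_devices()):
    print(ind, env)

# Hypothetical input files; replace with your own data.
input_data = mdt.load_input_data(
    'subject/dwi.nii.gz',
    'subject/protocol.prtcl',
    'subject/mask.nii.gz')

# Restrict the computations to the first listed device.
mdt.fit_model('BallStick_r1', input_data, 'subject/output', cl_device_ind=0)
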
Example #4
    def _write_python_script_file(self, output_file, **kwargs):
        input_data_info = kwargs['input_data_info']
        optim_options = kwargs['optim_options']

        all_cl_devices = get_cl_devices()
        user_selected_devices = mot.configuration.get_cl_environments()

        format_kwargs = dict(
            header=get_script_file_header_text({'Purpose': 'Fitting a model'}),
            dwi=input_data_info.dwi,
            protocol=input_data_info.protocol,
            mask=input_data_info.mask,
            noise_std=input_data_info.noise_std,
            gradient_deviations=input_data_info.gradient_deviations,
            extra_protocol=input_data_info.extra_protocol,
            model=kwargs['model'],
            output_folder=kwargs['output_folder'],
            recalculate=kwargs['recalculate'],
            double_precision=kwargs['double_precision'],
            cl_device_ind=[
                ind for ind, device in enumerate(all_cl_devices)
                if device in user_selected_devices
            ],
            method=optim_options.method,
            patience=optim_options.patience)

        with open(output_file, 'w') as f:
            f.write('#!/usr/bin/env python\n')

            f.write(
                dedent('''
                {header}

                import mdt
                
                input_data = mdt.load_input_data(
                    {dwi!r},
                    {protocol!r},
                    {mask!r},
                    noise_std={noise_std!r},
                    gradient_deviations={gradient_deviations!r},
                    extra_protocol={extra_protocol!r})
                
                mdt.fit_model(
                    {model!r},
                    input_data,
                    {output_folder!r},
                    recalculate={recalculate!r},
                    double_precision={double_precision!r},
                    cl_device_ind={cl_device_ind!r},
                    use_cascaded_inits=True,
                    method={method!r},
                    optimizer_options={{'patience': {patience!r}}})

            ''').format(**format_kwargs))
Example #5
    def _write_bash_script_file(self, output_file, *args, **kwargs):
        input_data_info = kwargs['input_data_info']
        optim_options = kwargs['optim_options']

        all_cl_devices = get_cl_devices()
        user_selected_devices = mot.configuration.get_cl_environments()

        with open(output_file, 'w') as f:
            f.write('#!/usr/bin/env bash\n')
            f.write(
                dedent('''
                {header}

                mdt-model-fit \\
                    "{model}" \\
                    "{dwi}" \\
                    "{protocol}" \\
                    "{mask}" ''').format(header=get_script_file_header_text(
                    {'Purpose': 'Fitting a model'}),
                                         model=kwargs['model'],
                                         dwi=input_data_info.dwi,
                                         protocol=input_data_info.protocol,
                                         mask=input_data_info.mask))

            def write_new_line(line):
                f.write('\\\n' + ' ' * 4 + line + ' ')

            write_new_line('-o "{}"'.format(kwargs['output_folder']))

            if input_data_info.gradient_deviations:
                write_new_line('--gradient-deviations "{}"'.format(
                    input_data_info.gradient_deviations))

            if input_data_info.noise_std:
                write_new_line('--noise-std {}'.format(
                    input_data_info.noise_std))

            write_new_line('--cl-device-ind {}'.format(' '.join(
                str(ind) for ind, device in enumerate(all_cl_devices)
                if device in user_selected_devices)))

            write_new_line('--recalculate'
                           if kwargs['recalculate'] else '--no-recalculate')
            write_new_line(
                '--double' if kwargs['double_precision'] else '--float')
            write_new_line('--method {}'.format(optim_options.method))
            write_new_line('--patience {}'.format(optim_options.patience))
            write_new_line('--use-cascaded-inits')

            if input_data_info.extra_protocol:
                write_new_line('--extra-protocol {}'.format(' '.join(
                    '{}="{}"'.format(key, value)
                    for key, value in input_data_info.extra_protocol.items())))
Example #6
    def load(self, value):
        if 'cl_device_ind' in value:
            if value['cl_device_ind'] is not None:
                from mdt.utils import get_cl_devices
                all_devices = get_cl_devices()

                indices = value['cl_device_ind']
                if not isinstance(indices, collections.Iterable):
                    indices = [indices]

                devices = [
                    all_devices[ind] for ind in indices
                    if ind < len(all_devices)
                ]

                if devices:
                    mot.configuration.set_cl_environments(devices)
                    mot.configuration.set_load_balancer(EvenDistribution())
Example #7
def get_mot_config_context(cl_device_ind):
    """Get the configuration context that uses the given devices by index.

    Args:
        cl_device_ind (int or list of int): the device index or a list of device indices

    Returns:
        mot.configuration.ConfigAction: the configuration action to use
    """
    if cl_device_ind is not None and not isinstance(cl_device_ind,
                                                    collections.Iterable):
        cl_device_ind = [cl_device_ind]

    if cl_device_ind is None:
        return mot.configuration.VoidConfigurationAction()

    cl_envs = [get_cl_devices()[ind] for ind in cl_device_ind]
    return mot.configuration.RuntimeConfigurationAction(
        cl_environments=cl_envs)
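
A hedged sketch of how this helper could be used: wrap any MOT-based computation in the returned action via config_context, so the device selection only applies inside the block. The function run_my_computation is a hypothetical placeholder.

import mot.configuration

# Use CL devices 0 and 1 only for the duration of the with-block;
# the global MOT configuration is restored afterwards.
with mot.configuration.config_context(get_mot_config_context([0, 1])):
    run_my_computation()
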
Example #8
def sample_model(model,
                 input_data,
                 output_folder,
                 nmr_samples=None,
                 burnin=None,
                 thinning=None,
                 method=None,
                 recalculate=False,
                 cl_device_ind=None,
                 double_precision=False,
                 store_samples=True,
                 sample_items_to_save=None,
                 tmp_results_dir=True,
                 initialization_data=None,
                 post_processing=None,
                 post_sampling_cb=None,
                 sampler_options=None):
    """Sample a composite model using Markov Chain Monte Carlo sampling.

    Args:
        model (:class:`~mdt.models.composite.DMRICompositeModel` or str): the model to sample
        input_data (:class:`~mdt.utils.MRIInputData`): the input data object containing all
            the info needed for the model fitting.
        output_folder (string): The path to the folder where to place the output, we will make a subdir with the
            model name in it (for the optimization results) and then a subdir with the samples output.
        nmr_samples (int): the number of samples we would like to return.
        burnin (int): the number of samples to burn-in, that is, to discard before returning the desired
            number of samples
        thinning (int): how many samples we wait before storing a new one. This will draw extra samples such that
                the total number of samples generated is ``nmr_samples * (thinning)`` and the number of samples stored
                is ``nmr_samples``. If set to one or lower we store every sample after the burn-in.
        method (str): The sampling method to use, one of:
            - 'AMWG', for the Adaptive Metropolis-Within-Gibbs
            - 'SCAM', for the Single Component Adaptive Metropolis
            - 'FSL', for the sampling method used in the FSL toolbox
            - 'MWG', for the Metropolis-Within-Gibbs (simple random walk metropolis without updates)

            If not given, defaults to 'AMWG'.

        recalculate (boolean): If we want to recalculate the results if they are already present.
        cl_device_ind (int): the index of the CL device to use. The index is from the list from the function
            utils.get_cl_devices().
        double_precision (boolean): if we would like to do the calculations in double precision
        store_samples (boolean): determines if we store any of the samples. If set to False we will store none
            of the samples.
        sample_items_to_save (list): list of output names we want to store the samples of. If given, we only
            store the items specified in this list. Valid items are the free parameter names of the model and the
            items 'LogLikelihood' and 'LogPrior'.
        tmp_results_dir (str, True or None): The temporary dir for the calculations. Set to a string to use
                that path directly, set to True to use the config value, set to None to disable.
        initialization_data (:class:`~mdt.utils.InitializationData` or dict): provides (extra) initialization data to
            use during model fitting. If we are optimizing a cascade model this data only applies to the last model
            in the cascade. If a dictionary is given we will load the elements as arguments to the
            :class:`mdt.utils.SimpleInitializationData` class. For example::

                initialization_data = {'fixes': {...}, 'inits': {...}}

            is transformed into::

                initialization_data = SimpleInitializationData(fixes={...}, inits={...})
        post_processing (dict): a dictionary with flags for post-processing options to enable or disable.
            For valid elements, please see the configuration file settings for ``sample`` under ``post_processing``.
            Valid input for this parameter is for example: {'sample_statistics': True} to enable automatic calculation
            of the sample statistics.
        post_sampling_cb (Callable[
            [mot.sample.base.SamplingOutput, mdt.models.composite.DMRICompositeModel], Optional[Dict]]):
                additional post-processing called after sampling. This function can optionally return a (nested)
                dictionary with as keys dir-/file-names and as values maps to be stored in the results directory.
        sampler_options (dict): specific options for the MCMC routine. These will be provided to the sampling routine
            as additional keyword arguments to the constructor.

    Returns:
        dict: if store_samples is True then we return the samples per parameter as a numpy memmap. If store_samples
            is False we return None
    """
    import mdt.utils
    from mdt.lib.model_sampling import sample_composite_model
    from mdt.models.cascade import DMRICascadeModelInterface
    import mot.configuration

    settings = mdt.configuration.get_general_sampling_settings()
    if nmr_samples is None:
        nmr_samples = settings['nmr_samples']
    if burnin is None:
        burnin = settings['burnin']
    if thinning is None:
        thinning = settings['thinning']

    if not isinstance(initialization_data,
                      InitializationData) and initialization_data is not None:
        initialization_data = SimpleInitializationData(**initialization_data)

    if not mdt.utils.check_user_components():
        init_user_settings(pass_if_exists=True)

    if isinstance(model, str):
        model = get_model(model)()

    if post_processing:
        model.update_active_post_processing('sampling', post_processing)

    if isinstance(model, DMRICascadeModelInterface):
        raise ValueError(
            'The function \'sample_model()\' does not accept cascade models.')

    if cl_device_ind is None:
        cl_context_action = mot.configuration.VoidConfigurationAction()
    else:
        cl_context_action = mot.configuration.RuntimeConfigurationAction(
            cl_environments=get_cl_devices(cl_device_ind),
            double_precision=double_precision)

    with mot.configuration.config_context(cl_context_action):
        base_dir = os.path.join(output_folder, model.name, 'samples')

        if not os.path.isdir(base_dir):
            os.makedirs(base_dir)

        if recalculate:
            shutil.rmtree(base_dir)

        logger = logging.getLogger(__name__)
        logger.info('Using MDT version {}'.format(__version__))
        logger.info('Preparing for model {0}'.format(model.name))
        logger.info('The {0} parameters we will sample are: {1}'.format(
            len(model.get_free_param_names()), model.get_free_param_names()))

        return sample_composite_model(
            model,
            input_data,
            base_dir,
            nmr_samples,
            thinning,
            burnin,
            get_temporary_results_dir(tmp_results_dir),
            method=method,
            recalculate=recalculate,
            store_samples=store_samples,
            sample_items_to_save=sample_items_to_save,
            initialization_data=initialization_data,
            post_sampling_cb=post_sampling_cb,
            sampler_options=sampler_options)
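
A minimal call sketch for this sample_model variant, assuming the input data is loaded as in Example #4; the paths, sample counts and model name are placeholders.

import mdt

input_data = mdt.load_input_data(
    'subject/dwi.nii.gz',
    'subject/protocol.prtcl',
    'subject/mask.nii.gz')

# Draw 10000 samples per voxel on CL device 0, storing only the LogLikelihood chain.
samples = sample_model(
    'BallStick_r1',
    input_data,
    'subject/output',
    nmr_samples=10000,
    burnin=1000,
    cl_device_ind=0,
    sample_items_to_save=['LogLikelihood'])
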
Example #9
    def __init__(self,
                 data_folder,
                 batch_profile=None,
                 subjects_selection=None,
                 recalculate=False,
                 models_to_fit=None,
                 cascade_subdir=False,
                 cl_device_ind=None,
                 double_precision=False,
                 tmp_results_dir=True):
        """This class is meant to make running computations as simple as possible.

        The idea is that a single folder is enough to run the computations. One can optionally give it the
        batch_profile to use for the fitting. If not given, this class will attempt to use the
        batch_profile that fits the data folder best.

        Setting ``cl_device_ind`` has the side effect that it changes the current runtime cl_device settings in the
        MOT toolkit for the duration of this function.

        Args:
            data_folder (str): the main directory to look for items to process.
            batch_profile (:class:`~mdt.batch_utils.BatchProfile` or str): the batch profile to use
                or the name of a batch profile to use from the users folder.
            subjects_selection (:class:`~mdt.batch_utils.BatchSubjectSelection`): the subjects to use for processing.
                If None all subjects are processed.
            recalculate (boolean): If we want to recalculate the results if they are already present.
            cascade_subdir (boolean): if we want to create a subdirectory for every cascade model.
                By default we output the maps of cascaded results in the same directory; this allows reusing cascaded
                results for other cascades (for example, if you cascade BallStick -> Noddi you can also use
                the BallStick results for BallStick -> Charmed). This flag disables that behaviour and instead
                outputs the results of a cascade model to a subdirectory for that cascade.
                This does not apply recursively.
            models_to_fit (list of str): A list of models to fit to the data. This overrides the models in
                the batch config.
            cl_device_ind (int): the index of the CL device to use. The index is from the list from the function
                get_cl_devices().
            double_precision (boolean): if we would like to do the calculations in double precision
            tmp_results_dir (str, True or None): The temporary dir for the calculations. Set to a string to use
                that path directly, set to True to use the config value, set to None to disable.
        """
        self._logger = logging.getLogger(__name__)
        self._batch_profile = batch_profile_factory(batch_profile, data_folder)
        self._subjects_selection = subjects_selection or AllSubjects()
        self._tmp_results_dir = tmp_results_dir

        if models_to_fit:
            self._models_to_fit = models_to_fit
        else:
            self._models_to_fit = self._batch_profile.get_models_to_fit()

        self._cl_device_ind = cl_device_ind
        self._recalculate = recalculate
        self._double_precision = double_precision
        self._cascade_subdir = cascade_subdir

        if self._batch_profile is None:
            raise RuntimeError('No suitable batch profile could be '
                               'found for the directory {0}'.format(
                                   os.path.abspath(data_folder)))

        self._logger.info('Using MDT version {}'.format(__version__))
        self._logger.info('Using batch profile: {0}'.format(
            self._batch_profile))
        self._subjects = self._subjects_selection.get_selection(
            self._batch_profile.get_subjects())

        self._logger.info('Subjects found: {0}'.format(
            self._batch_profile.get_subjects_count()))
        self._logger.info('Subjects to process: {0}'.format(len(
            self._subjects)))

        if self._cl_device_ind is not None:
            if not isinstance(self._cl_device_ind, collections.Iterable):
                self._cl_device_ind = [self._cl_device_ind]
            devices = get_cl_devices()
            mot.configuration.set_cl_environments(
                [devices[ind] for ind in self._cl_device_ind])
Example #10
    def __init__(self,
                 model,
                 problem_data,
                 output_folder,
                 optimizer=None,
                 recalculate=False,
                 only_recalculate_last=False,
                 cascade_subdir=False,
                 cl_device_ind=None,
                 double_precision=False,
                 tmp_results_dir=True):
        """Setup model fitting for the given input model and data.

        To actually fit the model call run().

        Args:
            model
                (:class:`~mdt.models.composite.DMRICompositeModel` or :class:`~mdt.models.cascade.DMRICascadeModelInterface`):
                    the model we want to optimize.
            problem_data (:class:`~mdt.utils.DMRIProblemData`): the problem data object which contains the dwi image,
                the dwi header, the brain_mask and the protocol to use.
            output_folder (string): The full path to the folder where to place the output
            optimizer (:class:`mot.cl_routines.optimizing.base.AbstractOptimizer`): The optimization routine to use.
                If None, we create one using the configuration files.
            recalculate (boolean): If we want to recalculate the results if they are already present.
            only_recalculate_last (boolean): If we only want to recalculate the last model in the cascade.
                This is only of importance when dealing with CascadeModels. If set to true we only recalculate
                the last element in the chain (if recalculate is set to True, that is). If set to false,
                we recalculate everything. This only holds for the first level of the cascade.
            cascade_subdir (boolean): if we want to create a subdirectory for the given model if it is a cascade model.
                By default we output the maps of cascaded results in the same directory; this allows reusing cascaded
                results for other cascades (for example, if you cascade BallStick -> Noddi you can also use the BallStick
                results for BallStick -> Charmed). This flag disables that behaviour and instead outputs the
                results of a cascade model to a subdirectory for that cascade. This does not apply recursively.
            cl_device_ind (int): the index of the CL device to use. The index is from the list from the function
                get_cl_devices(). This can also be a list of device indices.
            double_precision (boolean): if we would like to do the calculations in double precision
            tmp_results_dir (str, True or None): The temporary dir for the calculations. Set to a string to use
                that path directly, set to True to use the config value, set to None to disable.
        """
        if isinstance(model, string_types):
            model = get_model(model)

        model.double_precision = double_precision

        self._model = model
        self._problem_data = problem_data
        self._output_folder = output_folder
        if cascade_subdir and isinstance(self._model,
                                         DMRICascadeModelInterface):
            self._output_folder += '/{}'.format(self._model.name)
        self._optimizer = optimizer
        self._recalculate = recalculate
        self._only_recalculate_last = only_recalculate_last
        self._logger = logging.getLogger(__name__)
        self._cl_device_indices = cl_device_ind
        self._model_names_list = []
        self._tmp_results_dir = get_temporary_results_dir(tmp_results_dir)

        if self._cl_device_indices is not None and not isinstance(
                self._cl_device_indices, collections.Iterable):
            self._cl_device_indices = [self._cl_device_indices]

        self._cl_envs = None
        self._load_balancer = None
        if self._cl_device_indices is not None:
            all_devices = get_cl_devices()
            self._cl_envs = [
                all_devices[ind] for ind in self._cl_device_indices
            ]
            self._load_balancer = EvenDistribution()

        if not model.is_protocol_sufficient(self._problem_data.protocol):
            raise InsufficientProtocolError(
                'The provided protocol is insufficient for this model. '
                'The reported errors were: {}'.format(
                    self._model.get_protocol_problems(
                        self._problem_data.protocol)))
Example #11
def get_optimization_inits(model_name,
                           input_data,
                           output_folder,
                           cl_device_ind=None):
    """Get better optimization starting points for the given model.

    Since initialization can make quite a difference in optimization results, this function can generate
    a good initialization starting point for the given model. The idea is that before you call the :func:`fit_model`
    function, you call this function to get a better starting point. A usage example would be::

        input_data = mdt.load_input_data(..)

        init_data = get_optimization_inits('BallStick_r1', input_data, '/my/folder')

        fit_model('BallStick_r1', input_data, '/my/folder',
                  initialization_data={'inits': init_data})

    Where the init data returned by this function can directly be used as input to the ``initialization_data``
    argument of the :func:`fit_model` function.

    Please note that this function only supports models shipped by default with MDT.

    Args:
        model_name (str):
            The name of a model for which we want the optimization starting points.
        input_data (:class:`~mdt.utils.MRIInputData`): the input data object containing all
            the info needed for model fitting of intermediate models.
        output_folder (string): The path to the folder where to place the output, we will make a subdir with the
            model name in it.
        cl_device_ind (int or list): the index of the CL device to use. The index is from the list from the function
            utils.get_cl_devices(). This can also be a list of device indices.

    Returns:
        dict: a dictionary with initialization points for the selected model
    """
    logger = logging.getLogger(__name__)

    def get_subset(param_names, fit_results):
        return {
            key: value
            for key, value in fit_results.items() if key in param_names
        }

    def get_model_fit(model_name):
        logger.info(
            'Starting intermediate optimization for generating initialization point.'
        )

        from mdt import fit_model
        results = fit_model(
            model_name,
            input_data,
            output_folder,
            recalculate=False,
            use_cascaded_inits=False,
            initialization_data={'inits': get_init_data(model_name)})

        logger.info(
            'Finished intermediate optimization for generating initialization point.'
        )
        return results

    def get_init_data(model_name):
        inits = {}
        free_parameters = get_model(model_name)().get_free_param_names()

        if 'S0.s0' in free_parameters and input_data.has_input_data('b'):
            unweighted_locations = np.where(
                input_data.get_input_data('b') < 250e6)[0]
            inits['S0.s0'] = np.mean(input_data.signal4d[...,
                                                         unweighted_locations],
                                     axis=-1)

        if model_name.startswith('BallStick_r2'):
            inits.update(
                get_subset(free_parameters, get_model_fit('BallStick_r1')))
            inits['w_stick1.w'] = np.minimum(inits['w_stick0.w'], 0.05)
        elif model_name.startswith('BallStick_r3'):
            inits.update(
                get_subset(free_parameters, get_model_fit('BallStick_r2')))
            inits['w_stick2.w'] = np.minimum(inits['w_stick1.w'], 0.05)
        elif model_name.startswith('Tensor'):
            fit_results = get_model_fit('BallStick_r1')
            inits.update(get_subset(free_parameters, fit_results))
            inits['Tensor.theta'] = fit_results['Stick0.theta']
            inits['Tensor.phi'] = fit_results['Stick0.phi']
        elif model_name.startswith('NODDI'):
            fit_results = get_model_fit('BallStick_r1')
            inits.update(get_subset(free_parameters, fit_results))
            inits['w_ic.w'] = fit_results['w_stick0.w'] / 2.0
            inits['w_ec.w'] = fit_results['w_stick0.w'] / 2.0
            inits['w_csf.w'] = fit_results['w_ball.w']
            inits['NODDI_IC.theta'] = fit_results['Stick0.theta']
            inits['NODDI_IC.phi'] = fit_results['Stick0.phi']
        elif model_name.startswith('BinghamNODDI_r1'):
            noddi_results = get_model_fit('NODDI')
            inits.update(get_subset(free_parameters, noddi_results))
            inits['w_in0.w'] = noddi_results['w_ic.w']
            inits['w_en0.w'] = noddi_results['w_ec.w']
            inits['w_csf.w'] = noddi_results['w_csf.w']
            inits['BinghamNODDI_IN0.theta'] = noddi_results['NODDI_IC.theta']
            inits['BinghamNODDI_IN0.phi'] = noddi_results['NODDI_IC.phi']
            inits['BinghamNODDI_IN0.k1'] = noddi_results['NODDI_IC.kappa']
        elif model_name.startswith('BinghamNODDI_r2'):
            bs2_results = get_model_fit('BallStick_r2')
            inits.update(get_subset(free_parameters, bs2_results))
            inits.update(
                get_subset(free_parameters, get_model_fit('BinghamNODDI_r1')))
            inits['BinghamNODDI_IN1.theta'] = bs2_results['Stick1.theta']
            inits['BinghamNODDI_IN1.phi'] = bs2_results['Stick1.phi']
        elif model_name.startswith('Kurtosis'):
            fit_results = get_model_fit('Tensor')
            inits.update(get_subset(free_parameters, fit_results))
            inits.update({
                'KurtosisTensor.' + key: fit_results['Tensor.' + key]
                for key in ['theta', 'phi', 'psi', 'd', 'dperp0', 'dperp1']
            })
        elif model_name.startswith('CHARMED_r'):
            nmr_dir = model_name[len('CHARMED_r'):len('CHARMED_r') + 1]
            fit_results = get_model_fit('BallStick_r' + nmr_dir)
            inits.update(get_subset(free_parameters, fit_results))
            inits['Tensor.theta'] = fit_results['Stick0.theta']
            inits['Tensor.phi'] = fit_results['Stick0.phi']
            for dir_ind in range(int(nmr_dir)):
                inits['w_res{}.w'.format(dir_ind)] = fit_results[
                    'w_stick{}.w'.format(dir_ind)]
                inits['CHARMEDRestricted{}.theta'.format(
                    dir_ind)] = fit_results['Stick{}.theta'.format(dir_ind)]
                inits['CHARMEDRestricted{}.phi'.format(dir_ind)] = fit_results[
                    'Stick{}.phi'.format(dir_ind)]
        elif model_name.startswith('BallRacket_r'):
            nmr_dir = model_name[len('BallRacket_r'):len('BallRacket_r') + 1]
            fit_results = get_model_fit('BallStick_r' + nmr_dir)
            inits.update(get_subset(free_parameters, fit_results))
            for dir_ind in range(int(nmr_dir)):
                inits['w_res{}.w'.format(dir_ind)] = fit_results[
                    'w_stick{}.w'.format(dir_ind)]
                inits['Racket{}.theta'.format(dir_ind)] = fit_results[
                    'Stick{}.theta'.format(dir_ind)]
                inits['Racket{}.phi'.format(dir_ind)] = fit_results[
                    'Stick{}.phi'.format(dir_ind)]
        elif model_name.startswith('AxCaliber'):
            fit_results = get_model_fit('BallStick_r1')
            inits.update(get_subset(free_parameters, fit_results))
            inits['GDRCylinders.theta'] = fit_results['Stick0.theta']
            inits['GDRCylinders.phi'] = fit_results['Stick0.phi']
        elif model_name.startswith('ActiveAx'):
            fit_results = get_model_fit('BallStick_r1')
            inits.update(get_subset(free_parameters, fit_results))
            inits['w_ic.w'] = fit_results['w_stick0.w'] / 2.0
            inits['w_ec.w'] = fit_results['w_stick0.w'] / 2.0
            inits['w_csf.w'] = fit_results['w_ball.w']
            inits['CylinderGPD.theta'] = fit_results['Stick0.theta']
            inits['CylinderGPD.phi'] = fit_results['Stick0.phi']
        elif model_name.startswith('QMT_ReducedRamani'):
            inits['S0.s0'] = np.mean(input_data.signal4d, axis=-1)

        return inits

    cl_environments = None
    if cl_device_ind is not None:
        cl_environments = get_cl_devices(cl_device_ind)

    with mot_config_context(
            mot.configuration.RuntimeConfigurationAction(
                cl_environments=cl_environments)):
        return get_init_data(model_name)
Example #12
def fit_model(model,
              input_data,
              output_folder,
              method=None,
              recalculate=False,
              cl_device_ind=None,
              double_precision=False,
              tmp_results_dir=True,
              initialization_data=None,
              use_cascaded_inits=True,
              post_processing=None,
              optimizer_options=None):
    """Run the optimizer on the given model.

    Args:
        model (str or :class:`~mdt.models.base.EstimableModel`):
            The name of a composite model or an implementation of a composite model.
        input_data (:class:`~mdt.utils.MRIInputData`): the input data object containing all
            the info needed for the model fitting.
        output_folder (string): The path to the folder where to place the output, we will make a subdir with the
            model name in it.
        method (str): The optimization method to use, one of:
            - 'Levenberg-Marquardt'
            - 'Nelder-Mead'
            - 'Powell'
            - 'Subplex'

            If not given, defaults to 'Powell'.

        recalculate (boolean): If we want to recalculate the results if they are already present.
        cl_device_ind (int or list): the index of the CL device to use. The index is from the list from the function
            utils.get_cl_devices(). This can also be a list of device indices.
        double_precision (boolean): if we would like to do the calculations in double precision
        tmp_results_dir (str, True or None): The temporary dir for the calculations. Set to a string to use
            that path directly, set to True to use the config value, set to None to disable.
        initialization_data (dict): provides (extra) initialization data to
            use during model fitting. This dictionary can contain the following elements:

            * ``inits``: dictionary with per parameter an initialization point
            * ``fixes``: dictionary with per parameter a fixed point, this will remove that parameter from the fitting
            * ``lower_bounds``: dictionary with per parameter a lower bound
            * ``upper_bounds``: dictionary with per parameter an upper bound
            * ``unfix``: a list of parameters to unfix

            For example::

                initialization_data = {
                    'fixes': {'Stick0.theta': np.array(...), ...},
                    'inits': {...}
                }

        use_cascaded_inits (boolean): if set, we initialize the model parameters using :func:`get_optimization_inits`.
            You can also overrule the default initializations using the ``initialization_data`` attribute.
        post_processing (dict): a dictionary with flags for post-processing options to enable or disable.
            For valid elements, please see the configuration file settings for ``optimization``
            under ``post_processing``. Valid input for this parameter is for example: {'covariance': False}
            to disable automatic calculation of the covariance from the Hessian.
        optimizer_options (dict): extra options passed to the optimization routines.

    Returns:
        dict: The result maps for the given composite model or the last model in the cascade.
            This returns the results as 3d/4d volumes for every output map.
    """
    logger = logging.getLogger(__name__)

    if not check_user_components():
        init_user_settings(pass_if_exists=True)

    if cl_device_ind is not None:
        if not isinstance(cl_device_ind, collections.Iterable):
            cl_device_ind = [cl_device_ind]
        cl_runtime_info = CLRuntimeInfo(
            cl_environments=get_cl_devices(cl_device_ind),
            double_precision=double_precision)
    else:
        cl_runtime_info = CLRuntimeInfo(double_precision=double_precision)

    if isinstance(model, str):
        model_name = model
        model_instance = get_model(model)()
    else:
        model_name = model.name
        model_instance = model

    if not model_instance.is_input_data_sufficient(input_data):
        raise InsufficientProtocolError(
            'The provided protocol is insufficient for this model. '
            'The reported errors were: {}'.format(
                model_instance.get_input_data_problems(input_data)))

    if post_processing:
        model_instance.update_active_post_processing('optimization',
                                                     post_processing)

    if use_cascaded_inits:
        if initialization_data is None:
            initialization_data = {}
        initialization_data['inits'] = initialization_data.get('inits', {})
        inits = get_optimization_inits(model_name,
                                       input_data,
                                       output_folder,
                                       cl_device_ind=cl_device_ind)
        inits.update(initialization_data['inits'])
        initialization_data['inits'] = inits

        initialization_data = SimpleInitializationData(**initialization_data)
        initialization_data.apply_to_model(model_instance, input_data)

        logger.info('Preparing {0} with the cascaded initializations.'.format(
            model_name))

    if method is None:
        method, optimizer_options = get_optimizer_for_model(model_name)

    with mot.configuration.config_context(CLRuntimeAction(cl_runtime_info)):
        fit_composite_model(model_instance,
                            input_data,
                            output_folder,
                            method,
                            get_temporary_results_dir(tmp_results_dir),
                            recalculate=recalculate,
                            optimizer_options=optimizer_options)

    return get_all_nifti_data(os.path.join(output_folder, model_name))
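
A usage sketch for fit_model that selects multiple CL devices by index; it assumes the same placeholder input files as the previous sketches and a model name ('NODDI') that appears in Example #11.

import mdt

input_data = mdt.load_input_data(
    'subject/dwi.nii.gz',
    'subject/protocol.prtcl',
    'subject/mask.nii.gz')

# Fit NODDI on CL devices 0 and 1 with the Powell optimizer,
# letting use_cascaded_inits (the default) generate the starting points.
results = fit_model(
    'NODDI',
    input_data,
    'subject/output',
    method='Powell',
    cl_device_ind=[0, 1])
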
Example #13
def sample_model(model,
                 problem_data,
                 output_folder,
                 sampler=None,
                 recalculate=False,
                 cl_device_ind=None,
                 double_precision=False,
                 store_samples=True,
                 tmp_results_dir=True,
                 save_user_script_info=True,
                 initialization_maps=None):
    """Sample a composite model using the given cascading strategy.

    Args:
        model (:class:`~mdt.models.composite.DMRICompositeModel` or str): the model to sample
        problem_data (:class:`~mdt.utils.DMRIProblemData`): the problem data object
        output_folder (string): The path to the folder where to place the output, we will make a subdir with the
            model name in it (for the optimization results) and then a subdir with the samples output.
        sampler (:class:`mot.cl_routines.sampling.base.AbstractSampler`): the sampler to use
        recalculate (boolean): If we want to recalculate the results if they are already present.
        cl_device_ind (int): the index of the CL device to use. The index is from the list from the function
            utils.get_cl_devices().
        double_precision (boolean): if we would like to do the calculations in double precision
        store_samples (boolean): if set to False we will store none of the samples. Use this
                if you are only interested in the volume maps and not in the entire sample chain.
        tmp_results_dir (str, True or None): The temporary dir for the calculations. Set to a string to use
                that path directly, set to True to use the config value, set to None to disable.
        save_user_script_info (boolean, str or SaveUserScriptInfo): The info we need to save about the script the
            user is currently executing. If True (default) we use the stack to lookup the script the user is executing
            and save that using a SaveFromScript saver. If a string is given we use that filename again for the
            SaveFromScript saver. If False or None, we do not write any information. If a SaveUserScriptInfo is
            given we use that directly.
        initialization_maps (dict): 4d maps to initialize the sampling with. By default this is None;
            common practice is to use the maps from an optimization run as the starting point.

    Returns:
        dict: the samples per parameter as a numpy memmap if store_samples is True
    """
    import mdt.utils
    from mot.load_balance_strategies import EvenDistribution
    from mdt.model_sampling import sample_composite_model
    from mdt.models.cascade import DMRICascadeModelInterface
    import mot.configuration

    if not mdt.utils.check_user_components():
        init_user_settings(pass_if_exists=True)

    if isinstance(model, string_types):
        model = get_model(model)

    if isinstance(model, DMRICascadeModelInterface):
        raise ValueError(
            'The function \'sample_model()\' does not accept cascade models.')

    if not model.is_protocol_sufficient(problem_data.protocol):
        raise InsufficientProtocolError(
            'The given protocol is insufficient for this model. '
            'The reported errors were: {}'.format(
                model.get_protocol_problems(problem_data.protocol)))

    if cl_device_ind is not None and not isinstance(cl_device_ind,
                                                    collections.Iterable):
        cl_device_ind = [cl_device_ind]

    if cl_device_ind is None:
        cl_context_action = mot.configuration.VoidConfigurationAction()
    else:
        cl_context_action = mot.configuration.RuntimeConfigurationAction(
            cl_environments=[get_cl_devices()[ind] for ind in cl_device_ind],
            load_balancer=EvenDistribution())

    with mot.configuration.config_context(cl_context_action):
        if sampler is None:
            sampler = configuration.get_sampler()

        processing_strategy = get_processing_strategy('sampling',
                                                      model_names=model.name)
        processing_strategy.set_tmp_dir(
            get_temporary_results_dir(tmp_results_dir))

        output_folder = os.path.join(output_folder, model.name, 'samples')
        if not os.path.isdir(output_folder):
            os.makedirs(output_folder)

        with per_model_logging_context(output_folder, overwrite=recalculate):
            logger = logging.getLogger(__name__)
            logger.info('Using MDT version {}'.format(__version__))
            logger.info('Preparing for model {0}'.format(model.name))

            if initialization_maps:
                model.set_initial_parameters(
                    create_roi(initialization_maps, problem_data.mask))

            model.double_precision = double_precision

            results = sample_composite_model(model,
                                             problem_data,
                                             output_folder,
                                             sampler,
                                             processing_strategy,
                                             recalculate=recalculate,
                                             store_samples=store_samples)

        easy_save_user_script_info(save_user_script_info,
                                   output_folder + '/used_scripts.py',
                                   stack()[1][0].f_globals.get('__file__'))
        return results
Example #14
def bootstrap_model(model, input_data, optimization_results, output_folder, bootstrap_method=None,
                    bootstrap_options=None, nmr_samples=None, optimization_method=None, optimizer_options=None,
                    recalculate=False, cl_device_ind=None, double_precision=False, keep_samples=True,
                    tmp_results_dir=True, initialization_data=None):
    """Resample the model using residual bootstrapping.

    This is typically used to construct confidence intervals on the optimized parameters.

    Args:
        model (str or :class:`~mdt.models.base.EstimableModel`): the model to sample
        input_data (:class:`~mdt.lib.input_data.MRIInputData`): the input data object containing all
            the info needed for the model fitting.
        optimization_results (dict or str): the optimization results, either a dictionary with results or the
            path to a folder.
        output_folder (string): The path to the folder where to place the output, we will make a subdir with the
            model name in it (for the optimization results) and then a subdir with the samples output.
        bootstrap_method (str): the bootstrap method we want to use, 'residual', or 'wild'. Defaults to 'wild'.
        bootstrap_options (dict): bootstrapping options specific for the bootstrap method in use
        nmr_samples (int): the number of samples we would like to compute. Defaults to 1000.
        optimization_method (str): The optimization method to use, one of:
            - 'Levenberg-Marquardt'
            - 'Nelder-Mead'
            - 'Powell'
            - 'Subplex'

            If not given, defaults to 'Powell'.
        optimizer_options (dict): extra options passed to the optimization routines.
        recalculate (boolean): If we want to recalculate the results if they are already present.
        cl_device_ind (int): the index of the CL device to use. The index is from the list from the function
            utils.get_cl_devices().
        double_precision (boolean): if we would like to do the calculations in double precision
        keep_samples (boolean): determines if we keep any of the chains. If set to False, the chains will
            be discarded after generating the mean and standard deviations.
        tmp_results_dir (str, True or None): The temporary dir for the calculations. Set to a string to use
                that path directly, set to True to use the config value, set to None to disable.
        initialization_data (dict): provides (extra) initialization data to
            use during model fitting. This dictionary can contain the following elements:

            * ``inits``: dictionary with per parameter an initialization point
            * ``fixes``: dictionary with per parameter a fixed point, this will remove that parameter from the fitting
            * ``lower_bounds``: dictionary with per parameter a lower bound
            * ``upper_bounds``: dictionary with per parameter an upper bound
            * ``unfix``: a list of parameters to unfix

            For example::

                initialization_data = {
                    'fixes': {'Stick0.theta': np.array(...), ...},
                    'inits': {...}
                }

    Returns:
        dict: if keep_samples is True we return the samples per parameter as a numpy memmap.
            If keep_samples is False we return None
    """
    initialization_data = initialization_data or {}
    nmr_samples = nmr_samples or 1000
    bootstrap_method = bootstrap_method or 'wild'

    if not check_user_components():
        init_user_settings(pass_if_exists=True)

    if cl_device_ind is None:
        cl_context_action = mot.configuration.VoidConfigurationAction()
    else:
        cl_context_action = mot.configuration.RuntimeConfigurationAction(
            cl_environments=get_cl_devices(cl_device_ind),
            double_precision=double_precision)

    if isinstance(model, str):
        model_name = model
        model_instance = get_model(model)()
    else:
        model_name = model.name
        model_instance = model

    model_instance.update_active_post_processing('optimization', {'uncertainties': False, 'll_and_ic': False})

    initialization_data = SimpleInitializationData(**initialization_data)
    initialization_data.apply_to_model(model_instance, input_data)

    if optimization_method is None:
        optimization_method, optimizer_options = get_optimizer_for_model(model_name)

    with mot.configuration.config_context(cl_context_action):
        from mdt.lib.processing.model_bootstrapping import compute_bootstrap
        return compute_bootstrap(model_instance, input_data, optimization_results,
                                 output_folder, bootstrap_method, optimization_method, nmr_samples,
                                 get_temporary_results_dir(tmp_results_dir),
                                 recalculate=recalculate,
                                 keep_samples=keep_samples,
                                 optimizer_options=optimizer_options,
                                 bootstrap_options=bootstrap_options)
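
A hedged call sketch for bootstrap_model, assuming a previous fit_model run wrote its maps to a per-model subfolder of the output folder (as in Example #12); all paths are placeholders.

import mdt

input_data = mdt.load_input_data(
    'subject/dwi.nii.gz',
    'subject/protocol.prtcl',
    'subject/mask.nii.gz')

# Wild bootstrap with 500 resamples on CL device 0, reusing the optimization
# results that fit_model stored under 'subject/output/BallStick_r1'.
samples = bootstrap_model(
    'BallStick_r1',
    input_data,
    'subject/output/BallStick_r1',
    'subject/output',
    bootstrap_method='wild',
    nmr_samples=500,
    cl_device_ind=0)
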
Example #15
    def __init__(self,
                 model,
                 input_data,
                 output_folder,
                 method=None,
                 optimizer_options=None,
                 recalculate=False,
                 only_recalculate_last=False,
                 cl_device_ind=None,
                 double_precision=False,
                 tmp_results_dir=True,
                 initialization_data=None,
                 post_processing=None):
        """Setup model fitting for the given input model and data.

        To actually fit the model call run().

        Args:
            model (str or :class:`~mdt.models.composite.DMRICompositeModel` or :class:`~mdt.models.cascade.DMRICascadeModelInterface`):
                    the model we want to optimize.
            input_data (:class:`~mdt.utils.MRIInputData`): the input data object containing
                all the info needed for the model fitting.
            output_folder (string): The full path to the folder where to place the output
            method (str): The optimization method to use, one of:
                - 'Levenberg-Marquardt'
                - 'Nelder-Mead'
                - 'Powell'
                - 'Subplex'

                If not given, defaults to 'Powell'.
            optimizer_options (dict): extra options passed to the optimization routines.
            recalculate (boolean): If we want to recalculate the results if they are already present.
            only_recalculate_last (boolean): If we only want to recalculate the last model in the cascade.
                This is only of importance when dealing with CascadeModels. If set to true we only recalculate
                the last element in the chain (if recalculate is set to True, that is). If set to false,
                we recalculate everything. This only holds for the first level of the cascade.
            cl_device_ind (int or list): the index of the CL device to use. The index is from the list from the function
                get_cl_devices(). This can also be a list of device indices.
            double_precision (boolean): if we would like to do the calculations in double precision
            tmp_results_dir (str, True or None): The temporary dir for the calculations. Set to a string to use
                that path directly, set to True to use the config value, set to None to disable.
            initialization_data (dict or :class:`~mdt.utils.InitializationData`): extra initialization data to use
                during model fitting. If we are optimizing a cascade model this data only applies to the last model in
                the cascade.
            post_processing (dict): a dictionary with flags for post-processing options to enable or disable.
                For valid elements, please see the configuration file settings for ``optimization``
                under ``post_processing``. Valid input for this parameter is for example: {'covariance': False}
                to disable automatic calculation of the covariance from the Hessian.

        """
        if isinstance(model, str):
            model = get_model(model)()

        if post_processing:
            model.update_active_post_processing('optimization',
                                                post_processing)

        self._model = model
        self._input_data = input_data
        self._output_folder = output_folder
        self._method = method
        self._optimizer_options = optimizer_options
        self._recalculate = recalculate
        self._only_recalculate_last = only_recalculate_last
        self._logger = logging.getLogger(__name__)

        self._model_names_list = []
        self._tmp_results_dir = get_temporary_results_dir(tmp_results_dir)

        if initialization_data is not None and not isinstance(
                initialization_data, InitializationData):
            self._initialization_data = SimpleInitializationData(
                **initialization_data)
        else:
            self._initialization_data = initialization_data

        if cl_device_ind is not None:
            self._cl_runtime_info = CLRuntimeInfo(
                cl_environments=get_cl_devices(cl_device_ind),
                double_precision=double_precision)
        else:
            self._cl_runtime_info = CLRuntimeInfo(
                double_precision=double_precision)

        if not model.is_input_data_sufficient(self._input_data):
            raise InsufficientProtocolError(
                'The provided protocol is insufficient for this model. '
                'The reported errors were: {}'.format(
                    self._model.get_input_data_problems(self._input_data)))
Example #16
def sample_model(model,
                 input_data,
                 output_folder,
                 nmr_samples=None,
                 burnin=None,
                 thinning=None,
                 recalculate=False,
                 cl_device_ind=None,
                 double_precision=False,
                 store_samples=True,
                 sample_items_to_save=None,
                 tmp_results_dir=True,
                 save_user_script_info=True,
                 initialization_data=None,
                 post_processing=None):
    """Sample a composite model using the Adaptive Metropolis-Within-Gibbs (AMWG) MCMC algorithm [1].

    Args:
        model (:class:`~mdt.models.composite.DMRICompositeModel` or str): the model to sample
        input_data (:class:`~mdt.utils.MRIInputData`): the input data object containing all
            the info needed for the model fitting.
        output_folder (string): The path to the folder where to place the output, we will make a subdir with the
            model name in it (for the optimization results) and then a subdir with the samples output.
        nmr_samples (int): the number of samples we would like to return.
        burnin (int): the number of samples to burn-in, that is, to discard before returning the desired
            number of samples
        thinning (int): how many samples we wait before storing a new one. This will draw extra samples such that
                the total number of samples generated is ``nmr_samples * (thinning)`` and the number of samples stored
                is ``nmr_samples``. If set to one or lower we store every sample after the burn-in.
        recalculate (boolean): If we want to recalculate the results if they are already present.
        cl_device_ind (int): the index of the CL device to use. The index is from the list from the function
            utils.get_cl_devices().
        double_precision (boolean): if we would like to do the calculations in double precision
        store_samples (boolean): determines if we store any of the samples. If set to False we will store none
            of the samples.
        sample_items_to_save (list): list of output names we want to store the samples of. If given, we only
            store the items specified in this list. Valid items are the free parameter names of the model and the
            items 'LogLikelihood' and 'LogPrior'.
        tmp_results_dir (str, True or None): The temporary dir for the calculations. Set to a string to use
                that path directly, set to True to use the config value, set to None to disable.
        save_user_script_info (boolean, str or SaveUserScriptInfo): The info we need to save about the script the
            user is currently executing. If True (default) we use the stack to lookup the script the user is executing
            and save that using a SaveFromScript saver. If a string is given we use that filename again for the
            SaveFromScript saver. If False or None, we do not write any information. If a SaveUserScriptInfo is
            given we use that directly.
        initialization_data (:class:`~mdt.utils.InitializationData` or dict): provides (extra) initialization data to
            use during model fitting. If we are optimizing a cascade model this data only applies to the last model
            in the cascade. If a dictionary is given we will load the elements as arguments to the
            :class:`mdt.utils.SimpleInitializationData` class. For example::

                initialization_data = {'fixes': {...}, 'inits': {...}}

            is transformed into::

                initialization_data = SimpleInitializationData(fixes={...}, inits={...})
        post_processing (dict): a dictionary with flags for post-processing options to enable or disable.
            For valid elements, please see the configuration file settings for ``sampling`` under ``post_processing``.
            Valid input for this parameter is for example: {'sample_statistics': True} to enable automatic calculation
            of the sampling statistics.

    Returns:
        dict: if store_samples is True then we return the samples per parameter as a numpy memmap. If store_samples
            is False we return None

    References:
        1. Roberts GO, Rosenthal JS. Examples of adaptive MCMC. J Comput Graph Stat. 2009;18(2):349-367.
           doi:10.1198/jcgs.2009.06134.
    """
    import mdt.utils
    import mdt.configuration
    from mot.load_balance_strategies import EvenDistribution
    from mdt.model_sampling import sample_composite_model
    from mdt.models.cascade import DMRICascadeModelInterface
    import mot.configuration

    settings = mdt.configuration.get_general_sampling_settings()
    if nmr_samples is None:
        nmr_samples = settings['nmr_samples']
    if burnin is None:
        burnin = settings['burnin']
    if thinning is None:
        thinning = settings['thinning']

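    # Accept a plain dict for the initialization data and wrap it in SimpleInitializationData.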
    if initialization_data is not None and not isinstance(
            initialization_data, InitializationData):
        initialization_data = SimpleInitializationData(**initialization_data)

    if not mdt.utils.check_user_components():
        init_user_settings(pass_if_exists=True)

    if isinstance(model, string_types):
        model = get_model(model)()

    if post_processing:
        model.update_active_post_processing('sampling', post_processing)

    if isinstance(model, DMRICascadeModelInterface):
        raise ValueError(
            'The function \'sample_model()\' does not accept cascade models.')

    if cl_device_ind is not None and not isinstance(cl_device_ind,
                                                    collections.Iterable):
        cl_device_ind = [cl_device_ind]

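    # With no explicit device selection we keep the current MOT configuration (VoidConfigurationAction);
    # otherwise we restrict execution to the requested devices and balance the load evenly across them.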
    if cl_device_ind is None:
        cl_context_action = mot.configuration.VoidConfigurationAction()
    else:
        cl_envs = [get_cl_devices()[ind] for ind in cl_device_ind]
        cl_context_action = mot.configuration.RuntimeConfigurationAction(
            cl_environments=cl_envs,
            load_balancer=EvenDistribution(),
            double_precision=double_precision)

    with mot.configuration.config_context(cl_context_action):
        base_dir = os.path.join(output_folder, model.name, 'samples')

        if not os.path.isdir(base_dir):
            os.makedirs(base_dir)

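        # When recalculating, discard any previously stored samples for this model.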
        if recalculate:
            shutil.rmtree(base_dir)

        logger = logging.getLogger(__name__)
        logger.info('Using MDT version {}'.format(__version__))
        logger.info('Preparing for model {0}'.format(model.name))
        logger.info('The parameters we will sample are: {0}'.format(
            model.get_free_param_names()))

        results = sample_composite_model(
            model,
            input_data,
            base_dir,
            nmr_samples,
            thinning,
            burnin,
            get_temporary_results_dir(tmp_results_dir),
            recalculate=recalculate,
            store_samples=store_samples,
            sample_items_to_save=sample_items_to_save,
            initialization_data=initialization_data)

        easy_save_user_script_info(save_user_script_info,
                                   os.path.join(base_dir, 'used_scripts.py'),
                                   stack()[1][0].f_globals.get('__file__'))
        return results
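
Below is a minimal sketch of how sample_model() might be invoked. The file names and the 'BallStick_r1' model name are illustrative assumptions, and mdt.load_input_data is assumed to be the loader that builds the required MRIInputData object.

import mdt

# Hypothetical input files; load_input_data is assumed to return an MRIInputData object.
input_data = mdt.load_input_data('dwi.nii.gz', 'protocol.prtcl', 'mask.nii.gz')

# Sample a single composite model; cascade models are rejected by sample_model().
samples = mdt.sample_model('BallStick_r1', input_data, '/tmp/mdt_output',
                           nmr_samples=10000, burnin=1000, thinning=1,
                           cl_device_ind=0, store_samples=True)
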
Example #17
    def __init__(self,
                 model,
                 input_data,
                 output_folder,
                 optimizer=None,
                 recalculate=False,
                 only_recalculate_last=False,
                 cascade_subdir=False,
                 cl_device_ind=None,
                 double_precision=False,
                 tmp_results_dir=True,
                 initialization_data=None,
                 post_processing=None):
        """Setup model fitting for the given input model and data.

        To actually fit the model call run().

        Args:
            model (str or :class:`~mdt.models.composite.DMRICompositeModel` or :class:`~mdt.models.cascade.DMRICascadeModelInterface`):
                    the model we want to optimize.
            input_data (:class:`~mdt.utils.MRIInputData`): the input data object containing
                all the info needed for the model fitting.
            output_folder (string): The full path to the folder where to place the output
            optimizer (:class:`mot.cl_routines.optimizing.base.AbstractOptimizer`): The optimization routine to use.
                If None, we create one using the configuration files.
            recalculate (boolean): If we want to recalculate the results if they are already present.
            only_recalculate_last (boolean): If we want to recalculate only the last model in a cascade.
                This is only of importance when dealing with CascadeModels. If set to true we only recalculate
                the last element in the chain (if recalculate is set to True, that is). If set to false,
                we recalculate everything. This only holds for the first level of the cascade.
            cascade_subdir (boolean): if we want to create a subdirectory for the given model if it is a cascade model.
                By default we output the maps of cascaded results in the same directory; this allows reusing cascaded
                results for other cascades (for example, if you cascade BallStick -> Noddi you can use the BallStick
                results also for BallStick -> Charmed). This flag disables that behaviour and instead outputs the
                results of a cascade model to a subdirectory for that cascade. This does not apply recursively.
            cl_device_ind (int or list of int): the index of the CL device to use, or a list of such indices. The
                indices refer to the list returned by the function get_cl_devices().
            double_precision (boolean): if we would like to do the calculations in double precision
            tmp_results_dir (str, True or None): The temporary dir for the calculations. Set to a string to use
                that path directly, set to True to use the config value, set to None to disable.
            initialization_data (:class:`~mdt.utils.InitializationData`): extra initialization data to use
                during model fitting. If we are optimizing a cascade model this data only applies to the last model in the
                cascade.
            post_processing (dict): a dictionary with flags for post-processing options to enable or disable.
                For valid elements, please see the configuration file settings for ``optimization``
                under ``post_processing``. Valid input for this parameter is for example: {'covariance': False}
                to disable automatic calculation of the covariance from the Hessian.

        """
        if isinstance(model, string_types):
            model = get_model(model)()

        if post_processing:
            model.update_active_post_processing('optimization',
                                                post_processing)

        self._model = model
        self._input_data = input_data
        self._output_folder = output_folder
        if cascade_subdir and isinstance(self._model,
                                         DMRICascadeModelInterface):
            self._output_folder = os.path.join(self._output_folder, self._model.name)
        self._optimizer = optimizer
        self._recalculate = recalculate
        self._only_recalculate_last = only_recalculate_last
        self._logger = logging.getLogger(__name__)

        self._model_names_list = []
        self._tmp_results_dir = get_temporary_results_dir(tmp_results_dir)
        self._initialization_data = initialization_data or SimpleInitializationData()

        if cl_device_ind is not None and not isinstance(
                cl_device_ind, collections.Iterable):
            cl_device_ind = [cl_device_ind]

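        # Resolve any requested device indices into concrete CL environments for the CLRuntimeInfo below.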
        cl_environments = None
        if cl_device_ind is not None:
            cl_environments = [get_cl_devices()[ind] for ind in cl_device_ind]

        self._cl_runtime_info = CLRuntimeInfo(
            cl_environments=cl_environments,
            load_balancer=EvenDistribution(),
            double_precision=double_precision)

        if not model.is_input_data_sufficient(self._input_data):
            raise InsufficientProtocolError(
                'The provided protocol is insufficient for this model. '
                'The reported errors were: {}'.format(
                    self._model.get_input_data_problems(self._input_data)))
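
The constructor above belongs to MDT's model fitting helper class; the class name is not visible in this fragment, so ModelFit and its import path below are assumptions. A minimal usage sketch, with hypothetical file names, that calls run() as the docstring instructs:

import mdt
from mdt.model_fitting import ModelFit  # class name and module are assumed

# Hypothetical input files; load_input_data is assumed to return an MRIInputData object.
input_data = mdt.load_input_data('dwi.nii.gz', 'protocol.prtcl', 'mask.nii.gz')

fit = ModelFit('BallStick_r1 (Cascade)',  # illustrative model or cascade name
               input_data,
               '/tmp/mdt_output',
               recalculate=False,
               cl_device_ind=[0],  # restrict to the first CL device
               double_precision=False)
results = fit.run()  # per the docstring, call run() to actually fit the model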