Example 1
            def __init__(self):
                models = []
                for model_def in template.models:
                    if isinstance(model_def, str):
                        models.append(mdt.get_model(model_def)())
                    else:
                        models.append(mdt.get_model(model_def[0])(model_def[1]))

                super().__init__(deepcopy(template.name), models)
Example 2
            def __init__(self, *args):
                models = []
                for model_def in template.models:
                    if isinstance(model_def, six.string_types):
                        models.append(mdt.get_model(model_def)())
                    else:
                        models.append(
                            mdt.get_model(model_def[0])(model_def[1]))

                new_args = [deepcopy(template.name), models]
                # positional arguments override the default (name, models) by position
                for ind, arg in enumerate(args):
                    new_args[ind] = arg
                super(AutoCreatedCascadeModel, self).__init__(*new_args)
def prepare_charmed_r1_params(params_cube):
    """Set the primary direction of the Tensor to that of the first CHARMED restricted compartment
    and sort the Tensor diffusivities in descending order.
    """
    param_names = mdt.get_model('CHARMED_r1')().get_free_param_names()
    params_cube[..., param_names.index('Tensor.theta')] = params_cube[
        ..., param_names.index('CHARMEDRestricted0.theta')]
    params_cube[..., param_names.index('Tensor.phi')] = params_cube[
        ..., param_names.index('CHARMEDRestricted0.phi')]
    # sort the (hard-coded) Tensor diffusivity columns in descending order
    params_cube[:, 1:4] = np.sort(params_cube[:, 1:4], axis=1)[:, ::-1]
def prepare_noddi_params(params_cube):
    """Normalize w_ic.w and w_ec.w so that they sum to at most 1; leave them unchanged otherwise."""
    param_names = mdt.get_model('NODDI')().get_free_param_names()

    w_ec_w = params_cube[:, param_names.index('w_ec.w')]
    w_ic_w = params_cube[:, param_names.index('w_ic.w')]

    summed = w_ec_w + w_ic_w
    w_ec_w[summed > 1] = (w_ec_w / summed)[summed > 1]
    w_ic_w[summed > 1] = (w_ic_w / summed)[summed > 1]

    params_cube[:, param_names.index('w_ec.w')] = w_ec_w
    params_cube[:, param_names.index('w_ic.w')] = w_ic_w
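The normalization above only rescales the two volume fractions where their sum exceeds 1. A minimal, self-contained sketch (not part of the source) showing the same operation on a toy array, with the columns assumed to hold w_ic.w and w_ec.w:

import numpy as np

toy = np.array([[0.8, 0.6],   # sums to 1.4, gets rescaled to sum to 1
                [0.3, 0.4]])  # sums to 0.7, left untouched
summed = toy.sum(axis=1)
over = summed > 1
toy[over] = toy[over] / summed[over, None]
print(toy)  # [[0.5714..., 0.4285...], [0.3, 0.4]]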
def prepare_charmed2_params(params_cube):
    """Set the primary direction of the Tensor to that of the first CHARMED restricted compartment,
    make sure the weights sum to at most 1, and sort the Tensor diffusivities in descending order.
    """
    param_names = mdt.get_model('CHARMED_r2')().get_free_param_names()
    params_cube[..., param_names.index('Tensor.theta')] = params_cube[
        ..., param_names.index('CHARMEDRestricted0.theta')]
    params_cube[..., param_names.index('Tensor.phi')] = params_cube[
        ..., param_names.index('CHARMEDRestricted0.phi')]

    # columns 7 and 11 hold the compartment weights (hard-coded indices);
    # rescale them where they sum to more than 1
    weights_sum = np.sum(params_cube[:, [7, 11]], axis=1)
    indices = weights_sum > 1
    params_cube[indices, 7] /= weights_sum[indices]
    params_cube[indices, 11] /= weights_sum[indices]

    # sort the weight columns and the (hard-coded) Tensor diffusivity columns in descending order
    params_cube[:, (7, 11)] = np.sort(params_cube[:, (7, 11)], axis=1)[:, ::-1]
    params_cube[:, 1:4] = np.sort(params_cube[:, 1:4], axis=1)[:, ::-1]
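The prepare_* callbacks above are referenced from a module-level `simulations` configuration that create_simulations (below) reads. A hypothetical entry, showing only the keys that function uses; the parameter names and bound values are placeholders:

simulations = {
    'NODDI': {
        'randomize_parameters': ['w_ic.w', 'w_ec.w'],   # placeholder subset of the free parameter names
        'lower_bounds': None,                           # placeholder; passed to create_parameter_combinations
        'upper_bounds': None,                           # placeholder
        'prepare_params_cube_cb': prepare_noddi_params,
    },
    # similar entries would map 'CHARMED_r1' and 'CHARMED_r2' to their prepare_* callbacks
}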
def create_simulations(protocol_name, model_name):
    output_pjoin = pjoin.create_extended(protocol_name, model_name)
    if os.path.exists(output_pjoin()):
        return
    else:
        output_pjoin.make_dirs()

    model = mdt.get_model(model_name)(volume_selection=False)
    param_names = model.get_free_param_names()

    model_config = simulations[model_name]

    parameters = create_parameter_combinations(
        10000,
        [param_names.index(name) for name in model_config['randomize_parameters']],
        model.get_initial_parameters(),
        model_config['lower_bounds'],
        model_config['upper_bounds'],
        seed=0)

    if model_config['prepare_params_cube_cb'] is not None:
        model_config['prepare_params_cube_cb'](parameters)

    simulated_signals = mdt.simulate_signals(
        model, mdt.load_protocol(pjoin(protocol_name)), parameters)

    mdt.write_nifti(parameters[None, :, None, :],
                    output_pjoin('original_parameters.nii'))
    mdt.write_nifti(simulated_signals[None, :, None, :],
                    output_pjoin('simulated_signals.nii'))

    for snr in noise_snrs:
        noisy_signals = mdt.add_rician_noise(simulated_signals,
                                             unweighted_signal_height / snr,
                                             seed=0)
        mdt.write_nifti(noisy_signals[None, :, None, :],
                        output_pjoin('noisy_signals_{}.nii'.format(snr)))

    mdt.create_blank_mask(
        output_pjoin('noisy_signals_{}.nii'.format(noise_snrs[0])),
        output_pjoin('mask.nii'))
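create_simulations is presumably invoked once per protocol/model combination; a hypothetical driver loop, assuming a `protocol_names` list defined alongside `simulations` and `noise_snrs`:

for protocol_name in protocol_names:       # hypothetical list of protocol file names
    for model_name in simulations:
        create_simulations(protocol_name, model_name)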
Example 7
def simulate_signals(model_name, protocol, parameters):
    """Generate the signal for the given model for each of the parameters.

    This function only accepts a 2d list of parameters. For a generated parameters cube use function
    simulate_signals_param_cube.

    Args:
        model_name (str): the name of the model we want to generate the values for
        protocol (Protocol): the protocol object we use for generating the signals
        parameters (ndarray): the 2d matrix with the parameters for every problem instance

    Returns:
        signal estimates
    """
    problem_data = MockDMRIProblemData(protocol, None, None, None)

    model = mdt.get_model(model_name)
    model.set_problem_data(problem_data)

    signal_evaluate = CalculateModelEstimates()
    return signal_evaluate.calculate(model, parameters)
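A hypothetical call of this function, assuming mdt is imported as in the snippet above; the protocol file name, model name, and parameter count are placeholders:

import numpy as np

protocol = mdt.load_protocol('my_protocol.prtcl')    # placeholder protocol file
parameters = np.random.rand(100, 6)                  # 100 problem instances, 6 free parameters (placeholder)
signals = simulate_signals('BallStick_r1', protocol, parameters)  # placeholder model name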
Example 8
    def run_model(self):
        model = mdt.get_model(self.modelSelection.currentText())
        protocol = mdt.load_protocol(self.selectedProtocol.text())

        if not model.is_protocol_sufficient(protocol):
            msg = ProtocolWarningBox(model.get_protocol_problems(protocol))
            msg.exec_()
            return

        self._run_model_worker.set_args(
            model,
            mdt.load_problem_data(self.selectedDWI.text(),
                                  self.selectedProtocol.text(),
                                  self.selectedMask.text(),
                                  noise_std=self._optim_options.noise_std),
            self.selectedOutputFolder.text(),
            recalculate=True,
            double_precision=self._optim_options.double_precision,
            only_recalculate_last=not self._optim_options.recalculate_all,
            optimizer=self._optim_options.get_optimizer(),
            save_user_script_info=False)

        self._computations_thread.start()
        self._run_model_worker.moveToThread(self._computations_thread)

        self._run_model_worker.starting.connect(
            self._computations_thread.starting)
        self._run_model_worker.finished.connect(
            self._computations_thread.finished)

        self._run_model_worker.starting.connect(
            lambda: self.runButton.setEnabled(False))
        self._run_model_worker.finished.connect(
            lambda: self.runButton.setEnabled(True))

        self._run_model_worker.finished.connect(
            lambda: self._shared_state.set_output_folder(
                self._get_full_model_output_path()))

        self._run_model_worker.starting.emit()
Example 9
    def run_model(self):
        model = mdt.get_model(self._get_current_model_name())()

        if not model.is_input_data_sufficient(
                self._input_data_info.build_input_data()):
            msg = ProtocolWarningBox(
                model.get_input_data_problems(
                    self._input_data_info.build_input_data()))
            msg.exec_()
            return

        self._run_model_worker.set_args(
            model,
            self._input_data_info.build_input_data(),
            self.selectedOutputFolder.text(),
            recalculate=True,
            double_precision=self._optim_options.double_precision,
            only_recalculate_last=not self._optim_options.recalculate_all,
            method=self._optim_options.method)

        self._computations_thread.start()
        self._run_model_worker.moveToThread(self._computations_thread)

        self._run_model_worker.starting.connect(
            self._computations_thread.starting)
        self._run_model_worker.finished.connect(
            self._computations_thread.finished)

        self._run_model_worker.starting.connect(
            lambda: self.runButton.setEnabled(False))
        self._run_model_worker.finished.connect(
            lambda: self.runButton.setEnabled(True))

        self._run_model_worker.finished.connect(
            lambda: self._shared_state.set_output_folder(
                self._get_full_model_output_path()))

        image_path = split_image_path(self._input_data_info.dwi)
        script_basename = os.path.join(
            image_path[0], 'scripts', 'fit_model_{}_{}'.format(
                self._get_current_model_name().replace('|', '.'),
                image_path[1]))
        if not os.path.isdir(os.path.join(image_path[0], 'scripts')):
            os.makedirs(os.path.join(image_path[0], 'scripts'))

        script_info = dict(
            optim_options=self._optim_options,
            input_data_info=self._input_data_info,
            model=self._get_current_model_name(),
            output_folder=self.selectedOutputFolder.text(),
            recalculate=True,
            double_precision=self._optim_options.double_precision,
            only_recalculate_last=not self._optim_options.recalculate_all)

        self._run_model_worker.finished.connect(
            lambda: self._write_python_script_file(
                script_basename + '.py', **script_info))

        self._run_model_worker.finished.connect(
            lambda: self._write_bash_script_file(
                script_basename + '.sh', **script_info))

        self._run_model_worker.starting.emit()