Example #1
    def _reconstruct_slice(self, slice_data, slice_index):
        nmr_timeseries = slice_data.shape[-2]
        nmr_channels = slice_data.shape[-1]

        batch = np.reshape(slice_data, (-1, nmr_timeseries, nmr_channels))

        codec = STARCOptimizationCodec(nmr_channels)

        data = Struct({'observations': Array(batch.reshape((batch.shape[0], -1))),
                       'scratch': LocalMemory('double', nmr_items=batch.shape[1] + 4)},
                      'starc_data')

        wrapper = ParameterDecodingWrapper(nmr_channels)

        result = minimize(wrapper.wrap_objective_function(get_starc_objective_func(batch), codec.get_decode_function()),
                          codec.encode(self._get_starting_weights(slice_index, batch)),
                          data=wrapper.wrap_input_data(data),
                          cl_runtime_info=self.cl_runtime_info)

        weights = codec.decode(result['x'])

        # Weighted sum over the channels gives the reconstructed timeseries.
        reconstruction = np.sum(batch * weights[:, None, :], axis=2)

        # Sum-of-squares (SOS) combination over the channels, used as an intensity reference.
        sos = np.sqrt(np.sum(np.abs(slice_data).astype(np.float64) ** 2, axis=-1))
        reconstruction = np.reshape(reconstruction, slice_data.shape[:-2] + (nmr_timeseries,))

        # Rescale the reconstruction such that its mean intensity matches that of the SOS image.
        reconstruction *= (np.mean(sos, axis=2) / np.mean(reconstruction, axis=2))[:, :, None]

        return {
            'weights': np.reshape(weights, slice_data.shape[:-2] + (nmr_channels,)),
            'reconstruction': reconstruction,
        }
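
For orientation, the sum-of-squares (SOS) channel combination used above can be sketched in plain NumPy; the shapes below are illustrative assumptions, not values from the project:

import numpy as np

# Hypothetical slice of shape (x, y, timeseries, channels); the dimensions are made up.
slice_data = np.random.randn(4, 4, 10, 8)

# SOS combination over the last (channel) axis, mirroring the method above.
sos = np.sqrt(np.sum(np.abs(slice_data).astype(np.float64) ** 2, axis=-1))
print(sos.shape)  # (4, 4, 10)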
Example #2
def _tau_to_kappa(tau):
    """Using non-linear optimization, convert the NODDI-DTI Tau variables to NODDI kappa's.

    Args:
        tau (ndarray): the list of tau's per voxel.

    Returns:
        ndarray: the list of corresponding kappa's
    """
    tau_func = SimpleCLFunction.from_string('''
        double tau(double kappa){
            if(kappa < 1e-12){
                return 1/3.0;
            }
            return 0.5 * (1 / (sqrt(kappa) * dawson(sqrt(kappa))) - 1/kappa);
        }''',
                                            dependencies=[dawson()])

    objective_func = SimpleCLFunction.from_string('''
        double tau_to_kappa(local const mot_float_type* const x, void* data, local mot_float_type* objective_list){
            return pown(tau(x[0]) - ((_tau_to_kappa_data*)data)->tau, 2); 
        }
    ''',
                                                  dependencies=[tau_func])

    kappa = minimize(objective_func,
                     np.ones_like(tau),
                     data=Struct(
                         {'tau': Array(tau, 'mot_float_type', as_scalar=True)},
                         '_tau_to_kappa_data')).x
    kappa[kappa > 64] = 1
    kappa[kappa < 0] = 1
    return kappa
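
For a host-side sanity check, the forward tau(kappa) mapping that this routine inverts can be evaluated with SciPy's Dawson function; a minimal sketch, assuming only NumPy and SciPy:

import numpy as np
from scipy.special import dawsn

def tau_of_kappa(kappa):
    # Mirrors the CL function above: tau = 0.5 * (1/(sqrt(k) * F(sqrt(k))) - 1/k),
    # with F the Dawson function, and the limit 1/3 as kappa -> 0.
    kappa = np.asarray(kappa, dtype=np.float64)
    sk = np.sqrt(kappa)
    with np.errstate(divide='ignore', invalid='ignore'):
        return np.where(kappa < 1e-12, 1 / 3.0,
                        0.5 * (1 / (sk * dawsn(sk)) - 1 / kappa))

print(tau_of_kappa([1e-13, 0.5, 1.0, 10.0]))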
Example #3
    def _get_mcmc_method_kernel_data(self):
        return Struct({
            'x1_position': Array(self._x1, 'mot_float_type', mode='rw'),
            'x1_log_likelihood': Array(self._x1_log_likelihood, 'mot_float_type', mode='rw'),
            'x1_log_prior': Array(self._x1_log_prior, 'mot_float_type', mode='rw'),
            'scratch_mft': LocalMemory('mot_float_type', self._nmr_params + 2),
            'scratch_int': LocalMemory('int', self._nmr_params + 4),
        }, '_twalk_data')
Example #4
def _minimize_levenberg_marquardt(func, x0, nmr_observations, cl_runtime_info, lower_bounds, upper_bounds,
                                  use_local_reduction,
                                  constraints_func=None, data=None, options=None):
    options = options or {}
    nmr_problems = x0.shape[0]
    nmr_parameters = x0.shape[1]

    if nmr_observations < nmr_parameters:
        raise ValueError('The number of observations per problem must be at least the number of parameters')

    penalty_data, penalty_func = _get_penalty_function(nmr_parameters, constraints_func)

    eval_func = SimpleCLFunction.from_string('''
        void evaluate(local mot_float_type* x, void* data, local mot_float_type* result){
            double penalty = _mle_penalty(
                x,
                ((_lm_eval_func_data*)data)->data,
                ((_lm_eval_func_data*)data)->lower_bounds,
                ((_lm_eval_func_data*)data)->upper_bounds,
                ''' + str(options.get('penalty_weight', 1e30)) + ''',
                ((_lm_eval_func_data*)data)->penalty_data
            );

            ''' + func.get_cl_function_name() + '''(x, ((_lm_eval_func_data*)data)->data, result);

            if(get_local_id(0) == 0){
                for(int j = 0; j < ''' + str(nmr_observations) + '''; j++){
                    result[j] += penalty;
                }
            }
            barrier(CLK_LOCAL_MEM_FENCE);
        }
    ''', dependencies=[func, penalty_func])

    jacobian_func = _lm_numdiff_jacobian(eval_func, nmr_parameters, nmr_observations)

    optimizer_func = LevenbergMarquardt(eval_func, nmr_parameters, nmr_observations, jacobian_func,
                                        **_clean_options('Levenberg-Marquardt', options))

    kernel_data = {'model_parameters': Array(x0, ctype='mot_float_type', mode='rw'),
                   'data': Struct({'data': data,
                                   'lower_bounds': lower_bounds,
                                   'upper_bounds': upper_bounds,
                                   'penalty_data': penalty_data,
                                   'jacobian_x_tmp': LocalMemory('mot_float_type', nmr_observations)
                                   },
                                  '_lm_eval_func_data')}
    kernel_data.update(optimizer_func.get_kernel_data())

    return_code = optimizer_func.evaluate(
        kernel_data, nmr_problems,
        use_local_reduction=use_local_reduction and all(env.is_gpu for env in cl_runtime_info.cl_environments),
        cl_runtime_info=cl_runtime_info)

    return OptimizeResults({'x': kernel_data['model_parameters'].get_data(),
                            'status': return_code})
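
The `_lm_numdiff_jacobian` helper used above is not shown here; as a rough illustration of what a numerical-differentiation Jacobian computes, here is a forward-difference sketch in plain NumPy (the device code may well use a different scheme and step size):

import numpy as np

def numdiff_jacobian(residual_func, x, step=1e-6):
    # J[i, j] = d residual_i / d x_j, approximated with forward differences.
    r0 = np.asarray(residual_func(x))
    jacobian = np.empty((r0.size, x.size))
    for j in range(x.size):
        x_step = x.copy()
        x_step[j] += step
        jacobian[:, j] = (np.asarray(residual_func(x_step)) - r0) / step
    return jacobian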
Example #5
    def wrap_input_data(self, input_data):
        """Wrap the input data with extra information this wrapper might need.

        Args:
            input_data (mot.lib.kernel_data.KernelData): the kernel data we will wrap

        Returns:
            mot.lib.kernel_data.KernelData: the wrapped kernel data
        """
        return Struct({'data': input_data, 'x_tmp': LocalMemory('mot_float_type', nmr_items=self._nmr_parameters)},
                      'objective_function_wrapper_data')
Example #6
def _minimize_powell(func, x0, cl_runtime_info, lower_bounds, upper_bounds, use_local_reduction,
                     constraints_func=None, data=None, options=None):
    """
    Options:
        patience (int): Used to set the maximum number of iterations to patience*(number_of_parameters+1)
        reset_method (str): one of 'EXTRAPOLATED_POINT' or 'RESET_TO_IDENTITY', in lower or upper case.
        patience_line_search (int): the patience of the searching algorithm. Defaults to the
            same patience as for the Powell algorithm itself.
    """
    options = options or {}
    nmr_problems = x0.shape[0]
    nmr_parameters = x0.shape[1]

    penalty_data, penalty_func = _get_penalty_function(nmr_parameters, constraints_func)

    eval_func = SimpleCLFunction.from_string('''
        double evaluate(local mot_float_type* x, void* data){
            double penalty = _mle_penalty(
                x,
                ((_powell_eval_func_data*)data)->data,
                ((_powell_eval_func_data*)data)->lower_bounds,
                ((_powell_eval_func_data*)data)->upper_bounds,
                ''' + str(options.get('penalty_weight', 1e30)) + ''',
                ((_powell_eval_func_data*)data)->penalty_data
            );

            double func_val = ''' + func.get_cl_function_name() + '''(x, ((_powell_eval_func_data*)data)->data, 0);

            if(isnan(func_val)){
                return INFINITY;
            }

            return func_val + penalty;
        }
    ''', dependencies=[func, penalty_func])

    optimizer_func = Powell(eval_func, nmr_parameters, **_clean_options('Powell', options))

    kernel_data = {'model_parameters': Array(x0, ctype='mot_float_type', mode='rw'),
                   'data': Struct({'data': data,
                                   'lower_bounds': lower_bounds,
                                   'upper_bounds': upper_bounds,
                                   'penalty_data': penalty_data}, '_powell_eval_func_data')}
    kernel_data.update(optimizer_func.get_kernel_data())

    return_code = optimizer_func.evaluate(
        kernel_data, nmr_problems,
        use_local_reduction=use_local_reduction and all(env.is_gpu for env in cl_runtime_info.cl_environments),
        cl_runtime_info=cl_runtime_info)

    return OptimizeResults({'x': kernel_data['model_parameters'].get_data(),
                            'status': return_code})
Example #7
    def get_cache_struct(self, address_space):
        if not self._cache_info:
            return None

        fields = {}
        for dependency in self.get_dependencies():
            if isinstance(dependency, CompartmentModel):
                if dependency.get_cache_struct(address_space):
                    fields.update(dependency.get_cache_struct(address_space))

        struct_name = self._get_cache_parameter().ctype

        if address_space == 'private':
            for ctype, name, nmr_elements in self._cache_info.fields:
                fields[name] = PrivateMemory(nmr_elements, ctype)
        else:
            for ctype, name, nmr_elements in self._cache_info.fields:
                fields[name] = LocalMemory(ctype, nmr_items=nmr_elements)

        return {self.name: Struct(fields, struct_name)}
Example #8
def _get_penalty_function(nmr_parameters, constraints_func=None):
    """Get a function to compute the penalty term for the boundary conditions.

    This is meant to be used in the evaluation function of the optimization routines.

    Args:
        nmr_parameters (int): the number of parameters in the model
        constraints_func (mot.optimize.base.ConstraintFunction): function to compute (inequality) constraints.
            Should hold a CL function with the signature:

            .. code-block:: c

                void <func_name>(local const mot_float_type* const x,
                                 void* data,
                                 local mot_float_type* constraint_values);

            Where ``constraints_values`` is filled as:

            .. code-block:: c

                constraint_values[i] = g_i(x)

            That is, for each constraint function :math:`g_i`, formulated as :math:`g_i(x) <= 0`, we should return
            the function value of :math:`g_i`.

    Returns:
        tuple: Struct and SimpleCLFunction, the required data for the penalty function and the penalty function itself.
    """
    dependencies = []
    data_requirements = {'scratch': LocalMemory('double', 1)}
    constraints_code = ''

    if constraints_func and constraints_func.get_nmr_constraints() > 0:
        nmr_constraints = constraints_func.get_nmr_constraints()
        dependencies.append(constraints_func)
        data_requirements['constraints'] = LocalMemory('mot_float_type', nmr_constraints)

        constraints_code = '''
            local mot_float_type* constraints = ((_mle_penalty_data*)scratch_data)->constraints;

            ''' + constraints_func.get_cl_function_name() + '''(x, data, constraints);

            for(int i = 0; i < ''' + str(nmr_constraints) + '''; i++){
                *penalty_sum += pown(max((mot_float_type)0, constraints[i]), 2);
            }
        '''

    data = Struct(data_requirements, '_mle_penalty_data')
    func = SimpleCLFunction.from_string('''
        double _mle_penalty(
                local mot_float_type* x,
                void* data,
                local mot_float_type* lower_bounds,
                local mot_float_type* upper_bounds,
                float penalty_weight,
                void* scratch_data){

            local double* penalty_sum = ((_mle_penalty_data*)scratch_data)->scratch;

            if(get_local_id(0) == 0){
                *penalty_sum = 0;

                // boundary conditions
                for(int i = 0; i < ''' + str(nmr_parameters) + '''; i++){
                    if(isfinite(upper_bounds[i])){
                        *penalty_sum += pown(max((mot_float_type)0, x[i] - upper_bounds[i]), 2);
                    }
                    if(isfinite(lower_bounds[i])){
                        *penalty_sum += pown(max((mot_float_type)0, lower_bounds[i] - x[i]), 2);
                    }
                }
            }
            barrier(CLK_LOCAL_MEM_FENCE);

            // constraints
            ''' + constraints_code + '''

            return penalty_weight * *penalty_sum;
        }
    ''', dependencies=dependencies)
    return data, func
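
For illustration, a constraints_func matching the signature documented above could encode x[0] <= x[1] as g(x) = x[0] - x[1] <= 0. The `SimpleConstraintFunction` helper name below is an assumption about the MOT API; adapt it to whatever ConstraintFunction implementation is available:

from mot.optimize.base import SimpleConstraintFunction  # assumed helper class

constraints_func = SimpleConstraintFunction.from_string('''
    void constraints(local const mot_float_type* const x,
                     void* data,
                     local mot_float_type* constraint_values){
        // g_0(x) = x[0] - x[1], with the convention g_0(x) <= 0
        constraint_values[0] = x[0] - x[1];
    }''', nmr_constraints=1)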
Example #9
def _minimize_subplex(func, x0, cl_runtime_info, lower_bounds, upper_bounds, use_local_reduction,
                      constraints_func=None, data=None, options=None):
    """Variation on the Nelder-Mead Simplex method by Thomas H. Rowan.

    This method uses NMSimplex to search subspace regions for the minimum. See Rowan's thesis titled
    "Functional Stability analysis of numerical algorithms" for more details.

    The scales should satisfy the following constraints:

        .. code-block:: python

            alpha > 0
            0 < beta < 1
            gamma > 1
            gamma > alpha
            0 < delta < 1

    Options:
        patience (int): Used to set the maximum number of iterations to patience*(number_of_parameters+1)
        patience_nmsimplex (int): The maximum patience for each subspace search.
            For each subspace search we set the number of iterations to patience*(number_of_parameters_subspace+1)
        scale (double): the scale of the initial simplex, default 1.0
        alpha (double): reflection coefficient, default 1.0
        beta (double): contraction coefficient, default 0.5
        gamma (double): expansion coefficient, default 2.0
        delta (double): shrinkage coefficient, default 0.5
        psi (double): subplex specific, simplex reduction coefficient, default 0.001.
        omega (double): subplex specific, scaling reduction coefficient, default 0.01
        min_subspace_length (int): the minimum subspace length, defaults to min(2, n).
            This should hold: (1 <= min_s_d <= max_s_d <= n and min_s_d*ceil(n/max_s_d) <= n)
        max_subspace_length (int): the maximum subspace length, defaults to min(5, n).
            This should hold: (1 <= min_s_d <= max_s_d <= n and min_s_d*ceil(n/max_s_d) <= n)

        adaptive_scales (boolean): if set to True we use adaptive scales instead of the default scale values.
            This sets the scales to:

            .. code-block:: python

                n = <# parameters>

                alpha = 1
                beta  = 0.75 - 1.0 / (2 * n)
                gamma = 1 + 2.0 / n
                delta = 1 - 1.0 / n

    References:
        [1] Gao F, Han L. Implementing the Nelder-Mead simplex algorithm with adaptive parameters.
              Comput Optim Appl. 2012;51(1):259-277. doi:10.1007/s10589-010-9329-3.
    """
    options = options or {}
    nmr_problems = x0.shape[0]
    nmr_parameters = x0.shape[1]

    penalty_data, penalty_func = _get_penalty_function(nmr_parameters, constraints_func)

    eval_func = SimpleCLFunction.from_string('''
        double evaluate(local mot_float_type* x, void* data){
            double penalty = _mle_penalty(
                x,
                ((_subplex_eval_func_data*)data)->data,
                ((_subplex_eval_func_data*)data)->lower_bounds,
                ((_subplex_eval_func_data*)data)->upper_bounds,
                ''' + str(options.get('penalty_weight', 1e30)) + ''',
                ((_subplex_eval_func_data*)data)->penalty_data
            );

            double func_val = ''' + func.get_cl_function_name() + '''(x, ((_subplex_eval_func_data*)data)->data, 0);

            if(isnan(func_val)){
                return INFINITY;
            }

            return func_val + penalty;
        }
    ''', dependencies=[func, penalty_func])

    optimizer_func = Subplex(eval_func, nmr_parameters, **_clean_options('Subplex', options))

    kernel_data = {'model_parameters': Array(x0, ctype='mot_float_type', mode='rw'),
                   'data': Struct({'data': data,
                                   'lower_bounds': lower_bounds,
                                   'upper_bounds': upper_bounds,
                                   'penalty_data': penalty_data}, '_subplex_eval_func_data')}
    kernel_data.update(optimizer_func.get_kernel_data())

    return_code = optimizer_func.evaluate(
        kernel_data, nmr_problems,
        use_local_reduction=use_local_reduction and all(env.is_gpu for env in cl_runtime_info.cl_environments),
        cl_runtime_info=cl_runtime_info)

    return OptimizeResults({'x': kernel_data['model_parameters'].get_data(),
                            'status': return_code})
Example #10
def _minimize_nmsimplex(func, x0, cl_runtime_info, lower_bounds, upper_bounds, use_local_reduction,
                        constraints_func=None, data=None, options=None):
    """Use the Nelder-Mead simplex method to calculate the optimimum.

    The scales should satisfy the following constraints:

        .. code-block:: python

            alpha > 0
            0 < beta < 1
            gamma > 1
            gamma > alpha
            0 < delta < 1

    Options:
        patience (int): Used to set the maximum number of iterations to patience*(number_of_parameters+1)
        scale (double): the scale of the initial simplex, default 1.0
        alpha (double): reflection coefficient, default 1.0
        beta (double): contraction coefficient, default 0.5
        gamma (double): expansion coefficient, default 2.0
        delta (double): shrinkage coefficient, default 0.5
        adaptive_scales (boolean): if set to True we use adaptive scales instead of the default scale values.
            This sets the scales to:

            .. code-block:: python

                n = <# parameters>

                alpha = 1
                beta  = 0.75 - 1.0 / (2 * n)
                gamma = 1 + 2.0 / n
                delta = 1 - 1.0 / n

            Following the paper [1].

    References:
        [1] Gao F, Han L. Implementing the Nelder-Mead simplex algorithm with adaptive parameters.
              Comput Optim Appl. 2012;51(1):259-277. doi:10.1007/s10589-010-9329-3.
    """
    options = options or {}
    nmr_problems = x0.shape[0]
    nmr_parameters = x0.shape[1]

    penalty_data, penalty_func = _get_penalty_function(nmr_parameters, constraints_func)

    eval_func = SimpleCLFunction.from_string('''
        double evaluate(local mot_float_type* x, void* data){
            double penalty = _mle_penalty(
                x,
                ((_nmsimplex_eval_func_data*)data)->data,
                ((_nmsimplex_eval_func_data*)data)->lower_bounds,
                ((_nmsimplex_eval_func_data*)data)->upper_bounds,
                ''' + str(options.get('penalty_weight', 1e30)) + ''',
                ((_nmsimplex_eval_func_data*)data)->penalty_data
            );

            double func_val = ''' + func.get_cl_function_name() + '''(x, ((_nmsimplex_eval_func_data*)data)->data, 0);

            if(isnan(func_val)){
                return INFINITY;
            }

            return func_val + penalty;
        }
    ''', dependencies=[func, penalty_func])

    optimizer_func = NMSimplex('evaluate', nmr_parameters, dependencies=[eval_func],
                               **_clean_options('Nelder-Mead', options))

    kernel_data = {'model_parameters': Array(x0, ctype='mot_float_type', mode='rw'),
                   'data': Struct({'data': data,
                                   'lower_bounds': lower_bounds,
                                   'upper_bounds': upper_bounds,
                                   'penalty_data': penalty_data}, '_nmsimplex_eval_func_data')}
    kernel_data.update(optimizer_func.get_kernel_data())

    return_code = optimizer_func.evaluate(
        kernel_data, nmr_problems,
        use_local_reduction=use_local_reduction and all(env.is_gpu for env in cl_runtime_info.cl_environments),
        cl_runtime_info=cl_runtime_info)

    return OptimizeResults({'x': kernel_data['model_parameters'].get_data(),
                            'status': return_code})
Example #11
File: base.py  Project: 42n4/MOT
    def _get_mcmc_method_kernel_data(self):
        return Struct(self._get_mcmc_method_kernel_data_elements(),
                      '_mcmc_method_data')
Example #12
    be perfect for every simulated distribution. In general though, fit results should match the ground truth.
    """
    # The number of unique distributions; this is typically very large
    nmr_simulations = 1000

    # How many data points per distribution; this is typically small
    nmr_datapoints = 25

    # generate a range of parameters; these form the ground truth
    shape = np.random.uniform(0.1, 10, nmr_simulations)
    scale = np.random.uniform(0.1, 5, nmr_simulations)

    # draw some random data points from those simulated distributions
    gamma_random = np.zeros((nmr_simulations, nmr_datapoints))
    for i in range(nmr_datapoints):
        gamma_random[:, i] = np.random.gamma(shape, scale)

    # The optimization starting points for shape and scale
    x0 = np.ones((nmr_simulations, 2))

    # Minimize the parameters of the model given the starting points.
    opt_output = minimize(get_objective_function(nmr_datapoints),
                          x0,
                          data=Struct({'gamma_random': Array(gamma_random)},
                                      'optimization_data'))

    # Print the output
    print(np.column_stack([shape, scale]))
    print(opt_output.x)
    print(np.abs(opt_output.x - np.column_stack([shape, scale])))
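
As an optional cross-check of such fits, one problem at a time can be refitted with SciPy's maximum-likelihood estimator; a minimal sketch, assuming the `gamma_random` and `opt_output` variables from above:

from scipy import stats

# Fit the first simulated problem; floc=0 pins the location parameter so
# only shape and scale are estimated, matching the two-parameter model above.
fitted_shape, _, fitted_scale = stats.gamma.fit(gamma_random[0], floc=0)
print((fitted_shape, fitted_scale), opt_output.x[0])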
Example #13
    For more information, see http://matatat.org/sampyl/examples/german_tank_problem.html or
    https://en.wikipedia.org/wiki/German_tank_problem
    """

    # The number of problems
    nmr_problems = 10000

    # The data we would like to use
    observations, nmr_tanks_ground_truth = get_historical_data(nmr_problems)
    # observations, nmr_tanks_ground_truth = get_simulated_data(nmr_problems)

    ## Sample ##
    # The additional data we need
    kernel_data = Struct(
        {
            'observed_tanks': Array(observations, 'uint'),
            'lower_bounds': Array(np.max(observations, axis=1), 'uint'),
            'upper_bounds': Array(np.ones((nmr_problems, )) * 1000, 'uint')
        }, '_model_data')

    # Create an instance of the sample routine we want to use.
    sampler = AdaptiveMetropolisWithinGibbs(
        get_log_likelihood_function(observations.shape[1]),
        get_log_prior_function(),
        np.max(observations, axis=1),  # starting position
        np.ones(nmr_problems) * 10,  # initial proposal standard deviations
        data=kernel_data)

    # Sample each instance
    sampling_output = sampler.sample(10000, thinning=1, burnin=0)

    # Obtain the samples