def _get_kernel_data(self, nmr_samples, thinning, return_output):
    """Get the kernel data we will input to the MCMC sampler.

    This sets the items:

    * data: the pointer to the user provided data
    * method_data: the data specific to the MCMC method
    * nmr_iterations: the number of iterations to sample
    * iteration_offset: the current sample index, that is, the offset to the given number of iterations
    * rng_state: the random number generator state
    * current_chain_position: the current position of the sampled chain
    * current_log_likelihood: the log likelihood of the current position on the chain
    * current_log_prior: the log prior of the current position on the chain

    Additionally, if ``return_output`` is True, we add to that the arrays:

    * samples: for the samples
    * log_likelihoods: for storing the log likelihoods
    * log_priors: for storing the priors

    Args:
        nmr_samples (int): the number of samples we will draw
        thinning (int): the thinning factor we want to use
        return_output (boolean): if the kernel should return output

    Returns:
        dict[str: mot.lib.kernel_data.KernelData]: the kernel input data
    """
    kernel_data = {
        'data': self._data,
        'method_data': self._get_mcmc_method_kernel_data(),
        'nmr_iterations': Scalar(nmr_samples * thinning, ctype='ulong'),
        'iteration_offset': Scalar(self._sampling_index, ctype='ulong'),
        'rng_state': Array(self._rng_state, 'uint', mode='rw'),
        'current_chain_position': Array(self._current_chain_position, 'mot_float_type', mode='rw'),
        'current_log_likelihood': Array(self._current_log_likelihood, 'mot_float_type', mode='rw'),
        'current_log_prior': Array(self._current_log_prior, 'mot_float_type', mode='rw'),
    }

    if return_output:
        kernel_data.update({
            'samples': Zeros((self._nmr_problems, self._nmr_params, nmr_samples), ctype='mot_float_type'),
            'log_likelihoods': Zeros((self._nmr_problems, nmr_samples), ctype='mot_float_type'),
            'log_priors': Zeros((self._nmr_problems, nmr_samples), ctype='mot_float_type'),
        })
    return kernel_data
def _wynn_extrapolate(derivatives):
    """Apply Wynn's epsilon extrapolation to the given derivative estimates.

    Args:
        derivatives (ndarray): (n, p, s) matrix with, for n problems and p parameters, s derivative estimates

    Returns:
        tuple: the extrapolations and the error estimates, each of shape (n, p, s - 2)
    """
    nmr_problems, nmr_derivatives, nmr_steps = derivatives.shape
    nmr_extrapolations = nmr_steps - 2

    kernel_data = {
        'derivatives': Array(derivatives, 'double', offset_str='{problem_id} * ' + str(nmr_steps)),
        'extrapolations': Zeros((nmr_problems * nmr_derivatives, nmr_extrapolations), 'double', mode='rw'),
        'errors': Zeros((nmr_problems * nmr_derivatives, nmr_extrapolations), 'double', mode='rw'),
    }

    wynn_func = _wynn_extrapolation_kernel(nmr_steps)
    wynn_func.evaluate(kernel_data, nmr_problems * nmr_derivatives, use_local_reduction=False)

    extrapolations = np.reshape(kernel_data['extrapolations'].get_data(),
                                (nmr_problems, nmr_derivatives, nmr_extrapolations))
    errors = np.reshape(kernel_data['errors'].get_data(),
                        (nmr_problems, nmr_derivatives, nmr_extrapolations))
    return extrapolations, errors
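# For reference, one epsilon-algorithm pass is equivalent to applying the Shanks
# transform to each sliding triple of estimates, which is why s inputs yield s - 2
# outputs. Below is a minimal NumPy sketch of that identity; it is an illustration
# only, and the actual CL kernel built by _wynn_extrapolation_kernel may differ in
# how it guards against zero deltas and estimates the error.
def _wynn_extrapolate_numpy_sketch(sequence):
    """Shanks transform per sliding triple of a 1d sequence: s inputs give s - 2 outputs."""
    v0, v1, v2 = sequence[:-2], sequence[1:-1], sequence[2:]
    delta0, delta1 = v1 - v0, v2 - v1
    with np.errstate(divide='ignore', invalid='ignore'):
        extrapolations = v1 + 1. / (1. / delta1 - 1. / delta0)
    # rough error proxy: the spread of the last two differences
    errors = np.abs(delta0) + np.abs(delta1)
    return extrapolations, errors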
def extra_optimization_maps(parameters_dict):
    """Calculate DKI statistics like the mean, axial and radial kurtosis.

    The Mean Kurtosis (MK) is calculated by averaging the Kurtosis over orientations on the unit sphere.
    The Axial Kurtosis (AK) is obtained using the principal direction of diffusion (fe; first eigenvec)
    from the Tensor as its direction and then averaging the Kurtosis over +fe and -fe.
    Finally, the Radial Kurtosis (RK) is calculated by averaging the Kurtosis over a circle of directions
    around the first eigenvec.

    Args:
        parameters_dict (dict): the fitted Kurtosis parameters, this requires a dictionary with at least
            the elements:
            'd', 'dperp0', 'dperp1', 'theta', 'phi', 'psi', 'W_0000', 'W_1000', 'W_1100', 'W_1110',
            'W_1111', 'W_2000', 'W_2100', 'W_2110', 'W_2111', 'W_2200', 'W_2210', 'W_2211', 'W_2220',
            'W_2221', 'W_2222'.

    Returns:
        dict: maps for the Mean Kurtosis (MK), Axial Kurtosis (AK) and Radial Kurtosis (RK).
    """
    param_names = ['d', 'dperp0', 'dperp1', 'theta', 'phi', 'psi',
                   'W_0000', 'W_1000', 'W_1100', 'W_1110', 'W_1111', 'W_2000', 'W_2100',
                   'W_2110', 'W_2111', 'W_2200', 'W_2210', 'W_2211', 'W_2220', 'W_2221', 'W_2222']

    parameters = np.column_stack([parameters_dict[n] for n in param_names])
    nmr_voxels = parameters.shape[0]

    directions = DKIMeasures._get_spherical_samples()

    kernel_data = {
        'parameters': Array(parameters, ctype='mot_float_type'),
        'directions': Array(directions, ctype='float4', offset_str='0'),
        'nmr_directions': Scalar(directions.shape[0]),
        'nmr_radial_directions': Scalar(256),
        'mks': Zeros((nmr_voxels,), ctype='float'),
        'aks': Zeros((nmr_voxels,), ctype='float'),
        'rks': Zeros((nmr_voxels,), ctype='float')
    }

    DKIMeasures._get_compute_function(param_names).evaluate(kernel_data, nmr_voxels)

    return {'MK': kernel_data['mks'].get_data(),
            'AK': kernel_data['aks'].get_data(),
            'RK': kernel_data['rks'].get_data()}
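# Hypothetical call sketch (values for illustration only): every required map is
# a 1d array with one value per voxel, e.g. as loaded from fitted Kurtosis results.
# Using the ``param_names`` list from the function above:
#
#   nmr_voxels = 100
#   parameters_dict = {name: np.zeros(nmr_voxels) for name in param_names}
#   measures = extra_optimization_maps(parameters_dict)
#   measures['MK'].shape  # (100,)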
def run_cl(samples):
    # note: ``cl_func``, ``low`` and ``high`` are taken from the enclosing scope
    data = {
        'samples': Array(samples, 'mot_float_type'),
        'means': Zeros(samples.shape[0], 'mot_float_type'),
        'stds': Zeros(samples.shape[0], 'mot_float_type'),
        'nmr_samples': Scalar(samples.shape[1]),
        'low': Scalar(low),
        'high': Scalar(high),
    }
    cl_func.evaluate(data, samples.shape[0])
    return data['means'].get_data(), data['stds'].get_data()
def _richardson_extrapolation(derivatives, step_ratio):
    """Apply Richardson extrapolation to the derivatives computed with different step sizes.

    Having, for every problem instance and every Hessian element, multiple derivatives computed with
    decreasing steps, we can apply Richardson extrapolation to reduce the error term from
    :math:`\mathcal{O}(h^{2})` to :math:`\mathcal{O}(h^{4})` or :math:`\mathcal{O}(h^{6})`, depending
    on how many steps were calculated.

    This method only considers extrapolation up to the sixth error order. For a set of two derivatives we
    compute a single fourth order approximation; for three derivatives and up we compute ``n - 2`` sixth
    order approximations. The expected error of approximation ``i`` is computed using derivative ``i + 1``,
    plus a statistical error based on the machine precision.

    Args:
        derivatives (ndarray): (n, p, s) matrix with, for n problems and p parameters, the derivatives
            computed at s step sizes
        step_ratio (float): the diminishing ratio of the steps used to compute the derivatives

    Returns:
        tuple: the Richardson extrapolations and the corresponding error estimates
    """
    nmr_problems, nmr_derivatives, nmr_steps = derivatives.shape
    richardson_coefficients = _get_richardson_coefficients(step_ratio, min(nmr_steps, 3) - 1)
    nmr_convolutions_needed = nmr_steps - (len(richardson_coefficients) - 2)
    final_nmr_convolutions = nmr_convolutions_needed - 1

    kernel_data = {
        'derivatives': Array(derivatives, 'double', offset_str='{problem_id} * ' + str(nmr_steps)),
        'richardson_extrapolations': Zeros((nmr_problems * nmr_derivatives, nmr_convolutions_needed),
                                           'double', mode='rw'),
        'errors': Zeros((nmr_problems * nmr_derivatives, final_nmr_convolutions), 'double', mode='rw'),
    }

    richardson_func = _richardson_error_kernel(nmr_steps, nmr_convolutions_needed, richardson_coefficients)
    richardson_func.evaluate(kernel_data, nmr_problems * nmr_derivatives, use_local_reduction=False)

    richardson_extrapolations = np.reshape(kernel_data['richardson_extrapolations'].get_data(),
                                           (nmr_problems, nmr_derivatives, nmr_convolutions_needed))
    errors = np.reshape(kernel_data['errors'].get_data(),
                        (nmr_problems, nmr_derivatives, final_nmr_convolutions))

    return richardson_extrapolations[..., :final_nmr_convolutions], errors
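# The core identity behind one Richardson step, as a minimal NumPy sketch under
# the assumption that the estimate d(h) has a leading error term c * h**order:
# combining d(h) and d(h / step_ratio) cancels that term. Illustration only; the
# CL kernel applies the equivalent convolution with precomputed coefficients.
def _richardson_step_numpy_sketch(coarse, fine, step_ratio, order=2):
    """One Richardson step, with ``coarse`` = d(h) and ``fine`` = d(h / step_ratio)."""
    factor = step_ratio ** order
    return (factor * fine - coarse) / (factor - 1)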
def _get_signal_estimates(self, roi_indices):
    self._model.set_input_data(self._input_data, suppress_warnings=True)

    parameters = self._x_opt_array[roi_indices]

    kernel_data = {
        'data': self._model.get_kernel_data().get_subset(roi_indices),
        'parameters': Array(parameters, ctype='mot_float_type'),
        'estimates': Zeros((parameters.shape[0], self._model.get_nmr_observations()), 'mot_float_type')
    }

    eval_function_info = self._model.get_model_eval_function()
    simulate_function = SimpleCLFunction.from_string('''
        void simulate(void* data, local mot_float_type* parameters, global mot_float_type* estimates){
            for(uint i = 0; i < ''' + str(self._model.get_nmr_observations()) + '''; i++){
                estimates[i] = ''' + eval_function_info.get_cl_function_name() + '''(data, parameters, i);
            }
        }
    ''', dependencies=[eval_function_info])

    simulate_function.evaluate(kernel_data, parameters.shape[0])
    return kernel_data['estimates'].get_data()
def _minimize_levenberg_marquardt(func, x0, nmr_observations, cl_runtime_info, data=None, options=None):
    options = _clean_options('Levenberg-Marquardt', options)

    nmr_problems = x0.shape[0]
    nmr_parameters = x0.shape[1]

    if nmr_observations < nmr_parameters:
        raise ValueError('The number of observations per problem must be at least the number of parameters')

    kernel_data = {'model_parameters': Array(x0, ctype='mot_float_type', mode='rw'),
                   'data': data,
                   'fjac': Zeros((nmr_problems, nmr_parameters, nmr_observations),
                                 ctype='mot_float_type', mode='rw')}

    eval_func = SimpleCLFunction.from_string('''
        void evaluate(local mot_float_type* x, void* data, local mot_float_type* result){
            ''' + func.get_cl_function_name() + '''(x, data, result);
        }
    ''', dependencies=[func])

    optimizer_func = LevenbergMarquardt(eval_func, nmr_parameters, nmr_observations,
                                        jacobian_func=None, **options)

    return_code = optimizer_func.evaluate(
        kernel_data, nmr_problems,
        use_local_reduction=all(env.is_gpu for env in cl_runtime_info.get_cl_environments()),
        cl_runtime_info=cl_runtime_info)

    return OptimizeResults({'x': kernel_data['model_parameters'].get_data(),
                            'status': return_code})
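# Hypothetical usage sketch (function name and values are illustrative only):
# fit y = a + b * t against three hard-coded observations. The residual function
# must match the (x, data, result) signature wrapped above.
#
#   func = SimpleCLFunction.from_string('''
#       void linear_residuals(local mot_float_type* x, void* data, local mot_float_type* result){
#           mot_float_type t[3] = {0, 1, 2};
#           mot_float_type y[3] = {1, 3, 5};
#           for(uint i = 0; i < 3; i++){
#               result[i] = y[i] - (x[0] + x[1] * t[i]);
#           }
#       }
#   ''')
#   results = _minimize_levenberg_marquardt(func, np.zeros((1, 2)), 3, CLRuntimeInfo())
#   results['x']  # should approach [1, 2]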
def get_kernel_wrapped(self, input_data, nmr_instances, kernel_name=None):
    """Wrap this function in a CL kernel for the given input data.

    Returns:
        tuple: the wrapped kernel function and a dictionary with extra kernel data (the return value
            array, for functions that do not return void). If this function is already a kernel
            function it is returned as is.
    """
    if self.is_kernel_func():
        return self

    input_data = convert_inputs_to_kernel_data(input_data, self.get_parameters(), nmr_instances)
    kernel_name = kernel_name or 'kernel_' + self.get_cl_function_name()

    variable_inits = []
    function_call_inputs = []
    post_function_callbacks = []

    for parameter in self.get_parameters():
        data = input_data[parameter.name]
        call_args = (parameter.name, '_' + parameter.name, 'gid', parameter.address_space)

        variable_inits.append(data.initialize_variable(*call_args))
        function_call_inputs.append(data.get_function_call_input(*call_args))
        post_function_callbacks.append(data.post_function_callback(*call_args))

    parameter_list = []
    for name, data in input_data.items():
        parameter_list.extend(data.get_kernel_parameters('_' + name))

    assignment = ''
    extra_data = {}
    if self.get_return_type() != 'void':
        assignment = '__return_values[gid] = '
        extra_data = {'__return_values': Zeros((nmr_instances,), self.get_return_type())}
        parameter_list.extend(extra_data['__return_values'].get_kernel_parameters('__return_values'))

    cl_body = '''
        ulong gid = (ulong)(get_global_id(0) / get_local_size(0));

        ''' + '\n'.join(variable_inits) + '''

        ''' + assignment + ' ' + self.get_cl_function_name() + '(' + ', '.join(function_call_inputs) + ''');

        ''' + '\n'.join(post_function_callbacks) + '''
    '''

    func = SimpleCLFunction('void', kernel_name, parameter_list, cl_body,
                            dependencies=[self], is_kernel_func=True)
    return func, extra_data
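# For intuition, wrapping a hypothetical function ``double square(double x)`` with
# one scalar input per instance would generate a kernel shaped roughly like the
# following (a sketch of the structure built above, not the exact MOT output):
#
#   void kernel_square(global double* _x, global double* __return_values){
#       ulong gid = (ulong)(get_global_id(0) / get_local_size(0));
#       double x = _x[gid];
#       __return_values[gid] = square(x);
#   }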
def apply_cl_function(cl_function, kernel_data, nmr_instances, use_local_reduction=False, cl_runtime_info=None):
    """Run the given function/procedure on the given set of data.

    This function wraps the given CL function in a kernel call and executes it for every data instance
    using the provided kernel data. It respects the read/write setting of the kernel data elements such
    that output can be written back to the corresponding kernel data elements.

    Args:
        cl_function (mot.lib.cl_function.CLFunction): the function to run on the datasets.
        kernel_data (dict[str: mot.lib.kernel_data.KernelData]): the data to use as input to the function.
        nmr_instances (int): the number of parallel threads to run (used as ``global_size``)
        use_local_reduction (boolean): set this to True if you want to use local memory reduction in
            your CL procedure. If this is set to True we will multiply the global size
            (given by the nmr_instances) by the work group sizes.
        cl_runtime_info (mot.configuration.CLRuntimeInfo): the runtime information

    Returns:
        ndarray or None: if the function has a non-void return type, the return values per instance
    """
    cl_runtime_info = cl_runtime_info or CLRuntimeInfo()

    for param in cl_function.get_parameters():
        if param.name not in kernel_data:
            names = [param.name for param in cl_function.get_parameters()]
            missing_names = [name for name in names if name not in kernel_data]
            raise ValueError('Some parameters are missing an input value, '
                             'required parameters are: {}, missing inputs are: {}'.format(names, missing_names))

    if cl_function.get_return_type() != 'void':
        kernel_data['_results'] = Zeros((nmr_instances,), cl_function.get_return_type())

    workers = []
    for cl_environment in cl_runtime_info.get_cl_environments():
        workers.append(_ProcedureWorker(cl_environment, cl_runtime_info.get_compile_flags(),
                                        cl_function, kernel_data, cl_runtime_info.double_precision,
                                        use_local_reduction))

    cl_runtime_info.load_balancer.process(workers, nmr_instances)

    if cl_function.get_return_type() != 'void':
        return kernel_data['_results'].get_data()
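# A minimal usage sketch (function name and values are illustrative only): square
# ten values in parallel, writing the results back through an output array.
#
#   import numpy as np
#   from mot.lib.cl_function import SimpleCLFunction
#   from mot.lib.kernel_data import Array, Zeros
#
#   cl_function = SimpleCLFunction.from_string('''
#       void square(global double* value, global double* result){
#           *result = *value * *value;
#       }
#   ''')
#   kernel_data = {'value': Array(np.arange(10, dtype=np.float64), 'double'),
#                  'result': Zeros((10,), 'double')}
#   apply_cl_function(cl_function, kernel_data, 10)
#   kernel_data['result'].get_data()  # [0, 1, 4, 9, ...]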
def create_signal_estimates(model, input_data, parameters):
    """Create the signal estimates for your estimated model parameters.

    This function is typically used to obtain signal estimates from optimization results.

    This function evaluates the model as it is used in model fitting and sampling. That is, it includes
    the gradient deviations (if set in the input data) and loads all static and fixed parameter maps.

    Args:
        model (str or model): the model or the name of the model to use for estimating the signals
        input_data (mdt.utils.MRIInputData): the input data object, we will set this to the model
        parameters (str or dict): either a directory with the optimization results or a dictionary
            containing the optimization results. Each element is assumed to be a 4d volume with the
            voxels we are using for the simulations.

    Returns:
        ndarray: the 4d array with the signal estimates per voxel
    """
    if isinstance(model, str):
        model = get_model(model)()

    model.set_input_data(input_data)
    build_model = model.build()

    if isinstance(parameters, str):
        parameters = get_all_nifti_data(parameters)

    parameters = create_roi(parameters, input_data.mask)
    parameters = model.param_dict_to_array(parameters)

    kernel_data = {
        'data': build_model.get_kernel_data(),
        'parameters': Array(parameters, ctype='mot_float_type'),
        'estimates': Zeros((parameters.shape[0], build_model.get_nmr_observations()), 'mot_float_type')
    }

    _get_simulate_function(build_model).evaluate(kernel_data, parameters.shape[0])
    results = kernel_data['estimates'].get_data()
    return restore_volumes(results, input_data.mask)
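# Hypothetical usage sketch (model name and paths are illustrative only):
#
#   input_data = mdt.load_input_data('data.nii.gz', 'protocol.prtcl', 'mask.nii.gz')
#   estimates = create_signal_estimates('BallStick_r1', input_data, 'output/BallStick_r1')
#   estimates.shape  # 4d volume: (x, y, z, nmr_observations)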
def _generate_samples(cl_function, nmr_distributions, nmr_samples, ctype, kernel_data, seed=None):
    np.random.seed(seed)

    # draw one 32-bit starting seed per distribution for the CL random number generator
    rng_seed = np.random.uniform(low=np.iinfo(np.uint32).min, high=np.iinfo(np.uint32).max + 1,
                                 size=(nmr_distributions, 1)).astype(np.uint32)

    kernel_data.update({
        'samples': Zeros((nmr_distributions, nmr_samples), ctype),
        'rng_seed': Array(rng_seed, 'uint', mode='r', as_scalar=True)
    })

    cl_function.evaluate(kernel_data, nmr_distributions)
    return kernel_data['samples'].get_data()
def simulate_signals(model, protocol, parameters):
    """Estimate the signals of a given model for the given combination of protocol and parameters.

    In contrast to the function :func:`create_signal_estimates`, this function does not incorporate the
    gradient deviations. Furthermore, this function expects a two dimensional array of parameters and will
    simply evaluate the model for each set of parameters.

    Args:
        model (str or model): the model or the name of the model to use for estimating the signals
        protocol (mdt.protocols.Protocol): the protocol we will use for the signal simulation
        parameters (dict or ndarray): the parameters for which to simulate the signal. It can either be
            a matrix with for every row every model parameter, or a dictionary with for every parameter
            a 1d array.

    Returns:
        ndarray: a 2d array with for every parameter combination the simulated model signal
    """
    if isinstance(model, str):
        model = get_model(model)()

    model.set_input_data(MockMRIInputData(protocol=protocol))
    build_model = model.build()

    if isinstance(parameters, collections.abc.Mapping):
        parameters = model.param_dict_to_array(parameters)

    nmr_problems = parameters.shape[0]

    kernel_data = {
        'data': build_model.get_kernel_data(),
        'parameters': Array(parameters, ctype='mot_float_type'),
        'estimates': Zeros((nmr_problems, build_model.get_nmr_observations()), 'mot_float_type')
    }

    _get_simulate_function(build_model).evaluate(kernel_data, nmr_problems)
    return kernel_data['estimates'].get_data()
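# Hypothetical usage sketch (model name, path and shapes are illustrative only):
#
#   protocol = mdt.load_protocol('my_protocol.prtcl')
#   parameters = np.zeros((10, 4))  # 10 simulations of a hypothetical 4-parameter model
#   signals = simulate_signals('BallStick_r1', protocol, parameters)
#   signals.shape  # (10, nmr_observations)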
def estimate_hessian(objective_func, parameters,
                     lower_bounds=None, upper_bounds=None,
                     step_ratio=2, nmr_steps=5, max_step_sizes=None,
                     data=None, cl_runtime_info=None):
    """Estimate and return the upper triangular elements of the Hessian of the given function at the given parameters.

    This calculates the Hessian using central difference (using a 2nd order Taylor expansion) with a Richardson
    extrapolation over the proposed sequence of steps. If enough steps are given, we apply a Wynn epsilon
    extrapolation on top of the Richardson extrapolated results. If steps remain after that, we return the
    estimate with the lowest error, taking into account outliers using a median filter.

    The Hessian is evaluated at the steps:

    .. math::
        \quad ((f(x + d_j e_j + d_k e_k) - f(x + d_j e_j - d_k e_k)) -
               (f(x - d_j e_j + d_k e_k) - f(x - d_j e_j - d_k e_k))) / (4 d_j d_k)

    where :math:`e_j` is a vector where element :math:`j` is one and the rest are zero
    and :math:`d_j` is a scalar spacing :math:`steps_j`.

    Steps are generated according to an exponentially diminishing ratio, defined as:
    steps = max_step * step_ratio**-i, i = 0, 1, ..., nmr_steps-1, where the maximum step can be provided.
    For example, a maximum step of 2 with a step ratio of 2, computed for 4 steps gives: [2.0, 1.0, 0.5, 0.25].
    If lower and upper bounds are given, we use as maximum step size the largest step size that fits between the
    Hessian point and the boundaries.

    The steps define the order of the estimation, with 2 steps resulting in a O(h^2) estimate, 3 steps resulting
    in a O(h^4) estimate and 4 or more steps resulting in a O(h^6) derivative estimate.

    Args:
        objective_func (mot.lib.cl_function.CLFunction): The function we want to differentiate.
            A CL function with the signature:

            .. code-block:: c

                double <func_name>(local const mot_float_type* const x, void* data);

            The objective function has the same signature as the minimization function in MOT. For the numerical
            Hessian, the ``objective_list`` parameter is ignored.

        parameters (ndarray): The parameters at which to evaluate the gradient. A (d, p) matrix with d problems
            and p parameters.
        lower_bounds (tuple or list or None): a list of length (p,) for p parameters with the lower bounds.
            Each element of the list can be a scalar or a vector (of the same length as the number of
            problem instances). To disable bounds for this parameter use -np.inf.
        upper_bounds (tuple or list or None): a list of length (p,) for p parameters with the upper bounds.
            Each element of the list can be a scalar or a vector (of the same length as the number of
            problem instances). To disable bounds for this parameter use np.inf.
        step_ratio (float): the ratio at which the steps diminish.
        nmr_steps (int): the number of steps we will generate. We will calculate the derivative for each of these
            step sizes and extrapolate the best step size from among them. The minimum number of steps is 1.
        max_step_sizes (float or ndarray or None): the maximum step size, or the maximum step size per parameter.
            If None is given, we use 0.1 for all parameters. If a float is given, we use that for all parameters.
            If a list is given, it should be of the same length as the number of parameters.
        data (mot.lib.kernel_data.KernelData): the user provided data for the ``void* data`` pointer.
        cl_runtime_info (mot.configuration.CLRuntimeInfo): the runtime information

    Returns:
        ndarray: per problem instance a vector with the upper triangular elements of the Hessian matrix.
            This array can hold NaN's, for elements where the Hessian failed to approximate.
    """
    if len(parameters.shape) == 1:
        parameters = parameters[None, :]

    nmr_voxels = parameters.shape[0]
    nmr_params = parameters.shape[1]
    nmr_derivatives = nmr_params * (nmr_params + 1) // 2

    initial_step = _get_initial_step(parameters, lower_bounds, upper_bounds, max_step_sizes)

    kernel_data = {
        'parameters': Array(parameters, ctype='mot_float_type', mode='r', use_host_ptr=False),
        'initial_step': Array(initial_step, ctype='float', mode='r'),
        'derivatives': Zeros((nmr_voxels, nmr_derivatives), 'double'),
        'errors': Zeros((nmr_voxels, nmr_derivatives), 'double'),
        'x_tmp': LocalMemory('mot_float_type', nmr_params),
        'data': data,
        'scratch': LocalMemory('double', nmr_steps + (nmr_steps - 1) + nmr_steps)
    }

    hessian_kernel = SimpleCLFunction.from_string('''
        void _numdiff_hessian(
                global mot_float_type* parameters,
                global float* initial_step,
                global double* derivatives,
                global double* errors,
                local mot_float_type* x_tmp,
                void* data,
                local double* scratch){

            if(get_local_id(0) == 0){
                for(uint i = 0; i < ''' + str(nmr_params) + '''; i++){
                    x_tmp[i] = parameters[i];
                }
            }
            barrier(CLK_LOCAL_MEM_FENCE);

            double f_x_input = ''' + objective_func.get_cl_function_name() + '''(x_tmp, data);

            // upper triangle loop
            uint coord_ind = 0;
            for(int i = 0; i < ''' + str(nmr_params) + '''; i++){
                for(int j = i; j < ''' + str(nmr_params) + '''; j++){
                    _numdiff_hessian_element(
                        data, x_tmp, f_x_input, i, j, initial_step,
                        derivatives + coord_ind, errors + coord_ind, scratch);

                    coord_ind++;
                }
            }
        }
    ''', dependencies=[objective_func,
                       _get_numdiff_hessian_element_func(objective_func, nmr_steps, step_ratio)])

    hessian_kernel.evaluate(kernel_data, nmr_voxels, use_local_reduction=True,
                            cl_runtime_info=cl_runtime_info)

    return kernel_data['derivatives'].get_data()
def compute_log_likelihood(ll_func, parameters, data=None, cl_runtime_info=None):
    """Calculate and return the log likelihood of the given model for the given parameters.

    This calculates the log likelihoods for every problem in the model (typically after optimization),
    or a log likelihood for every sample of every problem (typically after sampling). In the case of the
    first (after optimization), the parameters must be an (d, p) array for d problems and p parameters.
    In the case of the second (after sampling), you must provide this function with a matrix of shape
    (d, p, n) with d problems, p parameters and n samples.

    Args:
        ll_func (mot.lib.cl_function.CLFunction): The log-likelihood function. A CL function with the signature:

            .. code-block:: c

                double <func_name>(local const mot_float_type* const x, void* data);

        parameters (ndarray): The parameters to use in the evaluation of the model. This is either an (d, p)
            matrix or (d, p, n) matrix with d problems, p parameters and n samples.
        data (mot.lib.kernel_data.KernelData): the user provided data for the ``void* data`` pointer.
        cl_runtime_info (mot.configuration.CLRuntimeInfo): the runtime information

    Returns:
        ndarray: per problem the log likelihood, or, per problem and per sample the log likelihood.
    """
    def get_cl_function():
        nmr_params = parameters.shape[1]

        if len(parameters.shape) > 2:
            return SimpleCLFunction.from_string('''
                void compute(global mot_float_type* parameters,
                             global mot_float_type* log_likelihoods,
                             void* data){

                    local mot_float_type x[''' + str(nmr_params) + '''];

                    for(uint sample_ind = 0; sample_ind < ''' + str(parameters.shape[2]) + '''; sample_ind++){
                        for(uint i = 0; i < ''' + str(nmr_params) + '''; i++){
                            x[i] = parameters[i * ''' + str(parameters.shape[2]) + ''' + sample_ind];
                        }

                        double ll = ''' + ll_func.get_cl_function_name() + '''(x, data);
                        if(get_local_id(0) == 0){
                            log_likelihoods[sample_ind] = ll;
                        }
                    }
                }
            ''', dependencies=[ll_func])

        return SimpleCLFunction.from_string('''
            void compute(local mot_float_type* parameters,
                         global mot_float_type* log_likelihoods,
                         void* data){

                double ll = ''' + ll_func.get_cl_function_name() + '''(parameters, data);
                if(get_local_id(0) == 0){
                    *(log_likelihoods) = ll;
                }
            }
        ''', dependencies=[ll_func])

    kernel_data = {'data': data,
                   'parameters': Array(parameters, 'mot_float_type', mode='r')}

    shape = parameters.shape
    if len(shape) > 2:
        kernel_data.update({
            'log_likelihoods': Zeros((shape[0], shape[2]), 'mot_float_type'),
        })
    else:
        kernel_data.update({
            'log_likelihoods': Zeros((shape[0],), 'mot_float_type'),
        })

    get_cl_function().evaluate(kernel_data, parameters.shape[0],
                               use_local_reduction=True, cl_runtime_info=cl_runtime_info)

    return kernel_data['log_likelihoods'].get_data()
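# Minimal usage sketch (function name and values are illustrative only): a
# log-likelihood that only depends on the parameter gives one value per problem.
#
#   ll_func = SimpleCLFunction.from_string('''
#       double neg_square(local const mot_float_type* const x, void* data){
#           return -x[0] * x[0];
#       }
#   ''')
#   compute_log_likelihood(ll_func, np.array([[0.5], [2.0]]))  # approx. [-0.25, -4.]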
def _compute_derivatives(objective_func, parameters, step_ratio, step_offset, nmr_steps,
                         lower_bounds, upper_bounds, max_step_sizes, scaling_factors,
                         data=None, parameter_transform_func=None):
    """Compute the lower triangular elements of the Hessian using the central difference method.

    This will compute the elements of the Hessian multiple times with decreasing step sizes.

    Args:
        objective_func (mot.lib.cl_function.CLFunction): the function we are trying to differentiate
        parameters (ndarray): a (n, p) matrix with for every problem n the p parameters. These are the
            points at which we want to calculate the derivative.
        step_ratio (float): the ratio at which the steps exponentially diminish
        step_offset (int): ignore the first few step sizes by this offset
        nmr_steps (int): the number of steps to compute and return (after the step offset)
        lower_bounds (list): lower bounds
        upper_bounds (list): upper bounds
        max_step_sizes (ndarray): the maximum step sizes per parameter
        scaling_factors (ndarray): per estimable parameter a single float with the parameter scaling for
            that parameter. Use 1 as identity.
        data (mot.lib.kernel_data.KernelData): the user provided data for the ``void* data`` pointer.
        parameter_transform_func (mot.lib.cl_function.CLFunction): A transformation that can prepare the
            parameter plus/minus the proposed step before evaluation. Signature:

            .. code-block:: c

                void <func_name>(void* data, local mot_float_type* x);

    Returns:
        ndarray: a (n, d, s) matrix with, for every problem n and every Hessian element d, the derivative
            estimates computed at each of the s step sizes
    """
    nmr_params = parameters.shape[1]
    nmr_derivatives = (nmr_params ** 2 - nmr_params) // 2 + nmr_params

    if parameter_transform_func is None:
        parameter_transform_func = SimpleCLFunction.from_string(
            'void voidTransform(void* data, local mot_float_type* x){}')

    initial_step = _get_initial_step_size(parameters, lower_bounds, upper_bounds,
                                          max_step_sizes, scaling_factors)
    if step_offset:
        initial_step *= float(step_ratio) ** -step_offset

    kernel_data = {
        'data': data,
        'parameters': Array(parameters, ctype='mot_float_type'),
        'parameter_scalings_inv': Array(1. / scaling_factors, ctype='float', offset_str='0'),
        'initial_step': Array(initial_step, ctype='float'),
        'step_evaluates': Zeros((parameters.shape[0], nmr_derivatives, nmr_steps), 'double'),
    }

    _derivation_kernel(objective_func, nmr_params, nmr_steps, step_ratio,
                       parameter_transform_func).evaluate(
        kernel_data, parameters.shape[0], use_local_reduction=True)

    return kernel_data['step_evaluates'].get_data()
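# For intuition, the scalar analogue of what the kernel computes for a diagonal
# Hessian element, as a minimal NumPy sketch (illustration only):
def _central_second_derivative_sketch(f, x, steps):
    """Central difference f''(x) for each step h: (f(x + h) - 2 f(x) + f(x - h)) / h^2."""
    return np.array([(f(x + h) - 2 * f(x) + f(x - h)) / h ** 2 for h in steps])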