def _get_mcmc_method_kernel_data(self):
    """Get the kernel data specific to the t-walk MCMC method."""
    return Struct({
        'x1_position': Array(self._x1, 'mot_float_type', mode='rw'),
        'x1_log_likelihood': Array(self._x1_log_likelihood, 'mot_float_type', mode='rw'),
        'x1_log_prior': Array(self._x1_log_prior, 'mot_float_type', mode='rw'),
        'scratch_mft': LocalMemory('mot_float_type', self._nmr_params + 2),
        'scratch_int': LocalMemory('int', self._nmr_params + 4),
    }, '_twalk_data')
def get_kernel_data(self):
    """Get the kernel data needed for this optimization routine to work."""
    return {
        'nmsimplex_scratch': LocalMemory(
            'mot_float_type',
            self._nmr_parameters * 2 + (self._nmr_parameters + 1)**2 + 1),
        'initial_simplex_scale': LocalMemory('mot_float_type', self._nmr_parameters)
    }
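# Illustration, not part of the original source: a quick sanity check of the
# scratch size the Nelder-Mead routine above reserves per work group.
def _nmsimplex_scratch_size(n):
    # n * 2 working vectors plus (n + 1)**2 + 1 further floats
    return n * 2 + (n + 1)**2 + 1

assert _nmsimplex_scratch_size(3) == 23   # 6 + 16 + 1 floats for 3 parameters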
def get_kernel_data(self):
    """Get the kernel data needed for this optimization routine to work."""
    return {
        'scratch_mot_float_type': LocalMemory(
            'mot_float_type',
            8 + 2 * self._var_replace_dict['NMR_OBSERVATIONS']
            + 5 * self._var_replace_dict['NMR_PARAMS']
            + self._var_replace_dict['NMR_PARAMS'] * self._var_replace_dict['NMR_OBSERVATIONS']),
        'scratch_int': LocalMemory('int', self._var_replace_dict['NMR_PARAMS'])
    }
def _reconstruct_slice(self, slice_data, slice_index):
    nmr_timeseries = slice_data.shape[-2]
    nmr_channels = slice_data.shape[-1]

    # flatten the spatial dimensions into one batch dimension
    batch = np.reshape(slice_data, (-1, nmr_timeseries, nmr_channels))
    codec = STARCOptimizationCodec(nmr_channels)

    data = Struct({'observations': Array(batch.reshape((batch.shape[0], -1))),
                   'scratch': LocalMemory('double', nmr_items=batch.shape[1] + 4)},
                  'starc_data')

    wrapper = ParameterDecodingWrapper(nmr_channels)
    result = minimize(wrapper.wrap_objective_function(get_starc_objective_func(batch),
                                                      codec.get_decode_function()),
                      codec.encode(self._get_starting_weights(slice_index, batch)),
                      data=wrapper.wrap_input_data(data),
                      cl_runtime_info=self.cl_runtime_info)

    # combine the channels using the fitted weights
    weights = codec.decode(result['x'])
    reconstruction = np.sum(batch * weights[:, None, :], axis=2)

    # rescale the weighted combination to the magnitude of the sum-of-squares reconstruction
    sos = np.sqrt(np.sum(np.abs(slice_data).astype(np.float64) ** 2, axis=-1))
    reconstruction = np.reshape(reconstruction, slice_data.shape[:-2] + (nmr_timeseries,))
    reconstruction *= (np.mean(sos, axis=2) / np.mean(reconstruction, axis=2))[:, :, None]

    return {
        'weights': np.reshape(weights, slice_data.shape[:-2] + (nmr_channels,)),
        'reconstruction': reconstruction,
    }
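# The reconstruction step above, in isolation: each voxel's time series is a
# weighted sum over channels, rescaled so its temporal mean matches that of the
# sum-of-squares combination. A minimal NumPy sketch with illustrative shapes,
# not the original pipeline:
import numpy as np

batch = np.random.rand(10, 50, 8)                      # (voxels, timeseries, channels)
weights = np.random.rand(10, 8)                        # (voxels, channels)

recon = np.sum(batch * weights[:, None, :], axis=2)    # (voxels, timeseries)
sos = np.sqrt(np.sum(batch ** 2, axis=-1))             # (voxels, timeseries)
recon *= (sos.mean(axis=1) / recon.mean(axis=1))[:, None]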
def get_kernel_data(self):
    """Get the kernel data needed for this optimization routine to work."""
    return {
        'scratch_mot_float_type': LocalMemory(
            'mot_float_type',
            3 * self._var_replace_dict['NMR_PARAMS'] + self._var_replace_dict['NMR_PARAMS']**2)
    }
def _get_mcmc_method_kernel_data_elements(self):
    """Get the MCMC method kernel data elements. Used by :meth:`_get_mcmc_method_kernel_data`."""
    return {
        'proposal_stds': Array(self._proposal_stds, 'mot_float_type', mode='rw'),
        'x_tmp': LocalMemory('mot_float_type', nmr_items=1 + self._nmr_params)
    }
def get_kernel_data(self):
    """Get the kernel data needed for this optimization routine to work."""
    return {
        'subplex_scratch_float': LocalMemory(
            'mot_float_type',
            4 + self._var_replace_dict['NMR_PARAMS'] * 2
            + self._var_replace_dict['MAX_SUBSPACE_LENGTH'] * 2
            + (self._var_replace_dict['MAX_SUBSPACE_LENGTH'] * 2
               + self._var_replace_dict['MAX_SUBSPACE_LENGTH'] + 1)**2 + 1),
        'subplex_scratch_int': LocalMemory(
            'int',
            2 + self._var_replace_dict['NMR_PARAMS']
            + (self._var_replace_dict['NMR_PARAMS'] // self._var_replace_dict['MIN_SUBSPACE_LENGTH'])),
        'initial_simplex_scale': LocalMemory('mot_float_type', self._var_replace_dict['NMR_PARAMS'])
    }
def _minimize_levenberg_marquardt(func, x0, nmr_observations, cl_runtime_info,
                                  lower_bounds, upper_bounds, use_local_reduction,
                                  constraints_func=None, data=None, options=None):
    options = options or {}
    nmr_problems = x0.shape[0]
    nmr_parameters = x0.shape[1]

    if nmr_observations < x0.shape[1]:
        raise ValueError('The number of observations per problem must be greater '
                         'than the number of parameters')

    penalty_data, penalty_func = _get_penalty_function(nmr_parameters, constraints_func)

    eval_func = SimpleCLFunction.from_string('''
        void evaluate(local mot_float_type* x, void* data, local mot_float_type* result){
            double penalty = _mle_penalty(
                x,
                ((_lm_eval_func_data*)data)->data,
                ((_lm_eval_func_data*)data)->lower_bounds,
                ((_lm_eval_func_data*)data)->upper_bounds,
                ''' + str(options.get('penalty_weight', 1e30)) + ''',
                ((_lm_eval_func_data*)data)->penalty_data
            );

            ''' + func.get_cl_function_name() + '''(x, ((_lm_eval_func_data*)data)->data, result);

            if(get_local_id(0) == 0){
                for(int j = 0; j < ''' + str(nmr_observations) + '''; j++){
                    result[j] += penalty;
                }
            }
            barrier(CLK_LOCAL_MEM_FENCE);
        }
    ''', dependencies=[func, penalty_func])

    jacobian_func = _lm_numdiff_jacobian(eval_func, nmr_parameters, nmr_observations)

    optimizer_func = LevenbergMarquardt(
        eval_func, nmr_parameters, nmr_observations, jacobian_func,
        **_clean_options('Levenberg-Marquardt', options))

    kernel_data = {'model_parameters': Array(x0, ctype='mot_float_type', mode='rw'),
                   'data': Struct({'data': data,
                                   'lower_bounds': lower_bounds,
                                   'upper_bounds': upper_bounds,
                                   'penalty_data': penalty_data,
                                   'jacobian_x_tmp': LocalMemory('mot_float_type', nmr_observations)},
                                  '_lm_eval_func_data')}
    kernel_data.update(optimizer_func.get_kernel_data())

    return_code = optimizer_func.evaluate(
        kernel_data, nmr_problems,
        use_local_reduction=use_local_reduction and all(env.is_gpu for env in cl_runtime_info.cl_environments),
        cl_runtime_info=cl_runtime_info)

    return OptimizeResults({'x': kernel_data['model_parameters'].get_data(),
                            'status': return_code})
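# Note (illustration, not MOT code): evaluate() above adds the same scalar
# penalty to every residual, so the squared-residual objective that
# Levenberg-Marquardt minimizes grows steeply as soon as a bound or constraint
# is violated. With the default penalty_weight of 1e30:
residuals = [0.1, -0.2, 0.05]
penalty = 1e30 * max(0.0, 1.2 - 1.0)**2          # a parameter at 1.2 with upper bound 1.0
penalized = [r + penalty for r in residuals]     # dominates the fit until x is feasible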
def wrap_input_data(self, input_data):
    """Wrap the input data with extra information this wrapper might need.

    Args:
        input_data (mot.lib.kernel_data.KernelData): the kernel data we will wrap

    Returns:
        mot.lib.kernel_data.KernelData: the wrapped kernel data
    """
    return Struct({'data': input_data,
                   'x_tmp': LocalMemory('mot_float_type', nmr_items=self._nmr_parameters)},
                  'objective_function_wrapper_data')
def _initialize_likelihood_prior(self, positions, log_likelihoods, log_priors):
    """Initialize the likelihood and the prior using the given positions.

    This is a general method for computing the log likelihoods and log priors for given positions.

    Subclasses can use this to instantiate secondary chains as well.
    """
    func = SimpleCLFunction.from_string('''
        void compute(global mot_float_type* chain_position,
                     global mot_float_type* log_likelihood,
                     global mot_float_type* log_prior,
                     local mot_float_type* x_tmp,
                     void* data){

            bool is_first_work_item = get_local_id(0) == 0;

            if(is_first_work_item){
                for(uint i = 0; i < ''' + str(self._nmr_params) + '''; i++){
                    x_tmp[i] = chain_position[i];
                }
                *log_prior = _computeLogPrior(x_tmp, data);
            }
            barrier(CLK_LOCAL_MEM_FENCE);

            *log_likelihood = _computeLogLikelihood(x_tmp, data);
        }
    ''', dependencies=[self._get_log_prior_cl_func(), self._get_log_likelihood_cl_func()])

    kernel_data = {
        'chain_position': Array(positions, 'mot_float_type', mode='rw'),
        'log_likelihood': Array(log_likelihoods, 'mot_float_type', mode='rw'),
        'log_prior': Array(log_priors, 'mot_float_type', mode='rw'),
        'x_tmp': LocalMemory('mot_float_type', self._nmr_params),
        'data': self._data
    }

    func.evaluate(kernel_data, self._nmr_problems,
                  use_local_reduction=all(env.is_gpu for env in self._cl_runtime_info.cl_environments),
                  cl_runtime_info=self._cl_runtime_info)
def get_cache_struct(self, address_space):
    if not self._cache_info:
        return None

    fields = {}
    for dependency in self.get_dependencies():
        if isinstance(dependency, CompartmentModel):
            if dependency.get_cache_struct(address_space):
                fields.update(dependency.get_cache_struct(address_space))

    struct_name = self._get_cache_parameter().ctype

    if address_space == 'private':
        for ctype, name, nmr_elements in self._cache_info.fields:
            fields[name] = PrivateMemory(nmr_elements, ctype)
    else:
        for ctype, name, nmr_elements in self._cache_info.fields:
            fields[name] = LocalMemory(ctype, nmr_items=nmr_elements)

    return {self.name: Struct(fields, struct_name)}
def _get_penalty_function(nmr_parameters, constraints_func=None):
    """Get a function to compute the penalty term for the boundary conditions.

    This is meant to be used in the evaluation function of the optimization routines.

    Args:
        nmr_parameters (int): the number of parameters in the model
        constraints_func (mot.optimize.base.ConstraintFunction): function to compute (inequality) constraints.
            Should hold a CL function with the signature:

            .. code-block:: c

                void <func_name>(local const mot_float_type* const x,
                                 void* data,
                                 local mot_float_type* constraint_values);

            Where ``constraint_values`` is filled as:

            .. code-block:: c

                constraint_values[i] = g_i(x)

            That is, for each constraint function :math:`g_i`, formulated as :math:`g_i(x) <= 0`, we should
            return the function value of :math:`g_i`.

    Returns:
        tuple: Struct and SimpleCLFunction, the required data for the penalty function and the
            penalty function itself.
    """
    dependencies = []
    data_requirements = {'scratch': LocalMemory('double', 1)}
    constraints_code = ''

    if constraints_func and constraints_func.get_nmr_constraints() > 0:
        nmr_constraints = constraints_func.get_nmr_constraints()
        dependencies.append(constraints_func)
        data_requirements['constraints'] = LocalMemory('mot_float_type', nmr_constraints)
        constraints_code = '''
            local mot_float_type* constraints = ((_mle_penalty_data*)scratch_data)->constraints;

            ''' + constraints_func.get_cl_function_name() + '''(x, data, constraints);

            for(int i = 0; i < ''' + str(nmr_constraints) + '''; i++){
                *penalty_sum += pown(max((mot_float_type)0, constraints[i]), 2);
            }
        '''

    data = Struct(data_requirements, '_mle_penalty_data')
    func = SimpleCLFunction.from_string('''
        double _mle_penalty(
                local mot_float_type* x,
                void* data,
                local mot_float_type* lower_bounds,
                local mot_float_type* upper_bounds,
                float penalty_weight,
                void* scratch_data){

            local double* penalty_sum = ((_mle_penalty_data*)scratch_data)->scratch;

            if(get_local_id(0) == 0){
                *penalty_sum = 0;

                // boundary conditions
                for(int i = 0; i < ''' + str(nmr_parameters) + '''; i++){
                    if(isfinite(upper_bounds[i])){
                        *penalty_sum += pown(max((mot_float_type)0, x[i] - upper_bounds[i]), 2);
                    }
                    if(isfinite(lower_bounds[i])){
                        *penalty_sum += pown(max((mot_float_type)0, lower_bounds[i] - x[i]), 2);
                    }
                }
            }
            barrier(CLK_LOCAL_MEM_FENCE);

            // constraints
            ''' + constraints_code + '''

            return penalty_weight * *penalty_sum;
        }
    ''', dependencies=dependencies)
    return data, func
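# A minimal NumPy sketch of what _mle_penalty computes, assuming all bounds are
# finite (the CL version skips infinite bounds); the names here are illustrative,
# not MOT API.
import numpy as np

def mle_penalty(x, lower_bounds, upper_bounds, constraint_values, penalty_weight=1e30):
    penalty = np.sum(np.maximum(0, x - upper_bounds)**2)      # upper bound violations
    penalty += np.sum(np.maximum(0, lower_bounds - x)**2)     # lower bound violations
    penalty += np.sum(np.maximum(0, constraint_values)**2)    # violated g_i(x) <= 0
    return penalty_weight * penalty

# one parameter over its upper bound by 0.5, no constraint violated: 1e30 * 0.25
print(mle_penalty(np.array([0.5, 1.5]), np.array([0., 0.]), np.array([1., 1.]),
                  constraint_values=np.array([-0.2])))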
def estimate_hessian(objective_func, parameters,
                     lower_bounds=None, upper_bounds=None,
                     step_ratio=2, nmr_steps=5, max_step_sizes=None,
                     data=None, cl_runtime_info=None):
    r"""Estimate and return the upper triangular elements of the Hessian of the given function at the given parameters.

    This calculates the Hessian using central difference (using a 2nd order Taylor expansion) with a Richardson
    extrapolation over the proposed sequence of steps. If enough steps are given, we apply a Wynn epsilon
    extrapolation on top of the Richardson extrapolated results. If more steps are left, we return the estimate
    with the lowest error, taking into account outliers using a median filter.

    The Hessian is evaluated at the steps:

    .. math::

        \frac{(f(x + d_j e_j + d_k e_k) - f(x + d_j e_j - d_k e_k))
              - (f(x - d_j e_j + d_k e_k) - f(x - d_j e_j - d_k e_k))}{4 d_j d_k}

    where :math:`e_j` is a vector where element :math:`j` is one and the rest are zero, and :math:`d_j` is a
    scalar spacing :math:`steps_j`.

    Steps are generated according to an exponentially diminishing ratio, defined as
    ``steps = max_step * step_ratio**-i, i = 0, 1, .., nmr_steps - 1``, where the maximum step can be provided.
    For example, a maximum step of 2 with a step ratio of 2, computed for 4 steps, gives: [2.0, 1.0, 0.5, 0.25].
    If lower and upper bounds are given, we use as maximum step size the largest step size that fits between
    the Hessian point and the boundaries.

    The steps define the order of the estimation, with 2 steps resulting in a O(h^2) estimate, 3 steps resulting
    in a O(h^4) estimate and 4 or more steps resulting in a O(h^6) derivative estimate.

    Args:
        objective_func (mot.lib.cl_function.CLFunction): The function we want to differentiate.
            A CL function with the signature:

            .. code-block:: c

                double <func_name>(local const mot_float_type* const x, void* data);

            The objective function has the same signature as the minimization function in MOT.
            For the numerical hessian, the ``objective_list`` parameter is ignored.
        parameters (ndarray): The parameters at which to evaluate the gradient. A (d, p) matrix
            with d problems and p parameters.
        lower_bounds (tuple or list or None): a list of length (p,) for p parameters with the lower bounds.
            Each element of the list can be a scalar or a vector (of the same length as the number of
            problem instances). To disable bounds for this parameter use -np.inf.
        upper_bounds (tuple or list or None): a list of length (p,) for p parameters with the upper bounds.
            Each element of the list can be a scalar or a vector (of the same length as the number of
            problem instances). To disable bounds for this parameter use np.inf.
        step_ratio (float): the ratio at which the steps diminish.
        nmr_steps (int): the number of steps we will generate. We will calculate the derivative for each of
            these step sizes and extrapolate the best step size from among them. The minimum number of steps is 1.
        max_step_sizes (float or ndarray or None): the maximum step size, or the maximum step size per parameter.
            If None is given, we use 0.1 for all parameters. If a float is given, we use that for all parameters.
            If a list is given, it should be of the same length as the number of parameters.
        data (mot.lib.kernel_data.KernelData): the user provided data for the ``void* data`` pointer.
        cl_runtime_info (mot.configuration.CLRuntimeInfo): the runtime information

    Returns:
        ndarray: per problem instance a vector with the upper triangular elements of the Hessian matrix.
            This array can hold NaN's, for elements where the Hessian failed to approximate.
    """
    if len(parameters.shape) == 1:
        parameters = parameters[None, :]

    nmr_voxels = parameters.shape[0]
    nmr_params = parameters.shape[1]
    nmr_derivatives = nmr_params * (nmr_params + 1) // 2

    initial_step = _get_initial_step(parameters, lower_bounds, upper_bounds, max_step_sizes)

    kernel_data = {
        'parameters': Array(parameters, ctype='mot_float_type', mode='r', use_host_ptr=False),
        'initial_step': Array(initial_step, ctype='float', mode='r'),
        'derivatives': Zeros((nmr_voxels, nmr_derivatives), 'double'),
        'errors': Zeros((nmr_voxels, nmr_derivatives), 'double'),
        'x_tmp': LocalMemory('mot_float_type', nmr_params),
        'data': data,
        'scratch': LocalMemory('double', nmr_steps + (nmr_steps - 1) + nmr_steps)
    }

    hessian_kernel = SimpleCLFunction.from_string('''
        void _numdiff_hessian(
                global mot_float_type* parameters,
                global float* initial_step,
                global double* derivatives,
                global double* errors,
                local mot_float_type* x_tmp,
                void* data,
                local double* scratch){

            if(get_local_id(0) == 0){
                for(uint i = 0; i < ''' + str(nmr_params) + '''; i++){
                    x_tmp[i] = parameters[i];
                }
            }
            barrier(CLK_LOCAL_MEM_FENCE);

            double f_x_input = ''' + objective_func.get_cl_function_name() + '''(x_tmp, data);

            // upper triangle loop
            uint coord_ind = 0;
            for(int i = 0; i < ''' + str(nmr_params) + '''; i++){
                for(int j = i; j < ''' + str(nmr_params) + '''; j++){
                    _numdiff_hessian_element(data, x_tmp, f_x_input, i, j, initial_step,
                                             derivatives + coord_ind, errors + coord_ind, scratch);
                    coord_ind++;
                }
            }
        }
    ''', dependencies=[objective_func,
                       _get_numdiff_hessian_element_func(objective_func, nmr_steps, step_ratio)])

    hessian_kernel.evaluate(kernel_data, nmr_voxels, use_local_reduction=True,
                            cl_runtime_info=cl_runtime_info)

    return kernel_data['derivatives'].get_data()