def save(base, parameters, error, time, evaluations, n_parameters=25):
    """
    Stores a set of ``parameters``, an RMSE ``error``, and the ``time`` and
    number of ``evaluations`` it took to obtain the result in a file based
    on the given ``base`` name.

    Parameters
    ----------
    base
        Output path without extension; ``'.txt'`` is appended.
    parameters
        Sequence of fitted parameter values; must have exactly
        ``n_parameters`` entries.
    error
        RMSE of the fit (converted with ``float``).
    time
        Wall-clock time taken (converted with ``float``).
    evaluations
        Number of function evaluations used (converted with ``int``).
    n_parameters
        Expected number of parameters (default 25, the value previously
        hard-coded here). Kept as a keyword so existing callers work
        unchanged.

    Raises
    ------
    ValueError
        If ``parameters`` does not have ``n_parameters`` entries.
    """
    path = base + '.txt'

    # Explicit check instead of ``assert``: asserts are silently stripped
    # when Python runs with -O, which would let a malformed result through.
    if len(parameters) != n_parameters:
        raise ValueError(
            'Expected ' + str(n_parameters) + ' parameters, got '
            + str(len(parameters)) + '.')

    # Normalise the scalar fields before writing.
    error = float(error)
    time = float(time)
    evaluations = int(evaluations)

    print('Writing results to ' + str(path))
    with open(path, 'w') as f:
        f.write('error: ' + pints.strfloat(error) + '\n')
        f.write('time: ' + pints.strfloat(time) + '\n')
        f.write('evaluations: ' + str(evaluations) + '\n')
        f.write('parameters:\n')
        for p in parameters:
            f.write(' ' + pints.strfloat(p) + '\n')
    print('Done')
def save_samples(filename, *sample_lists):
    """
    Stores one or multiple lists of samples at the path given by
    ``filename``.

    If one list of samples is given, the filename is used as is. If
    multiple lists are given, the filenames are updated to include ``_0``,
    ``_1``, ``_2``, etc.

    For example, ``save_samples('test.csv', samples)`` will store
    information from ``samples`` in ``test.csv``. Using
    ``save_samples('test.csv', samples_0, samples_1)`` will store the
    samples from ``samples_0`` to ``test_0.csv`` and ``samples_1`` to
    ``test_1.csv``.

    See also: :meth:`load_samples()`.
    """
    import numpy as np
    import os
    import pints

    # Work out one output path per sample list
    n_lists = len(sample_lists)
    if n_lists < 1:
        raise ValueError('At least one set of samples must be given.')
    if n_lists == 1:
        paths = [filename]
    else:
        stem, extension = os.path.splitext(filename)
        paths = [stem + '_' + str(i) + extension for i in range(n_lists)]

    # All lists must be float-convertible and of equal size, or the
    # conversion to a single float array below will fail.
    try:
        sample_lists = np.array(sample_lists, dtype=float)
    except ValueError:
        raise ValueError(
            'Sample lists must contain only floats and be of same length.')
    shape = sample_lists[0].shape
    if len(shape) != 2:
        raise ValueError(
            'Samples must be given as 2d arrays (e.g. lists of lists).')

    # Write each list as CSV: a quoted header row "p0","p1",... followed
    # by one row per sample.
    header = ','.join('"p' + str(j) + '"' for j in range(shape[1]))
    for path, samples in zip(paths, sample_lists):
        with open(path, 'w') as f:
            f.write(header + '\n')
            for row in samples:
                f.write(','.join(pints.strfloat(v) for v in row) + '\n')
def run_model(model, cell, protocol, time, voltage, current, plot='unfold',
              label=None, axes=None):
    """
    Simulates ``model`` for one ``cell``/``protocol`` combination using the
    parameters stored by a previous fit (``model.fetch_parameters()``),
    prints the parameters and their log-posterior, and plots the simulation
    against the measured ``current``.

    Relies on module-level names defined elsewhere in this file:
    ``root``, ``os``, ``myokit``, ``np``, ``pints`` and ``IkrModel``.

    ``plot='unfold'`` draws voltage and current on the provided ``axes``
    pair; any other value delegates to ``IkrModel.fold_plot``. ``label``
    (0-3) selects a model name and colour for the simulated trace.
    """
    # Select protocol file
    protocol_file = os.path.join(root, protocol + '.mmt')
    print(protocol_file)
    myokit_protocol = myokit.load_protocol(protocol_file)

    # Estimate noise from start of data
    # NOTE(review): assumes the first 2000 samples are a quiet/holding
    # period of the recording — confirm against the protocol.
    sigma_noise = np.std(current[:2000], ddof=1)

    # fetch cmaes parameters
    obtained_parameters = model.fetch_parameters()

    # Cell-specific parameters
    temperature = model.temperature(cell)
    lower_conductance = model.conductance_limit(cell)

    # Apply capacitance filter based on protocol
    print('Applying capacitance filtering')
    time, voltage, current = model.capacitance(
        myokit_protocol, 0.1, time, voltage, current)

    # Build the posterior: known-noise likelihood x cell-specific prior.
    forward_model = model.ForwardModel(
        myokit_protocol, temperature, sine_wave=False)
    problem = pints.SingleOutputProblem(forward_model, time, current)
    log_likelihood = pints.KnownNoiseLogLikelihood(problem, sigma_noise)
    log_prior = model.LogPrior(lower_conductance)
    log_posterior = pints.LogPosterior(log_likelihood, log_prior)

    # Show obtained parameters and score
    obtained_log_posterior = log_posterior(obtained_parameters)
    print('Kylie sine-wave parameters:')
    for x in obtained_parameters:
        print(pints.strfloat(x))
    print('Final log-posterior:')
    print(pints.strfloat(obtained_log_posterior))

    # Simulate
    simulated = forward_model.simulate(obtained_parameters, time)

    if plot == 'unfold':
        axes[0].plot(time, voltage, color='red')  # , label='voltage')
        # axes[0].legend(loc='upper right')
        axes[1].plot(time, current, alpha=0.3,
                     color='red')  # , label='measured current')
        if label == 0:
            model_name = 'circularCOIIC'
            axes[1].plot(time, simulated, alpha=1, color='blue',
                         label=model_name)
        elif label == 1:
            model_name = 'linearCOI'
            axes[1].plot(time, simulated, alpha=1, color='magenta',
                         label=model_name)
        elif label == 2:
            model_name = 'linearCCOI'
            axes[1].plot(time, simulated, alpha=1, color='seagreen',
                         label=model_name)
        elif label == 3:
            model_name = 'linearCCCOI'
            # NOTE(review): same colour ('seagreen') as label == 2 — the two
            # traces are indistinguishable in the plot; possibly unintended.
            axes[1].plot(time, simulated, alpha=1,
                         color='seagreen', label=model_name)
        # axes.subplot(2,1,1)
    else:
        IkrModel.fold_plot(protocol, time, voltage, [current, simulated])
def run(self):
    """
    Runs the optimisation, returns a tuple ``(xbest, fbest)``.

    Internally the score is always *minimised*; when maximising, a sign
    flip is applied to the value shown/returned to the user.
    """
    # Check stopping criteria: at least one of max-iterations,
    # max-unchanged-iterations, or threshold must have been configured.
    has_stopping_criterion = False
    has_stopping_criterion |= (self._max_iterations is not None)
    has_stopping_criterion |= (self._max_unchanged_iterations is not None)
    has_stopping_criterion |= (self._threshold is not None)
    if not has_stopping_criterion:
        raise ValueError('At least one stopping criterion must be set.')

    # Iterations and function evaluations
    iteration = 0
    evaluations = 0

    # Unchanged iterations count (used for stopping or just for
    # information)
    unchanged_iterations = 0

    # Create evaluator object
    if self._parallel:
        # Get number of workers
        n_workers = self._n_workers

        # For population based optimisers, don't use more workers than
        # particles!
        if isinstance(self._optimiser, PopulationBasedOptimiser):
            n_workers = min(n_workers, self._optimiser.population_size())
        evaluator = pints.ParallelEvaluator(
            self._function, n_workers=n_workers)
    else:
        evaluator = pints.SequentialEvaluator(self._function)

    # Keep track of best position and score
    fbest = float('inf')

    # Internally we always minimise!  Keep a 2nd value to show the user
    fbest_user = fbest if self._minimising else -fbest

    # Set up progress reporting
    next_message = 0

    # Start logging
    logging = self._log_to_screen or self._log_filename
    if logging:
        if self._log_to_screen:
            # Show direction
            if self._minimising:
                print('Minimising error measure')
            else:
                print('Maximising LogPDF')

            # Show method
            print('Using ' + str(self._optimiser.name()))

            # Show parallelisation
            if self._parallel:
                print('Running in parallel with ' + str(n_workers) +
                      ' worker processes.')
            else:
                print('Running in sequential mode.')

        # Show population size
        pop_size = 1
        if isinstance(self._optimiser, PopulationBasedOptimiser):
            pop_size = self._optimiser.population_size()
            if self._log_to_screen:
                print('Population size: ' + str(pop_size))

        # Set up logger
        logger = pints.Logger()
        if not self._log_to_screen:
            logger.set_stream(None)
        if self._log_filename:
            logger.set_filename(self._log_filename, csv=self._log_csv)

        # Add fields to log
        max_iter_guess = max(self._max_iterations or 0, 10000)
        max_eval_guess = max_iter_guess * pop_size
        logger.add_counter('Iter.', max_value=max_iter_guess)
        logger.add_counter('Eval.', max_value=max_eval_guess)
        logger.add_float('Best')
        self._optimiser._log_init(logger)
        logger.add_time('Time m:s')

    # Start searching
    timer = pints.Timer()
    running = True
    try:
        while running:
            # Get points
            xs = self._optimiser.ask()

            # Calculate scores
            fs = evaluator.evaluate(xs)

            # Perform iteration
            self._optimiser.tell(fs)

            # Check if new best found
            fnew = self._optimiser.fbest()
            if fnew < fbest:
                # Check if this counts as a significant change.
                # Note: an improvement smaller than
                # ``_min_significant_change`` still counts as "unchanged"
                # for the stopping criterion.
                if np.abs(fnew - fbest) < self._min_significant_change:
                    unchanged_iterations += 1
                else:
                    unchanged_iterations = 0

                # Update best
                fbest = fnew

                # Update user value of fbest
                fbest_user = fbest if self._minimising else -fbest
            else:
                unchanged_iterations += 1

            # Update evaluation count
            evaluations += len(fs)

            # Show progress
            if logging and iteration >= next_message:
                # Log state
                logger.log(iteration, evaluations, fbest_user)
                self._optimiser._log_write(logger)
                logger.log(timer.time())

                # Choose next logging point
                if iteration < self._message_warm_up:
                    next_message = iteration + 1
                else:
                    next_message = self._message_interval * (
                        1 + iteration // self._message_interval)

            # Update iteration count
            iteration += 1

            #
            # Check stopping criteria
            #

            # Maximum number of iterations
            if (self._max_iterations is not None and
                    iteration >= self._max_iterations):
                running = False
                halt_message = ('Halting: Maximum number of iterations ('
                                + str(iteration) + ') reached.')

            # Maximum number of iterations without significant change
            halt = (self._max_unchanged_iterations is not None and
                    unchanged_iterations >= self._max_unchanged_iterations)
            if halt:
                running = False
                halt_message = ('Halting: No significant change for ' +
                                str(unchanged_iterations) + ' iterations.')

            # Threshold value
            if self._threshold is not None and fbest < self._threshold:
                running = False
                halt_message = ('Halting: Objective function crossed'
                                ' threshold: ' + str(self._threshold) + '.')

            # Error in optimiser
            error = self._optimiser.stop()
            if error:   # pragma: no cover
                running = False
                halt_message = ('Halting: ' + str(error))

    except (Exception, SystemExit, KeyboardInterrupt):  # pragma: no cover
        # Unexpected end!
        # Show last result and exit
        print('\n' + '-' * 40)
        print('Unexpected termination.')
        print('Current best score: ' + str(fbest))
        print('Current best position:')
        for p in self._optimiser.xbest():
            print(pints.strfloat(p))
        print('-' * 40)
        raise

    # Stop timer
    time_taken = timer.time()

    # Log final values and show halt message
    if logging:
        logger.log(iteration, evaluations, fbest_user)
        self._optimiser._log_write(logger)
        logger.log(time_taken)
        if self._log_to_screen:
            print(halt_message)

    # Save post-run statistics
    self._evaluations = evaluations
    self._iterations = iteration
    self._time = time_taken

    # Return best position and score
    return self._optimiser.xbest(), fbest_user
# Fit the log-posterior with CMA-ES and collect the result.
# Relies on names defined earlier in the file: ``logposterior``, ``x0``,
# ``transform_to_model_param``, ``params``, ``logposteriors``,
# ``priorparams`` and ``N``.
print('Starting logposterior: ', logposterior(x0))
opt = pints.OptimisationController(logposterior, x0.T, method=pints.CMAES)
opt.set_max_iterations(None)
opt.set_parallel(True)

# Run optimisation
try:
    with np.errstate(all='ignore'):  # Tell numpy not to issue warnings
        p, s = opt.run()
    p = transform_to_model_param(p)
    params.append(p)
    logposteriors.append(s)
    print('Found solution: Old parameters:')
    for k, x in enumerate(p):
        print(pints.strfloat(x) + ' ' + pints.strfloat(priorparams[k]))
except ValueError:
    import traceback
    traceback.print_exc()

# Order from best to worst
order = np.argsort(logposteriors)[::-1]  # (use [::-1] for LL)
logposteriors = np.asarray(logposteriors)[order]
params = np.asarray(params)[order]

# Show results
bestn = min(3, N)
print('Best %d logposteriors:' % bestn)
# Fix: original used ``xrange``, which only exists on Python 2 and raises
# NameError on Python 3; ``range`` behaves identically here on both.
for i in range(bestn):
    print(logposteriors[i])
def run(self):
    """
    Runs the optimisation, returns a tuple ``(x_best, f_best)``.

    An optional ``callback`` function can be passed in that will be called
    at the end of every iteration. The callback should take the arguments
    ``(iteration, optimiser)``, where ``iteration`` is the iteration count
    (an integer) and ``optimiser`` is the optimiser object.

    Internally the score is always *minimised*; when maximising, a sign
    flip is applied to values shown/returned to the user.
    """
    # Can only run once for each controller instance
    if self._has_run:
        raise RuntimeError("Controller is valid for single use only")
    self._has_run = True

    # Check stopping criteria: at least one criterion must be configured.
    has_stopping_criterion = False
    has_stopping_criterion |= (self._max_iterations is not None)
    has_stopping_criterion |= (self._unchanged_max_iterations is not None)
    has_stopping_criterion |= (self._max_evaluations is not None)
    has_stopping_criterion |= (self._threshold is not None)
    if not has_stopping_criterion:
        raise ValueError('At least one stopping criterion must be set.')

    # Iterations and function evaluations
    iteration = 0
    evaluations = 0

    # Unchanged iterations count (used for stopping or just for
    # information)
    unchanged_iterations = 0

    # Choose method to evaluate: plain score, or score with sensitivities.
    f = self._function
    if self._needs_sensitivities:
        f = f.evaluateS1

    # Create evaluator object
    if self._parallel:
        # Get number of workers
        n_workers = self._n_workers

        # For population based optimisers, don't use more workers than
        # particles!
        if isinstance(self._optimiser, PopulationBasedOptimiser):
            n_workers = min(n_workers, self._optimiser.population_size())
        evaluator = pints.ParallelEvaluator(f, n_workers=n_workers)
    else:
        evaluator = pints.SequentialEvaluator(f)

    # Keep track of current best and best-guess scores.
    fb = fg = float('inf')

    # Internally we always minimise!  Keep a 2nd value to show the user.
    fb_user, fg_user = (fb, fg) if self._minimising else (-fb, -fg)

    # Keep track of the last significant change
    f_sig = float('inf')

    # Set up progress reporting
    next_message = 0

    # Start logging
    logging = self._log_to_screen or self._log_filename
    if logging:
        if self._log_to_screen:
            # Show direction
            if self._minimising:
                print('Minimising error measure')
            else:
                print('Maximising LogPDF')

            # Show method
            print('Using ' + str(self._optimiser.name()))

            # Show parallelisation
            if self._parallel:
                print('Running in parallel with ' + str(n_workers) +
                      ' worker processes.')
            else:
                print('Running in sequential mode.')

        # Show population size
        pop_size = 1
        if isinstance(self._optimiser, PopulationBasedOptimiser):
            pop_size = self._optimiser.population_size()
            if self._log_to_screen:
                print('Population size: ' + str(pop_size))

        # Set up logger
        logger = pints.Logger()
        if not self._log_to_screen:
            logger.set_stream(None)
        if self._log_filename:
            logger.set_filename(self._log_filename, csv=self._log_csv)

        # Add fields to log
        max_iter_guess = max(self._max_iterations or 0, 10000)
        max_eval_guess = max(
            self._max_evaluations or 0, max_iter_guess * pop_size)
        logger.add_counter('Iter.', max_value=max_iter_guess)
        logger.add_counter('Eval.', max_value=max_eval_guess)
        logger.add_float('Best')
        logger.add_float('Current')
        self._optimiser._log_init(logger)
        logger.add_time('Time m:s')

    # Start searching
    timer = pints.Timer()
    running = True
    try:
        while running:
            # Get points
            xs = self._optimiser.ask()

            # Calculate scores
            fs = evaluator.evaluate(xs)

            # Perform iteration
            self._optimiser.tell(fs)

            # Update current scores
            fb = self._optimiser.f_best()
            fg = self._optimiser.f_guessed()
            fb_user, fg_user = (fb, fg) if self._minimising else (-fb, -fg)

            # Check for significant changes
            f_new = fg if self._use_f_guessed else fb
            if np.abs(f_new - f_sig) >= self._unchanged_threshold:
                unchanged_iterations = 0
                f_sig = f_new
            else:
                unchanged_iterations += 1

            # Update evaluation count
            evaluations += len(fs)

            # Show progress
            if logging and iteration >= next_message:
                # Log state
                logger.log(iteration, evaluations, fb_user, fg_user)
                self._optimiser._log_write(logger)
                logger.log(timer.time())

                # Choose next logging point
                if iteration < self._message_warm_up:
                    next_message = iteration + 1
                else:
                    next_message = self._message_interval * (
                        1 + iteration // self._message_interval)

            # Update iteration count
            iteration += 1

            #
            # Check stopping criteria
            #

            # Maximum number of iterations
            if (self._max_iterations is not None and
                    iteration >= self._max_iterations):
                running = False
                halt_message = ('Maximum number of iterations (' +
                                str(iteration) + ') reached.')

            # Maximum number of iterations without significant change
            halt = (self._unchanged_max_iterations is not None and
                    unchanged_iterations >= self._unchanged_max_iterations)
            if running and halt:
                running = False
                halt_message = ('No significant change for ' +
                                str(unchanged_iterations) + ' iterations.')

            # Maximum number of evaluations
            if (self._max_evaluations is not None and
                    evaluations >= self._max_evaluations):
                running = False
                halt_message = (
                    'Maximum number of evaluations (' +
                    str(self._max_evaluations) + ') reached.')

            # Threshold value
            halt = (self._threshold is not None and
                    f_new < self._threshold)
            if running and halt:
                running = False
                halt_message = ('Objective function crossed threshold: ' +
                                str(self._threshold) + '.')

            # Error in optimiser
            error = self._optimiser.stop()
            if error:   # pragma: no cover
                running = False
                halt_message = str(error)

            # Callback runs only on iterations that did not error/stop here.
            elif self._callback is not None:
                self._callback(iteration - 1, self._optimiser)

    except (Exception, SystemExit, KeyboardInterrupt):  # pragma: no cover
        # Unexpected end!
        # Show last result and exit
        print('\n' + '-' * 40)
        print('Unexpected termination.')
        print('Current score: ' + str(fg_user))
        print('Current position:')

        # Show current parameters
        x_user = self._optimiser.x_guessed()
        if self._transformation is not None:
            x_user = self._transformation.to_model(x_user)
        for p in x_user:
            print(pints.strfloat(p))
        print('-' * 40)
        raise

    # Stop timer
    self._time = timer.time()

    # Log final values and show halt message
    if logging:
        # Only log a final line if one wasn't already written this
        # iteration.
        if iteration - 1 < next_message:
            logger.log(iteration, evaluations, fb_user, fg_user)
            self._optimiser._log_write(logger)
            logger.log(self._time)
        if self._log_to_screen:
            print('Halting: ' + halt_message)

    # Save post-run statistics
    self._evaluations = evaluations
    self._iterations = iteration

    # Get best parameters
    if self._use_f_guessed:
        x = self._optimiser.x_guessed()
        f = self._optimiser.f_guessed()
    else:
        x = self._optimiser.x_best()
        f = self._optimiser.f_best()

    # Inverse transform search parameters
    if self._transformation is not None:
        x = self._transformation.to_model(x)

    # Return best position and score
    return x, f if self._minimising else -f
# NOTE(review): this ``def`` reads ``self.n`` and is almost certainly the
# tail of a class defined above this chunk (the ``LogisticAPI`` wrapper
# constructed below) — confirm its indentation against the full file.
def n_parameters(self):
    # Number of parameters exposed by this LogPDF wrapper.
    return self.n


# Remote logistic-model LogPDF served by the pints benchmarking web API.
lpdf = LogisticAPI(
    'https://mighty-badlands-12664.herokuapp.com/pints-team/benchmarks/1.0.0/')

# Reference parameter values, compared against the fit below.
real_parameters = [0.015, 500, 10]

## Select some boundaries
boundaries = pints.RectangularBoundaries([0, 400, 0], [0.03, 600, 20])

## Perform an optimization with boundaries and hints
x0 = 0.01, 450, 5
sigma0 = [0.01, 100, 10]
found_parameters, found_value = pints.optimise(
    lpdf, x0, sigma0, boundaries, method=pints.CMAES)

## Show score of true solution
print('log likelihood at true solution: ')
print(lpdf(real_parameters))

#
## Compare parameters with original
print('Found solution: True parameters:')
for k, x in enumerate(found_parameters):
    print(pints.strfloat(x) + ' ' + pints.strfloat(real_parameters[k]))
# Run a CMA-ES fit of ``score`` from ``x0`` and pickle the result.
# Relies on names defined earlier in the file: ``prior``, ``score``,
# ``boundaries``, ``x0``, ``sigma0``, ``reversible``, ``diff_i`` and
# ``sample_i``.
print('prior at x0: ')
print(prior(x0))
print('Score at x0: ')
print(score(x0))

found_parameters, found_solution = pints.cmaes(
    score,
    boundaries,
    x0,
    sigma0,
)

print('Found solution: x0:')
for k, x in enumerate(found_parameters):
    print(pints.strfloat(x) + ' ' + pints.strfloat(x0[k]))
print('Score at found_parameters: ')
print(found_solution)
#print(score(found_parameters))

# Output directory depends on the (electrochemical) reversibility flag.
if reversible:
    dir_name = 'reversible_%s' % (diff_i)
else:
    dir_name = 'quasireversible_%s' % (diff_i)
if not os.path.exists(dir_name):
    os.makedirs(dir_name)

# Fix: the original passed an anonymous ``open(...)`` handle straight to
# ``pickle.dump``, leaking the file descriptor; use a context manager so
# the file is always flushed and closed.
with open('%s/params_and_solution%d.p' % (dir_name, sample_i), "wb") as f:
    pickle.dump((found_parameters, found_solution), f)
boundaries = pints.RectangularBoundaries(lower, upper) # Perform an optimization x0 = x_true * 1.2 optimiser = pints.OptimisationController(score, x0, boundaries=boundaries, method=pints.CMAES) print('Running...') x_found, score_found = optimiser.run() # Compare parameters with original print('Found solution: True parameters:') for k, x in enumerate(x_found): print(pints.strfloat(x) + ' ' + pints.strfloat(x_true[k])) fitted_values = problem.evaluate(x_found) plt.figure() plt.suptitle('Generated data + simulation with fit parameters') plt.subplot(2, 1, 1) plt.xlabel('Time (ms)') plt.ylabel('Vm (mV)') plt.plot(times, noisy_values[:, 0]) plt.plot(times, fitted_values[:, 0]) plt.subplot(2, 1, 2) plt.xlabel('Time (ms)') plt.ylabel('[Ca]i (mol/L)') plt.plot(times, noisy_values[:, 1]) plt.plot(times, fitted_values[:, 1])
    # NOTE(review): this chunk uses ``continue`` and free names
    # (``model_name``, ``cell``, ``folder``, ``num_chains``, ``transform``,
    # ``rate_dict``), so it is the body of a loop whose header lies outside
    # this view — indentation assumed, confirm against the full file.

    # Per-chain log-posterior samples for this model/cell; skip this
    # model/cell if no such file was produced.
    LL_filename = folder + '/' + model_name + '-cell-' + str(cell) + '-LLs.csv'
    if not os.path.isfile(LL_filename):
        continue
    LLs = pints.io.load_samples(LL_filename)
    chains = pints.io.load_samples(
        folder + '/' + model_name + '-cell-' + str(cell) + '-chain.csv',
        n=num_chains)

    # Set parameter transformation
    #transform_to_model_param = parametertransform.log_transform_to_model_param
    #transform_from_model_param = parametertransform.log_transform_from_model_param

    # Index of the maximum log-posterior sample within each chain...
    MAPs = np.argmax(LLs, axis=0)
    # ...and the chain whose maximum is largest overall.
    which_chain = np.argmax([LLs[MAPs[i]][i] for i in range(len(MAPs))])
    # MAP parameters in search (transformed) space...
    transform_MAP_param = chains[which_chain][MAPs[which_chain], :]
    # ...and mapped back to model space for display.
    MAP_param = util.transformer(
        transform, transform_MAP_param, rate_dict, False)

    print(model_name, ': Best Log-Posterior: ',
          LLs[MAPs[which_chain]][which_chain])
    print('MAP parameters: ')
    print(MAP_param)

    #from defaultsetting import param_names
    with open(
            'mcmc_results/' + model_name + '-cell-' + str(cell) +
            '-best-parameters.txt', 'w') as f:
        # Note: writes the *transformed* parameters, not ``MAP_param``.
        for p in transform_MAP_param:
            f.write(pints.strfloat(p) + '\n')