def run_pso(lum, drug_conc, time, weights):
    # create global versions of lum, drug_conc, and time variables that can be
    # used in cost function
    global _lum, _drug_conc, _time, _weights
    _lum = np.array(lum)
    _drug_conc = np.array(drug_conc)
    _time = np.array(time)
    _weights = np.array(weights)

    ##########
    # _dc, idx = np.unique(_drug_conc, return_index=True)
    # _lum_avg = np.array([np.mean(_lum[idx[i]:idx[i+1]], axis=0) for i in range(len(idx)-1)] +
    #                     [np.mean(_lum[idx[-1]:], axis=0)])
    # colors = cmap(np.linspace(0, 1, len(_dc)))
    # plt.figure()
    # u_idx = 0
    # for i in range(len(_lum)):
    #     if i <= idx[-1] and i == idx[u_idx+1]:
    #         u_idx += 1
    #     plt.plot(_time[i], _lum[i], 'o', color=colors[u_idx])
    #     plt.plot(_time[idx[u_idx]], _lum_avg[u_idx], '--', lw=2, color=colors[u_idx])
    # plt.show()
    ##########

    # estimate slope m and y-intercept nC0 from control luminescence data
    global _m, _nC0
    _m = 0
    _nC0 = 0
    # find indices of control experiments
    indices = np.where(_drug_conc == 0)[0]
    for idx in indices:
        fit = linregress(_time[idx], _lum[idx])
        _m += fit.slope
        _nC0 += fit.intercept
    _m /= len(indices)
    _nC0 /= len(indices)

    # run PSO to estimate the other three parameters: kdiv-kdeath, kdiv*-kdeath*, koff/kon
    pso = PSO(save_sampled=True, verbose=True, shrink_steps=False)
    pso.set_start_position([2, 2, 2])
    # allows particles to move +/- 2 orders of magnitude
    pso.set_bounds(2)
    # sets maximum speed that a particle can travel
    pso.set_speed(-0.1, 0.1)
    pso.run(
        num_particles=100, num_iterations=1000, stop_threshold=0,
        num_processors=1, max_iter_no_improv=1000, cost_function=cost
    )

    # [m, nC0, kdiv-kdeath, kdiv*-kdeath*, koff/kon]
    return np.array([_m, _nC0] + list(pso.best.pos))
def test_population_creation():
    known_sol = [8.6998, 6.7665]
    pso = PSO(start=[0, 0], verbose=False, shrink_steps=False)
    pso.set_bounds(lower=[-100, -100], upper=[100, 100])
    pso.set_speed(-10, 10)
    pso.run(num_iterations=100, num_particles=10, cost_function=h1)
    pso.return_ranked_populations()
    error = np.sum((pso.best.pos - known_sol) ** 2)
    print('True value: [8.6998, 6.7665]. Found: {0}. Error^2 = {1}'.format(
        pso.best.pos, error))
    assert (error < 0.1)
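The `h1` cost function used by the population tests is not included in this collection. A minimal sketch, assuming it is the standard DEAP h1 benchmark (global maximum of 2 at (8.6998, 6.7665)), negated under the assumption that this PSO minimizes the returned cost:

import numpy as np

def h1(position):
    # Assumed form of the DEAP h1 benchmark, not taken from the source.
    # The negation assumes the PSO minimizes, so the swarm converges on the
    # benchmark's maximum at (8.6998, 6.7665).
    x, y = position
    num = np.sin(x - y / 8) ** 2 + np.sin(y + x / 8) ** 2
    denom = np.sqrt((x - 8.6998) ** 2 + (y - 6.7665) ** 2) + 1
    return -num / denom,  # one-element tuple, matching the other cost functions here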
def run_pso(run, iterations, bd):
    pso = PSO(save_sampled=False, verbose=True, shrink_steps=False)
    # pso.set_cost_function(costfunction)
    pso.set_start_position(starting_position)
    pso.set_bounds(bd)
    pso.set_speed(-.1, .1)
    pso.run(num_particles=20, num_iterations=iterations, stop_threshold=1e-5,
            cost_function=costfunction, max_iter_no_improv=50,
            num_processors=20, save_samples=True)
    param_sets = convert_to_flat_array(pso, model)
    # print(param_sets)
    param_sets.to_csv('run' + run + '.csv')
def test_population_creation():
    pso = PSO(cost_function=h1, start=[10, 0], verbose=False)
    pso.set_bounds(lower=[-100, -100], upper=[100, 100])
    pso.run(num_iterations=100, num_particles=10)
    pso.return_ranked_populations()
    error = np.sum((pso.best - [8.6998, 6.7665]) ** 2)
    print('True value: [8.6998, 6.7665]. Found: {0}. Error^2 = {1}'.format(
        pso.best, error))
    assert (error < 0.1)
def __call__(self, pso_kwargs=None, cost_type='norm_logpdf', custom_cost=None):
    """Call the SwarmIt instance to construct an instance of the PSO object.

    Args:
        pso_kwargs (dict): Dictionary of any additional optional keyword
            arguments to pass to the PSO object constructor.
            Defaults to dict().
        cost_type (str): Define the type of cost estimator to use. Options are
            'norm_logpdf'=>Compute the cost using the normal distribution
            estimator, 'mse'=>Compute the cost using the negative mean squared
            error estimator, 'sse'=>Compute the cost using the negative sum of
            squared errors estimator, 'custom'=>Compute the cost using the
            user-defined function passed via custom_cost.
            Defaults to 'norm_logpdf'.
        custom_cost (callable): User-defined cost function, used when
            cost_type is 'custom'. Defaults to None.

    Returns:
        PSO: A PSO instance configured with the starting position, bounds,
            speed, and the selected cost function.
    """
    if pso_kwargs is None:
        pso_kwargs = dict()
    # self.ns_version = ns_version
    self._pso_kwargs = pso_kwargs
    # population_size = pso_population_size
    if cost_type == 'mse':
        cost = self.mse_cost
    elif cost_type == 'sse':
        cost = self.sse_cost
    elif (cost_type == 'custom') and (custom_cost is not None):
        self.set_custom_cost(custom_cost)
        cost = self.custom_cost
    else:
        cost = self.norm_logpdf_cost
    # Construct the PSO
    if 'save_sampled' not in pso_kwargs.keys():
        pso_kwargs['save_sampled'] = False
    if 'verbose' not in pso_kwargs.keys():
        pso_kwargs['verbose'] = False
    pso = PSO(**pso_kwargs)
    pso.set_start_position(self._starting_position)
    pso.set_cost_function(cost)
    pso.set_bounds(lower=self._lower, upper=self._upper)
    pso.set_speed(-.25, .25)
    return pso
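A minimal usage sketch of the __call__ method above. Here `swarm_it` is assumed to be an already-constructed SwarmIt instance (its constructor is not shown in this collection), and the keyword values are illustrative only:

# Hypothetical usage: 'swarm_it' is an assumed, already-built SwarmIt instance.
pso = swarm_it(pso_kwargs={'verbose': True}, cost_type='sse')
pso.run(num_particles=25, num_iterations=100, stop_threshold=1e-5)
print(pso.best)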
def run_example():
    # create PSO object
    pso = PSO(save_sampled=False, verbose=True, shrink_steps=False)
    pso.set_start_position(starting_position)
    # allows particles to move +/- 2 orders of magnitude
    pso.set_bounds(2)
    # sets maximum speed that a particle can travel
    pso.set_speed(-.1, .1)
    pso.run(num_particles=24, num_iterations=100, stop_threshold=1e-5,
            num_processors=18, max_iter_no_improv=20,
            cost_function=likelihood)
    display(pso.best.pos, save_name='best_fit')
    np.savetxt("pso_fit_for_model.csv", pso.best.pos)
    create_gif_of_model_training(pso)
def run_pso(run, iterations, bd, outdir='', suffix=''):
    pso = PSO(save_sampled=True, verbose=True, shrink_steps=False)
    # pso.set_cost_function(costfunction)
    pso.set_start_position(starting_params)
    pso.set_bounds(bd)
    pso.set_speed(-0.1, 0.1)
    pso.run(num_particles=20, num_iterations=iterations, stop_threshold=1e-5,
            cost_function=costfunction, max_iter_no_improv=500,
            num_processors=20, save_samples=True)
    param_sets = convert_to_flat_array(pso, model)
    # print(param_sets)
    outfile = os.path.join(outdir, 'run%d%s.csv' % (run, suffix))
    param_sets.to_csv(outfile, index_label='iter')
def test_himmelblau():
    """ test to see if PSO can find a simple minimum """
    minimums = [[3.0, 2.0], [-2.805118, 3.131312],
                [-3.779310, -3.283186], [3.584428, -1.848126]]
    pso = PSO(cost_function=himmelblau, start=[10, 0], verbose=False)
    pso.set_bounds(lower=[-100, -100], upper=[100, 100])
    pso.run(num_iterations=100, num_particles=10)
    good_min = False
    for i in minimums:
        if np.sum((pso.best - i) ** 2) < .1:
            good_min = True
            error = np.sum((pso.best - i) ** 2)
            found_min = i
    if good_min:
        print('Found minimum')
        print('True value: {0}. Found: {1}. Error^2 = {2}'.format(
            found_min, pso.best, error))
    assert good_min
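The `himmelblau` cost function referenced by the tests is not shown in this collection. A minimal sketch of the standard Himmelblau benchmark, assuming the PSO minimizes a one-element cost tuple as the other cost functions here return:

def himmelblau(position):
    # Assumed form (not from the source): the standard Himmelblau function,
    # with four global minima of value 0 at the points listed in `minimums` above.
    x, y = position
    value = (x ** 2 + y - 11) ** 2 + (x + y ** 2 - 7) ** 2
    return value,  # one-element tuple, matching the other cost functions here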
def run_example():
    # Here we initialize the class
    # We must provide the cost function and a starting value
    optimizer = PSO(cost_function=obj_function, start=start_position, verbose=True)
    # We also must set bounds. This can be a single scalar or an array of len(start_position)
    optimizer.set_bounds(parameter_range=3)
    optimizer.set_speed(speed_min=-.5, speed_max=.5)
    optimizer.run(num_particles=25, num_iterations=100)
    if plot:
        display(start_position, optimizer.best)

        # convert from log10 space back to linear values
        print("Original values {0}".format(10 ** log10_original_values))
        print("Starting values {0}".format(10 ** start_position))
        print("Best PSO values {0}".format(10 ** optimizer.best))

        fig = plt.figure()
        fig.add_subplot(221)
        plt.scatter(log10_original_values[0], log10_original_values[1],
                    marker='>', color='b', label='ideal')
        plt.scatter(start_position[0], start_position[1],
                    marker='^', color='r', label='start')
        plt.scatter(optimizer.history[:, 0], optimizer.history[:, 1],
                    c=optimizer.values, cmap=plt.cm.coolwarm)

        fig.add_subplot(223)
        plt.scatter(log10_original_values[0], log10_original_values[2],
                    marker='>', color='b', label='ideal')
        plt.scatter(start_position[0], start_position[2],
                    marker='^', color='r', label='start')
        plt.scatter(optimizer.history[:, 0], optimizer.history[:, 2],
                    c=optimizer.values, cmap=plt.cm.coolwarm)

        fig.add_subplot(222)
        plt.scatter(log10_original_values[1], log10_original_values[2],
                    marker='>', color='b', label='ideal')
        plt.scatter(start_position[1], start_position[2],
                    marker='^', color='r', label='start')
        plt.scatter(optimizer.history[:, 1], optimizer.history[:, 2],
                    c=optimizer.values, cmap=plt.cm.coolwarm)

        fig.add_subplot(224)
        plt.legend(loc=0)
        plt.colorbar()
        plt.tight_layout()
        plt.savefig('population.png')
def run_example():
    # Runs the cost function to calculate error between model and data
    print("Error at start = {}".format(likelihood(starting_position)[0]))
    # Displays the model with default positions
    display(starting_position, save_name='starting_position')
    # create PSO object
    pso = PSO(save_sampled=False, verbose=True, num_proc=4)
    pso.set_cost_function(likelihood)
    pso.set_start_position(starting_position)
    # allows particles to move +/- 2 orders of magnitude
    pso.set_bounds(2)
    # sets maximum speed that a particle can travel
    pso.set_speed(-.25, .25)
    pso.run(num_particles=25, num_iterations=50, stop_threshold=1e-5)
    display(pso.best, save_name='best_fit')
    np.savetxt("pso_fit_for_model.csv", pso.best)
def test_mismatched_bounds():
    pso = PSO(start=[10, 0], cost_function=himmelblau, verbose=False)
    pso.set_bounds(lower=[-100, 0, -100], upper=[100, 100])
    pso.run(num_iterations=100, num_particles=10)
def run_example_multiple():
    best_pars = np.zeros((100, len(model.parameters)))
    counter = 0
    for i in range(100):
        pso = PSO(save_sampled=False, verbose=False, num_proc=4)
        pso.set_cost_function(likelihood)
        nominal_random = xnominal + np.random.uniform(-1, 1, len(xnominal))
        pso.set_start_position(nominal_random)
        pso.set_bounds(2.5)
        pso.set_speed(-.25, .25)
        pso.run(25, 100)
        if pso.best.fitness.values[0] < 0.066:
            Y = np.copy(pso.best)
            param_values[rates_of_interest_mask] = 10 ** Y
            best_pars[counter] = param_values
            counter += 1
        print(i, counter)
    # display(pso.best)
    np.save('jnk3_noASK1_ncalibrated_pars_1h', best_pars)
def run_example():
    pso = PSO(save_sampled=False, verbose=True, num_proc=4)
    pso.set_cost_function(likelihood)
    pso.set_start_position(xnominal)
    pso.set_bounds(lower=lower, upper=upper)
    pso.set_speed(-.25, .25)
    pso.run(25, 200)
    display(pso.best)
    np.save('calibrated_pars_pso1', pso.best)
def run_example():
    pso = PSO(save_sampled=False, verbose=True, num_proc=4)
    pso.set_cost_function(likelihood)
    pso.set_start_position(xnominal)
    pso.set_bounds(2)
    pso.set_speed(-.25, .25)
    pso.run(25, 100)
    display(pso.best)
def run_pso(run, iterations, bd):
    pso = PSO(save_sampled=False, verbose=True, shrink_steps=False, num_proc=14)
    pso.set_cost_function(costfunction)
    pso.set_start_position(starting_position)
    pso.set_bounds(bd)
    pso.set_speed(-.1, .1)
    pso.run(num_particles=200, num_iterations=iterations, stop_threshold=1e-5)
    # print('best pos: ', pso.best.pos)
    print('history ', pso.history)
    print('run ', run)
    print('fit ', pso.best.fitness)
    print('all fitness ', pso.values)
    np.savetxt("H841_params_" + run + ".txt", 10 ** pso.history, delimiter=",")
    np.savetxt("H841_fit_" + run + ".txt", pso.values, delimiter=",")
log_original_values = np.log10(param_values[rate_mask])

if '__main__' == __name__:
    # We will use a best-guess starting position for the model
    start_position = log_original_values + \
        np.random.uniform(-1, 1, size=len(log_original_values))

    display(start_position, "Before optimization")
    plt.tight_layout()
    plt.savefig("fit_before_pso.png", bbox_inches='tight')
    logger.info("Saving pre-fit figure as fit_before_pso.png")

    # Here we initialize the class
    # We must provide the cost function and a starting value
    optimizer = PSO(start=start_position, verbose=True, shrink_steps=False)
    # We also must set bounds of the parameter space, and the speed PSO will
    # travel (max speed in either direction)
    optimizer.set_bounds(parameter_range=4)
    optimizer.set_speed(speed_min=-.05, speed_max=.05)

    # Now we run the pso algorithm
    optimizer.run(num_particles=50, num_iterations=500, num_processors=12,
                  cost_function=obj_function, max_iter_no_improv=25)

    best_params = optimizer.best.pos
    display(best_params, "After optimization")
def test_no_bounds():
    pso = PSO(start=[10, 0], cost_function=himmelblau, verbose=False)
    pso.run(num_iterations=100, num_particles=10)
def obj_function(params):
    params_tmp = np.copy(params)
    param_values[rate_mask] = 10 ** params_tmp  # don't need to change
    result = solver1.run(param_values=param_values)
    ysim_array1 = result.observables['MLKLa_obs'][:]
    ysim_norm1 = normalize(ysim_array1)
    e1 = np.sum((ydata_norm - ysim_norm1) ** 2)
    return e1,


def run_example():
    print('run_example')
    best_pars = np.zeros((1000, len(model.parameters)))
    counter = 0
    # Here we initialize the class
    # We must provide the cost function and a starting value
    for i in range(1000):
        optimizer = PSO(cost_function=obj_function, start=log10_original_values, verbose=True)
        # We also must set bounds. This can be a single scalar or an array of len(start_position)
        optimizer.set_bounds(parameter_range=2)
        optimizer.set_speed(speed_min=-.25, speed_max=.25)
        optimizer.run(num_particles=75, num_iterations=25)
        best_pars[i] = optimizer.best
        print(optimizer.best)
        # print(i, counter)
    np.save('optimizer_best_75_50_100TNF', best_pars)


if '__main__' == __name__:
    run_example()
def test_missing_cost_function():
    pso = PSO(start=[10, 0], verbose=False)
    pso.set_bounds(lower=[-100, -100], upper=[100, 100])
    pso.run(num_iterations=100, num_particles=10)
# USER-Set: must appropriately update cost function!
def cost(position):
    Y = np.copy(position)
    param_values[calibrate_mask] = 10 ** Y
    sim = solver.run(param_values=param_values).all
    logp_data = np.sum(like_data.logpdf(sim['observable']))
    if np.isnan(logp_data):
        # penalize failed/NaN simulations so the returned cost is +inf
        logp_data = -np.inf
    return -logp_data,


# Setup the particle swarm optimization run

# Set the number of particles in the swarm.
num_particles = 25
# Set the number of iterations for PSO run.
num_iterations = 50

# Construct the optimizer
pso = PSO(save_sampled=False, verbose=True, num_procs=1)
pso.set_cost_function(cost)
starting_position = swarm_param.centers()
pso.set_start_position(starting_position)
pso.set_bounds(lower=swarm_param.lower(), upper=swarm_param.upper())
# sets maximum speed that a particle can travel
pso.set_speed(-.25, .25)

# run it
pso.run(num_particles, num_iterations, stop_threshold=1e-5)
print("Best parameters: ", pso.best)