def main(filename):
    """Run fitting process: scale simulations and fit approximate models."""

    def checkpoint(data):
        # Persist intermediate results after each stage so partial progress
        # survives an interrupted run.
        with open(filename + '.json', "w") as outfile:
            json.dump(data, outfile, indent=4)

    # Parameter sets and initial conditions: original Cobb values and the
    # corrected set, both unscaled at this stage.
    _, old_params = utils.get_setup_params(
        parameters.COBB_PARAMS, scale_inf=False,
        host_props=parameters.COBB_PROP_FIG4A, extra_spread=False)
    setup, new_params = utils.get_setup_params(
        parameters.CORRECTED_PARAMS, scale_inf=False,
        host_props=parameters.COBB_PROP_FIG4A, extra_spread=False)

    # First find beta scaling that gives matching time scale using corrected model
    scaling_factor = fitting.scale_sim_model(setup, old_params, new_params,
                                             time_step=0.001)
    logging.info("Simulation scaling factor found: %f", scaling_factor)
    results = {'sim_scaling_factor': scaling_factor}
    checkpoint(results)

    # Fit the approximate model's beta parameters with scaled infection rates.
    setup, params = utils.get_setup_params(
        parameters.CORRECTED_PARAMS, scale_inf=True,
        host_props=parameters.COBB_PROP_FIG4A, extra_spread=False)
    tanoak_factors, beta = fit_beta(setup, params)
    results['tanoak_beta_factors'] = tanoak_factors.tolist()

    beta_names = [
        'beta_1,1', 'beta_1,2', 'beta_1,3', 'beta_1,4', 'beta_12', 'beta_21',
        'beta_2'
    ]
    for name, value in zip(beta_names, beta):
        results[name] = value
        logging.info("%s: %f", name, value)
    checkpoint(results)

    # Finally, scale the roguing control rate.
    results['roguing_factor'] = scale_control()
    checkpoint(results)
def min_func(factor):
    """Function to minimise, SSE between healthy tanoak over range of rates.

    NOTE(review): this relies on names from an enclosing scope --
    `test_rates`, `scale_and_fit_results`, `sim_tans` and
    `const_rogue_policy` are not defined here, so this is presumably a
    closure inside a roguing-rate scan routine; confirm against the caller.
    """
    # Healthy tanoak totals predicted by the approximate model, one per rate.
    approx_tans = np.zeros_like(test_rates)
    setup, params = utils.get_setup_params(
        parameters.CORRECTED_PARAMS, scale_inf=True,
        host_props=parameters.COBB_PROP_FIG4A, extra_spread=False)
    # Large budget -- presumably so control is not budget-limited; verify.
    params['max_budget'] = 1000
    beta_names = [
        'beta_1,1', 'beta_1,2', 'beta_1,3', 'beta_1,4', 'beta_12', 'beta_21',
        'beta_2'
    ]
    # Fitted infection parameters for the approximate model.
    beta = np.array([scale_and_fit_results[x] for x in beta_names])
    for i, rate in enumerate(test_rates):
        # Candidate scaling factor applied to each test roguing rate.
        params['rogue_rate'] = rate * factor
        approx_model = ms_approx.MixedStandApprox(setup, params, beta)
        approx_run = approx_model.run_policy(const_rogue_policy)
        # State indices 6, 8, 9, 11 at the final time -- the healthy tanoak
        # classes per the docstring/logging.
        approx_tans[i] = np.sum(approx_run[0][[6, 8, 9, 11], -1])
        logging.info("Approx run, Factor %f, Rate: %f, tans: %f", factor,
                     rate, approx_tans[i])
    # Sum of squared errors against the simulation values.
    return np.sum(np.square(approx_tans - sim_tans))
def make_data(folder=None):
    """Scan over budgets to analyse change in OL control.

    For each budget, finds the open-loop (OL) optimal control using the
    approximate model, then uses it to initialise an MPC optimisation; both
    results are pickled into ``folder``.

    Parameters
    ----------
    folder : str, optional
        Output directory; defaults to ``<repo>/data/budget_scan``.
    """
    if folder is None:
        # FIX: take dirname first. Joining '..' directly onto the file path
        # treats the .py file itself as a directory, which fails on POSIX.
        folder = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                              '..', 'data', 'budget_scan')
    with open(os.path.join("data", "scale_and_fit_results.json"),
              "r") as infile:
        scale_and_fit_results = json.load(infile)
    setup, params = utils.get_setup_params(
        parameters.CORRECTED_PARAMS, scale_inf=True,
        host_props=parameters.COBB_PROP_FIG4A)
    logging.info("Parameters: %s", params)
    beta_names = [
        'beta_1,1', 'beta_1,2', 'beta_1,3', 'beta_1,4', 'beta_12', 'beta_21',
        'beta_2'
    ]
    beta = np.array([scale_and_fit_results[x] for x in beta_names])
    # Approximate model uses the rescaled roguing rate; cost is divided by
    # the same factor.
    approx_params = copy.deepcopy(params)
    approx_params['rogue_rate'] *= scale_and_fit_results['roguing_factor']
    approx_params['rogue_cost'] /= scale_and_fit_results['roguing_factor']
    approx_model = ms_approx.MixedStandApprox(setup, approx_params, beta)
    # budgets = np.array([8, 10, 12, 14, 16, 18, 20])
    budgets = np.array([36])
    for budget in budgets:
        logging.info("Budget: %f", budget)
        params['max_budget'] = budget
        approx_model.params['max_budget'] = budget
        approx_params['max_budget'] = budget
        # Open-loop optimisation, seeded with an even allocation policy.
        _, control, _ = approx_model.optimise(n_stages=20,
                                              init_policy=even_policy)
        approx_model.save_optimisation(
            os.path.join(folder, "budget_" + str(int(budget)) + "_OL.pkl"))
        # MPC optimisation initialised from the OL solution.
        mpc_controller = mpc.Controller(setup, params, beta,
                                        approx_params=approx_params)
        mpc_controller.optimise(horizon=100, time_step=0.5, end_time=100,
                                update_period=20, rolling_horz=False,
                                stage_len=5, init_policy=control,
                                use_init_first=True)
        mpc_controller.save_optimisation(
            os.path.join(folder, "budget_" + str(int(budget)) + "_MPC.pkl"))
def run_optimisations(datapath):
    """Run OL and MPC frameworks using newly parameterised roguing rate."""
    # Fitted beta values and the globally scaled roguing factor from disk.
    with open(os.path.join("data", "scale_and_fit_results.json"),
              "r") as infile:
        scale_and_fit_results = json.load(infile)
    with open(os.path.join(datapath, "scaling_results.json"), "r") as infile:
        global_scaling = json.load(infile)

    setup, params = utils.get_setup_params(
        parameters.CORRECTED_PARAMS, scale_inf=True,
        host_props=parameters.COBB_PROP_FIG4A)

    beta_names = [
        'beta_1,1', 'beta_1,2', 'beta_1,3', 'beta_1,4', 'beta_12', 'beta_21',
        'beta_2'
    ]
    beta = np.array([scale_and_fit_results[name] for name in beta_names])

    # Rescale roguing rate for the approximate model; the cost is divided by
    # the same factor.
    control_factor = global_scaling['roguing_factor']
    approx_params = copy.deepcopy(params)
    approx_params['rogue_rate'] *= control_factor
    approx_params['rogue_cost'] /= control_factor

    approx_model = ms_approx.MixedStandApprox(setup, approx_params, beta)

    def even_policy(time):
        # Constant policy: 10% allocation to each of the 9 controls.
        return np.full(9, 0.1)

    # Open-loop optimisation seeded with the even policy.
    approx_model.optimise(n_stages=20, init_policy=even_policy)
    approx_model.save_optimisation(
        os.path.join(datapath, "OL_GloballyScaledControl.pkl"))

    # Piecewise-constant interpolation of the OL solution seeds the MPC run.
    ol_control = interp1d(setup['times'][:-1],
                          approx_model.optimisation['control'],
                          kind="zero", fill_value="extrapolate")
    mpc_controller = mpc.Controller(setup, params, beta,
                                    approx_params=approx_params)
    mpc_controller.optimise(horizon=100, time_step=0.5, end_time=100,
                            update_period=20, rolling_horz=False, stage_len=5,
                            init_policy=ol_control, use_init_first=True)
    mpc_controller.save_optimisation(
        os.path.join(datapath, "MPC_GloballyScaledControl.pkl"))
def run_scan(filename):
    """Scan over scaling factors."""
    factors = np.arange(0.5, 5.05, 0.05)
    setup, new_params = utils.get_setup_params(
        parameters.CORRECTED_PARAMS, scale_inf=False,
        host_props=parameters.COBB_PROP_FIG4A, extra_spread=False)
    # Fine time grid over 50 years.
    setup['times'] = np.arange(0, 50, step=0.01)

    # Infection-rate parameters scaled together by each candidate factor.
    infection_keys = ('inf_tanoak_tanoak', 'inf_bay_to_bay',
                      'inf_bay_to_tanoak', 'inf_tanoak_to_bay')
    cross_over_times = []
    for factor in factors:
        params = copy.deepcopy(new_params)
        for key in infection_keys:
            params[key] *= factor
        model = ms_sim.MixedStandSimulator(setup, params)
        sim_run, *_ = model.run_policy(control_policy=None)
        logging.info("Done sim")
        cross_over_time = fitting._get_crossover_time(sim_run, model.ncells,
                                                      setup['times'])
        cross_over_times.append(cross_over_time)
        logging.info("Factor: %f, cross-over time: %f", factor,
                     cross_over_time)

    # Write factor/cross-over pairs to CSV.
    csv_file = filename + '_scan.csv'
    with open(csv_file, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter=',', quotechar='|',
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerow(['ScalingFactor', 'CrossOverTime'])
        for factor, crossover in zip(factors, cross_over_times):
            writer.writerow([factor, crossover])
def run_scan_control(filename):
    """Scan over control scaling factor.

    Runs the simulation model across a range of roguing rates, then for each
    candidate scaling factor evaluates the sum of squared errors (SSE)
    between simulated and approximate-model healthy tanoak. Results are
    written to ``<filename>_scan_control.csv``.
    """
    factors = np.arange(0.5, 1.51, 0.01)
    test_rates = np.linspace(0, 1.0, 51)
    diff_results = np.zeros_like(factors)
    # Run simulations for range of roguing rates, with constant control rates
    setup, params = utils.get_setup_params(
        parameters.CORRECTED_PARAMS, scale_inf=True,
        host_props=parameters.COBB_PROP_FIG4A, extra_spread=False)
    params['max_budget'] = 1000
    with open(os.path.join("data", "scale_and_fit_results.json"),
              "r") as infile:
        scale_and_fit_results = json.load(infile)
    sim_tans = np.zeros_like(test_rates)
    # FIX: np.product was removed in NumPy 2.0; np.prod is the supported name.
    ncells = np.prod(setup['landscape_dims'])
    for i, rate in enumerate(test_rates):
        params['rogue_rate'] = rate
        model = ms_sim.MixedStandSimulator(setup, params)
        sim_run = model.run_policy(const_rogue_policy)
        # Average over cells: 15 state variables per cell.
        sim_state = np.sum(sim_run[0].reshape((ncells, 15, -1)),
                           axis=0) / ncells
        # Indices 6, 8, 9, 11 at the final time are the healthy tanoak
        # classes (per the log message below).
        sim_tans[i] = np.sum(sim_state[[6, 8, 9, 11], -1])
        logging.info("Sim run, rate: %f, healthy tans: %f", rate, sim_tans[i])

    def min_func(factor):
        """Function to minimise, SSE between healthy tanoak over range of rates."""
        approx_tans = np.zeros_like(test_rates)
        setup, params = utils.get_setup_params(
            parameters.CORRECTED_PARAMS, scale_inf=True,
            host_props=parameters.COBB_PROP_FIG4A, extra_spread=False)
        params['max_budget'] = 1000
        beta_names = [
            'beta_1,1', 'beta_1,2', 'beta_1,3', 'beta_1,4', 'beta_12',
            'beta_21', 'beta_2'
        ]
        beta = np.array([scale_and_fit_results[x] for x in beta_names])
        for i, rate in enumerate(test_rates):
            # Candidate scaling applied to the roguing rate.
            params['rogue_rate'] = rate * factor
            approx_model = ms_approx.MixedStandApprox(setup, params, beta)
            approx_run = approx_model.run_policy(const_rogue_policy)
            approx_tans[i] = np.sum(approx_run[0][[6, 8, 9, 11], -1])
            logging.info("Approx run, Factor %f, Rate: %f, tans: %f", factor,
                         rate, approx_tans[i])
        return np.sum(np.square(approx_tans - sim_tans))

    for i, factor in enumerate(factors):
        diff = min_func(factor)
        diff_results[i] = diff
    csv_file = filename + '_scan_control.csv'
    with open(csv_file, 'w', newline='') as csvfile:
        spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|',
                                quoting=csv.QUOTE_MINIMAL)
        spamwriter.writerow(['ControlScalingFactor', 'SSE'])
        for i, factor in enumerate(factors):
            spamwriter.writerow([factor, diff_results[i]])
def make_plots(data_folder=None, fig_folder=None):
    """Generate plot of budget scan analysis.

    Loads the OL and MPC optimisation results produced for each budget,
    converts optimised controls to absolute expenditure per control type, and
    plots stacked expenditure bars with objective values on a twin axis. The
    figure is saved to ``<fig_folder>/BudgetScan.pdf``.

    Parameters
    ----------
    data_folder : str, optional
        Directory holding the budget scan pickles; defaults to
        ``<repo>/data/budget_scan``.
    fig_folder : str, optional
        Output directory for the figure; defaults to
        ``<repo>/figures/budget_scan``.
    """
    if data_folder is None:
        # FIX: take dirname first. Joining '..' directly onto the file path
        # treats the .py file itself as a directory, which fails on POSIX.
        data_folder = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), '..', 'data',
            'budget_scan')
    if fig_folder is None:
        fig_folder = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), '..', 'figures',
            'budget_scan')
    # Ensure the figure directory exists before savefig at the end.
    os.makedirs(fig_folder, exist_ok=True)
    budgets = np.array([8, 10, 12, 14, 16, 18, 20])
    n_budgets = len(budgets)
    with open(os.path.join("data", "scale_and_fit_results.json"),
              "r") as infile:
        scale_and_fit_results = json.load(infile)
    setup, params = utils.get_setup_params(
        parameters.CORRECTED_PARAMS, scale_inf=True,
        host_props=parameters.COBB_PROP_FIG4A)
    model = ms_sim.MixedStandSimulator(setup, params)
    beta_names = [
        'beta_1,1', 'beta_1,2', 'beta_1,3', 'beta_1,4', 'beta_12', 'beta_21',
        'beta_2'
    ]
    beta = np.array([scale_and_fit_results[x] for x in beta_names])
    approx_params = copy.deepcopy(params)
    approx_params['rogue_rate'] *= scale_and_fit_results['roguing_factor']
    approx_params['rogue_cost'] /= scale_and_fit_results['roguing_factor']
    approx_model = ms_approx.MixedStandApprox(setup, approx_params, beta)
    # Collect control proportions
    order = np.array([3, 4, 5, 6, 0, 1, 2, 7, 8])
    control_abs_ol = np.zeros((n_budgets, 9))
    control_abs_mpc = np.zeros((n_budgets, 9))
    real_objectives_ol = np.zeros(n_budgets)
    real_objectives_mpc = np.zeros(n_budgets)
    approx_objectives_ol = np.zeros(n_budgets)
    approx_objectives_mpc = np.zeros(n_budgets)
    for i, budget in enumerate(budgets):
        logging.info("Budget: %f", budget)
        approx_model.load_optimisation(
            os.path.join(data_folder,
                         "budget_" + str(int(budget)) + "_OL.pkl"))
        control = approx_model.optimisation['control']
        control_policy = interp1d(setup['times'][:-1], control, kind="zero",
                                  fill_value="extrapolate")
        model.run_policy(control_policy)
        approx_model.run_policy(control_policy)
        # Convert control proportions to absolute expenditure per control.
        control[0:3] *= params['rogue_rate'] * params['rogue_cost'] * 0.5
        control[3:7] *= params['thin_rate'] * params['thin_cost'] * 0.5
        control[7:] *= params['protect_rate'] * params['protect_cost'] * 0.5
        control[0] *= params['rel_small_cost']
        control[3] *= params['rel_small_cost']
        control_abs_ol[i] = (np.sum(control, axis=1))[order] / 100
        real_objectives_ol[i] = model.run['objective']
        approx_objectives_ol[i] = approx_model.run['objective']
        # FIX: load MPC results from data_folder rather than a hard-coded
        # "data/budget_scan" path, so a caller-supplied folder is honoured.
        mpc_controller = mpc.Controller.load_optimisation(
            os.path.join(data_folder,
                         "budget_" + str(int(budget)) + "_MPC.pkl"))
        sim_run, approx_run = mpc_controller.run_control()
        approx_model.run['state'] = approx_run[0]
        approx_model.run['objective'] = approx_run[1]
        model.run['state'] = sim_run[0]
        model.run['objective'] = sim_run[1]
        control = mpc_controller.control
        control[0:3] *= params['rogue_rate'] * params['rogue_cost'] * 0.5
        control[3:7] *= params['thin_rate'] * params['thin_cost'] * 0.5
        control[7:] *= params['protect_rate'] * params['protect_cost'] * 0.5
        control[0] *= params['rel_small_cost']
        control[3] *= params['rel_small_cost']
        control_abs_mpc[i] = (np.sum(control, axis=1))[order] / 100
        real_objectives_mpc[i] = model.run['objective']
        approx_objectives_mpc[i] = approx_model.run['objective']
    # Make plot
    labels = [
        "Thin Tan (Small)", "Thin Tan (Large)", "Thin Bay", "Thin Red",
        "Rogue Tan (Small)", "Rogue Tan (Large)", "Rogue Bay",
        "Protect Tan (Small)", "Protect Tan (Large)"
    ]
    colors = visualisation.CONTROL_COLOURS
    # Open-loop plot
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax2 = ax.twinx()
    color = 'tab:red'
    ax2.set_ylabel('Objective', color=color)
    ax2.plot(budgets, -1 * approx_objectives_ol, '--', color=color,
             label='OL approximate model objective')
    ax2.tick_params(axis='y', labelcolor=color)
    bars = []
    for i in range(9):
        b = ax.bar(budgets - 10, control_abs_ol[:, i],
                   bottom=np.sum(control_abs_ol[:, :i], axis=1),
                   color=colors[i], width=15, alpha=0.75)
        bars.append(b)
    ax2.legend(ncol=1)
    ax.set_xlabel("Budget")
    ax.set_ylabel("Expenditure")
    color = 'tab:red'
    ax2.set_ylabel('Objective', color=color)
    ax2.plot(budgets, -1 * real_objectives_mpc, '-', color=color,
             label='MPC simulation objective')
    ax2.tick_params(axis='y', labelcolor=color)
    bars = []
    for i in range(9):
        b = ax.bar(budgets + 10, control_abs_mpc[:, i],
                   bottom=np.sum(control_abs_mpc[:, :i], axis=1),
                   color=colors[i], width=15, alpha=0.75)
        bars.append(b)
    ax.legend(bars, labels, bbox_to_anchor=(0.5, -0.15), loc="upper center",
              ncol=3, frameon=True)
    ax2.legend(ncol=1, loc='upper left')
    ax.set_xlabel("Budget")
    ax.set_ylabel("Expenditure")
    ax.set_title('Left: OL, right: MPC', fontsize=8)
    ax.set_ylim([0, 900])
    ax2.set_ylim([0.0, 1.8])
    ax.set_xticks([0, 50, 100, 200, 300, 400, 500, 600, 700])
    fig.tight_layout()
    fig.savefig(os.path.join(fig_folder, "BudgetScan.pdf"), dpi=300)
def main(n_reps=10, sigma=0.25, append=False,
         folder_name='parameter_sensitivity', run_mpc=False):
    """Run sensitivity tests.

    Perturbs model parameters, refits the approximate model and re-optimises
    control for each perturbed parameter set, saving per-repeat results under
    ``data/<folder_name>``.

    Parameters
    ----------
    n_reps : int
        Number of perturbed parameter sets to analyse.
    sigma : float
        Scale of the multiplicative (truncated normal) perturbations.
    append : bool
        If True, continue from a previously saved dataset.
    folder_name : str
        Subdirectory of ``data`` for outputs.
    run_mpc : bool
        If True, additionally run the MPC optimisations.
    """
    os.makedirs(os.path.join('data', folder_name), exist_ok=True)
    # Analysis:
    # 1. First construct default parameters (corrected and scaled Cobb)
    setup, params = utils.get_setup_params(
        parameters.CORRECTED_PARAMS, scale_inf=True,
        host_props=parameters.COBB_PROP_FIG4A)
    mpc_args = {
        'horizon': 100,
        'time_step': 0.5,
        'end_time': 100,
        'update_period': 20,
        'rolling_horz': False,
        'stage_len': 5,
        'init_policy': None,
        'use_init_first': True
    }
    # FIX: np.product was removed in NumPy 2.0; np.prod is the supported name.
    ncells = np.prod(setup['landscape_dims'])
    # Baseline no control run
    model = ms_sim.MixedStandSimulator(setup, params)
    model.run_policy(control_policy=None, n_fixed_steps=None)
    with open(os.path.join("data", "scale_and_fit_results.json"),
              "r") as infile:
        scale_and_fit_results = json.load(infile)
    if not append:
        model.save_run(
            os.path.join("data", folder_name, "no_control_baseline.pkl"))
    beta_names = [
        'beta_1,1', 'beta_1,2', 'beta_1,3', 'beta_1,4', 'beta_12', 'beta_21',
        'beta_2'
    ]
    beta = np.array([scale_and_fit_results[x] for x in beta_names])
    approx_params = copy.deepcopy(params)
    approx_params['rogue_rate'] *= scale_and_fit_results['roguing_factor']
    approx_params['rogue_cost'] /= scale_and_fit_results['roguing_factor']
    approx_model = ms_approx.MixedStandApprox(setup, approx_params, beta)
    logging.info("Running baseline OL control")
    _, baseline_ol_control_policy, exit_text = approx_model.optimise(
        n_stages=20, init_policy=even_policy)
    approx_model.save_optimisation(
        os.path.join("data", folder_name, "ol_control_baseline.pkl"))
    if run_mpc:
        logging.info("Running baseline MPC control")
        mpc_args['init_policy'] = baseline_ol_control_policy
        mpc_controller = mpc.Controller(setup, params, beta,
                                        approx_params=approx_params)
        mpc_controller.optimise(**mpc_args)
        mpc_controller.save_optimisation(
            os.path.join("data", folder_name, "mpc_control_baseline.pkl"))
    # Which parameters to perturb:
    # First single numbers that can be perturbed
    perturbing_params_numbers = [
        'inf_bay_to_bay', 'inf_bay_to_tanoak', 'inf_tanoak_to_bay',
        'nat_mort_bay', 'nat_mort_redwood', 'recov_tanoak', 'recov_bay',
        'resprout_tanoak'
    ]
    # And lists of parameters
    perturbing_params_lists = [
        'inf_tanoak_tanoak', 'nat_mort_tanoak', 'inf_mort_tanoak',
        'trans_tanoak', 'recruit_tanoak'
    ]
    if append:
        logging.info("Loading previous dataset to append new data to")
        # Read in summary data already generated
        with open(os.path.join("data", folder_name, "summary.json"),
                  "r") as infile:
            summary_results = json.load(infile)
        approx_model = ms_approx.MixedStandApprox.load_optimisation_class(
            os.path.join("data", folder_name, "ol_control_baseline.pkl"))
        baseline_ol_control_policy = interp1d(
            approx_model.setup['times'][:-1],
            approx_model.optimisation['control'], kind="zero",
            fill_value="extrapolate")
        # Continue numbering from the existing repeats.
        n_reps = (len(summary_results), len(summary_results) + n_reps)
        ol_alloc_results = np.load(
            os.path.join("data", folder_name, "ol_alloc_results.npy"))
        mpc_alloc_results = np.load(
            os.path.join("data", folder_name, "mpc_alloc_results.npy"))
    else:
        # Otherwise start afresh
        summary_results = []
        ol_alloc_results = np.zeros((0, 9, len(setup['times']) - 1))
        mpc_alloc_results = np.zeros((0, 9, len(setup['times']) - 1))
        n_reps = (0, n_reps)
    # Multiplicative error, truncated so perturbed values stay non-negative.
    error_dist = truncnorm(-1.0 / sigma, np.inf, loc=1.0, scale=sigma)
    for i in range(*n_reps):
        # 2. Perturb these parameters using Normal distribution, sigma 25%
        logging.info("Perturbing parameter set %d of %d with sigma %f", i + 1,
                     n_reps[1], sigma)
        new_params = copy.deepcopy(params)
        for param_key in perturbing_params_numbers:
            new_params[param_key] = new_params[param_key] * error_dist.rvs()
        for param_key in perturbing_params_lists:
            new_params[param_key] = (
                new_params[param_key] *
                error_dist.rvs(size=len(new_params[param_key])))
        # Set space weights and recruitment rates to NaN so can be recaluclated for dyn equilibrium
        new_params['recruit_bay'] = np.nan
        new_params['recruit_redwood'] = np.nan
        new_params['space_tanoak'] = np.full(4, np.nan)
        # 3. Recalculate space weights & recruitment rates to give dynamic equilibrium
        new_params, _ = utils.initialise_params(
            new_params, host_props=parameters.COBB_PROP_FIG4A)
        # 4. Run simulation model with no control policy
        model = ms_sim.MixedStandSimulator(setup, new_params)
        model.run_policy(control_policy=None, n_fixed_steps=None)
        model.save_run(
            os.path.join("data", folder_name, "no_control_{}.pkl".format(i)))
        # 5. Fit approximate model
        _, beta = scale_and_fit.fit_beta(setup, new_params)
        approx_new_params = copy.deepcopy(params)
        approx_new_params['rogue_rate'] *= scale_and_fit_results[
            'roguing_factor']
        approx_new_params['rogue_cost'] /= scale_and_fit_results[
            'roguing_factor']
        # 6. Optimise control (open-loop)
        approx_model = ms_approx.MixedStandApprox(setup, approx_new_params,
                                                  beta)
        *_, exit_text = approx_model.optimise(
            n_stages=20, init_policy=baseline_ol_control_policy)
        if exit_text not in [
                "Optimal Solution Found.", "Solved To Acceptable Level."
        ]:
            logging.warning(
                "Failed optimisation. Trying intialisation from previous solution."
            )
            filename = os.path.join(
                os.path.dirname(os.path.realpath(__file__)), '..',
                'mixed_stand_model', "BOCOP", "problem.def")
            with open(filename, "r") as infile:
                all_lines = infile.readlines()
            # Toggle two pairs of lines in problem.def -- presumably switching
            # the BOCOP initialisation mode; confirm against the config file.
            all_lines[31] = "# " + all_lines[31]
            all_lines[32] = "# " + all_lines[32]
            all_lines[33] = all_lines[33][2:]
            all_lines[34] = all_lines[34][2:]
            with ms_approx._try_file_open(filename) as outfile:
                outfile.writelines(all_lines)
            *_, exit_text = approx_model.optimise(
                n_stages=20, init_policy=baseline_ol_control_policy)
            # Restore the original problem.def contents.
            all_lines[31] = all_lines[31][2:]
            all_lines[32] = all_lines[32][2:]
            all_lines[33] = "# " + all_lines[33]
            all_lines[34] = "# " + all_lines[34]
            with ms_approx._try_file_open(filename) as outfile:
                outfile.writelines(all_lines)
            if exit_text not in [
                    "Optimal Solution Found.", "Solved To Acceptable Level."
            ]:
                logging.error(
                    "Failed optimisation. Falling back to init policy.")
        approx_model.save_optimisation(
            os.path.join("data", folder_name, "ol_control_{}.pkl".format(i)))
        # Run OL control to get objective
        ol_control_policy = interp1d(setup['times'][:-1],
                                     approx_model.optimisation['control'],
                                     kind="zero", fill_value="extrapolate")
        sim_run = model.run_policy(ol_control_policy)
        ol_obj = model.run['objective']
        sim_state = np.sum(np.reshape(sim_run[0], (ncells, 15, -1)),
                           axis=0) / ncells
        allocation = (np.array([
            sim_state[1] + sim_state[4], sim_state[7] + sim_state[10],
            sim_state[13],
            np.sum(sim_state[0:6], axis=0),
            np.sum(sim_state[6:12], axis=0), sim_state[12] + sim_state[13],
            sim_state[14], sim_state[0] + sim_state[3],
            sim_state[6] + sim_state[9]
        ])[:, :-1] * approx_model.optimisation['control'])
        allocation[0:3] *= params['rogue_rate'] * params['rogue_cost']
        allocation[3:7] *= params['thin_rate'] * params['thin_cost']
        allocation[7:] *= params['protect_rate'] * params['protect_cost']
        allocation[0] *= params['rel_small_cost']
        allocation[3] *= params['rel_small_cost']
        expense = utils.control_expenditure(
            approx_model.optimisation['control'], new_params,
            sim_state[:, :-1])
        # Rescale allocations in time steps that exceed the budget.
        for j in range(len(setup['times']) - 1):
            if expense[j] > new_params['max_budget']:
                allocation[:, j] *= new_params['max_budget'] / expense[j]
        ol_alloc_results = np.concatenate([ol_alloc_results, [allocation]],
                                          axis=0)
        # FIX: mpc_obj was previously unassigned when run_mpc is False,
        # causing a NameError in the summary below; default to None (stored
        # as JSON null).
        mpc_obj = None
        if run_mpc:
            mpc_args['init_policy'] = ol_control_policy
            # Optimise control (MPC)
            mpc_controller = mpc.Controller(setup, new_params, beta,
                                            approx_params=approx_new_params)
            *_, mpc_obj = mpc_controller.optimise(**mpc_args)
            mpc_controller.save_optimisation(
                os.path.join("data", folder_name,
                             "mpc_control_{}.pkl".format(i)))
            sim_run, _ = mpc_controller.run_control()
            sim_state = np.sum(np.reshape(sim_run[0], (ncells, 15, -1)),
                               axis=0) / ncells
            allocation = (np.array([
                sim_state[1] + sim_state[4], sim_state[7] + sim_state[10],
                sim_state[13],
                np.sum(sim_state[0:6], axis=0),
                np.sum(sim_state[6:12], axis=0),
                sim_state[12] + sim_state[13], sim_state[14],
                sim_state[0] + sim_state[3], sim_state[6] + sim_state[9]
            ])[:, :-1] * mpc_controller.control)
            allocation[0:3] *= params['rogue_rate'] * params['rogue_cost']
            allocation[3:7] *= params['thin_rate'] * params['thin_cost']
            allocation[7:] *= params['protect_rate'] * params['protect_cost']
            allocation[0] *= params['rel_small_cost']
            allocation[3] *= params['rel_small_cost']
            expense = utils.control_expenditure(mpc_controller.control,
                                                new_params, sim_state[:, :-1])
            for j in range(len(setup['times']) - 1):
                if expense[j] > new_params['max_budget']:
                    allocation[:, j] *= new_params['max_budget'] / expense[j]
            mpc_alloc_results = np.concatenate(
                [mpc_alloc_results, [allocation]], axis=0)
        # Arrays must be converted to lists for JSON serialisation.
        list_keys = [
            'inf_tanoak_tanoak', 'nat_mort_tanoak', 'inf_mort_tanoak',
            'trans_tanoak', 'recruit_tanoak', 'space_tanoak'
        ]
        for key in list_keys:
            new_params[key] = new_params[key].tolist()
        summary_results.append({
            'iteration': i,
            'params': new_params,
            'beta': beta.tolist(),
            'ol_objective': ol_obj,
            'mpc_objective': mpc_obj
        })
        # Write summary results to file
        with open(os.path.join("data", folder_name, "summary.json"),
                  "w") as outfile:
            json.dump(summary_results, outfile, indent=4)
        # Save control allocations to file
        np.save(os.path.join("data", folder_name, "ol_alloc_results.npy"),
                ol_alloc_results)
        np.save(os.path.join("data", folder_name, "mpc_alloc_results.npy"),
                mpc_alloc_results)
def make_data(n_reps=10, folder=None, append=False):
    """Generate data analysing effect of sampling effort.

    For each sampling effort, runs ``n_reps`` MPC optimisations in which the
    simulation state is observed by sampling ``n_samples`` of ``pop_size``
    stems, then stores controls, objectives and observed/actual states to a
    compressed ``.npz`` file per sampling effort.

    Parameters
    ----------
    n_reps : int
        Number of replicate MPC runs per sampling effort.
    folder : str, optional
        Output directory; defaults to ``<repo>/data/obs_uncert``.
    append : bool
        If True, append new replicates to existing data files.
    """
    if folder is None:
        # FIX: take dirname first. Joining '..' directly onto the file path
        # treats the .py file itself as a directory, which fails on POSIX.
        folder = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                              '..', 'data', 'obs_uncert')
    setup, params = utils.get_setup_params(
        parameters.CORRECTED_PARAMS, scale_inf=True,
        host_props=parameters.COBB_PROP_FIG4A)
    # FIX: np.product was removed in NumPy 2.0; np.prod is the supported name.
    ncells = np.prod(setup['landscape_dims'])
    # Use population size of 500 as 500m2 per cell
    pop_size = 500
    sampling_nums = np.array(
        [1, 2, 3, 5, 7, 10, 15, 25, 35, 50, 70, 100, 150, 250, 350, 500])
    with open(os.path.join("data", "scale_and_fit_results.json"),
              "r") as infile:
        scale_and_fit_results = json.load(infile)
    beta_names = [
        'beta_1,1', 'beta_1,2', 'beta_1,3', 'beta_1,4', 'beta_12', 'beta_21',
        'beta_2'
    ]
    beta = np.array([scale_and_fit_results[x] for x in beta_names])
    approx_params = copy.deepcopy(params)
    approx_params['rogue_rate'] *= scale_and_fit_results['roguing_factor']
    approx_params['rogue_cost'] /= scale_and_fit_results['roguing_factor']
    mpc_args = {
        'horizon': 100,
        'time_step': 0.5,
        'end_time': 100,
        'update_period': 20,
        'rolling_horz': False,
        'stage_len': 5,
        'use_init_first': True
    }
    approx_model = ms_approx.MixedStandApprox(setup, approx_params, beta)
    _, baseline_ol_control, _ = approx_model.optimise(
        n_stages=20, init_policy=even_policy)
    mpc_args['init_policy'] = baseline_ol_control
    for n_samples in sampling_nums:
        logging.info("Running with %d/%d stems sampled, %f%%", n_samples,
                     pop_size, 100 * n_samples / pop_size)
        observer = observer_factory(pop_size, n_samples)
        mpc_controls = np.zeros((n_reps, 9, len(setup['times']) - 1))
        mpc_objs = np.zeros(n_reps)
        # For storing observed states:
        mpc_approx_states = np.zeros(
            (n_reps, 4, 15))  # 4 as 4 update steps excluding the start
        mpc_actual_states = np.zeros((n_reps, 4, 15))
        # MPC runs - approximate model initialised correctly, & then observed at update steps
        for i in range(n_reps):
            mpc_controller = mpc.Controller(setup, params, beta,
                                            approx_params=approx_params)
            _, _, mpc_control, mpc_obj = mpc_controller.optimise(
                **mpc_args, observer=observer)
            mpc_objs[i] = mpc_obj
            mpc_controls[i] = mpc_control
            mpc_approx_states[i] = mpc_controller.approx_update_states
            sim_run, approx_run = mpc_controller.run_control()
            sim_state = np.sum(np.reshape(sim_run[0], (ncells, 15, -1)),
                               axis=0) / ncells
            # Time indices 40/80/120/160 -- the update steps (update_period=20
            # at time_step=0.5).
            mpc_actual_states[i] = sim_state[:, [40, 80, 120, 160]].T
            logging.info("MPC run %d of %d done", i + 1, n_reps)
        # Append to existing data
        if append:
            old_filename = os.path.join(
                folder, "sampled_data_" + str(n_samples) + ".npz")
            with np.load(old_filename) as old_dataset:
                mpc_controls = np.append(old_dataset['mpc_controls'],
                                         mpc_controls, axis=0)
                mpc_objs = np.append(old_dataset['mpc_objs'], mpc_objs,
                                     axis=0)
                mpc_approx_states = np.append(
                    old_dataset['mpc_approx_states'], mpc_approx_states,
                    axis=0)
                # FIX: actual states were previously not appended, leaving the
                # saved arrays with inconsistent lengths after an append run.
                mpc_actual_states = np.append(
                    old_dataset['mpc_actual_states'], mpc_actual_states,
                    axis=0)
        # Store data
        dataset = {
            'mpc_controls': mpc_controls,
            'mpc_objs': mpc_objs,
            'mpc_approx_states': mpc_approx_states,
            'mpc_actual_states': mpc_actual_states
        }
        np.savez_compressed(
            os.path.join(folder, "sampled_data_" + str(n_samples)), **dataset)
def run_all(n_ens=10, n_opt=10, folder=None, append=False):
    """Run analysis for all error standard deviation values.

    For each standard deviation, generates (or, if ``append``, reloads) a
    fitting ensemble and runs OL/MPC optimisations, saving compressed results
    per standard deviation.

    Parameters
    ----------
    n_ens : int
        Ensemble size passed to ``generate_ensemble_and_fit``.
    n_opt : int
        Number of optimisation repeats per standard deviation.
    folder : str, optional
        Data directory; defaults to ``<repo>/data/param_uncert``.
    append : bool
        If True, append new optimisations to existing datasets.
    """
    if folder is None:
        # FIX: take dirname first. Joining '..' directly onto the file path
        # treats the .py file itself as a directory, which fails on POSIX.
        folder = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                              '..', 'data', 'param_uncert')
    error_std_devs = np.array(
        [0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4])
    for std_dev in error_std_devs:
        logging.info("Starting analysis with %f standard deviation", std_dev)
        # Initial conditions used in 2012 paper
        setup, params = utils.get_setup_params(
            parameters.CORRECTED_PARAMS, scale_inf=True,
            host_props=parameters.COBB_PROP_FIG4A)
        mpc_args = {
            'horizon': 100,
            'time_step': 0.5,
            'end_time': 100,
            'update_period': 20,
            'rolling_horz': False,
            'stage_len': 5,
            'init_policy': even_policy,
            'use_init_first': True
        }
        if append:
            logging.info("Reading in existing fit ensemble.")
            ensemble_and_fit = {}
            filename = os.path.join(
                folder, "fitting_ensemble_data_" + str(std_dev) + ".npz")
            with np.load(filename) as data:
                for key in data.keys():
                    ensemble_and_fit[key] = data[key]
        else:
            ensemble_and_fit = generate_ensemble_and_fit(n_ens, std_dev)
            np.savez_compressed(
                os.path.join(folder,
                             "fitting_ensemble_data_" + str(std_dev)),
                **ensemble_and_fit)
        if append:
            logging.info("Appending to existing dataset.")
            if std_dev == 0.0:
                # No sampling noise at zero standard deviation, so repeats
                # add nothing.
                logging.info("Std dev=0.0 - no repeats to run.")
                continue
            full_optimisations = {}
            filename = os.path.join(
                folder, "optimisation_data_" + str(std_dev) + ".npz")
            with np.load(filename) as data:
                for key in data.keys():
                    full_optimisations[key] = data[key]
            # Seed new repeats from the previously stored OL control.
            ol_pol = interp1d(setup['times'][:-1],
                              full_optimisations['ol_control'], kind="zero",
                              fill_value="extrapolate")
            optimisations = run_optimisations(ensemble_and_fit, params, setup,
                                              n_opt, std_dev, mpc_args,
                                              ol_pol=ol_pol)
            for key in ['params', 'ol_objs', 'mpc_control', 'mpc_objs']:
                full_optimisations[key] = np.append(full_optimisations[key],
                                                    optimisations[key],
                                                    axis=0)
            np.savez_compressed(
                os.path.join(folder, "optimisation_data_" + str(std_dev)),
                **full_optimisations)
        else:
            optimisations = run_optimisations(ensemble_and_fit, params, setup,
                                              n_opt, std_dev, mpc_args)
            np.savez_compressed(
                os.path.join(folder, "optimisation_data_" + str(std_dev)),
                **optimisations)
        logging.info("Completed analysis with %f standard deviation", std_dev)
def make_data(folder=None):
    """Scan over diversity costs to analyse change in OL control.

    (Docstring corrected: the loop scans diversity-cost values derived from
    ``div_prop``, not budgets.)  For each diversity cost, optimises OL
    control with the approximate model, then runs an MPC optimisation seeded
    from it; both results are pickled into ``folder``.

    Parameters
    ----------
    folder : str, optional
        Output directory; defaults to ``<repo>/data/div_cost_scan``.
    """
    if folder is None:
        # FIX: take dirname first. Joining '..' directly onto the file path
        # treats the .py file itself as a directory, which fails on POSIX.
        folder = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                              '..', 'data', 'div_cost_scan')
    with open(os.path.join("data", "scale_and_fit_results.json"),
              "r") as infile:
        scale_and_fit_results = json.load(infile)
    setup, params = utils.get_setup_params(
        parameters.CORRECTED_PARAMS, scale_inf=True,
        host_props=parameters.COBB_PROP_FIG4A)
    logging.info("Parameters: %s", params)
    beta_names = [
        'beta_1,1', 'beta_1,2', 'beta_1,3', 'beta_1,4', 'beta_12', 'beta_21',
        'beta_2'
    ]
    beta = np.array([scale_and_fit_results[x] for x in beta_names])
    approx_params = copy.deepcopy(params)
    approx_params['rogue_rate'] *= scale_and_fit_results['roguing_factor']
    approx_params['rogue_cost'] /= scale_and_fit_results['roguing_factor']
    approx_model = ms_approx.MixedStandApprox(setup, approx_params, beta)
    div_prop = np.array(
        [0.0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0])
    div_costs = div_prop / (setup['times'][-1] * np.log(3))
    for div_cost, prop in zip(div_costs, div_prop):
        logging.info("Diversity cost: %f", div_cost)
        params['div_cost'] = div_cost
        approx_params['div_cost'] = div_cost
        approx_model.params['div_cost'] = div_cost
        _, control, exit_text = approx_model.optimise(
            n_stages=20, init_policy=even_policy)
        if exit_text not in [
                "Optimal Solution Found.", "Solved To Acceptable Level."
        ]:
            logging.warning(
                "Failed optimisation. Trying intialisation from previous solution."
            )
            filename = os.path.join(
                os.path.dirname(os.path.realpath(__file__)), '..',
                'mixed_stand_model', "BOCOP", "problem.def")
            with open(filename, "r") as infile:
                all_lines = infile.readlines()
            # Toggle two pairs of lines in problem.def -- presumably switching
            # the BOCOP initialisation mode; confirm against the config file.
            all_lines[31] = "# " + all_lines[31]
            all_lines[32] = "# " + all_lines[32]
            all_lines[33] = all_lines[33][2:]
            all_lines[34] = all_lines[34][2:]
            with ms_approx._try_file_open(filename) as outfile:
                outfile.writelines(all_lines)
            _, control, exit_text = approx_model.optimise(
                n_stages=20, init_policy=even_policy)
            # Restore the original problem.def contents.
            all_lines[31] = all_lines[31][2:]
            all_lines[32] = all_lines[32][2:]
            all_lines[33] = "# " + all_lines[33]
            all_lines[34] = "# " + all_lines[34]
            with ms_approx._try_file_open(filename) as outfile:
                outfile.writelines(all_lines)
            if exit_text not in [
                    "Optimal Solution Found.", "Solved To Acceptable Level."
            ]:
                logging.error("Failed optimisation in OL optimisation.")
        approx_model.save_optimisation(
            os.path.join(folder, "div_cost_" + str(prop) + "_OL.pkl"))
        mpc_controller = mpc.Controller(setup, params, beta,
                                        approx_params=approx_params)
        mpc_controller.optimise(horizon=100, time_step=0.5, end_time=100,
                                update_period=20, rolling_horz=False,
                                stage_len=5, init_policy=control,
                                use_init_first=True)
        mpc_controller.save_optimisation(
            os.path.join(folder, "div_cost_" + str(prop) + "_MPC.pkl"))
def make_plots(data_folder=None, fig_folder=None):
    """Generate plot of diversity cost scan.

    Loads the OL and MPC optimisation results saved by ``make_data``, re-runs
    the simulation model under each stored control, and plots expenditure
    allocation (stacked bars) alongside the healthy-tanoak objective.

    Parameters
    ----------
    data_folder : str, optional
        Directory holding the ``div_cost_*_{OL,MPC}.pkl`` files.
    fig_folder : str, optional
        Directory the figure ``DivCostScan.pdf`` is written to.
    """
    if data_folder is None:
        data_folder = os.path.join(
            os.path.realpath(__file__), '..', '..', 'data', 'div_cost_scan')
    if fig_folder is None:
        fig_folder = os.path.join(
            os.path.realpath(__file__), '..', '..', 'figures', 'div_cost_scan')

    div_props = np.array(
        [0.0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0])
    n_costs = len(div_props)

    with open(os.path.join("data", "scale_and_fit_results.json"), "r") as infile:
        scale_and_fit_results = json.load(infile)

    setup, params = utils.get_setup_params(
        parameters.CORRECTED_PARAMS, scale_inf=True, host_props=parameters.COBB_PROP_FIG4A)

    beta_names = [
        'beta_1,1', 'beta_1,2', 'beta_1,3', 'beta_1,4', 'beta_12', 'beta_21', 'beta_2'
    ]
    beta = np.array([scale_and_fit_results[x] for x in beta_names])

    approx_model = ms_approx.MixedStandApprox(setup, params, beta)
    model = ms_sim.MixedStandSimulator(setup, params)

    # np.product was deprecated and removed in NumPy 2.0; np.prod is the
    # supported equivalent.
    ncells = np.prod(setup['landscape_dims'])

    def _budget_allocation(sim_state, control):
        """Absolute expenditure per control action (9 x time), capped so that
        total spend never exceeds the budget in any interval.

        The allocation computation was previously duplicated for the OL and
        MPC branches; it is identical in both.
        """
        allocation = np.array([
            sim_state[1] + sim_state[4],          # rogue tanoak (small)
            sim_state[7] + sim_state[10],         # rogue tanoak (large)
            sim_state[13],                        # rogue bay
            np.sum(sim_state[0:6], axis=0),       # thin tanoak (small)
            np.sum(sim_state[6:12], axis=0),      # thin tanoak (large)
            sim_state[12] + sim_state[13],        # thin bay
            sim_state[14],                        # thin redwood
            sim_state[0] + sim_state[3],          # protect tanoak (small)
            sim_state[6] + sim_state[9]           # protect tanoak (large)
        ])[:, :-1] * control
        allocation[0:3] *= params['rogue_rate'] * params['rogue_cost']
        allocation[3:7] *= params['thin_rate'] * params['thin_cost']
        allocation[7:] *= params['protect_rate'] * params['protect_cost']
        allocation[0] *= params['rel_small_cost']
        allocation[3] *= params['rel_small_cost']

        # Rescale intervals where the requested spend exceeds the budget.
        expense = utils.control_expenditure(control, params, sim_state[:, :-1])
        for j in range(len(setup['times']) - 1):
            if expense[j] > params['max_budget']:
                allocation[:, j] *= params['max_budget'] / expense[j]
        return allocation

    # Collect control proportions; `order` rearranges rows into plotting order
    # (thins first, then rogues, then protects).
    order = np.array([3, 4, 5, 6, 0, 1, 2, 7, 8])
    control_abs_ol = np.zeros((n_costs, 9))
    control_abs_mpc = np.zeros((n_costs, 9))
    objectives_ol = np.zeros(n_costs)
    objectives_mpc = np.zeros(n_costs)

    for i, div_prop in enumerate(div_props):
        div_cost = div_prop / (setup['times'][-1] * np.log(3))
        logging.info("Diversity cost: %f", div_cost)

        # Open-loop: replay the stored OL control through the simulator.
        approx_model.load_optimisation(
            os.path.join(data_folder, "div_cost_" + str(div_prop) + "_OL.pkl"))
        control = approx_model.optimisation['control']
        control_policy = interp1d(setup['times'][:-1],
                                  control,
                                  kind="zero",
                                  fill_value="extrapolate")

        sim_run, obj, objs = model.run_policy(control_policy)
        sim_state = np.sum(np.reshape(sim_run, (ncells, 15, -1)), axis=0) / ncells

        allocation = _budget_allocation(sim_state, control)
        control_abs_ol[i] = (np.sum(allocation, axis=1))[order] / 200
        objectives_ol[i] = -1 * (obj - objs[-1])

        # MPC: replay the stored MPC controller.
        mpc_controller = mpc.Controller.load_optimisation(
            os.path.join(data_folder, "div_cost_" + str(div_prop) + "_MPC.pkl"))
        sim_run, approx_run = mpc_controller.run_control()
        control = mpc_controller.control
        sim_state = np.sum(np.reshape(sim_run[0], (ncells, 15, -1)), axis=0) / ncells

        allocation = _budget_allocation(sim_state, control)
        control_abs_mpc[i] = (np.sum(allocation, axis=1))[order] / 200
        objectives_mpc[i] = -1 * (sim_run[1] - sim_run[2][-1])

    # Make plot
    labels = [
        "Thin Tan (Small)", "Thin Tan (Large)", "Thin Bay", "Thin Red",
        "Rogue Tan (Small)", "Rogue Tan (Large)", "Rogue Bay",
        "Protect Tan (Small)", "Protect Tan (Large)"
    ]
    colors = visualisation.CONTROL_COLOURS

    fig = plt.figure()
    gs = gridspec.GridSpec(3, 2,
                           width_ratios=[2, 1],
                           height_ratios=[0.2, 3, 1],
                           top=0.95, left=0.1, wspace=0.3, bottom=0.3)
    ax = fig.add_subplot(gs[:, 0])
    ax2 = fig.add_subplot(gs[1, 1])

    color = 'tab:red'
    ax2.set_ylabel('Large healthy tanoak')
    ax2.plot(div_props, objectives_ol, '--', color=color,
             label='OL tanoak objective')

    # OL bars, offset left of each tick.
    bars = []
    for i in range(9):
        b = ax.bar(div_props - 0.015,
                   control_abs_ol[:, i],
                   bottom=np.sum(control_abs_ol[:, :i], axis=1),
                   color=colors[i],
                   width=0.015,
                   alpha=0.75)
        bars.append(b)

    ax2.plot(div_props, objectives_mpc, '-', color=color,
             label='MPC tanoak objective')

    # MPC bars, offset right of each tick.
    bars = []
    for i in range(9):
        b = ax.bar(div_props + 0.015,
                   control_abs_mpc[:, i],
                   bottom=np.sum(control_abs_mpc[:, :i], axis=1),
                   color=colors[i],
                   width=0.015,
                   alpha=0.75)
        bars.append(b)

    ax.legend(bars, labels, bbox_to_anchor=(-0.1, -0.15), loc="upper left",
              ncol=3, frameon=True)
    ax2.legend(bbox_to_anchor=(0.5, -0.3), loc="upper center", ncol=1,
               frameon=True, fontsize=8)

    ax.set_xlabel("Diversity benefit")
    ax.set_ylabel("Expenditure")
    ax2.set_xlabel("Diversity benefit")

    x_ticks = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
    offset = 0.025
    ax.set_xticks(x_ticks)
    ax2.set_xticks(x_ticks)
    ax.tick_params(axis='x', direction='out', pad=7, length=5, color='darkgray')
    ax2.tick_params(axis='x', direction='out', pad=7, length=5, color='darkgray')

    # Label each tick's paired bars as OL / MPC in axes coordinates.
    data_to_axis = ax.transData + ax.transAxes.inverted()
    for x_tick in x_ticks:
        ax.text(data_to_axis.transform((x_tick - offset, 0))[0], -0.025, 'OL',
                ha='center', transform=ax.transAxes, fontsize=5)
        ax.text(data_to_axis.transform((x_tick + (offset / 2), 0))[0], -0.025,
                'MPC', ha='left', transform=ax.transAxes, fontsize=5)

    ax.set_ylim([0, 15])

    fig.text(0.01, 0.95, "(a)", transform=fig.transFigure, fontsize=11,
             fontweight="semibold")
    fig.text(0.57, 0.95, "(b)", transform=fig.transFigure, fontsize=11,
             fontweight="semibold")

    fig.savefig(os.path.join(fig_folder, "DivCostScan.pdf"), dpi=300)
def run_scan(datapath):
    """Scan over control scaling factor.

    Runs the simulator once under the stored open-loop control, then evaluates
    the SSE between simulated and approximate-model healthy tanoak for a grid
    of roguing-rate scaling factors, writing the results to
    ``<datapath>/scan_control.csv``.

    Parameters
    ----------
    datapath : str
        Directory the CSV of (factor, SSE) pairs is written to.
    """
    factors = np.arange(0.05, 1.11, 0.01)
    diff_results = np.zeros_like(factors)

    # Run simulations using open-loop control policy
    setup, params = utils.get_setup_params(
        parameters.CORRECTED_PARAMS, scale_inf=True, host_props=parameters.COBB_PROP_FIG4A,
        extra_spread=True)

    approx_model = ms_approx.MixedStandApprox.load_optimisation_class(
        os.path.join('data', 'ol_mpc_control', 'ol_control.pkl'))
    # Piecewise-constant ("zero" order) interpolation of the stored control.
    ol_control = interp1d(setup['times'][:-1],
                          approx_model.optimisation['control'],
                          kind="zero",
                          fill_value="extrapolate")

    with open(os.path.join("data", "scale_and_fit_results.json"), "r") as infile:
        scale_and_fit_results = json.load(infile)

    # np.product was removed in NumPy 2.0; np.prod is the supported spelling.
    ncells = np.prod(setup['landscape_dims'])

    model = ms_sim.MixedStandSimulator(setup, params)
    sim_run = model.run_policy(ol_control)
    # Average state over cells -> (15, n_times); healthy tanoak are
    # compartments 6, 8, 9, 11 at the final time.
    sim_state = np.sum(sim_run[0].reshape((ncells, 15, -1)), axis=0) / ncells
    sim_tans = np.sum(sim_state[[6, 8, 9, 11], -1])
    logging.info("Sim run, healthy tans: %f", sim_tans)

    beta_names = [
        'beta_1,1', 'beta_1,2', 'beta_1,3', 'beta_1,4', 'beta_12', 'beta_21', 'beta_2'
    ]
    beta = np.array([scale_and_fit_results[x] for x in beta_names])
    approx_model = ms_approx.MixedStandApprox(setup, params, beta)

    def min_func(factor):
        """SSE between final healthy tanoak (sim vs approx) under OL control."""
        approx_model.params['rogue_rate'] = factor * params['rogue_rate']
        approx_run = approx_model.run_policy(ol_control)
        approx_tans = np.sum(approx_run[0][[6, 8, 9, 11], -1])
        logging.info("Approx run, Factor %f, tans: %f", factor, approx_tans)
        return np.sum(np.square(approx_tans - sim_tans))

    for i, factor in enumerate(factors):
        diff_results[i] = min_func(factor)

    csv_file = os.path.join(datapath, 'scan_control.csv')
    with open(csv_file, 'w', newline='') as csvfile:
        spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|',
                                quoting=csv.QUOTE_MINIMAL)
        spamwriter.writerow(['ControlScalingFactor', 'SSE'])
        for factor, diff in zip(factors, diff_results):
            spamwriter.writerow([factor, diff])
def scale_control(datapath):
    """Parameterise roguing control in approximate model to match simulations.

    Minimises the absolute difference in final healthy tanoak between the
    simulation and the approximate model (both run under the stored open-loop
    control) over a roguing-rate scaling factor, and writes the result to
    ``<datapath>/scaling_results.json``.

    NOTE(review): a no-argument ``scale_control()`` is also defined in this
    file; whichever is defined later shadows the other — confirm which one
    callers (e.g. ``main``) are meant to use.

    Parameters
    ----------
    datapath : str
        Directory the JSON results file is written to.
    """

    # First run simulation using open-loop policy
    setup, params = utils.get_setup_params(
        parameters.CORRECTED_PARAMS, scale_inf=True, host_props=parameters.COBB_PROP_FIG4A,
        extra_spread=True)

    with open(os.path.join("data", "scale_and_fit_results.json"), "r") as infile:
        scale_and_fit_results = json.load(infile)

    approx_model = ms_approx.MixedStandApprox.load_optimisation_class(
        os.path.join('data', 'ol_mpc_control', 'ol_control.pkl'))
    ol_control = interp1d(setup['times'][:-1],
                          approx_model.optimisation['control'],
                          kind="zero",
                          fill_value="extrapolate")

    # np.product was removed in NumPy 2.0; np.prod is the supported spelling.
    ncells = np.prod(setup['landscape_dims'])

    model = ms_sim.MixedStandSimulator(setup, params)
    sim_run = model.run_policy(ol_control)
    # Cell-averaged state; healthy tanoak are compartments 6, 8, 9, 11.
    sim_state = np.sum(sim_run[0].reshape((ncells, 15, -1)), axis=0) / ncells
    sim_tans = np.sum(sim_state[[6, 8, 9, 11], -1])
    logging.info("Sim run, healthy tans: %f", sim_tans)

    beta_names = [
        'beta_1,1', 'beta_1,2', 'beta_1,3', 'beta_1,4', 'beta_12', 'beta_21', 'beta_2'
    ]
    beta = np.array([scale_and_fit_results[x] for x in beta_names])
    approx_model = ms_approx.MixedStandApprox(setup, params, beta)

    def min_func(factor):
        """Objective: |final healthy tanoak (approx) - (sim)| at scaled rate."""
        approx_model.params['rogue_rate'] = factor * params['rogue_rate']
        approx_run = approx_model.run_policy(ol_control)
        approx_tans = np.sum(approx_run[0][[6, 8, 9, 11], -1])
        logging.info("Approx run, Factor %f, tans: %f, rate: %f", factor,
                     approx_tans, factor * params['rogue_rate'])
        return abs(approx_tans - sim_tans)

    ret = minimize(min_func, [0.25], bounds=[(0, 2)])
    logging.info(ret)

    # Write scaling results to file; coerce NumPy scalars to plain floats so
    # the JSON output type is explicit.
    results = {'roguing_factor': float(ret.x[0]), 'tan_diff': float(ret.fun)}
    with open(os.path.join(datapath, 'scaling_results.json'), "w") as outfile:
        json.dump(results, outfile, indent=4)
def generate_ensemble_and_fit(n_ensemble_runs, standard_dev):
    """Generate ensemble of simulation runs and fit approximate model.

    Draws multiplicative errors around the baseline infection rates, runs the
    simulator for each sample (with and without bay cross-transmission), and
    fits the approximate model's beta parameters to the ensemble.

    Parameters
    ----------
    n_ensemble_runs : int
        Number of parameter samples / simulation runs. Forced to 1 when
        ``standard_dev`` is 0 (no uncertainty).
    standard_dev : float
        Standard deviation of the (truncated-normal, mean 1, non-negative)
        multiplicative parameter error.

    Returns
    -------
    dict
        Keys ``'params'`` (sampled betas), ``'sims'``, ``'sims_no_cross_trans'``
        (cell-averaged runs, shape (n_runs, 15, n_times)) and ``'fit'``
        (fitted beta array).
    """
    setup, params = utils.get_setup_params(
        parameters.CORRECTED_PARAMS, scale_inf=True, host_props=parameters.COBB_PROP_FIG4A,
        extra_spread=True)

    model = ms_sim.MixedStandSimulator(setup, params)
    # np.product was removed in NumPy 2.0; np.prod is the supported spelling.
    ncells = np.prod(setup['landscape_dims'])

    # Create parameter error distribution. Truncation at -1/sd keeps the
    # multiplicative error (mean 1) non-negative.
    if standard_dev != 0.0:
        error_dist = truncnorm(-1.0 / standard_dev, np.inf, loc=1.0, scale=standard_dev)
        error_samples = np.reshape(error_dist.rvs(size=n_ensemble_runs * 7),
                                   (n_ensemble_runs, 7))
    else:
        # No uncertainty: a single run at the baseline parameters suffices.
        n_ensemble_runs = 1
        error_samples = np.ones((n_ensemble_runs, 7))

    # Sample from parameter distribution and run simulations
    baseline_beta = np.zeros(7)
    baseline_beta[:4] = params['inf_tanoak_tanoak']
    baseline_beta[4] = params['inf_bay_to_tanoak']
    baseline_beta[5] = params['inf_tanoak_to_bay']
    baseline_beta[6] = params['inf_bay_to_bay']

    parameter_samples = error_samples * baseline_beta

    logging.info("Generated ensemble parameters for fitting")

    simulation_runs = np.zeros((n_ensemble_runs, 15, len(setup['times'])))
    simulation_runs_no_cross_trans = np.zeros(
        (n_ensemble_runs, 15, len(setup['times'])))

    for i in range(n_ensemble_runs):
        # All four infection parameters are reset from the sample each
        # iteration, so the zeroing below does not leak between runs.
        model.params['inf_tanoak_tanoak'] = parameter_samples[i, 0:4]
        model.params['inf_bay_to_tanoak'] = parameter_samples[i, 4]
        model.params['inf_tanoak_to_bay'] = parameter_samples[i, 5]
        model.params['inf_bay_to_bay'] = parameter_samples[i, 6]

        sim_run, *_ = model.run_policy()
        simulation_runs[i] = np.sum(sim_run.reshape(
            (ncells, 15, -1)), axis=0) / ncells

        # Repeat with bay transmission switched off for the two-stage fit.
        model.params['inf_bay_to_tanoak'] = 0.0
        model.params['inf_tanoak_to_bay'] = 0.0
        model.params['inf_bay_to_bay'] = 0.0

        sim_run, *_ = model.run_policy()
        simulation_runs_no_cross_trans[i] = np.sum(sim_run.reshape(
            (ncells, 15, -1)), axis=0) / ncells

        logging.info("Run %d of %d done.", i + 1, n_ensemble_runs)

    ret_dict = {
        'params': parameter_samples,
        'sims': simulation_runs,
        'sims_no_cross_trans': simulation_runs_no_cross_trans,
        'fit': None
    }

    _, beta = scale_and_fit.fit_beta(
        setup, params, no_bay_dataset=simulation_runs_no_cross_trans,
        with_bay_dataset=simulation_runs)
    ret_dict['fit'] = beta

    return ret_dict
def scale_control():
    """Parameterise roguing control in approximate model to match simulations.

    Runs the simulator over a grid of roguing rates under a constant roguing
    policy, then finds the scaling factor for the approximate model's roguing
    rate that minimises the SSE in final healthy tanoak across the grid.

    Returns
    -------
    float
        Optimal roguing-rate scaling factor.
    """
    test_rates = np.linspace(0, 1.0, 51)

    # First run simulations for range of roguing rates, with constant control rates
    setup, params = utils.get_setup_params(
        parameters.CORRECTED_PARAMS, scale_inf=True, host_props=parameters.COBB_PROP_FIG4A,
        extra_spread=False)
    # Large budget so roguing is never budget-limited during the scan.
    params['max_budget'] = 1000

    with open(os.path.join("data", "scale_and_fit_results.json"), "r") as infile:
        scale_and_fit_results = json.load(infile)

    sim_tans = np.zeros_like(test_rates)
    # np.product was removed in NumPy 2.0; np.prod is the supported spelling.
    ncells = np.prod(setup['landscape_dims'])

    for i, rate in enumerate(test_rates):
        params['rogue_rate'] = rate
        model = ms_sim.MixedStandSimulator(setup, params)
        sim_run = model.run_policy(const_rogue_policy)
        # Cell-averaged state; healthy tanoak are compartments 6, 8, 9, 11.
        sim_state = np.sum(sim_run[0].reshape(
            (ncells, 15, -1)), axis=0) / ncells
        sim_tans[i] = np.sum(sim_state[[6, 8, 9, 11], -1])
        logging.info("Sim run, rate: %f, healthy tans: %f", rate, sim_tans[i])

    # Beta is fixed by the fitting results — hoisted out of the objective so
    # it is not rebuilt on every minimize() evaluation.
    beta_names = [
        'beta_1,1', 'beta_1,2', 'beta_1,3', 'beta_1,4', 'beta_12', 'beta_21', 'beta_2'
    ]
    beta = np.array([scale_and_fit_results[x] for x in beta_names])

    def min_func(factor):
        """Function to minimise, SSE between healthy tanoak over range of rates."""
        approx_tans = np.zeros_like(test_rates)

        # Fresh setup/params so rogue_rate edits cannot leak between calls.
        setup, params = utils.get_setup_params(
            parameters.CORRECTED_PARAMS, scale_inf=True,
            host_props=parameters.COBB_PROP_FIG4A, extra_spread=False)
        params['max_budget'] = 1000

        for i, rate in enumerate(test_rates):
            params['rogue_rate'] = rate * factor
            approx_model = ms_approx.MixedStandApprox(setup, params, beta)
            approx_run = approx_model.run_policy(const_rogue_policy)
            approx_tans[i] = np.sum(approx_run[0][[6, 8, 9, 11], -1])
            logging.info("Approx run, Factor %f, Rate: %f, tans: %f", factor,
                         rate, approx_tans[i])

        return np.sum(np.square(approx_tans - sim_tans))

    ret = minimize(min_func, [1.0], bounds=[(0, 2)])
    logging.info(ret)

    return ret.x[0]
def make_plots(data_folder=None, fig_folder=None):
    """Generate plots of observational uncertainty analysis.

    Plots the MPC objective as a function of sampling effort against the
    open-loop baseline.

    NOTE(review): another ``make_plots`` with the same signature appears
    earlier in this file; if both live in one module the later definition
    shadows the earlier — confirm these belong to separate scripts.

    Parameters
    ----------
    data_folder : str, optional
        Directory holding the ``sampled_data_*.npz`` files.
    fig_folder : str, optional
        Directory the figure ``param_uncert.pdf`` is written to.
    """
    if data_folder is None:
        data_folder = os.path.join(
            os.path.realpath(__file__), '..', '..', 'data', 'obs_uncert')
    if fig_folder is None:
        fig_folder = os.path.join(
            os.path.realpath(__file__), '..', '..', 'figures', 'obs_uncert')

    setup, params = utils.get_setup_params(
        parameters.CORRECTED_PARAMS, scale_inf=True, host_props=parameters.COBB_PROP_FIG4A)

    sampling_effort = np.array([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 1.0])
    # np.product was removed in NumPy 2.0; np.prod is the supported spelling.
    # 561 * 20 scales cells to individual hosts — presumably hosts per cell
    # times initial density; TODO confirm against the simulation setup.
    pop_size = np.prod(setup['landscape_dims']) * 561 * 20 / np.sum(
        setup['state_init'])
    # Effort 1.0 (full observation) is handled separately below.
    sampling_nums = list(map(int, pop_size * sampling_effort))[:-1]

    x_data = []
    mpc_objs = []
    avg_objs = []

    for n_samples, sample_prop in zip(sampling_nums, sampling_effort[:-1]):
        filename = os.path.join(data_folder,
                                "sampled_data_" + str(n_samples) + ".npz")
        with np.load(filename) as dataset:
            mpc_objs = np.append(mpc_objs, dataset['mpc_objs'], axis=0)
            x_data.extend([sample_prop] * len(dataset['mpc_objs']))
            avg_objs.append(np.mean(dataset['mpc_objs']))

    # Full-observation MPC result (sampling effort 1.0).
    mpc_controller = mpc.Controller.load_optimisation(
        os.path.join(data_folder, '..', "ol_mpc_control", "mpc_control_20.pkl"))
    mpc_sim_run, _ = mpc_controller.run_control()
    x_data.append(sampling_effort[-1])
    mpc_objs = np.append(mpc_objs, [mpc_sim_run[1]], axis=0)
    avg_objs.append(mpc_sim_run[1])

    # Open-loop baseline: replay the stored OL control through the simulator.
    approx_model = ms_approx.MixedStandApprox.load_optimisation_class(
        os.path.join(data_folder, '..', "ol_mpc_control", "ol_control.pkl"))
    ol_control_policy = interp1d(approx_model.setup['times'][:-1],
                                 approx_model.optimisation['control'],
                                 kind="zero",
                                 fill_value="extrapolate")
    sim_model = ms_sim.MixedStandSimulator(mpc_controller.setup,
                                           mpc_controller.params)
    ol_sim_run = sim_model.run_policy(ol_control_policy)

    # Objectives are stored negated; flip sign for plotting.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(sampling_effort, -1 * np.array(avg_objs), '-', label='MPC mean',
            color='C1', alpha=0.75)
    ax.plot(x_data, -mpc_objs, 'o', label='MPC', color='C1')
    ax.axhline(-1 * ol_sim_run[1], label='OL', linestyle='--', color='C0')
    ax.set_xlabel("Sampling effort")
    ax.set_ylabel("Objective")
    ax.legend()
    fig.savefig(os.path.join(fig_folder, "param_uncert.pdf"), dpi=600,
                bbox_inches='tight')