def make_data(folder=None):
    """Scan over budgets to analyse change in OL control.

    For each budget, optimise open-loop (OL) control with the approximate
    model, then use that control to warm-start an MPC optimisation.  Both
    results are pickled into ``folder``.

    Parameters
    ----------
    folder : str, optional
        Output directory.  Defaults to ``../data/budget_scan`` relative to
        this script's directory.
    """
    if folder is None:
        # FIX: resolve relative to the script's *directory*; the original
        # joined '..' components onto the file path itself.
        folder = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                              '..', 'data', 'budget_scan')
    # Robustness: make sure the output directory exists before any save.
    os.makedirs(folder, exist_ok=True)

    with open(os.path.join("data", "scale_and_fit_results.json"), "r") as infile:
        scale_and_fit_results = json.load(infile)

    setup, params = utils.get_setup_params(
        parameters.CORRECTED_PARAMS, scale_inf=True,
        host_props=parameters.COBB_PROP_FIG4A)
    logging.info("Parameters: %s", params)

    beta_names = ['beta_1,1', 'beta_1,2', 'beta_1,3', 'beta_1,4', 'beta_12',
                  'beta_21', 'beta_2']
    beta = np.array([scale_and_fit_results[x] for x in beta_names])

    # Approximate model uses roguing rate/cost rescaled by the fitted factor
    # (rate up, cost down, so total roguing expenditure is preserved).
    approx_params = copy.deepcopy(params)
    approx_params['rogue_rate'] *= scale_and_fit_results['roguing_factor']
    approx_params['rogue_cost'] /= scale_and_fit_results['roguing_factor']

    approx_model = ms_approx.MixedStandApprox(setup, approx_params, beta)

    # NOTE: the full scan was np.array([8, 10, 12, 14, 16, 18, 20]);
    # currently only a single budget value is run.
    budgets = np.array([36])

    for budget in budgets:
        logging.info("Budget: %f", budget)
        params['max_budget'] = budget
        approx_model.params['max_budget'] = budget
        approx_params['max_budget'] = budget

        # Open-loop optimisation initialised from an even allocation.
        _, control, _ = approx_model.optimise(n_stages=20,
                                              init_policy=even_policy)
        approx_model.save_optimisation(
            os.path.join(folder, "budget_" + str(int(budget)) + "_OL.pkl"))

        # MPC optimisation warm-started from the OL control.
        mpc_controller = mpc.Controller(setup, params, beta,
                                        approx_params=approx_params)
        mpc_controller.optimise(
            horizon=100, time_step=0.5, end_time=100, update_period=20,
            rolling_horz=False, stage_len=5, init_policy=control,
            use_init_first=True)
        mpc_controller.save_optimisation(
            os.path.join(folder, "budget_" + str(int(budget)) + "_MPC.pkl"))
def run_optimisations(datapath):
    """Run OL and MPC frameworks using newly parameterised roguing rate."""
    # Load fitted transmission parameters and the globally scaled control.
    with open(os.path.join("data", "scale_and_fit_results.json"), "r") as infile:
        scale_and_fit_results = json.load(infile)
    with open(os.path.join(datapath, "scaling_results.json"), "r") as infile:
        global_scaling = json.load(infile)

    setup, params = utils.get_setup_params(
        parameters.CORRECTED_PARAMS, scale_inf=True,
        host_props=parameters.COBB_PROP_FIG4A)

    beta = np.array([scale_and_fit_results[name] for name in (
        'beta_1,1', 'beta_1,2', 'beta_1,3', 'beta_1,4', 'beta_12', 'beta_21',
        'beta_2')])

    # Rescale roguing rate/cost by the globally fitted roguing factor.
    scaling = global_scaling['roguing_factor']
    approx_params = copy.deepcopy(params)
    approx_params['rogue_rate'] *= scaling
    approx_params['rogue_cost'] /= scaling

    approx_model = ms_approx.MixedStandApprox(setup, approx_params, beta)

    def even_policy(time):
        # Uniform 0.1 allocation across all nine controls.
        return np.full(9, 0.1)

    # Open-loop optimisation, persisted to the data path.
    approx_model.optimise(n_stages=20, init_policy=even_policy)
    approx_model.save_optimisation(
        os.path.join(datapath, "OL_GloballyScaledControl.pkl"))

    # Piecewise-constant interpolation of the OL control to warm-start MPC.
    ol_control = interp1d(setup['times'][:-1],
                          approx_model.optimisation['control'], kind="zero",
                          fill_value="extrapolate")

    mpc_controller = mpc.Controller(setup, params, beta,
                                    approx_params=approx_params)
    mpc_controller.optimise(
        horizon=100, time_step=0.5, end_time=100, update_period=20,
        rolling_horz=False, stage_len=5, init_policy=ol_control,
        use_init_first=True)
    mpc_controller.save_optimisation(
        os.path.join(datapath, "MPC_GloballyScaledControl.pkl"))
def _expenditure_allocation(sim_state, control, params, new_params, setup):
    """Budget-capped expenditure allocation for each of the nine controls.

    Parameters
    ----------
    sim_state : ndarray
        Cell-averaged state trajectory, shape (15, n_times).
    control : ndarray
        Control proportions, shape (9, n_times - 1).
    params : dict
        Baseline parameters supplying the control rates/costs.
    new_params : dict
        Perturbed parameters supplying 'max_budget' for capping.
    setup : dict
        Model setup providing 'times'.

    Returns
    -------
    ndarray
        Allocation of expenditure per control per time step,
        shape (9, n_times - 1).
    """
    # Host groups targeted by each control (rows 0-2 roguing, 3-6 thinning,
    # 7-8 protection), evaluated at all but the final time point.
    allocation = (np.array([
        sim_state[1] + sim_state[4], sim_state[7] + sim_state[10],
        sim_state[13], np.sum(sim_state[0:6], axis=0),
        np.sum(sim_state[6:12], axis=0), sim_state[12] + sim_state[13],
        sim_state[14], sim_state[0] + sim_state[3],
        sim_state[6] + sim_state[9]
    ])[:, :-1] * control)
    allocation[0:3] *= params['rogue_rate'] * params['rogue_cost']
    allocation[3:7] *= params['thin_rate'] * params['thin_cost']
    allocation[7:] *= params['protect_rate'] * params['protect_cost']
    allocation[0] *= params['rel_small_cost']
    allocation[3] *= params['rel_small_cost']
    # Rescale any time step whose total expense exceeds the budget.
    expense = utils.control_expenditure(control, new_params,
                                        sim_state[:, :-1])
    for j in range(len(setup['times']) - 1):
        if expense[j] > new_params['max_budget']:
            allocation[:, j] *= new_params['max_budget'] / expense[j]
    return allocation


def main(n_reps=10, sigma=0.25, append=False,
         folder_name='parameter_sensitivity', run_mpc=False):
    """Run sensitivity tests.

    Perturbs the default (corrected, scaled Cobb) parameters ``n_reps``
    times with multiplicative truncated-normal noise of relative standard
    deviation ``sigma``, re-fits the approximate model, and optimises
    open-loop (and optionally MPC) control for each perturbed set.  Results
    are written incrementally to ``data/<folder_name>/``.

    Parameters
    ----------
    n_reps : int
        Number of perturbed parameter sets to run.
    sigma : float
        Relative standard deviation of the perturbation.
    append : bool
        If True, extend a previously generated dataset.
    folder_name : str
        Output folder inside ``data``.
    run_mpc : bool
        Whether to also run the MPC optimisations.
    """
    os.makedirs(os.path.join('data', folder_name), exist_ok=True)

    # Analysis:
    # 1. First construct default parameters (corrected and scaled Cobb)
    setup, params = utils.get_setup_params(
        parameters.CORRECTED_PARAMS, scale_inf=True,
        host_props=parameters.COBB_PROP_FIG4A)

    mpc_args = {
        'horizon': 100,
        'time_step': 0.5,
        'end_time': 100,
        'update_period': 20,
        'rolling_horz': False,
        'stage_len': 5,
        'init_policy': None,
        'use_init_first': True
    }

    # np.prod: np.product is deprecated and removed in NumPy 2.0.
    ncells = np.prod(setup['landscape_dims'])

    # Baseline no control run
    model = ms_sim.MixedStandSimulator(setup, params)
    model.run_policy(control_policy=None, n_fixed_steps=None)

    with open(os.path.join("data", "scale_and_fit_results.json"), "r") as infile:
        scale_and_fit_results = json.load(infile)

    if not append:
        model.save_run(
            os.path.join("data", folder_name, "no_control_baseline.pkl"))

    beta_names = ['beta_1,1', 'beta_1,2', 'beta_1,3', 'beta_1,4', 'beta_12',
                  'beta_21', 'beta_2']
    beta = np.array([scale_and_fit_results[x] for x in beta_names])

    # Approximate model uses roguing rate/cost rescaled by the fitted factor.
    approx_params = copy.deepcopy(params)
    approx_params['rogue_rate'] *= scale_and_fit_results['roguing_factor']
    approx_params['rogue_cost'] /= scale_and_fit_results['roguing_factor']

    approx_model = ms_approx.MixedStandApprox(setup, approx_params, beta)
    logging.info("Running baseline OL control")
    _, baseline_ol_control_policy, exit_text = approx_model.optimise(
        n_stages=20, init_policy=even_policy)
    approx_model.save_optimisation(
        os.path.join("data", folder_name, "ol_control_baseline.pkl"))

    if run_mpc:
        logging.info("Running baseline MPC control")
        mpc_args['init_policy'] = baseline_ol_control_policy
        mpc_controller = mpc.Controller(setup, params, beta,
                                        approx_params=approx_params)
        mpc_controller.optimise(**mpc_args)
        mpc_controller.save_optimisation(
            os.path.join("data", folder_name, "mpc_control_baseline.pkl"))

    # Which parameters to perturb:
    # First single numbers that can be perturbed
    perturbing_params_numbers = [
        'inf_bay_to_bay', 'inf_bay_to_tanoak', 'inf_tanoak_to_bay',
        'nat_mort_bay', 'nat_mort_redwood', 'recov_tanoak', 'recov_bay',
        'resprout_tanoak'
    ]
    # And lists of parameters
    perturbing_params_lists = [
        'inf_tanoak_tanoak', 'nat_mort_tanoak', 'inf_mort_tanoak',
        'trans_tanoak', 'recruit_tanoak'
    ]

    if append:
        logging.info("Loading previous dataset to append new data to")
        # Read in summary data already generated
        with open(os.path.join("data", folder_name, "summary.json"),
                  "r") as infile:
            summary_results = json.load(infile)
        approx_model = ms_approx.MixedStandApprox.load_optimisation_class(
            os.path.join("data", folder_name, "ol_control_baseline.pkl"))
        baseline_ol_control_policy = interp1d(
            approx_model.setup['times'][:-1],
            approx_model.optimisation['control'], kind="zero",
            fill_value="extrapolate")
        # Continue iteration numbering from the previous run.
        n_reps = (len(summary_results), len(summary_results) + n_reps)
        ol_alloc_results = np.load(
            os.path.join("data", folder_name, "ol_alloc_results.npy"))
        mpc_alloc_results = np.load(
            os.path.join("data", folder_name, "mpc_alloc_results.npy"))
    else:
        # Otherwise start afresh
        summary_results = []
        ol_alloc_results = np.zeros((0, 9, len(setup['times']) - 1))
        mpc_alloc_results = np.zeros((0, 9, len(setup['times']) - 1))
        n_reps = (0, n_reps)

    # Multiplicative error with mean 1, sd sigma, truncated below at zero.
    error_dist = truncnorm(-1.0 / sigma, np.inf, loc=1.0, scale=sigma)

    for i in range(*n_reps):
        # 2. Perturb these parameters using Normal distribution, sigma 25%
        logging.info("Perturbing parameter set %d of %d with sigma %f",
                     i + 1, n_reps[1], sigma)
        new_params = copy.deepcopy(params)
        for param_key in perturbing_params_numbers:
            new_params[param_key] = new_params[param_key] * error_dist.rvs()
        for param_key in perturbing_params_lists:
            new_params[param_key] = (
                new_params[param_key] *
                error_dist.rvs(size=len(new_params[param_key])))

        # Set space weights and recruitment rates to NaN so they can be
        # recalculated for dynamic equilibrium
        new_params['recruit_bay'] = np.nan
        new_params['recruit_redwood'] = np.nan
        new_params['space_tanoak'] = np.full(4, np.nan)

        # 3. Recalculate space weights & recruitment rates to give dynamic
        # equilibrium
        new_params, _ = utils.initialise_params(
            new_params, host_props=parameters.COBB_PROP_FIG4A)

        # 4. Run simulation model with no control policy
        model = ms_sim.MixedStandSimulator(setup, new_params)
        model.run_policy(control_policy=None, n_fixed_steps=None)
        model.save_run(
            os.path.join("data", folder_name, "no_control_{}.pkl".format(i)))

        # 5. Fit approximate model
        _, beta = scale_and_fit.fit_beta(setup, new_params)
        approx_new_params = copy.deepcopy(params)
        approx_new_params['rogue_rate'] *= scale_and_fit_results[
            'roguing_factor']
        approx_new_params['rogue_cost'] /= scale_and_fit_results[
            'roguing_factor']

        # 6. Optimise control (open-loop)
        approx_model = ms_approx.MixedStandApprox(setup, approx_new_params,
                                                  beta)
        *_, exit_text = approx_model.optimise(
            n_stages=20, init_policy=baseline_ol_control_policy)
        if exit_text not in ["Optimal Solution Found.",
                             "Solved To Acceptable Level."]:
            logging.warning(
                "Failed optimisation. Trying intialisation from previous solution.")
            filename = os.path.join(
                os.path.dirname(os.path.realpath(__file__)), '..',
                'mixed_stand_model', "BOCOP", "problem.def")
            with open(filename, "r") as infile:
                all_lines = infile.readlines()
            # NOTE(review): lines 32-35 of problem.def presumably toggle
            # BOCOP's initialisation settings — confirm against problem.def.
            all_lines[31] = "# " + all_lines[31]
            all_lines[32] = "# " + all_lines[32]
            all_lines[33] = all_lines[33][2:]
            all_lines[34] = all_lines[34][2:]
            with ms_approx._try_file_open(filename) as outfile:
                outfile.writelines(all_lines)
            *_, exit_text = approx_model.optimise(
                n_stages=20, init_policy=baseline_ol_control_policy)
            # Restore problem.def to its original state.
            all_lines[31] = all_lines[31][2:]
            all_lines[32] = all_lines[32][2:]
            all_lines[33] = "# " + all_lines[33]
            all_lines[34] = "# " + all_lines[34]
            with ms_approx._try_file_open(filename) as outfile:
                outfile.writelines(all_lines)
            if exit_text not in ["Optimal Solution Found.",
                                 "Solved To Acceptable Level."]:
                logging.error(
                    "Failed optimisation. Falling back to init policy.")
        approx_model.save_optimisation(
            os.path.join("data", folder_name, "ol_control_{}.pkl".format(i)))

        # Run OL control to get objective
        ol_control_policy = interp1d(
            setup['times'][:-1], approx_model.optimisation['control'],
            kind="zero", fill_value="extrapolate")
        sim_run = model.run_policy(ol_control_policy)
        ol_obj = model.run['objective']
        sim_state = np.sum(np.reshape(sim_run[0], (ncells, 15, -1)),
                           axis=0) / ncells
        allocation = _expenditure_allocation(
            sim_state, approx_model.optimisation['control'], params,
            new_params, setup)
        ol_alloc_results = np.concatenate([ol_alloc_results, [allocation]],
                                          axis=0)

        # BUG FIX: mpc_obj was previously only assigned inside the run_mpc
        # branch but referenced unconditionally in the summary entry below,
        # raising NameError whenever run_mpc was False.
        mpc_obj = None

        if run_mpc:
            mpc_args['init_policy'] = ol_control_policy
            # Optimise control (MPC)
            mpc_controller = mpc.Controller(setup, new_params, beta,
                                            approx_params=approx_new_params)
            *_, mpc_obj = mpc_controller.optimise(**mpc_args)
            mpc_controller.save_optimisation(
                os.path.join("data", folder_name,
                             "mpc_control_{}.pkl".format(i)))
            sim_run, _ = mpc_controller.run_control()
            sim_state = np.sum(np.reshape(sim_run[0], (ncells, 15, -1)),
                               axis=0) / ncells
            allocation = _expenditure_allocation(
                sim_state, mpc_controller.control, params, new_params, setup)
            mpc_alloc_results = np.concatenate(
                [mpc_alloc_results, [allocation]], axis=0)

        # JSON cannot serialise ndarrays; convert perturbed arrays to lists.
        list_keys = ['inf_tanoak_tanoak', 'nat_mort_tanoak',
                     'inf_mort_tanoak', 'trans_tanoak', 'recruit_tanoak',
                     'space_tanoak']
        for key in list_keys:
            new_params[key] = new_params[key].tolist()
        summary_results.append({
            'iteration': i,
            'params': new_params,
            'beta': beta.tolist(),
            'ol_objective': ol_obj,
            'mpc_objective': mpc_obj
        })

        # Write summary results to file (incrementally, each iteration)
        with open(os.path.join("data", folder_name, "summary.json"),
                  "w") as outfile:
            json.dump(summary_results, outfile, indent=4)

        # Save control allocations to file
        np.save(os.path.join("data", folder_name, "ol_alloc_results.npy"),
                ol_alloc_results)
        np.save(os.path.join("data", folder_name, "mpc_alloc_results.npy"),
                mpc_alloc_results)
def run_optimisations(ensemble_and_fit, params, setup, n_optim_runs,
                      standard_dev, mpc_args, ol_pol=None):
    """Run open-loop and MPC optimisations over parameter distributions.

    Samples ``n_optim_runs`` multiplicative perturbations of the infection
    rate parameters, then evaluates a single open-loop control and per-sample
    MPC controls against each sampled parameter set.

    Parameters
    ----------
    ensemble_and_fit : dict
        Must contain key 'fit' — the fitted approximate-model parameters
        passed to MixedStandApprox and mpc.Controller.
    params : dict
        Simulation parameters (baseline infection rates are read from here).
    setup : dict
        Model setup providing 'times'.
    n_optim_runs : int
        Number of parameter samples (forced to 1 when standard_dev == 0).
    standard_dev : float
        Relative standard deviation of the multiplicative perturbations.
    mpc_args : dict
        Keyword arguments forwarded to mpc.Controller.optimise; its
        'init_policy' entry is overwritten with the open-loop control.
    ol_pol : callable, optional
        Pre-computed open-loop control policy; if None it is optimised here.

    Returns
    -------
    dict
        Keys 'params', 'ol_control', 'ol_objs', 'mpc_control', 'mpc_objs'.
    """
    with open(os.path.join("data", "scale_and_fit_results.json"), "r") as infile:
        scale_and_fit_results = json.load(infile)

    # Approximate model uses roguing rate/cost rescaled by the fitted factor.
    approx_params = copy.deepcopy(params)
    approx_params['rogue_rate'] *= scale_and_fit_results['roguing_factor']
    approx_params['rogue_cost'] /= scale_and_fit_results['roguing_factor']

    approx_model = ms_approx.MixedStandApprox(setup, approx_params,
                                              ensemble_and_fit['fit'])
    sim_model = ms_sim.MixedStandSimulator(setup, params)

    if ol_pol is None:
        # Optimise the open-loop control from an even initial policy.
        _, ol_control, exit_text = approx_model.optimise(
            n_stages=20, init_policy=even_policy)
        if exit_text not in ["Optimal Solution Found.",
                             "Solved To Acceptable Level."]:
            logging.warning(
                "Failed optimisation. Trying intialisation from previous solution.")
            filename = os.path.join(
                os.path.dirname(os.path.realpath(__file__)), "..",
                "mixed_stand_model", "BOCOP", "problem.def")
            with open(filename, "r") as infile:
                all_lines = infile.readlines()
            # NOTE(review): lines 32-35 of problem.def presumably toggle
            # BOCOP's initialisation settings (comment two lines, uncomment
            # two others) — confirm against problem.def.
            all_lines[31] = "# " + all_lines[31]
            all_lines[32] = "# " + all_lines[32]
            all_lines[33] = all_lines[33][2:]
            all_lines[34] = all_lines[34][2:]
            with ms_approx._try_file_open(filename) as outfile:
                outfile.writelines(all_lines)
            # Retry the optimisation with the modified BOCOP settings.
            _, ol_control, exit_text = approx_model.optimise(
                n_stages=20, init_policy=even_policy)
            # Restore problem.def to its original state.
            all_lines[31] = all_lines[31][2:]
            all_lines[32] = all_lines[32][2:]
            all_lines[33] = "# " + all_lines[33]
            all_lines[34] = "# " + all_lines[34]
            with ms_approx._try_file_open(filename) as outfile:
                outfile.writelines(all_lines)
            if exit_text not in ["Optimal Solution Found.",
                                 "Solved To Acceptable Level."]:
                raise RuntimeError("Open loop optimisation failed!!")
    else:
        ol_control = ol_pol

    # Create parameter error distribution
    if standard_dev != 0.0:
        # Multiplicative error, mean 1, truncated below at zero.
        error_dist = truncnorm(-1.0 / standard_dev, np.inf, loc=1.0,
                               scale=standard_dev)
        error_samples = np.reshape(error_dist.rvs(size=n_optim_runs * 7),
                                   (n_optim_runs, 7))
    else:
        # No uncertainty: a single run with unperturbed parameters.
        n_optim_runs = 1
        error_samples = np.ones((n_optim_runs, 7))

    # Sample from parameter distribution and run simulations
    # Seven infection-rate parameters: four tanoak-tanoak plus three cross
    # terms, in the order unpacked in the loops below.
    baseline_beta = np.zeros(7)
    baseline_beta[:4] = params['inf_tanoak_tanoak']
    baseline_beta[4] = params['inf_bay_to_tanoak']
    baseline_beta[5] = params['inf_tanoak_to_bay']
    baseline_beta[6] = params['inf_bay_to_bay']
    parameter_samples = error_samples * baseline_beta
    logging.info("Generated ensemble parameters for optimisation runs")

    # Evaluate the single open-loop control against each sampled parameter set.
    ol_objs = np.zeros(n_optim_runs)
    for i in range(n_optim_runs):
        logging.info("Using parameter set: %s", parameter_samples[i])
        sim_model.params['inf_tanoak_tanoak'] = parameter_samples[i, 0:4]
        sim_model.params['inf_bay_to_tanoak'] = parameter_samples[i, 4]
        sim_model.params['inf_tanoak_to_bay'] = parameter_samples[i, 5]
        sim_model.params['inf_bay_to_bay'] = parameter_samples[i, 6]
        _, obj, _ = sim_model.run_policy(control_policy=ol_control)
        ol_objs[i] = obj
        logging.info("Open-loop run %d of %d done.", i + 1, n_optim_runs)

    # MPC, warm-started from the open-loop control, per sampled parameter set.
    mpc_args['init_policy'] = ol_control
    mpc_objs = np.zeros(n_optim_runs)
    mpc_controls = np.zeros((n_optim_runs, 9, len(setup['times']) - 1))
    for i in range(n_optim_runs):
        logging.info("Using parameter set: %s", parameter_samples[i])
        sim_model.params['inf_tanoak_tanoak'] = parameter_samples[i, 0:4]
        sim_model.params['inf_bay_to_tanoak'] = parameter_samples[i, 4]
        sim_model.params['inf_tanoak_to_bay'] = parameter_samples[i, 5]
        sim_model.params['inf_bay_to_bay'] = parameter_samples[i, 6]
        mpc_controller = mpc.Controller(setup, sim_model.params,
                                        ensemble_and_fit['fit'],
                                        approx_params=approx_params)
        _, _, mpc_control, mpc_obj = mpc_controller.optimise(**mpc_args)
        mpc_objs[i] = mpc_obj
        mpc_controls[i] = mpc_control
        logging.info("MPC run %d of %d done.", i + 1, n_optim_runs)

    # Evaluate the OL policy on the control grid for storage alongside MPC.
    ret_dict = {
        'params': parameter_samples,
        'ol_control': np.array([ol_control(t)
                                for t in setup['times'][:-1]]).T,
        'ol_objs': ol_objs,
        'mpc_control': mpc_controls,
        'mpc_objs': mpc_objs
    }

    return ret_dict
def make_data(folder=None):
    """Scan over diversity costs to analyse change in OL control.

    For each diversity-cost proportion, optimise open-loop control with the
    approximate model (with a BOCOP warm-start retry on failure), then use
    that control to warm-start an MPC optimisation.  Results are pickled
    into ``folder``.

    Parameters
    ----------
    folder : str, optional
        Output directory.  Defaults to ``../data/div_cost_scan`` relative
        to this script's directory.
    """
    if folder is None:
        # FIX: resolve relative to the script's *directory*; the original
        # joined '..' components onto the file path itself.
        folder = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                              '..', 'data', 'div_cost_scan')
    # Robustness: make sure the output directory exists before any save.
    os.makedirs(folder, exist_ok=True)

    with open(os.path.join("data", "scale_and_fit_results.json"), "r") as infile:
        scale_and_fit_results = json.load(infile)

    setup, params = utils.get_setup_params(
        parameters.CORRECTED_PARAMS, scale_inf=True,
        host_props=parameters.COBB_PROP_FIG4A)
    logging.info("Parameters: %s", params)

    beta_names = ['beta_1,1', 'beta_1,2', 'beta_1,3', 'beta_1,4', 'beta_12',
                  'beta_21', 'beta_2']
    beta = np.array([scale_and_fit_results[x] for x in beta_names])

    # Approximate model uses roguing rate/cost rescaled by the fitted factor.
    approx_params = copy.deepcopy(params)
    approx_params['rogue_rate'] *= scale_and_fit_results['roguing_factor']
    approx_params['rogue_cost'] /= scale_and_fit_results['roguing_factor']

    approx_model = ms_approx.MixedStandApprox(setup, approx_params, beta)

    # Scan proportions 0..1; costs normalised by run length and log(3).
    div_prop = np.array(
        [0.0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0])
    div_costs = div_prop / (setup['times'][-1] * np.log(3))

    for div_cost, prop in zip(div_costs, div_prop):
        logging.info("Diversity cost: %f", div_cost)
        params['div_cost'] = div_cost
        approx_params['div_cost'] = div_cost
        approx_model.params['div_cost'] = div_cost

        _, control, exit_text = approx_model.optimise(
            n_stages=20, init_policy=even_policy)
        if exit_text not in ["Optimal Solution Found.",
                             "Solved To Acceptable Level."]:
            logging.warning(
                "Failed optimisation. Trying intialisation from previous solution.")
            filename = os.path.join(
                os.path.dirname(os.path.realpath(__file__)), '..',
                'mixed_stand_model', "BOCOP", "problem.def")
            with open(filename, "r") as infile:
                all_lines = infile.readlines()
            # NOTE(review): lines 32-35 of problem.def presumably toggle
            # BOCOP's initialisation settings — confirm against problem.def.
            all_lines[31] = "# " + all_lines[31]
            all_lines[32] = "# " + all_lines[32]
            all_lines[33] = all_lines[33][2:]
            all_lines[34] = all_lines[34][2:]
            with ms_approx._try_file_open(filename) as outfile:
                outfile.writelines(all_lines)
            _, control, exit_text = approx_model.optimise(
                n_stages=20, init_policy=even_policy)
            # Restore problem.def to its original state.
            all_lines[31] = all_lines[31][2:]
            all_lines[32] = all_lines[32][2:]
            all_lines[33] = "# " + all_lines[33]
            all_lines[34] = "# " + all_lines[34]
            with ms_approx._try_file_open(filename) as outfile:
                outfile.writelines(all_lines)
            if exit_text not in ["Optimal Solution Found.",
                                 "Solved To Acceptable Level."]:
                logging.error("Failed optimisation in OL optimisation.")
        approx_model.save_optimisation(
            os.path.join(folder, "div_cost_" + str(prop) + "_OL.pkl"))

        # MPC optimisation warm-started from the OL control.
        mpc_controller = mpc.Controller(setup, params, beta,
                                        approx_params=approx_params)
        mpc_controller.optimise(
            horizon=100, time_step=0.5, end_time=100, update_period=20,
            rolling_horz=False, stage_len=5, init_policy=control,
            use_init_first=True)
        mpc_controller.save_optimisation(
            os.path.join(folder, "div_cost_" + str(prop) + "_MPC.pkl"))
def make_data(n_reps=10, folder=None, append=False):
    """Generate data analysing effect of sampling effort.

    For a range of sampling intensities, runs repeated MPC optimisations in
    which the approximate model is re-initialised from (noisy) observations
    at each update step, and stores controls, objectives, and observed vs
    actual states in compressed .npz archives.

    Parameters
    ----------
    n_reps : int
        Number of MPC repeats per sampling intensity.
    folder : str, optional
        Output directory.  Defaults to ``../data/obs_uncert`` relative to
        this script's directory.
    append : bool
        If True, append the new repeats to previously saved archives.
    """
    if folder is None:
        # FIX: resolve relative to the script's *directory*; the original
        # joined '..' components onto the file path itself.
        folder = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                              '..', 'data', 'obs_uncert')
    # Robustness: make sure the output directory exists before any save.
    os.makedirs(folder, exist_ok=True)

    setup, params = utils.get_setup_params(
        parameters.CORRECTED_PARAMS, scale_inf=True,
        host_props=parameters.COBB_PROP_FIG4A)

    # np.prod: np.product is deprecated and removed in NumPy 2.0.
    ncells = np.prod(setup['landscape_dims'])

    # Use population size of 500 as 500m2 per cell
    pop_size = 500
    sampling_nums = np.array(
        [1, 2, 3, 5, 7, 10, 15, 25, 35, 50, 70, 100, 150, 250, 350, 500])

    with open(os.path.join("data", "scale_and_fit_results.json"), "r") as infile:
        scale_and_fit_results = json.load(infile)

    beta_names = ['beta_1,1', 'beta_1,2', 'beta_1,3', 'beta_1,4', 'beta_12',
                  'beta_21', 'beta_2']
    beta = np.array([scale_and_fit_results[x] for x in beta_names])

    # Approximate model uses roguing rate/cost rescaled by the fitted factor.
    approx_params = copy.deepcopy(params)
    approx_params['rogue_rate'] *= scale_and_fit_results['roguing_factor']
    approx_params['rogue_cost'] /= scale_and_fit_results['roguing_factor']

    mpc_args = {
        'horizon': 100,
        'time_step': 0.5,
        'end_time': 100,
        'update_period': 20,
        'rolling_horz': False,
        'stage_len': 5,
        'use_init_first': True
    }

    approx_model = ms_approx.MixedStandApprox(setup, approx_params, beta)
    _, baseline_ol_control, _ = approx_model.optimise(
        n_stages=20, init_policy=even_policy)
    mpc_args['init_policy'] = baseline_ol_control

    for n_samples in sampling_nums:
        logging.info("Running with %d/%d stems sampled, %f%%", n_samples,
                     pop_size, 100 * n_samples / pop_size)
        observer = observer_factory(pop_size, n_samples)

        mpc_controls = np.zeros((n_reps, 9, len(setup['times']) - 1))
        mpc_objs = np.zeros(n_reps)
        # For storing observed states:
        mpc_approx_states = np.zeros(
            (n_reps, 4, 15))  # 4 as 4 update steps excluding the start
        mpc_actual_states = np.zeros((n_reps, 4, 15))

        # MPC runs - approximate model initialised correctly, & then observed
        # at update steps
        for i in range(n_reps):
            mpc_controller = mpc.Controller(setup, params, beta,
                                            approx_params=approx_params)
            _, _, mpc_control, mpc_obj = mpc_controller.optimise(
                **mpc_args, observer=observer)
            mpc_objs[i] = mpc_obj
            mpc_controls[i] = mpc_control
            mpc_approx_states[i] = mpc_controller.approx_update_states

            sim_run, approx_run = mpc_controller.run_control()
            sim_state = np.sum(np.reshape(sim_run[0], (ncells, 15, -1)),
                               axis=0) / ncells
            # Indices 40, 80, 120, 160 appear to be the update times (every
            # 20 years at 0.5 time step) -- TODO confirm vs setup['times'].
            mpc_actual_states[i] = sim_state[:, [40, 80, 120, 160]].T
            logging.info("MPC run %d of %d done", i + 1, n_reps)

        # Append to existing data
        if append:
            old_filename = os.path.join(
                folder, "sampled_data_" + str(n_samples) + ".npz")
            with np.load(old_filename) as old_dataset:
                mpc_controls = np.append(old_dataset['mpc_controls'],
                                         mpc_controls, axis=0)
                mpc_objs = np.append(old_dataset['mpc_objs'], mpc_objs,
                                     axis=0)
                mpc_approx_states = np.append(
                    old_dataset['mpc_approx_states'], mpc_approx_states,
                    axis=0)
                # BUG FIX: previously the old mpc_actual_states were dropped
                # when appending, leaving that array misaligned with the
                # other (appended) arrays in the saved archive.
                mpc_actual_states = np.append(
                    old_dataset['mpc_actual_states'], mpc_actual_states,
                    axis=0)

        # Store data
        dataset = {
            'mpc_controls': mpc_controls,
            'mpc_objs': mpc_objs,
            'mpc_approx_states': mpc_approx_states,
            'mpc_actual_states': mpc_actual_states
        }
        np.savez_compressed(
            os.path.join(folder, "sampled_data_" + str(n_samples)), **dataset)