def test_parameter_sweep_bad_recover(self, model, tmp_path):
    """Sweep with a deliberately failing reinitialize hook.

    ``_bad_reinitialize`` cannot rescue infeasible sample points, so every
    failed solve must be recorded as NaN in both the CSV and HDF5 outputs,
    and ``solve_successful`` must flag those points as False.
    """
    comm, rank, num_procs = _init_mpi()
    # Only rank 0 "owns" tmp_path; make all ranks agree on the directory.
    tmp_path = _get_rank0_path(comm, tmp_path)

    m = model
    m.fs.slack_penalty = 1000.
    # Upper bound of 0 disables the slack variables; some sweep points are
    # then expected to fail (see the NaNs in truth_dict below).
    m.fs.slack.setub(0)

    A = m.fs.input['a']
    B = m.fs.input['b']
    # 3x3 grid over inputs a in [0.1, 0.9] and b in [0.0, 0.5].
    sweep_params = {A.name: (A, 0.1, 0.9, 3), B.name: (B, 0.0, 0.5, 3)}

    results_file = os.path.join(tmp_path, 'global_results_bad_recover.csv')
    h5_fname = "output_dict_bad_recover"

    # Call the parameter_sweep function
    parameter_sweep(m,
                    sweep_params,
                    outputs=None,
                    csv_results_file=results_file,
                    h5_results_file=h5_fname,
                    optimize_function=_optimization,
                    reinitialize_function=_bad_reinitialize,
                    reinitialize_kwargs={'slack_penalty': 10.},
                    mpi_comm=comm)

    # NOTE: rank 0 "owns" tmp_path, so it needs to be
    # responsible for doing any output file checking
    # tmp_path can be deleted as soon as this method
    # returns
    if rank == 0:
        # Check that the global results file is created
        assert os.path.isfile(results_file)

        # Attempt to read in the data
        data = np.genfromtxt(results_file, skip_header=1, delimiter=',')

        # Compare the last row of the imported data to truth
        truth_data = [
            0.9, 0.5, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
        ]
        assert np.allclose(data[-1], truth_data, equal_nan=True)

    if rank == 0:
        # Expected HDF5 contents: NaN wherever a solve failed and the
        # (bad) reinitialize function could not recover it.
        truth_dict = {
            'outputs': {
                'fs.output[c]': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([
                        0.2, 0.2, np.nan, 1., 1., np.nan, np.nan, np.nan,
                        np.nan
                    ])
                },
                'fs.output[d]': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([
                        0., 0.75, np.nan, 0., 0.75, np.nan, np.nan,
                        np.nan, np.nan
                    ])
                },
                'fs.performance': {
                    'value': np.array([
                        0.2, 0.95, np.nan, 1., 1.75, np.nan, np.nan,
                        np.nan, np.nan
                    ])
                },
                'fs.slack[ab_slack]': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([
                        0., 0., np.nan, 0., 0., np.nan, np.nan, np.nan,
                        np.nan
                    ])
                },
                'fs.slack[cd_slack]': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([
                        0., 0., np.nan, 0., 0., np.nan, np.nan, np.nan,
                        np.nan
                    ])
                },
                'objective': {
                    'value': np.array([
                        0.2, 0.95, np.nan, 1., 1.75, np.nan, np.nan,
                        np.nan, np.nan
                    ])
                }
            },
            'solve_successful': [True, True, False, True, True, False,
                                 False, False, False],
            'sweep_params': {
                'fs.input[a]': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([0.1, 0.1, 0.1, 0.5, 0.5, 0.5, 0.9,
                                       0.9, 0.9])
                },
                'fs.input[b]': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([0., 0.25, 0.5, 0., 0.25, 0.5, 0.,
                                       0.25, 0.5])
                }
            }
        }

        # Verify the HDF5 contents and that the CSV and HDF5 agree.
        h5_fpath = os.path.join(tmp_path, '{0}.h5'.format(h5_fname))
        read_dict = _read_output_h5(h5_fpath)
        _assert_dictionary_correctness(truth_dict, read_dict)
        _assert_h5_csv_agreement(results_file, read_dict)
def test_parameter_sweep_optimize(self, model, tmp_path):
    """Sweep with feasibility relaxation enabled.

    With ``relax_feasibility=True`` every sample point solves, so no NaNs
    appear; the objective instead carries the slack penalty where the
    unrelaxed problem would have been infeasible.
    """
    comm, rank, num_procs = _init_mpi()
    # Only rank 0 "owns" tmp_path; make all ranks agree on the directory.
    tmp_path = _get_rank0_path(comm, tmp_path)

    m = model
    m.fs.slack_penalty = 1000.
    m.fs.slack.setub(0)

    A = m.fs.input['a']
    B = m.fs.input['b']
    # 3x3 grid over inputs a in [0.1, 0.9] and b in [0.0, 0.5].
    sweep_params = {A.name: (A, 0.1, 0.9, 3), B.name: (B, 0.0, 0.5, 3)}
    outputs = {
        'output_c': m.fs.output['c'],
        'output_d': m.fs.output['d'],
        'performance': m.fs.performance,
        'objective': m.objective
    }
    results_file = os.path.join(tmp_path, 'global_results_optimize.csv')
    h5_fname = "output_dict_optimize"

    # Call the parameter_sweep function
    parameter_sweep(m,
                    sweep_params,
                    outputs=outputs,
                    csv_results_file=results_file,
                    h5_results_file=h5_fname,
                    optimize_function=_optimization,
                    optimize_kwargs={'relax_feasibility': True},
                    mpi_comm=comm)

    # NOTE: rank 0 "owns" tmp_path, so it needs to be
    # responsible for doing any output file checking
    # tmp_path can be deleted as soon as this method
    # returns
    if rank == 0:
        # Check that the global results file is created
        assert os.path.isfile(results_file)

        # Attempt to read in the data
        data = np.genfromtxt(results_file, skip_header=1, delimiter=',')

        # Compare the last row of the imported data to truth.
        # The final objective value is the performance (2.0) minus the
        # slack penalty incurred at (a, b) = (0.9, 0.5).
        truth_data = [
            0.9, 0.5, 1.0, 1.0, 2.0,
            2.0 - 1000. * ((2. * 0.9 - 1.) + (3. * 0.5 - 1.))
        ]
        assert np.allclose(data[-1], truth_data, equal_nan=True)

    # Check the h5
    if rank == 0:
        truth_dict = {
            'outputs': {
                'output_c': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([0.2, 0.2, 0.2, 1., 1., 1., 1., 1.,
                                       1.])
                },
                'output_d': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    # Near-zero entries are solver-tolerance artifacts of
                    # the relaxed problem, not exact zeros.
                    'value': np.array([
                        9.98580690e-09, 0.75, 1., 9.99872731e-09, 0.75,
                        1., 9.99860382e-09, 0.75, 1.
                    ])
                },
                'performance': {
                    'value': np.array([0.2, 0.95, 1.2, 1., 1.75, 2., 1.,
                                       1.75, 2.])
                },
                'objective': {
                    'value': np.array([
                        0.2, 9.50000020e-01, -4.98799990e+02, 1., 1.75,
                        -4.97999990e+02, -7.98999990e+02, -7.98249990e+02,
                        2.0 - 1000. * ((2. * 0.9 - 1.) + (3. * 0.5 - 1.))
                    ])
                }
            },
            # Relaxation makes every point feasible.
            'solve_successful': [True] * 9,
            'sweep_params': {
                'fs.input[a]': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([0.1, 0.1, 0.1, 0.5, 0.5, 0.5, 0.9,
                                       0.9, 0.9])
                },
                'fs.input[b]': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([0., 0.25, 0.5, 0., 0.25, 0.5, 0.,
                                       0.25, 0.5])
                }
            }
        }

        # Verify the HDF5 contents and that the CSV and HDF5 agree.
        h5_fpath = os.path.join(tmp_path, '{0}.h5'.format(h5_fname))
        read_dict = _read_output_h5(h5_fpath)
        _assert_dictionary_correctness(truth_dict, read_dict)
        _assert_h5_csv_agreement(results_file, read_dict)
def test_h5_read_write(self, tmp_path):
    """Round-trip a results dictionary through the HDF5 writer and reader.

    Builds a reference dictionary in the on-disk layout, writes it with
    ``_write_output_to_h5``, reads it back, and checks the two agree.
    """
    comm, rank, num_procs = _init_mpi()
    # Only rank 0 "owns" tmp_path; make all ranks agree on the directory.
    tmp_path = _get_rank0_path(comm, tmp_path)

    def _record(values):
        # One variable entry in the serialized results layout.
        return {
            'lower bound': 0,
            'units': 'None',
            'upper bound': 0,
            'value': np.array(values),
        }

    nine_zeros = [0.] * 9
    reference_dict = {
        'outputs': {
            'fs.input[a]': _record(nine_zeros),
            'fs.input[b]': _record(nine_zeros),
            'fs.output[c]': _record([0.2, 0.2, 0., 1., 1., 0., 0., 0., 0.]),
            'fs.output[d]': _record([0., 0.75, 0., 0., 0.75, 0., 0., 0., 0.]),
            'fs.slack[ab_slack]': _record(nine_zeros),
            'fs.slack[cd_slack]': _record(nine_zeros),
        },
        'solve_successful': [True] * 9,
        'sweep_params': {
            'fs.input[a]': _record([0.1, 0.1, 0., 0.5, 0.5, 0., 0., 0., 0.]),
            'fs.input[b]': _record([0., 0.25, 0., 0., 0.25, 0., 0., 0., 0.]),
        },
    }

    # Per-rank file name so parallel runs do not collide.
    fname = f"h5_test_{rank}.h5"
    _write_output_to_h5(reference_dict, output_directory=tmp_path,
                        fname=fname)

    round_trip = _read_output_h5(os.path.join(tmp_path, fname))
    _assert_dictionary_correctness(reference_dict, round_trip)
def test_parameter_sweep(self, model, tmp_path):
    """End-to-end sweep test: CSV, per-rank debugging files, HDF5, and the
    companion .txt metadata file.

    Fixes over the previous version: the companion text file is now read
    via a context manager (the handle was previously leaked), and the h5
    path is derived from ``h5_fname`` instead of a duplicated literal, for
    consistency with the sibling tests.
    """
    comm, rank, num_procs = _init_mpi()
    # Only rank 0 "owns" tmp_path; make all ranks agree on the directory.
    tmp_path = _get_rank0_path(comm, tmp_path)

    m = model
    m.fs.slack_penalty = 1000.
    # Upper bound of 0 disables the slack variables; some sweep points are
    # then expected to fail (see the NaNs in truth_dict below).
    m.fs.slack.setub(0)

    A = m.fs.input['a']
    B = m.fs.input['b']
    # 3x3 grid over inputs a in [0.1, 0.9] and b in [0.0, 0.5].
    sweep_params = {A.name: (A, 0.1, 0.9, 3), B.name: (B, 0.0, 0.5, 3)}
    outputs = {
        'output_c': m.fs.output['c'],
        'output_d': m.fs.output['d'],
        'performance': m.fs.performance
    }
    results_file = os.path.join(tmp_path, 'global_results.csv')
    h5_fname = "output_dict"

    # Call the parameter_sweep function
    parameter_sweep(m,
                    sweep_params,
                    outputs=outputs,
                    csv_results_file=results_file,
                    h5_results_file=h5_fname,
                    optimize_function=_optimization,
                    debugging_data_dir=tmp_path,
                    mpi_comm=comm)

    # NOTE: rank 0 "owns" tmp_path, so it needs to be
    # responsible for doing any output file checking
    # tmp_path can be deleted as soon as this method
    # returns
    if rank == 0:
        # Check that the global results file is created
        assert os.path.isfile(results_file)

        # Check that all local output files have been created
        for k in range(num_procs):
            assert os.path.isfile(
                os.path.join(tmp_path, f'local_results_{k:03}.csv'))

        # Attempt to read in the data
        data = np.genfromtxt(results_file, skip_header=1, delimiter=',')

        # Compare the last row of the imported data to truth
        truth_data = [0.9, 0.5, np.nan, np.nan, np.nan]
        assert np.allclose(data[-1], truth_data, equal_nan=True)

    # Check for the h5 output
    if rank == 0:
        truth_dict = {
            'outputs': {
                'output_c': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([
                        0.2, 0.2, np.nan, 1., 1., np.nan, np.nan, np.nan,
                        np.nan
                    ])
                },
                'output_d': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([
                        0., 0.75, np.nan, 0., 0.75, np.nan, np.nan,
                        np.nan, np.nan
                    ])
                },
                'performance': {
                    'value': np.array([
                        0.2, 0.95, np.nan, 1., 1.75, np.nan, np.nan,
                        np.nan, np.nan
                    ])
                }
            },
            'solve_successful': [True, True, False, True, True, False,
                                 False, False, False],
            'sweep_params': {
                'fs.input[a]': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([0.1, 0.1, 0.1, 0.5, 0.5, 0.5, 0.9,
                                       0.9, 0.9])
                },
                'fs.input[b]': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([0., 0.25, 0.5, 0., 0.25, 0.5, 0.,
                                       0.25, 0.5])
                }
            }
        }

        # Derive the h5 path from h5_fname (previously a duplicated
        # 'output_dict.h5' literal) — identical value, single source.
        h5_fpath = os.path.join(tmp_path, '{0}.h5'.format(h5_fname))
        read_dict = _read_output_h5(h5_fpath)
        _assert_dictionary_correctness(truth_dict, read_dict)
        _assert_h5_csv_agreement(results_file, read_dict)

        # Check if there is a text file created
        import ast

        truth_txt_dict = {
            'outputs': ['output_c', 'output_d', 'performance'],
            'sweep_params': ['fs.input[a]', 'fs.input[b]']
        }

        txt_fpath = os.path.join(tmp_path, '{0}.txt'.format(h5_fname))
        assert os.path.exists(txt_fpath)
        # Use a context manager so the handle is closed (was leaked before).
        with open(txt_fpath) as f:
            f_contents = f.read()
        read_txt_dict = ast.literal_eval(f_contents)
        assert read_txt_dict == truth_txt_dict
def test_recursive_parameter_sweep(model, tmp_path):
    """Recursive sweep with random sampling: keep re-sampling until the
    requested number of *successful* samples is collected.

    The truth data below has 24 total samples but only the first 10
    (``req_num_samples``) successful ones are returned in ``data``.
    """
    comm, rank, num_procs = _init_mpi()
    # Only rank 0 "owns" tmp_path; make all ranks agree on the directory.
    tmp_path = _get_rank0_path(comm, tmp_path)

    m = model
    solver = pyo.SolverFactory("ipopt")

    # Single uniformly-sampled parameter on fs.a.
    sweep_params = {}
    sweep_params["a_val"] = UniformSample(m.fs.a, 0.0, 1.0)

    outputs = {}
    outputs["x_val"] = m.fs.x

    # Run the parameter sweep study using num_samples randomly drawn from the above range
    num_samples = 10
    seed = 0  # fixed seed so the sampled values below are reproducible

    results_fname = os.path.join(tmp_path, "global_results")
    csv_results_file = str(results_fname) + ".csv"
    h5_results_file = str(results_fname) + ".h5"

    # Run the parameter sweep
    data = recursive_parameter_sweep(
        m,
        sweep_params,
        outputs=outputs,
        csv_results_file_name=csv_results_file,
        h5_results_file_name=h5_results_file,
        req_num_samples=num_samples,
        debugging_data_dir=tmp_path,
        seed=seed,
    )

    # Expected (a, x) rows for the 10 requested successful samples.
    reference_save_data = np.array([
        [0.38344152, 0.11655848],
        [0.4236548, 0.0763452],
        [0.43758721, 0.06241279],
        [0.0187898, 0.4812102],
        [0.0202184, 0.4797816],
        [0.06022547, 0.43977453],
        [0.07103606, 0.42896394],
        [0.0871293, 0.4128707],
        [0.10204481, 0.39795519],
        [0.11827443, 0.38172557],
    ])

    assert np.shape(data) == (10, 2)
    assert np.allclose(reference_save_data, data, equal_nan=True)
    # Each row's a + x should equal the model's success probability.
    assert np.allclose(np.sum(data, axis=1), value(m.fs.success_prob))

    if rank == 0:
        # Check that the global results file is created
        assert os.path.isfile(csv_results_file)

        # Check that all local output files have been created
        for k in range(num_procs):
            assert os.path.isfile(
                os.path.join(tmp_path, f"local_results_{k:03}.h5"))
            assert os.path.isfile(
                os.path.join(tmp_path, f"local_results_{k:03}.csv"))

        csv_data = np.genfromtxt(csv_results_file, skip_header=1,
                                 delimiter=",")

        # Compare the last row of the imported data to truth
        assert np.allclose(data[-1, :], reference_save_data[-1, :],
                           equal_nan=True)

        # Check for the h5 output.  The h5 records ALL 24 attempted samples
        # (including those beyond the 10 requested), in sampling order.
        truth_dict = {
            "outputs": {
                "x_val": {
                    # Unbounded variable: bounds serialize as +/- float max.
                    "lower bound": -1.7976931348623157e308,
                    "units": "None",
                    "upper bound": 1.7976931348623157e308,
                    "value": np.array([
                        0.11655848,
                        0.0763452,
                        0.06241279,
                        0.4812102,
                        0.4797816,
                        0.43977453,
                        0.42896394,
                        0.4128707,
                        0.39795519,
                        0.38172557,
                        0.3710737,
                        0.35664671,
                        0.33869048,
                        0.29112324,
                        0.28961744,
                        0.23544439,
                        0.18457165,
                        0.1404921,
                        0.13628923,
                        0.08533806,
                        0.06296805,
                        0.06139849,
                        0.04384967,
                        0.03852064,
                    ]),
                }
            },
            "solve_successful": [
                True,
                True,
                True,
                True,
                True,
                True,
                True,
                True,
                True,
                True,
                True,
                True,
                True,
                True,
                True,
                True,
                True,
                True,
                True,
                True,
                True,
                True,
                True,
                True,
            ],
            "sweep_params": {
                "fs.a": {
                    "units": "None",
                    "value": np.array([
                        0.38344152,
                        0.4236548,
                        0.43758721,
                        0.0187898,
                        0.0202184,
                        0.06022547,
                        0.07103606,
                        0.0871293,
                        0.10204481,
                        0.11827443,
                        0.1289263,
                        0.14335329,
                        0.16130952,
                        0.20887676,
                        0.21038256,
                        0.26455561,
                        0.31542835,
                        0.3595079,
                        0.36371077,
                        0.41466194,
                        0.43703195,
                        0.43860151,
                        0.45615033,
                        0.46147936,
                    ]),
                }
            },
        }

        read_dict = _read_output_h5(h5_results_file)
        _assert_dictionary_correctness(truth_dict, read_dict)
        # The returned data must match the first num_samples h5 entries.
        assert np.allclose(
            data[:, -1],
            read_dict["outputs"]["x_val"]["value"][:num_samples])

        # Check for the companion text file.
        # NOTE(review): h5_results_file is already an absolute path under
        # tmp_path, so os.path.join yields "<h5_results_file>.txt"
        # (e.g. global_results.h5.txt) — presumably matching the writer's
        # naming; confirm against the sweep implementation.
        txt_fpath = os.path.join(tmp_path, "{0}.txt".format(h5_results_file))
        assert os.path.exists(txt_fpath)
        truth_txt_dict = {
            "outputs": ["x_val"],
            "sweep_params": ["fs.a"],
        }
        with open(txt_fpath, "r") as f:
            f_contents = f.read()
        read_txt_dict = ast.literal_eval(f_contents)
        assert read_txt_dict == truth_txt_dict