def main(case_num=1, nx=11, interpolate_nan_outputs=True):
    # When called from the command line, the arguments arrive as strings
    case_num = int(case_num)
    nx = int(nx)
    interpolate_nan_outputs = bool(interpolate_nan_outputs)

    # Start MPI communicator
    comm, rank, num_procs = _init_mpi()

    tic = time.time()
    global_results, sweep_params = run_analysis(case_num, nx, interpolate_nan_outputs)
    print(global_results)
    toc = time.time()

    if rank == 0:
        total_samples = 1
        for k, v in sweep_params.items():
            total_samples *= v.num_samples

        print("Finished case_num = %d." % (case_num))
        print(
            "Processed %d swept parameters comprising %d total points."
            % (len(sweep_params), total_samples)
        )
        print("Elapsed time = %.1f s." % (toc - tic))

    return global_results, sweep_params
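# --- Illustrative usage sketch (not part of the original source) ---
# A minimal example of driving main() directly from Python rather than via the
# command-line entry point below; the module name "analysis_script" is hypothetical.
#
#   from analysis_script import main  # hypothetical module name
#   global_results, sweep_params = main(case_num=1, nx=11, interpolate_nan_outputs=True)
#
# Under MPI the same call runs on every rank; only rank 0 prints the summary.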
def test_parameter_sweep_bad_force_initialize(self, model, tmp_path):
    comm, rank, num_procs = _init_mpi()
    tmp_path = _get_rank0_path(comm, tmp_path)

    m = model
    m.fs.slack_penalty = 1000.
    m.fs.slack.setub(0)

    A = m.fs.input['a']
    B = m.fs.input['b']
    sweep_params = {A.name: (A, 0.1, 0.9, 3),
                    B.name: (B, 0.0, 0.5, 3)}

    results_file = os.path.join(tmp_path, 'global_results_recover.csv')
    h5_fname = "output_dict_recover"

    with pytest.raises(ValueError):
        # Call the parameter_sweep function
        parameter_sweep(m, sweep_params, outputs=None,
                        csv_results_file=results_file,
                        h5_results_file=h5_fname,
                        optimize_function=_optimization,
                        reinitialize_before_sweep=True,
                        reinitialize_function=None,
                        reinitialize_kwargs=None,
                        mpi_comm=comm)
def test_create_global_output(self, model):
    comm, rank, num_procs = _init_mpi()

    m = model
    m.fs.slack_penalty = 1000.
    m.fs.slack.setub(0)

    sweep_params = {
        'input_a': NormalSample(m.fs.input['a'], 0.1, 0.9),
        'input_b': NormalSample(m.fs.input['b'], 0.0, 0.5)
    }
    outputs = {
        'output_c': m.fs.output['c'],
        'output_d': m.fs.output['d'],
        'performance': m.fs.performance
    }

    sweep_params, sampling_type = _process_sweep_params(sweep_params)

    # Get the global sweep param values
    global_num_cases = 2 * num_procs
    global_values = _build_combinations(sweep_params, sampling_type,
                                        global_num_cases, comm, rank, num_procs)

    # Divide the workload between processors
    local_values = _divide_combinations(global_values, rank, num_procs)
    local_num_cases = np.shape(local_values)[0]

    local_output_dict, outputs = _create_local_output_skeleton(
        model, sweep_params, None, local_num_cases)

    # Manually update the values in the numpy array
    for key, value in local_output_dict.items():
        for subkey, subvalue in value.items():
            subvalue['value'][:] = rank

    # The local output dict also contains the solve_successful flags
    local_output_dict['solve_successful'] = [True] * local_num_cases

    # Get the global output dictionary; this is properly created only on rank 0
    global_output_dict = _create_global_output(local_output_dict, global_num_cases,
                                               comm, rank, num_procs)

    if num_procs == 1:
        assert local_output_dict == global_output_dict
    else:
        comm.Barrier()
        if rank > 0:
            assert global_output_dict == local_output_dict
        else:
            test_array = np.repeat(np.arange(0, num_procs, dtype=float), 2)
            test_list = [True] * global_num_cases
            for key, value in global_output_dict.items():
                if key != 'solve_successful':
                    for subkey, subvalue in value.items():
                        assert np.allclose(subvalue['value'], test_array)
                elif key == 'solve_successful':
                    assert list(value) == test_list
        comm.Barrier()
def test_divide_combinations(self):
    # _divide_combinations(global_combo_array, rank, num_procs)
    comm, rank, num_procs = _init_mpi()

    A_param = pyo.Param(initialize=0.0, mutable=True)
    B_param = pyo.Param(initialize=1.0, mutable=True)
    C_param = pyo.Param(initialize=2.0, mutable=True)

    range_A = [0.0, 10.0]
    range_B = [1.0, 20.0]
    range_C = [2.0, 30.0]

    nn_A = 4
    nn_B = 5
    nn_C = 6

    param_dict = dict()
    param_dict['var_A'] = LinearSample(A_param, range_A[0], range_A[1], nn_A)
    param_dict['var_B'] = LinearSample(B_param, range_B[0], range_B[1], nn_B)
    param_dict['var_C'] = LinearSample(C_param, range_C[0], range_C[1], nn_C)

    global_combo_array = _build_combinations(param_dict, SamplingType.FIXED, None,
                                             comm, rank, num_procs)

    test = np.array_split(global_combo_array, num_procs, axis=0)[rank]

    local_combo_array = _divide_combinations(global_combo_array, rank, num_procs)

    assert np.shape(local_combo_array)[1] == 3

    assert np.allclose(test[:, 0], local_combo_array[:, 0])
    assert np.allclose(test[:, 1], local_combo_array[:, 1])
    assert np.allclose(test[:, 2], local_combo_array[:, 2])

    if rank == 0:
        assert local_combo_array[0, 0] == pytest.approx(range_A[0])
        assert local_combo_array[0, 1] == pytest.approx(range_B[0])
        assert local_combo_array[0, 2] == pytest.approx(range_C[0])

    if rank == num_procs - 1:
        assert local_combo_array[-1, 0] == pytest.approx(range_A[1])
        assert local_combo_array[-1, 1] == pytest.approx(range_B[1])
        assert local_combo_array[-1, 2] == pytest.approx(range_C[1])
def test_aggregate_results(self):
    comm, rank, num_procs = _init_mpi()
    # print('Rank %d, num_procs %d' % (rank, num_procs))

    nn = 5
    np.random.seed(1)
    local_results = (rank + 1) * np.random.rand(nn, 2)
    global_values = np.random.rand(nn * num_procs, 4)

    global_results = _aggregate_results(local_results, global_values, comm, num_procs)

    assert np.shape(global_results)[1] == np.shape(local_results)[1]
    assert np.shape(global_results)[0] == np.shape(global_values)[0]

    if rank == 0:
        assert global_results[0, 0] == pytest.approx(local_results[0, 0])
        assert global_results[0, 1] == pytest.approx(local_results[0, 1])
        assert global_results[-1, 0] == pytest.approx(num_procs * local_results[-1, 0])
        assert global_results[-1, 1] == pytest.approx(num_procs * local_results[-1, 1])
def test_linear_build_combinations(self):
    comm, rank, num_procs = _init_mpi()

    A_param = pyo.Param(initialize=0.0, mutable=True)
    B_param = pyo.Param(initialize=1.0, mutable=True)
    C_param = pyo.Param(initialize=2.0, mutable=True)

    range_A = [0.0, 10.0]
    range_B = [1.0, 20.0]
    range_C = [2.0, 30.0]

    nn_A = 4
    nn_B = 5
    nn_C = 6

    param_dict = dict()
    param_dict['var_A'] = LinearSample(A_param, range_A[0], range_A[1], nn_A)
    param_dict['var_B'] = LinearSample(B_param, range_B[0], range_B[1], nn_B)
    param_dict['var_C'] = LinearSample(C_param, range_C[0], range_C[1], nn_C)

    global_combo_array = _build_combinations(param_dict, SamplingType.FIXED, None,
                                             comm, rank, num_procs)

    assert np.shape(global_combo_array)[0] == nn_A * nn_B * nn_C
    assert np.shape(global_combo_array)[1] == len(param_dict)

    assert global_combo_array[0, 0] == pytest.approx(range_A[0])
    assert global_combo_array[0, 1] == pytest.approx(range_B[0])
    assert global_combo_array[0, 2] == pytest.approx(range_C[0])

    assert global_combo_array[-1, 0] == pytest.approx(range_A[1])
    assert global_combo_array[-1, 1] == pytest.approx(range_B[1])
    assert global_combo_array[-1, 2] == pytest.approx(range_C[1])
def test_parameter_sweep_bad_recover(self, model, tmp_path):
    comm, rank, num_procs = _init_mpi()
    tmp_path = _get_rank0_path(comm, tmp_path)

    m = model
    m.fs.slack_penalty = 1000.
    m.fs.slack.setub(0)

    A = m.fs.input['a']
    B = m.fs.input['b']
    sweep_params = {A.name: (A, 0.1, 0.9, 3),
                    B.name: (B, 0.0, 0.5, 3)}

    results_file = os.path.join(tmp_path, 'global_results_bad_recover.csv')
    h5_fname = "output_dict_bad_recover"

    # Call the parameter_sweep function
    parameter_sweep(m, sweep_params, outputs=None,
                    csv_results_file=results_file,
                    h5_results_file=h5_fname,
                    optimize_function=_optimization,
                    reinitialize_function=_bad_reinitialize,
                    reinitialize_kwargs={'slack_penalty': 10.},
                    mpi_comm=comm)

    # NOTE: rank 0 "owns" tmp_path, so it needs to be responsible for doing
    #       any output file checking. tmp_path can be deleted as soon as this
    #       method returns.
    if rank == 0:
        # Check that the global results file is created
        assert os.path.isfile(results_file)

        # Attempt to read in the data
        data = np.genfromtxt(results_file, skip_header=1, delimiter=',')

        # Compare the last row of the imported data to truth
        truth_data = [0.9, 0.5, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]
        assert np.allclose(data[-1], truth_data, equal_nan=True)

    if rank == 0:
        truth_dict = {
            'outputs': {
                'fs.output[c]': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([0.2, 0.2, np.nan, 1., 1., np.nan,
                                       np.nan, np.nan, np.nan])},
                'fs.output[d]': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([0., 0.75, np.nan, 0., 0.75, np.nan,
                                       np.nan, np.nan, np.nan])},
                'fs.performance': {
                    'value': np.array([0.2, 0.95, np.nan, 1., 1.75, np.nan,
                                       np.nan, np.nan, np.nan])},
                'fs.slack[ab_slack]': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([0., 0., np.nan, 0., 0., np.nan,
                                       np.nan, np.nan, np.nan])},
                'fs.slack[cd_slack]': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([0., 0., np.nan, 0., 0., np.nan,
                                       np.nan, np.nan, np.nan])},
                'objective': {
                    'value': np.array([0.2, 0.95, np.nan, 1., 1.75, np.nan,
                                       np.nan, np.nan, np.nan])}},
            'solve_successful': [True, True, False, True, True,
                                 False, False, False, False],
            'sweep_params': {
                'fs.input[a]': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([0.1, 0.1, 0.1, 0.5, 0.5, 0.5, 0.9, 0.9, 0.9])},
                'fs.input[b]': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([0., 0.25, 0.5, 0., 0.25, 0.5, 0., 0.25, 0.5])}}}

        h5_fpath = os.path.join(tmp_path, '{0}.h5'.format(h5_fname))
        read_dict = _read_output_h5(h5_fpath)
        _assert_dictionary_correctness(truth_dict, read_dict)
        _assert_h5_csv_agreement(results_file, read_dict)
        sweep_params,
        outputs,
        csv_results_file_name=output_filename,
        optimize_function=opt_function,
        optimize_kwargs=optimize_kwargs,
        debugging_data_dir=os.path.split(output_filename)[0] + "/local",
        interpolate_nan_outputs=interp_nan_outputs,
    )

    return global_results, sweep_params


if __name__ == "__main__":

    # Start MPI communicator
    comm, rank, num_procs = _init_mpi()

    # Get the case number to run
    try:
        case_num = int(sys.argv[1])
    except:
        # Default to running case 1
        case_num = 1

    # Get the default number of discretization points
    try:
        nx = int(sys.argv[2])
    except:
        # Default to a 4-point discretization
        nx = 4
def test_init_mpi(self):
    comm, rank, num_procs = _init_mpi()

    assert type(rank) == int
    assert type(num_procs) == int
    assert 0 <= rank < num_procs
def test_parameter_sweep_optimize(self, model, tmp_path):
    comm, rank, num_procs = _init_mpi()
    tmp_path = _get_rank0_path(comm, tmp_path)

    m = model
    m.fs.slack_penalty = 1000.
    m.fs.slack.setub(0)

    A = m.fs.input['a']
    B = m.fs.input['b']
    sweep_params = {A.name: (A, 0.1, 0.9, 3),
                    B.name: (B, 0.0, 0.5, 3)}
    outputs = {
        'output_c': m.fs.output['c'],
        'output_d': m.fs.output['d'],
        'performance': m.fs.performance,
        'objective': m.objective
    }

    results_file = os.path.join(tmp_path, 'global_results_optimize.csv')
    h5_fname = "output_dict_optimize"

    # Call the parameter_sweep function
    parameter_sweep(m, sweep_params, outputs=outputs,
                    csv_results_file=results_file,
                    h5_results_file=h5_fname,
                    optimize_function=_optimization,
                    optimize_kwargs={'relax_feasibility': True},
                    mpi_comm=comm)

    # NOTE: rank 0 "owns" tmp_path, so it needs to be responsible for doing
    #       any output file checking. tmp_path can be deleted as soon as this
    #       method returns.
    if rank == 0:
        # Check that the global results file is created
        assert os.path.isfile(results_file)

        # Attempt to read in the data
        data = np.genfromtxt(results_file, skip_header=1, delimiter=',')

        # Compare the last row of the imported data to truth
        truth_data = [0.9, 0.5, 1.0, 1.0, 2.0,
                      2.0 - 1000. * ((2. * 0.9 - 1.) + (3. * 0.5 - 1.))]
        assert np.allclose(data[-1], truth_data, equal_nan=True)

    # Check the h5
    if rank == 0:
        truth_dict = {
            'outputs': {
                'output_c': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([0.2, 0.2, 0.2, 1., 1., 1., 1., 1., 1.])},
                'output_d': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([9.98580690e-09, 0.75, 1., 9.99872731e-09,
                                       0.75, 1., 9.99860382e-09, 0.75, 1.])},
                'performance': {
                    'value': np.array([0.2, 0.95, 1.2, 1., 1.75, 2., 1., 1.75, 2.])},
                'objective': {
                    'value': np.array([0.2, 9.50000020e-01, -4.98799990e+02, 1.,
                                       1.75, -4.97999990e+02, -7.98999990e+02,
                                       -7.98249990e+02,
                                       2.0 - 1000. * ((2. * 0.9 - 1.) + (3. * 0.5 - 1.))])}},
            'solve_successful': [True] * 9,
            'sweep_params': {
                'fs.input[a]': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([0.1, 0.1, 0.1, 0.5, 0.5, 0.5, 0.9, 0.9, 0.9])},
                'fs.input[b]': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([0., 0.25, 0.5, 0., 0.25, 0.5, 0., 0.25, 0.5])}}}

        h5_fpath = os.path.join(tmp_path, '{0}.h5'.format(h5_fname))
        read_dict = _read_output_h5(h5_fpath)
        _assert_dictionary_correctness(truth_dict, read_dict)
        _assert_h5_csv_agreement(results_file, read_dict)
def test_recursive_parameter_sweep(model, tmp_path):
    comm, rank, num_procs = _init_mpi()
    tmp_path = _get_rank0_path(comm, tmp_path)

    m = model
    solver = pyo.SolverFactory("ipopt")

    sweep_params = {}
    sweep_params["a_val"] = UniformSample(m.fs.a, 0.0, 1.0)

    outputs = {}
    outputs["x_val"] = m.fs.x

    # Run the parameter sweep study using num_samples randomly drawn from the above range
    num_samples = 10
    seed = 0

    results_fname = os.path.join(tmp_path, "global_results")
    csv_results_file = str(results_fname) + ".csv"
    h5_results_file = str(results_fname) + ".h5"

    # Run the parameter sweep
    # recursive_parameter_sweep(m, sweep_params, outputs, results_file='recursive_sweep.csv',
    #     optimize_function=optimize, optimize_kwargs={'solver': solver},
    #     req_num_samples=num_samples, seed=seed, reinitialize_before_sweep=False,
    #     reinitialize_function=initialize_system, reinitialize_kwargs={'solver': solver})
    data = recursive_parameter_sweep(
        m,
        sweep_params,
        outputs=outputs,
        csv_results_file_name=csv_results_file,
        h5_results_file_name=h5_results_file,
        req_num_samples=num_samples,
        debugging_data_dir=tmp_path,
        seed=seed,
    )

    reference_save_data = np.array([
        [0.38344152, 0.11655848],
        [0.4236548, 0.0763452],
        [0.43758721, 0.06241279],
        [0.0187898, 0.4812102],
        [0.0202184, 0.4797816],
        [0.06022547, 0.43977453],
        [0.07103606, 0.42896394],
        [0.0871293, 0.4128707],
        [0.10204481, 0.39795519],
        [0.11827443, 0.38172557],
    ])

    assert np.shape(data) == (10, 2)
    assert np.allclose(reference_save_data, data, equal_nan=True)
    assert np.allclose(np.sum(data, axis=1), value(m.fs.success_prob))

    if rank == 0:
        # Check that the global results file is created
        assert os.path.isfile(csv_results_file)

        # Check that all local output files have been created
        for k in range(num_procs):
            assert os.path.isfile(os.path.join(tmp_path, f"local_results_{k:03}.h5"))
            assert os.path.isfile(os.path.join(tmp_path, f"local_results_{k:03}.csv"))

        csv_data = np.genfromtxt(csv_results_file, skip_header=1, delimiter=",")

        # Compare the last row of the imported data to truth
        assert np.allclose(data[-1, :], reference_save_data[-1, :], equal_nan=True)

        # Check for the h5 output
        truth_dict = {
            "outputs": {
                "x_val": {
                    "lower bound": -1.7976931348623157e308,
                    "units": "None",
                    "upper bound": 1.7976931348623157e308,
                    "value": np.array([
                        0.11655848, 0.0763452, 0.06241279, 0.4812102, 0.4797816,
                        0.43977453, 0.42896394, 0.4128707, 0.39795519, 0.38172557,
                        0.3710737, 0.35664671, 0.33869048, 0.29112324, 0.28961744,
                        0.23544439, 0.18457165, 0.1404921, 0.13628923, 0.08533806,
                        0.06296805, 0.06139849, 0.04384967, 0.03852064,
                    ]),
                }
            },
            "solve_successful": [True] * 24,
            "sweep_params": {
                "fs.a": {
                    "units": "None",
                    "value": np.array([
                        0.38344152, 0.4236548, 0.43758721, 0.0187898, 0.0202184,
                        0.06022547, 0.07103606, 0.0871293, 0.10204481, 0.11827443,
                        0.1289263, 0.14335329, 0.16130952, 0.20887676, 0.21038256,
                        0.26455561, 0.31542835, 0.3595079, 0.36371077, 0.41466194,
                        0.43703195, 0.43860151, 0.45615033, 0.46147936,
                    ]),
                }
            },
        }

        read_dict = _read_output_h5(h5_results_file)
        _assert_dictionary_correctness(truth_dict, read_dict)
        assert np.allclose(data[:, -1],
                           read_dict["outputs"]["x_val"]["value"][:num_samples])

        # Check for the companion text file
        txt_fpath = os.path.join(tmp_path, "{0}.txt".format(h5_results_file))
        assert os.path.exists(txt_fpath)

        truth_txt_dict = {
            "outputs": ["x_val"],
            "sweep_params": ["fs.a"],
        }

        with open(txt_fpath, "r") as f:
            f_contents = f.read()
            read_txt_dict = ast.literal_eval(f_contents)
        assert read_txt_dict == truth_txt_dict
def test_parameter_sweep(self, model, tmp_path):
    comm, rank, num_procs = _init_mpi()
    tmp_path = _get_rank0_path(comm, tmp_path)

    m = model
    m.fs.slack_penalty = 1000.
    m.fs.slack.setub(0)

    A = m.fs.input['a']
    B = m.fs.input['b']
    sweep_params = {A.name: (A, 0.1, 0.9, 3),
                    B.name: (B, 0.0, 0.5, 3)}
    outputs = {
        'output_c': m.fs.output['c'],
        'output_d': m.fs.output['d'],
        'performance': m.fs.performance
    }
    results_file = os.path.join(tmp_path, 'global_results.csv')
    h5_fname = "output_dict"

    # Call the parameter_sweep function
    parameter_sweep(m, sweep_params, outputs=outputs,
                    csv_results_file=results_file,
                    h5_results_file=h5_fname,
                    optimize_function=_optimization,
                    debugging_data_dir=tmp_path,
                    mpi_comm=comm)

    # NOTE: rank 0 "owns" tmp_path, so it needs to be responsible for doing
    #       any output file checking. tmp_path can be deleted as soon as this
    #       method returns.
    if rank == 0:
        # Check that the global results file is created
        assert os.path.isfile(results_file)

        # Check that all local output files have been created
        for k in range(num_procs):
            assert os.path.isfile(os.path.join(tmp_path, f'local_results_{k:03}.csv'))

        # Attempt to read in the data
        data = np.genfromtxt(results_file, skip_header=1, delimiter=',')

        # Compare the last row of the imported data to truth
        truth_data = [0.9, 0.5, np.nan, np.nan, np.nan]
        assert np.allclose(data[-1], truth_data, equal_nan=True)

    # Check for the h5 output
    if rank == 0:
        truth_dict = {
            'outputs': {
                'output_c': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([0.2, 0.2, np.nan, 1., 1., np.nan,
                                       np.nan, np.nan, np.nan])},
                'output_d': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([0., 0.75, np.nan, 0., 0.75, np.nan,
                                       np.nan, np.nan, np.nan])},
                'performance': {
                    'value': np.array([0.2, 0.95, np.nan, 1., 1.75, np.nan,
                                       np.nan, np.nan, np.nan])}},
            'solve_successful': [True, True, False, True, True,
                                 False, False, False, False],
            'sweep_params': {
                'fs.input[a]': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([0.1, 0.1, 0.1, 0.5, 0.5, 0.5, 0.9, 0.9, 0.9])},
                'fs.input[b]': {
                    'lower bound': 0,
                    'units': 'None',
                    'upper bound': 0,
                    'value': np.array([0., 0.25, 0.5, 0., 0.25, 0.5, 0., 0.25, 0.5])}}}

        h5_fpath = os.path.join(tmp_path, 'output_dict.h5')
        read_dict = _read_output_h5(h5_fpath)
        _assert_dictionary_correctness(truth_dict, read_dict)
        _assert_h5_csv_agreement(results_file, read_dict)

        # Check if there is a text file created
        import ast

        truth_txt_dict = {
            'outputs': ['output_c', 'output_d', 'performance'],
            'sweep_params': ['fs.input[a]', 'fs.input[b]']
        }

        txt_fpath = os.path.join(tmp_path, '{0}.txt'.format(h5_fname))
        assert os.path.exists(txt_fpath)
        f = open(txt_fpath)
        f_contents = f.read()
        read_txt_dict = ast.literal_eval(f_contents)
        assert read_txt_dict == truth_txt_dict
def test_create_local_output_skeleton(self, model):
    comm, rank, num_procs = _init_mpi()

    m = model
    m.fs.slack_penalty = 1000.
    m.fs.slack.setub(0)

    sweep_params = {
        'input_a': (m.fs.input['a'], 0.1, 0.9, 3),
        'input_b': (m.fs.input['b'], 0.0, 0.5, 3)
    }
    outputs = {
        'output_c': m.fs.output['c'],
        'output_d': m.fs.output['d'],
        'performance': m.fs.performance
    }

    sweep_params, sampling_type = _process_sweep_params(sweep_params)
    values = _build_combinations(sweep_params, sampling_type, None,
                                 comm, rank, num_procs)
    num_cases = np.shape(values)[0]

    output_dict, outputs = _create_local_output_skeleton(
        model, sweep_params, None, num_cases)

    truth_dict = {
        'outputs': {
            'fs.output[c]': {
                'lower bound': 0,
                'units': 'None',
                'upper bound': 0,
                'value': np.array([0., 0., 0., 0., 0., 0., 0., 0., 0.])},
            'fs.output[d]': {
                'lower bound': 0,
                'units': 'None',
                'upper bound': 0,
                'value': np.array([0., 0., 0., 0., 0., 0., 0., 0., 0.])},
            'fs.performance': {
                'value': np.array([0., 0., 0., 0., 0., 0., 0., 0., 0.])},
            'fs.slack[ab_slack]': {
                'lower bound': 0,
                'units': 'None',
                'upper bound': 0,
                'value': np.array([0., 0., 0., 0., 0., 0., 0., 0., 0.])},
            'fs.slack[cd_slack]': {
                'lower bound': 0,
                'units': 'None',
                'upper bound': 0,
                'value': np.array([0., 0., 0., 0., 0., 0., 0., 0., 0.])},
            'objective': {
                'value': np.array([0., 0., 0., 0., 0., 0., 0., 0., 0.])}},
        'sweep_params': {
            'fs.input[a]': {
                'lower bound': 0,
                'units': 'None',
                'upper bound': 0,
                'value': np.array([0., 0., 0., 0., 0., 0., 0., 0., 0.])},
            'fs.input[b]': {
                'lower bound': 0,
                'units': 'None',
                'upper bound': 0,
                'value': np.array([0., 0., 0., 0., 0., 0., 0., 0., 0.])}}}

    _assert_dictionary_correctness(truth_dict, output_dict)
def test_h5_read_write(self, tmp_path):
    comm, rank, num_procs = _init_mpi()
    tmp_path = _get_rank0_path(comm, tmp_path)

    reference_dict = {
        'outputs': {
            'fs.input[a]': {
                'lower bound': 0,
                'units': 'None',
                'upper bound': 0,
                'value': np.array([0., 0., 0., 0., 0., 0., 0., 0., 0.])},
            'fs.input[b]': {
                'lower bound': 0,
                'units': 'None',
                'upper bound': 0,
                'value': np.array([0., 0., 0., 0., 0., 0., 0., 0., 0.])},
            'fs.output[c]': {
                'lower bound': 0,
                'units': 'None',
                'upper bound': 0,
                'value': np.array([0.2, 0.2, 0., 1., 1., 0., 0., 0., 0.])},
            'fs.output[d]': {
                'lower bound': 0,
                'units': 'None',
                'upper bound': 0,
                'value': np.array([0., 0.75, 0., 0., 0.75, 0., 0., 0., 0.])},
            'fs.slack[ab_slack]': {
                'lower bound': 0,
                'units': 'None',
                'upper bound': 0,
                'value': np.array([0., 0., 0., 0., 0., 0., 0., 0., 0.])},
            'fs.slack[cd_slack]': {
                'lower bound': 0,
                'units': 'None',
                'upper bound': 0,
                'value': np.array([0., 0., 0., 0., 0., 0., 0., 0., 0.])}},
        'solve_successful': [True] * 9,
        'sweep_params': {
            'fs.input[a]': {
                'lower bound': 0,
                'units': 'None',
                'upper bound': 0,
                'value': np.array([0.1, 0.1, 0., 0.5, 0.5, 0., 0., 0., 0.])},
            'fs.input[b]': {
                'lower bound': 0,
                'units': 'None',
                'upper bound': 0,
                'value': np.array([0., 0.25, 0., 0., 0.25, 0., 0., 0., 0.])}}}

    h5_fname = "h5_test_{0}.h5".format(rank)
    _write_output_to_h5(reference_dict, output_directory=tmp_path, fname=h5_fname)

    read_dictionary = _read_output_h5(os.path.join(tmp_path, h5_fname))
    _assert_dictionary_correctness(reference_dict, read_dictionary)
def test_aggregate_filtered_input_arr():
    comm, rank, num_procs = _init_mpi()

    input_dict = {
        "outputs": {
            "x_val": {
                "lower bound": None,
                "units": "None",
                "upper bound": None,
                "value": np.array([
                    0.31655848, 0.2763452, 0.26241279, 0.15511682, 0.1511865,
                    0.09723662, 0.05410589, 0.6797816, 0.62896394, 0.6128707,
                    0.23852064, 0.17110508, 0.13195544,
                ]),
            }
        },
        "solve_successful": [True] * 13,
        "sweep_params": {
            "fs.a": {
                "units": "None",
                "value": np.array([
                    0.38344152, 0.4236548, 0.43758721, 0.54488318, 0.5488135,
                    0.60276338, 0.64589411, 0.0202184, 0.07103606, 0.0871293,
                    0.46147936, 0.52889492, 0.56804456,
                ]),
            }
        },
    }

    truth_arr = np.array([
        0.38344152, 0.4236548, 0.43758721, 0.54488318, 0.5488135,
        0.60276338, 0.64589411, 0.0202184, 0.07103606, 0.0871293,
        0.46147936, 0.52889492, 0.56804456,
    ])

    req_num_samples = 10
    values_arr = _aggregate_filtered_input_arr(input_dict, req_num_samples,
                                               comm, rank, num_procs)

    assert np.allclose(values_arr[:, 0], truth_arr[0:req_num_samples], equal_nan=True)
def recursive_parameter_sweep(
    model,
    sweep_params,
    outputs=None,
    csv_results_file_name=None,
    h5_results_file_name=None,
    optimize_function=_default_optimize,
    optimize_kwargs=None,
    reinitialize_function=None,
    reinitialize_kwargs=None,
    reinitialize_before_sweep=False,
    mpi_comm=None,
    debugging_data_dir=None,
    interpolate_nan_outputs=False,
    req_num_samples=None,
    seed=None,
):

    # Get an MPI communicator
    comm, rank, num_procs = _init_mpi(mpi_comm)

    # Convert sweep_params to LinearSamples
    sweep_params, sampling_type = _process_sweep_params(sweep_params)

    # Set the seed before sampling
    np.random.seed(seed)

    # Set up optimize_kwargs
    if optimize_kwargs is None:
        optimize_kwargs = dict()
    # Set up reinitialize_kwargs
    if reinitialize_kwargs is None:
        reinitialize_kwargs = dict()

    n_samples_remaining = req_num_samples
    num_total_samples = req_num_samples

    local_output_collection = {}
    loop_ctr = 0
    while n_samples_remaining > 0 and loop_ctr < 10:
        # Enumerate/sample the parameter space
        global_values = _build_combinations(
            sweep_params, sampling_type, num_total_samples, comm, rank, num_procs
        )

        # Divide the workload between processors
        local_values = _divide_combinations(global_values, rank, num_procs)
        local_num_cases = np.shape(local_values)[0]
        if loop_ctr == 0:
            true_local_num_cases = local_num_cases

        local_output_collection[loop_ctr] = _do_param_sweep(
            model,
            sweep_params,
            outputs,
            local_values,
            optimize_function,
            optimize_kwargs,
            reinitialize_function,
            reinitialize_kwargs,
            reinitialize_before_sweep,
            comm,
        )

        # Get the number of successful solves on this proc (sum of boolean flags)
        success_count = sum(local_output_collection[loop_ctr]["solve_successful"])
        failure_count = local_num_cases - success_count

        # Get the global number of successful solves and update the number of remaining samples
        if num_procs > 1:  # pragma: no cover
            global_success_count = np.zeros(1, dtype=np.float64)
            global_failure_count = np.zeros(1, dtype=np.float64)
            comm.Allreduce(np.array(success_count, dtype=np.float64), global_success_count)
            comm.Allreduce(np.array(failure_count, dtype=np.float64), global_failure_count)
        else:
            global_success_count = success_count
            global_failure_count = failure_count

        success_prob = global_success_count / (global_failure_count + global_success_count)

        if success_prob < 0.1:
            warnings.warn(
                f"Success rate of solves = {100.0*success_prob}%, consider adjusting sweep limits."
            )

        n_samples_remaining -= global_success_count

        # The total number of samples to generate at the next iteration is a
        # multiple of the total remaining samples
        scale_factor = 2.0 / max(success_prob, 0.10)
        num_total_samples = int(np.ceil(scale_factor * n_samples_remaining))
        loop_ctr += 1

    # Now that we have all of the local output dictionaries, we need to construct
    # a consolidated dictionary based on a filter, e.g., optimal solves.
    local_filtered_dict, local_n_successful = _filter_recursive_solves(
        model, sweep_params, outputs, local_output_collection, comm
    )

    # If we are debugging
    if debugging_data_dir is not None:
        local_filtered_values = np.zeros(
            (local_n_successful, len(local_filtered_dict["sweep_params"])),
            dtype=np.float64,
        )
        for i, (key, item) in enumerate(local_filtered_dict["sweep_params"].items()):
            local_filtered_values[:, i] = item["value"][:]
    else:
        local_filtered_values = None

    # Now that we have all of the successful outputs in a consolidated dictionary
    # locally, we can construct a global dictionary of successful solves.
    (
        global_filtered_dict,
        global_filtered_results,
        global_filtered_values,
    ) = _aggregate_filtered_results(local_filtered_dict, req_num_samples,
                                    comm, rank, num_procs)

    # Now we can save this
    if num_procs > 1:  # pragma: no cover
        comm.Barrier()

    global_save_data = _save_results(
        sweep_params,
        local_filtered_values,
        global_filtered_values,
        local_filtered_dict,
        global_filtered_dict,
        global_filtered_results,
        csv_results_file_name,
        h5_results_file_name,
        debugging_data_dir,
        comm,
        rank,
        num_procs,
        interpolate_nan_outputs,
    )

    return global_save_data
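# --- Illustrative usage sketch (not part of the original source) ---
# A minimal call pattern for recursive_parameter_sweep(), mirroring the signature
# above and the test elsewhere in this suite; the model `m`, the sampled parameter
# `m.fs.a`, and the output `m.fs.x` are placeholders borrowed from that test.
#
#   sweep_params = {"a_val": UniformSample(m.fs.a, 0.0, 1.0)}
#   outputs = {"x_val": m.fs.x}
#   data = recursive_parameter_sweep(
#       m, sweep_params, outputs=outputs,
#       csv_results_file_name="global_results.csv",
#       h5_results_file_name="global_results.h5",
#       req_num_samples=10, seed=0,
#   )
#
# The sweep keeps resampling until req_num_samples successful solves are collected
# (or the internal iteration limit is reached), then writes the filtered results.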
def test_random_build_combinations(self):
    comm, rank, num_procs = _init_mpi()

    nn = int(1e5)

    # Uniform random sampling [lower_limit, upper_limit]
    A_param = pyo.Param(initialize=-10.0, mutable=True)
    B_param = pyo.Param(initialize=0.0, mutable=True)
    C_param = pyo.Param(initialize=10.0, mutable=True)

    range_A = [-10.0, 0.0]
    range_B = [0.0, 10.0]
    range_C = [10.0, 20.0]

    param_dict = dict()
    param_dict['var_A'] = UniformSample(A_param, range_A[0], range_A[1])
    param_dict['var_B'] = UniformSample(B_param, range_B[0], range_B[1])
    param_dict['var_C'] = UniformSample(C_param, range_C[0], range_C[1])

    global_combo_array = _build_combinations(param_dict, SamplingType.RANDOM, nn,
                                             comm, rank, num_procs)

    assert np.shape(global_combo_array)[0] == nn
    assert np.shape(global_combo_array)[1] == len(param_dict)

    assert np.all(range_A[0] < global_combo_array[:, 0])
    assert np.all(range_B[0] < global_combo_array[:, 1])
    assert np.all(range_C[0] < global_combo_array[:, 2])

    assert np.all(global_combo_array[:, 0] < range_A[1])
    assert np.all(global_combo_array[:, 1] < range_B[1])
    assert np.all(global_combo_array[:, 2] < range_C[1])

    # Normal random sampling [mean, stdev]
    A_param = pyo.Param(initialize=10.0, mutable=True)
    B_param = pyo.Param(initialize=100.0, mutable=True)
    C_param = pyo.Param(initialize=1000.0, mutable=True)

    range_A = [10.0, 5.0]
    range_B = [100.0, 50.0]
    range_C = [1000.0, 0.0]

    param_dict = dict()
    param_dict['var_A'] = NormalSample(A_param, range_A[0], range_A[1])
    param_dict['var_B'] = NormalSample(B_param, range_B[0], range_B[1])
    param_dict['var_C'] = NormalSample(C_param, range_C[0], range_C[1])

    global_combo_array = _build_combinations(param_dict, SamplingType.RANDOM, nn,
                                             comm, rank, num_procs)

    assert np.shape(global_combo_array)[0] == nn
    assert np.shape(global_combo_array)[1] == len(param_dict)

    assert np.mean(global_combo_array[:, 0]) < (range_A[0] + range_A[1])
    assert np.mean(global_combo_array[:, 1]) < (range_B[0] + range_B[1])

    assert (range_A[0] - range_A[1]) < np.mean(global_combo_array[:, 0])
    assert (range_B[0] - range_B[1]) < np.mean(global_combo_array[:, 1])

    assert np.all(global_combo_array[:, 2] == range_C[0])