def test_nevergrad_optimizer(self):
    self.mco_model.optimizer_mode = "NeverGrad"
    self.mco_model.optimizer.single_point_evaluator = self.evaluator
    mock_kpi_return = [DataValue(value=0.1), DataValue(value=0.2)]

    with mock.patch("force_bdss.api.Workflow.execute",
                    return_value=mock_kpi_return) as mock_exec:
        for point, value, _ in self.mco_model.optimizer.optimize():
            self.assertListEqual([0.1, 0.2], list(value))
        self.assertEqual(self.mco_model.optimizer.budget,
                         mock_exec.call_count)

    # Testing verbose_run = False mode
    with mock.patch("force_bdss.api.Workflow.execute",
                    return_value=mock_kpi_return):
        optimizer_return = list(self.mco_model.optimizer.optimize())
        self.assertEqual(1, len(optimizer_return))

    # Testing verbose_run = True mode
    self.mco_model.optimizer.verbose_run = True
    with mock.patch("force_bdss.api.Workflow.execute",
                    return_value=mock_kpi_return):
        optimizer_return = list(self.mco_model.optimizer.optimize())
        self.assertEqual(self.mco_model.optimizer.budget,
                         len(optimizer_return))

    # Testing infeasible KPI values that are out of KPI bounds
    mock_kpi_return = [DataValue(value=2), DataValue(value=3)]
    self.mco_model.optimizer.verbose_run = False
    with mock.patch("force_bdss.api.Workflow.execute",
                    return_value=mock_kpi_return) as mock_exec:
        self.assertEqual(
            0, len(list(self.mco_model.optimizer.optimize())))
        self.assertEqual(self.mco_model.optimizer.budget,
                         mock_exec.call_count)
def get_data_values(self):
    """Return a list containing all DataValues stored in class"""
    return [
        DataValue(type="MASS", value=self.mass),
        DataValue(type="CHARGE", value=self.charge)
    ]
def run(self, evaluator):
    """Run the MCO with the desired method and communicate the results."""
    model = evaluator.mco_model

    if model.evaluation_mode == "Subprocess":
        # Here we create an instance of our WorkflowEvaluator subclass
        # that allows for evaluation of a state in the workflow by calling
        # force_bdss on a new subprocess running in 'evaluate' mode.
        # Note: a BaseMCOCommunicator must be present to pass in parameter
        # values and return the KPIs for a force_bdss run in 'evaluate'
        # mode.
        single_point_evaluator = SubprocessWorkflow(
            mco_model=evaluator.mco_model,
            execution_layers=evaluator.execution_layers,
            notification_listeners=evaluator.notification_listeners,
            executable_path=sys.argv[0])
    else:
        single_point_evaluator = evaluator

    counter = 0
    while counter < model.num_trials:
        counter += 1
        log.info("MCO iteration {}/{}".format(counter, model.num_trials))

        trial_position = np.random.rand(len(model.parameters))
        kpis = single_point_evaluator.evaluate(trial_position)

        single_point_evaluator.mco_model.notify_progress_event(
            [DataValue(value=v) for v in trial_position],
            [DataValue(value=v) for v in kpis])
def run(self, model, parameters):
    formulation = parse_data_values(parameters, "FORMULATION")[0]

    chemical_prices = []
    chemical_concs = []
    chemical_molar_masses = []
    for chemical in formulation.chemicals:
        chemical_prices.append(chemical.price)
        chemical_molar_masses.append(chemical.molecular_weight)
        chemical_concs.append(chemical.concentration)

    total_cost = self.calculate_cost(
        model.PU_volume,
        chemical_prices,
        chemical_concs,
        chemical_molar_masses,
    )
    status = total_cost < model.threshold

    return [
        DataValue(type="COST", value=total_cost),
        DataValue(type="PASS", value=status),
    ]
def run(self, model, parameters):
    temperature = parameters[0].value
    reaction_time = parameters[1].value

    temperature_celsius = self.kelvin_to_celsius(temperature)
    temperature_difference = temperature_celsius - model.temperature_shift

    cost = (
        reaction_time
        * temperature_difference ** 2
        * model.W
    )
    cost_gradient = [
        reaction_time * 2.0 * temperature_difference * model.W,
        temperature_difference ** 2 * model.W
    ]

    return [
        DataValue(
            type="COST",
            value=cost
        ),
        DataValue(
            type="COST_GRADIENT",
            value=cost_gradient
        )
    ]
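# --- Sanity-check sketch (not part of the data source above) ----------------
# A minimal, self-contained check of the analytic cost_gradient returned by
# run(): it compares the two derivatives against a central finite difference
# of the same cost expression. All numeric values and the W /
# temperature_shift names below are illustrative stand-ins, not taken from
# any real model configuration.
import numpy as np

W = 0.1
temperature_shift = 270.0
reaction_time = 30.0
temperature_celsius = 290.0


def cost(t, T_c):
    # Same functional form as in run(): t * (T_c - shift)**2 * W
    return t * (T_c - temperature_shift) ** 2 * W


# Analytic gradient, in the same order as cost_gradient above:
# [d(cost)/d(temperature), d(cost)/d(reaction_time)]
diff = temperature_celsius - temperature_shift
analytic = np.array([reaction_time * 2.0 * diff * W, diff ** 2 * W])

# Central finite-difference approximation of the same two derivatives
eps = 1e-5
numeric = np.array([
    (cost(reaction_time, temperature_celsius + eps)
     - cost(reaction_time, temperature_celsius - eps)) / (2 * eps),
    (cost(reaction_time + eps, temperature_celsius)
     - cost(reaction_time - eps, temperature_celsius)) / (2 * eps),
])

assert np.allclose(analytic, numeric)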
def test_2d_run(self):
    ds = EggboxPESDataSource(self.factory)
    model = EggboxPESDataSourceModel(self.factory)
    model.num_cells = 3
    model.dimension = 2
    model.sigma_star = 0.5

    trial = [0, 1]
    mock_params = [
        DataValue(value=trial[0], type="float"),
        DataValue(value=trial[1], type="float")
    ]
    result = ds.run(model, mock_params)
    self.assertEqual(len(result), 3)
    self.assertEqual(len(model.basin_depths), 9)
    self.assertEqual(len(model.basin_positions), 9)

    trial = [0.2, 0.6]
    mock_params = [
        DataValue(value=trial[0], type="float"),
        DataValue(value=trial[1], type="float")
    ]
    result = ds.run(model, mock_params)
    self.assertEqual(len(result), 3)
    self.assertEqual(len(model.basin_depths), 9)
    self.assertEqual(len(model.basin_positions), 9)
def run(self, evaluator):
    model = evaluator.mco_model

    optimizer = ScipyOptimizer(algorithms=model.algorithms)
    engine = MonteCarloEngine(
        single_point_evaluator=evaluator,
        parameters=model.parameters,
        kpis=model.kpis,
        method=model.method,
        n_sample=model.n_sample,
        optimizer=optimizer)

    formatter = logging.Formatter(
        fmt='%(asctime)s %(levelname)-8s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    screen_handler = logging.StreamHandler(stream=sys.stdout)
    screen_handler.setFormatter(formatter)
    log.addHandler(screen_handler)

    for index, (optimal_point, optimal_kpis) \
            in enumerate(engine.optimize()):
        # When there is new data, this operation informs the system that
        # new data has been received. The values must be passed as lists
        # of DataValue objects, as shown below.
        log.info("Doing MCO run # {}".format(index))
        model.notify_progress_event(
            [DataValue(value=v) for v in optimal_point],
            [DataValue(value=v) for v in optimal_kpis],
        )
def test_dispatch_mco_event(self):
    send_event = self.setup_task._server_event_callback
    self.assertEqual(self.setup_task.analysis_model.header, ())

    with self.event_loop():
        send_event(MCOStartEvent(parameter_names=["x"], kpi_names=["y"]))

    self.assertEqual(
        0, len(self.setup_task.analysis_model.evaluation_steps))
    self.assertEqual(
        0, len(self.setup_task.analysis_model.step_metadata))
    self.assertEqual(("x", "y"), self.setup_task.analysis_model.header)

    with self.event_loop():
        send_event(ProbeUIRuntimeEvent())

    with self.event_loop():
        send_event(
            MCOProgressEvent(
                optimal_point=[DataValue(value=1.0)],
                optimal_kpis=[DataValue(value=2.0)],
            ))

    self.assertEqual(
        1, len(self.setup_task.analysis_model.evaluation_steps))
    self.assertEqual(
        (1.0, 2.0), self.setup_task.analysis_model.evaluation_steps[0])
    self.assertEqual(
        1, len(self.setup_task.analysis_model.step_metadata))
    self.assertDictEqual(
        {'some_metadata': 0},
        self.setup_task.analysis_model.step_metadata[0])
def test_basic_function(self):
    with self.assertTraitChanges(self.model, "event", count=3):
        res = self.data_source.run(
            self.model,
            [DataValue(value=self.filepath), DataValue(value=self.mesh)])

    self.assertAlmostEqual(1.887626e-04, res[0].value[-1][-1])
    self.assertAlmostEqual(0.99028, res[1].value[-1], 3)
    self.assertAlmostEqual(5.786418e-02, res[2].value, 3)
    self.assertAlmostEqual(9.962875e-04, res[3].value)
    self.assertAlmostEqual(1.2113659e+02, res[4].value)
    self.assertAlmostEqual(23.13385205, res[5].value)
    self.assertAlmostEqual(46.0, res[6].value[0][-1])
    self.assertAlmostEqual(0.2, res[6].value[1][-1], 2)
    self.assertAlmostEqual(46.0, res[-1].value[0][-1])
    self.assertAlmostEqual(331.167041, res[-1].value[1][-1])

    self.assertEqual("FOAM_BSD", res[0].type)
    self.assertEqual("FILLING_FRACTION", res[1].type)
    self.assertEqual("OVERPACKING_FRACTION", res[2].type)
    self.assertEqual("FOAM_VISCOSITY", res[3].type)
    self.assertEqual("FOAM_DENSITY", res[4].type)
    self.assertEqual("FOAM_THERM_COND", res[5].type)
    self.assertEqual("FOAM_HEIGHT", res[6].type)
    self.assertEqual("FOAM_TEMPERATURE", res[-1].type)
def test_basic_function(self):
    # Test molecular clustering
    model = self.factory.create_model()
    model.fragment_symbols = ["PS1", "SS"]

    in_slots = self.data_source.slots(model)[0]
    self.assertEqual(2, len(in_slots))

    values = [self.formulation, self.traj_file]
    data_values = [
        DataValue(type=slot.type, value=value)
        for slot, value in zip(in_slots, values)
    ]

    with self.assertTraitChanges(model, "event", count=1):
        res = self.data_source.run(model, data_values)
    self.assertEqual(2, res[0].value)

    # Test atomic clustering
    model.method = "atomic"
    model.atom_thresh = 1

    data_values = [
        DataValue(type=slot.type, value=value)
        for slot, value in zip(in_slots, values)
    ]

    res = self.data_source.run(model, data_values)
    self.assertEqual(2, res[0].value)
def setUp(self):
    self.plugin = SurfactantPlugin()
    self.factory = self.plugin.notification_listener_factories[0]
    self.notification_listener = self.factory.create_listener()
    self.model = self.factory.create_model()
    self.notification_listener.initialize(self.model)

    self.surfactant = ProbePrimaryIngredient()
    self.salt = ProbeSaltIngredient()
    self.solvent = ProbeSolventIngredient()
    self.surfactant.name = "Primary Surfactant"
    self.salt.name = "Sodium Chloride"
    self.solvent.name = "Water"

    self.parameters = [
        DataValue(name="surfactant_name", value="best chemical ever"),
        DataValue(name="surfactant_conc", value=1.0),
        DataValue(name="salt_conc", value=5.0),
    ]
    self.kpis = [
        DataValue(name="viscosity", value=5.7),
        DataValue(name="cost", value=10),
    ]
def run(self, evaluator):
    model = evaluator.mco_model

    if model.evaluation_mode == "Subprocess":
        # Here we create an instance of our WorkflowEvaluator subclass
        # that allows for evaluation of a state in the workflow by calling
        # `force_bdss` on a new subprocess running in 'evaluate' mode.
        # Note: a BaseMCOCommunicator must be present to pass in parameter
        # values and return the KPIs for a `force_bdss` run in 'evaluate'
        # mode.
        evaluator = SubprocessWorkflowEvaluator(
            workflow=evaluator.workflow,
            workflow_filepath=evaluator.workflow_filepath,
            executable_path=sys.argv[0],
        )

    optimizer = model.optimizer
    optimizer.single_point_evaluator = evaluator

    for (
        optimal_point,
        optimal_kpis,
        scaled_weights,
    ) in optimizer.optimize():
        # When there is new data, this operation informs the system that
        # new data has been received. The values must be passed as lists
        # of DataValue objects, as shown below.
        self.notify_new_point(
            [DataValue(value=v) for v in optimal_point],
            [DataValue(value=v) for v in optimal_kpis],
            scaled_weights,
        )
def run(self, model, parameters):
    if model.input_method == 'Model':
        gelling_reaction = {
            "A_OH": model.nu_gelling,
            "E_OH": model.E_a_gelling,
            "deltaOH": model.delta_H_gelling,
            "gellingPoint": model.gelling_point
        }
        blowing_reaction = {
            "A_W": model.nu_blowing,
            "E_W": model.E_a_blowing,
            "deltaW": model.delta_H_blowing,
            "latentHeat": model.latent_heat
        }
    else:
        gelling_reaction = {
            "A_OH": parameters[0].value[0],
            "E_OH": parameters[0].value[1],
            "deltaOH": parameters[0].value[2],
            "gellingPoint": parameters[0].value[3]
        }
        blowing_reaction = {
            "A_W": parameters[1].value[0],
            "E_W": parameters[1].value[1],
            "deltaW": parameters[1].value[2],
            "latentHeat": parameters[1].value[3]
        }

    return [
        DataValue(type="REACTION", value=gelling_reaction),
        DataValue(type="REACTION", value=blowing_reaction)
    ]
def run(self, model, parameters):
    # Run the simulation for the supplied parameters and report the
    # three output quantities as DataValues.
    result = self.run_simulation(model, parameters)

    return [
        DataValue(type="S", value=result[0]),
        DataValue(type="T", value=result[1]),
        DataValue(type="V", value=result[2])
    ]
def run(self, evaluator):
    model = evaluator.mco_model

    optim = ScipyOptimizer(algorithms=model.algorithms)
    optimizer = WeightedOptimizerEngine(
        kpis=model.kpis,
        parameters=model.parameters,
        num_points=model.num_points,
        space_search_mode=model.space_search_mode,
        single_point_evaluator=evaluator,
        verbose_run=model.verbose_run,
        optimizer=optim,
    )

    for (
        optimal_point,
        optimal_kpis,
        scaled_weights,
    ) in optimizer.optimize():
        # When there is new data, this operation informs the system that
        # new data has been received. The values must be passed as lists
        # of DataValue objects, as shown below.
        evaluator.mco_model.notify_progress_event(
            [DataValue(value=v) for v in optimal_point],
            [DataValue(value=v) for v in optimal_kpis],
            weights=scaled_weights,
        )
def run(self, evaluator):
    model = evaluator.mco_model

    engine = NevergradOptimizerEngine(
        kpis=model.kpis,
        parameters=model.parameters,
        single_point_evaluator=evaluator,
        verbose_run=model.verbose_run)

    # Transform the KPI upper bound values using the score function
    upper_bounds = engine.score_upper_bounds()

    # Assign an optimizer configured with the KPI score upper bounds
    engine.optimizer = NevergradMultiOptimizer(
        algorithms=model.algorithms,
        budget=model.budget,
        bound_sample=model.bound_sample,
        upper_bounds=upper_bounds)

    formatter = logging.Formatter(
        fmt='%(asctime)s %(levelname)-8s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    screen_handler = logging.StreamHandler(stream=sys.stdout)
    screen_handler.setFormatter(formatter)
    log.addHandler(screen_handler)

    for index, (optimal_point, optimal_kpis) \
            in enumerate(engine.optimize(verbose_run=model.verbose_run)):
        # When there is new data, this operation informs the system that
        # new data has been received. The values must be passed as lists
        # of DataValue objects, as shown below.
        model.notify_progress_event(
            [DataValue(value=v) for v in optimal_point],
            [DataValue(value=v) for v in optimal_kpis],
        )
def test_internal_weighted_evaluator(self):
    self.mco_model.optimizer.single_point_evaluator = self.evaluator
    mock_kpi_return = [DataValue(value=2), DataValue(value=3)]

    with mock.patch("force_bdss.api.Workflow.execute",
                    return_value=mock_kpi_return) as mock_exec:
        self.mco_model.optimizer._weighted_optimize([0.5, 0.5])
        self.assertEqual(7, mock_exec.call_count)
def run(self, model, parameters):
    return [
        DataValue(type="ARRHENIUS_NU",
                  value=model.nu_main_reaction),
        DataValue(type="ARRHENIUS_DELTA_H",
                  value=model.delta_H_main_reaction),
        DataValue(type="ARRHENIUS_NU",
                  value=model.nu_secondary_reaction),
        DataValue(type="ARRHENIUS_DELTA_H",
                  value=model.delta_H_secondary_reaction)
    ]
def get_data_values(self):
    """Return a list containing all regular DataValues stored in class"""
    data_values = [
        DataValue(type="NAME", value=self.name),
        DataValue(type="MASS", value=self.mass)
    ]
    return data_values
def sample(n, ds, model):
    """Sample the potential of the given data source n times."""
    for _ in range(n):
        random = np.random.rand(2)
        mock_params = [DataValue(value=random[0], type="float"),
                       DataValue(value=random[1], type="float")]
        ds.run(model, mock_params)
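# --- Usage sketch for sample() (hypothetical stand-in data source) ----------
# The StubDataSource below is not part of any plugin; it only exists to show
# the run(model, parameters) calling convention that sample() expects. The
# force_bdss.api import path for DataValue is assumed from the mocks used in
# the tests above.
from force_bdss.api import DataValue


class StubDataSource:
    """Hypothetical stand-in exposing the run(model, parameters) signature."""

    def run(self, model, parameters):
        x, y = (p.value for p in parameters)
        return [DataValue(value=x + y, type="float")]


# Evaluate the stub "potential" at five random points in the unit square
sample(5, StubDataSource(), model=None)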
def get_data_values(self):
    """Return a list containing all DataValues stored in class,
    including the new `price` attribute"""
    data_values = [
        DataValue(type="NAME", value=self.name),
        DataValue(type="PRICE", value=self.price),
        DataValue(type="MASS", value=self.mass)
    ]
    return data_values
def test__update_simulation_data(self):
    is_updated = self.data_source._update_simulation_data(
        DataValue(type="A_OH", value=2))
    self.assertTrue(is_updated)
    self.assertEqual(
        2,
        self.data_source.data_dicts[0].data["GellingConstants"]["A_OH"])

    is_updated = self.data_source._update_simulation_data(
        DataValue(type="B_OH", value=2))
    self.assertFalse(is_updated)
def test_run(self):
    self.model.peak = 2.0
    self.model.wavelength_x = 1.0
    self.model.wavelength_y = 1.0
    mock_params = [
        DataValue(value=0.75),
        DataValue(value=0.75),
    ]
    result = self.ds.run(self.model, mock_params)
    self.assertAlmostEqual(result[0].value, -2.0)
def test_run(self):
    self.model.peak = -2.0
    self.model.angle = 3.1415926 / 2.0
    self.model.offset = 0.0
    self.model.sigma = 1.0
    mock_params = [
        DataValue(value=1.0),
        DataValue(value=0.0),
    ]
    result = self.ds.run(self.model, mock_params)
    self.assertAlmostEqual(result[0].value, -2.0)
def run(self, model, parameters):
    sim_output_file = parameters[0].value
    mesh = parameters[1].value

    datafile_specification = {"n_system_lines": model.n_system_lines}
    markers, data = self.read_datafile(
        sim_output_file, datafile_specification)

    foam_bsd = self.calculate_foam_bsd(markers, data)
    _, foam_filling = self.calculate_filling_fraction(markers, data)
    foam_overpacking = self.calculate_overpacking_fraction(markers, data)
    foam_viscosity = self.calculate_viscosity(markers, data)[-1]
    foam_density = self.calculate_foam_density(markers, data)[-1]
    foam_conductivity = self.calculate_foam_thermal_conductivity(
        markers, data)[-1]
    foam_height = self.calculate_height_profile(markers, data, mesh)
    foam_temperature = self.calculate_temperature_profile(markers, data)

    # Broadcast simulation time series to be recorded as metadata
    # for the run
    model.notify_time_series(foam_bsd, name='bsd_profile')
    model.notify_time_series(foam_height, name='height_profile')
    model.notify_time_series(foam_temperature, name='temp_profile')

    return [
        DataValue(type="FOAM_BSD", value=foam_bsd),
        DataValue(type="FILLING_FRACTION", value=foam_filling),
        DataValue(type="OVERPACKING_FRACTION", value=foam_overpacking),
        DataValue(type="FOAM_VISCOSITY", value=foam_viscosity),
        DataValue(type="FOAM_DENSITY", value=foam_density),
        DataValue(type="FOAM_THERM_COND", value=foam_conductivity),
        DataValue(type="FOAM_HEIGHT", value=foam_height),
        DataValue(type="FOAM_TEMPERATURE", value=foam_temperature),
    ]
def test_run(self):
    self.model.peak = -2.0
    self.model.cent_x = -1.0
    self.model.cent_y = -1.0
    self.model.sigm_x = 1.0
    self.model.sigm_y = 1.0
    mock_params = [
        DataValue(value=-1.0),
        DataValue(value=-1.0),
    ]
    result = self.ds.run(self.model, mock_params)
    self.assertAlmostEqual(result[0].value, -2.0)
def run(self, model, parameters):
    """Compare a PUFoam height and temperature time series with a
    reference data set stored on file. Can handle input reference data
    either on the model or as an MCO parameter. Expects the height
    values in `parameters` to be reported from the Workflow in meters
    and the temperature values to be reported in Kelvin.
    """
    height_profile = parameters[0].value
    temp_profile = parameters[1].value

    if model.input_method == 'Model':
        ref_data = self._get_reference_data(model)
        x_data = ref_data[0]['time[s]']
        height_data = ref_data[0]['height[mm]'] * 1e-3
        if 'temperature[C]' in ref_data[0]:
            # Convert any temperature data in Celsius to Kelvin
            temp_data = ref_data[0]['temperature[C]'] + 273
        else:
            temp_data = ref_data[0]['temperature[K]']
    else:
        ref_data = parameters[2].value
        x_data, height_data = np.array(ref_data)
        ref_data = parameters[3].value
        x_data, temp_data = np.array(ref_data)

    # Notify reference data sets to any listeners present
    model.notify_time_series(
        np.array([x_data, height_data]), name='ref_height_profile'
    )
    model.notify_time_series(
        np.array([x_data, temp_data]), name='ref_temp_profile'
    )

    # Calculate residuals between the simulation and reference data sets
    # for both time series
    height_residuals = self.calculate_residuals(
        height_profile, (x_data, height_data)
    )
    temp_residuals = self.calculate_residuals(
        temp_profile, (x_data, temp_data)
    )

    return [DataValue(type="RESIDUALS", value=height_residuals),
            DataValue(type="RESIDUALS", value=temp_residuals)]
def test_internal_run(self):
    opt = self.factory.create_optimizer()
    model = self.factory.create_model()
    model.num_trials = 7
    model.evaluation_mode = 'Internal'

    parameter_factory = self.factory.parameter_factories[0]
    model.parameters = [DummyMCOParameter(parameter_factory)]
    model.kpis = [KPISpecification()]
    self.evaluator.mco_model = model

    kpis = [DataValue(value=1), DataValue(value=2)]
    with mock.patch('force_bdss.api.Workflow.execute',
                    return_value=kpis) as mock_exec:
        opt.run(self.evaluator)
        self.assertEqual(mock_exec.call_count, 7)
def run(self, evaluator):
    # This implementation mimics the expected behavior of Dakota
    # by spawning force_bdss with the 'evaluate' option to compute
    # a single point. Your specific implementation should be quite
    # different, in the sense that it is supposed to spawn your MCO
    # as a separate process, collect its results via stdout or any
    # other more appropriate channel, and notify using the mechanisms
    # explained above.
    model = evaluator.mco_model
    parameters = model.parameters

    # Generate specific parameter values from the specification in
    # the model. This combines all the ranges and generates points
    # in those ranges.
    values = []
    for p in parameters:
        values.append(
            rotated_range(int(p.lower_bound),
                          int(p.upper_bound),
                          int(p.initial_value)))

    value_iterator = itertools.product(*values)

    # Here we create an instance of our WorkflowEvaluator subclass
    # that allows for evaluation of a state in the workflow by calling
    # force_bdss on a new subprocess running in 'evaluate' mode.
    # Note: a BaseMCOCommunicator must be present to pass in parameter
    # values and return the KPIs for a force_bdss run in 'evaluate'
    # mode.
    single_point_evaluator = SubprocessWorkflow(
        mco_model=evaluator.mco_model,
        execution_layers=evaluator.execution_layers,
        notification_listeners=evaluator.notification_listeners,
        executable_path=sys.argv[0])

    for value in value_iterator:
        # We pass the specific parameter values via stdin, and read
        # the result via stdout. The format is decided by the
        # MCOCommunicator. NOTE: the communicator is involved in the
        # communication between the MCO executable and the bdss single
        # point evaluation, _not_ between the bdss and the MCO executable.
        kpis = single_point_evaluator.evaluate(value)

        # When there is new data, this operation informs the system that
        # new data has been received. The values must be passed as lists
        # of DataValue objects, as shown below.
        single_point_evaluator.mco_model.notify_progress_event(
            [DataValue(value=v) for v in value],
            [DataValue(value=v) for v in kpis])
def run(self, model, parameters):
    """Compute the potential at the requested trial point.

    If `model.locally_optimize` is True, then use SciPy to find the
    local minimum corresponding to the trial point.

    Parameters
    ----------
    model: :obj:`EggboxPESDataSourceModel`
        the model containing the parameters of the PES.
    parameters: :obj:`list` of :obj:`DataValue`
        list of parameters passed as DataValue objects, that match
        the input slots of this instance.
    """
    x0 = [float(param.value) for param in parameters]
    if np.max(np.abs(x0)) > 1:
        raise RuntimeError(
            'MCO trial outside of bounds [0, 1]: {}'.format(x0))

    bounds = np.array([[0, 1] for i in range(model.dimension)])
    model.trials.append(x0)

    if model.locally_optimize:
        result = scipy.optimize.minimize(
            self.evaluate_potential, x0, args=(model, ), bounds=bounds)
        results = [
            DataValue(value=result.x[i], type=model.cuba_design_space_type)
            for i in range(model.dimension)
        ]
        results.append(
            DataValue(value=result.fun, type=model.cuba_potential_type))
        model.results.append(result.x)
    else:
        result = self.evaluate_potential(x0, model)
        results = [
            DataValue(value=x0[i], type=model.cuba_design_space_type)
            for i in range(model.dimension)
        ]
        results.append(
            DataValue(value=result, type=model.cuba_potential_type))
        model.results.append(x0)

    return results