def test_to_platypus_types(self, mocked_platypus):
    dv = [RealParameter("real", 0, 1),
          IntegerParameter("integer", 0, 10),
          CategoricalParameter("categorical", ["a", "b"])]
    types = to_platypus_types(dv)

    self.assertTrue(str(types[0]).find("platypus.Real") != -1)
    self.assertTrue(str(types[1]).find("platypus.Integer") != -1)
    self.assertTrue(str(types[2]).find("platypus.Permutation") != -1)
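# Minimal usage sketch (not part of the test above): the same conversion run
# against the real platypus package instead of the mock. The import location of
# to_platypus_types (ema_workbench.em_framework.optimization) and the presence
# of platypus-opt are assumptions and may differ between workbench versions.
from ema_workbench import RealParameter, IntegerParameter, CategoricalParameter
from ema_workbench.em_framework.optimization import to_platypus_types

dv = [RealParameter("real", 0, 1),
      IntegerParameter("integer", 0, 10),
      CategoricalParameter("categorical", ["a", "b"])]

for parameter, platypus_type in zip(dv, to_platypus_types(dv)):
    # per the test above: Real, Integer, and Permutation respectively
    print(parameter.name, type(platypus_type).__name__)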
def test_run_model(self):
    model_name = 'modelname'

    function = mock.Mock()
    model = Model(model_name, function)
    model.uncertainties = [RealParameter('a', 0, 1)]
    model.run_model(Scenario(**{'a': 0.1, 'b': 1}), Policy('test'))
    function.assert_called_once_with(a=0.1)

    # test complete translation of scenario
    model = Model(model_name, function)
    model.uncertainties = [RealParameter('a', 0, 1, variable_name=['a', 'b'])]

    scenario = Scenario(**{'a': 0.1})
    model.run_model(scenario, Policy('test'))

    self.assertIn('a', scenario.keys())
    self.assertIn('b', scenario.keys())

    model = Model(model_name, function)
    cats = [Category('some name', [1, 2], multivalue=True),
            Category('some other name', [3, 4], multivalue=True)]
    model.uncertainties = [CategoricalParameter('a', cats,
                                                variable_name=['a', 'b'])]

    scenario = Scenario(**{'a': 'some name'})
    model.run_model(scenario, Policy('test'))

    self.assertIn('a', scenario.keys())
    self.assertIn('b', scenario.keys())
    self.assertEqual(scenario['a'], 1)
    self.assertEqual(scenario['b'], 2)

    scenario = Scenario(**{'a': 'some other name'})
    model.run_model(scenario, Policy('test'))

    self.assertIn('a', scenario.keys())
    self.assertIn('b', scenario.keys())
    self.assertEqual(scenario['a'], 3)
    self.assertEqual(scenario['b'], 4)
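# Minimal sketch (not part of the test above) of the variable_name translation
# that test_run_model checks: a single uncertainty 'a' fanned out to the model
# variables 'a' and 'b'. The top-level imports of Scenario and Policy are an
# assumption and may need to come from ema_workbench.em_framework in other
# versions.
from unittest import mock

from ema_workbench import Model, RealParameter, Scenario, Policy

function = mock.Mock()
model = Model("example", function)
model.uncertainties = [RealParameter("a", 0, 1, variable_name=["a", "b"])]

scenario = Scenario(a=0.25)
model.run_model(scenario, Policy("default"))

# after running, the scenario holds entries for both model variables
assert "a" in scenario and "b" in scenario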
def test_store_cases(self):
    nr_experiments = 3
    uncs = [RealParameter("a", 0, 1),
            RealParameter("b", 0, 1),
            CategoricalParameter('c', [0, 1, 2]),
            IntegerParameter("d", 0, 1)]
    outcomes = [TimeSeriesOutcome("test")]
    constraints = []
    case = {unc.name: random.random() for unc in uncs}
    case["c"] = int(round(case["c"] * 2))
    case["d"] = int(round(case["d"]))

    model = NamedObject('test')
    policy = Policy('policy')
    scenario = Scenario(**case)
    experiment = Case(0, model.name, policy, scenario, 0)

    callback = DefaultCallback(uncs, [], outcomes, constraints,
                               nr_experiments=nr_experiments,
                               reporting_interval=1)
    model_outcomes = {outcomes[0].name: 1}
    model_constraints = {}
    callback(experiment, model_outcomes, model_constraints)

    experiments, _ = callback.get_results()
    design = case
    design['policy'] = policy.name
    design['model'] = model.name
    design['scenario_id'] = scenario.name

    names = rf.get_names(experiments.dtype)
    for name in names:
        entry_a = experiments[name][0]
        entry_b = design[name]
        self.assertEqual(entry_a, entry_b, "failed for " + name)

    # with levers
    nr_experiments = 3
    uncs = [RealParameter("a", 0, 1),
            RealParameter("b", 0, 1)]
    levers = [RealParameter("c", 0, 1),
              RealParameter("d", 0, 1)]
    outcomes = [TimeSeriesOutcome("test")]
    case = {unc.name: random.random() for unc in uncs}

    model = NamedObject('test')
    policy = Policy('policy', c=1, d=1)
    scenario = Scenario(**case)
    experiment = Case(0, model.name, policy, scenario, 0)

    callback = DefaultCallback(uncs, levers, outcomes, constraints,
                               nr_experiments=nr_experiments,
                               reporting_interval=1)
    model_outcomes = {outcomes[0].name: 1}
    model_constraints = {}
    callback(experiment, model_outcomes, model_constraints)

    experiments, _ = callback.get_results()
    design = case
    design['c'] = 1
    design['d'] = 1
    design['policy'] = policy.name
    design['model'] = model.name
    design['scenario_id'] = scenario.name

    names = rf.get_names(experiments.dtype)
    for name in names:
        self.assertEqual(experiments[name][0], design[name])
# specify uncertainties
lake_model.uncertainties = [RealParameter('b', 0.1, 0.45),
                            RealParameter('q', 2.0, 4.5),
                            RealParameter('mean', 0.01, 0.05),
                            RealParameter('stdev', 0.001, 0.005),
                            RealParameter('delta', 0.93, 0.99)]

# set levers
lake_model.levers = [RealParameter("c1", -2, 2),
                     RealParameter("c2", -2, 2),
                     RealParameter("r1", 0, 2),
                     RealParameter("r2", 0, 2),
                     CategoricalParameter("w1", np.linspace(0, 1, 10))]

# specify outcomes
lake_model.outcomes = [ScalarOutcome('max_P', kind=ScalarOutcome.MINIMIZE),
                       ScalarOutcome('utility', kind=ScalarOutcome.MAXIMIZE),
                       ScalarOutcome('inertia', kind=ScalarOutcome.MINIMIZE),
                       ScalarOutcome('reliability', kind=ScalarOutcome.MAXIMIZE)]

# override some of the defaults of the model
lake_model.constants = [
class SamplerTestCase(unittest.TestCase):
    uncertainties = [RealParameter("1", 0, 10),
                     IntegerParameter("2", 0, 10),
                     CategoricalParameter('3', ['a', 'b', 'c'])]

    def _test_generate_designs(self, sampler):
        designs = sampler.generate_designs(self.uncertainties, 10)
        designs.kind = Scenario
        msg = 'tested for {}'.format(type(sampler))

        actual_nr_designs = 0
        for design in designs:
            actual_nr_designs += 1

        self.assertIn('1', design, msg)
        self.assertIn('2', design, msg)
        self.assertIn('3', design, msg)
        self.assertEqual(designs.n, actual_nr_designs, msg)

    def test_lhs_sampler(self):
        sampler = LHSSampler()
        self._test_generate_designs(sampler)

    def test_mc_sampler(self):
        sampler = MonteCarloSampler()
        self._test_generate_designs(sampler)

    def test_ff_sampler(self):
        sampler = FullFactorialSampler()
        self._test_generate_designs(sampler)

    def test_pf_sampler(self):
        uncs = [RealParameter('a', 0, 5, resolution=(0, 2.5, 5), pff=True),
                RealParameter('b', 0, 1, resolution=(0, 1), pff=True),
                RealParameter('c', 0, 1),
                RealParameter('d', 1, 2)]

        sampler = PartialFactorialSampler()
        designs = sampler.generate_designs(uncs, 10)
        designs.kind = Scenario

        expected = 60
        self.assertEqual(expected, designs.n)
        self.assertEqual(expected, len([design for design in designs]))

        ff, other = sampler._sort_parameters(uncs)

        received = {u.name for u in ff}
        expected = {'a', 'b'}
        self.assertEqual(received, expected)

        received = {u.name for u in other}
        expected = {'c', 'd'}
        self.assertEqual(received, expected)

    def test_determine_parameters(self):
        function = mock.Mock()
        model_a = Model("A", function)
        model_a.uncertainties = [RealParameter('a', 0, 1),
                                 RealParameter('b', 0, 1)]

        function = mock.Mock()
        model_b = Model("B", function)
        model_b.uncertainties = [RealParameter('b', 0, 1),
                                 RealParameter('c', 0, 1)]

        models = [model_a, model_b]

        parameters = determine_parameters(models, 'uncertainties', union=True)
        for model in models:
            for unc in model.uncertainties:
                self.assertIn(unc.name, parameters.keys())

        parameters = determine_parameters(models, 'uncertainties', union=False)
        self.assertIn('b', parameters.keys())
        self.assertNotIn('c', parameters.keys())
        self.assertNotIn('a', parameters.keys())
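# Minimal usage sketch (not part of the test case above) of the sampler API the
# tests exercise. The import locations (em_framework.samplers, top-level
# Scenario) are assumptions and may differ between workbench versions.
from ema_workbench import RealParameter, IntegerParameter, Scenario
from ema_workbench.em_framework.samplers import LHSSampler

sampler = LHSSampler()
designs = sampler.generate_designs([RealParameter("x", 0, 10),
                                    IntegerParameter("n", 1, 5)], 4)
designs.kind = Scenario  # mirror the tests: iterate the designs as Scenario objects

print(designs.n)  # number of generated designs
for design in designs:
    print(design["x"], design["n"])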