def test_to_robust_problem(self, mocked_platypus):
        """Check that to_robust_problem builds a robust-search problem.

        The resulting problem should search over the model's levers, use
        the robustness functions as its outcomes, and report 'robust' as
        its searchover attribute.  ``mocked_platypus`` is a patched
        platypus module injected by a decorator outside this view.
        """
        mocked_model = Model('test', function=mock.Mock())
        mocked_model.levers = [
            RealParameter('a', 0, 1),
            RealParameter('b', 0, 1)
        ]
        mocked_model.uncertainties = [
            RealParameter('c', 0, 1),
            RealParameter('d', 0, 1)
        ]
        mocked_model.outcomes = [ScalarOutcome('x'), ScalarOutcome('y')]

        scenarios = 5
        robustness_functions = [
            ScalarOutcome('mean x', variable_name='x', function=mock.Mock()),
            ScalarOutcome('mean y', variable_name='y', function=mock.Mock())
        ]

        problem = to_robust_problem(mocked_model, scenarios,
                                    robustness_functions)

        self.assertEqual('robust', problem.searchover)
        # decision variables come from the levers, not the uncertainties
        for entry in problem.parameters:
            self.assertIn(entry.name, mocked_model.levers.keys())
        self.assertEqual(['a', 'b'], problem.parameter_names)
        # outcome names are taken from the robustness functions
        self.assertEqual(['mean x', 'mean y'], problem.outcome_names)
Example #2
0
    def test_pf_sampler(self):
        """Check PartialFactorialSampler design generation and splitting.

        Parameters flagged ``pff=True`` are sampled full-factorially over
        their resolutions; the rest are sampled normally and crossed with
        the factorial designs.
        """
        uncs = [RealParameter('a', 0, 5, resolution=(0, 2.5,5), pff=True),
                RealParameter('b', 0, 1, resolution=(0,1), pff=True),
                RealParameter('c', 0, 1),
                RealParameter('d', 1, 2),
                ]

        sampler = PartialFactorialSampler()
        designs = sampler.generate_designs(uncs, 10)
        designs.kind = Scenario
        
        # presumably 3 resolutions for 'a' x 2 for 'b' = 6 factorial
        # designs, crossed with 10 samples of c/d -> 60; TODO confirm
        expected = 60
        self.assertEqual(expected, designs.n)
        
        # the iterator must yield exactly as many designs as it reports
        self.assertEqual(expected, len([design for design in designs]))
        
        # _sort_parameters separates pff parameters from the others
        ff, other = sampler._sort_parameters(uncs)
        
        received = {u.name for u in ff}
        expected = {'a', 'b'}
        self.assertEqual(received, expected)
        
        received = {u.name for u in other}
        expected = {'c', 'd'}
        self.assertEqual(received, expected)
    def test_to_problem(self, mocked_platypus):
        """Check to_problem for both 'levers' and 'uncertainties' searchover.

        The generated problem must expose the parameters matching the
        chosen searchover and the model's outcome names.
        ``mocked_platypus`` is a patched platypus module injected by a
        decorator outside this view.
        """
        mocked_model = Model('test', function=mock.Mock())
        mocked_model.levers = [
            RealParameter('a', 0, 1),
            RealParameter('b', 0, 1)
        ]
        mocked_model.uncertainties = [
            RealParameter('c', 0, 1),
            RealParameter('d', 0, 1)
        ]
        mocked_model.outcomes = [ScalarOutcome('x'), ScalarOutcome('y')]
        mocked_model.constraints = []

        searchover = 'levers'
        problem = to_problem(mocked_model, searchover)
        self.assertEqual(searchover, problem.searchover)

        for entry in problem.parameters:
            self.assertIn(entry.name, mocked_model.levers.keys())
            self.assertIn(entry, list(mocked_model.levers))
        # outcome_names holds plain strings (see its use in the robust
        # problem test), so compare entries directly; the original
        # ``entry.name`` would raise AttributeError on a str
        for entry in problem.outcome_names:
            self.assertIn(entry, mocked_model.outcomes.keys())

        searchover = 'uncertainties'
        problem = to_problem(mocked_model, searchover)

        self.assertEqual(searchover, problem.searchover)
        for entry in problem.parameters:
            self.assertIn(entry.name, mocked_model.uncertainties.keys())
            self.assertIn(entry, list(mocked_model.uncertainties))
        for entry in problem.outcome_names:
            self.assertIn(entry, mocked_model.outcomes.keys())
Example #4
0
 def test_init(self):
     """Check DefaultCallback initialization with and without levers.

     Verifies the experiment counter, preallocated cases array, stored
     outcome names, and that the cases dtype contains one field per
     parameter plus the bookkeeping columns.
     """
     # let's add some uncertainties to this
     uncs = [RealParameter("a", 0, 1),
            RealParameter("b", 0, 1)]
     outcomes = [TimeSeriesOutcome("test")]
     constraints = []
     callback = DefaultCallback(uncs, [], outcomes, constraints,
                                nr_experiments=100)
     
     self.assertEqual(callback.i, 0)
     self.assertEqual(callback.nr_experiments, 100)
     # cases is preallocated with one row per experiment
     self.assertEqual(callback.cases.shape[0], 100)
     self.assertEqual(callback.outcomes, [o.name for o in outcomes])
     
     names = rf.get_names(callback.cases.dtype)
     names = set(names)
     self.assertEqual(names, {'a', 'b', 'policy', 'model', 'scenario_id'})
     self.assertEqual(callback.results, {})
     
     # with levers
     levers = [RealParameter('c', 0, 10)]
     
     callback = DefaultCallback(uncs, levers, outcomes, constraints, 
                                nr_experiments=100)
     
     self.assertEqual(callback.i, 0)
     self.assertEqual(callback.nr_experiments, 100)
     self.assertEqual(callback.cases.shape[0], 100)
     self.assertEqual(callback.outcomes, [o.name for o in outcomes])
     
     # lever 'c' now shows up as an additional field in the cases dtype
     names = rf.get_names(callback.cases.dtype)
     names = set(names)
     self.assertEqual(names, {'a', 'b', 'c','policy', 'model', 'scenario_id'})
     self.assertEqual(callback.results, {})
Example #5
0
    def test_determine_parameters(self):
        """Check union vs. intersection of parameters across models.

        With ``union=True`` every uncertainty of every model appears in
        the result; with ``union=False`` only those shared by all models
        (here only 'b') remain.
        """
        function = mock.Mock()
        model_a = Model("A", function)
        model_a.uncertainties = [
            RealParameter('a', 0, 1),
            RealParameter('b', 0, 1),
        ]
        function = mock.Mock()
        model_b = Model("B", function)
        model_b.uncertainties = [
            RealParameter('b', 0, 1),
            RealParameter('c', 0, 1),
        ]

        models = [model_a, model_b]

        parameters = determine_parameters(models, 'uncertainties', union=True)
        for model in models:
            for unc in model.uncertainties:
                self.assertIn(unc.name, parameters.keys())

        parameters = determine_parameters(models, 'uncertainties', union=False)
        # 'b' is the only uncertainty both models share
        self.assertIn('b', parameters.keys())
        self.assertNotIn('c', parameters.keys())
        self.assertNotIn('a', parameters.keys())
Example #6
0
    def test_store_results(self):
        """Check that DefaultCallback stores outcome results of any rank.

        Scalars, 1-D time series, and 2-D arrays must be collected into
        arrays with a leading nr_experiments axis; 3-D results raise
        EMAError; unknown outcome names are only logged at debug level.
        """
        nr_experiments = 3
        uncs = [RealParameter("a", 0, 1),
               RealParameter("b", 0, 1)]
        outcomes = [TimeSeriesOutcome("test")]
        model = NamedObject('test')

        experiment = Experiment(0, model, Policy('policy'), Scenario(a=1, b=0), 0)
     
        # case 1 scalar shape = (1)
        callback = DefaultCallback(uncs, [], outcomes, 
                                   nr_experiments=nr_experiments)
        result = {outcomes[0].name: 1}
        callback(experiment, result)
         
        _, out = callback.get_results()
        self.assertIn(outcomes[0].name, out.keys())
        # scalar result -> one slot per experiment
        self.assertEqual(out[outcomes[0].name].shape, (3,))
     
        # case 2 time series shape = (1, nr_time_steps)
        callback = DefaultCallback(uncs, [], outcomes, 
                                   nr_experiments=nr_experiments)
        result = {outcomes[0].name: np.random.rand(10)}
        callback(experiment, result)
          
        _, out = callback.get_results()
        self.assertIn(outcomes[0].name, out.keys())
        self.assertEqual(out[outcomes[0].name].shape, (3,10))

        # case 3 maps etc. shape = (x,y)
        callback = DefaultCallback(uncs, [], outcomes, 
                                   nr_experiments=nr_experiments)
        result = {outcomes[0].name: np.random.rand(2,2)}
        callback(experiment,result)
          
        _, out = callback.get_results()
        self.assertIn(outcomes[0].name, out.keys())
        self.assertEqual(out[outcomes[0].name].shape, (3,2,2))

        # case 4 assert raises EMAError
        # results with more than two dimensions are not supported
        callback = DefaultCallback(uncs, [], outcomes, 
                                   nr_experiments=nr_experiments)
        result = {outcomes[0].name: np.random.rand(2,2,2)}
        self.assertRaises(EMAError, callback, experiment, result)
        
        # KeyError
        # an unknown outcome name must not raise; it is logged instead
        with mock.patch('ema_workbench.util.ema_logging.debug') as mocked_logging:
            callback = DefaultCallback(uncs, [], outcomes, 
                           nr_experiments=nr_experiments)
            result = {'incorrect': np.random.rand(2,)}
            callback(experiment, result)
            
            for outcome in outcomes:
                mocked_logging.assert_called_with("%s not specified as outcome in msi" % outcome.name)
Example #7
0
    def test_run_experiment(self):
        """Check ExperimentRunner delegation and error translation.

        A normal run must forward scenario and policy to the model and
        reset it afterwards; a generic Exception from the model becomes
        an EMAError, while a CaseError is tolerated.
        """
        mockMSI = mock.Mock(spec=Model)
        mockMSI.name = 'test'
        mockMSI.uncertainties = [RealParameter("a", 0, 10),
                                 RealParameter("b", 0, 10)]
        
        msis = NamedObjectMap(AbstractModel)
        msis['test'] = mockMSI

        runner = ExperimentRunner(msis)
        
        experiment = Experiment('test',
                                mockMSI.name,
                                Policy('none'),  
                                Scenario(a=1, b=2), 0)
        
        runner.run_experiment(experiment)
        
        # the runner must pass the experiment's scenario and policy on
        # to the model's run_model
        sc, p = mockMSI.run_model.call_args[0]
        self.assertEqual(sc.name, experiment.scenario.name)
        self.assertEqual(p, experiment.policy)
        
        mockMSI.reset_model.assert_called_once_with()
        
   
        # assert handling of case error
        # a plain Exception from the model is wrapped in an EMAError
        mockMSI = mock.Mock(spec=Model)
        mockMSI.name = 'test'
        mockMSI.run_model.side_effect = Exception('some exception')
        msis = NamedObjectMap(AbstractModel)
        msis['test'] = mockMSI
        
        runner = ExperimentRunner(msis)
    
        experiment = Experiment('test',mockMSI.name,Policy('none'),  
                      Scenario(a=1, b=2),0)

        with self.assertRaises(EMAError):
            runner.run_experiment(experiment)
           
        # assert handling of case error
        # a CaseError is treated as a recoverable per-case failure
        mockMSI = mock.Mock(spec=Model)
        mockMSI.name = 'test'
        mockMSI.run_model.side_effect = CaseError("message", {})
        msis = NamedObjectMap(AbstractModel)
        msis['test'] = mockMSI
        runner = ExperimentRunner(msis)
    
        experiment = Experiment('test',mockMSI.name,Policy('none'),  
                      Scenario(a=1, b=2),0)

        runner.run_experiment(experiment)
Example #8
0
    def test_store_results(self):
        """Check DefaultCallback result storage for results of any rank.

        Variant of the store-results test using the Case experiment
        type: scalars, 1-D, and 2-D outcomes are stacked along a leading
        nr_experiments axis; 3-D results raise EMAError.
        """
        nr_experiments = 3
        uncs = [RealParameter("a", 0, 1), RealParameter("b", 0, 1)]
        outcomes = [TimeSeriesOutcome("test")]
        model = NamedObject('test')

        experiment = Case(0, model, Policy('policy'), Scenario(a=1, b=0), 0)

        # case 1 scalar shape = (1)
        callback = DefaultCallback(uncs, [],
                                   outcomes,
                                   nr_experiments=nr_experiments)
        model_outcomes = {outcomes[0].name: 1}
        callback(experiment, model_outcomes)

        _, out = callback.get_results()

        self.assertIn(outcomes[0].name, out.keys())
        self.assertEqual(out[outcomes[0].name].shape, (3, ))

        # case 2 time series shape = (1, nr_time_steps)
        callback = DefaultCallback(uncs, [],
                                   outcomes,
                                   nr_experiments=nr_experiments)
        model_outcomes = {outcomes[0].name: np.random.rand(10)}
        callback(experiment, model_outcomes)

        _, out = callback.get_results()
        self.assertIn(outcomes[0].name, out.keys())
        self.assertEqual(out[outcomes[0].name].shape, (3, 10))

        # case 3 maps etc. shape = (x,y)
        callback = DefaultCallback(uncs, [],
                                   outcomes,
                                   nr_experiments=nr_experiments)
        model_outcomes = {outcomes[0].name: np.random.rand(2, 2)}
        callback(experiment, model_outcomes)

        _, out = callback.get_results()
        self.assertIn(outcomes[0].name, out.keys())
        self.assertEqual(out[outcomes[0].name].shape, (3, 2, 2))

        # case 4 assert raises EMAError
        # results with more than two dimensions are not supported
        callback = DefaultCallback(uncs, [],
                                   outcomes,
                                   nr_experiments=nr_experiments)
        model_outcomes = {outcomes[0].name: np.random.rand(2, 2, 2)}
        self.assertRaises(EMAError, callback, experiment, model_outcomes)
    def test_run_model(self):
        """Check parameter translation performed by Model.run_model.

        Only declared uncertainties are forwarded to the model function;
        a parameter with multiple variable_names fans its value out to
        each name, and multivalue categories distribute their category
        values across the variable names.
        """
        model_name = 'modelname'

        function = mock.Mock()

        model = Model(model_name, function)
        model.uncertainties = [RealParameter('a', 0, 1)]
        model.run_model(Scenario(**{'a': 0.1, 'b': 1}), Policy('test'))
        # 'b' is not a declared uncertainty so it must be filtered out
        function.assert_called_once_with(a=0.1)

        # test complete translation of scenario

        model = Model(model_name, function)
        model.uncertainties = [
            RealParameter('a', 0, 1, variable_name=['a', 'b'])
        ]

        scenario = Scenario(**{'a': 0.1})
        model.run_model(scenario, Policy('test'))

        # the single value of 'a' is copied to both variable names
        self.assertIn('a', scenario.keys())
        self.assertIn('b', scenario.keys())

        model = Model(model_name, function)
        cats = [
            Category('some name', [1, 2], multivalue=True),
            Category('some other name', [3, 4], multivalue=True)
        ]
        model.uncertainties = [
            CategoricalParameter('a', cats, variable_name=['a', 'b'])
        ]

        scenario = Scenario(**{'a': 'some name'})
        model.run_model(scenario, Policy('test'))

        # the multivalue category [1, 2] is spread over 'a' and 'b'
        self.assertIn('a', scenario.keys())
        self.assertIn('b', scenario.keys())
        self.assertEqual(scenario['a'], 1)
        self.assertEqual(scenario['b'], 2)

        scenario = Scenario(**{'a': 'some other name'})
        model.run_model(scenario, Policy('test'))

        self.assertIn('a', scenario.keys())
        self.assertIn('b', scenario.keys())
        self.assertEqual(scenario['a'], 3)
        self.assertEqual(scenario['b'], 4)
Example #10
0
    def test_init(self):
        """Check DefaultCallback initialization (DataFrame-based variant).

        Cases are stored in a pandas DataFrame; only outcomes with a
        known shape (ArrayOutcome here) are preallocated in results,
        filled with NaN.
        """
        # let's add some uncertainties to this
        uncs = [RealParameter("a", 0, 1), RealParameter("b", 0, 1)]
        outcomes = [
            ScalarOutcome("scalar"),
            ArrayOutcome("array", shape=(10, )),
            TimeSeriesOutcome("timeseries")
        ]
        callback = DefaultCallback(uncs, [], outcomes, nr_experiments=100)

        self.assertEqual(callback.i, 0)
        self.assertEqual(callback.nr_experiments, 100)
        self.assertEqual(callback.cases.shape[0], 100)
        self.assertEqual(callback.outcomes, [o.name for o in outcomes])

        names = callback.cases.columns.values.tolist()
        names = set(names)
        self.assertEqual(names, {'a', 'b', 'policy', 'model', 'scenario'})

        # only the outcome with an explicit shape is preallocated
        self.assertNotIn('scalar', callback.results)
        self.assertNotIn('timeseries', callback.results)
        self.assertIn('array', callback.results)

        # preallocated storage starts out all-NaN
        a = np.all(np.isnan(callback.results['array']))
        self.assertTrue(a)

        # with levers
        levers = [RealParameter('c', 0, 10)]

        callback = DefaultCallback(uncs, levers, outcomes, nr_experiments=100)

        self.assertEqual(callback.i, 0)
        self.assertEqual(callback.nr_experiments, 100)
        self.assertEqual(callback.cases.shape[0], 100)
        self.assertEqual(callback.outcomes, [o.name for o in outcomes])

        # lever 'c' shows up as an additional cases column
        names = callback.cases.columns.values.tolist()
        names = set(names)
        self.assertEqual(names, {'a', 'b', 'c', 'policy', 'model', 'scenario'})

        self.assertNotIn('scalar', callback.results)
        self.assertNotIn('timeseries', callback.results)
        self.assertIn('array', callback.results)

        a = np.all(np.isnan(callback.results['array']))
        self.assertTrue(a)
Example #11
0
 def test_model_uncertainties(self):
     """Check that Model.uncertainties accepts a single parameter.

     Assigning one RealParameter (not wrapped in a list) must register
     it under its name in the uncertainties collection.
     """
     model_name = 'modelname'
     
     model = Model(model_name, lambda x:x)
     # a fresh model starts with no uncertainties
     self.assertTrue(len(model.uncertainties.keys())==0)
     
     unc_a = RealParameter('a', 0, 1)
     model.uncertainties = unc_a
     self.assertTrue(len(model.uncertainties.keys())==1)
     self.assertTrue(unc_a.name in model.uncertainties)
    def test_to_platypus_types(self, mocked_platypus):
        """Check mapping of workbench parameters to platypus types.

        ``mocked_platypus`` is a patched platypus module injected by a
        decorator outside this view.
        """
        dv = [
            RealParameter("real", 0, 1),
            IntegerParameter("integer", 0, 10),
            CategoricalParameter("categorical", ["a", "b"])
        ]

        types = to_platypus_types(dv)
        self.assertTrue(str(types[0]).find("platypus.Real") != -1)
        self.assertTrue(str(types[1]).find("platypus.Integer") != -1)
        # NOTE(review): categorical maps to platypus.Permutation here —
        # confirm this is the intended platypus type (vs. Subset)
        self.assertTrue(str(types[2]).find("platypus.Permutation") != -1)
Example #13
0
 def test_store_cases(self):
     """Check that DefaultCallback stores experiment designs correctly.

     Every uncertainty, lever (taken from the policy), and the
     bookkeeping fields (policy, model, scenario_id) must be written to
     the cases record array exactly as supplied.
     """
     nr_experiments = 3
     uncs = [RealParameter("a", 0, 1),
             RealParameter("b", 0, 1),
             CategoricalParameter('c', [0, 1, 2]),
             IntegerParameter("d", 0, 1)]
     outcomes = [TimeSeriesOutcome("test")]
     constraints = []
     case = {unc.name:random.random() for unc in uncs}
     # coerce the random draws into the categorical/integer domains
     case["c"] = int(round(case["c"]*2))
     case["d"] = int(round(case["d"]))
     
     model = NamedObject('test')
     policy  = Policy('policy')
     scenario = Scenario(**case)
     experiment = Case(0, model.name, policy, scenario, 0)
  
     callback = DefaultCallback(uncs, [],outcomes, constraints,
                                nr_experiments=nr_experiments,
                                reporting_interval=1)
     model_outcomes = {outcomes[0].name: 1}
     model_constraints = {}
     callback(experiment, model_outcomes, model_constraints)
      
     experiments, _ = callback.get_results()
     # the expected design is the case plus the bookkeeping columns
     design = case
     design['policy'] = policy.name
     design['model'] = model.name
     design['scenario_id'] = scenario.name
     
     names = rf.get_names(experiments.dtype)
     for name in names:
         entry_a = experiments[name][0]
         entry_b = design[name]
         
         self.assertEqual(entry_a, entry_b, "failed for "+name)
          
     # with levers
     # lever values come from the policy rather than the scenario
     nr_experiments = 3
     uncs = [RealParameter("a", 0, 1),
             RealParameter("b", 0, 1)]
     levers = [RealParameter("c", 0, 1),
               RealParameter("d", 0, 1)]
     outcomes = [TimeSeriesOutcome("test")]
     case = {unc.name:random.random() for unc in uncs}
     
     model = NamedObject('test')
     policy  = Policy('policy', c=1, d=1)
     scenario = Scenario(**case)
     experiment = Case(0, model.name, policy, scenario, 0)
  
     callback = DefaultCallback(uncs, levers,outcomes,constraints, 
                                nr_experiments=nr_experiments,
                                reporting_interval=1)
     model_outcomes = {outcomes[0].name: 1}
     model_constraints = {}
     callback(experiment, model_outcomes, model_constraints)
      
     experiments, _ = callback.get_results()
     design = case
     design['c'] = 1
     design['d'] = 1
     design['policy'] = policy.name
     design['model'] = model.name
     design['scenario_id'] = scenario.name
     
     names = rf.get_names(experiments.dtype)
     
     for name in names:
         self.assertEqual(experiments[name][0], design[name])
Example #14
0
class SamplerTestCase(unittest.TestCase):
    """Tests for the workbench samplers and parameter utilities."""

    # shared parameter set covering real, integer, and categorical kinds
    uncertainties = [RealParameter("1", 0, 10),
                     IntegerParameter("2", 0, 10),
                     CategoricalParameter('3', ['a','b', 'c'])]

    def _test_generate_designs(self, sampler):
        """Shared check: the sampler yields designs covering all parameters
        and its reported count matches the number actually yielded."""
        designs = sampler.generate_designs(self.uncertainties, 10)
        designs.kind = Scenario
        msg = 'tested for {}'.format(type(sampler))
        
        actual_nr_designs = 0
        for design in designs:
            actual_nr_designs +=1
            
        # 'design' holds the last yielded design after the loop; each
        # design must contain every declared parameter name
        self.assertIn('1', design, msg)
        self.assertIn('2', design, msg)
        self.assertIn('3', design, msg)
        self.assertEqual(designs.n, actual_nr_designs, msg) 
    
    def test_lhs_sampler(self):
        """Latin hypercube sampling passes the shared design checks."""
        sampler = LHSSampler()
        self._test_generate_designs(sampler)
     
    def test_mc_sampler(self):
        """Monte Carlo sampling passes the shared design checks."""
        sampler = MonteCarloSampler()
        self._test_generate_designs(sampler)
    
    def test_ff_sampler(self):
        """Full factorial sampling passes the shared design checks."""
        sampler = FullFactorialSampler()
        self._test_generate_designs(sampler)
        
    def test_pf_sampler(self):
        """Check PartialFactorialSampler design counts and parameter split."""
        uncs = [RealParameter('a', 0, 5, resolution=(0, 2.5,5), pff=True),
                RealParameter('b', 0, 1, resolution=(0,1), pff=True),
                RealParameter('c', 0, 1),
                RealParameter('d', 1, 2),
                ]

        sampler = PartialFactorialSampler()
        designs = sampler.generate_designs(uncs, 10)
        designs.kind = Scenario
        
        # presumably 3 x 2 factorial designs crossed with 10 samples of
        # the remaining parameters -> 60; TODO confirm
        expected = 60
        self.assertEqual(expected, designs.n)
        
        self.assertEqual(expected, len([design for design in designs]))
        
        # pff parameters are separated from the normally sampled ones
        ff, other = sampler._sort_parameters(uncs)
        
        received = {u.name for u in ff}
        expected = {'a', 'b'}
        self.assertEqual(received, expected)
        
        received = {u.name for u in other}
        expected = {'c', 'd'}
        self.assertEqual(received, expected)
 
    def test_determine_parameters(self):
        """Check union vs. intersection of uncertainties across models."""
        function = mock.Mock()
        model_a = Model("A", function)
        model_a.uncertainties = [RealParameter('a', 0, 1),
                                 RealParameter('b', 0, 1),]
        function = mock.Mock()
        model_b = Model("B", function)
        model_b.uncertainties = [RealParameter('b', 0, 1),
                                 RealParameter('c', 0, 1),]
        
        models = [model_a, model_b]
        
        parameters = determine_parameters(models, 'uncertainties', union=True)
        for model in models:
            for unc in model.uncertainties:
                self.assertIn(unc.name, parameters.keys())
        
        parameters = determine_parameters(models, 'uncertainties', union=False)
        # only 'b' is shared by both models
        self.assertIn('b', parameters.keys())
        self.assertNotIn('c', parameters.keys())
        self.assertNotIn('a', parameters.keys())