Example #1
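    # experiment_generator should yield the full cross product of designs,
    # models, and policies: 10 designs x 2 models x 3 policies = 60 experiments.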
    def test_experiment_generator(self):
        sampler = LHSSampler()

        shared_abc_1 = RealParameter("shared ab 1", 0, 1)
        shared_abc_2 = RealParameter("shared ab 2", 0, 1)
        unique_a = RealParameter("unique a", 0, 1)
        unique_b = RealParameter("unique b", 0, 1)
        uncertainties = [shared_abc_1, shared_abc_2, unique_a, unique_b]
        designs = sampler.generate_designs(uncertainties, 10)
        designs.kind = Scenario

        # everything shared
        model_a = Model("A", mock.Mock())
        model_b = Model("B", mock.Mock())

        model_a.uncertainties = [shared_abc_1, shared_abc_2, unique_a]
        model_b.uncertainties = [shared_abc_1, shared_abc_2, unique_b]
        model_structures = [model_a, model_b]

        policies = [Policy('policy 1'), Policy('policy 2'), Policy('policy 3')]

        gen = experiment_generator(designs, model_structures, policies)

        experiments = list(gen)
        self.assertEqual(len(experiments), 2 * 3 * 10)
Example #2
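    # ModelEnsemble.policies should accept both a single Policy and a list
    # of policies.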
    def test_policies(self):
        ensemble = ModelEnsemble()

        policy = Policy('test')
        ensemble.policies = policy

        ensemble = ModelEnsemble()

        policies = [Policy('test'), Policy('name')]
        ensemble.policies = policies
Example #3
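    # ExperimentRunner should pass scenario and policy to the model and reset
    # it afterwards, wrap arbitrary exceptions in an EMAError, and handle a
    # CaseError without raising.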
    def test_run_experiment(self):
        mockMSI = mock.Mock(spec=Model)
        mockMSI.name = 'test'
        mockMSI.uncertainties = [RealParameter("a", 0, 10),
                                 RealParameter("b", 0, 10)]
        
        msis = NamedObjectMap(AbstractModel)
        msis['test'] = mockMSI

        runner = ExperimentRunner(msis)
        
        experiment = Experiment('test',
                                mockMSI.name,
                                Policy('none'),  
                                Scenario(a=1, b=2), 0)
        
        runner.run_experiment(experiment)
        
        sc, p = mockMSI.run_model.call_args[0]
        self.assertEqual(sc.name, experiment.scenario.name)
        self.assertEqual(p, experiment.policy)
        
        mockMSI.reset_model.assert_called_once_with()
        
   
        # assert that a generic exception raised by the model
        # is wrapped in an EMAError
        mockMSI = mock.Mock(spec=Model)
        mockMSI.name = 'test'
        mockMSI.run_model.side_effect = Exception('some exception')
        msis = NamedObjectMap(AbstractModel)
        msis['test'] = mockMSI
        
        runner = ExperimentRunner(msis)
    
        experiment = Experiment('test', mockMSI.name, Policy('none'),
                                Scenario(a=1, b=2), 0)

        with self.assertRaises(EMAError):
            runner.run_experiment(experiment)
           
        # assert that a CaseError is caught and handled gracefully
        mockMSI = mock.Mock(spec=Model)
        mockMSI.name = 'test'
        mockMSI.run_model.side_effect = CaseError("message", {})
        msis = NamedObjectMap(AbstractModel)
        msis['test'] = mockMSI
        runner = ExperimentRunner(msis)
    
        experiment = Experiment('test', mockMSI.name, Policy('none'),
                                Scenario(a=1, b=2), 0)

        runner.run_experiment(experiment)
Example #4
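    # Smoke test for NetLogoModel: initialize, run a single case, retrieve the
    # output, and clean up. Assumes a local NetLogo installation and the
    # bundled Wolf Sheep Predation example model.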
    def test_run_model(self):
        wd = r"../models"

        model_file = r"/Wolf Sheep Predation.nlogo"

        model = NetLogoModel("predPreyNetlogo", wd=wd, model_file=model_file)

        model.run_length = 1000

        model.uncertainties = [
            RealParameter("grass-regrowth-time", 10, 100),
            CategoricalParameter("grass?", ("true", "false"))
        ]

        model.outcomes = [
            TimeSeriesOutcome('sheep'),
            TimeSeriesOutcome('wolves')
        ]
        model.model_init(Policy('no policy'))

        case = {"grass-regrowth-time": 35, "grass?": "true"}

        model.run_model(case)
        _ = model.retrieve_output()

        model.cleanup()
Example #5
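    # run_model should pass only declared uncertainties to the wrapped
    # function, and a parameter with several variable_names should be fanned
    # out to each of them; multivalue categories map one value per variable.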
    def test_run_model(self):
        model_name = 'modelname'

        function = mock.Mock()

        model = Model(model_name, function)
        model.uncertainties = [RealParameter('a', 0, 1)]
        model.run_model(Scenario(**{'a': 0.1, 'b': 1}), Policy('test'))
        function.assert_called_once_with(a=0.1)

        # test complete translation of scenario

        model = Model(model_name, function)
        model.uncertainties = [
            RealParameter('a', 0, 1, variable_name=['a', 'b'])
        ]

        scenario = Scenario(**{'a': 0.1})
        model.run_model(scenario, Policy('test'))

        self.assertIn('a', scenario.keys())
        self.assertIn('b', scenario.keys())

        model = Model(model_name, function)
        cats = [
            Category('some name', [1, 2], multivalue=True),
            Category('some other name', [3, 4], multivalue=True)
        ]
        model.uncertainties = [
            CategoricalParameter('a', cats, variable_name=['a', 'b'])
        ]

        scenario = Scenario(**{'a': 'some name'})
        model.run_model(scenario, Policy('test'))

        self.assertIn('a', scenario.keys())
        self.assertIn('b', scenario.keys())
        self.assertEqual(scenario['a'], 1)
        self.assertEqual(scenario['b'], 2)

        scenario = Scenario(**{'a': 'some other name'})
        model.run_model(scenario, Policy('test'))

        self.assertIn('a', scenario.keys())
        self.assertIn('b', scenario.keys())
        self.assertEqual(scenario['a'], 3)
        self.assertEqual(scenario['b'], 4)
Example #6
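    # FileModel.run_model should store the policy on the model; os is mocked
    # so no model file has to exist on disk.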
    def test_run_model(self):
        model_name = 'modelname'
        model_file = 'model_file'

        with mock.patch('ema_workbench.em_framework.model.os') as patched_os:
            # make the mocked os module report that the model file exists
            patched_os.path.isfile.return_value = True
            model = FileModelTest(model_name, '.', model_file)
            model.run_model(Scenario(a=1), Policy('test', b=2))
            self.assertEqual(model.policy.name, 'test')
Example #7
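    # DefaultCallback should store scalar, 1-D, and 2-D outcomes in arrays of
    # shape (nr_experiments, ...), raise an EMAError for 3-D results, and log
    # a debug message for outcomes it does not know about.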
    def test_store_results(self):
        nr_experiments = 3
        uncs = [RealParameter("a", 0, 1),
                RealParameter("b", 0, 1)]
        outcomes = [TimeSeriesOutcome("test")]
        model = NamedObject('test')

        experiment = Experiment(0, model, Policy('policy'), Scenario(a=1, b=0), 0)
     
        # case 1 scalar shape = (1)
        callback = DefaultCallback(uncs, [], outcomes, 
                                   nr_experiments=nr_experiments)
        result = {outcomes[0].name: 1}
        callback(experiment, result)
         
        _, out = callback.get_results()
        self.assertIn(outcomes[0].name, out.keys())
        self.assertEqual(out[outcomes[0].name].shape, (3,))
     
        # case 2 time series shape = (1, nr_time_steps)
        callback = DefaultCallback(uncs, [], outcomes,
                                   nr_experiments=nr_experiments)
        result = {outcomes[0].name: np.random.rand(10)}
        callback(experiment, result)

        _, out = callback.get_results()
        self.assertIn(outcomes[0].name, out.keys())
        self.assertEqual(out[outcomes[0].name].shape, (3, 10))

        # case 3 maps etc. shape = (x,y)
        callback = DefaultCallback(uncs, [], outcomes,
                                   nr_experiments=nr_experiments)
        result = {outcomes[0].name: np.random.rand(2, 2)}
        callback(experiment, result)

        _, out = callback.get_results()
        self.assertIn(outcomes[0].name, out.keys())
        self.assertEqual(out[outcomes[0].name].shape, (3, 2, 2))

        # case 4 assert raises EMAError
        callback = DefaultCallback(uncs, [], outcomes,
                                   nr_experiments=nr_experiments)
        result = {outcomes[0].name: np.random.rand(2, 2, 2)}
        self.assertRaises(EMAError, callback, experiment, result)

        # KeyError
        with mock.patch('ema_workbench.util.ema_logging.debug') as mocked_logging:
            callback = DefaultCallback(uncs, [], outcomes,
                                       nr_experiments=nr_experiments)
            result = {'incorrect': np.random.rand(2)}
            callback(experiment, result)

            for outcome in outcomes:
                mocked_logging.assert_called_with(
                    "%s not specified as outcome in msi" % outcome.name)
Example #8
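    # A later variant of the previous test, using Case instead of Experiment;
    # results with more than two dimensions still raise an EMAError.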
    def test_store_results(self):
        nr_experiments = 3
        uncs = [RealParameter("a", 0, 1), RealParameter("b", 0, 1)]
        outcomes = [TimeSeriesOutcome("test")]
        model = NamedObject('test')

        experiment = Case(0, model, Policy('policy'), Scenario(a=1, b=0), 0)

        # case 1 scalar shape = (1)
        callback = DefaultCallback(uncs, [],
                                   outcomes,
                                   nr_experiments=nr_experiments)
        model_outcomes = {outcomes[0].name: 1}
        callback(experiment, model_outcomes)

        _, out = callback.get_results()

        self.assertIn(outcomes[0].name, out.keys())
        self.assertEqual(out[outcomes[0].name].shape, (3, ))

        # case 2 time series shape = (1, nr_time_steps)
        callback = DefaultCallback(uncs, [],
                                   outcomes,
                                   nr_experiments=nr_experiments)
        model_outcomes = {outcomes[0].name: np.random.rand(10)}
        callback(experiment, model_outcomes)

        _, out = callback.get_results()
        self.assertIn(outcomes[0].name, out.keys())
        self.assertEqual(out[outcomes[0].name].shape, (3, 10))

        # case 3 maps etc. shape = (x,y)
        callback = DefaultCallback(uncs, [],
                                   outcomes,
                                   nr_experiments=nr_experiments)
        model_outcomes = {outcomes[0].name: np.random.rand(2, 2)}
        callback(experiment, model_outcomes)

        _, out = callback.get_results()
        self.assertIn(outcomes[0].name, out.keys())
        self.assertEqual(out[outcomes[0].name].shape, (3, 2, 2))

        # case 4 assert raises EMAError
        callback = DefaultCallback(uncs, [],
                                   outcomes,
                                   nr_experiments=nr_experiments)
        model_outcomes = {outcomes[0].name: np.random.rand(2, 2, 2)}
        self.assertRaises(EMAError, callback, experiment, model_outcomes)
Example #9
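    # model_init should adopt the policy's function and store the policy on
    # the model, while unknown policy attributes must not leak onto the model.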
    def test_model_init(self):
        model_name = 'modelname'

        def initial_func(a=1):
            return a

        model = Model(model_name, initial_func)

        def policy_func(a=1):
            return a

        policy = Policy('test', function=policy_func, unknown='a')
        model.model_init(policy)

        self.assertEqual(policy, model.policy)
        self.assertEqual(model.function, policy_func)

        with self.assertRaises(AttributeError):
            model.unknown
Example #10
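    # DefaultCallback should record the complete design (uncertainties,
    # levers, policy name, and model name) for every experiment in the
    # experiments record array.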
    def test_store_cases(self):
        nr_experiments = 3
        uncs = [
            RealParameter("a", 0, 1),
            RealParameter("b", 0, 1),
            CategoricalParameter('c', [0, 1, 2]),
            IntegerParameter("d", 0, 1)
        ]
        outcomes = [TimeSeriesOutcome("test")]
        case = {unc.name: random.random() for unc in uncs}
        case["c"] = int(round(case["c"] * 2))
        case["d"] = int(round(case["d"]))

        model = NamedObject('test')
        policy = Policy('policy')
        scenario = Scenario(**case)
        experiment = Experiment(0, model, policy, scenario, 0)

        callback = DefaultCallback(uncs, [],
                                   outcomes,
                                   nr_experiments=nr_experiments,
                                   reporting_interval=1)
        result = {outcomes[0].name: 1}
        callback(experiment, result)

        experiments, _ = callback.get_results()
        design = case
        design['policy'] = policy.name
        design['model'] = model.name

        names = rf.get_names(experiments.dtype)
        for name in names:
            self.assertEqual(experiments[name][0], design[name])

        # with levers
        nr_experiments = 3
        uncs = [RealParameter("a", 0, 1), RealParameter("b", 0, 1)]
        levers = [RealParameter("c", 0, 1), RealParameter("d", 0, 1)]
        outcomes = [TimeSeriesOutcome("test")]
        case = {unc.name: random.random() for unc in uncs}

        model = NamedObject('test')
        policy = Policy('policy', c=1, d=1)
        scenario = Scenario(**case)
        experiment = Experiment(0, model, policy, scenario, 0)

        callback = DefaultCallback(uncs,
                                   levers,
                                   outcomes,
                                   nr_experiments=nr_experiments,
                                   reporting_interval=1)
        result = {outcomes[0].name: 1}
        callback(experiment, result)

        experiments, _ = callback.get_results()
        design = case
        design['c'] = 1
        design['d'] = 1
        design['policy'] = policy.name
        design['model'] = model.name

        names = rf.get_names(experiments.dtype)

        for name in names:
            self.assertEqual(experiments[name][0], design[name])
Example #11
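    # pandas-based variant of the previous test: experiments are returned as
    # a DataFrame and the scenario name is recorded as well.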
    def test_store_cases(self):
        nr_experiments = 3
        uncs = [
            RealParameter("a", 0, 1),
            RealParameter("b", 0, 1),
            CategoricalParameter('c', [0, 1, 2]),
            IntegerParameter("d", 0, 1)
        ]
        outcomes = [TimeSeriesOutcome("test")]
        case = {unc.name: random.random() for unc in uncs}
        case["c"] = int(round(case["c"] * 2))
        case["d"] = int(round(case["d"]))

        model = NamedObject('test')
        policy = Policy('policy')
        scenario = Scenario(**case)
        experiment = Case(0, model.name, policy, scenario, 0)

        callback = DefaultCallback(uncs, [],
                                   outcomes,
                                   nr_experiments=nr_experiments,
                                   reporting_interval=1)
        model_outcomes = {outcomes[0].name: 1}
        callback(experiment, model_outcomes)

        experiments, _ = callback.get_results()
        design = case
        design['policy'] = policy.name
        design['model'] = model.name
        design['scenario'] = scenario.name

        names = experiments.columns.values.tolist()
        for name in names:
            entry_a = experiments[name][0]
            entry_b = design[name]

            self.assertEqual(entry_a, entry_b, "failed for " + name)

        # with levers
        nr_experiments = 3
        uncs = [RealParameter("a", 0, 1), RealParameter("b", 0, 1)]
        levers = [RealParameter("c", 0, 1), RealParameter("d", 0, 1)]
        outcomes = [TimeSeriesOutcome("test")]
        case = {unc.name: random.random() for unc in uncs}

        model = NamedObject('test')
        policy = Policy('policy', c=1, d=1)
        scenario = Scenario(**case)
        experiment = Case(0, model.name, policy, scenario, 0)

        callback = DefaultCallback(uncs,
                                   levers,
                                   outcomes,
                                   nr_experiments=nr_experiments,
                                   reporting_interval=1)
        model_outcomes = {outcomes[0].name: 1}
        callback(experiment, model_outcomes)

        experiments, _ = callback.get_results()
        design = case
        design['c'] = 1
        design['d'] = 1
        design['policy'] = policy.name
        design['model'] = model.name
        design['scenario'] = scenario.name

        names = experiments.columns.values.tolist()
        for name in names:
            self.assertEqual(experiments[name][0], design[name])
Example #12
        RealParameter('fatality rate region 2', 0.0001, 0.1),
        RealParameter('initial immune fraction of the population of region 1',
                      0, 0.5),
        RealParameter('initial immune fraction of the population of region 2',
                      0, 0.5),
        RealParameter('normal interregional contact rate', 0, 0.9),
        RealParameter('permanent immune population fraction R1', 0, 0.5),
        RealParameter('permanent immune population fraction R2', 0, 0.5),
        RealParameter('recovery time region 1', 0.1, 0.75),
        RealParameter('recovery time region 2', 0.1, 0.75),
        RealParameter('susceptible to immune population delay time region 1',
                      0.5, 2),
        RealParameter('susceptible to immune population delay time region 2',
                      0.5, 2),
        RealParameter('root contact rate region 1', 0.01, 5),
        RealParameter('root contact ratio region 2', 0.01, 5),
        RealParameter('infection ratio region 1', 0, 0.15),
        RealParameter('infection rate region 2', 0, 0.15),
        RealParameter('normal contact rate region 1', 10, 100),
        RealParameter('normal contact rate region 2', 10, 200)
    ]

    # add policies
    policies = [
        Policy('no policy', model_file=r'FLUvensimV1basecase.vpm'),
        Policy('static policy', model_file=r'FLUvensimV1static.vpm'),
        Policy('adaptive policy', model_file=r'FLUvensimV1dynamic.vpm')
    ]

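    # run 1000 scenarios for each of the three policies; assumes Vensim and
    # the FLUvensim model files referenced above are available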
    results = perform_experiments(model, 1000, policies=policies)
Example #13
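    # Exercise the multiprocessing worker loop: it should exit on the sentinel
    # (None) and on EOFError/IOError, and keep serving experiments whether
    # run_experiment succeeds, raises an EMAError, or raises a generic
    # exception.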
    def test_worker(self, mocked_logging, mocked_runner):
        
        if sys.version_info[0] < 3:
            mocked_inqueue = mock.Mock(multiprocessing.queues.SimpleQueue())
            mocked_outqueue = mock.Mock(multiprocessing.queues.SimpleQueue())
        else:
            mocked_inqueue = mock.Mock(multiprocessing.SimpleQueue())
            mocked_outqueue = mock.Mock(multiprocessing.SimpleQueue())

        function = mock.Mock()
        mockMSI = MockMSI("test", function)
        
        # task = None
        mocked_inqueue.get.return_value = None
        ema_parallel_multiprocessing.worker(mocked_inqueue, 
                                            mocked_outqueue, 
                                            [mockMSI])
        mocked_logging.debug.assert_called_with('worker got sentinel -- exiting')
        
        # EOFError, IOError
        mocked_inqueue.get.side_effect = EOFError
        ema_parallel_multiprocessing.worker(mocked_inqueue, 
                                            mocked_outqueue, 
                                            [mockMSI])
        mocked_logging.debug.assert_called_with('worker got EOFError or IOError -- exiting')
 
        mocked_inqueue.get.side_effect = IOError
        ema_parallel_multiprocessing.worker(mocked_inqueue, 
                                            mocked_outqueue, 
                                            [mockMSI])
        mocked_logging.debug.assert_called_with('worker got EOFError or IOError -- exiting')
         
        # task = tuple of _, experiment dict
        #     - success
        #     - ema error
        #     - exception
 
        # setup of test, we get a normal case 
        experiment = Experiment('try', mockMSI.name, Policy('none'), 
                                Scenario(a=1), 0)
        mocked_inqueue.get.return_value = (0, experiment)
        mocked_inqueue.get.side_effect = None        
         
        # running experiment raises EMAError
        mocked_runner().run_experiment.side_effect = EMAError
        feeder_thread = threading.Thread(
            target=ema_parallel_multiprocessing.worker,
            args=(mocked_inqueue, mocked_outqueue, [mockMSI]))
        feeder_thread.daemon = True
        feeder_thread.start()
        time.sleep(0.001) # to avoid race conditions
        mocked_inqueue.get.return_value = None
  
        mocked_runner().run_experiment.assert_called_with(experiment)
         
        # reset mocks
        mocked_outqueue.reset_mock()
        mocked_runner().reset_mock()
          
        # running experiment raises a generic Exception
        experiment = Experiment('try', mockMSI.name, Policy('none'), 
                                Scenario(a=1), 0)
        mocked_inqueue.get.return_value = (0, experiment)
        mocked_inqueue.get.side_effect = None   
          
        mocked_runner().run_experiment.side_effect = Exception
        feeder_thread = threading.Thread(
            target=ema_parallel_multiprocessing.worker,
            args=(mocked_inqueue, mocked_outqueue, [mockMSI]))
        feeder_thread.daemon = True
        feeder_thread.start()
        time.sleep(0.001) # to avoid race conditions
        mocked_inqueue.get.return_value = None
  
        mocked_runner().run_experiment.assert_called_with(experiment)
#         mocked_outqueue.put.assert_called_once()
          
        # reset mocks
        mocked_outqueue.reset_mock()
        mocked_runner().reset_mock()
  
        # running experiment works fine
        experiment = Experiment('try', mockMSI.name, Policy('none'), 
                                Scenario(a=1), 0)
        mocked_inqueue.get.return_value = (0, experiment)
        mocked_inqueue.get.side_effect = None   
        mocked_runner().run_experiment.side_effect = None
          
        feeder_thread = threading.Thread(
            target=ema_parallel_multiprocessing.worker,
            args=(mocked_inqueue, mocked_outqueue, [mockMSI]))
        feeder_thread.daemon = True
        feeder_thread.start()
        time.sleep(0.001) # to avoid race conditions
        mocked_inqueue.get.return_value = None
  
        mocked_runner().run_experiment.assert_called_with(experiment)
#         mocked_outqueue.put.assert_called_once()
          
        # reset mocks
        mocked_outqueue.reset_mock()
        mocked_runner().reset_mock()
Example #14
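    # Three models share a subset of their uncertainties; perform_experiments
    # should work for every combination of uncertainty_union and
    # outcome_union, and the parallel code path is exercised with a mocked
    # pool.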
    def test_perform_experiments(self):

        # everything shared
        model_a = Model("A", mock.Mock())
        model_b = Model("B", mock.Mock())
        model_c = Model("C", mock.Mock())
        models = [model_a, model_b, model_c]

        # let's add some uncertainties to this
        shared_abc_1 = RealParameter("shared abc 1", 0, 1)
        shared_abc_2 = RealParameter("shared abc 2", 0, 1)
        shared_ab_1 = RealParameter("shared ab 1", 0, 1)
        shared_bc_1 = RealParameter("shared bc 1", 0, 1)
        a_1 = RealParameter("a 1", 0, 1)
        b_1 = RealParameter("b 1", 0, 1)
        model_a.uncertainties = [shared_abc_1, shared_abc_2, shared_ab_1, a_1]
        model_b.uncertainties = [
            shared_abc_1, shared_abc_2, shared_ab_1, shared_bc_1, b_1
        ]
        model_c.uncertainties = [shared_abc_1, shared_abc_2, shared_bc_1]

        # let's add an outcome to this
        outcome_shared = TimeSeriesOutcome("test")
        model_a.outcomes = [outcome_shared]
        model_b.outcomes = [outcome_shared]
        model_c.outcomes = [outcome_shared]

        for model in models:
            model.function.return_value = {
                a: [0.1] * 10
                for a in outcome_shared.variable_name
            }

        ensemble = ModelEnsemble()
        ensemble.model_structures = [model_a, model_b, model_c]
        ensemble.policies = [Policy('None')]

        ensemble.perform_experiments(10,
                                     uncertainty_union=True,
                                     outcome_union=True,
                                     reporting_interval=1)
        #         for model in models:
        #             model.function.assert_has_calls() TODO::

        ensemble.perform_experiments(10,
                                     uncertainty_union=True,
                                     outcome_union=False,
                                     reporting_interval=1)

        ensemble.perform_experiments(10,
                                     uncertainty_union=False,
                                     outcome_union=True,
                                     reporting_interval=1)

        ensemble.perform_experiments(10,
                                     uncertainty_union=False,
                                     outcome_union=False,
                                     reporting_interval=1)
        #
        #         self.assertRaises(ValueError, ensemble.perform_experiments,
        #                          10, uncertainty_union=False,
        #                          union_outcomes='Label')

        with mock.patch(
                'ema_workbench.em_framework.ensemble.MultiprocessingPool'
        ) as MockPool:
            ensemble.parallel = True

            mockedCallback = mock.Mock(DefaultCallback)
            mockedCallback.configure_mock(**{'i': 30})
            mockedCallback.return_value = mockedCallback

            ensemble.perform_experiments(10,
                                         uncertainty_union=True,
                                         outcome_union=True,
                                         reporting_interval=1,
                                         callback=mockedCallback)

            self.assertEqual(2, len(MockPool.mock_calls))

            MockPool.reset_mock()
            mockedCallback = mock.Mock(DefaultCallback)
            mockedCallback.configure_mock(**{'i': 10})
            mockedCallback.return_value = mockedCallback

            self.assertRaises(EMAError,
                              ensemble.perform_experiments,
                              10,
                              uncertainty_union=True,
                              outcome_union=True,
                              reporting_interval=1,
                              callback=mockedCallback)
Example #15
        RealParameter("ShipTbl2", -0.1, 0.1),
        RealParameter("ShipTbl3", -0.1, 0.1),
        RealParameter("collaboration", 1, 1.6),
        CategoricalParameter("land use scenarios", [
            "NoChange", "moreNature", "Deurbanization", "sustainableGrowth",
            "urbanizationDeurbanization", "urbanizationLargeAndFast",
            "urbanizationLargeSteady"
        ],
                             pff=True)
    ]

    waas_model.outcomes = [
        ScalarOutcome("Flood damage (Milj. Euro)"),
        ScalarOutcome("Number of casualties"),
        ScalarOutcome("Costs"),
        ScalarOutcome("Timing")
    ]

    n_scenarios = 500
    policies = [
        Policy(kwargs['name'], **kwargs['params']) for kwargs in policies
    ]

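    # run the experiments in parallel; the commented-out SequentialEvaluator
    # line below is handy for debugging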
    with MultiprocessingEvaluator(waas_model) as evaluator:
        # with SequentialEvaluator(waas_model) as evaluator:
        results = perform_experiments(waas_model,
                                      n_scenarios,
                                      policies,
                                      evaluator=evaluator)

    save_results(results, './data/partial factorial over pathways.tar.gz')