Code example #1
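This test checks that ExperimentRunner.run_experiment passes the scenario and policy on to the model's run_model, resets the model afterwards, wraps arbitrary exceptions from the model in an EMAError, and handles a CaseError without raising.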
    def test_run_experiment(self):
        mockMSI = mock.Mock(spec=Model)
        mockMSI.name = 'test'
        mockMSI.uncertainties = [RealParameter("a", 0, 10),
                                 RealParameter("b", 0, 10)]
        
        msis = NamedObjectMap(AbstractModel)
        msis['test'] = mockMSI

        runner = ExperimentRunner(msis)
        
        experiment = Experiment('test',
                                mockMSI.name,
                                Policy('none'),  
                                Scenario(a=1, b=2), 0)
        
        runner.run_experiment(experiment)
        
        sc, p = mockMSI.run_model.call_args[0]
        self.assertEqual(sc.name, experiment.scenario.name)
        self.assertEqual(p, experiment.policy)
        
        mockMSI.reset_model.assert_called_once_with()

        # assert that exceptions raised by the model are wrapped in an EMAError
        mockMSI = mock.Mock(spec=Model)
        mockMSI.name = 'test'
        mockMSI.run_model.side_effect = Exception('some exception')
        msis = NamedObjectMap(AbstractModel)
        msis['test'] = mockMSI
        
        runner = ExperimentRunner(msis)
    
        experiment = Experiment('test', mockMSI.name, Policy('none'),
                                Scenario(a=1, b=2), 0)

        with self.assertRaises(EMAError):
            runner.run_experiment(experiment)
           
        # assert handling of case error
        mockMSI = mock.Mock(spec=Model)
        mockMSI.name = 'test'
        mockMSI.run_model.side_effect = CaseError("message", {})
        msis = NamedObjectMap(AbstractModel)
        msis['test'] = mockMSI
        runner = ExperimentRunner(msis)
    
        experiment = Experiment('test', mockMSI.name, Policy('none'),
                                Scenario(a=1, b=2), 0)

        runner.run_experiment(experiment)
Code example #2
File: test_model.py  Project: anukat2015/EMAworkbench
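This test exercises Model.run_model with a plain Python function: only declared uncertainties are passed to the function, and an uncertainty with a variable_name list is translated into multiple scenario entries, including for multivalue categorical parameters.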
    def test_run_model(self):
        model_name = 'modelname'

        function = mock.Mock()

        model = Model(model_name, function)
        model.uncertainties = [RealParameter('a', 0, 1)]
        model.run_model(Scenario(**{'a': 0.1, 'b': 1}), Policy('test'))
        function.assert_called_once_with(a=0.1)

        # test complete translation of scenario

        model = Model(model_name, function)
        model.uncertainties = [
            RealParameter('a', 0, 1, variable_name=['a', 'b'])
        ]

        scenario = Scenario(**{'a': 0.1})
        model.run_model(scenario, Policy('test'))

        self.assertIn('a', scenario.keys())
        self.assertIn('b', scenario.keys())

        model = Model(model_name, function)
        cats = [
            Category('some name', [1, 2], multivalue=True),
            Category('some other name', [3, 4], multivalue=True)
        ]
        model.uncertainties = [
            CategoricalParameter('a', cats, variable_name=['a', 'b'])
        ]

        scenario = Scenario(**{'a': 'some name'})
        model.run_model(scenario, Policy('test'))

        self.assertIn('a', scenario.keys())
        self.assertIn('b', scenario.keys())
        self.assertEqual(scenario['a'], 1)
        self.assertEqual(scenario['b'], 2)

        scenario = Scenario(**{'a': 'some other name'})
        model.run_model(scenario, Policy('test'))

        self.assertIn('a', scenario.keys())
        self.assertIn('b', scenario.keys())
        self.assertEqual(scenario['a'], 3)
        self.assertEqual(scenario['b'], 4)
Code example #3
File: test_model.py  Project: eebart/EMAworkbench
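A file-based variant of the previous test: the os module used inside ema_workbench.em_framework.model is mocked so that the model file lookup does not touch the file system, and the test verifies that the policy is attached to the model.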
    def test_run_model(self):
        model_name = 'modelname'
        model_file = 'model_file'

        # the patched object stands in for the os module inside
        # ema_workbench.em_framework.model, so the check on the model file
        # succeeds (assuming the model consults os.path.isfile)
        with mock.patch('ema_workbench.em_framework.model.os') as patched_os:
            patched_os.path.isfile.return_value = True
            model = FileModelTest(model_name, '.', model_file)
            model.run_model(Scenario(a=1), Policy('test', b=2))
            self.assertEqual(model.policy.name, 'test')
Code example #4
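This test feeds results of different shapes (scalar, time series, 2-D array) to a DefaultCallback and checks the shape of the stored outcome arrays, that a 3-D result raises an EMAError, and that an unknown outcome name is only reported through the debug log.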
    def test_store_results(self):
        nr_experiments = 3
        uncs = [RealParameter("a", 0, 1),
               RealParameter("b", 0, 1)]
        outcomes = [TimeSeriesOutcome("test")]
        model = NamedObject('test')

        experiment = Experiment(0, model, Policy('policy'), Scenario(a=1, b=0), 0)
     
        # case 1 scalar shape = (1)
        callback = DefaultCallback(uncs, [], outcomes, 
                                   nr_experiments=nr_experiments)
        result = {outcomes[0].name: 1}
        callback(experiment, result)
         
        _, out = callback.get_results()
        self.assertIn(outcomes[0].name, out.keys())
        self.assertEqual(out[outcomes[0].name].shape, (3,))
     
        # case 2 time series shape = (1, nr_time_steps)
        callback = DefaultCallback(uncs, [], outcomes, 
                                   nr_experiments=nr_experiments)
        result = {outcomes[0].name: np.random.rand(10)}
        callback(experiment, result)
          
        _, out = callback.get_results()
        self.assertIn(outcomes[0].name, out.keys())
        self.assertEqual(out[outcomes[0].name].shape, (3,10))

        # case 3 maps etc. shape = (x,y)
        callback = DefaultCallback(uncs, [], outcomes, 
                                   nr_experiments=nr_experiments)
        result = {outcomes[0].name: np.random.rand(2,2)}
        callback(experiment,result)
          
        _, out = callback.get_results()
        self.assertIn(outcomes[0].name, out.keys())
        self.assertEqual(out[outcomes[0].name].shape, (3,2,2))

        # case 4 assert raises EMAError
        callback = DefaultCallback(uncs, [], outcomes, 
                                   nr_experiments=nr_experiments)
        result = {outcomes[0].name: np.random.rand(2,2,2)}
        self.assertRaises(EMAError, callback, experiment, result)
        
        # KeyError
        with mock.patch('ema_workbench.util.ema_logging.debug') as mocked_logging:
            callback = DefaultCallback(uncs, [], outcomes, 
                           nr_experiments=nr_experiments)
            result = {'incorrect': np.random.rand(2,)}
            callback(experiment, result)
            
            for outcome in outcomes:
                mocked_logging.assert_called_with("%s not specified as outcome in msi" % outcome.name)
Code example #5
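A near-identical version of the previous test that uses Case instead of Experiment and omits the logging check.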
    def test_store_results(self):
        nr_experiments = 3
        uncs = [RealParameter("a", 0, 1), RealParameter("b", 0, 1)]
        outcomes = [TimeSeriesOutcome("test")]
        model = NamedObject('test')

        experiment = Case(0, model, Policy('policy'), Scenario(a=1, b=0), 0)

        # case 1 scalar shape = (1)
        callback = DefaultCallback(uncs, [],
                                   outcomes,
                                   nr_experiments=nr_experiments)
        model_outcomes = {outcomes[0].name: 1}
        callback(experiment, model_outcomes)

        _, out = callback.get_results()

        self.assertIn(outcomes[0].name, out.keys())
        self.assertEqual(out[outcomes[0].name].shape, (3, ))

        # case 2 time series shape = (1, nr_time_steps)
        callback = DefaultCallback(uncs, [],
                                   outcomes,
                                   nr_experiments=nr_experiments)
        model_outcomes = {outcomes[0].name: np.random.rand(10)}
        callback(experiment, model_outcomes)

        _, out = callback.get_results()
        self.assertIn(outcomes[0].name, out.keys())
        self.assertEqual(out[outcomes[0].name].shape, (3, 10))

        # case 3 maps etc. shape = (x,y)
        callback = DefaultCallback(uncs, [],
                                   outcomes,
                                   nr_experiments=nr_experiments)
        model_outcomes = {outcomes[0].name: np.random.rand(2, 2)}
        callback(experiment, model_outcomes)

        _, out = callback.get_results()
        self.assertIn(outcomes[0].name, out.keys())
        self.assertEqual(out[outcomes[0].name].shape, (3, 2, 2))

        # case 4 assert raises EMAError
        callback = DefaultCallback(uncs, [],
                                   outcomes,
                                   nr_experiments=nr_experiments)
        model_outcomes = {outcomes[0].name: np.random.rand(2, 2, 2)}
        self.assertRaises(EMAError, callback, experiment, model_outcomes)
Code example #6
File: test_model.py  Project: marcjaxa/EMAworkbench
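Another version of test_run_model in which multivalue=True is passed to CategoricalParameter rather than to the individual Category objects, and the scenarios are built from cats[i].value.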
    def test_run_model(self):
        model_name = 'modelname'

        function = mock.Mock()

        model = Model(model_name, function)
        model.uncertainties = [RealParameter('a', 0, 1)]
        model.run_model(Scenario(**{'a': 0.1, 'b': 1}), Policy('test'))
        function.assert_called_once_with(a=0.1)

        # test complete translation of scenario

        model = Model(model_name, function)
        model.uncertainties = [RealParameter('a', 0, 1,
                                             variable_name=['a', 'b'])]

        scenario = Scenario(**{'a': 0.1})
        model.run_model(scenario, Policy('test'))

        self.assertIn('a', scenario.keys())
        self.assertIn('b', scenario.keys())

        model = Model(model_name, function)
        cats = [Category('some name', [1, 2]),
                Category('some other name', [3, 4])]
        model.uncertainties = [CategoricalParameter('a', cats,
                                                    variable_name=['a', 'b'],
                                                    multivalue=True)]

        scenario = Scenario(**{'a': cats[0].value})
        model.run_model(scenario, Policy('test'))

        self.assertIn('a', scenario.keys())
        self.assertIn('b', scenario.keys())
        self.assertEqual(scenario['a'], 1)
        self.assertEqual(scenario['b'], 2)

        scenario = Scenario(**{'a': cats[1].value})
        model.run_model(scenario, Policy('test'))

        self.assertIn('a', scenario.keys())
        self.assertIn('b', scenario.keys())
        self.assertEqual(scenario['a'], 3)
        self.assertEqual(scenario['b'], 4)
Code example #7
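This test stores a single case through DefaultCallback and compares every field of the returned experiments array (a structured NumPy array read via rf.get_names) against the designed case, both without and with levers.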
    def test_store_cases(self):
        nr_experiments = 3
        uncs = [
            RealParameter("a", 0, 1),
            RealParameter("b", 0, 1),
            CategoricalParameter('c', [0, 1, 2]),
            IntegerParameter("d", 0, 1)
        ]
        outcomes = [TimeSeriesOutcome("test")]
        case = {unc.name: random.random() for unc in uncs}
        case["c"] = int(round(case["c"] * 2))
        case["d"] = int(round(case["d"]))

        model = NamedObject('test')
        policy = Policy('policy')
        scenario = Scenario(**case)
        experiment = Experiment(0, model, policy, scenario, 0)

        callback = DefaultCallback(uncs, [],
                                   outcomes,
                                   nr_experiments=nr_experiments,
                                   reporting_interval=1)
        result = {outcomes[0].name: 1}
        callback(experiment, result)

        experiments, _ = callback.get_results()
        design = case
        design['policy'] = policy.name
        design['model'] = model.name

        names = rf.get_names(experiments.dtype)
        for name in names:
            self.assertEqual(experiments[name][0], design[name])

        # with levers
        nr_experiments = 3
        uncs = [RealParameter("a", 0, 1), RealParameter("b", 0, 1)]
        levers = [RealParameter("c", 0, 1), RealParameter("d", 0, 1)]
        outcomes = [TimeSeriesOutcome("test")]
        case = {unc.name: random.random() for unc in uncs}

        model = NamedObject('test')
        policy = Policy('policy', c=1, d=1)
        scenario = Scenario(**case)
        experiment = Experiment(0, model, policy, scenario, 0)

        callback = DefaultCallback(uncs,
                                   levers,
                                   outcomes,
                                   nr_experiments=nr_experiments,
                                   reporting_interval=1)
        result = {outcomes[0].name: 1}
        callback(experiment, result)

        experiments, _ = callback.get_results()
        design = case
        design['c'] = 1
        design['d'] = 1
        design['policy'] = policy.name
        design['model'] = model.name

        names = rf.get_names(experiments.dtype)

        print(experiments[0])

        for name in names:
            self.assertEqual(experiments[name][0], design[name])
Code example #8
File: test_callback.py  Project: jpn--/EMAworkbench
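A variant of the previous test in which the experiments come back as a DataFrame-like object, so the columns are read from experiments.columns and the design additionally records the scenario name.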
    def test_store_cases(self):
        nr_experiments = 3
        uncs = [
            RealParameter("a", 0, 1),
            RealParameter("b", 0, 1),
            CategoricalParameter('c', [0, 1, 2]),
            IntegerParameter("d", 0, 1)
        ]
        outcomes = [TimeSeriesOutcome("test")]
        case = {unc.name: random.random() for unc in uncs}
        case["c"] = int(round(case["c"] * 2))
        case["d"] = int(round(case["d"]))

        model = NamedObject('test')
        policy = Policy('policy')
        scenario = Scenario(**case)
        experiment = Case(0, model.name, policy, scenario, 0)

        callback = DefaultCallback(uncs, [],
                                   outcomes,
                                   nr_experiments=nr_experiments,
                                   reporting_interval=1)
        model_outcomes = {outcomes[0].name: 1}
        callback(experiment, model_outcomes)

        experiments, _ = callback.get_results()
        design = case
        design['policy'] = policy.name
        design['model'] = model.name
        design['scenario'] = scenario.name

        names = experiments.columns.values.tolist()
        for name in names:
            entry_a = experiments[name][0]
            entry_b = design[name]

            self.assertEqual(entry_a, entry_b, "failed for " + name)

        # with levers
        nr_experiments = 3
        uncs = [RealParameter("a", 0, 1), RealParameter("b", 0, 1)]
        levers = [RealParameter("c", 0, 1), RealParameter("d", 0, 1)]
        outcomes = [TimeSeriesOutcome("test")]
        case = {unc.name: random.random() for unc in uncs}

        model = NamedObject('test')
        policy = Policy('policy', c=1, d=1)
        scenario = Scenario(**case)
        experiment = Case(0, model.name, policy, scenario, 0)

        callback = DefaultCallback(uncs,
                                   levers,
                                   outcomes,
                                   nr_experiments=nr_experiments,
                                   reporting_interval=1)
        model_outcomes = {outcomes[0].name: 1}
        callback(experiment, model_outcomes)

        experiments, _ = callback.get_results()
        design = case
        design['c'] = 1
        design['d'] = 1
        design['policy'] = policy.name
        design['model'] = model.name
        design['scenario'] = scenario.name

        names = experiments.columns.values.tolist()
        for name in names:
            self.assertEqual(experiments[name][0], design[name])
Code example #9
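This test drives the multiprocessing worker function with mocked queues: a None task acts as a sentinel, EOFError and IOError terminate the worker, and experiments are then processed on a background thread while the mocked runner raises an EMAError, raises a generic exception, or succeeds.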
    def test_worker(self, mocked_logging, mocked_runner):
        
        if sys.version_info[0] < 3:
            mocked_inqueue = mock.Mock(multiprocessing.queues.SimpleQueue())
            mocked_outqueue = mock.Mock(multiprocessing.queues.SimpleQueue())
        else:
            mocked_inqueue = mock.Mock(multiprocessing.SimpleQueue())
            mocked_outqueue = mock.Mock(multiprocessing.SimpleQueue())

        function = mock.Mock()
        mockMSI = MockMSI("test", function)
        
        # task = None
        mocked_inqueue.get.return_value = None
        ema_parallel_multiprocessing.worker(mocked_inqueue, 
                                            mocked_outqueue, 
                                            [mockMSI])
        mocked_logging.debug.assert_called_with('worker got sentinel -- exiting')
        
        # EOFError, IOError
        mocked_inqueue.get.side_effect = EOFError
        ema_parallel_multiprocessing.worker(mocked_inqueue, 
                                            mocked_outqueue, 
                                            [mockMSI])
        mocked_logging.debug.assert_called_with('worker got EOFError or IOError -- exiting')
 
        mocked_inqueue.get.side_effect = IOError
        ema_parallel_multiprocessing.worker(mocked_inqueue, 
                                            mocked_outqueue, 
                                            [mockMSI])
        mocked_logging.debug.assert_called_with('worker got EOFError or IOError -- exiting')
         
        # task = tuple of _, experiment dict
        #     - success
        #     - ema error
        #     - exception
 
        # setup of test, we get a normal case 
        experiment = Experiment('try', mockMSI.name, Policy('none'), 
                                Scenario(a=1), 0)
        mocked_inqueue.get.return_value = (0, experiment)
        mocked_inqueue.get.side_effect = None        
         
        # running experiment raises EMAError
        mocked_runner().run_experiment.side_effect = EMAError
        feeder_thread = threading.Thread(target=ema_parallel_multiprocessing.worker,
                                         args=(mocked_inqueue,
                                               mocked_outqueue,
                                               [mockMSI]))
        feeder_thread.daemon = True
        feeder_thread.start()
        time.sleep(0.001) # to avoid race conditions
        mocked_inqueue.get.return_value = None
  
        mocked_runner().run_experiment.assert_called_with(experiment)
         
        # reset mocks
        mocked_outqueue.reset_mock()
        mocked_runner().reset_mock()
          
        # running experiment raises a generic exception
        experiment = Experiment('try', mockMSI.name, Policy('none'), 
                                Scenario(a=1), 0)
        mocked_inqueue.get.return_value = (0, experiment)
        mocked_inqueue.get.side_effect = None   
          
        mocked_runner().run_experiment.side_effect = Exception
        feeder_thread = threading.Thread(target=ema_parallel_multiprocessing.worker,
                                         args=(mocked_inqueue,
                                               mocked_outqueue,
                                               [mockMSI]))
        feeder_thread.daemon = True
        feeder_thread.start()
        time.sleep(0.001) # to avoid race conditions
        mocked_inqueue.get.return_value = None
  
        mocked_runner().run_experiment.assert_called_with(experiment)
#         mocked_outqueue.put.assert_called_once()
          
        # reset mocks
        mocked_outqueue.reset_mock()
        mocked_runner().reset_mock()
  
        # running experiment works fine
        experiment = Experiment('try', mockMSI.name, Policy('none'), 
                                Scenario(a=1), 0)
        mocked_inqueue.get.return_value = (0, experiment)
        mocked_inqueue.get.side_effect = None   
        mocked_runner().run_experiment.side_effect = None
          
        feeder_thread = threading.Thread(target=ema_parallel_multiprocessing.worker,
                                         args=(mocked_inqueue,
                                               mocked_outqueue,
                                               [mockMSI]))
        feeder_thread.daemon = True
        feeder_thread.start()
        time.sleep(0.001) # to avoid race conditions
        mocked_inqueue.get.return_value = None
  
        mocked_runner().run_experiment.assert_called_with(experiment)
#         mocked_outqueue.put.assert_called_once()
          
        # reset mocks
        mocked_outqueue.reset_mock()
        mocked_runner().reset_mock()
Code example #10
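A fragment of the lake problem optimization example: after the parameters, outcomes, and constants are set up, an optional reference scenario is defined and MultiprocessingEvaluator.optimize searches over the levers with an epsilon value per outcome.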
                         RealParameter("r1", 0, 2), 
                         RealParameter("r2", 0, 2), 
                         RealParameter("w1", 0, 1)
                         ]
    
    # specify outcomes
    lake_model.outcomes = [ScalarOutcome('max_P', 
                                         kind=ScalarOutcome.MINIMIZE),  # @UndefinedVariable
                           ScalarOutcome('utility', 
                                         kind=ScalarOutcome.MAXIMIZE),  # @UndefinedVariable
                           ScalarOutcome('inertia', 
                                         kind=ScalarOutcome.MINIMIZE),  # @UndefinedVariable
                           ScalarOutcome('reliability', 
                                         kind=ScalarOutcome.MAXIMIZE)]  # @UndefinedVariable
    
    # override some of the defaults of the model
    lake_model.constants = [Constant('alpha', 0.41),
                            Constant('nsamples', 100),
                            Constant('myears', 100)]

    # reference is optional, but can be used to implement search for
    # various user specified scenarios along the lines suggested by
    # Watson and Kasprzyk (2017) 
    reference = Scenario('reference', b=0.4, q=2, mean=0.02, stdev=0.01)
    
    with MultiprocessingEvaluator(lake_model) as evaluator:
        evaluator.optimize(searchover='levers', nfe=1000,
                           epsilons=[0.1] * len(lake_model.outcomes),
                           reference=reference)