Example #1
   def test_multiple_models(self):
       """
       Test running experiments with two different PySD models in a
       single call to perform_experiments.

       Fixes the duplicated word in the original docstring and computes
       the directory of this test module once instead of twice.
       """
       # resolve the directory containing this test file once and reuse it
       directory = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))

       mdl_file = os.path.join(directory, '../models/Sales_Agent_Market_Building_Dynamics.mdl')
       market_model = PysdModel(mdl_file=mdl_file)
       market_model.uncertainties = [RealParameter('Startup Subsidy', 0, 3),
                                     RealParameter('Startup Subsidy Length', 0, 10)]
       market_model.outcomes = [TimeSeriesOutcome('Still Employed')]

       mdl_file = os.path.join(directory, '../models/Sales_Agent_Motivation_Dynamics.mdl')
       motivation_model = PysdModel(mdl_file=mdl_file)
       motivation_model.uncertainties = [RealParameter('Startup Subsidy', 0, 3),
                                         RealParameter('Startup Subsidy Length', 0, 10)]
       motivation_model.outcomes = [TimeSeriesOutcome('Still Employed')]

       # run 5 experiments over the ensemble of both models
       models = [market_model, motivation_model]
       perform_experiments(models, 5)
Example #2
   def test_multiple_models(self):
       """Run one set of experiments spanning two distinct PySD models."""
       here = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))

       def build(rel_path):
           # construct a PysdModel for a .mdl file located relative to this test
           mdl = PysdModel(mdl_file=os.path.join(here, rel_path))
           mdl.uncertainties = [RealParameter('Startup Subsidy', 0, 3),
                                RealParameter('Startup Subsidy Length', 0, 10)]
           mdl.outcomes = [TimeSeriesOutcome('Still Employed')]
           return mdl

       market_model = build('../models/Sales_Agent_Market_Building_Dynamics.mdl')
       motivation_model = build('../models/Sales_Agent_Motivation_Dynamics.mdl')

       # set both models on the ensemble and run 5 experiments
       perform_experiments([market_model, motivation_model], 5)
Example #3
 def test_running_lookup_uncertainties(self):
     """
     Run a set of 10 experiments against the lookup test model.

     The lookup uncertainty replaces itself with several derived
     uncertainties; this exercises that replacement end to end by
     actually performing experiments.
     """
     # the Vensim DLL is Windows-only, so silently skip on other platforms
     if os.name != 'nt':
         return

     model = LookupTestModel(r'../models/', 'lookupTestModel')
     perform_experiments(model, 10)
Example #4
    def test_running_lookup_uncertainties(self):
        '''
        Run 10 experiments with the lookup test model.

        The lookup uncertainty replaces itself with a set of derived
        uncertainties; performing experiments exercises that replacement
        end to end.

        NOTE(review): the original docstring claimed the experiments
        array is asserted on, but no assertions are present in this
        body — confirm whether checks were lost or are done elsewhere.
        '''
        # the Vensim connector requires Windows; skip silently elsewhere
        if os.name != 'nt':
            return

        model = LookupTestModel(r'../models/', 'lookupTestModel')

        #model.step = 4 #reduce data to be stored
        perform_experiments(model, 10)
Example #5
    def test_parallel_experiment(self):
        """Run a small Teacup experiment set through a multiprocessing evaluator."""
        # build an absolute path to the Teacup model next to this test file
        here = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
        mdl_file = os.path.join(here, '../models/Teacup.mdl')

        model = PysdModel(mdl_file=mdl_file)
        model.uncertainties = [RealParameter('Room Temperature', 33, 120)]
        model.outcomes = [TimeSeriesOutcome('Teacup Temperature')]

        # two worker processes; the context manager tears the pool down
        with MultiprocessingEvaluator(model, 2) as evaluator:
            perform_experiments(model, 5, evaluator=evaluator)
Example #6
    def test_parallel_experiment(self):
        """
        Run 5 experiments on the Teacup PySD model using a
        MultiprocessingEvaluator with 2 worker processes.
        """
        # locate the .mdl file relative to this test module's directory
        relative_path_to_file = '../models/Teacup.mdl'
        directory = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
        mdl_file = os.path.join(directory, relative_path_to_file)
         
        model = PysdModel(mdl_file=mdl_file)
         
        # single uncertainty and single time-series outcome
        model.uncertainties = [RealParameter('Room Temperature', 33, 120)]
        model.outcomes = [TimeSeriesOutcome('Teacup Temperature')]

        # evaluator pool is cleaned up by the context manager on exit
        with MultiprocessingEvaluator(model, 2) as evaluator:
            perform_experiments(model, 5, evaluator=evaluator)
Example #7
 def test_vensim_model(self):
     """Instantiate the Vensim example model, run it, and check the outputs."""
     model = VensimExampleModel(r'../models', "simpleModel")

     nr_runs = 10
     experiments, outcomes = perform_experiments(model, nr_runs)

     # one experiment row per run; outcomes must contain TIME and the
     # model's first declared outcome
     self.assertEqual(experiments.shape[0], nr_runs)
     self.assertIn('TIME', outcomes.keys())
     self.assertIn(model.outcomes[0].name, outcomes.keys())
Example #8
    def test_vensim_model(self):
        """Build the simple Vensim example model and verify a basic run."""
        # working directory containing the Vensim model file
        working_directory = r'../models'
        model = VensimExampleModel(working_directory, "simpleModel")

        n = 10
        experiments, outcomes = perform_experiments(model, n)

        # expect exactly one row of experiment inputs per run
        self.assertEqual(experiments.shape[0], n)
        # TIME and the first declared outcome must be present in the results
        self.assertIn('TIME', outcomes.keys())
        self.assertIn(model.outcomes[0].name, outcomes.keys())
Example #9
        RealParameter("initial annual supply", 100000, 120000),
        RealParameter("initial in goods", 1500000, 2500000),
        RealParameter("average construction time extraction capacity", 1, 10),
        RealParameter("average lifetime extraction capacity", 20, 40),
        RealParameter("average lifetime recycling capacity", 20, 40),
        RealParameter("initial extraction capacity under construction", 5000,
                      20000),
        RealParameter("initial recycling capacity under construction", 5000,
                      20000),
        RealParameter("initial recycling infrastructure", 5000, 20000),

        # order of delay
        CategoricalParameter("order in goods delay", (1, 4, 10, 1000)),
        CategoricalParameter("order recycling capacity delay", (1, 4, 10)),
        CategoricalParameter("order extraction capacity delay", (1, 4, 10)),

        # uncertainties associated with lookups
        RealParameter("lookup shortage loc", 20, 50),
        RealParameter("lookup shortage speed", 1, 5),
        RealParameter("lookup price substitute speed", 0.1, 0.5),
        RealParameter("lookup price substitute begin", 3, 7),
        RealParameter("lookup price substitute end", 15, 25),
        RealParameter("lookup returns to scale speed", 0.01, 0.2),
        RealParameter("lookup returns to scale scale", 0.3, 0.7),
        RealParameter("lookup approximated learning speed", 0.01, 0.2),
        RealParameter("lookup approximated learning scale", 0.3, 0.6),
        RealParameter("lookup approximated learning start", 30, 60)
    ]

    results = perform_experiments(model, 50)
Example #10
from ema_workbench.connectors.netlogo import NetLogoModel

from ema_workbench.em_framework import (TimeSeriesOutcome, RealParameter,
                                        perform_experiments)
from ema_workbench.util import ema_logging
from ema_workbench.analysis import plotting, plotting_util

if __name__ == '__main__':
    # show debug-level workbench messages on stderr
    ema_logging.log_to_stderr(ema_logging.DEBUG)

    predprey = NetLogoModel('predprey',
                            wd="./models/predatorPreyNetlogo",
                            model_file="Wolf Sheep Predation.nlogo")
    predprey.run_length = 100

    # parameter ranges sampled for the experiments
    predprey.uncertainties = [
        RealParameter("grass-regrowth-time", 1, 99),
        RealParameter("initial-number-sheep", 1, 200),
        RealParameter("initial-number-wolves", 1, 200),
        RealParameter("sheep-reproduce", 1, 20),
        RealParameter("wolf-reproduce", 1, 20),
    ]

    # time series collected for every run
    predprey.outcomes = [
        TimeSeriesOutcome('sheep'),
        TimeSeriesOutcome('wolves'),
        TimeSeriesOutcome('grass'),
    ]

    # perform 100 experiments in parallel
    n = 100
    results = perform_experiments(predprey, n, parallel=True)
        Constant('nsamples', 100),
        Constant('timehorizon', lake_model.time_horizon)
    ]

    scenarios = ['Ref', 77, 96, 130, 181]
    random_scenarios = [81, 289, 391, 257]
    policies = []

    # Build one Policy per brushed solution for each random scenario.
    for s in random_scenarios:
        # pd.DataFrame.from_csv was deprecated in pandas 0.21 and removed in
        # 1.0; pd.read_csv with index_col=0 and parse_dates=True reproduces
        # its default behaviour.
        solutions = pd.read_csv(
            r'../data/brushed_random_nfe10000_sc{}.csv'.format(s),
            index_col=0, parse_dates=True)
        # checked if there are duplicates: No.
        for index, row in solutions.iterrows():
            name = str(s) + '_' + str(index)
            # levers are in the first columns of the solutions
            decision = {
                lever.name: row[lever.name]
                for lever in lake_model.levers
            }
            policies.append(Policy(name=name, **decision))

    # run 1000 scenarios against every candidate policy and archive results
    results = perform_experiments(lake_model, 1000, policies, parallel=True)
    save_results(
        results,
        r'../CandidateTesting_1000scenarios_revisionRandom_nfe10000.tar.gz')
Example #12
                      1, 10),
        RealParameter("average lifetime extraction capacity", 20, 40),
        RealParameter("average lifetime recycling capacity", 20, 40),
        RealParameter("initial extraction capacity under construction",
                      5000, 20000),
        RealParameter("initial recycling capacity under construction",
                      5000, 20000),
        RealParameter("initial recycling infrastructure", 5000, 20000),

        # order of delay
        CategoricalParameter("order in goods delay", (1, 4, 10, 1000)),
        CategoricalParameter("order recycling capacity delay", (1, 4, 10)),
        CategoricalParameter("order extraction capacity delay", (1, 4, 10)),

        # uncertainties associated with lookups
        RealParameter("lookup shortage loc", 20, 50),
        RealParameter("lookup shortage speed", 1, 5),

        RealParameter("lookup price substitute speed", 0.1, 0.5),
        RealParameter("lookup price substitute begin", 3, 7),
        RealParameter("lookup price substitute end", 15, 25),

        RealParameter("lookup returns to scale speed", 0.01, 0.2),
        RealParameter("lookup returns to scale scale", 0.3, 0.7),

        RealParameter("lookup approximated learning speed", 0.01, 0.2),
        RealParameter("lookup approximated learning scale", 0.3, 0.6),
        RealParameter("lookup approximated learning start", 30, 60)]

    results = perform_experiments(model, 50)