def test_run_model(self):
    wd = r"../models"
    model_file = r"/Wolf Sheep Predation.nlogo"

    model = NetLogoModel("predPreyNetlogo", wd=wd, model_file=model_file)
    model.run_length = 1000

    model.uncertainties = [RealParameter("grass-regrowth-time", 10, 100),
                           CategoricalParameter("grass?", ("true", "false"))]
    model.outcomes = [TimeSeriesOutcome('sheep'),
                      TimeSeriesOutcome('wolves')]

    model.model_init(Policy('no policy'))

    case = {"grass-regrowth-time": 35,
            "grass?": "true"}

    model.run_model(case)
    _ = model.retrieve_output()

    model.cleanup()
def test_multiple_models(self):
    """Test running with two different pysd models."""
    relative_path_to_file = '../models/Sales_Agent_Market_Building_Dynamics.mdl'
    directory = os.path.dirname(os.path.abspath(
        inspect.getfile(inspect.currentframe())))
    mdl_file = os.path.join(directory, relative_path_to_file)

    market_model = PysdModel(mdl_file=mdl_file)
    market_model.uncertainties = [
        RealParameter('Startup Subsidy', 0, 3),
        RealParameter('Startup Subsidy Length', 0, 10)]
    market_model.outcomes = [TimeSeriesOutcome('Still Employed')]

    relative_path_to_file = '../models/Sales_Agent_Motivation_Dynamics.mdl'
    directory = os.path.dirname(os.path.abspath(
        inspect.getfile(inspect.currentframe())))
    mdl_file = os.path.join(directory, relative_path_to_file)

    motivation_model = PysdModel(mdl_file=mdl_file)
    motivation_model.uncertainties = [
        RealParameter('Startup Subsidy', 0, 3),
        RealParameter('Startup Subsidy Length', 0, 10)]
    motivation_model.outcomes = [TimeSeriesOutcome('Still Employed')]

    models = [market_model, motivation_model]
    perform_experiments(models, 5)
def __init__(self, working_directory, name):
    self.model_file = r'\lookup_model.vpm'
    super(LookupTestModel, self).__init__(working_directory, name)
    # vensim.load_model(self.modelFile)

    self.outcomes = [TimeSeriesOutcome('flow1')]

    # Each lookup uncertainty added to the uncertainties list has to be
    # removed again before cases are generated. This cannot be done in the
    # constructor of the lookup itself, so _delete_lookup_uncertainties()
    # is called at the end of this method.
    self.uncertainties.append(
        LookupUncertainty('hearne2',
                          [(0, 0.5), (-0.5, 0), (0, 0.75), (0.75, 1.5),
                           (0.8, 1.2), (0.8, 1.2)],
                          "TF", self, 0, 2))
    # self.uncertainties.pop()
    self.uncertainties.append(
        LookupUncertainty('approximation',
                          [(0, 4), (1, 5), (1, 5), (0, 2), (0, 2)],
                          "TF2", self, 0, 10))
    # self.uncertainties.pop()
    # self.uncertainties.append(ParameterUncertainty((0.02, 0.08), "rate1"))
    # self.uncertainties.append(ParameterUncertainty((0.02, 0.08), "rate2"))
    self.uncertainties.append(
        LookupUncertainty('categories',
                          [[(0.0, 0.05), (0.25, 0.15), (0.5, 0.4),
                            (0.75, 1), (1, 1.25)],
                           [(0.0, 0.1), (0.25, 0.25), (0.5, 0.75),
                            (1, 1.25)],
                           [(0.0, 0.0), (0.1, 0.2), (0.3, 0.6),
                            (0.6, 0.9), (1, 1.25)]],
                          "TF3", self, 0, 2))
    # self.uncertainties.pop()

    self._delete_lookup_uncertainties()
class VensimExampleModel(VensimModel):
    '''
    Example of the simplest case of doing EMA on a Vensim model.
    '''
    # note that this reference to the model should be relative;
    # this relative path will be combined with the working directory
    model_file = r'\model.vpm'

    # specify outcomes
    outcomes = [TimeSeriesOutcome('a')]

    # specify your uncertainties
    uncertainties = [RealParameter("x11", 0, 2.5),
                     RealParameter("x12", -2.5, 2.5)]
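
# --- Hedged usage sketch (not part of the original example) ---
# Shows how the class above could be driven once defined. The
# (working_directory, name) constructor signature, the './models/vensim'
# path, and the availability of the Vensim DLL are assumptions, not
# something this file specifies.
if __name__ == '__main__':
    from ema_workbench.em_framework import perform_experiments
    from ema_workbench.util import ema_logging

    ema_logging.log_to_stderr(ema_logging.INFO)

    # assumed old-style signature: (working_directory, name)
    model = VensimExampleModel(r'./models/vensim', "simpleModel")

    # sample 100 experiments over x11 and x12 and run them
    results = perform_experiments(model, 100)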
def test_parallel_experiment(self):
    """Test running an experiment in parallel."""
    relative_path_to_file = '../models/Teacup.mdl'
    directory = os.path.dirname(os.path.abspath(
        inspect.getfile(inspect.currentframe())))
    mdl_file = os.path.join(directory, relative_path_to_file)

    model = PysdModel(mdl_file=mdl_file)

    model.uncertainties = [RealParameter('Room Temperature', 33, 120)]
    model.outcomes = [TimeSeriesOutcome('Teacup Temperature')]

    with MultiprocessingEvaluator(model, 2) as evaluator:
        perform_experiments(model, 5, evaluator=evaluator)
def test_sequential_experiment(self):
    """Test running an experiment sequentially."""
    directory = os.path.dirname(__file__)
    relative_path_to_file = '../models/Teacup.mdl'
    mdl_file = os.path.join(directory, relative_path_to_file)

    model = PysdModel(mdl_file=mdl_file)

    model.uncertainties = [RealParameter('Room Temperature', 33, 120)]
    model.outcomes = [TimeSeriesOutcome('Teacup Temperature')]

    with SequentialEvaluator(model) as evaluator:
        evaluator.perform_experiments(5)
def test_parallel_experiment(self):
    """Test running an experiment in parallel."""
    relative_path_to_file = '../models/Teacup.mdl'
    directory = os.path.dirname(os.path.abspath(
        inspect.getfile(inspect.currentframe())))
    mdl_file = os.path.join(directory, relative_path_to_file)

    model = PysdModel(mdl_file=mdl_file)

    model.uncertainties = [RealParameter('Room Temperature', 33, 120)]
    model.outcomes = [TimeSeriesOutcome('Teacup Temperature')]

    ensemble = ModelEnsemble()  # instantiate an ensemble
    ensemble.model_structures = model  # set the model on the ensemble
    ensemble.parallel = True
    ensemble.perform_experiments(5)
from ema_workbench.connectors.netlogo import NetLogoModel
from ema_workbench.em_framework import (TimeSeriesOutcome, RealParameter,
                                        perform_experiments)
from ema_workbench.util import ema_logging
from ema_workbench.analysis import plotting, plotting_util

if __name__ == '__main__':
    # turn on logging
    ema_logging.log_to_stderr(ema_logging.DEBUG)

    model = NetLogoModel('predprey',
                         wd="./models/predatorPreyNetlogo",
                         model_file="Wolf Sheep Predation.nlogo")
    model.run_length = 100

    model.uncertainties = [RealParameter("grass-regrowth-time", 1, 99),
                           RealParameter("initial-number-sheep", 1, 200),
                           RealParameter("initial-number-wolves", 1, 200),
                           RealParameter("sheep-reproduce", 1, 20),
                           RealParameter("wolf-reproduce", 1, 20)]

    model.outcomes = [TimeSeriesOutcome('sheep'),
                      TimeSeriesOutcome('wolves'),
                      TimeSeriesOutcome('grass')]

    # perform experiments
    n = 100
    results = perform_experiments(model, n, parallel=True)
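
    # --- Hedged follow-up (not part of the original script) ---
    # The plotting / plotting_util imports above are otherwise unused; this
    # sketch shows one way the results could be inspected. The exact
    # signature of plotting.lines() has varied across workbench versions,
    # so treat this call as an assumption rather than the script's actual
    # post-processing.
    import matplotlib.pyplot as plt

    plotting.lines(results)  # one time-series figure per outcome
    plt.show()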
def test_perform_experiments(self):
    # everything shared
    model_a = Model("A", mock.Mock())
    model_b = Model("B", mock.Mock())
    model_c = Model("C", mock.Mock())
    models = [model_a, model_b, model_c]

    # let's add some uncertainties to this
    shared_abc_1 = RealParameter("shared abc 1", 0, 1)
    shared_abc_2 = RealParameter("shared abc 2", 0, 1)
    shared_ab_1 = RealParameter("shared ab 1", 0, 1)
    shared_bc_1 = RealParameter("shared bc 1", 0, 1)
    a_1 = RealParameter("a 1", 0, 1)
    b_1 = RealParameter("b 1", 0, 1)

    model_a.uncertainties = [shared_abc_1, shared_abc_2, shared_ab_1, a_1]
    model_b.uncertainties = [shared_abc_1, shared_abc_2, shared_ab_1,
                             shared_bc_1, b_1]
    model_c.uncertainties = [shared_abc_1, shared_abc_2, shared_bc_1]

    # let's add an outcome to this
    outcome_shared = TimeSeriesOutcome("test")
    model_a.outcomes = [outcome_shared]
    model_b.outcomes = [outcome_shared]
    model_c.outcomes = [outcome_shared]

    for model in models:
        model.function.return_value = {a: [0.1] * 10
                                       for a in outcome_shared.variable_name}

    ensemble = ModelEnsemble()
    ensemble.model_structures = [model_a, model_b, model_c]
    ensemble.policies = [Policy('None')]

    ensemble.perform_experiments(10, uncertainty_union=True,
                                 outcome_union=True, reporting_interval=1)

    # TODO:: check the calls made on the mocked models
    # for model in models:
    #     model.function.assert_has_calls()

    ensemble.perform_experiments(10, uncertainty_union=True,
                                 outcome_union=False, reporting_interval=1)

    ensemble.perform_experiments(10, uncertainty_union=False,
                                 outcome_union=True, reporting_interval=1)

    ensemble.perform_experiments(10, uncertainty_union=False,
                                 outcome_union=False, reporting_interval=1)

    # self.assertRaises(ValueError, ensemble.perform_experiments,
    #                   10, uncertainty_union=False,
    #                   union_outcomes='Label')

    with mock.patch('ema_workbench.em_framework.ensemble.MultiprocessingPool') as MockPool:
        ensemble.parallel = True

        mockedCallback = mock.Mock(DefaultCallback)
        mockedCallback.configure_mock(**{'i': 30})
        mockedCallback.return_value = mockedCallback

        ensemble.perform_experiments(10, uncertainty_union=True,
                                     outcome_union=True,
                                     reporting_interval=1,
                                     callback=mockedCallback)
        self.assertEqual(2, len(MockPool.mock_calls))

        MockPool.reset_mock()
        mockedCallback = mock.Mock(DefaultCallback)
        mockedCallback.configure_mock(**{'i': 10})
        mockedCallback.return_value = mockedCallback

        # the callback reports fewer completed experiments than expected,
        # so perform_experiments should raise an EMAError
        self.assertRaises(EMAError, ensemble.perform_experiments,
                          10, uncertainty_union=True, outcome_union=True,
                          reporting_interval=1, callback=mockedCallback)
class ScarcityModel(VensimModel):
    model_file = r'\MetalsEMA.vpm'

    outcomes = [TimeSeriesOutcome('relative market price'),
                TimeSeriesOutcome('supply demand ratio'),
                TimeSeriesOutcome('real annual demand'),
                TimeSeriesOutcome('produced of intrinsically demanded'),
                TimeSeriesOutcome('supply'),
                TimeSeriesOutcome('Installed Recycling Capacity'),
                TimeSeriesOutcome('Installed Extraction Capacity')]

    uncertainties = [
        RealParameter("price elasticity of demand", 0, 0.5),
        RealParameter("fraction of maximum extraction capacity used",
                      0.6, 1.2),
        RealParameter("initial average recycling cost", 1, 4),
        RealParameter("exogenously planned extraction capacity", 0, 15000),
        RealParameter("absolute recycling loss fraction", 0.1, 0.5),
        RealParameter("normal profit margin", 0, 0.4),
        RealParameter("initial annual supply", 100000, 120000),
        RealParameter("initial in goods", 1500000, 2500000),
        RealParameter("average construction time extraction capacity",
                      1, 10),
        RealParameter("average lifetime extraction capacity", 20, 40),
        RealParameter("average lifetime recycling capacity", 20, 40),
        RealParameter("initial extraction capacity under construction",
                      5000, 20000),
        RealParameter("initial recycling capacity under construction",
                      5000, 20000),
        RealParameter("initial recycling infrastructure", 5000, 20000),

        # order of delay
        CategoricalParameter("order in goods delay", (1, 4, 10, 1000)),
        CategoricalParameter("order recycling capacity delay", (1, 4, 10)),
        CategoricalParameter("order extraction capacity delay", (1, 4, 10)),

        # uncertainties associated with lookups
        RealParameter("lookup shortage loc", 20, 50),
        RealParameter("lookup shortage speed", 1, 5),
        RealParameter("lookup price substitute speed", 0.1, 0.5),
        RealParameter("lookup price substitute begin", 3, 7),
        RealParameter("lookup price substitute end", 15, 25),
        RealParameter("lookup returns to scale speed", 0.01, 0.2),
        RealParameter("lookup returns to scale scale", 0.3, 0.7),
        RealParameter("lookup approximated learning speed", 0.01, 0.2),
        RealParameter("lookup approximated learning scale", 0.3, 0.6),
        RealParameter("lookup approximated learning start", 30, 60)]

    def returnsToScale(self, x, speed, scale):
        return (x * 1000, scale * 1 / (1 + exp(-1 * speed * (x - 50))))

    def approxLearning(self, x, speed, scale, start):
        x = x - start
        loc = 1 - scale
        a = (x * 10000, scale * 1 / (1 + exp(speed * x)) + loc)
        return a

    def f(self, x, speed, loc):
        return (x / 10, loc * 1 / (1 + exp(speed * x)))

    def priceSubstite(self, x, speed, begin, end):
        scale = 2 * end
        start = begin - scale / 2
        return (x + 2000, scale * 1 / (1 + exp(-1 * speed * x)) + start)

    def run_model(self, kwargs):
        """Method for running an instantiated model structure."""
        # translate the lookup-related uncertainties into Vensim lookup
        # series before delegating to the parent run_model
        loc = kwargs.pop("lookup shortage loc")
        speed = kwargs.pop("lookup shortage speed")
        kwargs['shortage price effect lookup'] = [
            self.f(x / 10, speed, loc) for x in range(0, 100)]

        speed = kwargs.pop("lookup price substitute speed")
        begin = kwargs.pop("lookup price substitute begin")
        end = kwargs.pop("lookup price substitute end")
        kwargs['relative price substitute lookup'] = [
            self.priceSubstite(x, speed, begin, end)
            for x in range(0, 100, 10)]

        scale = kwargs.pop("lookup returns to scale speed")
        speed = kwargs.pop("lookup returns to scale scale")
        kwargs['returns to scale lookup'] = [
            self.returnsToScale(x, speed, scale)
            for x in range(0, 101, 10)]

        scale = kwargs.pop("lookup approximated learning speed")
        speed = kwargs.pop("lookup approximated learning scale")
        start = kwargs.pop("lookup approximated learning start")
        kwargs['approximated learning effect lookup'] = [
            self.approxLearning(x, speed, scale, start)
            for x in range(0, 101, 10)]

        super(ScarcityModel, self).run_model(kwargs)
if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.DEBUG)

    model = ScarcityModel("scarcity", wd=r'./models/scarcity',
                          model_file=r'\MetalsEMA.vpm')

    model.outcomes = [
        TimeSeriesOutcome('relative market price'),
        TimeSeriesOutcome('supply demand ratio'),
        TimeSeriesOutcome('real annual demand'),
        TimeSeriesOutcome('produced of intrinsically demanded'),
        TimeSeriesOutcome('supply'),
        TimeSeriesOutcome('Installed Recycling Capacity'),
        TimeSeriesOutcome('Installed Extraction Capacity')]

    model.uncertainties = [
        RealParameter("price elasticity of demand", 0, 0.5),
        RealParameter("fraction of maximum extraction capacity used",
                      0.6, 1.2),
        RealParameter("initial average recycling cost", 1, 4),
        RealParameter("exogenously planned extraction capacity", 0, 15000),
        RealParameter("absolute recycling loss fraction", 0.1, 0.5),