def transition_test():
    """Smoke test: run ten EnergyTrans experiments through the HDF5 callback."""
    # Vensim model directory, relative to this test file.
    trans_model = EnergyTrans(r"..\..\..\models\EnergyTrans", "fluCase")

    ensemble = ModelEnsemble()
    ensemble.set_model_structure(trans_model)

    # Ten cases is enough to exercise the HDF5 storage path.
    ensemble.perform_experiments(cases=10, callback=HDF5Callback)
def test_optimization(): ema_logging.log_to_stderr(ema_logging.INFO) model = FluModel(r'..\data', "fluCase") ensemble = ModelEnsemble() ensemble.set_model_structure(model) ensemble.parallel=True # ensemble.processes = 12 stats, pop = ensemble.perform_outcome_optimization(obj_function = obj_function_multi, reporting_interval=10, weights=(MAXIMIZE, MAXIMIZE), pop_size=100, nr_of_generations=5, crossover_rate=0.5, mutation_rate=0.05) res = stats.hall_of_fame.keys x = [entry.values[0] for entry in res] y = [entry.values[1] for entry in res] print len(x), len(y) fig = plt.figure() ax = fig.add_subplot(111) ax.scatter(x,y) ax.set_ylabel("deceased population") ax.set_xlabel("infected fraction") plt.show()
def flu_test():
    """Smoke test: run ten flu experiments through the HDF5 callback."""
    flu_model = FluModel(r"..\..\..\models\flu", "fluCase")

    ensemble = ModelEnsemble()
    ensemble.set_model_structure(flu_model)

    ensemble.perform_experiments(cases=10, callback=HDF5Callback)
def test_inspect():
    """Smoke test for the InspectCallback provided by inspect_test."""
    import inspect_test

    flu_model = FluModel(r'..\..\..\models\flu', "fluCase")

    ensemble = ModelEnsemble()
    ensemble.set_model_structure(flu_model)
    ensemble.perform_experiments(cases=10,
                                 callback=inspect_test.InspectCallback)
def perform_experiments():
    """Run 10000 experiments on the salinization model in parallel.

    Returns whatever ModelEnsemble.perform_experiments produces.
    """
    ema_logging.log_to_stderr(level=ema_logging.INFO)

    model = SalinizationModel(
        r"C:\workspace\EMA-workbench\models\salinization", "verzilting")
    model.step = 4  # reduce the amount of data to be stored

    ensemble = ModelEnsemble()
    ensemble.set_model_structure(model)
    ensemble.parallel = True

    nr_of_experiments = 10000
    return ensemble.perform_experiments(nr_of_experiments)
def test_save_results():
    """Run 10 flu experiments and store them through the HDF5 callback.

    The target HDF5 file is removed first (if present) so the callback
    always writes into a fresh file.
    """
    file_name = "test.h5"
    experiment_name = "one_exp_test"
    nr_of_experiments = 10

    # The original called os.remove("test.h5") unconditionally, which
    # raises OSError on a first run when the file does not exist yet;
    # it also duplicated the filename literal instead of using fileName.
    if os.path.exists(file_name):
        os.remove(file_name)

    ensemble = ModelEnsemble()
    ensemble.set_model_structure(FluModel(r"..\..\..\models\flu", "fluCase"))
    ensemble.perform_experiments(nr_of_experiments,
                                 callback=HDF5Callback,
                                 fileName=file_name,
                                 experimentName=experiment_name)
def test_running_lookup_uncertainties(self):
    '''
    The comprehensive lookup-uncertainty test: a lookup uncertainty
    replaces itself with a set of other uncertainties, so we check that
    a batch of experiments runs to completion and returns results.
    Correct replacement of the uncertainties can be asserted by
    analyzing the resulting experiments array.
    '''
    model = LookupTestModel(r'../models/', 'lookupTestModel')
    # model.step = 4  # reduce data to be stored

    ensemble = ModelEnsemble()
    ensemble.set_model_structure(model)
    ensemble.perform_experiments(10)
def test_vensim_model(self):
    """Run the Vensim example model and verify the shape of the results."""
    # instantiate a model
    working_directory = r'../models'
    model = VensimExampleModel(working_directory, "simpleModel")

    # set the model on a fresh ensemble
    ensemble = ModelEnsemble()
    ensemble.set_model_structure(model)

    nr_runs = 10
    experiments, outcomes = ensemble.perform_experiments(nr_runs)

    # one row per run; both TIME and the model's first outcome must be present
    self.assertEqual(experiments.shape[0], nr_runs)
    self.assertIn('TIME', outcomes.keys())
    self.assertIn(model.outcomes[0].name, outcomes.keys())
def test_optimization():
    """Run an outcome optimization on the flu model with caching disabled
    and report how many distinct solutions were tried."""
    ema_logging.log_to_stderr(ema_logging.INFO)

    model = FluModel(r'..\data', "fluCase")

    ensemble = ModelEnsemble()
    ensemble.set_model_structure(model)
    ensemble.parallel = True

    stats, pop = ensemble.perform_outcome_optimization(
        obj_function=obj_function_multi,
        reporting_interval=100,
        weights=(MAXIMIZE, MAXIMIZE),
        pop_size=100,
        nr_of_generations=20,
        crossover_rate=0.5,
        mutation_rate=0.05,
        caching=False)

    # Dead local `res = stats.hall_of_fame.keys` removed (never used).
    # Single-argument print parenthesized: identical output on Python 2,
    # and forward-compatible with Python 3.
    print(len(stats.tried_solutions.values()))
def maxmin_optimize():
    """Maximin optimization over two policy levers of the test model,
    followed by an error-bar plot of the optimization statistics."""
    ema_logging.log_to_stderr(ema_logging.INFO)

    model = TestModel("", 'simpleModel')  # instantiate the model

    ensemble = ModelEnsemble()  # instantiate an ensemble
    ensemble.set_model_structure(model)  # set the model on the ensemble
    ensemble.parallel = True
    ensemble.processes = 12

    def obj_function1(outcomes):
        # inner objective: the raw 'y' outcome
        return outcomes['y']

    policy_levers = {"L1": (0, 1),
                     "L2": (0, 1)}

    results = ensemble.perform_maximin_optimization(
        obj_function1=obj_function1,
        policy_levers=policy_levers,
        minimax1='minimize',
        nrOfGenerations1=50,
        nrOfPopMembers1=200,
        minimax2="maximize",
        nrOfGenerations2=50,
        nrOfPopMembers2=100,
    )

    graph_errorbars_raw(results['stats'])
    plt.show()
def robust_optimize():
    """Robust optimization of the test model over two policy levers,
    followed by an error-bar plot of the optimization statistics."""
    ema_logging.log_to_stderr(ema_logging.INFO)

    model = TestModel("", 'simpleModel')  # instantiate the model

    ensemble = ModelEnsemble()  # instantiate an ensemble
    ensemble.set_model_structure(model)  # set the model on the ensemble

    policy_levers = {"L1": (0, 1),
                     "L2": (0, 1)}

    def obj_func(results):
        # robustness metric: the mean of 'y' across the sampled cases
        return np.average(results['y'])

    results = ensemble.perform_robust_optimization(
        cases=1000,
        obj_function=obj_func,
        policy_levers=policy_levers,
        minimax='minimize',
        nrOfGenerations=50,
        nrOfPopMembers=20)

    graph_errorbars_raw(results['stats'])
    plt.show()
def test_multiple_models():
    """Run experiments over two model structures with the HDF5 callback.

    The two dummy models deliberately share uncertainty 'b', so the
    handling of overlapping uncertainties across models is exercised.
    """

    class Model1(ModelStructureInterface):
        # uncertainties a and b
        uncertainties = [ParameterUncertainty((0, 1), "a"),
                         ParameterUncertainty((0, 1), "b")]
        outcomes = [Outcome("test")]

        def model_init(self, policy, kwargs):
            pass

        def run_model(self, case):
            self.output['test'] = 1

    class Model2(ModelStructureInterface):
        # uncertainties b and c; 'b' overlaps with Model1
        uncertainties = [ParameterUncertainty((0, 1), "b"),
                         ParameterUncertainty((0, 1), "c")]
        outcomes = [Outcome("test")]

        def model_init(self, policy, kwargs):
            pass

        def run_model(self, case):
            self.output['test'] = 1

    # os.remove('test.h5')
    nr_of_experiments = 10
    file_name = 'test.h5'
    experiment_name = "one_exp_test"

    ensemble = ModelEnsemble()
    ensemble.add_model_structure(Model1('', "test1"))
    ensemble.add_model_structure(Model2('', "test2"))
    ensemble.perform_experiments(nr_of_experiments,
                                 callback=HDF5Callback,
                                 fileName=file_name,
                                 experimentName=experiment_name)
def test_optimization():
    """Short epsilon-NSGA2 outcome optimization on the flu model."""
    ema_logging.log_to_stderr(ema_logging.INFO)

    model = FluModel(r'../models', "fluCase")

    ensemble = ModelEnsemble()
    ensemble.set_model_structure(model)
    ensemble.parallel = True

    pop_size = 8
    nr_of_generations = 10
    # epsilon per objective for the epsilon-dominance archive
    eps = np.array([1e-3, 1e6])

    stats, pop = ensemble.perform_outcome_optimization(
        obj_function=obj_function_multi,
        algorithm=epsNSGA2,
        reporting_interval=100,
        weights=(MAXIMIZE, MAXIMIZE),
        pop_size=pop_size,
        nr_of_generations=nr_of_generations,
        crossover_rate=0.8,
        mutation_rate=0.05,
        eps=eps)

    # target file for persisting the optimization results
    fn = '../data/test optimization save.bz2'
def test_tree():
    """Build a classification tree from flu experiments run under three
    different policies."""
    log_to_stderr(level=INFO)

    model = FluModel(r'..\..\models\flu', "fluCase")

    ensemble = ModelEnsemble()
    ensemble.parallel = True
    ensemble.set_model_structure(model)

    # each policy points at a different .vpm model file
    policies = [{'name': 'no policy', 'file': r'\FLUvensimV1basecase.vpm'},
                {'name': 'static policy', 'file': r'\FLUvensimV1static.vpm'},
                {'name': 'adaptive policy', 'file': r'\FLUvensimV1dynamic.vpm'}]
    ensemble.add_policies(policies)

    results = ensemble.perform_experiments(10)
    a_tree = tree(results, classify)
def test_feature_selection(): log_to_stderr(level= INFO) model = FluModel(r'..\..\models\flu', "fluCase") ensemble = ModelEnsemble() ensemble.parallel = True ensemble.set_model_structure(model) policies = [{'name': 'no policy', 'file': r'\FLUvensimV1basecase.vpm'}, {'name': 'static policy', 'file': r'\FLUvensimV1static.vpm'}, {'name': 'adaptive policy', 'file': r'\FLUvensimV1dynamic.vpm'} ] ensemble.add_policies(policies) results = ensemble.perform_experiments(5000) results = feature_selection(results, classify) for entry in results: print entry[0] +"\t" + str(entry[1])
def outcome_optimize():
    """Outcome optimization of the test model under a fixed policy,
    followed by an error-bar plot of the optimization statistics."""
    ema_logging.log_to_stderr(ema_logging.INFO)

    model = TestModel("", 'simpleModel')  # instantiate the model

    ensemble = ModelEnsemble()  # instantiate an ensemble
    ensemble.set_model_structure(model)  # set the model on the ensemble

    policy = {"name": "test", "L1": 1, "L2": 1}
    ensemble.add_policy(policy)

    def obj_func(results):
        # objective: the 'y' outcome itself
        return results['y']

    results = ensemble.perform_outcome_optimization(
        obj_function=obj_func,
        minimax='minimize',
        nrOfGenerations=1000,
        nrOfPopMembers=10)

    graph_errorbars_raw(results['stats'])
    plt.show()
"effect of perceived adequacy on energy drain lookup", self, 0, 10), LookupUncertainty('hearne2', [(-2, 2), (-1, 2), (0, 1.5), (0.1, 1.6), (0.5, 1.5), (0.1, 2)], "effect of perceived adequacy of hours worked lookup", self, 0, 2.5), LookupUncertainty('hearne2', [(-1, 1), (-1, 1), (0, 0.9), (0.1, 1), (0.5, 1.5), (1, 1.5)], "effect of energy levels on hours worked lookup", self, 0, 1.5), LookupUncertainty('hearne2', [(-1, 1), (-1, 1), (0, 0.9), (0.1, 1), (0.5, 1.5), (1, 1.5)], "effect of high energy on further recovery lookup", self, 0, 1.25), LookupUncertainty('hearne2', [(-2, 2), (-1, 1), (0, 100), (20, 120), (0.5, 1.5), (0.5, 2)], "effect of hours worked on energy recovery lookup", self, 0, 1.5), LookupUncertainty('approximation', [(-0.5, 0.35), (3, 5), (1, 10), (0.2, 0.4), (0, 120)], "effect of hours worked on energy drain lookup", self, 0, 3), LookupUncertainty('hearne1', [(0, 1), (0, 0.15), (1, 1.5), (0.75, 1.25)], "effect of low energy on further depletion lookup", self, 0, 1)] self._delete_lookup_uncertainties() if __name__ == "__main__": ema_logging.log_to_stderr(ema_logging.INFO) model = Burnout(r'./models/burnout', "burnout") ensemble = ModelEnsemble() ensemble.set_model_structure(model) #run policy with old cases results = ensemble.perform_experiments(100) lines(results, 'Energy Level', density=BOXPLOT) plt.show()
"normal contact rate region 2")] def model_init(self, policy, kwargs): '''initializes the model''' try: self.model_file = policy['file'] except KeyError: ema_logging.warning("key 'file' not found in policy") super(FluModel, self).model_init(policy, kwargs) if __name__ == "__main__": ema_logging.log_to_stderr(ema_logging.INFO) model = FluModel(r'..\..\models\flu', "fluCase") ensemble = ModelEnsemble() ensemble.set_model_structure(model) #add policies policies = [{'name': 'no policy', 'file': r'\FLUvensimV1basecase.vpm'}, {'name': 'static policy', 'file': r'\FLUvensimV1static.vpm'}, {'name': 'adaptive policy', 'file': r'\FLUvensimV1dynamic.vpm'} ] ensemble.add_policies(policies) ensemble.parallel = True #turn on parallel processing results = ensemble.perform_experiments(1000)
uncertainties = [] outcomes = [ Outcome('CumulativeGHGreduction', time=True), ] if __name__ == "__main__": #turn on logging ema_logging.log_to_stderr(ema_logging.INFO) #instantiate a model vensimModel = EVO(r"./models", "simpleModel") #instantiate an ensemble ensemble = ModelEnsemble() #set the model on the ensemble ensemble.set_model_structure(vensimModel) # # run in parallel, if not set, FALSE is assumed ensemble.parallel = True cases = [{} for _ in range(1000)] #perform experiments results = ensemble.perform_experiments(cases, reporting_interval=100) save_results( results, r'.\data\EMA results ModelSebastiaanGreeven 1000 exp Stoch Test.tar.gz'
"susceptible to immune population delay time region 1"), ParameterUncertainty((0.5,2), "susceptible to immune population delay time region 2"), ParameterUncertainty((0.01, 5), "root contact rate region 1"), ParameterUncertainty((0.01, 5), "root contact ratio region 2"), ParameterUncertainty((0, 0.15), "infection ratio region 1"), ParameterUncertainty((0, 0.15), "infection rate region 2"), ParameterUncertainty((10, 100), "normal contact rate region 1"), ParameterUncertainty((10, 200), "normal contact rate region 2")] if __name__ == "__main__": ema_logging.log_to_stderr(ema_logging.INFO) model = FluModel(r'./models/flu', "fluCase") ensemble = ModelEnsemble() ensemble.set_model_structure(model) ensemble.parallel = True #turn on parallel processing nr_experiments = 1000 results = ensemble.perform_experiments(nr_experiments) fh = r'./data/{} flu cases no policy.tar.gz'.format(nr_experiments) save_results(results, fh)
ParameterUncertainty((0.45,0.55), "tH"), ParameterUncertainty((0.1,0.3), "kk")] #specification of the outcomes outcomes = [Outcome("B4:B1076", time=True), #we can refer to a range in the normal way Outcome("P_t", time=True)] # we can also use named range #name of the sheet sheet = "Sheet1" #relative path to the Excel file workbook = r'\excel example.xlsx' if __name__ == "__main__": ema_logging.log_to_stderr(level=ema_logging.INFO) model = ExcelModel(r"./models/excelModel", "predatorPrey") ensemble = ModelEnsemble() ensemble.set_model_structure(model) ensemble.parallel = True #turn on parallel computing ensemble.processes = 2 #using only 2 cores #run 100 experiments nr_experiments = 100 results = ensemble.perform_experiments(nr_experiments)
a_mean = np.mean(a) b_mean = np.mean(b) if a_mean < 0.5 or b_mean < 0.5: return (np.inf,) * 2 else: return a_mean, b_mean if __name__ == "__main__": ema_logging.log_to_stderr(ema_logging.INFO) model = DummyModel(r"", "dummy") np.random.seed(123456789) ensemble = ModelEnsemble() ensemble.set_model_structure(model) policy_levers = {'Trigger a': {'type':'list', 'values':[0, 0.25, 0.5, 0.75, 1]}, 'Trigger b': {'type':'list', 'values':[0, 0.25, 0.5, 0.75, 1]}, 'Trigger c': {'type':'list', 'values':[0, 0.25, 0.5, 0.75, 1]}} cases = ensemble._generate_samples(10, UNION)[0] ensemble.add_policy({"name":None}) experiments = [entry for entry in ensemble._generate_experiments(cases)] for entry in experiments: entry.pop("model") entry.pop("policy") cases = experiments
# Run a single loop variant (index 30); the commented range covers all
# unique edges instead.
for loop_index in range(30, 31):
    # for loop_index in range(1, len(unique_edges) + 1):

    # pick the model variant matching this loop index
    if loop_index - indCons > 0:
        model = ScarcityModel(r"D:\tbm-g367\workspace\EMA workbench\src\sandbox\sils\Models\Consecutive", "scarcity")
        model_location = r"D:\tbm-g367\workspace\EMA workbench\src\sandbox\sils\Models\Consecutive\Metals EMA.vpm"
    elif loop_index == 0:
        model = ScarcityModel(r"D:\tbm-g367\workspace\EMA workbench\src\sandbox\sils\Models\Base", "scarcity")
        model_location = r"D:\tbm-g367\workspace\EMA workbench\src\sandbox\sils\Models\Base\Metals EMA.vpm"
    else:
        model = ScarcityModel(r"D:\tbm-g367\workspace\EMA workbench\src\sandbox\sils\Models\Switches", "scarcity")
        model_location = r"D:\tbm-g367\workspace\EMA workbench\src\sandbox\sils\Models\Switches\Metals EMA.vpm"

    ensemble = ModelEnsemble()
    ensemble.set_model_structure(model)

    serie = run_interval(model_location,
                         loop_index,
                         interval,
                         "relative market price",
                         unique_edges,
                         indCons,
                         double_list,
                         uncertain_names,
                         uncertain_values[beh_no])
    interval_series.append(serie)
susceptible_population_region_1 = susceptible_population_region_1_NEXT susceptible_population_region_2 = susceptible_population_region_2_NEXT immune_population_region_1 = immune_population_region_1_NEXT immune_population_region_2 = immune_population_region_2_NEXT deceased_population_region_1.append(deceased_population_region_1_NEXT) deceased_population_region_2.append(deceased_population_region_2_NEXT) #End of main code return (runTime, deceased_population_region_1) #, Max_infected, Max_time) if __name__ == "__main__": import expWorkbench.ema_logging as logging np.random.seed(150) #set the seed for replication purposes logging.log_to_stderr(logging.INFO) fluModel = MexicanFlu(None, "mexicanFluExample") ensemble = ModelEnsemble() ensemble.parallel = True ensemble.set_model_structure(fluModel) nr_experiments = 500 results = ensemble.perform_experiments(nr_experiments, reporting_interval=100) lines(results, outcomes_to_show="deceased_population_region_1", show_envelope=True, density=KDE, titles=None, experiments_to_show=np.arange(0, nr_experiments, 10) ) plt.show()
class SimplePythonModel(ModelStructureInterface):
    """Minimal example of extending ModelStructureInterface to perform
    EMA on a model coded directly in Python."""

    # specify uncertainties
    uncertainties = [
        ParameterUncertainty((0.1, 10), "x1"),
        ParameterUncertainty((-0.01, 0.01), "x2"),
        ParameterUncertainty((-0.01, 0.01), "x3"),
    ]

    # specify outcomes
    outcomes = [Outcome("y")]

    def model_init(self, policy, kwargs):
        """No initialization needed for this in-Python model."""
        pass

    def run_model(self, case):
        """Run one case: y = x1 * x2 + x3."""
        self.output[self.outcomes[0].name] = case["x1"] * case["x2"] + case["x3"]


if __name__ == "__main__":
    model = SimplePythonModel(None, "simpleModel")  # instantiate the model

    ensemble = ModelEnsemble()  # instantiate an ensemble
    ensemble.set_model_structure(model)  # set the model on the ensemble

    results = ensemble.perform_experiments(1000)  # generate 1000 cases
''' This class represents a simple example of how one can extent the basic ModelStructureInterface in order to do EMA on a simple model coded in Python directly ''' #specify uncertainties uncertainties = [ParameterUncertainty((0.1, 10), "x1"), ParameterUncertainty((-0.01,0.01), "x2"), ParameterUncertainty((-0.01,0.01), "x3")] #specify outcomes outcomes = [Outcome('y', time=False)] def model_init(self, policy, kwargs): pass def run_model(self, case): """Method for running an instantiated model structure """ self.output[self.outcomes[0].name] = case['x1']*case['x2']+case['x3'] ema_logging.info("run model called") if __name__ == '__main__': ema_logging.log_to_stderr(ema_logging.INFO) model = ParallelTestEMA(None, 'simpleModel') ensemble = ModelEnsemble() ensemble.set_model_structure(model) ensemble.parallel = True results = ensemble.perform_experiments(201)
switches = case.pop("preference switches") case["SWITCH preference for MIC"] = switches[0] case["SWITCH preference for expected cost per MWe"]= switches[1] case["SWITCH preference against unknown"]= switches[2] case["SWITCH preference for expected progress"]= switches[3] case["SWITCH preference against specific CO2 emissions"]= switches[4] super(EnergyTrans, self).run_model(case) if __name__ == "__main__": logger = ema_logging.log_to_stderr(ema_logging.INFO) model = EnergyTrans(r'..\..\models\EnergyTrans', "ESDMAElecTrans") model.step = 4 #reduce data to be stored ensemble = ModelEnsemble() ensemble.set_model_structure(model) policies = [{'name': 'no policy', 'file': r'\ESDMAElecTrans_NoPolicy.vpm'}, {'name': 'basic policy', 'file': r'\ESDMAElecTrans_basic_policy.vpm'}, {'name': 'tech2', 'file': r'\ESDMAElecTrans_tech2.vpm'}, {'name': 'econ', 'file': r'\ESDMAElecTrans_econ.vpm'}, {'name': 'adaptive policy', 'file': r'\ESDMAElecTrans_adaptive_policy.vpm'}, {'name': 'ap with op', 'file': r'\ESDMAElecTrans_ap_with_op.vpm'}, ]
# defaults = {'TimeHorizonGov1':50, # 'TimeHorizonGov2':50, # 'TimeHorizonGov3':50, # 'TimeHorizonGov4':50, # 'TimeHorizonGov5':50, # 'TimeHorizonInd1':50, # 'TimeHorizonInd2':50, # 'TimeHorizonInd3':50, # 'TimeHorizonInd4':50, # 'TimeHorizonInd5':50} # msi2 = EVO(r"./models", 'longTimeHorizon', defaults=defaults) msi1 = EVO('./models', 'full') #instantiate an ensemble ensemble = ModelEnsemble() #set the model on the ensemble ensemble.add_model_structure(msi1) # ensemble.add_model_structure(msi2) ensemble.parallel = True ensemble.processes = 36 #perform experiments nr_experiments = 1000 results = ensemble.perform_experiments(nr_experiments, reporting_interval=100) fn = r'.\data\full {} exp {} rep.tar.gz'.format(nr_experiments, msi1.nr_replications)
model = ScarcityModel( r'D:\tbm-g367\workspace\EMA workbench\src\sandbox\sils\Models\Consecutive', "scarcity") model_location = r'D:\tbm-g367\workspace\EMA workbench\src\sandbox\sils\Models\Consecutive\Metals EMA.vpm' elif loop_index == 0: model = ScarcityModel( r'D:\tbm-g367\workspace\EMA workbench\src\sandbox\sils\Models\Base', "scarcity") model_location = r'D:\tbm-g367\workspace\EMA workbench\src\sandbox\sils\Models\Base\Metals EMA.vpm' else: model = ScarcityModel( r'D:\tbm-g367\workspace\EMA workbench\src\sandbox\sils\Models\Switches', "scarcity") model_location = r'D:\tbm-g367\workspace\EMA workbench\src\sandbox\sils\Models\Switches\Metals EMA.vpm' ensemble = ModelEnsemble() ensemble.set_model_structure(model) serie = run_interval(model_location, loop_index, interval, 'relative market price', unique_edges, indCons, double_list, uncertain_names, uncertain_values[beh_no]) interval_series.append(serie) print(interval_series) base = behaviour_int[beh_no][interval[0]:interval[1]] print len(base), len(interval_series[0]) distances, dominant = dominance_distance(behaviour_int[beh_no], interval_series) print distances, dominant
] outcomes = [ Outcome("sheep", time=True), Outcome("wolves", time=True), Outcome("grass", time=True), # TODO patches not working in reporting ] if __name__ == "__main__": # turn on logging ema_logging.log_to_stderr(ema_logging.INFO) # instantiate a model vensimModel = PredatorPrey(r"..\..\models\predatorPreyNetlogo", "simpleModel") # instantiate an ensemble ensemble = ModelEnsemble() # set the model on the ensemble ensemble.set_model_structure(vensimModel) # run in parallel, if not set, FALSE is assumed ensemble.parallel = True # perform experiments results = ensemble.perform_experiments(100) plotting.lines(results, density=plotting.KDE) plt.show()
self.output = results if error: raise CaseError("run not completed", case) def obj_func(outcomes): outcome = outcomes['total fraction new technologies'] zeros = np.zeros((outcome.shape[0], 1)) zeros[outcome[:,-1]>0.6] = 1 value = np.sum(zeros)/zeros.shape[0] return value, if __name__ == "__main__": ema_logging.log_to_stderr(ema_logging.INFO) model = EnergyTrans(r"..\data", "ESDMAElecTrans") ensemble = ModelEnsemble() ensemble.set_model_structure(model) ensemble.parallel = True policy_levers = {'Trigger subsidy T2': {'type':'range', 'values':(0,1)}, 'Trigger subsidy T3': {'type':'range', 'values':(0,1)}, 'Trigger subsidy T4': {'type':'range', 'values':(0,1)}, 'Trigger addnewcom': {'type':'list', 'values':[0, 0.25, 0.5, 0.75, 1]}} stats_callback, pop = ensemble.perform_robust_optimization(cases=10, reporting_interval=100, obj_function=obj_func, policy_levers=policy_levers, weights = (MAXIMIZE,), nr_of_generations=100, pop_size=10,
"MPW-type-of-conflict", "MPW", "MPW-risk-averseness1", "MPW-risk-averseness2", "max-conflict-level", "top-dog", "time-since-MPW", "expected-transition", "Conflict-source", "power-transition", "conflict-time", "war-time", ]) if __name__ == '__main__': ema_logging.log_to_stderr(ema_logging.INFO) wd = r'./model' name = 'testmodel' msi = PathOfWarModel(wd, name) # msi.run_length = 200 ensemble = ModelEnsemble() ensemble.add_model_structure(msi) ensemble.parallel = True nr_runs = 2 results = ensemble.perform_experiments(nr_runs, reporting_interval=1) fn = r'./data/{} runs 10 replicaties 25 feb.tar.gz'.format(nr_runs) save_results(results, fn)