def performExperiments():
    """Run scenario/policy experiments on the ER-graph model.

    Configures EMA logging, wraps ``simulate_ER`` in a workbench ``Model``,
    runs 10 scenarios x 10 policies, writes the sampled inputs to
    ``out.csv`` and returns them.

    Returns
    -------
    pandas.DataFrame
        the sampled values of 'n', 'p' and 'f' for every experiment.
    """
    ema_logging.LOG_FORMAT = '[%(name)s/%(levelname)s/%(processName)s] %(message)s'
    ema_logging.log_to_stderr(ema_logging.INFO)

    # instantiate the model
    model = Model('SimulateER', function=simulate_ER)

    # specify uncertainties
    # FIX: the original assigned model.uncertainties twice, so the second
    # assignment silently discarded RealParameter("p", ...); both
    # uncertainties must live in a single list.
    model.uncertainties = [
        RealParameter("p", 0.1, 1.0),
        RealParameter("f", 0.1, 0.9),
    ]
    model.levers = [IntegerParameter("n", 10, 100)]

    # specify outcomes
    model.outcomes = [ScalarOutcome('cc')]
    model.constants = [Constant('replications', 10)]

    n_scenarios = 10
    n_policies = 10

    res = perform_experiments(model, n_scenarios, n_policies)
    experiments, outcomes = res

    data = experiments[['n', 'p', 'f']]
    data.to_csv('out.csv', index=False)
    return data
def test_get_salib_problem(self):
    """A list of parameters must translate one-to-one into a SALib problem."""
    uncertainties = [
        RealParameter("a", 0, 10),
        RealParameter("b", 0, 5),
    ]

    problem = get_SALib_problem(uncertainties)

    # the problem spec mirrors the parameter list exactly
    self.assertEqual(2, problem['num_vars'])
    self.assertEqual(['a', 'b'], problem['names'])
    self.assertEqual((0, 10), problem['bounds'][0])
    self.assertEqual((0, 5), problem['bounds'][1])
def test_FAST(self):
    """FAST sampling should produce N * n_params rows for every parameter."""
    parameters = [RealParameter("a", 0, 10), RealParameter("b", 0, 5)]
    expected_rows = 100 * 2  # sample size times number of parameters

    samples = FASTSampler().generate_samples(parameters, 100)

    for name in ('a', 'b'):
        self.assertIn(name, samples.keys())
        self.assertEqual(samples[name].shape[0], expected_rows)
def dps_model(params):
    """Build the direct-policy-search (DPS) variant of the lake model.

    Takes uncertainties, outcomes, constants and constraints from *params*
    and adds the five DPS policy levers.
    """
    dps = Model('dps', function=modelData.dps.lake_model)
    dps.timeHorizon = params.timeHorizon
    dps.uncertainties = params.uncertainties
    # the DPS release rule is parameterised by two centers (c1, c2),
    # two radii (r1, r2) and a weight (w1)
    dps.levers = [
        RealParameter("c1", -2, 2),
        RealParameter("c2", -2, 2),
        RealParameter("r1", 0, 2),
        RealParameter("r2", 0, 2),
        RealParameter("w1", 0, 1),
    ]
    dps.outcomes = params.outcomes
    dps.constants = params.constants
    dps.constraints = params.constraints
    return dps
def set_model(func=recycle_model):
    """Wrap *func* in an EMA_Model preconfigured with the plastic-recycling
    uncertainties, levers and outcomes.

    Returns the ready-to-run EMA_Model instance.
    """
    experiment_model = EMA_Model('plastic', function=func)

    experiment_model.uncertainties = [
        RealParameter('target_mean', 0.05, 0.20),
        RealParameter('amb_mean', 0.1, 0.5),
        RealParameter('percep_range', 0.1, 0.5),
        RealParameter('know_range', 0.1, 0.5),
        RealParameter('technology', 0.1, 0.5),
        RealParameter('tech_std', 0.05, 0.20),
        IntegerParameter('cap_mean', 400, 800),
        IntegerParameter('cap_std', 100, 400),
    ]
    experiment_model.levers = [
        RealParameter("campaign_bud_prop", 0.05, 0.5),
        RealParameter("final_target", 0.1, 0.4),
    ]
    experiment_model.outcomes = [
        ScalarOutcome('target_met_frac'),
        ScalarOutcome('no_budget_frac'),
        ScalarOutcome('avg_fine_period'),
        ScalarOutcome('fine_per_house'),
        ScalarOutcome('time_conv'),
        ScalarOutcome('profit_std'),
    ]
    return experiment_model
def test_morris(self):
    """Morris sampling should produce (G/D + 1) * N rows per parameter."""
    parameters = [RealParameter("a", 0, 10), RealParameter("b", 0, 5)]
    samples = MorrisSampler().generate_samples(parameters, 100)

    num_groups = 4  # 'G' used by the Morris sampler
    # NOTE: true division keeps this a float, matching the original check
    expected = (num_groups / len(parameters) + 1) * 100

    for name in ('a', 'b'):
        self.assertIn(name, samples.keys())
        self.assertEqual(samples[name].shape[0], expected)
def planned_adaptive_model(params):
    """Build the planned-adaptive variant of the lake model.

    Takes uncertainties, outcomes, constants and constraints from *params*
    and adds the five policy-rule levers.
    """
    adaptive = Model('plannedadaptive',
                     function=modelData.planned_adaptive.lake_model)
    adaptive.timeHorizon = params.timeHorizon
    adaptive.uncertainties = params.uncertainties
    # same policy parameterisation as the DPS variant:
    # two centers, two radii, one weight
    adaptive.levers = [
        RealParameter("c1", -2, 2),
        RealParameter("c2", -2, 2),
        RealParameter("r1", 0, 2),
        RealParameter("r2", 0, 2),
        RealParameter("w1", 0, 1),
    ]
    adaptive.outcomes = params.outcomes
    adaptive.constants = params.constants
    adaptive.constraints = params.constraints
    return adaptive
def get_lake_model():
    """Return a fully formulated model of the lake problem."""
    # instantiate the model
    lake_model = Model('lakeproblem', function=lake_problem)
    lake_model.time_horizon = 100

    # deeply uncertain factors of the lake problem
    lake_model.uncertainties = [
        RealParameter('b', 0.1, 0.45),
        RealParameter('q', 2.0, 4.5),
        RealParameter('mean', 0.01, 0.05),
        RealParameter('stdev', 0.001, 0.005),
        RealParameter('delta', 0.93, 0.99),
    ]

    # one release decision (lever) for every time step
    lake_model.levers = [RealParameter(str(step), 0, 0.1)
                         for step in range(lake_model.time_horizon)]

    lake_model.outcomes = [
        ScalarOutcome('max_P'),
        ScalarOutcome('utility'),
        ScalarOutcome('inertia'),
        ScalarOutcome('reliability'),
    ]

    # override some of the defaults of the model
    lake_model.constants = [Constant('alpha', 0.41),
                            Constant('nsamples', 150)]
    return lake_model
def intertemporal_model(params):
    """Build the intertemporal (open-loop) variant of the lake model.

    Takes uncertainties, outcomes, constants and constraints from *params*
    and adds one release lever per time step (l0 .. l99).
    """
    intertemporal = Model('intertemporal',
                          function=modelData.intertemporal.lake_model)
    intertemporal.timeHorizon = params.timeHorizon
    intertemporal.uncertainties = params.uncertainties
    intertemporal.levers = [RealParameter(f'l{step}', 0, 0.1)
                            for step in range(100)]
    intertemporal.outcomes = params.outcomes
    intertemporal.constants = params.constants
    intertemporal.constraints = params.constraints
    return intertemporal
def test_sobol(self):
    """Sobol sampling yields N*(2D+2) rows with second-order indices,
    N*(D+2) without, for every sampled parameter."""
    parameters = [RealParameter("a", 0, 10), RealParameter("b", 0, 5)]

    # default: second-order indices -> N * (2D + 2) samples
    sampler = SobolSampler()
    samples = sampler.generate_samples(parameters, 100)
    N = 100 * (2 * 2 + 2)
    for key in ['a', 'b']:
        self.assertIn(key, samples.keys())
        self.assertEqual(samples[key].shape[0], N)

    # no second-order indices -> N * (D + 2) samples
    sampler = SobolSampler(second_order=False)
    samples = sampler.generate_samples(parameters, 100)
    N = 100 * (2 + 2)
    for key in ['a', 'b']:
        self.assertIn(key, samples.keys())
        self.assertEqual(samples[key].shape[0], N)

    # three parameters, second order again -> N * (2*3 + 2) samples
    parameters = [
        RealParameter("a", 0, 10),
        RealParameter("b", 0, 5),
        IntegerParameter("c", 0, 2)
    ]
    sampler = SobolSampler()
    samples = sampler.generate_samples(parameters, 100)
    N = 100 * (2 * 3 + 2)
    # FIX: the original loop only checked 'a' and 'b', so the third
    # sampled parameter 'c' was never verified
    for key in ['a', 'b', 'c']:
        self.assertIn(key, samples.keys())
        self.assertEqual(samples[key].shape[0], N)

    designs = sampler.generate_designs(parameters, 100)
    self.assertEqual(designs.parameters, parameters)
    self.assertEqual(designs.params, ['a', 'b', 'c'])
    self.assertEqual(designs.n, N)
def get_model_for_problem_formulation(problem_formulation_id):
    '''Prepare DikeNetwork in a way it can be input in the EMA-workbench.

    Specify uncertainties, levers and problem formulation.

    Parameters
    ----------
    problem_formulation_id : int
        0 (2 objectives), 1 (3 objectives), 2 (12 objectives) or
        3 (17 objectives).

    Returns
    -------
    Model
        workbench model wrapping the DikeNetwork function.

    Raises
    ------
    TypeError
        if problem_formulation_id is not one of 0..3.
    '''
    # Load the model:
    function = DikeNetwork()
    # workbench model:
    dike_model = Model('dikesnet', function=function)

    ## Uncertainties and Levers:
    # Specify uncertainties range:
    Real_uncert = {'Bmax': [30, 350], 'pfail': [0, 1]}  # m and [.]
    # breach growth rate [m/day]
    cat_uncert_loc = {'Brate': (0.9, 1.5, 1000)}
    cat_uncert = {'discount rate': (1.5, 2.5, 3.5, 4.5)}
    Int_uncert = {'A.0_ID flood wave shape': [0, 133]}

    # Range of dike heightening:
    dike_lev = {'DikeIncrease': [0, 10]}  # dm
    # Series of five Room for the River projects:
    rfr_lev = ['{}_RfR'.format(project_id) for project_id in range(0, 5)]
    # Time of warning: 0, 1, 2, 3, 4 days ahead from the flood
    EWS_lev = {'EWS_DaysToThreat': [0, 4]}  # days

    uncertainties = []
    levers = []
    for dike in function.dikelist:
        # uncertainties in the form: locationName_uncertaintyName
        for uncert_name in Real_uncert.keys():
            name = "{}_{}".format(dike, uncert_name)
            lower, upper = Real_uncert[uncert_name]
            uncertainties.append(RealParameter(name, lower, upper))

        for uncert_name in cat_uncert_loc.keys():
            name = "{}_{}".format(dike, uncert_name)
            categories = cat_uncert_loc[uncert_name]
            uncertainties.append(CategoricalParameter(name, categories))

        # location-related levers in the form: locationName_leversName
        for lev_name in dike_lev.keys():
            name = "{}_{}".format(dike, lev_name)
            levers.append(IntegerParameter(name,
                                           dike_lev[lev_name][0],
                                           dike_lev[lev_name][1]))

    # network-wide categorical uncertainty (discount rate)
    for uncert_name in cat_uncert.keys():
        categories = cat_uncert[uncert_name]
        uncertainties.append(CategoricalParameter(uncert_name, categories))

    # flood wave shape identifier
    for uncert_name in Int_uncert.keys():
        uncertainties.append(
            IntegerParameter(uncert_name,
                             Int_uncert[uncert_name][0],
                             Int_uncert[uncert_name][1]))

    # RfR levers can be either 0 (not implemented) or 1 (implemented)
    for lev_name in rfr_lev:
        levers.append(IntegerParameter(lev_name, 0, 1))

    # Early Warning System lever
    for lev_name in EWS_lev.keys():
        levers.append(IntegerParameter(lev_name,
                                       EWS_lev[lev_name][0],
                                       EWS_lev[lev_name][1]))

    # load uncertainties and levers in dike_model:
    dike_model.uncertainties = uncertainties
    dike_model.levers = levers

    ## Problem formulations:
    # Outcomes are all costs, thus they have to be minimized:
    direction = ScalarOutcome.MINIMIZE

    # 2-objective PF:
    if problem_formulation_id == 0:
        dikes_variable_names = []
        for dike in function.dikelist:
            dikes_variable_names.extend([
                '{}_{}'.format(dike, e)
                for e in ['Expected Annual Damage', 'Dike Investment Costs']
            ])
        dikes_variable_names.extend(['RfR Total Costs'])
        dikes_variable_names.extend(['Expected Evacuation Costs'])

        dike_model.outcomes = [
            ScalarOutcome('All Costs',
                          variable_name=[var for var in dikes_variable_names],
                          function=sum_over,
                          kind=direction),
            ScalarOutcome('Expected Number of Deaths',
                          variable_name=[
                              '{}_Expected Number of Deaths'.format(dike)
                              for dike in function.dikelist
                          ],
                          function=sum_over,
                          kind=direction)
        ]

    # 3-objectives PF:
    elif problem_formulation_id == 1:
        dike_model.outcomes = [
            ScalarOutcome('Expected Annual Damage',
                          variable_name=[
                              '{}_Expected Annual Damage'.format(dike)
                              for dike in function.dikelist
                          ],
                          function=sum_over,
                          kind=direction),
            ScalarOutcome('Total Investment Costs',
                          variable_name=[
                              '{}_Dike Investment Costs'.format(dike)
                              for dike in function.dikelist
                          ] + ['RfR Total Costs'] + ['Expected Evacuation Costs'],
                          function=sum_over,
                          kind=direction),
            ScalarOutcome('Expected Number of Deaths',
                          variable_name=[
                              '{}_Expected Number of Deaths'.format(dike)
                              for dike in function.dikelist
                          ],
                          function=sum_over,
                          kind=direction)
        ]

    # 12-objectives PF:
    elif problem_formulation_id == 2:
        outcomes = []
        for dike in function.dikelist:
            outcomes.append(
                ScalarOutcome('{} Total Costs'.format(dike),
                              variable_name=[
                                  '{}_{}'.format(dike, e) for e in
                                  ['Expected Annual Damage',
                                   'Dike Investment Costs']
                              ],
                              function=sum_over,
                              kind=direction))
            outcomes.append(
                ScalarOutcome('{}_Expected Number of Deaths'.format(dike),
                              kind=direction))
        outcomes.append(ScalarOutcome('RfR Total Costs', kind=direction))
        outcomes.append(
            ScalarOutcome('Expected Evacuation Costs', kind=direction))
        dike_model.outcomes = outcomes

    # 17-objectives PF:
    elif problem_formulation_id == 3:
        outcomes = []
        for dike in function.dikelist:
            for entry in [
                    'Expected Annual Damage', 'Dike Investment Costs',
                    'Expected Number of Deaths'
            ]:
                o = ScalarOutcome('{}_{}'.format(dike, entry), kind=direction)
                outcomes.append(o)
        outcomes.append(ScalarOutcome('RfR Total Costs', kind=direction))
        outcomes.append(
            ScalarOutcome('Expected Evacuation Costs', kind=direction))
        dike_model.outcomes = outcomes

    else:
        # FIX: corrected the misspelled error message ('unknonw')
        raise TypeError('unknown identifier')

    return dike_model
gd = nx.density(er) mydensity.append(gd) cum = 0 for d in mydensity: cum = cum + d density = cum / replications return {'density': density} if __name__ == '__main__': ema_logging.LOG_FORMAT = '[%(name)s/%(levelname)s/%(processName)s] %(message)s' ema_logging.log_to_stderr(ema_logging.INFO) model = Model('SimulateER', function=simulate_ER) # instantiate the model # specify uncertainties model.uncertainties = [RealParameter("p", 0.1, 1.0)] model.levers = [IntegerParameter("n", 10, 100)] # specify outcomes model.outcomes = [ScalarOutcome('density')] model.constants = [Constant('replications', 10)] n_scenarios = 10 n_policies = 10 res = perform_experiments(model, n_scenarios, n_policies) """ with MultiprocessingEvaluator(model) as evaluator: res = evaluator.perform_experiments(n_scenarios, n_policies,
if __name__ == '__main__': freeze_support() lake_model_actual() ##################################################################################################### # now connect the model with the workbench from ema_workbench import Model, RealParameter, ScalarOutcome, Constant model = Model('lakeproblem', function=lake_model_actual) model.time_horizon = 100 # specify uncertainties model.uncertainties = [ RealParameter('b', 0.1, 0.45), RealParameter('q', 2.0, 4.5), RealParameter('mean', 0.01, 0.05), RealParameter('stdev', 0.001, 0.005), RealParameter('delta', 0.93, 0.99) ] # specify outcomes model.outcomes = [ ScalarOutcome('max_P'), ScalarOutcome('utility'), ScalarOutcome('inertia'), ScalarOutcome('reliability') ] # set levers
#print(total_policy_list[0]) #print(total_policy_list[1]) if __name__ == "__main__": print("uncertainty analysis started for: " + principles_list[principle_index] + " case for " + str(nfe) + " scenario's") model = PyRICE(model_specification="EMA", welfare_function="egalitarian") RICE = Model('RICE', function=model) RICE.uncertainties = [ IntegerParameter('fdamage', 0, 2), IntegerParameter('t2xco2_index', 0, 999), IntegerParameter('t2xco2_dist', 0, 2), RealParameter('fosslim', 4000, 13649), IntegerParameter('scenario_pop_gdp', 0, 5), IntegerParameter('scenario_sigma', 0, 2), IntegerParameter('scenario_cback', 0, 1), IntegerParameter('scenario_elasticity_of_damages', 0, 2), IntegerParameter('scenario_limmiu', 0, 1) ] #same for all formulations RICE.outcomes = get_all_model_outcomes_uncertainty_search( optimization_formulation="utilitarian") ema_logging.log_to_stderr(ema_logging.INFO) #only needed on IPython console within Anaconda __spec__ = "ModuleSpec(name='builtins', loader=<class '_frozen_importlib.BuiltinImporter'>)"
if __name__ == '__main__': model = NetLogoModel("PoRModel", wd="./", model_file='git/model/PoR_Model.nlogo') model.run_length = 31 model.replications = 10 #netlogo = pyNetLogo.NetLogoLink(gui=False) #netlogo.load_model(os.path.abspath('git/model/PoR_Model.nlogo')) #netlogo.command('setup') # set levers model.levers = [ RealParameter('total-available-subsidy', 0, 100000000), RealParameter('subsidy-for-industries', 0, 200), RealParameter('total-subsidy-increase-for-target', 0, 15), RealParameter('industry-subsidy-increase-for-target', 0, 15), RealParameter('extensible-storage-price', 0, 50) ] #model.outcomes = [ScalarOutcome('co2 emitted to air', ScalarOutcome.MINIMIZE, variable_name='total-co2-emitted-to-air-global', # function=np.sum), # ScalarOutcome('total co2 stored', ScalarOutcome.MAXIMIZE, variable_name='total-co2-stored-global', # function=np.sum), # ScalarOutcome('total subsidy PoRA', ScalarOutcome.MINIMIZE, variable_name='total-subsidy-to-por-global', # function=np.max), # ScalarOutcome('total subsidy industries', ScalarOutcome.MINIMIZE, variable_name='total-subsidy-to-industries-global', # function=np.max)]
from ema_workbench.em_framework.evaluators import MultiprocessingEvaluator from ema_workbench.analysis.plotting import lines from ema_workbench.analysis.plotting_util import BOXPLOT if __name__ == '__main__': #turn on logging ema_logging.log_to_stderr(ema_logging.INFO) model = NetLogoModel('predprey', wd="./models/predatorPreyNetlogo", model_file="Wolf Sheep Predation.nlogo") model.run_length = 100 model.replications = 1 model.uncertainties = [ RealParameter("grass-regrowth-time", 1, 99), RealParameter("initial-number-sheep", 1, 200), RealParameter("initial-number-wolves", 1, 200), RealParameter("sheep-reproduce", 1, 20), RealParameter("wolf-reproduce", 1, 20), ] model.outcomes = [ TimeSeriesOutcome('sheep'), TimeSeriesOutcome('wolves'), TimeSeriesOutcome('grass'), TimeSeriesOutcome('TIME') ] #perform experiments n = 10
filepath_results = os.path.abspath(os.path.join(filepath_model, os.pardir)) + "\\results" multi_model = CombinedModel('UrbanEnergyTransition', wd=filepath_model, vensim_model_file='SD_ema2.vpmx', netlogo_model_file='ABM_ema.nlogo') start = datetime.now() # turn on logging ema_logging.log_to_stderr(ema_logging.INFO) # instantiate a model multi_model.uncertainties = [ # High level System uncertainties RealParameter('SD_Base Investments Renewable', 0.025, 0.125), RealParameter('SD_Maximum cost reduction', 0.1, 0.5), RealParameter('SD_Green Gas investments', 1e8, 4e8), # current expectation = 1.7 (CE Delft) RealParameter('SD_Gas production cost ramp[Natural Gas]', -0.001, 0.001), RealParameter('SD_Gas production cost ramp[Green Gas]', -0.001, 0.001), RealParameter('SD_Grey electricity cost ramp', -0.001, 0.001), #minimum is -0.001 RealParameter('SD_Renewable Electricity cost multiplier', 0.5, 2), RealParameter('SD_Foreign renewable multiplier', 0.5, 2), # Economy Scenarios RealParameter('SD_Expected Period', 4, 10), RealParameter('SD_Expected Amplitude', 0.01, 0.05), # People uncertainties RealParameter('ABM_group-behaviour', 0, 1),
ema_logging.log_to_stderr(ema_logging.INFO) BaseEvaluator.reporting_frequency = 0.1 # ema_logging.log_to_stderr(ema_logging.DEBUG) from PyRICE_V8 import PyRICE model = PyRICE(model_specification="EMA", welfare_function="utilitarian") RICE = Model('RICE', function=model) RICE.uncertainties = [ IntegerParameter('fdamage', 0, 1), IntegerParameter('scenario_pop_tfp', 0, 5), IntegerParameter('scenario_sigma', 0, 5), IntegerParameter('scenario_cback', 0, 2), IntegerParameter('cback_to_zero', 0, 1), RealParameter('fosslim', 4000.0, 13649), RealParameter('limmiu', 0.8, 1.2) ] RICE.levers = [ RealParameter('sr', 0.1, 0.5), RealParameter('irstp', 0.001, 0.015), IntegerParameter('miu_period', 5, 30) ] RICE.outcomes = [ ScalarOutcome('Damages 2055', ScalarOutcome.MINIMIZE), ScalarOutcome('Utility 2055', ScalarOutcome.MAXIMIZE), ScalarOutcome('Damages 2105', ScalarOutcome.MINIMIZE), ScalarOutcome('Utility 2105', ScalarOutcome.MAXIMIZE), ScalarOutcome('Damages 2155', ScalarOutcome.MINIMIZE),
nfe = 25000 if __name__ == "__main__": print("uncertainty analysis started for: " + principles_list[principle_index] + " case for " + str(nfe) + " scenario's") model = PyRICE(model_specification="EMA", welfare_function="egalitarian") RICE = Model('RICE', function=model) RICE.uncertainties = [ IntegerParameter('fdamage', 0, 2), IntegerParameter('t2xco2_index', 0, 999), IntegerParameter('t2xco2_dist', 0, 2), RealParameter('fosslim', 4000, 13649), IntegerParameter('scenario_pop_gdp', 0, 5), IntegerParameter('scenario_sigma', 0, 2), IntegerParameter('scenario_cback', 0, 1), IntegerParameter('scenario_elasticity_of_damages', 0, 2), IntegerParameter('scenario_limmiu', 0, 1) ] RICE.levers = [ RealParameter('sr', 0.1, 0.5), RealParameter('irstp', 0.001, 0.015), IntegerParameter('miu_period', 5, 30), IntegerParameter('egalitarian_discounting', 0, 1) ] #0 = no discouting , 1 = normal discounting, RICE.outcomes = [
model = VensimModel("fluCase", wd=r'./models/flu', model_file=r'FLUvensimV1basecase.vpm') #outcomes model.outcomes = [ TimeSeriesOutcome('deceased population region 1'), TimeSeriesOutcome('infected fraction R1'), ScalarOutcome('max infection fraction', variable_name='infected fraction R1', function=np.max) ] #Plain Parametric Uncertainties model.uncertainties = [ RealParameter('additional seasonal immune population fraction R1', 0, 0.5), RealParameter('additional seasonal immune population fraction R2', 0, 0.5), RealParameter('fatality ratio region 1', 0.0001, 0.1), RealParameter('fatality rate region 2', 0.0001, 0.1), RealParameter('initial immune fraction of the population of region 1', 0, 0.5), RealParameter('initial immune fraction of the population of region 2', 0, 0.5), RealParameter('normal interregional contact rate', 0, 0.9), RealParameter('permanent immune population fraction R1', 0, 0.5), RealParameter('permanent immune population fraction R2', 0, 0.5), RealParameter('recovery time region 1', 0.1, 0.75), RealParameter('recovery time region 2', 0.1, 0.75), RealParameter('susceptible to immune population delay time region 1', 0.5, 2),
This file illustrated the use the EMA classes for a contrived vensim example .. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl> chamarat <c.hamarat (at) tudelft (dot) nl> ''' from __future__ import (division, unicode_literals, absolute_import, print_function) from ema_workbench import (TimeSeriesOutcome, perform_experiments, RealParameter, ema_logging) from ema_workbench.connectors.vensim import VensimModel if __name__ == "__main__": # turn on logging ema_logging.log_to_stderr(ema_logging.INFO) # instantiate a model wd = r'./models/vensim example' vensimModel = VensimModel("simpleModel", wd=wd, model_file=r'\model.vpm') vensimModel.uncertainties = [RealParameter("x11", 0, 2.5), RealParameter("x12", -2.5, 2.5)] vensimModel.outcomes = [TimeSeriesOutcome('a')] results = perform_experiments(vensimModel, 1000, parallel=True)
df_time_periods = db_file.parse('TimePeriods') #model = Model('simpleModel', function=optim_model) ############################################################################### #ema_logging.LOG_FORMAT = '[%(name)s/%(levelname)s/%(processName)s] %(message)s' ema_logging.log_to_stderr(ema_logging.DEBUG) ############################################################################## model = PyomoModel('simpleModel', wd='./data') # instantiate the model ############################################################################## # specify uncertainties model.uncertainties = [ CategoricalParameter('demand_index', (3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14)), RealParameter('dev_rate_wind', 0.011, 0.033), #0.022 is estimated base value RealParameter('dev_rate_solar', 0.025, 0.075), #0.05 is estimated base value RealParameter("dev_rate_storage_elec", 0.025, 0.075), #0.05 is estimated base value #RealParameter("dev_rate_storage_heat",0.0125,0.0375),#0.016 is estimated base value RealParameter("dev_rate_conv_P2G", 0.0395, 0.1185), #0.079 is estimated base value #RealParameter("dev_rate_conv_HP",0.0125,0.0375),#0.01 is estimated base value RealParameter("discount_rate", 0.01, 0.15) ] #'base value' is 0.04. # specify outcomes model.outcomes = [ ArrayOutcome('value_vector'), ArrayOutcome('line_value'),
def plot_lotka_volterra(model_instance):
    """Run LHS-sampled experiments on a Lotka-Volterra model and regress
    three prey indicators (final, mean, std) on each uncertainty.

    Returns
    -------
    tuple of list
        (prey_final, prey_mean, prey_std), one entry per experiment.
    """
    model_instance.uncertainties = [
        RealParameter('prey_birth_rate', 0.015, 0.035),
        RealParameter('predation_rate', 0.0005, 0.003),
        RealParameter('predator_efficiency', 0.001, 0.004),
        RealParameter('predator_loss_rate', 0.04, 0.08),
    ]
    model_instance.outcomes = [
        TimeSeriesOutcome('TIME'),
        TimeSeriesOutcome('predators'),
        TimeSeriesOutcome('prey'),
    ]

    with SequentialEvaluator(model_instance) as evaluator:
        sa_results = evaluator.perform_experiments(
            scenarios=50, uncertainty_sampling=LHS)
    experiments, outcomes = sa_results

    # drop the singleton replication axis from every outcome array
    outcomes_squeezed = {name: np.squeeze(arr)
                         for name, arr in outcomes.items()}

    # per-experiment indicators derived from the prey time series
    prey_final = []
    prey_mean = []
    prey_std = []
    for series in outcomes_squeezed["prey"]:
        prey_final.append(series[-1])  # value at the last time step
        prey_mean.append(np.mean(series))
        prey_std.append(np.std(series))

    indicators = {
        'prey_final': prey_final,
        'prey_mean': prey_mean,
        'prey_std': prey_std,
    }

    # regress each indicator on every uncertainty, one at a time
    for indicator in indicators.values():
        for uncertainty_name in ('prey_birth_rate', 'predation_rate',
                                 'predator_efficiency', 'predator_loss_rate'):
            generate_regression_single(experiments[uncertainty_name],
                                       indicator)

    # NOTE(review): this call repeats the first regression from the loop
    # above; kept to preserve original behavior — confirm whether intended.
    generate_regression_single(experiments["prey_birth_rate"], prey_final)

    return prey_final, prey_mean, prey_std
model.assets = assets model.yearofintroduction = get_introductionyear() model.paths = paths uncertainties = [] for assettype, paths in paths.items(): if assettype != "SMR": unc = CategoricalParameter(f"{assettype} paths", paths) uncertainties.append(unc) uncertainties += [IntegerParameter("timing CCS", 2022, 2030), BooleanParameter("offshore wind growth"), CategoricalParameter("decisionMakingModel", ["Reactive", "Current", "Proactive", "Collaborative"]), RealParameter("capex_factor", 0.7, 1.3), RealParameter("leadtime_factor", 0.7, 1.3), BooleanParameter("shuffle-needed-investments?"), IntegerParameter("random-seed", -2147483648, 2147483647) ] model.uncertainties = uncertainties outcomes = [ArrayOutcome('stedin_capacity'), ArrayOutcome('stedin_load'), ArrayOutcome('tennet_capacity'), ArrayOutcome('tennet_load'), ArrayOutcome('gasunie_capacity'), ArrayOutcome('stedin capex'), ArrayOutcome('tennet capex'), ArrayOutcome('gasunie capex'),
events = o.data.events[PLAYER] metrics = o.data.endOfRun[PLAYER] out = collectOutputs(events, metrics) return out if __name__ == '__main__': ema_logging.LOG_FORMAT = '[%(name)s/%(levelname)s/%(processName)s] %(message)s' ema_logging.log_to_stderr(ema_logging.INFO) model = Model('omegaDriver', function=omegaDriver) # instantiate the model # specify uncertainties model.uncertainties = [ RealParameter("SCUDB.targetRange", 100000.0, 200000.0), RealParameter("SCUDB.targetAltitude", 15000.0, 20000.0) ] model.levers = [ RealParameter("SCUDB.MassProperties.initialMass", 5000.0, 6000.0) ] model.outcomes = [ ScalarOutcome('burnout'), ScalarOutcome('impact'), ScalarOutcome('apogeeAlt'), ScalarOutcome('apogeeTime') ] #model.constants = [Constant('replications', 10)]
.. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl> ''' from __future__ import (absolute_import, print_function, division, unicode_literals) from ema_workbench import (Model, RealParameter, ScalarOutcome, ema_logging, perform_experiments) def some_model(x1=None, x2=None, x3=None): return {'y': x1 * x2 + x3} if __name__ == '__main__': ema_logging.LOG_FORMAT = '[%(name)s/%(levelname)s/%(processName)s] %(message)s' ema_logging.log_to_stderr(ema_logging.INFO) model = Model('simpleModel', function=some_model) # instantiate the model # specify uncertainties model.uncertainties = [ RealParameter("x1", 0.1, 10), RealParameter("x2", -0.01, 0.01), RealParameter("x3", -0.01, 0.01) ] # specify outcomes model.outcomes = [ScalarOutcome('y')] results = perform_experiments(model, 100)
from ema_workbench import (RealParameter, ScalarOutcome, Constant, Model) from lakemodel_function import lake_problem import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt model = Model('lakeproblem', function=lake_problem) #specify uncertainties model.uncertainties = [ RealParameter('b', 0.1, 0.45), RealParameter('q', 2.0, 4.5), RealParameter('mean', 0.01, 0.05), RealParameter('stdev', 0.001, 0.005), RealParameter('delta', 0.93, 0.99) ] # set levers model.levers = [ RealParameter("c1", -2, 2), RealParameter("c2", -2, 2), RealParameter("r1", 0, 2), RealParameter("r2", 0, 2), RealParameter("w1", 0, 1) ] #specify outcomes model.outcomes = [ ScalarOutcome('max_P'),
# ema_logging.log_to_stderr(ema_logging.DEBUG) from PyRICE_V8 import PyRICE # Sufficitarian principle with aggregated utility model = PyRICE(model_specification="EMA",welfare_function="sufficitarian") RICE = Model('RICE', function = model) RICE.uncertainties =[IntegerParameter('fdamage',0,1), IntegerParameter('scenario_pop_tfp',0,5), IntegerParameter('scenario_sigma',0,5), IntegerParameter('scenario_cback',0,2), IntegerParameter('cback_to_zero',0,1), RealParameter('fosslim', 4000, 13649), RealParameter('limmiu',0.8,1.2)] RICE.levers = [RealParameter('sr', 0.1, 0.5), RealParameter('irstp', 0.001, 0.015), IntegerParameter('miu_period', 5, 30), IntegerParameter('sufficitarian_discounting', 0,1), RealParameter('growth_factor_suf',1,1.04), RealParameter('ini_suf_treshold',0.7,2.4)] RICE.outcomes =[ScalarOutcome('Distance to treshold 2055', ScalarOutcome.MINIMIZE), ScalarOutcome('Population under treshold 2055', ScalarOutcome.MINIMIZE), ScalarOutcome('Distance to treshold 2105', ScalarOutcome.MINIMIZE), ScalarOutcome('Population under treshold 2105', ScalarOutcome.MINIMIZE),
def perform_EMA_sobol_experiment(model, N):
    """Run Sobol-sampled experiments on *model* and plot SALib Sobol
    indices for three prey indicators (final, mean, std).

    The workbench's Sobol sampler expands N into N * (2D + 2) scenarios
    internally, so N is passed through unchanged.
    """
    model.uncertainties = [
        RealParameter('prey_birth_rate', 0.015, 0.035),
        RealParameter('predation_rate', 0.0005, 0.003),
        RealParameter('predator_efficiency', 0.001, 0.004),
        RealParameter('predator_loss_rate', 0.04, 0.08),
    ]
    total_scenarios = N

    model.outcomes = [
        TimeSeriesOutcome('TIME', function=np.squeeze),
        TimeSeriesOutcome('predators', function=np.squeeze),
        TimeSeriesOutcome('prey', function=np.squeeze),
    ]

    # N * (2D+2) scenarios are generated by the workbench itself
    experiments, outcomes = perform_experiments(
        model, scenarios=total_scenarios, uncertainty_sampling='sobol')

    # per-experiment indicators from the prey time series
    prey_final = []
    prey_mean = []
    prey_std = []
    for series in outcomes["prey"]:
        prey_final.append(series[-1])  # value at the last time step
        prey_mean.append(np.mean(series))
        prey_std.append(np.std(series))

    prey_final_np = np.array(prey_final)
    prey_mean_np = np.array(prey_mean)
    prey_std_np = np.array(prey_std)

    # three side-by-side subplots sharing the y axis
    fig1 = plt.figure()
    fig1.tight_layout()
    fig1.set_size_inches(20, 10)
    ax1 = fig1.add_subplot(131)
    ax2 = fig1.add_subplot(132, sharey=ax1)
    ax3 = fig1.add_subplot(133, sharey=ax1)

    # Sobol analysis via SALib, one panel per indicator
    perform_SALib_sobol(model, prey_final_np, "Final value of prey", ax1)
    perform_SALib_sobol(model, prey_mean_np, "Mean value of prey", ax2)
    perform_SALib_sobol(model, prey_std_np, "Standard deviation of prey", ax3)
import numpy as np from ema_workbench import (RealParameter, TimeSeriesOutcome, ema_logging, perform_experiments) from ema_workbench.connectors.excel import ExcelModel from ema_workbench.em_framework.evaluators import MultiprocessingEvaluator from ema_workbench.em_framework.evaluators import SequentialEvaluator if __name__ == "__main__": ema_logging.log_to_stderr(level=ema_logging.INFO) model = ExcelModel("predatorPrey", wd=".", model_file='PredPrey.xlsx') model.uncertainties = [ RealParameter("B3", 0.015, 0.35), # prey_birth_rate RealParameter("B4", 0.0005, 0.003), # predation_rate RealParameter("B5", 0.001, 0.004), # predator_efficiency RealParameter("B6", 0.04, 0.08), # predator_loss_rate ] # specification of the outcomes model.outcomes = [ TimeSeriesOutcome( "B17:BDF17"), # we can refer to a range in the normal way TimeSeriesOutcome("B18:BDF18") ] # we can also use named range # name of the sheet model.default_sheet = "Sheet1"