def _get_exploratory_results(mediator, context):
    # turn logging on
    ema_logging.log_to_stderr(ema_logging.INFO)

    # create an EMA model object
    ema_model = _build_executable_model(mediator, context)

    with MultiprocessingEvaluator(ema_model,
                                  n_processes=context.num_processes,
                                  maxtasksperchild=4) as evaluator:
        # run model using EMA
        results = evaluator.perform_experiments(context.num_experiments)

        return results
Example #2
def performReevaluation(model, params, policies, outputFile):
    if params.createNewReevaluationResults:
        with MultiprocessingEvaluator(model) as evaluator:
            scenarios = params.evaluationScenarios
            results = evaluator.perform_experiments(scenarios=scenarios,
                                                    policies=policies)
        if not os.path.exists(params.reevaluateOutputFolder):
            os.makedirs(params.reevaluateOutputFolder)
        save_results(results, params.reevaluateOutputFolder + outputFile)
    else:
        print('Loading reevaluation from ' + params.reevaluateOutputFolder +
              outputFile)
        results = load_results(params.reevaluateOutputFolder + outputFile)

    return results
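Example #3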
    # no dike increase, no warning, none of the rfr
    zero_policy = {'DaysToThreat': 0}
    zero_policy.update(
        {'DikeIncrease {}'.format(n): 0
         for n in planning_steps})
    zero_policy.update({'RfR {}'.format(n): 0 for n in planning_steps})
    pol0 = {}

    for key in dike_model.levers:
        # lever names have the form '<location>_<lever>'; use the lever part
        # to look up its zero setting
        _, s2 = key.name.split('_')
        pol0[key.name] = zero_policy[s2]

    policy0 = Policy('Policy 0', **pol0)

    with MultiprocessingEvaluator(dike_model) as evaluator:
        results1 = evaluator.perform_experiments(scenarios=100,
                                                 policies=policy0)
    experiments1, outcomes1 = results1

    sns.pairplot(pd.DataFrame.from_dict(outcomes1))
    plt.show()

#%% visual analysis
from ema_workbench.analysis import pairs_plotting

fig, axes = pairs_plotting.pairs_scatter(experiments1,
                                         outcomes1,
                                         group_by='policy',
                                         legend=False)
fig.set_size_inches(8, 8)
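Example #4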
        ArrayOutcome('Total Electricity use'),
        ArrayOutcome('Neighbourhood Data'),
        ArrayOutcome('Municipality Data')
    ]

    multi_model.replications = 4
    multi_model.nrScenarios = 50
    multi_model.nrPolicies = 30

    #replications: 4
    #scenarios: 50
    #policies: 30

    #results = perform_experiments(multi_model, 1, 1)

    with MultiprocessingEvaluator(multi_model, n_processes=50) as evaluator:
        results = perform_experiments(multi_model,
                                      multi_model.nrScenarios,
                                      multi_model.nrPolicies,
                                      evaluator=evaluator,
                                      callback=Extra_dim_Callback)

    end = datetime.now()

    print('Total experiments took ' + str(end - start))

    no_categorical_results = [
        'SD_Average Gas Price', 'SD_Average Electricity Price',
        'SD_Average Heat Price',
        'SD_National Energy System Distribution[Natural Gas]',
        'SD_National Energy System Distribution[Green Gas]',
Example #5
                                        variable_name='Use SP-A',
                                        function=np.sum),
                          ScalarOutcome(name='Total green steam use by Nouryon (ton/week)',
                                        variable_name='Use SP-B',
                                        function=np.sum),
                          ArrayOutcome(name='Chlorine storage stock at Nouryon (ton)',
                                       variable_name='Chlorine storage'),
                          ArrayOutcome(name='Run-time (s)',
                                       variable_name='Run-time')]
    
    # define the full factorial set of policies with names
    policies = [Policy('None of the options', **{'Steam Pipe': False, 'E-boiler': False, 'Chlorine Storage': False}),
                Policy('Only Steam Pipe', **{'Steam Pipe': True, 'E-boiler': False, 'Chlorine Storage': False}),
                Policy('Only E-boiler', **{'Steam Pipe': False, 'E-boiler': True, 'Chlorine Storage': False}),
                Policy('Only Chlorine storage', **{'Steam Pipe': False, 'E-boiler': False, 'Chlorine Storage': True}),
                Policy('Steam Pipe & E-boiler', **{'Steam Pipe': True, 'E-boiler': True, 'Chlorine Storage': False}),
                Policy('Steam Pipe & Chlorine storage', **{'Steam Pipe': True, 'E-boiler': False, 'Chlorine Storage': True}),
                Policy('E-boiler & Chlorine storage', **{'Steam Pipe': False, 'E-boiler': True, 'Chlorine Storage': True}),
                Policy('All options', **{'Steam Pipe': True, 'E-boiler': True, 'Chlorine Storage': True})]
              
    # define the number of scenarios to be sampled
    scenarios = 100

    # run the models
    with MultiprocessingEvaluator(model_list, n_processes=56) as evaluator:
        results = evaluator.perform_experiments(policies=policies, scenarios=scenarios)
    
    # save the results
    save_results(results, f'./results/results_open_exploration_{scenarios}_scenarios_improved_model.tar.gz')

Example #6
def main():

    hybridmodel = Model('hybridmodel', function=hybridloop)

    hybridmodel.uncertainties = [
        IntegerParameter("inputpowerfactor", 15, 25),  #7 13
        IntegerParameter("inputLNGprice", 200, 1000),
        IntegerParameter("inputtransferprice", 50, 300),
        IntegerParameter("inputCapincrease", 1000, 3000),
        IntegerParameter("inputCapincreasetime", 1, 2),
        IntegerParameter("inputLNGCapincrease", 1000, 3000),
        IntegerParameter("inputLNGCapincreasetime", 1, 2),
        #                             RealParameter("DemandBalanceSupplyEnergyPrice", 0.4, 0.7),
        RealParameter("MaximumChangeinDemand", 0.4, 0.7),
        RealParameter("SupplyElasticityGas", 0.06, 0.07),
        RealParameter("SupplyElasticityOil", 0.1, 0.2),
        RealParameter("SupplyElasticityCoal", 0.1, 0.2),
        RealParameter("SupplyElasticityNuclear", 0.007, 0.017),
        RealParameter("SupplyElasticityBiofuel", 0.1, 0.2),
        RealParameter("SupplyElasticityOR", 0.15, 0.3),
        IntegerParameter("EconomicGrowthScenario", 1, 3),
        IntegerParameter("EnergyIntensityScenario", 1, 3),
        RealParameter("CO2coal", 93.46, 113.67),
        RealParameter("CO2oil", 59.58, 102.12),
        RealParameter("Variancepower", -5.0, -0.1),
        IntegerParameter("POil", 8900, 9100),
        IntegerParameter("PCoal", 2800, 3100),
        IntegerParameter("PBio", 29000, 32000),
        IntegerParameter("PNuc", 16000, 17000),
        IntegerParameter("POR", 19000, 22000),
        IntegerParameter("PGasE", 6500, 7000),
        IntegerParameter("PGasNA", 2500, 2700),
        IntegerParameter("PGasSCA", 2500, 2700),
        IntegerParameter("PGasCIS", 6500, 7000),
        IntegerParameter("PGasME", 7000, 8000),
        IntegerParameter("PGasAF", 7000, 8000),
        IntegerParameter("PGasAP", 7000, 8000)
    ]

    hybridmodel.outcomes = [
        TimeSeriesOutcome("EU_GasSup"),
        TimeSeriesOutcome("EU_GasDem"),
        TimeSeriesOutcome("EU_GasCon"),
        TimeSeriesOutcome("EU_OilSup"),
        TimeSeriesOutcome("EU_OilDem"),
        TimeSeriesOutcome("EU_OilCon"),
        TimeSeriesOutcome("EU_CoalSup"),
        TimeSeriesOutcome("EU_CoalDem"),
        TimeSeriesOutcome("EU_CoalCon"),
        TimeSeriesOutcome("EU_NucSup"),
        TimeSeriesOutcome("EU_NucDem"),
        #                         TimeSeriesOutcome("EU_NucCon"),
        TimeSeriesOutcome("EU_BioSup"),
        TimeSeriesOutcome("EU_BioDem"),
        TimeSeriesOutcome("EU_BioCon"),
        TimeSeriesOutcome("EU_ORSup"),
        TimeSeriesOutcome("EU_ORDem"),
        #                         TimeSeriesOutcome("EU_ORCon"),
        TimeSeriesOutcome("EU_EDem"),
        TimeSeriesOutcome("EU_ESup"),
        TimeSeriesOutcome("EU_GDP"),
        TimeSeriesOutcome("EU_CO2"),
        TimeSeriesOutcome("EU_RusGas"),
        TimeSeriesOutcome("EU_EUGI"),
        TimeSeriesOutcome("EU_GIC"),
        TimeSeriesOutcome("EU_RGperAG"),
        TimeSeriesOutcome("EU_RGperTES"),
        TimeSeriesOutcome("EU_RGperGC"),
        TimeSeriesOutcome("EU_GICperBBTU"),
        TimeSeriesOutcome("Oil_Price"),
        TimeSeriesOutcome("Coal_Price"),
        TimeSeriesOutcome("Bio_Price"),
        TimeSeriesOutcome("Gas_PriceE"),
        TimeSeriesOutcome("Nuc_PriceE"),
        TimeSeriesOutcome("OR_PriceE"),
        TimeSeriesOutcome("FuncpriceGas"),
        TimeSeriesOutcome("FuncpriceOil"),
        TimeSeriesOutcome("FuncpriceCoal")
    ]

    hybridmodel.levers = [
        IntegerParameter("EnergyUnion", 0, 1),
        IntegerParameter("CO2Cost", 0, 1)
    ]

    ema_logging.log_to_stderr(ema_logging.INFO)
    with MultiprocessingEvaluator(hybridmodel) as evaluator:
        results = evaluator.perform_experiments(scenarios=1,
                                                policies=4,
                                                levers_sampling='ff')

    save_results(results, r'./1000 runs V30.tar.gz')
Example #7
paraxes = parcoords.ParallelAxes(limits, rot=0)
paraxes.plot(data)
paraxes.fig.set_size_inches(25, 10)
paraxes.legend()
plt.show()
paraxes.fig.savefig(os.path.join(fig_path, str(run) + '_v6__sc_disc_par_results2' + '.png'))

# %%
from ema_workbench.em_framework.optimization import (HyperVolume, EpsilonProgress)

convergence_metrics = [HyperVolume(minimum=[0, 0, 0, 0], maximum=[3, 2, 1.01, 1.01]),
                       EpsilonProgress()]

with MultiprocessingEvaluator(model) as evaluator:
    results, convergence = evaluator.optimize(nfe=1000, searchover='levers',
                                              convergence=convergence_metrics,
                                              epsilons=[0.1] * len(model.outcomes))

#%%
from dicemodel.specs import nordhaus_policy, reference_scenario

eps = [0.001, 0.1, 0.1, 0.1] * (len(dice_sm.outcomes) // 4)
convergence_metrics = [EpsilonProgress()]
nord_optimal_policy = Policy('nord_optimal_policy',
                             **nordhaus_policy(np.mean(dice_opt.iloc[129]), 0.015, 0, 0, 29))
nfe = 100000
# Switch to the worst case: flip the optimization direction of every outcome
for outcome in dice_sm.outcomes:
    if outcome.kind == ScalarOutcome.MINIMIZE:
        outcome.kind = ScalarOutcome.MAXIMIZE
    else:
        outcome.kind = ScalarOutcome.MINIMIZE
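Example #8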
# .. codeauthor:: jhkwakkel


if __name__ == '__main__':
    # turn on logging
    ema_logging.log_to_stderr(ema_logging.INFO)

    model = NetLogoModel('predprey',
                         wd="./models/predatorPreyNetlogo",
                         model_file="Wolf Sheep Predation.nlogo")
    model.run_length = 100
    model.replications = 10

    model.uncertainties = [RealParameter("grass-regrowth-time", 1, 99),
                           RealParameter("initial-number-sheep", 50, 100),
                           RealParameter("initial-number-wolves", 50, 100),
                           RealParameter("sheep-reproduce", 5, 10),
                           RealParameter("wolf-reproduce", 5, 10),
                           ]

    model.outcomes = [TimeSeriesOutcome('sheep'),
                      TimeSeriesOutcome('wolves'),
                      TimeSeriesOutcome('grass')]

    # perform experiments
    n = 10

    with MultiprocessingEvaluator(model, n_processes=2,
                                  maxtasksperchild=4) as evaluator:
        results = evaluator.perform_experiments(n)
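Example #9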
    df_unc['Max'] = df_unc['Reference'] * 1.5

    vensimModel.uncertainties = [
        RealParameter(row['Uncertainties'], row['Min'], row['Max'])
        for index, row in df_unc.iterrows()
    ]

    #vensimModel.outcomes = [TimeSeriesOutcome(out) for out in df_out['Outcomes']]
    vensimModel.outcomes = [
        TimeSeriesOutcome('Total Agricultural and Land Use Emissions')
    ]

    n = 2500
    for sc in [0, 2, 3, 4]:
        # set the diet composition switch for this run; recreate the evaluator
        # so that the worker processes pick up the new constant
        vensimModel.constants = [Constant('SA Diet Composition Switch', sc)]
        with MultiprocessingEvaluator(vensimModel, n_processes=7) as evaluator:
            start = time.time()
            results_sa = evaluator.perform_experiments(
                n, uncertainty_sampling=SOBOL, reporting_interval=5000)
            end = time.time()
            print("Experiments took {} seconds, {} hours.".format(
                end - start, (end - start) / 3600))

            fn = './Diet_Sobol_n{}_sc{}_v4_2050.tar.gz'.format(
                n, sc
            )  #v2 is with narrow ranges for efficacy and removing some of the unimportant parameters
            #v3 is with the new multiplicative formulation, and new social norm parameters
            save_results(results_sa, fn)

Example #10
def runMoea(model, params, fileEnd, reference=None, refNum=None):
    archiveName = 'archives_' + fileEnd
    convergenceName = 'convergences_' + fileEnd

    if not params.createNewOptimizationResults:
        print('Loading archives from ' + params.optimizeOutputFolder +
              archiveName + '.csv')
        print('Loading convergences from ' + params.optimizeOutputFolder +
              convergenceName + '.csv')
        archives = pd.read_csv(params.optimizeOutputFolder + archiveName +
                               '.csv',
                               index_col=0)
        convergences = pd.read_csv(params.optimizeOutputFolder +
                                   convergenceName + '.csv',
                                   index_col=0)
        return (archives, convergences)

    archs = []
    convs = []

    if not os.path.exists(params.optimizeOutputFolder):
        os.makedirs(params.optimizeOutputFolder)
    tmpfolder = params.optimizeOutputFolder + model.name + '/'
    if not os.path.exists(tmpfolder):
        os.makedirs(tmpfolder)

    with MultiprocessingEvaluator(model) as evaluator:
        for i in range(params.numberOptimizationRepetitions):
            print('Optimizing Run ', i)
            if params.name == 'mordm':
                convergences = [
                    HyperVolume(minimum=[0, 0, 0, 0], maximum=[2.5, 2, 1, 1]),
                    EpsilonProgress()
                ]
            else:
                convergences = [
                    HyperVolume(minimum=[0, 0, 0, 0], maximum=[10, 2, 1, 1]),
                    EpsilonProgress()
                ]
            if params.algoName == 'NSGAIIHybrid':
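                # track how often NSGAIIHybrid selects each recombination operator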
                for j, name in enumerate(['SBX', 'PCX', 'DE', 'UNDX', 'UM']):
                    Convergence.valid_metrics.add(name)
                    convergences.append(OperatorProbabilities(name, j))

            arch, conv = evaluator.optimize(algorithm=params.algorithm,
                                            nfe=params.nfeOptimize[model.name],
                                            searchover='levers',
                                            reference=reference,
                                            epsilons=params.epsilons,
                                            convergence=convergences,
                                            population_size=100)

            conv['run_index'] = i
            arch['run_index'] = i

            if refNum is not None:
                conv['reference_scenario'] = refNum
                arch['reference_scenario'] = refNum

            conv.to_csv(tmpfolder + convergenceName + '_' + str(i) + '.csv')
            arch.to_csv(tmpfolder + archiveName + '_' + str(i) + '.csv')

            convs.append(conv)
            archs.append(arch)

    archives = pd.concat(archs)
    convergences = pd.concat(convs)

    archives.to_csv(params.optimizeOutputFolder + archiveName + '.csv')
    convergences.to_csv(params.optimizeOutputFolder + convergenceName + '.csv')

    return (archives, convergences)
Example #11
    for k, dam in enumerate(dams):
        for j in range(12):
            l.append(RealParameter(dam[0][j], minimum[k], maximum[k]))
            l.append(RealParameter(dam[1][j], 0, 1))
            l.append(RealParameter(dam[2][j], 0, 1))

    basin_model.levers = l
    basin_model.outcomes = [
        ScalarOutcome('pwsobjective',
                      kind=ScalarOutcome.MINIMIZE,
                      expected_range=(1, 33.3)),
        ScalarOutcome('energyobjective',
                      kind=ScalarOutcome.MAXIMIZE,
                      expected_range=(641, 3291)),
        ScalarOutcome('virobjective',
                      kind=ScalarOutcome.MINIMIZE,
                      expected_range=(315, 1341))
    ]
    #ScalarOutcome('firobjective', kind=ScalarOutcome.MINIMIZE),

    convergence_metrics = [
        HyperVolume.from_outcomes(basin_model.outcomes),
        EpsilonProgress()
    ]

    with MultiprocessingEvaluator(basin_model, n_processes=3) as evaluator:
        #results, convergence=evaluator.optimize(nfe=1, searchover='levers',
        #epsilons=[0.1, 0.1, 0.1],
        #convergence=convergence_metrics, reference=None)#constraints=m)
        experiments, outcomes = evaluator.perform_experiments(policies=3)
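Example #12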
def runMoea(model, params, fileEnd, refNum=-1):
    archiveName = 'archives_' + fileEnd
    convergenceName = 'convergences_' + fileEnd

    if not params.createNewOptimizationResults:
        print('Loading archives from ' + params.optimizeOutputFolder +
              archiveName + '.csv')
        print('Loading convergences from ' + params.optimizeOutputFolder +
              convergenceName + '.csv')
        archives = pd.read_csv(params.optimizeOutputFolder + archiveName +
                               '.csv',
                               index_col=0)
        convergences = pd.read_csv(params.optimizeOutputFolder +
                                   convergenceName + '.csv',
                                   index_col=0)
        return (archives, convergences)

    archs = []
    convs = []

    if not os.path.exists(params.optimizeOutputFolder):
        os.makedirs(params.optimizeOutputFolder)
    tmpfolder = params.optimizeOutputFolder + model.name + '/'
    if not os.path.exists(tmpfolder):
        os.makedirs(tmpfolder)

    with MultiprocessingEvaluator(model) as evaluator:
        for i in range(params.numberOptimizationRepetitions):
            print('Run ', i)
            convergence_metrics = [
                HyperVolume(minimum=[0, 0, 0, 0], maximum=[1, 1, 1, 1]),
                EpsilonProgress()
            ]
            if params.algoName == 'NSGAIIHybrid':
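                # track how often NSGAIIHybrid selects each recombination operator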
                for j, name in enumerate(['SBX', 'PCX', 'DE', 'UNDX', 'UM']):
                    Convergence.valid_metrics.add(name)
                    convergence_metrics.append(OperatorProbabilities(name, j))

            arch, conv = evaluator.robust_optimize(
                robustnessFunctions,
                params.optimizationScenarios,
                algorithm=params.algorithm,
                nfe=params.nfeOptimize[model.name],
                constraints=[],
                epsilons=params.epsilons,
                convergence=convergence_metrics)

            arch['run_index'] = i
            conv['run_index'] = i
            archs.append(arch)
            convs.append(conv)

            arch.to_csv(tmpfolder + archiveName + '_' + str(i) + '.csv')
            conv.to_csv(tmpfolder + convergenceName + '_' + str(i) + '.csv')

    archives = pd.concat(archs)
    convergences = pd.concat(convs)

    archives.to_csv(params.optimizeOutputFolder + archiveName + '.csv')
    convergences.to_csv(params.optimizeOutputFolder + convergenceName + '.csv')

    return (archives, convergences)
Example #13
	def test_robust_evaluation(self):
		# %%

		import os
		test_dir = os.path.dirname(__file__)

		from ema_workbench import ema_logging, MultiprocessingEvaluator, SequentialEvaluator
		from emat.examples import road_test
		import numpy, pandas, functools, random
		from emat import Measure
		s, db, m = road_test()

		MAXIMIZE = Measure.MAXIMIZE
		MINIMIZE = Measure.MINIMIZE

		robustness_functions = [
			Measure(
				'Expected Net Benefit',
				kind=Measure.INFO,
				variable_name='net_benefits',
				function=numpy.mean,
			),

			Measure(
				'Probability of Net Loss',
				kind=MINIMIZE,
				variable_name='net_benefits',
				function=lambda x: numpy.mean(x < 0),
				min=0,
				max=1,
			),

			Measure(
				'95%ile Travel Time',
				kind=MINIMIZE,
				variable_name='build_travel_time',
				function=functools.partial(numpy.percentile, q=95),
				min=60,
				max=150,
			),

			Measure(
				'99%ile Present Cost',
				kind=Measure.INFO,
				variable_name='present_cost_expansion',
				function=functools.partial(numpy.percentile, q=99),
			),

			Measure(
				'Expected Present Cost',
				kind=Measure.INFO,
				variable_name='present_cost_expansion',
				function=numpy.mean,
			),

		]
		# %%

		numpy.random.seed(42)

		with MultiprocessingEvaluator(m) as evaluator:
			r1 = m.robust_evaluate(
				robustness_functions,
				scenarios=20,
				policies=5,
				evaluator=evaluator,
			)

		import pandas
		correct = pandas.read_json(
			'{"amortization_period":{"0":19,"1":23,"2":50,"3":43,"4":35},"debt_type":{"0":"Rev Bond","1":"Paygo"'
			',"2":"GO Bond","3":"Paygo","4":"Rev Bond"},"expand_capacity":{"0":26.3384401031,"1":63.3898549337,"2'
			'":51.1360252492,"3":18.7230954832,"4":93.9205959335},"interest_rate_lock":{"0":false,"1":true,"2":fal'
			'se,"3":true,"4":false},"Expected Net Benefit":{"0":-157.486494925,"1":-244.2423401934,"2":-189.633908'
			'4553,"3":-4.2656265778,"4":-481.1208898635},"Probability of Net Loss":{"0":0.95,"1":1.0,"2":0.95,"3":'
			'0.7,"4":1.0},"95%ile Travel Time":{"0":74.6904209781,"1":65.8492894317,"2":67.6932507947,"3":79.09851'
			'23853,"4":63.203313888},"99%ile Present Cost":{"0":3789.8036648358,"1":9121.0832380586,"2":7357.89572'
			'71441,"3":2694.0416972887,"4":13514.111590462},"Expected Present Cost":{"0":3158.4461451444,"1":7601.'
			'5679809722,"2":6132.1164500957,"3":2245.2312484183,"4":11262.7453643551}}')
		correct['debt_type'] = correct['debt_type'].astype(
			pandas.CategoricalDtype(categories=['GO Bond', 'Rev Bond', 'Paygo'], ordered=True))

		pandas.testing.assert_frame_equal(r1, correct)

		numpy.random.seed(7)

		from ema_workbench.em_framework.samplers import sample_uncertainties
		scenes = sample_uncertainties(m, 20)

		scenes0 = pandas.DataFrame(scenes)
		cachefile = os.path.join(test_dir,'test_robust_results.csv')
		if not os.path.exists(cachefile):
			scenes0.to_csv(os.path.join(test_dir,'test_robust_evaluation_scenarios.csv'), index=None)
		scenes1 = pandas.read_csv(os.path.join(test_dir,'test_robust_evaluation_scenarios.csv'))
		pandas.testing.assert_frame_equal(scenes0, scenes1)

		from emat import Constraint

		constraint_1 = Constraint(
			"Maximum Log Expected Present Cost",
			outcome_names="Expected Present Cost",
			function=Constraint.must_be_less_than(4000),
		)

		constraint_2 = Constraint(
			"Minimum Capacity Expansion",
			parameter_names="expand_capacity",
			function=Constraint.must_be_greater_than(10),
		)

		constraint_3 = Constraint(
			"Maximum Paygo",
			parameter_names='debt_type',
			outcome_names='99%ile Present Cost',
			function=lambda i, j: max(0, j - 1500) if i == 'Paygo' else 0,
		)

		from emat.optimization import HyperVolume, EpsilonProgress, SolutionViewer, ConvergenceMetrics

		convergence_metrics = ConvergenceMetrics(
			HyperVolume.from_outcomes(robustness_functions),
			EpsilonProgress(),
			SolutionViewer.from_model_and_outcomes(m, robustness_functions),
		)

		numpy.random.seed(8)
		random.seed(8)

		# Test robust optimize
		with SequentialEvaluator(m) as evaluator:
			robust_results, convergence = m.robust_optimize(
					robustness_functions,
					scenarios=scenes,
					nfe=25,
					constraints=[
						constraint_1,
						constraint_2,
						constraint_3,
					],
					epsilons=[0.05,]*len(robustness_functions),
					convergence=convergence_metrics,
					evaluator=evaluator,
			)

		cachefile = os.path.join(test_dir,'test_robust_results.csv')
		if not os.path.exists(cachefile):
			robust_results.to_csv(cachefile, index=None)
		correct2 = pandas.read_csv(cachefile)
		correct2['debt_type'] = correct2['debt_type'].astype(
			pandas.CategoricalDtype(categories=['GO Bond', 'Rev Bond', 'Paygo'], ordered=True))
		pandas.testing.assert_frame_equal(robust_results, correct2, check_less_precise=True)
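Example #14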
def optimize_lake_problem(use_original_R_metrics=False, demo=True):
    """Analysis of the Lake Problem.

    (1) Runs a multi-objective robust optimisation of the Lake Problem
        using both standard and custom robustness metrics;
    (2) analyses the effects of different sets of scenarios on the
        robustness values and robustness rankings;
    (3) plots these effects;
    (4) analyses the effects of different robustness metrics on the
        robustness values and robustness rankings; and
    (5) plots these effects.
    """
    filepath = './robust_results.h5'

    robustness_functions = (get_original_R_metrics() if use_original_R_metrics
                            else get_custom_R_metrics_for_workbench())

    lake_model = get_lake_model()

    if not os.path.exists(filepath):
        n_scenarios = 10 if demo else 200  # for demo purposes only, should in practice be higher
        scenarios = sample_uncertainties(lake_model, n_scenarios)
        nfe = 1000 if demo else 50000  # number of function evaluations

        # Needed on Linux-based machines
        multiprocessing.set_start_method('spawn', True)

        # Run optimisation
        with MultiprocessingEvaluator(lake_model) as evaluator:
            robust_results = evaluator.robust_optimize(
                robustness_functions,
                scenarios,
                nfe=nfe,
                population_size=(10 if demo else 50),
                epsilons=[
                    0.1,
                ] * len(robustness_functions))
        print(robust_results)
        # save the results so the read_hdf below (and later runs) can load them
        robust_results.to_hdf(filepath, key='df')

    robust_results = pd.read_hdf(filepath, key='df')

    # Results are performance in each timestep, followed by robustness
    # we only care about the robustness, so we get that
    col_names = robust_results.columns.values.tolist()
    col_names = col_names[-len(robustness_functions):]

    # Plot the robustness results
    sns.pairplot(robust_results, vars=col_names, diag_kind='kde')
    # plt.show()

    # Extract the decision alternatives (the lever columns) from the results
    decision_alternatives = robust_results.iloc[:, :-4].values
    decision_alternatives = [
        Policy(
            idx, **{
                str(idx): value
                for idx, value in enumerate(
                    decision_alternatives[idx].tolist())
            }) for idx in range(decision_alternatives.shape[0])
    ]

    # Find the influence of scenarios. Here we are creating 5
    # sets of 100 scenarios each, all using the same sampling
    # method.
    scenarios_per_set = 100
    n_sets = 5
    n_scenarios = scenarios_per_set * n_sets
    scenarios = sample_uncertainties(lake_model, n_scenarios)

    # Simulate optimal solutions across all scenarios
    with MultiprocessingEvaluator(lake_model) as evaluator:
        results = evaluator.perform_experiments(scenarios=scenarios,
                                                policies=decision_alternatives)
    # We will just look at the vulnerability ('max_P') for this example
    f = np.reshape(results[1]['max_P'], newshape=(-1, n_scenarios))
    # Split the results into the different sets of scenarios
    split_f = np.split(f, n_sets, axis=1)
    # Calculate robustness for each set of scenarios
    # Note that each split_f[set_idx] is a 2D array, with each row being
    # a decision alternative, and each column a scenario
    R_metric = get_custom_R_metrics()[0]
    R = [R_metric(split_f[set_idx]) for set_idx in range(n_sets)]
    R = np.transpose(R)

    # Calculate similarity in robustness from different scenario sets
    delta, tau = analysis.scenarios_similarity(R)
    # Plot the deltas using a helper function
    analysis.delta_plot(delta)
    # Plot the Kendall's tau-b values using a helper function
    analysis.tau_plot(tau)

    # We now want to test the effects of different robustness metrics,
    # across all of the 100 scenarios. We first define a few new
    # robustness metrics (in addition to our original R metric for
    # the vulnerability). For this example we use some classic metrics
    R_metrics = [
        R_metric,  # The original robustness metric
        functools.partial(metrics.maximax, maximise=False),
        functools.partial(metrics.laplace, maximise=False),
        functools.partial(metrics.minimax_regret, maximise=False),
        functools.partial(metrics.percentile_kurtosis, maximise=False)
    ]

    # Calculate robustness for each robustness metric
    R = np.transpose([metric(f) for metric in R_metrics])

    # Calculate similarity in robustness from different robustness metrics
    tau = analysis.R_metric_similarity(R)
    # Plot the Kendall's tau-b values using a helper function
    analysis.tau_plot(tau)
Example #15
    RICE.uncertainties = [
        IntegerParameter('fdamage', 0, 2),
        IntegerParameter('t2xco2_index', 0, 999),
        IntegerParameter('t2xco2_dist', 0, 2),
        RealParameter('fosslim', 4000, 13649),
        IntegerParameter('scenario_pop_gdp', 0, 5),
        IntegerParameter('scenario_sigma', 0, 2),
        IntegerParameter('scenario_cback', 0, 1),
        IntegerParameter('scenario_elasticity_of_damages', 0, 2),
        IntegerParameter('scenario_limmiu', 0, 1)
    ]

    #same for all formulations
    RICE.outcomes = get_all_model_outcomes_uncertainty_search(
        optimization_formulation="utilitarian")

    ema_logging.log_to_stderr(ema_logging.INFO)
    #only needed on IPython console within Anaconda
    __spec__ = "ModuleSpec(name='builtins', loader=<class '_frozen_importlib.BuiltinImporter'>)"

    with MultiprocessingEvaluator(RICE) as evaluator:
        results = evaluator.perform_experiments(
            scenarios=nfe, policies=total_policy_list[principle_index])

        file_name = "//root//util_gen//server//output//uncertainty_analysis_" + principles_list[
            principle_index] + "_runs_" + str(nfe) + ".tar.gz"
        save_results(results, file_name)

    print("uncertainty cycle " + principles_list[principle_index] +
          " completed")
#####################################################################################################
from ema_workbench import Policy
# performing experiments
# generate experiments
n_scenarios = 1000
n_policies = 4
policy = Policy("no release", **{l.name: 0 for l in model.levers})

from ema_workbench import (MultiprocessingEvaluator, ema_logging,
                           perform_experiments)

ema_logging.log_to_stderr(ema_logging.INFO)

if __name__ == '__main__':
    freeze_support()
    with MultiprocessingEvaluator(model, n_processes=7) as evaluator:
        results = evaluator.perform_experiments(scenarios=n_scenarios,
                                                policies=n_policies,
                                                levers_sampling=MC)

        #####################################################################################################

        # process the results of the experiments
        experiments, outcomes = results
    print(experiments.shape)
    print(list(outcomes.keys()))
    stop = time.time()
    print(f"Runtime in minutes: { ((stop-start)/60) }")

    from ema_workbench.analysis import pairs_plotting
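Example #17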
    def test_robust_evaluation(self):
        # %%

        import os
        test_dir = os.path.dirname(__file__)

        from ema_workbench import ema_logging, MultiprocessingEvaluator, SequentialEvaluator
        from emat.examples import road_test
        import numpy, pandas, functools, random
        from emat import Measure
        s, db, m = road_test()

        MAXIMIZE = Measure.MAXIMIZE
        MINIMIZE = Measure.MINIMIZE

        robustness_functions = [
            Measure(
                'Expected Net Benefit',
                kind=Measure.INFO,
                variable_name='net_benefits',
                function=numpy.mean,
            ),
            Measure(
                'Probability of Net Loss',
                kind=MINIMIZE,
                variable_name='net_benefits',
                function=lambda x: numpy.mean(x < 0),
                min=0,
                max=1,
            ),
            Measure(
                '95%ile Travel Time',
                kind=MINIMIZE,
                variable_name='build_travel_time',
                function=functools.partial(numpy.percentile, q=95),
                min=60,
                max=150,
            ),
            Measure(
                '99%ile Present Cost',
                kind=Measure.INFO,
                variable_name='present_cost_expansion',
                function=functools.partial(numpy.percentile, q=99),
            ),
            Measure(
                'Expected Present Cost',
                kind=Measure.INFO,
                variable_name='present_cost_expansion',
                function=numpy.mean,
            ),
        ]
        # %%

        numpy.random.seed(42)

        with MultiprocessingEvaluator(m) as evaluator:
            r1 = m.robust_evaluate(
                robustness_functions,
                scenarios=20,
                policies=5,
                evaluator=evaluator,
            )

        stable_df('./road_test_robust_evaluate.pkl.gz', r1)

        numpy.random.seed(7)

        from ema_workbench.em_framework.samplers import sample_uncertainties
        scenes = sample_uncertainties(m, 20)

        scenes0 = pandas.DataFrame(scenes)
        stable_df('./test_robust_evaluation_scenarios.pkl.gz', scenes0)

        from emat import Constraint

        constraint_1 = Constraint(
            "Maximum Log Expected Present Cost",
            outcome_names="Expected Present Cost",
            function=Constraint.must_be_less_than(4000),
        )

        constraint_2 = Constraint(
            "Minimum Capacity Expansion",
            parameter_names="expand_capacity",
            function=Constraint.must_be_greater_than(10),
        )

        constraint_3 = Constraint(
            "Maximum Paygo",
            parameter_names='debt_type',
            outcome_names='99%ile Present Cost',
            function=lambda i, j: max(0, j - 1500) if i == 'Paygo' else 0,
        )

        from emat.optimization import HyperVolume, EpsilonProgress, SolutionViewer, ConvergenceMetrics

        convergence_metrics = ConvergenceMetrics(
            HyperVolume.from_outcomes(robustness_functions),
            EpsilonProgress(),
            SolutionViewer.from_model_and_outcomes(m, robustness_functions),
        )

        numpy.random.seed(8)
        random.seed(8)

        # Test robust optimize
        with SequentialEvaluator(m) as evaluator:
            robust = m.robust_optimize(
                robustness_functions,
                scenarios=scenes,
                nfe=25,
                constraints=[
                    constraint_1,
                    constraint_2,
                    constraint_3,
                ],
                epsilons=[
                    0.05,
                ] * len(robustness_functions),
                convergence=convergence_metrics,
                evaluator=evaluator,
            )
        robust_results, convergence = robust.result, robust.convergence

        stable_df('test_robust_results.pkl.gz', robust_results)