Example #1
from ema_workbench import Policy


def generate(name, steam_pipe, e_boiler, chlorine_storage):

    d = {}

    if steam_pipe:
        d['Steam Pipe to Nouryon'] = 7.5
        d['Steam Pipe to Air Liquide and Huntsman'] = 30
    else:
        d['Steam Pipe to Nouryon'] = 0
        d['Steam Pipe to Air Liquide and Huntsman'] = 0
    if e_boiler:
        d['E-boiler 1'] = 5
        d['E-boiler 2'] = 17.5
        d['E-boiler 3'] = 0
        d['E-boiler 4'] = 22.5
        d['E-boiler 5'] = 6
    else:
        d['E-boiler 1'] = 0
        d['E-boiler 2'] = 0
        d['E-boiler 3'] = 1000
        d['E-boiler 4'] = 0
        d['E-boiler 5'] = 0
    if chlorine_storage:
        d['Chlorine storage'] = 3200
    else:
        d['Chlorine storage'] = 1600

    return Policy(name, **d)
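
A short usage sketch for the helper above; the two option combinations shown are illustrative:

# Hypothetical usage: build Policy objects for two option combinations.
all_options = generate('All options', steam_pipe=True, e_boiler=True,
                       chlorine_storage=True)
no_options = generate('None of the options', steam_pipe=False, e_boiler=False,
                      chlorine_storage=False)
policies = [all_options, no_options]
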
Example #2
File: scope.py  Project: tlumip/tmip-emat
    def default_policy(self, **kwargs):
        """
        The default settings for policy levers.

        Args:
            **kwargs:
                Override the defaults given in the scope
                with these values.

        Returns:
            ema_workbench.Policy
        """
        from ema_workbench import Policy
        values = {l.name: l.default for l in self.get_levers()}
        values.update(kwargs)
        return Policy('default', **values)
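
A brief usage sketch for the method above; the `scope` object and the lever name are placeholders, not part of the original project:

# Hypothetical usage: start from the scope's lever defaults and override one lever.
baseline = scope.default_policy()
variant = scope.default_policy(some_lever=0.5)  # 'some_lever' is a placeholder name
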
Example #3
def runReevaluate(model, params, nondominated, fileEnd):
    outputFile = 'reevaluationScenarios_' + fileEnd + '.csv'
    s = reevaluate.buildReevaluationScenarios(model=baseModel,
                                              params=params,
                                              baseScenario=baseModelParams,
                                              outputFile=outputFile)
    params.evaluationScenarios = s

    if params.createNewReevaluationResults:
        cases = nondominated[util.getLeverNames(model)].to_dict('records')
        policies = [Policy(str(i), **entry) for i, entry in enumerate(cases)]
    else:
        policies = []

    outputFile = 'reevaluation_' + fileEnd + '.tar.gz'
    results = reevaluate.performReevaluation(model,
                                             params=params,
                                             policies=policies,
                                             outputFile=outputFile)
    return results
Example #4
def get_model_for_problem_formulation(problem_formulation_idea):
    disease_model = VensimModel("multidisease",
                                wd=r'.\models',
                                model_file='disease_model.vpm')

    disease_model.uncertainties = [
        # Sanitation Unknowns
        IntegerParameter('desire for improved sanitation', 10, 100),
        # Water Supply Unknowns
        IntegerParameter('Cost of well repair', 660, 1800),
        # Water Quality Unknowns
        IntegerParameter('use HWT', 10, 100),
        # Hygiene Unknowns
        IntegerParameter('Intensity of hygiene campaign', 10, 100),
        # Vaccination Unknowns
        IntegerParameter('Reliability of vaccine supply', 10, 100),
        # Treatment Unknowns
        IntegerParameter('seeking treatment', 10, 100),
        # Other Unknowns
        IntegerParameter('percent willing to accept MDA', 10, 100),
        # RealParameter('Childbearing years', 9, 14), #N.B. huge impact
    ]

    disease_model.constants = [
        Constant('Length latrine program', 10),
        Constant('Length GW supply program', 10),
        Constant('Length of water quality program', 10),
        Constant("Duration of hygiene campaign", 10),
        Constant("Length of ORT subsidy", 10),
        Constant("Years of MDA campaign", 10)
    ]

    disease_model.levers = [
        # Sanitation Levers
        IntegerParameter("Number of new latrines to build", 0, 9000),
        IntegerParameter("Number of latrines to maintain", 0, 4000),
        # Water Supply Levers
        IntegerParameter("Number of new wells to drill", 0, 2000),
        IntegerParameter("Number of wells to repair", 0, 2000),
        # Water Quality Levers
        IntegerParameter("Availability HWT", 0, 100),
        # Hygiene Promotion Levers
        IntegerParameter("HW stations to build", 0, 8000),
        # Vaccination Levers
        IntegerParameter("percentage of infants to vaccinate", 0, 100),
        # Treatment Levers
        IntegerParameter("Access to tmt", 0, 100),
        # MDA levers
        IntegerParameter("percent adults given MDA", 0, 100),
        IntegerParameter("percent youth given Albendazole", 0, 100),
    ]

    # add policies
    disease_model.policies = [
        Policy(
            'LatrineProgram', **{
                "Number of new latrines to build": 5000,
                "Number of latrines to maintain": 4000,
                "Length latrine program": 10,
                "Number of new wells to drill": 0,
                "Number of wells to repair": 0,
                "Length GW supply program": 0,
                "Availability HWT": 0,
                "Length of water quality program": 0,
                "HW stations to build": 0,
                "Duration of hygiene campaign": 0,
                "percentage of infants to vaccinate": 0,
                "Access to tmt": 0,
                "Length of ORT subsidy": 0,
                "percent adults given Albendazole": 0,
                "percent youth given Albendazole": 0,
                "Years of MDA campaign": 0,
            }),
        Policy(
            'GWsupply', **{
                "Number of new latrines to build": 0,
                "Number of latrines to maintain": 0,
                "Length latrine program": 0,
                "Number of new wells to drill": 1000,
                "Number of wells to repair": 100,
                "Length GW supply program": 10,
                "Availability HWT": 0,
                "Length of water quality program": 0,
                "HW stations to build": 0,
                "Duration of hygiene campaign": 0,
                "percentage of infants to vaccinate": 0,
                "Access to tmt": 0,
                "Length of ORT subsidy": 0,
                "percent adults given Albendazole": 0,
                "percent youth given Albendazole": 0,
                "Years of MDA campaign": 0,
            }),
        Policy(
            'ORT', **{
                "Number of new latrines to build": 0,
                "Number of latrines to maintain": 0,
                "Length latrine program": 0,
                "Number of new wells to drill": 0,
                "Number of wells to repair": 0,
                "Length GW supply program": 0,
                "Availability HWT": 0,
                "Length of water quality program": 0,
                "HW stations to build": 0,
                "Duration of hygiene campaign": 0,
                "percentage of infants to vaccinate": 0,
                "Access to tmt": 100,
                "Length of ORT subsidy": 10,
                "percent adults given Albendazole": 0,
                "percent youth given Albendazole": 0,
                "Years of MDA campaign": 0,
            }),
        Policy(
            'Hygiene', **{
                "Number of new latrines to build": 0,
                "Number of latrines to maintain": 0,
                "Length latrine program": 0,
                "Number of new wells to drill": 0,
                "Number of wells to repair": 0,
                "Length GW supply program": 0,
                "Availability HWT": 0,
                "Length of water quality program": 0,
                "HW stations to build": 1000,
                "Duration of hygiene campaign": 10,
                "percentage of infants to vaccinate": 0,
                "Access to tmt": 0,
                "Length of ORT subsidy": 0,
                "percent adults given Albendazole": 0,
                "percent youth given Albendazole": 0,
                "Years of MDA campaign": 0,
            }),
        Policy(
            'Vaccin', **{
                "Number of new latrines to build": 0,
                "Number of latrines to maintain": 0,
                "Length latrine program": 0,
                "Number of new wells to drill": 0,
                "Number of wells to repair": 0,
                "Length GW supply program": 0,
                "Availability HWT": 0,
                "Length of water quality program": 0,
                "HW stations to build": 0,
                "Duration of hygiene campaign": 0,
                "percentage of infants to vaccinate": 100,
                "Access to tmt": 0,
                "Length of ORT subsidy": 0,
                "percent adults given Albendazole": 0,
                "percent youth given Albendazole": 0,
                "Years of MDA campaign": 0,
            }),
        Policy(
            'DrinkingWater', **{
                "Number of new latrines to build": 0,
                "Number of latrines to maintain": 0,
                "Length latrine program": 0,
                "Number of new wells to drill": 0,
                "Number of wells to repair": 0,
                "Length GW supply program": 0,
                "Availability HWT": 100,
                "Length of water quality program": 10,
                "HW stations to build": 0,
                "Duration of hygiene campaign": 0,
                "percentage of infants to vaccinate": 0,
                "Access to tmt": 0,
                "Length of ORT subsidy": 0,
                "percent adults given Albendazole": 0,
                "percent youth given Albendazole": 0,
                "Years of MDA campaign": 0,
            }),
        Policy(
            'MDA', **{
                "Number of new latrines to build": 0,
                "Number of latrines to maintain": 0,
                "Length latrine program": 0,
                "Number of new wells to drill": 0,
                "Number of wells to repair": 0,
                "Length GW supply program": 0,
                "Availability HWT": 0,
                "Length of water quality program": 0,
                "HW stations to build": 0,
                "Duration of hygiene campaign": 0,
                "percentage of infants to vaccinate": 0,
                "Access to tmt": 0,
                "Length of ORT subsidy": 0,
                "percent adults given Albendazole": 100,
                "percent youth given Albendazole": 100,
                "Years of MDA campaign": 10,
            }),
        Policy(
            'DoNothing', **{
                "Number of new latrines to build": 0,
                "Number of latrines to maintain": 0,
                "Length latrine program": 0,
                "Number of new wells to drill": 0,
                "Number of wells to repair": 0,
                "Length GW supply program": 0,
                "Availability HWT": 0,
                "Length of water quality program": 0,
                "HW stations to build": 0,
                "Duration of hygiene campaign": 0,
                "percentage of infants to vaccinate": 0,
                "Access to tmt": 0,
                "Length of ORT subsidy": 0,
                "percent adults given Albendazole": 0,
                "percent youth given Albendazole": 0,
                "Years of MDA campaign": 0,
            }),
    ]
    # Problem formulations:
    direction = ScalarOutcome.MINIMIZE
    if problem_formulation_idea == 1:  ##PF1: Minimum child (<5 yo) deaths due to Rotavirus
        disease_model.name = 'Minimize Child <5 Rotavirus infections by 2030'
        disease_model.outcomes.clear()
        disease_model.outcomes = [
            ScalarOutcome(
                'Mortality',  #Deaths due to Rotavirus
                variable_name=[
                    'children under 5 deaths[Rota]', 'ts until 2020',
                    'ts at 2030'
                ],
                function=avg_over_period,
                kind=direction,
                expected_range=(10000, 250000)),
            ScalarOutcome(
                'Morbidity',  #Rota DALYs children
                variable_name=[
                    "Years Lost to Disability in Children[Rota]",
                    'ts until 2020', 'ts at 2030'
                ],
                function=avg_over_period,
                kind=direction,
                expected_range=(6000, 9000)),
            ScalarOutcome(
                'Timeliness',  #Delta child infections 2030
                variable_name=[
                    "Number of Children Infected[Rota]", 'ts until 2020',
                    'ts at 2030'
                ],
                function=change_over_period,
                kind=direction,
                expected_range=(-0.9, 0.1)),
            ScalarOutcome(
                'CapEx',
                variable_name=['Upfront Cost', 'ts until 2020', 'ts at 2040'],
                function=total_over_period,
                kind=direction,
                expected_range=(0, 3000000000000)),
            ScalarOutcome(
                'OpEx',  #Recurring Cost
                variable_name=[
                    'Recurring Cost', 'ts until 2020', 'ts at 2040'
                ],
                function=total_over_period,
                kind=direction,
                expected_range=(0, 2000000000000)),
        ]

    elif problem_formulation_idea == 2:  ##PF2: Minimum prevalence of ascariasis in Youth (Infants, PreSACs, and SACs) in 5 years
        disease_model.name = 'Minimize Ascariasis in Youth by 2025'
        disease_model.outcomes.clear()
        disease_model.outcomes = [
            ScalarOutcome('Mortality',
                          variable_name=[
                              'Youth Mortality[Ascar]', 'ts until 2020',
                              'ts at 2025'
                          ],
                          function=avg_over_period,
                          kind=direction,
                          expected_range=(1000, 20000)),
            ScalarOutcome('Morbidity',
                          variable_name=[
                              'Years Lost to Disability in Youth[Ascar]',
                              'ts until 2020', 'ts at 2025'
                          ],
                          function=avg_over_period,
                          kind=direction,
                          expected_range=(20000, 160000)),
            ScalarOutcome(
                'Timeliness',  #Change in prevalence of ascariasis in youth by 2025
                variable_name=[
                    'Number of Youth Infected[Ascar]', 'ts until 2020',
                    'ts at 2025'
                ],
                function=change_over_period,
                kind=direction,
                expected_range=(-1, 0)),
            ScalarOutcome(
                'CapEx',  #Upfront Cost
                variable_name=['Upfront Cost', 'ts until 2020', 'ts at 2040'],
                function=total_over_period,
                kind=direction,
                expected_range=(0, 3000000000000)),
            ScalarOutcome(
                'OpEx',  #Recurring Cost
                variable_name=[
                    'Recurring Cost', 'ts until 2020', 'ts at 2040'
                ],
                function=total_over_period,
                kind=direction,
                expected_range=(0, 2000000000000)),
        ]
    elif problem_formulation_idea == 3:  #PF3: Minimum Child (<5 yo) mortality, all diseases, w/in one year
        disease_model.name = 'Immediately minimize Child <5 burden from all causes'
        disease_model.outcomes.clear()
        disease_model.outcomes = [
            ScalarOutcome('Mortality',
                          variable_name=[
                              'Total children under 5 deaths', 'ts until 2020',
                              'ts at 2021'
                          ],
                          function=avg_over_period,
                          kind=direction,
                          expected_range=(50000, 400000)),
            ScalarOutcome('Morbidity',
                          variable_name=[
                              'morbidity in children', 'ts until 2020',
                              'ts at 2021'
                          ],
                          function=avg_over_period,
                          kind=direction,
                          expected_range=(40000, 100000)),
            ScalarOutcome(
                'Timeliness',  #Delta child infections 2021
                variable_name=[
                    'Total children w gastroenteric infection',
                    'ts until 2020', 'ts at 2021'
                ],
                function=change_over_period,
                kind=direction,
                expected_range=(-0.5, 0)),
            ScalarOutcome(
                'CapEx',  #Upfront Cost
                variable_name=['Upfront Cost', 'ts until 2020', 'ts at 2040'],
                function=total_over_period,
                kind=direction,
                expected_range=(5000000, 3000000000000)),
            ScalarOutcome(
                'OpEx',  #Recurring Cost
                variable_name=[
                    'Recurring Cost', 'ts until 2020', 'ts at 2040'
                ],  #bc no rec cost will show up in 1 yr
                function=total_over_period,
                kind=direction,
                expected_range=(50000000000, 2000000000000)),
        ]
    elif problem_formulation_idea == 4:  #PF4: Minimum number infected, all diseases, sustainably
        disease_model.name = 'Minimize number infected all diseases by 2040'
        disease_model.outcomes.clear()
        disease_model.outcomes = [
            ScalarOutcome('Mortality',
                          variable_name=[
                              'Total lives lost', 'ts until 2020', 'ts at 2040'
                          ],
                          function=avg_over_period,
                          kind=direction,
                          expected_range=(50000, 250000)),
            ScalarOutcome('Morbidity',
                          variable_name=[
                              'disability burden', 'ts until 2020',
                              'ts at 2040'
                          ],
                          function=avg_over_period,
                          kind=direction,
                          expected_range=(100000, 900000)),
            ScalarOutcome(
                'Timeliness',  #delta infections 2040
                variable_name=[
                    'Total number of gastroenteric infection', 'ts until 2020',
                    'ts at 2040'
                ],
                function=change_over_period,
                kind=direction,
                expected_range=(-1, -.45)),
            ScalarOutcome(
                'CapEx',  #Upfront Cost
                variable_name=['Upfront Cost', 'ts until 2020', 'ts at 2040'],
                function=total_over_period,
                kind=direction,
                expected_range=(20000000000, 3000000000000)),
            ScalarOutcome(
                'OpEx',  #Recurring Cost
                variable_name=[
                    'Recurring Cost', 'ts until 2020', 'ts at 2040'
                ],
                function=total_over_period,
                kind=direction,
                expected_range=(20000000000, 2000000000000)),
            ##recurring costs divided by 20 years
        ]

    else:
        raise TypeError('unknown problem identifier')
    return disease_model
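
A minimal driver sketch for the factory above; the evaluator setup and scenario count are assumptions, not part of the original script:

# Hypothetical driver: build the PF1 model and evaluate its predefined policies
# against sampled uncertainties.
from ema_workbench import MultiprocessingEvaluator, ema_logging

ema_logging.log_to_stderr(ema_logging.INFO)

if __name__ == '__main__':
    disease_model = get_model_for_problem_formulation(1)
    with MultiprocessingEvaluator(disease_model) as evaluator:
        experiments, outcomes = evaluator.perform_experiments(
            scenarios=100, policies=disease_model.policies)
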
Example #5
# %%
from ema_workbench.em_framework.optimization import (HyperVolume,EpsilonProgress)

convergence_metrics = [HyperVolume(minimum=[0,0,0,0], maximum=[3, 2,1.01,1.01]),
                       EpsilonProgress()]

with MultiprocessingEvaluator(model) as evaluator:
    results, convergence = evaluator.optimize(nfe=1e3, searchover='levers',
                                 convergence=convergence_metrics,
                                 epsilons=[0.1,]*len(model.outcomes))

#%%
from dicemodel.specs import nordhaus_policy, reference_scenario
eps = [0.001, 0.1, 0.1, 0.1] * (int(len(dice_sm.outcomes)/4.0))
convergence_metrics = [EpsilonProgress()]
nord_optimal_policy = Policy('nord_optimal_policy', **nordhaus_policy(np.mean(dice_opt.iloc[129]), 0.015, 0, 0, 29))
nfe = 100000
# Switch to the worst case by flipping each outcome's optimization direction
for outcome in dice_sm.outcomes:
    if outcome.kind == ScalarOutcome.MINIMIZE:
        outcome.kind = ScalarOutcome.MAXIMIZE
    else:
        outcome.kind = ScalarOutcome.MINIMIZE
#%%
start = time.time()
print("starting search for wcs", flush=True)
with MultiprocessingEvaluator(dice_sm, n_processes=16) as evaluator:
    results, convergence = evaluator.optimize(nfe=nfe,
                                                searchover='uncertainties',
                                                reference=nord_optimal_policy,
                                                epsilons=eps,
Example #6
        pol1.update({key.name: heightening_policy[s2]})

#%%
###this defines policy 2 (only early warning system)
    warning_policy = {'DaysToThreat': 2}
    warning_policy.update(
        {'DikeIncrease {}'.format(n): 0
         for n in planning_steps})
    warning_policy.update({'RfR {}'.format(n): 1 for n in planning_steps})

    pol2 = {}
    for key in dike_model.levers:
        s1, s2 = key.name.split('_')
        pol2.update({key.name: warning_policy[s2]})

    policy0 = Policy('Nothing', **pol0)
    policy1 = Policy("only heightening", **pol1)
    policy2 = Policy("only room for river", **pol2)

    policies = [policy0, policy1, policy2]

    #%%
    # with MultiprocessingEvaluator(dike_model) as evaluator:
    #     results = evaluator.perform_experiments(scenarios=50,
    #                                                 policies=
    #                                                 [policy0,policy1,
    #                                                   policy2,policy3])

    with MultiprocessingEvaluator(dike_model) as evaluator:
        results = evaluator.perform_experiments(
            scenarios=2000,
Example #7
    model = VensimModel("fluCase",
                        wd='./models/flu',
                        model_file='FLUvensimV1basecase.vpm')

    # outcomes
    model.outcomes = [
        TimeSeriesOutcome('deceased population region 1'),
        TimeSeriesOutcome('infected fraction R1'),
        ScalarOutcome('max infection fraction',
                      variable_name='infected fraction R1',
                      function=np.max),
        ScalarOutcome('time of max',
                      variable_name=['infected fraction R1', 'TIME'],
                      function=time_of_max)
    ]

    # create uncertainties based on csv
    model.uncertainties = create_parameters(
        './models/flu/flu_uncertainties.csv')

    # add policies
    policies = [
        Policy('no policy', model_file='FLUvensimV1basecase.vpm'),
        Policy('static policy', model_file='FLUvensimV1static.vpm'),
        Policy('adaptive policy', model_file='FLUvensimV1dynamic.vpm')
    ]

    with MultiprocessingEvaluator(model, n_processes=4) as evaluator:
        results = evaluator.perform_experiments(1000, policies=policies)

    save_results(results, './data/1000 flu cases with policies.tar.gz')
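
The archive written above can be reloaded later without re-running the experiments; a small sketch:

    # Reload the saved archive (path matches the save_results call above).
    from ema_workbench import load_results

    experiments, outcomes = load_results('./data/1000 flu cases with policies.tar.gz')
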
Example #8
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import networkx as nx
from ema_workbench import ema_logging
from ema_workbench import (Model, RealParameter, ScalarOutcome, CategoricalParameter,
                           IntegerParameter, BooleanParameter, Policy,
                           MultiprocessingEvaluator)
from ema_workbench.em_framework.samplers import sample_uncertainties
# from ema_workbench.em_framework.evaluators import MC
from dike_model_function import DikeNetwork
from problem_formulation import get_model_for_problem_formulation
ema_logging.log_to_stderr(ema_logging.INFO)

dike_model, planning_steps = get_model_for_problem_formulation(5)

policies_0 = [Policy('no policy', **{l.name: 0 for l in dike_model.levers})]

n_scen = 1000
print(n_scen)
with MultiprocessingEvaluator(dike_model) as evalu:
    sa_results = evalu.perform_experiments(n_scen,
                                           policies=policies_0,
                                           uncertainty_sampling='sobol',
                                           reporting_interval=400)

from ema_workbench import save_results
save_results(sa_results, './data/exp/sobolnopol40000scen.tar.gz')

uncertainty = dike_model.uncertainties
levers = dike_model.levers
Example #9
principles_list = [
    "utilitarian", "prioritarian", "egalitarian", "sufficitarian"
]

total_policy_list = []

for principle in principles_list:
    policies = all_policies[all_policies['principle'] == principle]
    policies = policies.dropna(axis='columns')
    policies = policies.iloc[:, :-1]
    policy_list_principle = []

    #get list of policies as input for uncertainty sampling
    for i in range(0, len(policies)):
        policy_dict = policies.iloc[i].to_dict()
        policy_list_principle.append(Policy(policies.index[i], **policy_dict))
    total_policy_list.append(policy_list_principle)

#principle index
principle_index = 2

nfe = 50000

#print(total_policy_list[0])
#print(total_policy_list[1])

if __name__ == "__main__":
    print("uncertainty analysis started for: " +
          principles_list[principle_index] + " case for " + str(nfe) +
          " scenario's")
Example #10
                                RealParameter('delta', 0.93, 0.99)]
    
    # set levers, one for each time step
    lake_model.levers = [RealParameter(str(i), 0, 0.1) for i in 
                         range(lake_model.time_horizon)]
    
    #specify outcomes 
    lake_model.outcomes = [ScalarOutcome('max_P',),
                           ScalarOutcome('utility'),
                           ScalarOutcome('inertia'),
                           ScalarOutcome('reliability')]
    
    # override some of the defaults of the model
    lake_model.constants = [Constant('alpha', 0.41),
                            Constant('nsamples', 150)]
        
    
    # generate a single default no-release policy
    policy = Policy('no release', **{str(i):0.1 for i in range(100)})
    
    n_scenarios = 1000
     
    with MultiprocessingEvaluator(lake_model) as evaluator:
        results = evaluator.perform_experiments(n_scenarios, policy, 
                                                uncertainty_sampling=SOBOL)
        
    sobol_stats, s2, s2_conf = analyze(results, 'max_P')
    print(sobol_stats)
    print(s2)
    print(s2_conf)
Example #11
def optimize_lake_problem(use_original_R_metrics=False, demo=True):
    """Analysis of the Lake Problem.

    (1) Runs a multi-objective robust optimisation of the Lake Problem
        using both standard and custom robustness metrics;
    (2) analyses the effects of different sets of scenarios on the
        robustness values and robustness rankings;
    (3) plots these effects;
    (4) analyses the effects of different robustness metrics on the
        robustness values and robustness rankings; and
    (5) plots these effects.
    """
    filepath = './robust_results.h5'

    robustness_functions = (get_original_R_metrics() if use_original_R_metrics
                            else get_custom_R_metrics_for_workbench())

    lake_model = get_lake_model()

    if not os.path.exists(filepath):
        n_scenarios = 10 if demo else 200  # for demo purposes only, should in practice be higher
        scenarios = sample_uncertainties(lake_model, n_scenarios)
        nfe = 1000 if demo else 50000  # number of function evaluations

        # Needed on Linux-based machines
        multiprocessing.set_start_method('spawn', True)

        # Run optimisation
        with MultiprocessingEvaluator(lake_model) as evaluator:
            robust_results = evaluator.robust_optimize(
                robustness_functions,
                scenarios,
                nfe=nfe,
                population_size=(10 if demo else 50),
                epsilons=[
                    0.1,
                ] * len(robustness_functions))
        print(robust_results)
        # persist the results so the reload below succeeds on the first run
        robust_results.to_hdf(filepath, key='df')

    robust_results = pd.read_hdf(filepath, key='df')

    # Results are performance in each timestep, followed by robustness
    # we only care about the robustness, so we get that
    col_names = robust_results.columns.values.tolist()
    col_names = col_names[-len(robustness_functions):]

    # Plot the robustness results
    sns.pairplot(robust_results, vars=col_names, diag_kind='kde')
    # plt.show()

    # Extract the decision alternatives from the results
    decision_alternatives = robust_results.iloc[:, :-4].values
    decision_alternatives = [
        Policy(
            idx, **{
                str(idx): value
                for idx, value in enumerate(
                    decision_alternatives[idx].tolist())
            }) for idx in range(decision_alternatives.shape[0])
    ]

    # Find the influence of scenarios. Here we are creating 5
    # sets of 100 scenarios each, all using the same sampling
    # method.
    scenarios_per_set = 100
    n_sets = 5
    n_scenarios = scenarios_per_set * n_sets
    scenarios = sample_uncertainties(lake_model, n_scenarios)

    # Simulate optimal solutions across all scenarios
    with MultiprocessingEvaluator(lake_model) as evaluator:
        results = evaluator.perform_experiments(scenarios=scenarios,
                                                policies=decision_alternatives)
    # We will just look at the vulnerability ('max_P') for this example
    f = np.reshape(results[1]['max_P'], newshape=(-1, n_scenarios))
    # Split the results into the different sets of scenarios
    split_f = np.split(f, n_sets, axis=1)
    # Calculate robustness for each set of scenarios
    # Note that each split_f[set_idx] is a 2D array, with each row being
    # a decision alternative, and each column a scenario
    R_metric = get_custom_R_metrics()[0]
    R = [R_metric(split_f[set_idx]) for set_idx in range(n_sets)]
    R = np.transpose(R)

    # Calculate similarity in robustness from different scenario sets
    delta, tau = analysis.scenarios_similarity(R)
    # Plot the deltas using a helper function
    analysis.delta_plot(delta)
    # Plot the Kendall's tau-b values using a helper function
    analysis.tau_plot(tau)

    # We now want to test the effects of different robustness metrics,
    # across all of the 100 scenarios. We first define a few new
    # robustness metrics (in addition to our original R metric for
    # the vulnerability). For this example we use some classic metrics
    R_metrics = [
        R_metric,  # The original robustness metric
        functools.partial(metrics.maximax, maximise=False),
        functools.partial(metrics.laplace, maximise=False),
        functools.partial(metrics.minimax_regret, maximise=False),
        functools.partial(metrics.percentile_kurtosis, maximise=False)
    ]

    # Calculate robustness for each robustness metric
    R = np.transpose([R_metric(f) for R_metric in R_metrics])

    # Calculate similarity in robustness from different robustness metrics
    tau = analysis.R_metric_similarity(R)
    # Plot the Kendall's tau-b values using a helper function
    analysis.tau_plot(tau)
Example #12
    ref_scenario = Scenario('reference', **scen1)

    # no dike increase, no warning, none of the rfr
    zero_policy = {'DaysToThreat': 0}
    zero_policy.update(
        {'DikeIncrease {}'.format(n): 0
         for n in planning_steps})
    zero_policy.update({'RfR {}'.format(n): 0 for n in planning_steps})
    pol0 = {}

    for key in dike_model.levers:
        s1, s2 = key.name.split('_')
        pol0.update({key.name: zero_policy[s2]})

    policy0 = Policy('Policy 0', **pol0)

    #%%

    # Call random scenarios or policies:
    #    n_scenarios = 5
    #    scenarios = sample_uncertainties(dike_model, 50)
    #    n_policies = 10

    # single run
    # start = time.time()
    # dike_model.run_model(ref_scenario, policy0)
    # end = time.time()
    # print(end - start)
    # results = dike_model.outcomes_output
Example #13
# this defines policy 3 (a combined approach: dike heightening plus room for the river)
    mixed_policy = {'DaysToThreat': 2}
    mixed_policy.update(
        {'DikeIncrease {}'.format(n): 5
         for n in planning_steps})
    mixed_policy.update(
        {'RfR {}'.format(n): 1 if n == 0 else 0
         for n in planning_steps})

    pol3 = {}
    for key in dike_model.levers:
        s1, s2 = key.name.split('_')
        pol3.update({key.name: mixed_policy[s2]})

#%%
    policy0 = Policy('Doing Nothing', **pol0)
    policy1 = Policy("Only Heightening", **pol1)
    policy2 = Policy("Only Room for River", **pol2)
    policy3 = Policy('Doing Both', **pol3)
    policies = [policy0, policy1, policy2, policy3]
    n_policies = len(policies)
    #%%
    n_scenarios = 2000
    with MultiprocessingEvaluator(dike_model) as evaluator:
        results = evaluator.perform_experiments(scenarios=n_scenarios,
                                                policies=policies)
    experiments, outcomes = results

    #%%
    outcomes['Total Investment Costs'] = (outcomes['Dike Investment Costs'] +
                                          outcomes['RfR Investment Costs'])
Example #14
# end = time.time()
# print("Time taken: {:0.5f} minutes".format((end - start)/60))

# with open('Outcomes/initial_Pareto_policies.pkl', 'wb') as file_pi:
#     pickle.dump(results, file_pi)

with open('Outcomes/initial_Pareto_policies.pkl', 'rb') as file_pi:
    results = pickle.load(file_pi)

# Now that we have some policies on a Pareto front, we can run them under more scenarios and see the variance of their outcomes across those scenarios.
policies = []
for row in range(results.shape[0]):
    policies.append(
        # Do not include the damage scores
        Policy(name=row, **results.iloc[row, :-5].to_dict()))

# with MultiprocessingEvaluator(dike_model) as evaluator:
#     results = evaluator.perform_experiments(scenarios=50,policies=policies)

# with open('Outcomes/epsilon_results.pkl', 'wb') as file_pi:
#     pickle.dump(results, file_pi)

with open('Outcomes/epsilon_results.pkl', 'rb') as file_pi:
    results = pickle.load(file_pi)

experiments, outcomes = results

outcomes = pd.DataFrame(outcomes)
experiments = pd.DataFrame(experiments)
results = experiments.join(outcomes)
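
A small follow-up sketch of the variance check mentioned above; grouping on the 'policy' column of the joined frame is an assumption about its layout:

# Hypothetical summary: spread of each outcome per policy across the scenarios.
summary = results.groupby('policy')[list(outcomes.columns)].describe()
print(summary)
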
Example #15
    def run_experiments(
        self,
        design: pd.DataFrame = None,
        evaluator=None,
        *,
        design_name=None,
        db=None,
    ):
        """
        Runs a design of combined experiments using this model.

        A combined experiment includes a complete set of input values for
        all exogenous uncertainties (a Scenario) and all policy levers
        (a Policy). Unlike the perform_experiments function in the EMA Workbench,
        this method pairs each Scenario and Policy in sequence, instead
        of running all possible combinations of Scenario and Policy.
        This change ensures compatibility with the EMAT database modules, which
        preserve the complete set of input information (both uncertainties
        and levers) for each experiment.  To conduct a full cross-factorial set
        of experiments similar to the default settings for EMA Workbench,
        use a factorial design by setting the `jointly` argument of
        `design_experiments` to False, or by designing experiments outside
        of EMAT with your own approach.

        Args:
            design (pandas.DataFrame, optional): experiment definitions
                given as a DataFrame, where each exogenous uncertainty and
                policy lever is given as a column and each row is an experiment.
            evaluator (ema_workbench.Evaluator, optional): Optionally give an
                evaluator instance.  If not given, a default SequentialEvaluator
                will be instantiated.
            design_name (str, optional): The name of a design of experiments to
                load from the database.  This design is only used if
                `design` is None.
            db (Database, optional): The database to use for loading and saving experiments.
                If none is given, the default database for this model is used.
                If there is no default db, and none is given here,
                the results are not stored in a database. Set to False to explicitly
                not use the default database, even if it exists.

        Returns:
            pandas.DataFrame:
                A DataFrame that contains all uncertainties, levers, and measures
                for the experiments.

        Raises:
            ValueError:
                If there are no experiments defined.  This includes
                the situation where `design` is given but no database is
                available.

        """

        from ema_workbench import Scenario, Policy, perform_experiments

        # catch the case where the user passes a design name (a str) as the design argument
        if isinstance(design, str) and design_name is None:
            design_name, design = design, None

        if design_name is None and design is None:
            raise ValueError(f"must give design_name or design")

        if db is None:
            db = self.db

        if design_name is not None and design is None:
            if not db:
                raise ValueError(
                    f'cannot load design "{design_name}", there is no db')
            design = db.read_experiment_parameters(self.scope.name,
                                                   design_name)

        if design.empty:
            raise ValueError(f"no experiments available")

        scenarios = [
            Scenario(**dict(zip(self.scope.get_uncertainty_names(), i)))
            for i in design[self.scope.get_uncertainty_names()].itertuples(
                index=False, name='ExperimentX')
        ]

        policies = [
            Policy(f"Incognito{n}", **dict(zip(self.scope.get_lever_names(),
                                               i)))
            for n, i in enumerate(design[self.scope.get_lever_names()].
                                  itertuples(index=False, name='ExperimentL'))
        ]

        if not evaluator:
            from ema_workbench import SequentialEvaluator
            evaluator = SequentialEvaluator(self)

        experiments, outcomes = perform_experiments(
            self,
            scenarios=scenarios,
            policies=policies,
            zip_over={'scenarios', 'policies'},
            evaluator=evaluator)
        experiments.index = design.index

        outcomes = pd.DataFrame.from_dict(outcomes)
        outcomes.index = design.index

        if db:
            db.write_experiment_measures(self.scope.name, self.metamodel_id,
                                         outcomes)

        return self.ensure_dtypes(
            pd.concat([
                experiments.drop(columns=['scenario', 'policy', 'model']),
                outcomes
            ],
                      axis=1,
                      sort=False))
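
A hedged usage sketch for the method above; the `model` instance and the design name are placeholders:

# Hypothetical usage: run a stored design of paired experiments by name,
# or pass an explicit DataFrame of experiment definitions.
results = model.run_experiments(design_name='lhs_design')
# results = model.run_experiments(design=my_design_df)
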
Example #16
                          ScalarOutcome(name = 'Total CO2 emissions (ton/week)',
                                       variable_name = 'CO2 emission',
                                       function = np.sum),
                          ScalarOutcome(name = 'Total green steam use by Air Liquide and Huntsman (ton/week)',
                                       variable_name= 'Use SP-A',
                                       function = np.sum),
                          ScalarOutcome(name = 'Total green steam use by Nouryon (ton/week)',
                                       variable_name = 'Use SP-B',
                                       function = np.sum),
                          ArrayOutcome(name = 'Chlorine storage stock at Nouryon (ton)',
                                       variable_name = 'Chlorine storage'),
                          ArrayOutcome(name = 'Run-time (s)',
                                       variable_name = 'Run-time')]
    
    # define the full factorial set of policies with names
    policies = [Policy('None of the options', **{'Steam Pipe':False, 'E-boiler':False, 'Chlorine Storage':False}),
                Policy('Only Steam Pipe', **{'Steam Pipe':True, 'E-boiler':False, 'Chlorine Storage':False}),
                Policy('Only E-boiler', **{'Steam Pipe':False, 'E-boiler':True, 'Chlorine Storage':False}),
                Policy('Only Chlorine storage', **{'Steam Pipe':False, 'E-boiler':False, 'Chlorine Storage':True}),
                Policy('Steam Pipe & E-boiler', **{'Steam Pipe':True, 'E-boiler':True, 'Chlorine Storage':False}),
                Policy('Steam Pipe & Chlorine storage', **{'Steam Pipe':True, 'E-boiler':False, 'Chlorine Storage':True}),
                Policy('E-boiler & Chlorine storage', **{'Steam Pipe':False, 'E-boiler':True, 'Chlorine Storage':True}),
                Policy('All options', **{'Steam Pipe':True, 'E-boiler':True, 'Chlorine Storage':True})]
              
    # define the number of scenarios to be sampled
    scenarios = 100

    # run the models
    with MultiprocessingEvaluator(model_list, n_processes = 56) as evaluator:
         results = evaluator.perform_experiments(policies = policies, scenarios = scenarios)
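
The eight policies above enumerate the full 2^3 factorial by hand; a sketch of generating the same set programmatically (labels differ slightly; Policy is assumed to be imported as above):

    # Hypothetical alternative: build the 2**3 factorial of boolean options in a loop.
    from itertools import product

    option_names = ['Steam Pipe', 'E-boiler', 'Chlorine Storage']
    policies = []
    for combo in product([False, True], repeat=len(option_names)):
        levers = dict(zip(option_names, combo))
        active = [name for name, on in levers.items() if on]
        label = ' & '.join(active) if active else 'None of the options'
        policies.append(Policy(label, **levers))
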
    
Example #17
                  kind=ScalarOutcome.MINIMIZE,
                  expected_range=(3e8, 2e9)),
    ScalarOutcome('Evacuation',
                  variable_name=evacuation_column,
                  function=total_robustness,
                  kind=ScalarOutcome.MINIMIZE,
                  expected_range=(0, 5e7)),
]

results, convergence = pickle.load(open('results/MORO_50_nfe10000.pkl', 'rb'))

results.iloc[:, 0:-5].T.to_dict()

policy = []

for i in range(len(results)):
    pol = Policy(name="Policy " + str(i), **results.iloc[i, :-5].to_dict())
    policy.append(pol)

tic = time.time()

with MultiprocessingEvaluator(dike_model) as evaluator:
    results = evaluator.perform_experiments(scenarios=200, policies=policy)

toc = time.time()
print('Total run time:{} min'.format((toc - tic) / 60))

# Write the results so this step can be skipped when doing multiple analyses
with open('results/MORO_evaluate.pkl', 'wb') as file_pi:
    pickle.dump(results, file_pi)
Example #18
            scen1.update({key.name: reference_values[key.name]})

        else:
            scen1.update({key.name: reference_values[name_split[1]]})

    ref_scenario = Scenario('reference', **scen1)

    # no dike increase, no warning, none of the rfr
    zero_policy = {'DikeIncrease': 0, 'DaysToThreat': 0, 'RfR': 0}
    pol0 = {}

    for key in dike_model.levers:
        s1, s2 = key.name.split('_')
        pol0.update({key.name: zero_policy[s2]})

    policy0 = Policy('Policy 0', **pol0)

    # Call random scenarios or policies:
    #    n_scenarios = 5
    #    scenarios = sample_uncertainties(dike_model, 50)
    #    n_policies = 10

    # single run
    start = time.time()
    dike_model.run_model(ref_scenario, policy0)
    end = time.time()
    print(end - start)
    results = dike_model.outcomes_output

    # series run
#    experiments, outcomes = perform_experiments(dike_model, ref_scenario, 5)
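
A sketch of the series run hinted at in the commented line above, using the standard perform_experiments signature; the scenario count is illustrative:

    # Hypothetical series run: a few sampled scenarios against policy0.
    from ema_workbench import perform_experiments
    experiments, outcomes = perform_experiments(dike_model, scenarios=5,
                                                policies=[policy0])
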
Example #19
    policy_4.update({'RfR {}'.format(n): 0 for n in planning_steps})
    pol4 = {}
    for key in dike_model.levers:
        s1, s2 = key.name.split('_')
        pol4.update({key.name: policy_4[s2]})    
        
    #this defines policy 5 ###################################
    policy_5 = {'DaysToThreat': 2}
    policy_5.update({'DikeIncrease {}'.format(n): 4 for n in planning_steps})
    policy_5.update({'RfR {}'.format(n): 0 for n in planning_steps})
    pol5 = {}
    for key in dike_model.levers:
        s1, s2 = key.name.split('_')
        pol5.update({key.name: policy_5[s2]})    

    policy0 = Policy("0 dm", **pol0)
    policy1 = Policy("1 dm", **pol1)
    policy2 = Policy("2 dm", **pol2)  
    policy3 = Policy('3 dm', **pol3)
    policy4 = Policy('4 dm', **pol4)
    policy5 = Policy('5 dm', **pol5)
    
    policies = [policy0, policy1, policy2, policy3, policy4, policy5]
    

#%%
    # with MultiprocessingEvaluator(dike_model) as evaluator:
    #     results = evaluator.perform_experiments(scenarios=50, policies=[policy0,policy1,policy2,policy3],
    #                                             uncertainty_sampling=LHS)
    n_scenarios = 500
    n_policies = len(policies)
# set levers
#model.levers = [RealParameter('decisions', 0, 0.1)]
model.levers = [
    RealParameter(f"l{i}", 0, 0.1) for i in range(model.time_horizon)
]

# model constants
model.constants = [Constant('alpha', 0.4), Constant('nsamples', 100)]

#####################################################################################################
from ema_workbench import Policy
# performing experiments
# generate experiments
n_scenarios = 1000
n_policies = 4
policy = Policy("no release", **{l.name: 0 for l in model.levers})

from ema_workbench import (MultiprocessingEvaluator, ema_logging,
                           perform_experiments)

ema_logging.log_to_stderr(ema_logging.INFO)

if __name__ == '__main__':
    freeze_support()
    with MultiprocessingEvaluator(model, n_processes=7) as evaluator:
        results = evaluator.perform_experiments(scenarios=1000,
                                                policies=4,
                                                levers_sampling=MC)

        #####################################################################################################
# override some of the defaults of the model
lake_model.constants = [Constant('alpha', 0.41),
                        Constant('nsamples', 100),
                        Constant('timehorizon', lake_model.time_horizon),
                       ]

import os

from ema_workbench import (perform_experiments, ema_logging, save_results, 
                           load_results, Policy)
from ema_workbench.em_framework import samplers

# turn on logging
ema_logging.log_to_stderr(ema_logging.INFO)

# perform experiments
nr_experiments = 2
nr_policies = 1
fn = './data/{}_experiments_openloop_noApollution.tar.gz'.format(nr_experiments)
# Policy takes a name followed by lever values as keyword arguments
policy = Policy('open loop', **{'c1': 0,
                                'c2': 0,
                                'r1': 1,
                                'r2': 0,
                                'w1': 0})
results = perform_experiments(lake_model, 10, policies=[policy])
#try:
    # why regenerate the data?
#    results = load_results(fn)
#except IOError:
#    results = perform_experiments(lake_model, scenarios=nr_experiments)
save_results(results, fn)