Example #1
def performReevaluation(model, params, policies, outputFile):
    if params.createNewReevaluationResults:
        with MultiprocessingEvaluator(model) as evaluator:
            scenarios = params.evaluationScenarios
            results = evaluator.perform_experiments(scenarios=scenarios,
                                                    policies=policies)
        if not os.path.exists(params.reevaluateOutputFolder):
            os.makedirs(params.reevaluateOutputFolder)
        save_results(results, params.reevaluateOutputFolder + outputFile)
    else:
        print('Loading reevaluation from ' + params.reevaluateOutputFolder +
              outputFile)
        results = load_results(params.reevaluateOutputFolder + outputFile)

    return results
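A minimal usage sketch for the function above, assuming a hypothetical params object; the attribute names mirror the ones the function reads, the archive name is made up, and model/policies are taken to exist elsewhere in the original script. save_results/load_results round-trip the (experiments, outcomes) tuple returned by perform_experiments.

from types import SimpleNamespace

# hypothetical parameter object, for illustration only
params = SimpleNamespace(createNewReevaluationResults=True,
                         evaluationScenarios=100,
                         reevaluateOutputFolder='./reevaluation/')

experiments, outcomes = performReevaluation(model, params, policies,
                                            'reevaluation_run.tar.gz')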
Example #2
        RealParameter("dev_rate_storage_elec", 0.025,
                      0.075),  #0.05 is estimated base value
        #RealParameter("dev_rate_storage_heat",0.0125,0.0375),#0.016 is estimated base value
        RealParameter("dev_rate_conv_P2G", 0.0395,
                      0.1185),  #0.079 is estimated base value
        #RealParameter("dev_rate_conv_HP",0.0125,0.0375),#0.01 is estimated base value
        RealParameter("discount_rate", 0.01, 0.15)
    ]  #'base value' is 0.04.

    # specify outcomes
    model.outcomes = [
        ArrayOutcome('value_vector'),
        ArrayOutcome('line_value'),
        ArrayOutcome('cost_vector'),
        ArrayOutcome('line_cost_vector'),
        ScalarOutcome('stopping_condition')
    ]

    ##############################################################################
    # Use small number of experiments to quickly test the model and EMA workbench.
    #experiments, outcomes = perform_experiments(model, 2)

    #Open exploration
    n_experiments = 800

    with MultiprocessingEvaluator(model) as evaluator:
        results = evaluator.perform_experiments(n_experiments)

    save_results(results,
                 f'./results/{n_experiments}experiments_300s_7%.tar.gz')
Example #3
        'SD_National Energy System Distribution[LT Heating Grid]',
        'SD_National Energy System Distribution[MT Heating Grid]',
        'SD_National Energy System Distribution[HT Heating Grid]',
        'SD_National Energy System Distribution[Air Heat Pump]',
        'SD_National Energy System Distribution[Ground Heat Pump]',
        'SD_Cumulative CO2 emmissions', 'SD_Percentage Renewable Electricity',
        'SD_CO2 Tax', 'SD_Gas Trade[Natural Gas]', 'SD_Gas Trade[Green Gas]',
        'SD_Electricity Trade', 'System Distributions',
        'Total Electricity use', 'Municipality Data'
    ]

    results_mean = {
        k: results[1][k].mean(axis=1)
        for k in no_categorical_results
    }

    save_results((results[0], results_mean),
                 filepath_results + '/results_50scen_30_pol.gz.tar')

    #make batches of 100 scenarios/policies to save neighbourhood data
    n = math.ceil(len(results[1]['Neighbourhood Data']) / 100)

    for i in range(n):
        with open(filepath_results + "/Neighbourhood_results_#" + str(i) + ".pkl",
                  "wb") as a_file:
            pickle.dump((results[0],
                         results[1]['Neighbourhood Data'][i * 100:i * 100 + 100]),
                        a_file)
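A short sketch of reading one of the pickled neighbourhood batches back in (batch index 0 chosen for illustration); each file holds the experiments frame plus a 100-scenario slice of the 'Neighbourhood Data' outcome.

import pickle

with open(filepath_results + "/Neighbourhood_results_#0.pkl", "rb") as f:
    experiments, neighbourhood_batch = pickle.load(f)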
Example #4
                                       variable_name='Use SP-A',
                                       function=np.sum),
                          ScalarOutcome(name='Total green steam use by Nouryon (ton/week)',
                                       variable_name='Use SP-B',
                                       function=np.sum),
                          ArrayOutcome(name='Chlorine storage stock at Nouryon (ton)',
                                       variable_name='Chlorine storage'),
                          ArrayOutcome(name='Run-time (s)',
                                       variable_name='Run-time')]
    
    # define the full factorial set of policies with names
    policies = [Policy('None of the options', **{'Steam Pipe':False, 'E-boiler':False, 'Chlorine Storage':False}),
                Policy('Only Steam Pipe', **{'Steam Pipe':True, 'E-boiler':False, 'Chlorine Storage':False}),
                Policy('Only E-boiler', **{'Steam Pipe':False, 'E-boiler':True, 'Chlorine Storage':False}),
                Policy('Only Chlorine storage', **{'Steam Pipe':False, 'E-boiler':False, 'Chlorine Storage':True}),
                Policy('Steam Pipe & E-boiler', **{'Steam Pipe':True, 'E-boiler':True, 'Chlorine Storage':False}),
                Policy('Steam Pipe & Chlorine storage', **{'Steam Pipe':True, 'E-boiler':False, 'Chlorine Storage':True}),
                Policy('E-boiler & Chlorine storage', **{'Steam Pipe':False, 'E-boiler':True, 'Chlorine Storage':True}),
                Policy('All options', **{'Steam Pipe':True, 'E-boiler':True, 'Chlorine Storage':True})]
              
    # define the number of scenarios to be sampled
    scenarios = 100

    # run the models
    with MultiprocessingEvaluator(model_list, n_processes=56) as evaluator:
        results = evaluator.perform_experiments(policies=policies, scenarios=scenarios)
    
    # save the results
    save_results(results, f'./results/results_open_exploration_{scenarios}_scenarios_improved_model.tar.gz')
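The eight policies above spell out every on/off combination of the three options by hand. A sketch of building the same full factorial with itertools.product; the option names and the Policy class come from the snippet, but the generated policy names are simplified.

from itertools import product

from ema_workbench import Policy

options = ['Steam Pipe', 'E-boiler', 'Chlorine Storage']
policies = []
for combo in product([False, True], repeat=len(options)):
    levers = dict(zip(options, combo))
    # name the policy after the options that are switched on
    name = ' & '.join(opt for opt, on in levers.items() if on) or 'None of the options'
    policies.append(Policy(name, **levers))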

Example #5
def main():

    hybridmodel = Model('hybridmodel', function=hybridloop)

    hybridmodel.uncertainties = [
        IntegerParameter("inputpowerfactor", 15, 25),  #7 13
        IntegerParameter("inputLNGprice", 200, 1000),
        IntegerParameter("inputtransferprice", 50, 300),
        IntegerParameter("inputCapincrease", 1000, 3000),
        IntegerParameter("inputCapincreasetime", 1, 2),
        IntegerParameter("inputLNGCapincrease", 1000, 3000),
        IntegerParameter("inputLNGCapincreasetime", 1, 2),
        #                             RealParameter("DemandBalanceSupplyEnergyPrice", 0.4, 0.7),
        RealParameter("MaximumChangeinDemand", 0.4, 0.7),
        RealParameter("SupplyElasticityGas", 0.06, 0.07),
        RealParameter("SupplyElasticityOil", 0.1, 0.2),
        RealParameter("SupplyElasticityCoal", 0.1, 0.2),
        RealParameter("SupplyElasticityNuclear", 0.007, 0.017),
        RealParameter("SupplyElasticityBiofuel", 0.1, 0.2),
        RealParameter("SupplyElasticityOR", 0.15, 0.3),
        IntegerParameter("EconomicGrowthScenario", 1, 3),
        IntegerParameter("EnergyIntensityScenario", 1, 3),
        RealParameter("CO2coal", 93.46, 113.67),
        RealParameter("CO2oil", 59.58, 102.12),
        RealParameter("Variancepower", -5.0, -0.1),
        IntegerParameter("POil", 8900, 9100),
        IntegerParameter("PCoal", 2800, 3100),
        IntegerParameter("PBio", 29000, 32000),
        IntegerParameter("PNuc", 16000, 17000),
        IntegerParameter("POR", 19000, 22000),
        IntegerParameter("PGasE", 6500, 7000),
        IntegerParameter("PGasNA", 2500, 2700),
        IntegerParameter("PGasSCA", 2500, 2700),
        IntegerParameter("PGasCIS", 6500, 7000),
        IntegerParameter("PGasME", 7000, 8000),
        IntegerParameter("PGasAF", 7000, 8000),
        IntegerParameter("PGasAP", 7000, 8000)
    ]

    hybridmodel.outcomes = [
        TimeSeriesOutcome("EU_GasSup"),
        TimeSeriesOutcome("EU_GasDem"),
        TimeSeriesOutcome("EU_GasCon"),
        TimeSeriesOutcome("EU_OilSup"),
        TimeSeriesOutcome("EU_OilDem"),
        TimeSeriesOutcome("EU_OilCon"),
        TimeSeriesOutcome("EU_CoalSup"),
        TimeSeriesOutcome("EU_CoalDem"),
        TimeSeriesOutcome("EU_CoalCon"),
        TimeSeriesOutcome("EU_NucSup"),
        TimeSeriesOutcome("EU_NucDem"),
        #                         TimeSeriesOutcome("EU_NucCon"),
        TimeSeriesOutcome("EU_BioSup"),
        TimeSeriesOutcome("EU_BioDem"),
        TimeSeriesOutcome("EU_BioCon"),
        TimeSeriesOutcome("EU_ORSup"),
        TimeSeriesOutcome("EU_ORDem"),
        #                         TimeSeriesOutcome("EU_ORCon"),
        TimeSeriesOutcome("EU_EDem"),
        TimeSeriesOutcome("EU_ESup"),
        TimeSeriesOutcome("EU_GDP"),
        TimeSeriesOutcome("EU_CO2"),
        TimeSeriesOutcome("EU_RusGas"),
        TimeSeriesOutcome("EU_EUGI"),
        TimeSeriesOutcome("EU_GIC"),
        TimeSeriesOutcome("EU_RGperAG"),
        TimeSeriesOutcome("EU_RGperTES"),
        TimeSeriesOutcome("EU_RGperGC"),
        TimeSeriesOutcome("EU_GICperBBTU"),
        TimeSeriesOutcome("Oil_Price"),
        TimeSeriesOutcome("Coal_Price"),
        TimeSeriesOutcome("Bio_Price"),
        TimeSeriesOutcome("Gas_PriceE"),
        TimeSeriesOutcome("Nuc_PriceE"),
        TimeSeriesOutcome("OR_PriceE"),
        TimeSeriesOutcome("FuncpriceGas"),
        TimeSeriesOutcome("FuncpriceOil"),
        TimeSeriesOutcome("FuncpriceCoal")
    ]

    hybridmodel.levers = [
        IntegerParameter("EnergyUnion", 0, 1),
        IntegerParameter("CO2Cost", 0, 1)
    ]

    # In[ ]:

    ema_logging.log_to_stderr(ema_logging.INFO)
    with MultiprocessingEvaluator(hybridmodel) as evaluator:
        results = evaluator.perform_experiments(scenarios=1,
                                                policies=4,
                                                levers_sampling='ff')

# In[1]:

    save_results(results, r'./1000 runs V30.tar.gz')
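With only the two binary levers defined above, a full factorial over the levers yields four policies. A sketch of the equivalent explicit policy list (the policy names are made up), which could be passed as policies=... instead of relying on the factorial lever sampler.

from ema_workbench import Policy

policies = [
    Policy('baseline', EnergyUnion=0, CO2Cost=0),
    Policy('energy union only', EnergyUnion=1, CO2Cost=0),
    Policy('CO2 cost only', EnergyUnion=0, CO2Cost=1),
    Policy('energy union + CO2 cost', EnergyUnion=1, CO2Cost=1),
]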
Example #6
policies = policies.drop([o.name for o in model.outcomes], axis=1)
policies = policies.drop(["sum rfr", "sum deaths"], axis=1)
policies

# In[9]:

#From the identified lever values, create Policy objects to feed to the MultiprocessingEvaluator
policies_to_evaluate = []

for i, policy in policies.iterrows():
    policies_to_evaluate.append(Policy(str(i), **policy.to_dict()))

policies_to_evaluate
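The loop above can also be written as a single list comprehension with the same behaviour:

policies_to_evaluate = [Policy(str(i), **row.to_dict())
                        for i, row in policies.iterrows()]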

# In[10]:

#Run the policies over 1000 randomly sampled scenarios
with MultiprocessingEvaluator(model) as evaluator:
    results = evaluator.perform_experiments(1000, policies_to_evaluate)

# In[15]:

#Save results
from ema_workbench import save_results

save_results(results, 'identified solutions MORDM 1000scenarios.tar.gz')

# In[ ]:

# In[ ]:
Example #7
    model = NetLogoModel('predprey',
                          wd="./models/predatorPreyNetlogo", 
                          model_file="Wolf Sheep Predation.nlogo")
    model.run_length = 100
    model.replications = 1
    
    model.uncertainties = [RealParameter("grass-regrowth-time", 1, 99),
                           RealParameter("initial-number-sheep", 1, 200),
                           RealParameter("initial-number-wolves", 1, 200),
                           RealParameter("sheep-reproduce", 1, 20),
                           RealParameter("wolf-reproduce", 1, 20),
                     ]
    
    model.outcomes = [TimeSeriesOutcome('sheep'),
                      TimeSeriesOutcome('wolves'),
                      TimeSeriesOutcome('grass'),
                      TimeSeriesOutcome('TIME') ]
     
    #perform experiments
    n = 10
    
    with MultiprocessingEvaluator(model) as evaluator:
        results = perform_experiments(model, n, evaluator=evaluator)
        
   
    fn = r'./data/{} runs.tar.gz'.format(n)
    save_results(results, fn)
    
    

    print("finish")
Example #8
#specify outcomes
model.outcomes = [ScalarOutcome('p_10'),
                  ScalarOutcome('r_10'),
                  ScalarOutcome('a_10'),
                  ScalarOutcome('p_30'),
                  ScalarOutcome('r_30'),
                  ScalarOutcome('a_30'),
                  ScalarOutcome('p_50'),
                  ScalarOutcome('r_50'),
                  ScalarOutcome('a_50'),
                  ScalarOutcome('p_70'),
                  ScalarOutcome('r_70'),
                  ScalarOutcome('a_70'),
                  ScalarOutcome('p_100'),
                  ScalarOutcome('r_100'),
                  ScalarOutcome('a_100'),
                  ScalarOutcome('p_avg'),
                  ScalarOutcome('r_avg'),
                  ScalarOutcome('a_avg')]


from ema_workbench import save_results
results = perform_experiments(model, 1000)
save_results(results, '1000_scenarios_%s_new_model_diverse.tar.gz'%domain)

sa_results = perform_experiments(model, 1050, uncertainty_sampling='sobol')
save_results(sa_results, '1050_scenarios_%s_sobol_new_model_diverse.tar.gz'%domain)
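A sketch of pushing the saved Sobol sample through SALib for sensitivity indices, mirroring the commented-out SALib analysis at the end of Example #16 below; 'a_avg' is just one of the scalar outcomes defined above, and model.uncertainties is assumed to be defined earlier in the script.

from SALib.analyze import sobol
from ema_workbench.em_framework.salib_samplers import get_SALib_problem

experiments, outcomes = sa_results
problem = get_SALib_problem(model.uncertainties)
scores = sobol.analyze(problem, outcomes['a_avg'],
                       calc_second_order=True, print_to_console=True)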


Example #9
        Constant('nsamples', 100),
        Constant('timehorizon', lake_model.time_horizon)
    ]

    scenarios = ['Ref', 77, 96, 130, 181]
    random_scenarios = [81, 289, 391, 257]
    policies = []

    for s in random_scenarios:
        #         if s == 'Ref':
        #             solutions = pd.DataFrame.from_csv(r'../results/Results_EpsNsgaII_nfe10000_scRef_v3.csv')
        #         else:
        #             solutions = pd.DataFrame.from_csv(r'../results/Results_EpsNsgaII_nfe10000_sc{}_v5.csv'.format(s))

        #checked if there are duplicates: No.
        solutions = pd.DataFrame.from_csv(
            r'../data/brushed_random_nfe10000_sc{}.csv'.format(s))
        for index, row in solutions.iterrows():
            name = str(s) + '_' + str(index)
            decision = {
                lever.name: row[lever.name]
                for lever in lake_model.levers
            }  #levers are in the first columns of the solutions
            policies.append(Policy(name=name, **decision))
    #with MultiprocessingEvaluator(lake_model) as evaluator:
    #    results = evaluator.perform_experiments(scenarios=1000, policies=policies)
    results = perform_experiments(lake_model, 1000, policies, parallel=True)
    save_results(
        results,
        r'../CandidateTesting_1000scenarios_revisionRandom_nfe10000.tar.gz')
Example #10
                        wd='./models/flu',
                        model_file='FLUvensimV1basecase.vpm')

    # outcomes
    model.outcomes = [
        TimeSeriesOutcome('deceased population region 1'),
        TimeSeriesOutcome('infected fraction R1'),
        ScalarOutcome('max infection fraction',
                      variable_name='infected fraction R1',
                      function=np.max),
        ScalarOutcome('time of max',
                      variable_name=['infected fraction R1', 'TIME'],
                      function=time_of_max)
    ]

    # create uncertainties based on csv
    model.uncertainties = create_parameters(
        './models/flu/flu_uncertainties.csv')

    # add policies
    policies = [
        Policy('no policy', model_file='FLUvensimV1basecase.vpm'),
        Policy('static policy', model_file='FLUvensimV1static.vpm'),
        Policy('adaptive policy', model_file='FLUvensimV1dynamic.vpm')
    ]

    with MultiprocessingEvaluator(model, n_processes=4) as evaluator:
        results = evaluator.perform_experiments(1000, policies=policies)

    save_results(results, './data/1000 flu cases with policies.tar.gz')
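The outcome definitions above reference a time_of_max helper that is not shown in the snippet; a minimal sketch consistent with how it is wired up (variable_name=['infected fraction R1', 'TIME'] means the function receives the infection series first and the time axis second).

import numpy as np

def time_of_max(infected_fraction, time):
    # return the simulation time at which the infected fraction peaks
    index = np.argmax(infected_fraction)
    return time[index]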
Example #11
    # This section can export the results to an Excel sheet.
    death_threshold = 0.0001
    y = outcomes['Expected Number of Deaths'] < death_threshold
    dfresults['Death Risk Condition < {}'.format(str(death_threshold))] = y

    #This section saves the results to Excel if enabled
    to_excel = False
    if to_excel:
        timestamp = time.strftime("%m.%d-%H%M%S")
        dfresults.to_excel(r'.\results{}.xlsx'.format(timestamp), index=False)

    to_tar = True
    if to_tar:
        from ema_workbench import save_results
        save_results(results, 'HyperCube_open_exploration_iterations.tar.gz')

#%% Prim
# This section sets the conditions that are acceptable for our analysis
    cleaned_experiments = experiments.drop(
        labels=[l.name for l in dike_model.levers], axis=1)
    y = (dfoutcomes['Expected Number of Deaths'] <
         death_threshold) & (dfoutcomes['Total costs'] > 0)
    #y.value_counts()

    prim_alg = prim.Prim(cleaned_experiments, y, threshold=0.8)
    box1 = prim_alg.find_box()
    box1.show_tradeoff()
    plt.show()

    box1.inspect(2)
Example #12
    #with MultiprocessingEvaluator(model, n_processes=4) as evaluator:
    #    results = evaluator.perform_experiments(scenarios=4, policies=5)

print('end!')
training_time = time.time() - start_time

print("--- %s seconds ---" % (training_time))
print('training time : {} mins and {} seconds'.format(
    (training_time // 60), round((training_time % 60), 1)))
print('training time : {} hours {} mins and {} seconds '.format(
    training_time // 3600, round((training_time % 3600 // 60), 1),
    round((training_time % 3600) % 60, 1)))
# Save the outputs
from ema_workbench import save_results
#save_results(results, r'./1000 runs.tar.gz')
#save_results(results, r'C:\Saeid\Prj100\SA_2\snowModelUZH\case1_sattel-hochstuckli\CHrandomness_5\7500_runs.tar.gz')
#save_results(results, r'C:\Saeid\Prj100\SA_2\snowModelUZH\case2_Atzmaening\CHrandomness_5\7500_runs.tar.gz')
save_results(
    results,
    r'C:\Saeid\Prj100\SA_2\snowModelUZH\case3_hoch-ybrig\setup1\Results_1\7500_runs.tar.gz'
)
#save_results(results, r'C:\Saeid\Prj100\SA_2\snowModelUZH\case4_villars-diablerets_elevations_b1339\CHrandomness_5\7500_runs.tar.gz')
#save_results(results, r'C:\Saeid\Prj100\SA_2\snowModelUZH\case4_villars-diablerets_elevations_b1822\CHrandomness_5\7500_runs.tar.gz')
#save_results(results, r'C:\Saeid\Prj100\SA_2\snowModelUZH\case4_villars-diablerets_elevations_b2000\CHrandomness_5\7500_runs.tar.gz')
#save_results(results, r'C:\Saeid\Prj100\SA_2\snowModelUZH\case4_villars-diablerets_elevations_b2500\CHrandomness_5\7500_runs.tar.gz')
#save_results(results, r'C:\Saeid\Prj100\SA_2\snowModelUZH\case5_champex\CHrandomness_5\7500_runs.tar.gz')
#save_results(results, r'C:\Saeid\Prj100\SA_2\snowModelUZH\case6_davos_elevations_b1564\CHrandomness_5\7500_runs.tar.gz')
#save_results(results, r'C:\Saeid\Prj100\SA_2\snowModelUZH\case6_davos_elevations_b2141\CHrandomness_5\7500_runs.tar.gz')
#save_results(results, r'C:\Saeid\Prj100\SA_2\snowModelUZH\case6_davos_elevations_b2584\CHrandomness_5\7500_runs.tar.gz')
Example #13
                        model_file='FLUvensimV1basecase.vpm')

    # outcomes
    model.outcomes = [TimeSeriesOutcome('deceased population region 1'),
                      TimeSeriesOutcome('infected fraction R1'),
                      ScalarOutcome('max infection fraction',
                                    variable_name='infected fraction R1',
                                    function=np.max),
                      ScalarOutcome('time of max',
                                    variable_name=[
                                        'infected fraction R1', 'TIME'],
                                    function=time_of_max)]

    # create uncertainties based on csv
    model.uncertainties = create_parameters(
        './models/flu/flu_uncertainties.csv')

    # add policies
    policies = [Policy('no policy',
                       model_file='FLUvensimV1basecase.vpm'),
                Policy('static policy',
                       model_file='FLUvensimV1static.vpm'),
                Policy('adaptive policy',
                       model_file='FLUvensimV1dynamic.vpm')
                ]

    with MultiprocessingEvaluator(model, n_processes=4) as evaluator:
        results = evaluator.perform_experiments(1000, policies=policies)

    save_results(results, './data/1000 flu cases with policies.tar.gz')
Example #14
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import networkx as nx
from ema_workbench import ema_logging, MultiprocessingEvaluator
from ema_workbench import Model, RealParameter, ScalarOutcome, CategoricalParameter, IntegerParameter, BooleanParameter
from ema_workbench.em_framework.samplers import sample_uncertainties
# from ema_workbench.em_framework.evaluators import MC
from dike_model_function import DikeNetwork
from problem_formulation import get_model_for_problem_formulation
ema_logging.log_to_stderr(ema_logging.INFO)

dike_model, planning_steps = get_model_for_problem_formulation(5)

ema_logging.log_to_stderr(ema_logging.INFO)

with MultiprocessingEvaluator(dike_model) as evaluator:
    results = evaluator.perform_experiments(scenarios=10000,               #500
                                            policies=10,
                                            uncertainty_sampling='mc', reporting_interval=10000)

from ema_workbench import save_results

save_results(results, './mc10pol10000scen.tar.gz')
Example #15
    model = NetLogoModel('predprey',
                         wd="./models/predatorPreyNetlogo",
                         model_file="Wolf Sheep Predation.nlogo")
    model.run_length = 100
    model.replications = 1

    model.uncertainties = [
        RealParameter("grass-regrowth-time", 1, 99),
        RealParameter("initial-number-sheep", 1, 200),
        RealParameter("initial-number-wolves", 1, 200),
        RealParameter("sheep-reproduce", 1, 20),
        RealParameter("wolf-reproduce", 1, 20),
    ]

    model.outcomes = [
        TimeSeriesOutcome('sheep'),
        TimeSeriesOutcome('wolves'),
        TimeSeriesOutcome('grass'),
        TimeSeriesOutcome('TIME')
    ]

    #perform experiments
    n = 10

    with MultiprocessingEvaluator(model) as evaluator:
        results = perform_experiments(model, n, evaluator=evaluator)

    fn = r'./data/{} runs.tar.gz'.format(n)
    save_results(results, fn)

    print("finish")
Example #16
    vensimModel.outcomes = [
        TimeSeriesOutcome('Total Agricultural and Land Use Emissions')
    ]

    n = 2500
    for sc in [0, 2, 3, 4]:
        # set the diet composition switch for this run before the evaluator is
        # created, so the worker processes actually see the updated constant
        vensimModel.constants = [Constant('SA Diet Composition Switch', sc)]
        with MultiprocessingEvaluator(vensimModel, n_processes=7) as evaluator:
            start = time.time()
            results_sa = evaluator.perform_experiments(
                n, uncertainty_sampling=SOBOL, reporting_interval=5000)
            end = time.time()
            print("Experiments took {} seconds, {} hours.".format(
                end - start, (end - start) / 3600))

            fn = './Diet_Sobol_n{}_sc{}_v4_2050.tar.gz'.format(
                n, sc
            )  #v2 is with narrow ranges for efficacy and removing some of the unimportant parameters
            #v3 is with the new multiplicative formulation, and new social norm parameters
            save_results(results_sa, fn)

#
#     experiments, outcomes = results_sa
#     data = outcomes['Total Agricultural and Land Use Emissions'][:, -1]
#     problem = get_SALib_problem(vensimModel.uncertainties)
#     scores = sobol.analyze(problem, data, calc_second_order=True, print_to_console=True)
#
#     plot_scores(scores, problem, n)
#     plt.show()
Example #17
    RICE.uncertainties = [
        IntegerParameter('fdamage', 0, 2),
        IntegerParameter('t2xco2_index', 0, 999),
        IntegerParameter('t2xco2_dist', 0, 2),
        RealParameter('fosslim', 4000, 13649),
        IntegerParameter('scenario_pop_gdp', 0, 5),
        IntegerParameter('scenario_sigma', 0, 2),
        IntegerParameter('scenario_cback', 0, 1),
        IntegerParameter('scenario_elasticity_of_damages', 0, 2),
        IntegerParameter('scenario_limmiu', 0, 1)
    ]

    #same for all formulations
    RICE.outcomes = get_all_model_outcomes_uncertainty_search(
        optimization_formulation="utilitarian")

    ema_logging.log_to_stderr(ema_logging.INFO)
    #only needed on IPython console within Anaconda
    __spec__ = "ModuleSpec(name='builtins', loader=<class '_frozen_importlib.BuiltinImporter'>)"

    with MultiprocessingEvaluator(RICE) as evaluator:
        results = evaluator.perform_experiments(
            scenarios=nfe, policies=total_policy_list[principle_index])

        file_name = "//root//util_gen//server//output//uncertainty_analsysis_" + principles_list[
            principle_index] + "_runs_" + str(nfe) + ".tar.gz"
        save_results(results, file_name)

    print("uncertainty cycle " + principles_list[principle_index] +
          " completed")
Example #18
ema_logging.log_to_stderr(ema_logging.INFO)

dike_model, planning_steps = get_model_for_problem_formulation(5)

policies_0 = [Policy('no policy', **{l.name: 0 for l in dike_model.levers})]

n_scen = 1000
print(n_scen)
with MultiprocessingEvaluator(dike_model) as evalu:
    sa_results = evalu.perform_experiments(n_scen,
                                           policies=policies_0,
                                           uncertainty_sampling='sobol',
                                           reporting_interval=400)

from ema_workbench import save_results
save_results(sa_results, './data/exp/sobolnopol40000scen.tar.gz')

uncertainty = dike_model.uncertainties
levers = dike_model.levers

dike_model.levers = uncertainty
dike_model.uncertainties = levers

n_scen = 10
print(n_scen)
with MultiprocessingEvaluator(dike_model) as evalu:
    sa_results = evalu.perform_experiments(n_scen,
                                           policies=25,
                                           uncertainty_sampling='sobol',
                                           reporting_interval=400)
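The snippet breaks off before this second batch of results is written out; a hypothetical closing call in the same style as the earlier saves (the filename is made up).

save_results(sa_results, './data/exp/sobol_levers_as_uncertainties.tar.gz')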