Exemplo n.º 1
0
]

# Candidate epsilon values to sweep; for each one build an epsilon vector
# with one identical entry per model outcome.
epsilon_list = [0.01]
# NOTE(review): RICE is an ema_workbench model defined elsewhere in the project.
eps = [np.ones(len(RICE.outcomes)) * e for e in epsilon_list]

nfe = 200

# Convergence metrics: hypervolume over explicit per-outcome bounds plus
# epsilon progress. (The original assigned a bare [EpsilonProgress()] first
# and immediately overwrote it -- that dead store is removed here.)
convergence_metrics = [
    HyperVolume(
        minimum=[0, -10000, 0, -10000, 0, -10000, 0, -10000, 0, -10000, -5000],
        maximum=[
            500, 10000, 500, 10000, 500, 10000, 500, 10000, 500, 10000, 20000
        ]),
    EpsilonProgress()
]

# Require "Utility 2305" to be non-negative: the constraint function returns
# the violation magnitude (0 when the constraint is satisfied).
constraints = [
    Constraint("Utility 2305",
               outcome_names="Utility 2305",
               function=lambda x: max(0, -x))
]

if __name__ == "__main__":
    for i in range(len(epsilon_list)):
        start = time.time()
        print("used epsilon is: " + str(epsilon_list[i]))
Exemplo n.º 2
0
        ScalarOutcome('sum-co2-emitted-to-air-global',
                      ScalarOutcome.MAXIMIZE,
                      variable_name='sum-co2-emitted-to-air-global',
                      function=np.max),
        ScalarOutcome('sum-subsidy-to-por-global',
                      ScalarOutcome.MINIMIZE,
                      variable_name='sum-subsidy-to-por-global',
                      function=np.max),
        ScalarOutcome('sum-subsidy-to-industries-global',
                      ScalarOutcome.MINIMIZE,
                      variable_name='sum-subsidy-to-industries-global',
                      function=np.max)
    ]

    convergence = [
        HyperVolume(minimum=[0, 0, 0, 0], maximum=[1e9, 1e9, 1e8, 1e8]),
        EpsilonProgress()
    ]

    with SequentialEvaluator(model) as evaluator:
        results, convergence = evaluator.optimize(nfe=10000,
                                                  searchover='levers',
                                                  epsilons=[
                                                      0.1,
                                                  ] * len(model.outcomes),
                                                  convergence=convergence,
                                                  logging_freq=10,
                                                  convergence_freq=100)

    results.to_csv('./data/MORDM_nfe100.csv')
    convergence.to_csv('./data/MORDM_nfe100_conv.csv')
Exemplo n.º 3
0
    # 'Undiscounted Period Welfare 2300':[-1000,0],
    # 'Consumption SDR 2300':[0, 0.1],
    # 'Damage SDR 2300':[0,0.5],     
    # })

# Parallel-coordinates plot of the optimization results over the given limits.
paraxes = parcoords.ParallelAxes(limits, rot=0)
paraxes.plot(data)
paraxes.fig.set_size_inches(25, 10)
paraxes.legend()
plt.show()
# Save under a run-specific file name in the figures folder.
out_name = str(run) + '_v6__sc_disc_par_results2' + '.png'
paraxes.fig.savefig(os.path.join(fig_path, out_name))

# %%
from ema_workbench.em_framework.optimization import (HyperVolume,
                                                     EpsilonProgress)

# Hypervolume requires explicit per-outcome bounds; epsilon progress counts
# archive improvements.
convergence_metrics = [
    HyperVolume(minimum=[0, 0, 0, 0], maximum=[3, 2, 1.01, 1.01]),
    EpsilonProgress()
]

with MultiprocessingEvaluator(model) as evaluator:
    # nfe is a count of function evaluations and should be an int
    # (the original passed the float literal 1e3).
    results, convergence = evaluator.optimize(
        nfe=1000,
        searchover='levers',
        convergence=convergence_metrics,
        epsilons=[0.1] * len(model.outcomes))

#%%
# NOTE(review): dicemodel is a project-local package; these names are not
# visible in this chunk.
from dicemodel.specs import nordhaus_policy, reference_scenario
# One epsilon per outcome: 0.001 for the first outcome in each group of four,
# 0.1 for the rest -- assumes len(dice_sm.outcomes) is a multiple of 4 (TODO confirm).
eps = [0.001, 0.1, 0.1, 0.1] * (int(len(dice_sm.outcomes)/4.0))
convergence_metrics = [EpsilonProgress()]
# Reference policy built from the Nordhaus-optimal run (row 129 of dice_opt).
nord_optimal_policy = Policy('nord_optimal_policy', **nordhaus_policy(np.mean(dice_opt.iloc[129]), 0.015, 0, 0, 29))
nfe = 100000
# Switch to the worst case
for outcome in dice_sm.outcomes:
Exemplo n.º 4
0
                ScalarOutcome('Population under treshold 2205', ScalarOutcome.MINIMIZE),
                
                ScalarOutcome('Distance to treshold 2305', ScalarOutcome.MINIMIZE),
                ScalarOutcome('Population under treshold 2305', ScalarOutcome.MINIMIZE),
                ScalarOutcome('Total Aggregated Utility',ScalarOutcome.MAXIMIZE)
               ]

epsilon_list = [0.01]
# One epsilon vector per candidate value, replicated across all RICE outcomes.
eps = [np.ones(len(RICE.outcomes)) * value for value in epsilon_list]

nfe = 200000
# Hypervolume bounds span the 11 outcomes; epsilon progress tracks archive growth.
convergence_metrics = [
    HyperVolume(
        minimum=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        maximum=[100, 10000, 100, 10000, 100, 10000, 100, 10000, 100, 10000, 10000]),
    EpsilonProgress()
]

# Require a non-negative total aggregated utility; the constraint function
# returns the violation magnitude (0 when satisfied).
constraints = [
    Constraint('Total Aggregated Utility',
               outcome_names='Total Aggregated Utility',
               function=lambda x: max(0, -x))
]

if __name__ == "__main__":
    for i in range(len(epsilon_list)):    
        start = time.time()
        print("used epsilon is: " +  str(epsilon_list[i]))
        print("starting search for policy generation cycle: " + str(i+1) + "/" + str(len(epsilon_list)), flush=True)
        
        #only needed on IPython console within Anaconda
        __spec__ = "ModuleSpec(name='builtins', loader=<class '_frozen_importlib.BuiltinImporter'>)"
        
        with MultiprocessingEvaluator(RICE) as evaluator:
                  ScalarOutcome.MINIMIZE),
    ScalarOutcome('Lowest income per capita 2305', ScalarOutcome.MAXIMIZE),
    ScalarOutcome('Highest climate impact per capita 2305',
                  ScalarOutcome.MINIMIZE)
]

epsilon_list = [0.01]
# For each candidate epsilon, build a vector with one entry per RICE outcome.
eps = [np.ones(len(RICE.outcomes)) * value for value in epsilon_list]

nfe = 200000
# Hypervolume bounds per outcome plus epsilon-progress tracking.
convergence_metrics = [
    HyperVolume(
        minimum=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        maximum=[50, 1, 200, 1, 200, 1, 200, 1, 200, 1]),
    EpsilonProgress()
]

# Require "Lowest income per capita 2305" to be non-negative; the constraint
# function returns the violation magnitude (0 when satisfied).
constraints = [
    Constraint("Lowest income per capita 2305",
               outcome_names="Lowest income per capita 2305",
               function=lambda x: max(0, -x))
]

if __name__ == "__main__":
    for i in range(len(epsilon_list)):
        start = time.time()
        print("used epsilon is: " + str(epsilon_list[i]))
        print(
            "starting search for optimization 1 - policy generation cycle: " +
    ScalarOutcome('Intratemporal impact GINI 2305', ScalarOutcome.MINIMIZE),
    ScalarOutcome('Intertemporal utility GINI', ScalarOutcome.MINIMIZE),
    ScalarOutcome('Intertemporal impact GINI', ScalarOutcome.MINIMIZE)
]

epsilon_list = [0.001]
# One epsilon vector (entry per RICE outcome) for each candidate value.
eps = [np.ones(len(RICE.outcomes)) * value for value in epsilon_list]

nfe = 200000

# GINI-style outcomes are all on [0, 1], so unit hypervolume bounds are used
# for all 12 outcomes.
convergence_metrics = [
    HyperVolume(minimum=[0] * 12, maximum=[1] * 12),
    EpsilonProgress()
]

if __name__ == "__main__":
    for i in range(len(epsilon_list)):
        start = time.time()
        print("used epsilon is: " + str(epsilon_list[i]))
        print(
            "starting search for optimization 1 - policy generation cycle: " +
            str(i + 1) + "/" + str(len(epsilon_list)),
            flush=True)
        __spec__ = "ModuleSpec(name='builtins', loader=<class '_frozen_importlib.BuiltinImporter'>)"
        with MultiprocessingEvaluator(RICE) as evaluator:
            results, convergence = evaluator.optimize(
                nfe=nfe,
Exemplo n.º 7
0
def runMoea(model, params, fileEnd, reference=None, refNum=None):
    """Run (or load cached) many-objective optimization for ``model``.

    When ``params.createNewOptimizationResults`` is False, previously saved
    archives/convergences are loaded from ``params.optimizeOutputFolder``.
    Otherwise the optimization is repeated
    ``params.numberOptimizationRepetitions`` times; each repetition is
    checkpointed to a per-model temp folder and tagged with ``run_index``
    (and ``reference_scenario`` when ``refNum`` is given).

    Returns a tuple ``(archives, convergences)`` of concatenated DataFrames.
    """
    archiveName = 'archives_' + fileEnd
    convergenceName = 'convergences_' + fileEnd

    if not params.createNewOptimizationResults:
        # Reuse previously stored results instead of re-optimizing.
        print('Loading archives from ' + params.optimizeOutputFolder +
              archiveName + '.csv')
        print('Loading convergences from ' + params.optimizeOutputFolder +
              convergenceName + '.csv')
        archives = pd.read_csv(params.optimizeOutputFolder + archiveName +
                               '.csv',
                               index_col=0)
        convergences = pd.read_csv(params.optimizeOutputFolder +
                                   convergenceName + '.csv',
                                   index_col=0)
        return (archives, convergences)

    archs = []
    convs = []

    # exist_ok=True avoids the check-then-create race of the original
    # os.path.exists / os.makedirs pair.
    os.makedirs(params.optimizeOutputFolder, exist_ok=True)
    tmpfolder = params.optimizeOutputFolder + model.name + '/'
    os.makedirs(tmpfolder, exist_ok=True)

    with MultiprocessingEvaluator(model) as evaluator:
        for i in range(params.numberOptimizationRepetitions):
            print('Optimizing Run ', i)
            # The two setups differ only in the first hypervolume maximum.
            hv_maximum = ([2.5, 2, 1, 1] if params.name == 'mordm'
                          else [10, 2, 1, 1])
            convergences = [
                HyperVolume(minimum=[0, 0, 0, 0], maximum=hv_maximum),
                EpsilonProgress()
            ]
            if params.algoName == 'NSGAIIHybrid':
                # Also track operator selection probabilities for the
                # hybrid algorithm's five variation operators.
                for j, name in enumerate(['SBX', 'PCX', 'DE', 'UNDX', 'UM']):
                    Convergence.valid_metrics.add(name)
                    convergences.append(OperatorProbabilities(name, j))

            arch, conv = evaluator.optimize(algorithm=params.algorithm,
                                            nfe=params.nfeOptimize[model.name],
                                            searchover='levers',
                                            reference=reference,
                                            epsilons=params.epsilons,
                                            convergence=convergences,
                                            population_size=100)

            conv['run_index'] = i
            arch['run_index'] = i

            if refNum is not None:
                conv['reference_scenario'] = refNum
                arch['reference_scenario'] = refNum

            # Checkpoint each repetition separately before concatenation.
            conv.to_csv(tmpfolder + convergenceName + '_' + str(i) + '.csv')
            arch.to_csv(tmpfolder + archiveName + '_' + str(i) + '.csv')

            convs.append(conv)
            archs.append(arch)

    archives = pd.concat(archs)
    convergences = pd.concat(convs)

    archives.to_csv(params.optimizeOutputFolder + archiveName + '.csv')
    convergences.to_csv(params.optimizeOutputFolder + convergenceName + '.csv')

    return (archives, convergences)
Exemplo n.º 8
0
    for k, dam in enumerate(dams):
        for j in range(12):
            l.append(RealParameter(dam[0][j], minimum[k], maximum[k]))
            l.append(RealParameter(dam[1][j], 0, 1))
            l.append(RealParameter(dam[2][j], 0, 1))

    basin_model.levers = l
    basin_model.outcomes = [
        ScalarOutcome('pwsobjective',
                      kind=ScalarOutcome.MINIMIZE,
                      expected_range=(1, 33.3)),
        ScalarOutcome('energyobjective',
                      kind=ScalarOutcome.MAXIMIZE,
                      expected_range=(641, 3291)),
        ScalarOutcome('virobjective',
                      kind=ScalarOutcome.MINIMIZE,
                      expected_range=(315, 1341))
    ]
    #ScalarOutcome('firobjective', kind=ScalarOutcome.MINIMIZE),

    convergence_metrics = [
        HyperVolume.from_outcomes(basin_model.outcomes),
        EpsilonProgress()
    ]

    with MultiprocessingEvaluator(basin_model, 3) as evaluator:
        #results, convergence=evaluator.optimize(nfe=1, searchover='levers',
        #epsilons=[0.1, 0.1, 0.1],
        #convergence=convergence_metrics, reference=None)#constraints=m)
        experiments, outcomes = evaluator.perform_experiments(policies=3)
def runMoea(model, params, fileEnd, refNum=-1):
    """Run (or load cached) robust many-objective optimization for ``model``.

    When ``params.createNewOptimizationResults`` is False, previously saved
    archives/convergences are loaded from ``params.optimizeOutputFolder``.
    Otherwise ``evaluator.robust_optimize`` is repeated
    ``params.numberOptimizationRepetitions`` times over
    ``params.optimizationScenarios``; each repetition is checkpointed and
    tagged with ``run_index``.

    Returns a tuple ``(archives, convergences)`` of concatenated DataFrames.
    """
    archiveName = 'archives_' + fileEnd
    convergenceName = 'convergences_' + fileEnd

    if not params.createNewOptimizationResults:
        # Reuse previously stored results instead of re-optimizing.
        print('Loading archives from ' + params.optimizeOutputFolder +
              archiveName + '.csv')
        print('Loading convergences from ' + params.optimizeOutputFolder +
              convergenceName + '.csv')
        archives = pd.read_csv(params.optimizeOutputFolder + archiveName +
                               '.csv',
                               index_col=0)
        convergences = pd.read_csv(params.optimizeOutputFolder +
                                   convergenceName + '.csv',
                                   index_col=0)
        return (archives, convergences)

    archs = []
    convs = []

    # exist_ok=True avoids the check-then-create race of the original
    # os.path.exists / os.makedirs pair.
    os.makedirs(params.optimizeOutputFolder, exist_ok=True)
    tmpfolder = params.optimizeOutputFolder + model.name + '/'
    os.makedirs(tmpfolder, exist_ok=True)

    with MultiprocessingEvaluator(model) as evaluator:
        for i in range(params.numberOptimizationRepetitions):
            print('Run ', i)
            # Robustness metrics are normalized, hence unit bounds.
            convergence_metrics = [
                HyperVolume(minimum=[0, 0, 0, 0], maximum=[1, 1, 1, 1]),
                EpsilonProgress()
            ]
            if params.algoName == 'NSGAIIHybrid':
                # Also track operator selection probabilities for the
                # hybrid algorithm's five variation operators.
                for j, name in enumerate(['SBX', 'PCX', 'DE', 'UNDX', 'UM']):
                    Convergence.valid_metrics.add(name)
                    convergence_metrics.append(OperatorProbabilities(name, j))

            arch, conv = evaluator.robust_optimize(
                robustnessFunctions,
                params.optimizationScenarios,
                algorithm=params.algorithm,
                nfe=params.nfeOptimize[model.name],
                constraints=[],
                epsilons=params.epsilons,
                convergence=convergence_metrics)

            arch['run_index'] = i
            conv['run_index'] = i
            archs.append(arch)
            convs.append(conv)

            # Checkpoint each repetition separately before concatenation.
            arch.to_csv(tmpfolder + archiveName + '_' + str(i) + '.csv')
            conv.to_csv(tmpfolder + convergenceName + '_' + str(i) + '.csv')

    archives = pd.concat(archs)
    convergences = pd.concat(convs)

    archives.to_csv(params.optimizeOutputFolder + archiveName + '.csv')
    convergences.to_csv(params.optimizeOutputFolder + convergenceName + '.csv')

    return (archives, convergences)
Exemplo n.º 10
0
                             variable_name='A.5_Expected Annual Damage', function=A5_Total_Costs),
                        ScalarOutcome('RfR total costs', kind=MINIMIZE, variable_name='RfR Total Costs', function=RfR_Total_Costs),
                        ScalarOutcome('Evacuation costs', kind=MINIMIZE, variable_name='Expected Evacuation Costs', function=Expected_Evacuation_Costs)
                       ]

# Constraints on location A.1: expected annual damage capped at 4e7 and
# expected deaths at 1; each constraint function returns the violation
# amount (0 when satisfied).
constraints = [
    Constraint("ConstrA.1_Expected Annual Damage",
               outcome_names="A.1_Expected Annual Damage",
               function=lambda x: max(0, x - 40000000)),
    Constraint("ConstrA.1_Expected Number of Deaths",
               outcome_names="A.1_Expected Number of Deaths",
               function=lambda x: max(0, x - 1))
]

# Record the run time
start = time.time()

# One [0, 1] hypervolume interval per robustness function -- presumably 17
# of them here (assignment 10 used 4); TODO confirm this matches
# len(robustness_functions).
convergence = [
    HyperVolume(minimum=[0] * 17, maximum=[1] * 17),
    EpsilonProgress()
]
nfe = 200  # int(1e4)

# Run it
if __name__ == '__main__':
    with MultiprocessingEvaluator(dike_model, n_processes=7) as evaluator:
        results = evaluator.robust_optimize(
            robustness_functions, scenarios,
            nfe=nfe,
            convergence=convergence,
            constraints=constraints,
            epsilons=[0.05] * len(robustness_functions))

    utilities.save_results(results, 'Outcomes/MOROrijkswaterstaatConstraints.tar.gz')

end = time.time()
print('Total run time:{} min'.format((end - start)/60))
Exemplo n.º 11
0
# Narrow two key failure-probability uncertainties for this MORO run.
dike_model.uncertainties['A.1_pfail'] = RealParameter('A.1_pfail', 0, 0.367)
dike_model.uncertainties['A.3_pfail'] = RealParameter('A.3_pfail', 0, 0.226)

# And now we can run the main computationally expensive MORO!

BaseEvaluator.reporting_frequency = 0.1
ema_logging.log_to_stderr(ema_logging.INFO)

n_scenarios = 1
scenarios = sample_uncertainties(dike_model, n_scenarios)
nfe = 201

# The expected ranges are set to minimize noise as discussed in section 3.4
# of doi: 10.1016/j.envsoft.2011.04.003
epsilons = ranges.values
convergence = [
    HyperVolume(hyp_ranges_min, hyp_ranges_max * 1e21),
    EpsilonProgress()
]

# Time the output
start = time.time()

with MultiprocessingEvaluator(dike_model) as evaluator:
    # BUG FIX: the keyword is `constraints`, not `constraint` -- with the
    # original spelling ema_workbench never received the constraints.
    results, convergence = evaluator.robust_optimize(robustness_functions,
                                                     scenarios=scenarios,
                                                     nfe=nfe,
                                                     epsilons=epsilons,
                                                     convergence=convergence,
                                                     convergence_freq=20,
                                                     logging_freq=1,
                                                     constraints=constraints)