ScalarOutcome('sum-co2-emitted-to-air-global', ScalarOutcome.MAXIMIZE, variable_name='sum-co2-emitted-to-air-global', function=np.max), ScalarOutcome('sum-subsidy-to-por-global', ScalarOutcome.MINIMIZE, variable_name='sum-subsidy-to-por-global', function=np.max), ScalarOutcome('sum-subsidy-to-industries-global', ScalarOutcome.MINIMIZE, variable_name='sum-subsidy-to-industries-global', function=np.max) ] convergence = [ HyperVolume(minimum=[0, 0, 0, 0], maximum=[1e9, 1e9, 1e8, 1e8]), EpsilonProgress() ] with SequentialEvaluator(model) as evaluator: results, convergence = evaluator.optimize(nfe=10000, searchover='levers', epsilons=[ 0.1, ] * len(model.outcomes), convergence=convergence, logging_freq=10, convergence_freq=100) results.to_csv('./data/MORDM_nfe100.csv') convergence.to_csv('./data/MORDM_nfe100_conv.csv')
def runMoea(model, params, fileEnd, reference=None, refNum=None):
    """Run repeated multi-objective optimizations of *model*, or load cached results.

    If ``params.createNewOptimizationResults`` is falsy, previously saved
    archive/convergence CSVs are loaded and returned instead of re-optimizing.

    Parameters
    ----------
    model : ema_workbench model
        Model to optimize; ``model.name`` selects the nfe budget and the
        per-run temporary output folder.
    params : object
        Experiment configuration; must expose ``createNewOptimizationResults``,
        ``optimizeOutputFolder`` (path string ending in a separator),
        ``numberOptimizationRepetitions``, ``name``, ``algoName``,
        ``algorithm``, ``nfeOptimize`` and ``epsilons``.
    fileEnd : str
        Suffix used to build the archive/convergence file names.
    reference : Scenario, optional
        Reference scenario forwarded to ``evaluator.optimize``.
    refNum : int, optional
        Identifier of the reference scenario; when given, it is recorded in a
        ``reference_scenario`` column of both result frames.

    Returns
    -------
    tuple of (pandas.DataFrame, pandas.DataFrame)
        ``(archives, convergences)`` concatenated over all repetitions, each
        tagged with a ``run_index`` column.
    """
    archiveName = 'archives_' + fileEnd
    convergenceName = 'convergences_' + fileEnd
    # Fast path: reuse cached results unless a fresh optimization is requested.
    if not params.createNewOptimizationResults:
        print('Loading archives from ' + params.optimizeOutputFolder + archiveName + '.csv')
        print('Loading convergences from ' + params.optimizeOutputFolder + convergenceName + '.csv')
        archives = pd.read_csv(params.optimizeOutputFolder + archiveName + '.csv', index_col=0)
        convergences = pd.read_csv(params.optimizeOutputFolder + convergenceName + '.csv', index_col=0)
        return (archives, convergences)
    archs = []
    convs = []
    # exist_ok=True replaces the racy os.path.exists() check-then-create pattern.
    os.makedirs(params.optimizeOutputFolder, exist_ok=True)
    tmpfolder = params.optimizeOutputFolder + model.name + '/'
    os.makedirs(tmpfolder, exist_ok=True)
    with MultiprocessingEvaluator(model) as evaluator:
        for i in range(params.numberOptimizationRepetitions):
            print('Optimizing Run ', i)
            # Hypervolume bounds differ between the 'mordm' set-up and the rest.
            if params.name == 'mordm':
                convergences = [
                    HyperVolume(minimum=[0, 0, 0, 0], maximum=[2.5, 2, 1, 1]),
                    EpsilonProgress()
                ]
            else:
                convergences = [
                    HyperVolume(minimum=[0, 0, 0, 0], maximum=[10, 2, 1, 1]),
                    EpsilonProgress()
                ]
            if params.algoName == 'NSGAIIHybrid':
                # Also track the hybrid variator's per-operator selection probabilities.
                for j, name in enumerate(['SBX', 'PCX', 'DE', 'UNDX', 'UM']):
                    Convergence.valid_metrics.add(name)
                    convergences.append(OperatorProbabilities(name, j))
            arch, conv = evaluator.optimize(algorithm=params.algorithm,
                                            nfe=params.nfeOptimize[model.name],
                                            searchover='levers',
                                            reference=reference,
                                            epsilons=params.epsilons,
                                            convergence=convergences,
                                            population_size=100)
            conv['run_index'] = i
            arch['run_index'] = i
            if refNum is not None:
                conv['reference_scenario'] = refNum
                arch['reference_scenario'] = refNum
            # Persist each repetition immediately so partial progress survives crashes.
            conv.to_csv(tmpfolder + convergenceName + '_' + str(i) + '.csv')
            arch.to_csv(tmpfolder + archiveName + '_' + str(i) + '.csv')
            convs.append(conv)
            archs.append(arch)
    archives = pd.concat(archs)
    convergences = pd.concat(convs)
    archives.to_csv(params.optimizeOutputFolder + archiveName + '.csv')
    convergences.to_csv(params.optimizeOutputFolder + convergenceName + '.csv')
    return (archives, convergences)
] #0 = no discouting , 1 = normal discounting, RICE.outcomes = [ ScalarOutcome('Intratemporal utility GINI 2055', ScalarOutcome.MINIMIZE), ScalarOutcome('Intratemporal impact GINI 2055', ScalarOutcome.MINIMIZE), ScalarOutcome('Intratemporal utility GINI 2105', ScalarOutcome.MINIMIZE), ScalarOutcome('Intratemporal impact GINI 2105', ScalarOutcome.MINIMIZE), ScalarOutcome('Intertemporal utility GINI', ScalarOutcome.MINIMIZE), ScalarOutcome('Intertemporal impact GINI', ScalarOutcome.MINIMIZE) ] convergence_metrics = [EpsilonProgress()] minimize = ScalarOutcome.MINIMIZE maximize = ScalarOutcome.MAXIMIZE for outcome in RICE.outcomes: if outcome.kind == minimize: outcome.kind = maximize else: outcome.kind = minimize ema_logging.log_to_stderr(ema_logging.INFO) #only needed on IPython console within Anaconda __spec__ = "ModuleSpec(name='builtins', loader=<class '_frozen_importlib.BuiltinImporter'>)" for policy in total_policy_list[principle_index]:
def runMoea(model, params, fileEnd, refNum=-1):
    """Run repeated robust optimizations of *model*, or load cached results.

    If ``params.createNewOptimizationResults`` is falsy, previously saved
    archive/convergence CSVs are loaded and returned instead of re-optimizing.
    Otherwise the module-level ``robustnessFunctions`` are optimized over
    ``params.optimizationScenarios`` for each repetition.

    Parameters
    ----------
    model : ema_workbench model
        Model to optimize; ``model.name`` selects the nfe budget and the
        per-run temporary output folder.
    params : object
        Experiment configuration; must expose ``createNewOptimizationResults``,
        ``optimizeOutputFolder`` (path string ending in a separator),
        ``numberOptimizationRepetitions``, ``algoName``, ``algorithm``,
        ``optimizationScenarios``, ``nfeOptimize`` and ``epsilons``.
    fileEnd : str
        Suffix used to build the archive/convergence file names.
    refNum : int, optional
        Reference-scenario identifier. NOTE(review): currently unused in the
        body; kept for call-site compatibility.

    Returns
    -------
    tuple of (pandas.DataFrame, pandas.DataFrame)
        ``(archives, convergences)`` concatenated over all repetitions, each
        tagged with a ``run_index`` column.
    """
    archiveName = 'archives_' + fileEnd
    convergenceName = 'convergences_' + fileEnd
    # Fast path: reuse cached results unless a fresh optimization is requested.
    if not params.createNewOptimizationResults:
        print('Loading archives from ' + params.optimizeOutputFolder + archiveName + '.csv')
        print('Loading convergences from ' + params.optimizeOutputFolder + convergenceName + '.csv')
        archives = pd.read_csv(params.optimizeOutputFolder + archiveName + '.csv', index_col=0)
        convergences = pd.read_csv(params.optimizeOutputFolder + convergenceName + '.csv', index_col=0)
        return (archives, convergences)
    archs = []
    convs = []
    # exist_ok=True replaces the racy os.path.exists() check-then-create pattern.
    os.makedirs(params.optimizeOutputFolder, exist_ok=True)
    tmpfolder = params.optimizeOutputFolder + model.name + '/'
    os.makedirs(tmpfolder, exist_ok=True)
    with MultiprocessingEvaluator(model) as evaluator:
        for i in range(params.numberOptimizationRepetitions):
            print('Run ', i)
            convergence_metrics = [
                HyperVolume(minimum=[0, 0, 0, 0], maximum=[1, 1, 1, 1]),
                EpsilonProgress()
            ]
            if params.algoName == 'NSGAIIHybrid':
                # Also track the hybrid variator's per-operator selection probabilities.
                for j, name in enumerate(['SBX', 'PCX', 'DE', 'UNDX', 'UM']):
                    Convergence.valid_metrics.add(name)
                    convergence_metrics.append(OperatorProbabilities(name, j))
            arch, conv = evaluator.robust_optimize(
                robustnessFunctions,
                params.optimizationScenarios,
                algorithm=params.algorithm,
                nfe=params.nfeOptimize[model.name],
                constraints=[],
                epsilons=params.epsilons,
                convergence=convergence_metrics)
            arch['run_index'] = i
            conv['run_index'] = i
            archs.append(arch)
            convs.append(conv)
            # Persist each repetition immediately so partial progress survives crashes.
            arch.to_csv(tmpfolder + archiveName + '_' + str(i) + '.csv')
            conv.to_csv(tmpfolder + convergenceName + '_' + str(i) + '.csv')
    archives = pd.concat(archs)
    convergences = pd.concat(convs)
    archives.to_csv(params.optimizeOutputFolder + archiveName + '.csv')
    convergences.to_csv(params.optimizeOutputFolder + convergenceName + '.csv')
    return (archives, convergences)