]  # 0 = no discounting, 1 = normal discounting,

    # Declare the six GINI outcomes; all start out as MINIMIZE and are
    # inverted below.
    RICE.outcomes = [
        ScalarOutcome(label, ScalarOutcome.MINIMIZE)
        for label in (
            'Intratemporal utility GINI 2055',
            'Intratemporal impact GINI 2055',
            'Intratemporal utility GINI 2105',
            'Intratemporal impact GINI 2105',
            'Intertemporal utility GINI',
            'Intertemporal impact GINI',
        )
    ]

    convergence_metrics = [EpsilonProgress()]
    minimize = ScalarOutcome.MINIMIZE
    maximize = ScalarOutcome.MAXIMIZE

    # Invert every outcome direction: MINIMIZE becomes MAXIMIZE and vice versa.
    for outcome in RICE.outcomes:
        outcome.kind = maximize if outcome.kind == minimize else minimize

    ema_logging.log_to_stderr(ema_logging.INFO)
    # Workaround: only needed on IPython console within Anaconda.
    __spec__ = "ModuleSpec(name='builtins', loader=<class '_frozen_importlib.BuiltinImporter'>)"

    for policy in total_policy_list[principle_index]:
# Example #2
def runMoea(model, params, fileEnd, reference=None, refNum=None):
    """Run the configured MOEA ``params.numberOptimizationRepetitions`` times
    on ``model`` and return the concatenated archives and convergence records.

    When ``params.createNewOptimizationResults`` is falsy, previously saved
    results are loaded from ``params.optimizeOutputFolder`` instead of
    re-optimizing.

    Parameters
    ----------
    model : model object with a ``name`` attribute (ema_workbench model).
    params : configuration object providing ``algorithm``, ``algoName``,
        ``epsilons``, ``nfeOptimize``, ``optimizeOutputFolder``, etc.
    fileEnd : str suffix used to build the archive/convergence file names.
    reference : optional reference scenario forwarded to ``evaluator.optimize``.
    refNum : optional int; when given it is recorded in a
        ``reference_scenario`` column of both result frames.

    Returns
    -------
    (archives, convergences) : tuple of pandas.DataFrame
    """
    archiveName = 'archives_' + fileEnd
    convergenceName = 'convergences_' + fileEnd
    # Build the output paths once instead of repeating the concatenation.
    archivePath = params.optimizeOutputFolder + archiveName + '.csv'
    convergencePath = params.optimizeOutputFolder + convergenceName + '.csv'

    if not params.createNewOptimizationResults:
        print('Loading archives from ' + archivePath)
        print('Loading convergences from ' + convergencePath)
        archives = pd.read_csv(archivePath, index_col=0)
        convergences = pd.read_csv(convergencePath, index_col=0)
        return (archives, convergences)

    archs = []
    convs = []

    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(params.optimizeOutputFolder, exist_ok=True)
    tmpfolder = params.optimizeOutputFolder + model.name + '/'
    os.makedirs(tmpfolder, exist_ok=True)

    with MultiprocessingEvaluator(model) as evaluator:
        for i in range(params.numberOptimizationRepetitions):
            print('Optimizing Run ', i)
            # Only the hypervolume bounding box differs between studies;
            # 'mordm' uses a tighter maximum on the first objective.
            if params.name == 'mordm':
                hvMaximum = [2.5, 2, 1, 1]
            else:
                hvMaximum = [10, 2, 1, 1]
            convergences = [
                HyperVolume(minimum=[0, 0, 0, 0], maximum=hvMaximum),
                EpsilonProgress()
            ]
            if params.algoName == 'NSGAIIHybrid':
                # Track per-operator selection probabilities for the hybrid algorithm.
                for j, name in enumerate(['SBX', 'PCX', 'DE', 'UNDX', 'UM']):
                    Convergence.valid_metrics.add(name)
                    convergences.append(OperatorProbabilities(name, j))

            arch, conv = evaluator.optimize(algorithm=params.algorithm,
                                            nfe=params.nfeOptimize[model.name],
                                            searchover='levers',
                                            reference=reference,
                                            epsilons=params.epsilons,
                                            convergence=convergences,
                                            population_size=100)

            conv['run_index'] = i
            arch['run_index'] = i

            if refNum is not None:
                conv['reference_scenario'] = refNum
                arch['reference_scenario'] = refNum

            # Checkpoint each repetition so partial progress survives a crash.
            conv.to_csv(tmpfolder + convergenceName + '_' + str(i) + '.csv')
            arch.to_csv(tmpfolder + archiveName + '_' + str(i) + '.csv')

            convs.append(conv)
            archs.append(arch)

    archives = pd.concat(archs)
    convergences = pd.concat(convs)

    archives.to_csv(archivePath)
    convergences.to_csv(convergencePath)

    return (archives, convergences)
# Example #3
                ScalarOutcome('Distance to treshold 2305', ScalarOutcome.MINIMIZE),
                ScalarOutcome('Population under treshold 2305', ScalarOutcome.MINIMIZE),
                ScalarOutcome('Total Aggregated Utility',ScalarOutcome.MAXIMIZE)
               ]

epsilon_list = [0.01]
# One uniform epsilon vector (same value for every outcome) per listed value.
eps = [np.ones(len(RICE.outcomes)) * value for value in epsilon_list]

nfe = 200000
convergence_metrics = [
    HyperVolume(minimum=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                maximum=[100, 10000, 100, 10000, 100, 10000,
                         100, 10000, 100, 10000, 10000]),
    EpsilonProgress(),
]

# Constraint is violated (positive) whenever total aggregated utility is negative.
constraints = [
    Constraint('Total Aggregated Utility',
               outcome_names='Total Aggregated Utility',
               function=lambda x: max(0, -x)),
]
if __name__ == "__main__":
    for i in range(len(epsilon_list)):    
        start = time.time()
        print("used epsilon is: " +  str(epsilon_list[i]))
        print("starting search for policy generation cycle: " + str(i+1) + "/" + str(len(epsilon_list)), flush=True)
        
        #only needed on IPython console within Anaconda
        __spec__ = "ModuleSpec(name='builtins', loader=<class '_frozen_importlib.BuiltinImporter'>)"
        
        with MultiprocessingEvaluator(RICE) as evaluator:
            results, convergence = evaluator.optimize(nfe=nfe,
def runMoea(model, params, fileEnd, refNum=-1):
    """Run repeated robust optimizations of ``model`` and return the
    concatenated archives and convergence records.

    When ``params.createNewOptimizationResults`` is falsy, previously saved
    results are loaded from ``params.optimizeOutputFolder`` instead of
    re-optimizing.

    Parameters
    ----------
    model : model object with a ``name`` attribute (ema_workbench model).
    params : configuration object providing ``algorithm``, ``algoName``,
        ``epsilons``, ``nfeOptimize``, ``optimizationScenarios``, etc.
    fileEnd : str suffix used to build the archive/convergence file names.
    refNum : int.  NOTE(review): accepted for call-site compatibility but
        never read in this function — confirm whether it should be recorded
        in a ``reference_scenario`` column like the non-robust variant does.

    Returns
    -------
    (archives, convergences) : tuple of pandas.DataFrame
    """
    archiveName = 'archives_' + fileEnd
    convergenceName = 'convergences_' + fileEnd
    # Build the output paths once instead of repeating the concatenation.
    archivePath = params.optimizeOutputFolder + archiveName + '.csv'
    convergencePath = params.optimizeOutputFolder + convergenceName + '.csv'

    if not params.createNewOptimizationResults:
        print('Loading archives from ' + archivePath)
        print('Loading convergences from ' + convergencePath)
        archives = pd.read_csv(archivePath, index_col=0)
        convergences = pd.read_csv(convergencePath, index_col=0)
        return (archives, convergences)

    archs = []
    convs = []

    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(params.optimizeOutputFolder, exist_ok=True)
    tmpfolder = params.optimizeOutputFolder + model.name + '/'
    os.makedirs(tmpfolder, exist_ok=True)

    with MultiprocessingEvaluator(model) as evaluator:
        for i in range(params.numberOptimizationRepetitions):
            print('Run ', i)
            convergence_metrics = [
                HyperVolume(minimum=[0, 0, 0, 0], maximum=[1, 1, 1, 1]),
                EpsilonProgress()
            ]
            if params.algoName == 'NSGAIIHybrid':
                # Track per-operator selection probabilities for the hybrid algorithm.
                for j, name in enumerate(['SBX', 'PCX', 'DE', 'UNDX', 'UM']):
                    Convergence.valid_metrics.add(name)
                    convergence_metrics.append(OperatorProbabilities(name, j))

            arch, conv = evaluator.robust_optimize(
                robustnessFunctions,
                params.optimizationScenarios,
                algorithm=params.algorithm,
                nfe=params.nfeOptimize[model.name],
                constraints=[],
                epsilons=params.epsilons,
                convergence=convergence_metrics)

            arch['run_index'] = i
            conv['run_index'] = i
            archs.append(arch)
            convs.append(conv)

            # Checkpoint each repetition so partial progress survives a crash.
            arch.to_csv(tmpfolder + archiveName + '_' + str(i) + '.csv')
            conv.to_csv(tmpfolder + convergenceName + '_' + str(i) + '.csv')

    archives = pd.concat(archs)
    convergences = pd.concat(convs)

    archives.to_csv(archivePath)
    convergences.to_csv(convergencePath)

    return (archives, convergences)