Example #1
def buildOptimizationScenarios(model, params, outputFile):
    if params.createNewOptimizationResults:
        if params.createNewOptimizationScenarios:
            if params.optimizationScenarios is None:
                scenarios = sample_uncertainties(model,
                                                 params.numEvaluationScenarios)

                if not os.path.exists(params.optimizeOutputFolder):
                    os.makedirs(params.optimizeOutputFolder)
                df = pd.DataFrame(scenarios.designs, columns=scenarios.params)
                df.to_csv(params.optimizeOutputFolder + outputFile,
                          index=False)

                return scenarios
            else:
                return params.optimizationScenarios
        else:
            df = pd.read_csv(params.optimizeOutputFolder + outputFile)
            designs = list(df.itertuples(index=False, name=None))
            scenarios = DefaultDesigns(designs=designs,
                                       parameters=model.uncertainties,
                                       n=len(designs))
            scenarios.kind = Scenario

            return scenarios
    else:
        return None
Example #2
def buildReevaluationScenarios(model, params, baseScenario, outputFile):
    if params.createNewReevaluationResults:
        if params.createNewReevaluationScenarios:
            if params.evaluationScenarios is None:
                scenarios = sample_uncertainties(model,
                                                 params.numEvaluationScenarios)
                scenarios.designs.append(tuple(baseScenario.values()))
                scenarios.n += 1

                if not os.path.exists(params.reevaluateOutputFolder):
                    os.makedirs(params.reevaluateOutputFolder)
                df = pd.DataFrame(scenarios.designs, columns=scenarios.params)
                df.to_csv(params.reevaluateOutputFolder + outputFile,
                          index=False)

                return scenarios
            else:
                return params.evaluationScenarios
        else:
            df = pd.read_csv(params.reevaluateOutputFolder + outputFile)
            designs = list(df.itertuples(index=False, name=None))
            uncertainties = list(model.uncertainties)
            scenarios = DefaultDesigns(designs=designs,
                                       parameters=uncertainties,
                                       n=len(designs))
            scenarios.kind = Scenario

            return scenarios
    else:
        return None
Example #3
    def robust_evaluate(
        self,
        robustness_functions,
        scenarios,
        policies,
        evaluator=None,
    ):
        """
        Perform robust evaluation(s).

        The robust evaluation is used to generate statistical measures
        of outcomes, instead of generating the individual outcomes themselves.
        For each policy, the model is evaluated against all of the considered
        scenarios, and then the robustness measures are evaluated using the
        set of outcomes from the original runs.  The robustness measures
        are aggregate measures that are computed from a set of outcomes.
        For example, this may be expected value, median, n-th percentile,
        minimum, or maximum value of any individual outcome.  It is also
        possible to have joint measures, e.g. expected value of the larger
        of outcome 1 or outcome 2.

        Args:
            robustness_functions (Collection[Measure]): A collection of
                aggregate statistical performance measures.
            scenarios (int or Collection): A collection of scenarios to
                use in the evaluation(s), or give an integer to generate
                that number of random scenarios.
            policies (int or Collection): A collection of policies to
                use in the evaluation(s), or give an integer to generate
                that number of random policies.
            evaluator (Evaluator, optional): The evaluator to use to
                run the model. If not given, a SequentialEvaluator will
                be created.

        Returns:
            pandas.DataFrame: The computed value of each item
            in `robustness_functions`, for each policy in `policies`.
        """

        if evaluator is None:
            from ema_workbench.em_framework import SequentialEvaluator
            evaluator = SequentialEvaluator(self)

        from ema_workbench.em_framework.samplers import sample_uncertainties, sample_levers

        if isinstance(scenarios, int):
            n_scenarios = scenarios
            scenarios = sample_uncertainties(self, n_scenarios)

        if isinstance(policies, int):
            # mirror the scenarios handling when an integer count of policies is given
            n_policies = policies
            policies = sample_levers(self, n_policies)

        with evaluator:
            robust_results = evaluator.robust_evaluate(
                robustness_functions,
                scenarios,
                policies,
            )

        robust_results = self.ensure_dtypes(robust_results)
        return robust_results
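
The docstring above also mentions joint robustness measures that combine more than one outcome. None of the examples on this page define one, so here is a minimal sketch using ema_workbench's ScalarOutcome; the outcome names `outcome_1` and `outcome_2` are hypothetical, and it assumes that when `variable_name` lists several outcomes the supplied function receives one array of results per outcome.

import numpy as np
from ema_workbench import ScalarOutcome

# Joint robustness measure: expected value of the larger of two outcomes,
# computed over the outcomes obtained for one policy across all scenarios.
joint_measure = ScalarOutcome(
    'expected max of outcome 1 or 2',
    kind=ScalarOutcome.MINIMIZE,
    variable_name=['outcome_1', 'outcome_2'],  # hypothetical outcome names
    function=lambda o1, o2: np.mean(np.maximum(o1, o2)),
)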
    
Example #4

    # override some of the defaults of the model
    lake_model.constants = [Constant('alpha', 0.41),
                            Constant('nsamples', 100),
                            Constant('myears', 100)]
    
    # setup and execute the robust optimization
    def signal_to_noise(data):
        mean = np.mean(data)
        std = np.std(data)
        sn = mean/std
        return sn
    
    MAXIMIZE = ScalarOutcome.MAXIMIZE  # @UndefinedVariable
    MINIMIZE = ScalarOutcome.MINIMIZE  # @UndefinedVariable
    robustness_functions = [
        ScalarOutcome('mean p', kind=MINIMIZE,
                      variable_name='max_P', function=np.mean),
        ScalarOutcome('std p', kind=MINIMIZE,
                      variable_name='max_P', function=np.std),
        ScalarOutcome('sn reliability', kind=MAXIMIZE,
                      variable_name='reliability',
                      function=signal_to_noise),
    ]
    n_scenarios = 10
    scenarios = sample_uncertainties(lake_model, n_scenarios)
    nfe = 1000
    
    with MultiprocessingEvaluator(lake_model) as evaluator:
        evaluator.robust_optimize(robustness_functions, scenarios, nfe=nfe,
                                  epsilons=[0.1] * len(robustness_functions),
                                  population_size=5)
Example #5
        return sn

    MAXIMIZE = ScalarOutcome.MAXIMIZE  # @UndefinedVariable
    MINIMIZE = ScalarOutcome.MINIMIZE  # @UndefinedVariable
    robustness_functions = [
        ScalarOutcome('mean p',
                      kind=MINIMIZE,
                      variable_name='max_P',
                      function=np.mean),
        ScalarOutcome('std p',
                      kind=MINIMIZE,
                      variable_name='max_P',
                      function=np.std),
        ScalarOutcome('sn reliability',
                      kind=MAXIMIZE,
                      variable_name='reliability',
                      function=signal_to_noise)
    ]
    n_scenarios = 10
    scenarios = sample_uncertainties(lake_model, n_scenarios)
    nfe = 1000

    with MultiprocessingEvaluator(lake_model) as evaluator:
        evaluator.robust_optimize(robustness_functions,
                                  scenarios,
                                  nfe=nfe,
                                  epsilons=[0.1] * len(robustness_functions),
                                  population_size=5)
    RealParameter("Lever Voluntary or Involuntary Vaccination", 0.00001, 1),
    RealParameter("Lever Vaccination", 0.00001, 1),
    RealParameter("Lever Medical Care I", 0.00001, 1),
    RealParameter("Lever Medical Care II", 0.00001, 1)
]

mdl = VensimModel('Immunization',
                  model_file=r'./models/03_SEIR_SIR_ActionsAsLevers.vpm')
mdl.uncertainties = uncertainties
mdl.outcomes = outcomes
mdl.constants = constants
mdl.levers = levers
mdl.robustness = robustness
mdl.time_horizon = 360

n_scenarios = 50
scenarios = sample_uncertainties(mdl, n_scenarios)
nfe = 1000

with SequentialEvaluator(mdl) as evaluator:
    robust_results = evaluator.robust_optimize(mdl.robustness,
                                               scenarios,
                                               nfe=nfe,
                                               epsilons=[
                                                   0.1,
                                               ] * len(mdl.robustness),
                                               population_size=25)

output_file = './platypus_results/' + infection + 'I_' + dying + 'D.csv'
robust_results.to_csv(output_file)
Example #7
    def robust_optimize(
        self,
        robustness_functions,
        scenarios,
        evaluator=None,
        nfe=10000,
        convergence=None,
        constraints=None,
        **kwargs,
    ):
        """
        Perform robust optimization.

        The robust optimization is generally a multi-objective optimization task.
        It is undertaken using statistical measures of outcomes evaluated across
        a number of scenarios, instead of using the individual outcomes themselves.
        For each candidate policy, the model is evaluated against all of the considered
        scenarios, and then the robustness measures are evaluated using the
        set of outcomes from the original runs.  The robustness measures
        are aggregate measures that are computed from a set of outcomes.
        For example, this may be expected value, median, n-th percentile,
        minimum, or maximum value of any individual outcome.  It is also
        possible to have joint measures, e.g. expected value of the larger
        of outcome 1 or outcome 2.

        Each robustness function is indicated as a maximization or minimization
        target, where higher or lower values are better, respectively.
        The optimization process then tries to identify one or more
        non-dominated solutions for the possible policy levers.

        Args:
            robustness_functions (Collection[Measure]): A collection of
                aggregate statistical performance measures.
            scenarios (int or Collection): A collection of scenarios to
                use in the evaluation(s), or give an integer to generate
                that number of random scenarios.
            evaluator (Evaluator, optional): The evaluator to use to
                run the model. If not given, a SequentialEvaluator will
                be created.
            algorithm (platypus.Algorithm, optional): Select an
                algorithm for multi-objective optimization.  See
                `platypus` documentation for details.
            nfe (int, default 10_000): Number of function evaluations.
                This generally needs to be fairly large to achieve stable
                results in all but the most trivial applications.
            convergence (emat.optimization.ConvergenceMetrics, optional)
            constraints (Collection[Constraint], optional)
            kwargs: any additional arguments will be passed on to the
                platypus algorithm.

        Returns:
            pandas.DataFrame: The set of non-dominated solutions found.

            When `convergence` is given, the convergence measures are
            also returned, as a second pandas.DataFrame.
        """

        if evaluator is None:
            from ema_workbench.em_framework import SequentialEvaluator
            evaluator = SequentialEvaluator(self)

        from ema_workbench.em_framework.samplers import sample_uncertainties, sample_levers

        if isinstance(scenarios, int):
            n_scenarios = scenarios
            scenarios = sample_uncertainties(self, n_scenarios)

        # if epsilons is None:
        #     epsilons = [0.05, ] * len(robustness_functions)
        #
        with evaluator:
            robust_results = evaluator.robust_optimize(
                robustness_functions,
                scenarios,
                nfe=nfe,
                constraints=constraints,
                # epsilons=epsilons,
                convergence=convergence,
                **kwargs,
            )

        if isinstance(robust_results, tuple) and len(robust_results) == 2:
            robust_results, result_convergence = robust_results
        else:
            result_convergence = None

        robust_results = self.ensure_dtypes(robust_results)

        if result_convergence is None:
            return robust_results
        else:
            return robust_results, result_convergence
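
As the docstring above notes, `scenarios` may be given as an integer, and a second DataFrame of convergence measures is returned when `convergence` is supplied. A minimal call sketch follows; `model`, `robustness_functions` and `convergence_metrics` are assumptions here, with concrete definitions of the latter two appearing in Examples #9 and #12 below.

# Sketch only: the names below are assumptions, not part of this example.
results = model.robust_optimize(
    robustness_functions,
    scenarios=50,                     # an int: 50 random scenarios are sampled internally
    nfe=10000,
    convergence=convergence_metrics,
)
solutions, convergence = results      # two DataFrames when `convergence` is given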
Example #8
ref_scenario = Scenario('reference', **scen1)

# no dike increase, no warning, none of the rfr
zero_policy = {'DikeIncrease': 0, 'DaysToThreat': 0, 'RfR': 0}
pol0 = {}

for key in dike_model.levers:
    s1, s2 = key.name.split('_')
    pol0.update({key.name: zero_policy[s2]})

policy0 = Policy('Policy 0', **pol0)

# Generate random scenarios and policies:
n_scenarios = 100
scenarios = sample_uncertainties(dike_model, n_scenarios)
n_policies = 10

# single run
#     start = time.time()
#     dike_model.run_model(ref_scenario, policy0)
#     end = time.time()
#     print(end - start)
#     results = dike_model.outcomes_output

# series run
#    experiments, outcomes = perform_experiments(dike_model, ref_scenario, 5)

# multiprocessing
with MultiprocessingEvaluator(dike_model) as evaluator:
    results = evaluator.perform_experiments(n_scenarios, n_policies)
Example #9
	def test_robust_evaluation(self):
		# %%

		import os
		test_dir = os.path.dirname(__file__)

		from ema_workbench import ema_logging, MultiprocessingEvaluator, SequentialEvaluator
		from emat.examples import road_test
		import numpy, pandas, functools, random
		from emat import Measure
		s, db, m = road_test()

		MAXIMIZE = Measure.MAXIMIZE
		MINIMIZE = Measure.MINIMIZE

		robustness_functions = [
			Measure(
				'Expected Net Benefit',
				kind=Measure.INFO,
				variable_name='net_benefits',
				function=numpy.mean,
			),

			Measure(
				'Probability of Net Loss',
				kind=MINIMIZE,
				variable_name='net_benefits',
				function=lambda x: numpy.mean(x < 0),
				min=0,
				max=1,
			),

			Measure(
				'95%ile Travel Time',
				kind=MINIMIZE,
				variable_name='build_travel_time',
				function=functools.partial(numpy.percentile, q=95),
				min=60,
				max=150,
			),

			Measure(
				'99%ile Present Cost',
				kind=Measure.INFO,
				variable_name='present_cost_expansion',
				function=functools.partial(numpy.percentile, q=99),
			),

			Measure(
				'Expected Present Cost',
				kind=Measure.INFO,
				variable_name='present_cost_expansion',
				function=numpy.mean,
			),

		]
		# %%

		numpy.random.seed(42)

		with MultiprocessingEvaluator(m) as evaluator:
			r1 = m.robust_evaluate(
				robustness_functions,
				scenarios=20,
				policies=5,
				evaluator=evaluator,
			)

		import pandas
		correct = pandas.read_json(
			'{"amortization_period":{"0":19,"1":23,"2":50,"3":43,"4":35},"debt_type":{"0":"Rev Bond","1":"Paygo"'
			',"2":"GO Bond","3":"Paygo","4":"Rev Bond"},"expand_capacity":{"0":26.3384401031,"1":63.3898549337,"2'
			'":51.1360252492,"3":18.7230954832,"4":93.9205959335},"interest_rate_lock":{"0":false,"1":true,"2":fal'
			'se,"3":true,"4":false},"Expected Net Benefit":{"0":-157.486494925,"1":-244.2423401934,"2":-189.633908'
			'4553,"3":-4.2656265778,"4":-481.1208898635},"Probability of Net Loss":{"0":0.95,"1":1.0,"2":0.95,"3":'
			'0.7,"4":1.0},"95%ile Travel Time":{"0":74.6904209781,"1":65.8492894317,"2":67.6932507947,"3":79.09851'
			'23853,"4":63.203313888},"99%ile Present Cost":{"0":3789.8036648358,"1":9121.0832380586,"2":7357.89572'
			'71441,"3":2694.0416972887,"4":13514.111590462},"Expected Present Cost":{"0":3158.4461451444,"1":7601.'
			'5679809722,"2":6132.1164500957,"3":2245.2312484183,"4":11262.7453643551}}')
		correct['debt_type'] = correct['debt_type'].astype(
			pandas.CategoricalDtype(categories=['GO Bond', 'Rev Bond', 'Paygo'], ordered=True))

		pandas.testing.assert_frame_equal(r1, correct)

		numpy.random.seed(7)

		from ema_workbench.em_framework.samplers import sample_uncertainties
		scenes = sample_uncertainties(m, 20)

		scenes0 = pandas.DataFrame(scenes)
		scenariofile = os.path.join(test_dir, 'test_robust_evaluation_scenarios.csv')
		if not os.path.exists(scenariofile):
			scenes0.to_csv(scenariofile, index=None)
		scenes1 = pandas.read_csv(scenariofile)
		pandas.testing.assert_frame_equal(scenes0, scenes1)

		from emat import Constraint

		constraint_1 = Constraint(
			"Maximum Log Expected Present Cost",
			outcome_names="Expected Present Cost",
			function=Constraint.must_be_less_than(4000),
		)

		constraint_2 = Constraint(
			"Minimum Capacity Expansion",
			parameter_names="expand_capacity",
			function=Constraint.must_be_greater_than(10),
		)

		constraint_3 = Constraint(
			"Maximum Paygo",
			parameter_names='debt_type',
			outcome_names='99%ile Present Cost',
			function=lambda i, j: max(0, j - 1500) if i == 'Paygo' else 0,
		)

		from emat.optimization import HyperVolume, EpsilonProgress, SolutionViewer, ConvergenceMetrics

		convergence_metrics = ConvergenceMetrics(
			HyperVolume.from_outcomes(robustness_functions),
			EpsilonProgress(),
			SolutionViewer.from_model_and_outcomes(m, robustness_functions),
		)

		numpy.random.seed(8)
		random.seed(8)

		# Test robust optimize
		with SequentialEvaluator(m) as evaluator:
			robust_results, convergence = m.robust_optimize(
					robustness_functions,
					scenarios=scenes,
					nfe=25,
					constraints=[
						constraint_1,
						constraint_2,
						constraint_3,
					],
					epsilons=[0.05,]*len(robustness_functions),
					convergence=convergence_metrics,
					evaluator=evaluator,
			)

		cachefile = os.path.join(test_dir,'test_robust_results.csv')
		if not os.path.exists(cachefile):
			robust_results.to_csv(cachefile, index=None)
		correct2 = pandas.read_csv(cachefile)
		correct2['debt_type'] = correct2['debt_type'].astype(
			pandas.CategoricalDtype(categories=['GO Bond', 'Rev Bond', 'Paygo'], ordered=True))
		pandas.testing.assert_frame_equal(robust_results, correct2, check_less_precise=True)
Example #10
def optimize_lake_problem(use_original_R_metrics=False, demo=True):
    """Analysis of the Lake Problem.

    (1) Runs a multi-objective robust optimisation of the Lake Problem
        using both standard and custom robustness metrics;
    (2) analyses the effects of different sets of scenarios on the
        robustness values and robustness rankings;
    (3) plots these effects;
    (4) analyses the effects of different robustness metrics on the
        robustness values and robustness rankings; and
    (5) plots these effects.
    """
    filepath = './robust_results.h5'

    robustness_functions = (get_original_R_metrics() if use_original_R_metrics
                            else get_custom_R_metrics_for_workbench())

    lake_model = get_lake_model()

    if not os.path.exists(filepath):
        n_scenarios = 10 if demo else 200  # for demo purposes only, should in practice be higher
        scenarios = sample_uncertainties(lake_model, n_scenarios)
        nfe = 1000 if demo else 50000  # number of function evaluations

        # Needed on Linux-based machines
        multiprocessing.set_start_method('spawn', True)

        # Run optimisation
        with MultiprocessingEvaluator(lake_model) as evaluator:
            robust_results = evaluator.robust_optimize(
                robustness_functions,
                scenarios,
                nfe=nfe,
                population_size=(10 if demo else 50),
                epsilons=[
                    0.1,
                ] * len(robustness_functions))
        # Cache the optimisation results so later runs can skip the expensive step
        robust_results.to_hdf(filepath, key='df')
        print(robust_results)

    robust_results = pd.read_hdf(filepath, key='df')

    # Results are performance in each timestep, followed by robustness
    # we only care about the robustness, so we get that
    col_names = robust_results.columns.values.tolist()
    col_names = col_names[-len(robustness_functions):]

    # Plot the robustness results
    sns.pairplot(robust_results, vars=col_names, diag_kind='kde')
    # plt.show()

    # Extract the decision alternatives (lever values) from the results
    decision_alternatives = robust_results.iloc[:, :-4].values
    decision_alternatives = [
        Policy(
            idx, **{
                str(idx): value
                for idx, value in enumerate(
                    decision_alternatives[idx].tolist())
            }) for idx in range(decision_alternatives.shape[0])
    ]

    # Find the influence of scenarios. Here we are creating 5
    # sets of 100 scenarios each, all using the same sampling
    # method.
    scenarios_per_set = 100
    n_sets = 5
    n_scenarios = scenarios_per_set * n_sets
    scenarios = sample_uncertainties(lake_model, n_scenarios)

    # Simulate optimal solutions across all scenarios
    with MultiprocessingEvaluator(lake_model) as evaluator:
        results = evaluator.perform_experiments(scenarios=scenarios,
                                                policies=decision_alternatives)
    # We will just look at the vulnerability ('max_P') for this example
    f = np.reshape(results[1]['max_P'], newshape=(-1, n_scenarios))
    # Split the results into the different sets of scenarios
    split_f = np.split(f, n_sets, axis=1)
    # Calculate robustness for each set of scenarios
    # Note that each split_f[set_idx] is a 2D array, with each row being
    # a decision alternative, and each column a scenario
    R_metric = get_custom_R_metrics()[0]
    R = [R_metric(split_f[set_idx]) for set_idx in range(n_sets)]
    R = np.transpose(R)

    # Calculate similarity in robustness from different scenario sets
    delta, tau = analysis.scenarios_similarity(R)
    # Plot the deltas using a helper function
    analysis.delta_plot(delta)
    # Plot the Kendall's tau-b values using a helper function
    analysis.tau_plot(tau)

    # We now want to test the effects of different robustness metrics,
    # across all of the 100 scenarios. We first define a few new
    # robustness metrics (in addition to our original R metric for
    # the vulnerability). For this example we use some classic metrics
    R_metrics = [
        R_metric,  # The original robustness metric
        functools.partial(metrics.maximax, maximise=False),
        functools.partial(metrics.laplace, maximise=False),
        functools.partial(metrics.minimax_regret, maximise=False),
        functools.partial(metrics.percentile_kurtosis, maximise=False)
    ]

    # Calculate robustness for each robustness metric
    R = np.transpose([R_metric(f) for R_metric in R_metrics])

    # Calculate similarity in robustness from different robustness metrics
    tau = analysis.R_metric_similarity(R)
    # Plot the Kendall's tau-b values using a helper function
    analysis.tau_plot(tau)
Example #11
def robust_optimize(
    model,
    robustness_functions,
    scenarios,
    evaluator=None,
    nfe=10000,
    convergence='default',
    display_convergence=True,
    convergence_freq=100,
    constraints=None,
    epsilons=0.1,
    algorithm=None,
    check_extremes=False,
    **kwargs,
):
    """
	Perform robust optimization.

	The robust optimization is generally a multi-objective optimization task.
	It is undertaken using statistical measures of outcomes evaluated across
	a number of scenarios, instead of using the individual outcomes themselves.
	For each candidate policy, the model is evaluated against all of the considered
	scenarios, and then the robustness measures are evaluated using the
	set of outcomes from the original runs.  The robustness measures
	are aggregate measures that are computed from a set of outcomes.
	For example, this may be expected value, median, n-th percentile,
	minimum, or maximum value of any individual outcome.  It is also
	possible to have joint measures, e.g. expected value of the larger
	of outcome 1 or outcome 2.

	Each robustness function is indicated as a maximization or minimization
	target, where higher or lower values are better, respectively.
	The optimization process then tries to identify one or more
	non-dominated solutions for the possible policy levers.

	Args:
		model (AbstractCoreModel): A core model to use for
			robust optimization.
		robustness_functions (Collection[Measure]): A collection of
			aggregate statistical performance measures.
		scenarios (int or Collection): A collection of scenarios to
			use in the evaluation(s), or give an integer to generate
			that number of random scenarios.
		evaluator (Evaluator, optional): The evaluator to use to
			run the model. If not given, a SequentialEvaluator will
			be created.
		nfe (int, default 10_000): Number of function evaluations.
			This generally needs to be fairly large to achieve stable
			results in all but the most trivial applications.
		convergence ('default', None, or emat.optimization.ConvergenceMetrics):
			A convergence display during optimization.
		display_convergence (bool, default True): Automatically display
			the convergence metric figures when optimizing.
		constraints (Collection[Constraint], optional):
			Solutions will be constrained to only include values that
			satisfy these constraints. The constraints can be based on
			the policy levers, or on the computed values of the robustness
			functions, or some combination thereof.
		epsilons ('auto' or float or array-like): Used to limit the number of
			distinct solutions generated.  Set to a larger value to get
			fewer distinct solutions.  When 'auto', epsilons are set based
			on the standard deviations of a preliminary set of experiments.
		algorithm (platypus.Algorithm or str, optional): Select an
			algorithm for multi-objective optimization.  The algorithm can
			be given directly, or named in a string. See `platypus`
			documentation for details.
		check_extremes (bool or int, default False): Conduct additional
			evaluations, setting individual policy levers to their
			extreme values, for each candidate Pareto optimal solution.
		kwargs: any additional arguments will be passed on to the
			platypus algorithm.

	Returns:
		emat.OptimizationResult:
			The set of non-dominated solutions found.
			When `convergence` is given, the convergence measures are
			included, as a pandas.DataFrame in the `convergence` attribute.

	Raises:
		ValueError:
			If any of the `robustness_functions` are not emat.Measures, or
			do not have a function set, or share a name with any parameter,
			measure, constant, or performance measure in the scope.
		KeyError:
			If any of the `robustness_functions` relies on a named variable
			that does not appear in the scope.
	"""
    if not isinstance(model, AbstractCoreModel):
        raise ValueError(
            f'model must be AbstractCoreModel subclass, not {type(model)}')

    for rf in robustness_functions:
        if not isinstance(rf, Measure):
            raise ValueError(
                f'robustness functions must be defined as emat.Measure objects'
            )
        if rf.function is None:
            raise ValueError(
                f'robustness function must have a function attribute set ({rf.name})'
            )
        if rf.name in model.scope:
            raise ValueError(
                f'cannot name robustness function the same as any scope name ({rf.name})'
            )
        for rf_v in rf.variable_name:
            if rf_v not in model.scope:
                raise KeyError(rf_v)

    if constraints:
        for c in constraints:
            for pn in c.parameter_names:
                if pn in model.scope.get_uncertainty_names():
                    raise ValueError(
                        f"cannot constrain on uncertainties ({c.name})")

    epsilons, convergence, display_convergence, evaluator = model._common_optimization_setup(
        epsilons, convergence, display_convergence, evaluator)

    if algorithm is None:
        algorithm = platypus.EpsNSGAII
    if isinstance(algorithm, str):
        algorithm = getattr(platypus, algorithm, algorithm)
        if isinstance(algorithm, str):
            raise ValueError(f"platypus algorithm {algorithm} not found")
    if not issubclass(algorithm, platypus.Algorithm):
        raise ValueError(
            f"algorithm must be a platypus.Algorithm subclass, not {algorithm}"
        )

    if isinstance(scenarios, int):
        n_scenarios = scenarios
        scenarios = sample_uncertainties(model, n_scenarios)

    with evaluator:
        if epsilons == 'auto':
            trial = model.robust_evaluate(
                robustness_functions=robustness_functions,
                scenarios=scenarios,
                policies=30,
                evaluator=evaluator,
            )
            epsilons = [
                max(0.1,
                    numpy.std(trial[rf.name]) / 20)
                for rf in robustness_functions
            ]

        robust_results = evaluator.robust_optimize(
            robustness_functions,
            scenarios,
            nfe=nfe,
            constraints=constraints,
            epsilons=epsilons,
            convergence=convergence,
            convergence_freq=convergence_freq,
            algorithm=algorithm,
            **kwargs,
        )

    if isinstance(robust_results, tuple) and len(robust_results) == 2:
        robust_results, result_convergence = robust_results
    else:
        result_convergence = None

    robust_results = model.ensure_dtypes(robust_results)

    result = OptimizationResult(
        robust_results,
        result_convergence,
        scope=model.scope,
        robustness_functions=robustness_functions,
        scenarios=scenarios,
    )

    if check_extremes:
        result.check_extremes(
            model,
            1 if check_extremes is True else check_extremes,
            evaluator=evaluator,
            constraints=constraints,
        )

    return result
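
A minimal usage sketch for the module-level function above. It reuses the road_test example model that appears in Examples #9 and #12 and assumes a `robustness_functions` list of emat.Measure objects like the one defined there; `epsilons='auto'` triggers the preliminary 30-policy evaluation described in the docstring.

# Sketch only: `robustness_functions` is assumed to be defined as in Examples #9 and #12.
from emat.examples import road_test

s, db, m = road_test()
result = robust_optimize(
    m,
    robustness_functions,
    scenarios=20,        # an int: scenarios are sampled via sample_uncertainties
    nfe=5000,
    epsilons='auto',     # derive epsilons from a preliminary set of evaluations
    check_extremes=1,    # re-evaluate each solution at individual lever extremes
)
solutions = result.result            # non-dominated solutions (pandas.DataFrame)
convergence = result.convergence     # convergence measures, when collected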
Example #12
    def test_robust_evaluation(self):
        # %%

        import os
        test_dir = os.path.dirname(__file__)

        from ema_workbench import ema_logging, MultiprocessingEvaluator, SequentialEvaluator
        from emat.examples import road_test
        import numpy, pandas, functools, random
        from emat import Measure
        s, db, m = road_test()

        MAXIMIZE = Measure.MAXIMIZE
        MINIMIZE = Measure.MINIMIZE

        robustness_functions = [
            Measure(
                'Expected Net Benefit',
                kind=Measure.INFO,
                variable_name='net_benefits',
                function=numpy.mean,
            ),
            Measure(
                'Probability of Net Loss',
                kind=MINIMIZE,
                variable_name='net_benefits',
                function=lambda x: numpy.mean(x < 0),
                min=0,
                max=1,
            ),
            Measure(
                '95%ile Travel Time',
                kind=MINIMIZE,
                variable_name='build_travel_time',
                function=functools.partial(numpy.percentile, q=95),
                min=60,
                max=150,
            ),
            Measure(
                '99%ile Present Cost',
                kind=Measure.INFO,
                variable_name='present_cost_expansion',
                function=functools.partial(numpy.percentile, q=99),
            ),
            Measure(
                'Expected Present Cost',
                kind=Measure.INFO,
                variable_name='present_cost_expansion',
                function=numpy.mean,
            ),
        ]
        # %%

        numpy.random.seed(42)

        with MultiprocessingEvaluator(m) as evaluator:
            r1 = m.robust_evaluate(
                robustness_functions,
                scenarios=20,
                policies=5,
                evaluator=evaluator,
            )

        stable_df('./road_test_robust_evaluate.pkl.gz', r1)

        numpy.random.seed(7)

        from ema_workbench.em_framework.samplers import sample_uncertainties
        scenes = sample_uncertainties(m, 20)

        scenes0 = pandas.DataFrame(scenes)
        stable_df('./test_robust_evaluation_scenarios.pkl.gz', scenes0)

        from emat import Constraint

        constraint_1 = Constraint(
            "Maximum Log Expected Present Cost",
            outcome_names="Expected Present Cost",
            function=Constraint.must_be_less_than(4000),
        )

        constraint_2 = Constraint(
            "Minimum Capacity Expansion",
            parameter_names="expand_capacity",
            function=Constraint.must_be_greater_than(10),
        )

        constraint_3 = Constraint(
            "Maximum Paygo",
            parameter_names='debt_type',
            outcome_names='99%ile Present Cost',
            function=lambda i, j: max(0, j - 1500) if i == 'Paygo' else 0,
        )

        from emat.optimization import HyperVolume, EpsilonProgress, SolutionViewer, ConvergenceMetrics

        convergence_metrics = ConvergenceMetrics(
            HyperVolume.from_outcomes(robustness_functions),
            EpsilonProgress(),
            SolutionViewer.from_model_and_outcomes(m, robustness_functions),
        )

        numpy.random.seed(8)
        random.seed(8)

        # Test robust optimize
        with SequentialEvaluator(m) as evaluator:
            robust = m.robust_optimize(
                robustness_functions,
                scenarios=scenes,
                nfe=25,
                constraints=[
                    constraint_1,
                    constraint_2,
                    constraint_3,
                ],
                epsilons=[
                    0.05,
                ] * len(robustness_functions),
                convergence=convergence_metrics,
                evaluator=evaluator,
            )
        robust_results, convergence = robust.result, robust.convergence

        stable_df('test_robust_results.pkl.gz', robust_results)