Code example #1
    def robust_evaluate(
        self,
        robustness_functions,
        scenarios,
        policies,
        evaluator=None,
    ):
        """
        Perform robust evaluation(s).

        The robust evaluation is used to generate statistical measures
        of outcomes, instead of generating the individual outcomes themselves.
        For each policy, the model is evaluated against all of the considered
        scenarios, and then the robustness measures are evaluated using the
        set of outcomes from the original runs.  The robustness measures
        are aggregate measures that are computed from a set of outcomes.
        For example, this may be expected value, median, n-th percentile,
        minimum, or maximum value of any individual outcome.  It is also
        possible to have joint measures, e.g. expected value of the larger
        of outcome 1 or outcome 2.

        Args:
            robustness_functions (Collection[Measure]): A collection of
                aggregate statistical performance measures.
            scenarios (int or Collection): A collection of scenarios to
                use in the evaluation(s), or give an integer to generate
                that number of random scenarios.
            policies (int or Collection): A collection of policies to
                use in the evaluation(s), or give an integer to generate
                that number of random policies.
            evaluator (Evaluator, optional): The evaluator to use to
                run the model. If not given, a SequentialEvaluator will
                be created.

        Returns:
            pandas.DataFrame: The computed value of each item
            in `robustness_functions`, for each policy in `policies`.
        """

        if evaluator is None:
            from ema_workbench.em_framework import SequentialEvaluator
            evaluator = SequentialEvaluator(self)

        from ema_workbench.em_framework.samplers import sample_uncertainties, sample_levers

        if isinstance(scenarios, int):
            n_scenarios = scenarios
            scenarios = sample_uncertainties(self, n_scenarios)

        with evaluator:
            robust_results = evaluator.robust_evaluate(
                robustness_functions,
                scenarios,
                policies,
            )

        robust_results = self.ensure_dtypes(robust_results)
        return robust_results
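
A minimal usage sketch of the robust_evaluate method above, modeled on the road_test example that appears in Code example #10; the specific measures are illustrative, not prescriptive.

import numpy
from emat import Measure
from emat.examples import road_test

s, db, m = road_test()

robustness_functions = [
    Measure('Expected Net Benefit', kind=Measure.INFO,
            variable_name='net_benefits', function=numpy.mean),
    Measure('Probability of Net Loss', kind=Measure.MINIMIZE,
            variable_name='net_benefits',
            function=lambda x: numpy.mean(x < 0), min=0, max=1),
]

# Each of 5 random policies is run against the same 20 random scenarios,
# then the outcomes are aggregated by the robustness functions; no explicit
# evaluator is passed, so a SequentialEvaluator is created internally.
results = m.robust_evaluate(robustness_functions, scenarios=20, policies=5)
print(results)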
Code example #2
# Assumes the surrounding script imports numpy as np, matplotlib.pyplot as plt,
# SequentialEvaluator and the LHS sampler from ema_workbench, and defines
# the model object and the tree_generate_heatmap helper.
def run_full_tree_experiment(number_samples):
    with SequentialEvaluator(model) as evaluator:
        experiments, outcomes = evaluator.perform_experiments(
            scenarios=number_samples, uncertainty_sampling=LHS)

    # Store final values of prey outcome
    prey_final = []
    prey_mean = []
    prey_std = []

    # Iterate through rows of outcome numpy array (experiments) to calculate indicators
    for experiment_row in outcomes["prey"]:
        prey_final.append(experiment_row[-1])  # Get last element
        prey_mean.append(np.mean(experiment_row))
        prey_std.append(np.std(experiment_row))

    # Convert outputs to numpy array
    prey_final_np = np.array(prey_final)
    prey_mean_np = np.array(prey_mean)
    prey_std_np = np.array(prey_std)

    # Feature scoring
    x = experiments

    fig1 = plt.figure()
    fig1.suptitle("Number of trees (10, 100, 1000)")
    ax1 = fig1.add_subplot(311)
    ax2 = fig1.add_subplot(312)
    ax3 = fig1.add_subplot(313)

    # Generate heatmaps for different numbers of trees
    tree_generate_heatmap(x, prey_final_np, prey_mean_np, prey_std_np, 10, 0.6,
                          ax1)
    tree_generate_heatmap(x, prey_final_np, prey_mean_np, prey_std_np, 100,
                          0.6, ax2)
    tree_generate_heatmap(x, prey_final_np, prey_mean_np, prey_std_np, 1000,
                          0.6, ax3)
    fig1.tight_layout()

    # The findings are robust to the number of trees: 100 trees provide a sufficient approximation,
    # and increasing the number by one to two orders of magnitude does not change the results significantly.
    # Predator efficiency and its interactions with the other uncertainties have the largest impact on the mean prey value.

    fig2 = plt.figure()
    fig2.suptitle("Max. features (0.4, 0.6, 0.8)")
    ax1 = fig2.add_subplot(311)
    ax2 = fig2.add_subplot(312)
    ax3 = fig2.add_subplot(313)

    # Generate heatmaps for different max. features settings
    tree_generate_heatmap(x, prey_final_np, prey_mean_np, prey_std_np, 100,
                          0.4, ax1)
    tree_generate_heatmap(x, prey_final_np, prey_mean_np, prey_std_np, 100,
                          0.6, ax2)
    tree_generate_heatmap(x, prey_final_np, prey_mean_np, prey_std_np, 100,
                          0.8, ax3)
    fig2.tight_layout()

    plt.show()
Code example #3
# override some of the defaults of the model
model.constants = [
    Constant('alpha', 0.41),
    Constant('nsamples', 150),
    Constant('steps', 100)
]

import pandas as pd
import seaborn as sns

from ema_workbench import (SequentialEvaluator, ema_logging,
                           perform_experiments)
ema_logging.log_to_stderr(ema_logging.INFO)

from SALib.analyze import sobol
from ema_workbench.em_framework.salib_samplers import get_SALib_problem

with SequentialEvaluator(model) as evaluator:
    sa_results = evaluator.perform_experiments(scenarios=1,
                                               uncertainty_sampling='sobol')

experiments, outcomes = sa_results

problem = get_SALib_problem(model.uncertainties)
Si = sobol.analyze(problem,
                   outcomes['max_P'],
                   calc_second_order=True,
                   print_to_console=False)

scores_filtered = {k: Si[k] for k in ['ST', 'ST_conf', 'S1', 'S1_conf']}
Si_df = pd.DataFrame(scores_filtered, index=problem['names'])

sns.set_style('white')
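
The snippet stops just before plotting; a possible continuation, sketched under the assumption that Si_df from above holds the filtered Sobol indices, draws the first-order and total indices with their confidence intervals as error bars.

import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(8, 4))

# First-order (S1) and total (ST) indices per uncertainty, with the
# matching *_conf columns used as error bars.
indices = Si_df[['S1', 'ST']]
err = Si_df[['S1_conf', 'ST_conf']].rename(columns={'S1_conf': 'S1',
                                                    'ST_conf': 'ST'})
indices.plot.bar(yerr=err, ax=ax)
ax.set_ylabel('Sobol index')
fig.tight_layout()
plt.show()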
Code example #4
# Assumes the surrounding script imports numpy as np, RealParameter,
# TimeSeriesOutcome, SequentialEvaluator and the LHS sampler from
# ema_workbench, and defines the generate_regression_single helper.
def plot_lotka_volterra(model_instance):

    model_instance.uncertainties = [
        RealParameter('prey_birth_rate', 0.015, 0.035),
        RealParameter('predation_rate', 0.0005, 0.003),
        RealParameter('predator_efficiency', 0.001, 0.004),
        RealParameter('predator_loss_rate', 0.04, 0.08)
    ]

    model_instance.outcomes = [
        TimeSeriesOutcome('TIME'),
        TimeSeriesOutcome('predators'),
        TimeSeriesOutcome('prey')
    ]

    with SequentialEvaluator(model_instance) as evaluator:
        sa_results = evaluator.perform_experiments(scenarios=50,
                                                   uncertainty_sampling=LHS)

    experiments, outcomes = sa_results

    # Squeeze outcomes
    outcomes_squeezed = {}

    for key in outcomes:
        outcomes_squeezed[key] = np.squeeze(outcomes[key])

    # Store final values of prey outcome
    prey_final = []
    prey_mean = []
    prey_std = []

    # Iterate through rows of outcome numpy array (experiments) to calculate indicators
    for experiment_row in outcomes_squeezed["prey"]:
        prey_final.append(experiment_row[-1])  # Get last element
        prey_mean.append(np.mean(experiment_row))
        prey_std.append(np.std(experiment_row))

    # Collect indicators in dictionary to calculate regression for each of them
    indicators = {
        'prey_final': prey_final,
        'prey_mean': prey_mean,
        'prey_std': prey_std
    }

    # Regress each uncertainty against the indicators to investigate its relationship
    # with the final number of prey after one year, the mean number of prey, and the
    # standard deviation of prey.
    for indicator_key in indicators.keys():
        generate_regression_single(experiments["prey_birth_rate"],
                                   indicators[indicator_key])
        generate_regression_single(experiments["predation_rate"],
                                   indicators[indicator_key])
        generate_regression_single(experiments["predator_efficiency"],
                                   indicators[indicator_key])
        generate_regression_single(experiments["predator_loss_rate"],
                                   indicators[indicator_key])

    # Select uncertainties that were applied in every experiment
    # uncertainties_experiments = experiments[["prey_birth_rate", "predation_rate", "predator_efficiency", "predator_loss_rate"]]

    # generate_regression_single(experiments["prey_birth_rate"], prey_final)

    #generate_regression(uncertainties_experiments, prey_final)
    #generate_regression(uncertainties_experiments, prey_mean)
    #generate_regression(uncertainties_experiments, prey_std)

    #for outcome_key in outcomes_squeezed.keys():
    #   if outcome_key != 'TIME':
    # Fetch numpy array from dictionary, and calculate average over every single column (average at every point of time)
    # Plot using workbench
    #plotting.lines(experiments, outcomes_squeezed, outcomes_to_show=outcome_key, density=plotting_util.Density.HIST)

    return prey_final, prey_mean, prey_std
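
generate_regression_single is defined elsewhere in the original script; the following is only a sketch of what such a helper might look like, assuming scipy is available, fitting and plotting a univariate linear regression of an indicator on a single uncertainty.

import numpy as np
import matplotlib.pyplot as plt
from scipy import stats


def generate_regression_single(x, y):
    """Illustrative stand-in: fit and plot a univariate linear regression."""
    result = stats.linregress(x, y)
    fig, ax = plt.subplots()
    ax.scatter(x, y, s=8, alpha=0.5)
    xs = np.linspace(np.min(x), np.max(x), 100)
    ax.plot(xs, result.intercept + result.slope * xs,
            color='black', label=f"R^2 = {result.rvalue ** 2:.3f}")
    ax.legend()
    plt.show()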
Code example #5
    def robust_optimize(
        self,
        robustness_functions,
        scenarios,
        evaluator=None,
        nfe=10000,
        convergence=None,
        constraints=None,
        **kwargs,
    ):
        """
        Perform robust optimization.

        Robust optimization is generally a multi-objective optimization task.
        It is undertaken using statistical measures of outcomes evaluated across
        a number of scenarios, instead of using the individual outcomes themselves.
        For each candidate policy, the model is evaluated against all of the considered
        scenarios, and then the robustness measures are evaluated using the
        set of outcomes from the original runs.  The robustness measures
        are aggregate measures that are computed from a set of outcomes.
        For example, this may be expected value, median, n-th percentile,
        minimum, or maximum value of any individual outcome.  It is also
        possible to have joint measures, e.g. expected value of the larger
        of outcome 1 or outcome 2.

        Each robustness function is indicated as a maximization or minimization
        target, where higher or lower values are better, respectively.
        The optimization process then tries to identify one or more
        non-dominated solutions for the possible policy levers.

        Args:
            robustness_functions (Collection[Measure]): A collection of
                aggregate statistical performance measures.
            scenarios (int or Collection): A collection of scenarios to
                use in the evaluation(s), or give an integer to generate
                that number of random scenarios.
            evaluator (Evaluator, optional): The evaluator to use to
                run the model. If not given, a SequentialEvaluator will
                be created.
            algorithm (platypus.Algorithm, optional): Select an
                algorithm for multi-objective optimization.  See
                `platypus` documentation for details.
            nfe (int, default 10_000): Number of function evaluations.
                This generally needs to be fairly large to achieve stable
                results in all but the most trivial applications.
            convergence (emat.optimization.ConvergenceMetrics, optional)
            constraints (Collection[Constraint], optional)
            kwargs: any additional arguments will be passed on to the
                platypus algorithm.

        Returns:
            pandas.DataFrame: The set of non-dominated solutions found.

            When `convergence` is given, the convergence measures are
            also returned, as a second pandas.DataFrame.
        """

        if evaluator is None:
            from ema_workbench.em_framework import SequentialEvaluator
            evaluator = SequentialEvaluator(self)

        from ema_workbench.em_framework.samplers import sample_uncertainties, sample_levers

        if isinstance(scenarios, int):
            n_scenarios = scenarios
            scenarios = sample_uncertainties(self, n_scenarios)

        # if epsilons is None:
        #     epsilons = [0.05, ] * len(robustness_functions)
        #
        with evaluator:
            robust_results = evaluator.robust_optimize(
                robustness_functions,
                scenarios,
                nfe=nfe,
                constraints=constraints,
                # epsilons=epsilons,
                convergence=convergence,
                **kwargs,
            )

        if isinstance(robust_results, tuple) and len(robust_results) == 2:
            robust_results, result_convergence = robust_results
        else:
            result_convergence = None

        robust_results = self.ensure_dtypes(robust_results)

        if result_convergence is None:
            return robust_results
        else:
            return robust_results, result_convergence
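
A minimal usage sketch of the robust_optimize method above, following the pattern of Code examples #10 and #13; the measure, scenario count, nfe, and epsilon values are illustrative only.

import numpy
from emat import Measure
from emat.examples import road_test
from ema_workbench import SequentialEvaluator
from ema_workbench.em_framework.samplers import sample_uncertainties

s, db, m = road_test()

robustness_functions = [
    Measure('Probability of Net Loss', kind=Measure.MINIMIZE,
            variable_name='net_benefits',
            function=lambda x: numpy.mean(x < 0), min=0, max=1),
]

# Pre-sample the scenarios so the same set is reused for every candidate policy.
scenes = sample_uncertainties(m, 20)

with SequentialEvaluator(m) as evaluator:
    robust_results = m.robust_optimize(
        robustness_functions,
        scenarios=scenes,
        nfe=25,  # illustrative; real studies need far larger values
        epsilons=[0.05] * len(robustness_functions),
        evaluator=evaluator,
    )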
Code example #6
    def run_experiments(
        self,
        design: pd.DataFrame = None,
        evaluator=None,
        *,
        design_name=None,
        db=None,
    ):
        """
        Runs a design of combined experiments using this model.

        A combined experiment includes a complete set of input values for
        all exogenous uncertainties (a Scenario) and all policy levers
        (a Policy). Unlike the perform_experiments function in the EMA Workbench,
        this method pairs each Scenario and Policy in sequence, instead
        of running all possible combinations of Scenario and Policy.
        This change ensures compatibility with the EMAT database modules, which
        preserve the complete set of input information (both uncertainties
        and levers) for each experiment.  To conduct a full cross-factorial set
        of experiments similar to the default settings for EMA Workbench,
        use a factorial design, by setting the `jointly` argument for the
        `design_experiments` to False, or by designing experiments outside
        of EMAT with your own approach.

        Args:
            design (pandas.DataFrame, optional): experiment definitions
                given as a DataFrame, where each exogenous uncertainty and
                policy lever is given as a column, and each row is an experiment.
            evaluator (ema_workbench.Evaluator, optional): Optionally give an
                evaluator instance.  If not given, a default SequentialEvaluator
                will be instantiated.
            design_name (str, optional): The name of a design of experiments to
                load from the database.  This design is only used if
                `design` is None.
            db (Database, optional): The database to use for loading and saving experiments.
                If none is given, the default database for this model is used.
                If there is no default db, and none is given here,
                the results are not stored in a database. Set to False to explicitly
                not use the default database, even if it exists.

        Returns:
            pandas.DataFrame:
                A DataFrame that contains all uncertainties, levers, and measures
                for the experiments.

        Raises:
            ValueError:
                If there are no experiments defined.  This includes
                the situation where `design` is given but no database is
                available.

        """

        from ema_workbench import Scenario, Policy, perform_experiments

        # catch the case where the user passes a design name string as `design`
        if isinstance(design, str) and design_name is None:
            design_name, design = design, None

        if design_name is None and design is None:
            raise ValueError("must give design_name or design")

        if db is None:
            db = self.db

        if design_name is not None and design is None:
            if not db:
                raise ValueError(
                    f'cannot load design "{design_name}", there is no db')
            design = db.read_experiment_parameters(self.scope.name,
                                                   design_name)

        if design.empty:
            raise ValueError("no experiments available")

        scenarios = [
            Scenario(**dict(zip(self.scope.get_uncertainty_names(), i)))
            for i in design[self.scope.get_uncertainty_names()].itertuples(
                index=False, name='ExperimentX')
        ]

        policies = [
            Policy(f"Incognito{n}", **dict(zip(self.scope.get_lever_names(),
                                               i)))
            for n, i in enumerate(design[self.scope.get_lever_names()].
                                  itertuples(index=False, name='ExperimentL'))
        ]

        if not evaluator:
            from ema_workbench import SequentialEvaluator
            evaluator = SequentialEvaluator(self)

        experiments, outcomes = perform_experiments(
            self,
            scenarios=scenarios,
            policies=policies,
            zip_over={'scenarios', 'policies'},
            evaluator=evaluator)
        experiments.index = design.index

        outcomes = pd.DataFrame.from_dict(outcomes)
        outcomes.index = design.index

        if db:
            db.write_experiment_measures(self.scope.name, self.metamodel_id,
                                         outcomes)

        return self.ensure_dtypes(
            pd.concat([
                experiments.drop(columns=['scenario', 'policy', 'model']),
                outcomes
            ],
                      axis=1,
                      sort=False))
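
A short sketch of the two design styles the docstring contrasts, reusing the road_scope, emat_db, and m objects from Code examples #11 and #12; the import path and the jointly=False call (taken from the docstring) are assumptions to verify against the design_experiments version in use.

from emat.experiment.experimental_design import design_experiments  # path assumed

# Paired (default) design: each row holds one complete Scenario plus one Policy,
# and run_experiments evaluates the rows in sequence.
paired = design_experiments(road_scope, db=emat_db, n_samples_per_factor=10,
                            sampler='lhs', design_name='lhs_paired')
paired_results = m.run_experiments(paired)

# Cross-factorial design, closer to the EMA Workbench default: sample
# uncertainties and levers separately and cross them (jointly=False,
# per the docstring above).
crossed = design_experiments(road_scope, db=emat_db, n_samples=100,
                             sampler='lhs', jointly=False,
                             design_name='lhs_crossed')
crossed_results = m.run_experiments(crossed)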
Code example #7
            ScalarOutcome('Disutility of Damage 2300', kind=ScalarOutcome.MINIMIZE),
            ScalarOutcome('Welfare 2300', kind=ScalarOutcome.MAXIMIZE),
            # ScalarOutcome('Undiscounted Period Welfare 2300', kind=ScalarOutcome.INFO),
            ScalarOutcome('Consumption SDR 2300', kind=ScalarOutcome.INFO),
            ScalarOutcome('Damage SDR 2300', kind=ScalarOutcome.INFO)
            ]


# %%
n_scenarios = 1000
n_policies = 25
run = 10
#%%

# %%
with SequentialEvaluator(dice_sm) as evaluator:
    results1 = evaluator.optimize(nfe=5e3, searchover='levers', epsilons=[0.01,]*len(dice_sm.outcomes))
# %%
with SequentialEvaluator(dice_sm) as evaluator:
    results2 = evaluator.optimize(nfe=5e3, searchover='levers', epsilons=[0.1,]*len(dice_sm.outcomes))


# %%
from ema_workbench.analysis import parcoords
# import changefont as cf

data = results1.loc[:, [o.name for o in dice_sm.outcomes]]
limits = parcoords.get_limits(data)
# limits['Welfare 2300'] = [-18000,0]
# limits['Undiscounted Period Welfare 2300'] = [-1000, 0]
# limits['Consumption SDR 2300'] = [0, 0.1]
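
The limits computed above would typically feed a parallel-coordinates plot, as in Code example #8; a short, hedged continuation comparing the two epsilon settings:

import matplotlib.pyplot as plt

axes = parcoords.ParallelAxes(limits)
axes.plot(data, color='steelblue', label='epsilon = 0.01')

# Overlay the coarser-epsilon run; note that limits were computed from
# results1 only, so recompute them over both frames if values fall outside.
data2 = results2.loc[:, [o.name for o in dice_sm.outcomes]]
axes.plot(data2, color='darkorange', label='epsilon = 0.1')

axes.legend()
plt.show()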
Code example #8
    #n=n+1
    #a=Constraint("Constraint_"+str(n),parameter_names=[dams[k][1][i],dams[k][2][i]],function=lambda x,y:max(0,-x+y))
    #m.append(a)
    #n=n+1

    basin_model.outcomes = [
        ScalarOutcome('pwsshortageavg', kind=ScalarOutcome.MINIMIZE),
        ScalarOutcome('energyaverage', kind=ScalarOutcome.MAXIMIZE),
        ScalarOutcome('pwssum', kind=ScalarOutcome.MINIMIZE)
    ]

    convergence_metrics = [EpsilonProgress()]

    #with SequentialEvaluator(basin_model) as evaluator:
    #experiments, outcomes=evaluator.perform_experiments(policies=10)#constraints=constraints)
    with SequentialEvaluator(basin_model) as evaluator:
        #experiments, outcomes=evaluator.perform_experiments(scenarios=100)
        results, convergence = evaluator.optimize(
            nfe=100,
            searchover='levers',
            epsilons=[1, 1, 1],
            convergence=convergence_metrics,
            reference=None)  #constraints=m)

from ema_workbench.analysis import parcoords

outcomes = results.loc[:, ['pwsshortageavg', 'energyaverage', 'pwssum']]
limits = parcoords.get_limits(outcomes)
axes = parcoords.ParallelAxes(limits)
axes.plot(outcomes)
Code example #9
from ema_workbench import SequentialEvaluator
from ema_workbench.em_framework.samplers import sample_uncertainties
from ema_workbench.util import ema_logging
from problem_formulation import get_model_for_problem_formulation
from ema_workbench.analysis import prim

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

#%%
ema_logging.log_to_stderr(ema_logging.INFO)

dike_model, planning_steps = get_model_for_problem_formulation(1)

# singleprocessing
with SequentialEvaluator(dike_model) as evaluator:
    results = evaluator.perform_experiments(scenarios=100, policies=10)

## multiprocessing
#with MultiprocessingEvaluator(dike_model) as evaluator:
#    results = evaluator.perform_experiments(scenarios=100, policies=4)

#%%
experiments, outcomes = results

experiments.to_csv('./results/exp_unc_10p_100s.csv')
pd.DataFrame(outcomes).to_csv('./results/out_unc_10p_100s.csv')

classification = outcomes['Expected Number of Deaths'] != 0

exp1 = experiments.loc[:, list(dike_model.uncertainties.keys())]
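
A possible continuation of the scenario-discovery step, applying PRIM as in Code example #11; the threshold value is illustrative.

# Cases of interest: experiments with a nonzero expected number of deaths.
prim_alg = prim.Prim(exp1, classification, threshold=0.8)
box1 = prim_alg.find_box()

box1.show_tradeoff()
box1.inspect()
plt.show()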
Code example #10
	def test_robust_evaluation(self):
		# %%

		import os
		test_dir = os.path.dirname(__file__)

		from ema_workbench import ema_logging, MultiprocessingEvaluator, SequentialEvaluator
		from emat.examples import road_test
		import numpy, pandas, functools, random
		from emat import Measure
		s, db, m = road_test()

		MAXIMIZE = Measure.MAXIMIZE
		MINIMIZE = Measure.MINIMIZE

		robustness_functions = [
			Measure(
				'Expected Net Benefit',
				kind=Measure.INFO,
				variable_name='net_benefits',
				function=numpy.mean,
			),

			Measure(
				'Probability of Net Loss',
				kind=MINIMIZE,
				variable_name='net_benefits',
				function=lambda x: numpy.mean(x < 0),
				min=0,
				max=1,
			),

			Measure(
				'95%ile Travel Time',
				kind=MINIMIZE,
				variable_name='build_travel_time',
				function=functools.partial(numpy.percentile, q=95),
				min=60,
				max=150,
			),

			Measure(
				'99%ile Present Cost',
				kind=Measure.INFO,
				variable_name='present_cost_expansion',
				function=functools.partial(numpy.percentile, q=99),
			),

			Measure(
				'Expected Present Cost',
				kind=Measure.INFO,
				variable_name='present_cost_expansion',
				function=numpy.mean,
			),

		]
		# %%

		numpy.random.seed(42)

		with MultiprocessingEvaluator(m) as evaluator:
			r1 = m.robust_evaluate(
				robustness_functions,
				scenarios=20,
				policies=5,
				evaluator=evaluator,
			)

		import pandas
		correct = pandas.read_json(
			'{"amortization_period":{"0":19,"1":23,"2":50,"3":43,"4":35},"debt_type":{"0":"Rev Bond","1":"Paygo"'
			',"2":"GO Bond","3":"Paygo","4":"Rev Bond"},"expand_capacity":{"0":26.3384401031,"1":63.3898549337,"2'
			'":51.1360252492,"3":18.7230954832,"4":93.9205959335},"interest_rate_lock":{"0":false,"1":true,"2":fal'
			'se,"3":true,"4":false},"Expected Net Benefit":{"0":-157.486494925,"1":-244.2423401934,"2":-189.633908'
			'4553,"3":-4.2656265778,"4":-481.1208898635},"Probability of Net Loss":{"0":0.95,"1":1.0,"2":0.95,"3":'
			'0.7,"4":1.0},"95%ile Travel Time":{"0":74.6904209781,"1":65.8492894317,"2":67.6932507947,"3":79.09851'
			'23853,"4":63.203313888},"99%ile Present Cost":{"0":3789.8036648358,"1":9121.0832380586,"2":7357.89572'
			'71441,"3":2694.0416972887,"4":13514.111590462},"Expected Present Cost":{"0":3158.4461451444,"1":7601.'
			'5679809722,"2":6132.1164500957,"3":2245.2312484183,"4":11262.7453643551}}')
		correct['debt_type'] = correct['debt_type'].astype(
			pandas.CategoricalDtype(categories=['GO Bond', 'Rev Bond', 'Paygo'], ordered=True))

		pandas.testing.assert_frame_equal(r1, correct)

		numpy.random.seed(7)

		from ema_workbench.em_framework.samplers import sample_uncertainties
		scenes = sample_uncertainties(m, 20)

		scenes0 = pandas.DataFrame(scenes)
		cachefile = os.path.join(test_dir,'test_robust_results.csv')
		if not os.path.exists(cachefile):
			scenes0.to_csv(os.path.join(test_dir,'test_robust_evaluation_scenarios.csv'), index=None)
		scenes1 = pandas.read_csv(os.path.join(test_dir,'test_robust_evaluation_scenarios.csv'))
		pandas.testing.assert_frame_equal(scenes0, scenes1)

		from emat import Constraint

		constraint_1 = Constraint(
			"Maximum Log Expected Present Cost",
			outcome_names="Expected Present Cost",
			function=Constraint.must_be_less_than(4000),
		)

		constraint_2 = Constraint(
			"Minimum Capacity Expansion",
			parameter_names="expand_capacity",
			function=Constraint.must_be_greater_than(10),
		)

		constraint_3 = Constraint(
			"Maximum Paygo",
			parameter_names='debt_type',
			outcome_names='99%ile Present Cost',
			function=lambda i, j: max(0, j - 1500) if i == 'Paygo' else 0,
		)

		from emat.optimization import HyperVolume, EpsilonProgress, SolutionViewer, ConvergenceMetrics

		convergence_metrics = ConvergenceMetrics(
			HyperVolume.from_outcomes(robustness_functions),
			EpsilonProgress(),
			SolutionViewer.from_model_and_outcomes(m, robustness_functions),
		)

		numpy.random.seed(8)
		random.seed(8)

		# Test robust optimize
		with SequentialEvaluator(m) as evaluator:
			robust_results, convergence = m.robust_optimize(
					robustness_functions,
					scenarios=scenes,
					nfe=25,
					constraints=[
						constraint_1,
						constraint_2,
						constraint_3,
					],
					epsilons=[0.05,]*len(robustness_functions),
					convergence=convergence_metrics,
					evaluator=evaluator,
			)

		cachefile = os.path.join(test_dir,'test_robust_results.csv')
		if not os.path.exists(cachefile):
			robust_results.to_csv(cachefile, index=None)
		correct2 = pandas.read_csv(cachefile)
		correct2['debt_type'] = correct2['debt_type'].astype(
			pandas.CategoricalDtype(categories=['GO Bond', 'Rev Bond', 'Paygo'], ordered=True))
		pandas.testing.assert_frame_equal(robust_results, correct2, check_less_precise=True)
Code example #11
	def test_road_test(self):
		road_test_scope_file = emat.package_file('model', 'tests', 'road_test.yaml')

		road_scope = emat.Scope(road_test_scope_file)

		# <emat.Scope with 2 constants, 7 uncertainties, 4 levers, 7 measures>
		assert len(road_scope.get_measures()) == 7
		assert len(road_scope.get_levers()) == 4
		assert len(road_scope.get_uncertainties()) == 7
		assert len(road_scope.get_constants()) == 2

		emat_db = emat.SQLiteDB()

		road_scope.store_scope(emat_db)

		with pytest.raises(KeyError):
			road_scope.store_scope(emat_db)

		assert emat_db.read_scope_names() == ['EMAT Road Test']

		design = design_experiments(road_scope, db=emat_db, n_samples_per_factor=10, sampler='lhs')
		design.head()

		large_design = design_experiments(road_scope, db=emat_db, n_samples=5000, sampler='lhs',
										  design_name='lhs_large')
		large_design.head()

		assert list(large_design.columns) == [
			'alpha',
			'amortization_period',
			'beta',
			'debt_type',
			'expand_capacity',
			'input_flow',
			'interest_rate',
			'interest_rate_lock',
			'unit_cost_expansion',
			'value_of_time',
			'yield_curve',
			'free_flow_time',
			'initial_capacity',
		]

		assert list(large_design.head().index) == [111, 112, 113, 114, 115]

		assert emat_db.read_design_names('EMAT Road Test') == ['lhs', 'lhs_large']

		m = PythonCoreModel(Road_Capacity_Investment, scope=road_scope, db=emat_db)

		with SequentialEvaluator(m) as eval_seq:
			lhs_results = m.run_experiments(design_name='lhs', evaluator=eval_seq)

		lhs_results.head()

		assert lhs_results.head()['present_cost_expansion'].values == approx(
			[2154.41598475, 12369.38053473, 4468.50683924, 6526.32517089, 2460.91070514])

		assert lhs_results.head()['net_benefits'].values == approx(
			[-79.51551505, -205.32148044, -151.94431822, -167.62487134, -3.97293985])

		with SequentialEvaluator(m) as eval_seq:
			lhs_large_results = m.run_experiments(design_name='lhs_large', evaluator=eval_seq)
		lhs_large_results.head()

		assert lhs_large_results.head()['net_benefits'].values == approx(
			[-584.36098322, -541.5458395, -185.16661464, -135.85689709, -357.36106457])

		lhs_outcomes = m.read_experiment_measures(design_name='lhs')
		assert lhs_outcomes.head()['time_savings'].values == approx(
			[13.4519273, 26.34172999, 12.48385198, 15.10165981, 15.48056139])

		correct_scores = numpy.array(
			[[0.06603461, 0.04858595, 0.06458574, 0.03298163, 0.05018515, 0., 0., 0.53156587, 0.05060416, 0.02558088,
			  0.04676956, 0.04131266, 0.04179378],
			 [0.06003223, 0.04836434, 0.06059554, 0.03593644, 0.27734396, 0., 0., 0.28235419, 0.05303979, 0.03985181,
			  0.04303371, 0.05004349, 0.04940448],
			 [0.08760605, 0.04630414, 0.0795043, 0.03892201, 0.10182534, 0., 0., 0.42508457, 0.04634321, 0.03216387,
			  0.0497183, 0.04953772, 0.0429905],
			 [0.08365598, 0.04118732, 0.06716887, 0.03789444, 0.06509519, 0., 0., 0.31494171, 0.06517462, 0.02895742,
			  0.04731707, 0.17515158, 0.07345581],
			 [0.06789382, 0.07852257, 0.05066944, 0.04807088, 0.32054735, 0., 0., 0.15953055, 0.05320201, 0.02890069,
			  0.07033928, 0.06372418, 0.05859923],
			 [0.05105435, 0.09460353, 0.04614178, 0.04296901, 0.45179611, 0., 0., 0.04909801, 0.05478798, 0.023099,
			  0.08160785, 0.05642169, 0.04842069],
			 [0.04685703, 0.03490931, 0.03214081, 0.03191602, 0.56130318, 0., 0., 0.04011044, 0.04812986, 0.02228924,
			  0.09753361, 0.04273004, 0.04208045], ])

		scores = m.get_feature_scores('lhs', random_state=123)

		for _i in range(scores.metadata.values.shape[0]):
			for _j in range(scores.metadata.values.shape[1]):
				assert scores.metadata.values[_i,_j] == approx(correct_scores[_i,_j], rel=.1)

		from ema_workbench.analysis import prim

		x = m.read_experiment_parameters(design_name='lhs_large')

		prim_alg = prim.Prim(
			m.read_experiment_parameters(design_name='lhs_large'),
			m.read_experiment_measures(design_name='lhs_large')['net_benefits'] > 0,
			threshold=0.4,
		)

		box1 = prim_alg.find_box()

		assert dict(box1.peeling_trajectory.iloc[45]) == approx({
			'coverage': 0.8014705882352942,
			'density': 0.582109479305741,
			'id': 45,
			'mass': 0.1498,
			'mean': 0.582109479305741,
			'res_dim': 4,
		})

		from emat.util.xmle import Show
		from emat.util.xmle.elem import Elem

		assert isinstance(Show(box1.show_tradeoff()), Elem)

		from ema_workbench.analysis import cart

		cart_alg = cart.CART(
			m.read_experiment_parameters(design_name='lhs_large'),
			m.read_experiment_measures(design_name='lhs_large')['net_benefits'] > 0,
		)
		cart_alg.build_tree()

		cart_dict = dict(cart_alg.boxes[0].iloc[0])
		assert cart_dict['debt_type'] == {'GO Bond', 'Paygo', 'Rev Bond'}
		assert cart_dict['interest_rate_lock'] == {False, True}
		del cart_dict['debt_type']
		del cart_dict['interest_rate_lock']
		assert cart_dict == approx({
			'free_flow_time': 60,
			'initial_capacity': 100,
			'alpha': 0.10001988547129116,
			'beta': 3.500215589924521,
			'input_flow': 80.0,
			'value_of_time': 0.00100690634109406,
			'unit_cost_expansion': 95.00570832093116,
			'interest_rate': 0.0250022738169142,
			'yield_curve': -0.0024960505548531774,
			'expand_capacity': 0.0006718732232418368,
			'amortization_period': 15,
		})

		assert isinstance(Show(cart_alg.show_tree(format='svg')), Elem)

		from emat import Measure

		MAXIMIZE = Measure.MAXIMIZE
		MINIMIZE = Measure.MINIMIZE

		robustness_functions = [
			Measure(
				'Expected Net Benefit',
				kind=Measure.INFO,
				variable_name='net_benefits',
				function=numpy.mean,
				#         min=-150,
				#         max=50,
			),

			Measure(
				'Probability of Net Loss',
				kind=MINIMIZE,
				variable_name='net_benefits',
				function=lambda x: numpy.mean(x < 0),
				min=0,
				max=1,
			),

			Measure(
				'95%ile Travel Time',
				kind=MINIMIZE,
				variable_name='build_travel_time',
				function=functools.partial(numpy.percentile, q=95),
				min=60,
				max=150,
			),

			Measure(
				'99%ile Present Cost',
				kind=Measure.INFO,
				variable_name='present_cost_expansion',
				function=functools.partial(numpy.percentile, q=99),
				#         min=0,
				#         max=10,
			),

			Measure(
				'Expected Present Cost',
				kind=Measure.INFO,
				variable_name='present_cost_expansion',
				function=numpy.mean,
				#         min=0,
				#         max=10,
			),

		]

		from emat import Constraint

		constraint_1 = Constraint(
			"Maximum Log Expected Present Cost",
			outcome_names="Expected Present Cost",
			function=Constraint.must_be_less_than(4000),
		)

		constraint_2 = Constraint(
			"Minimum Capacity Expansion",
			parameter_names="expand_capacity",
			function=Constraint.must_be_greater_than(10),
		)

		constraint_3 = Constraint(
			"Maximum Paygo",
			parameter_names='debt_type',
			outcome_names='99%ile Present Cost',
			function=lambda i, j: max(0, j - 1500) if i == 'Paygo' else 0,
		)

		from emat.optimization import HyperVolume, EpsilonProgress, SolutionViewer, ConvergenceMetrics

		convergence_metrics = ConvergenceMetrics(
			HyperVolume.from_outcomes(robustness_functions),
			EpsilonProgress(),
			SolutionViewer.from_model_and_outcomes(m, robustness_functions),
		)

		with SequentialEvaluator(m) as eval_seq:
			robust_results, convergence = m.robust_optimize(
				robustness_functions,
				scenarios=20,
				nfe=5,
				constraints=[
					constraint_1,
					constraint_2,
					constraint_3,
				],
				epsilons=[0.05, ] * len(robustness_functions),
				convergence=convergence_metrics,
				evaluator=eval_seq,
			)

		assert isinstance(robust_results, pandas.DataFrame)

		mm = m.create_metamodel_from_design('lhs')

		design2 = design_experiments(road_scope, db=emat_db, n_samples_per_factor=10, sampler='lhs', random_seed=2)

		design2_results = mm.run_experiments(design2)
Code example #12
    def test_road_test(self):
        road_test_scope_file = emat.package_file('model', 'tests',
                                                 'road_test.yaml')

        road_scope = emat.Scope(road_test_scope_file)

        # <emat.Scope with 2 constants, 7 uncertainties, 4 levers, 7 measures>
        assert len(road_scope.get_measures()) == 7
        assert len(road_scope.get_levers()) == 4
        assert len(road_scope.get_uncertainties()) == 7
        assert len(road_scope.get_constants()) == 2

        emat_db = emat.SQLiteDB()

        road_scope.store_scope(emat_db)

        with pytest.raises(KeyError):
            road_scope.store_scope(emat_db)

        assert emat_db.read_scope_names() == ['EMAT Road Test']

        design = design_experiments(road_scope,
                                    db=emat_db,
                                    n_samples_per_factor=10,
                                    sampler='lhs')
        design.head()

        large_design = design_experiments(road_scope,
                                          db=emat_db,
                                          n_samples=5000,
                                          sampler='lhs',
                                          design_name='lhs_large')
        large_design.head()

        assert list(large_design.columns) == [
            'alpha',
            'amortization_period',
            'beta',
            'debt_type',
            'expand_capacity',
            'input_flow',
            'interest_rate',
            'interest_rate_lock',
            'unit_cost_expansion',
            'value_of_time',
            'yield_curve',
            'free_flow_time',
            'initial_capacity',
        ]

        assert list(large_design.head().index) == [111, 112, 113, 114, 115]

        assert emat_db.read_design_names('EMAT Road Test') == [
            'lhs', 'lhs_large'
        ]

        m = PythonCoreModel(Road_Capacity_Investment,
                            scope=road_scope,
                            db=emat_db)

        with SequentialEvaluator(m) as eval_seq:
            lhs_results = m.run_experiments(design_name='lhs',
                                            evaluator=eval_seq)

        lhs_results.head()

        assert lhs_results.head()['present_cost_expansion'].values == approx([
            2154.41598475, 12369.38053473, 4468.50683924, 6526.32517089,
            2460.91070514
        ])

        assert lhs_results.head()['net_benefits'].values == approx([
            -22.29090499, -16.84301382, -113.98841188, 11.53956058, 78.03661612
        ])

        with SequentialEvaluator(m) as eval_seq:
            lhs_large_results = m.run_experiments(design_name='lhs_large',
                                                  evaluator=eval_seq)
        lhs_large_results.head()

        assert lhs_large_results.head()['net_benefits'].values == approx([
            -522.45283083, -355.1599307, -178.6623215, 23.46263498,
            -301.17700968
        ])

        lhs_outcomes = m.read_experiment_measures(design_name='lhs')
        assert lhs_outcomes.head()['time_savings'].values == approx(
            [13.4519273, 26.34172999, 12.48385198, 15.10165981, 15.48056139])

        scores = m.get_feature_scores('lhs', random_state=123)

        stable_df("./road_test_feature_scores.pkl.gz", scores.data)

        from ema_workbench.analysis import prim

        x = m.read_experiment_parameters(design_name='lhs_large')

        prim_alg = prim.Prim(
            m.read_experiment_parameters(design_name='lhs_large'),
            m.read_experiment_measures(design_name='lhs_large')['net_benefits']
            > 0,
            threshold=0.4,
        )

        box1 = prim_alg.find_box()

        stable_df("./road_test_box1_peeling_trajectory.pkl.gz",
                  box1.peeling_trajectory)

        from emat.util.xmle import Show
        from emat.util.xmle.elem import Elem

        assert isinstance(Show(box1.show_tradeoff()), Elem)

        from ema_workbench.analysis import cart

        cart_alg = cart.CART(
            m.read_experiment_parameters(design_name='lhs_large'),
            m.read_experiment_measures(design_name='lhs_large')['net_benefits']
            > 0,
        )
        cart_alg.build_tree()

        stable_df("./road_test_cart_box0.pkl.gz", cart_alg.boxes[0])

        cart_dict = dict(cart_alg.boxes[0].iloc[0])
        assert cart_dict['debt_type'] == {'GO Bond', 'Paygo', 'Rev Bond'}
        assert cart_dict['interest_rate_lock'] == {False, True}

        assert isinstance(Show(cart_alg.show_tree(format='svg')), Elem)

        from emat import Measure

        MAXIMIZE = Measure.MAXIMIZE
        MINIMIZE = Measure.MINIMIZE

        robustness_functions = [
            Measure(
                'Expected Net Benefit',
                kind=Measure.INFO,
                variable_name='net_benefits',
                function=numpy.mean,
                #         min=-150,
                #         max=50,
            ),
            Measure(
                'Probability of Net Loss',
                kind=MINIMIZE,
                variable_name='net_benefits',
                function=lambda x: numpy.mean(x < 0),
                min=0,
                max=1,
            ),
            Measure(
                '95%ile Travel Time',
                kind=MINIMIZE,
                variable_name='build_travel_time',
                function=functools.partial(numpy.percentile, q=95),
                min=60,
                max=150,
            ),
            Measure(
                '99%ile Present Cost',
                kind=Measure.INFO,
                variable_name='present_cost_expansion',
                function=functools.partial(numpy.percentile, q=99),
                #         min=0,
                #         max=10,
            ),
            Measure(
                'Expected Present Cost',
                kind=Measure.INFO,
                variable_name='present_cost_expansion',
                function=numpy.mean,
                #         min=0,
                #         max=10,
            ),
        ]

        from emat import Constraint

        constraint_1 = Constraint(
            "Maximum Log Expected Present Cost",
            outcome_names="Expected Present Cost",
            function=Constraint.must_be_less_than(4000),
        )

        constraint_2 = Constraint(
            "Minimum Capacity Expansion",
            parameter_names="expand_capacity",
            function=Constraint.must_be_greater_than(10),
        )

        constraint_3 = Constraint(
            "Maximum Paygo",
            parameter_names='debt_type',
            outcome_names='99%ile Present Cost',
            function=lambda i, j: max(0, j - 1500) if i == 'Paygo' else 0,
        )

        from emat.optimization import HyperVolume, EpsilonProgress, SolutionViewer, ConvergenceMetrics

        convergence_metrics = ConvergenceMetrics(
            HyperVolume.from_outcomes(robustness_functions),
            EpsilonProgress(),
            SolutionViewer.from_model_and_outcomes(m, robustness_functions),
        )

        with SequentialEvaluator(m) as eval_seq:
            robust = m.robust_optimize(
                robustness_functions,
                scenarios=20,
                nfe=5,
                constraints=[
                    constraint_1,
                    constraint_2,
                    constraint_3,
                ],
                epsilons=[
                    0.05,
                ] * len(robustness_functions),
                convergence=convergence_metrics,
                evaluator=eval_seq,
            )
        robust_results, convergence = robust.result, robust.convergence

        assert isinstance(robust_results, pandas.DataFrame)

        mm = m.create_metamodel_from_design('lhs')

        design2 = design_experiments(road_scope,
                                     db=emat_db,
                                     n_samples_per_factor=10,
                                     sampler='lhs',
                                     random_seed=2)

        design2_results = mm.run_experiments(design2)
Code example #13
    def test_robust_evaluation(self):
        # %%

        import os
        test_dir = os.path.dirname(__file__)

        from ema_workbench import ema_logging, MultiprocessingEvaluator, SequentialEvaluator
        from emat.examples import road_test
        import numpy, pandas, functools, random
        from emat import Measure
        s, db, m = road_test()

        MAXIMIZE = Measure.MAXIMIZE
        MINIMIZE = Measure.MINIMIZE

        robustness_functions = [
            Measure(
                'Expected Net Benefit',
                kind=Measure.INFO,
                variable_name='net_benefits',
                function=numpy.mean,
            ),
            Measure(
                'Probability of Net Loss',
                kind=MINIMIZE,
                variable_name='net_benefits',
                function=lambda x: numpy.mean(x < 0),
                min=0,
                max=1,
            ),
            Measure(
                '95%ile Travel Time',
                kind=MINIMIZE,
                variable_name='build_travel_time',
                function=functools.partial(numpy.percentile, q=95),
                min=60,
                max=150,
            ),
            Measure(
                '99%ile Present Cost',
                kind=Measure.INFO,
                variable_name='present_cost_expansion',
                function=functools.partial(numpy.percentile, q=99),
            ),
            Measure(
                'Expected Present Cost',
                kind=Measure.INFO,
                variable_name='present_cost_expansion',
                function=numpy.mean,
            ),
        ]
        # %%

        numpy.random.seed(42)

        with MultiprocessingEvaluator(m) as evaluator:
            r1 = m.robust_evaluate(
                robustness_functions,
                scenarios=20,
                policies=5,
                evaluator=evaluator,
            )

        stable_df('./road_test_robust_evaluate.pkl.gz', r1)

        numpy.random.seed(7)

        from ema_workbench.em_framework.samplers import sample_uncertainties
        scenes = sample_uncertainties(m, 20)

        scenes0 = pandas.DataFrame(scenes)
        stable_df('./test_robust_evaluation_scenarios.pkl.gz', scenes0)

        from emat import Constraint

        constraint_1 = Constraint(
            "Maximum Log Expected Present Cost",
            outcome_names="Expected Present Cost",
            function=Constraint.must_be_less_than(4000),
        )

        constraint_2 = Constraint(
            "Minimum Capacity Expansion",
            parameter_names="expand_capacity",
            function=Constraint.must_be_greater_than(10),
        )

        constraint_3 = Constraint(
            "Maximum Paygo",
            parameter_names='debt_type',
            outcome_names='99%ile Present Cost',
            function=lambda i, j: max(0, j - 1500) if i == 'Paygo' else 0,
        )

        from emat.optimization import HyperVolume, EpsilonProgress, SolutionViewer, ConvergenceMetrics

        convergence_metrics = ConvergenceMetrics(
            HyperVolume.from_outcomes(robustness_functions),
            EpsilonProgress(),
            SolutionViewer.from_model_and_outcomes(m, robustness_functions),
        )

        numpy.random.seed(8)
        random.seed(8)

        # Test robust optimize
        with SequentialEvaluator(m) as evaluator:
            robust = m.robust_optimize(
                robustness_functions,
                scenarios=scenes,
                nfe=25,
                constraints=[
                    constraint_1,
                    constraint_2,
                    constraint_3,
                ],
                epsilons=[
                    0.05,
                ] * len(robustness_functions),
                convergence=convergence_metrics,
                evaluator=evaluator,
            )
        robust_results, convergence = robust.result, robust.convergence

        stable_df('test_robust_results.pkl.gz', robust_results)