Example #1
    def default_scenario(self, **kwargs):
        """
        The default settings for exogenous uncertainties.

        Args:
            **kwargs:
                Override the defaults given in the scope
                with these values.

        Returns:
            ema_workbench.Scenario
        """
        from ema_workbench import Scenario
        values = {u.name: u.default for u in self.get_uncertainties()}
        values.update(kwargs)
        return Scenario('default', **values)
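A minimal usage sketch, not part of the original snippet; the uncertainty name 'b' and the variable 'model' are assumptions for illustration:

# Hypothetical call: keep every scope default except 'b', then hand the
# resulting ema_workbench.Scenario to an evaluator or optimizer.
scenario = model.default_scenario(b=0.42)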
Example #2
# Assumes module-level definitions from the surrounding project:
# Scenario (ema_workbench), mordm.runMoea, baseModelParams, outputFileEnd.
def moea_multi(model, params):
    archives = []
    convergences = []

    refs = params.references[model.name] + [baseModelParams]
    for idx, ref in enumerate(refs):
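        # the last entry is the appended baseModelParams; flag it with -1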
        refScenario = (-1 if idx == len(refs) - 1 else idx)
        print('Reference scenario', refScenario)
        fileEnd = outputFileEnd(model, params, refScenario=refScenario)
        results = mordm.runMoea(model,
                                params=params,
                                fileEnd=fileEnd,
                                reference=Scenario('reference', **ref),
                                refNum=refScenario)
        archives.append(results[0])
        convergences.append(results[1])

    return (archives, convergences)
Example #3
def default_scenario(dike_model):
    reference_values = {
        'Bmax': 175,
        'Brate': 1.5,
        'pfail': 0.5,
        'discount rate': 3.5,
        'ID flood wave shape': 4
    }
    scen1 = {}

    for key in dike_model.uncertainties:
        name_split = key.name.split('_')

        if len(name_split) == 1:
            scen1.update({key.name: reference_values[key.name]})

        else:
            scen1.update({key.name: reference_values[name_split[1]]})

    ref_scenario = Scenario('reference', **scen1)
    return ref_scenario
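The underscore convention above is easy to miss, so here is a small sketch; names like 'A.1_pfail' follow the dike-model style, but the exact naming scheme is an assumption:

# A location-prefixed name falls back to the shared reference value,
# while an unprefixed name such as 'discount rate' maps to itself.
for name in ('A.1_pfail', 'discount rate'):
    parts = name.split('_')
    key = parts[0] if len(parts) == 1 else parts[1]
    print(name, '->', key)  # A.1_pfail -> pfail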
Example #4
    reference_values = {
        'Bmax': 175,
        'Brate': 1.5,
        'pfail': 0.5,
        'discount rate': 3.5,
        'ID flood wave shape': 4
    }
    scen1 = {}

    for key in dike_model.uncertainties:
        name_split = key.name.split('_')

        if len(name_split) == 1:
            scen1.update({key.name: reference_values[key.name]})

        else:
            scen1.update({key.name: reference_values[name_split[1]]})

    ref_scenario = Scenario('reference', **scen1)

    # no dike increase, no warning, none of the rfr
    zero_policy = {'DikeIncrease': 0, 'DaysToThreat': 0, 'RfR': 0}
    pol0 = {}

    for key in dike_model.levers:
        s1, s2 = key.name.split('_')
        pol0.update({key.name: zero_policy[s2]})

    policy0 = Policy('Policy 0', **pol0)

    # Call random scenarios or policies:
    #    n_scenarios = 5
    #    scenarios = sample_uncertainties(dike_model, 50)
    #    n_policies = 10
Example #5
    lake_model.levers = [RealParameter("c1", -2, 2),
                         RealParameter("c2", -2, 2),
                         RealParameter("r1", 0, 2),
                         RealParameter("r2", 0, 2),
                         CategoricalParameter("w1", np.linspace(0, 1, 10))
                         ]
    # specify outcomes
    lake_model.outcomes = [ScalarOutcome('max_P',
                                         kind=ScalarOutcome.MINIMIZE),  # @UndefinedVariable
                           ScalarOutcome('utility',
                                         kind=ScalarOutcome.MAXIMIZE),  # @UndefinedVariable
                           ScalarOutcome('inertia',
                                         kind=ScalarOutcome.MAXIMIZE),  # @UndefinedVariable
                           ScalarOutcome('reliability',
                                         kind=ScalarOutcome.MAXIMIZE)]  # @UndefinedVariable

    # override some of the defaults of the model
    lake_model.constants = [Constant('alpha', 0.41),
                            Constant('nsamples', 100),
                            Constant('myears', 100)]

    # reference is optional, but can be used to implement search for
    # various user-specified scenarios along the lines suggested by
    # Watson and Kasprzyk (2017)
    reference = Scenario('reference', b=0.4, q=2, mean=0.02, stdev=0.01)

    with MultiprocessingEvaluator(lake_model) as evaluator:
        evaluator.optimize(searchover='levers', nfe=100000,
                           epsilons=[0.1, ] * len(lake_model.outcomes),
                           reference=reference)
Example #6
    def run_experiments(
        self,
        design: pd.DataFrame = None,
        evaluator=None,
        *,
        design_name=None,
        db=None,
    ):
        """
        Runs a design of combined experiments using this model.

        A combined experiment includes a complete set of input values for
        all exogenous uncertainties (a Scenario) and all policy levers
        (a Policy). Unlike the perform_experiments function in the EMA Workbench,
        this method pairs each Scenario and Policy in sequence, instead
        of running all possible combinations of Scenario and Policy.
        This change ensures compatibility with the EMAT database modules, which
        preserve the complete set of input information (both uncertainties
        and levers) for each experiment.  To conduct a full cross-factorial set
        of experiments similar to the default settings for EMA Workbench,
        use a factorial design by setting the `jointly` argument of
        `design_experiments` to False, or design the experiments outside
        of EMAT with your own approach.

        Args:
            design (pandas.DataFrame, optional): experiment definitions
                given as a DataFrame, where each exogenous uncertainty and
                policy lever is a column and each row is an experiment.
            evaluator (ema_workbench.Evaluator, optional): Optionally give an
                evaluator instance.  If not given, a default SequentialEvaluator
                will be instantiated.
            design_name (str, optional): The name of a design of experiments to
                load from the database.  This design is only used if
                `design` is None.
            db (Database, optional): The database to use for loading and saving experiments.
                If none is given, the default database for this model is used.
                If there is no default db, and none is given here,
                the results are not stored in a database. Set to False to explicitly
                not use the default database, even if it exists.

        Returns:
            pandas.DataFrame:
                A DataFrame that contains all uncertainties, levers, and measures
                for the experiments.

        Raises:
            ValueError:
                If there are no experiments defined.  This includes
                the situation where `design` is given but no database is
                available.

        """

        from ema_workbench import Scenario, Policy, perform_experiments

        # catch the case where the user passed a design name (a str) positionally
        if isinstance(design, str) and design_name is None:
            design_name, design = design, None

        if design_name is None and design is None:
            raise ValueError(f"must give design_name or design")

        if db is None:
            db = self.db

        if design_name is not None and design is None:
            if not db:
                raise ValueError(
                    f'cannot load design "{design_name}", there is no db')
            design = db.read_experiment_parameters(self.scope.name,
                                                   design_name)

        if design.empty:
            raise ValueError(f"no experiments available")

        scenarios = [
            Scenario(**dict(zip(self.scope.get_uncertainty_names(), i)))
            for i in design[self.scope.get_uncertainty_names()].itertuples(
                index=False, name='ExperimentX')
        ]

        policies = [
            Policy(f"Incognito{n}", **dict(zip(self.scope.get_lever_names(),
                                               i)))
            for n, i in enumerate(design[self.scope.get_lever_names()].
                                  itertuples(index=False, name='ExperimentL'))
        ]

        if not evaluator:
            from ema_workbench import SequentialEvaluator
            evaluator = SequentialEvaluator(self)

        experiments, outcomes = perform_experiments(
            self,
            scenarios=scenarios,
            policies=policies,
            zip_over={'scenarios', 'policies'},
            evaluator=evaluator)
        experiments.index = design.index

        outcomes = pd.DataFrame.from_dict(outcomes)
        outcomes.index = design.index

        if db:
            db.write_experiment_measures(self.scope.name, self.metamodel_id,
                                         outcomes)

        return self.ensure_dtypes(
            pd.concat([
                experiments.drop(columns=['scenario', 'policy', 'model']),
                outcomes
            ],
                      axis=1,
                      sort=False))
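A hedged usage sketch for the method above; the column names input_flow and expand_capacity are borrowed from the road_test example further down, standing in for whatever uncertainty and lever names the scope actually defines:

import pandas as pd

# One row per experiment; scenario and policy values are paired
# row by row, not crossed. db=False skips the default database.
design = pd.DataFrame({
    'input_flow': [100, 120],          # an exogenous uncertainty
    'expand_capacity': [10.0, 50.0],   # a policy lever
})
results = model.run_experiments(design=design, db=False)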
Example #7
epsilon_list = [1, 1, 1, 1, 1, 10]

nfe = 75000

convergence_metrics = [EpsilonProgress()]

constraints = [
    Constraint('Total Aggregated Utility',
               outcome_names='Total Aggregated Utility',
               # distance to feasibility: zero once utility reaches 500
               function=lambda x: max(0, 500 - x))
]

if __name__ == "__main__":
    for i in range(0, range_of_levers):
        reference_scenario = Scenario('reference', **{'irstp': range_irstp[i]})

        start = time.time()
        print("starting search for reference scenario: " + str(i) + "NFE = " +
              str(nfe))

        # only needed in an IPython console within Anaconda
        __spec__ = "ModuleSpec(name='builtins', loader=<class '_frozen_importlib.BuiltinImporter'>)"

        with MultiprocessingEvaluator(RICE) as evaluator:
            results, convergence = evaluator.optimize(
                nfe=nfe,
                searchover='levers',
                epsilons=epsilon_list,
                reference=reference_scenario,
                convergence=convergence_metrics,
                constraints=constraints)
Example #8
def specify_scenario(reference_values, dike_model):
    scen = {}
    for key in dike_model.uncertainties:
        scen.update({key.name: reference_values[key.name]})
    reference_scenario = Scenario('reference', **scen)
    return reference_scenario
Example #9
def reference_scenario(reference_values, dice_sm):
    scen = {}
    for key in dice_sm.uncertainties:
        scen.update({key.name: reference_values[key.name]})
    reference_scenario = Scenario('reference', **scen)
    return reference_scenario
Example #10
def moea_mordm(model, params):
    return mordm.runMoea(model,
                         params=params,
                         fileEnd=outputFileEnd(model, params),
                         reference=Scenario('reference', **baseModelParams),
                         refNum=-1)
Example #11
    def test_robust_optimization(self):

        import os
        import pytest
        import numpy.random
        import random
        numpy.random.seed(42)
        random.seed(42)
        import pandas
        import numpy
        import emat.examples
        scope, db, model = emat.examples.road_test()

        result = model.optimize(
            nfe=10,
            searchover='levers',
            check_extremes=1,
        )

        if not os.path.exists('./test_robust_optimization.1.pkl.gz'):
            result.result.to_pickle('./test_robust_optimization.1.pkl.gz')
        pandas.testing.assert_frame_equal(
            result.result,
            pandas.read_pickle('./test_robust_optimization.1.pkl.gz'))

        from ema_workbench import Scenario, Policy
        assert result.scenario == Scenario(
            **{
                'alpha': 0.15,
                'beta': 4.0,
                'input_flow': 100,
                'value_of_time': 0.075,
                'unit_cost_expansion': 100,
                'interest_rate': 0.03,
                'yield_curve': 0.01
            })

        worst = model.optimize(nfe=10,
                               searchover='uncertainties',
                               reverse_targets=True,
                               check_extremes=1,
                               reference={
                                   'expand_capacity': 100.0,
                                   'amortization_period': 50,
                                   'debt_type': 'PayGo',
                                   'interest_rate_lock': False,
                               })

        if not os.path.exists('./test_robust_optimization.2.pkl.gz'):
            worst.result.to_pickle('./test_robust_optimization.2.pkl.gz')
        pandas.testing.assert_frame_equal(
            worst.result,
            pandas.read_pickle('./test_robust_optimization.2.pkl.gz'))

        from emat import Measure

        minimum_net_benefit = Measure(
            name='Minimum Net Benefits',
            kind=Measure.MAXIMIZE,
            variable_name='net_benefits',
            function=min,
        )

        expected_net_benefit = Measure(
            name='Mean Net Benefits',
            kind=Measure.MAXIMIZE,
            variable_name='net_benefits',
            function=numpy.mean,
        )

        import functools

        pct5_net_benefit = Measure(
            '5%ile Net Benefits',
            kind=Measure.MAXIMIZE,
            variable_name='net_benefits',
            function=functools.partial(numpy.percentile, q=5),
        )
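        # functools.partial pins q=5, so the Measure receives a
        # one-argument callable; e.g. it maps [0, 10, 20, 30] to 1.5.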

        from scipy.stats import percentileofscore

        neg_net_benefit = Measure(
            'Possibility of Negative Net Benefits',
            kind=Measure.MINIMIZE,
            variable_name='net_benefits',
            function=functools.partial(percentileofscore,
                                       score=0,
                                       kind='strict'),
        )

        pct95_cost = Measure(
            '95%ile Capacity Expansion Cost',
            kind=Measure.MINIMIZE,
            variable_name='cost_of_capacity_expansion',
            function=functools.partial(numpy.percentile, q=95),
        )

        expected_time_savings = Measure(
            'Expected Time Savings',
            kind=Measure.MAXIMIZE,
            variable_name='time_savings',
            function=numpy.mean,
        )

        robust_result = model.robust_optimize(
            robustness_functions=[
                expected_net_benefit,
                pct5_net_benefit,
                neg_net_benefit,
                pct95_cost,
                expected_time_savings,
            ],
            scenarios=50,
            nfe=10,
            check_extremes=1,
        )

        if not os.path.exists('./test_robust_optimization.3.pkl.gz'):
            robust_result.result.to_pickle(
                './test_robust_optimization.3.pkl.gz')
        pandas.testing.assert_frame_equal(
            robust_result.result,
            pandas.read_pickle('./test_robust_optimization.3.pkl.gz'))

        from emat import Constraint

        c_min_expansion = Constraint(
            "Minimum Capacity Expansion",
            parameter_names="expand_capacity",
            function=Constraint.must_be_greater_than(10),
        )

        c_positive_mean_net_benefit = Constraint(
            "Minimum Net Benefit",
            outcome_names="Mean Net Benefits",
            function=Constraint.must_be_greater_than(0),
        )

        constraint_bad = Constraint(
            "Maximum Interest Rate",
            parameter_names="interest_rate",
            function=Constraint.must_be_less_than(0.03),
        )

        pct99_present_cost = Measure(
            '99%ile Present Cost',
            kind=Measure.INFO,
            variable_name='present_cost_expansion',
            function=functools.partial(numpy.percentile, q=99),
        )

        c_max_paygo = Constraint(
            "Maximum Paygo",
            parameter_names='debt_type',
            outcome_names='99%ile Present Cost',
            function=lambda i, j: max(0, j - 3000) if i == 'Paygo' else 0,
        )
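        # This constraint mixes a parameter (debt_type) with an outcome
        # (the 99%ile Present Cost measure): it reports a positive
        # violation only when debt_type is 'Paygo' and the measure
        # exceeds 3000; any other debt type always yields 0.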

        robust_constrained = model.robust_optimize(
            robustness_functions=[
                expected_net_benefit,
                pct5_net_benefit,
                neg_net_benefit,
                pct95_cost,
                expected_time_savings,
                pct99_present_cost,
            ],
            constraints=[
                c_min_expansion,
                c_positive_mean_net_benefit,
                c_max_paygo,
            ],
            scenarios=50,
            nfe=10,
            check_extremes=1,
        )

        if not os.path.exists('./test_robust_optimization.4.pkl.gz'):
            robust_constrained.result.to_pickle(
                './test_robust_optimization.4.pkl.gz')
        pandas.testing.assert_frame_equal(
            robust_constrained.result,
            pandas.read_pickle('./test_robust_optimization.4.pkl.gz'))

        with pytest.raises(ValueError):
            model.robust_optimize(
                robustness_functions=[
                    expected_net_benefit,
                    pct5_net_benefit,
                    neg_net_benefit,
                    pct95_cost,
                    expected_time_savings,
                    pct99_present_cost,
                ],
                constraints=[
                    constraint_bad,
                    c_min_expansion,
                    c_positive_mean_net_benefit,
                    c_max_paygo,
                ],
                scenarios=50,
                nfe=10,
                check_extremes=1,
            )