Example #1
    def test_derive_meta_w_transform(self):
        # Derive a metamodel from a design whose scope includes measure
        # transforms, store it in the database, and check round-trip retrieval.
        import numpy as np
        import pytest
        from pytest import approx

        import emat
        from emat.examples import road_test

        s, db, m = road_test(yamlfile='road_test2.yaml')

        db.get_db_info()

        m.design_experiments(n_samples=10, design_name='tiny')

        db.read_experiment_all(None, None)

        with pytest.raises(emat.PendingExperimentsError):
            m.create_metamodel_from_design('tiny', random_state=123)

        m.run_experiments(design_name='tiny')

        mm = emat.create_metamodel(
            m.scope,
            db.read_experiment_all(s.name, 'tiny'),
            random_state=123,
            metamodel_id=db.get_new_metamodel_id(None),
        )

        assert mm.scope != m.scope  # now not equal as road_test2 has transforms that are stripped.
        mm.db = db
        tiny2 = m.design_experiments(n_samples=10, design_name='tiny2', random_seed=456)

        assert tiny2.iloc[0]['debt_type'] == 'GO Bond'

        assert dict(tiny2.iloc[0].drop('debt_type')) == approx({
            'alpha': 0.10428005571929212,
            'amortization_period': 33,
            'beta': 4.8792451185772014,
            'expand_capacity': 61.4210886403998,
            'input_flow': 137,
            'interest_rate': 0.03099304322197216,
            'interest_rate_lock': 0,
            'unit_cost_expansion': 121.85520427974882,
            'value_of_time': 0.002953613029133872,
            'yield_curve': 0.016255990123028242,
            'free_flow_time': 60,
            'initial_capacity': 100})

        result2 = mm.run_experiments('tiny2')

        # print(dict(mm.read_experiment_measures('tiny2').iloc[0]))
        #
        # print({
        #     'no_build_travel_time': 81.6839454971052,
        #     'build_travel_time': 61.91038371206646,
        #     'time_savings': 44.94189289289446,
        #     'value_of_time_savings': 2904.081661408463,
        #     'net_benefits': -34.09931528157315,
        #     'cost_of_capacity_expansion': 1085.3565091745982,
        #     'present_cost_expansion': 19923.66625500023,
        # })
        #
        assert dict(mm.read_experiment_measures('tiny2').iloc[0]) == approx({
            'no_build_travel_time': 81.6839454971052,
            'build_travel_time': 61.91038371206646,
            'log_build_travel_time': 4.120826572003798,
            'time_savings': 44.94189289289446,
            'value_of_time_savings': 2904.081661408463,
            'net_benefits': -34.09931528157315,
            'cost_of_capacity_expansion': 1085.3565091745982,
            'present_cost_expansion': 19923.66625500023,
        })

        assert m.run_experiment(tiny2.iloc[0]) == approx({
            'no_build_travel_time': 89.07004237532217,
            'build_travel_time': 62.81032484779827,
            'log_build_travel_time': np.log(62.81032484779827),
            'time_savings': 26.259717527523904,
            'value_of_time_savings': 10.62586300480175,
            'present_cost_expansion': 7484.479303360477,
            'cost_of_capacity_expansion': 395.69034710662226,
            'net_benefits': -385.0644841018205,
        })

        with pytest.raises(ValueError):
            # no metamodels stored
            mm3 = db.read_metamodel(None, None)

        db.write_metamodel(mm)
        mm2 = db.read_metamodel(None, 1)
        mm3 = db.read_metamodel(None, None)
        assert mm2 == mm == mm3
        assert mm2 is not mm

        assert mm2.function(**(tiny2.iloc[0])) == approx({
            'no_build_travel_time': 81.6839454971052,
            'build_travel_time': 61.91038371206646,
            'log_build_travel_time': 4.120826572003798,
            'time_savings': 44.94189289289446,
            'value_of_time_savings': 2904.081661408463,
            'net_benefits': -34.09931528157315,
            'cost_of_capacity_expansion': 1085.3565091745982,
            'present_cost_expansion': 19923.66625500023,
        })

        mm3.metamodel_id = db.get_new_metamodel_id(None)
        db.write_metamodel(mm3)

        with pytest.raises(ValueError):
            # now too many to get without giving an ID
            mm4 = db.read_metamodel(None, None)
Example #2
	def test_robust_evaluation(self):
		# %%

		import os
		test_dir = os.path.dirname(__file__)

		from emat.workbench import ema_logging, MultiprocessingEvaluator, SequentialEvaluator
		from emat.examples import road_test
		import numpy, pandas, functools
		import random  # needed for random.seed(8) below
		from emat import Measure
		s, db, m = road_test()

		MAXIMIZE = Measure.MAXIMIZE
		MINIMIZE = Measure.MINIMIZE

		robustness_functions = [
			Measure(
				'Expected Net Benefit',
				kind=Measure.INFO,
				variable_name='net_benefits',
				function=numpy.mean,
			),

			Measure(
				'Probability of Net Loss',
				kind=MINIMIZE,
				variable_name='net_benefits',
				function=lambda x: numpy.mean(x < 0),
				min=0,
				max=1,
			),

			Measure(
				'95%ile Travel Time',
				kind=MINIMIZE,
				variable_name='build_travel_time',
				function=functools.partial(numpy.percentile, q=95),
				min=60,
				max=150,
			),

			Measure(
				'99%ile Present Cost',
				kind=Measure.INFO,
				variable_name='present_cost_expansion',
				function=functools.partial(numpy.percentile, q=99),
			),

			Measure(
				'Expected Present Cost',
				kind=Measure.INFO,
				variable_name='present_cost_expansion',
				function=numpy.mean,
			),

		]
		# %%

		numpy.random.seed(42)
		os.chdir(test_dir)
		with SequentialEvaluator(m) as evaluator:
			r1 = m.robust_evaluate(
				robustness_functions,
				scenarios=20,
				policies=5,
				evaluator=evaluator,
			)

		stable_df('./road_test_robust_evaluate.pkl.gz', r1)  # stable_df is a test-module helper; see the sketch after this example

		numpy.random.seed(7)

		from emat.workbench.em_framework.samplers import sample_uncertainties
		scenes = sample_uncertainties(m, 20)

		scenes0 = pandas.DataFrame(scenes)
		stable_df('./test_robust_evaluation_scenarios.pkl.gz', scenes0)

		from emat import Constraint

		constraint_1 = Constraint(
			"Maximum Log Expected Present Cost",
			outcome_names="Expected Present Cost",
			function=Constraint.must_be_less_than(4000),
		)

		constraint_2 = Constraint(
			"Minimum Capacity Expansion",
			parameter_names="expand_capacity",
			function=Constraint.must_be_greater_than(10),
		)

		constraint_3 = Constraint(
			"Maximum Paygo",
			parameter_names='debt_type',
			outcome_names='99%ile Present Cost',
			function=lambda i, j: max(0, j - 1500) if i == 'Paygo' else 0,
		)

		from emat.optimization import HyperVolume, EpsilonProgress, SolutionViewer, ConvergenceMetrics

		convergence_metrics = ConvergenceMetrics(
			HyperVolume.from_outcomes(robustness_functions),
			EpsilonProgress(),
			SolutionViewer.from_model_and_outcomes(m, robustness_functions),
		)

		numpy.random.seed(8)
		random.seed(8)

		# Test robust optimize
		with SequentialEvaluator(m) as evaluator:
			robust = m.robust_optimize(
					robustness_functions,
					scenarios=scenes,
					nfe=25,
					constraints=[
						constraint_1,
						constraint_2,
						constraint_3,
					],
					epsilons=[0.05,]*len(robustness_functions),
					convergence=convergence_metrics,
					evaluator=evaluator,
			)
		robust_results, convergence = robust.result, robust.convergence

		stable_df('test_robust_results.pkl.gz', robust_results)
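
The stable_df() calls in Examples #2 and #3 refer to a helper defined elsewhere in the test module; it is not part of the emat API. A minimal sketch of what such a helper might look like, assuming it caches the first result as a pickle and compares later runs against that cache (the name, signature, and behavior are inferred from the CSV-caching pattern in Example #4):

import os
import pandas

def stable_df(cachefile, df):
    # Hypothetical helper: write the DataFrame to the cache file on the first
    # run, then assert that subsequent results match the cached frame.
    # pandas infers gzip compression from the '.pkl.gz' extension.
    if not os.path.exists(cachefile):
        df.to_pickle(cachefile)
    cached = pandas.read_pickle(cachefile)
    pandas.testing.assert_frame_equal(df, cached)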
Example #3
    def test_derive_meta(self):
        # Same flow as test_derive_meta_w_transform, but with the default
        # road_test scope (no measure transforms), so the derived scope matches.
        import pytest
        from pytest import approx

        import emat
        from emat.examples import road_test

        s, db, m = road_test()

        db.get_db_info()

        m.design_experiments(n_samples=10, design_name='tiny')

        db.read_experiment_all(None, None)

        with pytest.raises(emat.PendingExperimentsError):
            m.create_metamodel_from_design('tiny', random_state=123)

        m.run_experiments(design_name='tiny')

        mm = emat.create_metamodel(
            m.scope,
            db.read_experiment_all(s.name, 'tiny'),
            random_state=123,
            metamodel_id=db.get_new_metamodel_id(None),
        )
        mm.db = db # add db after creation to prevent writing it into the db
        assert mm.scope == m.scope

        tiny2 = m.design_experiments(n_samples=10, design_name='tiny2', random_seed=456)

        assert tiny2.iloc[0]['debt_type'] == 'GO Bond'

        stable_df('./test_tiny2.pkl.gz',tiny2)

        result2 = mm.run_experiments('tiny2')

        tiny2out = mm.read_experiment_measures('tiny2')
        stable_df('./test_tiny2out.pkl.gz', tiny2out)

        with pytest.raises(ValueError):
            # no metamodels stored
            mm3 = db.read_metamodel(None, None)

        db.write_metamodel(mm)
        mm2 = db.read_metamodel(None, 1)
        mm3 = db.read_metamodel(None, None)
        assert mm2 == mm == mm3
        assert mm2 is not mm

        print(mm2.function(**(tiny2.iloc[0])))

        assert mm2.function(**(tiny2.iloc[0])) == approx({
            'no_build_travel_time': 83.57502327972276,
            'build_travel_time': 62.221693766038015,
            'time_savings': 57.612063365257995,
            'value_of_time_savings': 3749.2913256457214,
            'net_benefits': 395.55020765212254,
            'cost_of_capacity_expansion': 1252.6916865286616,
            'present_cost_expansion': 23000.275573551233,
        })

        mm3.metamodel_id = db.get_new_metamodel_id(None)
        db.write_metamodel(mm3)

        with pytest.raises(ValueError):
            # now too many to get without giving an ID
            mm4 = db.read_metamodel(None, None)
Example #4
	def test_robust_evaluation(self):
		# %%

		import os
		test_dir = os.path.dirname(__file__)

		from ema_workbench import ema_logging, MultiprocessingEvaluator, SequentialEvaluator
		from emat.examples import road_test
		import numpy, pandas, functools
		import random  # needed for random.seed(8) below
		from emat import Measure
		s, db, m = road_test()

		MAXIMIZE = Measure.MAXIMIZE
		MINIMIZE = Measure.MINIMIZE

		robustness_functions = [
			Measure(
				'Expected Net Benefit',
				kind=Measure.INFO,
				variable_name='net_benefits',
				function=numpy.mean,
			),

			Measure(
				'Probability of Net Loss',
				kind=MINIMIZE,
				variable_name='net_benefits',
				function=lambda x: numpy.mean(x < 0),
				min=0,
				max=1,
			),

			Measure(
				'95%ile Travel Time',
				kind=MINIMIZE,
				variable_name='build_travel_time',
				function=functools.partial(numpy.percentile, q=95),
				min=60,
				max=150,
			),

			Measure(
				'99%ile Present Cost',
				kind=Measure.INFO,
				variable_name='present_cost_expansion',
				function=functools.partial(numpy.percentile, q=99),
			),

			Measure(
				'Expected Present Cost',
				kind=Measure.INFO,
				variable_name='present_cost_expansion',
				function=numpy.mean,
			),

		]
		# %%

		numpy.random.seed(42)

		with MultiprocessingEvaluator(m) as evaluator:
			r1 = m.robust_evaluate(
				robustness_functions,
				scenarios=20,
				policies=5,
				evaluator=evaluator,
			)

		import pandas
		correct = pandas.read_json(
			'{"amortization_period":{"0":19,"1":23,"2":50,"3":43,"4":35},"debt_type":{"0":"Rev Bond","1":"Paygo"'
			',"2":"GO Bond","3":"Paygo","4":"Rev Bond"},"expand_capacity":{"0":26.3384401031,"1":63.3898549337,"2'
			'":51.1360252492,"3":18.7230954832,"4":93.9205959335},"interest_rate_lock":{"0":false,"1":true,"2":fal'
			'se,"3":true,"4":false},"Expected Net Benefit":{"0":-157.486494925,"1":-244.2423401934,"2":-189.633908'
			'4553,"3":-4.2656265778,"4":-481.1208898635},"Probability of Net Loss":{"0":0.95,"1":1.0,"2":0.95,"3":'
			'0.7,"4":1.0},"95%ile Travel Time":{"0":74.6904209781,"1":65.8492894317,"2":67.6932507947,"3":79.09851'
			'23853,"4":63.203313888},"99%ile Present Cost":{"0":3789.8036648358,"1":9121.0832380586,"2":7357.89572'
			'71441,"3":2694.0416972887,"4":13514.111590462},"Expected Present Cost":{"0":3158.4461451444,"1":7601.'
			'5679809722,"2":6132.1164500957,"3":2245.2312484183,"4":11262.7453643551}}')
		correct['debt_type'] = correct['debt_type'].astype(
			pandas.CategoricalDtype(categories=['GO Bond', 'Rev Bond', 'Paygo'], ordered=True))

		pandas.testing.assert_frame_equal(r1, correct)

		numpy.random.seed(7)

		from ema_workbench.em_framework.samplers import sample_uncertainties
		scenes = sample_uncertainties(m, 20)

		scenes0 = pandas.DataFrame(scenes)
		# The scenario CSV is only (re)written when the robust-results cache is
		# absent, so both reference files are regenerated together.
		cachefile = os.path.join(test_dir,'test_robust_results.csv')
		if not os.path.exists(cachefile):
			scenes0.to_csv(os.path.join(test_dir,'test_robust_evaluation_scenarios.csv'), index=None)
		scenes1 = pandas.read_csv(os.path.join(test_dir,'test_robust_evaluation_scenarios.csv'))
		pandas.testing.assert_frame_equal(scenes0, scenes1)

		from emat import Constraint

		constraint_1 = Constraint(
			"Maximum Log Expected Present Cost",
			outcome_names="Expected Present Cost",
			function=Constraint.must_be_less_than(4000),
		)

		constraint_2 = Constraint(
			"Minimum Capacity Expansion",
			parameter_names="expand_capacity",
			function=Constraint.must_be_greater_than(10),
		)

		constraint_3 = Constraint(
			"Maximum Paygo",
			parameter_names='debt_type',
			outcome_names='99%ile Present Cost',
			function=lambda i, j: max(0, j - 1500) if i == 'Paygo' else 0,
		)

		from emat.optimization import HyperVolume, EpsilonProgress, SolutionViewer, ConvergenceMetrics

		convergence_metrics = ConvergenceMetrics(
			HyperVolume.from_outcomes(robustness_functions),
			EpsilonProgress(),
			SolutionViewer.from_model_and_outcomes(m, robustness_functions),
		)

		numpy.random.seed(8)
		random.seed(8)

		# Test robust optimize
		with SequentialEvaluator(m) as evaluator:
			robust_results, convergence = m.robust_optimize(
					robustness_functions,
					scenarios=scenes,
					nfe=25,
					constraints=[
						constraint_1,
						constraint_2,
						constraint_3,
					],
					epsilons=[0.05,]*len(robustness_functions),
					convergence=convergence_metrics,
					evaluator=evaluator,
			)

		cachefile = os.path.join(test_dir,'test_robust_results.csv')
		if not os.path.exists(cachefile):
			robust_results.to_csv(cachefile, index=None)
		correct2 = pandas.read_csv(cachefile)
		correct2['debt_type'] = correct2['debt_type'].astype(
			pandas.CategoricalDtype(categories=['GO Bond', 'Rev Bond', 'Paygo'], ordered=True))
		pandas.testing.assert_frame_equal(robust_results, correct2, check_less_precise=True)  # check_less_precise is deprecated in newer pandas (use rtol/atol)