def test_optimisation_with_surrogate(building, problem, samples):
    """Make sure we can run optimisation algorithms on the fitted surrogates."""
    evaluator = EvaluatorEP(problem, building)
    outputs = evaluator.df_apply(samples)
    train_in, test_in, train_out, test_out = train_test_split(samples, outputs, test_size=0.2)

    reg = linear_model.LinearRegression()
    reg.fit(train_in, train_out)

    def evaluation_func(ind):
        # EvaluatorSR expects a tuple of (objective values, constraint values)
        return ((reg.predict([ind])[0][0],), ())

    surrogate = EvaluatorSR(evaluation_func, problem)
    s = optimizer.NSGAII(surrogate, 1000)
    optimal = s.iloc[0]['pareto-optimal']
    non_optimal = s.iloc[1]['pareto-optimal']
    assert optimal, 'Optimal output was displayed as not Pareto-optimal'
    assert not non_optimal, 'Non-optimal output was displayed as Pareto-optimal'

    # change this to 0 to see stdout and stderr
    assert 1

def test_flexibility(problem):
    """Make sure that you can use multiple algorithms on the same data set."""
    idf = ef.get_idf()
    evaluator = EvaluatorEP(problem, idf)
    random.seed(1)

    # run the first algorithm
    platypus_problem = evaluator.to_platypus()
    algorithm = platypus.NSGAII(problem=platypus_problem, population_size=5)
    algorithm.run(5)

    # run the second algorithm, seeded with the first algorithm's population
    generator = platypus.InjectedPopulation(algorithm.population)
    alg2 = platypus.EpsMOEA(problem=platypus_problem, generator=generator, epsilons=3, population_size=5)
    alg2.run(5)

    results = optimizer.solutions_to_df(alg2.result, problem, parts=['inputs', 'outputs'])
    value = results.iloc[0]['Electricity:Facility']
    assert np.isclose(value, 1747893172.6172004), f'Unexpected result: {value}'
    assert all(optimal for optimal in results['pareto-optimal']), 'Algorithm not producing optimal outputs'

    # change this to 0 to see stdout and stderr
    assert 1

def test_constraints(building, parameters):
    """Test for expected output with certain constraints; also acts as a test for using NSGAII."""
    objectives = ['Electricity:Facility', 'Gas:Facility']
    problem = EPProblem(inputs=parameters, outputs=objectives,
                        constraints=['CO2:Facility'], constraint_bounds=['>=750'])
    evaluator = EvaluatorEP(problem, building)
    results = NSGAII(evaluator, evaluations=10, population_size=2)
    value = results.iloc[0]['CO2:Facility'] + results.iloc[0]['violation']
    assert value >= 750, f'Constraint did not affect output, value should be above 750 but was: {value}'

    # check to make sure the output changes with different constraints
    problem = EPProblem(inputs=parameters, outputs=objectives,
                        constraints=['CO2:Facility'], constraint_bounds=['<=750'])
    evaluator = EvaluatorEP(problem, building)
    results = NSGAII(evaluator, evaluations=10, population_size=2)
    value = results.iloc[0]['CO2:Facility'] - results.iloc[0]['violation']
    assert value <= 750, f'Constraint did not affect output, value should be below 750 but was: {value}'

    # change this to 0 to see stdout and stderr
    assert 1

def test_evaluatorEH_EP_df(hub, hub_problem, building, problem, energyplus_df):
    """Make sure that DataFrame EvaluatorEP output can be used in an EvaluatorEH."""
    evaluatorEP = EvaluatorEP(problem, building)
    evaluatorEH = EvaluatorEH(hub_problem, hub)
    result = evaluatorEH.df_apply(evaluatorEP.df_apply(energyplus_df))
    assert (np.isclose(result.iat[0, 0], 2.721700e+09)
            and np.isclose(result.iat[0, 1], 33.7551)
            and np.isclose(result.iat[1, 0], 2.705480e+09)
            and np.isclose(result.iat[1, 1], 33.7551)), f'Unexpected result for EvaluatorEH, {result}'

    # change this to 0 to see stdout and stderr
    assert 1

def test_exception_throwing(building, problem, samples):
    """Make sure exceptions are thrown or ignored depending on the error_mode."""
    # check that an exception is raised in FailFast mode
    with pytest.raises(Exception):
        EvaluatorEP(problem, building, error_mode='FailFast').df_apply(samples)

    # check that no exceptions are raised in Silent mode
    try:
        EvaluatorEP(problem, building, error_mode='Silent').df_apply(samples)
    except Exception as e:
        pytest.fail(f'Silent mode raised an exception: {e}')

    # change this to 0 to see stdout and stderr
    assert 1

def test_evaluatorEH_EP(hub, hub_problem, building, problem):
    """Make sure that base EvaluatorEP output can be used in an EvaluatorEH."""
    evaluatorEP = EvaluatorEP(problem, building)
    evaluatorEH = EvaluatorEH(hub_problem, hub)
    result = evaluatorEH(evaluatorEP([0.5]))
    assert np.isclose(result[0], 2721700000.0) and np.isclose(
        result[1], 33.7551), f'Unexpected result for EvaluatorEH, {result}'

    # change this to 0 to see stdout and stderr
    assert 1

def test_objectives(building, parameters):
    """Test custom functions and basic objective creation."""
    def variance(result):
        return result.data['Value'].var()

    objectives = [MeterReader('Electricity:Facility', name='Electricity Usage'),
                  MeterReader('Electricity:Facility', func=variance, name='Electricity Variance')]
    problem = EPProblem(inputs=parameters, outputs=objectives)
    evaluator = EvaluatorEP(problem, building)
    samples = sampling.dist_sampler(sampling.seeded_sampler, problem, 10)
    results = evaluator.df_apply(samples, keep_input=True)
    value = results.iloc[0]['Electricity Variance']
    assert np.isclose(value, 829057663033101.1), f'Unexpected value when using custom function: {value}'

    # change this to 0 to see stdout and stderr
    assert 1

def test_evaluatorEP(building, problem):
    """Make sure EvaluatorEP can be initialised and works as intended."""
    evaluator = EvaluatorEP(problem, building)
    result = evaluator([0.5])  # run with thickness set to 0.5
    assert np.isclose(result[0], 1818735943.9307632) and np.isclose(
        result[1], 2172045529.871896), f'Unexpected result for EvaluatorEP, {result}'

    # change this to 0 to see stdout and stderr
    assert 1

def test_fit(building, problem, samples):
    """Check that linear regression works and is close to the actual value."""
    evaluator = EvaluatorEP(problem, building)
    outputs = evaluator.df_apply(samples)
    train_in, test_in, train_out, test_out = train_test_split(samples, outputs, test_size=0.2)

    reg = linear_model.LinearRegression()
    reg.fit(train_in, train_out)

    results = test_in.copy()
    results['energy use'] = test_out
    results['predicted'] = reg.predict(test_in)
    actual = results.iloc[0]['energy use']
    predicted = results.iloc[0]['predicted']
    assert np.isclose(actual - predicted, 8666133.340038776), f'Unexpected difference of value: {actual - predicted}'

    # change this to 0 to see stdout and stderr
    assert 1

def test_error_values(building, problem, samples):
    """Check that automatic error handling will assign the desired error values."""
    # check that the default error value is assigned to invalid materials
    evaluator = EvaluatorEP(problem, building, error_mode='Silent')
    results = evaluator.df_apply(samples)
    value = results.iloc[0]['Electricity:Facility']
    assert value == np.inf, f'Invalid material not assigned the default error value, value assigned was: {value}'

    # check that a custom error value is assigned to invalid materials
    error_value = ((-1,), ())
    evaluator = EvaluatorEP(problem, building, error_mode='Silent', error_value=error_value)
    results = evaluator.df_apply(samples)
    value = results.iloc[0]['Electricity:Facility']
    assert value == -1, f'Invalid material not assigned the correct error value, value assigned was: {value}'

    # check that valid inputs aren't assigned error values
    value = results.iloc[4]['Electricity:Facility']
    assert value != -1, f'Valid material was assigned the error value: {error_value}'

    # change this to 0 to see stdout and stderr
    assert 1

def test_expected_values(building, problem, samples):
    """Check that the obtained results are consistent when using the same inputs."""
    def get_plot_data(model, density):
        # helper function from the example notebook
        p1 = problem.inputs[0].value_descriptor
        a = np.linspace(p1.min, p1.max, density)
        p2 = problem.inputs[1].value_descriptor
        b = np.linspace(p2.min, p2.max, density)
        plot_data = pd.DataFrame(np.transpose([np.tile(a, len(b)), np.repeat(b, len(a))]),
                                 columns=problem.names('inputs'))
        return pd.concat([plot_data, pd.Series(model.predict(plot_data))], axis=1)

    evaluator = EvaluatorEP(problem, building, error_mode='Silent')
    train = evaluator.df_apply(samples, keep_input=True)
    print(problem.names())
    x, y, c = problem.names()

    # train the model and get the predicted values
    model = pipeline.make_pipeline(StandardScaler(), linear_model.Ridge())
    model.fit(train[[x, y]].values, train[c].values)
    density = 30
    df = get_plot_data(model, int(density * 1.5))

    # check that the extremes and the midpoint are the expected values
    assert np.isclose([df.iloc[0][0]], [1592469368.6909447]), 'Unexpected value for low extreme'
    assert np.isclose([df.iloc[2024][0]], [2122632827.9627075]), 'Unexpected value for high extreme'
    assert np.isclose([df.iloc[1012][0]], [1857551098.326826]), 'Unexpected value for midpoint'

    # change this to 0 to see stdout and stderr
    assert 1

def test_init(building, parameters):
    """Test the initialization and functionality of an adaptive surrogate model."""
    problem = EPProblem(parameters)
    evaluator = EvaluatorEP(problem, building)
    inputs = sampling.dist_sampler(sampling.seeded_sampler, problem, 10)
    inputs = sampling.add_extremes(inputs, problem)

    # use the custom class to train our own surrogate model
    k = KirgingEval(reference=evaluator)
    k.do_infill(inputs)
    k.model.train()
    k.infill(5)

    # change this to 0 to see stdout and stderr
    assert 1
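
# ---------------------------------------------------------------------------
# Illustrative sketch only: the tests above rely on pytest fixtures
# (building, problem, parameters, samples, hub, hub_problem, energyplus_df)
# defined elsewhere in this suite, typically in a conftest.py. The commented
# block below shows one plausible shape for the EnergyPlus-side fixtures,
# built only from calls that already appear in these tests plus assumed
# besos.parameters imports; the selector names, ranges, and import paths are
# assumptions, not this suite's actual configuration. It is left commented
# out so it does not override the real fixtures if pasted into this module.
#
# import pytest
# from besos import eppy_funcs as ef, sampling
# from besos.parameters import FieldSelector, Parameter, RangeParameter
# from besos.problem import EPProblem
#
#
# @pytest.fixture
# def building():
#     # a small example EnergyPlus model, as in test_flexibility above
#     return ef.get_idf()
#
#
# @pytest.fixture
# def parameters():
#     # hypothetical single parameter: wall insulation thickness
#     return [
#         Parameter(FieldSelector(class_name='Material',
#                                 object_name='Mass NonRes Wall Insulation',
#                                 field_name='Thickness'),
#                   value_descriptor=RangeParameter(0.01, 0.99),
#                   name='Thickness'),
#     ]
#
#
# @pytest.fixture
# def problem(parameters):
#     return EPProblem(inputs=parameters,
#                      outputs=['Electricity:Facility', 'Gas:Facility'])
#
#
# @pytest.fixture
# def samples(problem):
#     # the seeded sampler keeps the expected values in the asserts reproducible
#     return sampling.dist_sampler(sampling.seeded_sampler, problem, 10)
#
# The hub, hub_problem, and energyplus_df fixtures used by the EvaluatorEH
# tests are omitted here because they depend on the PyEHub model shipped
# with this suite.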