# Imports assumed by these test examples (besos, scikit-learn, numpy, pandas);
# exact besos module paths may vary between library versions.
import numpy as np
import pandas as pd
from sklearn import linear_model, pipeline
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from besos import optimizer, sampling
from besos.evaluator import EvaluatorEH, EvaluatorEP, EvaluatorSR
from besos.objectives import MeterReader
from besos.problem import EPProblem


def test_error_values(building, problem, samples):
    """check that automatic error handling will assign the desired error values"""

    #check that the default error value is assigned to invalid materials
    evaluator = EvaluatorEP(problem, building, error_mode='Silent')
    results = evaluator.df_apply(samples)
    value = results.iloc[0]['Electricity:Facility']
    assert value == np.inf, f'Invalid material not assigned the default error value, value assigned was: {value}'

    #check that a custom error value is assigned to invalid materials
    #(error_value follows the same (objective values, constraint values) format used by evaluators)
    error_value = ((-1, ), ())
    evaluator = EvaluatorEP(problem,
                            building,
                            error_mode='Silent',
                            error_value=error_value)
    results = evaluator.df_apply(samples)
    value = results.iloc[0]['Electricity:Facility']
    assert value == -1, f'Invalid material not assigned the correct error value, value assigned was: {value}'

    #check that valid inputs aren't assigned error values
    value = results.iloc[4]['Electricity:Facility']
    assert value != -1, f'Valid material was assigned the error value: {error_value}'

    #change this to 0 to see stdout and stderr
    assert 1
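The building, problem, and samples arguments are pytest fixtures defined elsewhere in the test suite. A minimal sketch of what they might look like, assuming the example building bundled with besos and a single hypothetical insulation-thickness parameter (the real conftest.py differs; for instance, it must include an invalid material so that row 0 in the test above fails):

import pytest
from besos import eppy_funcs as ef, sampling
from besos.parameters import FieldSelector, Parameter, RangeParameter
from besos.problem import EPProblem

@pytest.fixture
def building():
    # example building shipped with besos
    return ef.get_building()

@pytest.fixture
def problem():
    # hypothetical single input; EPProblem defaults to an Electricity:Facility objective
    inputs = [Parameter(FieldSelector(object_name='Mass NonRes Wall Insulation',
                                      field_name='Thickness'),
                        value_descriptor=RangeParameter(0.01, 0.99))]
    return EPProblem(inputs=inputs)

@pytest.fixture
def samples(problem):
    # seeded sampling keeps the expected values in the asserts reproducible
    return sampling.dist_sampler(sampling.seeded_sampler, problem, 5)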
Example #2
def test_optimisation_with_surrogate(building, problem, samples):
    """make sure we can run optimisation algorithms on the fit surrogates"""

    evaluator = EvaluatorEP(problem, building)
    outputs = evaluator.df_apply(samples)

    train_in, test_in, train_out, test_out = train_test_split(samples, outputs, test_size=0.2)

    reg = linear_model.LinearRegression()
    reg.fit(train_in, train_out)

    def evaluation_func(ind):
        # the surrogate evaluation function returns a tuple of objective values
        # and a tuple of constraint values (no constraints in this problem)
        return ((reg.predict([ind])[0][0], ), ())

    surrogate = EvaluatorSR(evaluation_func, problem)

    s = optimizer.NSGAII(surrogate, 1000)

    optimal = s.iloc[0]['pareto-optimal']
    non_optimal = s.iloc[1]['pareto-optimal']

    assert optimal, 'Optimal output was displayed as not Pareto optimal'
    assert not non_optimal, 'Non-optimal output was displayed as Pareto optimal'

    #change this to 0 to see stdout and stderr
    assert 1
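optimizer.NSGAII returns its solutions as a DataFrame with a 'pareto-optimal' column (which the asserts above rely on), so the non-dominated designs can be filtered out directly; a small usage sketch, reusing the surrogate from the test above:

solutions = optimizer.NSGAII(surrogate, 1000)
# keep only the designs flagged as non-dominated by the surrogate-based search
pareto_front = solutions[solutions['pareto-optimal']]
print(pareto_front.head())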
Example #3
def test_evaluatorEH_EP_df(hub, hub_problem, building, problem, energyplus_df):
    """To make sure that dataframe EvaluatorEP output can be used in an EvaluatorEH"""
    evaluatorEP = EvaluatorEP(problem, building)
    evaluatorEH = EvaluatorEH(hub_problem, hub)
    result = evaluatorEH.df_apply(evaluatorEP.df_apply(energyplus_df))

    expected = np.array([[2.721700e+09, 33.7551],
                         [2.705480e+09, 33.7551]])
    assert np.allclose(result.iloc[:2, :2], expected), f'Unexpected result for EvaluatorEH, {result}'
    #change this to 0 to see stdout and stderr
    assert 1
Example #4
def test_objectives(building, parameters):
    """Testing custom functions and basic objective creation"""

    def variance(result):
        return result.data['Value'].var()

    objectives = [MeterReader('Electricity:Facility', name='Electricity Usage'),
                  MeterReader('Electricity:Facility', func=variance, name='Electricity Variance')]
    problem = EPProblem(inputs=parameters, outputs=objectives)

    evaluator = EvaluatorEP(problem, building)
    samples = sampling.dist_sampler(sampling.seeded_sampler, problem, 10)
    results = evaluator.df_apply(samples, keep_input=True)

    value = results.iloc[0]['Electricity Variance']

    assert np.isclose(value, 829057663033101.1), f'Unexpected value when using custom function: {value}'
    #change this to 0 to see stdout and stderr
    assert 1
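The func argument of MeterReader takes a callable that reduces the meter's time series to a single value, as variance does above; another hypothetical aggregation in the same style:

def peak(result):
    # hypothetical custom objective: peak electricity demand over the run period
    return result.data['Value'].max()

objectives.append(MeterReader('Electricity:Facility', func=peak, name='Electricity Peak'))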
Example #5
def test_fit(building, problem, samples):
    """check to make sure linear regression works and is close to the actual value"""

    evaluator = EvaluatorEP(problem, building)
    outputs = evaluator.df_apply(samples)

    train_in, test_in, train_out, test_out = train_test_split(samples, outputs, test_size=0.2)

    reg = linear_model.LinearRegression()
    reg.fit(train_in, train_out)
    results = test_in.copy()
    results['energy use'] = test_out
    results['predicted'] = reg.predict(test_in)

    actual = results.iloc[0]['energy use']
    predicted = results.iloc[0]['predicted']
    assert np.isclose(actual - predicted, 8666133.340038776), f'Unexpected difference of value: {actual - predicted}'

    #change this to 0 to see stdout and stderr
    assert 1
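The single-row comparison above depends on which rows land in the random test split; a complementary check over the whole held-out set (a sketch using scikit-learn's metrics) would be:

from sklearn.metrics import mean_absolute_error, r2_score

predictions = reg.predict(test_in)
print('MAE:', mean_absolute_error(test_out, predictions))
print('R^2:', r2_score(test_out, predictions))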
Example #6
def test_expected_values(building, problem, samples):
    """check that the obtained results are consistent when using the same inputs"""
    def get_plot_data(model, density):
        #helper function from the example notebook
        p1 = problem.inputs[0].value_descriptor
        a = np.linspace(p1.min, p1.max, density)
        p2 = problem.inputs[1].value_descriptor
        b = np.linspace(p2.min, p2.max, density)
        # build a dense (density x density) grid over the two input ranges and
        # attach the model's prediction at every grid point
        plot_data = pd.DataFrame(np.transpose([np.tile(a, len(b)), np.repeat(b, len(a))]),
                                 columns=problem.names('inputs'))
        return pd.concat([plot_data, pd.Series(model.predict(plot_data))], axis=1)

    evaluator = EvaluatorEP(problem, building, error_mode='Silent')
    train = evaluator.df_apply(samples, keep_input=True)
    print(problem.names())
    x, y, c = problem.names()

    #train and get the values
    model = pipeline.make_pipeline(StandardScaler(), linear_model.Ridge())
    model.fit(train[[x, y]].values, train[c].values)
    density = 30
    df = get_plot_data(model, int(density * 1.5))

    #check that the extremes and the midpoint of the prediction grid are the expected values
    #(density * 1.5 = 45, so the grid has 45 * 45 = 2025 rows: row 0 is the low corner,
    # row 2024 the high corner, and row 1012 the centre of the grid)
    assert np.isclose(df.iloc[0][0], 1592469368.6909447), 'Unexpected value for low extreme'
    assert np.isclose(df.iloc[2024][0], 2122632827.9627075), 'Unexpected value for high extreme'
    assert np.isclose(df.iloc[1012][0], 1857551098.326826), 'Unexpected value for midpoint'

    #change this to 0 to see stdout and stderr
    assert 1
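get_plot_data mirrors the helper from the example notebook, where the prediction grid is drawn as a response surface; a minimal plotting sketch (assuming matplotlib, and reusing df, x, y, and c from the test above; the predictions sit in the column labelled 0):

import matplotlib.pyplot as plt

# filled contour plot of the surrogate's predictions over the two input ranges
cs = plt.tricontourf(df[x], df[y], df[0])
plt.xlabel(x)
plt.ylabel(y)
plt.colorbar(cs, label='predicted ' + c)
plt.show()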