Пример #1
0
    def _getSamples(w):
        """Sample the parameter distributions and simulate energy use.

        Widget callback (``w`` is the triggering widget and is unused).
        On first call, lazily builds the IDF model from the selected
        building/idd/epw files; every call then draws ``samplesBox.value``
        samples with the currently selected sampler, evaluates them, and
        displays the resulting DataFrame.
        """
        nonlocal idf  # IDF model cached in the enclosing scope; built once below
        global df     # module-level results table, replaced on every call
        if idf is None:
            # first call: load the EnergyPlus model from the chosen files
            idf = ef.get_idf(fm.getFile('building'), fm.getFile('idd'),
                             fm.getFile('epw'))
        with results:  # NOTE(review): presumably an ipywidgets Output area — confirm
            print('Simulating...')
            df = sampling.dist_sampler(samplerType.value, transformers,
                                       samplesBox.value)
            # run the simulation for each sampled row and attach the result
            df['energy use'] = ef.Evaluator(transformers, idf).df_apply(df)
            print('Done')

            display(df)
def test_init(building, parameters):
    """Testing the initialization and functionality of an adaptive surrogate model"""

    problem = EPProblem(parameters)
    evaluator = EvaluatorEP(problem, building)

    # seeded sample set, augmented with the parameter-space extremes
    training_inputs = sampling.add_extremes(
        sampling.dist_sampler(sampling.seeded_sampler, problem, 10),
        problem,
    )

    # drive the custom surrogate-training class through one train/infill cycle
    surrogate = KirgingEval(reference=evaluator)
    surrogate.do_infill(training_inputs)
    surrogate.model.train()
    surrogate.infill(5)

    #change this to 0 to see stdout and stderr
    assert 1
Пример #3
0
def test_objectives(building, parameters):
    """Testing custom functions and basic objective creation"""

    def variance(result):
        # custom aggregation applied to the raw meter output
        return result.data['Value'].var()

    objectives = [
        MeterReader('Electricity:Facility', name='Electricity Usage'),
        MeterReader('Electricity:Facility', func=variance, name='Electricity Variance'),
    ]
    problem = EPProblem(inputs=parameters, outputs=objectives)
    evaluator = EvaluatorEP(problem, building)

    # evaluate a seeded sample set, keeping inputs alongside the outputs
    sample_df = sampling.dist_sampler(sampling.seeded_sampler, problem, 10)
    result_df = evaluator.df_apply(sample_df, keep_input=True)
    value = result_df.iloc[0]['Electricity Variance']

    assert np.isclose(value, 829057663033101.1), f'Unexpected value when using custom function:{value}'
    #change this to 0 to see stdout and stderr
    assert 1
Пример #4
0
def test_custom_evaluation():
    """check to see if descriptors display as intended, and check to make sure custom evaluations work with EvaluatorSR"""

    # descriptors: a continuous range, a discrete numeric set, and a text set
    zero_to_nine = RangeParameter(min_val=0, max_val=9)
    single_digit_integers = CategoryParameter(options=list(range(10)))
    text_example = CategoryParameter(options=['a', 'b', 'c', 'other'])

    # wrap the descriptors into named parameters and a single-output problem
    problem = Problem(
        [
            Parameter(value_descriptor=zero_to_nine, name='0-9'),
            Parameter(value_descriptor=single_digit_integers, name='single digit'),
            Parameter(value_descriptor=text_example, name='text'),
        ],
        outputs=['output'],
    )

    # seeded draw of one sample; expected row is 4.939321535345923 / 7 / 'c'
    samples = sampling.dist_sampler(sampling.seeded_sampler, problem, num_samples=1)
    row = samples.iloc[0]
    values = [row['0-9'], row['single digit'], row['text']]
    assert np.isclose(values[0], 4.939321535345923) and values[1:] == [7, 'c'], f'Unexpected sample values: {values}'

    # custom evaluation function from the jupyter notebook
    def evaluation_function(values):
        x, y, z = values
        return ((0,), ()) if z == 'other' else ((x * y,), ())

    # the evaluator uses the problem's single objective by default
    evaluator = EvaluatorSR(evaluation_function, problem)
    outputs = evaluator.df_apply(samples, keep_input=True)
    result = outputs.iloc[0]['output']
    print(result)
    assert np.isclose(result, 34.57525074742146), f'Unexpected evaluation result: {result}'

    #change this to 0 to see stdout and stderr
    assert 1
Пример #5
0
def samples(problem):
    """Return 10 seeded samples drawn from the problem's distributions."""
    return sampling.dist_sampler(sampling.seeded_sampler, problem, 10)
def samples(problem):
    """Return 5 seeded samples for the problem.

    seeded_sampler results in indexes 0-3 being Invalid Material and 4
    being HW CONCRETE.
    """
    return sampling.dist_sampler(sampling.seeded_sampler, problem, 5)