Code example #1
# os, numpy and pandas are used directly below; the CEA helpers (latin_sampler,
# demand_main, input_prepare_main) are imported elsewhere in the source module.
import os

import numpy as np
import pandas as pd


def sampling_scaler(locator, random_variables, target_parameters, boolean_vars,
                    list_building_names, number_samples_scaler, nn_delay, gv,
                    config, climatic_variables, year, use_daysim_radiation,
                    use_stochastic_occupancy, region):
    '''
    This function creates a number of random samples for the entire district (city).

    :param locator: points to the input/output file paths
    :param random_variables: names of the variables associated with uncertainty (see 'nn_settings.py')
    :param target_parameters: names of the desired outputs (see 'nn_settings.py')
    :param boolean_vars: names of the variables whose samples must be written as True/False
    :param list_building_names: names of the desired buildings
    :param number_samples_scaler: number of district-level samples to create (see 'nn_settings.py')
    :param gv: global variables
    :return: -
    '''

    #   get number of buildings
    size_city = np.shape(list_building_names)
    size_city = size_city[0]
    #   create random samples of the entire district
    for i in range(number_samples_scaler):  # "number_samples_scaler" is set in 'nn_settings.py'
        # create list of samples with a LHC sampler and save to disk
        samples, samples_norm, pdf_list = latin_sampler(
            locator, size_city, random_variables, region)

        # create a file of overrides with the samples
        dictionary = dict(zip(random_variables, samples.transpose()))
        overrides_dataframe = pd.DataFrame(dictionary)
        overrides_dataframe['Name'] = list_building_names

        # replace the sampled 1/0 flags with the strings 'True'/'False'
        for var in boolean_vars:
            overrides_dataframe[var].replace(1, 'True', inplace=True)
            overrides_dataframe[var].replace(0, 'False', inplace=True)
            overrides_dataframe[var].replace(0.0, 'False', inplace=True)

        # save the file so the demand calculation picks up the overrides
        overrides_dataframe.to_csv(locator.get_building_overrides())

        # run cea demand
        config.demand.override_variables = True
        demand_main.demand_calculation(locator, config)
        urban_input_matrix, urban_target_matrix = input_prepare_main(
            list_building_names, locator, target_parameters, nn_delay,
            climatic_variables, config.region, year, use_daysim_radiation,
            use_stochastic_occupancy)

        scaler_inout_path = locator.get_minmaxscaler_folder()
        file_path_inputs = os.path.join(scaler_inout_path,
                                        "input%(i)s.csv" % locals())
        data_file_inputs = pd.DataFrame(urban_input_matrix)
        data_file_inputs.to_csv(file_path_inputs, header=False, index=False)

        file_path_targets = os.path.join(scaler_inout_path,
                                         "target%(i)s.csv" % locals())
        data_file_targets = pd.DataFrame(urban_target_matrix)
        data_file_targets.to_csv(file_path_targets, header=False, index=False)
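These snippets lean on the CEA helper latin_sampler for the Latin-hypercube draw. As a rough illustration of what the normalized samples (samples_norm) look like, here is a minimal Latin-hypercube sketch in plain numpy; the function minimal_lhs is hypothetical and is not the CEA implementation:

import numpy as np

def minimal_lhs(n_samples, n_vars, seed=0):
    # one stratified draw per variable and per interval of [0, 1)
    rng = np.random.default_rng(seed)
    strata = (np.arange(n_samples)[:, None] + rng.random((n_samples, n_vars))) / n_samples
    # permute each column independently so the strata are decorrelated across variables
    for j in range(n_vars):
        strata[:, j] = rng.permutation(strata[:, j])
    return strata  # normalized samples in [0, 1); map through each variable's CDF afterwards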
Code example #2
# As in example #1, numpy (np), pandas (pd), pickle, geopandas (Gdf) and the CEA
# helpers are module-level imports; "boolean_vars" comes from 'nn_settings.py'.
def sampling_single(locator, random_variables, target_parameters,
                    list_building_names, gv, config, nn_delay,
                    climatic_variables, region, year, use_daysim_radiation,
                    use_stochastic_occupancy):
    size_city = np.shape(list_building_names)
    size_city = size_city[0]

    # create a list of samples with a LHC sampler and save it to disk (*.npy)
    samples, samples_norm, pdf_list = latin_sampler(locator, size_city,
                                                    random_variables, region)
    np.save(locator.get_calibration_folder(), samples)  # saved once; identical for every building
    building_load = config.single_calibration.load
    bld_counter = 0
    for building_name in list_building_names:
        override_file = Gdf.from_file(
            locator.get_zone_geometry()).set_index('Name')
        override_file = pd.DataFrame(index=override_file.index)
        problem = {
            'variables': random_variables,
            'building_load': target_parameters,
            'probabiltiy_vars': pdf_list
        }
        pickle.dump(
            problem,
            open(locator.get_calibration_problem(building_name, building_load),
                 'wb'))
        sample = np.asarray(list(zip(random_variables, samples[bld_counter, :])))
        apply_sample_parameters(locator, sample, override_file)
        bld_counter += 1
    # read the saved overrides *.csv and replace the numeric flags with True/False
    overwritten = pd.read_csv(locator.get_building_overrides())
    bld_counter = 0
    for building_name in list_building_names:
        sample = np.asarray(list(zip(random_variables, samples[bld_counter, :])))
        for boolean_mask in boolean_vars:
            indices = np.where(sample == boolean_mask)

            if sample[indices[0], 1] == '0.0':
                sample[indices[0], 1] = 'False'
            else:
                sample[indices[0], 1] = 'True'

        overwritten.loc[overwritten.Name == building_name,
                        random_variables] = sample[:, 1]
        bld_counter += 1

    # write to csv format
    overwritten.to_csv(locator.get_building_overrides())

    #   run cea demand
    demand_main.demand_calculation(locator, config)

    #   prepare the inputs for feeding into the neural network
    urban_input_matrix, urban_target_matrix = input_prepare_main(
        list_building_names, locator, target_parameters, gv, nn_delay,
        climatic_variables, region, year, use_daysim_radiation,
        use_stochastic_occupancy)

    return urban_input_matrix, urban_target_matrix
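The pickled "problem" file written above can be read back for inspection. A minimal sketch of such a read-back, reusing the locator helper and names already in the snippet (hypothetical usage, not additional CEA API):

import pickle

with open(locator.get_calibration_problem(building_name, building_load), 'rb') as f:
    problem = pickle.load(f)
print(problem['variables'], problem['probabiltiy_vars'])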
Code example #3
# pandas (pd), geopandas (Gdf), pickle and the CEA helpers (latin_sampler,
# simulate_demand_sample, calc_cv_rmse) are module-level imports in the source file.
def sampling_main(locator, config):
    """
    This script creates samples using a lating Hypercube sample of 5 variables of interest.
    then runs the demand calculation of CEA for all the samples. It delivers a json file storing
    the results of cv_rmse and rmse for each sample.

    for more details on the work behind this please check:
    Rysanek A., Fonseca A., Schlueter, A. Bayesian calibration of Dyanmic building Energy Models. Applied Energy 2017.

    :param locator: pointer to location of CEA files
    :param variables: input variables of CEA to sample. They must be 6!
    :param building_name: name of building to calibrate
    :param building_load: name of building load to calibrate
    :return:
        1. a file storing values of cv_rmse and rmse for all samples. the file is sotred in
            file(locator.get_calibration_cvrmse_file(building_name)
        2 a file storing information about variables, the building_load and the probability distribtuions used in the
          excercise. the file is stored in locator.get_calibration_problem(building_name)

    :rtype: .json and .pkl
    """

    # Local variables

    number_samples = config.single_calibration.samples
    variables = config.single_calibration.variables
    building_name = config.single_calibration.building
    building_load = config.single_calibration.load
    override_file = Gdf.from_file(
        locator.get_zone_geometry()).set_index('Name')
    override_file = pd.DataFrame(index=override_file.index)
    region = config.region

    # Generate latin hypercube samples
    latin_samples, latin_samples_norm, distributions = latin_sampler.latin_sampler(
        locator, number_samples, variables, region)

    # Run the demand calculation for every Latin sample
    cv_rmse_list = []
    rmse_list = []
    for i in range(number_samples):

        # create a list of tuples pairing each variable with its sample value
        sample = list(zip(variables, latin_samples[i, :]))

        # create overrides and return a pointer to the files
        apply_sample_parameters(locator, sample, override_file)

        # run the cea demand simulation for this sample
        simulation = simulate_demand_sample(locator, building_name,
                                            building_load, config)

        # calculate cv_rmse against the measured load
        measured = pd.read_csv(
            locator.get_demand_measured_file(building_name))[building_load +
                                                             "_kWh"].values
        cv_rmse, rmse = calc_cv_rmse(simulation, measured)

        cv_rmse_list.append(cv_rmse)
        rmse_list.append(rmse)
        print("The cv_rmse for this iteration is:", cv_rmse)

    # Collect the problem definition and results, and save them to disk as a pickle
    problem = {
        'variables': variables,
        'building_load': building_load,
        'probabiltiy_vars': distributions,
        'samples': latin_samples,
        'samples_norm': latin_samples_norm,
        'cv_rmse': cv_rmse_list,
        'rmse': rmse_list
    }
    pickle.dump(
        problem,
        open(locator.get_calibration_problem(building_name, building_load),
             'wb'))
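calc_cv_rmse itself is not shown here. Under the usual ASHRAE Guideline 14 definitions (CV(RMSE) is the RMSE divided by the mean of the measured series), a minimal version matching the (cv_rmse, rmse) return order used above could look like this sketch:

import numpy as np

def calc_cv_rmse(simulated, measured):
    # root-mean-square error between the simulated and measured hourly loads
    simulated = np.asarray(simulated, dtype=float)
    measured = np.asarray(measured, dtype=float)
    rmse = np.sqrt(np.mean((simulated - measured) ** 2))
    # coefficient of variation of the RMSE, normalized by the measured mean
    cv_rmse = rmse / measured.mean()
    return cv_rmse, rmse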
Code example #4
# As in example #1, os, numpy (np), pandas (pd) and the CEA helpers are module-level
# imports; "number_samples" and "boolean_vars" come from 'nn_settings.py'.
def sampling_main(locator, random_variables, target_parameters,
                  list_building_names, weather_path, multiprocessing, config,
                  nn_delay, climatic_variables, year):
    '''
    This function creates a number of random samples for the entire district (city).

    :param locator: points to the input/output file paths
    :param random_variables: names of the variables associated with uncertainty (see 'nn_settings.py')
    :param target_parameters: names of the desired outputs (see 'nn_settings.py')
    :param list_building_names: names of the desired buildings
    :param weather_path: path to the weather file
    :return: -
    '''

    #   get number of buildings
    size_city = np.shape(list_building_names)
    size_city = size_city[0]
    #   create random samples of the entire district
    for i in range(number_samples):  # "number_samples" is set in 'nn_settings.py'
        # create a list of samples with a LHC sampler and save it to disk
        samples, samples_norm, pdf_list = latin_sampler(
            locator, size_city, random_variables)

        # create a file of overrides with the samples
        dictionary = dict(zip(random_variables, samples.transpose()))
        overrides_dataframe = pd.DataFrame(dictionary)
        overrides_dataframe['Name'] = list_building_names

        # replace the sampled 1/0 flags with the strings "True"/"False"
        for var in boolean_vars:
            overrides_dataframe[var].replace(1, "True", inplace=True)
            overrides_dataframe[var].replace(0, "False", inplace=True)
            overrides_dataframe[var].replace(0.0, "False", inplace=True)

        # save the file so the demand calculation picks up the overrides
        overrides_dataframe.to_csv(locator.get_building_overrides())

        #   run cea demand
        config.demand.override_variables = True
        demand_main.demand_calculation(locator, config)
        #   prepare the inputs for feeding into the neural network
        urban_input_matrix, urban_taget_matrix = input_prepare_main(
            list_building_names, locator, target_parameters, nn_delay,
            climatic_variables, year)
        #   drop half the inputs and targets to avoid overfitting and save RAM / Disk space
        urban_input_matrix, urban_taget_matrix = input_dropout(
            urban_input_matrix, urban_taget_matrix)
        #   get the pathfor saving the files
        nn_inout_path = locator.get_nn_inout_folder()
        #   save inputs with sequential naming
        file_path_inputs = os.path.join(nn_inout_path,
                                        "input%(i)s.csv" % locals())
        data_file_inputs = pd.DataFrame(urban_input_matrix)
        data_file_inputs.to_csv(file_path_inputs, header=False, index=False)
        #   save inputs with sequential naming
        file_path_targets = os.path.join(nn_inout_path,
                                         "target%(i)s.csv" % locals())
        data_file_targets = pd.DataFrame(urban_taget_matrix)
        data_file_targets.to_csv(file_path_targets, header=False, index=False)
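input_dropout is only described by its comment above ("drop half the inputs and targets to avoid overfitting and save RAM / disk space"). A minimal sketch of such a paired row subsampler, written under that assumption and not taken from the CEA implementation:

import numpy as np

def input_dropout(inputs, targets, keep_fraction=0.5, seed=0):
    # keep the same random subset of rows in both matrices so pairs stay aligned
    rng = np.random.default_rng(seed)
    n_rows = inputs.shape[0]
    keep = rng.choice(n_rows, size=int(n_rows * keep_fraction), replace=False)
    return inputs[keep], targets[keep]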