Example #1
import os

import numpy as np
import pandas as pd

# NOTE: the imports below are assumed CEA (City Energy Analyst) module paths;
# adjust them to match your installation.
import cea.demand.demand_main as demand_main
from cea.demand.calibration.latin_sampler import latin_sampler
from cea.demand.metamodel.nn_generator.input_prepare import input_prepare_main


def sampling_scaler(locator, random_variables, target_parameters, boolean_vars,
                    list_building_names, number_samples_scaler, nn_delay, gv,
                    config, climatic_variables, year, use_daysim_radiation,
                    use_stochastic_occupancy, region):
    '''
    This function creates a number of random samples for the entire district (city).
    :param locator: points to the file paths of the scenario
    :param random_variables: a list containing the names of variables associated with uncertainty (can be accessed from 'nn_settings.py')
    :param target_parameters: a list containing the names of the desired outputs (can be accessed from 'nn_settings.py')
    :param boolean_vars: a list of variables to be written as 'True'/'False' in the overrides file
    :param list_building_names: a list containing the names of the desired buildings
    :param number_samples_scaler: the number of district-level samples to generate
    :param gv: global variables
    :return: -
    '''

    #   get number of buildings
    size_city = np.shape(list_building_names)[0]
    #   create random samples of the entire district
    for i in range(number_samples_scaler):  # "number_samples_scaler" is set in 'nn_settings.py'
        # create list of samples with a LHC sampler and save to disk
        samples, samples_norm, pdf_list = latin_sampler(
            locator, size_city, random_variables, region)

        # create a file of overrides with the samples
        dictionary = dict(zip(random_variables, samples.transpose()))
        overrides_dataframe = pd.DataFrame(dictionary)
        overrides_dataframe['Name'] = list_building_names

        # replace the 1/0 flags with 'True'/'False' strings
        for var in boolean_vars:
            overrides_dataframe[var].replace(1, 'True', inplace=True)
            overrides_dataframe[var].replace(0, 'False', inplace=True)
            overrides_dataframe[var].replace(0.0, 'False', inplace=True)

        # save the file so the demand calculation can pick it up
        overrides_dataframe.to_csv(locator.get_building_overrides())

        # run cea demand
        config.demand.override_variables = True
        demand_main.demand_calculation(locator, config)
        urban_input_matrix, urban_target_matrix = input_prepare_main(
            list_building_names, locator, target_parameters, nn_delay,
            climatic_variables, config.region, year, use_daysim_radiation,
            use_stochastic_occupancy)

        scaler_inout_path = locator.get_minmaxscaler_folder()
        file_path_inputs = os.path.join(scaler_inout_path,
                                        "input%(i)s.csv" % locals())
        data_file_inputs = pd.DataFrame(urban_input_matrix)
        data_file_inputs.to_csv(file_path_inputs, header=False, index=False)

        file_path_targets = os.path.join(scaler_inout_path,
                                         "target%(i)s.csv" % locals())
        data_file_targets = pd.DataFrame(urban_target_matrix)
        data_file_targets.to_csv(file_path_targets, header=False, index=False)
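The heart of sampling_scaler is the Latin Hypercube step plus the overrides file. The snippet below is a minimal, self-contained sketch of that pattern: simple_latin_sampler is a plain stratified sampler over [0, 1) standing in for CEA's latin_sampler (which samples from fitted probability distributions), and the variable and building names are hypothetical.

import numpy as np
import pandas as pd

def simple_latin_sampler(n_buildings, n_variables, seed=0):
    # one stratified uniform draw per building and variable: split [0, 1)
    # into n_buildings strata, draw once per stratum, then shuffle the
    # strata independently for each variable
    rng = np.random.default_rng(seed)
    strata = (np.arange(n_buildings) +
              rng.random((n_variables, n_buildings))) / n_buildings
    for row in strata:
        rng.shuffle(row)
    return strata.transpose()  # shape: (n_buildings, n_variables)

# hypothetical variable and building names, for illustration only
random_variables = ['U_win', 'ECONOMIZER']
boolean_vars = ['ECONOMIZER']
list_building_names = ['B01', 'B02', 'B03']

samples = simple_latin_sampler(len(list_building_names), len(random_variables))
overrides = pd.DataFrame(dict(zip(random_variables, samples.transpose())))
overrides[boolean_vars] = (overrides[boolean_vars] > 0.5).astype(str)  # 'True'/'False'
overrides['Name'] = list_building_names
print(overrides)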
Example #2
import pickle

import numpy as np
import pandas as pd
from geopandas import GeoDataFrame as Gdf

# NOTE: assumed CEA module paths; 'boolean_vars' is defined in 'nn_settings.py'.
# Adjust the paths to match your installation.
import cea.demand.demand_main as demand_main
from cea.demand.calibration.latin_sampler import latin_sampler
from cea.demand.calibration.calibration_sampling import apply_sample_parameters
from cea.demand.metamodel.nn_generator.input_prepare import input_prepare_main
from cea.demand.metamodel.nn_generator.nn_settings import boolean_vars


def sampling_single(locator, random_variables, target_parameters,
                    list_building_names, gv, config, nn_delay,
                    climatic_variables, region, year, use_daysim_radiation,
                    use_stochastic_occupancy):
    # get number of buildings
    size_city = np.shape(list_building_names)[0]

    bld_counter = 0
    # create list of samples with a LHC sampler and save to disk (*.csv)
    samples, samples_norm, pdf_list = latin_sampler(locator, size_city,
                                                    random_variables, region)
    # save the raw samples to disk (once; the array does not change per building)
    np.save(locator.get_calibration_folder(), samples)
    for building_name in list_building_names:
        building_load = config.single_calibration.load
        override_file = Gdf.from_file(
            locator.get_zone_geometry()).set_index('Name')
        override_file = pd.DataFrame(index=override_file.index)
        problem = {
            'variables': random_variables,
            'building_load': target_parameters,
            'probability_vars': pdf_list
        }
        # 'file()' is Python 2 only; use a context-managed binary open for pickling
        with open(locator.get_calibration_problem(building_name, building_load),
                  'wb') as problem_file:
            pickle.dump(problem, problem_file)
        sample = np.asarray(list(zip(random_variables, samples[bld_counter, :])))
        apply_sample_parameters(locator, sample, override_file)
        bld_counter += 1
    # read the saved *.csv file and replace numeric flags with 'True'/'False'
    overwritten = pd.read_csv(locator.get_building_overrides())
    bld_counter = 0
    for building_name in list_building_names:
        sample = np.asarray(list(zip(random_variables, samples[bld_counter, :])))
        for boolean_mask in boolean_vars:
            # locate the row of this boolean variable among the (name, value) pairs
            indices = np.where(sample == boolean_mask)

            if sample[indices[0], 1] == '0.0':
                sample[indices[0], 1] = 'False'
            else:
                sample[indices[0], 1] = 'True'

        overwritten.loc[overwritten.Name == building_name,
                        random_variables] = sample[:, 1]
        bld_counter += 1

    # write to csv format
    overwritten.to_csv(locator.get_building_overrides())

    #   run cea demand
    demand_main.demand_calculation(locator, config)

    #   prepare the inputs for feeding into the neural network
    urban_input_matrix, urban_target_matrix = input_prepare_main(
        list_building_names, locator, target_parameters, gv, nn_delay,
        climatic_variables, region, year, use_daysim_radiation,
        use_stochastic_occupancy)

    return urban_input_matrix, urban_target_matrix
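The boolean replacement in sampling_single depends on np.asarray coercing the (name, value) pairs into a 2-D array of strings, so a sampled 0.0 becomes the text '0.0'. Here is a minimal sketch of that masking step with hypothetical variable names:

import numpy as np

random_variables = ['U_win', 'ECONOMIZER']   # hypothetical names
boolean_vars = ['ECONOMIZER']
row_samples = np.array([0.37, 0.0])          # one building's sampled values

# coerced to strings: [['U_win', '0.37'], ['ECONOMIZER', '0.0']]
sample = np.asarray(list(zip(random_variables, row_samples)))

for boolean_mask in boolean_vars:
    # find the row holding this variable, then rewrite its value column
    indices = np.where(sample == boolean_mask)
    if sample[indices[0], 1] == '0.0':
        sample[indices[0], 1] = 'False'
    else:
        sample[indices[0], 1] = 'True'

print(sample)  # the ECONOMIZER value is now 'False'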
Example #3
import os

import numpy as np
import pandas as pd

# NOTE: assumed CEA module paths; 'number_samples' and 'boolean_vars' are
# defined in 'nn_settings.py'. Adjust the paths to match your installation.
import cea.demand.demand_main as demand_main
from cea.demand.calibration.latin_sampler import latin_sampler
from cea.demand.metamodel.nn_generator.input_prepare import input_prepare_main
from cea.demand.metamodel.nn_generator.input_dropout import input_dropout
from cea.demand.metamodel.nn_generator.nn_settings import (boolean_vars,
                                                           number_samples)


def sampling_main(locator, random_variables, target_parameters,
                  list_building_names, weather_path, gv, multiprocessing,
                  config, nn_delay, climatic_variables, region, year,
                  use_daysim_radiation):
    '''
    This function creates a number of random samples for the entire district (city).
    :param locator: points to the file paths of the scenario
    :param random_variables: a list containing the names of variables associated with uncertainty (can be accessed from 'nn_settings.py')
    :param target_parameters: a list containing the names of the desired outputs (can be accessed from 'nn_settings.py')
    :param list_building_names: a list containing the names of the desired buildings
    :param weather_path: path to the weather file
    :param gv: global variables
    :return: -
    '''

    #   get number of buildings
    size_city = np.shape(list_building_names)[0]
    #   create random samples of the entire district
    for i in range(number_samples):  # "number_samples" is set in 'nn_settings.py'
        # create list of samples with a LHC sampler and save to disk
        samples, samples_norm, pdf_list = latin_sampler(
            locator, size_city, random_variables, region)

        # create a file of overrides with the samples
        dictionary = dict(zip(random_variables, samples.transpose()))
        overrides_dataframe = pd.DataFrame(dictionary)
        overrides_dataframe['Name'] = list_building_names

        # replace the 1/0 flags with "True"/"False" strings
        for var in boolean_vars:
            overrides_dataframe[var].replace(1, "True", inplace=True)
            overrides_dataframe[var].replace(0, "False", inplace=True)
            overrides_dataframe[var].replace(0.0, "False", inplace=True)

        # save the file so the demand calculation can pick it up
        overrides_dataframe.to_csv(locator.get_building_overrides())

        #   run cea demand
        config.demand.override_variables = True
        demand_main.demand_calculation(locator, gv, config)
        #   prepare the inputs for feeding into the neural network
        urban_input_matrix, urban_target_matrix = input_prepare_main(
            list_building_names, locator, target_parameters, gv, nn_delay,
            climatic_variables, region, year, use_daysim_radiation)
        #   drop half the inputs and targets to avoid overfitting and save RAM / disk space
        urban_input_matrix, urban_target_matrix = input_dropout(
            urban_input_matrix, urban_target_matrix)
        #   get the path for saving the files
        nn_inout_path = locator.get_nn_inout_folder()
        #   save inputs with sequential naming
        file_path_inputs = os.path.join(nn_inout_path,
                                        "input%(i)s.csv" % locals())
        data_file_inputs = pd.DataFrame(urban_input_matrix)
        data_file_inputs.to_csv(file_path_inputs, header=False, index=False)
        #   save targets with sequential naming
        file_path_targets = os.path.join(nn_inout_path,
                                         "target%(i)s.csv" % locals())
        data_file_targets = pd.DataFrame(urban_target_matrix)
        data_file_targets.to_csv(file_path_targets, header=False, index=False)
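input_dropout itself is not shown on this page; per the comment above, it discards half of the paired inputs and targets. A minimal sketch of that idea, assuming the two matrices are row-aligned (the real implementation may differ):

import numpy as np

def input_dropout_sketch(inputs, targets, keep_fraction=0.5, seed=0):
    # drop the same random rows from both matrices so pairs stay aligned
    rng = np.random.default_rng(seed)
    n_rows = inputs.shape[0]
    keep = rng.permutation(n_rows)[:int(n_rows * keep_fraction)]
    return inputs[keep], targets[keep]

inputs = np.arange(20).reshape(10, 2)   # toy data
targets = np.arange(10).reshape(10, 1)
half_inputs, half_targets = input_dropout_sketch(inputs, targets)
print(half_inputs.shape, half_targets.shape)  # (5, 2) (5, 1)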