Example #1
def test_benchmark():
    locator = inputlocator.InputLocator(
        scenario_path=r'C:\reference-case-zug\baseline')
    locator_list = [locator, locator, locator, locator]
    output_file = os.path.expandvars(r'%TEMP%\test_benchmark.pdf')
    benchmark(locator_list=locator_list, output_file=output_file)
    print('test_benchmark() succeeded')
Example #2
def run_as_script():
    gv = cea.globalvar.GlobalVariables()
    scenario_path = gv.scenario_reference
    locator = inputlocator.InputLocator(scenario=scenario_path)

    building_name = 'B155066'  # intended building
    cluster_labels = ss_calibrator(building_name)
Example #3
def run_as_script(scenario_path=None):
    """
    run the whole network summary routine
    """
    import cea.globalvar
    import cea.inputlocator as inputlocator
    from geopandas import GeoDataFrame as gpdf
    from cea.utilities import epwreader
    from cea.resources import geothermal

    gv = cea.globalvar.GlobalVariables()

    if scenario_path is None:
        scenario_path = gv.scenario_reference

    locator = inputlocator.InputLocator(scenario_path=scenario_path)
    total_demand = pd.read_csv(locator.get_total_demand())
    building_names = total_demand['Name']
    weather_file = locator.get_default_weather()
    # add geothermal part of preprocessing
    T_ambient = epwreader.epw_reader(weather_file)['drybulb_C']
    gv.ground_temperature = geothermal.calc_ground_temperature(T_ambient.values, gv)
    #substation_main(locator, total_demand, total_demand['Name'], gv, False)

    t = 1000  # FIXME
    T_DH = 60  # FIXME
    network = 'DH'  # FIXME
    t_flag = True  # FIXME

    substations_HEX_specs, buildings = substation_HEX_design_main(locator, total_demand, building_names, gv)

    substation_return_model_main(locator, gv, building_names, buildings, substations_HEX_specs, T_DH, t, network, t_flag)

    print('substation_main() succeeded')
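
The geothermal preprocessing above reads hourly dry-bulb temperature from an EPW weather file through CEA's epwreader. As a rough, hypothetical stand-in with plain pandas, assuming only the standard EPW layout (8 header rows, dry-bulb temperature in degrees C as the seventh data field):

import pandas as pd

def read_drybulb_C(weather_file):
    # EPW files carry 8 header rows; data column 6 is dry-bulb temperature [C]
    data = pd.read_csv(weather_file, skiprows=8, header=None)
    return data[6]

# T_ambient = read_drybulb_C(locator.get_default_weather())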
Example #4
def run_as_script():
    import cea.globalvar
    import cea.inputlocator as inputlocator
    gv = cea.globalvar.GlobalVariables()
    scenario_path = gv.scenario_reference
    locator = inputlocator.InputLocator(scenario=scenario_path)
    output_parameters = ['QHf_MWhyr', 'QCf_MWhyr', 'Ef_MWhyr', 'QEf_MWhyr']
    method = 'sobol'
    samples = 1000
    graph(locator, output_parameters, method, samples)
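
graph() is assumed to visualize Sobol indices computed elsewhere in CEA. For orientation, a minimal, self-contained Sobol workflow with SALib; the problem definition and the one-line model below are illustrative, not CEA's:

from SALib.sample import saltelli
from SALib.analyze import sobol

problem = {'num_vars': 3,
           'names': ['U_win', 'U_wall', 'n50'],      # hypothetical inputs
           'bounds': [[0.9, 3.1], [0.2, 1.2], [1.0, 6.0]]}

param_values = saltelli.sample(problem, 1000)  # N * (2D + 2) rows for D variables
Y = param_values.sum(axis=1)                   # placeholder model output
Si = sobol.analyze(problem, Y)
print(Si['S1'], Si['ST'])                      # first-order and total indices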
Example #5
def run_as_script(config):

    scenario = config.scenario
    locator = inputlocator.InputLocator(scenario=scenario)

    # uncertain variables from the uncertainty database, selected through a
    # screening process; exactly five are required
    variables = ['U_win', 'U_wall', 'n50', 'Ths_set_C', 'Cm_Af']
    building_name = 'B155066'  # intended building
    building_load = 'Qhsf_kWh'  # target of prediction
    sampling_main(locator, variables, building_name, building_load, config)
Example #6
def run_as_script():
    import cea.globalvar
    import cea.inputlocator as inputlocator
    gv = cea.globalvar.GlobalVariables()
    scenario_path = gv.scenario_reference
    locator = inputlocator.InputLocator(scenario_path=scenario_path)
    weather_path = locator.get_default_weather()
    output_parameters = ['QHf_MWhyr', 'QCf_MWhyr', 'Ef_MWhyr', 'QEf_MWhyr']
    method = 'morris'
    groups_var = ['THERMAL']
    num_samples = 1000  # generally 1000 or until it converges
    sensitivity_main(locator, weather_path, gv, output_parameters, groups_var,
                     num_samples, method)
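
sensitivity_main() with method='morris' runs an elementary-effects screening; the mu_star and sigma measures it yields are what the screening process mentioned in Example #5 relies on to shortlist variables. A minimal sketch with a recent SALib (illustrative problem and placeholder model, not CEA's):

from SALib.sample.morris import sample as morris_sample
from SALib.analyze import morris

problem = {'num_vars': 3,
           'names': ['U_win', 'U_wall', 'n50'],      # hypothetical inputs
           'bounds': [[0.9, 3.1], [0.2, 1.2], [1.0, 6.0]]}

X = morris_sample(problem, N=1000, num_levels=4)
Y = X.sum(axis=1)                                 # placeholder model output
Si = morris.analyze(problem, X, Y, num_levels=4)
print(Si['mu_star'], Si['sigma'])                 # screening measures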
Example #7
def testing():
    import geopandas
    import cea.globalvar
    gv = cea.globalvar.GlobalVariables()
    from cea import inputlocator
    import time
    import cea.geometry.geometry_reader

    # generate windows based on geometry of vertical surfaces in radiation file
    locator = inputlocator.InputLocator(
        scenario_path=r'C:\reference-case\baseline')

    surface_properties = pd.read_csv(locator.get_surface_properties())
    gdf_building_architecture = geopandas.GeoDataFrame.from_file(
        locator.get_building_architecture()).drop('geometry',
                                                  axis=1).set_index('Name')
    prop_geometry = geopandas.GeoDataFrame.from_file(
        locator.get_building_geometry())
    prop_geometry['footprint'] = prop_geometry.area
    prop_geometry['perimeter'] = prop_geometry.length
    prop_geometry = prop_geometry.drop('geometry', axis=1).set_index('Name')
    df_windows = cea.geometry.geometry_reader.simple_window_generator.create_windows(
        surface_properties, gdf_building_architecture)

    building_test = 'B153737'  # note: 'B154767' has no windows
    # get building windows
    df_windows_building_test = df_windows.loc[df_windows['name_building'] ==
                                              building_test].to_dict('list')
    # get building geometry
    gdf_building_test = prop_geometry.loc[building_test]
    gdf_building_architecture = gdf_building_architecture.loc[building_test]

    r_window_arg = 0.1
    temp_ext = 5
    temp_zone = 22
    u_wind = 0.5
    u_wind_10 = u_wind
    factor_cros = 1  # 1 = cross ventilation possible # TODO: get from building properties

    dict_props_nat_vent = get_properties_natural_ventilation(
        gdf_building_test, gdf_building_architecture, gv)
    qm_arg_in, qm_arg_out \
        = calc_qm_arg(factor_cros, temp_ext, df_windows_building_test, u_wind_10, temp_zone, r_window_arg)

    t0 = time.time()
    res = calc_air_flows(temp_zone, u_wind, temp_ext, dict_props_nat_vent)
    t1 = time.time()

    print(res)
    print('time:', t1 - t0)
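
The footprint and perimeter columns above come directly from geopandas, which evaluates shapely's area and length per geometry; a self-contained illustration with a unit square:

import geopandas
from shapely.geometry import Polygon

gdf = geopandas.GeoDataFrame({'Name': ['B1']},
                             geometry=[Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])])
print(gdf.area.iloc[0], gdf.length.iloc[0])  # 1.0 and 4.0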
Example #8
def run_as_script(scenario_path=None):
    """
    Run the properties script with input from the reference case and compare the results. This ensures that changes
    made to this script (e.g. refactorings) do not stop the script from working and also that the results stay the same.
    """
    import cea.globalvar
    gv = cea.globalvar.GlobalVariables()
    if not scenario_path:
        scenario_path = gv.scenario_reference
    locator = inputlocator.InputLocator(scenario_path=scenario_path)
    properties(locator=locator,
               prop_thermal_flag=True,
               prop_architecture_flag=True,
               prop_hvac_flag=True,
               prop_comfort_flag=True,
               prop_internal_loads_flag=True,
               gv=gv)
Example #9
def main(config):
    """
    run the whole network summary routine
    """
    import cea.inputlocator as inputlocator

    locator = inputlocator.InputLocator(scenario=config.scenario)
    total_demand = pd.read_csv(locator.get_total_demand())

    substation_main(locator,
                    total_demand,
                    total_demand['Name'],
                    heating_configuration=7,
                    cooling_configuration=7,
                    Flag=False)

    print('substation_main() succeeded')
Example #10
def run_as_script(scenario_path=None):
    """
    run the whole network summary routine
    """
    import cea.globalvar
    import cea.inputlocator as inputlocator

    gv = cea.globalvar.GlobalVariables()

    if scenario_path is None:
        scenario_path = gv.scenario_reference

    locator = inputlocator.InputLocator(scenario_path=scenario_path)
    total_demand = pd.read_csv(locator.get_total_demand())
    building_names = total_demand['Name']

    substation_main(locator, total_demand, total_demand['Name'], gv, False)

    print('substation_main() succeeded')
Example #11
def main(config):
    import cea.inputlocator as inputlocator
    locator = inputlocator.InputLocator(scenario=config.scenario)

    # Options
    optimize = False
    raw_data_plot = True
    multicriteria = False
    plot_pareto = False
    clustering = True
    cluster_plot = True
    building_names = ['dorm', 'lab', 'office']
    # alternatively: ['B01', 'B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B09']
    building_load = 'Ef_kWh'
    type_data = 'measured'

    if optimize:
        for name in building_names:
            data = demand_CEA_reader(locator=locator,
                                     building_name=name,
                                     building_load=building_load,
                                     type=type_data)
            start_generation = None  # or the number of generation to start from
            number_individuals = 16
            number_generations = 50
            optimization_clustering_main(locator=locator,
                                         data=data,
                                         start_generation=start_generation,
                                         number_individuals=number_individuals,
                                         number_generations=number_generations,
                                         building_name=name)
    if multicriteria:
        generation = 50
        weight_fitness1 = 100  # accuracy
        weight_fitness2 = 100  # complexity
        weight_fitness3 = 70  # compression
        what_to_plot = "paretofrontier"
        output_path = locator.get_calibration_cluster_mcda(generation)
        for i, name in enumerate(building_names):
            # read_checkpoint
            input_path = locator.get_calibration_cluster_opt_checkpoint(
                generation, name)
            result = mcda_cluster_main(input_path=input_path,
                                       what_to_plot=what_to_plot,
                                       weight_fitness1=weight_fitness1,
                                       weight_fitness2=weight_fitness2,
                                       weight_fitness3=weight_fitness3)
            result["name"] = name
            if i == 0:
                result_final = pd.DataFrame(result).T
            else:
                result_final = result_final.append(pd.DataFrame(result).T,
                                                   ignore_index=True)
        result_final.to_csv(output_path)

    if plot_pareto:
        for name in building_names[:1]:
            data = demand_CEA_reader(locator=locator,
                                     building_name=name,
                                     building_load=building_load,
                                     type=type_data)
            days_of_analysis = len(data)
            generation_to_plot = 50
            annotate_benchmarks = True
            annotate_fitness = False
            show_in_screen = False
            save_to_disc = True
            what_to_plot = "paretofrontier"  #paretofrontier, halloffame, or population
            optimal_individual = pd.read_csv(
                locator.get_calibration_cluster_mcda(generation_to_plot))
            optimal_individual = optimal_individual.loc[
                optimal_individual["name"] == name]
            labelx = 'Accuracy (A) [-]'
            labely = 'Complexity (B) [-]'
            labelz = r'Compression ($\Gamma$) [-]'
            output = os.path.join(
                locator.get_calibration_clustering_plots_folder(),
                "plot_gen_" + str(generation_to_plot) + "_building_name_" +
                name + ".png")

            # read_checkpoint
            input_path = locator.get_calibration_cluster_opt_checkpoint(
                generation_to_plot, name)
            frontier_2D_3OB(
                input_path=input_path,
                what_to_plot=what_to_plot,
                output_path=output,
                labelx=labelx,
                labely=labely,
                labelz=labelz,
                days_of_analysis=days_of_analysis,
                show_benchmarks=annotate_benchmarks,
                show_fitness=annotate_fitness,
                show_in_screen=show_in_screen,
                save_to_disc=save_to_disc,
                optimal_individual=optimal_individual,
            )
    if clustering:
        name = 'lab'
        data = demand_CEA_reader(locator=locator,
                                 building_name=name,
                                 building_load=building_load,
                                 type=type_data)
        word_size = 4
        alphabet_size = 17

        clustering_sax(locator=locator,
                       data=data,
                       word_size=word_size,
                       alphabet_size=alphabet_size)

        if cluster_plot:
            show_benchmark = True
            save_to_disc = True
            show_in_screen = False
            show_legend = False
            labelx = "Hour of the day"
            labely = "Electrical load [kW]"

            input_path = locator.get_calibration_cluster('clusters_mean')
            data = pd.read_csv(input_path)
            output_path = os.path.join(
                locator.get_calibration_clustering_plots_folder(),
                "w_a_" + str(word_size) + "_" + str(alphabet_size) +
                "_building_name_" + name + ".png")

            plot_day(
                data=data,
                output_path=output_path,
                labelx=labelx,
                labely=labely,
                save_to_disc=save_to_disc,
                show_in_screen=show_in_screen,
                show_legend=show_legend)  #, show_benchmark=show_benchmark)

    if raw_data_plot:
        name = 'lab'
        show_benchmark = True
        save_to_disc = True
        show_in_screen = False
        show_legend = True
        labelx = "Hour of the day"
        labely = "Electrical load [kW]"
        data = demand_CEA_reader(locator=locator,
                                 building_name=name,
                                 building_load=building_load,
                                 type=type_data)

        data = pd.DataFrame(
            dict((str(key), value) for (key, value) in enumerate(data)))

        # input_path = locator.get_calibration_cluster('clusters_mean')
        output_path = os.path.join(
            locator.get_calibration_clustering_plots_folder(),
            "raw_building_name_" + name + ".png")

        plot_day(data=data,
                 output_path=output_path,
                 labelx=labelx,
                 labely=labely,
                 save_to_disc=save_to_disc,
                 show_in_screen=show_in_screen,
                 show_legend=show_legend)  # , show_benchmark=show_benchmark)
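
clustering_sax() compresses each daily load profile into a short symbolic word before clustering. A minimal sketch of the standard SAX transform that word_size and alphabet_size refer to; CEA's implementation may differ in its details:

import numpy as np
from scipy.stats import norm

def sax_word(profile, word_size, alphabet_size):
    # z-normalize, average over word_size equal segments (PAA), then map each
    # segment mean to a letter via equiprobable N(0, 1) breakpoints
    x = (profile - np.mean(profile)) / np.std(profile)
    paa = x.reshape(word_size, -1).mean(axis=1)  # assumes len(profile) % word_size == 0
    breakpoints = norm.ppf(np.linspace(0, 1, alphabet_size + 1)[1:-1])
    return ''.join(chr(ord('a') + s) for s in np.searchsorted(breakpoints, paa))

# a 24-hour profile becomes a 4-letter word over a 17-letter alphabet
print(sax_word(np.arange(24.0), word_size=4, alphabet_size=17))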
Example #12
def ss_calibrator(building_name):

    from cea.analysis.clustering.kmeans.k_means_partitioner import partitioner
    list_median, cluster_labels = partitioner(building_name)
    intended_parameters = [
        'people', 'Eaf', 'Elf', 'Qwwf', 'I_rad', 'I_sol', 'T_ext', 'rh_ext',
        'ta_hs_set', 'ta_cs_set', 'theta_a', 'Qhsf', 'Qcsf'
    ]
    # collect the simulation results
    gv = cea.globalvar.GlobalVariables()
    scenario_path = gv.scenario_reference
    locator = inputlocator.InputLocator(scenario=scenario_path)
    metered_path = r'C:\reference-case-open\baseline\inputs\building-metering'
    metered_building = os.path.join(metered_path, '%s.csv' % building_name)
    ht_cl_el = ['Qhsf']
    measured_data_pd = pd.read_csv(metered_building, usecols=ht_cl_el)
    measured_data = np.array(measured_data_pd)
    test_NN_input_path = os.path.join(locator.get_calibration_folder(),
                                      "test_NN_input.csv")
    test_NN_target_path = os.path.join(locator.get_calibration_folder(),
                                       "test_NN_target.csv")
    input_NN_x = np.array(pd.read_csv(test_NN_input_path))
    target_NN_t = np.array(pd.read_csv(test_NN_target_path))
    json_NN_path = os.path.join(locator.get_calibration_folder(),
                                "trained_network_ht.json")
    weight_NN_path = os.path.join(locator.get_calibration_folder(),
                                  "trained_network_ht.h5")

    scalerX = MinMaxScaler(feature_range=(0, 1))
    inputs_x = scalerX.fit_transform(input_NN_x)
    scalerT = MinMaxScaler(feature_range=(0, 1))
    targets_t = scalerT.fit_transform(target_NN_t)

    # load json and create model
    with open(json_NN_path, 'r') as json_file:
        loaded_model_json = json_file.read()
    perceptron_ht = model_from_json(loaded_model_json)

    # load weights into new model
    perceptron_ht.load_weights(weight_NN_path)
    perceptron_ht.compile(loss='binary_crossentropy',
                          optimizer='rmsprop',
                          metrics=['accuracy'])
    file_path = os.path.join(locator.get_demand_results_folder(),
                             "%(building_name)s.xls" % locals())
    calcs_outputs_xls = pd.read_excel(file_path)
    temp_file = os.path.join(locator.get_temporary_folder(),
                             "%(building_name)s.csv" % locals())
    calcs_outputs_xls.to_csv(temp_file,
                             index=False,
                             header=True,
                             float_format='%.3f',
                             decimal='.')
    calcs_trimmed_csv = pd.read_csv(temp_file, usecols=intended_parameters)
    calcs_trimmed_csv['I_real'] = calcs_trimmed_csv['I_rad'] + calcs_trimmed_csv['I_sol']
    calcs_trimmed_csv['ta_hs_set'].fillna(0, inplace=True)
    calcs_trimmed_csv['ta_cs_set'].fillna(50, inplace=True)
    NN_input = calcs_trimmed_csv
    input_drops = ['I_rad', 'I_sol', 'theta_a', 'Qhsf', 'Qcsf']
    NN_input = NN_input.drop(input_drops, axis=1)

    NN_input = np.array(NN_input)
    target1 = calcs_trimmed_csv['Qhsf']
    target2 = calcs_trimmed_csv['Qcsf']
    target3 = calcs_trimmed_csv['theta_a']
    NN_target_ht = pd.concat([target1, target3], axis=1)
    NN_target_cl = pd.concat([target2, target3], axis=1)
    NN_target_ht = np.array(NN_target_ht)
    NN_target_cl = np.array(NN_target_cl)

    # return NN_input, NN_target_ht, NN_target_cl
    from cea.demand.calibration.subset_calibrator.surrogate_4_calibration import prep_NN_inputs
    NN_delays = 1
    NN_input_ready_ht, NN_target_ready_ht = prep_NN_inputs(
        NN_input, NN_target_ht, NN_delays)
    NN_input_ready_cl, NN_target_ready_cl = prep_NN_inputs(
        NN_input, NN_target_cl, NN_delays)

    one_array_override = np.array(
        pd.read_csv(locator.get_building_overrides(), skiprows=1, nrows=1))
    one_array_override1 = np.delete(one_array_override, 0, 1)
    rows_override, cols_override = one_array_override1.shape
    rows_NN_input, cols_NN_input = NN_input_ready_ht.shape
    random_variables_matrix = np.array([])
    vector_of_ones = np.ones((rows_NN_input, 1))

    for k in range(0, cols_override):
        random_variable_call = one_array_override1[0, k]
        random_variable_col = np.multiply(random_variable_call, vector_of_ones)
        if k < 1:
            random_variables_matrix = random_variable_col
        else:
            random_variables_matrix = np.append(random_variables_matrix,
                                                random_variable_col,
                                                axis=1)

    combined_inputs_ht = np.concatenate(
        (NN_input_ready_ht, random_variables_matrix), axis=1)
    combined_inputs_cl = np.concatenate(
        (NN_input_ready_cl, random_variables_matrix), axis=1)
    nn_X_ht = combined_inputs_ht
    nn_X_cl = combined_inputs_cl
    nn_T_ht = NN_target_ready_ht
    nn_T_cl = NN_target_ready_cl

    nn_input_rows, nn_input_cols = NN_input.shape
    reshaped_nn_input = np.reshape(NN_input, (365, 24, nn_input_cols))
    reshape_measured_data = np.reshape(measured_data, (365, 24))
    reshape_target1 = np.reshape(target1, (365, 24))
    reshape_target2 = np.reshape(target2, (365, 24))
    reshape_target3 = np.reshape(target3, (365, 24))

    first_cluster = cluster_labels[0]

    first_group = np.where(cluster_labels == first_cluster)[0]
    first_mat = reshaped_nn_input[first_group, :, :]
    measured_data_trim = reshape_measured_data[first_group, :]
    avg_cluster_measured = np.average(measured_data_trim, axis=0)
    target1 = reshape_target1[first_group, :]
    target2 = reshape_target2[first_group, :]
    target3 = reshape_target3[first_group, :]

    nn_input_rows, nn_input_cols, nn_input_tens = first_mat.shape
    #nn_input_rows=list(int(nn_input_rows))
    #second_mat=np.reshape(first_mat,(nn_input_rows*nn_input_cols,nn_input_tens))
    for number_samples in range(nn_input_rows):

        NN_input = first_mat[number_samples]
        NN_input = np.array(NN_input)
        target1a = target1[number_samples]
        target2a = target2[number_samples]
        target3a = target3[number_samples]
        NN_target_ht = np.vstack((target1a, target3a))
        #NN_target_cl = pd.concat([target2, target3], axis=1)
        NN_target_ht = np.array(NN_target_ht)
        NN_target_ht = np.transpose(NN_target_ht)
        #NN_target_cl=np.array(NN_target_cl)
        NN_delays = 1
        NN_input_ready_ht, NN_target_ready_ht = prep_NN_inputs(
            NN_input, NN_target_ht, NN_delays)
        #NN_input_ready_cl, NN_target_ready_cl = prep_NN_inputs(NN_input, NN_target_cl, NN_delays)
        random_variables_matrix2 = random_variables_matrix[0:23, :]
        combined_inputs_ht = np.concatenate(
            (NN_input_ready_ht, random_variables_matrix2), axis=1)
        #combined_inputs_cl = np.concatenate((NN_input_ready_cl, random_variables_matrix), axis=1)
        nn_X_ht = combined_inputs_ht
        #nn_X_cl = combined_inputs_cl
        nn_T_ht = NN_target_ready_ht
        #nn_T_cl = NN_target_ready_cl

        inputs_x = scalerX.transform(nn_X_ht)
        predict_NN_ht = perceptron_ht.predict(inputs_x)
        filtered_predict = scalerT.inverse_transform(predict_NN_ht)
        target_filter_node = NN_target_ready_ht[:, 0]
        filter_logic = np.isin(target_filter_node, 0)
        target_anomalies = np.asarray(np.where(filter_logic), dtype=int)
        t_anomalies_rows, t_anomalies_cols = target_anomalies.shape
        anomalies_replacements = np.zeros(t_anomalies_cols)
        anomalies_replacements = np.transpose(anomalies_replacements)
        filtered_predict[target_anomalies, 0] = anomalies_replacements
        avg_cluster_single = np.average(avg_cluster_measured)
        trim_avg_cluster = avg_cluster_measured[1:24]
        main_filtered_predict = filtered_predict[:, 0]
        CV_RMSE = np.divide((sqrt(
            mean_squared_error(trim_avg_cluster, main_filtered_predict))),
                            avg_cluster_single)

        # LHS sampling
        lhs_samples_num = 1000
        #design = gmm_random_sampler(random_variables_matrix2, lhs_samples_num)
        design = lhs(5, samples=lhs_samples_num)
        lower = [
            0.5 * random_variables_matrix2[0, 0],
            0.5 * random_variables_matrix2[0, 1],
            0.5 * random_variables_matrix2[0, 2],
            0.5 * random_variables_matrix2[0, 3],
            0.5 * random_variables_matrix2[0, 4]
        ]
        upper = [
            1.5 * random_variables_matrix2[0, 0],
            1.5 * random_variables_matrix2[0, 1],
            1.5 * random_variables_matrix2[0, 2],
            1.5 * random_variables_matrix2[0, 3],
            1.5 * random_variables_matrix2[0, 4]
        ]
        for i in range(5):
            # scale each unit-hypercube column to its [lower, upper] range
            design[:, i] = uniform(loc=lower[i],
                                   scale=upper[i] - lower[i]).ppf(design[:, i])

        CV_RMSE_mat = ss_loop(design, lhs_samples_num, NN_input_ready_ht,
                              scalerX, perceptron_ht, scalerT, nn_T_ht,
                              avg_cluster_measured)
        min_CV_RMSE = np.ndarray.min(CV_RMSE_mat)

        counter_while = 0
        counter_max = 100

        while min_CV_RMSE < 0.15 and counter_while < counter_max:
            # momentum_low= 0.9 + ((float(counter_while)/float(counter_max))/float(10))
            # momentum_up = 1.1 - ((float(counter_while) / float(counter_max)) / float(10))
            jumbo_outputs = np.concatenate((CV_RMSE_mat, design), axis=1)
            jumbo_outputs = jumbo_outputs[np.argsort(jumbo_outputs[:, 0])]
            filtered_samples = jumbo_outputs[0:10, :]
            # lower = [momentum_low*np.ndarray.min(filtered_samples[:,1]), momentum_low*np.ndarray.min(filtered_samples[:,2]),
            #          momentum_low*np.ndarray.min(filtered_samples[:,3]),momentum_low *np.ndarray.min(filtered_samples[:, 4]),
            #          momentum_low*np.ndarray.min(filtered_samples[:,5])]
            # upper = [momentum_up*np.ndarray.max(filtered_samples[:,1]),momentum_up*np.ndarray.max(filtered_samples[:,2]),
            #          momentum_up*np.ndarray.max(filtered_samples[:,3]),momentum_up*np.ndarray.max(filtered_samples[:, 4]),
            #          momentum_up*np.ndarray.max(filtered_samples[:,5])]
            # for i in xrange(4):
            #     design = lhs(5, samples=lhs_samples_num)
            #     design[:, i] = uniform(loc=lower[i], scale=upper[i]).ppf(design[:, i])
            gmm_samples_num = 100
            gmm_input = filtered_samples[:, 1:6]
            design = gmm_random_sampler(gmm_input, gmm_samples_num)
            #design = list(design)
            design = np.array(design[0])
            CV_RMSE_mat = ss_loop(design, gmm_samples_num, NN_input_ready_ht,
                                  scalerX, perceptron_ht, scalerT, nn_T_ht,
                                  avg_cluster_measured)
            min_CV_RMSE = np.ndarray.min(CV_RMSE_mat)
            counter_while = counter_while + 1

            print(min_CV_RMSE, counter_while)
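
The LHS step above relies on pyDOE's lhs(), which returns points on the unit hypercube, and scipy's uniform, whose scale parameter is the width of the interval rather than its upper end. The pattern in isolation, with hypothetical bounds:

import numpy as np
from pyDOE import lhs
from scipy.stats import uniform

lower = np.array([0.5, 1.0, 2.0, 0.1, 100.0])  # hypothetical per-variable bounds
upper = np.array([1.5, 3.0, 6.0, 0.4, 300.0])

design = lhs(5, samples=1000)                  # 1000 x 5 points in [0, 1]
for i in range(5):
    design[:, i] = uniform(loc=lower[i], scale=upper[i] - lower[i]).ppf(design[:, i])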
Example #13
def run_as_script():

    gv = cea.globalvar.GlobalVariables()
    scenario_path = gv.scenario_reference
    locator = inputlocator.InputLocator(scenario=scenario_path)
    json_NN_path = os.path.join(locator.get_calibration_folder(),
                                "trained_network_ht.json")
    weight_NN_path = os.path.join(locator.get_calibration_folder(),
                                  "trained_network_ht.h5")

    # load json and create model
    with open(json_NN_path, 'r') as json_file:
        loaded_model_json = json_file.read()
    perceptron_ht = model_from_json(loaded_model_json)

    # load weights into new model
    perceptron_ht.load_weights(weight_NN_path)

    # predict new outputs and estimate the error
    test_NN_input_path = os.path.join(locator.get_calibration_folder(),
                                      "test_NN_input.csv")
    test_NN_target_path = os.path.join(locator.get_calibration_folder(),
                                       "test_NN_target.csv")
    input_NN_x = np.array(pd.read_csv(test_NN_input_path))
    target_NN_t = np.array(pd.read_csv(test_NN_target_path))
    perceptron_ht.compile(loss='binary_crossentropy',
                          optimizer='rmsprop',
                          metrics=['accuracy'])

    scalerX = MinMaxScaler(feature_range=(0, 1))
    inputs_x = scalerX.fit_transform(input_NN_x)
    scalerT = MinMaxScaler(feature_range=(0, 1))
    targets_t = scalerT.fit_transform(target_NN_t)

    predict_NN_ht = perceptron_ht.predict(inputs_x)
    filtered_predict = scalerT.inverse_transform(predict_NN_ht)

    filter_logic = np.isin(targets_t, 0)
    target_anomalies = np.asarray(np.where(filter_logic), dtype=int)
    t_anomalies_rows, t_anomalies_cols = target_anomalies.shape
    anomalies_replacements = np.zeros(t_anomalies_cols)
    filtered_predict[target_anomalies, 0] = anomalies_replacements

    final_target = pd.DataFrame(target_NN_t)
    final_output = pd.DataFrame(filtered_predict)
    save_target_path = os.path.join(locator.get_calibration_folder(),
                                    "saved_targets.csv")
    save_output_path = os.path.join(locator.get_calibration_folder(),
                                    "saved_outputs.csv")
    final_target.to_csv(save_target_path,
                        index=False,
                        header=False,
                        float_format='%.3f',
                        decimal='.')
    final_output.to_csv(save_output_path,
                        index=False,
                        header=False,
                        float_format='%.3f',
                        decimal='.')

    rmse = sqrt(mean_squared_error(target_NN_t[:, 0], filtered_predict[:, 0]))
    mean_target = np.mean(target_NN_t[:, 0])
    cv_rmse = np.divide(rmse, mean_target)

    print(rmse, cv_rmse)
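
The JSON-plus-HDF5 load pattern above assumes the network was serialized in two parts, which Keras supports out of the box; a minimal sketch of the matching save side (the layer sizes are illustrative, not the trained CEA network):

from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(64, activation='relu', input_dim=10), Dense(2)])
model.compile(loss='mean_squared_error', optimizer='rmsprop')

with open('trained_network_ht.json', 'w') as f:
    f.write(model.to_json())                 # architecture only
model.save_weights('trained_network_ht.h5')  # weights only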
Example #14
def run_as_script():
    import cea.globalvar
    import cea.inputlocator as inputlocator
    gv = cea.globalvar.GlobalVariables()
    scenario_path = gv.scenario_reference
    locator = inputlocator.InputLocator(scenario_path=scenario_path)

    # Options
    optimize = True
    multicriteria = True
    plot_pareto = True
    clustering = True
    cluster_plot = True
    building_names = ['M01']
    # alternatively: ['B01', 'B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B09']
    building_load = 'Ef_kWh'
    type_data = 'measured'

    if optimize:
        for name in building_names:
            data = demand_CEA_reader(locator=locator, building_name=name, building_load=building_load,
                                     type=type_data)
            start_generation = None  # or the number of generation to start from
            number_individuals = 16
            number_generations = 100
            optimization_clustering_main(locator=locator, data=data, start_generation=start_generation,
                                         number_individuals=number_individuals, number_generations=number_generations,
                                         building_name=name, gv=gv)
    if multicriteria:
        for i, name in enumerate(building_names):
            generation = 100
            weight_fitness1 = 100  # accuracy
            weight_fitness2 = 80  # complexity
            weight_fitness3 = 80  # compression
            what_to_plot = "paretofrontier"
            output_path = locator.get_calibration_cluster_mcda(generation)

            # read_checkpoint
            input_path = locator.get_calibration_cluster_opt_checkpoint(generation, name)
            result = mcda_cluster_main(input_path=input_path, what_to_plot=what_to_plot,
                                       weight_fitness1=weight_fitness1, weight_fitness2=weight_fitness2,
                                       weight_fitness3=weight_fitness3)
            result["name"] = name
            if i == 0:
                result_final = pd.DataFrame(result).T
            else:
                result_final = result_final.append(pd.DataFrame(result).T, ignore_index=True)
        result_final.to_csv(output_path)

    if plot_pareto:
        for name in building_names:
            generation_to_plot = 100
            annotate_benchmarks = True
            annotate_fitness = False
            show_in_screen = False
            save_to_disc = True
            what_to_plot = "paretofrontier"  # paretofrontier, halloffame, or population
            optimal_individual = pd.read_csv(locator.get_calibration_cluster_mcda(generation_to_plot))
            optimal_individual = optimal_individual.loc[optimal_individual["name"]==name]
            labelx = 'Accuracy (A) [-]'
            labely = 'Complexity (B) [-]'
            labelz = r'Compression ($\Gamma$) [-]'
            output = os.path.join(locator.get_calibration_clustering_plots_folder(),
                                  "plot_gen_" + str(generation_to_plot) + "_building_name_" + name + ".png")

            # read_checkpoint
            input_path = locator.get_calibration_cluster_opt_checkpoint(generation_to_plot, name)
            frontier_2D_3OB(input_path=input_path, what_to_plot=what_to_plot, output_path=output,
                            labelx=labelx, labely=labely, labelz=labelz,
                            show_benchmarks=annotate_benchmarks, show_fitness=annotate_fitness,
                            show_in_screen=show_in_screen, save_to_disc=save_to_disc,
                            optimal_individual=optimal_individual)
    if clustering:
        name = 'M01'
        data = demand_CEA_reader(locator=locator, building_name=name, building_load=building_load,
                                 type=type_data)
        word_size = 9
        alphabet_size = 7
        clustering_main(locator=locator, data=data, word_size=word_size, alphabet_size=alphabet_size, gv=gv)

        if cluster_plot:
            show_benchmark = True
            save_to_disc = True
            show_in_screen = False
            show_legend = False
            labelx = "Hour of the day"
            labely = "Electrical load [kW]"
            # input_path = demand_CEA_reader(locator=locator, building_name=name, building_load=building_load,
            #                  type=type_data)
            #
            # input_path = pd.DataFrame(dict((str(key), value) for (key, value) in enumerate(input_path)))

            input_path = locator.get_calibration_cluster('clusters_mean')
            output_path = os.path.join(locator.get_calibration_clustering_plots_folder(),
                                       "w_a_" + str(word_size) + "_" + str(alphabet_size) +
                                       "_building_name_" + name + ".png")

            clusters_day_mean(input_path=input_path, output_path=output_path, labelx=labelx,
                              labely=labely, save_to_disc=save_to_disc, show_in_screen=show_in_screen,
                              show_legend=show_legend)  # , show_benchmark=show_benchmark)
Example #15
    return area_facade_zone, area_roof_zone, height_zone, slope_roof


def get_windows_of_building(dataframe_windows, name_building):
    return dataframe_windows.loc[dataframe_windows['name_building'] ==
                                 name_building]


# TESTING
if __name__ == '__main__':

    calc_q_m_mech()

    # generate windows based on geometry of vertical surfaces in radiation file
    locator = inputlocator.InputLocator(
        scenario_path=r'C:\cea-reference-case\reference-case\baseline')
    dataframe_radiation = pandas.read_csv(locator.get_radiation())
    geodataframe_building_architecture = geopandas.GeoDataFrame.from_file(
        locator.get_building_architecture())
    # print(geodataframe_building_architecture)
    geodataframe_building_geometry = geopandas.GeoDataFrame.from_file(
        locator.get_building_geometry())
    # print(geodataframe_building_geometry)

    dataframe_windows = create_windows(dataframe_radiation,
                                       geodataframe_building_architecture)

    building_test = 'B302040213'

    # get building windows
    windows_building_test = get_windows_of_building(dataframe_windows,
                                                    building_test)
Example #16
from __future__ import division
import pandas as pd
import matplotlib.pyplot as plt
from cea import config
import numpy as np
from cea import inputlocator

configur = config.Configuration()
locator = inputlocator.InputLocator(scenario=configur.scenario)

# index = pd.date_range('1/1/2016', periods=8760, freq='H')
total_demand = pd.read_csv(
    r'C:\reference-case-open\baseline\outputs\data\demand\total_demand.csv')
# total_demand.set_index(index, inplace=True)
print(total_demand[['Qhsf_MWhyr', 'Ef_MWhyr', 'Qcsf_MWhyr']].sum(axis=0))

list_buildings = pd.read_csv(locator.get_total_demand())['Name'].values
I_sol = np.array([])
for building in list_buildings:
    data = pd.read_excel(
        r'C:\reference-case-open\baseline\outputs\data\demand/' + building +
        '.xls')
    I_sol = np.append(I_sol, [data['I_sol_gross'].sum() / 1000])

I_sol2 = np.nansum(I_sol)
print(I_sol2)

# print(total_demand[2300:2370])
total_demand.plot()

# plt.show()
Example #17
def test_benchmark_targets():
    locator = inputlocator.InputLocator(
        scenario_path=r'C:\reference-case-zug\baseline')
    calc_benchmark_targets(locator)
Example #18
def run_as_script(config):
    scenario_path = config.scenario
    locator = inputlocator.InputLocator(scenario=scenario_path)

    building_name = 'B155066'  # intended building
    cluster_labels = ss_calibrator(building_name)