Example #1
def internal_loads_mapper(list_uses, locator, occupant_densities, building_typology_df):
    internal_DB = pd.read_excel(locator.get_database_use_types_properties(), 'INTERNAL_LOADS')
    # define internal loads
    prop_internal_df = building_typology_df.merge(internal_DB, left_on='1ST_USE', right_on='code')
    # write to dbf
    fields = ['Name',
              'Occ_m2p',
              'Qs_Wp',
              'X_ghp',
              'Ea_Wm2',
              'El_Wm2',
              'Ed_Wm2',
              'Ev_kWveh',
              'Qcre_Wm2',
              'Vww_ldp',
              'Vw_ldp',
              'Qhpro_Wm2',
              'Qcpro_Wm2',
              'Epro_Wm2']
    prop_internal_df_merged = calculate_average_multiuse(fields,
                                                         prop_internal_df,
                                                         occupant_densities,
                                                         list_uses,
                                                         internal_DB)
    dataframe_to_dbf(prop_internal_df_merged[fields], locator.get_building_internal())
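The merge above is an ordinary pandas key join: every building row picks up the archetype row whose `code` equals its `1ST_USE`. A minimal, self-contained sketch of the same pattern (toy columns and made-up values; pandas is the only dependency):

import pandas as pd

# toy stand-ins for building_typology_df and the INTERNAL_LOADS sheet
typology = pd.DataFrame({'Name': ['B001', 'B002'],
                         '1ST_USE': ['MULTI_RES', 'OFFICE']})
internal_db = pd.DataFrame({'code': ['MULTI_RES', 'OFFICE'],
                            'Ea_Wm2': [4.0, 12.0]})

# each building is joined to the archetype row matching its primary use
merged = typology.merge(internal_db, left_on='1ST_USE', right_on='code')
print(merged[['Name', 'Ea_Wm2']])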
Example #2
def calc_spatio_temporal_visuals(locator, period, variables_to_plot,
                                 list_of_buildings, initial_date):
    # the dates for which the building demand is calculated are stored in 'date'
    date = pd.date_range(initial_date, periods=HOURS_IN_YEAR,
                         freq='H')[period[0]:period[1]]
    time = date.strftime("%Y%m%d%H%M%S")

    # if all buildings are selected, get the building names from the total demand csv file
    if 'all' in list_of_buildings:
        building_names = pd.read_csv(locator.get_total_demand())['Name'].values
    else:
        building_names = list_of_buildings

    frames = []
    for building in building_names:
        # import the relevant variables of each building, then slice out just
        # the requested period (i.e. the selected time steps)
        data = pd.read_csv(locator.SC_results(
            building, panel_type='FP'))[variables_to_plot][period[0]:period[1]]
        data['date'] = time
        data['Name'] = building
        data['rad_kWh/m2'] = data['radiation_kWh'] / data['Area_SC_m2']
        frames.append(data)

    # DataFrame.append was removed in pandas 2.0; concatenate all per-building
    # frames in one go instead
    final = pd.concat(frames, ignore_index=True)

    dbf.dataframe_to_dbf(final, locator.get_4D_sc_plot(period))
Example #3
def calculate_typology_file(locator, zone_df, year_construction, occupancy_type, occupancy_output_path):
    """
    This script fills in the occupancy.dbf file with one occupancy type
    :param zone_df:
    :param occupancy_type:
    :param occupancy_output_path:
    :return:
    """
    #calculate construction year
    typology_df = calculate_age(zone_df, year_construction)

    #calculate the most likely construction standard
    standard_database = pd.read_excel(locator.get_database_construction_standards(), sheet_name='STANDARD_DEFINITION')
    typology_df['STANDARD'] = calc_category(standard_database, typology_df['YEAR'].values)

    #Calculate the most likely use type
    typology_df['1ST_USE'] = 'MULTI_RES'
    typology_df['1ST_USE_R'] = 1.0
    typology_df['2ND_USE'] = "NONE"
    typology_df['2ND_USE_R'] = 0.0
    typology_df['3RD_USE'] = "NONE"
    typology_df['3RD_USE_R'] = 0.0
    if occupancy_type == "Get it from open street maps":
        no_buildings = typology_df.shape[0]
        for index in range(no_buildings):
            typology_df.loc[index, "USE_A_R"] = 1.0
            if zone_df.loc[index, "category"] == "yes":
                typology_df.loc[index, "USE_A"] = "MULTI_RES"
                typology_df.loc[index, "REFERENCE"] = "CEA - assumption"
            elif zone_df.loc[index, "category"] == "residential" or zone_df.loc[index, "category"] == "apartments":
                typology_df.loc[index, "USE_A"] = "MULTI_RES"
                typology_df.loc[index, "REFERENCE"] = "OSM - as it is"
            elif zone_df.loc[index, "category"] == "commercial" or zone_df.loc[index, "category"] == "civic":
                typology_df.loc[index, "USE_A"] = "OFFICE"
                typology_df.loc[index, "REFERENCE"] = "OSM - as it is"
            elif zone_df.loc[index, "category"] == "school":
                typology_df.loc[index, "USE_A"] = "SCHOOL"
                typology_df.loc[index, "REFERENCE"] = "OSM - as it is"
            elif zone_df.loc[index, "category"] == "garage" or zone_df.loc[index, "category"] == "garages" or zone_df.loc[index, "category"] == "warehouse":
                typology_df.loc[index, "USE_A"] = "PARKING"
                typology_df.loc[index, "REFERENCE"] = "OSM - as it is"
            elif zone_df.loc[index, "category"] == "house" or zone_df.loc[index, "category"] == "terrace" or zone_df.loc[index, "category"] == "detached":
                typology_df.loc[index, "USE_A"] = "SINGLE_RES"
                typology_df.loc[index, "REFERENCE"] = "OSM - as it is"
            elif zone_df.loc[index, "category"] == "retail":
                typology_df.loc[index, "USE_A"] = "RETAIL"
                typology_df.loc[index, "REFERENCE"] = "OSM - as it is"
            elif zone_df.loc[index, "category"] == "industrial":
                typology_df.loc[index, "USE_A"] = "INDUSTRIAL"
                typology_df.loc[index, "REFERENCE"] = "OSM - as it is"
            elif zone_df.loc[index, "category"] == "warehouse":
                typology_df.loc[index, "USE_A"] = "INDUSTRIAL"
                typology_df.loc[index, "REFERENCE"] = "OSM - as it is"
            else:
                typology_df.loc[index, "USE_A"] = "MULTI_RES"
                typology_df.loc[index, "REFERENCE"] = "CEA - assumption"

    fields = COLUMNS_ZONE_TYPOLOGY
    dataframe_to_dbf(typology_df[fields+['REFERENCE']], occupancy_output_path)
Example #4
def supply_mapper(locator, building_typology_df):
    supply_DB = pd.read_excel(locator.get_database_construction_standards(),
                              'SUPPLY_ASSEMBLIES')
    prop_supply_df = building_typology_df.merge(supply_DB,
                                                left_on='STANDARD',
                                                right_on='STANDARD')
    fields = ['Name', 'type_cs', 'type_hs', 'type_dhw', 'type_el']
    dataframe_to_dbf(prop_supply_df[fields], locator.get_building_supply())
Example #5
    def test_roundtrip(self):
        """Make sure the roundtrip df -> dbf -> df keeps the data intact."""
        df = pd.DataFrame({
            'a': ['foo', 'bar', 'baz'],
            'b': np.random.randn(3)
        })
        # tempfile.mktemp is deprecated (race-prone); acceptable here only
        # because this is a single-process test
        dbf_path = tempfile.mktemp(suffix='.dbf')
        dbf.dataframe_to_dbf(df, dbf_path)
        assert_frame_equal(df, dbf.dbf_to_dataframe(dbf_path))
Example #6
def calculate_occupancy_file(zone_df, occupancy_type, occupancy_output_path):
    """
    This script fills in the occupancy.dbf file with one occupancy type
    :param zone_df:
    :param occupancy_type:
    :param occupancy_output_path:
    :return:
    """
    occupancy_df = zone_df[["Name"]].copy()
    for occupancy in COLUMNS_ZONE_OCCUPANCY:
        if occupancy_type == occupancy:
            occupancy_df.loc[:, occupancy] = 1.0
        else:
            occupancy_df.loc[:, occupancy] = 0.0

    # get the occupancy from open street maps if indicated
    if occupancy_type == "Get it from open street maps":
        no_buildings = occupancy_df.shape[0]
        for index in range(no_buildings):
            category = zone_df.loc[index, "category"]
            if category == "yes":
                occupancy_df.loc[index, "MULTI_RES"] = 1.0
                occupancy_df.loc[index, "REFERENCE"] = "CEA - assumption"
            elif category in ("residential", "apartments"):
                occupancy_df.loc[index, "MULTI_RES"] = 1.0
                occupancy_df.loc[index, "REFERENCE"] = "OSM - as it is"
            elif category in ("commercial", "civic"):
                occupancy_df.loc[index, "OFFICE"] = 1.0
                occupancy_df.loc[index, "REFERENCE"] = "OSM - as it is"
            elif category == "school":
                occupancy_df.loc[index, "SCHOOL"] = 1.0
                occupancy_df.loc[index, "REFERENCE"] = "OSM - as it is"
            # "warehouse" maps to PARKING here; a later duplicate "warehouse"
            # branch mapping to INDUSTRIAL was unreachable and has been dropped
            elif category in ("garage", "garages", "warehouse"):
                occupancy_df.loc[index, "PARKING"] = 1.0
                occupancy_df.loc[index, "REFERENCE"] = "OSM - as it is"
            elif category in ("house", "terrace", "detached"):
                occupancy_df.loc[index, "SINGLE_RES"] = 1.0
                occupancy_df.loc[index, "REFERENCE"] = "OSM - as it is"
            elif category == "retail":
                occupancy_df.loc[index, "RETAIL"] = 1.0
                occupancy_df.loc[index, "REFERENCE"] = "OSM - as it is"
            elif category == "industrial":
                occupancy_df.loc[index, "INDUSTRIAL"] = 1.0
                occupancy_df.loc[index, "REFERENCE"] = "OSM - as it is"
            else:
                occupancy_df.loc[index, "MULTI_RES"] = 1.0
                occupancy_df.loc[index, "REFERENCE"] = "CEA - assumption"
    dataframe_to_dbf(occupancy_df, occupancy_output_path)
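Both OSM branches above boil down to a lookup table, which is exactly the route Example #11 below takes with OSM_BUILDING_CATEGORIES. A runnable sketch of the idea on toy data (the mapping here is an abbreviated, hypothetical stand-in, not CEA's real table):

import pandas as pd

# hypothetical, abbreviated category-to-use mapping for illustration only
CATEGORY_TO_USE = {'residential': 'MULTI_RES', 'apartments': 'MULTI_RES',
                   'school': 'SCHOOL', 'retail': 'RETAIL'}

zone = pd.DataFrame({'Name': ['B001', 'B002', 'B003'],
                     'category': ['school', 'retail', 'yes']})
# categories with a known mapping take it; everything else keeps the CEA default
use = zone['category'].map(CATEGORY_TO_USE).fillna('MULTI_RES')
reference = zone['category'].isin(CATEGORY_TO_USE.keys()).map(
    {True: 'OSM - as it is', False: 'CEA - assumption'})
print(pd.DataFrame({'Name': zone['Name'], '1ST_USE': use, 'REFERENCE': reference}))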
Example #7
    def write(self, df, *args, **kwargs):
        """
        :type df: pd.DataFrame
        """
        self.validate(df)
        from cea.utilities.dbf import dataframe_to_dbf
        path_to_dbf = self(*args, **kwargs)

        # create the parent folder if necessary; exist_ok avoids a race
        # between the existence check and the creation
        os.makedirs(os.path.dirname(path_to_dbf), exist_ok=True)

        dataframe_to_dbf(df, path_to_dbf)
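The method above relies on the locator entry being callable: `self(*args, **kwargs)` resolves the target path before the dataframe is written. A toy sketch of that pattern (the class and names are illustrative, not CEA's actual InputLocator API):

import os
import pandas as pd

class DbfLocatorMethod:
    """Hypothetical stand-in: a callable path resolver with an attached writer."""
    def __init__(self, folder, template):
        self.folder, self.template = folder, template

    def __call__(self, **kwargs):
        # resolve the concrete file path from the template arguments
        return os.path.join(self.folder, self.template.format(**kwargs))

    def write(self, df, **kwargs):
        path = self(**kwargs)
        os.makedirs(os.path.dirname(path), exist_ok=True)
        df.to_csv(path)  # a real locator method would call dataframe_to_dbf here

locator_method = DbfLocatorMethod('props', '{building}_loads.csv')
locator_method.write(pd.DataFrame({'Ea_Wm2': [4.0]}), building='B001')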
Example #8
def aircon_mapper(locator, typology_df):
    air_conditioning_DB = pd.read_excel(
        locator.get_database_construction_standards(), 'HVAC_ASSEMBLIES')
    # define HVAC systems types
    prop_HVAC_df = typology_df.merge(air_conditioning_DB,
                                     left_on='STANDARD',
                                     right_on='STANDARD')
    # write to dbf file
    fields = [
        'Name', 'type_cs', 'type_hs', 'type_dhw', 'type_ctrl', 'type_vent',
        'heat_starts', 'heat_ends', 'cool_starts', 'cool_ends'
    ]
    dataframe_to_dbf(prop_HVAC_df[fields],
                     locator.get_building_air_conditioning())
Example #9
def architecture_mapper(locator, typology_df):
    architecture_DB = pd.read_excel(
        locator.get_database_construction_standards(), 'ENVELOPE_ASSEMBLIES')
    prop_architecture_df = typology_df.merge(architecture_DB,
                                             left_on='STANDARD',
                                             right_on='STANDARD')
    fields = [
        'Name', 'Hs_ag', 'Hs_bg', 'Ns', 'Es', 'void_deck', 'wwr_north',
        'wwr_west', 'wwr_east', 'wwr_south', 'type_cons', 'type_leak',
        'type_floor', 'type_part', 'type_base', 'type_roof', 'type_wall',
        'type_win', 'type_shade'
    ]
    dataframe_to_dbf(prop_architecture_df[fields],
                     locator.get_building_architecture())
Example #10
def calculate_age_file(zone_df, year_construction, age_output_path):
    """
    This script fills in the age.dbf file with one year of construction
    :param zone_df:
    :param year_construction:
    :param age_output_path:
    :return:
    """
    #create dataframe to fill in the data
    for column in COLUMNS_ZONE_AGE:
        if column == 'built':
            zone_df.loc[:, column] = year_construction
        else:
            zone_df.loc[:, column] = 0

    if year_construction is None:
        print(
            'Warning! You have not indicated a year of construction for the buildings, '
            'so we fall back on the data stored in OpenStreetMap (which might not be accurate at all). '
            'If we find no OSM data for a particular building, we take the median of the surroundings; '
            'if there is no data at all, we assume all buildings were constructed in the year 2000.'
        )
        list_of_columns = zone_df.columns
        if "start_date" not in list_of_columns:  # this field describes the construction year of buildings
            zone_df["start_date"] = 2000
            zone_df['REFERENCE'] = "CEA - assumption"
        else:
            # `x is np.nan` only matches the NaN singleton; pd.isna() catches
            # every missing value (pandas is imported as pd, as elsewhere)
            zone_df['REFERENCE'] = [
                "OSM - median" if pd.isna(x) else "OSM - as it is"
                for x in zone_df['start_date']
            ]

        # despite the "floors" naming (left over from a storey calculation),
        # these values are construction years
        data_floors_sum_with_nan = [
            np.nan if pd.isna(x) else int(x) for x in zone_df['start_date']
        ]
        data_osm_floors_joined = int(
            math.ceil(np.nanmedian(data_floors_sum_with_nan))
        )  # median, so we stay close to the worst case
        zone_df["built"] = [
            data_osm_floors_joined if pd.isna(x) else int(x)
            for x in data_floors_sum_with_nan
        ]
    else:
        zone_df['REFERENCE'] = "CEA - assumption"

    fields = ["Name"] + COLUMNS_ZONE_AGE + ['REFERENCE']
    age_dbf = zone_df[fields]

    dataframe_to_dbf(age_dbf, age_output_path)
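The same fill-with-median logic also fits in a few vectorized pandas calls; a sketch on toy data (values invented for illustration):

import math
import pandas as pd

zone = pd.DataFrame({'start_date': [1995, None, 2010, None]})
years = pd.to_numeric(zone['start_date'], errors='coerce')
# missing years take the ceiling of the median of the known ones
median_year = int(math.ceil(years.median()))
zone['built'] = years.fillna(median_year).astype(int)
zone['REFERENCE'] = years.isna().map({True: 'OSM - median', False: 'OSM - as it is'})
print(zone)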
Example #11
def calculate_typology_file(locator, zone_df, year_construction,
                            occupancy_type, occupancy_output_path):
    """
    This script fills in the occupancy.dbf file with one occupancy type
    :param zone_df:
    :param occupancy_type:
    :param occupancy_output_path:
    :return:
    """
    # calculate construction year
    typology_df = calculate_age(zone_df, year_construction)

    # calculate the most likely construction standard
    standard_database = pd.read_excel(
        locator.get_database_construction_standards(),
        sheet_name='STANDARD_DEFINITION')
    typology_df['STANDARD'] = calc_category(standard_database,
                                            typology_df['YEAR'].values)

    # Calculate the most likely use type
    typology_df['1ST_USE'] = 'MULTI_RES'
    typology_df['1ST_USE_R'] = 1.0
    typology_df['2ND_USE'] = "NONE"
    typology_df['2ND_USE_R'] = 0.0
    typology_df['3RD_USE'] = "NONE"
    typology_df['3RD_USE_R'] = 0.0
    if occupancy_type == "Get it from open street maps":
        # for OSM building/amenity types with a clear CEA use type, this use type is assigned
        in_categories = zone_df['category'].isin(
            OSM_BUILDING_CATEGORIES.keys())
        zone_df.loc[in_categories,
                    '1ST_USE'] = zone_df[in_categories]['category'].map(
                        OSM_BUILDING_CATEGORIES)

        # for un-conditioned OSM building categories without a clear CEA use type, "PARKING" is assigned
        if 'amenity' in zone_df.columns:
            in_unconditioned_categories = zone_df['category'].isin(
                OTHER_OSM_CATEGORIES_UNCONDITIONED) | zone_df['amenity'].isin(
                    OTHER_OSM_CATEGORIES_UNCONDITIONED)
        else:
            in_unconditioned_categories = zone_df['category'].isin(
                OTHER_OSM_CATEGORIES_UNCONDITIONED)
        zone_df.loc[in_unconditioned_categories, '1ST_USE'] = "PARKING"

    fields = COLUMNS_ZONE_TYPOLOGY
    dataframe_to_dbf(typology_df[fields + ['REFERENCE']],
                     occupancy_output_path)
Example #12
def indoor_comfort_mapper(list_uses, locator, occupant_densities,
                          building_typology_df):
    comfort_DB = pd.read_excel(locator.get_database_use_types_properties(),
                               'INDOOR_COMFORT')
    # define comfort
    prop_comfort_df = building_typology_df.merge(comfort_DB,
                                                 left_on='1ST_USE',
                                                 right_on='code')
    # write to dbf file
    fields = [
        'Name', 'Tcs_set_C', 'Ths_set_C', 'Tcs_setb_C', 'Ths_setb_C',
        'Ve_lpspax', 'RH_min_pc', 'RH_max_pc'
    ]
    prop_comfort_df_merged = calculate_average_multiuse(
        fields, prop_comfort_df, occupant_densities, list_uses, comfort_DB)
    dataframe_to_dbf(prop_comfort_df_merged[fields],
                     locator.get_building_comfort())
Example #13
def migrate_2_29_to_2_31(scenario):
    def lookup_standard(year, standards_df):
        matched_standards = standards_df[(standards_df.YEAR_START <= year)
                                         & (year <= standards_df.YEAR_END)]
        if len(matched_standards):
            # take the first standard whose year range contains the given year
            standard = matched_standards.iloc[0]
        else:
            raise ValueError(
                'Could not find a `STANDARD` in the databases to match the year `{}`. '
                'You can try adding it to the `CONSTRUCTION_STANDARDS` input database and try again.'
                .format(year))
        return standard.STANDARD

    def convert_occupancy(name, occupancy_dbf):
        row = occupancy_dbf[occupancy_dbf.Name == name].iloc[0]
        uses = set(row.to_dict().keys()) - {"Name", "REFERENCE"}
        # Python 3 removed sorted(cmp=...); sort by the use ratio instead
        uses = sorted(uses, key=lambda use: float(row[use]), reverse=True)
        result = {
            "1ST_USE": uses[0],
            "1ST_USE_R": float(row[uses[0]]),
            "2ND_USE": uses[1],
            "2ND_USE_R": float(row[uses[1]]),
            "3RD_USE": uses[2],
            "3RD_USE_R": float(row[uses[2]])
        }
        if pd.np.isclose(result["2ND_USE_R"], 0.0):
            result["1ST_USE_R"] = 1.0
            result["2ND_USE_R"] = 0.0
            result["3RD_USE_R"] = 0.0
            result["2ND_USE"] = "NONE"
            result["3RD_USE"] = "NONE"
        elif pd.np.isclose(result["3RD_USE_R"], 0.0):
            result["1ST_USE_R"] = 1.0 - result["2ND_USE_R"]
            result["3RD_USE_R"] = 0.0
            result["3RD_USE"] = "NONE"

        result["1ST_USE_R"] = 1.0 - result["2ND_USE_R"] - result["3RD_USE_R"]
        return result

    def merge_age_and_occupancy_to_typology(age_dbf, occupancy_dbf,
                                            standards_df):
        # merge age.dbf and occupancy.dbf to typology.dbf
        typology_dbf_columns = [
            "Name", "YEAR", "STANDARD", "1ST_USE", "1ST_USE_R", "2ND_USE",
            "2ND_USE_R", "3RD_USE", "3RD_USE_R"
        ]
        typology_rows = []
        for _, row in age_dbf.iterrows():
            typology_row = {
                "Name": row.Name,
                "YEAR": row.built,
                "STANDARD": lookup_standard(row.built, standards_df)
            }
            typology_row.update(convert_occupancy(row.Name, occupancy_dbf))
            typology_rows.append(typology_row)

        # DataFrame.append was removed in pandas 2.0; build the frame in one go
        return pd.DataFrame(typology_rows, columns=typology_dbf_columns)

    age_dbf_path = os.path.join(scenario, "inputs", "building-properties",
                                "age.dbf")
    occupancy_dbf_path = os.path.join(scenario, "inputs",
                                      "building-properties", "occupancy.dbf")

    age_df = dbf_to_dataframe(age_dbf_path)
    occupancy_df = dbf_to_dataframe(occupancy_dbf_path)

    locator = cea.inputlocator.InputLocator(scenario=scenario)
    standards_df = pd.read_excel(locator.get_database_construction_standards(),
                                 "STANDARD_DEFINITION")
    typology_df = merge_age_and_occupancy_to_typology(age_df, occupancy_df,
                                                      standards_df)

    print("- writing typology.dbf")
    dataframe_to_dbf(typology_df, locator.get_building_typology())
    print("- removing occupancy.dbf and age.dbf")
    os.remove(age_dbf_path)
    os.remove(occupancy_dbf_path)
    print(
        "- removing invalid input-tables (NOTE: run archetypes-mapper again)")
    for fname in {
            "supply_systems.dbf", "internal_loads.dbf", "indoor_comfort.dbf",
            "air_conditioning.dbf", "architecture.dbf"
    }:
        fpath = os.path.join(scenario, "inputs", "building-properties", fname)
        if os.path.exists(fpath):
            print("  - removing {fname}".format(fname=fname))
            os.remove(fpath)
    print("- done")
    print(
        "- NOTE: You'll need to run the archetypes-mapper tool after this migration!"
    )
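The ratio normalization inside convert_occupancy is easiest to see on a single toy row: the uses are sorted by their share, near-zero trailing uses collapse to NONE, and the first use absorbs the remainder so the three ratios always sum to 1.0. A self-contained illustration with hypothetical numbers:

import numpy as np

row = {'MULTI_RES': 0.7, 'OFFICE': 0.25, 'RETAIL': 0.05, 'SCHOOL': 0.0}
uses = sorted(row, key=lambda use: row[use], reverse=True)
first, second, third = uses[:3]
second_r, third_r = row[second], row[third]
if np.isclose(second_r, 0.0):
    second, third, second_r, third_r = 'NONE', 'NONE', 0.0, 0.0
elif np.isclose(third_r, 0.0):
    third, third_r = 'NONE', 0.0
first_r = 1.0 - second_r - third_r  # the first use absorbs the remainder
print(first, first_r, second, second_r, third, third_r)
# -> MULTI_RES 0.7 OFFICE 0.25 RETAIL 0.05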
Example #14
def retrofit_scenario_creator(locator_baseline, locator_retrofit, geometry_df, age, architecture, internal_loads, comfort, hvac,
                              supply, occupancy, data, keep_partial_matches):
    """
    This creates a new retrofit scenario, based on the criteria we have selected as True
    :return:
    """

    #confirm that the builings selected are part of the zone

    new_geometry = geometry_df.merge(data, on='Name')
    if new_geometry.empty and keep_partial_matches:
        raise ValueError("The keep partial matches flag is on, Still, there is not a single building matching any of "
                         "the criteria, please try other criteria / thresholds instead")


    new_geometry.to_file(locator_retrofit.get_zone_geometry(), driver='ESRI Shapefile')
    district = gdf.from_file(locator_baseline.get_surroundings_geometry())
    district.to_file(locator_retrofit.get_surroundings_geometry())
    dbf.dataframe_to_dbf(age.merge(data, on='Name'), locator_retrofit.get_building_age())
    dbf.dataframe_to_dbf(architecture.merge(data, on='Name'), locator_retrofit.get_building_architecture())
    dbf.dataframe_to_dbf(comfort.merge(data, on='Name'), locator_retrofit.get_building_comfort())
    dbf.dataframe_to_dbf(internal_loads.merge(data, on='Name'), locator_retrofit.get_building_internal())
    dbf.dataframe_to_dbf(hvac.merge(data, on='Name'), locator_retrofit.get_building_air_conditioning())
    dbf.dataframe_to_dbf(supply.merge(data, on='Name'), locator_retrofit.get_building_supply())
    dbf.dataframe_to_dbf(occupancy.merge(data, on='Name'), locator_retrofit.get_building_occupancy())
    shutil.copy2(locator_baseline.get_terrain(), locator_retrofit.get_terrain())
Example #15
def data_helper(locator, region, overwrite_technology_folder,
                update_architecture_dbf, update_HVAC_systems_dbf,
                update_indoor_comfort_dbf, update_internal_loads_dbf,
                update_supply_systems_dbf, update_schedule_operation_cea,
                buildings):
    """
    Algorithm to query building properties from the statistical database
    Archetypes_HVAC_properties.csv. For more info, check the integrated demand
    model of Fonseca et al. 2015, Appl. Energy.

    :param InputLocator locator: an InputLocator instance set to the scenario to work on
    :param boolean update_architecture_dbf: if True, update the construction and architecture properties.
    :param boolean update_indoor_comfort_dbf: if True, get properties about thermal comfort.
    :param boolean update_HVAC_systems_dbf: if True, get properties about types of HVAC systems, otherwise False.
    :param boolean update_internal_loads_dbf: if True, get properties about internal loads, otherwise False.

    The following files are created by this script, depending on which flags were set:

    - building_HVAC.dbf
        describes the queried properties of HVAC systems.

    - architecture.dbf
        describes the queried properties of architectural features

    - building_thermal.shp
        describes the queried thermal properties of buildings

    - indoor_comfort.shp
        describes the queried indoor comfort properties of buildings
    """
    # get technology database
    if overwrite_technology_folder:
        # copy all the region-specific archetypes to the scenario's technology folder
        get_technology_related_databases(locator, region)

    # get occupancy and age files
    building_occupancy_df = dbf_to_dataframe(locator.get_building_occupancy())
    building_age_df = dbf_to_dataframe(locator.get_building_age())

    # validate list of uses in case study
    list_uses = get_list_of_uses_in_case_study(building_occupancy_df)

    # get occupant densities from archetypes schedules
    occupant_densities = {}
    occ_densities = pd.read_excel(locator.get_archetypes_properties(),
                                  'INTERNAL_LOADS').set_index('Code')
    for use in list_uses:
        if occ_densities.loc[use, 'Occ_m2pax'] > 0.0:
            occupant_densities[use] = 1 / occ_densities.loc[use, 'Occ_m2pax']
        else:
            occupant_densities[use] = 0.0

    # prepare a dataframe to store results (only the names of the buildings)
    names_df = building_age_df[['Name']]

    # define main use:
    building_occupancy_df['mainuse'] = calc_mainuse(building_occupancy_df,
                                                    list_uses)

    # dataframe with joined data for categories
    categories_df = building_occupancy_df.merge(building_age_df, on='Name')

    # get properties about the construction and architecture
    if update_architecture_dbf:
        architecture_DB = pd.read_excel(locator.get_archetypes_properties(),
                                        'ARCHITECTURE')
        architecture_DB['Code'] = architecture_DB.apply(lambda x: calc_code(
            x['building_use'], x['year_start'], x['year_end'], x['standard']),
                                                        axis=1)
        categories_df['cat_built'] = calc_category(architecture_DB,
                                                   categories_df, 'built', 'C')
        retrofit_category = ['envelope', 'roof', 'windows']
        for category in retrofit_category:
            categories_df['cat_' + category] = calc_category(
                architecture_DB, categories_df, category, 'R')

        prop_architecture_df = get_prop_architecture(categories_df,
                                                     architecture_DB,
                                                     list_uses)

        # write to dbf file
        prop_architecture_df_merged = names_df.merge(prop_architecture_df,
                                                     on="Name")

        fields = [
            'Name', 'Hs_ag', 'Hs_bg', 'Ns', 'Es', 'void_deck', 'wwr_north',
            'wwr_west', 'wwr_east', 'wwr_south', 'type_cons', 'type_leak',
            'type_roof', 'type_wall', 'type_win', 'type_shade'
        ]

        dataframe_to_dbf(prop_architecture_df_merged[fields],
                         locator.get_building_architecture())

    # get properties about types of HVAC systems
    if update_HVAC_systems_dbf:
        construction_properties_hvac = pd.read_excel(
            locator.get_archetypes_properties(), 'HVAC')
        construction_properties_hvac[
            'Code'] = construction_properties_hvac.apply(
                lambda x: calc_code(x['building_use'], x['year_start'], x[
                    'year_end'], x['standard']),
                axis=1)

        categories_df['cat_HVAC'] = calc_category(construction_properties_hvac,
                                                  categories_df, 'HVAC', 'R')

        # define HVAC systems types
        prop_HVAC_df = categories_df.merge(construction_properties_hvac,
                                           left_on='cat_HVAC',
                                           right_on='Code')

        # write to dbf file
        fields = [
            'Name', 'type_cs', 'type_hs', 'type_dhw', 'type_ctrl', 'type_vent',
            'heat_starts', 'heat_ends', 'cool_starts', 'cool_ends'
        ]
        prop_HVAC_df_merged = names_df.merge(prop_HVAC_df, on="Name")
        dataframe_to_dbf(prop_HVAC_df_merged[fields],
                         locator.get_building_air_conditioning())

    if update_indoor_comfort_dbf:
        comfort_DB = pd.read_excel(locator.get_archetypes_properties(),
                                   'INDOOR_COMFORT')

        # define comfort
        prop_comfort_df = categories_df.merge(comfort_DB,
                                              left_on='mainuse',
                                              right_on='Code')

        # write to dbf file
        fields = [
            'Name', 'Tcs_set_C', 'Ths_set_C', 'Tcs_setb_C', 'Ths_setb_C',
            'Ve_lpspax', 'RH_min_pc', 'RH_max_pc'
        ]
        prop_comfort_df_merged = names_df.merge(prop_comfort_df, on="Name")
        prop_comfort_df_merged = calculate_average_multiuse(
            fields, prop_comfort_df_merged, occupant_densities, list_uses,
            comfort_DB)

        dataframe_to_dbf(prop_comfort_df_merged[fields],
                         locator.get_building_comfort())

    if update_internal_loads_dbf:
        internal_DB = pd.read_excel(locator.get_archetypes_properties(),
                                    'INTERNAL_LOADS')

        # define internal loads
        prop_internal_df = categories_df.merge(internal_DB,
                                               left_on='mainuse',
                                               right_on='Code')

        # write to dbf file
        fields = [
            'Name', 'Occ_m2pax', 'Qs_Wpax', 'X_ghpax', 'Ea_Wm2', 'El_Wm2',
            'Ed_Wm2', 'Qcre_Wm2', 'Vww_lpdpax', 'Vw_lpdpax', 'Qhpro_Wm2',
            'Qcpro_Wm2', 'Epro_Wm2'
        ]
        prop_internal_df_merged = names_df.merge(prop_internal_df, on="Name")
        prop_internal_df_merged = calculate_average_multiuse(
            fields, prop_internal_df_merged, occupant_densities, list_uses,
            internal_DB)

        dataframe_to_dbf(prop_internal_df_merged[fields],
                         locator.get_building_internal())

    if update_schedule_operation_cea:
        if buildings == []:
            buildings = locator.get_zone_building_names()
        calc_mixed_schedule(locator, building_occupancy_df, buildings)

    if update_supply_systems_dbf:
        supply_DB = pd.read_excel(locator.get_archetypes_properties(),
                                  'SUPPLY')
        supply_DB['Code'] = supply_DB.apply(lambda x: calc_code(
            x['building_use'], x['year_start'], x['year_end'], x['standard']),
                                            axis=1)

        categories_df['cat_supply'] = calc_category(supply_DB, categories_df,
                                                    'HVAC', 'R')

        # define HVAC systems types
        prop_supply_df = categories_df.merge(supply_DB,
                                             left_on='cat_supply',
                                             right_on='Code')

        # write to dbf file
        prop_supply_df_merged = names_df.merge(prop_supply_df, on="Name")
        fields = ['Name', 'type_cs', 'type_hs', 'type_dhw', 'type_el']
        dataframe_to_dbf(prop_supply_df_merged[fields],
                         locator.get_building_supply())
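The occupant-density loop near the top of data_helper is just a zero-guarded reciprocal of the "floor area per person" column; a vectorized sketch with toy values (pandas only):

import pandas as pd

occ_m2p = pd.Series({'MULTI_RES': 40.0, 'OFFICE': 14.0, 'PARKING': 0.0})
# people per m2 = 1 / (m2 per person); zero areas map to density 0
densities = occ_m2p.where(occ_m2p > 0).rdiv(1.0).fillna(0.0)
print(densities.to_dict())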
Example #16
def calc_score(static_params, dynamic_params):
    """
    This tool reduces the error between observed (real-life measured) and predicted (model output) values by changing some of the CEA inputs.
    Annual data is compared in terms of MBE, and monthly data in terms of NMBE and CvRMSE (following ASHRAE Guideline 14-2002).
    A new input folder with measurements has to be created, with one csv each for monthly and annual data provided as input to this tool.
    A new output csv is generated with the calibration results (iteration number, parameters tested and the resulting score metric).
    """
    ## define set of CEA inputs to be calibrated and initial guess values
    SEED = dynamic_params['SEED']
    # initialize the numpy random number generator once with the given seed
    np.random.seed(SEED)
    # each parameter below is perturbed per building with
    # np.random.randint(low=-30, high=30, size=number_of_buildings) / 100
    Hs_ag = dynamic_params['Hs_ag']
    Tcs_set_C = dynamic_params['Tcs_set_C']
    Es = dynamic_params['Es']
    Ns = dynamic_params['Ns']
    Occ_m2pax = dynamic_params['Occ_m2pax']
    Vww_lpdpax = dynamic_params['Vww_lpdpax']
    Ea_Wm2 = dynamic_params['Ea_Wm2']
    El_Wm2 = dynamic_params['El_Wm2']

    ##define fixed constant parameters (to be redefined by CEA config file)
    #Hs_ag = 0.15
    #Tcs_set_C = 28
    Tcs_setb_C = 40
    void_deck = 1
    height_bg = 0
    floors_bg = 0

    scenario_list = static_params['scenario_list']
    config = static_params['config']

    locators_of_scenarios = []
    measured_building_names_of_scenarios = []
    for scenario in scenario_list:
        config.scenario = scenario
        locator = cea.inputlocator.InputLocator(config.scenario)
        measured_building_names = get_measured_building_names(locator)
        modify_monthly_multiplier(locator, config, measured_building_names)

        # store for later use
        locators_of_scenarios.append(locator)
        measured_building_names_of_scenarios.append(measured_building_names)

        ## overwrite inputs with corresponding initial values

        # Changes and saves variables related to the architecture
        df_arch = dbf_to_dataframe(locator.get_building_architecture())
        number_of_buildings = df_arch.shape[0]
        Rand_it = np.random.randint(low=-30, high=30,
                                    size=number_of_buildings) / 100
        df_arch.Es = Es * (1 + Rand_it)
        df_arch.Ns = Ns * (1 + Rand_it)
        df_arch.Hs_ag = Hs_ag * (1 + Rand_it)
        df_arch.void_deck = void_deck
        dataframe_to_dbf(df_arch, locator.get_building_architecture())

        # Changes and saves variables related to internal loads
        df_intload = dbf_to_dataframe(locator.get_building_internal())
        df_intload.Occ_m2pax = Occ_m2pax * (1 + Rand_it)
        df_intload.Vww_lpdpax = Vww_lpdpax * (1 + Rand_it)
        df_intload.Ea_Wm2 = Ea_Wm2 * (1 + Rand_it)
        df_intload.El_Wm2 = El_Wm2 * (1 + Rand_it)
        dataframe_to_dbf(df_intload, locator.get_building_internal())

        #Changes and saves variables related to comfort
        df_comfort = dbf_to_dataframe(locator.get_building_comfort())
        df_comfort.Tcs_set_C = Tcs_set_C * (1 + Rand_it)
        df_comfort.Tcs_setb_C = Tcs_setb_C
        dataframe_to_dbf(df_comfort, locator.get_building_comfort())

        # Changes and saves variables related to zone
        df_zone = dbf_to_dataframe(locator.get_zone_geometry().split('.')[0] +
                                   '.dbf')
        df_zone.height_bg = height_bg
        df_zone.floors_bg = floors_bg
        dataframe_to_dbf(df_zone,
                         locator.get_zone_geometry().split('.')[0] + '.dbf')

        ## run building schedules and energy demand
        config.schedule_maker.buildings = measured_building_names
        schedule_maker.schedule_maker_main(locator, config)
        config.demand.buildings = measured_building_names
        demand_main.demand_calculation(locator, config)

    # calculate the score
    score = validation.validation(scenario_list=scenario_list,
                                  locators_of_scenarios=locators_of_scenarios,
                                  measured_building_names_of_scenarios=
                                  measured_building_names_of_scenarios)

    return score
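The perturbation pattern used throughout calc_score is worth isolating: with a fixed seed, the same +/-30% multipliers are drawn on every run, which is what makes a calibration iteration reproducible. A minimal numpy demonstration (values illustrative):

import numpy as np

np.random.seed(42)                      # fixed seed -> reproducible draws
rand_it = np.random.randint(low=-30, high=30, size=5) / 100
base_value = 14.0                       # e.g. an El_Wm2 guess
perturbed = base_value * (1 + rand_it)  # one perturbed value per building
print(rand_it)
print(perturbed)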
Example #17
def create_new_project(locator, config):
    # Local variables
    zone_geometry_path = config.create_new_project.zone
    surroundings_geometry_path = config.create_new_project.surroundings
    street_geometry_path = config.create_new_project.streets
    terrain_path = config.create_new_project.terrain
    typology_path = config.create_new_project.typology

    # import file
    zone, lat, lon = shapefile_to_WSG_and_UTM(zone_geometry_path)
    # verify if the input file is correct for CEA; if not, an exception will be raised
    verify_input_geometry_zone(zone)
    zone.to_file(locator.get_zone_geometry())

    # apply coordinate system of terrain into zone and save zone to disk.
    terrain = raster_to_WSG_and_UTM(terrain_path, lat, lon)
    driver = gdal.GetDriverByName('GTiff')
    verify_input_terrain(terrain)
    driver.CreateCopy(locator.get_terrain(), terrain)

    # now create the surroundings file if it does not exist
    if surroundings_geometry_path == '':
        print(
            "there is no surroundings file, we proceed to create it based on the geometry of your zone"
        )
        zone.to_file(locator.get_surroundings_geometry())
    else:
        # import file
        surroundings, _, _ = shapefile_to_WSG_and_UTM(
            surroundings_geometry_path)
        # verify if the input file is correct for CEA; if not, an exception will be raised
        # note: the original passed `zone` here, which looks like a copy-paste slip
        verify_input_geometry_surroundings(surroundings)
        # create new file
        surroundings.to_file(locator.get_surroundings_geometry())

    # now transfer the streets
    if street_geometry_path == '':
        print(
            "there is no street file; optimization of cooling networks won't be possible"
        )
    else:
        street, _, _ = shapefile_to_WSG_and_UTM(street_geometry_path)
        street.to_file(locator.get_street_network())

    ## create the typology file
    if typology_path == '':
        print(
            "there is no typology file, we proceed to create it based on the geometry of your zone"
        )
        zone = Gdf.from_file(zone_geometry_path).drop('geometry', axis=1)
        zone['STANDARD'] = 'T6'
        zone['YEAR'] = 2020
        zone['1ST_USE'] = 'MULTI_RES'
        zone['1ST_USE_R'] = 1.0
        zone['2ND_USE'] = "NONE"
        zone['2ND_USE_R'] = 0.0
        zone['3RD_USE'] = "NONE"
        zone['3RD_USE_R'] = 0.0
        dataframe_to_dbf(zone[COLUMNS_ZONE_TYPOLOGY],
                         locator.get_building_typology())
    else:
        # import file
        occupancy_file = dbf_to_dataframe(typology_path)
        occupancy_file_test = occupancy_file[COLUMNS_ZONE_TYPOLOGY]
        # verify if the input file is correct for CEA; if not, an exception will be raised
        verify_input_typology(occupancy_file_test)
        # create new file
        copyfile(typology_path, locator.get_building_typology())

    # add other folders by calling the locator
    locator.get_measurements()
    locator.get_input_network_folder("DH", "")
    locator.get_input_network_folder("DC", "")
    locator.get_weather_folder()
Example #18
def create_new_project(locator, config):
    # Local variables
    zone_geometry_path = config.create_new_project.zone
    district_geometry_path = config.create_new_project.district
    street_geometry_path = config.create_new_project.streets
    terrain_path = config.create_new_project.terrain
    occupancy_path = config.create_new_project.occupancy
    age_path = config.create_new_project.age

    # verify files (if they have the columns cea needs) and then save to new project location
    zone, lat, lon = shapefile_to_WSG_and_UTM(zone_geometry_path)
    try:
        zone_test = zone[COLUMNS_ZONE_GEOMETRY]
    except KeyError:  # selecting missing columns raises KeyError, not ValueError
        print("one or more columns in the input file are not compatible with cea, please ensure the column" +
              " names comply with:", COLUMNS_ZONE_GEOMETRY)
    else:
        # apply coordinate system of terrain into zone and save zone to disk.
        terrain = raster_to_WSG_and_UTM(terrain_path, lat, lon)
        zone.to_file(locator.get_zone_geometry())
        driver = gdal.GetDriverByName('GTiff')
        driver.CreateCopy(locator.get_terrain(), terrain)

    # now create the district file if it does not exist
    if district_geometry_path == '':
        print("there is no district file, we proceed to create it based on the geometry of your zone")
        zone.to_file(locator.get_district_geometry())
    else:
        district, _, _ = shapefile_to_WSG_and_UTM(district_geometry_path)
        try:
            district_test = district[COLUMNS_DISTRICT_GEOMETRY]
        except KeyError:  # selecting missing columns raises KeyError, not ValueError
            print("one or more columns in the input file are not compatible with cea, please ensure the column" +
                  " names comply with:", COLUMNS_DISTRICT_GEOMETRY)
        else:
            district.to_file(locator.get_district_geometry())

    # now transfer the streets
    if street_geometry_path == '':
        print("there is no street file, optimizaiton of cooling networks wont be possible")
    else:
        street, _, _ = shapefile_to_WSG_and_UTM(street_geometry_path)
        street.to_file(locator.get_street_network())

    ## create occupancy file and year file
    if occupancy_path == '':
        print("there is no occupancy file, we proceed to create it based on the geometry of your zone")
        zone = Gdf.from_file(zone_geometry_path).drop('geometry', axis=1)
        for field in COLUMNS_ZONE_OCCUPANCY:
            zone[field] = 0.0
        zone[COLUMNS_ZONE_OCCUPANCY[:2]] = 0.5  # adding 0.5 area use to the first two uses
        dataframe_to_dbf(zone[['Name'] + COLUMNS_ZONE_OCCUPANCY], locator.get_building_occupancy())
    else:
        try:
            occupancy_file = dbf_to_dataframe(occupancy_path)
            occupancy_file_test = occupancy_file[['Name']+COLUMNS_ZONE_OCCUPANCY]
            copyfile(occupancy_path, locator.get_building_occupancy())
        except KeyError:  # selecting missing columns raises KeyError, not ValueError
            print("one or more columns in the input file are not compatible with cea, please ensure the column" +
                  " names comply with:", COLUMNS_ZONE_OCCUPANCY)

    ## create age file
    if age_path == '':
        print("there is no file with the age of the buildings, we proceed to create it based on the geometry of your zone")
        zone = Gdf.from_file(zone_geometry_path).drop('geometry', axis=1)
        for field in COLUMNS_ZONE_AGE:
            zone[field] = 0.0
        zone['built'] = 2017  # adding year of construction
        dataframe_to_dbf(zone[['Name'] + COLUMNS_ZONE_AGE], locator.get_building_age())
    else:
        try:
            age_file = dbf_to_dataframe(age_path)
            age_file_test = age_file[['Name']+COLUMNS_ZONE_AGE]
            copyfile(age_path, locator.get_building_age())
        except KeyError:  # selecting missing columns raises KeyError, not ValueError
            print("one or more columns in the input file are not compatible with cea, please ensure the column" +
                  " names comply with:", COLUMNS_ZONE_AGE)


    # add other folders by calling the locator
    locator.get_measurements()
    locator.get_input_network_folder("DH","")
    locator.get_input_network_folder("DC","")
    locator.get_weather_folder()
Example #19
def migrate_3_22_to_3_22_1(scenario):
    '''
    Renames columns in `indoor_comfort.dbf` and `internal_loads.dbf` to remove the use of "pax" meaning "people".
    '''

    INDOOR_COMFORT_COLUMNS = {'Ve_lpspax': 'Ve_lsp'}
    INTERNAL_LOADS_COLUMNS = {
        'Occ_m2pax': 'Occ_m2p',
        'Qs_Wpax': 'Qs_Wp',
        'Vw_lpdpax': 'Vw_ldp',
        'Vww_lpdpax': 'Vww_ldp',
        'X_ghpax': 'X_ghp'
    }
    OCCUPANCY_COLUMNS = {'people_pax': 'people_p'}

    if indoor_comfort_is_3_22(scenario):
        # import building properties
        indoor_comfort = dbf_to_dataframe(
            os.path.join(scenario, 'inputs', 'building-properties',
                         'indoor_comfort.dbf'))
        # make a backup copy of original data for user's own reference
        os.rename(
            os.path.join(scenario, 'inputs', 'building-properties',
                         'indoor_comfort.dbf'),
            os.path.join(scenario, 'inputs', 'building-properties',
                         'indoor_comfort_original.dbf'))
        # rename columns containing "pax"
        indoor_comfort.rename(columns=INDOOR_COMFORT_COLUMNS, inplace=True)
        # export dataframes to dbf files
        print("- writing indoor_comfort.dbf")
        dataframe_to_dbf(
            indoor_comfort,
            os.path.join(scenario, 'inputs', 'building-properties',
                         'indoor_comfort.dbf'))

    if internal_loads_is_3_22(scenario):
        # import building properties
        internal_loads = dbf_to_dataframe(
            os.path.join(scenario, 'inputs', 'building-properties',
                         'internal_loads.dbf'))
        # make a backup copy of original data for user's own reference
        os.rename(
            os.path.join(scenario, 'inputs', 'building-properties',
                         'internal_loads.dbf'),
            os.path.join(scenario, 'inputs', 'building-properties',
                         'internal_loads_original.dbf'))
        # rename columns containing "pax"
        internal_loads.rename(columns=INTERNAL_LOADS_COLUMNS, inplace=True)
        # export dataframes to dbf files
        print("- writing internal_loads.dbf")
        dataframe_to_dbf(
            internal_loads,
            os.path.join(scenario, 'inputs', 'building-properties',
                         'internal_loads.dbf'))

    # import building properties
    use_type_properties = pd.read_excel(os.path.join(
        scenario, 'inputs', 'technology', 'archetypes', 'use_types',
        'USE_TYPE_PROPERTIES.xlsx'),
                                        sheet_name=None)
    if any(col in use_type_properties['INTERNAL_LOADS'].columns
           for col in INTERNAL_LOADS_COLUMNS) or any(
               col in use_type_properties['INDOOR_COMFORT'].columns
               for col in INDOOR_COMFORT_COLUMNS):
        os.rename(
            os.path.join(scenario, 'inputs', 'technology', 'archetypes',
                         'use_types', 'USE_TYPE_PROPERTIES.xlsx'),
            os.path.join(scenario, 'inputs', 'technology', 'archetypes',
                         'use_types', 'USE_TYPE_PROPERTIES_original.xlsx'))
        # rename columns containing "pax"
        use_type_properties['INDOOR_COMFORT'].rename(
            columns=INDOOR_COMFORT_COLUMNS, inplace=True)
        use_type_properties['INTERNAL_LOADS'].rename(
            columns=INTERNAL_LOADS_COLUMNS, inplace=True)
        # export dataframes to dbf files
        print("-writing USE_TYPE_PROPERTIES.xlsx")
        with pd.ExcelWriter(
                os.path.join(scenario, 'inputs', 'technology', 'archetypes',
                             'use_types',
                             'USE_TYPE_PROPERTIES.xlsx')) as writer1:
            for sheet_name in use_type_properties.keys():
                use_type_properties[sheet_name].to_excel(writer1,
                                                         sheet_name=sheet_name,
                                                         index=False)
    if output_occupancy_is_3_22(scenario):
        # if occupancy schedule files are found in the outputs, these are also renamed
        print("-writing schedules in ./outputs/data/occupancy")
        for file_name in os.listdir(
                os.path.join(scenario, 'outputs', 'data', 'occupancy')):
            schedule_df = pd.read_csv(
                os.path.join(scenario, 'outputs', 'data', 'occupancy',
                             file_name))
            if 'people_pax' in schedule_df.columns:
                os.rename(
                    os.path.join(scenario, 'outputs', 'data', 'occupancy',
                                 file_name),
                    os.path.join(
                        scenario, 'outputs', 'data', 'occupancy',
                        file_name.split('.')[0] + '_original.' +
                        file_name.split('.')[1]))
                schedule_df.rename(columns=OCCUPANCY_COLUMNS, inplace=True)
                # export the dataframe back to csv; index=False avoids adding
                # a spurious index column on rewrite
                schedule_df.to_csv(
                    os.path.join(scenario, 'outputs', 'data', 'occupancy',
                                 file_name), index=False)

    print("- done")
Example #20
def rename_dbf_file(path, pk, old, new):
    df = dbf.dbf_to_dataframe(path)
    df.loc[df[pk] == old, pk] = new
    dbf.dataframe_to_dbf(df, path)
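rename_dbf_file is a small read-modify-write helper: load the DBF, rewrite every row whose primary-key column equals the old value, and save the file in place. A hypothetical invocation (the path and names are invented for illustration):

# rename building 'B001' to 'B1000' in a scenario's typology file
rename_dbf_file('inputs/building-properties/typology.dbf',
                pk='Name', old='B001', new='B1000')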
Example #21
def create_new_project(locator, config):
    # Local variables
    zone_geometry_path = config.create_new_project.zone
    district_geometry_path = config.create_new_project.district
    street_geometry_path = config.create_new_project.streets
    terrain_path = config.create_new_project.terrain
    occupancy_path = config.create_new_project.occupancy
    age_path = config.create_new_project.age

    # import file
    zone, lat, lon = shapefile_to_WSG_and_UTM(zone_geometry_path)
    # verify if the input file is correct for CEA; if not, an exception will be raised
    verify_input_geometry_zone(zone)
    zone.to_file(locator.get_zone_geometry())


    # apply coordinate system of terrain into zone and save zone to disk.
    terrain = raster_to_WSG_and_UTM(terrain_path, lat, lon)
    driver = gdal.GetDriverByName('GTiff')
    verify_input_terrain(driver, locator.get_terrain(), terrain)
    driver.CreateCopy(locator.get_terrain(), terrain)

    # now create the district file if it does not exist
    if district_geometry_path == '':
        print("there is no district file, we proceed to create it based on the geometry of your zone")
        zone.to_file(locator.get_district_geometry())
    else:
        # import file
        district, _, _ = shapefile_to_WSG_and_UTM(district_geometry_path)
        # verify if the input file is correct for CEA; if not, an exception will be raised
        # note: the original passed `zone` here, which looks like a copy-paste slip
        verify_input_geometry_district(district)
        # create new file
        district.to_file(locator.get_district_geometry())

    # now transfer the streets
    if street_geometry_path == '':
        print("there is no street file, optimizaiton of cooling networks wont be possible")
    else:
        street, _, _ = shapefile_to_WSG_and_UTM(street_geometry_path)
        street.to_file(locator.get_street_network())

    ## create occupancy file and year file
    if occupancy_path == '':
        print("there is no occupancy file, we proceed to create it based on the geometry of your zone")
        zone = Gdf.from_file(zone_geometry_path).drop('geometry', axis=1)
        for field in COLUMNS_ZONE_OCCUPANCY:
            zone[field] = 0.0
        zone[COLUMNS_ZONE_OCCUPANCY[:2]] = 0.5  # adding 0.5 area use to the first two uses
        dataframe_to_dbf(zone[['Name'] + COLUMNS_ZONE_OCCUPANCY], locator.get_building_occupancy())
    else:
        # import file
        occupancy_file = dbf_to_dataframe(occupancy_path)
        occupancy_file_test = occupancy_file[['Name'] + COLUMNS_ZONE_OCCUPANCY]
        # verify if the input file is correct for CEA; if not, an exception will be raised
        verify_input_occupancy(occupancy_file_test)
        # create new file
        copyfile(occupancy_path, locator.get_building_occupancy())

    ## create age file
    if age_path == '':
        print(
            "there is no file with the age of the buildings, we proceed to create it based on the geometry of your zone")
        zone = Gdf.from_file(zone_geometry_path).drop('geometry', axis=1)
        for field in COLUMNS_ZONE_AGE:
            zone[field] = 0.0
        zone['built'] = 2017  # adding year of construction
        dataframe_to_dbf(zone[['Name'] + COLUMNS_ZONE_AGE], locator.get_building_age())
    else:
        # import file
        age_file = dbf_to_dataframe(age_path)
        age_file_test = age_file[['Name'] + COLUMNS_ZONE_AGE]
        # verify if the input file is correct for CEA; if not, an exception will be raised
        verify_input_age(age_file_test)
        # create new file
        copyfile(age_path, locator.get_building_age())

    # add other folders by calling the locator
    locator.get_measurements()
    locator.get_input_network_folder("DH", "")
    locator.get_input_network_folder("DC", "")
    locator.get_weather_folder()
Example #22
def data_helper(locator, region, overwrite_technology_folder,
                update_architecture_dbf, update_technical_systems_dbf,
                update_indoor_comfort_dbf, update_internal_loads_dbf,
                update_supply_systems_dbf, update_restrictions_dbf):
    """
    Algorithm to query building properties from the statistical database
    Archetypes_HVAC_properties.csv. For more info, check the integrated demand
    model of Fonseca et al. 2015, Appl. Energy.

    :param InputLocator locator: an InputLocator instance set to the scenario to work on
    :param boolean update_architecture_dbf: if True, update the construction and architecture properties.
    :param boolean update_indoor_comfort_dbf: if True, get properties about thermal comfort.
    :param boolean update_technical_systems_dbf: if True, get properties about types of HVAC systems, otherwise False.
    :param boolean update_internal_loads_dbf: if True, get properties about internal loads, otherwise False.

    The following files are created by this script, depending on which flags were set:

    - building_HVAC.dbf
        describes the queried properties of HVAC systems.

    - architecture.dbf
        describes the queried properties of architectural features

    - building_thermal.shp
        describes the queried thermal properties of buildings

    - indoor_comfort.shp
        describes the queried indoor comfort properties of buildings
    """
    # get technology database
    if overwrite_technology_folder:
        # copy all the region-specific archetypes to the scenario's technology folder
        get_technology_related_databases(locator, region)

    # get occupancy and age files
    building_occupancy_df = dbf_to_dataframe(locator.get_building_occupancy())
    columns = building_occupancy_df.columns

    #validate list of uses
    list_uses = []
    for name in columns:
        if name in COLUMNS_ZONE_OCCUPANCY:
            list_uses.append(name)  # append valid uses
        elif name in {'Name', 'REFERENCE'}:
            pass  # do nothing with 'Name' and 'Reference'
        else:
            raise InvalidOccupancyNameException(
                'occupancy.dbf has use "{}". This use is not part of the database. Change occupancy.dbf'
                ' or customize archetypes database AND databases_verification.py.'
                .format(name))

    building_age_df = dbf_to_dataframe(locator.get_building_age())

    # get occupant densities from archetypes schedules
    occupant_densities = {}
    for use in list_uses:
        archetypes_schedules = pd.read_excel(
            locator.get_archetypes_schedules(), use, index_col=0).T
        area_per_occupant = archetypes_schedules['density'].values[:1][0]
        if area_per_occupant > 0:
            occupant_densities[use] = 1 / area_per_occupant
        else:
            occupant_densities[use] = 0

    # prepare a dataframe to store results (only the names of the buildings)
    names_df = building_age_df[['Name']]

    # define main use:
    building_occupancy_df['mainuse'] = calc_mainuse(building_occupancy_df,
                                                    list_uses)

    # dataframe with joined data for categories
    categories_df = building_occupancy_df.merge(building_age_df, on='Name')

    # get properties about the construction and architecture
    if update_architecture_dbf:
        architecture_DB = pd.read_excel(locator.get_archetypes_properties(),
                                        'ARCHITECTURE')
        architecture_DB['Code'] = architecture_DB.apply(lambda x: calc_code(
            x['building_use'], x['year_start'], x['year_end'], x['standard']),
                                                        axis=1)
        categories_df['cat_built'] = calc_category(architecture_DB,
                                                   categories_df, 'built', 'C')
        retrofit_category = ['envelope', 'roof', 'windows']
        for category in retrofit_category:
            categories_df['cat_' + category] = calc_category(
                architecture_DB, categories_df, category, 'R')

        prop_architecture_df = get_prop_architecture(categories_df,
                                                     architecture_DB,
                                                     list_uses)

        # write to dbf file
        prop_architecture_df_merged = names_df.merge(prop_architecture_df,
                                                     on="Name")

        fields = [
            'Name', 'Hs', 'Ns', 'Es', 'void_deck', 'wwr_north', 'wwr_west',
            'wwr_east', 'wwr_south', 'type_cons', 'type_leak', 'type_roof',
            'type_wall', 'type_win', 'type_shade'
        ]

        dataframe_to_dbf(prop_architecture_df_merged[fields],
                         locator.get_building_architecture())

    # get properties about types of HVAC systems
    if update_technical_systems_dbf:
        construction_properties_hvac = pd.read_excel(
            locator.get_archetypes_properties(), 'HVAC')
        construction_properties_hvac[
            'Code'] = construction_properties_hvac.apply(
                lambda x: calc_code(x['building_use'], x['year_start'], x[
                    'year_end'], x['standard']),
                axis=1)

        categories_df['cat_HVAC'] = calc_category(construction_properties_hvac,
                                                  categories_df, 'HVAC', 'R')

        # define HVAC systems types
        prop_HVAC_df = categories_df.merge(construction_properties_hvac,
                                           left_on='cat_HVAC',
                                           right_on='Code')

        # write to dbf file
        prop_HVAC_df_merged = names_df.merge(prop_HVAC_df, on="Name")
        fields = [
            'Name', 'type_cs', 'type_hs', 'type_dhw', 'type_ctrl', 'type_vent'
        ]
        dataframe_to_dbf(prop_HVAC_df_merged[fields],
                         locator.get_building_hvac())

    if update_indoor_comfort_dbf:
        comfort_DB = pd.read_excel(locator.get_archetypes_properties(),
                                   'INDOOR_COMFORT')

        # define comfort
        prop_comfort_df = categories_df.merge(comfort_DB,
                                              left_on='mainuse',
                                              right_on='Code')

        # write to dbf file
        prop_comfort_df_merged = names_df.merge(prop_comfort_df, on="Name")
        prop_comfort_df_merged = calculate_average_multiuse(
            prop_comfort_df_merged, occupant_densities, list_uses, comfort_DB)
        fields = [
            'Name', 'Tcs_set_C', 'Ths_set_C', 'Tcs_setb_C', 'Ths_setb_C',
            'Ve_lps', 'rhum_min_pc', 'rhum_max_pc'
        ]
        dataframe_to_dbf(prop_comfort_df_merged[fields],
                         locator.get_building_comfort())

    if update_internal_loads_dbf:
        internal_DB = pd.read_excel(locator.get_archetypes_properties(),
                                    'INTERNAL_LOADS')

        # define internal loads
        prop_internal_df = categories_df.merge(internal_DB,
                                               left_on='mainuse',
                                               right_on='Code')

        # write to dbf file
        prop_internal_df_merged = names_df.merge(prop_internal_df, on="Name")
        prop_internal_df_merged = calculate_average_multiuse(
            prop_internal_df_merged, occupant_densities, list_uses,
            internal_DB)
        fields = [
            'Name', 'Qs_Wp', 'X_ghp', 'Ea_Wm2', 'El_Wm2', 'Epro_Wm2',
            'Qcre_Wm2', 'Ed_Wm2', 'Vww_lpd', 'Vw_lpd', 'Qhpro_Wm2', 'Qcpro_Wm2'
        ]
        dataframe_to_dbf(prop_internal_df_merged[fields],
                         locator.get_building_internal())

    if update_supply_systems_dbf:
        supply_DB = pd.read_excel(locator.get_archetypes_properties(),
                                  'SUPPLY')
        supply_DB['Code'] = supply_DB.apply(lambda x: calc_code(
            x['building_use'], x['year_start'], x['year_end'], x['standard']),
                                            axis=1)

        categories_df['cat_supply'] = calc_category(supply_DB, categories_df,
                                                    'HVAC', 'R')

        # define supply system types
        prop_supply_df = categories_df.merge(supply_DB,
                                             left_on='cat_supply',
                                             right_on='Code')

        # write to dbf file
        prop_supply_df_merged = names_df.merge(prop_supply_df, on="Name")
        fields = ['Name', 'type_cs', 'type_hs', 'type_dhw', 'type_el']
        dataframe_to_dbf(prop_supply_df_merged[fields],
                         locator.get_building_supply())

    if update_restrictions_dbf:
        new_names_df = names_df.copy()  # copy so the shared names_df is not modified
        COLUMNS_ZONE_RESTRICTIONS = [
            'SOLAR', 'GEOTHERMAL', 'WATERBODY', 'NATURALGAS', 'BIOGAS'
        ]
        for field in COLUMNS_ZONE_RESTRICTIONS:
            new_names_df[field] = 0
        dataframe_to_dbf(new_names_df[['Name'] + COLUMNS_ZONE_RESTRICTIONS],
                         locator.get_building_restrictions())
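
Note on the weighting above: calculate_average_multiuse blends archetype values for buildings with several uses. A minimal, self-contained sketch of the idea, assuming (as an illustration, not the verified CEA implementation) that per-floor-area properties such as El_Wm2 are weighted by use ratio alone, while per-person properties such as Qs_Wp are weighted by use ratio times occupant density:

import pandas as pd

# hypothetical archetype values per use
archetypes = pd.DataFrame({
    'code': ['MULTI_RES', 'OFFICE'],
    'El_Wm2': [2.7, 15.9],   # per floor area -> weight by use ratio
    'Qs_Wp': [70.0, 100.0],  # per person -> weight by ratio * occupant density
}).set_index('code')

use_ratios = {'MULTI_RES': 0.7, 'OFFICE': 0.3}
occupant_densities = {'MULTI_RES': 1 / 40.0, 'OFFICE': 1 / 14.0}  # people per m2

# floor-area-weighted average for a per-m2 property
El_Wm2 = sum(r * archetypes.loc[use, 'El_Wm2'] for use, r in use_ratios.items())

# occupant-weighted average for a per-person property
total_density = sum(r * occupant_densities[use] for use, r in use_ratios.items())
Qs_Wp = sum(r * occupant_densities[use] * archetypes.loc[use, 'Qs_Wp']
            for use, r in use_ratios.items()) / total_density

print(round(El_Wm2, 2), round(Qs_Wp, 1))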
Example #23
def data_helper(locator, config, prop_architecture_flag, prop_hvac_flag,
                prop_comfort_flag, prop_internal_loads_flag,
                prop_supply_systems_flag, prop_restrictions_flag):
    """
    algorithm to query building properties from statistical database
    Archetypes_HVAC_properties.csv. for more info check the integrated demand
    model of Fonseca et al. 2015. Appl. energy.

    :param InputLocator locator: an InputLocator instance set to the scenario to work on
    :param boolean prop_architecture_flag: if True, get properties about the construction and architecture.
    :param boolean prop_comfort_flag: if True, get properties about thermal comfort.
    :param boolean prop_hvac_flag: if True, get properties about types of HVAC systems, otherwise False.
    :param boolean prop_internal_loads_flag: if True, get properties about internal loads, otherwise False.

    The following files are created by this script, depending on which flags were set:

    - building_HVAC: .dbf
        describes the queried properties of HVAC systems.

    - architecture.dbf
        describes the queried properties of architectural features

    - building_thermal: .shp
        describes the queried thermal properties of buildings

    - indoor_comfort.shp
        describes the queried thermal properties of buildings
    """

    # get occupancy and age files
    building_occupancy_df = dbf_to_dataframe(locator.get_building_occupancy())
    list_uses = list(building_occupancy_df.drop(
        ['Name'], axis=1).columns)  # parking excluded in U-Values
    building_age_df = dbf_to_dataframe(locator.get_building_age())

    # get occupant densities from archetypes schedules
    occupant_densities = {}
    for use in list_uses:
        archetypes_schedules = pd.read_excel(
            locator.get_archetypes_schedules(config.region), use).T
        area_per_occupant = archetypes_schedules['density'].values[0]
        if area_per_occupant > 0:
            occupant_densities[use] = 1 / area_per_occupant
        else:
            occupant_densities[use] = 0

    # prepare a dataframe with only the building names to store results
    names_df = building_age_df[['Name']]

    # define main use:
    building_occupancy_df['mainuse'] = calc_mainuse(building_occupancy_df,
                                                    list_uses)

    # dataframe with joined data for categories
    categories_df = building_occupancy_df.merge(building_age_df, on='Name')

    # get properties about the construction and architecture
    if prop_architecture_flag:
        architecture_DB = get_database(
            locator.get_archetypes_properties(config.region), 'ARCHITECTURE')
        architecture_DB['Code'] = architecture_DB.apply(lambda x: calc_code(
            x['building_use'], x['year_start'], x['year_end'], x['standard']),
                                                        axis=1)
        categories_df['cat_built'] = calc_category(architecture_DB,
                                                   categories_df, 'built', 'C')
        retrofit_category = ['envelope', 'roof', 'windows']
        for category in retrofit_category:
            categories_df['cat_' + category] = calc_category(
                architecture_DB, categories_df, category, 'R')

        prop_architecture_df = get_prop_architecture(categories_df,
                                                     architecture_DB,
                                                     list_uses)

        # write to dbf file
        prop_architecture_df_merged = names_df.merge(prop_architecture_df,
                                                     on="Name")

        fields = [
            'Name', 'Hs', 'void_deck', 'wwr_north', 'wwr_west', 'wwr_east',
            'wwr_south', 'type_cons', 'type_leak', 'type_roof', 'type_wall',
            'type_win', 'type_shade'
        ]

        dataframe_to_dbf(prop_architecture_df_merged[fields],
                         locator.get_building_architecture())

    # get properties about types of HVAC systems
    if prop_hvac_flag:
        HVAC_DB = get_database(
            locator.get_archetypes_properties(config.region), 'HVAC')
        HVAC_DB['Code'] = HVAC_DB.apply(lambda x: calc_code(
            x['building_use'], x['year_start'], x['year_end'], x['standard']),
                                        axis=1)

        categories_df['cat_HVAC'] = calc_category(HVAC_DB, categories_df,
                                                  'HVAC', 'R')

        # define HVAC systems types
        prop_HVAC_df = categories_df.merge(HVAC_DB,
                                           left_on='cat_HVAC',
                                           right_on='Code')

        # write to dbf file
        prop_HVAC_df_merged = names_df.merge(prop_HVAC_df, on="Name")
        fields = [
            'Name', 'type_cs', 'type_hs', 'type_dhw', 'type_ctrl', 'type_vent'
        ]
        dataframe_to_dbf(prop_HVAC_df_merged[fields],
                         locator.get_building_hvac())

    if prop_comfort_flag:
        comfort_DB = get_database(
            locator.get_archetypes_properties(config.region), 'INDOOR_COMFORT')

        # define comfort
        prop_comfort_df = categories_df.merge(comfort_DB,
                                              left_on='mainuse',
                                              right_on='Code')

        # write to dbf file
        prop_comfort_df_merged = names_df.merge(prop_comfort_df, on="Name")
        prop_comfort_df_merged = calculate_average_multiuse(
            prop_comfort_df_merged, occupant_densities, list_uses, comfort_DB)
        fields = [
            'Name', 'Tcs_set_C', 'Ths_set_C', 'Tcs_setb_C', 'Ths_setb_C',
            'Ve_lps', 'rhum_min_pc', 'rhum_max_pc'
        ]
        dataframe_to_dbf(prop_comfort_df_merged[fields],
                         locator.get_building_comfort())

    if prop_internal_loads_flag:
        internal_DB = get_database(
            locator.get_archetypes_properties(config.region), 'INTERNAL_LOADS')

        # define internal loads
        prop_internal_df = categories_df.merge(internal_DB,
                                               left_on='mainuse',
                                               right_on='Code')

        # write to dbf file
        prop_internal_df_merged = names_df.merge(prop_internal_df, on="Name")
        prop_internal_df_merged = calculate_average_multiuse(
            prop_internal_df_merged, occupant_densities, list_uses,
            internal_DB)
        fields = [
            'Name', 'Qs_Wp', 'X_ghp', 'Ea_Wm2', 'El_Wm2', 'Epro_Wm2',
            'Qcre_Wm2', 'Ed_Wm2', 'Vww_lpd', 'Vw_lpd', 'Qhpro_Wm2'
        ]
        dataframe_to_dbf(prop_internal_df_merged[fields],
                         locator.get_building_internal())

    if prop_supply_systems_flag:
        supply_DB = get_database(
            locator.get_archetypes_properties(config.region), 'SUPPLY')
        supply_DB['Code'] = supply_DB.apply(lambda x: calc_code(
            x['building_use'], x['year_start'], x['year_end'], x['standard']),
                                            axis=1)

        categories_df['cat_supply'] = calc_category(supply_DB, categories_df,
                                                    'HVAC', 'R')

        # define supply system types
        prop_supply_df = categories_df.merge(supply_DB,
                                             left_on='cat_supply',
                                             right_on='Code')

        # write to dbf file
        prop_supply_df_merged = names_df.merge(prop_supply_df, on="Name")
        fields = ['Name', 'type_cs', 'type_hs', 'type_dhw', 'type_el']
        dataframe_to_dbf(prop_supply_df_merged[fields],
                         locator.get_building_supply())

    if prop_restrictions_flag:
        restrictions_df = names_df.copy()  # copy so the shared names_df is not modified
        COLUMNS_ZONE_RESTRICTIONS = [
            'SOLAR', 'GEOTHERMAL', 'WATERBODY', 'NATURALGAS', 'BIOGAS'
        ]
        for field in COLUMNS_ZONE_RESTRICTIONS:
            restrictions_df[field] = 0
        dataframe_to_dbf(restrictions_df[['Name'] + COLUMNS_ZONE_RESTRICTIONS],
                         locator.get_building_restrictions())
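
Both variants above derive the 'mainuse' column through calc_mainuse. Assuming it simply picks, per building, the use column with the largest floor-area share (a plausible reading of the call site, not the verified source), the core is a one-line idxmax:

import pandas as pd

occupancy = pd.DataFrame({
    'Name': ['B001', 'B002'],
    'MULTI_RES': [0.8, 0.2],
    'OFFICE': [0.2, 0.5],
    'RETAIL': [0.0, 0.3],
})
list_uses = ['MULTI_RES', 'OFFICE', 'RETAIL']

# per row, take the use with the largest share as the main use
occupancy['mainuse'] = occupancy[list_uses].idxmax(axis=1)
print(occupancy[['Name', 'mainuse']])  # B001 -> MULTI_RES, B002 -> OFFICE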
Example #24
def thermal_network_simplified(locator, config, network_name):
    # local variables
    network_type = config.thermal_network.network_type
    min_head_substation_kPa = config.thermal_network.min_head_substation
    thermal_transfer_unit_design_head_m = min_head_substation_kPa * 1000 / M_WATER_TO_PA
    coefficient_friction_hazen_williams = config.thermal_network.hw_friction_coefficient
    velocity_ms = config.thermal_network.peak_load_velocity
    fraction_equivalent_length = config.thermal_network.equivalent_length_factor
    peak_load_percentage = config.thermal_network.peak_load_percentage

    # GET INFORMATION ABOUT THE NETWORK
    edge_df, node_df = get_thermal_network_from_shapefile(locator, network_type, network_name)

    # GET INFORMATION ABOUT THE DEMAND OF BUILDINGS AND CONNECT TO THE NODE INFO
    # calculate substations for all buildings
    # local variables
    total_demand = pd.read_csv(locator.get_total_demand())
    volume_flow_m3pers_building = pd.DataFrame()
    T_sup_K_building = pd.DataFrame()
    T_re_K_building = pd.DataFrame()
    Q_demand_kWh_building = pd.DataFrame()
    if network_type == "DH":
        buildings_name_with_heating = get_building_names_with_load(total_demand, load_name='QH_sys_MWhyr')
        buildings_name_with_space_heating = get_building_names_with_load(total_demand, load_name='Qhs_sys_MWhyr')
        DHN_barcode = "0"
        if buildings_name_with_heating and buildings_name_with_space_heating:
            building_names = [building for building in buildings_name_with_heating if building in
                              node_df.Building.values]
            substation.substation_main_heating(locator, total_demand, building_names, DHN_barcode=DHN_barcode)
        else:
            raise Exception('No buildings with heating demand were found for the district heating network')

        for building_name in building_names:
            substation_results = pd.read_csv(
                locator.get_optimization_substations_results_file(building_name, "DH", DHN_barcode))
            volume_flow_m3pers_building[building_name] = substation_results["mdot_DH_result_kgpers"] / P_WATER_KGPERM3
            T_sup_K_building[building_name] = substation_results["T_supply_DH_result_K"]
            T_re_K_building[building_name] = np.where(substation_results["T_return_DH_result_K"] > 273.15,
                                                      substation_results["T_return_DH_result_K"], np.nan)
            Q_demand_kWh_building[building_name] = (substation_results["Q_heating_W"] + substation_results[
                "Q_dhw_W"]) / 1000

    if network_type == "DC":
        buildings_name_with_cooling = get_building_names_with_load(total_demand, load_name='QC_sys_MWhyr')
        DCN_barcode = "0"
        if buildings_name_with_cooling:
            building_names = [building for building in buildings_name_with_cooling if building in
                              node_df.Building.values]
            substation.substation_main_cooling(locator, total_demand, building_names, DCN_barcode=DCN_barcode)
        else:
            raise Exception('No buildings with cooling demand were found for the district cooling network')

        for building_name in building_names:
            substation_results = pd.read_csv(
                locator.get_optimization_substations_results_file(building_name, "DC", DCN_barcode))
            volume_flow_m3pers_building[building_name] = \
                substation_results["mdot_space_cooling_data_center_and_refrigeration_result_kgpers"] / P_WATER_KGPERM3
            T_sup_K_building[building_name] = substation_results[
                "T_supply_DC_space_cooling_data_center_and_refrigeration_result_K"]
            T_re_K_building[building_name] = substation_results[
                "T_return_DC_space_cooling_data_center_and_refrigeration_result_K"]
            Q_demand_kWh_building[building_name] = substation_results[
                                                       "Q_space_cooling_data_center_and_refrigeration_W"] / 1000


    import cea.utilities
    with cea.utilities.pushd(locator.get_thermal_network_folder()):
        # Create a water network model
        wn = wntr.network.WaterNetworkModel()

        # add loads
        building_base_demand_m3s = {}
        for building in volume_flow_m3pers_building.keys():
            building_base_demand_m3s[building] = volume_flow_m3pers_building[building].max()
            pattern_demand = (volume_flow_m3pers_building[building].values / building_base_demand_m3s[building]).tolist()
            wn.add_pattern(building, pattern_demand)

        # add nodes
        consumer_nodes = []
        building_nodes_pairs = {}
        building_nodes_pairs_inversed = {}
        for node in node_df.iterrows():
            if node[1]["Type"] == "CONSUMER":
                demand_pattern = node[1]['Building']
                base_demand_m3s = building_base_demand_m3s[demand_pattern]
                consumer_nodes.append(node[0])
                building_nodes_pairs[node[0]] = demand_pattern
                building_nodes_pairs_inversed[demand_pattern] = node[0]
                wn.add_junction(node[0],
                                base_demand=base_demand_m3s,
                                demand_pattern=demand_pattern,
                                elevation=thermal_transfer_unit_design_head_m,
                                coordinates=node[1]["coordinates"])
            elif node[1]["Type"] == "PLANT":
                base_head = int(thermal_transfer_unit_design_head_m*1.2)
                start_node = node[0]
                name_node_plant = start_node
                wn.add_reservoir(start_node,
                                 base_head=base_head,
                                 coordinates=node[1]["coordinates"])
            else:
                wn.add_junction(node[0],
                                elevation=0,
                                coordinates=node[1]["coordinates"])

        # add pipes
        for edge in edge_df.iterrows():
            length_m = edge[1]["length_m"]
            edge_name = edge[0]
            wn.add_pipe(edge_name, edge[1]["start node"],
                        edge[1]["end node"],
                        length=length_m * (1 + fraction_equivalent_length),
                        roughness=coefficient_friction_hazen_williams,
                        minor_loss=0.0,
                        status='OPEN')

        # add options
        wn.options.time.duration = 8759 * 3600  # simulate a full year in EPANET (8,760 hourly timesteps, t = 0..8759 h)
        wn.options.time.hydraulic_timestep = 60 * 60
        wn.options.time.pattern_timestep = 60 * 60
        wn.options.solver.accuracy = 0.01
        wn.options.solver.trials = 100

        # 1st ITERATION GET MASS FLOWS AND CALCULATE DIAMETER
        sim = wntr.sim.EpanetSimulator(wn)
        results = sim.run_sim()
        max_volume_flow_rates_m3s = results.link['flowrate'].abs().max()
        pipe_names = max_volume_flow_rates_m3s.index.values
        pipe_catalog = pd.read_excel(locator.get_database_distribution_systems(), sheet_name='THERMAL_GRID')
        Pipe_DN, D_ext_m, D_int_m, D_ins_m = zip(
            *[calc_max_diameter(flow, pipe_catalog, velocity_ms=velocity_ms, peak_load_percentage=peak_load_percentage) for
              flow in max_volume_flow_rates_m3s])
        pipe_dn = pd.Series(Pipe_DN, pipe_names)
        diameter_int_m = pd.Series(D_int_m, pipe_names)
        diameter_ext_m = pd.Series(D_ext_m, pipe_names)
        diameter_ins_m = pd.Series(D_ins_m, pipe_names)

        # 2nd ITERATION GET PRESSURE POINTS AND MASSFLOWS FOR SIZING PUMPING NEEDS - this could be for all the year
        # modify diameter and run simulations
        edge_df['Pipe_DN'] = pipe_dn
        edge_df['D_int_m'] = D_int_m
        for edge in edge_df.iterrows():
            edge_name = edge[0]
            pipe = wn.get_link(edge_name)
            pipe.diameter = diameter_int_m[edge_name]
        sim = wntr.sim.EpanetSimulator(wn)
        results = sim.run_sim()

        # 3rd ITERATION GET FINAL UTILIZATION OF THE GRID (SUPPLY SIDE)
        # get accumulated head loss per hour
        unitary_head_ftperkft = results.link['headloss'].abs()
        unitary_head_mperm = unitary_head_ftperkft * FT_TO_M / (FT_TO_M * 1000)  # ft per 1000 ft -> m per m (the FT_TO_M factors cancel)
        head_loss_m = unitary_head_mperm.copy()
        for column in head_loss_m.columns.values:
            length_m = edge_df.loc[column]['length_m']
            head_loss_m[column] = head_loss_m[column] * length_m
        reservoir_head_loss_m = head_loss_m.sum(axis=1) + thermal_transfer_unit_design_head_m*1.2 # fixme: only one thermal_transfer_unit_design_head_m from one substation?

        # apply this pattern to the reservoir and get results
        base_head = reservoir_head_loss_m.max()
        pattern_head_m = (reservoir_head_loss_m.values / base_head).tolist()
        wn.add_pattern('reservoir', pattern_head_m)
        reservoir = wn.get_node(name_node_plant)
        reservoir.head_timeseries.base_value = int(base_head)
        reservoir.head_timeseries._pattern = 'reservoir'
        sim = wntr.sim.EpanetSimulator(wn)
        results = sim.run_sim()

    # POSTPROCESSING

    # $ POSTPROCESSING - PRESSURE/HEAD LOSSES PER PIPE PER HOUR OF THE YEAR
    # at the pipes
    unitary_head_loss_supply_network_ftperkft = results.link['headloss'].abs()
    linear_pressure_loss_Paperm = unitary_head_loss_supply_network_ftperkft * FT_WATER_TO_PA / (FT_TO_M * 1000)
    head_loss_supply_network_Pa = linear_pressure_loss_Paperm.copy()
    for column in head_loss_supply_network_Pa.columns.values:
        length_m = edge_df.loc[column]['length_m']
        head_loss_supply_network_Pa[column] = head_loss_supply_network_Pa[column] * length_m

    head_loss_return_network_Pa = head_loss_supply_network_Pa.copy()  # assume the return network mirrors the supply network
    # at the substations
    head_loss_substations_ft = results.node['head'][consumer_nodes].abs()
    head_loss_substations_Pa = head_loss_substations_ft * FT_WATER_TO_PA

    # POSTPROCESSING - MASS FLOW RATES
    # MASS_FLOW_RATE (EDGES)
    flow_rate_supply_m3s = results.link['flowrate'].abs()
    massflow_supply_kgs = flow_rate_supply_m3s * P_WATER_KGPERM3

    # $ POSTPROCESSING - PRESSURE LOSSES ACCUMULATED PER HOUR OF THE YEAR (TIMES 2 to account for return)
    accumulated_head_loss_supply_Pa = head_loss_supply_network_Pa.sum(axis=1)
    accumulated_head_loss_return_Pa = head_loss_return_network_Pa.sum(axis=1)
    accumulated_head_loss_substations_Pa = head_loss_substations_Pa.sum(axis=1)
    accumulated_head_loss_total_Pa = accumulated_head_loss_supply_Pa + accumulated_head_loss_return_Pa + accumulated_head_loss_substations_Pa

    # $ POSTPROCESSING - THERMAL LOSSES PER PIPE PER HOUR OF THE YEAR (SUPPLY)
    # calculate the thermal characteristics of the grid
    temperature_of_the_ground_K = calculate_ground_temperature(locator)
    thermal_coefficient_WperKm = pd.Series(
        np.vectorize(calc_linear_thermal_loss_coefficient)(diameter_ext_m, diameter_int_m, diameter_ins_m), pipe_names)
    average_temperature_supply_K = T_sup_K_building.mean(axis=1)


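    # results.link['headloss'] is copied below only as a template with the right
    # time index and pipe columns; every column is overwritten in the loops that follow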
    thermal_losses_supply_kWh = results.link['headloss'].copy()
    thermal_losses_supply_kWh.reset_index(inplace=True, drop=True)
    thermal_losses_supply_Wperm = thermal_losses_supply_kWh.copy()
    for pipe in pipe_names:
        length_m = edge_df.loc[pipe]['length_m']
        massflow_kgs = massflow_supply_kgs[pipe]
        k_WperKm_pipe = thermal_coefficient_WperKm[pipe]
        k_kWperK = k_WperKm_pipe * length_m / 1000
        thermal_losses_supply_kWh[pipe] = np.vectorize(calc_thermal_loss_per_pipe)(
            average_temperature_supply_K.values,
            massflow_kgs.values,
            temperature_of_the_ground_K,
            k_kWperK)

        thermal_losses_supply_Wperm[pipe] = (thermal_losses_supply_kWh[pipe] / length_m) * 1000

    # return pipes
    average_temperature_return_K = T_re_K_building.mean(axis=1)
    thermal_losses_return_kWh = results.link['headloss'].copy()
    thermal_losses_return_kWh.reset_index(inplace=True, drop=True)
    for pipe in pipe_names:
        length_m = edge_df.loc[pipe]['length_m']
        massflow_kgs = massflow_supply_kgs[pipe]
        k_WperKm_pipe = thermal_coefficient_WperKm[pipe]
        k_kWperK = k_WperKm_pipe * length_m / 1000
        thermal_losses_return_kWh[pipe] = np.vectorize(calc_thermal_loss_per_pipe)(
            average_temperature_return_K.values,
            massflow_kgs.values,
            temperature_of_the_ground_K,
            k_kWperK)
    # WRITE TO DISK

    # LINEAR PRESSURE LOSSES (EDGES)
    linear_pressure_loss_Paperm.to_csv(locator.get_network_linear_pressure_drop_edges(network_type, network_name),
                                       index=False)

    # MASS_FLOW_RATE (EDGES)
    # flow_rate_supply_m3s and massflow_supply_kgs were already computed above
    massflow_supply_kgs.to_csv(locator.get_thermal_network_layout_massflow_edges_file(network_type, network_name),
                               index=False)

    # VELOCITY (EDGES)
    velocity_edges_ms = results.link['velocity'].abs()
    velocity_edges_ms.to_csv(locator.get_thermal_network_velocity_edges_file(network_type, network_name),
                             index=False)

    # PRESSURE LOSSES (NODES)
    pressure_at_nodes_ft = results.node['pressure'].abs()
    pressure_at_nodes_Pa = pressure_at_nodes_ft * FT_TO_M * M_WATER_TO_PA
    pressure_at_nodes_Pa.to_csv(locator.get_network_pressure_at_nodes(network_type, network_name), index=False)

    # MASS_FLOW_RATE (NODES)
    # $ POSTPROCESSING - MASSFLOWRATES PER NODE PER HOUR OF THE YEAR
    flow_rate_supply_nodes_m3s = results.node['demand'].abs()
    massflow_supply_nodes_kgs = flow_rate_supply_nodes_m3s * P_WATER_KGPERM3
    massflow_supply_nodes_kgs.to_csv(locator.get_thermal_network_layout_massflow_nodes_file(network_type, network_name),
                                     index=False)

    # thermal demand per building (no losses in the network or substations)
    Q_demand_Wh_building = Q_demand_kWh_building * 1000
    Q_demand_Wh_building.to_csv(locator.get_thermal_demand_csv_file(network_type, network_name), index=False)

    # pressure losses total
    # $ POSTPROCESSING - PUMPING NEEDS PER HOUR OF THE YEAR (TIMES 2 to account for return)
    flow_rate_substations_m3s = results.node['demand'][consumer_nodes].abs()
    head_loss_supply_kWperm = (linear_pressure_loss_Paperm * (flow_rate_supply_m3s * 3600)) / (3.6E6 * PUMP_ETA)
    head_loss_return_kWperm = head_loss_supply_kWperm.copy()
    pressure_loss_supply_edge_kW = (head_loss_supply_network_Pa * (flow_rate_supply_m3s * 3600)) / (3.6E6 * PUMP_ETA)
    head_loss_return_kW = pressure_loss_supply_edge_kW.copy()
    head_loss_substations_kW = (head_loss_substations_Pa * (flow_rate_substations_m3s * 3600)) / (3.6E6 * PUMP_ETA)
    accumulated_head_loss_supply_kW = pressure_loss_supply_edge_kW.sum(axis=1)
    accumulated_head_loss_return_kW = head_loss_return_kW.sum(axis=1)
    accumulated_head_loss_substations_kW = head_loss_substations_kW.sum(axis=1)
    accumulated_head_loss_total_kW = accumulated_head_loss_supply_kW + \
                                     accumulated_head_loss_return_kW + \
                                     accumulated_head_loss_substations_kW
    head_loss_system_Pa = pd.DataFrame({"pressure_loss_supply_Pa": accumulated_head_loss_supply_Pa,
                                        "pressure_loss_return_Pa": accumulated_head_loss_return_Pa,
                                        "pressure_loss_substations_Pa": accumulated_head_loss_substations_Pa,
                                        "pressure_loss_total_Pa": accumulated_head_loss_total_Pa})
    head_loss_system_Pa.to_csv(locator.get_network_total_pressure_drop_file(network_type, network_name),
                               index=False)

    # $ POSTPROCESSING - PLANT HEAT REQUIREMENT
    plant_load_kWh = thermal_losses_supply_kWh.sum(axis=1) * 2 + Q_demand_kWh_building.sum(
        axis=1) - accumulated_head_loss_total_kW.values
    plant_load_kWh.to_csv(locator.get_thermal_network_plant_heat_requirement_file(network_type, network_name),
                          header=['thermal_load_kW'], index=False)

    # pressure losses per piping system
    pressure_loss_supply_edge_kW.to_csv(
        locator.get_thermal_network_pressure_losses_edges_file(network_type, network_name), index=False)

    # pressure losses per substation
    head_loss_substations_kW = head_loss_substations_kW.rename(columns=building_nodes_pairs)
    head_loss_substations_kW.to_csv(locator.get_thermal_network_substation_ploss_file(network_type, network_name),
                                    index=False)

    # pumping needs losses total
    pumping_energy_system_kWh = pd.DataFrame({"pressure_loss_supply_kW": accumulated_head_loss_supply_kW,
                                              "pressure_loss_return_kW": accumulated_head_loss_return_kW,
                                              "pressure_loss_substations_kW": accumulated_head_loss_substations_kW,
                                              "pressure_loss_total_kW": accumulated_head_loss_total_kW})
    pumping_energy_system_kWh.to_csv(
        locator.get_network_energy_pumping_requirements_file(network_type, network_name), index=False)

    # plant supply and return temperatures
    temperatures_plant_K = pd.DataFrame({"temperature_supply_K": average_temperature_supply_K,
                                         "temperature_return_K": average_temperature_return_K})
    temperatures_plant_K.to_csv(locator.get_network_temperature_plant(network_type, network_name), index=False)

    # thermal losses
    thermal_losses_supply_kWh.to_csv(locator.get_network_thermal_loss_edges_file(network_type, network_name),
                                     index=False)
    thermal_losses_supply_Wperm.to_csv(locator.get_network_linear_thermal_loss_edges_file(network_type, network_name),
                                       index=False)

    # thermal losses total
    accumulated_thermal_losses_supply_kWh = thermal_losses_supply_kWh.sum(axis=1)
    accumulated_thermal_losses_return_kWh = thermal_losses_return_kWh.sum(axis=1)
    accumulated_thermal_loss_total_kWh = accumulated_thermal_losses_supply_kWh + accumulated_thermal_losses_return_kWh
    thermal_losses_total_kWh = pd.DataFrame({"thermal_loss_supply_kW": accumulated_thermal_losses_supply_kWh,
                                             "thermal_loss_return_kW": accumulated_thermal_losses_return_kWh,
                                             "thermal_loss_total_kW": accumulated_thermal_loss_total_kWh})
    thermal_losses_total_kWh.to_csv(locator.get_network_total_thermal_loss_file(network_type, network_name),
                                    index=False)

    # average supply temperature at the nodes (every node gets the building-average series)
    T_sup_K_nodes = T_sup_K_building.rename(columns=building_nodes_pairs_inversed)
    average_year = T_sup_K_nodes.mean(axis=1)
    for node in node_df.index.values:
        T_sup_K_nodes[node] = average_year
    T_sup_K_nodes.to_csv(locator.get_network_temperature_supply_nodes_file(network_type, network_name),
                         index=False)

    # average return temperature at the nodes (every node gets the building-average series)
    T_return_K_nodes = T_re_K_building.rename(columns=building_nodes_pairs_inversed)
    average_year = T_return_K_nodes.mean(axis=1)
    for node in node_df.index.values:
        T_return_K_nodes[node] = average_year
    T_return_K_nodes.to_csv(locator.get_network_temperature_return_nodes_file(network_type, network_name),
                            index=False)

    # summary of edges used for the calculation
    fields_edges = ['length_m', 'Pipe_DN', 'Type_mat', 'D_int_m']
    edge_df[fields_edges].to_csv(locator.get_thermal_network_edge_list_file(network_type, network_name))
    fields_nodes = ['Type', 'Building']
    node_df[fields_nodes].to_csv(locator.get_thermal_network_node_types_csv_file(network_type, network_name))

    # correct diameter of network and save to the shapefile
    from cea.utilities.dbf import dataframe_to_dbf, dbf_to_dataframe
    fields = ['length_m', 'Pipe_DN', 'Type_mat']
    edge_df = edge_df[fields]
    edge_df['name'] = edge_df.index.values
    network_edges_df = dbf_to_dataframe(
        locator.get_network_layout_edges_shapefile(network_type, network_name).split('.shp')[0] + '.dbf')
    network_edges_df = network_edges_df.merge(edge_df, left_on='Name', right_on='name', suffixes=('_x', ''))
    network_edges_df = network_edges_df.drop(['Pipe_DN_x', 'Type_mat_x', 'name', 'length_m_x'], axis=1)
    dataframe_to_dbf(network_edges_df,
                     locator.get_network_layout_edges_shapefile(network_type, network_name).split('.shp')[0] + '.dbf')
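
The hydraulic core of this example is the wntr/EPANET loop: build a WaterNetworkModel, attach normalised demand patterns, simulate, and read back flow rates for pipe sizing. A minimal stand-alone sketch of that loop (assuming wntr is installed; the node names, demands, and diameters are made up):

import wntr

wn = wntr.network.WaterNetworkModel()
wn.add_pattern('B001', [1.0, 0.6, 0.3])      # normalised hourly demand pattern
wn.add_reservoir('PLANT', base_head=24, coordinates=(0, 0))
wn.add_junction('NODE1', base_demand=0.005,  # m3/s at pattern value 1.0
                demand_pattern='B001', elevation=20, coordinates=(100, 0))
wn.add_pipe('PIPE1', 'PLANT', 'NODE1', length=100, diameter=0.15,
            roughness=100, minor_loss=0.0, status='OPEN')

wn.options.time.duration = 2 * 3600          # three hourly steps: t = 0, 1, 2 h
wn.options.time.hydraulic_timestep = 60 * 60
wn.options.time.pattern_timestep = 60 * 60

sim = wntr.sim.EpanetSimulator(wn)
results = sim.run_sim()
print(results.link['flowrate'].abs().max())  # peak flow per pipe, as used above for sizing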
Example #25
def gwr_mapper(config, locator):
    gwr_path = config.gwr_mapper.gwr_path
    zone_path = locator.get_zone_geometry()
    # surroundings_path = locator.get_surroundings_geometry()

    gwr_df = read_gwr(gwr_path)
    zone_gdf = gpd.read_file(zone_path)

    # Filter GWR data to zone extent
    print('Filtering GWR data to location')
    reprojected_zone_gdf = zone_gdf.to_crs(LV95_PROJECTION)
    minx, miny, maxx, maxy = reprojected_zone_gdf['geometry'].total_bounds
    gwr_df = filter_gwr_by_bounds(gwr_df, minx, miny, maxx, maxy)

    print('Translating GWR to CEA code')
    gwr_df = gwr_to_cea_code(gwr_df)

    print('Mapping GWR Buildings to CEA Buildings')
    coord_points = [
        Point(x, y)
        for x, y in zip(gwr_df['e_coordinate'], gwr_df['n_coordinate'])
    ]
    gwr_gdf = gpd.GeoDataFrame(gwr_df,
                               geometry=coord_points,
                               crs=LV95_PROJECTION)
    building_properties = [
        map_props_to_geom(building_name, building_geom, gwr_gdf)
        for building_name, building_geom in zip(
            reprojected_zone_gdf['Name'], reprojected_zone_gdf['geometry'])
    ]
    properties_df = pd.concat(building_properties)

    print('Filling in missing data')
    # Fill empty rows with most common building type
    building_type = properties_df['building_type'].value_counts().idxmax()
    common_building_properties_df = properties_df.loc[
        properties_df['building_type'] == building_type]
    construction_year = common_building_properties_df[
        'construction_year'].median()
    number_floors = common_building_properties_df['number_floors'].median()
    heating_tech_code = common_building_properties_df[
        'heating_tech_code'].value_counts().idxmax()
    hot_water_tech_code = common_building_properties_df[
        'hot_water_tech_code'].value_counts().idxmax()

    properties_df['construction_year'] = properties_df[
        'construction_year'].fillna(construction_year).astype(int)
    properties_df['number_floors'] = properties_df['number_floors'].fillna(
        number_floors).astype(int)
    properties_df['occupancy_ratio'] = properties_df['occupancy_ratio'].fillna(
        '{}:{}'.format(building_type, 1.0)).astype(str)
    properties_df['heating_tech_code'] = properties_df[
        'heating_tech_code'].fillna(heating_tech_code).astype(str)
    properties_df['hot_water_tech_code'] = properties_df[
        'hot_water_tech_code'].fillna(hot_water_tech_code).astype(str)

    print('Setting CEA building floors from GWR data')
    new_zone_gdf = zone_gdf.set_index('Name')
    gwr_floors_df = properties_df[[
        'Name', 'number_floors'
    ]].set_index('Name').rename(columns={"number_floors": "floors_ag"})
    new_zone_gdf.update(gwr_floors_df)
    new_zone_gdf['height_ag'] = new_zone_gdf['floors_ag'] * 3  # assume 3 m floor-to-floor height
    new_zone_gdf = new_zone_gdf.reset_index()
    new_zone_gdf.to_file(zone_path)

    print('Generating CEA building typology from GWR data')
    standard_definition_df = pd.read_excel(
        locator.get_database_construction_standards(),
        sheet_name='STANDARD_DEFINITION')
    typology_df = generate_typology(properties_df, standard_definition_df)

    print('Running CEA archetypes-mapper with the generated building typology')
    typology_path = locator.get_building_typology()
    if not os.path.exists(os.path.dirname(typology_path)):
        os.makedirs(os.path.dirname(typology_path))
    dataframe_to_dbf(typology_df, typology_path)

    mapper_flags = {
        'update_architecture_dbf': True,
        'update_air_conditioning_systems_dbf': True,
        'update_indoor_comfort_dbf': True,
        'update_internal_loads_dbf': True,
        'update_supply_systems_dbf': True,
        'update_schedule_operation_cea': True
    }
    archetypes_mapper(locator,
                      buildings=typology_df['Name'].values,
                      **mapper_flags)

    print('Updating heating and hot water supply systems from GWR data')
    supply_systems_df = dbf_to_dataframe(
        locator.get_building_supply()).set_index('Name')
    gwr_supply_systems_df = properties_df[[
        'Name', 'heating_tech_code', 'hot_water_tech_code'
    ]].set_index('Name').rename(columns={
        "heating_tech_code": "type_hs",
        "hot_water_tech_code": "type_dhw"
    })
    supply_systems_df.update(gwr_supply_systems_df)
    supply_systems_df = supply_systems_df.reset_index()
    dataframe_to_dbf(supply_systems_df, locator.get_building_supply())
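
The closing block illustrates the pattern this example relies on throughout: index both frames by 'Name', rename the incoming columns to the target schema, and let DataFrame.update overwrite values in place. An isolated sketch of just that pattern (values are made up):

import pandas as pd

supply = pd.DataFrame({'Name': ['B001', 'B002'],
                       'type_hs': ['HS_A', 'HS_A'],
                       'type_dhw': ['DHW_A', 'DHW_A']}).set_index('Name')
gwr = pd.DataFrame({'Name': ['B002'],
                    'heating_tech_code': ['HS_C']}).set_index('Name')

# align on the shared 'Name' index and overwrite only where gwr has values
supply.update(gwr.rename(columns={'heating_tech_code': 'type_hs'}))
print(supply.reset_index())  # B002's type_hs becomes HS_C; everything else is untouched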