Example #1
def schedule_maker_main(locator, config, building=None):
    # local variables
    buildings = config.schedule_maker.buildings
    schedule_model = config.schedule_maker.schedule_model

    if schedule_model == 'deterministic':
        stochastic_schedule = False
    elif schedule_model == 'stochastic':
        stochastic_schedule = True
    else:
        raise ValueError("Invalid schedule model: {schedule_model}".format(**locals()))

    if building is not None:
        buildings = [building]  # this is to run the tests

    # CHECK DATABASE
    if is_3_22(config.scenario):
        raise ValueError("""The data format of indoor comfort has been changed after v3.22. 
        Please run Data migrator in Utilities.""")

    # get variables of indoor comfort and internal loads
    internal_loads = dbf_to_dataframe(locator.get_building_internal()).set_index('Name')
    indoor_comfort = dbf_to_dataframe(locator.get_building_comfort()).set_index('Name')
    architecture = dbf_to_dataframe(locator.get_building_architecture()).set_index('Name')

    # get building properties
    prop_geometry = Gdf.from_file(locator.get_zone_geometry())
    prop_geometry['footprint'] = prop_geometry.area
    prop_geometry['GFA_m2'] = prop_geometry['footprint'] * (prop_geometry['floors_ag'] + prop_geometry['floors_bg'])
    prop_geometry['GFA_ag_m2'] = prop_geometry['footprint'] * prop_geometry['floors_ag']
    prop_geometry['GFA_bg_m2'] = prop_geometry['footprint'] * prop_geometry['floors_bg']
    prop_geometry = prop_geometry.merge(architecture, on='Name').set_index('Name')
    prop_geometry = calc_useful_areas(prop_geometry)

    # get calculation year from weather file
    weather_path = locator.get_weather_file()
    weather_data = epwreader.epw_reader(weather_path)[['year', 'drybulb_C', 'wetbulb_C',
                                                       'relhum_percent', 'windspd_ms', 'skytemp_C']]
    year = weather_data['year'][0]

    # create date range for the calculation year
    date_range = get_date_range_hours_from_year(year)

    # SCHEDULE MAKER
    n = len(buildings)
    calc_schedules_multiprocessing = cea.utilities.parallel.vectorize(calc_schedules,
                                                                      config.get_number_of_processes(),
                                                                      on_complete=print_progress)

    calc_schedules_multiprocessing(repeat(locator, n),
                                   buildings,
                                   repeat(date_range, n),
                                   [internal_loads.loc[b] for b in buildings],
                                   [indoor_comfort.loc[b] for b in buildings],
                                   [prop_geometry.loc[b] for b in buildings],
                                   repeat(stochastic_schedule, n))
    return None
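The repeat(obj, n) arguments above exist because the vectorize helper fans calc_schedules out over equally long argument iterables, one call per building. A minimal single-process sketch of that calling convention (vectorize_sketch is a stand-in; the real cea.utilities.parallel.vectorize and its on_complete signature may differ):

from itertools import repeat

def vectorize_sketch(func, number_of_processes=1, on_complete=None):
    # apply func element-wise over equally long argument iterables, single-process
    def wrapper(*arg_lists):
        results = []
        for i, args in enumerate(zip(*arg_lists)):
            results.append(func(*args))
            if on_complete is not None:
                on_complete(i)  # progress callback; the real signature is an assumption here
        return results
    return wrapper

# usage mirroring the call above:
# vectorize_sketch(calc_schedules, 4)(repeat(locator, n), buildings, repeat(date_range, n), ...)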
Example #2
def calculate_pipe_transmittance(locator, buildings_names):
    # Get data
    age_df = dbf_to_dataframe(locator.get_building_age())
    age_df.set_index('Name', inplace=True)

    Y_dic = {}
    for building in buildings_names:
        age_built = age_df.loc[building, 'built']
        age_HVAC = age_df.loc[building, 'HVAC']

        # Calculate
        if age_built >= 1995 or age_HVAC > 1995:
            Y_dic[building] = 0.2
        elif 1985 <= age_built < 1995:
            Y_dic[building] = 0.3
            if age_HVAC == age_built:
                raise ValueError(
                    'Incorrect HVAC renovation year for ' + building +
                    ': if HVAC has not been renovated, the year should be set to 0'
                )
        else:
            Y_dic[building] = 0.4

    return Y_dic
Example #3
    def preprocessing_occupancy_type_comparison(self):
        data_processed = pd.DataFrame()
        scenarios_clean = []
        for i, scenario_name in enumerate(self.scenarios_names):
            if scenario_name in scenarios_clean:
                scenario_name = scenario_name + "_duplicated_" + str(i)
            scenarios_clean.append(scenario_name)

        for scenario, scenario_name in zip(self.scenarios, scenarios_clean):
            locator = cea.inputlocator.InputLocator(scenario)
            district_occupancy_df = dbf_to_dataframe(
                locator.get_building_typology())
            district_occupancy_df.set_index('Name', inplace=True)
            district_gfa_df = pd.read_csv(
                locator.get_total_demand())[['GFA_m2'] + ["Name"]]
            district_gfa_df.set_index('Name', inplace=True)
            data_raw = pd.DataFrame(district_occupancy_df.values *
                                    district_gfa_df.values,
                                    columns=district_occupancy_df.columns,
                                    index=district_occupancy_df.index)
            # sum per function
            data_raw = data_raw.sum(axis=0)
            data_raw_df = pd.DataFrame({scenario_name: data_raw}, index=data_raw.index).T
            data_processed = data_processed.append(data_raw_df)
        return data_processed
Example #4
def internal_loads_is_3_22(scenario):
    internal_loads = dbf_to_dataframe(
        os.path.join(scenario, "inputs", "building-properties",
                     "internal_loads.dbf"))

    if 'Occ_m2pax' not in internal_loads.columns:
        return False
    return True
Example #5
def indoor_comfort_is_3_22(scenario):
    indoor_comfort = dbf_to_dataframe(
        os.path.join(scenario, "inputs", "building-properties",
                     "indoor_comfort.dbf"))

    if 'Ve_lpspax' not in indoor_comfort.columns:
        return False
    return True
Example #6
    def test_roundtrip(self):
        """Make sure the roundtrip df -> dbf -> df keeps the data intact."""
        df = pd.DataFrame({
            'a': ['foo', 'bar', 'baz'],
            'b': np.random.randn(3)
        })
        dbf_path = tempfile.mktemp(suffix='.dbf')
        dbf.dataframe_to_dbf(df, dbf_path)
        assert_frame_equal(df, dbf.dbf_to_dataframe(dbf_path))
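tempfile.mktemp is deprecated because the returned path can race with other processes; the same round-trip check can be written against a temporary directory instead. A sketch, assuming cea.utilities.dbf is importable as dbf:

import os
import tempfile

import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from cea.utilities import dbf

def test_roundtrip_tmpdir():
    df = pd.DataFrame({'a': ['foo', 'bar', 'baz'], 'b': np.random.randn(3)})
    with tempfile.TemporaryDirectory() as tmpdir:
        dbf_path = os.path.join(tmpdir, 'roundtrip.dbf')  # removed together with tmpdir
        dbf.dataframe_to_dbf(df, dbf_path)
        assert_frame_equal(df, dbf.dbf_to_dataframe(dbf_path))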
Example #7
def get_array_architecture_variables(building, building_name, locator):
    '''
    This function collects envelope thermal/physical characteristics.
    :param building: the intended building dataset
    :param building_name: the intended building name from the list of buildings
    :param locator: points to the variables
    :return: array of architectural features x HOURS_IN_YEAR (array_arch)
    '''
    #   pointing to the building dataframe
    data_architecture = dbf_to_dataframe(locator.get_building_architecture())
    data_architecture.set_index('Name', inplace=True)
    #   Window to wall ratio (as an average of all walls)
    array_wwr = np.empty(HOURS_IN_YEAR)
    average_wwr = np.mean([
        data_architecture.loc[building_name, 'wwr_south'],
        data_architecture.loc[building_name, 'wwr_north'],
        data_architecture.loc[building_name, 'wwr_west'],
        data_architecture.loc[building_name, 'wwr_east']
    ])
    array_wwr.fill(average_wwr)
    #   thermal mass
    array_cm = np.empty(HOURS_IN_YEAR)
    array_cm.fill(building.architecture.Cm_Af)
    #   air leakage (infiltration)
    array_n50 = np.empty(HOURS_IN_YEAR)
    array_n50.fill(building.architecture.n50)
    #   roof properties
    array_Uroof = np.empty(HOURS_IN_YEAR)
    array_Uroof.fill(building.architecture.U_roof)
    array_aroof = np.empty(HOURS_IN_YEAR)
    array_aroof.fill(building.architecture.a_roof)
    #   walls properties
    array_Uwall = np.empty(HOURS_IN_YEAR)
    array_Uwall.fill(building.architecture.U_wall)
    array_awall = np.empty(HOURS_IN_YEAR)
    array_awall.fill(building.architecture.a_wall)
    #   basement properties
    array_Ubase = np.empty(HOURS_IN_YEAR)
    array_Ubase.fill(building.architecture.U_base)
    #   glazing properties
    array_Uwin = np.empty(HOURS_IN_YEAR)
    array_Uwin.fill(building.architecture.U_win)
    array_Gwin = np.empty(HOURS_IN_YEAR)
    array_Gwin.fill(building.architecture.G_win)
    #   shading properties
    array_rfsh = np.empty(HOURS_IN_YEAR)
    array_rfsh.fill(building.architecture.rf_sh)
    #   concatenate architectural arrays
    array_arch = np.column_stack(
        (array_wwr, array_cm, array_n50, array_Uroof, array_aroof, array_Uwall,
         array_awall, array_Ubase, array_Uwin, array_Gwin, array_rfsh))

    return array_arch
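Each np.empty() + fill() pair above builds a constant hourly array; np.full does the same in one call. A small equivalent sketch (the 0.165 value is illustrative, not a CEA default):

import numpy as np

HOURS_IN_YEAR = 8760
array_cm = np.full(HOURS_IN_YEAR, 0.165)  # same result as np.empty(HOURS_IN_YEAR) + fill(0.165)
assert array_cm.shape == (HOURS_IN_YEAR,) and array_cm[0] == 0.165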
Example #8
def extract_cea_inputs_files(locator):
    """
    extract information from the zones in the case study
    :param locator:
    :return:
    """
    # Get dataframes
    zone_occupancy_df = dbf_to_dataframe(locator.get_building_occupancy())
    zone_df = Gdf.from_file(locator.get_zone_geometry())
    architecture_df = dbf_to_dataframe(locator.get_building_architecture())
    technical_systems_df = dbf_to_dataframe(locator.get_building_air_conditioning())
    supply_systems_df = dbf_to_dataframe(locator.get_building_supply())

    # Set index
    zone_occupancy_df.set_index('Name', inplace=True)
    zone_df.set_index('Name', inplace=True)
    architecture_df.set_index('Name', inplace=True)
    technical_systems_df.set_index('Name', inplace=True)
    supply_systems_df.set_index('Name', inplace=True)

    return zone_occupancy_df, zone_df, architecture_df, technical_systems_df, supply_systems_df
Example #9
def disconnected_buildings_heating_main(locator, total_demand, building_names,
                                        config, prices, lca):
    """
    Computes the parameters for the operation of disconnected buildings
    output results in csv files.
    There is no optimization at this point. The different technologies are calculated and compared 1 to 1 to
    each technology. it is a classical combinatorial problem.
    :param locator: locator class
    :param building_names: list with names of buildings
    :type locator: class
    :type building_names: list
    :return: results of operation of buildings located in locator.get_optimization_decentralized_folder
    :rtype: Nonetype
    """
    t0 = time.perf_counter()
    prop_geometry = Gdf.from_file(locator.get_zone_geometry())
    geometry = pd.DataFrame({
        'Name': prop_geometry.Name,
        'Area': prop_geometry.area
    })
    geothermal_potential_data = dbf.dbf_to_dataframe(
        locator.get_building_supply())
    geothermal_potential_data = pd.merge(geothermal_potential_data,
                                         geometry,
                                         on='Name')
    geothermal_potential_data['Area_geo'] = geothermal_potential_data['Area']
    weather_path = locator.get_weather_file()
    weather_data = epwreader.epw_reader(weather_path)[[
        'year', 'drybulb_C', 'wetbulb_C', 'relhum_percent', 'windspd_ms',
        'skytemp_C'
    ]]

    T_ground_K = calc_ground_temperature(locator,
                                         weather_data['drybulb_C'],
                                         depth_m=10)
    supply_systems = SupplySystemsDatabase(locator)

    # This will calculate the substation state if all buildings were connected (this is how we study this)
    substation.substation_main_heating(locator, total_demand, building_names)

    n = len(building_names)
    cea.utilities.parallel.vectorize(disconnected_heating_for_building,
                                     config.get_number_of_processes())(
                                         building_names,
                                         repeat(supply_systems, n),
                                         repeat(T_ground_K, n),
                                         repeat(geothermal_potential_data, n),
                                         repeat(lca, n), repeat(locator, n),
                                         repeat(prices, n))

    print(time.perf_counter() - t0,
          "seconds process time for the Disconnected Building Routine \n")
Example #10
    def read(self, *args, **kwargs):
        """
        Open the file indicated by the locator method and return it as a DataFrame.
        args and kwargs are passed to the original (undecorated) locator method to figure out the location of the
        file.

        :param args:
        :param kwargs:
        :rtype: pd.DataFrame
        """
        from cea.utilities.dbf import dbf_to_dataframe
        df = dbf_to_dataframe(self(*args, **kwargs))
        self.validate(df)
        return df
Example #11
def get_building_connectivity(locator):
    supply_systems = dbf_to_dataframe(locator.get_building_supply())
    data_all_in_one_systems = pd.read_excel(locator.get_database_supply_systems(), sheet_name='ALL_IN_ONE_SYSTEMS')
    heating_infrastructure = data_all_in_one_systems[data_all_in_one_systems['system'].isin(['HEATING', 'NONE'])]
    heating_infrastructure = heating_infrastructure.set_index('code')['scale']

    cooling_infrastructure = data_all_in_one_systems[data_all_in_one_systems['system'].isin(['COOLING', 'NONE'])]
    cooling_infrastructure = cooling_infrastructure.set_index('code')['scale']

    building_connectivity = supply_systems[['Name']].copy()
    building_connectivity['DH_connectivity'] = (
            supply_systems['type_hs'].map(heating_infrastructure) == 'DISTRICT').astype(int)
    building_connectivity['DC_connectivity'] = (
            supply_systems['type_cs'].map(cooling_infrastructure) == 'DISTRICT').astype(int)
    return building_connectivity
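The connectivity flags above come from mapping each building's supply code to its scale and testing for 'DISTRICT'. A toy illustration of that .map pattern, with made-up codes and building names:

import pandas as pd

supply_systems = pd.DataFrame({'Name': ['B001', 'B002'], 'type_hs': ['HS1', 'HS2']})
heating_infrastructure = pd.Series({'HS1': 'DISTRICT', 'HS2': 'BUILDING'})  # code -> scale
DH_connectivity = (supply_systems['type_hs'].map(heating_infrastructure) == 'DISTRICT').astype(int)
print(DH_connectivity.tolist())  # [1, 0]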
Example #12
    def preprocessing_occupancy_type_comparison(self):
        data_processed = pd.DataFrame()
        for scenario in self.scenarios:
            locator = cea.inputlocator.InputLocator(scenario)
            scenario_name = os.path.basename(scenario)
            district_occupancy_df = dbf_to_dataframe(
                locator.get_building_occupancy())
            district_occupancy_df.set_index('Name', inplace=True)
            district_gfa_df = pd.read_csv(
                locator.get_total_demand())[['GFA_m2', 'Name']]
            district_gfa_df.set_index('Name', inplace=True)
            data_raw = pd.DataFrame(district_occupancy_df.values *
                                    district_gfa_df.values,
                                    columns=district_occupancy_df.columns,
                                    index=district_occupancy_df.index)
            # sum per function
            data_raw = data_raw.sum(axis=0)
            data_raw_df = pd.DataFrame({scenario_name: data_raw}, index=data_raw.index).T
            data_processed = data_processed.append(data_raw_df)
        return data_processed
Example #13
def create_new_project(locator, config):
    # Local variables
    zone_geometry_path = config.create_new_project.zone
    surroundings_geometry_path = config.create_new_project.surroundings
    street_geometry_path = config.create_new_project.streets
    terrain_path = config.create_new_project.terrain
    typology_path = config.create_new_project.typology

    # import file
    zone, lat, lon = shapefile_to_WSG_and_UTM(zone_geometry_path)
    # verify if the input file is correct for CEA; if not, an exception will be raised
    verify_input_geometry_zone(zone)
    zone.to_file(locator.get_zone_geometry())

    # apply coordinate system of terrain into zone and save zone to disk.
    terrain = raster_to_WSG_and_UTM(terrain_path, lat, lon)
    driver = gdal.GetDriverByName('GTiff')
    verify_input_terrain(terrain)
    driver.CreateCopy(locator.get_terrain(), terrain)

    # now create the surroundings file if it does not exist
    if surroundings_geometry_path == '':
        print(
            "there is no surroundings file, we proceed to create it based on the geometry of your zone"
        )
        zone.to_file(locator.get_surroundings_geometry())
    else:
        # import file
        surroundings, _, _ = shapefile_to_WSG_and_UTM(surroundings_geometry_path)
        # verify if the input file is correct for CEA; if not, an exception will be raised
        verify_input_geometry_surroundings(surroundings)
        # create new file
        surroundings.to_file(locator.get_surroundings_geometry())

    # now transfer the streets
    if street_geometry_path == '':
        print("there is no street file, optimization of cooling networks won't be possible")
    else:
        street, _, _ = shapefile_to_WSG_and_UTM(street_geometry_path)
        street.to_file(locator.get_street_network())

    ## create occupancy file and year file
    if typology_path == '':
        print(
            "there is no typology file, we proceed to create it based on the geometry of your zone"
        )
        zone = Gdf.from_file(zone_geometry_path).drop('geometry', axis=1)
        zone['STANDARD'] = 'T6'
        zone['YEAR'] = 2020
        zone['1ST_USE'] = 'MULTI_RES'
        zone['1ST_USE_R'] = 1.0
        zone['2ND_USE'] = "NONE"
        zone['2ND_USE_R'] = 0.0
        zone['3RD_USE'] = "NONE"
        zone['3RD_USE_R'] = 0.0
        dataframe_to_dbf(zone[COLUMNS_ZONE_TYPOLOGY],
                         locator.get_building_typology())
    else:
        # import file
        occupancy_file = dbf_to_dataframe(typology_path)
        occupancy_file_test = occupancy_file[COLUMNS_ZONE_TYPOLOGY]
        # verify if the input file is correct for CEA; if not, an exception will be raised
        verify_input_typology(occupancy_file_test)
        # create new file
        copyfile(typology_path, locator.get_building_typology())

    # add other folders by calling the locator
    locator.get_measurements()
    locator.get_input_network_folder("DH", "")
    locator.get_input_network_folder("DC", "")
    locator.get_weather_folder()
Example #14
def calc_data(data_frame, locator):
    """
    split up operative temperature and humidity points into 4 categories for plotting
    (1) occupied in heating season
    (2) un-occupied in heating season
    (3) occupied in cooling season
    (4) un-occupied in cooling season

    :param data_frame: results from demand calculation
    :type data_frame: pandas.DataFrame
    :param locator: cea input locator
    :type locator: cea.inputlocator.InputLocator
    :return: dict of lists with operative temperatures and moistures
        for 4 conditions (summer (un)occupied, winter (un)occupied)
    :rtype: dict
    """
    from cea.demand.building_properties import verify_has_season

    # read region-specific control parameters (identical for all buildings), i.e. heating and cooling season
    building_name = data_frame.Name[0]
    air_con_data = dbf_to_dataframe(locator.get_building_air_conditioning()).set_index('Name')
    has_winter = verify_has_season(building_name,
                                   air_con_data.loc[building_name, 'heat_starts'],
                                   air_con_data.loc[building_name, 'heat_ends'])
    has_summer = verify_has_season(building_name,
                                   air_con_data.loc[building_name, 'cool_starts'],
                                   air_con_data.loc[building_name, 'cool_ends'])

    winter_start = air_con_data.loc[building_name, 'heat_starts']
    winter_end = air_con_data.loc[building_name, 'heat_ends']
    summer_start = air_con_data.loc[building_name, 'cool_starts']
    summer_end = air_con_data.loc[building_name, 'cool_ends']

    # split up operative temperature and humidity points into 4 categories
    # (1) occupied in heating season
    # (2) un-occupied in heating season
    # (3) occupied in cooling season
    # (4) un-occupied in cooling season

    t_op_occupied_summer = []
    x_int_occupied_summer = []
    t_op_unoccupied_summer = []
    x_int_unoccupied_summer = []
    t_op_occupied_winter = []
    x_int_occupied_winter = []
    t_op_unoccupied_winter = []
    x_int_unoccupied_winter = []

    # convert index from string to datetime (because someone changed the type)
    data_frame.index = pd.to_datetime(data_frame.index)

    # find indexes of the 4 categories
    for index, row in data_frame.iterrows():

        # occupied in winter
        if row['people'] > 0 and has_winter and datetime_in_season(index, winter_start, winter_end):
            t_op_occupied_winter.append(row['theta_o_C'])
            x_int_occupied_winter.append(row['x_int'])
        # unoccupied in winter
        elif row['people'] == 0 and has_winter and datetime_in_season(index, winter_start, winter_end):
            t_op_unoccupied_winter.append(row['theta_o_C'])
            x_int_unoccupied_winter.append(row['x_int'])
        # occupied in summer
        elif row['people'] > 0 and has_summer and datetime_in_season(index, summer_start, summer_end):
            t_op_occupied_summer.append(row['theta_o_C'])
            x_int_occupied_summer.append(row['x_int'])
        # unoccupied in summer
        elif row['people'] == 0 and has_summer and datetime_in_season(index, summer_start, summer_end):
            t_op_unoccupied_summer.append(row['theta_o_C'])
            x_int_unoccupied_summer.append(row['x_int'])

    return {'t_op_occupied_winter': t_op_occupied_winter, 'x_int_occupied_winter': x_int_occupied_winter,
            't_op_unoccupied_winter': t_op_unoccupied_winter, 'x_int_unoccupied_winter': x_int_unoccupied_winter,
            't_op_occupied_summer': t_op_occupied_summer, 'x_int_occupied_summer': x_int_occupied_summer,
            't_op_unoccupied_summer': t_op_unoccupied_summer, 'x_int_unoccupied_summer': x_int_unoccupied_summer}
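The per-row loop above can also be written with boolean masks, avoiding iterrows. A sketch under the same column assumptions ('people', 'theta_o_C', 'x_int'), where in_winter and in_summer are boolean Series marking season membership per timestamp and the two seasons are assumed not to overlap:

def split_by_condition(data_frame, in_winter, in_summer):
    # boolean masks replacing the per-row if/elif chain above
    occupied = data_frame['people'] > 0
    unoccupied = data_frame['people'] == 0
    out = {}
    for label, mask in [('occupied_winter', occupied & in_winter),
                        ('unoccupied_winter', unoccupied & in_winter),
                        ('occupied_summer', occupied & in_summer),
                        ('unoccupied_summer', unoccupied & in_summer)]:
        out['t_op_' + label] = data_frame.loc[mask, 'theta_o_C'].tolist()
        out['x_int_' + label] = data_frame.loc[mask, 'x_int'].tolist()
    return out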
Example #15
def disconnected_buildings_heating_main(locator, building_names, config,
                                        prices, lca):
    """
    Computes the parameters for the operation of disconnected buildings
    output results in csv files.
    There is no optimization at this point. The different technologies are calculated and compared 1 to 1 to
    each technology. it is a classical combinatorial problem.
    :param locator: locator class
    :param building_names: list with names of buildings
    :type locator: class
    :type building_names: list
    :return: results of operation of buildings located in locator.get_optimization_disconnected_folder
    :rtype: Nonetype
    """
    t0 = time.perf_counter()
    prop_geometry = Gdf.from_file(locator.get_zone_geometry())
    restrictions = Gdf.from_file(locator.get_building_restrictions())

    geometry = pd.DataFrame({
        'Name': prop_geometry.Name,
        'Area': prop_geometry.area
    })
    geothermal_potential_data = dbf.dbf_to_dataframe(
        locator.get_building_supply())
    geothermal_potential_data = pd.merge(geothermal_potential_data,
                                         geometry,
                                         on='Name').merge(restrictions,
                                                          on='Name')
    geothermal_potential_data['Area_geo'] = (
        1 - geothermal_potential_data['GEOTHERMAL']
    ) * geothermal_potential_data['Area']
    weather_data = epwreader.epw_reader(config.weather)[[
        'year', 'drybulb_C', 'wetbulb_C', 'relhum_percent', 'windspd_ms',
        'skytemp_C'
    ]]
    ground_temp = calc_ground_temperature(locator,
                                          weather_data['drybulb_C'],
                                          depth_m=10)

    BestData = {}
    total_demand = pd.read_csv(locator.get_total_demand())

    def calc_new_load(mdot, TsupDH, Tret):
        """
        This function calculates the load on the distribution side of the district heating network.
        :param mdot: mass flow
        :param TsupDH: supply temperature
        :param Tret: return temperature
        :type mdot: float
        :type TsupDH: float
        :type Tret: float
        :return: Qload: load of the distribution
        :rtype: float
        """
        Qload = mdot * HEAT_CAPACITY_OF_WATER_JPERKGK * (TsupDH - Tret) * (1 + Q_LOSS_DISCONNECTED)
        if Qload < 0:
            Qload = 0

        return Qload

    for building_name in building_names:
        print(building_name)
        substation.substation_main(locator,
                                   total_demand,
                                   building_names=[building_name],
                                   heating_configuration=7,
                                   cooling_configuration=7,
                                   Flag=False)
        loads = pd.read_csv(
            locator.get_optimization_substations_results_file(building_name),
            usecols=[
                "T_supply_DH_result_K", "T_return_DH_result_K",
                "mdot_DH_result_kgpers"
            ])
        Qload = np.vectorize(calc_new_load)(loads["mdot_DH_result_kgpers"],
                                            loads["T_supply_DH_result_K"],
                                            loads["T_return_DH_result_K"])
        Qannual = Qload.sum()
        Qnom = Qload.max() * (1 + SIZING_MARGIN)  # 1% reliability margin on installed capacity

        # Create empty matrices
        result = np.zeros((13, 7))
        result[0][0] = 1
        result[1][1] = 1
        result[2][2] = 1
        InvCosts = np.zeros((13, 1))
        resourcesRes = np.zeros((13, 4))
        QannualB_GHP = np.zeros((10, 1))  # For the investment costs of the boiler used with GHP
        Wel_GHP = np.zeros((10, 1))  # For the investment costs of the GHP

        # Supply with the Boiler / FC / GHP
        Tret = loads["T_return_DH_result_K"].values
        TsupDH = loads["T_supply_DH_result_K"].values
        mdot = loads["mdot_DH_result_kgpers"].values

        for hour in range(8760):

            if Tret[hour] == 0:
                Tret[hour] = TsupDH[hour]

            # Boiler NG
            BoilerEff = Boiler.calc_Cop_boiler(Qload[hour], Qnom, Tret[hour])

            Qgas = Qload[hour] / BoilerEff

            result[0][4] += prices.NG_PRICE * Qgas  # CHF
            result[0][5] += lca.NG_BACKUPBOILER_TO_CO2_STD * Qgas * 3600E-6  # kgCO2
            result[0][6] += lca.NG_BACKUPBOILER_TO_OIL_STD * Qgas * 3600E-6  # MJ-oil-eq
            resourcesRes[0][0] += Qload[hour]

            if DISC_BIOGAS_FLAG == 1:
                result[0][4] += prices.BG_PRICE * Qgas  # CHF
                result[0][5] += lca.BG_BACKUPBOILER_TO_CO2_STD * Qgas * 3600E-6  # kgCO2
                result[0][6] += lca.BG_BACKUPBOILER_TO_OIL_STD * Qgas * 3600E-6  # MJ-oil-eq

            # Boiler BG
            result[1][4] += prices.BG_PRICE * Qgas  # CHF
            result[1][5] += lca.BG_BACKUPBOILER_TO_CO2_STD * Qgas * 3600E-6  # kgCO2
            result[1][6] += lca.BG_BACKUPBOILER_TO_OIL_STD * Qgas * 3600E-6  # MJ-oil-eq
            resourcesRes[1][1] += Qload[hour]

            # FC
            (FC_Effel, FC_Effth) = FC.calc_eta_FC(Qload[hour], Qnom, 1, "B")
            Qgas = Qload[hour] / (FC_Effth + FC_Effel)
            Qelec = Qgas * FC_Effel

            result[2][4] += prices.NG_PRICE * Qgas - lca.ELEC_PRICE * Qelec  # CHF, extra electricity sold to grid
            result[2][5] += 0.0874 * Qgas * 3600E-6 + 773 * 0.45 * Qelec * 1E-6 - lca.EL_TO_CO2 * Qelec * 3600E-6  # kgCO2
            # Bloom box emissions within the FC: 773 lbs / MWh_el (and 1 lbs = 0.45 kg)
            # http://www.carbonlighthouse.com/2011/09/16/bloom-box/
            result[2][6] += 1.51 * Qgas * 3600E-6 - lca.EL_TO_OIL_EQ * Qelec * 3600E-6  # MJ-oil-eq

            resourcesRes[2][0] += Qload[hour]
            resourcesRes[2][2] += Qelec

            # GHP
            for i in range(10):

                QnomBoiler = i / 10 * Qnom
                QnomGHP = Qnom - QnomBoiler

                if Qload[hour] <= QnomGHP:
                    (wdot_el, qcolddot, qhotdot_missing,
                     tsup2) = HP.calc_Cop_GHP(ground_temp[hour], mdot[hour],
                                              TsupDH[hour], Tret[hour])

                    if Wel_GHP[i][0] < wdot_el:
                        Wel_GHP[i][0] = wdot_el

                    result[3 + i][4] += lca.ELEC_PRICE * wdot_el  # CHF
                    result[3 + i][5] += lca.SMALL_GHP_TO_CO2_STD * wdot_el * 3600E-6  # kgCO2
                    result[3 + i][6] += lca.SMALL_GHP_TO_OIL_STD * wdot_el * 3600E-6  # MJ-oil-eq

                    resourcesRes[3 + i][2] -= wdot_el
                    resourcesRes[3 + i][3] += Qload[hour] - qhotdot_missing

                    if qhotdot_missing > 0:
                        print("GHP unable to cover the whole demand, boiler activated!")
                        BoilerEff = Boiler.calc_Cop_boiler(qhotdot_missing, QnomBoiler, tsup2)
                        Qgas = qhotdot_missing / BoilerEff

                        result[3 + i][4] += prices.NG_PRICE * Qgas  # CHF
                        result[3 + i][5] += lca.NG_BACKUPBOILER_TO_CO2_STD * Qgas * 3600E-6  # kgCO2
                        result[3 + i][6] += lca.NG_BACKUPBOILER_TO_OIL_STD * Qgas * 3600E-6  # MJ-oil-eq

                        QannualB_GHP[i][0] += qhotdot_missing
                        resourcesRes[3 + i][0] += qhotdot_missing

                else:
                    # print "Boiler activated to compensate GHP", i
                    # if gv.DiscGHPFlag == 0:
                    #    print QnomGHP
                    #   QnomGHP = 0
                    #   print "GHP not allowed 2, set QnomGHP to zero"

                    TexitGHP = QnomGHP / (mdot[hour] * HEAT_CAPACITY_OF_WATER_JPERKGK) + Tret[hour]
                    (wdot_el, qcolddot, qhotdot_missing,
                     tsup2) = HP.calc_Cop_GHP(ground_temp[hour], mdot[hour],
                                              TexitGHP, Tret[hour])

                    if Wel_GHP[i][0] < wdot_el:
                        Wel_GHP[i][0] = wdot_el

                    result[3 + i][4] += lca.ELEC_PRICE * wdot_el  # CHF
                    result[3 + i][5] += lca.SMALL_GHP_TO_CO2_STD * wdot_el * 3600E-6  # kgCO2
                    result[3 + i][6] += lca.SMALL_GHP_TO_OIL_STD * wdot_el * 3600E-6  # MJ-oil-eq

                    resourcesRes[3 + i][2] -= wdot_el
                    resourcesRes[3 + i][3] += QnomGHP - qhotdot_missing

                    if qhotdot_missing > 0:
                        print("GHP unable to cover the whole demand, boiler activated!")
                        BoilerEff = Boiler.calc_Cop_boiler(qhotdot_missing, QnomBoiler, tsup2)
                        Qgas = qhotdot_missing / BoilerEff

                        result[3 + i][4] += prices.NG_PRICE * Qgas  # CHF
                        result[3 + i][5] += lca.NG_BACKUPBOILER_TO_CO2_STD * Qgas * 3600E-6  # kgCO2
                        result[3 + i][6] += lca.NG_BACKUPBOILER_TO_OIL_STD * Qgas * 3600E-6  # MJ-oil-eq

                        QannualB_GHP[i][0] += qhotdot_missing
                        resourcesRes[3 + i][0] += qhotdot_missing

                    QtoBoiler = Qload[hour] - QnomGHP
                    QannualB_GHP[i][0] += QtoBoiler

                    BoilerEff = Boiler.calc_Cop_boiler(QtoBoiler, QnomBoiler,
                                                       TexitGHP)
                    Qgas = QtoBoiler / BoilerEff

                    result[3 + i][4] += prices.NG_PRICE * Qgas  # CHF
                    result[3 + i][5] += lca.NG_BACKUPBOILER_TO_CO2_STD * Qgas * 3600E-6  # kgCO2
                    result[3 + i][6] += lca.NG_BACKUPBOILER_TO_OIL_STD * Qgas * 3600E-6  # MJ-oil-eq
                    resourcesRes[3 + i][0] += QtoBoiler

        # Investment Costs / CO2 / Prim
        Capex_a_Boiler, Opex_Boiler = Boiler.calc_Cinv_boiler(
            Qnom, locator, config, 'BO1')
        InvCosts[0][0] = Capex_a_Boiler + Opex_Boiler
        InvCosts[1][0] = Capex_a_Boiler + Opex_Boiler

        Capex_a_FC, Opex_FC = FC.calc_Cinv_FC(Qnom, locator, config)
        InvCosts[2][0] = Capex_a_FC + Opex_FC

        for i in range(10):
            result[3 + i][0] = i / 10
            result[3 + i][3] = 1 - i / 10

            QnomBoiler = i / 10 * Qnom

            Capex_a_Boiler, Opex_Boiler = Boiler.calc_Cinv_boiler(
                QnomBoiler, locator, config, 'BO1')

            InvCosts[3 + i][0] = Capex_a_Boiler + Opex_Boiler

            Capex_a_GHP, Opex_GHP = HP.calc_Cinv_GHP(Wel_GHP[i][0], locator,
                                                     config)
            InvCaGHP = Capex_a_GHP + Opex_GHP
            InvCosts[3 + i][0] += InvCaGHP * prices.EURO_TO_CHF

        # Best configuration
        Best = np.zeros((13, 1))
        indexBest = 0

        TotalCosts = np.zeros((13, 2))
        TotalCO2 = np.zeros((13, 2))
        TotalPrim = np.zeros((13, 2))

        for i in range(13):
            TotalCosts[i][0] = TotalCO2[i][0] = TotalPrim[i][0] = i

            TotalCosts[i][1] = InvCosts[i][0] + result[i][4]
            TotalCO2[i][1] = result[i][5]
            TotalPrim[i][1] = result[i][6]

        CostsS = TotalCosts[np.argsort(TotalCosts[:, 1])]
        CO2S = TotalCO2[np.argsort(TotalCO2[:, 1])]
        PrimS = TotalPrim[np.argsort(TotalPrim[:, 1])]

        el = len(CostsS)
        rank = 0
        Bestfound = False

        optsearch = np.empty(el)
        optsearch.fill(3)
        indexBest = 0
        geothermal_potential = geothermal_potential_data.set_index('Name')

        # Check the GHP area constraint
        for i in range(10):
            QGHP = (1 - i / 10) * Qnom
            areaAvail = geothermal_potential.loc[building_name, 'Area_geo']
            Qallowed = np.ceil(areaAvail / GHP_A) * GHP_HMAX_SIZE  # [W_th]
            if Qallowed < QGHP:
                optsearch[i + 3] += 1
                Best[i + 3][0] = -1

        while not Bestfound and rank < el:

            optsearch[int(CostsS[rank][0])] -= 1
            optsearch[int(CO2S[rank][0])] -= 1
            optsearch[int(PrimS[rank][0])] -= 1

            if np.count_nonzero(optsearch) != el:
                Bestfound = True
                indexBest = np.where(optsearch == 0)[0][0]

            rank += 1

        # get the best option according to the ranking.
        Best[indexBest][0] = 1
        Qnom_array = np.ones(len(Best[:, 0])) * Qnom

        # Save results in csv file
        dico = {}
        dico["BoilerNG Share"] = result[:, 0]
        dico["BoilerBG Share"] = result[:, 1]
        dico["FC Share"] = result[:, 2]
        dico["GHP Share"] = result[:, 3]
        dico["Operation Costs [CHF]"] = result[:, 4]
        dico["CO2 Emissions [kgCO2-eq]"] = result[:, 5]
        dico["Primary Energy Needs [MJoil-eq]"] = result[:, 6]
        dico["Annualized Investment Costs [CHF]"] = InvCosts[:, 0]
        dico["Total Costs [CHF]"] = TotalCosts[:, 1]
        dico["Best configuration"] = Best[:, 0]
        dico["Nominal Power"] = Qnom_array
        dico["QfromNG"] = resourcesRes[:, 0]
        dico["QfromBG"] = resourcesRes[:, 1]
        dico["EforGHP"] = resourcesRes[:, 2]
        dico["QfromGHP"] = resourcesRes[:, 3]

        results_to_csv = pd.DataFrame(dico)

        fName_result = locator.get_optimization_disconnected_folder_building_result_heating(
            building_name)
        results_to_csv.to_csv(fName_result, sep=',')

        BestComb = {}
        BestComb["BoilerNG Share"] = result[indexBest, 0]
        BestComb["BoilerBG Share"] = result[indexBest, 1]
        BestComb["FC Share"] = result[indexBest, 2]
        BestComb["GHP Share"] = result[indexBest, 3]
        BestComb["Operation Costs [CHF]"] = result[indexBest, 4]
        BestComb["CO2 Emissions [kgCO2-eq]"] = result[indexBest, 5]
        BestComb["Primary Energy Needs [MJoil-eq]"] = result[indexBest, 6]
        BestComb["Annualized Investment Costs [CHF]"] = InvCosts[indexBest, 0]
        BestComb["Total Costs [CHF]"] = TotalCosts[indexBest, 1]
        BestComb["Best configuration"] = Best[indexBest, 0]
        BestComb["Nominal Power"] = Qnom

        BestData[building_name] = BestComb

    if 0:
        fName = locator.get_optimization_disconnected_folder_disc_op_summary_heating()
        results_to_csv = pd.DataFrame(BestData)
        results_to_csv.to_csv(fName, sep=',')

    print(time.perf_counter() - t0, "seconds process time for the Disconnected Building Routine \n")
Example #16
def create_new_project(locator, config):
    # Local variables
    zone_geometry_path = config.create_new_project.zone
    district_geometry_path = config.create_new_project.district
    street_geometry_path = config.create_new_project.streets
    terrain_path = config.create_new_project.terrain
    occupancy_path = config.create_new_project.occupancy
    age_path  = config.create_new_project.age

    # verify files (if they have the columns cea needs) and then save to new project location
    zone, lat, lon = shapefile_to_WSG_and_UTM(zone_geometry_path)
    try:
        zone_test = zone[COLUMNS_ZONE_GEOMETRY]
    except KeyError:
        print("one or more columns in the input file are not compatible with cea, please ensure the column" +
              " names comply with:", COLUMNS_ZONE_GEOMETRY)
    else:
        # apply coordinate system of terrain into zone and save zone to disk.
        terrain = raster_to_WSG_and_UTM(terrain_path, lat, lon)
        zone.to_file(locator.get_zone_geometry())
        driver = gdal.GetDriverByName('GTiff')
        driver.CreateCopy(locator.get_terrain(), terrain)

    # now create the district file if it does not exist
    if district_geometry_path == '':
        print("there is no district file, we proceed to create it based on the geometry of your zone")
        zone.to_file(locator.get_district_geometry())
    else:
        district, _, _ = shapefile_to_WSG_and_UTM(district_geometry_path)
        try:
            district_test = district[COLUMNS_DISTRICT_GEOMETRY]
        except KeyError:
            print("one or more columns in the input file are not compatible with cea, please ensure the column" +
                  " names comply with:", COLUMNS_DISTRICT_GEOMETRY)
        else:
            district.to_file(locator.get_district_geometry())

    # now transfer the streets
    if street_geometry_path == '':
        print("there is no street file, optimizaiton of cooling networks wont be possible")
    else:
        street, _, _ = shapefile_to_WSG_and_UTM(street_geometry_path)
        street.to_file(locator.get_street_network())

    ## create occupancy file and year file
    if occupancy_path == '':
        print("there is no occupancy file, we proceed to create it based on the geometry of your zone")
        zone = Gdf.from_file(zone_geometry_path).drop('geometry', axis=1)
        for field in COLUMNS_ZONE_OCCUPANCY:
            zone[field] = 0.0
        zone[COLUMNS_ZONE_OCCUPANCY[:2]] = 0.5  # adding 0.5 area use to the first two uses
        dataframe_to_dbf(zone[['Name'] + COLUMNS_ZONE_OCCUPANCY], locator.get_building_occupancy())
    else:
        try:
            occupancy_file = dbf_to_dataframe(occupancy_path)
            occupancy_file_test = occupancy_file[['Name'] + COLUMNS_ZONE_OCCUPANCY]
            copyfile(occupancy_path, locator.get_building_occupancy())
        except KeyError:
            print("one or more columns in the input file are not compatible with cea, please ensure the column" +
                  " names comply with:", COLUMNS_ZONE_OCCUPANCY)

    ## create age file
    if age_path == '':
        print("there is no file with the age of the buildings, we proceed to create it based on the geometry of your zone")
        zone = Gdf.from_file(zone_geometry_path).drop('geometry', axis=1)
        for field in COLUMNS_ZONE_AGE:
            zone[field] = 0.0
        zone['built'] = 2017  # adding year of construction
        dataframe_to_dbf(zone[['Name'] + COLUMNS_ZONE_AGE], locator.get_building_age())
    else:
        try:
            age_file = dbf_to_dataframe(age_path)
            age_file_test = age_file[['Name'] + COLUMNS_ZONE_AGE]
            copyfile(age_path, locator.get_building_age())
        except KeyError:
            print("one or more columns in the input file are not compatible with cea, please ensure the column" +
                  " names comply with:", COLUMNS_ZONE_AGE)


    # add other folders by calling the locator
    locator.get_measurements()
    locator.get_input_network_folder("DH","")
    locator.get_input_network_folder("DC","")
    locator.get_weather_folder()
Example #17
def archetypes_mapper(locator, update_architecture_dbf,
                      update_air_conditioning_systems_dbf,
                      update_indoor_comfort_dbf, update_internal_loads_dbf,
                      update_supply_systems_dbf, update_schedule_operation_cea,
                      buildings):
    """
    algorithm to query building properties from statistical database
    Archetypes_HVAC_properties.csv. for more info check the integrated demand
    model of Fonseca et al. 2015. Appl. energy.

    :param InputLocator locator: an InputLocator instance set to the scenario to work on
    :param boolean update_architecture_dbf: if True, update the construction and architecture properties.
    :param boolean update_indoor_comfort_dbf: if True, get properties about thermal comfort.
    :param boolean update_air_conditioning_systems_dbf: if True, get properties about types of HVAC systems, otherwise False.
    :param boolean update_internal_loads_dbf: if True, get properties about internal loads, otherwise False.

    The following files are created by this script, depending on which flags were set:

    - building_HVAC: .dbf
        describes the queried properties of HVAC systems.

    - architecture.dbf
        describes the queried properties of architectural features

    - building_thermal: .shp
        describes the queried thermal properties of buildings

    - indoor_comfort.shp
        describes the queried thermal properties of buildings
    """
    # get occupancy and age files
    building_typology_df = dbf_to_dataframe(locator.get_building_typology())

    # validate list of uses in case study
    list_uses = get_list_of_uses_in_case_study(building_typology_df)

    # get occupant densities from archetypes schedules
    occupant_densities = {}
    occ_densities = pd.read_excel(locator.get_database_use_types_properties(),
                                  'INTERNAL_LOADS').set_index('code')
    for use in list_uses:
        if occ_densities.loc[use, 'Occ_m2pax'] > 0.0:
            occupant_densities[use] = 1 / occ_densities.loc[use, 'Occ_m2pax']
        else:
            occupant_densities[use] = 0.0

    # get properties about the construction and architecture
    if update_architecture_dbf:
        architecture_mapper(locator, building_typology_df)

    # get properties about types of HVAC systems
    if update_air_conditioning_systems_dbf:
        aircon_mapper(locator, building_typology_df)

    if update_indoor_comfort_dbf:
        indoor_comfort_mapper(list_uses, locator, occupant_densities,
                              building_typology_df)

    if update_internal_loads_dbf:
        internal_loads_mapper(list_uses, locator, occupant_densities,
                              building_typology_df)

    if update_schedule_operation_cea:
        calc_mixed_schedule(locator, building_typology_df, buildings)

    if update_supply_systems_dbf:
        supply_mapper(locator, building_typology_df)
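The occupant-density loop above simply inverts m2 per person into people per m2, guarding against zero. The same step in isolation (values made up):

import pandas as pd

occ_m2_per_person = pd.Series({'MULTI_RES': 40.0, 'PARKING': 0.0})
occupant_densities = {use: 1 / m2p if m2p > 0.0 else 0.0
                      for use, m2p in occ_m2_per_person.items()}
print(occupant_densities)  # {'MULTI_RES': 0.025, 'PARKING': 0.0}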
Example #18
def migrate_3_22_to_3_22_1(scenario):
    '''
    Renames columns in `indoor_comfort.dbf` and `internal_loads.dbf` to remove the use of "pax" meaning "people".
    '''

    INDOOR_COMFORT_COLUMNS = {'Ve_lpspax': 'Ve_lsp'}
    INTERNAL_LOADS_COLUMNS = {
        'Occ_m2pax': 'Occ_m2p',
        'Qs_Wpax': 'Qs_Wp',
        'Vw_lpdpax': 'Vw_ldp',
        'Vww_lpdpax': 'Vww_ldp',
        'X_ghpax': 'X_ghp'
    }
    OCCUPANCY_COLUMNS = {'people_pax': 'people_p'}

    if indoor_comfort_is_3_22(scenario):
        # import building properties
        indoor_comfort = dbf_to_dataframe(
            os.path.join(scenario, 'inputs', 'building-properties',
                         'indoor_comfort.dbf'))
        # make a backup copy of original data for user's own reference
        os.rename(
            os.path.join(scenario, 'inputs', 'building-properties',
                         'indoor_comfort.dbf'),
            os.path.join(scenario, 'inputs', 'building-properties',
                         'indoor_comfort_original.dbf'))
        # rename columns containing "pax"
        indoor_comfort.rename(columns=INDOOR_COMFORT_COLUMNS, inplace=True)
        # export dataframes to dbf files
        print("- writing indoor_comfort.dbf")
        dataframe_to_dbf(
            indoor_comfort,
            os.path.join(scenario, 'inputs', 'building-properties',
                         'indoor_comfort.dbf'))

    if internal_loads_is_3_22(scenario):
        # import building properties
        internal_loads = dbf_to_dataframe(
            os.path.join(scenario, 'inputs', 'building-properties',
                         'internal_loads.dbf'))
        # make a backup copy of original data for user's own reference
        os.rename(
            os.path.join(scenario, 'inputs', 'building-properties',
                         'internal_loads.dbf'),
            os.path.join(scenario, 'inputs', 'building-properties',
                         'internal_loads_original.dbf'))
        # rename columns containing "pax"
        internal_loads.rename(columns=INTERNAL_LOADS_COLUMNS, inplace=True)
        # export dataframes to dbf files
        print("- writing internal_loads.dbf")
        dataframe_to_dbf(
            internal_loads,
            os.path.join(scenario, 'inputs', 'building-properties',
                         'internal_loads.dbf'))

    # import building properties
    use_type_properties = pd.read_excel(os.path.join(
        scenario, 'inputs', 'technology', 'archetypes', 'use_types',
        'USE_TYPE_PROPERTIES.xlsx'),
                                        sheet_name=None)
    if (any(i in use_type_properties['INTERNAL_LOADS'].columns
            for i in INTERNAL_LOADS_COLUMNS)
            or any(i in use_type_properties['INDOOR_COMFORT'].columns
                   for i in INDOOR_COMFORT_COLUMNS)):
        os.rename(
            os.path.join(scenario, 'inputs', 'technology', 'archetypes',
                         'use_types', 'USE_TYPE_PROPERTIES.xlsx'),
            os.path.join(scenario, 'inputs', 'technology', 'archetypes',
                         'use_types', 'USE_TYPE_PROPERTIES_original.xlsx'))
        # rename columns containing "pax"
        use_type_properties['INDOOR_COMFORT'].rename(
            columns=INDOOR_COMFORT_COLUMNS, inplace=True)
        use_type_properties['INTERNAL_LOADS'].rename(
            columns=INTERNAL_LOADS_COLUMNS, inplace=True)
        # export dataframes to dbf files
        print("-writing USE_TYPE_PROPERTIES.xlsx")
        with pd.ExcelWriter(
                os.path.join(scenario, 'inputs', 'technology', 'archetypes',
                             'use_types',
                             'USE_TYPE_PROPERTIES.xlsx')) as writer1:
            for sheet_name in use_type_properties.keys():
                use_type_properties[sheet_name].to_excel(writer1,
                                                         sheet_name=sheet_name,
                                                         index=False)
    if output_occupancy_is_3_22(scenario):
        # if occupancy schedule files are found in the outputs, these are also renamed
        print("-writing schedules in ./outputs/data/occupancy")
        for file_name in os.listdir(
                os.path.join(scenario, 'outputs', 'data', 'occupancy')):
            schedule_df = pd.read_csv(
                os.path.join(scenario, 'outputs', 'data', 'occupancy',
                             file_name))
            if 'people_pax' in schedule_df.columns:
                os.rename(
                    os.path.join(scenario, 'outputs', 'data', 'occupancy',
                                 file_name),
                    os.path.join(
                        scenario, 'outputs', 'data', 'occupancy',
                        file_name.split('.')[0] + '_original.' +
                        file_name.split('.')[1]))
                schedule_df.rename(columns=OCCUPANCY_COLUMNS, inplace=True)
                # export dataframes to csv files
                schedule_df.to_csv(
                    os.path.join(scenario, 'outputs', 'data', 'occupancy',
                                 file_name))

    print("- done")
Example #19
def migrate_2_29_to_2_31(scenario):
    def lookup_standard(year, standards_df):
        matched_standards = standards_df[(standards_df.YEAR_START <= year)
                                         & (year <= standards_df.YEAR_END)]
        if len(matched_standards):
            # find first standard that is similar to the year
            standard = matched_standards.iloc[0]
        else:
            raise ValueError(
                'Could not find a `STANDARD` in the databases to match the year `{}`. '
                'You can try adding it to the `CONSTRUCTION_STANDARDS` input database and try again.'
                .format(year))
        return standard.STANDARD

    def convert_occupancy(name, occupancy_dbf):
        row = occupancy_dbf[occupancy_dbf.Name == name].iloc[0]
        uses = set(row.to_dict().keys()) - {"Name", "REFERENCE"}
        uses = sorted(uses, key=lambda use: float(row[use]), reverse=True)
        result = {
            "1ST_USE": uses[0],
            "1ST_USE_R": float(row[uses[0]]),
            "2ND_USE": uses[1],
            "2ND_USE_R": float(row[uses[1]]),
            "3RD_USE": uses[2],
            "3RD_USE_R": float(row[uses[2]])
        }
        if np.isclose(result["2ND_USE_R"], 0.0):
            result["1ST_USE_R"] = 1.0
            result["2ND_USE_R"] = 0.0
            result["3RD_USE_R"] = 0.0
            result["2ND_USE"] = "NONE"
            result["3RD_USE"] = "NONE"
        elif np.isclose(result["3RD_USE_R"], 0.0):
            result["1ST_USE_R"] = 1.0 - result["2ND_USE_R"]
            result["3RD_USE_R"] = 0.0
            result["3RD_USE"] = "NONE"

        result["1ST_USE_R"] = 1.0 - result["2ND_USE_R"] - result["3RD_USE_R"]
        return result

    def merge_age_and_occupancy_to_typology(age_dbf, occupancy_dbf,
                                            standards_df):
        # merge age.dbf and occupancy.dbf to typology.dbf
        typology_dbf_columns = [
            "Name", "YEAR", "STANDARD", "1ST_USE", "1ST_USE_R", "2ND_USE",
            "2ND_USE_R", "3RD_USE", "3RD_USE_R"
        ]
        typology_dbf = pd.DataFrame(columns=typology_dbf_columns)

        for rindex, row in age_dbf.iterrows():
            typology_row = {
                "Name": row.Name,
                "YEAR": row.built,
                "STANDARD": lookup_standard(row.built, standards_df)
            }
            typology_row.update(convert_occupancy(row.Name, occupancy_dbf))

            typology_dbf = typology_dbf.append(typology_row, ignore_index=True)

        return typology_dbf

    age_dbf_path = os.path.join(scenario, "inputs", "building-properties",
                                "age.dbf")
    occupancy_dbf_path = os.path.join(scenario, "inputs",
                                      "building-properties", "occupancy.dbf")

    age_df = dbf_to_dataframe(age_dbf_path)
    occupancy_df = dbf_to_dataframe(occupancy_dbf_path)

    locator = cea.inputlocator.InputLocator(scenario=scenario)
    standards_df = pd.read_excel(locator.get_database_construction_standards(),
                                 "STANDARD_DEFINITION")
    typology_df = merge_age_and_occupancy_to_typology(age_df, occupancy_df,
                                                      standards_df)

    print("- writing typology.dbf")
    dataframe_to_dbf(typology_df, locator.get_building_typology())
    print("- removing occupancy.dbf and age.dbf")
    os.remove(age_dbf_path)
    os.remove(occupancy_dbf_path)
    print(
        "- removing invalid input-tables (NOTE: run archetypes-mapper again)")
    for fname in {
            "supply_systems.dbf", "internal_loads.dbf", "indoor_comfort.dbf",
            "air_conditioning.dbf", "architecture.dbf"
    }:
        fpath = os.path.join(scenario, "inputs", "building-properties", fname)
        if os.path.exists(fpath):
            print("  - removing {fname}".format(fname=fname))
            os.remove(fpath)
    print("- done")
    print("- NOTE: You'll need to run the archetypes-mapper tool after this migration!")
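For reference, a toy call of convert_occupancy from the example above (assuming it has been lifted out of migrate_2_29_to_2_31 so it can be called directly; the use columns are made up):

import pandas as pd

occupancy_dbf = pd.DataFrame([{'Name': 'B001', 'REFERENCE': '',
                               'MULTI_RES': 0.7, 'OFFICE': 0.3, 'RETAIL': 0.0}])
print(convert_occupancy('B001', occupancy_dbf))
# expected: 1ST_USE='MULTI_RES', 1ST_USE_R=0.7, 2ND_USE='OFFICE', 2ND_USE_R=0.3, 3RD_USE='NONE'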
Example #20
    def __init__(self, locator, building_names=None):
        """
        Read building properties from input shape files and construct a new BuildingProperties object.

        :param locator: an InputLocator for locating the input files
        :type locator: cea.inputlocator.InputLocator

        :param List[str] building_names: list of buildings to read properties

        :returns: BuildingProperties
        :rtype: BuildingProperties
        """

        if building_names is None:
            building_names = locator.get_zone_building_names()

        self.building_names = building_names
        print("read input files")
        prop_geometry = Gdf.from_file(locator.get_zone_geometry())
        prop_geometry['footprint'] = prop_geometry.area
        prop_geometry['perimeter'] = prop_geometry.length
        prop_geometry['Blength'], prop_geometry['Bwidth'] = self.calc_bounding_box_geom(locator.get_zone_geometry())
        prop_geometry = prop_geometry.drop('geometry', axis=1).set_index('Name')
        prop_hvac = dbf_to_dataframe(locator.get_building_air_conditioning())
        prop_typology = dbf_to_dataframe(locator.get_building_typology()).set_index('Name')
        # Drop 'REFERENCE' column if it exists
        if 'REFERENCE' in prop_typology:
            prop_typology.drop('REFERENCE', axis=1, inplace=True)
        prop_architectures = dbf_to_dataframe(locator.get_building_architecture())
        prop_comfort = dbf_to_dataframe(locator.get_building_comfort()).set_index('Name')
        prop_internal_loads = dbf_to_dataframe(locator.get_building_internal()).set_index('Name')
        prop_supply_systems_building = dbf_to_dataframe(locator.get_building_supply())

        # GET SYSTEMS EFFICIENCIES
        prop_supply_systems = get_properties_supply_sytems(locator, prop_supply_systems_building).set_index(
            'Name')

        # get temperatures of operation
        prop_HVAC_result = get_properties_technical_systems(locator, prop_hvac).set_index('Name')

        # get envelope properties
        prop_envelope = get_envelope_properties(locator, prop_architectures).set_index('Name')

        # get properties of rc demand model
        prop_rc_model = self.calc_prop_rc_model(locator, prop_typology, prop_envelope,
                                                prop_geometry, prop_HVAC_result)

        # get solar properties
        solar = get_prop_solar(locator, building_names, prop_rc_model, prop_envelope).set_index('Name')

        # df_windows = geometry_reader.create_windows(surface_properties, prop_envelope)
        # TODO: check whether Win_op and the window height are necessary.
        # TODO: maybe merging branch i9 with CityGML could help with this
        print("done")

        # save resulting data
        self._prop_supply_systems = prop_supply_systems
        self._prop_geometry = prop_geometry
        self._prop_envelope = prop_envelope
        self._prop_typology = prop_typology
        self._prop_HVAC_result = prop_HVAC_result
        self._prop_comfort = prop_comfort
        self._prop_internal_loads = prop_internal_loads
        self._prop_age = prop_typology[['YEAR']]
        self._solar = solar
        self._prop_RC_model = prop_rc_model
Example #21
def disconnected_buildings_heating_main(locator, total_demand, building_names, config, prices, lca):
    """
    Computes the parameters for the operation of disconnected buildings
    output results in csv files.
    There is no optimization at this point. The different technologies are calculated and compared 1 to 1 to
    each technology. it is a classical combinatorial problem.
    :param locator: locator class
    :param building_names: list with names of buildings
    :type locator: class
    :type building_names: list
    :return: results of operation of buildings located in locator.get_optimization_decentralized_folder
    :rtype: Nonetype
    """
    t0 = time.perf_counter()
    prop_geometry = Gdf.from_file(locator.get_zone_geometry())
    geometry = pd.DataFrame({'Name': prop_geometry.Name, 'Area': prop_geometry.area})
    geothermal_potential_data = dbf.dbf_to_dataframe(locator.get_building_supply())
    geothermal_potential_data = pd.merge(geothermal_potential_data, geometry, on='Name')
    geothermal_potential_data['Area_geo'] = geothermal_potential_data['Area']
    weather_path = locator.get_weather_file()
    weather_data = epwreader.epw_reader(weather_path)[['year', 'drybulb_C', 'wetbulb_C',
                                                         'relhum_percent', 'windspd_ms', 'skytemp_C']]

    T_ground_K = calc_ground_temperature(locator, weather_data['drybulb_C'], depth_m=10)


    # This will calculate the substation state if all buildings were connected (this is how we study this)
    substation.substation_main_heating(locator, total_demand, building_names)

    for building_name in building_names:
        print('running for building %s' % building_name)
        # run substation model to derive temperatures of the building
        substation_results = pd.read_csv(locator.get_optimization_substations_results_file(building_name, "DH", ""))
        q_load_Wh = np.vectorize(calc_new_load)(substation_results["mdot_DH_result_kgpers"],
                                                substation_results["T_supply_DH_result_K"],
                                                substation_results["T_return_DH_result_K"])
        Qnom_W = q_load_Wh.max()

        # Create empty matrices
        Opex_a_var_USD = np.zeros((13, 7))
        Capex_total_USD = np.zeros((13, 7))
        Capex_a_USD = np.zeros((13, 7))
        Opex_a_fixed_USD = np.zeros((13, 7))
        Capex_opex_a_fixed_only_USD = np.zeros((13, 7))
        Opex_a_USD = np.zeros((13, 7))
        GHG_tonCO2 = np.zeros((13, 7))
        PEN_MJoil = np.zeros((13, 7))
        # indicate supply technologies for each configuration
        Opex_a_var_USD[0][0] = 1  # Boiler NG
        Opex_a_var_USD[1][1] = 1  # Boiler BG
        Opex_a_var_USD[2][2] = 1  # Fuel Cell

        resourcesRes = np.zeros((13, 4))
        Q_Boiler_for_GHP_W = np.zeros((10, 1))  # Save peak capacity of GHP Backup Boilers
        GHP_el_size_W = np.zeros((10, 1))  # Save peak capacity of GHP

        # save supply system activation of all supply configurations
        heating_dispatch = {}

        # Supply with the Boiler / FC / GHP
        Tret_K = substation_results["T_return_DH_result_K"].values
        Tsup_K = substation_results["T_supply_DH_result_K"].values
        mdot_kgpers = substation_results["mdot_DH_result_kgpers"].values

        ## Start Hourly calculation
        print(building_name, 'decentralized heating supply systems simulations...')
        Tret_K = np.where(Tret_K > 0.0, Tret_K, Tsup_K)

        ## 0: Boiler NG
        BoilerEff = np.vectorize(Boiler.calc_Cop_boiler)(q_load_Wh, Qnom_W, Tret_K)
        Qgas_to_Boiler_Wh = np.divide(q_load_Wh, BoilerEff, out=np.zeros_like(q_load_Wh), where=BoilerEff != 0.0)
        Boiler_Status = np.where(Qgas_to_Boiler_Wh > 0.0, 1, 0)
        # add costs
        Opex_a_var_USD[0][4] += sum(prices.NG_PRICE * Qgas_to_Boiler_Wh)  # CHF
        GHG_tonCO2[0][5] += calc_emissions_Whyr_to_tonCO2yr(sum(Qgas_to_Boiler_Wh), lca.NG_TO_CO2_EQ)  # ton CO2
        PEN_MJoil[0][6] += calc_pen_Whyr_to_MJoilyr(sum(Qgas_to_Boiler_Wh), lca.NG_TO_OIL_EQ)  # MJ-oil-eq
        # add activation
        resourcesRes[0][0] += sum(q_load_Wh)  # q from NG
        heating_dispatch[0] = {'Q_Boiler_gen_directload_W': q_load_Wh,
                               'Boiler_Status': Boiler_Status,
                               'NG_Boiler_req_W': Qgas_to_Boiler_Wh,
                               'E_hs_ww_req_W': np.zeros(8760)}
        ## 1: Boiler BG
        # add costs
        Opex_a_var_USD[1][4] += sum(prices.BG_PRICE * Qgas_to_Boiler_Wh)  # CHF
        GHG_tonCO2[1][5] += calc_emissions_Whyr_to_tonCO2yr(sum(Qgas_to_Boiler_Wh), lca.NG_TO_CO2_EQ)  # ton CO2
        PEN_MJoil[1][6] += calc_pen_Whyr_to_MJoilyr(sum(Qgas_to_Boiler_Wh), lca.NG_TO_OIL_EQ)  # MJ-oil-eq
        # add activation
        resourcesRes[1][1] += sum(q_load_Wh)  # q from BG
        heating_dispatch[1] = {'Q_Boiler_gen_directload_W': q_load_Wh,
                               'Boiler_Status': Boiler_Status,
                               'BG_Boiler_req_W': Qgas_to_Boiler_Wh,
                               'E_hs_ww_req_W': np.zeros(8760)}

        ## 2: Fuel Cell
        (FC_Effel, FC_Effth) = np.vectorize(FC.calc_eta_FC)(q_load_Wh, Qnom_W, 1, "B")
        Qgas_to_FC_Wh = q_load_Wh / (FC_Effth + FC_Effel)  # FIXME: should be q_load_Wh/FC_Effth?
        el_from_FC_Wh = Qgas_to_FC_Wh * FC_Effel
        FC_Status = np.where(Qgas_to_FC_Wh > 0.0, 1, 0)
        # add variable costs, emissions and primary energy
        Opex_a_var_USD[2][4] += sum(prices.NG_PRICE * Qgas_to_FC_Wh - prices.ELEC_PRICE_EXPORT * el_from_FC_Wh)  # CHF, extra electricity sold to grid
        GHG_tonCO2_from_FC = (0.0874 * Qgas_to_FC_Wh * 3600E-6 + 773 * 0.45 * el_from_FC_Wh * 1E-6 -
                              lca.EL_TO_CO2_EQ * el_from_FC_Wh * 3600E-6) / 1E3
        GHG_tonCO2[2][5] += sum(GHG_tonCO2_from_FC)  # tonCO2
        # Bloom box emissions within the FC: 773 lbs / MWh_el (and 1 lbs = 0.45 kg)
        # http://www.carbonlighthouse.com/2011/09/16/bloom-box/
        PEN_MJoil_from_FC = 1.51 * Qgas_to_FC_Wh * 3600E-6 - lca.EL_TO_OIL_EQ * el_from_FC_Wh * 3600E-6
        PEN_MJoil[2][6] += sum(PEN_MJoil_from_FC)  # MJ-oil-eq
        # add activation
        resourcesRes[2][0] = sum(q_load_Wh)  # q from NG
        resourcesRes[2][2] = sum(el_from_FC_Wh)  # el for GHP # FIXME: el from FC
        heating_dispatch[2] = {'Q_Fuelcell_gen_directload_W': q_load_Wh,
                               'Fuelcell_Status': FC_Status,
                               'NG_FuelCell_req_W': Qgas_to_FC_Wh,
                               'E_Fuelcell_gen_export_W': el_from_FC_Wh,
                               'E_hs_ww_req_W': np.zeros(8760)}

        # 3-12: Boiler NG + GHP (ten capacity-share configurations)
        for i in range(10):
            # set nominal size for Boiler and GHP
            QnomBoiler_W = i / 10.0 * Qnom_W
            mdot_kgpers_Boiler = i / 10.0 * mdot_kgpers
            QnomGHP_W = Qnom_W - QnomBoiler_W

            # GHP operation
            Texit_GHP_nom_K = QnomGHP_W / (mdot_kgpers * HEAT_CAPACITY_OF_WATER_JPERKGK) + Tret_K
            el_GHP_Wh, q_load_NG_Boiler_Wh, \
            qhot_missing_Wh, \
            Texit_GHP_K, q_from_GHP_Wh = np.vectorize(calc_GHP_operation)(QnomGHP_W, T_ground_K, Texit_GHP_nom_K,
                                                                      Tret_K, Tsup_K, mdot_kgpers, q_load_Wh)
            GHP_el_size_W[i][0] = max(el_GHP_Wh)
            GHP_Status = np.where(q_from_GHP_Wh > 0.0, 1, 0)

            # GHP Backup Boiler operation
            if max(qhot_missing_Wh) > 0.0:
                print "GHP unable to cover the whole demand, boiler activated!"
                Qnom_GHP_Backup_Boiler_W = max(qhot_missing_Wh)
                BoilerEff = np.vectorize(Boiler.calc_Cop_boiler)(qhot_missing_Wh, Qnom_GHP_Backup_Boiler_W, Texit_GHP_K)
                Qgas_to_GHPBoiler_Wh = np.divide(qhot_missing_Wh, BoilerEff,
                                                 out=np.zeros_like(qhot_missing_Wh), where=BoilerEff != 0.0)
            else:
                Qgas_to_GHPBoiler_Wh = np.zeros(q_load_Wh.shape[0])
                Qnom_GHP_Backup_Boiler_W = 0.0
            Q_Boiler_for_GHP_W[i][0] = Qnom_GHP_Backup_Boiler_W
            GHPbackupBoiler_Status = np.where(qhot_missing_Wh > 0.0, 1, 0)

            # NG Boiler operation
            BoilerEff = np.vectorize(Boiler.calc_Cop_boiler)(q_load_NG_Boiler_Wh, QnomBoiler_W, Texit_GHP_K)
            Qgas_to_Boiler_Wh = np.divide(q_load_NG_Boiler_Wh, BoilerEff,
                                          out=np.zeros_like(q_load_NG_Boiler_Wh), where=BoilerEff != 0.0)
            Boiler_Status = np.where(q_load_NG_Boiler_Wh > 0.0, 1, 0)

            # add costs
            # electricity
            el_total_Wh = el_GHP_Wh
            Opex_a_var_USD[3 + i][4] += sum(prices.ELEC_PRICE * el_total_Wh)  # CHF
            GHG_tonCO2[3 + i][5] += calc_emissions_Whyr_to_tonCO2yr(sum(el_total_Wh), lca.EL_TO_CO2_EQ)  # ton CO2
            PEN_MJoil[3 + i][6] += calc_pen_Whyr_to_MJoilyr(sum(el_total_Wh), lca.EL_TO_OIL_EQ)  # MJ-oil-eq
            # gas
            Q_gas_total_Wh = Qgas_to_GHPBoiler_Wh + Qgas_to_Boiler_Wh
            Opex_a_var_USD[3 + i][4] += sum(prices.NG_PRICE * Q_gas_total_Wh)  # CHF
            GHG_tonCO2[3 + i][5] += calc_emissions_Whyr_to_tonCO2yr(sum(Q_gas_total_Wh), lca.NG_TO_CO2_EQ)  # ton CO2
            PEN_MJoil[3 + i][6] += calc_pen_Whyr_to_MJoilyr(sum(Q_gas_total_Wh), lca.NG_TO_OIL_EQ)  # MJ-oil-eq
            # add activation
            resourcesRes[3 + i][0] = sum(qhot_missing_Wh + q_load_NG_Boiler_Wh)
            resourcesRes[3 + i][2] = sum(el_GHP_Wh)
            resourcesRes[3 + i][3] = sum(q_from_GHP_Wh)

            heating_dispatch[3 + i] = {'Q_GHP_gen_directload_W': q_from_GHP_Wh,
                                       'Q_BackupBoiler_gen_directload_W': qhot_missing_Wh,
                                       'Q_Boiler_gen_directload_W': q_load_NG_Boiler_Wh,
                                       'GHP_Status': GHP_Status,
                                       'BackupBoiler_Status': GHPbackupBoiler_Status,
                                       'Boiler_Status': Boiler_Status,
                                       'NG_BackupBoiler_req_Wh': Qgas_to_GHPBoiler_Wh,
                                       'NG_Boiler_req_Wh': Qgas_to_Boiler_Wh,
                                       'E_hs_ww_req_W': el_GHP_Wh}

        # Add all costs
        # 0: Boiler NG
        Capex_a_Boiler_USD, Opex_a_fixed_Boiler_USD, Capex_Boiler_USD = Boiler.calc_Cinv_boiler(Qnom_W, locator, config,
                                                                                                'BO1')
        Capex_total_USD[0][0] = Capex_Boiler_USD
        Capex_a_USD[0][0] = Capex_a_Boiler_USD
        Opex_a_fixed_USD[0][0] = Opex_a_fixed_Boiler_USD
        Capex_opex_a_fixed_only_USD[0][0] = Capex_a_Boiler_USD + Opex_a_fixed_Boiler_USD  # TODO:variable price?

        # 1: Boiler BG
        Capex_total_USD[1][0] = Capex_Boiler_USD
        Capex_a_USD[1][0] = Capex_a_Boiler_USD
        Opex_a_fixed_USD[1][0] = Opex_a_fixed_Boiler_USD
        Capex_opex_a_fixed_only_USD[1][0] = Capex_a_Boiler_USD + Opex_a_fixed_Boiler_USD  # TODO:variable price?

        # 2: Fuel Cell
        Capex_a_FC_USD, Opex_fixed_FC_USD, Capex_FC_USD = FC.calc_Cinv_FC(Qnom_W, locator, config)
        Capex_total_USD[2][0] = Capex_FC_USD
        Capex_a_USD[2][0] = Capex_a_FC_USD
        Opex_a_fixed_USD[2][0] = Opex_fixed_FC_USD
        Capex_opex_a_fixed_only_USD[2][0] = Capex_a_FC_USD + Opex_fixed_FC_USD  # TODO:variable price?

        # 3-12: BOILER + GHP
        for i in range(10):
            Opex_a_var_USD[3 + i][0] = i / 10.0  # Boiler share
            Opex_a_var_USD[3 + i][3] = 1 - i / 10.0  # GHP share

            # Get boiler costs
            QnomBoiler_W = i / 10.0 * Qnom_W
            Capex_a_Boiler_USD, Opex_a_fixed_Boiler_USD, Capex_Boiler_USD = Boiler.calc_Cinv_boiler(QnomBoiler_W,
                                                                                                    locator,
                                                                                                    config, 'BO1')

            Capex_total_USD[3 + i][0] += Capex_Boiler_USD
            Capex_a_USD[3 + i][0] += Capex_a_Boiler_USD
            Opex_a_fixed_USD[3 + i][0] += Opex_a_fixed_Boiler_USD
            Capex_opex_a_fixed_only_USD[3 + i][0] += Capex_a_Boiler_USD + Opex_a_fixed_Boiler_USD  # TODO: variable price?

            # Get back up boiler costs
            Qnom_Backup_Boiler_W = Q_Boiler_for_GHP_W[i][0]
            Capex_a_GHPBoiler_USD, Opex_a_fixed_GHPBoiler_USD, Capex_GHPBoiler_USD = Boiler.calc_Cinv_boiler(
                Qnom_Backup_Boiler_W, locator,
                config, 'BO1')

            Capex_total_USD[3 + i][0] += Capex_GHPBoiler_USD
            Capex_a_USD[3 + i][0] += Capex_a_GHPBoiler_USD
            Opex_a_fixed_USD[3 + i][0] += Opex_a_fixed_GHPBoiler_USD
            Capex_opex_a_fixed_only_USD[3 + i][0] += Capex_a_GHPBoiler_USD + Opex_a_fixed_GHPBoiler_USD  # TODO: variable price?

            # Get ground source heat pump costs
            Capex_a_GHP_USD, Opex_a_fixed_GHP_USD, Capex_GHP_USD = HP.calc_Cinv_GHP(GHP_el_size_W[i][0], locator,
                                                                                    config)
            Capex_total_USD[3 + i][0] += Capex_GHP_USD
            Capex_a_USD[3 + i][0] += Capex_a_GHP_USD
            Opex_a_fixed_USD[3 + i][0] += Opex_a_fixed_GHP_USD
            Capex_opex_a_fixed_only_USD[3 + i][0] += Capex_a_GHP_USD + Opex_a_fixed_GHP_USD  # TODO:variable price?

        # Best configuration
        Best = np.zeros((13, 1))
        indexBest = 0
        TAC_USD = np.zeros((13, 2))
        TotalCO2 = np.zeros((13, 2))
        TotalPrim = np.zeros((13, 2))
        for i in range(13):
            TAC_USD[i][0] = TotalCO2[i][0] = TotalPrim[i][0] = i
            Opex_a_USD[i][1] = Opex_a_fixed_USD[i][0] + Opex_a_var_USD[i][4]
            TAC_USD[i][1] = Capex_opex_a_fixed_only_USD[i][0] + Opex_a_var_USD[i][4]
            TotalCO2[i][1] = GHG_tonCO2[i][5]
            TotalPrim[i][1] = PEN_MJoil[i][6]

        CostsS = TAC_USD[np.argsort(TAC_USD[:, 1])]
        CO2S = TotalCO2[np.argsort(TotalCO2[:, 1])]
        PrimS = TotalPrim[np.argsort(TotalPrim[:, 1])]

        el = len(CostsS)
        rank = 0
        Bestfound = False

        optsearch = np.empty(el)
        optsearch.fill(3)
        indexBest = 0
        geothermal_potential = geothermal_potential_data.set_index('Name')

        # Check the GHP area constraint
        for i in range(10):
            QGHP = (1 - i / 10.0) * Qnom_W
            areaAvail = geothermal_potential.loc[building_name, 'Area_geo']
            Qallowed = np.ceil(areaAvail / GHP_A) * GHP_HMAX_SIZE  # [W_th]
            if Qallowed < QGHP:
                optsearch[i + 3] += 1
                Best[i + 3][0] = - 1

        while not Bestfound and rank < el:

            optsearch[int(CostsS[rank][0])] -= 1
            optsearch[int(CO2S[rank][0])] -= 1
            optsearch[int(PrimS[rank][0])] -= 1

            if np.count_nonzero(optsearch) != el:
                Bestfound = True
                indexBest = np.where(optsearch == 0)[0][0]

            rank += 1

        # get the best option according to the ranking.
        Best[indexBest][0] = 1

        # Save results in csv file
        performance_results = {
            "Nominal heating load": Qnom_W,
            "Capacity_BaseBoiler_NG_W": Qnom_W * Opex_a_var_USD[:, 0],
            "Capacity_FC_NG_W": Qnom_W * Opex_a_var_USD[:, 2],
            "Capacity_GS_HP_W": Qnom_W * Opex_a_var_USD[:, 3],
            "TAC_USD": TAC_USD[:, 1],
            "Capex_a_USD": Capex_a_USD[:, 0],
            "Capex_total_USD": Capex_total_USD[:, 0],
            "Opex_fixed_USD": Opex_a_fixed_USD[:, 0],
            "Opex_var_USD": Opex_a_var_USD[:, 4],
            "GHG_tonCO2": GHG_tonCO2[:, 5],
            "PEN_MJoil": PEN_MJoil[:, 6],
            "Best configuration": Best[:, 0]}

        results_to_csv = pd.DataFrame(performance_results)

        fName_result = locator.get_optimization_decentralized_folder_building_result_heating(building_name)
        results_to_csv.to_csv(fName_result, sep=',')

        # save activation for the best supply system configuration
        best_activation_df = pd.DataFrame.from_dict(heating_dispatch[indexBest])  #
        best_activation_df.to_csv(
            locator.get_optimization_decentralized_folder_building_result_heating_activation(building_name))

    print(time.perf_counter() - t0, "seconds process time for the Disconnected Building Routine\n")
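Several steps above divide the hourly load by a boiler efficiency that is zero whenever the unit is off; np.divide with out/where keeps those hours at zero instead of raising division warnings. A self-contained sketch of that pattern with illustrative numbers:

import numpy as np

q_load_Wh = np.array([0.0, 5000.0, 12000.0])   # hourly heat delivered
boiler_eff = np.array([0.0, 0.85, 0.92])       # efficiency is 0.0 when the boiler is off
# divide only where the efficiency is non-zero; elsewhere keep the prefilled 0.0
q_gas_Wh = np.divide(q_load_Wh, boiler_eff, out=np.zeros_like(q_load_Wh), where=boiler_eff != 0.0)
print(q_gas_Wh)  # [0.0, 5882.35..., 13043.47...]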
Exemple #22
0
def calc_score(static_params, dynamic_params):
    """
    This tool reduces the error between observed (real life measured data) and predicted (output of the model data) values by changing some of CEA inputs.
    Annual data is compared in terms of MBE and monthly data in terms of NMBE and CvRMSE (follwing ASHRAE Guideline 14-2002).
    A new input folder with measurements has to be created, with a csv each for monthly and annual data provided as input for this tool.
    A new output csv is generated providing the calibration results (iteration number, parameters tested and results(score metric))
    """
    ## define set of CEA inputs to be calibrated and initial guess values
    SEED = dynamic_params['SEED']
    # initialize the numpy random seed once per call so that the random perturbations drawn below
    # (np.random.randint(..., size=number_of_buildings) for every parameter) are reproducible
    np.random.seed(SEED)
    Hs_ag = dynamic_params['Hs_ag']
    Tcs_set_C = dynamic_params['Tcs_set_C']
    Es = dynamic_params['Es']
    Ns = dynamic_params['Ns']
    Occ_m2pax = dynamic_params['Occ_m2pax']
    Vww_lpdpax = dynamic_params['Vww_lpdpax']
    Ea_Wm2 = dynamic_params['Ea_Wm2']
    El_Wm2 = dynamic_params['El_Wm2']

    ## define fixed constant parameters (to be redefined by the CEA config file)
    #Hs_ag = 0.15
    #Tcs_set_C = 28
    Tcs_setb_C = 40
    void_deck = 1
    height_bg = 0
    floors_bg = 0

    scenario_list = static_params['scenario_list']
    config = static_params['config']

    locators_of_scenarios = []
    measured_building_names_of_scenarios = []
    for scenario in scenario_list:
        config.scenario = scenario
        locator = cea.inputlocator.InputLocator(config.scenario)
        measured_building_names = get_measured_building_names(locator)
        modify_monthly_multiplier(locator, config, measured_building_names)

        # store for later use
        locators_of_scenarios.append(locator)
        measured_building_names_of_scenarios.append(measured_building_names)

        ## overwrite inputs with corresponding initial values

        # Changes and saves variables related to the architecture
        df_arch = dbf_to_dataframe(locator.get_building_architecture())
        number_of_buildings = df_arch.shape[0]
        Rand_it = np.random.randint(low=-30, high=30,
                                    size=number_of_buildings) / 100
        df_arch.Es = Es * (1 + Rand_it)
        df_arch.Ns = Ns * (1 + Rand_it)
        df_arch.Hs_ag = Hs_ag * (1 + Rand_it)
        df_arch.void_deck = void_deck
        dataframe_to_dbf(df_arch, locator.get_building_architecture())

        # Changes and saves variables related to internal loads
        df_intload = dbf_to_dataframe(locator.get_building_internal())
        df_intload.Occ_m2pax = Occ_m2pax * (1 + Rand_it)
        df_intload.Vww_lpdpax = Vww_lpdpax * (1 + Rand_it)
        df_intload.Ea_Wm2 = Ea_Wm2 * (1 + Rand_it)
        df_intload.El_Wm2 = El_Wm2 * (1 + Rand_it)
        dataframe_to_dbf(df_intload, locator.get_building_internal())

        # Changes and saves variables related to comfort
        df_comfort = dbf_to_dataframe(locator.get_building_comfort())
        df_comfort.Tcs_set_C = Tcs_set_C * (1 + Rand_it)
        df_comfort.Tcs_setb_C = Tcs_setb_C
        dataframe_to_dbf(df_comfort, locator.get_building_comfort())

        # Changes and saves variables related to zone
        df_zone = dbf_to_dataframe(locator.get_zone_geometry().split('.')[0] +
                                   '.dbf')
        df_zone.height_bg = height_bg
        df_zone.floors_bg = floors_bg
        dataframe_to_dbf(df_zone,
                         locator.get_zone_geometry().split('.')[0] + '.dbf')

        ## run building schedules and energy demand
        config.schedule_maker.buildings = measured_building_names
        schedule_maker.schedule_maker_main(locator, config)
        config.demand.buildings = measured_building_names
        demand_main.demand_calculation(locator, config)

    # calculate the score
    score = validation.validation(scenario_list=scenario_list,
                                  locators_of_scenarios=locators_of_scenarios,
                                  measured_building_names_of_scenarios=measured_building_names_of_scenarios)

    return score
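The calibration perturbs each building's nominal inputs by a random factor drawn once per building in the ±30 % range. A self-contained sketch of that sampling step (numbers are illustrative):

import numpy as np

np.random.seed(42)                       # fixed seed, as in calc_score
number_of_buildings = 5
# one integer per building in [-30, 30), scaled to a fraction
Rand_it = np.random.randint(low=-30, high=30, size=number_of_buildings) / 100
Ea_Wm2 = 12.0                            # nominal appliance load in W/m2 (illustrative)
Ea_perturbed = Ea_Wm2 * (1 + Rand_it)    # per-building perturbed values
print(Ea_perturbed)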
Exemple #23
0
def create_new_project(locator, config):
    # Local variables
    zone_geometry_path = config.create_new_project.zone
    district_geometry_path = config.create_new_project.district
    street_geometry_path = config.create_new_project.streets
    terrain_path = config.create_new_project.terrain
    occupancy_path = config.create_new_project.occupancy
    age_path = config.create_new_project.age

    # import file
    zone, lat, lon = shapefile_to_WSG_and_UTM(zone_geometry_path)
    # verify that the input file is correct for CEA; if not, an exception will be raised
    verify_input_geometry_zone(zone)
    zone.to_file(locator.get_zone_geometry())


    # apply coordinate system of terrain into zone and save zone to disk.
    terrain = raster_to_WSG_and_UTM(terrain_path, lat, lon)
    driver = gdal.GetDriverByName('GTiff')
    verify_input_terrain(driver, locator.get_terrain(), terrain)
    driver.CreateCopy(locator.get_terrain(), terrain)

    # now create the district file if it does not exist
    if district_geometry_path == '':
        print("there is no district file, we proceed to create it based on the geometry of your zone")
        zone.to_file(locator.get_district_geometry())
    else:
        # import file
        district, _, _ = shapefile_to_WSG_and_UTM(district_geometry_path)
        # verify that the input file is correct for CEA; if not, an exception will be raised
        verify_input_geometry_district(district)
        # create new file
        district.to_file(locator.get_district_geometry())

    # now transfer the streets
    if street_geometry_path == '':
        print("there is no street file, optimizaiton of cooling networks wont be possible")
    else:
        street, _, _ = shapefile_to_WSG_and_UTM(street_geometry_path)
        street.to_file(locator.get_street_network())

    ## create occupancy file and year file
    if occupancy_path == '':
        print("there is no occupancy file, we proceed to create it based on the geometry of your zone")
        zone = Gdf.from_file(zone_geometry_path).drop('geometry', axis=1)
        for field in COLUMNS_ZONE_OCCUPANCY:
            zone[field] = 0.0
        zone[COLUMNS_ZONE_OCCUPANCY[:2]] = 0.5  # assign 0.5 of the area to each of the first two uses
        dataframe_to_dbf(zone[['Name'] + COLUMNS_ZONE_OCCUPANCY], locator.get_building_occupancy())
    else:
        # import file
        occupancy_file = dbf_to_dataframe(occupancy_path)
        occupancy_file_test = occupancy_file[['Name'] + COLUMNS_ZONE_OCCUPANCY]
        # verify that the input file is correct for CEA; if not, an exception will be raised
        verify_input_occupancy(occupancy_file_test)
        # create new file
        copyfile(occupancy_path, locator.get_building_occupancy())

    ## create age file
    if age_path == '':
        print(
            "there is no file with the age of the buildings, we proceed to create it based on the geometry of your zone")
        zone = Gdf.from_file(zone_geometry_path).drop('geometry', axis=1)
        for field in COLUMNS_ZONE_AGE:
            zone[field] = 0.0
        zone['built'] = 2017  # adding year of construction
        dataframe_to_dbf(zone[['Name'] + COLUMNS_ZONE_AGE], locator.get_building_age())
    else:
        # import file
        age_file = dbf_to_dataframe(age_path)
        age_file_test = age_file[['Name'] + COLUMNS_ZONE_AGE]
        # verify that the input file is correct for CEA; if not, an exception will be raised
        verify_input_age(age_file_test)
        # create new file
        copyfile(age_path, locator.get_building_age())

    # add other folders by calling the locator
    locator.get_measurements()
    locator.get_input_network_folder("DH", "")
    locator.get_input_network_folder("DC", "")
    locator.get_weather_folder()
def rename_dbf_file(path, pk, old, new):
    df = dbf.dbf_to_dataframe(path)
    df.loc[df[pk] == old, pk] = new
    dbf.dataframe_to_dbf(df, path)
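A usage sketch for rename_dbf_file above (the path and the building names are hypothetical):

# rename building 'B01' to 'B001' in a building-properties dbf (hypothetical path)
rename_dbf_file('inputs/building-properties/typology.dbf', pk='Name', old='B01', new='B001')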
Exemple #25
0
def retrofit_main(locator_baseline, retrofit_scenario_name, keep_partial_matches,
                  retrofit_target_year=None,
                  age_threshold=None,
                  eui_heating_threshold=None,
                  eui_hot_water_threshold=None,
                  eui_cooling_threshold=None,
                  eui_electricity_threshold=None,
                  heating_costs_threshold=None,
                  hot_water_costs_threshold=None,
                  cooling_costs_threshold=None,
                  electricity_costs_threshold=None,
                  heating_losses_threshold=None,
                  hot_water_losses_threshold=None,
                  cooling_losses_threshold=None,
                  emissions_operation_threshold=None):
    selection_names = []  # list to store names of selected buildings to retrofit


    #load databases and select only buildings in geometry
    #geometry
    geometry_df = gdf.from_file(locator_baseline.get_zone_geometry())
    zone_building_names = locator_baseline.get_zone_building_names()
    #age
    age = dbf.dbf_to_dataframe(locator_baseline.get_building_age())
    age = age.loc[age['Name'].isin(zone_building_names)]

    architecture = dbf.dbf_to_dataframe(locator_baseline.get_building_architecture())
    architecture = architecture.loc[architecture['Name'].isin(zone_building_names)]

    comfort = dbf.dbf_to_dataframe(locator_baseline.get_building_comfort())
    comfort = comfort.loc[comfort['Name'].isin(zone_building_names)]

    internal_loads = dbf.dbf_to_dataframe(locator_baseline.get_building_internal())
    internal_loads = internal_loads.loc[internal_loads['Name'].isin(zone_building_names)]

    hvac = dbf.dbf_to_dataframe(locator_baseline.get_building_air_conditioning())
    hvac = hvac.loc[hvac['Name'].isin(zone_building_names)]

    supply = dbf.dbf_to_dataframe(locator_baseline.get_building_supply())
    supply = supply.loc[supply['Name'].isin(zone_building_names)]

    occupancy = dbf.dbf_to_dataframe(locator_baseline.get_building_occupancy())
    occupancy = occupancy.loc[occupancy['Name'].isin(zone_building_names)]


    # CASE 1 - age threshold
    age_crit = [["age", age_threshold]]
    for criteria_name, criteria_threshold in age_crit:
        if criteria_threshold is not None:
            age_difference = retrofit_target_year - criteria_threshold
            selection_names.append(("Crit_" + criteria_name, age_filter_HVAC(age, age_difference)))

    # CASE 2 - energy use intensity threshold
    eui_crit = [["Qhs_sys", eui_heating_threshold],
                ["Qww_sys", eui_hot_water_threshold],
                ["Qcs_sys", eui_cooling_threshold],
                ["E_sys", eui_electricity_threshold]]
    for criteria_name, criteria_threshold in eui_crit:
        if criteria_threshold is not None:
            demand_totals = pd.read_csv(locator_baseline.get_total_demand())
            selection_names.append(
                ("c_eui_" + criteria_name, eui_filter_HVAC(demand_totals, criteria_name, criteria_threshold)))

    # CASE 3 - costs threshold
    op_costs_crit = {"Qhs_sys": heating_costs_threshold,
                     "Qww_sys": hot_water_costs_threshold,
                     "Qcs_sys": cooling_costs_threshold,
                     "Qcre_sys": cooling_costs_threshold,
                     "Qcdata_sys": cooling_costs_threshold,
                     "E_sys": electricity_costs_threshold}

    costs_totals = pd.read_csv(locator_baseline.get_costs_operation_file())
    costs_summed = costs_totals.copy()
    for key in op_costs_crit.keys():
        costs_summed[key] = np.zeros(len(costs_totals.index))
    for cost_label in costs_totals.columns:
        if 'm2yr' in cost_label:
            for service in ['hs', 'ww', 'cs', 'cre', 'cdata']:
                if service in cost_label.split('_'):
                    costs_summed['Q'+service+'_sys'] += costs_totals[cost_label]
            if not any(service in cost_label.split('_') for service in ['hs', 'ww', 'cs', 'cre', 'cdata']):
                costs_summed['E_sys'] += costs_totals[cost_label]
    for criteria_name in op_costs_crit.keys():
        criteria_threshold = op_costs_crit[criteria_name]
        if criteria_threshold is not None:
            selection_names.append(
                ("c_cost_" + criteria_name, emissions_filter_HVAC(costs_summed, criteria_name, criteria_threshold)))

    # CASE 4 - losses threshold
    losses_crit = [["Qhs_sys", "Qhs_sys_MWhyr", "Qhs_MWhyr", heating_losses_threshold],
                   ["Qww_sys", "Qww_sys_MWhyr", "Qww_MWhyr", hot_water_losses_threshold],
                   ["Qcs_sys", "Qcs_sys_MWhyr", "Qcs_MWhyr", cooling_losses_threshold]]
    for criteria_name, load_with_losses, load_end_use, criteria_threshold in losses_crit:
        if criteria_threshold is not None:
            demand_totals = pd.read_csv(locator_baseline.get_total_demand())
            selection_names.append(("c_loss_" + criteria_name,
                                    losses_filter_HVAC(demand_totals, load_with_losses, load_end_use,
                                                       criteria_threshold)))
    # CASE 5 - emissions threshold
    LCA_crit = [["ghg", "O_ghg_ton", emissions_operation_threshold]]
    for criteria_name, lca_name, criteria_threshold in LCA_crit:
        if criteria_threshold is not None:
            emissions_totals = pd.read_csv(locator_baseline.get_lca_operation())
            selection_names.append(
                ("c_" + criteria_name, emissions_filter_HVAC(emissions_totals, lca_name, criteria_threshold)))

    # appending all the results
    if keep_partial_matches:
        type_of_join = "outer"
    else:
        type_of_join = "inner"
    counter = 0
    for (criteria, list_true_values) in selection_names:
        if counter == 0:
            data = pd.DataFrame({"Name": list_true_values})
            data[criteria] = "TRUE"
        else:
            y = pd.DataFrame({"Name": list_true_values})
            y[criteria] = "TRUE"
            data = data.merge(y, on="Name", how=type_of_join)
        counter += 1

    data.fillna(value="FALSE", inplace=True)
    if data.empty and not keep_partial_matches:
        raise ValueError("There is not a single building matching all selected criteria; "
                         "try to keep those buildings that partially match the criteria")

    # Create a retrofit case with the buildings that pass the criteria
    retrofit_scenario_path = os.path.join(locator_baseline.get_project_path(), retrofit_scenario_name)
    locator_retrofit = cea.inputlocator.InputLocator(scenario=retrofit_scenario_path)
    retrofit_scenario_creator(locator_baseline, locator_retrofit, geometry_df, age, architecture, internal_loads, comfort, hvac,
                              supply, occupancy, data, type_of_join)
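Each criterion in retrofit_main contributes a list of matching building names; the lists are merged on 'Name' with an inner join (a building must match every criterion) or an outer join (matching any criterion is enough). A minimal sketch of that accumulation with made-up names:

import pandas as pd

selection_names = [("Crit_age", ["B01", "B02"]),
                   ("c_eui_Qhs_sys", ["B02", "B03"])]
data = None
for criteria, list_true_values in selection_names:
    y = pd.DataFrame({"Name": list_true_values})
    y[criteria] = "TRUE"
    data = y if data is None else data.merge(y, on="Name", how="inner")
print(data)  # with the inner join, only B02 satisfies both criteria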
def building_capex(year_to_calculate, locator):
    """
    Calculation of non-supply capital costs.

    The results are saved to Total_building_capex.csv (see below).

    As part of the algorithm, the following files are read from InputLocator:

    - architecture.shp: shapefile with the architecture of each building
        locator.get_building_architecture()
    - occupancy.shp: shapefile with the occupancy types of each building
        locator.get_building_occupancy()
    - age.shp: shapefile with the age and retrofit date of each building
        locator.get_building_age()
    - zone.shp: shapefile with the geometry of each building in the zone of study
        locator.get_zone_geometry()
    - Archetypes_properties: csv file with the database of archetypes including embodied energy and emissions
        locator.get_archetypes_properties()

    As a result, the following file is created:

    - Total_building_capex: .csv
        csv file of yearly primary energy and grey emissions per building stored in locator.get_lca_embodied()

    :param year_to_calculate:  year between 1900 and 2100 indicating when embodied energy is evaluated
        to account for emissions already offset from building construction and retrofits more than 60 years ago.
    :type year_to_calculate: int
    :param locator: an instance of InputLocator set to the scenario
    :type locator: InputLocator
    :returns: This function does not return anything
    :rtype: NoneType


    Files read / written from InputLocator:

    get_building_architecture
    get_building_occupancy
    get_building_age
    get_zone_geometry
    get_archetypes_embodied_energy
    get_archetypes_embodied_emissions

    path_LCA_embodied_energy:
        path to database of archetypes embodied energy file
        Archetypes_embodied_energy.csv
    path_LCA_embodied_emissions:
        path to database of archetypes grey emissions file
        Archetypes_embodied_emissions.csv
    path_age_shp: string
        path to building_age.shp
    path_occupancy_shp:
        path to building_occupancy.shp
    path_geometry_shp:
        path to building_geometry.shp
    path_architecture_shp:
        path to building_architecture.shp
    path_results : string
        path to the demand results folder for emissions
    """
    print('Calculating the Total Annualized costs of building components.')
    # local variables
    age_df = dbf_to_dataframe(locator.get_building_typology())
    architecture_df = dbf_to_dataframe(locator.get_building_architecture())
    supply_df = dbf_to_dataframe(locator.get_building_supply())
    hvac_df = dbf_to_dataframe(locator.get_building_air_conditioning())
    geometry_df = Gdf.from_file(locator.get_zone_geometry())
    geometry_df['footprint'] = geometry_df.area
    geometry_df['perimeter'] = geometry_df.length
    geometry_df = geometry_df.drop('geometry', axis=1)

    # local variables
    surface_database_windows = pd.read_excel(
        locator.get_database_envelope_systems(), "WINDOW")
    surface_database_roof = pd.read_excel(
        locator.get_database_envelope_systems(), "ROOF")
    surface_database_walls = pd.read_excel(
        locator.get_database_envelope_systems(), "WALL")
    surface_database_floors = pd.read_excel(
        locator.get_database_envelope_systems(), "FLOOR")
    surface_database_cons = pd.read_excel(
        locator.get_database_envelope_systems(), "CONSTRUCTION")
    surface_database_leak = pd.read_excel(
        locator.get_database_envelope_systems(), "TIGHTNESS")
    hvac_database_cooling = pd.read_excel(
        locator.get_database_air_conditioning_systems(), "COOLING")
    hvac_database_heating = pd.read_excel(
        locator.get_database_air_conditioning_systems(), "HEATING")
    hvac_database_vent = pd.read_excel(
        locator.get_database_air_conditioning_systems(), "VENTILATION")

    # query data
    df = architecture_df.merge(surface_database_windows,
                               left_on='type_win',
                               right_on='code')
    df2 = architecture_df.merge(surface_database_roof,
                                left_on='type_roof',
                                right_on='code')
    df3 = architecture_df.merge(surface_database_walls,
                                left_on='type_wall',
                                right_on='code')
    df4 = architecture_df.merge(surface_database_floors,
                                left_on='type_floor',
                                right_on='code')
    df5 = architecture_df.merge(surface_database_floors,
                                left_on='type_base',
                                right_on='code')
    df5.rename({'capex_FLOOR': 'capex_BASE'}, inplace=True, axis=1)
    df6 = architecture_df.merge(surface_database_walls,
                                left_on='type_part',
                                right_on='code')
    df6.rename({'capex_WALL': 'capex_PART'}, inplace=True, axis=1)
    df7 = architecture_df.merge(surface_database_cons,
                                left_on='type_cons',
                                right_on='code')
    df8 = architecture_df.merge(surface_database_leak,
                                left_on='type_leak',
                                right_on='code')
    df9 = hvac_df.merge(hvac_database_cooling,
                        left_on='type_cs',
                        right_on='code')
    df10 = hvac_df.merge(hvac_database_heating,
                         left_on='type_hs',
                         right_on='code')
    df14 = hvac_df.merge(hvac_database_vent,
                         left_on='type_vent',
                         right_on='code')

    fields = ['Name', "capex_WIN"]
    fields2 = ['Name', "capex_ROOF"]
    fields3 = ['Name', "capex_WALL"]
    fields4 = ['Name', "capex_FLOOR"]
    fields5 = ['Name', "capex_BASE"]
    fields6 = ['Name', "capex_PART"]

    # added for bigmacc
    fields7 = ['Name', "capex_CONS"]
    fields8 = ['Name', "capex_LEAK"]
    fields9 = ['Name', "capex_hvacCS"]
    fields10 = ['Name', "capex_hvacHS"]
    fields14 = ['Name', "capex_hvacVENT"]

    surface_properties = df[fields].merge(df2[fields2], on='Name').merge(
        df3[fields3], on='Name').merge(df4[fields4], on='Name').merge(
            df5[fields5], on='Name').merge(df6[fields6], on='Name').merge(
                df7[fields7], on='Name').merge(df8[fields8], on='Name').merge(
                    df9[fields9],
                    on='Name').merge(df10[fields10],
                                     on='Name').merge(df14[fields14],
                                                      on='Name')

    # DataFrame with joined data for all categories
    data_merged_df = geometry_df.merge(age_df, on='Name') \
        .merge(surface_properties, on='Name') \
        .merge(architecture_df, on='Name') \
        .merge(hvac_df, on='Name')

    # calculate building geometry
    ## total window area
    average_wwr = [np.mean([a, b, c, d]) for a, b, c, d in
                   zip(data_merged_df['wwr_south'], data_merged_df['wwr_north'],
                       data_merged_df['wwr_west'], data_merged_df['wwr_east'])]

    data_merged_df['windows_ag'] = average_wwr * data_merged_df['perimeter'] * data_merged_df['height_ag']

    ## wall area above ground
    data_merged_df['area_walls_ext_ag'] = data_merged_df['perimeter'] * data_merged_df['height_ag'] - data_merged_df['windows_ag']

    # fix according to the void deck
    data_merged_df['empty_envelope_ratio'] = 1 - (
        (data_merged_df['void_deck'] * (data_merged_df['height_ag'] / data_merged_df['floors_ag'])) /
        (data_merged_df['area_walls_ext_ag'] + data_merged_df['windows_ag']))
    data_merged_df['windows_ag'] = data_merged_df['windows_ag'] * data_merged_df['empty_envelope_ratio']
    data_merged_df['area_walls_ext_ag'] = data_merged_df['area_walls_ext_ag'] * data_merged_df['empty_envelope_ratio']

    ## wall area below ground
    data_merged_df['area_walls_ext_bg'] = data_merged_df['perimeter'] * data_merged_df['height_bg']
    ## floor area above ground
    data_merged_df['floor_area_ag'] = data_merged_df['footprint'] * data_merged_df['floors_ag']
    ## floor area below ground
    data_merged_df['floor_area_bg'] = data_merged_df['footprint'] * data_merged_df['floors_bg']
    ## total floor area
    data_merged_df['GFA_m2'] = data_merged_df['floor_area_ag'] + data_merged_df['floor_area_bg']

    result_emissions = calculate_contributions(data_merged_df, year_to_calculate)

    # export the results for building system costs
    result_emissions.to_csv(locator.get_building_tac_file(),
                            index=False,
                            float_format='%.2f',
                            na_rep='nan')
    print('done!')
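The void-deck correction in building_capex scales the above-ground window and wall areas by an empty_envelope_ratio. A worked numeric pass through the formula as written, using assumed round numbers:

# assumed values for a single building, for illustration only
perimeter = 40.0      # m
height_ag = 30.0      # m
floors_ag = 10
void_deck = 1         # one open storey
wwr = 0.25            # average window-to-wall ratio

windows_ag = wwr * perimeter * height_ag                    # 300.0 m2
area_walls_ext_ag = perimeter * height_ag - windows_ag      # 900.0 m2
empty_envelope_ratio = 1 - (void_deck * (height_ag / floors_ag)) / (area_walls_ext_ag + windows_ag)
print(empty_envelope_ratio)                                 # 1 - 3.0 / 1200.0 = 0.9975
windows_ag *= empty_envelope_ratio                          # 299.25 m2
area_walls_ext_ag *= empty_envelope_ratio                   # 897.75 m2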
Exemple #27
0
def data_helper(locator, config, prop_architecture_flag, prop_hvac_flag,
                prop_comfort_flag, prop_internal_loads_flag,
                prop_supply_systems_flag, prop_restrictions_flag):
    """
    algorithm to query building properties from statistical database
    Archetypes_HVAC_properties.csv. for more info check the integrated demand
    model of Fonseca et al. 2015. Appl. energy.

    :param InputLocator locator: an InputLocator instance set to the scenario to work on
    :param boolean prop_architecture_flag: if True, get properties about the construction and architecture.
    :param boolean prop_comfort_flag: if True, get properties about thermal comfort.
    :param boolean prop_hvac_flag: if True, get properties about types of HVAC systems, otherwise False.
    :param boolean prop_internal_loads_flag: if True, get properties about internal loads, otherwise False.

    The following files are created by this script, depending on which flags were set:

    - building_HVAC: .dbf
        describes the queried properties of HVAC systems.

    - architecture.dbf
        describes the queried properties of architectural features

    - building_thermal: .shp
        describes the queried thermal properties of buildings

    - indoor_comfort.shp
        describes the queried thermal properties of buildings
    """

    # get occupancy and age files
    building_occupancy_df = dbf_to_dataframe(locator.get_building_occupancy())
    list_uses = list(building_occupancy_df.drop(
        ['Name'], axis=1).columns)  # parking excluded in U-Values
    building_age_df = dbf_to_dataframe(locator.get_building_age())

    # get occupant densities from archetypes schedules
    occupant_densities = {}
    for use in list_uses:
        archetypes_schedules = pd.read_excel(
            locator.get_archetypes_schedules(config.region), use).T
        area_per_occupant = archetypes_schedules['density'].values[:1][0]
        if area_per_occupant > 0:
            occupant_densities[use] = 1 / area_per_occupant
        else:
            occupant_densities[use] = 0

    # prepare dataframe to store results (a dataframe with only the names of the buildings)
    names_df = building_age_df[['Name']]

    # define main use:
    building_occupancy_df['mainuse'] = calc_mainuse(building_occupancy_df,
                                                    list_uses)

    # dataframe with joined data for categories
    categories_df = building_occupancy_df.merge(building_age_df, on='Name')

    # get properties about the construction and architecture
    if prop_architecture_flag:
        architecture_DB = get_database(
            locator.get_archetypes_properties(config.region), 'ARCHITECTURE')
        architecture_DB['Code'] = architecture_DB.apply(lambda x: calc_code(
            x['building_use'], x['year_start'], x['year_end'], x['standard']),
                                                        axis=1)
        categories_df['cat_built'] = calc_category(architecture_DB,
                                                   categories_df, 'built', 'C')
        retrofit_category = ['envelope', 'roof', 'windows']
        for category in retrofit_category:
            categories_df['cat_' + category] = calc_category(
                architecture_DB, categories_df, category, 'R')

        prop_architecture_df = get_prop_architecture(categories_df,
                                                     architecture_DB,
                                                     list_uses)

        # write to shapefile
        prop_architecture_df_merged = names_df.merge(prop_architecture_df,
                                                     on="Name")

        fields = [
            'Name', 'Hs', 'void_deck', 'wwr_north', 'wwr_west', 'wwr_east',
            'wwr_south', 'type_cons', 'type_leak', 'type_roof', 'type_wall',
            'type_win', 'type_shade'
        ]

        dataframe_to_dbf(prop_architecture_df_merged[fields],
                         locator.get_building_architecture())

    # get properties about types of HVAC systems
    if prop_hvac_flag:
        HVAC_DB = get_database(
            locator.get_archetypes_properties(config.region), 'HVAC')
        HVAC_DB['Code'] = HVAC_DB.apply(lambda x: calc_code(
            x['building_use'], x['year_start'], x['year_end'], x['standard']),
                                        axis=1)

        categories_df['cat_HVAC'] = calc_category(HVAC_DB, categories_df,
                                                  'HVAC', 'R')

        # define HVAC systems types
        prop_HVAC_df = categories_df.merge(HVAC_DB,
                                           left_on='cat_HVAC',
                                           right_on='Code')

        # write to shapefile
        prop_HVAC_df_merged = names_df.merge(prop_HVAC_df, on="Name")
        fields = [
            'Name', 'type_cs', 'type_hs', 'type_dhw', 'type_ctrl', 'type_vent'
        ]
        dataframe_to_dbf(prop_HVAC_df_merged[fields],
                         locator.get_building_hvac())

    if prop_comfort_flag:
        comfort_DB = get_database(
            locator.get_archetypes_properties(config.region), 'INDOOR_COMFORT')

        # define comfort
        prop_comfort_df = categories_df.merge(comfort_DB,
                                              left_on='mainuse',
                                              right_on='Code')

        # write to shapefile
        prop_comfort_df_merged = names_df.merge(prop_comfort_df, on="Name")
        prop_comfort_df_merged = calculate_average_multiuse(
            prop_comfort_df_merged, occupant_densities, list_uses, comfort_DB)
        fields = [
            'Name', 'Tcs_set_C', 'Ths_set_C', 'Tcs_setb_C', 'Ths_setb_C',
            'Ve_lps', 'rhum_min_pc', 'rhum_max_pc'
        ]
        dataframe_to_dbf(prop_comfort_df_merged[fields],
                         locator.get_building_comfort())

    if prop_internal_loads_flag:
        internal_DB = get_database(
            locator.get_archetypes_properties(config.region), 'INTERNAL_LOADS')

        # define internal loads
        prop_internal_df = categories_df.merge(internal_DB,
                                               left_on='mainuse',
                                               right_on='Code')

        # write to shapefile
        prop_internal_df_merged = names_df.merge(prop_internal_df, on="Name")
        prop_internal_df_merged = calculate_average_multiuse(
            prop_internal_df_merged, occupant_densities, list_uses,
            internal_DB)
        fields = [
            'Name', 'Qs_Wp', 'X_ghp', 'Ea_Wm2', 'El_Wm2', 'Epro_Wm2',
            'Qcre_Wm2', 'Ed_Wm2', 'Vww_lpd', 'Vw_lpd', 'Qhpro_Wm2'
        ]
        dataframe_to_dbf(prop_internal_df_merged[fields],
                         locator.get_building_internal())

    if prop_supply_systems_flag:
        supply_DB = get_database(
            locator.get_archetypes_properties(config.region), 'SUPPLY')
        supply_DB['Code'] = supply_DB.apply(lambda x: calc_code(
            x['building_use'], x['year_start'], x['year_end'], x['standard']),
                                            axis=1)

        categories_df['cat_supply'] = calc_category(supply_DB, categories_df,
                                                    'HVAC', 'R')

        # define supply systems types
        prop_supply_df = categories_df.merge(supply_DB,
                                             left_on='cat_supply',
                                             right_on='Code')

        # write to shapefile
        prop_supply_df_merged = names_df.merge(prop_supply_df, on="Name")
        fields = ['Name', 'type_cs', 'type_hs', 'type_dhw', 'type_el']
        dataframe_to_dbf(prop_supply_df_merged[fields],
                         locator.get_building_supply())

    if prop_restrictions_flag:
        COLUMNS_ZONE_RESTRICTIONS = [
            'SOLAR', 'GEOTHERMAL', 'WATERBODY', 'NATURALGAS', 'BIOGAS'
        ]
        for field in COLUMNS_ZONE_RESTRICTIONS:
            names_df[field] = 0
        dataframe_to_dbf(names_df[['Name'] + COLUMNS_ZONE_RESTRICTIONS],
                         locator.get_building_restrictions())
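data_helper converts the archetype's area-per-occupant into an occupant density, guarding against uses with no occupancy. A short sketch of that conversion with illustrative numbers:

# area per occupant (m2 per person) per use, as read from the archetype schedules
area_per_occupant_by_use = {'OFFICE': 14.0, 'INDUSTRIAL': 0.0}
occupant_densities = {}
for use, area_per_occupant in area_per_occupant_by_use.items():
    # uses with 0 m2/pax get a density of 0 instead of a division by zero
    occupant_densities[use] = 1 / area_per_occupant if area_per_occupant > 0 else 0
print(occupant_densities)  # {'OFFICE': 0.0714..., 'INDUSTRIAL': 0}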
Exemple #28
0
    def __init__(self, locator, override_variables=False):
        """
        Read building properties from input shape files and construct a new BuildingProperties object.

        :param locator: an InputLocator for locating the input files
        :type locator: cea.inputlocator.InputLocator

        :param override_variables: override_variables from config
        :type override_variables: bool

        :returns: BuildingProperties
        :rtype: BuildingProperties
        """

        print("read input files")
        prop_geometry = Gdf.from_file(locator.get_zone_geometry())
        prop_geometry['footprint'] = prop_geometry.area
        prop_geometry['perimeter'] = prop_geometry.length
        prop_geometry['Blength'], prop_geometry[
            'Bwidth'] = self.calc_bounding_box_geom(
                locator.get_zone_geometry())
        prop_geometry = prop_geometry.drop('geometry',
                                           axis=1).set_index('Name')
        prop_hvac = dbf_to_dataframe(locator.get_building_hvac())
        prop_occupancy_df = dbf_to_dataframe(
            locator.get_building_occupancy()).set_index('Name')
        # Drop 'REFERENCE' column if it exists
        if 'REFERENCE' in prop_occupancy_df:
            prop_occupancy_df.drop('REFERENCE', axis=1, inplace=True)
        prop_occupancy_df.fillna(
            value=0.0, inplace=True)  # fix badly formatted occupancy file...
        prop_occupancy = prop_occupancy_df.loc[:, (prop_occupancy_df != 0).any(
            axis=0)]
        prop_architectures = dbf_to_dataframe(
            locator.get_building_architecture())
        prop_age = dbf_to_dataframe(
            locator.get_building_age()).set_index('Name')
        # Drop 'REFERENCE' column if it exists
        if 'REFERENCE' in prop_age:
            prop_age.drop('REFERENCE', axis=1, inplace=True)
        prop_comfort = dbf_to_dataframe(
            locator.get_building_comfort()).set_index('Name')
        prop_internal_loads = dbf_to_dataframe(
            locator.get_building_internal()).set_index('Name')
        prop_supply_systems_building = dbf_to_dataframe(
            locator.get_building_supply())

        # GET SYSTEMS EFFICIENCIES
        prop_supply_systems = get_properties_supply_sytems(
            locator, prop_supply_systems_building).set_index('Name')

        # get temperatures of operation
        prop_HVAC_result = get_properties_technical_systems(
            locator, prop_hvac).set_index('Name')

        # get envelope properties
        prop_envelope = get_envelope_properties(
            locator, prop_architectures).set_index('Name')

        # apply overrides
        if override_variables:
            self._overrides = pd.read_csv(
                locator.get_building_overrides()).set_index('Name')
            prop_envelope = self.apply_overrides(prop_envelope)
            prop_internal_loads = self.apply_overrides(prop_internal_loads)
            prop_comfort = self.apply_overrides(prop_comfort)
            prop_HVAC_result = self.apply_overrides(prop_HVAC_result)

        # get properties of rc demand model
        prop_rc_model = self.calc_prop_rc_model(locator, prop_occupancy,
                                                prop_envelope, prop_geometry,
                                                prop_HVAC_result)

        # get solar properties
        solar = get_prop_solar(locator, prop_rc_model,
                               prop_envelope).set_index('Name')

        # df_windows = geometry_reader.create_windows(surface_properties, prop_envelope)
        # TODO: check whether Win_op and the window height are necessary.
        # TODO: merging branch i9 with CityGML could help with this
        print("done")

        # save resulting data
        self._prop_supply_systems = prop_supply_systems
        self._prop_geometry = prop_geometry
        self._prop_envelope = prop_envelope
        self._prop_occupancy = prop_occupancy
        self._prop_HVAC_result = prop_HVAC_result
        self._prop_comfort = prop_comfort
        self._prop_internal_loads = prop_internal_loads
        self._prop_age = prop_age
        self._solar = solar
        self._prop_RC_model = prop_rc_model
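apply_overrides is not shown in this excerpt; a plausible minimal sketch of the pattern (an assumption, not the CEA implementation) would update the property columns from the overrides table, aligning on the 'Name' index:

import pandas as pd

def apply_overrides_sketch(prop_df, overrides):
    # copy over any override column that also exists in the property table (sketch only)
    common_columns = [c for c in overrides.columns if c in prop_df.columns]
    prop_df = prop_df.copy()
    prop_df.update(overrides[common_columns])  # aligns rows on the 'Name' index
    return prop_df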
Exemple #29
0
def thermal_network_simplified(locator, config, network_name):
    # local variables
    network_type = config.thermal_network.network_type
    min_head_substation_kPa = config.thermal_network.min_head_substation
    thermal_transfer_unit_design_head_m = min_head_substation_kPa * 1000 / M_WATER_TO_PA
    coefficient_friction_hazen_williams = config.thermal_network.hw_friction_coefficient
    velocity_ms = config.thermal_network.peak_load_velocity
    fraction_equivalent_length = config.thermal_network.equivalent_length_factor
    peak_load_percentage = config.thermal_network.peak_load_percentage

    # GET INFORMATION ABOUT THE NETWORK
    edge_df, node_df = get_thermal_network_from_shapefile(locator, network_type, network_name)

    # GET INFORMATION ABOUT THE DEMAND OF BUILDINGS AND CONNECT TO THE NODE INFO
    # calculate substations for all buildings
    # local variables
    total_demand = pd.read_csv(locator.get_total_demand())
    volume_flow_m3pers_building = pd.DataFrame()
    T_sup_K_building = pd.DataFrame()
    T_re_K_building = pd.DataFrame()
    Q_demand_kWh_building = pd.DataFrame()
    if network_type == "DH":
        buildings_name_with_heating = get_building_names_with_load(total_demand, load_name='QH_sys_MWhyr')
        buildings_name_with_space_heating = get_building_names_with_load(total_demand, load_name='Qhs_sys_MWhyr')
        DHN_barcode = "0"
        if (buildings_name_with_heating != [] and buildings_name_with_space_heating != []):
            building_names = [building for building in buildings_name_with_heating if building in
                              node_df.Building.values]
            substation.substation_main_heating(locator, total_demand, building_names, DHN_barcode=DHN_barcode)
        else:
            raise Exception('no buildings with heating demand were found to connect to the DH network')

        for building_name in building_names:
            substation_results = pd.read_csv(
                locator.get_optimization_substations_results_file(building_name, "DH", DHN_barcode))
            volume_flow_m3pers_building[building_name] = substation_results["mdot_DH_result_kgpers"] / P_WATER_KGPERM3
            T_sup_K_building[building_name] = substation_results["T_supply_DH_result_K"]
            T_re_K_building[building_name] = np.where(substation_results["T_return_DH_result_K"] >273.15,
                                                      substation_results["T_return_DH_result_K"], np.nan)
            Q_demand_kWh_building[building_name] = (substation_results["Q_heating_W"] + substation_results[
                "Q_dhw_W"]) / 1000

    if network_type == "DC":
        buildings_name_with_cooling = get_building_names_with_load(total_demand, load_name='QC_sys_MWhyr')
        DCN_barcode = "0"
        if buildings_name_with_cooling != []:
            building_names = [building for building in buildings_name_with_cooling if building in
                              node_df.Building.values]
            substation.substation_main_cooling(locator, total_demand, building_names, DCN_barcode=DCN_barcode)
        else:
            raise Exception('no buildings with cooling demand were found to connect to the DC network')

        for building_name in building_names:
            substation_results = pd.read_csv(
                locator.get_optimization_substations_results_file(building_name, "DC", DCN_barcode))
            volume_flow_m3pers_building[building_name] = substation_results[
                                                             "mdot_space_cooling_data_center_and_refrigeration_result_kgpers"] / P_WATER_KGPERM3
            T_sup_K_building[building_name] = substation_results[
                "T_supply_DC_space_cooling_data_center_and_refrigeration_result_K"]
            T_re_K_building[building_name] = substation_results[
                "T_return_DC_space_cooling_data_center_and_refrigeration_result_K"]
            Q_demand_kWh_building[building_name] = substation_results[
                                                       "Q_space_cooling_data_center_and_refrigeration_W"] / 1000


    import cea.utilities
    with cea.utilities.pushd(locator.get_thermal_network_folder()):
        # Create a water network model
        wn = wntr.network.WaterNetworkModel()

        # add loads
        building_base_demand_m3s = {}
        for building in volume_flow_m3pers_building.keys():
            building_base_demand_m3s[building] = volume_flow_m3pers_building[building].max()
            pattern_demand = (volume_flow_m3pers_building[building].values / building_base_demand_m3s[building]).tolist()
            wn.add_pattern(building, pattern_demand)
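        # A worked micro-example of the decomposition above (illustrative values,
        # not from any scenario): EPANET computes demand = base_demand * multiplier,
        # so an hourly flow series [0.02, 0.05, 0.04] m3/s gives
        # base_demand_m3s = 0.05 and the dimensionless pattern [0.4, 1.0, 0.8].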

        # add nodes
        consumer_nodes = []
        building_nodes_pairs = {}
        building_nodes_pairs_inversed = {}
        for node in node_df.iterrows():
            if node[1]["Type"] == "CONSUMER":
                demand_pattern = node[1]['Building']
                base_demand_m3s = building_base_demand_m3s[demand_pattern]
                consumer_nodes.append(node[0])
                building_nodes_pairs[node[0]] = demand_pattern
                building_nodes_pairs_inversed[demand_pattern] = node[0]
                wn.add_junction(node[0],
                                base_demand=base_demand_m3s,
                                demand_pattern=demand_pattern,
                                elevation=thermal_transfer_unit_design_head_m,
                                coordinates=node[1]["coordinates"])
            elif node[1]["Type"] == "PLANT":
                base_head = int(thermal_transfer_unit_design_head_m*1.2)
                start_node = node[0]
                name_node_plant = start_node
                wn.add_reservoir(start_node,
                                 base_head=base_head,
                                 coordinates=node[1]["coordinates"])
            else:
                wn.add_junction(node[0],
                                elevation=0,
                                coordinates=node[1]["coordinates"])

        # add pipes
        for edge in edge_df.iterrows():
            length_m = edge[1]["length_m"]
            edge_name = edge[0]
            wn.add_pipe(edge_name, edge[1]["start node"],
                        edge[1]["end node"],
                        length=length_m * (1 + fraction_equivalent_length),
                        roughness=coefficient_friction_hazen_williams,
                        minor_loss=0.0,
                        status='OPEN')
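        # NOTE (assumption): inflating the pipe length by the equivalent-length
        # factor is a standard way to fold minor losses (bends, valves, fittings)
        # into the Hazen-Williams friction loss instead of itemizing them.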

        # add options
        wn.options.time.duration = 8759 * 3600   # one-year simulation (8760 hourly steps; EPANET duration is the last hour, i.e. 8759)
        wn.options.time.hydraulic_timestep = 60 * 60
        wn.options.time.pattern_timestep = 60 * 60
        wn.options.solver.accuracy = 0.01
        wn.options.solver.trials = 100

        # 1st ITERATION GET MASS FLOWS AND CALCULATE DIAMETER
        sim = wntr.sim.EpanetSimulator(wn)
        results = sim.run_sim()
        max_volume_flow_rates_m3s = results.link['flowrate'].abs().max()
        pipe_names = max_volume_flow_rates_m3s.index.values
        pipe_catalog = pd.read_excel(locator.get_database_distribution_systems(), sheet_name='THERMAL_GRID')
        Pipe_DN, D_ext_m, D_int_m, D_ins_m = zip(
            *[calc_max_diameter(flow, pipe_catalog, velocity_ms=velocity_ms, peak_load_percentage=peak_load_percentage) for
              flow in max_volume_flow_rates_m3s])
        pipe_dn = pd.Series(Pipe_DN, pipe_names)
        diameter_int_m = pd.Series(D_int_m, pipe_names)
        diameter_ext_m = pd.Series(D_ext_m, pipe_names)
        diameter_ins_m = pd.Series(D_ins_m, pipe_names)
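        # calc_max_diameter is defined elsewhere in this module; a plausible
        # sketch of its logic (hedged, not the actual implementation): size for
        # the design flow at the target velocity and take the smallest catalog
        # diameter that fits, e.g.
        #   design_flow_m3s = flow * peak_load_percentage / 100
        #   D_min_m = math.sqrt(4 * design_flow_m3s / (math.pi * velocity_ms))
        #   Pipe_DN, D_ext_m, D_int_m, D_ins_m = first catalog row with D_int_m >= D_min_m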

        # 2nd ITERATION: GET PRESSURE POINTS AND MASS FLOWS FOR SIZING PUMPING NEEDS - this could be extended to the whole year
        # modify diameter and run simulations
        edge_df['Pipe_DN'] = pipe_dn
        edge_df['D_int_m'] = diameter_int_m  # use the index-aligned Series, as for Pipe_DN above
        for edge in edge_df.iterrows():
            edge_name = edge[0]
            pipe = wn.get_link(edge_name)
            pipe.diameter = diameter_int_m[edge_name]
        sim = wntr.sim.EpanetSimulator(wn)
        results = sim.run_sim()

        # 3rd ITERATION GET FINAL UTILIZATION OF THE GRID (SUPPLY SIDE)
        # get accumulated head loss per hour
        unitary_head_ftperkft = results.link['headloss'].abs()
        unitary_head_mperm = unitary_head_ftperkft * FT_TO_M / (FT_TO_M * 1000)  # ft per 1000 ft is dimensionless, so this reduces to /1000
        head_loss_m = unitary_head_mperm.copy()
        for column in head_loss_m.columns.values:
            length_m = edge_df.loc[column]['length_m']
            head_loss_m[column] = head_loss_m[column] * length_m
        reservoir_head_loss_m = head_loss_m.sum(axis=1) + thermal_transfer_unit_design_head_m * 1.2  # FIXME: assumes a single thermal_transfer_unit_design_head_m from one substation

        # apply this pattern to the reservoir and get results
        base_head = reservoir_head_loss_m.max()
        pattern_head_m = (reservoir_head_loss_m.values / base_head).tolist()
        wn.add_pattern('reservoir', pattern_head_m)
        reservoir = wn.get_node(name_node_plant)
        reservoir.head_timeseries.base_value = int(base_head)
        reservoir.head_timeseries._pattern = 'reservoir'  # NOTE: writes a private wntr attribute; no public setter in this version
        sim = wntr.sim.EpanetSimulator(wn)
        results = sim.run_sim()

    # POSTPROCESSING

    # $ POSTPROCESSING - PRESSURE/HEAD LOSSES PER PIPE PER HOUR OF THE YEAR
    # at the pipes
    unitary_head_loss_supply_network_ftperkft = results.link['headloss'].abs()
    linear_pressure_loss_Paperm = unitary_head_loss_supply_network_ftperkft * FT_WATER_TO_PA / (FT_TO_M * 1000)
    head_loss_supply_network_Pa = linear_pressure_loss_Paperm.copy()
    for column in head_loss_supply_network_Pa.columns.values:
        length_m = edge_df.loc[column]['length_m']
        head_loss_supply_network_Pa[column] = head_loss_supply_network_Pa[column] * length_m
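    # Worked example of the conversion above (values illustrative): a headloss
    # of 1 ft of water per 1000 ft of pipe equals FT_WATER_TO_PA Pa spread over
    # (1000 * FT_TO_M) m, i.e. FT_WATER_TO_PA / (FT_TO_M * 1000) Pa/m.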

    head_loss_return_network_Pa = head_loss_supply_network_Pa.copy()  # assume the return network mirrors the supply network
    # at the substations
    head_loss_substations_ft = results.node['head'][consumer_nodes].abs()
    head_loss_substations_Pa = head_loss_substations_ft * FT_WATER_TO_PA

    # POSTPROCESSING MASS FLOW RATES
    # MASS_FLOW_RATE (EDGES)
    flow_rate_supply_m3s = results.link['flowrate'].abs()
    massflow_supply_kgs = flow_rate_supply_m3s * P_WATER_KGPERM3

    # $ POSTPROCESSING - PRESSURE LOSSES ACCUMULATED PER HOUR OF THE YEAR (TIMES 2 to account for return)
    accumulated_head_loss_supply_Pa = head_loss_supply_network_Pa.sum(axis=1)
    accumulated_head_loss_return_Pa = head_loss_return_network_Pa.sum(axis=1)
    accumulated_head_loss_substations_Pa = head_loss_substations_Pa.sum(axis=1)
    accumulated_head_loss_total_Pa = accumulated_head_loss_supply_Pa + accumulated_head_loss_return_Pa + accumulated_head_loss_substations_Pa

    # $ POSTPROCESSING - THERMAL LOSSES PER PIPE PER HOUR OF THE YEAR (SUPPLY)
    # calculate the thermal characteristics of the grid
    temperature_of_the_ground_K = calculate_ground_temperature(locator)
    thermal_coefficient_WperKm = pd.Series(
        np.vectorize(calc_linear_thermal_loss_coefficient)(diameter_ext_m, diameter_int_m, diameter_ins_m), pipe_names)
    average_temperature_supply_K = T_sup_K_building.mean(axis=1)


    # reuse the headloss frame only as a template (same index and columns); values are overwritten below
    thermal_losses_supply_kWh = results.link['headloss'].copy()
    thermal_losses_supply_kWh.reset_index(inplace=True, drop=True)
    thermal_losses_supply_Wperm = thermal_losses_supply_kWh.copy()
    for pipe in pipe_names:
        length_m = edge_df.loc[pipe]['length_m']
        massflow_kgs = massflow_supply_kgs[pipe]
        k_WperKm_pipe = thermal_coefficient_WperKm[pipe]
        k_kWperK = k_WperKm_pipe * length_m / 1000
        thermal_losses_supply_kWh[pipe] = np.vectorize(calc_thermal_loss_per_pipe)(average_temperature_supply_K.values,
                                                                     massflow_kgs.values,
                                                                     temperature_of_the_ground_K,
                                                                     k_kWperK,
                                                                     )

        thermal_losses_supply_Wperm[pipe] = (thermal_losses_supply_kWh[pipe] / length_m) * 1000

    # return pipes
    average_temperature_return_K = T_re_K_building.mean(axis=1)
    thermal_losses_return_kWh = results.link['headloss'].copy()  # template frame; values overwritten below
    thermal_losses_return_kWh.reset_index(inplace=True, drop=True)
    for pipe in pipe_names:
        length_m = edge_df.loc[pipe]['length_m']
        massflow_kgs = massflow_supply_kgs[pipe]
        k_WperKm_pipe = thermal_coefficient_WperKm[pipe]
        k_kWperK = k_WperKm_pipe * length_m / 1000
        thermal_losses_return_kWh[pipe] = np.vectorize(calc_thermal_loss_per_pipe)(average_temperature_return_K.values,
                                                                     massflow_kgs.values,
                                                                     temperature_of_the_ground_K,
                                                                     k_kWperK,
                                                                     )
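    # calc_thermal_loss_per_pipe is defined elsewhere; presumably it evaluates
    # an hourly loss of the form Q_kWh ~ k_kWperK * (T_fluid_K - T_ground_K)
    # whenever mass flow is non-zero (hedged: the exact model is not shown here).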
    # WRITE TO DISK

    # LINEAR PRESSURE LOSSES (EDGES)
    linear_pressure_loss_Paperm.to_csv(locator.get_network_linear_pressure_drop_edges(network_type, network_name),
                                       index=False)

    # MASS_FLOW_RATE (EDGES), computed above
    massflow_supply_kgs.to_csv(locator.get_thermal_network_layout_massflow_edges_file(network_type, network_name),
                               index=False)

    # VELOCITY (EDGES)
    velocity_edges_ms = results.link['velocity'].abs()
    velocity_edges_ms.to_csv(locator.get_thermal_network_velocity_edges_file(network_type, network_name),
                             index=False)

    # PRESSURE LOSSES (NODES)
    pressure_at_nodes_ft = results.node['pressure'].abs()
    pressure_at_nodes_Pa = pressure_at_nodes_ft * FT_TO_M * M_WATER_TO_PA
    pressure_at_nodes_Pa.to_csv(locator.get_network_pressure_at_nodes(network_type, network_name), index=False)

    # MASS_FLOW_RATE (NODES)
    # $ POSTPROCESSING - MASSFLOWRATES PER NODE PER HOUR OF THE YEAR
    flow_rate_supply_nodes_m3s = results.node['demand'].abs()
    massflow_supply_nodes_kgs = flow_rate_supply_nodes_m3s * P_WATER_KGPERM3
    massflow_supply_nodes_kgs.to_csv(locator.get_thermal_network_layout_massflow_nodes_file(network_type, network_name),
                                     index=False)

    # thermal demand per building (no losses in the network or substations)
    Q_demand_Wh_building = Q_demand_kWh_building * 1000
    Q_demand_Wh_building.to_csv(locator.get_thermal_demand_csv_file(network_type, network_name), index=False)

    # pressure losses total
    # $ POSTPROCESSING - PUMPING NEEDS PER HOUR OF THE YEAR (TIMES 2 to account for return)
    flow_rate_substations_m3s = results.node['demand'][consumer_nodes].abs()
    head_loss_supply_kWperm = (linear_pressure_loss_Paperm * (flow_rate_supply_m3s * 3600)) / (3.6E6 * PUMP_ETA)
    head_loss_return_kWperm = head_loss_supply_kWperm.copy()
    pressure_loss_supply_edge_kW = (head_loss_supply_network_Pa * (flow_rate_supply_m3s * 3600)) / (3.6E6 * PUMP_ETA)
    head_loss_return_kW = pressure_loss_supply_edge_kW.copy()
    head_loss_substations_kW = (head_loss_substations_Pa * (flow_rate_substations_m3s * 3600)) / (3.6E6 * PUMP_ETA)
    accumulated_head_loss_supply_kW = pressure_loss_supply_edge_kW.sum(axis=1)
    accumulated_head_loss_return_kW = head_loss_return_kW.sum(axis=1)
    accumulated_head_loss_substations_kW = head_loss_substations_kW.sum(axis=1)
    accumulated_head_loss_total_kW = accumulated_head_loss_supply_kW + \
                                     accumulated_head_loss_return_kW + \
                                     accumulated_head_loss_substations_kW
    head_loss_system_Pa = pd.DataFrame({"pressure_loss_supply_Pa": accumulated_head_loss_supply_Pa,
                                        "pressure_loss_return_Pa": accumulated_head_loss_return_Pa,
                                        "pressure_loss_substations_Pa": accumulated_head_loss_substations_Pa,
                                        "pressure_loss_total_Pa": accumulated_head_loss_total_Pa})
    head_loss_system_Pa.to_csv(locator.get_network_total_pressure_drop_file(network_type, network_name),
                               index=False)

    # $ POSTPROCESSING - PLANT HEAT REQUIREMENT
    plant_load_kWh = thermal_losses_supply_kWh.sum(axis=1) * 2 + Q_demand_kWh_building.sum(
        axis=1) - accumulated_head_loss_total_kW.values
    plant_load_kWh.to_csv(locator.get_thermal_network_plant_heat_requirement_file(network_type, network_name),
                          header=['thermal_load_kW'], index=False)
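    # Reading of the balance above (hedged interpretation): thermal losses are
    # doubled to cover supply and return pipes, building demand is added, and
    # pumping energy is subtracted on the assumption that pump work ends up as
    # heat in the fluid.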

    # pressure losses per piping system
    pressure_loss_supply_edge_kW.to_csv(
        locator.get_thermal_network_pressure_losses_edges_file(network_type, network_name), index=False)

    # pressure losses per substation
    head_loss_substations_kW = head_loss_substations_kW.rename(columns=building_nodes_pairs)
    head_loss_substations_kW.to_csv(locator.get_thermal_network_substation_ploss_file(network_type, network_name),
                                    index=False)

    # pumping needs losses total
    pumping_energy_system_kWh = pd.DataFrame({"pressure_loss_supply_kW": accumulated_head_loss_supply_kW,
                                              "pressure_loss_return_kW": accumulated_head_loss_return_kW,
                                              "pressure_loss_substations_kW": accumulated_head_loss_substations_kW,
                                              "pressure_loss_total_kW": accumulated_head_loss_total_kW})
    pumping_energy_system_kWh.to_csv(
        locator.get_network_energy_pumping_requirements_file(network_type, network_name), index=False)

    # plant supply and return temperatures
    temperatures_plant_K = pd.DataFrame({"temperature_supply_K": average_temperature_supply_K,
                                         "temperature_return_K": average_temperature_return_K})
    temperatures_plant_K.to_csv(locator.get_network_temperature_plant(network_type, network_name), index=False)

    # thermal losses
    thermal_losses_supply_kWh.to_csv(locator.get_network_thermal_loss_edges_file(network_type, network_name),
                                     index=False)
    thermal_losses_supply_Wperm.to_csv(locator.get_network_linear_thermal_loss_edges_file(network_type, network_name),
                                       index=False)

    # thermal losses total
    accumulated_thermal_losses_supply_kWh = thermal_losses_supply_kWh.sum(axis=1)
    accumulated_thermal_losses_return_kWh = thermal_losses_return_kWh.sum(axis=1)
    accumulated_thermal_loss_total_kWh = accumulated_thermal_losses_supply_kWh + accumulated_thermal_losses_return_kWh
    thermal_losses_total_kWh = pd.DataFrame({"thermal_loss_supply_kW": accumulated_thermal_losses_supply_kWh,
                                             "thermal_loss_return_kW": accumulated_thermal_losses_return_kWh,
                                             "thermal_loss_total_kW": accumulated_thermal_loss_total_kWh})
    thermal_losses_total_kWh.to_csv(locator.get_network_total_thermal_loss_file(network_type, network_name),
                                    index=False)

    # write the year-average supply temperature to every node of the network
    T_sup_K_nodes = T_sup_K_building.rename(columns=building_nodes_pairs_inversed)
    average_year = T_sup_K_nodes.mean(axis=1)
    for node in node_df.index.values:
        T_sup_K_nodes[node] = average_year
    T_sup_K_nodes.to_csv(locator.get_network_temperature_supply_nodes_file(network_type, network_name),
                         index=False)

    # write the year-average return temperature to every node of the network
    T_return_K_nodes = T_re_K_building.rename(columns=building_nodes_pairs_inversed)
    average_year = T_return_K_nodes.mean(axis=1)
    for node in node_df.index.values:
        T_return_K_nodes[node] = average_year
    T_return_K_nodes.to_csv(locator.get_network_temperature_return_nodes_file(network_type, network_name),
                            index=False)

    # summary of edges used for the calculation
    fields_edges = ['length_m', 'Pipe_DN', 'Type_mat', 'D_int_m']
    edge_df[fields_edges].to_csv(locator.get_thermal_network_edge_list_file(network_type, network_name))
    fields_nodes = ['Type', 'Building']
    node_df[fields_nodes].to_csv(locator.get_thermal_network_node_types_csv_file(network_type, network_name))

    # correct diameter of network and save to the shapefile
    from cea.utilities.dbf import dataframe_to_dbf, dbf_to_dataframe
    fields = ['length_m', 'Pipe_DN', 'Type_mat']
    edge_df = edge_df[fields]
    edge_df['name'] = edge_df.index.values
    network_edges_df = dbf_to_dataframe(
        locator.get_network_layout_edges_shapefile(network_type, network_name).split('.shp')[0] + '.dbf')
    network_edges_df = network_edges_df.merge(edge_df, left_on='Name', right_on='name', suffixes=('_x', ''))
    network_edges_df = network_edges_df.drop(['Pipe_DN_x', 'Type_mat_x', 'name', 'length_m_x'], axis=1)
    dataframe_to_dbf(network_edges_df,
                     locator.get_network_layout_edges_shapefile(network_type, network_name).split('.shp')[0] + '.dbf')
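A minimal, self-contained wntr sketch of the simulation pattern used above
(reservoir as plant, consumer junction with a demand pattern, one pipe, hourly
EPANET run). All names and values are illustrative, not taken from CEA, and it
assumes a wntr version whose add_pipe accepts status='OPEN', as in the code
above.

import wntr

wn = wntr.network.WaterNetworkModel()
# a 3-hour demand pattern; EPANET computes demand = base_demand * multiplier
wn.add_pattern('bldg', [0.4, 1.0, 0.8])
wn.add_reservoir('plant', base_head=24.0, coordinates=(0, 0))
wn.add_junction('consumer', base_demand=0.05, demand_pattern='bldg',
                elevation=20.0, coordinates=(100, 0))
wn.add_pipe('pipe1', 'plant', 'consumer', length=100.0, diameter=0.15,
            roughness=100, minor_loss=0.0, status='OPEN')
wn.options.time.duration = 2 * 3600          # three hourly steps: 0 h, 1 h, 2 h
wn.options.time.hydraulic_timestep = 60 * 60
wn.options.time.pattern_timestep = 60 * 60
sim = wntr.sim.EpanetSimulator(wn)
results = sim.run_sim()
print(results.link['flowrate'])              # m3/s per pipe and timestep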
Exemple #30
0
def lca_embodied(year_to_calculate, locator):
    """
    Algorithm to calculate the embodied emissions and non-renewable primary energy of buildings according to the method
    of [Fonseca et al., 2015] and [Thoma et al., 2014]. The calculation method assumes a 60-year payoff for the embodied
    energy and emissions of a building, after which both values become zero.

    The results are provided in total as well as per square meter:

    - embodied non-renewable primary energy: E_nre_pen_GJ and E_nre_pen_MJm2
    - embodied greenhouse gas emissions: GHG_sys_embodied_tonCO2 and GHG_sys_embodied_kgCO2m2

    As part of the algorithm, the following files are read from InputLocator:

    - architecture.shp: shapefile with the architecture of each building
        locator.get_building_architecture()
    - occupancy.shp: shapefile with the occupancy types of each building
        locator.get_building_occupancy()
    - age.shp: shapefile with the age and retrofit date of each building
        locator.get_building_age()
    - zone.shp: shapefile with the geometry of each building in the zone of study
        locator.get_zone_geometry()
    - Archetypes_properties: csv file with the database of archetypes including embodied energy and emissions
        locator.get_database_construction_standards()

    As a result, the following file is created:

    - Total_LCA_embodied: .csv
        csv file of yearly primary energy and grey emissions per building stored in locator.get_lca_embodied()

    :param year_to_calculate:  year between 1900 and 2100 indicating when embodied energy is evaluated
        to account for emissions already offset from building construction and retrofits more than 60 years ago.
    :type year_to_calculate: int
    :param locator: an instance of InputLocator set to the scenario
    :type locator: InputLocator
    :returns: This function does not return anything
    :rtype: NoneType

    .. [Fonseca et al., 2015] Fonseca et al. (2015) "Assessing the environmental impact of future urban developments at
        neighborhood scale." CISBAT 2015.
    .. [Thoma et al., 2014] Thoma et al. (2014). "Estimation of base-values for grey energy, primary energy, global
        warming potential (GWP 100A) and Umweltbelastungspunkte (UBP 2006) for Swiss constructions from before 1920
        until today." CUI 2014.


    Files read / written from InputLocator:

    get_building_architecture
    get_building_occupancy
    get_building_age
    get_zone_geometry
    get_archetypes_embodied_energy
    get_archetypes_embodied_emissions

    path_LCA_embodied_energy:
        path to database of archetypes embodied energy file
        Archetypes_embodied_energy.csv
    path_LCA_embodied_emissions:
        path to database of archetypes grey emissions file
        Archetypes_embodied_emissions.csv
    path_age_shp: string
        path to building_age.shp
    path_occupancy_shp:
        path to building_occupancy.shp
    path_geometry_shp:
        path to building_geometry.shp
    path_architecture_shp:
        path to building_architecture.shp
    path_results : string
        path to demand results folder emissions
    """

    # local variables
    age_df = dbf_to_dataframe(locator.get_building_typology())
    architecture_df = dbf_to_dataframe(locator.get_building_architecture())
    geometry_df = Gdf.from_file(locator.get_zone_geometry())
    geometry_df['footprint'] = geometry_df.area
    geometry_df['perimeter'] = geometry_df.length
    geometry_df = geometry_df.drop('geometry', axis=1)

    # local variables
    surface_database_windows = pd.read_excel(
        locator.get_database_envelope_systems(), "WINDOW")
    surface_database_roof = pd.read_excel(
        locator.get_database_envelope_systems(), "ROOF")
    surface_database_walls = pd.read_excel(
        locator.get_database_envelope_systems(), "WALL")
    surface_database_floors = pd.read_excel(
        locator.get_database_envelope_systems(), "FLOOR")

    # query data
    df = architecture_df.merge(surface_database_windows,
                               left_on='type_win',
                               right_on='code')
    df2 = architecture_df.merge(surface_database_roof,
                                left_on='type_roof',
                                right_on='code')
    df3 = architecture_df.merge(surface_database_walls,
                                left_on='type_wall',
                                right_on='code')
    df4 = architecture_df.merge(surface_database_floors,
                                left_on='type_floor',
                                right_on='code')
    df5 = architecture_df.merge(surface_database_floors,
                                left_on='type_base',
                                right_on='code')
    df5.rename({'GHG_FLOOR_kgCO2m2': 'GHG_BASE_kgCO2m2'}, inplace=True, axis=1)
    df6 = architecture_df.merge(surface_database_walls,
                                left_on='type_part',
                                right_on='code')
    df6.rename({'GHG_WALL_kgCO2m2': 'GHG_PART_kgCO2m2'}, inplace=True, axis=1)
    fields = ['Name', "GHG_WIN_kgCO2m2"]
    fields2 = ['Name', "GHG_ROOF_kgCO2m2"]
    fields3 = ['Name', "GHG_WALL_kgCO2m2"]
    fields4 = ['Name', "GHG_FLOOR_kgCO2m2"]
    fields5 = ['Name', "GHG_BASE_kgCO2m2"]
    fields6 = ['Name', "GHG_PART_kgCO2m2"]
    surface_properties = df[fields].merge(df2[fields2], on='Name').merge(
        df3[fields3],
        on='Name').merge(df4[fields4],
                         on='Name').merge(df5[fields5],
                                          on='Name').merge(df6[fields6],
                                                           on='Name')

    # DataFrame with joined data for all categories
    data_merged_df = geometry_df.merge(age_df, on='Name').merge(
        surface_properties, on='Name').merge(architecture_df, on='Name')

    # calculate building geometry
    ## total window area
    average_wwr = [
        np.mean([a, b, c, d]) for a, b, c, d in
        zip(data_merged_df['wwr_south'], data_merged_df['wwr_north'],
            data_merged_df['wwr_west'], data_merged_df['wwr_east'])
    ]

    data_merged_df['windows_ag'] = average_wwr * data_merged_df[
        'perimeter'] * data_merged_df['height_ag']

    ## wall area above ground
    data_merged_df['area_walls_ext_ag'] = data_merged_df[
        'perimeter'] * data_merged_df['height_ag'] - data_merged_df['windows_ag']

    # correct envelope areas for the void deck (floors without envelope)
    data_merged_df['empty_envelope_ratio'] = 1 - (
        (data_merged_df['void_deck'] *
         (data_merged_df['height_ag'] / data_merged_df['floors_ag'])) /
        (data_merged_df['area_walls_ext_ag'] + data_merged_df['windows_ag']))
    data_merged_df['windows_ag'] = data_merged_df['windows_ag'] * data_merged_df[
        'empty_envelope_ratio']
    data_merged_df['area_walls_ext_ag'] = data_merged_df[
        'area_walls_ext_ag'] * data_merged_df['empty_envelope_ratio']

    ## wall area below ground
    data_merged_df['area_walls_ext_bg'] = data_merged_df[
        'perimeter'] * data_merged_df['height_bg']
    ## floor area above ground
    data_merged_df['floor_area_ag'] = data_merged_df[
        'footprint'] * data_merged_df['floors_ag']
    ## floor area below ground
    data_merged_df['floor_area_bg'] = data_merged_df[
        'footprint'] * data_merged_df['floors_bg']
    ## total floor area
    data_merged_df['GFA_m2'] = data_merged_df['floor_area_ag'] + data_merged_df[
        'floor_area_bg']

    result_emissions = calculate_contributions(data_merged_df,
                                               year_to_calculate)

    # export the results for embodied emissions (E_ghg_) and non-renewable primary energy (E_nre_pen_) for each
    # building, both total (in t CO2-eq. and GJ) and per square meter (in kg CO2-eq./m2 and MJ/m2)
    result_emissions.to_csv(locator.get_lca_embodied(),
                            index=False,
                            float_format='%.2f')
    print('done!')
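A minimal usage sketch for lca_embodied (hedged: it assumes the standard CEA
entry points cea.config.Configuration and cea.inputlocator.InputLocator used
elsewhere in these examples, with the scenario path taken from the config).

import cea.config
import cea.inputlocator

config = cea.config.Configuration()
locator = cea.inputlocator.InputLocator(scenario=config.scenario)
lca_embodied(year_to_calculate=2030, locator=locator)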