def test_photovoltaic():
    gv = cea.globalvar.GlobalVariables()
    scenario_path = gv.scenario_reference
    locator = cea.inputlocator.InputLocator(scenario_path=scenario_path)
    weather_path = locator.get_default_weather()
    list_buildings_names = dbfreader.dbf2df(locator.get_building_occupancy())['Name']

    min_radiation = 0.75  # points are selected with at least this fraction of the maximum production in the area
    type_PVpanel = "PV1"  # PV1 is monocrystalline, PV2 is polycrystalline and PV3 is amorphous; codes from the database of technologies
    worst_hour = 8744  # first hour of sun on the winter solstice
    misc_losses = 0.1  # cabling, resistances etc.
    pvonroof = True  # flag for considering PV on roof  # FIXME: define
    pvonwall = True  # flag for considering PV on wall  # FIXME: define
    longitude = 7.439583333333333
    latitude = 46.95240555555556
    date_start = gv.date_start

    for building in list_buildings_names:
        radiation = locator.get_radiation_building(building_name=building)
        radiation_metadata = locator.get_radiation_metadata(building_name=building)
        calc_PV(locator=locator, radiation_csv=radiation, metadata_csv=radiation_metadata,
                latitude=latitude, longitude=longitude, weather_path=weather_path,
                building_name=building, pvonroof=pvonroof, pvonwall=pvonwall,
                misc_losses=misc_losses, worst_hour=worst_hour, type_PVpanel=type_PVpanel,
                min_radiation=min_radiation, date_start=date_start)
def calc_spatio_temporal_visuals(locator, period):
    # hourly date stamps for the requested slice of the 8760-hour year
    date = pd.date_range('1/1/2010', periods=8760, freq='H')[period[0]: period[1]]
    buildings = dbfreader.dbf2df(locator.get_building_occupancy())['Name']
    location = locator.get_solar_radiation_folder()
    time = date.strftime("%Y%m%d%H%M%S")

    results = []
    for building in buildings:
        # surface geometry (coordinates) and hourly insolation per surface of this building
        geometry = pd.read_csv(os.path.join(location, building + '_geometry.csv')).set_index('SURFACE')
        solar = pd.read_csv(os.path.join(location, building + '_insolation_Whm2.csv'))
        surfaces = solar.columns.values

        for surface in surfaces:
            Xcoor = geometry.loc[surface, 'Xcoor']
            Ycoor = geometry.loc[surface, 'Ycoor']
            Zcoor = geometry.loc[surface, 'Zcoor']
            results.append(pd.DataFrame({'date': time,
                                         'surface': building + surface,
                                         'I_Wm2': solar[surface].values[period[0]: period[1]],
                                         'Xcoor': Xcoor, 'Ycoor': Ycoor, 'Zcoor': Zcoor}))

    # collect all surfaces of all buildings in a single table and export it
    final = pd.concat(results, ignore_index=True)
    dbfreader.df2dbf(final, os.path.join(locator.get_solar_radiation_folder(), "result_solar_48h.dbf"))
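# Hedged usage sketch (not part of the original module): `period` is a [start_hour, end_hour)
# slice of the 8760-hour year, so the "result_solar_48h.dbf" output above corresponds to a
# 48-hour slice such as the one below. The locator setup mirrors test_photovoltaic() and is
# an assumption about how this function is driven; the wrapper name is illustrative.
def example_spatio_temporal_visuals():
    gv = cea.globalvar.GlobalVariables()
    locator = cea.inputlocator.InputLocator(scenario_path=gv.scenario_reference)
    calc_spatio_temporal_visuals(locator, period=[0, 48])  # first two days of the year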
def lca_embodied(year_to_calculate, locator, gv):
    """
    Algorithm to calculate the embodied emissions and non-renewable primary energy of buildings according to the
    method of [Fonseca et al., 2015] and [Thoma et al., 2014]. The calculation method assumes a 60 year payoff for
    the embodied energy and emissions of a building, after which both values become zero.

    The results are provided in total as well as per square meter:

    - embodied non-renewable primary energy: E_nre_pen_GJ and E_nre_pen_MJm2
    - embodied greenhouse gas emissions: E_ghg_ton and E_ghg_kgm2

    As part of the algorithm, the following files are read from InputLocator:

    - architecture.shp: shapefile with the architecture of each building
      locator.get_building_architecture()
    - occupancy.shp: shapefile with the occupancy types of each building
      locator.get_building_occupancy()
    - age.shp: shapefile with the age and retrofit date of each building
      locator.get_building_age()
    - zone.shp: shapefile with the geometry of each building in the zone of study
      locator.get_building_geometry()
    - Archetypes_properties: csv file with the database of archetypes including embodied energy and emissions
      locator.get_archetypes_properties()

    As a result, the following file is created:

    - Total_LCA_embodied.csv: csv file of yearly primary energy and grey emissions per building stored in
      locator.get_lca_embodied()

    :param year_to_calculate: year between 1900 and 2100 indicating when embodied energy is evaluated to account for
        emissions already offset from building construction and retrofits more than 60 years ago.
    :type year_to_calculate: int
    :param locator: an instance of InputLocator set to the scenario
    :type locator: InputLocator
    :param gv: contains the context (constants and models) for the calculation
    :type gv: cea.globalvar.GlobalVariables
    :returns: This function does not return anything
    :rtype: NoneType

    .. [Fonseca et al., 2015] Fonseca et al. (2015) "Assessing the environmental impact of future urban developments
        at neighborhood scale." CISBAT 2015.
    .. [Thoma et al., 2014] Thoma et al. (2014). "Estimation of base-values for grey energy, primary energy, global
        warming potential (GWP 100A) and Umweltbelastungspunkte (UBP 2006) for Swiss constructions from before 1920
        until today." CUI 2014.

    Files read / written from InputLocator:

    - get_building_architecture
    - get_building_occupancy
    - get_building_age
    - get_building_geometry
    - get_archetypes_embodied_energy
    - get_archetypes_embodied_emissions

    path_LCA_embodied_energy: path to database of archetypes embodied energy file Archetypes_embodied_energy.csv
    path_LCA_embodied_emissions: path to database of archetypes grey emissions file Archetypes_embodied_emissions.csv
    path_age_shp: string path to building_age.shp
    path_occupancy_shp: path to building_occupancy.shp
    path_geometry_shp: path to building_geometry.shp
    path_architecture_shp: path to building_architecture.shp
    path_results: string path to the demand results folder for emissions
    """

    # local variables
    architecture_df = dbf2df(locator.get_building_architecture())
    prop_occupancy_df = dbf2df(locator.get_building_occupancy())
    occupancy_df = pd.DataFrame(prop_occupancy_df.loc[:, (prop_occupancy_df != 0).any(axis=0)])
    age_df = dbf2df(locator.get_building_age())
    geometry_df = Gdf.from_file(locator.get_building_geometry())
    geometry_df['footprint'] = geometry_df.area
    geometry_df['perimeter'] = geometry_df.length
    geometry_df = geometry_df.drop('geometry', axis=1)

    # get list of uses
    list_uses = list(occupancy_df.drop(['PFloor', 'Name'], axis=1).columns)

    # define main use:
    occupancy_df['mainuse'] = calc_mainuse(occupancy_df, list_uses)

    # DataFrame with joined data for all categories
    cat_df = occupancy_df.merge(age_df, on='Name').merge(geometry_df, on='Name').merge(architecture_df, on='Name')

    # calculate building geometry
    ## total window area
    cat_df['windows_ag'] = cat_df['win_wall'] * cat_df['perimeter'] * (cat_df['height_ag'] * cat_df['PFloor'])
    ## wall area above ground
    cat_df['area_walls_ext_ag'] = cat_df['perimeter'] * (cat_df['height_ag'] * cat_df['PFloor']) - cat_df['windows_ag']
    ## wall area below ground
    cat_df['area_walls_ext_bg'] = cat_df['perimeter'] * cat_df['height_bg']
    ## floor area above ground
    cat_df['floor_area_ag'] = cat_df['footprint'] * cat_df['floors_ag']
    ## floor area below ground
    cat_df['floor_area_bg'] = cat_df['footprint'] * cat_df['floors_bg']
    ## total floor area
    cat_df['total_area'] = cat_df['floor_area_ag'] + cat_df['floor_area_bg']

    # get categories for each year of construction/retrofit
    ## each building component gets categorized according to its occupancy type, construction year and retrofit year
    ## e.g., for an office building built in 1975, cat_df['cat_built'] = 'OFFICE3'
    ## e.g., for an office building with windows renovated in 1975, cat_df['cat_windows'] = 'OFFICE9'

    # calculate contributions to embodied energy and emissions
    ## calculated by multiplying the area of the given component by the energy and emissions per square meter for the
    ## given category according to the data in the archetype database
    result_energy = calculate_contributions('EMBODIED_ENERGY', cat_df, gv, locator, year_to_calculate,
                                             total_column='GEN_GJ', specific_column='GEN_MJm2')
    result_emissions = calculate_contributions('EMBODIED_EMISSIONS', cat_df, gv, locator, year_to_calculate,
                                               total_column='CO2_ton', specific_column='CO2_kgm2')

    # export the results for embodied emissions (E_ghg_) and non-renewable primary energy (E_nre_pen_) for each
    # building, both total (in t CO2-eq. and GJ) and per square meter (in kg CO2-eq./m2 and MJ/m2)
    fields_to_plot = ['Name', 'GFA_m2', 'E_nre_pen_GJ', 'E_nre_pen_MJm2', 'E_ghg_ton', 'E_ghg_kgm2']
    pd.DataFrame({'Name': result_energy.Name,
                  'E_nre_pen_GJ': result_energy.GEN_GJ,
                  'E_nre_pen_MJm2': result_energy.GEN_MJm2,
                  'E_ghg_ton': result_emissions.CO2_ton,
                  'E_ghg_kgm2': result_emissions.CO2_kgm2,
                  'GFA_m2': result_energy.total_area}).to_csv(locator.get_lca_embodied(),
                                                              columns=fields_to_plot, index=False,
                                                              float_format='%.2f')
    print('done!')
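# Hedged sketch (not part of the original module) of the 60-year payoff rule named in the docstring of
# lca_embodied(): a construction or retrofit contribution is paid off over 60 years, reported as a yearly
# value, and counted as zero once that period has elapsed. The names `SERVICE_LIFE_YEARS` and
# `_payoff_factor` are illustrative assumptions, not the helper actually used by calculate_contributions().
SERVICE_LIFE_YEARS = 60  # assumed payoff period for embodied energy and emissions

def _payoff_factor(year_built_or_retrofit, year_to_calculate, service_life=SERVICE_LIFE_YEARS):
    """Return the annualisation factor 1/service_life while the component is still being paid off,
    and 0.0 once more than `service_life` years have passed since construction or retrofit."""
    age = year_to_calculate - year_built_or_retrofit
    if 0 <= age < service_life:
        return 1.0 / service_life
    return 0.0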
    def __init__(self, locator, gv):
        """
        Read building properties from input shape files and construct a new BuildingProperties object.

        :param locator: an InputLocator for locating the input files
        :type locator: cea.inputlocator.InputLocator
        :param gv: contains the context (constants and models) for the calculation
        :type gv: cea.globalvar.GlobalVariables
        :returns: object of type BuildingProperties
        :rtype: BuildingProperties

        Files read from InputLocator (paths shown for a reference case):

        - get_radiation: C:\reference-case\baseline\outputs\data\solar-radiation\radiation.csv
        - get_surface_properties: C:\reference-case\baseline\outputs\data\solar-radiation\properties_surfaces.csv
        - get_building_geometry: C:\reference-case\baseline\inputs\building-geometry\zone.shp
        - get_building_hvac: C:\reference-case\baseline\inputs\building-properties\technical_systems.shp
        - get_building_thermal: C:\reference-case\baseline\inputs\building-properties\thermal_properties.shp
        - get_building_occupancy: C:\reference-case\baseline\inputs\building-properties\occupancy.shp
        - get_building_architecture: C:\reference-case\baseline\inputs\building-properties\architecture.shp
        - get_building_age: C:\reference-case\baseline\inputs\building-properties\age.shp
        - get_building_comfort: C:\reference-case\baseline\inputs\building-properties\indoor_comfort.shp
        - get_building_internal: C:\reference-case\baseline\inputs\building-properties\internal_loads.shp
        """
        from cea.geometry import geometry_reader
        self.gv = gv
        gv.log("read input files")

        surface_properties = pd.read_csv(locator.get_surface_properties())
        prop_geometry = Gdf.from_file(locator.get_building_geometry())
        prop_geometry['footprint'] = prop_geometry.area
        prop_geometry['perimeter'] = prop_geometry.length
        prop_geometry['Blength'], prop_geometry['Bwidth'] = self.calc_bounding_box_geom(
            locator.get_building_geometry())
        prop_geometry = prop_geometry.drop('geometry', axis=1).set_index('Name')
        prop_hvac = dbf2df(locator.get_building_hvac())
        prop_occupancy_df = dbf2df(locator.get_building_occupancy()).set_index('Name')
        prop_occupancy_df.fillna(value=0.0, inplace=True)  # fix badly formatted occupancy file...
        prop_occupancy = prop_occupancy_df.loc[:, (prop_occupancy_df != 0).any(axis=0)]
        prop_architectures = dbf2df(locator.get_building_architecture())
        prop_age = dbf2df(locator.get_building_age()).set_index('Name')
        prop_comfort = dbf2df(locator.get_building_comfort()).set_index('Name')
        prop_internal_loads = dbf2df(locator.get_building_internal()).set_index('Name')

        # get solar properties
        solar = get_prop_solar(locator).set_index('Name')

        # get temperatures of operation
        prop_HVAC_result = get_properties_technical_systems(locator, prop_hvac).set_index('Name')

        # get envelope properties
        prop_envelope = get_envelope_properties(locator, prop_architectures).set_index('Name')

        # apply overrides
        if os.path.exists(locator.get_building_overrides()):
            self._overrides = pd.read_csv(locator.get_building_overrides()).set_index('Name')
            prop_envelope = self.apply_overrides(prop_envelope)
            prop_internal_loads = self.apply_overrides(prop_internal_loads)
            prop_comfort = self.apply_overrides(prop_comfort)

        # get properties of rc demand model
        prop_rc_model = self.calc_prop_rc_model(prop_occupancy, prop_envelope, prop_geometry, prop_HVAC_result,
                                                surface_properties, gv)

        # df_windows = geometry_reader.create_windows(surface_properties, prop_envelope)
        # TODO: check whether Win_op and the window height are necessary
        # TODO: maybe merging branch i9 with CityGML could help with this

        gv.log("done")

        # save resulting data
        self._prop_surface = surface_properties
        self._prop_geometry = prop_geometry
        self._prop_envelope = prop_envelope
        self._prop_occupancy = prop_occupancy
        self._prop_HVAC_result = prop_HVAC_result
        self._prop_comfort = prop_comfort
        self._prop_internal_loads = prop_internal_loads
        self._prop_age = prop_age
        self._solar = solar
        self._prop_RC_model = prop_rc_model
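# Hedged usage sketch (not part of the original class): constructing the property reader for a scenario.
# It assumes the __init__ above belongs to BuildingProperties (as its docstring states) and that
# locator/gv are set up as in the other scripts in this section; the wrapper name is illustrative.
def example_building_properties():
    gv = cea.globalvar.GlobalVariables()
    locator = cea.inputlocator.InputLocator(scenario_path=gv.scenario_reference)
    return BuildingProperties(locator, gv)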
def properties(locator, prop_architecture_flag, prop_hvac_flag, prop_comfort_flag, prop_internal_loads_flag):
    """
    Algorithm to query building properties from the statistical database Archetypes_HVAC_properties.csv. For more
    information, check the integrated demand model of Fonseca et al. (2015), Applied Energy.

    :param InputLocator locator: an InputLocator instance set to the scenario to work on
    :param boolean prop_architecture_flag: if True, get properties about the construction and architecture.
    :param boolean prop_comfort_flag: if True, get properties about thermal comfort.
    :param boolean prop_hvac_flag: if True, get properties about types of HVAC systems, otherwise False.
    :param boolean prop_internal_loads_flag: if True, get properties about internal loads, otherwise False.

    The following files are created by this script, depending on which flags were set:

    - building_HVAC: .dbf describing the queried properties of HVAC systems
    - architecture.dbf: describes the queried properties of architectural features
    - building_thermal: .shp describing the queried thermal properties of buildings
    - indoor_comfort.shp: describes the queried thermal comfort properties of buildings
    """

    # get occupancy and age files
    building_occupancy_df = dbf2df(locator.get_building_occupancy())
    list_uses = list(building_occupancy_df.drop(['PFloor', 'Name'], axis=1).columns)  # parking excluded in U-values
    building_age_df = dbf2df(locator.get_building_age())

    # get occupant densities from archetypes schedules
    occupant_densities = {}
    for use in list_uses:
        archetypes_schedules = pd.read_excel(locator.get_archetypes_schedules(), use).T
        area_per_occupant = archetypes_schedules['density'].values[:1][0]
        if area_per_occupant > 0:
            occupant_densities[use] = 1 / area_per_occupant
        else:
            occupant_densities[use] = 0

    # prepare shapefile to store results (a shapefile with only the names of buildings)
    names_df = building_age_df[['Name']]

    # define main use:
    building_occupancy_df['mainuse'] = calc_mainuse(building_occupancy_df, list_uses)

    # dataframe with joined data for categories
    categories_df = building_occupancy_df.merge(building_age_df, on='Name')

    # get properties about the construction and architecture
    if prop_architecture_flag:
        architecture_DB = get_database(locator.get_archetypes_properties(), 'ARCHITECTURE')
        architecture_DB['Code'] = architecture_DB.apply(
            lambda x: calc_code(x['building_use'], x['year_start'], x['year_end'], x['standard']), axis=1)
        categories_df['cat_architecture'] = calc_category(architecture_DB, categories_df)
        prop_architecture_df = categories_df.merge(architecture_DB, left_on='cat_architecture', right_on='Code')

        # adjust 'Hs' for multiuse buildings
        prop_architecture_df['Hs'] = correct_archetype_areas(prop_architecture_df, architecture_DB, list_uses)

        # write to shapefile
        prop_architecture_df_merged = names_df.merge(prop_architecture_df, on="Name")
        fields = ['Name', 'Hs', 'win_wall', 'type_cons', 'type_leak', 'type_roof', 'type_wall', 'type_win',
                  'type_shade']
        df2dbf(prop_architecture_df_merged[fields], locator.get_building_architecture())

    # get properties about types of HVAC systems
    if prop_hvac_flag:
        HVAC_DB = get_database(locator.get_archetypes_properties(), 'HVAC')
        HVAC_DB['Code'] = HVAC_DB.apply(
            lambda x: calc_code(x['building_use'], x['year_start'], x['year_end'], x['standard']), axis=1)
        categories_df['cat_HVAC'] = calc_category(HVAC_DB, categories_df)

        # define HVAC systems types
        prop_HVAC_df = categories_df.merge(HVAC_DB, left_on='cat_HVAC', right_on='Code')

        # write to shapefile
        prop_HVAC_df_merged = names_df.merge(prop_HVAC_df, on="Name")
        fields = ['Name', 'type_cs', 'type_hs', 'type_dhw', 'type_ctrl', 'type_vent']
        df2dbf(prop_HVAC_df_merged[fields], locator.get_building_hvac())

    if prop_comfort_flag:
        comfort_DB = get_database(locator.get_archetypes_properties(), 'INDOOR_COMFORT')

        # define comfort
        prop_comfort_df = categories_df.merge(comfort_DB, left_on='mainuse', right_on='Code')

        # write to shapefile
        prop_comfort_df_merged = names_df.merge(prop_comfort_df, on="Name")
        prop_comfort_df_merged = calculate_average_multiuse(prop_comfort_df_merged, occupant_densities, list_uses,
                                                            comfort_DB)
        fields = ['Name', 'Tcs_set_C', 'Ths_set_C', 'Tcs_setb_C', 'Ths_setb_C', 'Ve_lps']
        df2dbf(prop_comfort_df_merged[fields], locator.get_building_comfort())

    if prop_internal_loads_flag:
        internal_DB = get_database(locator.get_archetypes_properties(), 'INTERNAL_LOADS')

        # define internal loads
        prop_internal_df = categories_df.merge(internal_DB, left_on='mainuse', right_on='Code')

        # write to shapefile
        prop_internal_df_merged = names_df.merge(prop_internal_df, on="Name")
        prop_internal_df_merged = calculate_average_multiuse(prop_internal_df_merged, occupant_densities, list_uses,
                                                             internal_DB)
        fields = ['Name', 'Qs_Wp', 'X_ghp', 'Ea_Wm2', 'El_Wm2', 'Epro_Wm2', 'Ere_Wm2', 'Ed_Wm2', 'Vww_lpd', 'Vw_lpd']
        df2dbf(prop_internal_df_merged[fields], locator.get_building_internal())