def write_coordinates_to_shp_file(config, locator, list_geotranch, name):
    """
    Write grid.shp and thermal_network.shp on base of list of coordinate data

    :param list_geotranch: tuples with geo data of startnode and endnode
    :type list_geotranch: list(float, float)
    :param name: filename of shp file
    :type name: string
    :return: shp file stored in \\inputs\\networks\\
    :rtype: Nonetype
    """
    street_shp_path = locator.get_street_network()
    target_shp_path = locator.get_electric_network_output_location(name)

    # each tranch is a JSON-encoded coordinate list -> shapely LineString
    tranch_lines = [shapely.geometry.LineString(json.loads(tranch))
                    for tranch in list_geotranch]

    # derive the projected coordinate system from the street network shapefile
    streets_gdf = gpd.GeoDataFrame.from_file(street_shp_path)
    lat, lon = get_lat_lon_projected_shapefile(streets_gdf)
    projected_crs = get_projected_coordinate_system(lat, lon)

    network_gdf = gpd.GeoDataFrame(crs=projected_crs, geometry=tranch_lines)
    network_gdf.to_file(target_shp_path, driver='ESRI Shapefile', encoding='ISO-8859-1')
def main(config):
    """Compute PV potential for all zone buildings in parallel, then aggregate results."""
    assert os.path.exists(config.scenario), 'Scenario not found: %s' % config.scenario
    locator = cea.inputlocator.InputLocator(scenario=config.scenario)

    solar = config.solar
    # echo the effective settings (one line per parameter)
    for setting, value in [('scenario', config.scenario),
                           ('annual-radiation-threshold-kWh/m2', solar.annual_radiation_threshold),
                           ('panel-on-roof', solar.panel_on_roof),
                           ('panel-on-wall', solar.panel_on_wall),
                           ('solar-window-solstice', solar.solar_window_solstice),
                           ('type-pvpanel', solar.type_pvpanel)]:
        print('Running photovoltaic with %s = %s' % (setting, value))
    if solar.custom_tilt_angle:
        print('Running photovoltaic with custom-tilt-angle = %s and panel-tilt-angle = %s' %
              (solar.custom_tilt_angle, solar.panel_tilt_angle))
    else:
        print('Running photovoltaic with custom-tilt-angle = %s' % solar.custom_tilt_angle)
    if solar.custom_roof_coverage:
        print('Running photovoltaic with custom-roof-coverage = %s and max-roof-coverage = %s' %
              (solar.custom_roof_coverage, solar.max_roof_coverage))
    else:
        print('Running photovoltaic with custom-roof-coverage = %s' % solar.custom_roof_coverage)

    buildings = locator.get_zone_building_names()
    zone_df = gdf.from_file(locator.get_zone_geometry())
    lat, lon = get_lat_lon_projected_shapefile(zone_df)
    # list_buildings_names =['B026', 'B036', 'B039', 'B043', 'B050'] for missing buildings
    weather = epwreader.epw_reader(locator.get_weather_file())
    local_dates = solar_equations.calc_datetime_local_from_weather_file(weather, lat, lon)

    workers = config.get_number_of_processes()
    count = len(buildings)
    pv_vectorized = cea.utilities.parallel.vectorize(calc_PV, workers)
    pv_vectorized(repeat(locator, count), repeat(config, count), repeat(lat, count),
                  repeat(lon, count), repeat(weather, count), repeat(local_dates, count),
                  buildings)

    # aggregate results from all buildings
    write_aggregate_results(locator, buildings, workers)
def main(config):
    """
    Compute PV potential for every zone building in parallel, then aggregate the
    per-building hourly results into totals (hourly and annual CSV files).

    :param config: CEA configuration object (reads ``config.scenario`` and ``config.solar``)
    :return: None (results are written via the locator)
    """
    assert os.path.exists(config.scenario), 'Scenario not found: %s' % config.scenario
    locator = cea.inputlocator.InputLocator(scenario=config.scenario)

    print('Running photovoltaic with scenario = %s' % config.scenario)
    print('Running photovoltaic with annual-radiation-threshold-kWh/m2 = %s' % config.solar.annual_radiation_threshold)
    print('Running photovoltaic with panel-on-roof = %s' % config.solar.panel_on_roof)
    print('Running photovoltaic with panel-on-wall = %s' % config.solar.panel_on_wall)
    print('Running photovoltaic with solar-window-solstice = %s' % config.solar.solar_window_solstice)
    print('Running photovoltaic with type-pvpanel = %s' % config.solar.type_pvpanel)

    buildings_names = locator.get_zone_building_names()
    zone_geometry_df = gdf.from_file(locator.get_zone_geometry())
    latitude, longitude = get_lat_lon_projected_shapefile(zone_geometry_df)
    # list_buildings_names =['B026', 'B036', 'B039', 'B043', 'B050'] for missing buildings
    weather_data = epwreader.epw_reader(locator.get_weather_file())
    date_local = solar_equations.calc_datetime_local_from_weather_file(weather_data, latitude, longitude)

    n = len(buildings_names)
    cea.utilities.parallel.vectorize(calc_PV, config.get_number_of_processes())(
        repeat(locator, n), repeat(config, n), repeat(latitude, n), repeat(longitude, n),
        repeat(weather_data, n), repeat(date_local, n), buildings_names)

    # aggregate results from all buildings
    aggregated_annual_results = {}
    aggregated_hourly_results_df = None
    for building in buildings_names:
        hourly_results_per_building = pd.read_csv(locator.PV_results(building))
        if aggregated_hourly_results_df is None:
            aggregated_hourly_results_df = hourly_results_per_building
        else:
            aggregated_hourly_results_df = aggregated_hourly_results_df + hourly_results_per_building
        annual_energy_production = hourly_results_per_building.filter(like='_kWh').sum()
        panel_area_per_building = hourly_results_per_building.filter(like='_m2').iloc[0]
        # BUG FIX: Series.append was removed in pandas 2.0 -- pd.concat is the
        # supported equivalent and produces the same combined Series.
        aggregated_annual_results[building] = pd.concat([annual_energy_production,
                                                         panel_area_per_building])

    # save hourly results
    aggregated_hourly_results_df = aggregated_hourly_results_df.set_index('Date')
    aggregated_hourly_results_df.to_csv(locator.PV_totals(), index=True, float_format='%.2f')
    # save annual results
    aggregated_annual_results_df = pd.DataFrame(aggregated_annual_results).T
    aggregated_annual_results_df.to_csv(locator.PV_total_buildings(), index=True, float_format='%.2f')
def df_to_json(file_location, bbox=False):
    """Load a shapefile as GeoJSON; return (geojson_dict, projected_crs) or (None, None) on failure."""
    from cea.utilities.standardize_coordinates import (get_lat_lon_projected_shapefile,
                                                       get_projected_coordinate_system)
    try:
        gdf_table = geopandas.GeoDataFrame.from_file(file_location)
        # remember the projected coordinate system before re-projecting
        lat, lon = get_lat_lon_projected_shapefile(gdf_table)
        projected_crs = get_projected_coordinate_system(lat, lon)
        # the geojson must be expressed in latitude / longitude
        geographic = gdf_table.to_crs(get_geographic_coordinate_system())
        return json.loads(geographic.to_json(show_bbox=bbox)), projected_crs
    except (IOError, DriverError) as read_error:
        # file missing / unreadable by the driver: report and fall through
        print(read_error)
    except Exception:
        # any other failure: keep the full traceback for debugging
        traceback.print_exc()
    return None, None
def write(self, df, *args, **kwargs):
    """
    Validate *df*, re-project it to the local UTM coordinate system and write it
    to the shapefile path produced by calling this locator method.

    :type df: geopandas.GeoDataFrame
    """
    from cea.utilities.standardize_coordinates import (
        get_lat_lon_projected_shapefile, get_projected_coordinate_system)
    self.validate(df)
    path_to_shp = self(*args, **kwargs)
    # FIX: exist_ok=True replaces the exists()+makedirs() pair, avoiding the
    # check-then-create race when two processes write into the same folder.
    os.makedirs(os.path.dirname(path_to_shp), exist_ok=True)
    lat, lon = get_lat_lon_projected_shapefile(df)
    # get coordinate system and re project to UTM
    df = df.to_crs(get_projected_coordinate_system(lat, lon))
    df.to_file(path_to_shp)
def main(config):
    """Sequentially compute PV potential per building, then sum hourly results into a totals file."""
    assert os.path.exists(config.scenario), 'Scenario not found: %s' % config.scenario
    locator = cea.inputlocator.InputLocator(scenario=config.scenario)

    solar = config.solar
    # echo the effective settings
    for setting, value in [('scenario', config.scenario),
                           ('annual-radiation-threshold-kWh/m2', solar.annual_radiation_threshold),
                           ('panel-on-roof', solar.panel_on_roof),
                           ('panel-on-wall', solar.panel_on_wall),
                           ('solar-window-solstice', solar.solar_window_solstice),
                           ('type-pvpanel', solar.type_pvpanel)]:
        print('Running photovoltaic with %s = %s' % (setting, value))

    buildings = locator.get_zone_building_names()
    zone_gdf = gdf.from_file(locator.get_zone_geometry())
    latitude, longitude = get_lat_lon_projected_shapefile(zone_gdf)
    # list_buildings_names =['B026', 'B036', 'B039', 'B043', 'B050'] for missing buildings
    for building in buildings:
        calc_PV(locator=locator,
                config=config,
                radiation_path=locator.get_radiation_building(building_name=building),
                metadata_csv=locator.get_radiation_metadata(building_name=building),
                latitude=latitude,
                longitude=longitude,
                weather_path=config.weather,
                building_name=building)

    # sum the per-building hourly result frames element-wise
    totals_df = None
    for building in buildings:
        building_df = pd.read_csv(locator.PV_results(building))
        totals_df = building_df if totals_df is None else totals_df + building_df
    totals_df = totals_df.set_index('Date')
    totals_df.to_csv(locator.PV_totals(), index=True, float_format='%.2f')
def df_to_json(file_location, bbox=False, trigger_abort=True):
    """
    Load a shapefile as GeoJSON (in geographic lat/lon coordinates).

    :param file_location: path to the shapefile
    :param bbox: include a bounding box in the GeoJSON output
    :param trigger_abort: when True, abort the request with HTTP 400 on failure
    :return: (geojson_dict, projected_crs), or (None, None) on failure when
             ``trigger_abort`` is False
    """
    from cea.utilities.standardize_coordinates import get_lat_lon_projected_shapefile, get_projected_coordinate_system
    try:
        table_df = geopandas.GeoDataFrame.from_file(file_location)
        # Save coordinate system
        lat, lon = get_lat_lon_projected_shapefile(table_df)
        crs = get_projected_coordinate_system(lat, lon)
        # make sure that the geojson is coded in latitude / longitude
        out = table_df.to_crs(get_geographic_coordinate_system())
        out = json.loads(out.to_json(show_bbox=bbox))
        return out, crs
    except IOError as e:
        print(e)
        if trigger_abort:
            abort(400, 'Input file not found: %s' % file_location)
    except RuntimeError as e:
        print(e)
        if trigger_abort:
            # BUG FIX: Python 3 exceptions have no ``.message`` attribute --
            # ``e.message`` raised AttributeError here; str(e) is the message.
            abort(400, str(e))
    # non-aborting failure path: return the same 2-tuple shape as the success
    # path (consistent with the other df_to_json variant) so callers can unpack
    return None, None
def main(config):
    """
    Compute PVT (photovoltaic-thermal) potential for the selected buildings in
    parallel, then aggregate the per-building hourly results: energy columns are
    summed, supply/return temperatures are averaged, and annual totals are saved.

    :param config: CEA configuration object (reads ``config.scenario`` and ``config.solar``)
    :return: None (results are written via the locator)
    """
    assert os.path.exists(config.scenario), 'Scenario not found: %s' % config.scenario
    locator = cea.inputlocator.InputLocator(scenario=config.scenario)

    print('Running photovoltaic-thermal with scenario = %s' % config.scenario)
    print('Running photovoltaic-thermal with annual-radiation-threshold-kWh/m2 = %s' % config.solar.annual_radiation_threshold)
    print('Running photovoltaic-thermal with panel-on-roof = %s' % config.solar.panel_on_roof)
    print('Running photovoltaic-thermal with panel-on-wall = %s' % config.solar.panel_on_wall)
    print('Running photovoltaic-thermal with solar-window-solstice = %s' % config.solar.solar_window_solstice)
    print('Running photovoltaic-thermal with t-in-pvt = %s' % config.solar.t_in_pvt)
    print('Running photovoltaic-thermal with type-pvpanel = %s' % config.solar.type_pvpanel)

    # fall back to all zone buildings when no explicit selection is configured
    building_names = config.solar.buildings
    if not building_names:
        building_names = locator.get_zone_building_names()

    # renamed from 'hourly_results_per_building': this frame holds zone geometry
    zone_geometry_df = gdf.from_file(locator.get_zone_geometry())
    latitude, longitude = get_lat_lon_projected_shapefile(zone_geometry_df)

    weather_data = epwreader.epw_reader(locator.get_weather_file())
    date_local = solar_equations.calc_datetime_local_from_weather_file(weather_data, latitude, longitude)
    print('reading weather hourly_results_per_building done.')

    n = len(building_names)
    cea.utilities.parallel.vectorize(calc_PVT, config.get_number_of_processes())(
        repeat(locator, n), repeat(config, n), repeat(latitude, n), repeat(longitude, n),
        repeat(weather_data, n), repeat(date_local, n), building_names)

    # aggregate results from all buildings
    aggregated_annual_results = {}
    aggregated_hourly_results_df = None
    temperature_sup = []
    temperature_re = []
    for building in building_names:
        hourly_results_per_building = pd.read_csv(locator.PVT_results(building))
        if aggregated_hourly_results_df is None:
            aggregated_hourly_results_df = hourly_results_per_building
        else:
            aggregated_hourly_results_df = aggregated_hourly_results_df + hourly_results_per_building
        temperature_sup.append(hourly_results_per_building['T_PVT_sup_C'])
        temperature_re.append(hourly_results_per_building['T_PVT_re_C'])
        annual_energy_production = hourly_results_per_building.filter(like='_kWh').sum()
        panel_area_per_building = hourly_results_per_building.filter(like='_m2').iloc[0]
        # BUG FIX: Series.append was removed in pandas 2.0 -- pd.concat is the
        # supported equivalent and produces the same combined Series.
        aggregated_annual_results[building] = pd.concat([annual_energy_production,
                                                         panel_area_per_building])

    # save hourly results: temperatures must be averaged, not summed, across buildings
    aggregated_hourly_results_df['T_PVT_sup_C'] = pd.DataFrame(temperature_sup).mean(axis=0)
    aggregated_hourly_results_df['T_PVT_re_C'] = pd.DataFrame(temperature_re).mean(axis=0)
    # drop columns with Tout
    aggregated_hourly_results_df = aggregated_hourly_results_df[
        aggregated_hourly_results_df.columns.drop(
            aggregated_hourly_results_df.filter(like='Tout', axis=1).columns)]
    aggregated_hourly_results_df = aggregated_hourly_results_df.set_index('Date')
    aggregated_hourly_results_df.to_csv(locator.PVT_totals(), index=True, float_format='%.2f', na_rep='nan')

    # save annual results
    aggregated_annual_results_df = pd.DataFrame(aggregated_annual_results).T
    aggregated_annual_results_df.to_csv(locator.PVT_total_buildings(), index=True,
                                        index_label="Name", float_format='%.2f')
def main(config):
    """
    Sequentially compute PV potential per building, then aggregate hourly results
    (summed across buildings) and annual energy / panel-area totals into CSVs.

    :param config: CEA configuration object (reads ``config.scenario`` and ``config.solar``)
    :return: None (results are written via the locator)
    """
    assert os.path.exists(config.scenario), 'Scenario not found: %s' % config.scenario
    locator = cea.inputlocator.InputLocator(scenario=config.scenario)

    print('Running photovoltaic with scenario = %s' % config.scenario)
    print('Running photovoltaic with annual-radiation-threshold-kWh/m2 = %s' % config.solar.annual_radiation_threshold)
    print('Running photovoltaic with panel-on-roof = %s' % config.solar.panel_on_roof)
    print('Running photovoltaic with panel-on-wall = %s' % config.solar.panel_on_wall)
    print('Running photovoltaic with solar-window-solstice = %s' % config.solar.solar_window_solstice)
    print('Running photovoltaic with type-pvpanel = %s' % config.solar.type_pvpanel)

    list_buildings_names = locator.get_zone_building_names()
    # renamed from 'hourly_results_per_building': this frame holds zone geometry
    zone_geometry_df = gdf.from_file(locator.get_zone_geometry())
    latitude, longitude = get_lat_lon_projected_shapefile(zone_geometry_df)
    # list_buildings_names =['B026', 'B036', 'B039', 'B043', 'B050'] for missing buildings
    for building in list_buildings_names:
        radiation_path = locator.get_radiation_building(building_name=building)
        radiation_metadata = locator.get_radiation_metadata(building_name=building)
        calc_PV(locator=locator, config=config, radiation_path=radiation_path,
                metadata_csv=radiation_metadata, latitude=latitude, longitude=longitude,
                weather_path=config.weather, building_name=building)

    # aggregate results from all buildings
    aggregated_annual_results = {}
    aggregated_hourly_results_df = None
    for building in list_buildings_names:
        hourly_results_per_building = pd.read_csv(locator.PV_results(building))
        if aggregated_hourly_results_df is None:
            aggregated_hourly_results_df = hourly_results_per_building
        else:
            aggregated_hourly_results_df = aggregated_hourly_results_df + hourly_results_per_building
        annual_energy_production = hourly_results_per_building.filter(like='_kWh').sum()
        panel_area_per_building = hourly_results_per_building.filter(like='_m2').iloc[0]
        # BUG FIX: Series.append was removed in pandas 2.0 -- pd.concat is the
        # supported equivalent and produces the same combined Series.
        aggregated_annual_results[building] = pd.concat([annual_energy_production,
                                                         panel_area_per_building])

    # save hourly results
    aggregated_hourly_results_df = aggregated_hourly_results_df.set_index('Date')
    aggregated_hourly_results_df.to_csv(locator.PV_totals(), index=True, float_format='%.2f')
    # save annual results
    aggregated_annual_results_df = pd.DataFrame(aggregated_annual_results).T
    aggregated_annual_results_df.to_csv(locator.PV_total_buildings(), index=True, float_format='%.2f')
def route_get_building_properties():
    """
    Render the building-properties table page.

    For every database listed in INPUTS, load its backing file (shapefile or dbf),
    convert it to JSON-friendly structures and collect everything into ``store``:
    tables, geojsons (shapefiles only), column names/types, coordinate systems and
    matching glossary entries. Databases whose files are missing get an empty table.
    """
    import cea.plots
    import cea.glossary
    # FIXME: Find a better way to ensure order of tabs
    tabs = ['zone', 'age', 'occupancy', 'architecture', 'internal-loads', 'indoor-comfort',
            'technical-systems', 'supply-systems', 'district', 'restrictions']
    locator = cea.inputlocator.InputLocator(current_app.cea_config.scenario)
    # one sub-dict per kind of data the template consumes
    store = {'tables': {}, 'geojsons': {}, 'columns': {}, 'column_types': {}, 'crs': {},
             'glossary': {}}
    glossary = cea.glossary.read_glossary_df()
    for db in INPUTS:
        db_info = INPUTS[db]
        # db_info['location'] names the locator method that yields the file path
        location = getattr(locator, db_info['location'])()
        try:
            if db_info['type'] == 'shp':
                table_df = geopandas.GeoDataFrame.from_file(location)
                # save projected coordinate system
                lat, lon = get_lat_lon_projected_shapefile(table_df)
                store['crs'][db] = get_projected_coordinate_system(lat, lon)
                from cea.utilities.standardize_coordinates import get_geographic_coordinate_system
                # geojson for the map layer must be in geographic (lat/lon) coordinates
                store['geojsons'][db] = json.loads(
                    table_df.to_crs(get_geographic_coordinate_system()).to_json(show_bbox=True))
                # the tabular view does not need the geometry column
                table_df = pandas.DataFrame(table_df.drop(columns='geometry'))
                # backfill the optional REFERENCE column so the table schema is uniform
                if 'REFERENCE' in db_info['fieldnames'] and 'REFERENCE' not in table_df.columns:
                    table_df['REFERENCE'] = None
                store['tables'][db] = json.loads(table_df.set_index('Name').to_json(orient='index'))
            else:
                assert db_info['type'] == 'dbf', 'Unexpected database type: %s' % db_info['type']
                table_df = cea.utilities.dbf.dbf_to_dataframe(location)
                # backfill the optional REFERENCE column so the table schema is uniform
                if 'REFERENCE' in db_info['fieldnames'] and 'REFERENCE' not in table_df.columns:
                    table_df['REFERENCE'] = None
                store['tables'][db] = json.loads(table_df.set_index('Name').to_json(orient='index'))
            store['columns'][db] = db_info['fieldnames']
            # expose python type names (str) for the front-end column editors
            store['column_types'][db] = {k: v.__name__ for k, v in db_info['fieldtypes'].items()}
            # glossary rows are matched by bare filename, e.g. 'internal_loads.dbf'
            filenames = glossary['FILE_NAME'].str.split(pat='/').str[-1]
            store['glossary'].update(json.loads(
                glossary[filenames == '%s.%s' % (db.replace('-', '_'), db_info['type'])][[
                    'VARIABLE', 'UNIT', 'DESCRIPTION']].set_index('VARIABLE').to_json(orient='index')))
        except IOError as e:
            # missing input file: render an empty table rather than failing the page
            print(e)
            store['tables'][db] = {}
    return render_template('table.html', store=store, tabs=tabs, last_updated=dir_last_updated())