Example #1
def make_osmexposure(highValueArea,
                     mode="default",
                     country=None,
                     save_path=None,
                     check_plot=1,
                     **kwargs):
    """
    Generate climada-compatible entity by assigning values to midpoints of
    individual house shapes from OSM query, according to surface area and country.

    Parameters:
        highValueArea (str): absolute path for gdf of building features queried
          from get_features_OSM()
        mode (str): "LitPop" or "default": Default assigns a value of 5400 Chf to
          each m2 of building, LitPop assigns total LitPop value for the region
          proportionally to houses (by base area of house)
        Country (str): ISO3 code or name of country in which entity is located.
          Only if mode = LitPop
        kwargs (dict): arguments for LitPop set_country method

    Returns:
        exp_building (Exposure): (CLIMADA-compatible) with allocated asset values.
          Saved as exposure_buildings_mode_lat_lon.h5

    Example:
        buildings_47_8 = \
        make_osmexposure(save_path + '/OSM_features_47_8.shp',
                         mode="default", save_path = save_path, check_plot=1)
    """
    if save_path is None:
        save_path = Path.cwd()
    elif isinstance(save_path, str):
        save_path = Path(save_path)

    High_Value_Area_gdf = _get_midpoints(highValueArea)

    High_Value_Area_gdf = _assign_values_exposure(High_Value_Area_gdf, mode,
                                                  country, **kwargs)

    # put back into CLIMADA-compatible entity format and save as hdf5 file:
    exp_buildings = Exposures(High_Value_Area_gdf)
    exp_buildings.set_lat_lon()
    exp_buildings.check()
    exp_buildings.write_hdf5(
        save_path.joinpath('exposure_buildings_' + mode + '_' +
                           str(int(min(High_Value_Area_gdf.bounds.miny))) +
                           '_' +
                           str(int(min(High_Value_Area_gdf.bounds.minx))) +
                           '.h5'))

    # plotting
    if check_plot == 1:
        # normal hexagons
        exp_buildings.plot_hexbin(pop_name=True)
        # select the OSM background image from the available ctx.sources
        # - returns connection error, left out for now:
        #fig, ax = exp_buildings.plot_basemap(buffer=30000, url=ctx.sources.OSM_C, cmap='brg')

    return exp_buildings
Example #2
def get_osmstencil_litpop(bbox, country, mode, highValueArea=None,
                          save_path=os.getcwd(), check_plot=1, **kwargs):
    """
    Generate climada-compatible exposure by downloading LitPop exposure for a bounding box,
    corrected for centroids which lie inside a certain high-value multipolygon area
    from previous OSM query.

    Parameters:
        bbox (array): List of coordinates in format [South, West, North, East]
        country (str): ISO3 code or name of country in which bbox is located
        highValueArea (str): path of gdf of high-value area from previous step.
          If empty, searches for cwd/High_Value_Area_lat_lon.shp
        mode (str): mode of re-assigning low-value points to high-value points.
          "nearest", "even", or "proportional"
        kwargs (dict): arguments for LitPop set_country method

    Returns:
        exp_sub_high_exp (Exposure): (CLIMADA-compatible) with re-allocated asset
          values with name exposure_high_lat_lon

    Example:
        exposure_high_47_8 = get_osmstencil_litpop([47.16, 8.0, 47.3, 8.0712],\
                          'CHE',"proportional", highValueArea = \
                          save_path + '/High_Value_Area_47_8.shp' ,\
                          save_path = save_path)
    """
    if highValueArea is None:
        shp_path = os.path.join(os.getcwd(),
                                'High_Value_Area_%i_%i.shp'
                                % (int(bbox[0]), int(bbox[1])))
        try:
            High_Value_Area_gdf = geopandas.read_file(shp_path)
        except Exception:
            raise FileNotFoundError('No file found of form %s. '
                                    'Please add or specify path.' % shp_path)
    else:
        High_Value_Area_gdf = geopandas.read_file(highValueArea)

    exp_sub = _get_litpop_bbox(country, High_Value_Area_gdf, **kwargs)

    exp_sub_high = _split_exposure_highlow(exp_sub, mode, High_Value_Area_gdf)

    ###### how to "spread" centroids with value to e.g. hexagons? ###########
    # put exp_sub_high back into CLIMADA-compatible exposure format and save as hdf5 file:
    exp_sub_high_exp = Exposures(exp_sub_high)
    exp_sub_high_exp.set_lat_lon()
    exp_sub_high_exp.check()
    exp_sub_high_exp.write_hdf5(save_path + '/exposure_high_'+str(int(bbox[0]))+\
                                '_'+str(int(bbox[1]))+'.h5')
    # plotting
    if check_plot == 1:
        # normal hexagons
        exp_sub_high_exp.plot_hexbin(pop_name=True)
        # select the OSM background image from the available ctx.sources - doesn't work at the moment
        #fig, ax = exp_sub_high_exp.plot_basemap(buffer=30000, url=ctx.sources.OSM_C, cmap='brg')

    return exp_sub_high_exp
Example #3
    def test_impact_pnt_agg(self):
        """Test impact agreggation method"""
        gdf_mix = GDF_LINE.append(GDF_POLY).append(GDF_POINT).reset_index(
            drop=True)
        exp_mix = Exposures(gdf_mix)

        exp_pnt = u_lp.exp_geom_to_pnt(exp_mix,
                                       res=1,
                                       to_meters=False,
                                       disagg_met=u_lp.DisaggMethod.DIV,
                                       disagg_val=None)
        imp_pnt = Impact()
        imp_pnt.calc(exp_pnt, IMPF_SET, HAZ, save_mat=True)
        imp_agg = u_lp.impact_pnt_agg(imp_pnt, exp_pnt.gdf, u_lp.AggMethod.SUM)
        aai_agg = 1282901.377219451
        eai_exp = np.array([
            1.73069928e-04, 8.80741357e-04, 4.32240819e-03, 8.62816073e-03,
            2.21441154e-02, 1.09329988e-02, 8.58546479e-02, 4.62370081e-02,
            8.99584440e-02, 1.27160538e-02, 8.60317575e-02, 2.02440009e-01,
            2.32808488e-02, 2.86159458e-02, 4.26205598e-03, 2.40051484e-01,
            5.29133033e-03, 2.72705887e-03, 8.87954091e-03, 2.95633263e-02,
            6.33106879e-01, 1.33011693e-03, 1.11120718e-01, 7.72573773e-02,
            6.12233710e-03, 1.61239410e-02, 1.01492204e-01, 7.45522678e-02,
            1.41155415e-01, 1.53820450e-01, 2.27951125e-02, 2.23629697e-02,
            8.59651753e-03, 5.98415680e-03, 1.24717770e-02, 1.24717770e-02,
            1.48060577e-05, 1.48060577e-05, 5.18270742e-03, 5.18270742e-03,
            8.36178802e+03, 7.30704698e+03, 1.20628926e+04, 3.54061498e+04,
            1.23524320e+04, 7.78074661e+04, 1.28292995e+05, 2.31231953e+05,
            1.31911226e+05, 5.37897306e+05, 8.37016948e+04, 1.65661030e+04
        ])
        check_impact(self, imp_agg, HAZ, exp_mix, aai_agg, eai_exp)
Example #4
    def test_calc_geom_impact_mixed(self):
        """ test calc_geom_impact() with a mixed exp (points, lines and polygons) """
        # mixed exposures
        gdf_mix = GDF_LINE.append(GDF_POLY).append(GDF_POINT).reset_index(
            drop=True)
        exp_mix = Exposures(gdf_mix)

        imp1 = u_lp.calc_geom_impact(exp_mix,
                                     IMPF_SET,
                                     HAZ,
                                     res=0.05,
                                     to_meters=False,
                                     disagg_met=u_lp.DisaggMethod.DIV,
                                     disagg_val=None,
                                     agg_met=u_lp.AggMethod.SUM)
        aai_agg1 = 2354303.388829326
        eai_exp1 = np.array([
            5.44242706e-04, 7.83583295e-03, 1.83750670e-01, 1.73511269e-02,
            1.94180761e-02, 3.90576163e-02, 1.10985612e-02, 1.86135108e-01,
            6.14306427e-02, 6.16206874e-02, 8.56458490e-03, 8.81751253e-03,
            4.26205598e-03, 8.12498654e-02, 1.57396460e-01, 6.00203189e-03,
            3.19600253e-01, 1.46198876e-01, 1.29361932e-01, 1.33011693e-03,
            1.38153438e-01, 4.20094145e-02, 9.14516636e-02, 3.61084945e-02,
            4.75139931e-02, 7.99620467e-02, 9.23306174e-02, 1.04525623e-01,
            1.61059946e+04, 1.07420484e+04, 1.44746070e+04, 7.18796281e+04,
            2.58806206e+04, 2.01316315e+05, 1.76071458e+05, 3.92482129e+05,
            2.90364327e+05, 9.05399356e+05, 1.94728210e+05, 5.11729689e+04,
            2.84224294e+02, 2.45938137e+02, 1.90644327e+02, 1.73925079e+02,
            1.76091839e+02, 4.43054173e+02, 4.41378151e+02, 4.74316805e+02,
            4.83873464e+02, 2.59001795e+02, 2.48200400e+02, 2.62995792e+02
        ])
        check_impact(self, imp1, HAZ, exp_mix, aai_agg1, eai_exp1)

        imp2 = u_lp.calc_geom_impact(exp_mix,
                                     IMPF_SET,
                                     HAZ,
                                     res=5000,
                                     to_meters=True,
                                     disagg_met=u_lp.DisaggMethod.FIX,
                                     disagg_val=None,
                                     agg_met=u_lp.AggMethod.SUM)
        aai_agg2 = 321653482.41806
        eai_exp2 = np.array([
            5.44242706e-04, 4.83197677e-03, 4.12448052e-01, 1.34215052e-01,
            2.55089453e-01, 3.82348309e-01, 2.24599809e-01, 2.57801309e-01,
            3.67620642e-01, 5.24002585e-01, 5.62882027e-02, 6.17225877e-02,
            8.52411196e-03, 4.87499192e-01, 9.09740934e-01, 8.01838920e-03,
            7.96127932e-02, 1.34945299e+00, 9.06839997e-01, 4.01295245e-01,
            5.93452277e-01, 8.40188290e-02, 4.67806576e-01, 8.21743744e-02,
            2.48612395e-01, 1.24387821e-01, 3.48131313e-01, 5.53983704e-01,
            1.48411250e+06, 1.09137411e+06, 1.62477251e+06, 1.43455724e+07,
            2.94783633e+06, 1.06950486e+07, 3.17592949e+07, 4.58152749e+07,
            3.94173129e+07, 1.48016265e+08, 1.87811203e+07, 5.41509882e+06,
            1.24792652e+04, 1.20008305e+04, 1.43296472e+04, 3.15280802e+04,
            3.32644558e+04, 3.19325625e+04, 3.11256252e+04, 3.20372742e+04,
            1.67623417e+04, 1.64528393e+04, 1.47050883e+04, 1.37721978e+04
        ])
        check_impact(self, imp2, HAZ, exp_mix, aai_agg2, eai_exp2)
Example #5
    def test_tracks_in_exp_pass(self):
        """Check if tracks in exp are filtered correctly"""

        # Load two tracks from IBTrACS
        storms = {'in': '2000233N12316', 'out': '2000160N21267'}
        tc_track = tc.TCTracks()
        tc_track.read_ibtracs_netcdf(storm_id=list(storms.values()))

        # Define exposure from geopandas
        world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
        exp_world = Exposures(world)
        exp = Exposures(exp_world.gdf[exp_world.gdf.name == 'Cuba'])

        # Compute tracks in exp
        tracks_in_exp = tc_track.tracks_in_exp(exp.gdf, buffer=1.0)

        self.assertTrue(tracks_in_exp.get_track(storms['in']))
        self.assertFalse(tracks_in_exp.get_track(storms['out']))
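        # Note (hedged reading of the call above): buffer=1.0 presumably
        # expands the Cuba geometry by one degree before intersecting with
        # the track positions, so storms passing just offshore still count
        # as "in".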
Example #6
def exp_dem(x_exp=1, exp=None):
    while not exp:
        try:
            exp = Exposures.from_hdf5(EXP_DEMO_H5)
        except HDF5ExtError:
            # possibly raised by pd.HDFStore when the file is locked by another process due to multiprocessing
            time.sleep(0.1)
    exp_tmp = exp.copy(deep=True)
    exp_tmp.gdf.value *= x_exp
    return exp_tmp
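
# A more defensive variant of the retry idea above (a sketch, not part of the
# original snippet): cap the number of attempts so a persistently locked file
# fails loudly instead of looping forever.
import time

from climada.entity import Exposures
from tables.exceptions import HDF5ExtError


def read_exposures_with_retry(path, attempts=50, delay=0.1):
    """Retry Exposures.from_hdf5 while another process holds the file lock."""
    for _ in range(attempts):
        try:
            return Exposures.from_hdf5(path)
        except HDF5ExtError:
            time.sleep(delay)
    raise TimeoutError('%s stayed locked for %.1f s' % (path, attempts * delay))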
Example #7
    def to_exposures(self, dataset, dump_dir=SYSTEM_DIR):
        """Downloads hdf5 files belonging to the given datasets reads them into Exposures and
        concatenates them into a single climada.Exposures object.

        Parameters
        ----------
        dataset : DatasetInfo
            Dataset to download and read into climada.Exposures objects.
        dump_dir : str, optional
            Directory where the files should be downoladed. Default: SYSTEM_DIR (as configured in
            climada.conf, i.g. ~/climada/data).
            If the directory is the SYSTEM_DIR, the eventual target directory is organized into
            dump_dir > exposures_type > dataset name > version

        Returns
        -------
        climada.entity.exposures.Exposures
            The combined exposures object
        """
        target_dir = self._organize_path(dataset, dump_dir) \
                     if dump_dir == SYSTEM_DIR else dump_dir
        exposures_list = [
            Exposures.from_hdf5(self._download_file(target_dir, dsf))
            for dsf in dataset.files
            if dsf.file_format == 'hdf5'
        ]
        if not exposures_list:
            raise ValueError("no hdf5 files found in dataset")
        if len(exposures_list) == 1:
            return exposures_list[0]
        exposures_concat = Exposures()
        exposures_concat = exposures_concat.concat(exposures_list)
        exposures_concat.check()
        return exposures_concat
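
    # Hypothetical usage sketch for the method above (assumes it lives on
    # climada.util.api_client.Client; the dataset query values are
    # illustrative, not taken from this snippet):
    #
    #   client = Client()
    #   ds = client.get_dataset_info(data_type='litpop',
    #                                properties={'country_iso3alpha': 'CHE'})
    #   exp = client.to_exposures(ds)
    #   exp.check()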
Example #8
def load_exp_infr(force_new_hdf5_generation, name_hdf5_file, input_folder, haz_real):
    """
    Load or generate the Exposure of infrastructure if forced or if the hdf5
    file is not present. Otherwise load the hdf5 file.

    Parameters
    ----------
    force_new_hdf5_generation : dict of bool
        contains a bool indicating whether a new Exposure should be forcefully
        generated.
    name_hdf5_file : str
        name of the hdf5 file from which the Exposure is loaded.
    input_folder : str
        Path to input folder containing the hdf5 file.
    haz_real : climada.hazard.base.Hazard
        CLIMADA hazard.

    Returns
    -------
    exp_infr : climada.entity.exposures.base.Exposures
        CLIMADA Exposure of infrastructure.

    """
    file = Path(input_folder + "/" + name_hdf5_file["exp_infr"])
    if force_new_hdf5_generation["exp_infr"] or not file.exists(): #be carefull, this step will take ages when you do both at once
        # LitPop Exposure
        print("generating new exp_infr")
        exp_infr = LitPop()
        exp_infr.set_country('Switzerland', reference_year = 2019)
        exp_infr.set_geometry_points()
        exp_infr = exp_infr.rename(columns = {'if_': 'if_HL'})
        exp_infr = Exposures(exp_infr)
        exp_infr.set_lat_lon()
        exp_infr.check()
        exp_infr.assign_centroids(haz_real, method = "NN", distance ="haversine", threshold = 2)
        exp_infr.write_hdf5(input_folder + "/exp_switzerland.hdf5")
    else:
        # LitPop Exposure
        exp_infr = LitPop()
        exp_infr.read_hdf5(input_folder + "/exp_switzerland.hdf5")
        exp_infr.check()
    return exp_infr
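
# Hypothetical call of the loader above (all argument values are assumptions;
# haz_real must be a CLIMADA Hazard loaded elsewhere):
# exp_infr = load_exp_infr(force_new_hdf5_generation={'exp_infr': False},
#                          name_hdf5_file={'exp_infr': 'exp_switzerland.hdf5'},
#                          input_folder='input', haz_real=haz_real)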
Example #9
file_info = ''.join([directory_exposures, 'age_categories.csv'])
file_locations = ''.join([directory_exposures, 'STATPOP2018.csv'])
shp_dir = '../../input_data/shapefiles/KANTONS_projected_epsg4326/'

#call_exposures_switzerland_productivity(file_info, file_locations, shp_dir, save=True)
exposures = {}
for code, category in {
        'IL': 'inside low physical activity',
        'IM': 'inside moderate physical activity',
        'OM': 'outside moderate physical activity',
        'OH': 'outside high physical activity'
}.items():
    exposures_file = ''.join(
        [directory_exposures, 'exposures_productivity_ch_', code, '.h5'])
    exposures[category] = Exposures()
    exposures[category].read_hdf5(exposures_file)
    exposures[category] = exposures[category][
        exposures[category]['canton'] == 'Zürich']
    exposures[category] = Exposures(exposures[category])
    exposures[category].check()
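    # Note (inferred from the re-wrap above): subscripting an Exposures with a
    # boolean mask returns a plain (Geo)DataFrame slice, which is why the
    # result is passed through Exposures() again and check()ed before use.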

impacts_productivity = ImpactsHeatProductivity(scenarios, years, n_mc)
impacts_productivity.impacts_years_scenarios(exposures, directory_hazard,
                                             nyears_hazards)

#with open(''.join([directory_output, 'impact_', str(n_mc), 'mc', '.pickle']), 'wb') as handle:
#    pickle.dump(impacts_mortality, handle, protocol=pickle.HIGHEST_PROTOCOL)

executionTime = (time.time() - startTime)
    df_data["E"] = df_data["X"] + 2000000
    df_data["N"] = df_data["Y"] + 1000000
    #df_data_18 = pd.read_csv(path_data_18)
else:
    path_data = "~/Documents/ETH/Masterarbeit/agrar_exposure/data_arealstatistik/AREA_NOLU04_46_191202.csv"
    df_data = pd.read_csv(path_data)

df_data = df_data[["X", "Y", "LU09R_46", "E", "N"]]
df_data["is_agrar"] = df_data.apply(lambda row: is_agrar(row), axis=1)

#reduce data to only include points specified in is_agrar() function
df_data = df_data[df_data["is_agrar"] == 1]

lambd, eps = transform_coord(E=df_data["E"], N=df_data["N"])
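
# Alternative to the custom transform_coord() helper above (a sketch assuming
# the E/N columns are Swiss LV95 / EPSG:2056 coordinates; kept commented out
# so it does not shadow the values computed above):
# from pyproj import Transformer
# transformer = Transformer.from_crs("EPSG:2056", "EPSG:4326", always_xy=True)
# lambd, eps = transformer.transform(df_data["E"].values, df_data["N"].values)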

exp_hail_agr = Exposures()
exp_hail_agr["latitude"] = eps
exp_hail_agr["longitude"] = lambd
exp_hail_agr["region_id"] = df_data["LU09R_46"]

# Assigning values to the different sectors. Source: https://www.pxweb.bfs.admin.ch/pxweb/de/px-x-0704000000_121/px-x-0704000000_121/px-x-0704000000_121.px
# Fruit 201 (C.1.1.01.116 fruit): 559'104'082 (viticulture must be subtracted) -> 351'088'936
# Viticulture 202 (C1.1.01.1162 wine grapes): 208'015'146
# Arable farming 221 (C1.1.01.11 crop production): 4'436'181'114 (minus fruit and wine (C1.1.01.117 479'824'835))
# = 3955797175


value_obst = 351008936
avg_value_obst = value_obst / exp_hail_agr[exp_hail_agr["region_id"] ==
                                           201].shape[0]
Example #11
            exposure_tmp.value > 0].count()
        grid_stats.loc[idx, 'sum'] = exposure_tmp.value.sum()
        grid_stats.loc[idx, 'max'] = exposure_tmp.value.max()
        grid_stats.loc[idx, 'mean'] = exposure_tmp.value.mean()
        grid_stats.loc[idx, 'median'] = exposure_tmp.value.median()

    del exposure_tmp
    grid_stats.to_csv(
        os.path.join(
            RES_DIR,
            'LitPop_pc_%iarcsec_%i_grid_stats.csv' % (RES_ARCSEC, REF_YEAR)))
    grid_stats.to_csv(
        os.path.join(
            ENTITY_DIR,
            'LitPop_pc_%iarcsec_%i_grid_stats.csv' % (RES_ARCSEC, REF_YEAR)))
    exposure_data = Exposures(exposure_data)
    print('\n' + '\x1b[1;03;30;30m' + 'exposure_data is now an Exposures:',
          str(type(exposure_data)) + '\x1b[0m')
    exposure_data.set_geometry_points(
    )  # set geometry attribute (shapely Points) from GeoDataFrame from latitude and longitude
    print('\n' + '\x1b[1;03;30;30m' + 'check method logs:' + '\x1b[0m')
    exposure_data.check()  # puts metadata that has not been assigned

    print('\n' + '\x1b[1;03;30;30m' + 'exposure_data looks like:' + '\x1b[0m')
    print(exposure_data.head())
    print('\n' + '\x1b[1;03;30;30m' + 'plotting global map...' + '\x1b[0m')
    print('Global max. grid cell value: USD %1.0f' %
          (exposure_data.value.max()))
    print('Global mean grid cell value: USD %1.0f' %
          (exposure_data.value.mean()))
    print('Global median grid cell value: USD %1.0f' %
Example #12
    def test_calc_geom_impact_lines(self):
        """ test calc_geom_impact() with lines"""
        # line exposures only
        exp_line_novals = Exposures(GDF_LINE.drop(columns='value'))

        imp1 = u_lp.calc_geom_impact(EXP_LINE,
                                     IMPF_SET,
                                     HAZ,
                                     res=0.05,
                                     to_meters=False,
                                     disagg_met=u_lp.DisaggMethod.DIV,
                                     disagg_val=None,
                                     agg_met=u_lp.AggMethod.SUM)
        aai_agg1 = 2.18359
        eai_exp1 = np.array([
            8.50634478e-02, 4.24820916e-02, 1.04429093e-01, 1.27160538e-02,
            8.60539827e-02, 1.75262423e-01, 2.32808488e-02, 2.92552267e-02,
            4.26205598e-03, 2.31991466e-01, 5.29133033e-03, 2.72705887e-03,
            8.87954091e-03, 2.95633263e-02, 5.61356696e-01, 1.33011693e-03,
            9.95247490e-02, 7.72573773e-02, 6.12233710e-03, 1.61239410e-02,
            1.14566573e-01, 7.45522678e-02, 2.95181528e-01, 4.64021003e-02,
            1.45806743e-02, 2.49435540e-02, 2.96121155e-05, 1.03654148e-02
        ])
        check_impact(self, imp1, HAZ, EXP_LINE, aai_agg1, eai_exp1)

        imp2 = u_lp.calc_geom_impact(EXP_LINE,
                                     IMPF_SET,
                                     HAZ,
                                     res=300,
                                     to_meters=True,
                                     disagg_met=u_lp.DisaggMethod.DIV,
                                     disagg_val=None,
                                     agg_met=u_lp.AggMethod.SUM)
        np.testing.assert_allclose(imp2.eai_exp, imp1.eai_exp, rtol=0.1)

        imp3 = u_lp.calc_geom_impact(exp_line_novals,
                                     IMPF_SET,
                                     HAZ,
                                     res=300,
                                     to_meters=True,
                                     disagg_met=u_lp.DisaggMethod.FIX,
                                     disagg_val=5000,
                                     agg_met=u_lp.AggMethod.SUM)
        aai_agg3 = 2.830144
        eai_exp3 = np.array([
            0.10973467, 0.05930568, 0.1291031, 0.02170876, 0.11591773,
            0.20360855, 0.03329673, 0.03672271, 0.00779005, 0.28260995,
            0.01006294, 0.00989869, 0.01279569, 0.04986454, 0.62946471,
            0.00431759, 0.12464957, 0.12455043, 0.01734576, 0.02508649,
            0.15109773, 0.12019767, 0.36631115, 0.06004143, 0.05308581,
            0.04738706, 0.00483797, 0.01935157
        ])
        check_impact(self, imp3, HAZ, exp_line_novals, aai_agg3, eai_exp3)

        imp4 = u_lp.calc_geom_impact(EXP_LINE,
                                     IMPF_SET,
                                     HAZ,
                                     res=300,
                                     to_meters=True,
                                     disagg_met=u_lp.DisaggMethod.FIX,
                                     disagg_val=5000,
                                     agg_met=u_lp.AggMethod.SUM)
        np.testing.assert_array_equal(imp3.eai_exp, imp4.eai_exp)
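        # Rough intuition for the two disaggregation modes exercised above
        # (paraphrase, not verbatim library documentation): DisaggMethod.DIV
        # splits each geometry's value evenly over its interpolated points,
        # preserving totals, while DisaggMethod.FIX assigns disagg_val to
        # every point, so totals scale with the point count; hence the
        # explicit disagg_val=5000 for imp3 and imp4.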
Example #13
    def test_calc_geom_impact_polys(self):
        """ test calc_geom_impact() with polygons"""
        #to_meters=False, DIV, res=0.1, SUM
        imp1 = u_lp.calc_geom_impact(EXP_POLY,
                                     IMPF_SET,
                                     HAZ,
                                     res=0.1,
                                     to_meters=False,
                                     disagg_met=u_lp.DisaggMethod.DIV,
                                     disagg_val=None,
                                     agg_met=u_lp.AggMethod.SUM)
        aai_agg = 2182703.085366719
        eai_exp = np.array([
            17554.08233195, 9896.48265036, 16862.31818246, 72055.81490662,
            21485.93199464, 253701.42418527, 135031.5217457, 387550.35813156,
            352213.16031506, 480603.19106997, 203634.46630402, 232114.3335491
        ])
        check_impact(self, imp1, HAZ, EXP_POLY, aai_agg, eai_exp)

        #to_meters=False, DIV, res=10, SUM
        imp2 = u_lp.calc_geom_impact(EXP_POLY,
                                     IMPF_SET,
                                     HAZ,
                                     res=10,
                                     to_meters=False,
                                     disagg_met=u_lp.DisaggMethod.DIV,
                                     disagg_val=None,
                                     agg_met=u_lp.AggMethod.SUM)
        aai_agg2 = 1282899.0530
        eai_exp2 = np.array([
            8361.78802035, 7307.04698346, 12062.89257699, 35406.14977618,
            12352.43204322, 77807.46608747, 128292.99535735, 231231.95252362,
            131911.22622791, 537897.30570932, 83701.69475186, 16566.10301167
        ])
        check_impact(self, imp2, HAZ, EXP_POLY, aai_agg2, eai_exp2)

        #to_meters=True, DIV, res=800, SUM
        imp3 = u_lp.calc_geom_impact(EXP_POLY,
                                     IMPF_SET,
                                     HAZ,
                                     res=800,
                                     to_meters=True,
                                     disagg_met=u_lp.DisaggMethod.DIV,
                                     disagg_val=None,
                                     agg_met=u_lp.AggMethod.SUM)
        self.assertIsInstance(imp3, Impact)
        self.assertTrue(hasattr(imp3, 'geom_exp'))
        self.assertTrue(hasattr(imp3, 'coord_exp'))
        self.assertTrue(np.all(imp3.geom_exp == EXP_POLY.gdf.geometry))
        self.assertEqual(len(imp3.coord_exp), len(EXP_POLY.gdf))
        self.assertAlmostEqual(imp3.aai_agg, 2317081.0602, 3)

        #to_meters=True, DIV, res=1000, SUM
        imp4 = u_lp.calc_geom_impact(EXP_POLY,
                                     IMPF_SET,
                                     HAZ,
                                     res=1000,
                                     to_meters=True,
                                     disagg_met=u_lp.DisaggMethod.DIV,
                                     disagg_val=None,
                                     agg_met=u_lp.AggMethod.SUM)
        aai_agg4 = 2326978.3422
        eai_exp4 = np.array([
            17558.22201377, 10796.36836336, 16239.35385599, 73254.21872128,
            25202.52110382, 216510.67702673, 135412.73610909, 410197.10023667,
            433400.62668497, 521005.95549878, 254979.4396249, 212421.12303947
        ])
        check_impact(self, imp4, HAZ, EXP_POLY, aai_agg4, eai_exp4)

        #to_meters=True, DIV, res=1000, SUM, disagg_val=10e6
        imp5 = u_lp.calc_geom_impact(EXP_POLY,
                                     IMPF_SET,
                                     HAZ,
                                     res=1000,
                                     to_meters=True,
                                     disagg_met=u_lp.DisaggMethod.DIV,
                                     disagg_val=10e6,
                                     agg_met=u_lp.AggMethod.SUM)
        aai_agg5 = 132.81559
        eai_exp5 = np.array([
            3.55826479, 2.55715709, 2.49840826, 3.51427162, 4.30164506,
            19.36203038, 5.28426336, 14.25330336, 37.29091663, 14.05986724,
            6.88087542, 19.2545918
        ])
        check_impact(self, imp5, HAZ, EXP_POLY, aai_agg5, eai_exp5)

        gdf_noval = GDF_POLY.copy()
        gdf_noval.pop('value')
        exp_noval = Exposures(gdf_noval)
        #to_meters=True, DIV, res=950, SUM, disagg_val=10e6
        imp6 = u_lp.calc_geom_impact(exp_noval,
                                     IMPF_SET,
                                     HAZ,
                                     res=950,
                                     to_meters=True,
                                     disagg_met=u_lp.DisaggMethod.DIV,
                                     disagg_val=10e6,
                                     agg_met=u_lp.AggMethod.SUM)
        np.testing.assert_allclose(imp5.eai_exp, imp6.eai_exp, rtol=0.1)

        #to_meters=True, FIX, res=1000, SUM, disagg_val=10e6
        imp7 = u_lp.calc_geom_impact(exp_noval,
                                     IMPF_SET,
                                     HAZ,
                                     res=1000,
                                     to_meters=True,
                                     disagg_met=u_lp.DisaggMethod.FIX,
                                     disagg_val=10e6,
                                     agg_met=u_lp.AggMethod.SUM)
        aai_agg7 = 412832.86028
        eai_exp7 = np.array([
            8561.18507994, 6753.45186608, 8362.17243334, 18014.15630989,
            8986.13653385, 36826.58179136, 27446.46387061, 45468.03772305,
            130145.29903078, 54861.60197959, 26849.17587226, 40558.59779586
        ])
        check_impact(self, imp7, HAZ, EXP_POLY, aai_agg7, eai_exp7)
Example #14
def call_exposures(kanton=None, age_group=None, epsg_output=4326):
    """Write the Exposures.

    Parameters:
        kanton (str or None): Name of canton. Default: None (all of Switzerland)
        age_group (str or None): specific age group, as given in the
            "GIS_Data_code" of the age_categories.csv file. Default: None
        epsg_output (int): EPSG code of the output. Default: 4326.

    Returns:
        Dictionary containing one Exposure per age category (ratio of pop. per
        hectare instead of the number of people)
    """

    directory = '../../input_data/exposures/'
    exposures = {}  # dictionary of the exposures, where we will further put each category of Exposure as a key
    
    population_info = pd.read_csv(
        ''.join([directory, 'age_categories.csv']))  # file containing the information on the age categories
    
    population_loc = pd.read_csv(''.join([directory, 'STATPOP2018.csv']))
    # file containing the geographical location of the population by age group
    
    epsg_data = 2056  # epsg of the population_loc data
    
    # get tot. population (CH/Canton)
    pop_values = population_loc[population_loc.columns[8:]]
    pop_hectare_ch = pop_values.sum(axis=1) # to sum over the columns
    pop_tot_ch = pop_hectare_ch.sum(axis=0) # to sum over the rows
    
    if kanton:
        shp_dir = '../../input_data/shapefiles/KANTONS_projected_epsg4326/' \
                      'swissBOUNDARIES3D_1_3_TLM_KANTONSGEBIET_epsg4326.shp'
        
        pop_loc_ch = population_loc.copy()
        pop_loc_ch['longitude'] = np.asarray(pop_loc_ch['E_KOORD']).flatten()
        pop_loc_ch['latitude'] = np.asarray(pop_loc_ch['N_KOORD']).flatten()
        pop_loc_canton = vector_shapefile_mask(pop_loc_ch, shp_dir, kanton,
                                               epsg_data, epsg_output)
                
        pop_values_canton = pop_loc_canton[pop_loc_canton.columns[8:]]
        pop_hectare_canton = pop_values_canton.sum(axis=1)
        pop_tot_canton = pop_hectare_canton.sum(axis=0)
        
        #print(pop_tot_canton)
        #print(pop_tot_ch)
        
    # get subset of the population data for each category
    # Under 75 years:
    U = population_info.loc[population_info['Age_category'] == 'U']
    # Above 75 years
    O = population_info.loc[population_info['Age_category'] == 'O']

    if age_group is None:
        groups = list(population_loc)[8:]  # take all age_groups
    else:
        groups = age_group # take only the given age_groups

    age_type = {}
    if_ref = {}
    exposures_name = set()

    for group in groups:
        category = population_info[population_info['GIS_Data_code'] == group]['Age_category'].values[0]

        if category == 'U':
            exposures_name.add('Under 75 years')
            age_type['Under 75 years'] = U
            if_ref['Under 75 years'] = 1

        if category == 'O':
            exposures_name.add('Over 75 years')
            age_type['Over 75 years'] = O
            if_ref['Over 75 years'] = 2

    for name in exposures_name:

        code_i_l = ['E_KOORD', 'N_KOORD']
        if age_group is None:
            code_i_l.extend(list(age_type[name]['GIS_Data_code']))
        else:
            code_i_l.extend(age_group)

        population_sum_intensity = DataFrame()  # dataframe with ratio of the pop. for each category
        population_loc_intensity = population_loc[code_i_l]

        population_sum_intensity['longitude'] = np.asarray(population_loc_intensity['E_KOORD']).flatten()
        population_sum_intensity['latitude'] = np.asarray(population_loc_intensity['N_KOORD']).flatten()
        population_sum_intensity['value'] = np.asarray(
            population_loc_intensity[population_loc_intensity.columns[2:]].sum(axis=1) / pop_tot_ch)
        n_exp = len(population_sum_intensity['value'])

        if kanton:  # if a canton was specified, we first get a pandas
            # GeoDataFrame and define the exposures slightly differently
            shp_dir = '../../input_data/shapefiles/KANTONS_projected_epsg4326/' \
                      'swissBOUNDARIES3D_1_3_TLM_KANTONSGEBIET_epsg4326.shp'

            population_sum_intensity = vector_shapefile_mask(
                population_sum_intensity, shp_dir, kanton, epsg_data, epsg_output)

            population_sum_intensity['value'] = population_sum_intensity['value'] * pop_tot_ch / pop_tot_canton

            population_sum_intensity = Exposures(population_sum_intensity)  # define as Exposure class
            population_sum_intensity.set_lat_lon()
            n_exp = len(population_sum_intensity['value'])
            population_sum_intensity['if_heat'] = np.full((n_exp), if_ref[name], dtype=int)
            population_sum_intensity.value_unit = 'Number of people'
            population_sum_intensity.fillna(0)
            population_sum_intensity.check()

        else:  # normal case, for entire Switzerland

            population_sum_intensity = Exposures(population_sum_intensity)
            population_sum_intensity.set_geometry_points()
            population_sum_intensity.value_unit = 'Number of people'
            population_sum_intensity['if_heat'] = np.full((n_exp), if_ref[name], dtype=int)
            population_sum_intensity.crs = {'init': ''.join(['epsg:', str(epsg_data)])} # crs: Coordinate Reference Systems
            population_sum_intensity.check()
            population_sum_intensity.fillna(0)
            population_sum_intensity.to_crs(epsg=epsg_output, inplace=True)
        
        name_category = name
        exposures[name_category] = population_sum_intensity

    return exposures
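
# Hypothetical calls (argument values are assumptions):
# exposures_ch = call_exposures()                 # all of Switzerland, all ages
# exposures_zh = call_exposures(kanton='Zürich')  # one canton, ratios rescaled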
Example #15
    """ Define impact functions """
    if_1m = IF_FL_1m()
    if_fl = ImpactFuncSet()
    if_fl.tag.description = '1m step function'
    if_fl.append(if_1m)

    return if_fl

ifs_step = iffl()

yy_start = 106
yy_end = 116

idx_band = 1

exp = Exposures()

ssp_file = EXP_POP_PTH + 'baseYr_total_2000.tif'
exp.set_from_raster(ssp_file, transform=DST_META['transform'],
                    height=DST_META['height'], width=DST_META['width'],
                    resampling=Resampling.average)

exp.value *= 25     # sum of the grids after upscaling
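# Note on the factor 25 above (the 5 x 5 cell ratio is an assumption inferred
# from the comment): Resampling.average stores the mean of the contributing
# source cells in each target cell, so multiplying by the number of aggregated
# cells converts that mean back into a per-pixel population sum.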
if np.any(exp.value < 0):
    raise ValueError("negative exposure values after resampling")
exp.value_unit = 'N people per pixel'
exp.ref_year = 2000
exp[INDICATOR_CENTR+HAZ_TYPE] = np.arange(len(exp), dtype=int)
exp[INDICATOR_IF+HAZ_TYPE] = np.ones(len(exp), dtype=int)
exp.check()

for year in YEAR:    
Example #16
plot_minimum = 100

if not os.path.exists(RES_DIR):
    os.makedirs(RES_DIR)

files = [i for i in os.listdir(ENTITY_DIR)
         if os.path.isfile(os.path.join(ENTITY_DIR, i)) and filename_start in i]
files = np.unique(files)
print('\n' + '\x1b[1;03;30;30m' + 'Number of country exposure files: %i' %(len(files)) + '\x1b[0m')

"""LOADING DATA FROM CSV AND REGRIDDING TO TARGET RESOLUTION PER COUNTRY:"""
print('\n' + '\x1b[1;03;30;30m' + 'REGRIDDING TO TARGET RESOLUTION PER COUNTRY' + '\x1b[0m')
for res_target in res_targets:
    for idx, fi in enumerate(files):
        exposure_tmp = Exposures()
        if os.path.exists(os.path.join(RES_DIR, '%s_%ias.tiff' %(fi[0:-4]+fadd, res_target))):
            print('\n' + '\x1b[1;03;30;30m' + 'TIFF exists already, skipping: %s_%ias.tiff' %(fi[0:-4], res_target) + '\x1b[0m')

            continue
        else:
            print('\n' + '\x1b[1;03;30;30m' + 'Loading: %s ...' %(fi) + '\x1b[0m')
            exposure_tmp = exposure_tmp.from_csv(os.path.join(ENTITY_DIR, fi), index_col=None)

            if np.isnan(exposure_tmp.value.max()):
                continue
            exposure_tmp = Exposures(exposure_tmp)
            exposure_tmp.set_geometry_points() # set geometry attribute (shapely Points) from GeoDataFrame from latitude and longitude
            exposure_tmp.check() # puts metadata that has not been assigned        
            if write_to_hdf5:
                exposure_tmp.write_hdf5(os.path.join(ENTITY_DIR_HDF5, '%s.hdf5' %(fi[0:-4]+fadd)))
Example #17
def exp_dem(x_exp=1):
    exp = Exposures()
    exp.read_hdf5(EXP_DEMO_H5)
    exp.gdf.value *= x_exp
    exp.check()
    return exp
Example #18
def load_exp_agr(force_new_hdf5_generation, name_hdf5_file, input_folder, haz_real):
    """
    

    Parameters
    ----------
    Load generate Exposure of agriculture if forced or if hdf5 file not present. 
    Otherwise load hdf5 file.

    Parameters
    ----------
    force_new_hdf5_generation : dict of bool
        contains bool wether new Exposure should be forcefully generated.
    name_hdf5_file : str
        name of hdf5 file from wich Exposure is loaded.
    input_folder : str
        Path to input folder containing hdf5 file.
    haz_real : climada.hazard.base.Hazard
        CLIMADA hazard.

    Returns
    -------
    exp_infr : climada.entity.exposures.base.Exposures
        CLIMADA Exposure of Exposure.

    """
    file1 = Path(input_folder + "/" + name_hdf5_file["exp_agr"])
    file2 = Path(input_folder + "/" + "exp_agr_no_centr.hdf5")
    if not file2.exists() and not file1.exists():
        print("Please use import_agrar_exposure to create the hdf5 file!" + 
              " and move it to the input folder")
        sys.exit()
    elif force_new_hdf5_generation["exp_agr"]:
        # be careful, this step will take ages when you do both at once
        if not file2.exists():
            print("Please use import_agrar_exposure to create the hdf5 file!" +
                  " and move it to the input folder")
            sys.exit()
        exp_agr = Exposures()
        exp_agr.read_hdf5(input_folder + "/exp_agr_no_centr.hdf5")
    
        exp_agr.check()
        exp_agr.assign_centroids(haz_real, method="NN", distance="haversine",
                                 threshold=2)
        exp_agr.check()
        exp_agr.write_hdf5(input_folder + "/exp_agr.hdf5")
    
    else:
        # Agricultural exposure
        exp_agr = Exposures()
        exp_agr.read_hdf5(input_folder + "/exp_agr.hdf5")
        exp_agr.check()
    return exp_agr
Example #19
def call_exposures(kanton=None,
                   branch=None,
                   only_outside=False,
                   epsg_output=4326):
    """write the Exposures:

                    Parameters:

                        kanton (str or None): Name of canton. Default: None (all of Switzerland)
                        branch (str or None): specific economic branch, as given in the 'GIS_data_code' of the
                                                work_intensity.csv file. Default: None
                        only_outside (bool): rather to only output the exposures outside,
                                                considering in that case that the people inside
                                                are not exposed to the risk of heat
                        epsg_output (int): EPSG code of the output. Default: 4326.

                    Returns:
                        Dictionary containing one Exposure per category of workers
                          """

    directory = '../../input_data/exposures/'
    # dictionary of the exposures, where we will further put each category of
    # Exposure as a key
    exposures = {}
    # file containing the information on the intensity, inside/outside and
    # salary of each branch
    workers_info = pd.read_csv(''.join([directory, 'work_intensity.csv']))
    # file containing the geographical location of the workers by branch
    workers_dist = pd.read_csv(''.join(
        [directory, 'lv95_vollzeitequivalente.csv']))
    epsg_data = 2056  # epsg of the workers_dist data

    # new dataframe where we transform the 'full time equivalents' values into
    # monetary values by multiplying by the salary
    workers_dist_monetary = DataFrame()

    workers_dist_monetary['E_KOORD'] = workers_dist['E_KOORD']  # same coordinates
    workers_dist_monetary['N_KOORD'] = workers_dist['N_KOORD']

    # get subset of the workers data for each category
    # inside low:
    i_l = workers_info.loc[(workers_info['Indoor/Outdoor'] == 'I')
                           & (workers_info['Occupation_category'] == 'L')]
    # inside moderate:
    i_m = workers_info.loc[(workers_info['Indoor/Outdoor'] == 'I')
                           & (workers_info['Occupation_category'] == 'M')]
    # outside moderate:
    o_m = workers_info.loc[(workers_info['Indoor/Outdoor'] == 'O')
                           & (workers_info['Occupation_category'] == 'M')]
    # outside high:
    o_h = workers_info.loc[(workers_info['Indoor/Outdoor'] == 'O')
                           & (workers_info['Occupation_category'] == 'H')]

    if branch is None:
        occupation = list(workers_dist)[2:]  # take all branches
    else:
        occupation = branch  # take only the given branches

    for o_ in occupation:
        salary = workers_info.loc[(workers_info['GIS_Data_code'] == o_),
                                  'Hourly salary (CHF/h)'].values[0]
        # for each occupation, multiply by the corresponding salary
        workers_dist_monetary.loc[:, o_] = workers_dist.loc[:, o_] * salary

    work_type = {}
    if_ref = {}
    exposures_name = set()

    for o_ in occupation:
        in_out = workers_info[
            workers_info['GIS_Data_code'] == o_]['Indoor/Outdoor'].values[0]
        intensity = workers_info[
            workers_info['GIS_Data_code'] == o_]['Occupation_category'].values[0]

        if not only_outside:  # if we didn't specify that we only wanted the exposures outside

            if in_out == 'I' and intensity == 'L':
                exposures_name.add('inside low physical activity')
                work_type['inside low physical activity'] = i_l
                if_ref['inside low physical activity'] = 1

            if in_out == 'I' and intensity == 'M':
                exposures_name.add('inside moderate physical activity')
                work_type['inside moderate physical activity'] = i_m
                if_ref['inside moderate physical activity'] = 2

        if in_out == 'O' and intensity == 'M':
            exposures_name.add('outside moderate physical activity')
            work_type['outside moderate physical activity'] = o_m
            if_ref['outside moderate physical activity'] = 2

        if in_out == 'O' and intensity == 'H':
            exposures_name.add('outside high physical activity')
            work_type['outside high physical activity'] = o_h
            if_ref['outside high physical activity'] = 3

    for w_ in exposures_name:

        code_i_l = ['E_KOORD', 'N_KOORD']
        if branch is None:
            code_i_l.extend(list(work_type[w_]['GIS_Data_code']))
        else:
            code_i_l.extend(branch)

        # final dataframe with the monetary value for each intensity
        workers_sum_intensity = DataFrame()
        workers_dist_intensity = workers_dist_monetary[code_i_l]

        workers_sum_intensity['longitude'] = np.asarray(
            workers_dist_intensity['E_KOORD']).flatten()
        workers_sum_intensity['latitude'] = np.asarray(
            workers_dist_intensity['N_KOORD']).flatten()
        workers_sum_intensity['value'] = np.asarray(
            workers_dist_intensity[workers_dist_intensity.columns[2:]].sum(
                axis=1))
        n_exp = len(workers_sum_intensity['value'])

        if kanton:  # if a canton was specified, we first get a pandas
            # GeoDataFrame and define the exposures slightly differently
            shp_dir = '../../input_data/shapefiles/KANTONS_projected_epsg4326/' \
                      'swissBOUNDARIES3D_1_3_TLM_KANTONSGEBIET_epsg4326.shp'

            workers_sum_intensity = vector_shapefile_mask(
                workers_sum_intensity, shp_dir, kanton, epsg_data, epsg_output)

            workers_sum_intensity = Exposures(
                workers_sum_intensity)  # define as Exposure class
            workers_sum_intensity.set_lat_lon()
            n_exp = len(workers_sum_intensity['value'])
            workers_sum_intensity['if_heat'] = np.full((n_exp),
                                                       if_ref[w_],
                                                       dtype=int)
            workers_sum_intensity.value_unit = 'CHF'
            workers_sum_intensity.fillna(0)
            workers_sum_intensity.check()

        else:  # normal case, for entire Switzerland

            workers_sum_intensity = Exposures(workers_sum_intensity)
            workers_sum_intensity.set_geometry_points()
            workers_sum_intensity.value_unit = 'CHF'
            workers_sum_intensity['if_heat'] = np.full((n_exp),
                                                       if_ref[w_],
                                                       dtype=int)
            workers_sum_intensity.crs = {
                'init': ''.join(['epsg:', str(epsg_data)])
            }
            workers_sum_intensity.check()
            workers_sum_intensity.fillna(0)
            workers_sum_intensity.to_crs(epsg=epsg_output, inplace=True)
        name = w_
        exposures[name] = workers_sum_intensity

    return exposures
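
# Hypothetical calls (argument values are assumptions):
# exp_all = call_exposures()                   # all branches, monetary values
# exp_out = call_exposures(only_outside=True)  # outdoor workers only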