Example No. 1
    def test_calc_sector_total_impact(self):
        """Test running total impact calculations."""
        sup = SupplyChain()
        sup.read_wiod16(year='test',
                        range_rows=(5, 117),
                        range_cols=(4, 116),
                        col_iso3=2,
                        col_sectors=1)

        # Tropical cyclone over Florida and Caribbean
        hazard = Hazard('TC')
        hazard.read_mat(HAZ_TEST_MAT)

        # Read demo entity values
        # Set the entity default file to the demo one
        exp = Exposures()
        exp.read_hdf5(EXP_DEMO_H5)
        exp.check()
        exp.gdf.region_id = 840  # assign the right region_id for the USA
        exp.assign_centroids(hazard)

        impf_tc = IFTropCyclone()
        impf_tc.set_emanuel_usa()
        impf_set = ImpactFuncSet()
        impf_set.append(impf_tc)
        impf_set.check()

        sup.calc_sector_direct_impact(hazard, exp, impf_set)
        sup.calc_indirect_impact(io_approach='ghosh')
        sup.calc_total_impact()

        self.assertAlmostEqual((sup.years.shape[0], sup.mriot_data.shape[0]),
                               sup.total_impact.shape)
        self.assertAlmostEqual((sup.mriot_data.shape[0], ),
                               sup.total_aai_agg.shape)
Example No. 2
    def test_EU(self):
        """test with demo data containing France and Germany"""
        bbox = [-5, 42, 16, 55]
        haz = RelativeCropyield()
        haz.set_from_single_run(input_dir=INPUT_DIR,
                                yearrange=(2001, 2005),
                                bbox=bbox,
                                ag_model='lpjml',
                                cl_model='ipsl-cm5a-lr',
                                scenario='historical',
                                soc='2005soc',
                                co2='co2',
                                crop='whe',
                                irr='noirr',
                                fn_str_var=FN_STR_DEMO)
        hist_mean = haz.calc_mean(yearrange_mean=(2001, 2005))
        haz.set_rel_yield_to_int(hist_mean)
        haz.centroids.set_region_id()

        exp = CropProduction()
        exp.set_from_single_run(input_dir=INPUT_DIR,
                                filename=FILENAME_LU,
                                hist_mean=FILENAME_MEAN,
                                bbox=bbox,
                                yearrange=(2001, 2005),
                                scenario='flexible',
                                unit='t',
                                crop='whe',
                                irr='firr')

        exp.set_to_usd(INPUT_DIR)
        exp.assign_centroids(haz, threshold=20)

        if_cp = ImpactFuncSet()
        if_def = IFRelativeCropyield()
        if_def.set_relativeyield()
        if_cp.append(if_def)
        if_cp.check()

        impact = Impact()
        impact.calc(exp.loc[exp.region_id == 276],
                    if_cp,
                    haz.select(['2002']),
                    save_mat=True)

        exp_manual = exp.value.loc[exp.region_id == 276].values
        impact_manual = haz.select(event_names=['2002'],
                                   reg_id=276).intensity.multiply(exp_manual)
        dif = (impact_manual - impact.imp_mat).data

        self.assertEqual(haz.tag.haz_type, 'RC')
        self.assertEqual(haz.size, 5)
        self.assertEqual(haz.centroids.size, 1092)
        self.assertAlmostEqual(haz.intensity.mean(), -2.0489097e-08)
        self.assertAlmostEqual(exp.value.max(), 53074789.755290434)
        self.assertEqual(exp.latitude.values.size, 1092)
        self.assertAlmostEqual(exp.value[3], 0.0)
        self.assertAlmostEqual(exp.value[1077], 405026.6857207429)
        self.assertAlmostEqual(impact.imp_mat.data[3], -176102.5359452465)
        self.assertEqual(len(dif), 0)
Example No. 3
def irma_percen(irma_tc_s, irma_tc_n, exp_s, exp_n):
    """ Plot irma damage in % in lesser antilles and TCA. """
    if_exp = ImpactFuncSet()
    if_em = IFTropCyclone()
    if_em.set_emanuel_usa()
    if_exp.append(if_em)

    fig, axs = plt.subplots(1,
                            2,
                            figsize=(16, 20),
                            subplot_kw=dict(projection=ccrs.PlateCarree()),
                            squeeze=True,
                            sharex=False,
                            sharey=False)

    for idx, axis in enumerate(axs.flatten()):
        grid = axis.gridlines(draw_labels=True, alpha=0.2)
        grid.xlabels_top = grid.ylabels_right = False
        grid.xformatter = LONGITUDE_FORMATTER
        grid.yformatter = LATITUDE_FORMATTER

    # south
    plot_percen(irma_tc_s, exp_s, if_exp, axs[0])

    # north
    im = plot_percen(irma_tc_n, exp_n, if_exp, axs[1])

    plt.subplots_adjust(wspace=0.14)
    fig.subplots_adjust(right=0.88)
    cbar_ax = fig.add_axes([0.885, 0.4285, 0.03, 0.148])
    fig.colorbar(im, cax=cbar_ax, orientation='vertical', label='% Damage')

    return fig
Example No. 4
def iffl():
    """ Define impact functions """
    if_1m = IF_FL_1m()
    if_fl = ImpactFuncSet()
    if_fl.tag.description = '1m step function'
    if_fl.append(if_1m)

    return if_fl
Example No. 5
    def test_EU_nan(self):
        """Test whether setting the zeros in exp.value to NaN changes the impact"""
        bbox = [0, 42, 10, 52]
        haz = RelativeCropyield()
        haz.set_from_isimip_netcdf(input_dir=INPUT_DIR,
                                   yearrange=(2001, 2005),
                                   bbox=bbox,
                                   ag_model='lpjml',
                                   cl_model='ipsl-cm5a-lr',
                                   scenario='historical',
                                   soc='2005soc',
                                   co2='co2',
                                   crop='whe',
                                   irr='noirr',
                                   fn_str_var=FN_STR_DEMO)
        hist_mean = haz.calc_mean(yearrange_mean=(2001, 2005))
        haz.set_rel_yield_to_int(hist_mean)
        haz.centroids.set_region_id()

        exp = CropProduction()
        exp.set_from_isimip_netcdf(input_dir=INPUT_DIR,
                                   filename=FILENAME_LU,
                                   hist_mean=FILENAME_MEAN,
                                   bbox=bbox,
                                   yearrange=(2001, 2005),
                                   scenario='flexible',
                                   unit='t/y',
                                   crop='whe',
                                   irr='firr')
        exp.assign_centroids(haz, threshold=20)

        impf_cp = ImpactFuncSet()
        impf_def = ImpfRelativeCropyield()
        impf_def.set_relativeyield()
        impf_cp.append(impf_def)
        impf_cp.check()

        impact = Impact()
        impact.calc(exp, impf_cp, haz, save_mat=True)

        exp_nan = CropProduction()
        exp_nan.set_from_isimip_netcdf(input_dir=INPUT_DIR,
                                       filename=FILENAME_LU,
                                       hist_mean=FILENAME_MEAN,
                                       bbox=[0, 42, 10, 52],
                                       yearrange=(2001, 2005),
                                       scenario='flexible',
                                       unit='t/y',
                                       crop='whe',
                                       irr='firr')
        exp_nan.gdf.value[exp_nan.gdf.value == 0] = np.nan
        exp_nan.assign_centroids(haz, threshold=20)

        impact_nan = Impact()
        impact_nan.calc(exp_nan, impf_cp, haz, save_mat=True)
        self.assertListEqual(list(impact.at_event), list(impact_nan.at_event))
        self.assertAlmostEqual(12.056545220060798, impact_nan.aai_agg)
        self.assertAlmostEqual(12.056545220060798, impact.aai_agg)
Example No. 6
def impf_dem(x_paa=1, x_mdd=1):
    impf = ImpactFunc()
    impf.haz_type = 'TC'
    impf.id = 1
    impf.intensity_unit = 'm/s'
    impf.intensity = np.linspace(0, 150, num=100)
    impf.mdd = np.repeat(1, len(impf.intensity)) * x_mdd
    impf.paa = np.arange(0, len(impf.intensity)) / len(impf.intensity) * x_paa
    impf.check()
    impf_set = ImpactFuncSet()
    impf_set.append(impf)
    return impf_set
Example No. 7
def calib_instance(hazard,
                   exposure,
                   impact_func,
                   df_out=pd.DataFrame(),
                   yearly_impact=False):
    """ calculate one impact instance for the calibration algorithm and write 
        to given DataFrame

        Parameters:
            hazard: hazard set instance
            exposure: exposure set instance
            impact_func: impact function instance
            
        Optional Parameters:
            df_out: Output DataFrame with headers of columns defined and optionally with
                first row (index=0) defined with values. If columns "impact", 
                "event_id", or "year" are not included, they are created here.
                Data like reported impacts or impact function parameters can be
                given here; values are preserved.
            yearly_impact (boolean): if set True, impact is returned per year, 
                not per event

        Returns:
            df_out: DataFrame with modelled impact written to rows for each year
                or event.
    """
    IFS = ImpactFuncSet()
    IFS.append(impact_func)
    impacts = Impact()
    impacts.calc(exposure, IFS, hazard)
    if yearly_impact:  # impact per year
        IYS = impacts.calc_impact_year_set(all_years=True)
        # Loop over whole year range:
        for cnt_, year in enumerate(np.sort(list((IYS.keys())))):
            if cnt_ > 0:
                df_out.loc[cnt_] = df_out.loc[0]  # copy info from first row
            if year in IYS:
                df_out.loc[cnt_, 'impact'] = IYS[year]
            else:
                df_out.loc[cnt_, 'impact'] = 0
            df_out.loc[cnt_, 'year'] = year

    else:  # impact per event
        for cnt_, impact in enumerate(impacts.at_event):
            if cnt_ > 0:
                df_out.loc[cnt_] = df_out.loc[0]  # copy info from first row
            df_out.loc[cnt_, 'impact'] = impact
            df_out.loc[cnt_, 'event_id'] = int(impacts.event_id[cnt_])
            df_out.loc[cnt_, 'event_name'] = impacts.event_name[cnt_]
            df_out.loc[cnt_, 'year'] = \
                dt.datetime.fromordinal(impacts.date[cnt_]).year
    return df_out
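A minimal usage sketch (not from the original source): haz, exp and impf_tc stand for a Hazard, an Exposures and an ImpactFunc prepared as in the other examples on this page.

# Hypothetical usage sketch with assumed inputs: yearly modelled impacts are
# written to the 'impact' and 'year' columns of the returned DataFrame.
df_yearly = calib_instance(haz, exp, impf_tc,
                           df_out=pd.DataFrame(),
                           yearly_impact=True)
print(df_yearly[['year', 'impact']])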
Example No. 8
    def test_Forecast_calc_properties(self):
        """Test calc and property functions from the Forecast class"""
        #hazard
        haz = StormEurope()
        haz.read_cosmoe_file(
            HAZ_DIR.joinpath('storm_europe_cosmoe_forecast_vmax_testfile.nc'),
            run_datetime=dt.datetime(2018, 1, 1),
            event_date=dt.datetime(2018, 1, 3))
        #exposure
        data = {}
        data['latitude'] = haz.centroids.lat
        data['longitude'] = haz.centroids.lon
        data['value'] = np.ones_like(data['latitude']) * 100000
        data['deductible'] = np.zeros_like(data['latitude'])
        data[INDICATOR_IMPF + 'WS'] = np.ones_like(data['latitude'])
        data['region_id'] = np.ones_like(data['latitude'], dtype=int) * 756
        expo = Exposures(gpd.GeoDataFrame(data=data))
        #vulnerability
        #generate vulnerability
        impact_function = ImpfStormEurope()
        impact_function.set_welker()
        impact_function_set = ImpactFuncSet()
        impact_function_set.append(impact_function)
        #create and calculate Forecast
        forecast = Forecast({dt.datetime(2018, 1, 1): haz}, expo,
                            impact_function_set)
        forecast.calc()
        # test
        self.assertEqual(len(forecast.run_datetime), 1)
        self.assertEqual(forecast.run_datetime[0], dt.datetime(2018, 1, 1))
        self.assertEqual(forecast.event_date, dt.datetime(2018, 1, 3))
        self.assertEqual(forecast.lead_time().days, 2)
        self.assertEqual(forecast.summary_str(),
                         'WS_NWP_run2018010100_event20180103_Switzerland')
        self.assertAlmostEqual(forecast.ai_agg(), 26.347, places=1)
        self.assertAlmostEqual(forecast.ei_exp()[1], 7.941, places=1)
        self.assertEqual(len(forecast.hazard), 1)
        self.assertIsInstance(forecast.hazard[0], StormEurope)
        self.assertIsInstance(forecast.exposure, Exposures)
        self.assertIsInstance(forecast.vulnerability, ImpactFuncSet)
Example No. 9
def plot_right(irma_tc, exp, ax, scale_pos, plot_line=False):
    """ Plot irma damage in USD. """
    if_exp = ImpactFuncSet()
    if_em = IFTropCyclone()
    if_em.set_emanuel_usa()
    if_exp.append(if_em)

    imp_irma = Impact()
    imp_irma.calc(exp, if_exp, irma_tc)
    extent = [
        exp.longitude.min() - BUFFER_DEG,
        exp.longitude.max() + BUFFER_DEG,
        exp.latitude.min() - BUFFER_DEG,
        exp.latitude.max() + BUFFER_DEG
    ]
    ax.set_extent((extent))
    u_plot.add_shapes(ax)

    sel_pos = np.argwhere(imp_irma.eai_exp > 0)[:, 0]
    hex_bin = ax.hexbin(imp_irma.coord_exp[sel_pos, 1],
                        imp_irma.coord_exp[sel_pos, 0],
                        C=imp_irma.eai_exp[sel_pos],
                        reduce_C_function=np.average,
                        transform=ccrs.PlateCarree(),
                        gridsize=2000,
                        norm=LogNorm(vmin=MIN_VAL, vmax=MAX_VAL),
                        cmap='YlOrRd',
                        vmin=MIN_VAL,
                        vmax=MAX_VAL)
    ax.set_title('')
    ax.grid(False)
    add_cntry_names(ax, extent)
    scale_bar(ax, scale_pos, 10)

    if plot_line:
        x1, y1 = [-64.57, -64.82], [18.28, 18.47]
        ax.plot(x1, y1, linewidth=1.0, color='grey', linestyle='--')

    return hex_bin
Example No. 10
def calc_imp(expo_dict, tc_dict, data_dir):
    """ Compute impacts of TCs in every island group. """
    try:
        abs_path = os.path.join(data_dir, 'imp_isl.p')
        with open(abs_path, 'rb') as f:
            imp_dict = pickle.load(f)
        print('Loaded imp_isl:', len(imp_dict))
    except FileNotFoundError:
        if_exp = ImpactFuncSet()
        if_em = IFTropCyclone()
        if_em.set_emanuel_usa()
        if_exp.append(if_em)

        imp_dict = dict()
        for isl_iso in expo_dict:
            imp = Impact()
            imp.calc(expo_dict[isl_iso], if_exp, tc_dict[isl_iso])
            imp_dict[isl_iso] = imp

        save(os.path.join(data_dir, 'imp_isl.p'), imp_dict)

    return imp_dict
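A hedged usage sketch (not part of the original example): expo_dict and tc_dict are assumed to map ISO3 codes to Exposures and TropCyclone hazards built earlier in the workflow, and the data_dir path is illustrative.

# Hypothetical usage sketch with assumed inputs; aai_agg is the average
# annual impact aggregated over the exposure.
imp_dict = calc_imp(expo_dict, tc_dict, data_dir='results')
for isl_iso, imp in imp_dict.items():
    print(isl_iso, imp.aai_agg)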
Example No. 11
def flood_imp_func_set():
    """Builds impact function set for river flood, using standard files"""

    if_set = ImpactFuncSet()

    if_africa = IFRiverFlood()
    if_africa.set_RF_IF_Africa()
    if_set.append(if_africa)

    if_asia = IFRiverFlood()
    if_asia.set_RF_IF_Asia()
    if_set.append(if_asia)

    if_europe = IFRiverFlood()
    if_europe.set_RF_IF_Europe()
    if_set.append(if_europe)

    if_na = IFRiverFlood()
    if_na.set_RF_IF_NorthAmerica()
    if_set.append(if_na)

    if_oceania = IFRiverFlood()
    if_oceania.set_RF_IF_Oceania()
    if_set.append(if_oceania)

    if_sa = IFRiverFlood()
    if_sa.set_RF_IF_SouthAmerica()
    if_set.append(if_sa)

    return if_set
Example No. 12
#d.set_file_path(file_path_spei)

"""Setup the hazard"""
new_haz = d.setup()

"""Plot intensity of one year event""" 
# new_haz.plot_intensity_drought(event='2003')

"""Initialize Impact function"""
dr_if = ImpactFuncSet()
if_def = IFDrought()
"""set impact function: for min: set_default; for sum-thr: set_default_sumthr; for sum: set_default_sum"""
#if_def.set_default()
#if_def.set_default_sumthr()
if_def.set_default_sum()
dr_if.append(if_def)

"""Initialize Exposure"""
exposure_agrar = SpamAgrar()
exposure_agrar.init_spam_agrar(country='CHE')

"""If intensity def is not default, exposure has to be adapted"""
"""In case of sum-thr: 'if_DR_sumthr', in case of sum:'if_DR_sum' """
#exposure_agrar['if_DR_sumthr'] = np.ones(exposure_agrar.shape[0])
exposure_agrar['if_DR_sum'] = np.ones(exposure_agrar.shape[0])

"""Initialize impact of the drought"""
imp_drought = Impact()

"""Calculate Damage for a specific event"""
imp_drought.calc(exposure_agrar, dr_if, new_haz)
Example No. 13
haz_real.check()
haz_synth.check()

if plot_img:
    haz_real.plot_intensity(event=0)
    haz_real.plot_fraction(event=0)

#%% Impact_function
# Set impact function (see tutorial climada_entity_ImpactFuncSet)
ifset_hail = ImpactFuncSet()
for imp_fun_dict in imp_fun_parameter:
    imp_fun = fct.create_impact_func(haz_type, imp_fun_dict["imp_id"],
                                     imp_fun_dict["L"], imp_fun_dict["x_0"],
                                     imp_fun_dict["k"])
    ifset_hail.append(imp_fun)
ifset_hail.plot()

#%% Exposure

exp_infr = fct.load_exp_infr(force_new_hdf5_generation, name_hdf5_file,
                             input_folder, haz_real)
exp_meshs = fct.load_exp_agr(force_new_hdf5_generation, name_hdf5_file,
                             input_folder, haz_real)
exp_dur = exp_meshs.copy()
exp_dur["if_HL"] = exp_dur[
    "if_HL"] + 3  #change if_HL to match the corresponding imp_id
if plot_img:
    exp_infr.plot_basemap()
    # This takes too long. Do it overnight!
    #exp_agr.plot_basemap()
Example No. 14
def calib_instance(hazard,
                   exposure,
                   impact_func,
                   df_out=pd.DataFrame(),
                   yearly_impact=False,
                   return_cost='False'):
    """calculate one impact instance for the calibration algorithm and write
        to given DataFrame

        Parameters
        ----------
        hazard : Hazard
        exposure : Exposure
        impact_func : ImpactFunc
        df_out : Dataframe, optional
            Output DataFrame with headers of columns defined and optionally with
            first row (index=0) defined with values. If columns "impact",
            "event_id", or "year" are not included, they are created here.
            Data like reported impacts or impact function parameters can be
            given here; values are preserved.
        yearly_impact : boolean, optional
            if set True, impact is returned per year, not per event
        return_cost : str, optional
            if not 'False' but any of 'R2', 'logR2',
            cost is returned instead of df_out

        Returns
        -------
        df_out: DataFrame
            DataFrame with modelled impact written to rows for each year
            or event.
    """
    IFS = ImpactFuncSet()
    IFS.append(impact_func)
    impacts = Impact()
    impacts.calc(exposure, IFS, hazard)
    if yearly_impact:  # impact per year
        IYS = impacts.calc_impact_year_set(all_years=True)
        # Loop over whole year range:
        if df_out.empty or df_out.index.shape[0] == 1:
            for cnt_, year in enumerate(np.sort(list((IYS.keys())))):
                if cnt_ > 0:
                    df_out.loc[cnt_] = df_out.loc[
                        0]  # copy info from first row
                if year in IYS:
                    df_out.loc[cnt_, 'impact_CLIMADA'] = IYS[year]
                else:
                    df_out.loc[cnt_, 'impact_CLIMADA'] = 0.0
                df_out.loc[cnt_, 'year'] = year
        else:
            years_in_common = df_out.loc[
                df_out['year'].isin(np.sort(list((IYS.keys())))), 'year']
            for cnt_, year in years_in_common.items():
                df_out.loc[df_out['year'] == year,
                           'impact_CLIMADA'] = IYS[year]

    else:  # impact per event
        if df_out.empty or df_out.index.shape[0] == 1:
            for cnt_, impact in enumerate(impacts.at_event):
                if cnt_ > 0:
                    df_out.loc[cnt_] = df_out.loc[
                        0]  # copy info from first row
                df_out.loc[cnt_, 'impact_CLIMADA'] = impact
                df_out.loc[cnt_, 'event_id'] = int(impacts.event_id[cnt_])
                df_out.loc[cnt_, 'event_name'] = impacts.event_name[cnt_]
                df_out.loc[cnt_, 'year'] = \
                    dt.datetime.fromordinal(impacts.date[cnt_]).year
                df_out.loc[cnt_, 'date'] = impacts.date[cnt_]
        elif df_out.index.shape[0] == impacts.at_event.shape[0]:
            for cnt_, (impact,
                       ind) in enumerate(zip(impacts.at_event, df_out.index)):
                df_out.loc[ind, 'impact_CLIMADA'] = impact
                df_out.loc[ind, 'event_id'] = int(impacts.event_id[cnt_])
                df_out.loc[ind, 'event_name'] = impacts.event_name[cnt_]
                df_out.loc[ind, 'year'] = \
                    dt.datetime.fromordinal(impacts.date[cnt_]).year
                df_out.loc[ind, 'date'] = impacts.date[cnt_]
        else:
            raise ValueError('adding simulated impacts to reported impacts not'
                             ' yet implemented. use yearly_impact=True or run'
                             ' without init_impact_data.')
    if not return_cost == 'False':
        df_out = calib_cost_calc(df_out, return_cost)
    return df_out
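A short sketch of how this variant might be called with reported impacts; df_reported is an assumed DataFrame that already holds a 'year' column, and the 'R2' cost option is taken from the docstring above.

# Hypothetical usage sketch: df_reported is assumed to contain reported yearly
# impacts with a 'year' column; haz, exp and impf_tc as in the other examples.
cost = calib_instance(haz, exp, impf_tc,
                      df_out=df_reported,
                      yearly_impact=True,
                      return_cost='R2')
print(cost)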
Example No. 15
    def test_Forecast_plot(self):
        """Test cplotting functions from the Forecast class"""
        #hazard
        haz1 = StormEurope()
        haz1.read_cosmoe_file(
            HAZ_DIR.joinpath('storm_europe_cosmoe_forecast_vmax_testfile.nc'),
            run_datetime=dt.datetime(2018, 1, 1),
            event_date=dt.datetime(2018, 1, 3))
        haz1.centroids.lat += 0.6
        haz1.centroids.lon -= 1.2
        haz2 = StormEurope()

        haz2.read_cosmoe_file(
            HAZ_DIR.joinpath('storm_europe_cosmoe_forecast_vmax_testfile.nc'),
            run_datetime=dt.datetime(2018, 1, 1),
            event_date=dt.datetime(2018, 1, 3))
        haz2.centroids.lat += 0.6
        haz2.centroids.lon -= 1.2
        #exposure
        data = {}
        data['latitude'] = haz1.centroids.lat
        data['longitude'] = haz1.centroids.lon
        data['value'] = np.ones_like(data['latitude']) * 100000
        data['deductible'] = np.zeros_like(data['latitude'])
        data[INDICATOR_IMPF + 'WS'] = np.ones_like(data['latitude'])
        data['region_id'] = np.ones_like(data['latitude'], dtype=int) * 756
        expo = Exposures(gpd.GeoDataFrame(data=data))
        #vulnerability
        #generate vulnerability
        impact_function = ImpfStormEurope()
        impact_function.set_welker()
        impact_function_set = ImpactFuncSet()
        impact_function_set.append(impact_function)
        #create and calculate Forecast
        forecast = Forecast(
            {
                dt.datetime(2018, 1, 2): haz1,
                dt.datetime(2017, 12, 31): haz2
            }, expo, impact_function_set)
        forecast.calc()
        #test plotting functions
        forecast.plot_imp_map(run_datetime=dt.datetime(2017, 12, 31),
                              save_fig=False,
                              close_fig=True)
        forecast.plot_hist(run_datetime=dt.datetime(2017, 12, 31),
                           save_fig=False,
                           close_fig=True)
        forecast.plot_exceedence_prob(run_datetime=dt.datetime(2017, 12, 31),
                                      threshold=5000,
                                      save_fig=False,
                                      close_fig=True)

        #create a file containing the polygons of Swiss cantons using natural earth
        cantons_file = CONFIG.local_data.save_dir.dir() / 'CHE_cantons.shp'
        adm1_shape_file = shapereader.natural_earth(
            resolution='10m',
            category='cultural',
            name='admin_1_states_provinces')
        if not cantons_file.exists():
            with fiona.open(adm1_shape_file, 'r') as source:
                with fiona.open(cantons_file, 'w', **source.meta) as sink:
                    for f in source:
                        if f['properties']['adm0_a3'] == 'CHE':
                            sink.write(f)
        forecast.plot_warn_map(
            str(cantons_file),
            decision_level='polygon',
            thresholds=[100000, 500000, 1000000, 5000000],
            probability_aggregation='mean',
            area_aggregation='sum',
            title="Building damage warning",
            explain_text="warn level based on aggregated damages",
            save_fig=False,
            close_fig=True)
        forecast.plot_warn_map(
            str(cantons_file),
            decision_level='exposure_point',
            thresholds=[1, 1000, 5000, 5000000],
            probability_aggregation=0.2,
            area_aggregation=0.2,
            title="Building damage warning",
            explain_text="warn level based on aggregated damages",
            run_datetime=dt.datetime(2017, 12, 31),
            save_fig=False,
            close_fig=True)
        forecast.plot_hexbin_ei_exposure()
        plt.close()
        with self.assertRaises(ValueError):
            forecast.plot_warn_map(
                str(cantons_file),
                decision_level='test_fail',
                probability_aggregation=0.2,
                area_aggregation=0.2,
                title="Building damage warning",
                explain_text="warn level based on aggregated damages",
                save_fig=False,
                close_fig=True)
        plt.close()
        with self.assertRaises(ValueError):
            forecast.plot_warn_map(
                str(cantons_file),
                decision_level='exposure_point',
                probability_aggregation='test_fail',
                area_aggregation=0.2,
                title="Building damage warning",
                explain_text="warn level based on aggregated damages",
                save_fig=False,
                close_fig=True)
        plt.close()
        with self.assertRaises(ValueError):
            forecast.plot_warn_map(
                str(cantons_file),
                decision_level='exposure_point',
                probability_aggregation=0.2,
                area_aggregation='test_fail',
                title="Building damage warning",
                explain_text="warn level based on aggregated damages",
                save_fig=False,
                close_fig=True)
        plt.close()
Example No. 16
def call_impact_functions(with_without_error):
    """get curve for the impact function:

                        Parameters:

                            with_without_error (bool): rather to give best estimate or to add a random variation. Default: True
                        Returns: climada impact functions set

                              """

    # get the data from the studies:
    directory_if = '../../input_data/impact_functions/'

    file_low = pd.read_csv(''.join([directory_if, 'impact_low.csv']))
    function_low = impact_functions_random(file_low, 'low', with_without_error)

    file_moderate = pd.read_csv(''.join([directory_if, 'impact_moderate.csv']))
    function_moderate = impact_functions_random(file_moderate, 'moderate',
                                                with_without_error)

    file_high = pd.read_csv(''.join([directory_if, 'impact_high.csv']))
    function_high = impact_functions_random(file_high, 'high',
                                            with_without_error)

    # make impact function set:

    if_heat_set = ImpactFuncSet()
    x = np.linspace(20, 40, num=30)

    if_heat1 = ImpactFunc()
    if_heat1.haz_type = 'heat'
    if_heat1.id = 1
    if_heat1.name = 'low physical activity'
    if_heat1.intensity_unit = 'Degrees C'
    if_heat1.intensity = x
    if_heat1.mdd = (sigmoid(x, *function_low)) / 100
    if_heat1.mdd[if_heat1.mdd < 0] = 0  # to avoid having negative values
    if_heat1.mdd[if_heat1.mdd > 100] = 100  # to avoid having values over 100
    if_heat1.paa = np.linspace(1, 1, num=30)
    if_heat_set.append(if_heat1)

    if_heat2 = ImpactFunc()
    if_heat2.haz_type = 'heat'
    if_heat2.id = 2
    if_heat2.name = 'medium physical activity'
    if_heat2.intensity_unit = 'Degrees C'
    if_heat2.intensity = x
    if_heat2.mdd = (sigmoid(x, *function_moderate)) / 100
    if_heat2.mdd[if_heat2.mdd < 0] = 0
    if_heat2.mdd[if_heat2.mdd > 100] = 100
    if_heat2.paa = np.linspace(1, 1, num=30)
    if_heat_set.append(if_heat2)

    if_heat3 = ImpactFunc()
    if_heat3.haz_type = 'heat'
    if_heat3.id = 3
    if_heat3.name = 'high physical activity'
    if_heat3.intensity_unit = 'Degrees C'
    if_heat3.intensity = x
    if_heat3.mdd = (sigmoid(x, *function_high)) / 100
    if_heat3.mdd[if_heat3.mdd < 0] = 0
    if_heat3.mdd[if_heat3.mdd > 100] = 100
    if_heat3.paa = np.linspace(1, 1, num=30)
    if_heat_set.append(if_heat3)

    return if_heat_set
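A brief usage sketch, assuming the input CSV files referenced above are available:

# Usage sketch: build the heat impact function set from the best estimates
# (no random variation) and inspect it.
if_heat = call_impact_functions(with_without_error=False)
if_heat.check()
if_heat.plot()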
Example No. 17
    def calc_sector_direct_impact(self):
        """Test running direct impact calculations."""

        sup = SupplyChain()
        sup.read_wiod16(year='test',
                        range_rows=(5, 117),
                        range_cols=(4, 116),
                        col_iso3=2,
                        col_sectors=1)

        # Tropical cyclone over Florida and Caribbean
        hazard = Hazard('TC')
        hazard.read_mat(HAZ_TEST_MAT)

        # Read demo entity values
        # Set the entity default file to the demo one
        exp = Exposures()
        exp.read_hdf5(EXP_DEMO_H5)
        exp.check()
        exp.gdf.region_id = 840  # assign the right region_id for the USA
        exp.assign_centroids(hazard)

        impf_tc = IFTropCyclone()
        impf_tc.set_emanuel_usa()
        impf_set = ImpactFuncSet()
        impf_set.append(impf_tc)
        impf_set.check()

        subsecs = list(range(10)) + list(range(15, 25))
        sup.calc_sector_direct_impact(hazard,
                                      exp,
                                      impf_set,
                                      selected_subsec=subsecs)
        self.assertAlmostEqual((sup.years.shape[0], sup.mriot_data.shape[0]),
                               sup.direct_impact.shape)
        self.assertAlmostEqual(sup.direct_impact.sum(),
                               sup.direct_impact[:, sup.reg_pos['USA']].sum(),
                               places=3)
        self.assertAlmostEqual((sup.mriot_data.shape[0], ),
                               sup.direct_aai_agg.shape)
        self.assertAlmostEqual(sup.direct_aai_agg.sum(),
                               sup.direct_aai_agg[sup.reg_pos['USA']].sum(),
                               places=3)
        self.assertAlmostEqual(sup.reg_dir_imp[0], 'USA')
        self.assertAlmostEqual(sup.direct_impact.sum(),
                               sup.direct_impact[:, subsecs].sum(),
                               places=3)
        self.assertAlmostEqual(sup.direct_aai_agg.sum(),
                               sup.direct_aai_agg[subsecs].sum(),
                               places=3)

        sup.calc_sector_direct_impact(hazard,
                                      exp,
                                      impf_set,
                                      selected_subsec='manufacturing')
        self.assertAlmostEqual((sup.years.shape[0], sup.mriot_data.shape[0]),
                               sup.direct_impact.shape)
        self.assertAlmostEqual(sup.direct_impact.sum(),
                               sup.direct_impact[:, sup.reg_pos['USA']].sum(),
                               places=3)
        self.assertAlmostEqual((sup.mriot_data.shape[0], ),
                               sup.direct_aai_agg.shape)
        self.assertAlmostEqual(sup.direct_aai_agg.sum(),
                               sup.direct_aai_agg[sup.reg_pos['USA']].sum(),
                               places=3)
        self.assertAlmostEqual(sup.reg_dir_imp[0], 'USA')
        self.assertAlmostEqual(sup.direct_impact.sum(),
                               sup.direct_impact[:, range(4, 23)].sum(),
                               places=3)
        self.assertAlmostEqual(sup.direct_aai_agg.sum(),
                               sup.direct_aai_agg[range(4, 23)].sum(),
                               places=3)

        sup.calc_sector_direct_impact(hazard,
                                      exp,
                                      impf_set,
                                      selected_subsec='agriculture')
        self.assertAlmostEqual((sup.years.shape[0], sup.mriot_data.shape[0]),
                               sup.direct_impact.shape)
        self.assertAlmostEqual(sup.direct_impact.sum(),
                               sup.direct_impact[:, sup.reg_pos['USA']].sum(),
                               places=3)
        self.assertAlmostEqual((sup.mriot_data.shape[0], ),
                               sup.direct_aai_agg.shape)
        self.assertAlmostEqual(sup.direct_aai_agg.sum(),
                               sup.direct_aai_agg[sup.reg_pos['USA']].sum(),
                               places=3)
        self.assertAlmostEqual(sup.direct_impact.sum(),
                               sup.direct_impact[:, range(0, 1)].sum(),
                               places=3)
        self.assertAlmostEqual(sup.direct_aai_agg.sum(),
                               sup.direct_aai_agg[range(0, 1)].sum(),
                               places=3)

        sup.calc_sector_direct_impact(hazard,
                                      exp,
                                      impf_set,
                                      selected_subsec='mining')
        self.assertAlmostEqual((sup.years.shape[0], sup.mriot_data.shape[0]),
                               sup.direct_impact.shape)
        self.assertAlmostEqual(sup.direct_impact.sum(),
                               sup.direct_impact[:, sup.reg_pos['USA']].sum(),
                               places=3)
        self.assertAlmostEqual((sup.mriot_data.shape[0], ),
                               sup.direct_aai_agg.shape)
        self.assertAlmostEqual(sup.direct_aai_agg.sum(),
                               sup.direct_aai_agg[sup.reg_pos['USA']].sum(),
                               places=3)
        self.assertAlmostEqual(sup.direct_impact.sum(),
                               sup.direct_impact[:, range(3, 4)].sum(),
                               places=3)
        self.assertAlmostEqual(sup.direct_aai_agg.sum(),
                               sup.direct_aai_agg[range(3, 4)].sum(),
                               places=3)

        sup.calc_sector_direct_impact(hazard,
                                      exp,
                                      impf_set,
                                      selected_subsec='service')
        self.assertAlmostEqual((sup.years.shape[0], sup.mriot_data.shape[0]),
                               sup.direct_impact.shape)
        self.assertAlmostEqual(sup.direct_impact.sum(),
                               sup.direct_impact[:, sup.reg_pos['USA']].sum(),
                               places=3)
        self.assertAlmostEqual((sup.mriot_data.shape[0], ),
                               sup.direct_aai_agg.shape)
        self.assertAlmostEqual(sup.direct_aai_agg.sum(),
                               sup.direct_aai_agg[sup.reg_pos['USA']].sum(),
                               places=3)
        self.assertAlmostEqual(sup.direct_impact.sum(),
                               sup.direct_impact[:, range(26, 56)].sum(),
                               places=3)
        self.assertAlmostEqual(sup.direct_aai_agg.sum(),
                               sup.direct_aai_agg[range(26, 56)].sum(),
                               places=3)
Example No. 18
def make_Y(parameter, *args):
    """
    Score function for the optimization process.
    Multiple scoring options are available (Spearman, Pearson, RMSE, RMSF).
    Parameters
    ----------
    parameter : np.ndarray
        array containing parameter that are optimized.
    *args : 
        imp_fun_parameter: dict
            Contains ind and Parameter for Impact function
        exp: climada.entity.exposures.base.Exposures
            CLIMADA Exposure.
        haz: climada.hazard.base.Hazard
            CLIMADA hazard
        haz_type: str
            Type of Hazard ("HL")
        num_fct: int
            number of impact functions (1 or 3)

    Returns
    -------
    score: float
        Variable that is minimized by the optimization. Multiple score types are possible.
    """
    # *args = imp_fun_parameter, exp, agr, haz_type
    # a = time.perf_counter()
    parameter_optimize, exp, haz, haz_type, num_fct, score_type, type_imp_fun = args
    ifset_hail = ImpactFuncSet()
    if type_imp_fun == "sig":
        if num_fct == 1:
            parameter_optimize[0]["L"] = parameter[0]
            parameter_optimize[0]["x_0"] = parameter[1]
            parameter_optimize[0]["k"] = parameter[2]
        else:
            parameter_optimize[0]["L"] = parameter[0]
            parameter_optimize[0]["x_0"] = parameter[1]
            parameter_optimize[0]["k"] = parameter[2]
            parameter_optimize[1]["L"] = parameter[3]
            parameter_optimize[1]["x_0"] = parameter[4]
            parameter_optimize[1]["k"] = parameter[5]
            parameter_optimize[2]["L"] = parameter[6]
            parameter_optimize[2]["x_0"] = parameter[7]
            parameter_optimize[2]["k"] = parameter[8]
        # b = time.perf_counter()
        # print("time to write parameter_optimize: ", b-a)
        for imp_fun_dict in parameter_optimize:
            imp_fun = create_impact_func(haz_type, 
                                 imp_fun_dict["imp_id"], 
                                 imp_fun_dict["L"], 
                                 imp_fun_dict["x_0"], 
                                 imp_fun_dict["k"])
            ifset_hail.append(imp_fun)
    elif type_imp_fun == "lin":
        parameter_optimize[0]["m"] = parameter[0]
        imp_fun = create_impact_func_lin(haz_type,
                                         parameter_optimize[0]["imp_id"], 
                                         m = parameter[0])
        ifset_hail.append(imp_fun)
    c  = time.perf_counter()
    # print("time to make imp_fun: ", c-b)
    
    imp = Impact()
    # imp.calc(self = imp, exposures = exp, impact_funcs = ifset_hail, hazard = haz, save_mat = True)
    imp.calc(exp, ifset_hail, haz, save_mat = False)
    d = time.perf_counter()
    print("time to calc impact: ", d-c)
    Y = list(imp.calc_impact_year_set(year_range = [2002, 2019]).values())
    all_eq = 0
    
    # Quick bugfix: there were problems when all Y values were 0,
    # so this checks whether that is the case and changes the last value if so
    for count, y in enumerate(Y):
        if y==0:
            all_eq += 1
            Y[count] = 0.1
    if all_eq == len(Y):
        Y[-1] = 0.2
            
    Y_norm = np.divide(Y, min(Y))
    Observ = [27.48, 46.14, 80.67, 76.80, 32.66, 62.47, 26.30, 110.60, 13.01,
              34.53, 21.50, 71.77, 22.80, 19.84, 17.50, 35.80, 24.40, 33.30]
    O_norm = np.divide(Observ, min(Observ))
    # res = mean_squared_error(Y_norm, O_norm)**0.5
    rmsf = RMSF(Y_norm, O_norm)
    rmse = mean_squared_error(O_norm, Y_norm)
    print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
    print("Params {}".format(parameter_optimize))
    print("The sum of the new Impact is: {}".format(sum(Y)))
    spear_coef, spear_p_value = spearmanr(O_norm, Y_norm)    
    print("spearman for agr  (score, p_value) = ({}, {})".format(spear_coef, spear_p_value))

    pears_coef, pears_p_value = stats.pearsonr(O_norm, Y_norm)   
    print("pearson for agr  (score, p_value) = ({}, {})".format(pears_coef, pears_p_value))
    print("RMSF: ", rmsf)
    print("RMSE", rmse)
    print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
    # e= time.perf_counter()
    # print("time to get result: ", e-d)
    if score_type == "pearson":
        score = pears_coef * -1
    elif score_type == "spearman":
        score = spear_coef * -1
    elif score_type == "RMSF":
        score = rmsf
    elif score_type == "RMSE":
        score = rmse
    return score
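A hedged sketch of how make_Y could be driven by an optimizer; the argument tuple mirrors the unpacking at the top of the function, while the initial guess and the hail objects are illustrative assumptions.

# Hypothetical optimization sketch: parameter_optimize, exp_meshs and haz_real
# are assumed to be prepared as in the hail examples; x0 is an illustrative
# initial guess for (L, x_0, k) of a single sigmoid impact function.
from scipy.optimize import minimize

x0 = [2.0, 80.0, 0.5]
args = (parameter_optimize, exp_meshs, haz_real, 'HL', 1, 'RMSF', 'sig')
res = minimize(make_Y, x0, args=args, method='Nelder-Mead')
print(res.x, res.fun)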
Example No. 19
                       input_folder, years)

haz_real.check()

if plot_img:
    haz_real.plot_intensity(event=0)
    haz_real.plot_fraction(event=0)

#%% Impact_function
# Set impact function (see tutorial climada_entity_ImpactFuncSet)
ifset_hail = ImpactFuncSet()
for imp_fun_dict in imp_fun_parameter:
    imp_fun = fct.create_impact_func(haz_type, imp_fun_dict["imp_id"],
                                     imp_fun_dict["L"], imp_fun_dict["x_0"],
                                     imp_fun_dict["k"])
    ifset_hail.append(imp_fun)
if plot_img:
    ifset_hail.plot()

#%% Exposure

exp_infr = fct.load_exp_infr(force_new_hdf5_generation, name_hdf5_file,
                             input_folder, haz_real)
exp_meshs = fct.load_exp_agr(force_new_hdf5_generation, name_hdf5_file,
                             input_folder, haz_real)
exp_dur = exp_meshs.copy()
exp_dur["if_HL"] = exp_dur[
    "if_HL"] + 3  #change if_HL to match the corresponding imp_id
if plot_img:
    exp_infr.plot_basemap()
    # This takes too long. Do it overnight!