def test_switzerland(self):
    drought = Drought()
    drought.set_area(44.5, 5, 50, 12)

    hazard_set = drought.setup()

    imp_drought = Impact()
    dr_if = ImpactFuncSet()
    if_def = IFDrought()
    if_def.set_default()
    dr_if.append(if_def)

    exposure_agrar = SpamAgrar()
    exposure_agrar.init_spam_agrar(country='CHE')
    exposure_agrar.assign_centroids(hazard_set)
    imp_drought.calc(exposure_agrar, dr_if, hazard_set)

    index_event_start = imp_drought.event_name.index('2003')
    damages_drought = imp_drought.at_event[index_event_start]

    self.assertEqual(hazard_set.tag.haz_type, 'DR')
    self.assertEqual(hazard_set.size, 114)
    self.assertEqual(hazard_set.centroids.size, 130)
    self.assertEqual(exposure_agrar.latitude.values.size, 766 / 2)
    self.assertEqual(exposure_agrar.value[3], 1720024.4)
    self.assertEqual(damages_drought, 61995472.555223145)
def test_EU(self):
    """test with demo data containing France and Germany"""
    bbox = [-5, 42, 16, 55]
    haz = RelativeCropyield()
    haz.set_from_single_run(input_dir=INPUT_DIR, yearrange=(2001, 2005), bbox=bbox,
                            ag_model='lpjml', cl_model='ipsl-cm5a-lr',
                            scenario='historical', soc='2005soc', co2='co2',
                            crop='whe', irr='noirr', fn_str_var=FN_STR_DEMO)
    hist_mean = haz.calc_mean(yearrange_mean=(2001, 2005))
    haz.set_rel_yield_to_int(hist_mean)
    haz.centroids.set_region_id()

    exp = CropProduction()
    exp.set_from_single_run(input_dir=INPUT_DIR, filename=FILENAME_LU,
                            hist_mean=FILENAME_MEAN, bbox=bbox,
                            yearrange=(2001, 2005), scenario='flexible',
                            unit='t', crop='whe', irr='firr')
    exp.set_to_usd(INPUT_DIR)
    exp.assign_centroids(haz, threshold=20)

    if_cp = ImpactFuncSet()
    if_def = IFRelativeCropyield()
    if_def.set_relativeyield()
    if_cp.append(if_def)
    if_cp.check()

    impact = Impact()
    impact.calc(exp.loc[exp.region_id == 276], if_cp, haz.select(['2002']),
                save_mat=True)

    exp_manual = exp.value.loc[exp.region_id == 276].values
    impact_manual = haz.select(event_names=['2002'],
                               reg_id=276).intensity.multiply(exp_manual)
    dif = (impact_manual - impact.imp_mat).data

    self.assertEqual(haz.tag.haz_type, 'RC')
    self.assertEqual(haz.size, 5)
    self.assertEqual(haz.centroids.size, 1092)
    self.assertAlmostEqual(haz.intensity.mean(), -2.0489097e-08)
    self.assertAlmostEqual(exp.value.max(), 53074789.755290434)
    self.assertEqual(exp.latitude.values.size, 1092)
    self.assertAlmostEqual(exp.value[3], 0.0)
    self.assertAlmostEqual(exp.value[1077], 405026.6857207429)
    self.assertAlmostEqual(impact.imp_mat.data[3], -176102.5359452465)
    self.assertEqual(len(dif), 0)
def plot_percen(irma_tc, exp, if_exp, axs):
    """Plot irma damage in %."""
    # south
    extent = [
        exp.longitude.min() - BUFFER_DEG,
        exp.longitude.max() + BUFFER_DEG,
        exp.latitude.min() - BUFFER_DEG,
        exp.latitude.max() + BUFFER_DEG
    ]
    axs.set_extent(extent)
    u_plot.add_shapes(axs)

    imp_irma = Impact()
    imp_irma.calc(exp, if_exp, irma_tc)
    imp_irma.eai_exp[exp.value > 0] = \
        imp_irma.eai_exp[exp.value > 0] / exp.value[exp.value > 0] * 100
    imp_irma.eai_exp[exp.value == 0] = 0.
    sel_exp = imp_irma.eai_exp > 0
    im = axs.hexbin(exp.longitude[sel_exp], exp.latitude[sel_exp],
                    C=imp_irma.eai_exp[sel_exp],
                    reduce_C_function=np.average,
                    transform=ccrs.PlateCarree(), gridsize=2000,
                    cmap='YlOrRd', vmin=0, vmax=50)
    axs.set_title('')
    axs.grid(False)
    scale_bar(axs, (0.90, 0.90), 10)
    return im
def test_impact(self):
    ent = Entity.from_excel(ENT_DEMO_TODAY)
    ent.check()
    hazard = Hazard.from_mat(HAZ_TEST_MAT)
    impact = Impact()
    ent.exposures.assign_centroids(hazard)
    impact.calc(ent.exposures, ent.impact_funcs, hazard)
    return impact
def test_EU_nan(self):
    """Test whether setting the zeros in exp.value to NaN changes the impact"""
    bbox = [0, 42, 10, 52]
    haz = RelativeCropyield()
    haz.set_from_isimip_netcdf(input_dir=INPUT_DIR, yearrange=(2001, 2005), bbox=bbox,
                               ag_model='lpjml', cl_model='ipsl-cm5a-lr',
                               scenario='historical', soc='2005soc', co2='co2',
                               crop='whe', irr='noirr', fn_str_var=FN_STR_DEMO)
    hist_mean = haz.calc_mean(yearrange_mean=(2001, 2005))
    haz.set_rel_yield_to_int(hist_mean)
    haz.centroids.set_region_id()

    exp = CropProduction()
    exp.set_from_isimip_netcdf(input_dir=INPUT_DIR, filename=FILENAME_LU,
                               hist_mean=FILENAME_MEAN, bbox=bbox,
                               yearrange=(2001, 2005), scenario='flexible',
                               unit='t/y', crop='whe', irr='firr')
    exp.assign_centroids(haz, threshold=20)

    impf_cp = ImpactFuncSet()
    impf_def = ImpfRelativeCropyield()
    impf_def.set_relativeyield()
    impf_cp.append(impf_def)
    impf_cp.check()

    impact = Impact()
    impact.calc(exp, impf_cp, haz, save_mat=True)

    exp_nan = CropProduction()
    exp_nan.set_from_isimip_netcdf(input_dir=INPUT_DIR, filename=FILENAME_LU,
                                   hist_mean=FILENAME_MEAN, bbox=[0, 42, 10, 52],
                                   yearrange=(2001, 2005), scenario='flexible',
                                   unit='t/y', crop='whe', irr='firr')
    exp_nan.gdf.value[exp_nan.gdf.value == 0] = np.nan
    exp_nan.assign_centroids(haz, threshold=20)

    impact_nan = Impact()
    impact_nan.calc(exp_nan, impf_cp, haz, save_mat=True)

    self.assertListEqual(list(impact.at_event), list(impact_nan.at_event))
    self.assertAlmostEqual(12.056545220060798, impact_nan.aai_agg)
    self.assertAlmostEqual(12.056545220060798, impact.aai_agg)
def test_calib_instance(self):
    """Test save calib instance"""
    # Read default entity values
    ent = Entity()
    ent.read_excel(ENT_DEMO_TODAY)
    ent.check()

    # Read default hazard file
    hazard = Hazard('TC')
    hazard.read_mat(HAZ_TEST_MAT)

    # get impact function from set
    imp_func = ent.impact_funcs.get_func(hazard.tag.haz_type,
                                         ent.exposures.if_TC.median())

    # Assign centroids to exposures
    ent.exposures.assign_centroids(hazard)

    # create input frame
    df_in = pd.DataFrame.from_dict({
        'v_threshold': [25.7],
        'other_param': [2],
        'hazard': [HAZ_TEST_MAT]
    })
    df_in_yearly = pd.DataFrame.from_dict({
        'v_threshold': [25.7],
        'other_param': [2],
        'hazard': [HAZ_TEST_MAT]
    })

    # Compute the impact over the whole exposures
    df_out = calib_instance(hazard, ent.exposures, imp_func, df_in)
    df_out_yearly = calib_instance(hazard, ent.exposures, imp_func,
                                   df_in_yearly, yearly_impact=True)

    # calc Impact as comparison
    impact = Impact()
    impact.calc(ent.exposures, ent.impact_funcs, hazard)
    IYS = impact.calc_impact_year_set(all_years=True)

    # do the tests
    self.assertTrue(isinstance(df_out, pd.DataFrame))
    self.assertTrue(isinstance(df_out_yearly, pd.DataFrame))
    self.assertEqual(df_out.shape[0], hazard.event_id.size)
    self.assertEqual(df_out_yearly.shape[0], 161)
    self.assertTrue(all(df_out['event_id'] == hazard.event_id))
    self.assertTrue(
        all(df_out[df_in.columns[0]].isin(df_in[df_in.columns[0]])))
    self.assertTrue(
        all(df_out_yearly[df_in.columns[1]].isin(df_in[df_in.columns[1]])))
    self.assertTrue(
        all(df_out_yearly[df_in.columns[2]].isin(df_in[df_in.columns[2]])))
    self.assertTrue(
        all(df_out['impact_CLIMADA'].values == impact.at_event))
    self.assertTrue(
        all(df_out_yearly['impact_CLIMADA'].values == [*IYS.values()]))
def calib_instance(hazard, exposure, impact_func, df_out=pd.DataFrame(),
                   yearly_impact=False):
    """Calculate one impact instance for the calibration algorithm and write
    it to the given DataFrame.

    Parameters:
        hazard: hazard set instance
        exposure: exposure set instance
        impact_func: impact function instance

    Optional Parameters:
        df_out: Output DataFrame with headers of columns defined and optionally
            with first row (index=0) defined with values. If columns "impact",
            "event_id", or "year" are not included, they are created here.
            Data like reported impacts or impact function parameters can be
            given here; values are preserved.
        yearly_impact (boolean): if set True, impact is returned per year,
            not per event

    Returns:
        df_out: DataFrame with modelled impact written to rows for each year
            or event.
    """
    IFS = ImpactFuncSet()
    IFS.append(impact_func)
    impacts = Impact()
    impacts.calc(exposure, IFS, hazard)
    if yearly_impact:  # impact per year
        IYS = impacts.calc_impact_year_set(all_years=True)
        # Loop over whole year range:
        for cnt_, year in enumerate(np.sort(list(IYS.keys()))):
            if cnt_ > 0:
                df_out.loc[cnt_] = df_out.loc[0]  # copy info from first row
            if year in IYS:
                df_out.loc[cnt_, 'impact'] = IYS[year]
            else:
                df_out.loc[cnt_, 'impact'] = 0
            df_out.loc[cnt_, 'year'] = year
    else:  # impact per event
        for cnt_, impact in enumerate(impacts.at_event):
            if cnt_ > 0:
                df_out.loc[cnt_] = df_out.loc[0]  # copy info from first row
            df_out.loc[cnt_, 'impact'] = impact
            df_out.loc[cnt_, 'event_id'] = int(impacts.event_id[cnt_])
            df_out.loc[cnt_, 'event_name'] = impacts.event_name[cnt_]
            df_out.loc[cnt_, 'year'] = \
                dt.datetime.fromordinal(impacts.date[cnt_]).year
    return df_out
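# Hedged usage sketch (not part of the original code): calling calib_instance
# with the demo entity and hazard used in the tests above. ENT_DEMO_TODAY and
# HAZ_TEST_MAT are assumed to point to the CLIMADA demo files; the extra
# DataFrame column 'v_threshold' is illustrative only.
ent = Entity()
ent.read_excel(ENT_DEMO_TODAY)
hazard = Hazard('TC')
hazard.read_mat(HAZ_TEST_MAT)
imp_func = ent.impact_funcs.get_func(hazard.tag.haz_type,
                                     ent.exposures.if_TC.median())
ent.exposures.assign_centroids(hazard)
df_yearly = calib_instance(hazard, ent.exposures, imp_func,
                           pd.DataFrame({'v_threshold': [25.7]}),
                           yearly_impact=True)
print(df_yearly.head())  # one row per year with the modelled 'impact' column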
def test_full_impact(self):
    """test full flood impact"""
    testRF = RiverFlood()
    testRF.set_from_nc(dph_path=HAZ_DEMO_FLDDPH, frc_path=HAZ_DEMO_FLDFRC,
                       countries=['CHE'])
    gdpa = GDP2Asset()
    gdpa.set_countries(countries=['CHE'], ref_year=2000, path=DEMO_GDP2ASSET)
    if_set = flood_imp_func_set()

    imp = Impact()
    imp.calc(gdpa, if_set, testRF)

    self.assertAlmostEqual(imp.at_event[0], 226839.72426476143)
    self.assertAlmostEqual(gdpa.gdf['if_RF'].iloc[0], 3.0)
def _map_impact_calc(self, sample_iterrows):
    """Map to compute impact for all parameter samples in parallel.

    Parameters
    ----------
    sample_iterrows : pd.DataFrame.iterrows()
        Generator of the parameter samples

    Returns
    -------
    : list
        impact metrics list for all samples containing aai_agg, rp_curve,
        eai_exp (np.array([]) if self.calc_eai_exp=False) and
        at_event (np.array([]) if self.calc_at_event=False).
    """
    # [1] only the rows of the dataframe passed by pd.DataFrame.iterrows()
    exp_samples = sample_iterrows[1][self.unc_vars['exp'].labels].to_dict()
    haz_samples = sample_iterrows[1][self.unc_vars['haz'].labels].to_dict()
    impf_samples = sample_iterrows[1][self.unc_vars['impf'].labels].to_dict()

    exp = self.unc_vars['exp'].uncvar_func(**exp_samples)
    haz = self.unc_vars['haz'].uncvar_func(**haz_samples)
    impf = self.unc_vars['impf'].uncvar_func(**impf_samples)

    imp = Impact()
    imp.calc(exposures=exp, impact_funcs=impf, hazard=haz)

    # Extract from climada.impact the chosen metrics
    freq_curve = imp.calc_freq_curve(self.rp).impact

    if self.calc_eai_exp:
        eai_exp = imp.eai_exp
    else:
        eai_exp = np.array([])

    if self.calc_at_event:
        at_event = imp.at_event
    else:
        at_event = np.array([])

    return [imp.aai_agg, freq_curve, eai_exp, at_event, imp.tot_value]
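# Hedged sketch (assumption, not from the original source): how the per-sample
# metric lists returned by _map_impact_calc could be consumed. `unc_calc` and
# `samples_df` are placeholders for the uncertainty object and its parameter
# sample DataFrame prepared elsewhere.
metrics = [unc_calc._map_impact_calc(row) for row in samples_df.iterrows()]
aai_aggs = np.array([m[0] for m in metrics])      # aai_agg per sample
freq_curves = np.array([m[1] for m in metrics])   # impact at the return periods in self.rp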
def calc_imp(expo_dict, tc_dict, data_dir):
    """Compute impacts of TCs in every island group."""
    try:
        abs_path = os.path.join(data_dir, 'imp_isl.p')
        with open(abs_path, 'rb') as f:
            imp_dict = pickle.load(f)
        print('Loaded imp_isl:', len(imp_dict))
    except FileNotFoundError:
        if_exp = ImpactFuncSet()
        if_em = IFTropCyclone()
        if_em.set_emanuel_usa()
        if_exp.add_func(if_em)

        imp_dict = dict()
        for isl_iso in expo_dict:
            imp = Impact()
            imp.calc(expo_dict[isl_iso], if_exp, tc_dict[isl_iso])
            imp_dict[isl_iso] = imp

        save(os.path.join(data_dir, 'imp_isl.p'), imp_dict)

    return imp_dict
def plot_right(irma_tc, exp, ax, scale_pos, plot_line=False):
    """Plot irma damage in USD."""
    if_exp = ImpactFuncSet()
    if_em = IFTropCyclone()
    if_em.set_emanuel_usa()
    if_exp.append(if_em)

    imp_irma = Impact()
    imp_irma.calc(exp, if_exp, irma_tc)
    extent = [
        exp.longitude.min() - BUFFER_DEG,
        exp.longitude.max() + BUFFER_DEG,
        exp.latitude.min() - BUFFER_DEG,
        exp.latitude.max() + BUFFER_DEG
    ]
    ax.set_extent(extent)
    u_plot.add_shapes(ax)
    sel_pos = np.argwhere(imp_irma.eai_exp > 0)[:, 0]
    hex_bin = ax.hexbin(imp_irma.coord_exp[sel_pos, 1],
                        imp_irma.coord_exp[sel_pos, 0],
                        C=imp_irma.eai_exp[sel_pos],
                        reduce_C_function=np.average,
                        transform=ccrs.PlateCarree(), gridsize=2000,
                        norm=LogNorm(vmin=MIN_VAL, vmax=MAX_VAL),
                        cmap='YlOrRd', vmin=MIN_VAL, vmax=MAX_VAL)
    ax.set_title('')
    ax.grid(False)
    add_cntry_names(ax, extent)
    scale_bar(ax, scale_pos, 10)
    if plot_line:
        x1, y1 = [-64.57, -64.82], [18.28, 18.47]
        ax.plot(x1, y1, linewidth=1.0, color='grey', linestyle='--')
    return hex_bin
def plot_event(name, ifset_hail, haz_real, haz_dur, exp_infr, exp_meshs,
               exp_dur, plot_img):
    print("Event Analysis for {}".format(name))
    ev_id = haz_real.get_event_id(event_name=name)
    meshs_intensity = haz_real.intensity[ev_id].todense().astype(int)
    meshs_intensity_no_0 = np.array(
        meshs_intensity[meshs_intensity != 0]).ravel()
    # remove outliers
    meshs_intensity_no_0 = np.delete(meshs_intensity_no_0,
                                     np.where(meshs_intensity_no_0 == 244))
    dur_intensity = haz_dur.intensity[ev_id].todense().astype(int)
    dur_intensity_no_0 = np.array(dur_intensity[dur_intensity != 0]).ravel()

    fig, axs = plt.subplots(1, 2, sharey=False, tight_layout=False)
    fig.suptitle("Histogram of event {}".format(name))
    axs[0].hist(meshs_intensity_no_0, bins=25)
    axs[1].hist(dur_intensity_no_0)
    axs[0].set(xlabel="meshs [mm]", ylabel="frequency")
    axs[1].set(xlabel="duration [min]", ylabel="frequency")
    axs[0].locator_params(axis="y", integer=True)
    axs[1].locator_params(axis="y", integer=True)
    fig.subplots_adjust(wspace=0.35)
    plt.show()

    haz_real.plot_intensity(event=ev_id)
    plt.show()

    haz_real_ev = haz_real.select(event_names=[name])
    haz_dur_ev = haz_dur.select(event_names=[name])

    imp_agr_real_ev = Impact()
    imp_agr_dur_ev = Impact()
    imp_infr_real_ev = Impact()
    imp_agr_real_ev.calc(exp_meshs, ifset_hail, haz_real_ev, save_mat=True)
    imp_agr_dur_ev.calc(exp_dur, ifset_hail, haz_dur_ev, save_mat=True)
    imp_infr_real_ev.calc(exp_infr, ifset_hail, haz_real_ev, save_mat=True)

    print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
    print("Meshs on agr at event {}: at_event: {} mio; aai_agg: {} mio; eai_exp: {} mio"
          .format(name, imp_agr_real_ev.at_event / 1e6,
                  imp_agr_real_ev.aai_agg / 1e6, imp_agr_real_ev.eai_exp / 1e6))
    print("Duration on agr at event {}: at_event: {} mio; aai_agg: {} mio; eai_exp: {} mio"
          .format(name, imp_agr_dur_ev.at_event / 1e6,
                  imp_agr_dur_ev.aai_agg / 1e6, imp_agr_dur_ev.eai_exp / 1e6))
    print("Meshs on infr at event {}: at_event: {} mio; aai_agg: {} mio; eai_exp: {} mio"
          .format(name, imp_infr_real_ev.at_event / 1e6,
                  imp_infr_real_ev.aai_agg / 1e6, imp_infr_real_ev.eai_exp / 1e6))
    print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")

    if plot_img:
        imp_agr_real_ev.plot_basemap_impact_exposure()
        imp_agr_real_ev.plot_hexbin_impact_exposure()
        imp_agr_dur_ev.plot_basemap_impact_exposure()
        imp_agr_dur_ev.plot_hexbin_impact_exposure()
#%% Exposure
exp_meshs = fct.load_exp_agr(force_new_hdf5_generation, name_hdf5_file,
                             input_folder, haz_real)
exp_dur = exp_meshs.copy()
exp_dur["if_HL"] = exp_dur["if_HL"] + 3  # change if_HL to match the corresponding imp_id

if plot_img:
    exp_meshs.tag = Tag(file_name="exp_agr",
                        description="Exposure_description")
    exp_meshs.plot_basemap()
    exp_meshs.plot_hexbin()
    exp_meshs.plot_scatter()
    exp_meshs.plot_raster(raster_res=0.001)

#%% Impact
imp_agr_meshs = Impact()
imp_agr_meshs.calc(exp_meshs, ifset_hail, haz_real, save_mat=True)
freq_curve_meshs_agr = imp_agr_meshs.calc_freq_curve()
if plot_img:
    freq_curve_meshs_agr.plot()
    imp_agr_meshs.plot_basemap_eai_exposure()
    imp_agr_meshs.plot_hexbin_eai_exposure()
    imp_agr_meshs.plot_scatter_eai_exposure()
    imp_agr_meshs.plot_raster_eai_exposure(raster_res=0.001)

imp_agr_dur = Impact()
imp_agr_dur.calc(exp_dur, ifset_hail, haz_dur, save_mat=True)
freq_curve_dur_agr = imp_agr_dur.calc_freq_curve()
if plot_img:
    freq_curve_dur_agr.plot()
    imp_agr_dur.plot_basemap_eai_exposure()
    imp_agr_dur.plot_hexbin_eai_exposure()
def make_Y(parameter, *args):
    """Score function for the optimization process. Multiple scoring options
    are available (spearman, pearson, RMSE, RMSF).

    Parameters
    ----------
    parameter : np.ndarray
        array containing the parameters that are optimized.
    *args :
        imp_fun_parameter: dict
            Contains ind and parameters for the impact function
        exp: climada.entity.exposures.base.Exposures
            CLIMADA Exposure.
        haz: climada.hazard.base.Hazard
            CLIMADA hazard
        haz_type: str
            Type of Hazard ("HL")
        num_fct: int
            number of impact functions ([1,3])

    Returns
    -------
    score: float
        Variable that is minimized by the optimization. Multiple variables
        are possible.
    """
    # *args = imp_fun_parameter, exp, agr, haz_type
    # a = time.perf_counter()
    parameter_optimize, exp, haz, haz_type, num_fct, score_type, type_imp_fun = args
    ifset_hail = ImpactFuncSet()
    if type_imp_fun == "sig":
        if num_fct == 1:
            parameter_optimize[0]["L"] = parameter[0]
            parameter_optimize[0]["x_0"] = parameter[1]
            parameter_optimize[0]["k"] = parameter[2]
        else:
            parameter_optimize[0]["L"] = parameter[0]
            parameter_optimize[0]["x_0"] = parameter[1]
            parameter_optimize[0]["k"] = parameter[2]
            parameter_optimize[1]["L"] = parameter[3]
            parameter_optimize[1]["x_0"] = parameter[4]
            parameter_optimize[1]["k"] = parameter[5]
            parameter_optimize[2]["L"] = parameter[6]
            parameter_optimize[2]["x_0"] = parameter[7]
            parameter_optimize[2]["k"] = parameter[8]
        # b = time.perf_counter()
        # print("time to write parameter_optimize: ", b-a)
        for imp_fun_dict in parameter_optimize:
            imp_fun = create_impact_func(haz_type,
                                         imp_fun_dict["imp_id"],
                                         imp_fun_dict["L"],
                                         imp_fun_dict["x_0"],
                                         imp_fun_dict["k"])
            ifset_hail.append(imp_fun)
    elif type_imp_fun == "lin":
        parameter_optimize[0]["m"] = parameter[0]
        imp_fun = create_impact_func_lin(haz_type,
                                         parameter_optimize[0]["imp_id"],
                                         m=parameter[0])
        ifset_hail.append(imp_fun)
    c = time.perf_counter()
    # print("time to make imp_fun: ", c-b)
    imp = Impact()
    # imp.calc(self=imp, exposures=exp, impact_funcs=ifset_hail, hazard=haz, save_mat=True)
    imp.calc(exp, ifset_hail, haz, save_mat=False)
    d = time.perf_counter()
    print("time to calc impact: ", d - c)
    Y = list(imp.calc_impact_year_set(year_range=[2002, 2019]).values())
    all_eq = 0
    # Workaround: there were problems when all Y values were 0, so check for
    # this case and change the last value if it occurs.
    for count, y in enumerate(Y):
        if y == 0:
            all_eq += 1
            Y[count] = 0.1
    if all_eq == len(Y):
        Y[-1] = 0.2
    Y_norm = np.divide(Y, min(Y))
    Observ = [27.48, 46.14, 80.67, 76.80, 32.66, 62.47, 26.30, 110.60, 13.01,
              34.53, 21.50, 71.77, 22.80, 19.84, 17.50, 35.80, 24.40, 33.30]
    O_norm = np.divide(Observ, min(Observ))
    # res = mean_squared_error(Y_norm, O_norm)**0.5
    rmsf = RMSF(Y_norm, O_norm)
    rmse = mean_squared_error(O_norm, Y_norm)
    print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
    print("Params {}".format(parameter_optimize))
    print("The sum of the new Impact is: {}".format(sum(Y)))
    spear_coef, spear_p_value = spearmanr(O_norm, Y_norm)
    print("spearman for agr (score, p_value) = ({}, {})".format(spear_coef, spear_p_value))
    pears_coef, pears_p_value = stats.pearsonr(O_norm, Y_norm)
    print("pearson for agr (score, p_value) = ({}, {})".format(pears_coef, pears_p_value))
    print("RMSF: ", rmsf)
    print("RMSE: ", rmse)
    print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
    # e = time.perf_counter()
    # print("time to get result: ", e-d)
    if score_type == "pearson":
        score = pears_coef * -1
    elif score_type == "spearman":
        score = spear_coef * -1
    elif score_type == "RMSF":
        score = rmsf
    elif score_type == "RMSE":
        score = rmse
    return score
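# Hedged usage sketch (assumption, not from the original source): a single
# direct evaluation of make_Y with the argument order unpacked inside the
# function above. `imp_fun_parameter`, `exp_meshs` and `haz_real` are
# placeholders for objects prepared elsewhere; the sigmoid parameter values
# (L, x_0, k) are illustrative only.
args = (imp_fun_parameter[1:2], exp_meshs, haz_real, "HL", 1, "RMSF", "sig")
score = make_Y(np.array([1.0, 60.0, 0.1]), *args)
print("score:", score)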
haz_file = HAZARD_PTH + 'flddph_' + hydro + '_' + gcm + '_' + RCP \
    + '_flopros_gev_picontrol_2006_2300_0.1.nc'
haz_frac = HAZARD_PTH + 'fldfrc_' + hydro + '_' + gcm + '_' + RCP \
    + '_flopros_gev_picontrol_2006_2300_0.1.nc'
if haz_file not in HAZ_FILES:
    logging.error('no file: flddph_' + hydro + '_' + gcm + '_' + RCP
                  + '_flopros_gev_picontrol_2006_2300_0.1.nc')
    continue

haz = Hazard('FL')
haz.set_raster([haz_file], [haz_frac], band=range(yy_start, yy_end),
               transform=DST_META['transform'], height=DST_META['height'],
               width=DST_META['width'], resampling=Resampling.bilinear,
               attrs={'frequency': np.ones(10) / 10})

imp_tmp = Impact()
imp_tmp.calc(exp, ifs_step, haz, save_mat=True)
imp_model.append(imp_tmp)

if imp_save is None:
    imp_save = np.reshape(imp_tmp.eai_exp, [1, -1])
else:
    imp_save_tmp = np.reshape(imp_tmp.eai_exp, [1, -1])
    imp_save = np.append(imp_save, imp_save_tmp, axis=0)

logging.info('%s %s %s %s %s', year, hydro, gcm,
             imp_tmp.eai_exp.sum(), imp_tmp.aai_agg)

save_file = SAVE_PTH + 'imp_' + RCP + '_BaseYearPOP_' + str(year) + '.tif'
write_raster(save_file, imp_save, DST_META)

yy_start += 10
"""Plot intensity of one year event""" # new_haz.plot_intensity_drought(event='2003') """Initialize Impact function""" dr_if = ImpactFuncSet() if_def = IFDrought() """set impact function: for min: set_default; for sum-thr: set_default_sumthr; for sum: set_default_sum""" #if_def.set_default() #if_def.set_default_sumthr() if_def.set_default_sum() dr_if.append(if_def) """Initialize Exposure""" exposure_agrar = SpamAgrar() exposure_agrar.init_spam_agrar(country='CHE') """If intensity def is not default, exposure has to be adapted""" """In case of sum-thr: 'if_DR_sumthr', in case of sum:'if_DR_sum' """ #exposure_agrar['if_DR_sumthr'] = np.ones(exposure_agrar.shape[0]) exposure_agrar['if_DR_sum'] = np.ones(exposure_agrar.shape[0]) """Initialize impact of the drought""" imp_drought = Impact() """Calculate Damage for a specific event""" imp_drought.calc(exposure_agrar, dr_if, new_haz) index_event_start = imp_drought.event_name.index('2003') damages_drought = np.asarray([imp_drought.at_event[index_event_start]]) print(damages_drought)
exp_infr = fct.load_exp_infr(force_new_hdf5_generation, name_hdf5_file,
                             input_folder, haz_real)
exp_meshs = fct.load_exp_agr(force_new_hdf5_generation, name_hdf5_file,
                             input_folder, haz_real)
exp_dur = exp_meshs.copy()
exp_dur["if_HL"] = exp_dur["if_HL"] + 3  # change if_HL to match the corresponding imp_id
if plot_img:
    exp_infr.plot_basemap()
    # This takes too long. Do it overnight!
    # exp_agr.plot_basemap()

#%% Impact
imp_infr = Impact()
imp_infr.calc(exp_infr, ifset_hail, haz_real, save_mat=True)
# imp_infr.plot_raster_eai_exposure()
freq_curve_infr = imp_infr.calc_freq_curve()
freq_curve_infr.plot()
plt.show()

imp_agr = Impact()
imp_agr.calc(exp_meshs, ifset_hail, haz_real, save_mat=True)
freq_curve_agr = imp_agr.calc_freq_curve()
freq_curve_agr.plot()
plt.show()

imp_agr_dur = Impact()
imp_agr_dur.calc(exp_dur, ifset_hail, haz_dur, save_mat=True)
freq_curve_agr = imp_agr.calc_freq_curve()
freq_curve_agr.plot()
ini_date = str(years[year]) + '-01-01'
fin_date = str(years[year]) + '-12-31'
dataDF.iloc[line_counter, 0] = years[year]
dataDF.iloc[line_counter, 1] = country[0]
dataDF.iloc[line_counter, 2] = reg
dataDF.iloc[line_counter, 3] = cont
dataDF.iloc[line_counter, 4] = 0

# set variable exposure
gdpa = GDP2Asset()
gdpa.set_countries(countries=country, ref_year=years[year], path=gdp_path)
# gdpa.correct_for_SSP(ssp_corr, country[0])

# calculate damages for all combinations
imp2y_fl_pos = Impact()
imp2y_fl_pos.calc(gdpa, if_set, rf2y_pos.select(date=(ini_date, fin_date)))
imp2y_fl_neg = Impact()
imp2y_fl_neg.calc(gdpa, if_set, rf2y_neg.select(date=(ini_date, fin_date)))
imp2y_fl = Impact()
imp2y_fl.calc(gdpa, if_set, rf2y.select(date=(ini_date, fin_date)))
imp2y_fl_1980_pos = Impact()
imp2y_fl_1980_pos.calc(gdpa1980, if_set, rf2y_pos.select(date=(ini_date, fin_date)))
imp2y_fl_1980_neg = Impact()
imp2y_fl_1980_neg.calc(gdpa1980, if_set, rf2y_neg.select(date=(ini_date, fin_date)))
imp2y_fl_1980 = Impact()
imp2y_fl_1980.calc(gdpa1980, if_set, rf2y.select(date=(ini_date, fin_date)))
def calc_geom_impact(exp, impf_set, haz, res, to_meters=False,
                     disagg_met=DisaggMethod.DIV, disagg_val=None,
                     agg_met=AggMethod.SUM):
    """Compute impact for exposure with (multi-)polygons and/or (multi-)lines.

    Lat/Lon values in exp.gdf are ignored; only exp.gdf.geometry is considered.
    The geometries are first disaggregated to points. Polygons: grid with
    resolution res*res. Lines: points along the line separated by distance res.
    The impact per point is then re-aggregated for each geometry.

    Parameters
    ----------
    exp : Exposures
        The exposure instance with exp.gdf.geometry containing (multi-)polygons
        and/or (multi-)lines
    impf_set : ImpactFuncSet
        The set of impact functions.
    haz : Hazard
        The hazard instance.
    res : float
        Resolution of the disaggregation grid (polygons) or line (lines).
    to_meters : bool, optional
        If True, res is interpreted as meters, and geometries are projected to
        an equal area projection for disaggregation. The exposures are then
        projected back to the original projections before impact calculation.
        The default is False.
    disagg_met : DisaggMethod
        Disaggregation method of the shape's original value onto its
        interpolated points. 'DIV': Divide the value evenly over all the new
        points; 'FIX': Replicate the value onto all the new points.
        Default is 'DIV'. Works in combination with the kwarg 'disagg_val'.
    disagg_val : float, optional
        Specifies what number should be taken as the value, which is to be
        disaggregated according to the method provided in disagg_met.
        None: The shape's value is taken from the exp.gdf.value column.
        float: This given number will be disaggregated according to the method.
        In case the exp.gdf.value column exists, the original values in there
        will be ignored.
        The default is None.
    agg_met : AggMethod
        Aggregation method of the point impacts into the impact for the
        respective parent geometry. If 'SUM', the impact is summed over all
        points in each geometry. The default is 'SUM'.

    Returns
    -------
    Impact
        Impact object with the impact per geometry (rows of exp.gdf). Contains
        two additional attributes 'geom_exp' and 'coord_exp', the first one
        being the original line or polygon geometries for which the impact was
        computed.

    See Also
    --------
    exp_geom_to_pnt : disaggregate exposures
    """
    # disaggregate exposure
    exp_pnt = exp_geom_to_pnt(exp=exp, res=res, to_meters=to_meters,
                              disagg_met=disagg_met, disagg_val=disagg_val)
    exp_pnt.assign_centroids(haz)

    # compute point impact
    impact_pnt = Impact()
    impact_pnt.calc(exp_pnt, impf_set, haz, save_mat=True)

    # re-aggregate impact to original exposure geometry
    impact_agg = impact_pnt_agg(impact_pnt, exp_pnt.gdf, agg_met)

    return impact_agg
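# Hedged usage sketch (assumption, not from the original source): computing the
# impact of a hazard on a polygon exposure, disaggregated to an approx. 1 km
# grid. `exp_poly`, `impf_set` and `haz` are placeholders for objects prepared
# elsewhere.
imp_geom = calc_geom_impact(exp=exp_poly, impf_set=impf_set, haz=haz,
                            res=1000, to_meters=True,
                            disagg_met=DisaggMethod.DIV, disagg_val=None,
                            agg_met=AggMethod.SUM)
print(imp_geom.aai_agg)  # expected annual impact aggregated over all geometries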
# ifset_hail.plot(axis = axis)
ifset_hail.plot(axis=axis)

#%% Exposure
exp_infr_meshs = fct.load_exp_infr(force_new_hdf5_generation, name_hdf5_file,
                                   input_folder, haz_real)
exp_infr_dur = exp_infr_meshs.copy()
exp_infr_dur["if_HL"] = 8  # change if_HL to match the corresponding imp_id
if plot_img:
    exp_infr_meshs.plot_basemap()
    exp_infr_meshs.plot_hexbin()
    exp_infr_meshs.plot_scatter()
    exp_infr_meshs.plot_raster()

#%% Impact
imp_infr_meshs = Impact()
imp_infr_meshs.calc(exp_infr_meshs, ifset_hail, haz_real, save_mat=True)
# imp_infr.plot_raster_eai_exposure()
freq_curve_infr_meshs = imp_infr_meshs.calc_freq_curve()  # [1, 2, 5, 10, 20]
if plot_img:
    freq_curve_infr_meshs.plot()
    imp_infr_meshs.plot_basemap_eai_exposure()
    imp_infr_meshs.plot_hexbin_eai_exposure()
    imp_infr_meshs.plot_scatter_eai_exposure()
    imp_infr_meshs.plot_raster_eai_exposure()

imp_infr_dur = Impact()
imp_infr_dur.calc(exp_infr_dur, ifset_hail, haz_dur, save_mat=True)
# imp_infr.plot_raster_eai_exposure()
freq_curve_infr_dur = imp_infr_dur.calc_freq_curve()
if plot_img:
    freq_curve_infr_dur.plot()
exp_synth_agr_meshs = fct.load_exp_agr(force_new_hdf5_generation,
                                       name_hdf5_file, input_folder, haz_synth)
if plot_img:
    exp_synth_infr_meshs.plot_basemap()
    exp_synth_infr_meshs.plot_hexbin()
    exp_synth_infr_meshs.plot_scatter()
    exp_synth_infr_meshs.plot_raster()
    exp_synth_agr_meshs.tag = Tag(file_name="exp_agr",
                                  description="Exposure_description")
    exp_synth_agr_meshs.plot_basemap()
    exp_synth_agr_meshs.plot_hexbin()
    exp_synth_agr_meshs.plot_scatter()
    exp_synth_agr_meshs.plot_raster(raster_res=0.001)

#%% Impact
imp_synth_infr_meshs = Impact()
imp_synth_infr_meshs.calc(exp_synth_infr_meshs, ifset_hail, haz_synth, save_mat=True)
freq_curve_synth_infr_meshs = imp_synth_infr_meshs.calc_freq_curve()
if plot_img:
    freq_curve_synth_infr_meshs.plot()
    imp_synth_infr_meshs.plot_basemap_eai_exposure()
    imp_synth_infr_meshs.plot_hexbin_eai_exposure()
    imp_synth_infr_meshs.plot_scatter_eai_exposure()
    imp_synth_infr_meshs.plot_raster_eai_exposure(raster_res=0.001)

imp_synth_agr_meshs = Impact()
imp_synth_agr_meshs.calc(exp_synth_agr_meshs, ifset_hail, haz_synth, save_mat=True)
freq_curve_synth_agr_meshs = imp_synth_agr_meshs.calc_freq_curve()
if plot_img:
    freq_curve_synth_agr_meshs.plot()
    imp_synth_agr_meshs.plot_basemap_eai_exposure()
    imp_synth_agr_meshs.plot_hexbin_eai_exposure()
def calculate_impact(directory_hazard, scenario, year, exposures,
                     uncertainty_variable='all', kanton=None, age_group=None,
                     save_median_mat=False):
    """Compute the impacts once.

    Parameters:
        directory_hazard (str): directory to a folder containing one tasmax
            (and one tasmin) folder with all the data files
        scenario (str): scenario for which to compute the hazards
        year (str): year for which to compute the hazards
        exposures (Exposures): the exposures, which stay fixed for all runs
        uncertainty_variable (str): variable for which to consider the
            uncertainty. Default: 'all'
        kanton (str or None): name of canton. Default: None (all of Switzerland)
        age_group (str or None): specific age group, as given in the
            "GIS_Data_code" of the age_categories.csv file. Default: None
        save_median_mat (bool): whether to save the impact matrix.
            Default: False

    Returns:
        Dictionary of impact loss, and dictionary of impact matrices if
        specified
    """
    impact_dict = {}

    if save_median_mat:
        matrices = {}
        save_mat = True
    else:
        save_mat = False

    hazard = call_hazard(directory_hazard, scenario, year,
                         uncertainty_variable=uncertainty_variable,
                         kanton=kanton)

    if uncertainty_variable == 'impactfunction' or uncertainty_variable == 'all':
        TF = True
    else:
        TF = False

    if_hw_set = call_impact_functions(TF)

    for e_ in exposures:  # calculate impact for each type of exposure
        impact = Impact()
        impact.calc(exposures[e_], if_hw_set, hazard['heat'], save_mat=save_mat)
        impact_dict[e_] = np.sum(impact.at_event)
        if save_median_mat:
            # sum all events to get one 1 x gridpoints matrix per type of exposures
            matrices[e_] = csr_matrix(impact.imp_mat.sum(axis=0))

    del hazard

    if save_median_mat:
        output = [impact_dict, matrices]
    else:
        output = [impact_dict]

    return output
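# Hedged usage sketch (assumption, not from the original source): one call of
# calculate_impact for a fixed exposure dictionary. `DIRECTORY_HAZARD` and
# `exposures_dict` are placeholders; the scenario and year strings are
# illustrative only.
output = calculate_impact(DIRECTORY_HAZARD, 'RCP85', '2050', exposures_dict,
                          uncertainty_variable='all', kanton=None,
                          save_median_mat=True)
impact_dict, matrices = output  # per-exposure losses and summed impact matrices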
def calc_sector_direct_impact(self, hazard, exposure, imp_fun_set,
                              selected_subsec="service"):
    """Calculate direct impacts.

    Parameters
    ----------
    hazard : Hazard
        Hazard object for impact calculation.
    exposure : Exposures
        Exposures object for impact calculation. For WIOD tables,
        exposure.region_id must be country names following ISO3 codes.
    imp_fun_set : ImpactFuncSet
        Set of impact functions.
    selected_subsec : str or list
        Positions of the selected sectors. These positions can either be
        defined by the user by passing a list of values, or by using the
        built-in sector aggregations for the WIOD data by passing a string
        with possible values "service", "manufacturing", "agriculture" or
        "mining". Default is "service".
    """
    if isinstance(selected_subsec, str):
        built_in_subsec_pos = {'service': range(26, 56),
                               'manufacturing': range(4, 23),
                               'agriculture': range(0, 1),
                               'mining': range(3, 4)}
        selected_subsec = built_in_subsec_pos[selected_subsec]

    dates = [
        dt.datetime.strptime(date, "%Y-%m-%d")
        for date in hazard.get_event_date()
    ]
    self.years = np.unique([date.year for date in dates])

    unique_exp_regid = exposure.gdf.region_id.unique()
    self.direct_impact = np.zeros(shape=(len(self.years),
                                         len(self.mriot_reg_names) * len(self.sectors)))

    self.reg_dir_imp = []
    for exp_regid in unique_exp_regid:
        reg_exp = Exposures(exposure.gdf[exposure.gdf.region_id == exp_regid])
        reg_exp.check()

        # Normalize exposure
        total_reg_value = reg_exp.gdf['value'].sum()
        reg_exp.gdf['value'] /= total_reg_value

        # Calc impact for country
        imp = Impact()
        imp.calc(reg_exp, imp_fun_set, hazard)
        imp_year_set = np.array(list(imp.calc_impact_year_set(all_years=True).values()))

        mriot_reg_name = self._map_exp_to_mriot(exp_regid, self.mriot_type)

        self.reg_dir_imp.append(mriot_reg_name)

        subsec_reg_pos = np.array(selected_subsec) + self.reg_pos[mriot_reg_name][0]
        subsec_reg_prod = self.mriot_data[subsec_reg_pos].sum(axis=1)

        imp_year_set = np.repeat(imp_year_set, len(selected_subsec)
                                 ).reshape(len(self.years), len(selected_subsec))
        direct_impact_reg = np.multiply(imp_year_set, subsec_reg_prod)

        # Sum needed below in case of many ROWs, which are aggregated into
        # one country as per the WIOD table.
        self.direct_impact[:, subsec_reg_pos] += direct_impact_reg.astype(np.float32)

    # average impact across years
    self.direct_aai_agg = self.direct_impact.mean(axis=0)
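# Hedged usage sketch (assumption, not from the original source): calling the
# method on a supply-chain object for the manufacturing subsector. `supchain`,
# `tc_hazard`, `exp_countries` and `impf_set` are placeholders for objects
# prepared elsewhere.
supchain.calc_sector_direct_impact(tc_hazard, exp_countries, impf_set,
                                   selected_subsec="manufacturing")
print(supchain.direct_aai_agg.shape)  # one value per region-sector, averaged over years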
def calib_instance(hazard, exposure, impact_func, df_out=pd.DataFrame(),
                   yearly_impact=False, return_cost='False'):
    """Calculate one impact instance for the calibration algorithm and write
    it to the given DataFrame.

    Parameters
    ----------
    hazard : Hazard
    exposure : Exposure
    impact_func : ImpactFunc
    df_out : DataFrame, optional
        Output DataFrame with headers of columns defined and optionally with
        first row (index=0) defined with values. If columns "impact",
        "event_id", or "year" are not included, they are created here. Data
        like reported impacts or impact function parameters can be given here;
        values are preserved.
    yearly_impact : boolean, optional
        if set True, impact is returned per year, not per event
    return_cost : str, optional
        if not 'False' but any of 'R2', 'logR2', the cost is returned instead
        of df_out

    Returns
    -------
    df_out : DataFrame
        DataFrame with modelled impact written to rows for each year or event.
    """
    IFS = ImpactFuncSet()
    IFS.append(impact_func)
    impacts = Impact()
    impacts.calc(exposure, IFS, hazard)
    if yearly_impact:  # impact per year
        IYS = impacts.calc_impact_year_set(all_years=True)
        # Loop over whole year range:
        if df_out.empty or df_out.index.shape[0] == 1:
            for cnt_, year in enumerate(np.sort(list(IYS.keys()))):
                if cnt_ > 0:
                    df_out.loc[cnt_] = df_out.loc[0]  # copy info from first row
                if year in IYS:
                    df_out.loc[cnt_, 'impact_CLIMADA'] = IYS[year]
                else:
                    df_out.loc[cnt_, 'impact_CLIMADA'] = 0.0
                df_out.loc[cnt_, 'year'] = year
        else:
            years_in_common = df_out.loc[
                df_out['year'].isin(np.sort(list(IYS.keys()))), 'year']
            for cnt_, year in years_in_common.iteritems():
                df_out.loc[df_out['year'] == year, 'impact_CLIMADA'] = IYS[year]
    else:  # impact per event
        if df_out.empty or df_out.index.shape[0] == 1:
            for cnt_, impact in enumerate(impacts.at_event):
                if cnt_ > 0:
                    df_out.loc[cnt_] = df_out.loc[0]  # copy info from first row
                df_out.loc[cnt_, 'impact_CLIMADA'] = impact
                df_out.loc[cnt_, 'event_id'] = int(impacts.event_id[cnt_])
                df_out.loc[cnt_, 'event_name'] = impacts.event_name[cnt_]
                df_out.loc[cnt_, 'year'] = \
                    dt.datetime.fromordinal(impacts.date[cnt_]).year
                df_out.loc[cnt_, 'date'] = impacts.date[cnt_]
        elif df_out.index.shape[0] == impacts.at_event.shape[0]:
            for cnt_, (impact, ind) in enumerate(zip(impacts.at_event, df_out.index)):
                df_out.loc[ind, 'impact_CLIMADA'] = impact
                df_out.loc[ind, 'event_id'] = int(impacts.event_id[cnt_])
                df_out.loc[ind, 'event_name'] = impacts.event_name[cnt_]
                df_out.loc[ind, 'year'] = \
                    dt.datetime.fromordinal(impacts.date[cnt_]).year
                df_out.loc[ind, 'date'] = impacts.date[cnt_]
        else:
            raise ValueError('adding simulated impacts to reported impacts not'
                             ' yet implemented. use yearly_impact=True or run'
                             ' without init_impact_data.')
    if not return_cost == 'False':
        df_out = calib_cost_calc(df_out, return_cost)
    return df_out
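# Hedged usage sketch (assumption, not from the original code): requesting a
# scalar cost instead of the DataFrame, e.g. for use inside an optimizer.
# `hazard`, `exposure`, `impact_func` and `df_impact_data` (a DataFrame with
# reported yearly impacts) are placeholders prepared elsewhere; 'R2' is one of
# the cost options named in the docstring.
cost = calib_instance(hazard, exposure, impact_func, df_impact_data,
                      yearly_impact=True, return_cost='R2')
print("calibration cost (R2):", cost)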
def simp_opt(haz, exp, type_imp_fun, lin_space):
    """Sketch of a simple parameter sweep (pseudocode in the original):
    loop over a parameter range, compute the impact for each value and
    collect the results."""
    results = []
    for i in lin_space:
        # create new imp_fun (ImpactFuncSet) from parameter i (placeholder,
        # not defined in the original sketch)
        # calculate impact
        imp = Impact()
        imp.calc(exp, imp_fun, haz)
        # save results (e.g. the aggregated annual impact)
        results.append(imp.aai_agg)
    return results


optimize_results = [2.2e-05, 1.8e+00, 0.0e+00, 3.0e-01]

sector = "infr"  # ["infr", "agr"]
optimize_type = "meshs"  # "" = no optimization, "meshs", "dur"
score_type = "RMSF"  # ["pearson", "spearman", "RMSF"]
type_imp_fun = "const"  # ["sig", "lin", "class", "const"]
norm = False
class_mult = False  # True -> imp_fun_class_mult, False -> imp_fun_class
bound = [(0.1, 1), (1.0, 150), (0.0, 20)]

if optimize_type != "":
    num_fct = 1  # [1:3]
    init_parameter = []
    if sector == "agr":
        if optimize_type == "meshs":
            parameter_optimize = imp_fun_parameter[1:1 + num_fct]
            haz = haz_real
            exp = exp_meshs.copy()
            bounds = num_fct * bound
        elif optimize_type == "dur":
            parameter_optimize = imp_fun_parameter[4:4 + num_fct]
            haz = haz_dur
            bounds = num_fct * bound
            exp = exp_dur.copy()
    elif sector == "infr":
        if optimize_type == "meshs":
            parameter_optimize = imp_fun_parameter[0:1 + num_fct]
            haz = haz_real
            exp = exp_infr.copy()
            bounds = num_fct * bound
        elif optimize_type == "dur":
            print("NO dur for infr")

    for i in range(num_fct):
        init_parameter += [*parameter_optimize[i].values()][1:4]
    if num_fct == 1:
        exp["if_HL"] = parameter_optimize[0]["imp_id"]

    if type_imp_fun == "lin":
        bounds = [(0.01, 0.03)]
        imp_fun_lin = {"imp_id": 9, "m": 0.1, "q": 0}
        parameter_optimize = [imp_fun_lin]
        exp.if_HL = 9

    if type_imp_fun == "class":
        bound = [(0, 0.001)]
        if class_mult:
            bounds = [
                slice(0, 0.000_051, 0.000_01),
                slice(0, 1, 0.2),
                slice(0, 1, 0.2),
                slice(0, 1, 0.2),
                slice(0, 1, 0.2)
            ]
        else:
            bounds = [
                slice(0, 1, 1),
                slice(0, 1, 1),
                slice(0, 1, 1),
                slice(0, 1, 1),
                slice(0, 0.01_1, 0.000_1)
            ]
        init_parameter = [0, 0, 0, 0]
        parameter_optimize = init_parameter

    if type_imp_fun == "const":
        parameter_optimize = [{
            "imp_id": parameter_optimize[0]["imp_id"],
            "y_const": 0
        }]
        bounds = [slice(0, 0.001, 0.000_001)]

    args = (parameter_optimize, exp, haz, haz_type, num_fct, score_type,
            type_imp_fun, sector, norm, class_mult, optimize_type)
    # optimize_results = optimize.differential_evolution(func=fct.make_Y,
    #                                                    bounds=bounds,
    #                                                    args=args, workers=3)
    optimize_results = optimize.brute(func=fct.make_Y, ranges=bounds, args=args,
                                      Ns=10, full_output=False, finish=None,
                                      workers=1)
    # optimize_results = optimize.minimize(fun=fct.make_Y, x0=init_parameter,
    #                                      method="Powell", args=args,
    #                                      bounds=bounds)
    # test = fct.make_Y(init_parameter, args)

    print(optimize_results)
    print(score_type)
    print(optimize_type)