# Example #1 (rating: 0)
def get_entity(country, exponents, description_str, date_str):
    """Load a cached LitPop exposure from HDF5, or compute and cache it.

    Parameters
    ----------
    country : str
        Country identifier passed to ``LitPop.set_country``.
    exponents : tuple
        Powers with which Lit and Pop enter the LitPop index.
    description_str : str
        Tag appended to the exposure description and the cache file name.
    date_str : str
        Date string used in the cache file name.

    Returns
    -------
    LitPop
        The loaded or newly computed exposure.
    """
    file_name = (country + '_litpop_' + description_str + '_' + date_str +
                 '.hdf5')
    # No backslash continuation needed inside the parenthesized call.
    exposure_file = os.path.abspath(
        os.path.join(CONFIG['local_data']['save_dir'], file_name))
    if os.path.isfile(exposure_file):
        # Cache hit: reuse the exposure written by a previous run.
        exposure = LitPop()
        exposure.read_hdf5(exposure_file)
    else:
        # Cache miss: compute the exposure and persist it for next time.
        exposure = LitPop()
        # 'exponents' sets the power with which Lit and Pop go into LitPop.
        exposure.set_country(country, exponents=exponents, reference_year=2014)
        exposure.set_geometry_points()
        exposure.check()
        exposure.tag.description = (exposure.tag.description + '_' +
                                    description_str)
        exposure.set_lat_lon()
        exposure.write_hdf5(exposure_file)
    return exposure
# Ensure the results directory exists before anything is written to it.
if not os.path.exists(RES_DIR):
    os.makedirs(RES_DIR)

# Collect all files in ENTITY_DIR whose name starts with `filename_start`
# (defined elsewhere in this file); these are per-country exposure files.
files = [i for i in os.listdir(ENTITY_DIR) if os.path.isfile(os.path.join(ENTITY_DIR,i)) and \
         filename_start in i]
files = np.unique(files)
print('Number of country exposure files: %i' % (len(files)))

#
# Accumulator for the combined (all-country) exposure.
exposure_data = LitPop()

# Fast path: if a combined HDF5 for all countries already exists, read it
# directly instead of aggregating the per-country files below.
if try_read_from_hdf5 and os.path.exists(
        os.path.join(ENTITY_DIR, 'LitPop_pc_%iarcsec_000_all.hdf5' %
                     (RES_ARCSEC))):
    exposure_data.read_hdf5(
        os.path.join(ENTITY_DIR,
                     'LitPop_pc_%iarcsec_000_all.hdf5' % (RES_ARCSEC)))
else:
    # Aggregate per-country CSVs and collect simple per-country grid stats.
    grid_stats = pd.DataFrame(
        index=np.arange(0, len(files)),
        columns=['country', 'grid_count', 'sum', 'max', 'mean', 'median'])
    for idx, fi in enumerate(files):
        print('Loading: %s ...' % (fi))
        exposure_tmp = LitPop()
        exposure_tmp = exposure_tmp.from_csv(os.path.join(ENTITY_DIR, fi),
                                             index_col=None)
        exposure_data = exposure_data.append(exposure_tmp)
        # print('Max. grid cell value: USD %1.0f' %(exposure_tmp.value.max()))
        # Assumes the country code is the last 3 characters before the file
        # extension (e.g. '..._DEU.csv') -- TODO confirm naming convention.
        grid_stats.loc[idx, 'country'] = fi[-7:-4]
        # Number of grid cells with strictly positive exposed value.
        grid_stats.loc[idx, 'grid_count'] = exposure_tmp.value[
            exposure_tmp.value > 0].count()
# Example #3 (rating: 0)
#    res = minimize(specific_calib, x0,
#                   constraints=cons,
#                   method='SLSQP',
#                   options={'xtol': 1e-5, 'disp': True, 'maxiter': 50})
#    
#    return param_dict_result


if __name__ == "__main__":
    # Try out calib_all with yearly impact aggregation.
    # Load precomputed hazard and exposure from local HDF5 files.
    hazard = TropCyclone()
    hazard.read_hdf5('C:/Users/ThomasRoosli/tc_NA_hazard.hdf5')
    exposure = LitPop()
    exposure.read_hdf5('C:/Users/ThomasRoosli/DOM_LitPop.hdf5')
    if_name_or_instance = 'emanuel'
    # Parameter grid for the 'emanuel' impact-function calibration.
    param_full_dict = {'v_thresh': [25.7, 20], 'v_half': [70], 'scale': [1, 0.8]}

    impact_data_source = {'emdat':('D:/Documents_DATA/EM-DAT/'
                                   '20181031_disaster_list_all_non-technological/'
                                   'ThomasRoosli_2018-10-31.csv')}
    year_range = [2004, 2017]
    yearly_impact = True
    # NOTE(review): removed a stray orphan line 'reference_year=REF_YEAR)'
    # left over from an earlier edit -- it was a syntax error here.
    df_result = calib_all(hazard,exposure,if_name_or_instance,param_full_dict,
                  impact_data_source, year_range, yearly_impact)

#    ## tryout calib_optimize
#    hazard = TropCyclone()
#    hazard.read_hdf5('C:/Users/ThomasRoosli/tc_NA_hazard.hdf5')