Example #1
def untar_noaa_stable_nightlight(f_tar_ini):
    """Move input tar file to SYSTEM_DIR and extract stable light file.
    Returns absolute path of stable light file in .tif.gz format.

    Parameters
    ----------
    f_tar_ini : str
        absolute path of file

    Returns
    -------
    f_tif_gz : str
        path of stable light file
    """
    # move to SYSTEM_DIR
    f_tar_dest = SYSTEM_DIR.joinpath(Path(f_tar_ini).name)
    shutil.move(f_tar_ini, f_tar_dest)
    # extract stable_lights.avg_vis.tif
    with tarfile.open(f_tar_dest) as tar_file:
        extract_name = [name for name in tar_file.getnames()
                        if name.endswith('stable_lights.avg_vis.tif.gz')]
        if len(extract_name) == 0:
            raise ValueError('No stable light intensities for selected year and satellite '
                            f'in file {f_tar_ini}')
        if len(extract_name) > 1:
            LOGGER.warning('found more than one potential intensity file in %s %s',
                           f_tar_ini, extract_name)
        tar_file.extract(extract_name[0], SYSTEM_DIR)
    return SYSTEM_DIR.joinpath(extract_name[0])
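A quick way to see the member-filtering pattern in action is to build a throwaway archive; the sketch below is self-contained, and the F18 file name is made up for illustration:

import tarfile
import tempfile
from pathlib import Path

tmp = Path(tempfile.mkdtemp())
member = tmp / 'F182013.v4b.stable_lights.avg_vis.tif.gz'
member.write_bytes(b'dummy payload')
tar_path = tmp / 'F182013.v4.tar'
with tarfile.open(tar_path, 'w') as tar:
    tar.add(member, arcname=member.name)

# same filtering as above: keep only the stable-lights member
with tarfile.open(tar_path) as tar:
    names = [name for name in tar.getnames()
             if name.endswith('stable_lights.avg_vis.tif.gz')]
    tar.extract(names[0], tmp)
print(tmp.joinpath(names[0]))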
Example #2
def untar_noaa_stable_nightlight(f_tar_ini):
    """Move input tar file to SYSTEM_DIR and extract stable light file.
    Returns absolute path of stable light file in .tif.gz format.

    Parameters:
        f_tar_ini (str): absolute path of file

    Returns:
        f_tif_gz (str): path of the extracted stable light file
    """
    # move to SYSTEM_DIR
    f_tar_dest = SYSTEM_DIR.joinpath(Path(f_tar_ini).name)
    shutil.move(f_tar_ini, f_tar_dest)
    # extract stable_lights.avg_vis.tif
    tar_file = tarfile.open(f_tar_dest)
    extract_name = [name for name in tar_file.getnames()
                    if name.endswith('stable_lights.avg_vis.tif.gz')]
    if len(extract_name) == 0:
        msg = f'No stable light intensities for selected year and satellite in file {f_tar_ini}'
        LOGGER.error(msg)
        raise ValueError(msg)
    if len(extract_name) > 1:
        LOGGER.warning('found more than one potential intensity file in %s %s', f_tar_ini, extract_name)
    try:
        tar_file.extract(extract_name[0], SYSTEM_DIR)
    except tarfile.TarError as err:
        LOGGER.error(str(err))
        raise err
    finally:
        tar_file.close()
    f_tif_gz = SYSTEM_DIR.joinpath(extract_name[0])

    return f_tif_gz
Example #3
def load_nightlight_noaa(ref_year=2013, sat_name=None):
    """Get nightlight luminosites. Nightlight matrix, lat and lon ordered
    such that nightlight[1][0] corresponds to lat[1], lon[0] point (the image
    has been flipped).

    Parameters:
        ref_year (int): reference year
        sat_name (str, optional): satellite provider (e.g. 'F10', 'F18', ...)

    Returns:
        nightlight (sparse.csr_matrix), coord_nl (np.array),
        fn_light (str)
    """
    if sat_name is None:
        fn_light = str(SYSTEM_DIR.joinpath('*' +
                             str(ref_year) + '*.stable_lights.avg_vis'))
    else:
        fn_light = str(SYSTEM_DIR.joinpath(sat_name +
                             str(ref_year) + '*.stable_lights.avg_vis'))
    # check if file exists in SYSTEM_DIR, download if not
    if glob.glob(fn_light + ".p"):
        fn_light = glob.glob(fn_light + ".p")[0]
        with open(fn_light, 'rb') as f_nl:
            nightlight = pickle.load(f_nl)
    elif glob.glob(fn_light + ".tif.gz"):
        fn_light = glob.glob(fn_light + ".tif.gz")[0]
        fn_light, nightlight = unzip_tif_to_py(fn_light)
    else:
        # iterate over all satellites if no satellite name provided
        if sat_name is None:
            ini_pre, end_pre = 18, 9
            for pre_i in np.arange(ini_pre, end_pre, -1):
                url = NOAA_SITE + 'F' + str(pre_i) + str(ref_year) + '.v4.tar'
                try:
                    file_down = download_file(url, download_dir=SYSTEM_DIR)
                    break
                except ValueError:
                    pass
            if 'file_down' not in locals():
                raise ValueError(f'Nightlight for reference year {ref_year} '
                                 'not available. Try another year.')
        else:
            url = NOAA_SITE + sat_name + str(ref_year) + '.v4.tar'
            try:
                file_down = download_file(url, download_dir=SYSTEM_DIR)
            except ValueError as err:
                raise ValueError(f'Nightlight intensities for year {ref_year} and satellite'
                                 f' {sat_name} do not exist.') from err
        fn_light = untar_noaa_stable_nightlight(file_down)
        fn_light, nightlight = unzip_tif_to_py(fn_light)

    # first point and step
    coord_nl = np.empty((2, 2))
    coord_nl[0, :] = [NOAA_BORDER[1], NOAA_RESOLUTION_DEG]
    coord_nl[1, :] = [NOAA_BORDER[0], NOAA_RESOLUTION_DEG]

    return nightlight, coord_nl, fn_light
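coord_nl compactly encodes the grid as (first coordinate, step) per axis, so a pixel's position is recovered as first + index * step. A minimal sketch, assuming the NOAA_BORDER and NOAA_RESOLUTION_DEG values below (taken as illustrative stand-ins for CLIMADA's constants):

import numpy as np

NOAA_BORDER = (-180, -65, 180, 75)  # assumed: (min_lon, min_lat, max_lon, max_lat)
NOAA_RESOLUTION_DEG = 30 / 3600     # assumed: 30 arc-seconds in degrees

coord_nl = np.empty((2, 2))
coord_nl[0, :] = [NOAA_BORDER[1], NOAA_RESOLUTION_DEG]  # first latitude, lat step
coord_nl[1, :] = [NOAA_BORDER[0], NOAA_RESOLUTION_DEG]  # first longitude, lon step

# nightlight[i][j] corresponds to lat[i], lon[j]:
i, j = 1, 0
lat_i = coord_nl[0, 0] + i * coord_nl[0, 1]
lon_j = coord_nl[1, 0] + j * coord_nl[1, 1]
print(lat_i, lon_j)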
Example #4
def world_bank(cntry_iso, ref_year, info_ind):
    """Get country's GDP from World Bank's data at a given year, or
    closest year value. If no data, get the natural earth's approximation.

    Parameters
    ----------
    cntry_iso : str
        ISO alpha-3 country code
    ref_year : int
        reference year
    info_ind : str
        indicator of World Bank, e.g. 'NY.GDP.MKTP.CD'. If
        'INC_GRP', historical income groups from excel file used.

    Returns
    -------
    close_year : int
    close_val : float

    Raises
    ------
    IOError, KeyError, IndexError
    """
    if info_ind != 'INC_GRP':
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            cntry_gdp = wb.download(indicator=info_ind,
                                    country=cntry_iso,
                                    start=1960,
                                    end=2030)
        years = np.array(
            [int(year) for year in cntry_gdp.index.get_level_values('year')])
        sort_years = np.abs(years - ref_year).argsort()
        close_val = cntry_gdp.iloc[sort_years].dropna()
        close_year = int(close_val.iloc[0].name[1])
        close_val = float(close_val.iloc[0].values)
    else:  # income group level
        fn_ig = SYSTEM_DIR.joinpath('OGHIST.xls')
        dfr_wb = pd.DataFrame()
        try:
            if not fn_ig.is_file():
                file_down = download_file(WORLD_BANK_INC_GRP)
                shutil.move(file_down, fn_ig)
            dfr_wb = pd.read_excel(fn_ig,
                                   'Country Analytical History',
                                   skiprows=5)
            dfr_wb = dfr_wb.drop(dfr_wb.index[0:5]).set_index('Unnamed: 0')
            dfr_wb = dfr_wb.replace(INCOME_GRP_WB_TABLE.keys(),
                                    INCOME_GRP_WB_TABLE.values())
        except (IOError, requests.exceptions.ConnectionError) as err:
            raise type(err)('Internet connection failed while downloading '
                            'historical income groups: ' + str(err)) from err

        cntry_dfr = dfr_wb.loc[cntry_iso]
        close_val = cntry_dfr.iloc[
            np.abs(np.array(cntry_dfr.index[1:]) - ref_year).argsort() +
            1].dropna()
        close_year = close_val.index[0]
        close_val = int(close_val.iloc[0])

    return close_year, close_val
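The nearest-year lookup boils down to an argsort on the absolute distance to ref_year followed by dropping missing values; a self-contained sketch with a toy frame standing in for the wb.download() result:

import numpy as np
import pandas as pd

# toy stand-in for the World Bank result: one indicator, one missing year
cntry_gdp = pd.DataFrame(
    {'NY.GDP.MKTP.CD': [1.1e9, np.nan, 1.3e9]},
    index=pd.MultiIndex.from_tuples(
        [('CHE', '2014'), ('CHE', '2015'), ('CHE', '2016')],
        names=['country', 'year']))

ref_year = 2015
years = np.array(
    [int(year) for year in cntry_gdp.index.get_level_values('year')])
sort_years = np.abs(years - ref_year).argsort()
close_val = cntry_gdp.iloc[sort_years].dropna()
close_year = int(close_val.iloc[0].name[1])
print(close_year, float(close_val.iloc[0].values[0]))  # 2014, nearest year with data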
Example #5
def unzip_tif_to_py(file_gz):
    """Unzip image file, read it, flip the x axis, save values as pickle
    and remove tif.

    Parameters
    ----------
    file_gz : str
        file with .gz format to unzip

    Returns
    -------
    file_name : pathlib.Path
        path of the unzipped file
    nightlight : sparse.csr_matrix
    """
    LOGGER.info("Unzipping file %s.", file_gz)
    file_name = Path(Path(file_gz).stem)
    with gzip.open(file_gz, 'rb') as f_in:
        with file_name.open('wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
    nightlight = sparse.csc_matrix(plt.imread(file_name))
    # flip X axis
    nightlight.indices = -nightlight.indices + nightlight.shape[0] - 1
    nightlight = nightlight.tocsr()
    file_name.unlink()
    file_path = SYSTEM_DIR.joinpath(file_name.stem + ".p")
    save(file_path, nightlight)

    return file_name, nightlight
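The flip manipulates the sparse index array directly: in CSC layout, indices holds row positions, so mapping each row r to shape[0] - 1 - r mirrors the image vertically without ever densifying it. A tiny sketch:

import numpy as np
from scipy import sparse

dense = np.array([[1, 0],
                  [0, 2],
                  [3, 0]])
mat = sparse.csc_matrix(dense)
# indices holds row numbers; this maps row r to shape[0] - 1 - r
mat.indices = -mat.indices + mat.shape[0] - 1
mat = mat.tocsr()
print(mat.toarray())  # [[3, 0], [0, 2], [1, 0]] -- rows reversed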
Example #6
def _gdp_twn(ref_year, per_capita=False):
    """returns GDP for TWN (Republic of China / Taiwan Province of China) based
    on a CSV sheet downloaded from the
    International Monetary Fund (IMF).
    The reason for this special treatment is the
    lack of GDP data for TWN in the World Bank data

    Data Source:
        https://www.imf.org/external/pubs/ft/weo/2019/02/weodata/index.aspx
        https://www.imf.org/external/pubs/ft/weo/2019/02/weodata/weorept.aspx?sy=1980&ey=2024&scsm=1&ssd=1&sic=1&sort=country&ds=.&br=1&pr1.x=42&pr1.y=10&c=528&s=NGDPD%2CNGDP_D%2CNGDPDPC&grp=0&a=
        (saved as CSV with name GDP_TWN_IMF_WEO_data in SYSTEM_DIR)

    Parameters
    ----------
    ref_year : int
        reference year, i.e. the year for which a GDP value is required
    per_capita : boolean
        if True, return GDP per capita. Default: False.

    Returns
    -------
    close_year : int
    close_val : float
    """
    fname = 'GDP_TWN_IMF_WEO_data.csv'
    if not SYSTEM_DIR.joinpath(fname).is_file():
        raise FileNotFoundError(f'File {fname} not found in SYSTEM_DIR')
    if per_capita:
        var_name = 'Gross domestic product per capita, current prices'
    else:
        var_name = 'Gross domestic product, current prices'
    if ref_year < 1980:
        close_year = 1980
    elif ref_year > 2024:
        close_year = 2024
    else:
        close_year = ref_year
    data = pd.read_csv(SYSTEM_DIR.joinpath(fname),
                       index_col=None,
                       header=0)
    close_val = data.loc[data['Subject Descriptor'] == var_name,
                         str(close_year)].values[0]
    close_val = float(close_val.replace(',', ''))
    if not per_capita:
        close_val = close_val * 1e9
    return close_year, close_val
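Two small details carry the function: the requested year is clamped to the 1980-2024 range covered by the WEO sheet, and the CSV stores numbers with thousands separators that must be stripped before conversion. A standalone sketch with a made-up value:

ref_year = 1975
close_year = min(max(ref_year, 1980), 2024)  # clamp into the data range -> 1980

raw_value = '611,391'  # made-up IMF WEO style figure (billions, comma-separated)
close_val = float(raw_value.replace(',', '')) * 1e9  # total GDP in US dollars
print(close_year, close_val)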
Example #7
    def test_rm_file(self):
        """test if file is removed"""
        url, file_name, lead_times = _create_icon_grib_name(
            dt.datetime(1908, 2, 2),
            max_lead_time=1,
        )
        file_name_i = SYSTEM_DIR.absolute().joinpath(
            file_name.format(lead_i=lead_times[0]))
        Path(file_name_i).touch()
        delete_icon_grib(dt.datetime(1908, 2, 2),
                         max_lead_time=1,
                         download_dir=SYSTEM_DIR)
        self.assertFalse(Path(file_name_i).exists())
Example #8
def wealth2gdp(cntry_iso,
               non_financial=True,
               ref_year=2016,
               file_name=FILE_GWP_WEALTH2GDP_FACTORS):
    """Get country's wealth-to-GDP factor from the
        Credit Suisse's Global Wealth Report 2017 (household wealth).
        Missing value: returns NaN.

    Parameters
    ----------
    cntry_iso : str
        ISO alpha-3 country code
    non_financial : boolean
        if True, use non-financial wealth; if False, use total wealth
    ref_year : int
        reference year
    file_name : str
        name of the CSV file with the conversion factors.
        Default: FILE_GWP_WEALTH2GDP_FACTORS

    Returns
    -------
    ref_year : int
    val : float
    """
    fname = SYSTEM_DIR.joinpath(file_name)
    factors_all_countries = pd.read_csv(fname,
                                        sep=',',
                                        index_col=None,
                                        header=0,
                                        encoding='ISO-8859-1')
    if ref_year != 2016:
        LOGGER.warning('Reference year for the factor to convert GDP to '
                       'wealth was set to 2016 because other years have not '
                       'been implemented yet.')
        ref_year = 2016
    if non_financial:
        try:
            val = factors_all_countries[
                factors_all_countries.country_iso3 ==
                cntry_iso]['NFW-to-GDP-ratio'].values[0]
        except (AttributeError, KeyError, IndexError):
            LOGGER.warning('No data for country, using mean factor.')
            val = factors_all_countries["NFW-to-GDP-ratio"].mean()
    else:
        try:
            val = factors_all_countries[factors_all_countries.country_iso3 ==
                                        cntry_iso]['TW-to-GDP-ratio'].values[0]
        except (AttributeError, KeyError, IndexError):
            LOGGER.warning('No data for country, using mean factor.')
            val = factors_all_countries["TW-to-GDP-ratio"].mean()
    val = np.around(val, 5)
    return ref_year, val
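The lookup's fallback behaviour is easy to reproduce in isolation: an unknown ISO code yields an empty selection, the IndexError is caught, and the table mean is used instead. A sketch with made-up factors:

import numpy as np
import pandas as pd

factors_all_countries = pd.DataFrame({
    'country_iso3': ['CHE', 'DEU'],
    'NFW-to-GDP-ratio': [1.85, 1.62],  # made-up factors
})

cntry_iso = 'XYZ'  # not in the table
try:
    val = factors_all_countries[
        factors_all_countries.country_iso3 == cntry_iso]['NFW-to-GDP-ratio'].values[0]
except (AttributeError, KeyError, IndexError):
    val = factors_all_countries['NFW-to-GDP-ratio'].mean()
print(np.around(val, 5))  # 1.735, the column mean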
Example #9
import logging
from pathlib import Path
import numpy as np
import pandas as pd
import xarray as xr
import scipy as sp
from climada.entity.tag import Tag
import climada.util.coordinates as u_coord
from climada.util.constants import RIVER_FLOOD_REGIONS_CSV, SYSTEM_DIR
from .base import Exposures, INDICATOR_IMPF

LOGGER = logging.getLogger(__name__)

DEF_HAZ_TYPE = 'RF'

CONVERTER = SYSTEM_DIR.joinpath('GDP2Asset_converter_2.5arcmin.nc')


class GDP2Asset(Exposures):
    def set_countries(self, countries=[], reg=[], ref_year=2000, path=None):
        """Model countries using values at reference year. If GDP or income
        group not available for that year, consider the value of the closest
        available year.

        Parameters:
            countries (list): list of country names ISO3
            reg (list, optional): list of regions
            ref_year (int, optional): reference year. Default: 2000
            path (string): path to exposure dataset (ISIMIP)
        """
        gdp2a_list = []
        tag = Tag()
Example #10
def get_gpw_file_path(gpw_version, reference_year, data_dir=SYSTEM_DIR, verbatim=True):
    """Check available GPW population data versions and year closest to
    `reference_year` and return full path to TIFF file.

    Parameters
    ----------
    gpw_version : int
        Version number of GPW population data, i.e. 11 for v4.11.
    reference_year : int
        Data year is selected as close to reference_year as possible.
    data_dir : pathlib.Path (optional)
        Absolute path where files are stored. Default: SYSTEM_DIR
    verbatim : bool (optional)
        If True, log the GPW version used and any year mismatch.
        Default: True

    Raises
    ------
    FileExistsError

    Returns
    -------
    pathlib.Path : path to input file with population data
    """
    # get years available in GPW data from CONFIG and convert to array:
    years_available = np.array([
        year.int() for year in
        CONFIG.exposures.litpop.gpw_population.years_available.list()
    ])
    # find closest year to reference_year with data available:
    year = years_available[np.abs(years_available - reference_year).argmin()]
    if verbatim and (year != reference_year):
        LOGGER.warning('Reference year: %i. Using nearest available year for GPW data: %i',
                    reference_year, year)

    # check if file is available for given GPW version,
    # if available, return full path to file:
    file_path = data_dir / \
        (CONFIG.exposures.litpop.gpw_population.filename_gpw.str() % (gpw_version, year))
    if file_path.is_file():
        if verbatim:
            LOGGER.info('GPW Version v4.%2i', gpw_version)
        return file_path
    # try to construct GPW file path from CONFIG:
    file_path = data_dir / \
        (CONFIG.exposures.litpop.gpw_population.dirname_gpw.str() % (gpw_version, year)) / \
        (CONFIG.exposures.litpop.gpw_population.filename_gpw.str() % (gpw_version, year))
    if file_path.is_file():
        if verbatim:
            LOGGER.info('GPW Version v4.%2i', gpw_version)
        return file_path
    # if no input file was found, FileExistsError is raised
    if SYSTEM_DIR.joinpath('GPW_help.pdf').is_file():
        subprocess.Popen([str(SYSTEM_DIR.joinpath('GPW_help.pdf'))], shell=True)
        raise FileExistsError(f'The file {file_path} could not '
                              + 'be found. Please download the file '
                              + 'first or choose a different folder. '
                              + 'Instructions on how to download the '
                              + 'file have been opened in your PDF '
                              + 'viewer.')
    raise FileExistsError(f'The file {file_path} could not '
                          + 'be found. Please download the file '
                          + 'first or choose a different folder. '
                          + 'The data can be downloaded from '
                          + 'http://sedac.ciesin.columbia.edu/'
                          + 'data/collection/gpw-v4/sets/browse, '
                          + 'e.g., https://sedac.ciesin.columbia.edu/data/'
                          + f'set/gpw-v4-population-count-rev{gpw_version}/'
                          + 'data-download '
                          + '(Free NASA Earthdata login required).'
                          )
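The two path layouts the function probes (a flat file in data_dir, then a per-dataset subfolder) can be sketched without CONFIG; the %-patterns below are assumptions modelled on the GPW v4 naming scheme, not the actual configured values:

from pathlib import Path

# assumed patterns, for illustration only
filename_gpw = 'gpw_v4_population_count_rev%02i_%04i_30_sec.tif'
dirname_gpw = 'gpw-v4-population-count-rev%02i_%04i_30_sec_tif'

data_dir = Path('/tmp/climada_data')
gpw_version, year = 11, 2020

flat = data_dir / (filename_gpw % (gpw_version, year))
nested = (data_dir / (dirname_gpw % (gpw_version, year))
          / (filename_gpw % (gpw_version, year)))
for candidate in (flat, nested):
    if candidate.is_file():
        print('found', candidate)
        break
else:
    print('no GPW file found')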
Example #11
    def set_ls_model_prob(self,
                          bbox,
                          ls_model="UNEP_NGI",
                          path_sourcefile=None,
                          n_years=500,
                          incl_neighbour=False,
                          max_dist=1000,
                          max_prob=0.000015,
                          check_plots=1):
        """....
        Parameters:
            ls_model (str): UNEP_NGI (prob., UNEP/NGI) or NASA (prob., NASA Nowcast)
            bbox (array): [N, E , S, W] for which LS hazard should be calculated.
            n_years (int): timespan for probabilistic simulations. Default is 500y.
            incl_neighbour (bool): whether to include affected neighbouring pixels
                with dist <= max_dist. Default is false
            max_dist (int): distance until which neighbouring pixels should count as affected
                if incl_neighbour = True. Default is 1000m.
            max_prob (float): maximum occurence probability that should be assigned to
                categorical hazard maps (as in LS_MODEL[2]). Default is 0.000015
            path_sourcefile (str): if ls_model is UNEP_NGI, use  path to NGI/UNEP file,
                retrieved previously as descriped in tutorial and stored in climada/data.
                if ls_model is NASA  provide path to combined daily or
                monthly rasterfile, retrieved and aggregated
                previously with landslide.get_nowcast_tiff() and
                landslide.combine_nowcast_tiff().
        Returns:
            Landslide() module: probabilistic LS hazard set
        """

        if ls_model == "UNEP_NGI":
            path_sourcefile = LS_FILE_DIR.joinpath('ls_pr_NGI_UNEP/ls_pr.tif')

            if not bbox:
                raise ValueError('Empty bounding box, please set bounds.')

            window_array = self._get_window_from_coords(path_sourcefile, bbox)
            pixel_height, pixel_width = self._get_raster_meta(
                path_sourcefile, window_array)
            self.set_raster([path_sourcefile],
                            window=Window(window_array[0], window_array[1],
                                          window_array[3], window_array[2]))
            # undo the scaling of the raw probability values (factor 10e6)
            self.intensity = self.intensity / 10e6
            self.centroids.set_raster_from_pix_bounds(bbox[0], bbox[3],
                                                      pixel_height,
                                                      pixel_width,
                                                      window_array[3],
                                                      window_array[2])
            LOGGER.info('Generating landslides...')
            self._intensity_prob_to_binom(n_years)
            self.check()

            if incl_neighbour:
                LOGGER.info('Finding neighbouring pixels...')
                self.centroids.set_meta_to_lat_lon()
                self.centroids.set_geometry_points()
                self._intensity_binom_to_range(max_dist)
                self.check()

            if check_plots == 1:
                fig1 = plt.subplots(nrows=1, ncols=1)[0]
                self.plot_raw()
                fig1.suptitle('Raw data: Occurrence prob of LS per year',
                              fontsize=14)

                fig2 = plt.subplots(nrows=1, ncols=1)[0]
                self.plot_events()
                fig2.suptitle('Prob. LS Hazard Set n_years = %i' % n_years,
                              fontsize=14)

            return self

        elif ls_model == "NASA":
            if not bbox:
                raise ValueError('Empty bounding box, please set bounds.')

            if not path_sourcefile:
                raise ValueError('Empty sourcefile, please specify a path.')
            window_array = self._get_window_from_coords(path_sourcefile, bbox)
            pixel_height, pixel_width = self._get_raster_meta(
                path_sourcefile, window_array)
            self.set_raster([path_sourcefile],
                            window=Window(window_array[0], window_array[1],
                                          window_array[3], window_array[2]))
            LOGGER.info(
                'Setting probability values from categorical landslide hazard levels...'
            )
            self._intensity_cat_to_prob(max_prob)
            self.centroids.set_raster_from_pix_bounds(bbox[0], bbox[3],
                                                      pixel_height,
                                                      pixel_width,
                                                      window_array[3],
                                                      window_array[2])
            LOGGER.info('Generating binary landslides...')
            self._intensity_prob_to_binom(n_years)
            self.check()

            if incl_neighbour:
                LOGGER.info('Finding neighbouring pixels...')
                self.centroids.set_meta_to_lat_lon()
                self.centroids.set_geometry_points()
                self._intensity_binom_to_range(max_dist)
                self.check()

            if check_plots == 1:
                fig1, _ = plt.subplots(nrows=1, ncols=1)
                self.plot_raw()
                fig1.suptitle('Raw data: Occurrence prob of LS per year',
                              fontsize=14)

                fig2, _ = plt.subplots(nrows=1, ncols=1)
                self.plot_events()
                fig2.suptitle('Prob. LS Hazard Set n_years = %i' % n_years,
                              fontsize=14)

            return self

        else:
            raise KeyError('Specify the LS model to be used for the hazard-set '
                           'generation as ls_model=str')
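A hypothetical call, assuming the Landslide class lives at the import path below (it may differ between CLIMADA versions) and the UNEP/NGI source file has been retrieved as described in the tutorial:

from climada.hazard.landslide import Landslide  # import path is an assumption

haz = Landslide()
haz.set_ls_model_prob(bbox=[48, 23, 40, 20],  # [N, E, S, W]
                      ls_model='UNEP_NGI',
                      n_years=500,
                      incl_neighbour=False,
                      check_plots=0)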
Example #12
    def set_calibrated_regional_IFs(self,
                                    calibration_approach='TDR',
                                    q=.5,
                                    input_file_path=None,
                                    version=1):
        """ initiate TC wind impact functions based on Eberenz et al. (2020)

        Optional Parameters:
                calibration_approach (str):
                    'TDR' (default): Total damage ratio (TDR) optimization with
                        TDR=1.0 (simulated damage = reported damage from EM-DAT)
                    'TDR1.5' : Total damage ratio (TDR) optimization with
                        TDR=1.5 (simulated damage = 1.5*reported damage from EM-DAT)
                    'RMSF': Root-mean-squared fraction (RMSF) optimization
                    'EDR': quantile from individually fitted v_half per event,
                        i.e. v_half fitted to get EDR=1.0 for each event
                q (float): quantile between 0 and 1.0 to select
                    (EDR only, default=0.5, i.e. median v_half)
                input_file_path (str or DataFrame): full path to calibration
                    result file to be used instead of default file in repository
                    (expert users only)
                version (int): version of the calibration results file.
                    Default: 1.

        Returns:
            v_half (dict): IF slope parameter v_half per region

        Raises:
            ValueError
        """
        calibration_approach = calibration_approach.upper()
        if calibration_approach not in [
                'TDR', 'TDR1.0', 'TDR1.5', 'RMSF', 'EDR'
        ]:
            raise ValueError('calibration_approach is invalid')
        if 'EDR' in calibration_approach and (q < 0. or q > 1.):
            raise ValueError('Quantile q out of range [0, 1]')
        if calibration_approach == 'TDR':
            calibration_approach = 'TDR1.0'
        # load calibration results depending on approach:
        if isinstance(input_file_path, str):
            df_calib_results = pd.read_csv(input_file_path,
                                           encoding="ISO-8859-1",
                                           header=0)
        elif isinstance(input_file_path, pd.DataFrame):
            df_calib_results = input_file_path
        else:
            df_calib_results = pd.read_csv(SYSTEM_DIR.joinpath(
                'tc_if_cal_v%02.0f_%s.csv' % (version, calibration_approach)),
                                           encoding="ISO-8859-1",
                                           header=0)

        # define regions and parameters:
        v_0 = 25.7  # v_threshold based on Emanuel (2011)
        scale = 1.0

        regions_short = [
            'NA1', 'NA2', 'NI', 'OC', 'SI', 'WP1', 'WP2', 'WP3', 'WP4'
        ]
        regions_long = {
            'NA1': 'Caribbean and Mexico (NA1)',
            'NA2': 'USA and Canada (NA2)',
            'NI': 'North Indian (NI)',
            'OC': 'Oceania (OC)',
            'SI': 'South Indian (SI)',
            'WP1': 'South East Asia (WP1)',
            'WP2': 'Philippines (WP2)',
            'WP3': 'China Mainland (WP3)',
            'WP4': 'North West Pacific (WP4)',
            'all': 'Global',
            'GLB': 'Global',
            'ROW': 'Global',
        }

        # loop over calibration regions (column cal_region2 in df):
        reg_v_half = dict()
        for region in regions_short:
            df_reg = df_calib_results.loc[df_calib_results.cal_region2 ==
                                          region]
            df_reg = df_reg.reset_index(drop=True)
            reg_v_half[region] = np.round(df_reg['v_half'].quantile(q=q), 5)
        # rest of the world (ROW), calibrated by all data:
        regions_short = regions_short + ['ROW']
        if calibration_approach == 'EDR':
            reg_v_half[regions_short[-1]] = np.round(
                df_calib_results['v_half'].quantile(q=q), 5)
        else:
            df_reg = df_calib_results.loc[df_calib_results.cal_region2 ==
                                          'GLB']
            df_reg = df_reg.reset_index(drop=True)
            reg_v_half[regions_short[-1]] = np.round(
                df_reg['v_half'].values[0], 5)

        for idx, region in enumerate(regions_short):
            if_tc = IFTropCyclone()
            if_tc.set_emanuel_usa(if_id=int(idx + 1),
                                  v_thresh=v_0,
                                  v_half=reg_v_half[region],
                                  scale=scale)
            if_tc.name = regions_long[region]
            self.append(if_tc)
        return reg_v_half
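The per-region calibration boils down to a quantile over the v_half column, with the 'GLB' row serving the rest of the world; a self-contained sketch on a toy table (all numbers made up):

import numpy as np
import pandas as pd

df_calib_results = pd.DataFrame({
    'cal_region2': ['NA1', 'NA1', 'NA2', 'GLB'],
    'v_half': [74.7, 76.1, 59.6, 73.4],
})

q = 0.5
reg_v_half = {}
for region in ['NA1', 'NA2']:
    df_reg = df_calib_results.loc[df_calib_results.cal_region2 == region]
    reg_v_half[region] = np.round(df_reg['v_half'].quantile(q=q), 5)
# rest of the world: the single global calibration row
reg_v_half['ROW'] = np.round(
    df_calib_results.loc[df_calib_results.cal_region2 == 'GLB', 'v_half'].values[0], 5)
print(reg_v_half)  # NA1: 75.4 (median), NA2: 59.6, ROW: 73.4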
Example #13
def get_box_gpw(**parameters):
    """Reads data from GPW GeoTiff file and cuts out the data along a chosen
        bounding box.

    Parameters
    ----------
    gpw_path : pathlib.Path
        Absolute path where files are stored. Default: SYSTEM_DIR
    resolution : int
        The resolution in arcsec in which the data output is created.
    country_cut_mode : int
        Defines how the country is cut out: If 0, the country is only cut out
        with a bounding box. If 1, the country is cut out along its borders.
        Default: 0.
        #TODO: Unimplemented
    cut_bbox : array-like, shape (1,4)
        Bounding box (ESRI type) to be cut out.
        The layout of the bounding box corresponds to the bounding box of
        the ESRI shape files and is as follows:
        [minimum longitude, minimum latitude, maximum longitude, maximum latitude]
        If country_cut_mode = 1, the cut_bbox is overwritten/ignored.
    return_coords : int
        Determines whether latitude and longitude are delivered along with the
        gpw data (1) or only gpw_data is returned (0). Default: 0.
    add_one : boolean
        Determines whether the integer one is added to all cells to eliminate
        zero pixels. Default: False.
        #TODO: Unimplemented
    reference_year : int
        reference year, available years are:
        2000, 2005, 2010, 2015 (default), 2020

    Returns
    -------
    tile_temp : pandas.arrays.SparseArray
        GPW data
    lon : list
        list with longitudinal information on the GPW data. Same
        dimensionality as tile_temp (only returned if return_coords is 1).
    lat : list
        list with latitudinal information on the GPW data. Same
        dimensionality as tile_temp (only returned if return_coords is 1).
    """
    resolution = parameters.get('resolution', 30)
    cut_bbox = parameters.get('cut_bbox')
#    country_cut_mode = parameters.get('country_cut_mode', 0)
    return_coords = parameters.get('return_coords', 0)
    reference_year = parameters.get('reference_year', 2015)
    year = YEARS_AVAILABLE[np.abs(YEARS_AVAILABLE - reference_year).argmin()]

    if year != reference_year:
        LOGGER.info('Reference year: %i. Using nearest available year for GPW population data: %i',
                    reference_year, year)
    if cut_bbox is None and return_coords == 0:
        # if we don't have any bbox by now and we need one, use the global bbox
        cut_bbox = np.array((-180, -90, 180, 90))
    zoom_factor = 30 / resolution  # original resolution is 30 arc-seconds
    file_exists = False
    for ver in GPW_VERSIONS:
        gpw_path = parameters.get('gpw_path', SYSTEM_DIR)
        fpath = gpw_path.joinpath(FILENAME_GPW % (ver, year))
        if fpath.is_file():
            file_exists = True
            LOGGER.info('GPW Version v4.%2i', ver)
            break

    try:
        if not file_exists:
            if SYSTEM_DIR.joinpath('GPW_help.pdf').is_file():
                subprocess.Popen([str(SYSTEM_DIR.joinpath('GPW_help.pdf'))], shell=True)
                raise FileExistsError(f'The file {fpath} could not '
                                      + 'be found. Please download the file '
                                      + 'first or choose a different folder. '
                                      + 'Instructions on how to download the '
                                      + 'file have been opened in your PDF '
                                      + 'viewer.')
            else:
                raise FileExistsError(f'The file {fpath} could not '
                                      + 'be found. Please download the file '
                                      + 'first or choose a different folder. '
                                      + 'The data can be downloaded from '
                                      + 'http://sedac.ciesin.columbia.edu/'
                                      + 'data/collection/gpw-v4/sets/browse')
        LOGGER.debug('Importing %s', str(fpath))
        gpw_file = gdal.Open(str(fpath))
        band1 = gpw_file.GetRasterBand(1)
        arr1 = band1.ReadAsArray()
        del band1, gpw_file
        arr1[arr1 < 0] = 0
        if arr1.shape != (17400, 43200) and arr1.shape != (21600, 43200):
            LOGGER.warning('GPW data dimensions mismatch. Actual dimensions: %s x %s',
                           arr1.shape[0], arr1.shape[1])
            LOGGER.warning('Expected dimensions: 17400x43200 or 21600x43200.')
        if zoom_factor != 1:
            total_population = arr1.sum()
            tile_temp = nd.zoom(arr1, zoom_factor, order=1)
            # normalize interpolated gridded population count to keep total population stable:
            tile_temp = tile_temp * (total_population / tile_temp.sum())
        else:
            tile_temp = arr1
        if tile_temp.ndim == 2:
            if cut_bbox is not None:
                tile_temp = _gpw_bbox_cutter(tile_temp, cut_bbox, resolution,
                                             arr1_shape=arr1.shape)
        else:
            raise TypeError('Matrix has an invalid number of dimensions '
                            '(more than 2). Could not continue operation.')
        tile_temp = pd.arrays.SparseArray(
            tile_temp.reshape((tile_temp.size,), order='F'),
            fill_value=0)
        del arr1
        if return_coords == 1:
            lon = tuple((cut_bbox[0], 1 / (3600 / resolution)))
            lat = tuple((cut_bbox[1], 1 / (3600 / resolution)))
            return tile_temp, lon, lat

        return tile_temp

    except Exception:
        LOGGER.error('Importing the GPW population density file failed.')
        raise
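The resampling step in the middle is worth isolating: bilinear zoom does not conserve the grid sum, so the result is rescaled to keep total population stable. A self-contained sketch:

import numpy as np
import scipy.ndimage as nd

rng = np.random.default_rng(0)
arr1 = rng.random((60, 120)) * 100  # toy population grid
zoom_factor = 0.5                   # e.g. 30 arcsec resampled to 60 arcsec

total_population = arr1.sum()
tile_temp = nd.zoom(arr1, zoom_factor, order=1)
# normalize the interpolated grid so the total stays unchanged
tile_temp = tile_temp * (total_population / tile_temp.sum())
print(round(arr1.sum(), 3), round(tile_temp.sum(), 3))  # identical totals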
Example #14
def load_nightlight_noaa(ref_year=2013, sat_name=None):
    """Get nightlight luminosites. Nightlight matrix, lat and lon ordered
    such that nightlight[1][0] corresponds to lat[1], lon[0] point (the image
    has been flipped).

    Parameters
    ----------
    ref_year : int, optional
        reference year. The default is 2013.
    sat_name : str, optional
        satellite provider (e.g. 'F10', 'F18', ...)

    Returns
    -------
    nightlight : sparse.csr_matrix
    coord_nl : np.array
    fn_light : str
    """
    # NOAA's URL used to retrieve nightlight satellite images:
    noaa_url = CONFIG.exposures.litpop.nightlights.noaa_url.str()
    if sat_name is None:
        fn_light = str(SYSTEM_DIR.joinpath('*' +
                             str(ref_year) + '*.stable_lights.avg_vis'))
    else:
        fn_light = str(SYSTEM_DIR.joinpath(sat_name +
                             str(ref_year) + '*.stable_lights.avg_vis'))
    # check if file exists in SYSTEM_DIR, download if not
    if glob.glob(fn_light + ".p"):
        fn_light = glob.glob(fn_light + ".p")[0]
        with open(fn_light, 'rb') as f_nl:
            nightlight = pickle.load(f_nl)
    elif glob.glob(fn_light + ".tif.gz"):
        fn_light = glob.glob(fn_light + ".tif.gz")[0]
        fn_light, nightlight = unzip_tif_to_py(fn_light)
    else:
        # iterate over all satellites if no satellite name provided
        if sat_name is None:
            ini_pre, end_pre = 18, 9
            for pre_i in np.arange(ini_pre, end_pre, -1):
                url = noaa_url + 'F' + str(pre_i) + str(ref_year) + '.v4.tar'
                try:
                    file_down = download_file(url, download_dir=SYSTEM_DIR)
                    break
                except ValueError:
                    pass
            if 'file_down' not in locals():
                raise ValueError(f'Nightlight for reference year {ref_year} not available. '
                                 'Try a different year.')
        else:
            url = noaa_url + sat_name + str(ref_year) + '.v4.tar'
            try:
                file_down = download_file(url, download_dir=SYSTEM_DIR)
            except ValueError as err:
                raise ValueError(f'Nightlight intensities for year {ref_year} and satellite'
                                 f' {sat_name} do not exist.') from err
        fn_light = untar_noaa_stable_nightlight(file_down)
        fn_light, nightlight = unzip_tif_to_py(fn_light)

    # first point and step
    coord_nl = np.empty((2, 2))
    coord_nl[0, :] = [NOAA_BORDER[1], NOAA_RESOLUTION_DEG]
    coord_nl[1, :] = [NOAA_BORDER[0], NOAA_RESOLUTION_DEG]

    return nightlight, coord_nl, fn_light
Example #15
    def calibrated_regional_vhalf(calibration_approach='TDR', q=.5,
                                  input_file_path=None, version=1):
        """return calibrated TC wind impact function slope parameter v_half
        per region based on Eberenz et al., 2021: https://doi.org/10.5194/nhess-21-393-2021

        Parameters
        ----------
        calibration_approach : str
            'TDR' (default): Total damage ratio (TDR) optimization with
                TDR=1.0 (simulated damage = reported damage from EM-DAT)
            'TDR1.5' : Total damage ratio (TDR) optimization with
                TDR=1.5 (simulated damage = 1.5*reported damage from EM-DAT)
            'RMSF': Root-mean-squared fraction (RMSF) optimization
            'EDR': quantile from individually fitted v_half per event,
                i.e. v_half fitted to get EDR=1.0 for each event
        q : float
            quantile between 0 and 1.0 to select
            (EDR only, default=0.5, i.e. median v_half)
        input_file_path : str or DataFrame
            full path to calibration result file to be used instead of the
            default file in the repository (expert users only)
        version : int
            version of the calibration results file. Default: 1.

        Raises
        ------
        ValueError

        Returns
        -------
        v_half : dict
            TC impact function slope parameter v_half per region
        """
        calibration_approach = calibration_approach.upper()
        if calibration_approach not in ['TDR', 'TDR1.0', 'TDR1.5', 'RMSF', 'EDR']:
            raise ValueError('calibration_approach is invalid')
        if 'EDR' in calibration_approach and (q < 0. or q > 1.):
            raise ValueError('Quantile q out of range [0, 1]')
        if calibration_approach == 'TDR':
            calibration_approach = 'TDR1.0'
        # load calibration results depending on approach:
        if isinstance(input_file_path, str):
            df_calib_results = pd.read_csv(input_file_path,
                                           encoding="ISO-8859-1", header=0)
        elif isinstance(input_file_path, pd.DataFrame):
            df_calib_results = input_file_path
        else:
            df_calib_results = pd.read_csv(
                SYSTEM_DIR.joinpath(
                             'tc_impf_cal_v%02.0f_%s.csv' % (version, calibration_approach)),
                encoding="ISO-8859-1", header=0)

        regions_short = ['NA1', 'NA2', 'NI', 'OC', 'SI', 'WP1', 'WP2', 'WP3', 'WP4']

        # loop over calibration regions (column cal_region2 in df):
        reg_v_half = dict()
        for region in regions_short:
            df_reg = df_calib_results.loc[df_calib_results.cal_region2 == region]
            df_reg = df_reg.reset_index(drop=True)
            reg_v_half[region] = np.round(df_reg['v_half'].quantile(q=q), 5)
        # rest of the world (ROW), calibrated by all data:
        regions_short = regions_short + ['ROW']
        if calibration_approach == 'EDR':
            reg_v_half[regions_short[-1]] = np.round(df_calib_results['v_half'].quantile(q=q), 5)
        else:
            df_reg = df_calib_results.loc[df_calib_results.cal_region2 == 'GLB']
            df_reg = df_reg.reset_index(drop=True)
            reg_v_half[regions_short[-1]] = np.round(df_reg['v_half'].values[0], 5)
        return reg_v_half
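A hypothetical usage, assuming this static method sits on CLIMADA's TC impact function set class at the path below (class name and path may differ between versions):

# import path and class name are assumptions
from climada.entity.impact_funcs.trop_cyclone import ImpfSetTropCyclone

v_half = ImpfSetTropCyclone.calibrated_regional_vhalf(calibration_approach='RMSF')
print(v_half['NA1'])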
Example #16
PARTICULAR PURPOSE.  See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with CLIMADA. If not, see <https://www.gnu.org/licenses/>.

---

Define climate change scenarios for tropical cyclones.
"""

import numpy as np
import pandas as pd

from climada.util.constants import SYSTEM_DIR

TOT_RADIATIVE_FORCE = SYSTEM_DIR.joinpath('rcp_db.xls')
"""© RCP Database (Version 2.0.5) http://www.iiasa.ac.at/web-apps/tnt/RcpDb.
generated: 2018-07-04 10:47:59."""


def get_knutson_criterion():
    """Fill changes in TCs according to Knutson et al. 2015 Global projections
    of intense tropical cyclone activity for the late twenty-first century from
    dynamical downscaling of CMIP5/RCP4.5 scenarios.

    Returns:
        list(dict) with items 'criteria' (dict with variable_name and list(possible values)),
        'year' (int), 'change' (float), 'variable' (str), 'function' (np function)
    """
    criterion = list()
    # NA

def world_bank_wealth_account(cntry_iso,
                              ref_year,
                              variable_name="NW.PCA.TO",
                              no_land=True):
    """
    Download and unzip wealth accounting historical data (1995, 2000, 2005, 2010, 2014)
    from World Bank (https://datacatalog.worldbank.org/dataset/wealth-accounting).
    Return requested variable for a country (cntry_iso) and a year (ref_year).

    Inputs:
        cntry_iso (str): ISO3-code of country, i.e. "CHN" for China
        ref_year (int): reference year
                         - available in data: 1995, 2000, 2005, 2010, 2014
                         - other years between 1995 and 2014 are interpolated
                         - for years outside range, indicator is scaled
                             proportionally to GDP
        variable_name (str): select one variable, i.e.:
            'NW.PCA.TO': Produced capital stock of country
                         incl. manufactured or built assets such as machinery,
                         equipment, and physical structures
                         and value of built-up urban land (24% mark-up)
            'NW.PCA.PC': Produced capital stock per capita
                         incl. manufactured or built assets such as machinery,
                         equipment, and physical structures
                         and value of built-up urban land (24% mark-up)
            'NW.NCA.TO': Total natural capital of country. Natural capital
                        includes the valuation of fossil fuel energy (oil, gas,
                        hard and soft coal) and minerals (bauxite, copper, gold,
                        iron ore, lead, nickel, phosphate, silver, tin, and zinc),
                        agricultural land (cropland and pastureland),
                        forests (timber and some nontimber forest products), and
                        protected areas.
            'NW.TOW.TO': Total wealth of country.
            Note: Values are measured at market exchange rates in constant 2014 US dollars,
                        using a country-specific GDP deflator.
        no_land (boolean): If True, return produced capital without built-up land value
                        (applies to 'NW.PCA.*' only). Default = True.
    """
    try:
        data_file = SYSTEM_DIR.joinpath(FILE_WORLD_BANK_WEALTH_ACC)
        if not data_file.is_file():
            data_file = SYSTEM_DIR.joinpath('Wealth-Accounts_CSV',
                                            FILE_WORLD_BANK_WEALTH_ACC)
        if not data_file.is_file():
            if not SYSTEM_DIR.joinpath('Wealth-Accounts_CSV').is_dir():
                SYSTEM_DIR.joinpath('Wealth-Accounts_CSV').mkdir()
            file_down = download_file(WORLD_BANK_WEALTH_ACC)
            with zipfile.ZipFile(file_down, 'r') as zip_ref:
                zip_ref.extractall(SYSTEM_DIR.joinpath('Wealth-Accounts_CSV'))
            Path(file_down).unlink()
            LOGGER.debug('Download and unzip complete. Unzipping %s',
                         str(data_file))

        data_wealth = pd.read_csv(data_file, sep=',', index_col=None, header=0)
    except Exception as err:
        raise type(
            err)('Downloading World Bank Wealth Accounting Data failed: ' +
                 str(err)) from err

    data_wealth = data_wealth[
        data_wealth['Country Code'].str.contains(cntry_iso)
        & data_wealth['Indicator Code'].str.contains(
            variable_name)].loc[:, '1995':'2014']
    years = list(map(int, list(data_wealth)))
    if data_wealth.size == 0 and 'NW.PCA.TO' in variable_name:  # if country is not found in data
        LOGGER.warning(
            'No data available for country. Using non-financial wealth instead'
        )
        gdp_year, gdp_val = gdp(cntry_iso, ref_year)
        fac = wealth2gdp(cntry_iso)[1]
        return gdp_year, np.around((fac * gdp_val), 1), 0
    if ref_year in years:  # indicator for reference year is available directly
        result = data_wealth.loc[:, str(ref_year)].values[0]
    elif np.min(years) < ref_year < np.max(years):  # interpolate between years
        result = np.interp(ref_year, years, data_wealth.values[0, :])
    elif ref_year < np.min(years):  # scale proportionally to GDP
        gdp_year, gdp0_val = gdp(cntry_iso, np.min(years))
        gdp_year, gdp_val = gdp(cntry_iso, ref_year)
        result = data_wealth.values[0, 0] * gdp_val / gdp0_val
        ref_year = gdp_year
    else:
        gdp_year, gdp0_val = gdp(cntry_iso, np.max(years))
        gdp_year, gdp_val = gdp(cntry_iso, ref_year)
        result = data_wealth.values[0, -1] * gdp_val / gdp0_val
        ref_year = gdp_year
    if 'NW.PCA.' in variable_name and no_land:
        # remove value of built-up land from produced capital
        result = result / 1.24
    return ref_year, np.around(result, 1), 1
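Inside the data range the value is a plain linear interpolation over the five available years; outside the range it is scaled by the GDP ratio instead. A self-contained sketch of the interpolation branch with made-up values:

import numpy as np

years = [1995, 2000, 2005, 2010, 2014]
values = np.array([2.0e12, 2.4e12, 2.9e12, 3.5e12, 3.9e12])  # made-up series

ref_year = 2007  # inside the range -> interpolate between 2005 and 2010
print(np.interp(ref_year, years, values))  # 3.14e12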