Beispiel #1
0
def untar_noaa_stable_nightlight(f_tar_ini):
    """Move input tar file to SYSTEM_DIR and extract stable light file.
    Returns absolute path of stable light file in format tif.gz.

    Parameters:
        f_tar_ini (str): absolute path of file

    Returns:
        f_tif_gz (Path): path of the extracted *stable_lights.avg_vis.tif.gz
            file inside SYSTEM_DIR

    Raises:
        ValueError: if the archive contains no stable-light intensity file
        tarfile.TarError: if extraction fails
    """
    # move to SYSTEM_DIR
    f_tar_dest = SYSTEM_DIR.joinpath(Path(f_tar_ini).name)
    shutil.move(f_tar_ini, f_tar_dest)
    # extract stable_lights.avg_vis.tif
    # BUGFIX: open the moved file (f_tar_dest); the original path f_tar_ini
    # no longer exists after the shutil.move above.
    tar_file = tarfile.open(f_tar_dest)
    extract_name = [name for name in tar_file.getnames()
                    if name.endswith('stable_lights.avg_vis.tif.gz')]
    if len(extract_name) == 0:
        msg = f'No stable light intensities for selected year and satellite in file {f_tar_ini}'
        LOGGER.error(msg)
        raise ValueError(msg)
    if len(extract_name) > 1:
        LOGGER.warning('found more than one potential intensity file in %s %s', f_tar_ini, extract_name)
    try:
        tar_file.extract(extract_name[0], SYSTEM_DIR)
    except tarfile.TarError as err:
        LOGGER.error(str(err))
        raise err
    finally:
        # always release the archive handle, even on extraction failure
        tar_file.close()
    f_tif_gz = SYSTEM_DIR.joinpath(extract_name[0])

    return f_tif_gz
Beispiel #2
0
def load_nightlight_noaa(ref_year=2013, sat_name=None):
    """Get nightlight luminosites. Nightlight matrix, lat and lon ordered
    such that nightlight[1][0] corresponds to lat[1], lon[0] point (the image
    has been flipped).

    Parameters:
        ref_year (int): reference year
        sat_name (str, optional): satellite provider (e.g. 'F10', 'F18', ...)

    Returns:
        nightlight (sparse.csr_matrix), coord_nl (np.array),
        fn_light (str)

    Raises:
        ValueError: if no nightlight data can be found or downloaded for the
            requested year (and satellite, if given)
    """
    # build glob pattern matching previously processed files in SYSTEM_DIR
    if sat_name is None:
        fn_light = str(SYSTEM_DIR.joinpath('*' +
                             str(ref_year) + '*.stable_lights.avg_vis'))
    else:
        fn_light = str(SYSTEM_DIR.joinpath(sat_name +
                             str(ref_year) + '*.stable_lights.avg_vis'))
    # check if file exists in SYSTEM_DIR, download if not
    if glob.glob(fn_light + ".p"):
        # pickled sparse matrix from a previous run: load directly
        fn_light = glob.glob(fn_light + ".p")[0]
        with open(fn_light, 'rb') as f_nl:
            nightlight = pickle.load(f_nl)
    elif glob.glob(fn_light + ".tif.gz"):
        # compressed GeoTIFF already downloaded: unzip, read and pickle it
        fn_light = glob.glob(fn_light + ".tif.gz")[0]
        fn_light, nightlight = unzip_tif_to_py(fn_light)
    else:
        # nothing local: download the tar archive from NOAA
        file_down = None
        if sat_name is None:
            # iterate over all satellites (F18 down to F10) until one of them
            # provides data for the requested year
            ini_pre, end_pre = 18, 9
            for pre_i in np.arange(ini_pre, end_pre, -1):
                url = NOAA_SITE + 'F' + str(pre_i) + str(ref_year) + '.v4.tar'
                try:
                    file_down = download_file(url, download_dir=SYSTEM_DIR)
                    break
                except ValueError:
                    pass
            # explicit sentinel instead of the fragile "'file_down' in locals()"
            if file_down is None:
                msg = (f'Nightlight for reference year {ref_year} not '
                       'available. Try an other year.')
                LOGGER.error(msg)
                raise ValueError(msg)
        else:
            url = NOAA_SITE + sat_name + str(ref_year) + '.v4.tar'
            try:
                file_down = download_file(url, download_dir=SYSTEM_DIR)
            except ValueError:
                LOGGER.error('Nightlight intensities for year %s and satellite'
                             ' %s do not exist.', ref_year, sat_name)
                raise
        fn_light = untar_noaa_stable_nightlight(file_down)
        fn_light, nightlight = unzip_tif_to_py(fn_light)

    # first point and step
    coord_nl = np.empty((2, 2))
    coord_nl[0, :] = [NOAA_BORDER[1], NOAA_RESOLUTION_DEG]
    coord_nl[1, :] = [NOAA_BORDER[0], NOAA_RESOLUTION_DEG]

    return nightlight, coord_nl, fn_light
def unzip_tif_to_py(file_gz):
    """Unzip image file, read it, flip the x axis, save values as pickle
    and remove tif.

    Parameters
    ----------
    file_gz : str
        file with .gz format to unzip

    Returns
    -------
    fname : Path
        path of the unzipped file (relative, in the working directory)
    nightlight : sparse.csr_matrix
    """
    LOGGER.info("Unzipping file %s.", file_gz)
    # strip the .gz suffix; the tif is written to the current directory
    file_name = Path(Path(file_gz).stem)
    with gzip.open(file_gz, 'rb') as f_in:
        with file_name.open('wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
    # FIX: use the public sparse.csc_matrix constructor; the
    # scipy.sparse.csc submodule access is deprecated/removed in SciPy >= 1.8
    nightlight = sparse.csc_matrix(plt.imread(file_name))
    # flip X axis (row indices) so that row 0 is the southernmost latitude
    nightlight.indices = -nightlight.indices + nightlight.shape[0] - 1
    nightlight = nightlight.tocsr()
    # remove the temporary tif once its content is in memory
    file_name.unlink()
    file_path = SYSTEM_DIR.joinpath(file_name.stem + ".p")
    save(file_path, nightlight)

    return file_name, nightlight
Beispiel #4
0
def world_bank(cntry_iso, ref_year, info_ind):
    """Get country's GDP from World Bank's data at a given year, or
    closest year value. If no data, get the natural earth's approximation.

    Parameters
    ----------
    cntry_iso : str
        key = ISO alpha_3 country
    ref_year : int
        reference year
    info_ind : str
        indicator of World Bank, e.g. 'NY.GDP.MKTP.CD'. If
        'INC_GRP', historical income groups from excel file used.

    Returns
    -------
    int, float
        (closest available year, indicator value at that year)

    Raises
    ------
    IOError, KeyError, IndexError
    """
    if info_ind != 'INC_GRP':
        # query the World Bank API over its full period; pandas_datareader
        # emits noisy warnings that are deliberately silenced here
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            cntry_gdp = wb.download(indicator=info_ind,
                                    country=cntry_iso,
                                    start=1960,
                                    end=2030)
        # index level 'year' holds string years; convert for distance sort
        years = np.array(
            [int(year) for year in cntry_gdp.index.get_level_values('year')])
        # sort rows by distance to ref_year, then drop years without data so
        # the first remaining row is the closest year WITH a value
        sort_years = np.abs(years - ref_year).argsort()
        close_val = cntry_gdp.iloc[sort_years].dropna()
        # row name is a (country, year) multi-index tuple; [1] is the year
        close_year = int(close_val.iloc[0].name[1])
        close_val = float(close_val.iloc[0].values)
    else:  # income group level
        fn_ig = SYSTEM_DIR.joinpath('OGHIST.xls')
        dfr_wb = pd.DataFrame()
        try:
            # download the historical income-group workbook on first use
            if not fn_ig.is_file():
                file_down = download_file(WORLD_BANK_INC_GRP)
                shutil.move(file_down, fn_ig)
            dfr_wb = pd.read_excel(fn_ig,
                                   'Country Analytical History',
                                   skiprows=5)
            # drop the 5 header rows below the column names and index by ISO3
            dfr_wb = dfr_wb.drop(dfr_wb.index[0:5]).set_index('Unnamed: 0')
            # map textual income-group labels to numeric levels
            dfr_wb = dfr_wb.replace(INCOME_GRP_WB_TABLE.keys(),
                                    INCOME_GRP_WB_TABLE.values())
        except (IOError, requests.exceptions.ConnectionError) as err:
            raise type(err)('Internet connection failed while downloading '
                            'historical income groups: ' + str(err)) from err

        cntry_dfr = dfr_wb.loc[cntry_iso]
        # index[1:] skips the non-year first column; argsort finds the years
        # closest to ref_year, then NaN entries are discarded
        close_val = cntry_dfr.iloc[
            np.abs(np.array(cntry_dfr.index[1:]) - ref_year).argsort() +
            1].dropna()
        close_year = close_val.index[0]
        close_val = int(close_val.iloc[0])

    return close_year, close_val
Beispiel #5
0
def _gdp_twn(ref_year, per_capita=False):
    """returns GDP for TWN (Republic of China / Taiwan Province of China) based
    on a CSV sheet downloaded from the
    International Monetary Fund (IMF).
    The reason for this special treatment is the
    lack of GDP data for TWN in the World Bank data

    Data Source:
        https://www.imf.org/external/pubs/ft/weo/2019/02/weodata/index.aspx
        https://www.imf.org/external/pubs/ft/weo/2019/02/weodata/weorept.aspx?sy=1980&ey=2024&scsm=1&ssd=1&sic=1&sort=country&ds=.&br=1&pr1.x=42&pr1.y=10&c=528&s=NGDPD%2CNGDP_D%2CNGDPDPC&grp=0&a=
        (saved as CSV with name GDP_TWN_IMF_WEO_data in SYSTEM_DIR)

    Parameters
    ----------
    ref_year : int
        reference year, i.e. the year for which a GDP value is required
    per_capita : boolean
        return GDP per capita? Default False.

    Returns
    -------
    close_year : int
        the year the value belongs to (ref_year clipped to [1980, 2024])
    close_val : float
        GDP (USD) or GDP per capita (USD) at close_year

    Raises
    ------
    FileNotFoundError
        if GDP_TWN_IMF_WEO_data.csv is not present in SYSTEM_DIR
    """
    fname = 'GDP_TWN_IMF_WEO_data.csv'
    # fail early with a clear message if the manually downloaded CSV is missing
    data_path = SYSTEM_DIR.joinpath(fname)
    if not data_path.is_file():
        raise FileNotFoundError(f'File {fname} not found in SYSTEM_DIR')
    if per_capita:
        var_name = 'Gross domestic product per capita, current prices'
    else:
        var_name = 'Gross domestic product, current prices'
    # clip the requested year to the range covered by the IMF sheet
    if ref_year < 1980:
        close_year = 1980
    elif ref_year > 2024:
        close_year = 2024
    else:
        close_year = ref_year
    # reuse the path checked above instead of re-hardcoding the filename
    data = pd.read_csv(data_path,
                       index_col=None,
                       header=0)
    close_val = data.loc[data['Subject Descriptor'] == var_name,
                         str(close_year)].values[0]
    # values are formatted with thousands separators, e.g. "1,234.567"
    close_val = float(close_val.replace(',', ''))
    if not per_capita:
        # totals are given in billions of USD; convert to USD
        close_val = close_val * 1e9
    return close_year, close_val
Beispiel #6
0
def wealth2gdp(cntry_iso,
               non_financial=True,
               ref_year=2016,
               file_name=FILE_GWP_WEALTH2GDP_FACTORS):
    """Get country's wealth-to-GDP factor from the
        Credit Suisse's Global Wealth Report 2017 (household wealth).
        Missing value: returns NaN.

    Parameters
    ----------
    cntry_iso : str
        key = ISO alpha_3 country
    non_financial : boolean
        use non-financial wealth (True)
        use total wealth (False)
    ref_year : int
        reference year

    Returns
    -------
    ref_year : int
        always 2016 (the only implemented year)
    val : float
        wealth-to-GDP factor, rounded to 5 decimals (NaN if unavailable)
    """
    fname = SYSTEM_DIR.joinpath(file_name)
    factors_all_countries = pd.read_csv(fname,
                                        sep=',',
                                        index_col=None,
                                        header=0,
                                        encoding='ISO-8859-1')
    if ref_year != 2016:
        LOGGER.warning('Reference year for the factor to convert GDP to '
                       'wealth was set to 2016 because other years have not '
                       'been implemented yet.')
        ref_year = 2016
    # both branches only differ in the column looked up: deduplicate
    col = 'NFW-to-GDP-ratio' if non_financial else 'TW-to-GDP-ratio'
    try:
        val = factors_all_countries[
            factors_all_countries.country_iso3 == cntry_iso][col].values[0]
    except (AttributeError, KeyError, IndexError):
        # country (or expected column) missing: fall back to global mean
        LOGGER.warning('No data for country, using mean factor.')
        val = factors_all_countries[col].mean()
    val = np.around(val, 5)
    return ref_year, val
import logging
from pathlib import Path
import numpy as np
import pandas as pd
import xarray as xr
import scipy as sp
from climada.entity.tag import Tag
import climada.util.coordinates as u_coord
from climada.util.constants import RIVER_FLOOD_REGIONS_CSV, SYSTEM_DIR
from .base import Exposures, INDICATOR_IMPF

LOGGER = logging.getLogger(__name__)

# Default hazard type abbreviation for this exposure module: river flood.
DEF_HAZ_TYPE = 'RF'

# NetCDF conversion table (GDP to asset value) at 2.5 arcmin resolution,
# expected to be present in SYSTEM_DIR.
CONVERTER = SYSTEM_DIR.joinpath('GDP2Asset_converter_2.5arcmin.nc')


class GDP2Asset(Exposures):
    def set_countries(self, countries=[], reg=[], ref_year=2000, path=None):
        """Model countries using values at reference year. If GDP or income
        group not available for that year, consider the value of the closest
        available year.

        Parameters:
            countries (list): list of country names ISO3
            ref_year (int, optional): reference year. Default: 2016
            path (string): path to exposure dataset (ISIMIP)
        """
        gdp2a_list = []
        tag = Tag()
def world_bank_wealth_account(cntry_iso,
                              ref_year,
                              variable_name="NW.PCA.TO",
                              no_land=True):
    """
    Download and unzip wealth accounting historical data (1995, 2000, 2005, 2010, 2014)
    from World Bank (https://datacatalog.worldbank.org/dataset/wealth-accounting).
    Return requested variable for a country (cntry_iso) and a year (ref_year).

    Inputs:
        cntry_iso (str): ISO3-code of country, i.e. "CHN" for China
        ref_year (int): reference year
                         - available in data: 1995, 2000, 2005, 2010, 2014
                         - other years between 1995 and 2014 are interpolated
                         - for years outside range, indicator is scaled
                             proportionally to GDP
        variable_name (str): select one variable, i.e.:
            'NW.PCA.TO': Produced capital stock of country
                         incl. manufactured or built assets such as machinery,
                         equipment, and physical structures
                         and value of built-up urban land (24% mark-up)
            'NW.PCA.PC': Produced capital stock per capita
                         incl. manufactured or built assets such as machinery,
                         equipment, and physical structures
                         and value of built-up urban land (24% mark-up)
            'NW.NCA.TO': Total natural capital of country. Natural capital
                        includes the valuation of fossil fuel energy (oil, gas,
                        hard and soft coal) and minerals (bauxite, copper, gold,
                        iron ore, lead, nickel, phosphate, silver, tin, and zinc),
                        agricultural land (cropland and pastureland),
                        forests (timber and some nontimber forest products), and
                        protected areas.
            'NW.TOW.TO': Total wealth of country.
            Note: Values are measured at market exchange rates in constant 2014 US dollars,
                        using a country-specific GDP deflator.
        no_land (boolean): If True, return produced capital without built-up land value
                        (applies to 'NW.PCA.*' only). Default = True.

    Returns:
        tuple (year (int), result (float), quality flag (int):
        1 = data from wealth accounts, 0 = GDP-based fallback)
    """
    try:
        data_file = SYSTEM_DIR.joinpath(FILE_WORLD_BANK_WEALTH_ACC)
        if not data_file.is_file():
            data_file = SYSTEM_DIR.joinpath('Wealth-Accounts_CSV',
                                            FILE_WORLD_BANK_WEALTH_ACC)
        if not data_file.is_file():
            if not SYSTEM_DIR.joinpath('Wealth-Accounts_CSV').is_dir():
                SYSTEM_DIR.joinpath('Wealth-Accounts_CSV').mkdir()
            file_down = download_file(WORLD_BANK_WEALTH_ACC)
            # context manager guarantees the archive handle is closed even
            # when extraction raises
            with zipfile.ZipFile(file_down, 'r') as zip_ref:
                zip_ref.extractall(SYSTEM_DIR.joinpath('Wealth-Accounts_CSV'))
            Path(file_down).unlink()
            LOGGER.debug('Download and unzip complete. Unzipping %s',
                         str(data_file))

        data_wealth = pd.read_csv(data_file, sep=',', index_col=None, header=0)
    except Exception as err:
        raise type(
            err)('Downloading World Bank Wealth Accounting Data failed: ' +
                 str(err)) from err

    # restrict to the requested country and indicator, year columns only
    data_wealth = data_wealth[
        data_wealth['Country Code'].str.contains(cntry_iso)
        & data_wealth['Indicator Code'].str.contains(
            variable_name)].loc[:, '1995':'2014']
    years = list(map(int, list(data_wealth)))
    if data_wealth.size == 0 and 'NW.PCA.TO' in variable_name:  # if country is not found in data
        LOGGER.warning(
            'No data available for country. Using non-financial wealth instead'
        )
        gdp_year, gdp_val = gdp(cntry_iso, ref_year)
        fac = wealth2gdp(cntry_iso)[1]
        return gdp_year, np.around((fac * gdp_val), 1), 0
    if ref_year in years:  # indicator for reference year is available directly
        # BUGFIX: np.str was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin str is the drop-in replacement
        result = data_wealth.loc[:, str(ref_year)].values[0]
    elif ref_year > np.min(years) and ref_year < np.max(years):  # interpolate
        result = np.interp(ref_year, years, data_wealth.values[0, :])
    elif ref_year < np.min(years):  # scale proportionally to GDP
        gdp_year, gdp0_val = gdp(cntry_iso, np.min(years))
        gdp_year, gdp_val = gdp(cntry_iso, ref_year)
        result = data_wealth.values[0, 0] * gdp_val / gdp0_val
        ref_year = gdp_year
    else:
        gdp_year, gdp0_val = gdp(cntry_iso, np.max(years))
        gdp_year, gdp_val = gdp(cntry_iso, ref_year)
        result = data_wealth.values[0, -1] * gdp_val / gdp0_val
        ref_year = gdp_year
    if 'NW.PCA.' in variable_name and no_land:
        # remove value of built-up land from produced capital
        result = result / 1.24
    return ref_year, np.around(result, 1), 1
PARTICULAR PURPOSE.  See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with CLIMADA. If not, see <https://www.gnu.org/licenses/>.

---

Define climate change scenarios for tropical cycones.
"""

import numpy as np
import pandas as pd

from climada.util.constants import SYSTEM_DIR

TOT_RADIATIVE_FORCE = SYSTEM_DIR.joinpath('rcp_db.xls')
"""© RCP Database (Version 2.0.5) http://www.iiasa.ac.at/web-apps/tnt/RcpDb.
generated: 2018-07-04 10:47:59."""


def get_knutson_criterion():
    """Fill changes in TCs according to Knutson et al. 2015 Global projections
    of intense tropical cyclone activity for the late twenty-first century from
    dynamical downscaling of CMIP5/RCP4.5 scenarios.

    Returns:
        list(dict) with items 'criteria' (dict with variable_name and list(possible values)),
        'year' (int), 'change' (float), 'variable' (str), 'function' (np function)
    """
    criterion = list()
    # NA
    def calibrated_regional_vhalf(calibration_approach='TDR', q=.5,
                                  input_file_path=None, version=1):
        """return calibrated TC wind impact function slope parameter v_half
        per region based on Eberenz et al., 2021: https://doi.org/10.5194/nhess-21-393-2021

        Parameters
        ----------
        calibration_approach : str
            'TDR' (default): Total damage ratio (TDR) optimization with
                TDR=1.0 (simulated damage = reported damage from EM-DAT)
            'TDR1.5' : Total damage ratio (TDR) optimization with
                TDR=1.5 (simulated damage = 1.5*reported damage from EM-DAT)
            'RMSF': Root-mean-squared fraction (RMSF) optimization
            'EDR': quantile from individually fitted v_half per event,
                i.e. v_half fitted to get EDR=1.0 for each event
        q : float
            quantile between 0 and 1.0 to select
            (EDR only, default=0.5, i.e. median v_half)
        input_file_path : str or DataFrame
            full path to calibration
            result file to be used instead of default file in repository
            (expert users only)
        version : int
            version number of the default calibration result file in the
            repository; only used when input_file_path is None. Default: 1.

        Raises
        ------
        ValueError

        Returns
        -------
        v_half : dict
            TC impact function slope parameter v_half per region
        """
        calibration_approach = calibration_approach.upper()
        if calibration_approach not in ['TDR', 'TDR1.0', 'TDR1.5', 'RMSF', 'EDR']:
            raise ValueError('calibration_approach is invalid')
        if 'EDR' in calibration_approach and (q < 0. or q > 1.):
            raise ValueError('Quantile q out of range [0, 1]')
        if calibration_approach == 'TDR':
            calibration_approach = 'TDR1.0'
        # load calibration results depending on approach:
        if isinstance(input_file_path, str):
            df_calib_results = pd.read_csv(input_file_path,
                                           encoding="ISO-8859-1", header=0)
        elif isinstance(input_file_path, pd.DataFrame):
            df_calib_results = input_file_path
        else:
            # fall back to the versioned default file shipped in SYSTEM_DIR
            df_calib_results = pd.read_csv(
                SYSTEM_DIR.joinpath(
                             'tc_impf_cal_v%02.0f_%s.csv' % (version, calibration_approach)),
                encoding="ISO-8859-1", header=0)

        regions_short = ['NA1', 'NA2', 'NI', 'OC', 'SI', 'WP1', 'WP2', 'WP3', 'WP4']

        # loop over calibration regions (column cal_region2 in df):
        reg_v_half = dict()
        for region in regions_short:
            df_reg = df_calib_results.loc[df_calib_results.cal_region2 == region]
            df_reg = df_reg.reset_index(drop=True)
            reg_v_half[region] = np.round(df_reg['v_half'].quantile(q=q), 5)
        # rest of the world (ROW), calibrated by all data:
        if calibration_approach == 'EDR':
            # EDR: quantile over v_half of ALL events, regardless of region
            reg_v_half['ROW'] = np.round(df_calib_results['v_half'].quantile(q=q), 5)
        else:
            # other approaches: use the globally calibrated ('GLB') value
            df_reg = df_calib_results.loc[df_calib_results.cal_region2 == 'GLB']
            df_reg = df_reg.reset_index(drop=True)
            reg_v_half['ROW'] = np.round(df_reg['v_half'].values[0], 5)
        return reg_v_half