Example #1
# In these calculations we use:
# \begin{align*}
# IRF(t) = \frac{q_1}{d_1} \exp\Big(\frac{-t}{d_1}\Big) + \frac{q_2}{d_2} \exp\Big(\frac{-t}{d_2}\Big)
# \end{align*}
#
# where the constants $q_i$ and $d_i$ are read in below.
#
#
# %% [markdown] tags=[]
# # Code + figures

# %% jp-MarkdownHeadingCollapsed=true tags=[]
fn_IRF_constants = INPUT_DATA_DIR_BADC / 'recommended_irf_from_2xCO2_2021_02_25_222758.csv'

irf_consts = read_csv_badc(fn_IRF_constants).set_index('id')

ld1 = 'd1 (yr)'
ld2 = 'd2 (yr)'
lq1 = 'q1 (K / (W / m^2))'
lq2 = 'q2 (K / (W / m^2))'
median = 'median'
perc5 = '5th percentile'
perc95 = '95th percentile'
recommendation = 'recommendation'
irf_consts  # display the table of IRF constants

# %%
# Extract the IRF time constants, d1 and d2 (in years):
d1 = float(irf_consts[ld1])
d2 = float(irf_consts[ld2])
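
# %% [markdown]
# As a minimal sketch (assuming `irf_consts` holds a single row, as the `float` casts above require, and with `numpy` imported as `np`), the IRF from the formula above can be evaluated on a yearly grid:

# %%
import numpy as np

q1 = float(irf_consts[lq1])
q2 = float(irf_consts[lq2])
# IRF(t) = q1/d1 * exp(-t/d1) + q2/d2 * exp(-t/d2):
t = np.arange(0, 101)
irf = q1 / d1 * np.exp(-t / d1) + q2 / d2 * np.exp(-t / d2)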
Example #2
# %% [markdown]
# ## HFCs:
# For the HFCs, we use the radiative efficiencies (RE) from Hodnebrog et al. (2019) and the concentrations from chapter two to calculate the ERF.

# %% [markdown]
# ### Hodnebrog et al:

# %% [markdown]
# Read in Table 3 from Hodnebrog et al. (2019):

# %%
fp_hodnebrog = INPUT_DATA_DIR_BADC / 'hodnebrog_tab3.csv'

# %%
df_hodnebrog = read_csv_badc(fp_hodnebrog, index_col=[0, 1], header=[0, 1])
df_HFC = df_hodnebrog.loc[('Hydrofluorocarbons', )]
df_HFC

# %%
# Radiative efficiencies, RE (W m-2 ppb-1), for each HFC species:
RE_df = df_HFC['RE (Wm-2ppb-1)'].transpose()
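
# %% [markdown]
# A minimal sketch of the ERF step that follows (hypothetical numbers; the real concentrations come from chapter two and the real RE values from `RE_df` above): the ERF is the radiative efficiency times the concentration.

# %%
import pandas as pd

# Hypothetical stand-ins: made-up concentrations (ppb) and RE values (W m-2 ppb-1):
_conc_ppb = pd.DataFrame({'HFC-32': [0.0, 0.02], 'HFC-134a': [0.0, 0.1]}, index=[1750, 2019])
_re = pd.Series({'HFC-32': 0.11, 'HFC-134a': 0.17})
_erf_sketch = _conc_ppb * _re  # ERF (W m-2) per species and year
_erf_sketch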
Example #3
# Scale the cloud forcing so that its total matches scal_to:
scal_to = -0.38
aci_tot = df_dt_sep.sum()['Cloud']
scale_by = scal_to / aci_tot
print(f'Scaled down by {(1 - scale_by) * 100:.2f}%')
print(scal_to, aci_tot)

# Apply the scaling and inspect the new column totals:
df_dt_sep['Cloud'] = df_dt_sep['Cloud'] * scale_by
df_dt_sep.sum()
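
# %% [markdown]
# As a quick sanity check, the scaled cloud column should now sum to the target:

# %%
# Within floating-point error, the cloud total now equals scal_to:
assert abs(df_dt_sep['Cloud'].sum() - scal_to) < 1e-6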

# %% [markdown]
# # Uncertainties

# %% tags=[]

num_mod_lab = 'Number of models (Thornhill 2020)'
thornhill = read_csv_badc(fn_TAB2_THORNHILL, index_col=0)
thornhill.index = thornhill.index.rename('Species')
thornhill

# Factor converting a standard deviation to the 5th/95th percentile distance
# (for a normal distribution, the 95th percentile lies 1.645 sigma from the mean):
std_2_95th = 1.645

sd_tot = df_collins_sd['Total_sd']
df_err = pd.DataFrame(sd_tot.rename('std'))

# Standard error of the multi-model mean: std / sqrt(number of models):
df_err['SE'] = df_err['std'] / np.sqrt(thornhill[num_mod_lab])
df_err['95-50_SE'] = df_err['SE'] * std_2_95th
# For CO2, use the standard deviation directly instead of the SE-based estimate:
df_err.loc['CO2', '95-50_SE'] = df_err.loc['CO2', 'std']
df_err
Example #4
# ## Load data:

# %% [markdown]
# ERF data for the historical period:

# %%
from ar6_ch6_rcmipfigs.utils.badc_csv import read_csv_badc

# %%
path_AR_hist = constants.INPUT_DATA_DIR_BADC / 'AR6_ERF_1750-2019.csv'

path_AR_hist_minorGHG = constants.INPUT_DATA_DIR_BADC / 'AR6_ERF_minorGHGs_1750-2019.csv'
# use historical up to 2019:
use_hist_to_year = 2019

df_hist = read_csv_badc(path_AR_hist, index_col=0).copy()
df_hist_minor_GHG = read_csv_badc(path_AR_hist_minorGHG, index_col=0).copy()
df_hist.columns

# %%
df_hist
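
# %% [markdown]
# A minimal sketch of applying the cutoff (assuming the index is the year, stored as an integer):

# %%
# Keep only the historical period up to and including use_hist_to_year:
df_hist = df_hist.loc[:use_hist_to_year]
df_hist_minor_GHG = df_hist_minor_GHG.loc[:use_hist_to_year]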

# %% [markdown]
# Find SSP files:

# %% jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
path_ssps = constants.INPUT_DATA_DIR_BADC / 'SSPs'
paths = path_ssps.glob('*')
files = [x for x in paths if x.is_file()]
files
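
# %% [markdown]
# A minimal sketch of splitting the file list into total-ERF and minor-GHG files (assuming the SSP file names use the same `minorGHGs` tag as the historical files above):

# %%
files_minor = [f for f in files if 'minorGHGs' in f.name]
files_total = [f for f in files if 'minorGHGs' not in f.name]
files_total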
Example #5
# %% [markdown]
# The standard error is:
#
# $SE = \frac{\sigma}{\sqrt{n}}$

# %% [markdown]
# Finally, we want the 5-95th percentile range. Assuming a normal distribution, this amounts to multiplying the standard error by 1.645.
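#
# For example (illustrative numbers only): with $\sigma = 0.2$ and $n = 10$ models, $SE = 0.2/\sqrt{10} \approx 0.063$, so the 5th (or 95th) percentile lies $1.645 \times 0.063 \approx 0.104$ from the median.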

# %%
std_2_95th = 1.645

# %%
from ar6_ch6_rcmipfigs.utils.badc_csv import read_csv_badc
num_mod_lab = 'Number of models (Thornhill 2020)'
thornhill = read_csv_badc(INPUT_DATA_DIR_BADC / 'table2_thornhill2020.csv',
                          index_col=0)
thornhill.index = thornhill.index.rename('Species')
thornhill

# %% [markdown]
# ![](thornhill.jpg)

# %%
sd_tot = table_sd['Total_sd']
df_err = pd.DataFrame(sd_tot.rename('std'))

# Standard error of the multi-model mean: std / sqrt(number of models):
df_err['SE'] = df_err['std'] / np.sqrt(thornhill[num_mod_lab])
df_err['95-50_SE'] = df_err['SE'] * std_2_95th
# For CO2, use the standard deviation directly instead of the SE-based estimate:
df_err.loc['CO2', '95-50_SE'] = df_err.loc['CO2', 'std']
df_err
Example #6
# ### Read in CSV files:

# %%

dic_ssp = {}
for f in fp_uncertainty.glob('*.csv'):
    # File names end in _<scenario>_<percentile>.csv:
    ls_com = f.name.split('_')
    ssp = ls_com[-2]
    perc = ls_com[-1].split('.')[0]
    if ssp not in dic_ssp:
        dic_ssp[ssp] = dict()
    dic_ssp[ssp][perc] = read_csv_badc(f, index_col=0)
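
# %% [markdown]
# Inspect which scenarios and percentiles were found (the keys depend on the actual file names):

# %%
{ssp: sorted(dic_ssp[ssp].keys()) for ssp in dic_ssp}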

# %% [markdown]
# ### Various definitions

# %%
percentiles = ['p05', 'p16', 'p50', 'p84', 'p95']

# %%
scenarios_fl = [
    'ssp119', 'ssp126', 'ssp245', 'ssp370', 'ssp370-lowNTCF-aerchemmip',
    'ssp370-lowNTCF-gidden', 'ssp585'
]

# %%
dic_vars = dict(hfc='HFCs',