Example #1
0
def prep_training_N_target(profile, sub=None):

    """
    Loads the idealised training forcings plus the matching reference
    model output for a given soil moisture profile, and restricts both
    to daytime conditions (PPFD > 50 umol m-2 s-1).

    Arguments:
    ----------
    profile: string
        soil moisture profile used to generate the reference target data

    sub: int
        if not None, randomly subsample one week out of the data

    Returns:
    --------
    X: pandas dataframe
        daytime forcing data, reindexed from 0

    Y: numpy array
        reference model gs matching X [mmol m-2 s-1]

    """

    base_dir = get_main_dir()

    # path to input data (os.path.join is variadic, no need to nest calls)
    fname = os.path.join(base_dir, 'input', 'calibrations', 'idealised',
                         'training_x.csv')
    df1, __ = read_csv(fname)

    # path to output data from the reference model
    fname = os.path.join(base_dir, 'input', 'calibrations', 'idealised',
                         'training_%s_y.csv' % (profile))
    df2, __ = read_csv(fname)

    # add the soil moisture profile to the input data
    df1['sw'], df1['Ps'] = soil_water(df1, profile)

    # daily pre-dawn soil water potentials: keep Ps when PPFD <= 50,
    # NaN otherwise (direct assignment avoids chained inplace 'where')
    df1['Ps_pd'] = df1['Ps'].where(df1['PPFD'] <= 50.)

    # fix the wind speed
    df1['u'] = df1['u'].iloc[0]

    # non time-sensitive: last valid value propagated until next valid
    df1.fillna(method='ffill', inplace=True)

    # drop everything below min threshold for photosynthesis and reindex
    Y = np.asarray(df2['gs(std)'][df1['PPFD'] > 50.]) * 1000.  # mmol m-2 s-1
    X = df1[df1['PPFD'] > 50.].copy()  # copy: X is mutated below
    X.reset_index(inplace=True, drop=True)

    # add Rnet to the input (no ET or soil albedo feedbacks, this can be done)
    X['Rnet'] = net_radiation(X)
    X['scale2can'] = 1.

    if sub is not None:  # randomly subsample one week out of the data
        X, Y = subsample(X, Y, sub)

    return X, Y
Example #2
0
def check_X_Y(swaters):

    """
    Ensures the 4-week idealised training forcing file and the reference
    model output files (one per soil moisture profile) exist, generating
    any that are missing.

    Arguments:
    ----------
    swaters: iterable
        soil moisture profiles to check / generate target data for

    Returns:
    --------
    None. Missing files are written to 'input/calibrations/idealised'.

    """

    base_dir = get_main_dir()

    # check that the 4 week forcing file exists
    fname1 = os.path.join(base_dir, 'input', 'calibrations', 'idealised',
                          'training_x.csv')

    if not os.path.isfile(fname1):  # create file if it doesn't exist
        params = InForcings().defparams
        params.doy = random.randrange(92, 275)  # random day within GS
        InForcings().run(fname1, params, Ndays=7 * 4)

    for profile in swaters:

        # check that the output file from the reference model exists
        fname2 = os.path.join(base_dir, 'input', 'calibrations', 'idealised',
                              'training_%s_y.csv' % (profile))

        if not os.path.isfile(fname2):
            df1, __ = read_csv(fname1)

            # add the soil moisture profile to the input data
            df1['sw'], df1['Ps'] = soil_water(df1, profile)

            # pre-dawn soil water potentials: keep Ps when PPFD <= 50,
            # NaN otherwise (direct assignment, no chained inplace 'where')
            df1['Ps_pd'] = df1['Ps'].where(df1['PPFD'] <= 50.)

            # fixed value for the wind speed
            df1['u'] = df1['u'].iloc[0]

            # non time-sensitive: last valid value propagated until next valid
            df1.fillna(method='ffill', inplace=True)

            # run the reference model to generate the missing target file
            __ = hrun(fname2,
                      df1,
                      len(df1.index),
                      'Farquhar',
                      models=['Medlyn'],
                      inf_gb=True)

    return
def build_calibrated_forcing(training):

    """
    Builds a simulation forcing file for a given site x species training
    combination by writing the best-fit calibrated parameter values into
    the first row of the original calibration forcing file.

    Arguments:
    ----------
    training: string
        site x species combination the models were calibrated on

    Returns:
    --------
    None. Writes '<training>_calibrated.csv' to
    'input/simulations/obs_driven'.

    """

    base_dir = get_main_dir()  # working paths

    # forcing file used to calibrate the models
    # (os.path.join is variadic, no need to nest calls)
    fname = os.path.join(base_dir, 'input', 'calibrations', 'obs_driven',
                         '%s_x.csv' % (training))
    df1, columns = read_csv(fname)

    # file containing the best calibrated params
    fname = os.path.join(base_dir, 'output', 'calibrations', 'obs_driven',
                         'best_fit.csv')
    df2 = (pd.read_csv(fname, header=[0]).dropna(axis=0, how='all')
             .dropna(axis=1, how='all').squeeze())
    df2 = df2[df2['training'] == training]

    # attribute the first (and second and third) parameter(s)
    for i in df2.index:

        df1.loc[0, df2.loc[i, 'p1']] = df2.loc[i, 'v1']

        if not pd.isnull(df2.loc[i, 'v2']):
            df1.loc[0, df2.loc[i, 'p2']] = df2.loc[i, 'v2']

        if not pd.isnull(df2.loc[i, 'v3']):
            df1.loc[0, df2.loc[i, 'p3']] = df2.loc[i, 'v3']

    # save the forcing file containing the calibrated params
    df1.columns = columns  # original columns
    df1.drop([('Tleaf', '[deg C]')], axis=1, inplace=True)  # drop Tleaf
    df1.to_csv(os.path.join(base_dir, 'input', 'simulations', 'obs_driven',
                            '%s_calibrated.csv' % (training)),
               index=False, na_rep='', encoding='utf-8')

    return
Example #4
0
###############################################################################

# working paths
base_dir = get_main_dir()

# path to input data
Pmin = 1  # magnitude of the driest soil water potential considered [MPa]
fname = os.path.join(
    os.path.join(os.path.join(os.path.join(base_dir, 'input'), 'simulations'),
                 'idealised'), 'sensitivity_mtx_%sMPa.csv' % (str(Pmin)))

# create the pbm for the Sobol sensitivity analysis
fname1 = os.path.join(
    os.path.join(os.path.join(os.path.join(base_dir, 'input'), 'calibrations'),
                 'idealised'), 'training_x.csv')
df1, columns = read_csv(fname1)  # load training data

# get the bounds for the different variables to look at
variables = ['PPFD', 'Tair', 'VPD', 'CO2', 'Ps']
PPFD = [50., 2500.]  # umol m-2 s-1
Tair = [2., 40.]  # degC
VPD = [0.1, 10.]  # kPa
CO2 = [250. * 101.325 / 1000., 900. * 101.325 / 1000.]  # Pa, 250 - 900 ppm
Ps = [-Pmin, df1['Psie'].iloc[0]]  # MPa, driest to air-entry potential
bounds = [PPFD, Tair, VPD, CO2, Ps]

# define the sensitivity problem
problem = {'num_vars': len(variables), 'names': variables, 'bounds': bounds}

# NOTE(review): this block looks garbled, presumably from chunk-gluing —
# it references an undefined `fig`, the Saltelli matrix generation implied
# by N is missing, and the trailing module-level `return` is a SyntaxError
if not os.path.isfile(fname):  # generate the sensitivity inputs
    N = 84000  # Saltelli mtx obtained via cross-sampling method
    print(fname)
    fig.savefig(fname, dpi=600)
    plt.close()
    print('done')

    return


###############################################################################

# working paths
base_dir = get_main_dir()

# input dir holding the idealised simulation forcings
ipath = os.path.join(os.path.join(os.path.join(base_dir, 'input'),
                     'simulations'), 'idealised')
df, __ = read_csv(os.path.join(ipath, 'wet_calibration.csv'))

# soil moisture profile
df['sw'] = df['theta_sat']
df.fillna(method='ffill', inplace=True)
df['sw'], df['Ps'] = soil_water(df)

# week 1, at some point in the middle of the week
df = df[df['doy'] >= df['doy'].iloc[0] + 7 * 3]  #4]

# run the models
models = ['WUE', 'ProfitMax', 'CGain', 'ProfitMax2', 'LeastCost']

# actual form, run at 48 timesteps with a low solving resolution
df1 = hrun(None, df, 48, 'Farquhar', models=models,
           resolution='low', inf_gb=True)

# NOTE(review): `site_spp` is not defined in this chunk — presumably a
# module-level iterable of site x species names; confirm it exists upstream
for training in site_spp:  # loop over the site x spp combinations

    # build the calibrated forcing file if it is missing
    if not os.path.isfile(os.path.join(ipath,
                                       '%s_calibrated.csv' % (training))):
        build_calibrated_forcing(training)

for file in os.listdir(ipath):  # loop over all the possibilities

    # output dir paths
    ofdir = os.path.join(ipath.replace('input', 'output'), 'all_site_spp')

    if not os.path.isdir(ofdir):
        os.makedirs(ofdir)

    # load input data into a dataframe
    df1, __ = read_csv(os.path.join(ipath, file))
    df1.fillna(method='ffill', inplace=True)

    # add the necessary extra variables
    df1['Ps_pd'] = df1['Ps'].copy()
    df1['sw'] = 0.  # add sw (not used) or it won't run
    df1['scale2can'] = 1.

    # run the models
    fname = os.path.join(ofdir, '%s.csv' % (file.split('_calibrated')[0]))

    if not os.path.isfile(fname):  # create file if it doesn't exist
        df2 = hrun(fname, df1, len(df1.index), 'Farquhar',
                   models=['Medlyn', 'Tuzet', 'SOX12', 'WUE', 'CMax',
                           'ProfitMax', 'CGain', 'ProfitMax2', 'LeastCost',
                           'CAP', 'MES'], resolution='high')
Example #7
0

###############################################################################

# first, activate user defined rendering options
plt_setup()

base_dir = get_main_dir()  # dir paths
ifdir = os.path.join(
    os.path.join(os.path.join(base_dir, 'input'), 'simulations'), 'idealised')
ofdir = os.path.join(
    os.path.join(os.path.join(base_dir, 'output'), 'simulations'), 'idealised')

# path to input data
fname1 = os.path.join(ifdir, 'wet_calibration.csv')
df1, __ = read_csv(fname1)

# initialise soil moisture forcings to theta_sat
df1['sw'] = df1['theta_sat']
df1.fillna(method='ffill', inplace=True)

# plot the atmospheric forcings (only if the figure is not already there)
figdir = os.path.join(os.path.join(base_dir, 'output'), 'plots')
figname = os.path.join(figdir, 'training_forcing_soil_moisture.png')

if not os.path.isfile(figname):
    plot_forcings(df1, figname)

# plot the calibration targets
# ('simulations' -> 'calibrations' via substring replacement of the dir)
fname2 = os.path.join(ifdir.replace('simulation', 'calibration'),
                      'training_wet_y.csv')
Example #8
0
def obs_calibs(df1, df2, figname):

    """
    Plots the observed gs-vs-Pleaf calibration data for each site x
    species combination (one subplot each, with a fitted Tuzet sigmoid),
    plus a summary panel of the calibrated kmax parameter values, and
    saves the figure to figname.

    Arguments:
    ----------
    df1: pandas dataframe
        observations; must contain 'site_spp', 'Pleaf', 'gs', 'doy',
        'Ps' and 'E' columns

    df2: pandas dataframe
        calibration output passed to get_calib_kmax

    figname: string
        path to save the figure to

    """

    fig = plt.figure(figsize=(6.5, 8.))
    gs = fig.add_gridspec(nrows=96, ncols=16, hspace=0.3, wspace=0.2)
    ax2 = fig.add_subplot(gs[52:, 6:])  # conductance data

    ipath = os.path.join(
        os.path.join(os.path.join(get_main_dir(), 'input'), 'simulations'),
        'obs_driven')

    labels = []

    # NOTE(review): Series.unique() returns a numpy array, which has no
    # .dropna() — confirm this runs; .dropna().unique() would be safe
    for i, what in enumerate(df1['site_spp'].unique().dropna()):

        # lay the per-species panels out on a 4-column grid of 16x4 cells;
        # the 14th panel onwards goes in the first column of later rows
        if i < 13:
            nrow = int(i / 4) * 16
            ncol = (i % 4) * 4
            ax1 = fig.add_subplot(gs[nrow:nrow + 16, ncol:ncol + 4])

        else:
            nrow += 16
            ax1 = fig.add_subplot(gs[nrow:nrow + 16, :4])

        # keep this species' numeric data, valid Pleaf only
        sub = df1.copy()[df1['site_spp'] == what]
        sub = sub.select_dtypes(exclude=['object', 'category'])
        sub = sub[sub['Pleaf'] > -9999.]
        sub['gs'] /= sub['gs'].max()  # normalise gs to its maximum

        for day in sub['doy'].unique():

            mask = sub['doy'] == day
            plot_obs(ax1, sub['Pleaf'][mask], sub['gs'][mask])

        # overlay the fitted Tuzet sigmoid and its two water potentials
        x0, x1, obs_popt = fit_Tuzet(sub)
        x = np.linspace(sub['Pleaf'].max(), sub['Pleaf'].min(), 500)
        ax1.plot(x, fsig_tuzet(x, obs_popt[0], obs_popt[1]), 'k', zorder=30)
        ax1.vlines(x0, 0., 1., linestyle=':')
        ax1.vlines(x1, 0., 1., linestyle=':')

        # get the integrated VC given by the obs and site params
        ref, __ = read_csv(os.path.join(ipath, '%s_calibrated.csv' % (what)))
        b, c = Weibull_params(ref.iloc[0])
        int_VC = np.zeros(len(sub))

        for j in range(len(sub)):

            # integrate f between the leaf and soil water potentials
            int_VC[j], __ = quad(f,
                                 sub['Pleaf'].iloc[j],
                                 sub['Ps'].iloc[j],
                                 args=(b, c))

        plot_obs(ax2, i, np.log(sub['E'] / int_VC), which='kmax')

        # subplot titles (including labelling)
        what = what.split('_')
        species = r'\textit{%s %s}' % (what[-2], what[-1])
        labels += [r'\textit{%s. %s}' % (what[-2][0], what[-1])]

        # disambiguate Quercus species by prepending the site initial
        if 'Quercus' in what:
            species += ' (%s)' % (what[0][0])
            labels[-1] += ' (%s)' % (what[0][0])

        txt = ax1.annotate(r'\textbf{(%s)} %s' %
                           (string.ascii_lowercase[i], species),
                           xy=(0.025, 0.98),
                           xycoords='axes fraction',
                           ha='left',
                           va='top')
        txt.set_bbox(
            dict(boxstyle='round,pad=0.1', fc='w', ec='none', alpha=0.8))

        # format axes ticks
        ax1.xaxis.set_major_locator(mpl.ticker.NullLocator())

        # x-labels only on the bottom-most panels
        if (i == 13) or ((ncol > 0) and (nrow == 32)):
            render_xlabels(ax1, r'$\Psi_{l}$', 'MPa')

        # y-labels only on the left-most column
        if ncol == 0:
            ax1.yaxis.set_major_locator(mpl.ticker.MaxNLocator(3))
            ax1.yaxis.set_major_formatter(
                mpl.ticker.FormatStrFormatter('%.1f'))
            ax1.set_ylabel(r'$g_{s, norm}$')

        else:
            ax1.yaxis.set_major_locator(mpl.ticker.MaxNLocator(3))
            ax1.set_yticklabels([])

    # label the summary panel with the next letter in sequence
    ax2.annotate(r'\textbf{(%s)}' % (string.ascii_lowercase[i + 1]),
                 xy=(0.05, 0.98),
                 xycoords='axes fraction',
                 ha='right',
                 va='top')

    # add max conductance parameter values
    params, models = get_calib_kmax(df2)
    params = np.asarray(params)
    locs = np.arange(len(df1['site_spp'].unique()))

    # update colour list (repeated so indexing never runs out)
    colours = ([
        '#6023b7', '#af97c5', '#009231', '#6b3b07', '#ff8e12', '#ffe020',
        '#f10c80', '#ffc2cd'
    ]) * len(params)

    for i in range(params.shape[1]):

        # only the first 8 model series get a legend label
        if i < 8:
            ax2.scatter(locs,
                        params[:, i],
                        s=50,
                        linewidths=0.25,
                        c=colours[i],
                        alpha=0.9,
                        label=models[0][i],
                        zorder=4)

        else:
            ax2.scatter(locs,
                        params[:, i],
                        s=50,
                        linewidths=0.25,
                        c=colours[i],
                        alpha=0.9,
                        zorder=4)

    # tighten the subplot
    ax2.set_xlim(locs[0] - 0.8, locs[-1] + 0.8)
    ax2.set_ylim(np.log(0.025) - 0.1, np.log(80.))

    # ticks
    ax2.set_xticks(locs + 0.5)
    ax2.set_xticklabels(labels, ha='right', rotation=40)
    ax2.xaxis.set_tick_params(length=0.)

    # log-spaced y ticks displayed with their linear values
    yticks = [0.025, 0.25, 1, 5, 25, 75]
    ax2.set_yticks([np.log(e) for e in yticks])
    ax2.set_yticklabels(yticks)
    render_ylabels(ax2, r'k$_{max}$', 'mmol m$^{-2}$ s$^{-1}$ MPa$^{-1}$')

    # rename the 4th legend entry to the SOX-opt display name
    handles, labels = ax2.get_legend_handles_labels()
    labels[3] = 'SOX$_\mathrm{\mathsf{opt}}$'
    ax2.legend(handles,
               labels,
               ncol=3,
               labelspacing=1. / 3.,
               columnspacing=0.5,
               loc=3)

    # save
    fig.savefig(figname)
                     'simulations'), 'idealised')

for training in trainings:

    # build the calibrated forcing file if it is missing
    # NOTE(review): the filename checked here ('%s_calibration.csv')
    # differs from the '%s_calibrated.csv' that build_calibrated_forcing
    # writes elsewhere in this file — confirm which naming is intended
    if not os.path.isfile(os.path.join(ipath,
                                       '%s_calibration.csv' % (training))):
        build_calibrated_forcing(training)

# only keep same-training pairs or explicit 'insample' experiments
combis = [e for e in combis if ((e[0] == e[1]) or (e[2] == 'insample'))]

for combi in combis:  # loop over all the possibilities

    xpe = '%s_%s_%s' % (combi[2], combi[1], combi[0])

    # load input data into a dataframe
    df, __ = read_csv(os.path.join(ipath, '%s_calibration.csv' % (combi[0])))

    # output dir paths
    ofdir = os.path.join(ipath.replace('input', 'output'), 'multivar_change')

    if xpe in univar_xpes:
        ofdir = ofdir.replace('multivar_change', 'univar_change')

    if not os.path.isdir(ofdir):
        os.makedirs(ofdir)

    # how should the atm forcing data change?
    df1 = df.copy()  # reset the df so as to not keep previous changes

    if combi[2] == 'highD':
        df1['VPD'] *= 2.