# NOTE: the main() functions below are excerpted from separate CARDAMOM post-processing scripts;
# the imports here cover the calls that appear in this listing. The project-local module names
# (readwritebinary as rwb, anova_utilities as autil) and the helpers referenced but not shown
# (e.g. plot_output_ts, rank_mae, timeseries_decompose, remove_nan) are assumed to be provided
# elsewhere in each original script.
import os
import sys
import csv
import glob
import random
import itertools
import numpy as np
from datetime import date
from pandas import DataFrame, read_csv, read_pickle
from matplotlib.colors import LinearSegmentedColormap

import readwritebinary as rwb    # assumed name of the project module that reads cbf/cbr/binary output files
import anova_utilities as autil  # assumed name of the project module with get_output, gelman_rubin, plotting utilities


def main():

    # get specifications for run to read
    model_ids = ['811','811','911','911']
    assim_type = '_p25adapted'
    ens_size = 500
    
    # get pixels, ids and number of iterations to read
    cbf_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/cbf'+assim_type+'/' + model_ids[0] + '/'
    pixels = ['3809','3524','2224','4170','1945','3813','4054','3264','1271','3457']
    mcmc_ids = ['119','3','3','119']
    n_iters = ['40000000','1000000','1000000','40000000']
    
    
    nbe_mae, lai_mae, abgb_mae, gpp_mae = [], [], [], []
    
    # run through pixels
    for pixel in pixels:
    
        # get that pixel's outputs for each MCMCID
        nbe_pred, lai_pred, abgb_pred, gpp_pred = [], [], [], []
        for model_id, mcmc_id, n_iter in zip(model_ids, mcmc_ids, n_iters):
            
            # set directories
            cur_dir = os.getcwd() + '/'
            cbf_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/cbf'+assim_type+'/' + model_id + '/'
            cbr_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/cbr'+assim_type+'/' + model_id + '/'
            output_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/output'+assim_type+'/' + model_id + '/'
            plot_dir = '../../../../../../scratch/users/cfamigli/cardamom/plots/'
            parnames = autil.get_parnames('../../misc/', model_id)
            
            # read cbf file for that pixel
            cbf_pixel = rwb.read_cbf_file(glob.glob(cbf_dir + '*_' + pixel+'.cbf')[0])
            
            # read obs and obs unc for that pixel (GPP observations are read into sif_obs and compared with GPP predictions below)
            nbe_obs, lai_obs, abgb_obs, sif_obs = (cbf_pixel['OBS']['NBE'], cbf_pixel['OBS']['LAI'],
                                                   cbf_pixel['OBS']['ABGB'], cbf_pixel['OBS']['GPP'])
            nbe_an_unc, nbe_seas_unc = cbf_pixel['OBSUNC']['NBE']['annual_unc'], cbf_pixel['OBSUNC']['NBE']['seasonal_unc']
            lai_unc, abgb_unc = cbf_pixel['OTHER_OBS']['MLAI']['unc'], cbf_pixel['OBSUNC']['ABGB']['unc']
            
                
            # load pickled table of best (convergent) chain subsets for this model / MCMC / iteration combination
            conv_chains_pkl = read_pickle(glob.glob(cbr_dir + model_id + assim_type + '*_MCMC'+mcmc_id + '_'+n_iter+'_best_subset.pkl')[0])
            conv_chains_pkl.columns = ['pixel','bestchains','conv'] # if model_id!='911' else ['pixel','bestchains'] # rename columns for easier access
            
            # grab cbrs corresponding to that pixel, MCMCID and number of iterations
            files = glob.glob(cbr_dir + '*MCMC'+mcmc_id+'_' + n_iter + '_'+ pixel+'*.cbr')
            files.sort()
            best_chains = conv_chains_pkl.loc[conv_chains_pkl['pixel']==pixel]['bestchains'].values[0][1:]
            
            # run through cbrs
            cbr_chain_list = []
            for chain in best_chains:
                print(chain)
                
                # read cbr for one file and transform Bday, Fday
                file = [i for i in files if pixel+'_'+chain+'.cbr' in i][0]
                cbr_chain = autil.modulus_Bday_Fday(rwb.read_cbr_file(file, {'nopars': len(parnames)}), parnames)
                print(cbr_chain.shape)
            
                
                # read forward run for that cbr
                
                flux_chain = rwb.readbinarymat(output_dir + 'fluxfile_' + file.partition(cbr_dir)[-1][:-3]+'bin', [cbf_pixel['nodays'], autil.get_nofluxes_nopools_lma(model_id)[0]])
                pool_chain = rwb.readbinarymat(output_dir + 'poolfile_' + file.partition(cbr_dir)[-1][:-3]+'bin', [cbf_pixel['nodays']+1, autil.get_nofluxes_nopools_lma(model_id)[1]])
                    
                # add chain to list for GR calculation
                if np.shape(cbr_chain)[0]==ens_size: 
                    
                    cbr_chain_list.append(cbr_chain)
                    
                    # add forward run chain to aggregated matrix
                    flux_pixel = np.copy(flux_chain) if best_chains.index(chain)==0 else np.concatenate((flux_pixel, flux_chain), axis=0)
                    pool_pixel = np.copy(pool_chain) if best_chains.index(chain)==0 else np.concatenate((pool_pixel, pool_chain), axis=0)
                
            # compute gelman rubin
            if len(cbr_chain_list)>1:
                gr = autil.gelman_rubin(cbr_chain_list)
                print('%i of %i parameters converged' % (sum(gr<1.2), len(parnames)))
            else:
                gr = np.nan
                
            cbr_pixel = np.vstack(cbr_chain_list)
            
            
            print(pool_pixel.shape)
            print(cbr_pixel.shape)
            # nbe, lai, and abgb predictions at pixel
            # list with elements corresponding to MCMCIDs considered (e.g. first element is MCMCID 119)
            nbe_pred.append(autil.get_output('NBE', model_id, flux_pixel, pool_pixel, cbr_pixel, autil.get_nofluxes_nopools_lma(model_id)[2]))
            lai_pred.append(autil.get_output('LAI', model_id, flux_pixel, pool_pixel, cbr_pixel, autil.get_nofluxes_nopools_lma(model_id)[2])[:,:-1])
            abgb_pred.append(autil.get_output('ABGB', model_id, flux_pixel, pool_pixel, cbr_pixel, autil.get_nofluxes_nopools_lma(model_id)[2])[:,:-1])
            gpp_pred.append(autil.get_output('GPP', model_id, flux_pixel, pool_pixel, cbr_pixel, autil.get_nofluxes_nopools_lma(model_id)[2]))
            
        # plot time series
        lbls = [model_id+'_MCMC'+mcmc_id for model_id, mcmc_id in zip(model_ids, mcmc_ids)]
        plot_output_ts(cbf_pixel, nbe_pred, nbe_obs, nbe_an_unc, lbls=lbls, var='NBE', savepath=cur_dir+plot_dir+'demcmc_mcmc/', title='all_models'+'_NBE_'+pixel+'.png')
        plot_output_ts(cbf_pixel, lai_pred, lai_obs, lai_unc, lbls=lbls, var='LAI', savepath=cur_dir+plot_dir+'demcmc_mcmc/', title='all_models'+'_LAI_'+pixel+'.png')
        plot_output_ts(cbf_pixel, gpp_pred, sif_obs, 0, lbls=lbls, var='GPP', savepath=cur_dir+plot_dir+'demcmc_mcmc/', title='all_models'+'_GPP_'+pixel+'.png')
        
        # plot box plots
        plot_dist_compare(nbe_pred, nbe_obs, [nbe_an_unc, nbe_seas_unc], lbls=lbls, var='NBE', savepath=cur_dir+plot_dir+'demcmc_mcmc/', title='all_models'+'_NBE_'+pixel+'_dist_')
        plot_dist_compare(lai_pred, lai_obs, lai_unc, lbls=lbls, var='LAI', savepath=cur_dir+plot_dir+'demcmc_mcmc/', title='all_models'+'_LAI_'+pixel+'_dist_')
        plot_dist_compare(abgb_pred, abgb_obs, abgb_unc, lbls=lbls, var='ABGB', savepath=cur_dir+plot_dir+'demcmc_mcmc/', title='all_models'+'_ABGB_'+pixel+'_dist_')

        # plot obs vs median comparison
        nbe_mae.append([mae_real_numbers_only(f, nbe_obs)[0] for f in nbe_pred])
        lai_mae.append([mae_real_numbers_only(f, lai_obs)[0] for f in lai_pred])
        abgb_mae.append([mae_real_numbers_only(f, abgb_obs)[0] for f in abgb_pred])
        
        print(rank_mae(nbe_mae, lbls))
        print(rank_mae(lai_mae, lbls))
        print(rank_mae(abgb_mae, lbls))
    
    plot_maes(nbe_mae, pixels, savepath=cur_dir+plot_dir+'demcmc_mcmc/', title='all_models_NBE_mae')
    plot_maes(lai_mae, pixels, savepath=cur_dir+plot_dir+'demcmc_mcmc/', title='all_models_LAI_mae')
    plot_maes(abgb_mae, pixels, savepath=cur_dir+plot_dir+'demcmc_mcmc/', title='all_models_ABGB_mae')
    
    return
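
# --- Illustrative sketch (not the original implementation) -------------------
# mae_real_numbers_only and rank_mae are not shown in this listing. Based on how
# they are called above (mae_real_numbers_only(pred, obs)[0] and rank_mae(maes, lbls)),
# minimal versions could look like the following; names and return shapes are
# assumptions for illustration only.
def mae_real_numbers_only(pred, obs):
    # pred: ensemble predictions (n_ensemble x n_timesteps); obs: observations with -9999/nan for missing
    obs = np.array(obs, dtype=float)
    obs[obs == -9999] = np.nan
    med = np.nanmedian(pred, axis=0)[:len(obs)]   # ensemble median through time
    valid = np.isfinite(obs) & np.isfinite(med)   # compare only where both are real numbers
    mae = np.nanmean(np.abs(med[valid] - obs[valid])) if valid.sum() > 0 else np.nan
    return mae, valid.sum()

def rank_mae(mae_lists, lbls):
    # mae_lists: one list of MAEs per pixel (one entry per model/MCMC combination)
    mean_mae = np.nanmean(np.array(mae_lists, dtype=float), axis=0)
    return sorted(zip(lbls, mean_mae), key=lambda t: t[1])  # best (lowest MAE) first
# -----------------------------------------------------------------------------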
def main():

    cur_dir = os.getcwd() + '/'
    plot_dir = '../../../../../../scratch/users/cfamigli/cardamom/plots/'

    os.chdir(plot_dir + 'dists/')

    # get list of model ids
    models_full = list(set([el.split('_')[0] for el in glob.glob('*.png')]))

    # temporarily exclude model 102 (until 102 --> 101)
    models_full.remove('102')
    os.chdir(cur_dir)

    # set lists of variables and pixels
    vrs = [
        'NBE', 'cumNBE', 'LAI', 'GPP', 'Reco', 'Rauto', 'Rhet', 'lit', 'root',
        'som', 'wood'
    ]
    pixels = [
        '3809', '3524', '2224', '4170', '1945', '3813', '4054', '3264', '1271',
        '3457'
    ]

    # set MCMC ID
    mcmc_id = sys.argv[1]
    n_iter = sys.argv[2]
    assim_type = '_longadapted'

    nmodels_leave_out = sys.argv[3]
    models = random.sample(models_full,
                           len(models_full) - int(nmodels_leave_out))
    print(models)

    # dataframe will hold model structural uncertainty (Ms) and model parametric uncertainty (Mp) for each pixel-var combination
    # n is number of models that make up the suite
    partitioning = DataFrame(columns=['Ms', 'Mp', 'n'])
    df_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/processed_df/'

    for var in vrs:
        print('Variable: ' + var)

        Mp_pixels = np.zeros(len(pixels)) * np.nan  # list of Mp for each pixel, for mapping
        for pixel in pixels:
            print('Pixel: ' + pixel)

            nsteps = 228 if assim_type == '_longadapted' else 240
            # medians, upper bounds, lower bounds of prediction through time
            meds = np.zeros((len(models), nsteps)) * np.nan
            ub = np.zeros((len(models), nsteps)) * np.nan
            lb = np.zeros((len(models), nsteps)) * np.nan
            Mp, n = 0, 0

            for model in models:
                print(model)

                cbf_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/cbf' + assim_type + '/' + model + '/'
                cbr_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/cbr' + assim_type + '/' + model + '/'
                output_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/output' + assim_type + '/' + model + '/'
                parnames = autil.get_parnames(cur_dir + '../../misc/', model)

                os.chdir(cur_dir + cbr_dir)
                #files = set(glob.glob('*.cbr')) - set(glob.glob('*MCMC'+mcmc_id+'*.cbr'))
                #files = glob.glob('*MCMC'+mcmc_id+'*.cbr')
                files = set(glob.glob('*MCMC' + mcmc_id + '_' + n_iter + '_*.cbr'))

                # list of files corresponding to each chain at that pixel, e.g. 2224_1, 2224_2, 2224_3, 2224_4
                pixel_chains = autil.find_all_chains(files, pixel)
                pixel_chains.sort()
                n_chains = len(pixel_chains)

                if n_chains > 0:
                    cbf_pixel = rwb.read_cbf_file(
                        cur_dir + cbf_dir +
                        pixel_chains[0].partition('_MCMC')[0] + '_' + pixel +
                        '.cbf')

                    cbr_chain_list = []
                    for pixel_chain in pixel_chains:
                        print(pixel_chain)
                        cbr_chain = rwb.read_cbr_file(pixel_chain, {'nopars': len(parnames)})  # cbr file for one chain
                        cbr_chain_list.append(cbr_chain)  # list of separate cbrs for each chain, use for gelman rubin
                        # concatenate all chain cbrs
                        cbr_pixel = np.copy(cbr_chain) if pixel_chains.index(pixel_chain) == 0 \
                            else np.concatenate((cbr_pixel, cbr_chain), axis=0)
                        #autil.plot_par_histograms(cbr_chain, parnames=parnames, savepath=cur_dir+plot_dir+'dists/', title=model+'_'+pixel_chain[:-3]+'png')

                        flux_chain = rwb.readbinarymat(cur_dir + output_dir + 'fluxfile_' + pixel_chain[:-3] + 'bin',
                                                       [cbf_pixel['nodays'], autil.get_nofluxes_nopools_lma(model)[0]])
                        pool_chain = rwb.readbinarymat(cur_dir + output_dir + 'poolfile_' + pixel_chain[:-3] + 'bin',
                                                       [cbf_pixel['nodays'] + 1, autil.get_nofluxes_nopools_lma(model)[1]])
                        #autil.plot_flux_pool_timeseries(cbf_pixel, cbr_chain, flux_chain, pool_chain, autil.get_nofluxes_nopools_lma(model)[2], savepath=cur_dir+plot_dir+'timeseries/', title=model+'_'+pixel_chain[:-3]+'png')

                        # concatenate all chain flux and pool outputs
                        flux_pixel = np.copy(flux_chain) if pixel_chains.index(pixel_chain) == 0 \
                            else np.concatenate((flux_pixel, flux_chain), axis=0)
                        pool_pixel = np.copy(pool_chain) if pixel_chains.index(pixel_chain) == 0 \
                            else np.concatenate((pool_pixel, pool_chain), axis=0)

                    gr = autil.gelman_rubin(
                        cbr_chain_list)  # gelman rubin function from matt
                    gr_thresh = 1.2  # below this value parameters are assumed to be convergent
                    print('%i of %i parameters converged with GR<%.1f' %
                          (sum(gr < gr_thresh), len(parnames), gr_thresh))

                    #autil.plot_par_histograms(cbr_pixel, parnames=parnames, savepath=cur_dir+plot_dir+'dists/', title=model+'_'+pixel_chain[:-6]+'.png')
                    #autil.plot_flux_pool_timeseries(cbf_pixel, cbr_pixel, flux_pixel, pool_pixel, autil.get_nofluxes_nopools_lma(model)[2], savepath=cur_dir+plot_dir+'timeseries/', title=model+'_'+pixel_chain[:-6]+'.png')

                    # don't include nonconvergent runs in the analysis
                    if sum(gr < gr_thresh) / len(parnames) < .9:
                        continue
                    else:
                        fwd_data = autil.get_output(var, model, flux_pixel, pool_pixel, cbr_pixel,
                                                    autil.get_nofluxes_nopools_lma(model)[2])  # get forward data for var

                        if len(fwd_data) > 0:
                            if fwd_data.shape[1] > nsteps:
                                fwd_data = fwd_data[:, :-1]

                            fwd_data = autil.remove_outliers(fwd_data)
                            # fill medians, upper bounds, and lower bounds
                            meds[models.index(model), :] = np.nanmedian(fwd_data, axis=0)
                            ub[models.index(model), :] = np.nanpercentile(fwd_data, 75, axis=0)
                            lb[models.index(model), :] = np.nanpercentile(fwd_data, 25, axis=0)

                            # set values outside of 25th-75th range to nan
                            fwd_data = autil.remove_below_25_above_75(fwd_data)
                            # sum of intra-ensemble variance, only computed on the 25th-75th range
                            Mp += np.nanvar(fwd_data, axis=0)
                            n += 1

            Ms = np.nanvar(meds, axis=0)  # inter-median variance
            Mp = Mp / n if n != 0 else float('nan')

            Ms_div_sum = Ms / (Ms + Mp)
            Mp_div_sum = Mp / (Ms + Mp)

            partitioning.loc[pixel + '_' + var] = {
                'Ms': np.nanmean(Ms_div_sum),
                'Mp': np.nanmean(Mp_div_sum),
                'n': n
            }
            Mp_pixels[pixels.index(pixel)] = np.nanmean(Mp_div_sum)

    print(partitioning.to_string())
    partitioning.sort_index(
        axis=1).to_pickle(cur_dir + df_dir + 'summary' + assim_type + '_MCMC' +
                          mcmc_id + '_' + date.today().strftime("%m%d%y") +
                          '_' + str(len(models)) + '.pkl')

    return
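
# --- Illustrative sketch (not the original implementation) -------------------
# autil.gelman_rubin is used throughout to judge chain convergence. A standard
# Gelman-Rubin potential scale reduction factor over a list of equally sized
# chains (each n_samples x n_parameters) can be written as follows; the project's
# own implementation may differ in detail.
def gelman_rubin_sketch(chain_list):
    chains = np.stack(chain_list)                 # (m chains, n samples, n parameters)
    m, n = chains.shape[0], chains.shape[1]
    chain_means = chains.mean(axis=1)             # (m, n_parameters)
    B = n * chain_means.var(axis=0, ddof=1)       # between-chain variance
    W = chains.var(axis=1, ddof=1).mean(axis=0)   # mean within-chain variance
    var_hat = (n - 1) / n * W + B / n             # pooled posterior variance estimate
    return np.sqrt(var_hat / W)                   # one R-hat value per parameter
# -----------------------------------------------------------------------------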
def main():
    combinations = [['811', '119', '40000000'], ['811', '3', '1000000'],
                    ['911', '119', '40000000']]
    assim_type = '_longadapted'
    metric = sys.argv[1]

    vrs = [
        'NBE', 'cumNBE', 'LAI', 'GPP', 'Reco', 'Rauto', 'Rhet', 'lit', 'root',
        'som', 'wood'
    ]
    pixels = [
        '3809', '3524', '2224', '4170', '1945', '3813', '4054', '3264', '1271',
        '3457'
    ]

    ens_spread = np.ones(
        (len(pixels), len(vrs), len(combinations))) * float('nan')
    conv = np.ones((len(pixels), len(combinations))) * float('nan')

    cur_dir = os.getcwd() + '/'

    for pixel in pixels:

        comb_count = 0
        for comb in combinations:

            model_id = comb[0]
            mcmc_id = comb[1]
            it = comb[2]

            cbf_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/cbf' + assim_type + '/' + model_id + '/'
            cbr_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/cbr' + assim_type + '/' + model_id + '/'
            output_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/output' + assim_type + '/' + model_id + '/'
            plot_dir = '../../../../../../scratch/users/cfamigli/cardamom/plots/'
            parnames = autil.get_parnames(cur_dir + '../../misc/', model_id)

            os.chdir(cur_dir + cbr_dir)
            files = glob.glob('*MCMC' + mcmc_id + '_' + it + '_' + pixel +
                              '*.cbr')
            pixel_chains = autil.find_all_chains(files, pixel)
            pixel_chains.sort()  # filenames
            if model_id == '911': pixel_chains = pixel_chains[-4:]
            print(pixel_chains)

            cbf_pixel = rwb.read_cbf_file(
                cur_dir + cbf_dir + pixel_chains[0].partition('_MCMC')[0] +
                '_' + pixel + '.cbf')

            cbr_chain_list = []
            for pixel_chain in pixel_chains:
                print(pixel_chain)
                cbr_chain = rwb.read_cbr_file(pixel_chain,
                                              {'nopars': len(parnames)})
                cbr_pixel = np.copy(cbr_chain) if pixel_chains.index(
                    pixel_chain) == 0 else np.concatenate(
                        (cbr_pixel, cbr_chain), axis=0)

                flux_chain = rwb.readbinarymat(
                    cur_dir + output_dir + 'fluxfile_' + pixel_chain[:-3] +
                    'bin', [
                        cbf_pixel['nodays'],
                        autil.get_nofluxes_nopools_lma(model_id)[0]
                    ])
                pool_chain = rwb.readbinarymat(
                    cur_dir + output_dir + 'poolfile_' + pixel_chain[:-3] +
                    'bin', [
                        cbf_pixel['nodays'] + 1,
                        autil.get_nofluxes_nopools_lma(model_id)[1]
                    ])

                flux_pixel = np.copy(flux_chain) if pixel_chains.index(
                    pixel_chain) == 0 else np.concatenate(
                        (flux_pixel, flux_chain), axis=0)
                pool_pixel = np.copy(pool_chain) if pixel_chains.index(
                    pixel_chain) == 0 else np.concatenate(
                        (pool_pixel, pool_chain), axis=0)

                cbr_chain_list.append(cbr_chain)
                print(np.shape(cbr_chain))
                print(np.shape(cbr_pixel))

            gr = autil.gelman_rubin(cbr_chain_list)
            print('%i of %i parameters converged' %
                  (sum(gr < 1.2), len(parnames)))
            conv[pixels.index(pixel),
                 comb_count] = sum(gr < 1.2) / len(parnames) * 100

            for var in vrs:
                print(var)

                try:
                    obs = cbf_pixel['OBS'][var]
                    obs[obs == -9999] = float('nan')
                except:
                    obs = np.ones(cbf_pixel['nodays']) * np.nan
                n_obs = np.sum(np.isfinite(obs))

                fwd_data = autil.get_output(
                    var, model_id, flux_pixel, pool_pixel, cbr_pixel,
                    autil.get_nofluxes_nopools_lma(model_id)[2])

                if len(fwd_data) > 0:
                    if fwd_data.shape[1] > cbf_pixel['nodays']:
                        fwd_data = fwd_data[:, :-1]

                    fwd_data = autil.remove_outliers(fwd_data)
                    med = np.nanmedian(fwd_data, axis=0)
                    ub = np.nanpercentile(fwd_data, 75, axis=0)
                    lb = np.nanpercentile(fwd_data, 25, axis=0)

                    # interquartile spread if metric == 'spread', otherwise RMSE against the observations
                    ens_spread[pixels.index(pixel), vrs.index(var), comb_count] = (
                        np.nanmean(abs(ub - lb)) if metric == 'spread'
                        else np.sqrt(np.nansum((med - obs)**2) / n_obs))

            comb_count += 1

    for var in vrs:
        autil.plot_spread_v_iter(
            ens_spread,
            pixels,
            vrs.index(var),
            var,
            it,
            metric,
            cur_dir + plot_dir + 'spread_v_iter',
            'iter_test_compare_' + assim_type + '_' + model_id + '_' + var +
            '_' + metric,
            single_val=True
        )  #'iter_test_MCMC'+mcmc_id+'_'+model_id+'_'+var + '_' + metric)

    autil.plot_conv_v_iter(conv,
                           pixels,
                           it,
                           cur_dir + plot_dir + 'spread_v_iter',
                           'iter_test_compare' + assim_type + '_' + model_id +
                           '_conv',
                           single_val=True)

    return
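
# --- Illustrative sketch (not the original implementation) -------------------
# autil.remove_outliers and autil.remove_below_25_above_75 are applied to the
# forward-run ensembles before the spread/RMSE statistics above. Their exact
# definitions are not shown in this listing; a plausible minimal version of the
# second one, masking values outside the interquartile range at each time step,
# could look like this (assumed behavior, for illustration only).
def remove_below_25_above_75_sketch(fwd_data):
    lb = np.nanpercentile(fwd_data, 25, axis=0)   # per-timestep 25th percentile
    ub = np.nanpercentile(fwd_data, 75, axis=0)   # per-timestep 75th percentile
    out = fwd_data.astype(float)                  # work on a float copy
    out[(out < lb) | (out > ub)] = np.nan         # keep only the 25th-75th range
    return out
# -----------------------------------------------------------------------------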
def main():

    # set run information to read
    model_id = sys.argv[1]
    mcmc_id = sys.argv[2]  # 119 for normal, 3 for DEMCMC
    n_iter = sys.argv[3]
    nbe_optimization = sys.argv[4]  # 'OFF' or 'ON'
    ens_size = 500
    assim_type = sys.argv[5]

    # set directories
    cur_dir = os.getcwd() + '/'
    misc_dir = cur_dir + '../../misc/'
    cbf_dir = cur_dir + '../../../../../../scratch/users/cfamigli/cardamom/files/cbf' + assim_type + '/' + model_id + '/'
    cbr_opt_dir = cur_dir + '../../../../../../scratch/users/cfamigli/cardamom/files/cbr' + assim_type + '/' + model_id + '/'
    cbr_ef_dir = cur_dir + '../../../../../../scratch/users/cfamigli/cardamom/files/cbr' + assim_type + '_ef/' + model_id + '/'
    cbr_pft_dir = cur_dir + '../../../../../../scratch/users/cfamigli/cardamom/files/cbr' + assim_type + '_pft/' + model_id + '/'
    output_opt_dir = cur_dir + '../../../../../../scratch/users/cfamigli/cardamom/files/output' + assim_type + '/' + model_id + '/'
    output_ef_dir = cur_dir + '../../../../../../scratch/users/cfamigli/cardamom/files/output' + assim_type + '_ef/' + model_id + '/'
    output_pft_dir = cur_dir + '../../../../../../scratch/users/cfamigli/cardamom/files/output' + assim_type + '_pft/' + model_id + '/'
    plot_dir = cur_dir + '../../../../../../../scratch/users/cfamigli/cardamom/plots/'
    parnames = autil.get_parnames('../../misc/', model_id)

    # get list of cbfs
    os.chdir(cbf_dir)
    cbf_files = glob.glob('*.cbf')
    cbf_files.sort()
    pixel_lst = []
    os.chdir(cur_dir + '/../')

    # initialize lists for error maps
    card_unc, opt_obs_err, pft_obs_err, ef_obs_err, obs_std = \
        [np.zeros(len(cbf_files)) * np.nan for _ in range(5)]
    opt_pft_trend, opt_ef_trend, opt_pft_seas, opt_ef_seas, opt_mean, pft_mean, ef_mean = \
        [np.zeros(len(cbf_files)) * np.nan for _ in range(7)]
    pft_mean_within_opt_unc, ef_mean_within_opt_unc = \
        [np.zeros(len(cbf_files)) * np.nan for _ in range(2)]

    ################################################## iterate through pixels ##################################################
    ############################################################################################################################

    include_ef = True
    include_pft = True
    include_opt = True
    write_txt_sh_pft_rerun = True

    # initialize
    n_fluxes = autil.get_nofluxes_nopools_lma(model_id)[0]
    n_pools = autil.get_nofluxes_nopools_lma(model_id)[1]

    # load list of globcover labels
    gl_lbls = list(
        read_csv(misc_dir + 'Globcover2009_Legend.csv')['Value'].values)
    n_classes = len(gl_lbls)

    # load globcover csv for av_fracs determination
    gl_fracs = read_csv(misc_dir + 'globcover_fracs.csv', header=0)

    # load bestchains for cbr_files
    conv_chains = read_pickle(cbr_opt_dir + model_id + assim_type + '_ALL' +
                              '_MCMC' + mcmc_id + '_' + n_iter +
                              '_best_subset.pkl')
    conv_chains.columns = ['pixel', 'bestchains',
                           'conv']  #rename columns for easier access

    # create csv to track pft reruns
    pft_rerun_filename = 'pft_rerun_' + model_id + assim_type + '_MCMC' + mcmc_id + '_' + n_iter + '.csv'
    pft_rerun = open(misc_dir + pft_rerun_filename, 'w')
    w = csv.writer(pft_rerun)

    # run through all pixels
    for cbf_file in cbf_files:
        ind = cbf_files.index(cbf_file)
        pixel = cbf_file[-8:-4]
        pixel_lst.append(pixel)
        print(pixel)

        # read in fracs and types for pixel
        if int(pixel) in gl_fracs['pixel'].values:

            fracs_at_pixel = gl_fracs.loc[gl_fracs['pixel'] == int(
                pixel)].values[0][1:]
            types_at_pixel = get_types_at_pixel(gl_fracs, pixel)

        else:

            fracs_at_pixel = np.zeros(len(gl_lbls))
            types_at_pixel = []

        # read in cbf
        cbf_pixel = rwb.read_cbf_file(cbf_dir + cbf_file)
        nsteps = cbf_pixel['nodays']

        ################################################## get PFT forward runs ##################################################
        ##########################################################################################################################

        can_plot_pft = False
        if include_pft:

            pixel_rerun = []
            pft_spec = '5rp_'

            # initialize matrices to hold weighted average of fluxes and pools
            flux_pft_pixel = np.zeros((1, nsteps, n_fluxes))
            pool_pft_pixel = np.zeros((1, nsteps + 1, n_pools))
            #flux_pft_pixel = np.zeros((ens_size, nsteps, n_fluxes))
            #pool_pft_pixel = np.zeros((ens_size, nsteps+1, n_pools))

            # read all forward runs (each pft's run) for a given pixel
            print(types_at_pixel)
            for pft in types_at_pixel:

                # previously: cbf_file[:-8]+'MCMC'+mcmc_id+'_'+n_iter+'_PFT'+str(int(pft))+'_'+pixel+'.bin'
                suffix = (cbf_file[:-9] + '_MCMC' + mcmc_id + '_' + n_iter +
                          '_PFT' + str(int(pft)) + '_forward_' + pixel + '.bin')

                if (len(glob.glob(output_pft_dir + 'fluxfile_' + suffix)) > 0
                    ) & (len(glob.glob(output_pft_dir + 'poolfile_' + suffix))
                         > 0):
                    print(str(int(pft)))

                    flux_pft = rwb.readbinarymat(
                        output_pft_dir + 'fluxfile_' + suffix,
                        [nsteps, n_fluxes])
                    pool_pft = rwb.readbinarymat(
                        output_pft_dir + 'poolfile_' + suffix,
                        [nsteps + 1, n_pools])
                    #autil.plot_general_timeseries(autil.get_output('NBE', model_id, flux_pft, pool_pft, cbr_data=[], lma_ind=autil.get_nofluxes_nopools_lma(model_id)[2]), 'NBE', cbf_pixel, plot_dir+'timeseries/pft/', model_id + '_MCMC'+mcmc_id + '_'+n_iter + '_' + pixel + '_'+str(int(pft))+'.png')

                    # add each flux and pool matrix (corresponding to each pft) according to pft fractions, as weighted average
                    flux_pft[np.isnan(flux_pft)] = 0.
                    pool_pft[np.isnan(pool_pft)] = 0.

                    if (flux_pft.shape[0] > 0) & (pool_pft.shape[0] > 0):

                        lbl_ind = gl_lbls.index(int(pft))
                        flux_pft_pixel += flux_pft * fracs_at_pixel[lbl_ind]
                        pool_pft_pixel += pool_pft * fracs_at_pixel[lbl_ind]

                        can_plot_pft = True

                    else:
                        pixel_rerun.append(pft)

                else:
                    pixel_rerun.append(pft)

            if len(pixel_rerun) > 0:
                w.writerow([pixel] + pixel_rerun)

        ################################################ get optimal forward runs ################################################
        ##########################################################################################################################

        can_plot_opt = False
        if include_opt:

            # get pixel's convergent chain numbers

            if pixel in conv_chains['pixel'].values:

                best_chains = conv_chains.loc[
                    conv_chains['pixel'] == pixel]['bestchains'].values[0][1:]
                flux_opt, pool_opt = [], []

                # aggregate best chain outputs into one list
                for chain in best_chains:
                    suffix = (cbf_file[:-8] + 'MCMC' + mcmc_id + '_' + n_iter +
                              '_' + pixel + '_' + chain + '.bin')

                    if (len(glob.glob(output_opt_dir + 'fluxfile_' + suffix)) > 0) & \
                       (len(glob.glob(output_opt_dir + 'poolfile_' + suffix)) > 0):

                        flux_opt.append(rwb.readbinarymat(output_opt_dir + 'fluxfile_' + suffix,
                                                          [nsteps, n_fluxes]))
                        pool_opt.append(rwb.readbinarymat(output_opt_dir + 'poolfile_' + suffix,
                                                          [nsteps + 1, n_pools]))

                        can_plot_opt = True

                # stack list elements for plotting
                flux_opt = np.vstack(flux_opt)
                pool_opt = np.vstack(pool_opt)

        ################################################### get EF forward runs ###################################################
        ###########################################################################################################################

        can_plot_ef = False
        if include_ef:

            ef_spec = 'clipped_PLS_soilgrids_poolobs_rescaled_forward_'
            # if 'wpolys' in ef_spec: use '_MCMC'
            # else: use 'MCMC'
            # previously: cbf_file[:-8]+'MCMC'+mcmc_id+'_'+n_iter+'_EF_'+pixel+'.bin'
            suffix = (cbf_file[:-9] + '_MCMC' + mcmc_id + '_' + n_iter + '_' +
                      ef_spec + pixel + '.bin')

            if (len(glob.glob(output_ef_dir + 'fluxfile_' + suffix)) > 0) & (
                    len(glob.glob(output_ef_dir + 'poolfile_' + suffix)) > 0):

                flux_ef = rwb.readbinarymat(
                    output_ef_dir + 'fluxfile_' + suffix, [nsteps, n_fluxes])
                pool_ef = rwb.readbinarymat(
                    output_ef_dir + 'poolfile_' + suffix,
                    [nsteps + 1, n_pools])

                can_plot_ef = True

        ##################################################### plot and compare ####################################################
        ###########################################################################################################################

        can_decompose = can_plot_opt and can_plot_pft and can_plot_ef

        # plot optimal and pft predictions together
        output_opt = (autil.get_output('NBE', model_id, flux_opt, pool_opt, cbr_data=[],
                                       lma_ind=autil.get_nofluxes_nopools_lma(model_id)[2])
                      if include_opt & can_plot_opt else np.ones(nsteps) * np.nan)
        output_pft = (autil.get_output('NBE', model_id, flux_pft_pixel, pool_pft_pixel, cbr_data=[],
                                       lma_ind=autil.get_nofluxes_nopools_lma(model_id)[2])
                      if include_pft & can_plot_pft else np.ones(nsteps) * np.nan)
        output_ef = (autil.get_output('NBE', model_id, flux_ef, pool_ef, cbr_data=[],
                                      lma_ind=autil.get_nofluxes_nopools_lma(model_id)[2])
                     if include_ef & can_plot_ef else np.ones(nsteps) * np.nan)

        (card_unc[ind], opt_obs_err[ind], pft_obs_err[ind], ef_obs_err[ind],
         obs_std[ind]) = autil.plot_opt_pft_ef_timeseries(
            output_opt, output_pft, output_ef, 'NBE', pixel,
            autil.rowcol_to_latlon([pixel]), cbf_pixel, err_v_obs=False,
            savepath=plot_dir + 'forward_compare/timeseries/' + model_id + '/',
            title=model_id + '_MCMC' + mcmc_id + '_' + n_iter + '_' + pft_spec + ef_spec + pixel + '.png')

        if can_decompose:
            (opt_pft_trend[ind], opt_ef_trend[ind], opt_pft_seas[ind], opt_ef_seas[ind],
             opt_mean[ind], pft_mean[ind], ef_mean[ind],
             pft_mean_within_opt_unc[ind], ef_mean_within_opt_unc[ind]) = timeseries_decompose(
                output_opt, output_pft, output_ef, pixel,
                savepath=plot_dir + 'forward_compare/decomp/' + model_id + '/',
                savename=model_id + '_MCMC' + mcmc_id + '_' + n_iter + '_' + pft_spec + ef_spec + pixel)

    # close csv for rerun tracking
    pft_rerun.close()

    # plot decomposition results

    plot_decomposed(
        [opt_pft_trend, opt_ef_trend], [opt_pft_seas, opt_ef_seas],
        [opt_mean, pft_mean, ef_mean],
        [pft_mean_within_opt_unc, ef_mean_within_opt_unc],
        savepath=plot_dir + 'forward_compare/decomp/' + model_id + '/',
        savename=model_id + '_MCMC' + mcmc_id + '_' + n_iter + '_' + pft_spec +
        ef_spec)

    # plot error maps
    for data, plot_title, vmin, vmax in zip(
        [
            card_unc, opt_obs_err, pft_obs_err, ef_obs_err, obs_std,
            opt_obs_err / obs_std, pft_obs_err / obs_std, ef_obs_err / obs_std,
            pft_obs_err / obs_std - opt_obs_err / obs_std,
            ef_obs_err / obs_std - opt_obs_err / obs_std,
            pft_obs_err / obs_std - ef_obs_err / obs_std
        ], [
            'opt_unc', 'opt_err', 'pft_err', 'ef_err', 'obs_std',
            'norm_opt_err', 'norm_pft_err', 'norm_ef_err',
            'norm_pft_minus_norm_opt_err', 'norm_ef_minus_norm_opt_err',
            'norm_pft_minus_norm_ef_err'
        ], [0., 0., 0., 0., 0., 0., 0., 0., -1., -1., -1.],
        [0.7, 0.7, 0.7, 0.7, 0., 2., 2., 2., 1., 1., 1.]):

        data_nonan, pixel_lst_nonan = remove_nan(data, pixel_lst)

        stipple = card_unc if (plot_title == 'ef_err') | (
            plot_title == 'pft_err') else None
        autil.plot_map(
            nrows=46,
            ncols=73,
            land_pixel_list=[file[-8:-4] for file in cbf_files],
            pixel_value_list=pixel_lst_nonan,
            value_list=data_nonan,
            vmin=vmin,
            vmax=vmax,
            cmap='bwr',
            savepath=plot_dir + 'forward_compare/maps/' + model_id + '/',
            savename=model_id + '_MCMC' + mcmc_id + '_' + n_iter + '_' +
            pft_spec + ef_spec + plot_title,
            stipple=stipple)  #vmax=np.nanpercentile(data_nonan, 90)

    # save errors for comparison analysis
    DataFrame(list(zip(pixel_lst, list(ef_obs_err / obs_std))), columns=['pixels', 'norm_mae']).to_pickle(
        misc_dir + 'mae_pkls/' + model_id + '_MCMC' + mcmc_id + '_' + n_iter + '_' + ef_spec + '.pkl')
    DataFrame(list(zip(pixel_lst, list(pft_obs_err / obs_std))), columns=['pixels', 'norm_mae']).to_pickle(
        misc_dir + 'mae_pkls/' + model_id + '_MCMC' + mcmc_id + '_' + n_iter + '_' + pft_spec + '.pkl')

    # plot discrete map showing best parameterization (lowest error) for each pixel
    '''best_param_nonan, pixel_lst_nonan = best_param_nonancol([opt_obs_err, pft_obs_err, ef_obs_err], pixel_lst)
    autil.plot_map(nrows=46, ncols=73, land_pixel_list=[file[-8:-4] for file in cbf_files], pixel_value_list=pixel_lst_nonan, value_list=best_param_nonan, cmap=LinearSegmentedColormap.from_list('mycmap', [(0, 'dodgerblue'), (0.5, 'orangered'), (1., 'limegreen')]),savepath=plot_dir+'forward_compare/maps/'+model_id+'/', savename=model_id+'_MCMC'+mcmc_id+'_'+n_iter+'_'+ef_spec+'best_param')'''

    best_param_nonan, pixel_lst_nonan = best_param_nonancol(
        [pft_obs_err, ef_obs_err], pixel_lst)
    autil.plot_map(
        nrows=46,
        ncols=73,
        land_pixel_list=[file[-8:-4] for file in cbf_files],
        pixel_value_list=pixel_lst_nonan,
        value_list=best_param_nonan,
        cmap=LinearSegmentedColormap.from_list('mycmap', [(0, 'orangered'),
                                                          (1., 'limegreen')]),
        savepath=plot_dir + 'forward_compare/maps/' + model_id + '/',
        savename=model_id + '_MCMC' + mcmc_id + '_' + n_iter + '_' + pft_spec +
        ef_spec + 'best_param')

    rgb_triplets = err_rgb_triplets([opt_obs_err, pft_obs_err, ef_obs_err],
                                    pixel_lst)
    autil.plot_map_rgb(
        nrows=46,
        ncols=73,
        land_pixel_list=[file[-8:-4] for file in cbf_files],
        pixel_value_list=pixel_lst,
        value_list=rgb_triplets,
        savepath=plot_dir + 'forward_compare/maps/' + model_id + '/',
        savename=model_id + '_MCMC' + mcmc_id + '_' + n_iter + '_' + pft_spec +
        ef_spec + 'rgb')

    ############################################### create resubmission for pft ###############################################
    ###########################################################################################################################

    if write_txt_sh_pft_rerun:

        # set additional directories
        mdf_dir = '../code/CARDAMOM_2.1.6c/C/projects/CARDAMOM_MDF/' if nbe_optimization == 'OFF' else '../code/CARDAMOM_Uma_2.1.6c-master/C/projects/CARDAMOM_MDF/'
        runmodel_dir = '../code/CARDAMOM_2.1.6c/C/projects/CARDAMOM_GENERAL/' if nbe_optimization == 'OFF' else '../code/CARDAMOM_Uma_2.1.6c-master/C/projects/CARDAMOM_GENERAL/'
        cbf_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/cbf' + assim_type + '/' + model_id + '/'
        cbf_pft_ic_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/cbf' + assim_type + '_pft_ic/' + model_id + '/'
        cbr_pft_dir = '../../../../../scratch/users/cfamigli/cardamom/files/cbr' + assim_type + '_pft/' + model_id + '/'
        output_dir = '../../../../../scratch/users/cfamigli/cardamom/files/output' + assim_type + '/' + model_id + '/'
        output_pft_dir = '../../../../../scratch/users/cfamigli/cardamom/files/output' + assim_type + '_pft/' + model_id + '/'

        if mcmc_id == '119':
            frac_save_out = str(int(int(n_iter) / 500))
        elif mcmc_id == '3':
            # n_iterations / frac_save_out * 100 will be the ensemble size
            frac_save_out = str(int(int(n_iter) / 500 * 100))

        # set up which files to rerun
        pft_rerun = read_csv(misc_dir + pft_rerun_filename,
                             header=None,
                             sep=',',
                             names=['pixel'] + gl_lbls)
        txt_filename = 'pft_ic_combined_list_' + model_id + assim_type + '_MCMC' + mcmc_id + '_' + n_iter + '_rerun.txt'
        txt_file = open(txt_filename, 'w')

        cl_count, row_count = 1, 0
        for cbf_file in cbf_files:
            pixel = cbf_file[-8:-4]

            if int(pixel) in pft_rerun['pixel'].values:
                pixel_classes = pft_rerun.loc[pft_rerun['pixel'] == int(pixel)].values[0][1:]

                for cl in pixel_classes:
                    if ~np.isnan(cl):
                        f = cbf_file[:-9] + '_PFT' + str(int(cl)) + '_' + pixel
                        txt_file.write(
                            '%sCARDAMOM_MDF.exe %s%s %s%s %s 0 %s 0.001 %s 1000'
                            % (mdf_dir, cbf_pft_ic_dir[3:], f + '.cbf',
                               cbr_pft_dir, f + '.cbr', n_iter, frac_save_out,
                               mcmc_id))
                        txt_file.write(
                            ' && %sCARDAMOM_RUN_MODEL.exe %s%s %s%s %s%s %s%s %s%s %s%s'
                            % (runmodel_dir, cbf_pft_ic_dir[3:], f + '.cbf',
                               cbr_pft_dir, f + '.cbr', output_pft_dir,
                               'fluxfile_' + f + '.bin', output_pft_dir,
                               'poolfile_' + f + '.bin', output_pft_dir,
                               'edcdfile_' + f + '.bin', output_pft_dir,
                               'probfile_' + f + '.bin'))
                        cl_count += 1

                        if np.mod(cl_count, 5) == 0:
                            txt_file.write('\n')
                            row_count += 1

                        else:
                            txt_file.write(' && ')

        txt_file.close()

        sh_file = open(txt_filename[:-3] + 'sh', 'w')
        autil.fill_in_sh(sh_file,
                         array_size=row_count,
                         n_hours=10,
                         txt_file=txt_filename,
                         combined=True)

    return
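
# --- Illustrative sketch (not the original implementation) -------------------
# remove_nan and best_param_nonancol are used for the error maps above but are
# not shown in this listing. Based on how they are called, plausible versions
# (assumed, for illustration only) are:
def remove_nan(data, pixel_lst):
    # drop pixels whose mapped value is nan so plot_map only receives finite values
    keep = np.isfinite(np.asarray(data, dtype=float))
    return list(np.asarray(data)[keep]), list(np.asarray(pixel_lst)[keep])

def best_param_nonancol(err_list, pixel_lst):
    # for each pixel, index of the parameterization with the lowest error;
    # pixels where any parameterization is nan are dropped
    errs = np.vstack([np.asarray(e, dtype=float) for e in err_list])  # (n_parameterizations, n_pixels)
    keep = np.isfinite(errs).all(axis=0)
    best = np.argmin(errs[:, keep], axis=0)
    return list(best.astype(float)), list(np.asarray(pixel_lst)[keep])
# -----------------------------------------------------------------------------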
def main():
    
    ### set specifications
    model_id = sys.argv[1]
    run_type = 'ALL' 
    mcmc_id = '119'
    n_iter = '40000000'
    ens_size = 500
    assim_type = '_longadapted'
    
    ### set directories
    cur_dir = os.getcwd() + '/'
    cbf_dir = cur_dir + '../../../../../../scratch/users/cfamigli/cardamom/files/cbf'+assim_type+'/' + model_id + '/'
    cbr_dir = cur_dir + '../../../../../../scratch/users/cfamigli/cardamom/files/cbr'+assim_type+'/' + model_id + '/'
    output_dir = cur_dir + '../../../../../../scratch/users/cfamigli/cardamom/files/output'+assim_type+'/' + model_id + '/'
    plot_dir = cur_dir + '../../../../../../scratch/users/cfamigli/cardamom/plots/'
    parnames = autil.get_parnames(cur_dir + '../../misc/', model_id)
    
    # get list of cbfs
    os.chdir(cbf_dir)
    cbf_files = glob.glob('*.cbf')
    cbf_files.sort()
    os.chdir(cur_dir) 
    
    # initialize lists of pixel names and rmses 
    pixels_plot = []
    nbe_rmse, lai_rmse = [], []
    
    for cbf_file in cbf_files:
        print(cbf_file, cbf_files.index(cbf_file))
        
        cbf_pixel = rwb.read_cbf_file(cbf_dir + cbf_file)
        pixel = cbf_file[-8:-4]
        
        cbr_files = glob.glob(cbr_dir + '*MCMC'+mcmc_id+'_'+n_iter+'_' + pixel + '_*.cbr')
        cbr_files = sorted(cbr_files, key=lambda x: int(x.partition(pixel+'_')[-1].partition('.cbr')[0]))
        
        # get all possible XX member combinations of cbr files 
        n_chains_to_converge = 4
        cbr_files_all_subsets = [list(i) for i in itertools.combinations(cbr_files, n_chains_to_converge)]
        
        continue_check = True
        for subset in cbr_files_all_subsets:

            if continue_check:
                
                # read parameters and compute gelman rubin
                cbr_chain_list = []
                
                for cbr_file in subset:
                    cbr_chain = rwb.read_cbr_file(cbr_file, {'nopars': len(parnames)})
                    cbr_chain = autil.modulus_Bday_Fday(cbr_chain, parnames)
                    
                    if np.shape(cbr_chain)[0]==ens_size:
                        cbr_chain_list.append(cbr_chain)
                        
                if len(cbr_chain_list)>1:
                    gr = autil.gelman_rubin(cbr_chain_list)
                
                    if sum(gr<1.2)/len(parnames)>=0.9:
                        continue_check = False
                        cbr_agg = np.vstack(cbr_chain_list)
                        pixels_plot.append(pixel)
                        best_subset = subset.copy()
                        
                else:
                    gr = np.nan
        
        # if there is a convergent subset, read fluxes and pools
        if not continue_check: 
            convergent_chain_nums = [el.partition('.cbr')[0].partition(pixel)[-1][1:] for el in best_subset]
            convergent_files = [el.partition('.cbr')[0].partition(model_id+'/')[-1] for el in best_subset]
            
            flux_pixel = []
            pool_pixel = []
    
            for filename in convergent_files: 
                flux_chain = rwb.readbinarymat(output_dir + 'fluxfile_' + filename+'.bin', [cbf_pixel['nodays'], autil.get_nofluxes_nopools_lma(model_id)[0]])
                pool_chain = rwb.readbinarymat(output_dir + 'poolfile_' + filename+'.bin', [cbf_pixel['nodays']+1, autil.get_nofluxes_nopools_lma(model_id)[1]])
                
                if (flux_chain.shape[0]==ens_size) & (pool_chain.shape[0]==ens_size): 
                    flux_pixel.append(flux_chain)
                    pool_pixel.append(pool_chain)
            
            nbe_pred = autil.get_output('NBE', model_id, np.vstack(flux_pixel), np.vstack(pool_pixel), cbr_agg, autil.get_nofluxes_nopools_lma(model_id)[2])
            lai_pred = autil.get_output('LAI', model_id, np.vstack(flux_pixel), np.vstack(pool_pixel), cbr_agg, autil.get_nofluxes_nopools_lma(model_id)[2])
            nbe_obs, lai_obs = cbf_pixel['OBS']['NBE'], cbf_pixel['OBS']['LAI']
            
            nbe_rmse.append(rmse_real_numbers_only(nbe_pred, nbe_obs))
            lai_rmse.append(rmse_real_numbers_only(lai_pred, lai_obs))
            print(rmse_real_numbers_only(nbe_pred, nbe_obs), rmse_real_numbers_only(lai_pred, lai_obs))
            
    
    autil.plot_map(nrows=46, ncols=73, land_pixel_list=[file[-8:-4] for file in glob.glob(cbf_dir + '*.cbf')], pixel_value_list=pixels_plot, value_list=nbe_rmse, savepath=plot_dir+'maps/', savename='rmse_nbe_' + model_id + assim_type+ '_MCMC' + mcmc_id + '_' + n_iter)
    autil.plot_map(nrows=46, ncols=73, land_pixel_list=[file[-8:-4] for file in glob.glob(cbf_dir + '*.cbf')], pixel_value_list=pixels_plot, value_list=lai_rmse, savepath=plot_dir+'maps/', savename='rmse_lai_' + model_id + assim_type+ '_MCMC' + mcmc_id + '_' + n_iter)
    
    rmse_df = DataFrame(list(zip(pixels_plot, nbe_rmse, lai_rmse)))
    rmse_df.columns = ['pixel','nbe_rmse','lai_rmse']
    rmse_df.to_pickle(cur_dir + '../../misc/rmse_' + model_id + assim_type+ '_MCMC' + mcmc_id + '_' + n_iter + '.pkl')
    
    
    #################################################################################################################################################################
    # analyze regionally
    
    '''region_mask = Dataset(cur_dir + '../../misc/fourregion_maskarrays.nc')
    region_mask.set_auto_mask(False)
    regionmat, lat, lon = region_mask['4region'][:], region_mask['lat'][:], region_mask['lon'][:]
    lat[0] = -90
    lat[-1] = 90
    
    model_ids = ['811', '911']
    rmse_dfs = []
    for model_id in model_ids:
        rmse_df = read_pickle(cur_dir + '../../misc/rmse_' + model_id + assim_type+ '_MCMC' + mcmc_id + '_' + n_iter + '.pkl')
        rmse_df.columns = ['pixel','nbe_rmse','lai_rmse']
        
        regions = []
        for pixel in rmse_df[rmse_df.columns[0]].tolist():
            pixlat, pixlon = rwb.rowcol_to_latlon(pixel)
            regions.append(regionmat[np.argwhere(lat==pixlat)[0][0], np.argwhere(lon==pixlon)[0][0]])
        
        rmse_df.insert(loc=1, column='region', value=regions)
        rmse_dfs.append(rmse_df)
    
    print(rmse_dfs[0].groupby('region')['nbe_rmse'].mean(), rmse_dfs[0].groupby('region')['lai_rmse'].mean())
    print(rmse_dfs[1].groupby('region')['nbe_rmse'].mean(), rmse_dfs[1].groupby('region')['lai_rmse'].mean())'''
                        
    return
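
# --- Illustrative sketch (not the original implementation) -------------------
# rmse_real_numbers_only is called above but not shown. Mirroring the inline RMSE
# used elsewhere in these scripts (root mean squared error of the ensemble median
# against the finite observations), an assumed minimal version is:
def rmse_real_numbers_only_sketch(pred, obs):
    obs = np.array(obs, dtype=float)
    obs[obs == -9999] = np.nan
    med = np.nanmedian(pred, axis=0)[:len(obs)]   # ensemble median through time
    valid = np.isfinite(obs) & np.isfinite(med)   # use only real-numbered obs/pred pairs
    return np.sqrt(np.nanmean((med[valid] - obs[valid])**2)) if valid.sum() > 0 else np.nan
# -----------------------------------------------------------------------------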
def main():
    model_id = sys.argv[1]
    run_type = sys.argv[2] # ALL or SUBSET
    mcmc_id = sys.argv[3] # 119 for normal, 3 for DEMCMC
    n_iter = sys.argv[4]
    var_to_plot = sys.argv[5] # GR, a flux or pool, or PARXX
    ens_size = 500
    assim_type = '_longadapted'
    
    cur_dir = os.getcwd() + '/'
    if 'scripts' not in cur_dir:
        cur_dir = cur_dir + 'scripts/'
    
    cbf_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/cbf'+assim_type+'/' + model_id + '/'
    cbr_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/cbr'+assim_type+'/' + model_id + '/'
    output_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/output'+assim_type+'/' + model_id + '/'
    plot_dir = '../../../../../../scratch/users/cfamigli/cardamom/plots/'
    parnames = autil.get_parnames(cur_dir + '../../misc/', model_id)
    
    os.chdir(cbr_dir)
    files = glob.glob('*MCMC'+mcmc_id+'_'+n_iter+'_*.cbr')
    pixel = sys.argv[6]
    print(pixel)
    
    pixel_chains = autil.find_all_chains(files, pixel)
    pixel_chains.sort() # filenames
    print(pixel_chains)
    
    cbf_pixel = rwb.read_cbf_file(cur_dir + cbf_dir + pixel_chains[0].partition('_MCMC')[0]+'_'+pixel+'.cbf')
    
    cbr_chain_list = []
    for pixel_chain in pixel_chains:
        print(pixel_chain)
        cbr_chain = rwb.read_cbr_file(pixel_chain, {'nopars': len(parnames)})
        cbr_pixel = np.copy(cbr_chain) if pixel_chains.index(pixel_chain)==0 else np.concatenate((cbr_pixel, cbr_chain), axis=0)
        
        flux_chain = rwb.readbinarymat(cur_dir + output_dir + 'fluxfile_' + pixel_chain[:-3]+'bin', [cbf_pixel['nodays'], autil.get_nofluxes_nopools_lma(model_id)[0]])
        pool_chain = rwb.readbinarymat(cur_dir + output_dir + 'poolfile_' + pixel_chain[:-3]+'bin', [cbf_pixel['nodays']+1, autil.get_nofluxes_nopools_lma(model_id)[1]])

        flux_pixel = np.copy(flux_chain) if pixel_chains.index(pixel_chain)==0 else np.concatenate((flux_pixel, flux_chain), axis=0)
        pool_pixel = np.copy(pool_chain) if pixel_chains.index(pixel_chain)==0 else np.concatenate((pool_pixel, pool_chain), axis=0)
        
        if np.shape(cbr_chain)[0]==ens_size:
            cbr_chain_list.append(cbr_chain)
            print(np.shape(cbr_chain))
      
    ### COMPUTE GELMAN RUBIN  
    if len(cbr_chain_list)>1:
        gr = autil.gelman_rubin(cbr_chain_list)
        gr_pixel = sum(gr<1.2)/len(parnames)
    else:
        gr_pixel = -9999.
       
    ### DETERMINE DATA TO WRITE TO FILE
    if var_to_plot == 'GR':
        data = np.copy(gr_pixel)
    elif 'PAR' in var_to_plot:
        parnum = int(var_to_plot.partition('PAR')[-1])
        if gr_pixel>0.9:
            data = np.nanmedian(cbr_pixel[:,parnum-1])
        else:
            data = -9999.
    else:
        if gr_pixel>0.9:
            data = np.nanmean(np.nanmedian(autil.get_output(var_to_plot, model_id, flux_pixel, pool_pixel, cbr_pixel, autil.get_nofluxes_nopools_lma(model_id)[2]), axis=0))
        else:
            data = -9999.
        
    with open(cur_dir + '../../misc/' + model_id + '_' + pixel_chains[0].partition('_MCMC')[0] + '_MCMC' + mcmc_id + '_' + n_iter + '_' + var_to_plot + '.csv','a') as f:
        writer = csv.writer(f)
        new_row = [pixel, data]
        assert len(new_row)==2
        writer.writerow(new_row)
        
    return
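
# --- Usage note (assumed invocation, for illustration) -----------------------
# The main() above reads its configuration from the command line:
#   sys.argv[1] = model_id (e.g. '811'), sys.argv[2] = run_type ('ALL' or 'SUBSET'),
#   sys.argv[3] = mcmc_id ('119' for MCMC, '3' for DEMCMC), sys.argv[4] = n_iter,
#   sys.argv[5] = var_to_plot ('GR', a flux or pool name, or 'PARXX'), sys.argv[6] = pixel.
# A hypothetical call (script name assumed) would therefore look like:
#   python write_pixel_var_to_csv.py 811 ALL 119 40000000 NBE 3809
# which appends a [pixel, value] row to the per-variable csv in ../../misc/.
# -----------------------------------------------------------------------------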
def main():
    model_id_start = sys.argv[1]
    run_type = sys.argv[2]  # ALL or SUBSET
    metric = sys.argv[3]  # spread or RMSE
    assim_type = '_p25adapted'
    compare_between = sys.argv[4]  # MCMCID or MODEL or NBEUNC

    n_iters = [['40000000'], ['40000000']]
    # previously used alternatives:
    # ['500000','1000000','2500000','5000000','10000000'],['40000000']]
    # [['100000', '250000', '500000', '1000000', '1750000', '2500000', '5000000'], ['100000', '250000', '500000', '1000000', '5000000', '10000000', '25000000','50000000']]
    vrs = [
        'NBE', 'cumNBE', 'LAI', 'GPP', 'Reco', 'Rauto', 'Rhet', 'lit', 'root',
        'som', 'wood'
    ]
    pixels = [
        '3809', '3524', '2224', '4170', '1945', '3813', '4054', '3264', '1271',
        '3457'
    ]

    cur_dir = os.getcwd() + '/'
    cbf_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/cbf' + assim_type + '/' + model_id_start + '/'
    cbr_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/cbr' + assim_type + '/' + model_id_start + '/'
    output_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/output' + assim_type + '/' + model_id_start + '/'
    plot_dir = '../../../../../../scratch/users/cfamigli/cardamom/plots/'
    parnames = autil.get_parnames('../../misc/', model_id_start)

    if compare_between == 'MCMCID':
        comps = ['3', '119']

    elif compare_between == 'MODEL':
        comps = [model_id_start, '911']
        mcmc_id = '119'

    elif compare_between == 'NBEUNC':
        comps = [assim_type, '_p25adapted_NBEuncreduced']
        mcmc_id = '119'

    ens_spread = [
        np.ones((len(pixels), len(vrs), len(n_iters[0]))) * float('nan'),
        np.ones((len(pixels), len(vrs), len(n_iters[1]))) * float('nan')
    ]
    conv = [
        np.ones((len(pixels), len(n_iters[0]))) * float('nan'),
        np.ones((len(pixels), len(n_iters[1]))) * float('nan')
    ]

    for pixel in pixels:

        for comp in comps:
            if compare_between == 'MCMCID':
                mcmc_id = comp
            elif compare_between == 'MODEL':
                model_id_start = comp
                cbf_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/cbf' + assim_type + '/' + comp + '/'
                cbr_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/cbr' + assim_type + '/' + comp + '/'
                output_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/output' + assim_type + '/' + comp + '/'
                plot_dir = '../../../../../../scratch/users/cfamigli/cardamom/plots/'
                parnames = autil.get_parnames(cur_dir + '../../misc/', comp)
            elif compare_between == 'NBEUNC':
                assim_type = comp
                cbf_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/cbf' + comp + '/' + model_id_start + '/'
                cbr_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/cbr' + comp + '/' + model_id_start + '/'
                output_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/output' + comp + '/' + model_id_start + '/'
                plot_dir = '../../../../../../scratch/users/cfamigli/cardamom/plots/'
                parnames = autil.get_parnames(cur_dir + '../../misc/',
                                              model_id_start)

            os.chdir(cur_dir + cbr_dir)
            for it in n_iters[comps.index(comp)]:
                files = glob.glob('*MCMC' + mcmc_id + '_' + it + '_' + pixel +
                                  '*.cbr')
                pixel_chains = autil.find_all_chains(files, pixel)
                pixel_chains.sort()  # filenames
                #if ((comp=='911') & (pixel_chains[0][-5]=='1')): pixel_chains.pop(0)
                #if ((comp=='911') & (pixel_chains[0][-5]=='2')): pixel_chains.pop(0)
                print(pixel_chains)

                cbf_pixel = rwb.read_cbf_file(
                    cur_dir + cbf_dir + pixel_chains[0].partition('_MCMC')[0] +
                    '_' + pixel + '.cbf')

                cbr_chain_list = []
                for pixel_chain in pixel_chains[:4]:
                    print(pixel_chain)
                    cbr_chain = rwb.read_cbr_file(pixel_chain,
                                                  {'nopars': len(parnames)})
                    cbr_pixel = np.copy(cbr_chain) if pixel_chains.index(pixel_chain) == 0 else np.concatenate((cbr_pixel, cbr_chain), axis=0)

                    flux_chain = rwb.readbinarymat(cur_dir + output_dir + 'fluxfile_' + pixel_chain[:-3] + 'bin', [cbf_pixel['nodays'], autil.get_nofluxes_nopools_lma(model_id_start)[0]])
                    pool_chain = rwb.readbinarymat(cur_dir + output_dir + 'poolfile_' + pixel_chain[:-3] + 'bin', [cbf_pixel['nodays'] + 1, autil.get_nofluxes_nopools_lma(model_id_start)[1]])

                    flux_pixel = np.copy(flux_chain) if pixel_chains.index(pixel_chain) == 0 else np.concatenate((flux_pixel, flux_chain), axis=0)
                    pool_pixel = np.copy(pool_chain) if pixel_chains.index(pixel_chain) == 0 else np.concatenate((pool_pixel, pool_chain), axis=0)

                    cbr_chain_list.append(cbr_chain)
                    print(np.shape(cbr_chain))
                    print(np.shape(cbr_pixel))

                # Gelman-Rubin diagnostic across the chains read above; a parameter
                # counts as converged when GR < 1.2, and the converged fraction is
                # stored in conv as a percentage
                gr = autil.gelman_rubin(cbr_chain_list)
                print('%i of %i parameters converged' % (sum(gr < 1.2), len(parnames)))
                conv[comps.index(comp)][pixels.index(pixel),
                                        n_iters[comps.index(comp)].index(it)] = sum(gr < 1.2) / len(parnames) * 100

                for var in vrs:
                    print(var)

                    try:
                        obs = cbf_pixel['OBS'][var]
                        obs[obs == -9999] = float('nan')
                    except Exception:
                        # variable not assimilated at this pixel; treat all time steps as missing
                        obs = np.ones(cbf_pixel['nodays']) * np.nan
                    n_obs = np.sum(np.isfinite(obs))

                    fwd_data = autil.get_output(
                        var, model_id_start, flux_pixel, pool_pixel, cbr_pixel,
                        autil.get_nofluxes_nopools_lma(model_id_start)[2])

                    if len(fwd_data) > 0:
                        if fwd_data.shape[1] > cbf_pixel['nodays']:
                            fwd_data = fwd_data[:, :-1]

                        fwd_data = autil.remove_outliers(fwd_data)
                        med = np.nanmedian(fwd_data, axis=0)
                        ub = np.nanpercentile(fwd_data, 75, axis=0)
                        lb = np.nanpercentile(fwd_data, 25, axis=0)

                        # mean inter-quartile width ('spread') or RMSE of the ensemble median against obs
                        value = np.nanmean(abs(ub - lb)) if metric == 'spread' else np.sqrt(np.nansum((med - obs)**2) / n_obs)
                        ens_spread[comps.index(comp)][pixels.index(pixel),
                                                      vrs.index(var),
                                                      n_iters[comps.index(comp)].index(it)] = value
                        print(value)

    print(ens_spread)
    for var in vrs:
        autil.plot_spread_v_iter(
            ens_spread,
            pixels,
            vrs.index(var),
            var,
            n_iters,
            metric,
            cur_dir + plot_dir + 'spread_v_iter',
            'iter_test' + assim_type + '_' + compare_between + '_' +
            model_id_start + '_' + var + '_' + metric,
            single_val=True
        )  #'iter_test_MCMC'+mcmc_id+'_'+model_id_start+'_'+var + '_' + metric)

    autil.plot_conv_v_iter(conv,
                           pixels,
                           n_iters,
                           cur_dir + plot_dir + 'spread_v_iter',
                           'iter_test' + assim_type + '_' + compare_between +
                           '_' + model_id_start + '_conv',
                           single_val=True)

    return
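
# A minimal, self-contained sketch of the Gelman-Rubin statistic that autil.gelman_rubin
# computes above -- this is NOT the autil implementation, just an illustration. Each chain
# is assumed to be an (n_samples, n_params) array of equal length, and numpy is assumed to
# already be imported as np, as elsewhere in this script. Values near 1 indicate convergence.
def gelman_rubin_sketch(chains):
    stacked = np.stack(chains)                           # (m chains, n samples, k params)
    m, n = stacked.shape[0], stacked.shape[1]
    chain_means = stacked.mean(axis=1)                   # (m, k)
    within = stacked.var(axis=1, ddof=1).mean(axis=0)    # W: mean within-chain variance
    between = n * chain_means.var(axis=0, ddof=1)        # B: between-chain variance
    var_hat = (n - 1) / n * within + between / n         # pooled variance estimate
    return np.sqrt(var_hat / within)                     # R-hat per parameter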
Example #8
def main():
    model_id = sys.argv[1]
    run_type = sys.argv[2] # ALL or SUBSET
    mcmc_id = sys.argv[3] # 119 for normal, 3 for DEMCMC
    n_iter = sys.argv[4]
    ens_size = 500
    assim_type = '_p25adapted'
    use_bestchains_pkl = False
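    # When True, each pixel is restricted to the best-converged chains listed in the
    # *_best_subset.pkl file for this model/MCMC/iteration combination (see below);
    # when False, every chain found on disk is used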
    
    cur_dir = os.getcwd() + '/'
    cbf_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/cbf'+assim_type+'_ef_ic/' + model_id + '/'
    cbr_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/cbr'+assim_type+'_ef/' + model_id + '/'
    output_dir = '../../../../../../scratch/users/cfamigli/cardamom/files/output'+assim_type+'_ef/' + model_id + '/'
    plot_dir = '../../../../../../scratch/users/cfamigli/cardamom/plots/'
    parnames = autil.get_parnames('../../misc/', model_id)
    
    
    # load list of land pixels
    pixels = list(set([file[-8:-4] for file in glob.glob(cbf_dir + '*.cbf')])) if run_type=='ALL' else ['3809','3524','2224','4170','1945','3813','4054','3264','1271','3457']
    pixels.sort()
    
    # load list of cbrs
    cbr_files = glob.glob(cbr_dir+'*MCMC'+mcmc_id+'_'+n_iter+'_*PLS*forward*.cbr')

    # for loop over pixels    
    gr_pixels = np.zeros(len(pixels))*np.nan # list of GR for each pixel, for mapping
    par_pixels = np.zeros((len(pixels), len(parnames)))*np.nan
    for pixel in pixels:
        print(pixel, pixels.index(pixel))
        
        pixel_chains = autil.find_all_chains(cbr_files, pixel)
        pixel_chains.sort() # filenames
        
        if use_bestchains_pkl:
            conv_chains_pkl = read_pickle(glob.glob(cbr_dir + model_id + assim_type + '*_MCMC'+mcmc_id + '_'+n_iter+'_best_subset.pkl')[0])
            conv_chains_pkl.columns = ['pixel','bestchains','conv'] #rename columns for easier access
            
            if pixel in conv_chains_pkl['pixel'].values:
                bestchains = conv_chains_pkl.loc[conv_chains_pkl['pixel']==pixel]['bestchains'].values[0][1:]
                print(bestchains)
                pixel_chains = [pixel_chain for pixel_chain in pixel_chains if pixel_chain.partition(pixel+'_')[-1][:-4] in bestchains]
            
            else:
                continue

        #cbf_pixel = rwb.read_cbf_file(cur_dir + cbf_dir + pixel_chains[0].partition('_MCMC')[0]+'_'+pixel+'.cbf')
        cbf_filename = glob.glob(cur_dir + cbf_dir + '*'+pixel+'.cbf')[0]
        cbf_pixel = rwb.read_cbf_file(cbf_filename)
        
        cbr_chain_list = []
        for pixel_chain in pixel_chains:
            print(pixel_chain)
            cbr_chain = rwb.read_cbr_file(pixel_chain, {'nopars': len(parnames)})
            cbr_chain = autil.modulus_Bday_Fday(cbr_chain, parnames)
            cbr_pixel = np.copy(cbr_chain) if pixel_chains.index(pixel_chain)==0 else np.concatenate((cbr_pixel, cbr_chain), axis=0)
            #autil.plot_par_histograms(cbr_chain, parnames=parnames, savepath=cur_dir+plot_dir+'dists/', title=model_id+'_'+pixel_chain[:-3]+'png')
            
            try:
                flux_chain = rwb.readbinarymat(cur_dir + output_dir + 'fluxfile_' + pixel_chain.partition(cbr_dir)[-1][:-3]+'bin', [cbf_pixel['nodays'], autil.get_nofluxes_nopools_lma(model_id)[0]])
                pool_chain = rwb.readbinarymat(cur_dir + output_dir + 'poolfile_' + pixel_chain.partition(cbr_dir)[-1][:-3]+'bin', [cbf_pixel['nodays']+1, autil.get_nofluxes_nopools_lma(model_id)[1]])
                #autil.plot_flux_pool_timeseries(cbf_pixel, cbr_chain, flux_chain, pool_chain, autil.get_nofluxes_nopools_lma(model_id)[2], savepath=cur_dir+plot_dir+'timeseries/', title=model_id+'_'+pixel_chain[:-3]+'png')
    
                flux_pixel = np.copy(flux_chain) if pixel_chains.index(pixel_chain)==0 else np.concatenate((flux_pixel, flux_chain), axis=0)
                pool_pixel = np.copy(pool_chain) if pixel_chains.index(pixel_chain)==0 else np.concatenate((pool_pixel, pool_chain), axis=0)
                
            except Exception:
                # forward-run output (flux/pool binaries) may be missing for this chain; skip
                pass
                
            if np.shape(cbr_chain)[0]==ens_size:
                cbr_chain_list.append(cbr_chain)
                #print(np.shape(cbr_chain))
            
        if len(cbr_chain_list)>1:
            gr = autil.gelman_rubin(cbr_chain_list)
            #print(gr)
            print('%i of %i parameters converged' % (sum(gr<1.2), len(parnames)))
            gr_pixels[pixels.index(pixel)] = sum(gr<1.2)/len(parnames)
        else:
            gr = np.nan

        par_pixels[pixels.index(pixel),:] = np.nanmedian(cbr_pixel, axis=0)
        #autil.plot_par_histograms(cbr_pixel, parnames=parnames, savepath=cur_dir+plot_dir+'dists/', title=model_id+assim_type+'_MCMC'+mcmc_id+'_'+cbf_filename.partition(cbf_dir)[-1][:-4]+'.png')    
        #autil.plot_flux_pool_timeseries(cbf_pixel, cbr_pixel, flux_pixel, pool_pixel, autil.get_nofluxes_nopools_lma(model_id)[2], savepath=cur_dir+plot_dir+'timeseries/', title=model_id+assim_type+'_MCMC'+mcmc_id+'_'+cbf_filename.partition(cbf_dir)[-1][:-4]+'.png')
        
    #vmax = [None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,210,200,215,6600,195,24000,None,None,None,900,None,None,None,None,None,None,None] #np.nanpercentile(par_pixels[:,par], 90)
    for par in range(len(parnames)):
        autil.plot_map(nrows=46, ncols=73,
                       land_pixel_list=[file[-8:-4] for file in glob.glob(cur_dir + cbf_dir + '*.cbf')],
                       pixel_value_list=pixels,
                       value_list=par_pixels[:,par],
                       vmax=np.nanpercentile(par_pixels[:,par], 90),
                       savepath=cur_dir+plot_dir+'maps/',
                       savename='par'+str(par)+'_' + model_id +assim_type+ '_MCMC' + mcmc_id +'_'+ n_iter+'_EF_clipped_PLS_soilgrids_poolobs_rescaled_forward')
    #autil.plot_map(nrows=46, ncols=73, land_pixel_list=[file[-8:-4] for file in glob.glob(cur_dir + cbf_dir + '*.cbf')], pixel_value_list=pixels, value_list=np.ones(len(pixels)), savepath=cur_dir+plot_dir+'maps/', title='test_pixels.png')
    #autil.plot_map(nrows=46, ncols=73, land_pixel_list=[file[-8:-4] for file in glob.glob(cur_dir + cbf_dir + '*.cbf')], pixel_value_list=pixels, value_list=gr_pixels*100, savepath=cur_dir+plot_dir+'maps/', savename='gr_' + model_id + assim_type+ '_' +run_type+ '_MCMC' + mcmc_id + '_' + n_iter)
        
    return
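
# Example invocation (the script filename is hypothetical; the four positional
# arguments follow the sys.argv order parsed at the top of main):
#   python plot_par_maps.py 911 ALL 119 40000000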