def plot_max_var(var_name, zrange, path):
    """Plot vertical profiles of '<var_name>_max' from the Stats file for all times.

    Parameters:
        var_name: variable base name (reads '<var_name>_max' from the 'profiles' group)
        zrange:   height values [m] for the vertical axis
        path:     run directory containing Stats.<case_name>.nc and figs_stats/
    Saves figs_stats/<var_name>_max_fromprofile.png.
    """
    print('-- plot max var: ' + var_name + ' --')
    global case_name
    global nx, ny, nz, dz, dt_stats

    path_references = os.path.join(path, 'Stats.' + case_name + '.nc')
    var_max = read_in_netcdf(var_name + '_max', 'profiles', path_references)
    time = read_in_netcdf('t', 'timeseries', path_references)

    plt.figure(figsize=(14, 7))

    for it in range(var_max.shape[0]):
        # np.int/np.double were removed in NumPy >= 1.24; use the builtins.
        # NOTE(review): 'it' is an array index, not a time — tt can be negative
        # when time[0] > 0; verify the intended time/index mapping.
        tt = int((float(it) - time[0]) / float(dt_stats))
        plt.plot(var_max[tt, :], zrange, 'k--', label='t=' + str(tt * dt_stats) + 's')

    plt.legend()
    plt.xlabel('max' + var_name)
    plt.ylabel('height z [m]')
    # single title call (the original called plt.title twice; only the second took effect)
    plt.title('max[' + var_name + '] (' + case_name + ', nx*ny=' + str(nx * ny) + ')')
    plt.savefig(
        os.path.join(path, 'figs_stats', var_name + '_max_fromprofile.png'))
    # plt.show()
    plt.close()
    return
def plot_mean_profile(var_name, time_range, zrange, max_height, path_ref, path, BL=False, location=1):
    """Plot mean profiles of var_name at the requested output times.

    Parameters:
        var_name:   variable name; '_mean' is appended unless it is a fraction variable
        time_range: target times [s]; each is matched against the Stats time axis
        zrange:     height values [m] for the vertical axis
        max_height: number of lowest levels plotted when BL=True
        path_ref:   Stats netCDF file to read from
        path:       output directory (figures go to figs_stats/)
        BL:         restrict the plot to the boundary layer (first max_height levels)
        location:   matplotlib legend location
    """
    print('-- plot mean from profile: '+ var_name + ' --')
    global dt_stats
    time_ = read_in_netcdf('t', 'timeseries', path_ref)
    # fraction variables are stored under their plain name, all others as '<name>_mean'
    if var_name == 'cloud_fraction' or var_name == 'fraction_core' or var_name == 'fraction_cloud':
        var = read_in_netcdf(var_name, 'profiles', path_ref)
    else:
        var_name = var_name + '_mean'
        var = read_in_netcdf(var_name, 'profiles', path_ref)

    print('')
    print('var shape: ', var.shape, 'time: ', time_.shape, time_range, ' zrange:', zrange.shape, 'dt_stats: ', dt_stats)
    print('')
    plt.figure(figsize=(9,6))
    cm1 = plt.cm.get_cmap('bone')
    count_color = 0
    t_ini = 0
    count_t = 0
    for t in time_range:
        # scan forward from the last match for the first stats time within dt_stats
        for t_ in range(t_ini, time_.shape[0]):
            if np.abs(time_[t_] - time_range[count_t]) < dt_stats:
                lab = set_tlabel(time_[t_])
                if BL:
                    plt.plot(var[t_, 0:max_height], zrange[0:max_height],
                             color=cm1(float(count_color)/len(time_range)), label=lab)
                else:
                    plt.plot(var[t_, :], zrange,
                             color=cm1(float(count_color)/len(time_range)), label=lab)
                t_ini = t_+1
                count_color += 1
                # BUG FIX: was 'continue', which kept scanning and could plot the
                # same target time repeatedly; stop at the first matching output.
                break
        count_t += 1

    plt.legend(loc=location)
    plt.xlabel('mean ' + var_name)
    plt.ylabel('height z [m]')
    plt.title('mean ' + var_name + ' (' + case_name + ', nx*ny=' + str(nx * ny) + ')')
    if BL:
        plt.savefig(
            os.path.join(path, 'figs_stats', var_name + '_fromprofile_BL.pdf'))
    else:
        plt.savefig(
            os.path.join(path, 'figs_stats', var_name + '_fromprofile.pdf'))
    # plt.show()
    # plt.close()

    return
# ---- Exemplo n.º 3 (scraped-example separator; commented so the file parses) ----
def plot_profiles(fullpath_in_ref, z_ref):
    """Plot mean/min/max profiles of ql, qt and s from the reference Stats file.

    Parameters:
        fullpath_in_ref: Stats netCDF file to read profiles from
        z_ref:           reference height levels [m]
    Saves <path>/mean_profiles.png.
    """
    global path
    ''' profiles '''
    qt_prof = read_in_netcdf('qt_mean', 'profiles', fullpath_in_ref)
    qt_max = read_in_netcdf('qt_max', 'profiles', fullpath_in_ref)
    qt_min = read_in_netcdf('qt_min', 'profiles', fullpath_in_ref)
    ql_prof = read_in_netcdf('ql_mean', 'profiles', fullpath_in_ref)
    ql_max = read_in_netcdf('ql_max', 'profiles', fullpath_in_ref)
    ql_min = read_in_netcdf('ql_min', 'profiles', fullpath_in_ref)
    s_prof = read_in_netcdf('s_mean', 'profiles', fullpath_in_ref)
    s_max = read_in_netcdf('s_max', 'profiles', fullpath_in_ref)
    s_min = read_in_netcdf('s_min', 'profiles', fullpath_in_ref)
    time_prof = read_in_netcdf('t', 'timeseries', fullpath_in_ref)
    dt_prof = time_prof[2] - time_prof[1]
    # BUG FIX: n_prof existed only in commented-out code, so every use below
    # raised a NameError; restore the stats index of the second output time.
    n_prof = int(time_prof[1] / dt_prof)
    # NOTE(review): 'time' is a module-level global set in main() — confirm it
    # is populated before this function runs.
    print('time prof: ', n_prof, time_prof[n_prof], time[1])

    plt.figure(figsize=(15, 5))
    plt.subplot(1, 3, 1)
    plt.plot(1e2 * ql_prof[n_prof, :], z_ref, label='<ql>*100')
    plt.plot(ql_max[n_prof, :], z_ref, 'k--', linewidth=1)
    plt.plot(ql_min[n_prof, :], z_ref, 'k--', linewidth=1)
    plt.plot([0, 0], [0, z_ref[-1]])
    plt.legend()
    plt.xlabel('ql')
    plt.subplot(1, 3, 2)
    plt.plot(qt_prof[n_prof, :], z_ref, label='<qt>')
    plt.plot(qt_max[n_prof, :],
             z_ref,
             'k--',
             linewidth=1,
             label='max: ' + str(np.amax(qt_max[n_prof])))
    plt.plot(qt_min[n_prof, :],
             z_ref,
             'k--',
             linewidth=1,
             label='min: ' + str(np.amin(qt_min[n_prof])))
    plt.plot([0, 0], [0, z_ref[-1]])
    plt.legend()
    plt.xlabel('qt')
    plt.subplot(1, 3, 3)
    plt.plot(s_prof[n_prof, :], z_ref)
    plt.plot(s_max[n_prof, :], z_ref, 'k--', linewidth=1)
    plt.plot(s_min[n_prof, :], z_ref, 'k--', linewidth=1)
    plt.plot([0, 0], [0, z_ref[-1]])
    plt.xlabel('s')
    plt.xlim([np.amin(s_prof[n_prof, :]), np.amax(s_max[n_prof, :])])
    plt.savefig(os.path.join(path, 'mean_profiles.png'))
    return
def plot_mean(var_name, files_, zrange, levels, path, prof=False):
    """Plot horizontal-mean profiles of var_name computed from 3D field files.

    Parameters:
        var_name: variable name (read from 'fields'; '<var_name>_mean' from Stats)
        files_:   field file names or time labels; entries ending in 'c' are
                  treated as '<time>.nc' file names
        zrange:   height values [m] for the vertical axis
        levels:   heights [m] to mark with horizontal guide lines
        path:     run directory
        prof:     if True, also overplot the Stats profile at the matching time
    Saves figs_stats/<var_name>_mean_fromfield.png.
    """
    print('plotting mean')
    global case_name
    global nx, ny, nz, dz, dt_stats
    path_references = os.path.join(path, 'Stats.' + case_name + '.nc')
    var_mean = read_in_netcdf(var_name+'_mean', 'profiles', path_references)
    time = read_in_netcdf('t', 'timeseries', path_references)
    cm1 = plt.cm.get_cmap('bone')
    cm2 = plt.cm.get_cmap('winter')
    count_color = 0.0

    plt.figure(figsize=(9, 6))
    mini = np.amin(var_mean[:, :])
    maxi = np.amax(var_mean[:, :])
    for l in levels:
        plt.plot([mini, maxi], [l, l], color='0.75', linewidth=0.8, label=str(l) + 'm')

    for t in files_:
        if str(t)[-1] == 'c':
            # entry is a '<time>.nc' file name; np.int was removed in NumPy >= 1.24
            path_fields = os.path.join(path, 'fields', str(t))
            it = int(t[0:-3])
        else:
            path_fields = os.path.join(path, 'fields', str(t) + '.nc')
            it = float(t)
        var_field = read_in_netcdf(var_name, 'fields', path_fields)
        # average over both horizontal directions
        var_mean_field = np.mean(np.mean(var_field, axis=0), axis=0)

        t_label = set_tlabel(it)
        plt.plot(var_mean_field[:], zrange, color=cm1(count_color/len(files_)), label=t_label)
        if prof:
            tt = int((float(it) - time[0]) / float(dt_stats))
            plt.plot(var_mean[tt, :], zrange, '--', color=cm2(count_color/len(files_)), label='t=' + str(tt * dt_stats) + 's (from Stats)')
        count_color += 1.0

    plt.legend()
    plt.xlabel('mean '+var_name)
    plt.ylabel('height z [m]')
    plt.title('mean '+var_name + ' (' + case_name + ', nx*ny=' + str(nx * ny) + ')')
    plt.savefig(
        os.path.join(path, 'figs_stats', var_name + '_mean_fromfield.png'))
    # plt.show()
    plt.close()
    return
# ---- Exemplo n.º 5 (scraped-example separator; commented so the file parses) ----
def main():
    """Parse CLI arguments and plot ql / cloud-fraction error curves for several ncomp_max values."""
    parser = argparse.ArgumentParser(prog='PyCLES')
    parser.add_argument("path")
    parser.add_argument("casename")
    parser.add_argument("file_name")
    parser.add_argument("Lx")
    parser.add_argument("dz")
    # parser.add_argument("time")
    parser.add_argument("--xa_ql")
    parser.add_argument("--xb_ql")
    args = parser.parse_args()

    path = args.path
    case_name = args.casename
    file_name = args.file_name
    Lx = args.Lx
    delta_z = args.dz
    # time_field = args.time

    fullpath = os.path.join(path, file_name)
    path_ref = os.path.join(path, 'Stats.' + case_name + '.nc')

    # simulation namelist: grid dimensions and stats-output frequency
    nml = simplejson.loads(open(os.path.join(path, case_name + '.in')).read())
    nz = nml['grid']['nz']
    dz = nml['grid']['dz']
    dt_stats = nml['stats_io']['frequency']
    time_ref = read_in_netcdf('t', 'timeseries', path_ref)
    # reference time index and field-time label are fixed here
    t_ref = 0
    time_field = '5to6'

    # x-axis limits for the ql error plot, overridable from the command line
    xa = np.double(args.xa_ql) if args.xa_ql else -1.2e-5
    xb = np.double(args.xb_ql) if args.xb_ql else 1e-6
    xlimits_ql = [xa, xb]
    xlimits_cf = [-5e-2, 2e-2]
    # plot_error_ql(case_name, path, file_name_err, xlimits, dz, Lx, delta_z, time_field, t_ref)
    for ncomp_max in [3, 5, 8]:
        plot_error_ql_ncompmax(case_name, path, file_name, ncomp_max,
                               xlimits_ql, dz, Lx, delta_z, time_field, t_ref)
        plot_error_cf_ncompmax(case_name, path, file_name, ncomp_max,
                               xlimits_cf, dz, Lx, delta_z, time_field, t_ref)

    return
def plot_mean_cumulated(var_name, files_cum, zrange, levels, path):
    """Plot per-file horizontal-mean profiles of var_name plus their time mean.

    Parameters:
        var_name:  variable name in the 3D field files
        files_cum: field file names or time labels to accumulate over; entries
                   ending in 'nc' are treated as '<time>.nc' file names
        zrange:    height values [m] for the vertical axis
        levels:    heights [m] to mark with horizontal guide lines
        path:      run directory
    Saves figs_stats/<var_name>_mean_fromfield_cum.pdf.
    """
    print('-- plot mean cumulated: ' + var_name + ' --')
    print(levels)
    global case_name
    global nz, max_height
    mean_all = np.zeros(nz)

    print('')
    print('files_cum', files_cum, len(files_cum))

    fig1 = plt.figure(figsize=(9, 6))
    cm1 = plt.cm.get_cmap('bone')
    count_color = 0.0

    for t in files_cum:
        if str(t)[-2:] == 'nc':
            # entry is a '<time>.nc' file name; np.int was removed in NumPy >= 1.24
            path_fields = os.path.join(path, 'fields', str(t))
            it = int(t[0:-3])
        else:
            path_fields = os.path.join(path, 'fields', str(t) + '.nc')
            it = float(t)
        var_field = read_in_netcdf(var_name, 'fields', path_fields)

        # average over both horizontal directions, accumulate for the time mean
        var_mean_field = np.mean(np.mean(var_field, axis=0), axis=0)
        mean_all += var_mean_field

        lab = set_tlabel(it)
        plt.plot(var_mean_field[:], zrange, '--', color=cm1(count_color / len(files_cum)),
                     label='t=' + lab + ' (from field)')
        count_color += 1.0
    # guide-line extent is taken from the LAST field only (matches original behavior)
    mini = np.amin(var_mean_field[:])
    maxi = np.amax(var_mean_field[:])
    for l in levels:
        plt.plot([mini, maxi], [l, l], color='0.75', linewidth=0.8, label=str(l) + 'm')

    mean_all /= len(files_cum)
    plt.plot(mean_all[:], zrange, 'k', label='time mean')
    plt.legend()
    plt.xlabel('mean ' + var_name)
    plt.ylabel('height z [m]')
    plt.title('mean ' + var_name + ' (' + case_name + ', n*nx*ny=' + str(nx * ny * len(files_cum)) + ')')
    plt.savefig(
        os.path.join(path, 'figs_stats', var_name + '_mean_fromfield_cum.pdf'))
    # plt.show()
    plt.close()

    return
def plot_mean_var(var_name, files_, zrange, path):
    """Plot horizontal mean and variance profiles of var_name from field files.

    Parameters:
        var_name: variable name in the 3D field files
        files_:   field file names or time labels; entries ending in 'c' are
                  treated as '<time>.nc' file names
        zrange:   height values [m] for the vertical axis
        path:     run directory
    Saves figs_stats/<var_name>_mean_var_fromfield.png.
    """
    print('-- plot mean var: ' + var_name + ' --')
    global case_name
    global nx, ny, nz, dz, dt_stats
    day = 24 * 3600
    cm1 = plt.cm.get_cmap('bone')
    cm2 = plt.cm.get_cmap('winter')
    count_color = 0.0

    plt.figure(figsize=(14, 7))
    for t in files_:
        if str(t)[-1] == 'c':
            # entry is a '<time>.nc' file name; np.int was removed in NumPy >= 1.24
            path_fields = os.path.join(path, 'fields', str(t))
            it = int(t[0:-3])
        else:
            path_fields = os.path.join(path, 'fields', str(t) + '.nc')
            it = t

        var = read_in_netcdf(var_name, 'fields', path_fields)
        var_mean_fields = np.mean(np.mean(var, axis=0), axis=0)
        var2_mean = np.mean(np.mean(var*var, axis=0), axis=0)
        # BUG FIX: Var[x] = E[x^2] - (E[x])^2; the original subtracted the
        # mean itself instead of its square.
        var_variance = var2_mean - var_mean_fields**2
        lab = set_tlabel(it)
        plt.subplot(1, 2, 1)
        plt.plot(var_mean_fields, zrange, color=cm2(count_color/len(files_)), label='t=' + lab)
        plt.subplot(1, 2, 2)
        plt.plot(var_variance, zrange, color=cm2(count_color/len(files_)), label='t=' + lab)
        count_color += 1.0

    plt.subplot(1, 2, 1)
    plt.legend()
    plt.xlabel('mean ' + var_name)
    plt.ylabel('height z [m]')
    plt.title('mean '+var_name+' (' + case_name + ', nx*ny=' + str(nx * ny) + ')')
    plt.subplot(1, 2, 2)
    plt.legend()
    plt.xlabel('Var['+var_name+']')
    plt.ylabel('height z [m]')
    plt.title('Var['+var_name+'] (' + case_name + ', nx*ny=' + str(nx * ny) + ')')
    plt.savefig(
        os.path.join(path, 'figs_stats', var_name+'_mean_var_fromfield.png'))
    # plt.show()
    plt.close()
    return
# ---- Exemplo n.º 8 (scraped-example separator; commented so the file parses) ----
def main():
    """Plot ql / cloud-fraction error curves for the CloudClosure resolution study.

    Reads grid and stats settings from the case namelist, locates the stats
    time index nearest the requested field time, then plots for each ncomp_max.
    """
    parser = argparse.ArgumentParser(prog='PyCLES')
    parser.add_argument("path")
    parser.add_argument("casename")
    parser.add_argument("Lx")
    parser.add_argument("dz")
    parser.add_argument("time")
    args = parser.parse_args()
    path = args.path
    case_name = args.casename
    Lx = args.Lx
    delta_z = args.dz
    time_field = args.time

    fullpath = os.path.join(path, 'CloudClosure_res')
    file_name_err = 'CC_res_error_Lx'+ str(Lx)+'.0Ly'+str(Lx)+'.0_dz'+str(delta_z)+'_time'+str(time_field)+'.nc'
    path_ref = os.path.join(path, 'Stats.' + case_name + '.nc')

    # simulation namelist: grid dimensions and stats-output frequency
    nml = simplejson.loads(open(os.path.join(path, case_name + '.in')).read())
    nz = nml['grid']['nz']
    dz = nml['grid']['dz']
    dt_stats = nml['stats_io']['frequency']
    time_ref = read_in_netcdf('t', 'timeseries', path_ref)
    # advance t_ref to the first stats time within dt_stats of the field time
    # (np.int was removed in NumPy >= 1.24; use the builtin)
    t_ref = 0
    while ((time_ref[t_ref] - int(time_field)) < dt_stats and t_ref < (time_ref.shape[0]-1)):
        t_ref = t_ref + 1
    print('t_ref: ', t_ref, time_ref.shape[0])

    xlimits_ql = [-9e-6,1e-6]
    xlimits_cf = [-5e-2, 1e-2]
    # plot_error_ql(case_name, path, file_name_err, xlimits, dz, Lx, delta_z, time_field, t_ref)
    for ncomp_max in [3,5]:
        plot_error_ql_ncompmax(case_name, path, file_name_err, ncomp_max, xlimits_ql, dz, Lx, delta_z, time_field, t_ref)
        # NOTE(review): this call passes 'fullpath' where the sibling main passes
        # (case_name, path) — confirm against plot_error_cf_ncompmax's signature.
        plot_error_cf_ncompmax(fullpath, file_name_err, ncomp_max, xlimits_cf, dz, Lx, delta_z, time_field, t_ref)

    return
# ---- Exemplo n.º 9 (scraped-example separator; commented so the file parses) ----
def _swap_pdf_components(weights_time, means_time, covariance_time, n, k, nvar):
    # Swap the two PDF components (index 0 <-> 1) at time n, level k, in place.
    weights_time[n, k, 0], weights_time[n, k, 1] = \
        weights_time[n, k, 1], weights_time[n, k, 0]
    for i1 in range(nvar):  # loop over variables
        means_time[n, k, 0, i1], means_time[n, k, 1, i1] = \
            means_time[n, k, 1, i1], means_time[n, k, 0, i1]
        for i2 in range(nvar):
            covariance_time[n, k, 0, i1, i2], covariance_time[n, k, 1, i1, i2] = \
                covariance_time[n, k, 1, i1, i2], covariance_time[n, k, 0, i1, i2]


def _sort_and_plot(var, means_time, covariance_time, weights_time,
                   mean_tot_time, covar_tot_time, time_, nvar, z_max,
                   needs_swap, subdir):
    # Sort the two PDF components wherever needs_swap(n, k) is True, then write
    # the means/covars/weights plot sets into EM2_trivar_figures/<subdir>.
    try:
        os.mkdir(os.path.join(fullpath_out, 'EM2_trivar_figures', subdir))
    except OSError:
        pass  # directory already exists
    for n in range(time_.size):
        for k in range(z_max):
            if needs_swap(n, k):
                _swap_pdf_components(weights_time, means_time, covariance_time, n, k, nvar)
    print('Plotting')
    trivar_plot_means(var, means_time, covariance_time, weights_time,
                      mean_tot_time, covar_tot_time, time_, subdir)
    trivar_plot_covars(var, means_time, covariance_time, weights_time,
                       mean_tot_time, covar_tot_time, time_, subdir)
    trivar_plot_weights(var, means_time, covariance_time, weights_time,
                        mean_tot_time, covar_tot_time, time_, subdir)
    print('')


def main():
    """Read trivariate EM2 PDF fits per output time, sort the two components by
    four criteria (weights, E[w], Var[w], E[qt]) and plot each sorted view.

    Note: the sorts are cumulative — each criterion is applied to the arrays
    left by the previous one, matching the original behavior.
    """
    parser = argparse.ArgumentParser(prog='PyCLES')
    parser.add_argument("path")
    parser.add_argument("casename")
    args = parser.parse_args()
    # ______________________
    case_name = args.casename
    read_in_nml(args.path, case_name)
    print('nx,ny,nz; ntot:', nx, ny, nz, ntot)
    global fullpath_out
    fullpath_out = args.path
    print('fullpath_out:', fullpath_out)
    # ______________________
    files = os.listdir(os.path.join(args.path,'fields'))
    N = len(files)
    print('Found the following directories', files, N)
    # ______________________
    global time_
    time = np.zeros((1))
    for d in files:
        # field files are named '<time>.nc'; np.int was removed in NumPy >= 1.24
        time = np.sort(np.append(time, int(d[0:-3])))
    # ______________________
    '''
    zrange:     z-values for which the PDF is fitted
    var_list:   list of variables that are included in (multi-variate) PDF
    '''
    global z_max, zrange, ncomp, nvar, var_list
    if case_name == 'DCBLSoares':
        var_list = ['w', 'u', 's']
    else:
        var_list = ['w', 'temperature', 'qt']
    var_corr_list = ['wsqt', 'wthetaliqt']

    # read one file to determine the array dimensions
    d = files[0]
    var = var_corr_list[0]
    nc_file_name = 'EM2_trivar_' + str(d)
    fullpath_in = os.path.join(in_path, 'EM2_trivar', nc_file_name)
    means = read_in_netcdf(var, 'means', fullpath_in)
    time_ = read_in_netcdf('t', 'time', fullpath_in)
    print('time', time, time_)
    zrange = read_in_netcdf('height', 'z-profile', fullpath_in)
    print('zrange from data: ', zrange)
    z_max = means.shape[0]
    ncomp = means.shape[1]
    nvar = 3

    means_time = np.ndarray(shape=(len(files), z_max, ncomp, nvar))
    covariance_time = np.zeros(shape=(len(files), z_max, ncomp, nvar, nvar))
    weights_time = np.zeros(shape=(len(files), z_max, ncomp))
    mean_tot_time = np.ndarray(shape=(len(files), z_max, nvar))
    covar_tot_time = np.zeros(shape=(len(files), z_max, nvar, nvar))

    '''(1) read in nc-files - trivar EM2 PDF'''
    for var in var_corr_list:
        count_t = 0
        print('')
        print('read in ' + var)
        for d in files:
            nc_file_name = 'EM2_trivar_' + str(d)
            fullpath_in = os.path.join(in_path, 'EM2_trivar', nc_file_name)
            print('fullpath_in', fullpath_in)
            if var == 'wthetaliqt':
                var_list[1] = 'thetali'
                try:
                    means = read_in_netcdf(var, 'means', fullpath_in)
                    covars = read_in_netcdf(var, 'covariances', fullpath_in)
                    weights = read_in_netcdf(var, 'weights', fullpath_in)
                except Exception:
                    print('wthetaliqt not in variables')
            else:
                means = read_in_netcdf(var, 'means', fullpath_in)
                covars = read_in_netcdf(var, 'covariances', fullpath_in)
                weights = read_in_netcdf(var, 'weights', fullpath_in)

            means_time[count_t,:,:,:] = means[:,:,:]
            covariance_time[count_t, :, :, :] = covars[:, :, :]
            weights_time[count_t,:,:] = weights[:,:]
            mean_tot_time[count_t,:,:], covar_tot_time[count_t,:,:,:] = \
                covariance_estimate_from_multicomp_pdf(means, covars, weights)
            count_t += 1

        '''(2) sort PDF components according to weights'''
        print('')
        print('Sorting A')
        _sort_and_plot(var, means_time, covariance_time, weights_time,
                       mean_tot_time, covar_tot_time, time_, nvar, z_max,
                       lambda n, k: weights_time[n, k, 0] < weights_time[n, k, 1],
                       'sortA')

        '''(3) sort PDF components according to their mean'''
        print('Sorting B')
        _sort_and_plot(var, means_time, covariance_time, weights_time,
                       mean_tot_time, covar_tot_time, time_, nvar, z_max,
                       lambda n, k: means_time[n, k, 0, 0] < means_time[n, k, 1, 0],
                       'sortB')

        '''(4) sort PDF components according to Var[w]'''
        print('Sorting C')
        _sort_and_plot(var, means_time, covariance_time, weights_time,
                       mean_tot_time, covar_tot_time, time_, nvar, z_max,
                       lambda n, k: covariance_time[n, k, 0, 0, 0] < covariance_time[n, k, 1, 0, 0],
                       'sortC')

        '''(5) sort PDF components according to E[qt]'''
        print('Sorting D')
        # BUG FIX: the original plotted the sortD means/covars into 'sortB';
        # all three plot sets now go to 'sortD'.
        _sort_and_plot(var, means_time, covariance_time, weights_time,
                       mean_tot_time, covar_tot_time, time_, nvar, z_max,
                       lambda n, k: means_time[n, k, 0, 2] < means_time[n, k, 1, 2],
                       'sortD')

    return
# ---- Exemplo n.º 10 (scraped-example separator; commented so the file parses) ----
def main():
    """Fit bivariate Gaussian PDFs to (s, qt) and (theta_l, qt) at selected levels
    and check the resulting liquid-water estimate via saturation adjustment.

    Reads 3D fields from <path>/fields and the reference state from the Stats
    file, fits per-level PDFs, samples them, runs saturation adjustment on the
    samples and plots the comparison.
    """
    parser = argparse.ArgumentParser(prog='PyCLES')
    parser.add_argument("path")
    parser.add_argument("casename")
    args = parser.parse_args()
    print('_______________________')
    case_name = args.casename
    read_in_nml(args.path, case_name)
    print('nx,ny,nz; ntot:', nx, ny, nz, ntot)
    global fullpath_out
    path = args.path
    print('path:', path)
    print('_______________________')
    files = os.listdir(os.path.join(args.path, 'fields'))
    N = len(files)
    print('Found the following directories', files, N)
    print('_______________________')
    global time
    time = np.zeros((1))
    for d in files:
        # field files are named '<time>.nc'; np.int was removed in NumPy >= 1.24
        time = np.sort(np.append(time, int(d[0:-3])))
    print('time: ', time)
    print('_______________________')
    ''' read in reference state '''
    if case_name == 'ZGILS6':
        fullpath_in_ref = os.path.join(
            path, 'Stats.ZGILS_S6_1xCO2_SST_FixSub_D15.nc')
    else:
        fullpath_in_ref = os.path.join(path, 'Stats.' + case_name + '.nc')
    print(fullpath_in_ref)
    try:
        p_ref = read_in_netcdf('p0_half', 'reference', fullpath_in_ref)
        z_ref = read_in_netcdf('z_half', 'reference', fullpath_in_ref)
    except Exception:
        # older output files store the reference state under 'p0'/'z'
        p_ref = read_in_netcdf('p0', 'reference', fullpath_in_ref)
        z_ref = read_in_netcdf('z', 'reference', fullpath_in_ref)
    # initialized so the print below cannot raise a NameError when the read fails
    z_half_ref = None
    try:
        z_half_ref = read_in_netcdf('z_half', 'reference', fullpath_in_ref)
    except Exception:
        pass
    if z_ref.shape[0] != nz:
        print('')
        print('problem in dimensions: z_ref.shape != nz')
        sys.exit()
    print('')
    print(z_ref)
    print(z_half_ref)
    # plot_profiles(fullpath_in_ref, z_ref)
    print('_______________________')
    '''
    zrange:     z-values for which the PDF is fitted
    var_list:   list of variables that are included in (multi-variate) PDF
    '''
    global zrange
    # zrange = np.arange(0,36,2)
    zrange = np.arange(15, 30, 5)
    print('zrange', zrange * dz)
    print('_______________________')
    ''' PDF parameters '''
    global ncomp
    global nvar
    ncomp = 1
    nvar = 2

    files_ = [files[0]]
    for d in files_:
        t = int(d[0:-3])
        nc_file_name = str(d)
        fullpath_in = os.path.join(in_path, 'fields', nc_file_name)
        print('fullpath_in', fullpath_in)
        '''(0) Read in Data & compute liquid potential temperature from temperature and moisture'''
        s_ = read_in_netcdf('s', 'fields', fullpath_in)
        T_ = read_in_netcdf('temperature', 'fields', fullpath_in)
        qt_ = read_in_netcdf('qt', 'fields', fullpath_in)
        ql_ = read_in_netcdf('ql', 'fields', fullpath_in)

        # map each requested level to the matching index in z_ref
        # (BUG FIX: the scan used 'k <= z_ref.size', which indexes one past the
        #  end; index_ref also carried a spurious leading 0.0 from np.zeros(1)
        #  and was later used as a float index)
        index_ref = []
        count = 0
        k = 0
        while k < z_ref.size and count < len(zrange):
            if z_ref[k] == zrange[count] * dz:
                index_ref.append(k)
                count += 1
            k += 1

        print('z ref: ')
        for idx in index_ref:
            print(z_ref[idx])
        print('zrange: ', zrange)
        print('zrange*dz: ', zrange * dz)
        print('indices: ', index_ref)
        print('')

        data = np.ndarray(shape=((nx * ny), nvar))
        data_thl = np.ndarray(shape=((nx * ny), nvar))
        data_s = np.ndarray(shape=((nx * ny)))
        data_qt = np.ndarray(shape=((nx * ny)))
        data_ql = np.ndarray(shape=((nx * ny)))
        theta_l = np.ndarray(shape=((nx * ny)))
        for iz in zrange:
            # flatten the horizontal (i, j) plane at level iz into 1D samples
            # (BUG FIX: the original indexed the fields with the stale loop
            #  variable 'k' left over from the z_ref scan instead of 'iz')
            for i in range(nx):
                for j in range(ny):
                    data_s[i * ny + j] = s_[i, j, iz]
                    data_qt[i * ny + j] = qt_[i, j, iz]
                    data_ql[i * ny + j] = ql_[i, j, iz]
                    # NOTE(review): p_ref[iz - 1] looks like a deliberate
                    # staggering offset but is unverified — confirm against
                    # the reference-grid convention
                    theta_l[i * ny + j] = theta_li(p_ref[iz - 1], T_[i, j, iz],
                                                   qt_[i, j, iz], ql_[i, j, iz], 0)
            data[:, 0] = data_s[:]
            data[:, 1] = data_qt[:]
            data_thl[:, 0] = theta_l[:]
            data_thl[:, 1] = data_qt[:]

            '''(1) Normalise Data (zero mean and normalised standard deviation)'''
            data_norm = preprocessing.StandardScaler().fit_transform(data)
            data_thl_norm = preprocessing.StandardScaler().fit_transform(
                data_thl)
            '''(2) Compute bivariate Gaussian PDF (theta_l, qt) '''
            clf = Gaussian_bivariate(data, 's', 'qt', t, iz * dz)
            clf_norm = Gaussian_bivariate(data_norm, 's', 'qt', t, iz * dz)
            # plot_PDF_samples_qt(data, data_norm, 's', 'qt', clf, clf_norm, t, iz * dz)

            clf_thl = Gaussian_bivariate(data_thl, 'theta_l', 'qt', t, iz * dz)
            clf_thl_norm = Gaussian_bivariate(data_thl_norm, 'theta_l', 'qt',
                                              t, iz * dz)

            '''(5) Compute Liquid Water: sample the fitted PDFs and run
            saturation adjustment on the samples'''
            nn = int(1e5)
            S, y = clf.sample(n_samples=nn)
            print('clf samples: ', S.shape, y.shape)
            Th, y = clf_thl.sample(n_samples=nn)
            print('clf thl samples: ', Th.shape, y.shape)
            print('index ref', index_ref)
            print('iz', iz)
            print('zrange', zrange)

            alpha = np.zeros(shape=nn)
            T_comp = np.zeros(shape=nn)
            ql_comp = np.zeros(shape=nn)
            alpha_thl = np.zeros(shape=nn)
            T_comp_thl = np.zeros(shape=nn)
            ql_comp_thl = np.zeros(shape=nn)
            for i in range(nn):
                T_comp[i], ql_comp[i], alpha[i] = sat_adj_fromentropy(
                    p_ref[iz - 1], S[i, 0], S[i, 1])
                T_comp_thl[i], ql_comp_thl[i], alpha_thl[i] = sat_adj_fromthetali(
                    p_ref[iz - 1], Th[i, 0], Th[i, 1])
            plot_sat_adj(T_comp, ql_comp, S, data, data_ql, 's', 'qt', nn, t,
                         iz * dz)
            plot_sat_adj(T_comp_thl, ql_comp_thl, Th, data_thl, data_ql, 'thl',
                         'qt', nn, t, iz * dz)

    '''
    saturated conditions:
    - T_c >> T (299 vs 194 K)
    - problem: ql_2 remains < 0 --> endless loop even for s_2 = s --> f_2 = f_1 = 0 --> division by zero
    '''

    return
# ---- Exemplo n.º 11 (scraped-example separator; commented so the file parses) ----
def main():
    """Fit per-height bivariate Gaussian PDFs of (theta_l, qt) from 3D LES
    field files and dump the mixture parameters to 'CC_alltime.nc'.

    Command line: path to the case directory and the case name.
    """
    parser = argparse.ArgumentParser(prog='PyCLES')
    parser.add_argument("path")
    parser.add_argument("casename")
    args = parser.parse_args()
    print('_______________________')
    case_name = args.casename
    read_in_nml(args.path, case_name)
    print('nx,ny,nz; ntot:', nx, ny, nz, ntot)
    global fullpath_out
    fullpath_out = args.path
    print('fullpath_out:', fullpath_out)
    # ______________________
    files = os.listdir(os.path.join(args.path, 'fields'))
    N = len(files)
    print('Found the following directories', files, N)
    # ______________________
    global time
    time = np.zeros((1))
    for d in files:
        # field files are named '<timestamp>.nc' -> strip the extension
        # (int() replaces np.int, which was removed in NumPy 1.24)
        time = np.sort(np.append(time, int(d[0:-3])))
    # ______________________
    '''
    zrange:     z-values for which the PDF is fitted
    var_list:   list of variables that are included in (multi-variate) PDF
    '''
    global zrange
    zrange = np.arange(6, 20, 8)
    print('zrange', zrange * dz)
    print('_______________________')
    if case_name == 'DCBLSoares':
        var_list = ['u', 'w', 's']
    else:
        var_list = ['w', 's', 'qt']

    global ncomp
    global nvar
    ncomp = 1
    nvar = 2
    data_all = np.ndarray(shape=(0, nvar))
    nc_file_name_out = 'CC_alltime.nc'
    create_statistics_file(os.path.join(fullpath_out, 'CloudClosure'),
                           nc_file_name_out, ncomp, nvar, len(zrange))

    # Allocate work/output arrays ONCE.  The original code re-created
    # means_ / covariance_ inside the file loop, so the entries already
    # filled for earlier heights were discarded before the final dump.
    data = np.ndarray(shape=((nx * ny), nvar))
    means_ = np.ndarray(shape=(len(zrange), ncomp, nvar))
    covariance_ = np.zeros(shape=(len(zrange), ncomp, nvar, nvar))

    for i in range(len(zrange)):
        iz = zrange[i]
        for d in files:
            '''(1) compute liquid potential temperature from temperature and moisture'''
            p0 = 1e5
            nc_file_name = str(d)
            fullpath_in = os.path.join(in_path, 'fields', nc_file_name)
            print('fullpath_in', fullpath_in)
            T = read_in_netcdf('temperature', 'fields', fullpath_in)
            qt = read_in_netcdf('qt', 'fields', fullpath_in)
            ql = read_in_netcdf('ql', 'fields', fullpath_in)
            qi = np.zeros(shape=T.shape)  # no ice considered
            theta_l = thetali(p0, T, qt, ql, qi)

            # collect (theta_l, qt) pairs at height iz over the horizontal plane
            data1_ = theta_l.reshape((nx * ny), nz)
            data2_ = qt.reshape((nx * ny), nz)
            data[:, 0] = data1_[:, iz]
            data[:, 1] = data2_[:, iz]
            data_all = np.append(data_all, data, axis=0)
        '''(2) Compute bivariate Gaussian PDF (theta_l, qt) '''
        # NOTE(review): this fits only the LAST file's data; `data_all`
        # accumulates all files and looks like the intended input -- confirm.
        clf = Gaussian_bivariate(data, 'T', 'qt', int(d[0:-3]), iz * dz)
        means_[i, :, :] = clf.means_[:, :]
        covariance_[i, :, :, :] = clf.covariances_[:, :, :]
        '''(3) Compute Kernel-Estimate PDF '''
        kde, kde_aux = Kernel_density_estimate(data, 'T', 'qt',
                                               int(d[0:-3]), iz * dz)
        relative_entropy(data, clf, kde)
        '''(4) Save Gaussian Mixture PDFs '''
    dump_variable(os.path.join(fullpath_out, 'CloudClosure', nc_file_name_out),
                  'means', means_, 'qtT', ncomp, nvar, len(zrange))
    dump_variable(os.path.join(fullpath_out, 'CloudClosure', nc_file_name_out),
                  'covariances', covariance_, 'qtT', ncomp, nvar, len(zrange))

    return
# Exemplo n.º 12
def _swap_pdf_components(weights_time, means_time, covariance_time, n, k):
    """Swap mixture components 0 and 1 at time index n, height index k.

    All three arrays are modified in place; fancy indexing on the
    right-hand side produces copies, so the swaps are safe.
    """
    weights_time[n, k, 0], weights_time[n, k, 1] = \
        weights_time[n, k, 1], weights_time[n, k, 0]
    means_time[n, k, [0, 1], :] = means_time[n, k, [1, 0], :]
    covariance_time[n, k, [0, 1], :, :] = covariance_time[n, k, [1, 0], :, :]


def main():
    """Read the bivariate EM2 PDF parameters for all output times and plot
    them under three component orderings: by weight, by mean, by Var[w]."""
    parser = argparse.ArgumentParser(prog='PyCLES')
    parser.add_argument("path")
    parser.add_argument("casename")
    args = parser.parse_args()
    # ______________________
    case_name = args.casename
    read_in_nml(args.path, case_name)
    print('nx,ny,nz; ntot:', nx, ny, nz, ntot)
    global fullpath_out
    fullpath_out = args.path
    print('fullpath_out:', fullpath_out)
    # ______________________
    files = os.listdir(os.path.join(args.path, 'fields'))
    N = len(files)
    print('Found the following directories', files, N)
    # ______________________
    global time
    # field files are named '<timestamp>.nc' -> strip the extension
    time = np.zeros(len(files))
    for i, d in enumerate(files):
        time[i] = int(d[0:-3])  # int(): np.int was removed in NumPy 1.24
    time = np.sort(time)
    print('')
    # ______________________
    '''
    zrange:     z-values for which the PDF is fitted
    var_list:   list of variables that are included in (multi-variate) PDF
    '''
    var_list = ['ws', 'wqt', 'sqt']
    print(var_list)

    # Probe the first file to learn the output geometry.
    d = files[0]
    var = 'ws'
    nc_file_name = 'EM2_bivar_' + str(d)
    fullpath_in = os.path.join(in_path, 'EM2_bivar', nc_file_name)
    print('')
    print('fullpath_in', fullpath_in, var)
    means = read_in_netcdf(var, 'means', fullpath_in)
    time_ = read_in_netcdf('t', 'time', fullpath_in)
    print('time', time, time_)
    global z_max, zrange_, ncomp, nvar
    zrange_ = read_in_netcdf('height', 'z-profile', fullpath_in)
    print('zrange from data: ', zrange_)
    z_max = means.shape[0]
    ncomp = means.shape[1]
    nvar = 2

    means_time = np.ndarray(shape=(len(files), z_max, ncomp, nvar))
    covariance_time = np.zeros(shape=(len(files), z_max, ncomp, nvar, nvar))
    weights_time = np.zeros(shape=(len(files), z_max, ncomp))
    mean_tot_time = np.ndarray(shape=(len(files), z_max, nvar))
    covar_tot_time = np.zeros(shape=(len(files), z_max, nvar, nvar))

    '''(1) read in nc-files - bivar'''
    for var in var_list:
        count_t = 0
        print('')
        print('read in ' + var)
        for d in files:
            nc_file_name = 'EM2_bivar_' + str(d)
            fullpath_in = os.path.join(in_path, 'EM2_bivar', nc_file_name)
            means = read_in_netcdf(var, 'means', fullpath_in)
            covars = read_in_netcdf(var, 'covariances', fullpath_in)
            weights = read_in_netcdf(var, 'weights', fullpath_in)

            means_time[count_t, :, :, :] = means[:, :, :]
            covariance_time[count_t, :, :, :] = covars[:, :, :]
            weights_time[count_t, :, :] = weights[:, :]

            # single-Gaussian estimate equivalent to the multi-component PDF
            mean_tot_time[count_t, :, :], covar_tot_time[count_t, :, :, :] = \
                covariance_estimate_from_multicomp_pdf(means, covars, weights)
            count_t += 1

        '''(2) sort PDF components according to weights'''
        print('Sorting A')
        for n in range(time.size):
            for k in range(z_max):
                if weights_time[n, k, 0] < weights_time[n, k, 1]:
                    _swap_pdf_components(weights_time, means_time,
                                         covariance_time, n, k)
        '''(2a) plot'''
        print('Plotting')
        bivar_plot_means(var, means_time, covariance_time, weights_time, mean_tot_time, covar_tot_time, time_, 'sortA')
        bivar_plot_covars(var, means_time, covariance_time, weights_time, mean_tot_time, covar_tot_time, time_, 'sortA')
        bivar_plot_weights(var, means_time, covariance_time, weights_time, mean_tot_time, covar_tot_time, time_, 'sortA')

        '''(3) sort PDF components according to their mean'''
        print('Sorting B')
        for n in range(time.size):
            for k in range(z_max):
                if means_time[n, k, 0, 0] < means_time[n, k, 1, 0]:
                    _swap_pdf_components(weights_time, means_time,
                                         covariance_time, n, k)
        '''(3a) plot'''
        print('Plotting')
        bivar_plot_means(var, means_time, covariance_time, weights_time, mean_tot_time, covar_tot_time, time_, 'sortB')
        bivar_plot_covars(var, means_time, covariance_time, weights_time, mean_tot_time, covar_tot_time, time_, 'sortB')
        bivar_plot_weights(var, means_time, covariance_time, weights_time, mean_tot_time, covar_tot_time, time_, 'sortB')
        print('')

        '''(4) sort PDF components according to Var[w]'''
        # (the original printed 'Sorting B' again here -- copy/paste slip)
        print('Sorting C')
        for n in range(time.size):
            for k in range(z_max):
                if covariance_time[n, k, 0, 0, 0] < covariance_time[n, k, 1, 0, 0]:
                    _swap_pdf_components(weights_time, means_time,
                                         covariance_time, n, k)
        '''(4a) plot'''
        print('Plotting')
        bivar_plot_means(var, means_time, covariance_time, weights_time, mean_tot_time, covar_tot_time, time_, 'sortC')
        bivar_plot_covars(var, means_time, covariance_time, weights_time, mean_tot_time, covar_tot_time, time_, 'sortC')
        bivar_plot_weights(var, means_time, covariance_time, weights_time, mean_tot_time, covar_tot_time, time_,
                           'sortC')
    return
# Exemplo n.º 13
def main():
    """Compare saturation adjustment from (s, qt) and from (theta_l, qt)
    against the LES temperature and ql fields at selected heights, and
    report the worst-case temperature / ql deviations."""
    parser = argparse.ArgumentParser(prog='PyCLES')
    parser.add_argument("path")
    parser.add_argument("casename")
    args = parser.parse_args()
    # ______________________
    global nx, dx
    case_name = args.casename
    dx, nx, dt = read_in_nml(args.path, case_name)
    global path
    path = args.path
    print('path:', path)
    # ______________________
    files = os.listdir(os.path.join(args.path, 'fields'))
    N = len(files)
    print('Found the following directories', files, N)
    print('')
    # ______________________
    global time
    time = np.zeros((1))
    for d in files:
        # field files are named '<timestamp>.nc'
        # (int() replaces np.int, which was removed in NumPy 1.24)
        time = np.sort(np.append(time, int(d[0:-3])))
    print('time: ', time)
    print('')
    # ______________________
    ''' read in reference state '''
    if case_name == 'ZGILS6':
        fullpath_in_ref = os.path.join(path,
                                       'Stats.ZGILS_S6_1xCO2_SST_FixSub.nc')
    else:
        fullpath_in_ref = os.path.join(path, 'Stats.' + case_name + '.nc')
    print(fullpath_in_ref)
    try:
        p_ref = read_in_netcdf('p0_half', 'reference', fullpath_in_ref)
    except Exception:
        # older statistics files only carry 'p0'
        p_ref = read_in_netcdf('p0', 'reference', fullpath_in_ref)
    z_ref = read_in_netcdf('z', 'reference', fullpath_in_ref)
    z_half_ref = read_in_netcdf('z_half', 'reference', fullpath_in_ref)
    print('')
    print('nz, dz: ', nx[2], dx[2])

    # ______________________
    ''' zrange: height indices at which the adjustment is evaluated
        (the second assignment intentionally overrides the first) '''
    global zrange
    zrange = [15, 18, 20]
    zrange = [10, 30, 50]
    # ______________________

    files_ = files
    for d in files_:
        print('')
        print(files, d, files_)
        nc_file_name = str(d)
        fullpath_in = os.path.join(path, 'fields', nc_file_name)
        '''read in fields'''
        s_ = read_in_netcdf('s', 'fields', fullpath_in)
        qt_ = read_in_netcdf('qt', 'fields', fullpath_in)
        T_ = read_in_netcdf('temperature', 'fields', fullpath_in)
        ql_ = read_in_netcdf('ql', 'fields', fullpath_in)
        T_comp = np.zeros(shape=ql_.shape)
        ql_comp = np.zeros(shape=ql_.shape)
        alpha = np.zeros(shape=ql_.shape)
        theta_l = np.zeros(shape=ql_.shape)
        T_comp_thl = np.zeros(shape=ql_.shape)
        ql_comp_thl = np.zeros(shape=ql_.shape)
        alpha_thl = np.zeros(shape=ql_.shape)
        # Cloud indicator from the LES ql field (1 where ql != 0);
        # vectorized replacement for the original i,j,k triple loop.
        alpha_ = np.zeros(shape=ql_.shape)
        alpha_[ql_ != 0.0] = 1

        # Worst-case deviations of the recomputed T and ql.
        max_T_sat = 0.0
        max_T_unsat = 0.0
        max_ql = 0.0
        min_ql = 0.0
        max_T_sat_thl = 0.0
        max_T_unsat_thl = 0.0
        max_ql_thl = 0.0
        min_ql_thl = 0.0
        print('')
        print('types: ', type(p_ref[0]), type(s_[0, 0, 0]), type(qt_[0, 0, 0]))
        print('')
        for k in zrange:
            for i in range(nx[0]):
                for j in range(nx[1]):
                    # saturation adjustment from (s, qt)
                    T_comp[i, j, k], ql_comp[i, j, k], alpha[
                        i, j, k] = sat_adj_fromentropy_double(
                            p_ref[k], s_[i, j, k], qt_[i, j, k])
                    theta_l[i, j, k] = theta_li(p_ref[k], T_[i, j, k],
                                                qt_[i, j, k], ql_[i, j, k], 0)
                    # saturation adjustment from (theta_l, qt)
                    T_comp_thl[i, j, k], ql_comp_thl[i, j, k], alpha_thl[
                        i, j, k] = sat_adj_fromthetali(
                            p_ref[k], theta_l[i, j, k], qt_[i, j, k])

                    if np.isnan(T_comp_thl[i, j, k]):
                        print('T_comp_thl is nan')
                        sys.exit()

                    # entropy-based ql error bounds
                    if (ql_comp[i, j, k] - ql_[i, j, k]) > max_ql:
                        max_ql = (ql_comp[i, j, k] - ql_[i, j, k])
                    elif (ql_comp[i, j, k] - ql_[i, j, k]) < min_ql:
                        min_ql = (ql_comp[i, j, k] - ql_[i, j, k])

                    # entropy-based T error, split by saturated/unsaturated
                    if ql_[i, j, k] > 0.0 and alpha[i, j, k] > 0.0:
                        if np.abs(T_comp[i, j, k] - T_[i, j, k]) > max_T_sat:
                            max_T_sat = np.abs(T_comp[i, j, k] - T_[i, j, k])
                    elif alpha[i, j, k] == 0.0:
                        if np.abs(T_comp[i, j, k] - T_[i, j, k]) > max_T_unsat:
                            max_T_unsat = np.abs(T_comp[i, j, k] - T_[i, j, k])
                            print('unsat max T: ', max_T_unsat)

                    # theta_l-based ql error bounds
                    if (ql_comp_thl[i, j, k] - ql_[i, j, k]) > max_ql_thl:
                        max_ql_thl = ql_comp_thl[i, j, k] - ql_[i, j, k]
                    elif (ql_comp_thl[i, j, k] - ql_[i, j, k]) < min_ql_thl:
                        min_ql_thl = ql_comp_thl[i, j, k] - ql_[i, j, k]

                    # theta_l-based T error, split by saturated/unsaturated
                    if ql_[i, j, k] > 0.0 and alpha[i, j, k] > 0:
                        print('sat: ',
                              np.abs(T_comp_thl[i, j, k] - T_[i, j, k]))
                        if np.abs(T_comp_thl[i, j, k] -
                                  T_[i, j, k]) > max_T_sat_thl:
                            max_T_sat_thl = np.abs(T_comp_thl[i, j, k] -
                                                   T_[i, j, k])
                    elif alpha[i, j, k] == 0:
                        print('unsat',
                              np.abs(T_comp_thl[i, j, k] - T_[i, j, k]))
                        if np.abs(T_comp_thl[i, j, k] -
                                  T_[i, j, k]) > max_T_unsat_thl:
                            max_T_unsat_thl = np.abs(T_comp_thl[i, j, k] -
                                                     T_[i, j, k])

        print('')
        print('From Entropy:')
        print('max T sat: ', max_T_sat)
        print('max T unsat: ', max_T_unsat)
        print('max ql:', max_ql)
        print('min ql:', min_ql)
        print('')
        print('From Thetali:')
        print('max T sat: ', max_T_sat_thl)
        print('max T unsat: ', max_T_unsat_thl)
        print('max ql:', max_ql_thl)
        print('min ql:', min_ql_thl)
        print('')

        plot_snapshots(ql_, ql_comp, alpha_, alpha, 'ql')
        plot_snapshots(T_, T_comp, alpha_, alpha, 'T')
        plot_snapshots(theta_l, theta_l, alpha_, alpha, 'thetali')

    return
# Exemplo n.º 14
def plot_mean_levels(var_name, files_, zrange, path, profile=False):
    """Plot horizontal-mean profiles of `var_name` from 3D field files,
    overlaying horizontal reference lines at selected heights.

    Parameters
    ----------
    var_name : variable to plot ('ql' gets non-zero-levels-only handling)
    files_   : list of field file names or time stamps
    zrange   : height values [m] for the y-axis
    path     : case directory containing 'fields/' and the Stats file
    profile  : if True, also overlay the '<var>_mean' profile from Stats
    """
    print('')
    print('plotting mean levels')
    global case_name
    global nx, ny, nz, dz, dt_stats
    path_references = os.path.join(path, 'Stats.' + case_name + '.nc')
    var_mean = read_in_netcdf(var_name + '_mean', 'profiles', path_references)
    time = read_in_netcdf('t', 'timeseries', path_references)
    cm1 = plt.cm.get_cmap('bone')
    cm2 = plt.cm.get_cmap('winter')
    count_color = 0.0
    # Default legend location; the original left `location` unset until the
    # first loop iteration, so an empty `files_` raised NameError below.
    location = 1

    plt.figure(figsize=(9, 6))
    for t in files_:
        print('t', t)
        if str(t)[-1] == 'c':
            # entry is already a '<name>.nc' file name
            path_fields = os.path.join(path, 'fields', str(t))
            if case_name == 'TRMM_LBA':
                it = int(t[3:-3])  # int(): np.int was removed in NumPy 1.24
            else:
                it = int(t[0:-3])
        else:
            path_fields = os.path.join(path, 'fields', str(t) + '.nc')
            if case_name == 'TRMM_LBA':
                it = int(t[3:-1])
            else:
                it = float(t)
        var_field = read_in_netcdf(var_name, 'fields', path_fields)
        var_mean_field = np.mean(np.mean(var_field, axis=0), axis=0)

        # index into the statistics time axis
        tt = int((float(it) - time[0]) / float(dt_stats))
        try:
            mini = np.min([np.amin(var_mean[tt, :]), np.amin(var_mean_field)])
            maxi = np.max([np.amax(var_mean[tt, :]), np.amax(var_mean_field)])
        except Exception:
            # tt may lie outside the Stats profile; fall back to field only
            mini = np.amin(var_mean_field)
            maxi = np.amax(var_mean_field)

        if var_name == 'ql':
            location = 1
            # keep only heights with non-zero mean ql
            ql = np.ndarray(shape=(0))
            z_ = np.ndarray(shape=(0))
            k_ = np.ndarray(shape=(0), dtype=int)
            for k in range(zrange.shape[0]):
                if var_mean_field[k] > 0.0:
                    ql = np.append(ql, var_mean_field[k])
                    z_ = np.append(z_, zrange[k])
                    k_ = np.append(k_, k)
            if count_color == 0:
                # reference lines at the cloudy heights (labels every 50 m)
                for l in z_:
                    if np.mod(l, 50) == 0:
                        plt.plot([mini, maxi], [l, l], color='0.5', linewidth=1.5, label=str(l) + 'm')
                    else:
                        plt.plot([mini, maxi], [l, l], color='0.5', linewidth=0.5)
            lab = set_tlabel(it)
            plt.plot(ql[:], z_, color=cm1(count_color / len(files_)),
                     label=lab)
        else:
            location = 3
            if count_color == 0.0:
                # reference lines over the whole column
                for l in zrange:
                    if np.mod(l, 100) == 0:
                        plt.plot([mini, maxi], [l, l], color='0.5', linewidth=1.0, label=str(l) + 'm')
                    elif np.mod(l, 10 * dz) == 0:
                        plt.plot([mini, maxi], [l, l], color='0.2', linewidth=0.2)
            lab = set_tlabel(it)
            plt.plot(var_mean_field[:], zrange, color=cm1(count_color / len(files_)), label=lab)
            if profile:
                plt.plot(var_mean[tt, :], zrange, '--', color=cm2(count_color / len(files_)), label='t=' + str(tt * dt_stats) + 's (from Stats)')
        count_color += 1.0

    plt.legend(loc=location, fontsize=6)
    plt.xlabel('mean ' + var_name)
    plt.ylabel('height z [m]')
    plt.title('mean ' + var_name + ' (' + case_name + ', nx*ny=' + str(nx * ny) + ', dz=' + str(dz) + ')')

    plt.savefig(
        os.path.join(path, 'figs_stats', var_name + '_mean_fromfield_levels.pdf'))
    plt.close()
    return
# Exemplo n.º 15
def main():
    """Driver: read grid / statistics metadata for a case and produce the
    suite of mean, variance and cumulative profile plots."""
    parser = argparse.ArgumentParser(prog='PyCLES')
    parser.add_argument("path")
    parser.add_argument("casename")
    parser.add_argument("--files")
    parser.add_argument("--files_cum")
    args = parser.parse_args()
    path = args.path
    global case_name
    case_name = args.casename
    path_ref = os.path.join(path, 'Stats.' + case_name + '.nc')

    # Grid and output parameters from the simulation namelist.
    nml = simplejson.loads(open(os.path.join(path, case_name + '.in')).read())
    global nx, ny, nz, dz, dt_stats
    nx = nml['grid']['nx']
    ny = nml['grid']['ny']
    nz = nml['grid']['nz']
    dz = nml['grid']['dz']
    dt_stats = nml['stats_io']['frequency']
    print('Grid: dz=' + str(dz))
    global max_height
    print('')

    files = os.listdir(os.path.join(path, 'fields'))
    print('All Files: ', files)

    print('path_ref', path_ref)
    time_stats = nc.Dataset(path_ref, 'r').groups['profiles'].variables['t'][:]
    print('time stats: ', time_stats[0:5], time_stats[-3:])
    zrange_stats = nc.Dataset(path_ref, 'r').groups['profiles'].variables['z_half'][:]

    # Prefer the height axis stored in the field files themselves.
    if str(files[0])[-3:] == '.nc':
        path_z = os.path.join(path, 'fields', files[0])
    else:
        path_z = os.path.join(path, 'fields', str(files[0]) + '.nc')
    print(path_z)
    try:
        zrange_field = nc.Dataset(path_z, 'r').groups['fields'].variables['z'][:]
        zrange = zrange_field
    except Exception:
        # older field files carry no 'z' variable; fall back to Stats heights
        zrange = zrange_stats
    print('')
    print('zrange stats z_half:  ', zrange_stats[0:5], zrange_stats[20:23])
    if case_name == 'TRMM_LBA':
        zrange_stats_zp = nc.Dataset(path_ref, 'r').groups['reference'].variables['zp'][:]
        zrange_stats_zp_half = nc.Dataset(path_ref, 'r').groups['reference'].variables['zp_half'][:]
        print('zrange stats zp:      ', zrange_stats_zp[0:3], zrange_stats_zp[-2:])
        print('zrange stats zp_half: ', zrange_stats_zp_half[0:3], zrange_stats_zp_half[-2:])
        # NOTE(review): zrange_field only exists if the try above succeeded;
        # this print raises NameError otherwise -- confirm TRMM field files
        # always carry 'z'.
        print('zrange field:         ', zrange_field[0:3], zrange_field[-2:])

    levels, files_, files_cum, time_prof = set_levels(case_name, files, zrange_stats, dz)
    print('Selected Files: ', files_)

    max_height = 120
    plot_mean_profile('thetali', time_prof, zrange_stats, max_height, path_ref, path, False, 4)
    plot_mean_profile('thetali', time_prof, zrange_stats, max_height, path_ref, path, True, 4)
    plot_mean_profile('cloud_fraction', time_prof, zrange, max_height, path_ref, path, True)
    plot_mean_profile('qt', time_prof, zrange, max_height, path_ref, path, True)
    plot_mean_profile('ql', time_prof, zrange, max_height, path_ref, path, True)
    plot_mean('qt', files_, zrange, levels, path)
    plot_mean('ql', files_, zrange, levels, path)
    plot_mean('s', files_, zrange, levels, path)
    plot_mean_levels('qt', files_cum, zrange, path)
    plot_mean_levels('ql', files_cum, zrange, path)

    plot_mean_var('ql', files_, zrange, path)
    plot_mean_var('qt', files_, zrange, path)
    plot_mean_var('s', files_, zrange, path)

    plot_mean_cumulated('ql', files_cum, zrange, levels, path)
    plot_mean_cumulated_BL('ql', files_cum, zrange, levels, path, 125)
    plot_mean_cumulated('s', files_cum, zrange, levels, path)
    plot_mean_cumulated_BL('ql', files_cum, zrange, levels, path, max_height)
    plot_mean_cumulated_BL('s', files_cum, zrange, levels, path, max_height)

    plot_max_var('ql', zrange, path)

    return
# Exemplo n.º 16
def plot_ql_n_all(files_, zrange, path, prof=False):
    """Two-panel plot per time: (left) number of non-zero ql grid points per
    level on a log x-axis, (right) the horizontal-mean ql profile.

    If `prof` is True, the ql_mean profile from the Stats file is overlaid.
    """
    print('-- plot ql n all --')
    print(files_)
    global case_name
    global nx, ny, nz, dz, dt_stats
    cm1 = plt.cm.get_cmap('bone')
    cm2 = plt.cm.get_cmap('winter')

    count_color = 0.0
    plt.figure(figsize=(14, 7))
    for t in files_:
        print('')
        if str(t)[-1] == 'c':
            path_fields = os.path.join(path, 'fields', str(t))
            it = int(t[0:-3])  # int(): np.int was removed in NumPy 1.24
            if it >= 1000000:
                it -= 1000000
        else:
            path_fields = os.path.join(path, 'fields', str(t) + '.nc')
            it = int(t)
            if it >= 1000000:
                it -= 1000000

        lab = set_tlabel(it)
        print('it: ', it, lab)

        ql = read_in_netcdf('ql', 'fields', path_fields)
        # count cloudy grid points per level
        zql = []
        nql = []
        nql_all = []
        for z in range(nz):
            n = np.count_nonzero(ql[:, :, z])
            nql_all.append(n)
            if n > 0:
                zql.append(z)
                nql.append(n)

        ql_mean_fields = np.mean(np.mean(ql, axis=0), axis=0)
        ax = plt.subplot(1, 2, 1)
        plt.plot(nql_all, zrange, color=cm2(count_color / len(files_)), label='t=' + lab)
        # a log scale only makes sense when some counts are non-zero
        if np.any(nql_all):
            try:
                ax.set_xscale('log')
            except Exception:
                print('no log-scaling possible (nql_all=' + str(nql_all) + ')')
        plt.subplot(1, 2, 2)
        plt.plot(ql_mean_fields, zrange, color=cm2(count_color / len(files_)), label='t=' + lab)
        if prof:
            path_references = os.path.join(path, 'Stats.' + case_name + '.nc')
            ql_mean = read_in_netcdf('ql_mean', 'profiles', path_references)
            time = read_in_netcdf('t', 'timeseries', path_references)
            tt = int((float(it) - time[0]) / float(dt_stats))
            plt.plot(ql_mean[tt, :], zrange, 'k--', label='t=' + lab)
        count_color += 1.0

    plt.subplot(1, 2, 1)
    plt.legend()
    plt.xlabel('# non-zero ql')
    plt.ylabel('height z [m]')
    plt.title('# non-zero ql (' + case_name + ', nx*ny=' + str(nx * ny) + ')')
    plt.subplot(1, 2, 2)
    plt.legend()
    plt.xlabel('mean ql')
    plt.ylabel('height z [m]')
    plt.title('mean ql (' + case_name + ', nx*ny=' + str(nx * ny) + ')')

    plt.savefig(
        os.path.join(path, 'figs_stats', 'ql_number_fromfield.png'))
    plt.close()
    return
def _swap_trivar_components(weights, means, covars, k):
    """Swap mixture components 0 and 1 at height index k (in place).

    Fancy indexing on the right-hand side creates copies, so the row and
    block swaps are safe.
    """
    weights[k, 0], weights[k, 1] = weights[k, 1], weights[k, 0]
    means[k, [0, 1], :] = means[k, [1, 0], :]
    covars[k, [0, 1], :, :] = covars[k, [1, 0], :, :]


def main():
    """Read trivariate EM2 PDF parameters (all times in one file) and plot
    them under three component orderings: weight, E[qt] and E[w]."""
    parser = argparse.ArgumentParser(prog='PyCLES')
    parser.add_argument("path")
    parser.add_argument("casename")
    args = parser.parse_args()
    # ______________________
    case_name = args.casename
    read_in_nml(args.path, case_name)
    global path, fullpath_in
    path = args.path
    print('path:', path)
    fullpath_in = os.path.join(in_path, 'EM2_trivar_alltimes', 'EM2_trivar_alltimes.nc')
    # ______________________
    files = os.listdir(os.path.join(args.path, 'fields'))
    N = len(files)
    # ______________________
    '''
        zrange:     z-values for which the PDF is fitted
        var_list:   list of variables that are included in (multi-variate) PDF
        amp:        list of normalisation factors for data
    '''
    print('')
    global time
    time = read_in_netcdf('t', 'time', fullpath_in)
    print('time', time)
    # ______________________
    print('')
    global zrange, var_list
    zrange = read_in_netcdf('height', 'z-profile', fullpath_in)
    print('zrange from data: ', zrange * dz)
    # ______________________

    if case_name == 'DCBLSoares':
        var_list = ['w', 'u', 's']
    else:
        var_list = ['w', 'temperature', 'qt']
    var_corr_list = ['wtemperatureqt', 'wthetaliqt']

    # Probe one variable to learn the output geometry.
    print('')
    global z_max, ncomp, nvar, amp
    means = read_in_netcdf(var_corr_list[0], 'means', fullpath_in)
    z_max = means.shape[0]
    ncomp = means.shape[1]
    nvar = means.shape[2]
    amp = np.ones(nvar)
    print('ncomp, nvar, amp: ', ncomp, nvar, amp)

    '''(1) read in 3d variable fields: print data'''
    for var in var_corr_list:
        print('var', var)
        if var == 'wthetaliqt':
            var_list[1] = 'thetali'
        plot_data(files)

    '''(2) read in trivar EM2 PDF parameters'''
    nc_file_name = 'EM2_trivar_alltimes.nc'
    for var in var_corr_list:
        if var == 'wthetaliqt':
            var_list[1] = 'thetali'
        elif var == 'wtemperatureqt':
            var_list[1] = 'temperature'
        means = read_in_netcdf(var, 'means', fullpath_in)
        covars = read_in_netcdf(var, 'covariances', fullpath_in)
        weights = read_in_netcdf(var, 'weights', fullpath_in)
        # single-Gaussian estimate equivalent to the multi-component PDF
        mean_tot, covars_tot = covariance_estimate_from_multicomp_pdf(means, covars, weights)
        print('')

        '''(3) sort PDF components according to their weight'''
        print('Sorting A: weights')
        for k in range(z_max):
            if weights[k, 0] < weights[k, 1]:
                _swap_trivar_components(weights, means, covars, k)
        '''(3a) plot'''
        print('Plotting')
        trivar_plot_means(var, weights, means, covars, mean_tot, covars_tot, time, 'sortA')
        trivar_plot_covars(var, weights, means, covars, mean_tot, covars_tot, time, 'sortA')
        trivar_plot_weights(var, weights, means, covars, mean_tot, covars_tot, time, 'sortA')
        print('')

        '''(4) sort PDF components according to <qt>'''
        print('Sorting B: E[qt]')
        for k in range(z_max):
            if means[k, 0, 2] < means[k, 1, 2]:
                _swap_trivar_components(weights, means, covars, k)
        '''(4a) plot'''
        print('Plotting')
        trivar_plot_means(var, weights, means, covars, mean_tot, covars_tot, time, 'sortB')
        trivar_plot_covars(var, weights, means, covars, mean_tot, covars_tot, time, 'sortB')
        trivar_plot_weights(var, weights, means, covars, mean_tot, covars_tot, time, 'sortB')
        print('')

        '''(5) sort PDF components according to <w>'''
        print('Sorting C: E[w]')
        for k in range(z_max):
            if means[k, 0, 0] < means[k, 1, 0]:
                _swap_trivar_components(weights, means, covars, k)
        '''(5a) plot'''
        print('Plotting')
        trivar_plot_means(var, weights, means, covars, mean_tot, covars_tot, time, 'sortC')
        trivar_plot_covars(var, weights, means, covars, mean_tot, covars_tot, time, 'sortC')
        trivar_plot_weights(var, weights, means, covars, mean_tot, covars_tot, time, 'sortC')
        print('')

    return
# Exemplo n.º 18
def plot_mean_cumulated_BL(var_name, files_cum, zrange, levels, path, max_height):
    """Plot horizontal-mean boundary-layer profiles of *var_name*.

    For each 3D field file in *files_cum*, reads the variable, averages over
    the horizontal (x, y) directions, plots the resulting vertical profile up
    to level index *max_height*, and overlays the time mean of all profiles.
    Horizontal reference lines are drawn at the heights in *levels*.

    Parameters
    ----------
    var_name : str
        Name of the variable to read from each fields file.
    files_cum : sequence of str
        Field identifiers; either a full file name ending in '.nc' or a
        bare time string (then '.nc' is appended).
    zrange : array-like
        Height z [m] of each vertical level.
    levels : iterable
        Heights [m] at which to draw horizontal reference lines.
    path : str
        Simulation output directory (must contain 'fields' and 'figs_stats').
    max_height : int
        Index of the highest vertical level to include in the plots.

    Side effect: saves '<var_name>_mean_fromfield_cum_BL.pdf' in
    path/figs_stats and closes the figure.
    """
    print('')
    print('-- plot mean cumulated BL: ' + var_name + ' --')
    global case_name
    global nz
    mean_all = np.zeros(nz)

    print('files_cum', files_cum, len(files_cum))

    fig1 = plt.figure(figsize=(9, 6))
    cm1 = plt.cm.get_cmap('bone')
    cm2 = plt.cm.get_cmap('winter')
    count_color = 0.0

    # Value range of all plotted profiles, used to size the horizontal
    # level lines. (Renamed from min/max: do not shadow the builtins.)
    val_min = 9999.9
    val_max = -9999.9
    for t in files_cum:
        # Accept both '<time>.nc' file names and bare time strings.
        if str(t)[-1] == 'c':
            path_fields = os.path.join(path, 'fields', str(t))
            it = int(t[0:-3])  # np.int was removed in NumPy >= 1.24
        else:
            path_fields = os.path.join(path, 'fields', str(t) + '.nc')
            it = float(t)  # np.double(str) is just a float conversion
        var_field = read_in_netcdf(var_name, 'fields', path_fields)
        # Horizontal (x, y) average -> vertical profile.
        var_mean_field = np.mean(np.mean(var_field, axis=0), axis=0)
        mean_all += var_mean_field

        min_ = np.amin(var_mean_field[0:max_height])
        max_ = np.amax(var_mean_field[0:max_height])
        val_min = min(val_min, min_)
        val_max = max(val_max, max_)

        lab = set_tlabel(it)
        plt.plot(var_mean_field[0:max_height], zrange[0:max_height], '--',
                 color=cm1(count_color / len(files_cum)),
                 label='t=' + lab + ' (from field)')
        count_color += 1.0
    mean_all /= len(files_cum)
    plt.plot(mean_all[0:max_height], zrange[0:max_height], 'k', label='time mean')

    print('plotting levels', levels)
    for i in levels:
        # Only draw levels that fall inside the plotted height range.
        if i < zrange[max_height]:
            plt.plot([val_min, val_max], [i, i], linewidth=0.5, color='0.5',
                     label=str(int(i)) + 'm')

    plt.legend()
    plt.xlabel('mean ' + var_name)
    plt.ylabel('height z [m]')
    plt.title('mean ' + var_name + ' (' + case_name + ', n*nx*ny=' + str(nx * ny * len(files_cum)) + ')')
    plt.savefig(
        os.path.join(path, 'figs_stats', var_name + '_mean_fromfield_cum_BL.pdf'))
    # plt.show()
    plt.close()

    return