Example 1
def get_anchor_flderr_param(i, train, N, model, mode, fd):
    # return both the projection and ROM field relative errors
    import numpy as np
    import pandas as pd
    import reader

    # directory holding this anchor's mean-flow relative errors
    dirs = '../re' + str(train) + '/' + model[i] + '_parameter_Re' + str(
        train) + '/mrelerr_h1/'
    if model[i] == 'l-rom':
        # the filtered ROM stores its errors as plain .dat files; load and
        # return them directly
        filename = 'param_list_' + mode + '.dat'
        angle = np.loadtxt(dirs + filename)
        filename = 'rom_relerr_N' + str(
            N[i]) + '_' + fd[i] + '_' + mode + '.dat'
        fldrelerr_rom = np.loadtxt(dirs + filename)
        filename = 'proj_relerr_N' + str(N[i]) + '_' + mode + '.dat'
        fldrelerr_proj = np.loadtxt(dirs + filename)
        return [angle.tolist(), fldrelerr_rom.tolist(), fldrelerr_proj.tolist()]
    elif model[i] == 'l-rom-df':
        fname = 'mrelerr_N' + str(N[i]) + '_0' + fd[i] + '.csv'
    else:
        fname = 'mrelerr_N' + str(N[i]) + '.csv'

    # search the parent tree for the error file matching this anchor
    filenames = reader.find_files(fname, '../../')
    for f in filenames:
        # keep the file whose path matches this anchor's Ra, model, and N
        if all(x in f for x in ['Ra' + str(train), model[i], str(N[i])]):
            filename = f
    data = pd.read_csv(filename)

    return [data[col].tolist() for col in data.columns]
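A minimal usage sketch for the loader above. The anchor Rayleigh number, ROM name, mode count, mode string, and CSV column layout below are illustrative assumptions, not values taken from the original code.

import matplotlib.pyplot as plt

# hypothetical settings: one 'g-rom' anchor at Ra=10000 with N=20 modes
train, N, model, fd = 10000, [20], ['g-rom'], ['']
cols = get_anchor_flderr_param(0, train, N, model, 'ptr', fd)
# assume the first column holds the parameters and the second the ROM error
param, relerr = cols[0], cols[1]

fig, ax = plt.subplots()
ax.semilogy(param, relerr, '-o', label='g-rom, N=20')
ax.set(xlabel='Ra', ylabel='relative error')
ax.legend()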
Example 2
def get_FOM_mtfluc(P_test):
    import sys
    sys.path.append('/Users/bigticket0501/Developer/PyMOR/code/plot_helpers/')
    import re
    import numpy as np
    from reader import find_files

    fom = []
    for test in P_test:
        fname = 'tmtfluc'
        # search the parent tree for the FOM fluctuation file
        filenames = find_files(fname, '../../')
        for f in filenames:
            if 'Ra_'+str(test) in re.split('[/]', f):
                filename = f
        data = np.loadtxt(filename)
        fom.append(data)
    return fom
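A short, hedged example of how the helper above might be used; the test Rayleigh numbers are placeholders and each 'tmtfluc' file is assumed to hold a single scalar.

# placeholder test parameters
P_test = [10000, 20000, 40000]
fom_mtfluc = get_FOM_mtfluc(P_test)
for ra, val in zip(P_test, fom_mtfluc):
    print('Ra =', ra, ' FOM <T_fluc> =', val)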
Example 3
def get_opterri_param(P_test,
                      P_train,
                      N,
                      P_test_anchor,
                      model,
                      mode,
                      fd,
                      scaled=''):
    import numpy as np
    import pandas as pd
    import reader
    # get the error indicator (erri) obtained with theta_g at each anchor point
    erri_his = []
    for i, train in enumerate(P_train):
        if model[i] == 'l-rom':
            fname = 'dual_norm_N' + str(
                N[i]) + '_' + fd[i].strip('0') + scaled + '_' + mode + '.dat'
        elif model[i] == 'l-rom-df':
            fname = 'dual_norm_N' + str(N[i]) + '_0' + fd[i] + scaled + '.csv'
        else:
            fname = 'dual_norm_N' + str(N[i]) + scaled + '.csv'
        filenames = reader.find_files(fname, '../../')
        for f in filenames:
            # keep the file matching this anchor's Ra, model, and N
            if all(x in f for x in ['Ra' + str(train), model[i], str(N[i])]):
                filename = f
        data = pd.read_csv(filename)
        param, erri = [data[col].tolist() for col in data.columns]
        idx = [param.index(p) for p in P_test]
        param = np.asarray(param)[idx]
        erri = np.asarray(erri)[idx]

        erri_his.append(erri)

    erri_comb = np.array(erri_his)
    erri_opt = []

    P_train = list(P_train)
    # for each test parameter, pick the indicator from its assigned anchor
    for i, test in enumerate(P_test):
        index = P_train.index(P_test_anchor[i])
        erri_opt.append(erri_comb[index, i])
    return param, erri_comb, erri_opt
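A hedged sketch of a call to the function above; the anchors, test points, and per-anchor ROM settings are illustrative only and the test points are assumed to appear in each anchor's indicator table.

# illustrative anchors, test points, and the anchor assigned to each test point
P_train = [10000, 40000]
P_test = [10000, 20000, 40000]
P_test_anchor = [10000, 10000, 40000]
N, model, fd = [20, 20], ['g-rom', 'g-rom'], ['', '']

param, erri_comb, erri_opt = get_opterri_param(
    P_test, P_train, N, P_test_anchor, model, 'ptr', fd)
# erri_comb[j, k]: indicator of anchor j evaluated at test point k
# erri_opt[k]: indicator from the anchor assigned to test point k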
Example 4
def get_anchor_mtke(i, train, N, model, mode, fd, feature):
    # return the parameter list and the requested QoI column (e.g. mean TKE)
    # computed at the anchor `train`
    import pandas as pd
    import reader

    if model[i] == 'l-rom':
        fname = feature+'_N'+str(N[i])+'_'+fd[i].strip('0')+'_'+mode+'.csv'
    elif model[i] == 'l-rom-df':
        fname = feature+'_N'+str(N[i])+'_0'+fd[i]+'.csv'
    else:
        fname = feature+'_N'+str(N[i])+'.csv'
    filenames = reader.find_files(fname, '../../')
    for f in filenames:
        # keep the file matching this anchor's Ra, model, and N
        if all(x in f for x in ['Ra'+str(train), model[i], str(N[i])]):
            filename = f
    data = pd.read_csv(filename)
    param = data['Ra'].tolist()
    data1 = data[feature].tolist()

    return param, data1
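A hedged plotting sketch for the helper above; the anchor value, ROM settings, and the 'mtke' feature/column name are assumptions.

import matplotlib.pyplot as plt

param, mtke = get_anchor_mtke(0, 10000, [20], ['g-rom'], 'ptr', [''], 'mtke')

fig, ax = plt.subplots()
ax.semilogx(param, mtke, '-o', label='g-rom, N=20, anchor Ra=10000')
ax.set(xlabel='Ra', ylabel='mean TKE')
ax.legend()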
Example 5
def get_anchor_qoi_param(i, train, N, model, mode, fd):
    # return parameters, mean-Nu relative error, mean Nu, and std(Nu)
    # computed at the anchor `train`
    import pandas as pd
    import reader

    if model[i] == 'l-rom':
        fname = 'mnurelerr_N' + str(
            N[i]) + '_' + fd[i].strip('0') + '_' + mode + '.csv'
    elif model[i] == 'l-rom-df':
        fname = 'mnurelerr_N' + str(N[i]) + '_0' + fd[i] + '.csv'
    else:
        fname = 'mnurelerr_N' + str(N[i]) + '.csv'
    filenames = reader.find_files(fname, '../../')
    for f in filenames:
        # keep the file matching this anchor's Ra, model, and N
        if all(x in f for x in ['Ra' + str(train), model[i], str(N[i])]):
            filename = f
    data = pd.read_csv(filename)
    param = data['Ra'].tolist()
    merr = data['mnurelerr'].tolist()

    if model[i] == 'l-rom':
        fname = 'mnu_N' + str(
            N[i]) + '_' + fd[i].strip('0') + '_' + mode + '.csv'
    elif model[i] == 'l-rom-df':
        fname = 'mnu_N' + str(N[i]) + '_0' + fd[i] + '.csv'
    else:
        fname = 'mnu_N' + str(N[i]) + '.csv'
    filenames = reader.find_files(fname, '../../')
    for f in filenames:
        if all(x in f for x in ['Ra' + str(train), model[i], str(N[i])]):
            filename = f
    data = pd.read_csv(filename)
    m = data['mnu'].tolist()

    if model[i] == 'l-rom':
        fname = 'std_nu_N' + str(
            N[i]) + '_' + fd[i].strip('0') + '_' + mode + '.csv'
    elif model[i] == 'l-rom-df':
        fname = 'std_nu_N' + str(N[i]) + '_0' + fd[i] + '.csv'
    else:
        fname = 'std_nu_N' + str(N[i]) + '.csv'
    filenames = reader.find_files(fname, '../../')
    for f in filenames:
        if all(x in f for x in ['Ra' + str(train), model[i], str(N[i])]):
            filename = f
    data = pd.read_csv(filename)
    std = data['std_nu'].tolist()

    return param, merr, m, std
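A one-line hedged example of the call above; the anchor value and ROM settings are placeholders.

# parameters, mean-Nu relative error, mean Nu, and std(Nu) at a hypothetical anchor
param, merr, mnu, stdnu = get_anchor_qoi_param(0, 10000, [20], ['g-rom'], 'ptr', [''])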
Example 6
def get_FOM_nu_1st2nd(P_test):
    import sys
    sys.path.append('/Users/bigticket0501/Developer/PyMOR/code/plot_helpers/')
    import re
    import pandas as pd
    from reader import find_files

    fom_m_list = []
    fom_sd_list = []

    for test in P_test:
        fname = 'nus_mom.csv'
        # search the parent tree for the FOM Nu moments file
        filenames = find_files(fname, '../../')
        for f in filenames:
            if 'Ra_'+str(test) in re.split('[/]', f):
                filename = f
        data = pd.read_csv(filename)
        fom_mean = data['mean']
        fom_std = data[' std']  # the CSV header contains a leading space

        fom_m_list.append(fom_mean)
        fom_sd_list.append(fom_std)
    return fom_m_list, fom_sd_list
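A hedged usage sketch; the test parameters are placeholders and each 'nus_mom.csv' is assumed to contain a single row with 'mean' and ' std' columns.

P_test = [10000, 20000, 40000]
fom_mean, fom_std = get_FOM_nu_1st2nd(P_test)
for ra, m, s in zip(P_test, fom_mean, fom_std):
    print('Ra =', ra, ' <Nu> =', float(m.iloc[0]), ' std(Nu) =', float(s.iloc[0]))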
Example 7
            # (previously the Nu statistics were computed from the raw
            #  nuss_fom time series via mypostpro; they are now read from the
            #  pre-computed nus_mom.csv)
            fname = 'nus_mom.csv'
            filenames = find_files(fname, '../')
            for f in filenames:
                if 'Ra_' + str(test) in re.split('[/]', f):
                    filename = f
            data = pd.read_csv(filename)
            fom_mean = data['mean']
            fom_std = data[' std']

            fom_m_list.append(fom_mean)
            fom_sd_list.append(fom_std)

        fig1, ax1 = plt.subplots(1, tight_layout=True)
        fig2, ax2 = plt.subplots(1, tight_layout=True)
        fig3, ax3 = plt.subplots(1, tight_layout=True)
        fig4, ax4 = plt.subplots(1, tight_layout=True)
Example 8
def conv_compare(argv):
    import sys
    import yaml
    import re
    import pandas as pd
    import numpy as np
    import matplotlib.pyplot as plt
    from figsetup.style import style
    from figsetup.text import text
    from aux.create_dir import create_dir
    sys.path.append('/home/pht2/Developer/PyROM/code/plot_helpers/')
    import reader

    style(1)
    text()
    # all arguments except the last are model names; the last one names the
    # feature to plot
    models = list(argv[:-1])
    feature = argv[-1]

    fig, ax = plt.subplots(1, tight_layout=True)
    for model in models:
        dir1 = model + '_reproduction'
        with open('reproduction.yaml') as f:
            info = yaml.load(f, Loader=yaml.FullLoader)
        for key, value in info['parameters'].items():
            al = '_'.join([str(key), str(value)])
            dir1 = '_'.join([dir1, al])
        if model == 'l-rom':
            fd = info['perc'].replace('p', '.')
            fd = str(int(float(fd) * 100))
            solver = model.upper() + ' with ' + str(fd) + '% filtered'
            fname = feature + '_' + info['perc'] + '.csv'
        elif model == 'l-rom-df':
            fd = info['fwidth'].replace('p', '.')
            solver = model.upper() + r' with filter width $\delta=$ ' + str(fd)
            fname = feature + '_' + info['fwidth'] + '.csv'
        else:
            solver = model.upper()
            fname = feature + '.csv'
        tpath = reader.find_files(fname, './')
        print(tpath)
        # pick the path whose components match the current model
        for f in tpath:
            if model in re.split('[/_]', f):
                fn = f
        data = pd.read_csv(fn)
        ax.plot(data.iloc[:, 0], data.iloc[:, 1], '-o', label=solver)
    # build a LaTeX label describing the anchor parameters
    anc_lb = []
    for key, value in info['parameters'].items():
        if key == 'theta':
            anc_lb.append('\\' + str(key) + '^*_g=' + str(value))
        else:
            anc_lb.append(str(key) + '^*=' + str(value))
    anc_lb = ', '.join(anc_lb)
    if feature == 'mrelerr_h1':
        title = 'Relative error in the predicted mean flow at ' + '$' + anc_lb + '$'
        ax.set(
            xlabel=r'$N$',
            ylabel=
            r'$\frac{\|\langle \bf{u} - \bf{\tilde{u}} \rangle\|_{H^1}}{\|\langle \bf{u} \rangle\|_{H^1}}$',
            ylim=[1e-3, 1],
            title=title)
        ax.set_yscale('log')
    elif feature == 'mtke':
        title = 'Predicted mean TKE at ' + '$' + anc_lb + '$'
        ax.set(xlabel=r'$N$', ylabel=r'$\langle TKE \rangle_g$', title=title)
        ax.set_yscale('log')
        mtke_fom = np.loadtxt('../qoi/tmtke')
        fom_params = {'c': 'k', 'marker': 'o', 'label': 'FOM'}
        ax.plot(data.iloc[:, 0], mtke_fom * np.ones(len(data.iloc[:, 0])),
                **fom_params)
    elif feature == 'dual_norm':
        title = 'Dual norm at ' + '$' + anc_lb + '$'
        ax.set(xlabel=r'$N$',
               ylabel=r'$\triangle$',
               title=title,
               ylim=[1e-4, 1])
        ax.set_yscale('log')
    elif feature == 'mtfluc':
        title = 'Predicted mean fluctuation in temperature at ' + '$' + anc_lb + '$'
        ax.set(xlabel=r'$N$',
               ylabel=r'$\langle T_{fluc} \rangle_s$',
               title=title)
        mtfluc_fom = np.loadtxt('../qoi/tmtfluc')
        fom_params = {'c': 'k', 'marker': 'o', 'label': 'FOM'}
        ax.plot(data.iloc[:, 0], mtfluc_fom * np.ones(len(data.iloc[:, 0])),
                **fom_params)
    elif feature == 'mnu':
        filename = './fom/nus_mom.csv'
        fom = pd.read_csv(filename).to_numpy()
        title = 'Predicted mean Nu at ' + '$' + anc_lb + '$'
        ax.set(xlabel=r'$N$', ylabel=r'$\langle Nu \rangle_s$', title=title)
        fom_params = {'c': 'k', 'marker': 'o', 'label': 'FOM'}
        ax.plot(data.iloc[:, 0], fom[0][0] * np.ones(len(data.iloc[:, 0])),
                **fom_params)
    elif feature == 'stdnu':
        filename = './fom/nus_mom.csv'
        fom = pd.read_csv(filename).to_numpy()
        title = 'Predicted std(Nu) at ' + '$' + anc_lb + '$'
        ax.set(xlabel=r'$N$', ylabel=r'Std(Nu)', title=title)
        fom_params = {'c': 'k', 'marker': 'o', 'label': 'FOM'}
        ax.plot(data.iloc[:, 0], fom[0][1] * np.ones(len(data.iloc[:, 0])),
                **fom_params)
    elif feature == 'mnu_err':
        title = 'Relative error in mean Nu at ' + '$' + anc_lb + '$'
        ax.set(xlabel=r'$N$', title=title)
        ax.set_yscale('log')
    ax.legend(loc=0)
    tdir = './compare/'
    create_dir(tdir)
    fig.savefig(tdir + feature + '_conv_compare.png')
    return
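The driver above appears to take the model names followed by a feature name; a minimal command-line wrapper, assuming that calling convention:

if __name__ == '__main__':
    import sys
    # e.g. python conv_compare.py g-rom l-rom mrelerr_h1
    conv_compare(sys.argv[1:])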
Example 9
def get_ncand_param(P_test, P_train, ncand, N, model, mode, fd, scaled=''):
    import numpy as np
    import pandas as pd
    import reader

    P_test_anchor = []
    # Find the nearest anchor point(s) for each test parameter
    for i, test in enumerate(P_test):
        erris = []
        near_anch = []
        # distance between the test parameter and each anchor
        tmp = abs(test - P_train)
        print(test, tmp, P_train, len(set(tmp)))
        # if two anchors are equidistant from the test point, consider one
        # extra candidate anchor
        if len(set(tmp)) < len(P_train):
            ncand = 3
        else:
            ncand = 2
        for j in range(ncand):
            # index of the closest remaining anchor point
            minidx = np.argmin(tmp)
            near_anch.append(P_train[minidx])
            if 0 not in tmp:
                # mark this anchor as visited so the next pass picks the
                # next-closest one
                tmp[minidx] = 1e8

            if model[minidx] == 'l-rom':
                fname = 'erri_N' + str(N[minidx]) + '_' + fd[minidx].strip(
                    '0') + scaled + '_' + mode + '.dat'
            elif model[minidx] == 'l-rom-df':
                fname = 'dual_norm_N' + str(
                    N[minidx]) + '_0' + fd[minidx] + scaled + '.csv'
            else:
                fname = 'dual_norm_N' + str(N[minidx]) + scaled + '.csv'

            # search the parent tree for the error-indicator file
            filenames = reader.find_files(fname, '../../')
            for f in filenames:
                # keep the file matching this anchor's Ra, model, and N
                if all(x in f for x in [
                        'Ra' + str(int(P_train[minidx])), model[minidx],
                        str(N[minidx])
                ]):
                    filename = f
            data = pd.read_csv(filename)
            param, erri = [data[k].tolist() for k in data.columns]
            idx = param.index(test)
            erris.append(erri[idx])
        idx = erris.index(min(erris))
        print(i, test, near_anch, idx)
        P_test_anchor.append(near_anch[idx])
    return P_test_anchor
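A hedged example of the anchor-selection call; the anchors, test points, and ROM settings are illustrative, and P_train is passed as a NumPy array because the function relies on element-wise arithmetic and in-place updates of the distance array.

import numpy as np

P_train = np.array([10000, 40000, 80000])
P_test = [20000, 60000]
N, model, fd = [20, 20, 20], ['g-rom', 'g-rom', 'g-rom'], ['', '', '']

anchors = get_ncand_param(P_test, P_train, 2, N, model, 'ptr', fd)
# anchors[k] is the anchor whose error indicator is smallest at P_test[k]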