Example #1
def pce_22(nboot, seed, ntrain_samples):
    fpath_save = file_settings()[0]
    if os.path.exists(f'{fpath_save}sa_pce_22.csv'):
        return
    samples, values = read_specify('model',
                                   'full',
                                   product_uniform=False,
                                   num_vars=22)
    # Read the parameter inputs used to build the dataframe of analytical ratios between sensitivity indices
    variable, param_all = read_specify('parameter',
                                       'full',
                                       product_uniform=False,
                                       num_vars=22)
    # Used for PCE fitting
    np.random.seed(seed)
    error_cv, _, total_effects, _ = fun(variable,
                                        samples[:, :ntrain_samples],
                                        values[:ntrain_samples],
                                        product_uniform=False,
                                        nboot=nboot)
    sa_df = sa_df_format(total_effects, variable, param_all, conf_level=0.95)
    sa_df.to_csv(f'{fpath_save}sa_pce_22.csv', index=True)
    error_cv_df = pd.DataFrame(data=error_cv,
                               columns=[f'22_{ntrain_samples}_uniform'],
                               index=np.arange(len(error_cv)))
    error_cv_mean = error_cv_df.apply(np.mean, axis=0)
    error_cv_lower = np.round(error_cv_df.quantile(0.025, axis=0), 4)
    error_cv_upper = np.round(error_cv_df.quantile(0.975, axis=0), 4)
    error_stats_df = pd.DataFrame(
        data=[error_cv_mean, error_cv_lower, error_cv_upper],
        index=['mean', 'lower', 'upper']).T
    error_stats_df.to_csv(f'{fpath_save}error_cv_compare.csv', index=True)
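A minimal usage sketch, assuming the project helpers (file_settings, read_specify, fun, sa_df_format) and the usual imports (os, numpy as np, pandas as pd) are available in the surrounding module; the argument values below are placeholders for illustration only.

# Illustrative driver (placeholder arguments): fit the 22-parameter PCE once
# and read back the two CSV files that pce_22 writes.
pce_22(nboot=500, seed=222, ntrain_samples=552)
fpath_save = file_settings()[0]
sa_df = pd.read_csv(f'{fpath_save}sa_pce_22.csv', index_col=0)
error_stats_df = pd.read_csv(f'{fpath_save}error_cv_compare.csv', index_col=0)
print(sa_df.head())
print(error_stats_df)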
Example #2
def pya_boot_sensitivity(outpath, nboot, seed, product_uniform, filename):

    variable, _ = read_specify('parameter',
                               'reduced',
                               product_uniform,
                               num_vars=11)

    len_params = variable.num_vars()
    samples, values = read_specify('model',
                                   'reduced',
                                   product_uniform,
                                   num_vars=11)
    # Adaptively increase the size of the training dataset and conduct the bootstrap-based partial ranking
    n_strat, n_end, n_step = [104, 552, 13]
    errors_cv_all = {}
    partial_results = {}
    total_effects_all = {}
    approx_list_all = {}
    for i in range(n_strat, n_end + 1, n_step):
        # for i in n_list:
        if (n_end - i) < n_step:
            i = n_end
        np.random.seed(seed)
        errors_cv, _, total_effects, approx_list = fun(variable,
                                                       samples[:, :i],
                                                       values[:i],
                                                       product_uniform,
                                                       nboot=nboot)

        # partial ranking
        total_effects = np.array(total_effects)
        sa_shape = total_effects.shape[0:2]
        total_effects = total_effects.reshape(sa_shape)
        rankings = partial_rank(total_effects, len_params, conf_level=0.95)
        partial_results[f'nsample_{i}'] = rankings
        errors_cv_all[f'nsample_{i}'] = errors_cv
        total_effects_all[f'nsample_{i}'] = total_effects
        approx_list_all[f'nsample_{i}'] = approx_list
    # End for

    np.savez(f'{outpath}{filename}',
             errors_cv=errors_cv_all,
             sensitivity_indices=partial_results,
             total_effects=total_effects_all)
    import pickle
    with open(f'{outpath}{filename[:-4]}-approx-list.pkl', 'wb') as f:
        pickle.dump(approx_list_all, f)
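Because np.savez stores each result dictionary as a 0-d object array, loading the file back needs allow_pickle=True and .item() to recover the dicts. A minimal loading sketch, assuming the same outpath and filename that were passed to pya_boot_sensitivity:

import pickle
import numpy as np

results = np.load(f'{outpath}{filename}', allow_pickle=True)
errors_cv_all = results['errors_cv'].item()           # dict keyed by 'nsample_<i>'
rankings_all = results['sensitivity_indices'].item()  # partial rankings per sample size
total_effects_all = results['total_effects'].item()   # bootstrapped total-order indices
with open(f'{outpath}{filename[:-4]}-approx-list.pkl', 'rb') as f:
    approx_list_all = pickle.load(f)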
Example #3
def pmf_check(product_uniform=False, seed=222):
    # NOTE: seed is taken here as an explicit argument (the default value is
    # illustrative); it is used to seed NumPy before each PCE fit below.
    variable, _ = read_specify('parameter', 'reduced', product_uniform, num_vars=11)
    samples, values = read_specify('model', 'reduced', product_uniform, num_vars=11)

    approx_list_all = {}
    mean_list = {}
    variance_list = {}
    n_strat, n_end, n_step = [156, 252, 13]
    for i in range(n_strat, n_end+1, n_step):
    # for i in n_list:
        print(i)
        if (n_end - i)  < n_step:
            i = n_end
        np.random.seed(seed)                                                    
        approx_list_all[f'nsample_{i}'] = fun(variable, samples[:, :i], values[:i], product_uniform, nboot=1)
    for key, pce in approx_list_all.items():
        mean_list[key], variance_list[key] = pce.mean(), pce.variance()
    pmf_stat = pd.concat([pd.DataFrame.from_dict(mean_list).T,
                          pd.DataFrame.from_dict(variance_list).T], axis=1)

    return pmf_stat
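A sketch of calling pmf_check, assuming a scalar model output so that each PCE mean and variance reduces to a single column; the argument values are illustrative.

# Illustrative call: compare the PCE mean and variance across training-set sizes.
pmf_stat = pmf_check(product_uniform=True, seed=222)
pmf_stat.columns = ['mean', 'variance']  # assumes a scalar quantity of interest
print(pmf_stat)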
Example #4
filename = f'{input_path}problem_adjust.txt'
problem_adjust = read_param_file(filename, delimiter=',')

num_pce = 500
seed = 222
ntrain_samples = 156
np.random.seed(seed)
rand = np.random.randint(0, ntrain_samples, size=(num_pce, ntrain_samples))
for n in range(600, 601, 100):
    print(n)
    df = samples_df(sample_method='samples_product', Nsamples=n)
    for ii in range(num_pce):
        poly = fun(variable,
                   samples,
                   values,
                   degree=2,
                   nboot=1,
                   I=rand[ii],
                   ntrain_samples=ntrain_samples)
        y_range_change = np.round(poly(df.T), 4).reshape(list(df.shape)[0])
        sa, main_resample, total_resample = sobol.analyze(
            problem_adjust,
            y_range_change,
            calc_second_order=False,
            num_resamples=500,
            conf_level=0.95)
        try:
            # Accumulate the bootstrapped total-order indices column-wise across the PCE replicates
            total_indices = np.append(total_indices, total_resample, axis=1)
        except NameError:
            total_indices = total_resample[:]
    rankings = partial_rank(total_indices.T, problem_adjust['num_vars'])
###=============CALCULATE THE ERROR METRICS FROM FACTOR FIXING============###
x_sample = np.loadtxt(f'{output_path}metric_samples.txt')

pool_res = {}
rand = np.random.randint(0, x_sample.shape[1], size=(500, x_sample.shape[1]))
cv_temp = np.zeros(num_pce)
rand_pce = np.random.randint(0, ntrain_samples, size=(num_pce, ntrain_samples))
ci_bounds = [0.025, 0.975]
# add the calculation of y_uncond
y_temp = np.zeros(shape=(num_pce, x_sample.shape[1]))
pce_list = []
for i in range(rand_pce.shape[0]):
    poly = fun(variable,
               samples[:, rand_pce[i]],
               values[rand_pce[i]],
               degree=2,
               nboot=1,
               ntrain_samples=ntrain_samples)

    pce_list.append(poly)
    y_temp[i, :] = poly(x_sample).flatten()
    cv_temp[i] = np.sqrt(poly.variance())[0] / poly.mean()[0]
# add the calculation of y_uncond

y_uncond = y_temp
conf_uncond = uncond_cal(y_uncond, ci_bounds, rand)
conf_uncond['cv'] = cv_temp.mean()
conf_uncond['cv_low'], conf_uncond['cv_up'] = np.quantile(cv_temp, ci_bounds)
# End for
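uncond_cal is project-specific and its implementation is not shown here. Purely to illustrate how the bootstrap index matrix rand and ci_bounds could be combined, a hypothetical stand-in might look like the following (not the project's actual helper):

import numpy as np

def uncond_cal_sketch(y_uncond, ci_bounds, rand):
    # Hypothetical stand-in for uncond_cal: bootstrap the pooled PCE outputs
    # column-wise and report the mean together with its confidence bounds.
    boot_means = np.array([y_uncond[:, idx].mean() for idx in rand])
    low, up = np.quantile(boot_means, ci_bounds)
    return {'mean': boot_means.mean(), 'low': low, 'up': up}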