Example #1
def build_ft_regress(xdata,
                     ydata,
                     nparam=2,
                     init_rank=5,
                     adaptrank=1,
                     verbose=0):
    dim = xdata.shape[1]
    ndata = xdata.shape[0]

    ft = c3py.FunctionTrain(dim)
    for ii in range(dim):
        ft.set_dim_opts(ii, "legendre", LB, UB, nparam)

    ranks = [init_rank] * (dim + 1)
    ranks[0] = 1
    ranks[dim] = 1
    ft.set_ranks(ranks)
    ft.build_data_model(ndata,
                        xdata,
                        ydata,
                        alg="AIO",
                        obj="LS",
                        adaptrank=adaptrank,
                        kickrank=1,
                        roundtol=1e-10,
                        verbose=verbose)
    return ft
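
A minimal sketch of how this helper might be driven; the bounds LB/UB, the synthetic target, and pointwise evaluation via ft.eval are assumptions for illustration, not part of the original example:

import numpy as np
import c3py

LB, UB = -1.0, 1.0  # module-level bounds assumed by build_ft_regress

dim = 3
ndata = 200
xdata = np.random.rand(ndata, dim) * (UB - LB) + LB  # uniform samples on [LB, UB]^dim
ydata = np.sin(np.sum(xdata, axis=1))                # hypothetical smooth target

ft = build_ft_regress(xdata, ydata, nparam=3, init_rank=4)
print(ft.eval(np.zeros((dim,))))                     # fitted value at the origin
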
Example #2
def build_ft_adapt(dim):
    ft = c3py.FunctionTrain(dim)
    for ii in range(dim):
        ft.set_dim_opts(ii, "legendre", LB, UB, NPARAM)
    verbose = 0
    init_rank = 2
    adapt = 0  # rank adaptation is switched off in this variant
    ft.build_approximation(func1, None, init_rank, verbose, adapt)
    return ft
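
As above, a hedged usage sketch; LB, UB, NPARAM and func1 live at module scope in the original, so plausible stand-ins are defined here:

import numpy as np
import c3py

LB, UB, NPARAM = -1.0, 1.0, 4  # assumed module-level constants

def func1(x, param=None):
    # hypothetical target; x arrives as an (n, dim) array of samples
    return np.sin(np.sum(x, axis=1))

ft = build_ft_adapt(4)
print(ft.get_ranks())  # ranks of the resulting cross-approximation
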
Example #3
def gen_results(alpha):
    lb = -1  # lower bounds of features
    ub = 1  # upper bounds of features
    nparam = 3  # number of parameters per univariate function

    ## Build a rank-adaptive approximation of func2 (alpha is forwarded as its parameter)
    ft = c3py.FunctionTrain(dim)
    for ii in range(dim):
        ft.set_dim_opts(ii, "legendre", lb, ub, nparam)

    verbose = 0
    init_rank = 2
    adapt = 1
    ft.build_approximation(func2, alpha, init_rank, verbose, adapt)

    print("Computing Sobol Indices")
    SI = c3py.SobolIndices(ft, order=2)
    print("done")
    var = SI.get_variance()
    names = []
    mains = np.zeros((dim, ))
    totals = np.zeros((dim, ))
    for ii in range(dim):
        mains[ii] = SI.get_main_sensitivity(ii)
        totals[ii] = SI.get_total_sensitivity(ii)
        names.append(str(ii))

    # print("totals = ", totals)
    # print("Sum totals = ", np.sum(totals))

    # print("Mains = ", mains)
    # print("Sum mains = ", np.sum(mains))

    inter = []
    for ii in range(dim):
        for jj in range(ii + 1, dim):
            val = SI.get_interaction([ii, jj])
            names.append(str(ii) + str(jj))
            inter.append(val)

    inter = np.array(inter)

    # print("sum = ", np.sum(mains) + np.sum(inter))
    left_over = var - np.sum(mains) - np.sum(inter)
    # print("leftover = ", left_over)
    names.append("other")

    all_sizes = list(mains / var) + list(inter / var) + [left_over / var]
    return names, all_sizes
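
The (names, all_sizes) pair is shaped for a variance-decomposition chart; a plotting sketch, assuming matplotlib and that dim and func2 exist at module scope as in the surrounding examples:

import matplotlib.pyplot as plt

names, all_sizes = gen_results(None)  # None: nothing forwarded to func2's parameter
plt.pie(all_sizes, labels=names, autopct="%1.1f%%")
plt.title("Variance decomposition from Sobol indices")
plt.show()

Note that left_over can come out slightly negative through rounding error, so clipping it at zero before plotting may be necessary.
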
Example #4
def build_ft_regress(dim):
    ft = c3py.FunctionTrain(dim)
    for ii in range(dim):
        ft.set_dim_opts(ii, "legendre", LB, UB, NPARAM)

    ranks = [5] * (dim + 1)
    ranks[0] = 1
    ranks[dim] = 1
    ft.set_ranks(ranks)
    verbose = 0
    adaptrank = 1
    ft.build_data_model(NDATA, X, Y,
                        alg="AIO", obj="LS", adaptrank=adaptrank,
                        kickrank=1, roundtol=1e-10, verbose=verbose)
    return ft
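
This variant reads its training data from module-level globals; a sketch of the scaffolding it expects, with hypothetical values:

import numpy as np
import c3py

DIM = 3
NDATA = 500
LB, UB, NPARAM = -1.0, 1.0, 3
X = np.random.rand(NDATA, DIM) * (UB - LB) + LB  # training inputs
Y = np.cos(np.sum(X, axis=1))                    # hypothetical responses

ft = build_ft_regress(DIM)
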
Example #5
def func2_grad(x):
    return np.cos(np.sum(x, axis=1))
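
## Sketch (not in the original): finite-difference check of func2_grad,
## assuming the underlying function is sin of the coordinate sum, whose
## partial derivatives all equal cos of that sum
f = lambda z: np.sin(np.sum(z, axis=1))
xchk = np.random.rand(5, 2) * 2.0 - 1.0
h = 1e-6
xp = xchk.copy()
xp[:, 0] += h                                 # perturb coordinate 0
fd = (f(xp) - f(xchk)) / h                    # forward difference
print(np.max(np.abs(fd - func2_grad(xchk))))  # agrees to ~1e-6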


dim = 2  # number of features
ndata = 100  # number of data points
x = np.random.rand(ndata, dim) * 2.0 - 1.0  # training samples
y1 = func1(x)  # function values
y2 = func2(x)  # ditto

lb = -1  # lower bounds of features
ub = 1  # upper bounds of features
nparam = 2  # number of parameters per univariate function

## Run a rank-adaptive regression routine to approximate the first function
ft = c3py.FunctionTrain(dim)
for ii in range(dim):
    ft.set_dim_opts(ii, "legendre", lb, ub, nparam)
ft.build_data_model(ndata,
                    x,
                    y1,
                    alg="AIO",
                    obj="LS",
                    adaptrank=1,
                    kickrank=1,
                    roundtol=1e-10,
                    verbose=0,
                    store_opt_info=False)
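
## Sketch (not in the original example): check the fit on held-out samples,
## assuming ft.eval evaluates the function train at a single point
ntest = 50
xtest = np.random.rand(ntest, dim) * 2.0 - 1.0
yhat = np.array([ft.eval(xtest[ii, :]) for ii in range(ntest)])
print("relative error:",
      np.linalg.norm(yhat - func1(xtest)) / np.linalg.norm(func1(xtest)))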

## Run a fixed-rank regression routine to approximate the second function with stochastic gradient descent
ft_sgd = c3py.FunctionTrain(dim)
Example #6
dim = 3  # number of features
def func2(x, param=None):
    if param is not None:
        print("param = ", param)
    out = np.sin(2 * np.pi * np.sum(x, axis=1))
    return out

if __name__ == "__main__":
    
    lb = -1      # lower bounds of features
    ub = 1       # upper bounds of features
    nparam = 3   # number of parameters per univariate function

    ## Build a rank-adaptive approximation of func2
    ft = c3py.FunctionTrain(dim)
    for ii in range(dim):
        ft.set_dim_opts(ii,"legendre",lb,ub,nparam)

    verbose=0
    init_rank=2
    adapt=1
    ft.build_approximation(func2,None,init_rank,verbose,adapt)
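
    ## Sketch (assumption): the second argument to build_approximation is
    ## forwarded to func2's param at every sample; calling func2 directly
    ## shows the hook
    func2(np.random.rand(2, dim) * 2.0 - 1.0, param=0.5)  # prints the forwarded value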

    # collect functions
    ranks = ft.get_ranks()
    funcs = [None] * dim  # one list of univariate cores per dimension
    for ii in range(dim):
        funcs[ii] = [None]*ranks[ii]
        f, axis = plt.subplots(ranks[ii],ranks[ii+1])
        for row in range(ranks[ii]):
            pass  # the plotting body is cut off in the source
Example #7
def func2(x, param=None):  # signature assumed; it must accept the scales array passed below
    # print(x[:, 2])
    # print("\n\n\n\n\n")
    out = np.sin(np.sum(x, axis=1))
    return out


if __name__ == "__main__":

    lb = [-1] * DIM  # lower bounds of features
    ub = [1] * DIM  # upper bounds of features

    # number of parameters per univariate function
    nparam = [3] * DIM

    ## Build a rank-adaptive approximation of func2
    ft = c3py.FunctionTrain(DIM)
    for ii in range(DIM):
        ft.set_dim_opts(ii, "legendre", lb[ii], ub[ii], nparam[ii])

    verbose = 1
    init_rank = 2
    adapt = 1
    maxrank = 10
    kickrank = 2
    roundtol = 1e-8
    maxiter = 5
    scales = [2] * DIM
    scales[2] = 0.2
    ## maxrank, kickrank, roundtol, and maxiter above are tuning knobs for this
    ## call; the five-argument form shown is the one confirmed by the earlier
    ## examples
    ft.build_approximation(func2,
                           scales,
                           init_rank,
                           verbose,
                           adapt)
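
    ## Sketch (not in the original): inspect the ranks found by adaptation
    print("adapted ranks: ", ft.get_ranks())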