import numpy as np
import scipy.linalg as scl
import openturns as ot

# Local helpers assumed to be provided elsewhere in this repository:
# seq_lj_1d, Hierarchical1d, interpolate_single, admissible_neighbors,
# transform_multi_index_set, get_design_matrix.


def compute_moments(interp_dict, jpdf, which='all'):
    """Compute expected value and variance of the interpolant.

    INTERP_DICT: dictionary returned by dali().
    JPDF: joint probability density function.
    WHICH: 'all' uses both the activated and the admissible sets;
    any other value uses the activated set only."""
    if which == 'all':
        idx = np.array(interp_dict['idx_act'] + interp_dict['idx_adm'])
        hs = interp_dict['hs_act'] + interp_dict['hs_adm']
        hs2 = interp_dict['hs2_act'] + interp_dict['hs2_adm']
    else:
        idx = np.array(interp_dict['idx_act'])
        hs = interp_dict['hs_act']
        hs2 = interp_dict['hs2_act']
    max_idx_per_dim = np.max(idx, axis=0)
    M = len(hs)               # no. of approximation terms
    N = len(max_idx_per_dim)  # no. of dimensions

    # quadrature weights per dimension
    weights_per_dim = {}
    for n in range(N):
        # get knots/weights per dimension based on the maximum index
        kk, ww = seq_lj_1d(order=max_idx_per_dim[n], dist=jpdf[n])
        weights_per_dim[n] = ww

    # multi-dimensional weights: products of the univariate weights
    weights_md = [[weights_per_dim[n][idx[m, n]] for m in range(M)]
                  for n in range(N)]
    weights_md = np.prod(weights_md, axis=0)

    # moments: E[f] = sum_m w_m * hs_m, Var[f] = E[f^2] - E[f]^2
    expected = np.dot(weights_md, hs)
    variance = np.dot(weights_md, hs2) - expected * expected
    return expected, variance
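# Usage sketch (hypothetical; assumes `interp_dict` was produced by dali(),
# defined further below, with the same chaospy joint distribution `jpdf`):
#
#   mean, var = compute_moments(interp_dict, jpdf, which='all')
#   mean_act, var_act = compute_moments(interp_dict, jpdf, which='act')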
def interpolate_multiple(indices, coeffs, jpdf, non_grid_knots):
    """Leja interpolation on an adaptively constructed sparse grid."""
    # get shape of non_grid_knots array --> K knots, N parameters
    non_grid_knots = np.array(non_grid_knots)
    try:
        # case: 2d array, K x N
        K, N = np.shape(non_grid_knots)
    except ValueError:
        # case: 1d array, 1 x N
        K = 1
        N = len(non_grid_knots)
        non_grid_knots = non_grid_knots.reshape(K, N)

    # get shape of indices array --> M approx. terms, NN parameters
    indices = np.array(indices)
    try:
        # case: 2d array, M x NN
        M, NN = np.shape(indices)
    except ValueError:
        # case: 1d array, 1 x NN
        M = 1
        NN = len(indices)
        indices = indices.reshape(M, NN)

    # check if dimensions agree
    if N != NN:
        raise ValueError('Knot and multi-index dimensions do not agree!')

    # get maximum index per dimension (P_1, P_2, ..., P_N)
    max_idx_per_dim = np.max(indices, axis=0)

    # get knots, polynomials and polynomial evaluations per dimension
    knots_per_dim = {}  # should hold N 1D arrays, [1 x (P_n+1)]
    polys_per_dim = {}  # should hold N 1D lists, [1 x (P_n+1)]
    evals_per_dim = {}  # should hold N 2D arrays, [K x (P_n+1)]
    for n in range(N):
        # get knots per dimension based on maximum index
        kk, ww = seq_lj_1d(order=max_idx_per_dim[n], dist=jpdf[n])
        knots_per_dim[n] = kk
        # get polynomials per dimension based on knots
        P = len(kk)  # no. of knots = no. of polynomials = P_n + 1
        polys_per_dim[n] = [Hierarchical1d(kk[:p + 1]) for p in range(P)]
        # univariate polynomial evaluations
        evals_per_dim[n] = np.ones([K, P])
        for p in range(1, P):  # column 0 --> pol. order 0 --> = 1.0
            evals_per_dim[n][:, p] = polys_per_dim[n][p].evaluate(
                non_grid_knots[:, n])

    # loop over M approximation terms (i.e., indices)
    evals_multidim = np.zeros(K)
    for m in range(M):
        # start with the 1st dimension
        ievals_m = evals_per_dim[0][:, indices[m, 0]]
        # multiply with the rest of the dimensions
        for n in range(1, N):
            ievals_m = np.multiply(ievals_m,
                                   evals_per_dim[n][:, indices[m, n]])
        # add m-th term
        evals_multidim = evals_multidim + ievals_m * coeffs[m]

    return evals_multidim
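# Usage sketch (hypothetical; assumes `interp_dict` comes from dali() below,
# with hierarchical surpluses acting as the interpolation coefficients, and
# `jpdf` is the same chaospy joint distribution):
#
#   samples = jpdf.sample(1000).T  # K x N array of evaluation points
#   approx = interpolate_multiple(interp_dict['idx_act'],
#                                 interp_dict['hs_act'],
#                                 jpdf, samples)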
# export dictionary data
# (script snippet: `mero_dict`, `jpdf` and `mfc` are assumed to be defined
# in the surrounding context)
idx = mero_dict['idx_act'] + mero_dict['idx_adm']
hs = mero_dict['hs_act'] + mero_dict['hs_adm']
fevals = mero_dict['fevals_act'] + mero_dict['fevals_adm']

# find knots/weights
max_idx_per_dim = np.max(idx, axis=0)
M = len(hs)               # approx. terms
N = len(max_idx_per_dim)  # dimensions

# knots and weights per dimension
weights_per_dim = {}
knots_per_dim = {}
for n in range(N):
    # get knots per dimension based on maximum index
    kk, ww = seq_lj_1d(order=max_idx_per_dim[n], dist=jpdf[n])
    weights_per_dim[n] = ww
    knots_per_dim[n] = kk

# multi-dimensional knots
knots_md = [[knots_per_dim[n][idx[m][n]] for m in range(M)]
            for n in range(N)]
# multi-dimensional weights
weights_md = [[weights_per_dim[n][idx[m][n]] for m in range(M)]
              for n in range(N)]
weights_md = np.prod(weights_md, axis=0)

np.savetxt('mero_leja_ED/leja_ed_in_' + str(mfc) + '.txt',
           np.array(knots_md).T)
np.savetxt('mero_leja_ED/leja_ed_out_' + str(mfc) + '.txt', fevals)
np.savetxt('mero_leja_ED/leja_quad_weights_' + str(mfc) + '.txt', weights_md)
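# The exported files can be reloaded later, e.g. to re-use the experimental
# design or the quadrature weights (sketch, same `mfc` suffix assumed):
#
#   ed_in = np.loadtxt('mero_leja_ED/leja_ed_in_' + str(mfc) + '.txt')
#   ed_out = np.loadtxt('mero_leja_ED/leja_ed_out_' + str(mfc) + '.txt')
#   qw = np.loadtxt('mero_leja_ED/leja_quad_weights_' + str(mfc) + '.txt')
#   mean_estimate = np.dot(qw, ed_out)  # quadrature-based mean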
def dali_pce(func, N, jpdf_cp, jpdf_ot, tol=1e-12, max_fcalls=1000,
             verbose=True, interp_dict=None):
    """Dimension-adaptive Leja interpolation with a PCE basis.

    FUNC: function to be approximated.
    N: number of parameters.
    JPDF_CP, JPDF_OT: joint probability density function, given as a
    chaospy and an OpenTURNS object, respectively.
    TOL, MAX_FCALLS: exit criteria, self-explanatory.
    'ACT': activated, i.e. already part of the approximation.
    'ADM': admissible, i.e. candidates for the approximation's expansion."""
    if not interp_dict:  # if dictionary is empty or None --> cold-start
        idx_act = []     # M_activated x N
        idx_adm = []     # M_admissible x N
        fevals_act = []  # M_activated x 1
        fevals_adm = []  # M_admissible x 1
        coeff_act = []   # M_activated x 1
        coeff_adm = []   # M_admissible x 1
        # start with the zero multi-index
        knot0 = []
        for n in range(N):
            # get knots per dimension based on maximum index
            kk, ww = seq_lj_1d(order=0, dist=jpdf_cp[n])
            knot0.append(kk[0])
        feval = func(knot0)
        # update activated sets
        idx_act.append([0] * N)
        coeff_act.append(feval)
        fevals_act.append(feval)
        # local error indicators
        local_error_indicators = np.abs(coeff_act)

        # get the OT distribution type of each random variable
        dist_types = []
        for i in range(N):
            dist_type = jpdf_ot.getMarginal(i).getName()
            dist_types.append(dist_type)

        # create orthogonal univariate bases
        poly_collection = ot.PolynomialFamilyCollection(N)
        for i in range(N):
            if dist_types[i] == 'Uniform':
                poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
                    ot.LegendreFactory())
            elif dist_types[i] == 'Normal':
                poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
                    ot.HermiteFactory())
            elif dist_types[i] == 'Beta':
                poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
                    ot.JacobiFactory())
            elif dist_types[i] == 'Gamma':
                poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
                    ot.LaguerreFactory())
            else:
                pdf = jpdf_ot.getDistributionCollection()[i]
                algo = ot.AdaptiveStieltjesAlgorithm(pdf)
                poly_collection[i] = ot.StandardDistributionPolynomialFactory(
                    algo)

        # create multivariate basis
        mv_basis = ot.OrthogonalProductPolynomialFactory(
            poly_collection, ot.EnumerateFunction(N))
        # get enumerate function (multi-index handling)
        enum_func = mv_basis.getEnumerateFunction()
    else:
        idx_act = interp_dict['idx_act']
        idx_adm = interp_dict['idx_adm']
        coeff_act = interp_dict['coeff_act']
        coeff_adm = interp_dict['coeff_adm']
        fevals_act = interp_dict['fevals_act']
        fevals_adm = interp_dict['fevals_adm']
        mv_basis = interp_dict['mv_basis']
        enum_func = interp_dict['enum_func']
        # local error indicators
        local_error_indicators = np.abs(coeff_adm)

    # compute global error indicator
    global_error_indicator = local_error_indicators.sum()  # max or sum

    # fcalls / M approx. terms up to now
    fcalls = len(idx_act) + len(idx_adm)  # fcalls = M --> approx. terms

    # maximum index per dimension
    max_idx_per_dim = np.max(idx_act + idx_adm, axis=0)

    # univariate knots per dimension
    knots_per_dim = {}
    for n in range(N):
        kk, ww = seq_lj_1d(order=max_idx_per_dim[n], dist=jpdf_cp[n])
        knots_per_dim[n] = kk

    # start iterations
    while global_error_indicator > tol and fcalls < max_fcalls:
        if verbose:
            print(fcalls)
            print(global_error_indicator)

        # the index added last to the activated set is the one to be refined
        last_act_idx = idx_act[-1][:]
        # compute the knot corresponding to the lastly added index
        last_knot = [knots_per_dim[n][i]
                     for n, i in zip(range(N), last_act_idx)]
        # get admissible neighbors of the lastly added index
        adm_neighbors = admissible_neighbors(last_act_idx, idx_act)

        for an in adm_neighbors:
            # update admissible index set
            idx_adm.append(an)
            # find which parameter/direction n (n=1,2,...,N) gets refined
            n_ref = np.argmin(
                [idx1 == idx2 for idx1, idx2 in zip(an, last_act_idx)])
            # sequence of 1d Leja nodes/weights for the given refinement
            knots_n, weights_n = seq_lj_1d(an[n_ref], jpdf_cp[int(n_ref)])
            # update max_idx_per_dim and knots_per_dim, if necessary
            if an[n_ref] > max_idx_per_dim[n_ref]:
                max_idx_per_dim[n_ref] = an[n_ref]
                knots_per_dim[n_ref] = knots_n
            # find new_knot and evaluate the function on it
            new_knot = last_knot[:]
            new_knot[n_ref] = knots_n[-1]
            feval = func(new_knot)
            fevals_adm.append(feval)
            fcalls += 1  # update function calls

        # create PCE basis
        idx_system = idx_act + idx_adm
        idx_system_single = transform_multi_index_set(idx_system, enum_func)
        system_basis = mv_basis.getSubBasis(idx_system_single)
        # get corresponding evaluations
        fevals_system = fevals_act + fevals_adm
        # multi-dimensional knots
        M = len(idx_system)  # no. of equations = no. of approx. terms
        knots_md = [[knots_per_dim[n][idx_system[m][n]] for m in range(M)]
                    for n in range(N)]
        knots_md = np.array(knots_md).T
        # design matrix
        D = get_design_matrix(system_basis, knots_md)
        # solve the system of equations via QR decomposition
        Q, R = scl.qr(D, mode='economic')
        c = Q.T.dot(fevals_system)
        coeff_system = scl.solve_triangular(R, c)

        # find the multi-index with the largest contribution, add it to
        # idx_act and delete it from idx_adm
        coeff_act = coeff_system[:len(idx_act)].tolist()
        coeff_adm = coeff_system[-len(idx_adm):].tolist()
        help_idx = np.argmax(np.abs(coeff_adm))
        idx_add = idx_adm.pop(help_idx)
        pce_coeff_add = coeff_adm.pop(help_idx)
        fevals_add = fevals_adm.pop(help_idx)
        idx_act.append(idx_add)
        coeff_act.append(pce_coeff_add)
        fevals_act.append(fevals_add)

        # re-compute error indicators for the admissible multi-indices
        local_error_indicators = np.abs(coeff_adm)
        global_error_indicator = local_error_indicators.sum()  # max or sum

    # store expansion data in dictionary
    interp_dict = {}
    interp_dict['idx_act'] = idx_act
    interp_dict['idx_adm'] = idx_adm
    interp_dict['coeff_act'] = coeff_act
    interp_dict['coeff_adm'] = coeff_adm
    interp_dict['fevals_act'] = fevals_act
    interp_dict['fevals_adm'] = fevals_adm
    interp_dict['enum_func'] = enum_func
    interp_dict['mv_basis'] = mv_basis
    return interp_dict
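# Usage sketch (hypothetical; `model` and both joint-distribution objects
# are placeholders):
#
#   import chaospy as cp
#   jpdf_cp = cp.J(cp.Uniform(-1, 1), cp.Uniform(-1, 1))
#   jpdf_ot = ot.ComposedDistribution([ot.Uniform(-1.0, 1.0)] * 2)
#   pce_dict = dali_pce(model, N=2, jpdf_cp=jpdf_cp, jpdf_ot=jpdf_ot,
#                       max_fcalls=200)
#   # with an orthonormal basis, the coefficient of the zero multi-index
#   # (the first activated index) equals the PCE mean
#   mean = pce_dict['coeff_act'][0]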
def dali(func, N, jpdf, tol=1e-12, max_fcalls=1000, verbose=True,
         interp_dict=None):
    """Dimension-Adaptive Leja Interpolation (DALI) algorithm.

    FUNC: function to be approximated.
    N: number of parameters.
    JPDF: joint probability density function.
    TOL, MAX_FCALLS: exit criteria, self-explanatory.
    'ACT': activated, i.e. already part of the approximation.
    'ADM': admissible, i.e. candidates for the approximation's expansion."""
    if not interp_dict:  # if dictionary is empty or None --> cold-start
        idx_act = []     # M_activated x N
        idx_adm = []     # M_admissible x N
        hs_act = []      # M_activated x 1
        hs_adm = []      # M_admissible x 1
        hs2_act = []     # M_activated x 1
        hs2_adm = []     # M_admissible x 1
        fevals_act = []  # M_activated x 1
        fevals_adm = []  # M_admissible x 1
        # start with the zero multi-index
        knot0 = []
        for n in range(N):
            # get knots per dimension based on maximum index
            kk, ww = seq_lj_1d(order=0, dist=jpdf[n])
            knot0.append(kk[0])
        feval = func(knot0)
        # update activated sets
        idx_act.append([0] * N)
        hs_act.append(feval)
        hs2_act.append(feval * feval)
        fevals_act.append(feval)
        # local error indicators
        local_error_indicators = np.abs(hs_act)
    else:
        # get data from dictionary
        idx_act = interp_dict['idx_act']
        idx_adm = interp_dict['idx_adm']
        hs_act = interp_dict['hs_act']
        hs_adm = interp_dict['hs_adm']
        hs2_act = interp_dict['hs2_act']
        hs2_adm = interp_dict['hs2_adm']
        fevals_act = interp_dict['fevals_act']
        fevals_adm = interp_dict['fevals_adm']
        # local error indicators
        local_error_indicators = np.abs(hs_adm)

    # compute global error indicator
    global_error_indicator = local_error_indicators.sum()  # max or sum

    # fcalls / M approx. terms up to now
    fcalls = len(idx_act) + len(idx_adm)  # fcalls = M --> approx. terms

    # maximum index per dimension
    max_idx_per_dim = np.max(idx_act + idx_adm, axis=0)

    # univariate knots and polynomials per dimension
    knots_per_dim = {}
    polys_per_dim = {}
    for n in range(N):
        kk, ww = seq_lj_1d(order=max_idx_per_dim[n], dist=jpdf[n])
        knots_per_dim[n] = kk
        P = len(kk)  # no. of knots = no. of polynomials = P_n + 1
        polys_per_dim[n] = [Hierarchical1d(kk[:p + 1]) for p in range(P)]

    # start iterations
    while global_error_indicator > tol and fcalls < max_fcalls:
        if verbose:
            print(fcalls)
            print(global_error_indicator)

        # the index added last to the activated set is the one to be refined
        last_act_idx = idx_act[-1][:]
        # compute the knot corresponding to the lastly added index
        last_knot = [knots_per_dim[n][i]
                     for n, i in zip(range(N), last_act_idx)]
        # get admissible neighbors of the lastly added index
        adm_neighbors = admissible_neighbors(last_act_idx, idx_act)

        for an in adm_neighbors:
            # update admissible index set
            idx_adm.append(an)
            # find which parameter/direction n (n=1,2,...,N) gets refined
            n_ref = np.argmin(
                [idx1 == idx2 for idx1, idx2 in zip(an, last_act_idx)])
            # sequence of 1d Leja nodes/weights for the given refinement
            knots_n, weights_n = seq_lj_1d(an[n_ref], jpdf[int(n_ref)])
            # update max_idx_per_dim, knots_per_dim and polys_per_dim,
            # if necessary
            if an[n_ref] > max_idx_per_dim[n_ref]:
                max_idx_per_dim[n_ref] = an[n_ref]
                knots_per_dim[n_ref] = knots_n
                polys_per_dim[n_ref].append(Hierarchical1d(knots_n))
            # find new_knot and compute hierarchical surpluses
            new_knot = last_knot[:]
            new_knot[n_ref] = knots_n[-1]
            feval = func(new_knot)
            feval2 = feval * feval
            fevals_adm.append(feval)
            ieval = interpolate_single(idx_act, hs_act, polys_per_dim,
                                       new_knot)
            ieval2 = interpolate_single(idx_act, hs2_act, polys_per_dim,
                                        new_knot)
            HS = feval - ieval
            HS2 = feval2 - ieval2
            hs_adm.append(HS)
            hs2_adm.append(HS2)
            fcalls += 1  # update function calls

        # update error indicators
        local_error_indicators = np.abs(hs_adm)
        global_error_indicator = local_error_indicators.sum()  # max or sum?

        # find the index from idx_adm with the maximum local error indicator
        help_idx = np.argmax(local_error_indicators)
        # remove index, hs and hs2 from the admissible sets
        idx_add = idx_adm.pop(help_idx)
        hs_add = hs_adm.pop(help_idx)
        hs2_add = hs2_adm.pop(help_idx)
        feval_add = fevals_adm.pop(help_idx)
        # update activated sets
        idx_act.append(idx_add)
        hs_act.append(hs_add)
        hs2_act.append(hs2_add)
        fevals_act.append(feval_add)

    # store data in dictionary
    interp_dict = {}
    interp_dict['idx_act'] = idx_act
    interp_dict['idx_adm'] = idx_adm
    interp_dict['hs_act'] = hs_act
    interp_dict['hs2_act'] = hs2_act
    interp_dict['hs_adm'] = hs_adm
    interp_dict['hs2_adm'] = hs2_adm
    interp_dict['fevals_act'] = fevals_act
    interp_dict['fevals_adm'] = fevals_adm
    return interp_dict
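# End-to-end usage sketch (hypothetical; `model` is a placeholder for the
# function to be approximated):
#
#   import chaospy as cp
#   jpdf = cp.J(cp.Uniform(0, 1), cp.Normal(0, 1))
#   interp_dict = dali(model, N=2, jpdf=jpdf, tol=1e-10, max_fcalls=500)
#   mean, var = compute_moments(interp_dict, jpdf)
#   # warm restart: pass the dictionary back in with a larger budget
#   interp_dict = dali(model, N=2, jpdf=jpdf, max_fcalls=1000,
#                      interp_dict=interp_dict)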