Example #1
def legendre_wafp_enrichment(x, lambdas, M_enrich, sampler=None):
    """
    Adds M_enrich points to the existing point set x by (approximate)
    determinant maximization.
    """

    import numpy as np

    from legendre_induced import induced_distribution_mixture_sampling
    from recurrence import jacobi_recurrence
    from opolynd import opolynd_eval

    if lambdas.ndim == 1:
        lambdas = np.reshape(lambdas, [lambdas.size, 1])

    if sampler is None:
        sampler = lambda MM: induced_distribution_mixture_sampling(lambdas, MM)

    M0 = x.shape[0]
    N, d = lambdas.shape
    ab = jacobi_recurrence(lambdas.max() + 1, alpha=0., beta=0., probability=True)

    M = max(M0, 2*N)

    while x.shape[0] < M0 + M_enrich:
        # Christoffel-weighted Vandermonde matrix on the current point set
        V = opolynd_eval(x, lambdas, ab)
        W = (V.T / np.sqrt(np.sum(V**2, axis=1))).T * np.sqrt(float(N) / float(x.shape[0]))
        # Gram matrix of the current design, and its inverse
        G = np.dot(W.T, W)
        iG = np.linalg.inv(G)

        # Draw M fresh candidates, score each one against the current Gram
        # matrix, and greedily keep the best
        xs = sampler(M)
        Vs = opolynd_eval(xs, lambdas, ab)
        Ws = (Vs.T / np.sqrt(np.sum(Vs**2, axis=1))).T
        dets = np.sum((Ws * np.dot(Ws, iG))**2, axis=1)
        ind = np.argmax(dets)

        x = np.vstack([x, xs[ind,:]])

    return x
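
# A minimal usage sketch (hypothetical sizes; mirrors the __main__ demo in a
# later example):
#
#   lambdas = hyperbolic_cross_indices(9, 7)                # from indexing.py
#   x = legendre_wafp(lambdas, M=3000)                      # initial design
#   x2 = legendre_wafp_enrichment(x, lambdas, M_enrich=20)  # 20 added points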
Example #2
def legendre_wafp(lambdas, M=1e3, sampler=None):
    """
    Generate M (= lambdas.shape[0]) weighted approximate Fekete points
    using randomized sampling.
    """

    import numpy as np

    from scipy.linalg import qr
    from legendre_induced import induced_distribution_mixture_sampling
    from recurrence import jacobi_recurrence
    from opolynd import opolynd_eval

    if lambdas.ndim == 1:
        lambdas = np.reshape(lambdas, [lambdas.size, 1])

    N, d = lambdas.shape

    ab = jacobi_recurrence(lambdas.max() + 1, alpha=0., beta=0., probability=True)

    if sampler is None:
        sampler = lambda MM: induced_distribution_mixture_sampling(lambdas, MM)

    # Choose at least 2*N candidate samples, and force an integer count
    # (the default M is the float 1e3)
    M = int(max(M, 2*N))

    x = sampler(M)
    V = opolynd_eval(x, lambdas, ab)
    # Column-pivoted QR on the Christoffel-weighted, transposed Vandermonde
    # matrix; the first N pivots select the approximate Fekete points
    _, _, p = qr(V.T / np.sqrt(np.sum(V**2, axis=1)), pivoting=True, mode='economic')

    return x[p[:N], :]
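
# A minimal usage sketch (hypothetical sizes): N = lambdas.shape[0] points
# are selected out of 3000 candidate samples.
#
#   lambdas = total_degree_indices(2, 5)    # from indexing.py
#   x = legendre_wafp(lambdas, M=3000)      # x.shape == (lambdas.shape[0], 2)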
Example #3
######## Step 2: run "expensive" model

# (Assumes Step 1 has defined: a spatial grid x with Nx points, M parameter
# samples z, and a multi-index set lambdas with N = lambdas.shape[0] rows.)
u = np.zeros([Nx, M])

print("Evaluating model on mesh...")
for ind, zval in enumerate(z):
    u[:, ind] = expensive_model(zval, x)

print(u)

# Each column of u is a model run for a fixed parameter value

######## Step 3: compute PCE coefficients
print("Assembling PCE coefficients...")
ab = jacobi_recurrence(lambdas.max() + 1, alpha=0., beta=0., probability=True)
V = opolynd_eval(z, lambdas, ab)
weights = np.sqrt(float(N) / float(M)) / np.sqrt(np.sum(V**2, axis=1))

# The PCE coefficients are computed as a weighted discrete least-squares
# estimator with the weights above.
coeffs = np.linalg.lstsq((V.T * weights).T, (u * weights).T, rcond=None)[0].T

# Each row of coeffs contains PCE coefficients for a single x gridpoint.
# Each column of coeffs contains a particular PCE coefficient for all
# values of x.
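
# Equivalently (a minimal illustrative check; assumes V has full column
# rank): the weighted least-squares estimator solves the normal equations
# (Vw^T Vw) c = Vw^T uw, one column of u at a time.
Vw = (V.T * weights).T
coeffs_check = np.linalg.solve(np.dot(Vw.T, Vw), np.dot(Vw.T, (u * weights).T)).T
print("Normal-equations agreement:", np.max(np.abs(coeffs - coeffs_check)))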

######## Step 4: whatever postprocessing you want
print("Processing PCE coefficients...")
# Compute total sensitivities
total_sensitivities = sensitivity.pce_total_sensitivity(
    coeffs.T, lambdas, list(range(d)))
Example #4
    rho = 0.    # Freud (Hermite) parameter
    # (M, d, k, and alpha are assumed set earlier in the surrounding script)
    x = idist_mixture_sampling_tensorial(M, d, k, "freud", alpha=alpha, rho=rho)
    plt.figure()
    plt.plot(np.sort(x), np.arange(x.size, dtype=float) / x.size)
    plt.title('CDF for Hermite, k={:d}'.format(k))

    x = idist_mixture_sampling_tensorial(M, 5, k, "freud", alpha=alpha, rho=rho)

    # Testing that sampling like this is well-conditioned
    M = 2000
    k = 30
    d = 2
    disttype = "freud"
    alpha = 2.
    rho = 0.
    ab = recurrence.hermite_recurrence(k + 1, rho=rho)

    # Sampler
    x = idist_mixture_sampling_tensorial(M, d, k, disttype, alpha=alpha, rho=rho)

    # Generate tensor-product multi-indices
    l = np.arange(k + 1)
    lx, ly = np.meshgrid(l, l)
    L = np.concatenate((lx.reshape(lx.size, 1), ly.reshape(ly.size, 1)), axis=1)
    # Vandermonde-like matrix
    V = opolynd.opolynd_eval(x, L, ab)
    # Christoffel preconditioner: scale each row of V by
    # sqrt(N / sum_j V_ij**2), with N = (k+1)**2 basis functions
    V = (V.T * np.sqrt((k + 1.)**2. / np.sum(V**2, axis=1))).T
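
    # The comment above promises a conditioning test, so here is a minimal
    # added check: with Christoffel preconditioning, the Monte Carlo Gram
    # matrix V.T V / M should be close to the identity.
    G = np.dot(V.T, V) / M
    print("Gram-matrix deviation from identity: {:1.4e}".format(
        np.linalg.norm(G - np.eye(G.shape[0]))))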

    plt.show()
Example #5
# (the legendre_wafp / legendre_wafp_enrichment definitions from the earlier
# examples are assumed above)

if __name__ == "__main__":

    from matplotlib import pyplot as plt
    from indexing import total_degree_indices, hyperbolic_cross_indices
    from recurrence import jacobi_recurrence
    from opolynd import opolynd_eval

    d, k = 9, 7

    #lambdas = total_degree_indices(d, k)
    lambdas = hyperbolic_cross_indices(d, k)
    N = lambdas.shape[0]

    x = legendre_wafp(lambdas, M=3e3)

    ab = jacobi_recurrence(lambdas.max() + 1, alpha=0., beta=0., probability=True)

    V = opolynd_eval(x, lambdas, ab)
    W = (V.T / np.sqrt(np.sum(V**2, axis=1))).T * np.sqrt(float(N) / float(x.shape[0]))

    M_enrich = 20
    x2 = legendre_wafp_enrichment(x, lambdas, M_enrich, sampler=None)
    V2 = opolynd_eval(x2, lambdas, ab)
    W2 = (V2.T / np.sqrt(np.sum(V2**2, axis=1))).T * np.sqrt(float(N) / float(x2.shape[0]))

    # W: unenriched
    # W2: enriched
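
    # A minimal added comparison (illustrative): enrichment should not
    # degrade the conditioning of the weighted Vandermonde matrix.
    print("cond(W)  (unenriched): {:1.4e}".format(np.linalg.cond(W)))
    print("cond(W2) (enriched):   {:1.4e}".format(np.linalg.cond(W2)))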
Example #6
def getImplicitQuad(D):
    # Assumes module-level definitions: samples (candidate point array), H (a
    # basis object carrying .lambdas and .ab), Kmax, and the imports
    # numpy as np, scipy as sp, opolynd, and scipy.stats.multivariate_normal.
    # plt.scatter(samples[:,0], samples[:,1])

    initSamples = samples[:D + 1, :]
    otherSamples = np.ndarray.tolist(samples[D + 1:, :])

    vmat = opolynd.opolynd_eval(initSamples,
                                H.lambdas[:len(initSamples) + 3, :], H.ab, H).T

    # weights = np.asarray([(1/(D+1))*np.ones(len(initSamples))]).T
    # weights = weights / np.sum(weights)
    rv = multivariate_normal([0, 0], [[1, 0], [0, 1]])
    weights = np.asarray([rv.pdf(initSamples)]).T

    nodes = np.copy(initSamples)
    for K in range(D, Kmax):  # up to Kmax - 1
        # print(K)
        # Add Node
        nodes = np.vstack((nodes, otherSamples.pop()))
        # one = ((K+1)/(K+2))*weights
        # two = np.asarray([[1/(K+2)]])
        # weights = np.concatenate((one, two))
        weights = np.asarray([rv.pdf(nodes)]).T
        weights = weights / np.sum(weights)

        # Update weights
        vmat = opolynd.opolynd_eval(nodes, H.lambdas[:len(nodes) - 1, :], H.ab,
                                    H).T

        # A null-space direction of vmat: moving the weights along c leaves
        # every computed moment unchanged
        nullspace = sp.linalg.null_space(vmat)

        c = np.asarray([nullspace[:, 0]]).T

        # Largest steps along +/- c that keep all weights nonnegative
        a = weights / c
        aPos = np.ma.masked_where(c < 0, a)  # only values where c > 0
        alpha1 = np.min(aPos.compressed())

        aNeg = np.ma.masked_where(c > 0, a)  # only values where c < 0
        alpha2 = np.max(aNeg.compressed())

        # Choose alpha1 or alpha2
        alpha = alpha2

        # Remove Node
        vals = weights <= alpha * c
        # print(np.min(weights - alpha1*c))
        assert np.isclose(np.min(weights - alpha1 * c), 0)
        # print(np.sum(vals))
        idx = np.argmax(vals)
        if (np.sum(vals)) != 1:
            idx = np.argmin(weights - alpha * c)
            # print("No w_k is less than  alpha_k*c_k", np.min(weights - alpha*c))
        # print(alpha1, alpha2)
        assert alpha2 < alpha1
        nodes = np.delete(nodes, idx, axis=0)

        weights = weights - alpha * c
        assert weights[idx] < 10**(-15)
        weights = np.delete(weights, idx, axis=0)
        # print(np.sum(weights))

    return weights, nodes
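
# Why the update w <- w - alpha*c preserves the quadrature: c lies in the
# null space of the Vandermonde-like matrix, so V @ (w - alpha*c) == V @ w
# and every represented moment is unchanged. A minimal self-contained check
# with illustrative random data:
import numpy as np
from scipy.linalg import null_space

rng = np.random.default_rng(0)
V = rng.standard_normal((5, 8))        # 5 basis functions, 8 nodes
w = rng.random((8, 1))                 # quadrature weights
c = null_space(V)[:, :1]               # one null-space direction
print(np.allclose(V @ w, V @ (w - 0.3 * c)))   # True: moments preserved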
Example #7
    # (Assumes a sparse-grid rule with nodes x and weights w at the given
    # level in dim dimensions, model evaluations fx, and Genz parameters
    # c and r.)
    # Compute integral of scalar function
    Q_sparse_grid = np.sum(w*fx)
    Q_exact = test_functions.genz_oscillatory_integral(c, r)
    Q_relerror = np.abs(Q_sparse_grid - Q_exact)/np.abs(Q_exact)

    print("Sparse grid integration error for Genz oscillatory test function is {:1.4e} using a level {:d} grid in {:d} dimensions".format(Q_relerror[0], level, dim))

    # Compute a polynomial chaos expansion (PCE) assuming a uniform
    # distribution

    order = level

    # Uniform distribution
    ab = opoly1d.jacobi_recurrence(order + 1, alpha=0., beta=0., probability=True)
    lambdas = indexing.total_degree_indices(dim, order)
    V = opolynd.opolynd_eval(x, lambdas, ab)

    # Test integration accuracy of sparse grid:
    G = np.dot(np.dot(np.transpose(V), np.diag(w)), V)
    print("Sparse grid integration error for orthogonal polynomials up to degree {:d} is {:1.4e} using a level {:d} grid in {:d} dimensions".format(order, np.linalg.norm(G - np.eye(G.shape[0])), level, dim))

    # Compute PCE coefficients
    coeffs = np.dot(np.transpose(V), w*fx)

    # Now things like sensitivity can be computed:

    # Total sensitivity for each dimension:
    pce_TS = sensitivity.pce_total_sensitivity(coeffs, lambdas, list(range(dim)))
    # If you compare pce_TS with c, the qualitative trend in these
    # vectors is similar
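
    # For reference, a minimal sketch of the standard PCE/Sobol identity
    # behind total sensitivities (conventions may differ from the
    # sensitivity module): the total sensitivity of dimension i is the
    # fraction of variance carried by coefficients whose multi-index is
    # active in dimension i.
    nonconst = np.any(lambdas > 0, axis=1)     # exclude the mean term
    variance = np.sum(coeffs[nonconst]**2)
    i = 0                                      # e.g., the first dimension
    TS_i = np.sum(coeffs[lambdas[:, i] > 0]**2) / variance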