Example #1
import numpy as np
from psdr import PolynomialRidgeApproximation

def test_exact():
    np.random.seed(1)
    M = 100
    m = 10
    n = 2
    p = 5

    # Samples
    X = np.random.randn(M, m)

    # Synthetic function
    a = np.random.randn(m)
    b = np.random.randn(m)
    fX = np.dot(a, X.T)**2 + np.dot(b, X.T)**3

    # Random point
    U, _ = np.linalg.qr(np.random.randn(m, n))
    # Actual ridge subspace
    #U, _ = np.linalg.qr(np.vstack([a,b]).T)

    pra = PolynomialRidgeApproximation(degree=p,
                                       subspace_dimension=n,
                                       scale=True,
                                       verbose=True)
    pra.fit(X, fX, U0=U)
    # Because the data is an exact ridge function, we should (I think) converge to the global solution
    for fX1, fX2 in zip(pra(X), fX):
        print "%10.5e  %10.5e" % (fX1, fX2)
    assert np.all(np.isclose(pra(X), fX))
Example #2
import numpy as np
import scipy.linalg
from itertools import product
from psdr import PolynomialRidgeApproximation

def test_same_solution(degree=5, dim=2):
    np.random.seed(0)
    X, fX, U = exact_data(M=1000, m=10, n=dim, p=degree)

    bases = [
        'arnoldi', 'legendre', 'monomial', 'chebyshev', 'laguerre', 'hermite'
    ]

    #
    #U0 = U + 0.01*np.random.randn(*U.shape)
    U0 = None
    for norm, bound in product([1, 2, np.inf],
                               [None]):  # , 'lower', 'upper']):
        print("=" * 50)
        print("Norm", norm, "bound", bound)
        Us = []
        for basis in bases:
            print('basis', basis)
            pra = PolynomialRidgeApproximation(degree=degree,
                                               subspace_dimension=dim,
                                               basis=basis,
                                               norm=norm,
                                               bound=bound,
                                               verbose=True)
            pra.fit(X, fX, U0=U0)
            Us.append(np.copy(pra.U))

        angles = np.zeros((len(Us), len(Us)))
        for (i, j) in zip(*np.triu_indices(len(Us))):
            angles[i, j] = np.max(scipy.linalg.subspace_angles(Us[i], Us[j]))

        print(angles)
        assert np.max(
            angles
        ) < 1e-10, "Did not find same solution using a different basis"
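
This and several of the later examples rely on an exact_data helper that is not shown on this page. A minimal sketch consistent with how it is called (it appears to return samples X, values fX of an exact degree-p ridge function, and the true ridge subspace U); the defaults and the particular polynomial below are assumptions:

import numpy as np

def exact_data(M=100, m=10, n=1, p=3):
    # Hypothetical stand-in for the helper used by these tests: draw
    # samples, pick a random n-dimensional ridge subspace, and evaluate
    # an exact degree-p polynomial of the projected coordinates.
    X = np.random.randn(M, m)
    U, _ = np.linalg.qr(np.random.randn(m, n))
    Y = X @ U  # projected coordinates, shape (M, n)
    fX = np.sum(Y**p, axis=1) + np.prod(Y, axis=1)
    return X, fX, U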
Example #3
import numpy as np
from psdr import PolynomialRidgeApproximation

def test_fit_inf():
    X, fX, Uopt = exact_data()

    pra = PolynomialRidgeApproximation(degree=3,
                                       subspace_dimension=1,
                                       norm=np.inf)
    pra.fit(X, fX)
    assert np.all(np.isclose(pra(X), fX))
Example #4
import numpy as np
from psdr import PolynomialRidgeApproximation

def test_fit_two():
    X, fX, Uopt = exact_data()

    pra = PolynomialRidgeApproximation(degree=3,
                                       subspace_dimension=1,
                                       norm=2,
                                       verbose=True)
    pra.fit(X, fX)
    assert np.all(np.isclose(pra(X), fX))
Example #5
import numpy as np
from psdr import PolynomialRidgeApproximation

def test_profile(degree=3, subspace_dimension=1):
    X, fX, Uopt = exact_data()
    pra = PolynomialRidgeApproximation(degree=degree,
                                       subspace_dimension=subspace_dimension,
                                       norm=2,
                                       verbose=True)
    pra.fit(X, fX)

    Y = pra.U.T.dot(X.T).T
    assert np.all(np.isclose(pra.profile(Y), pra(X)))
Example #6
import numpy as np
import scipy.linalg
from psdr import PolynomialRidgeApproximation

def test_affine():
    X = np.random.randn(100, 5)
    a = np.random.randn(5)
    y = X.dot(a)

    pra = PolynomialRidgeApproximation(degree=1,
                                       subspace_dimension=1,
                                       bound='upper')
    pra.fit(X, y)
    assert np.all(np.isclose(y, pra(X)))

    ang = scipy.linalg.subspace_angles(pra.U, a.reshape(-1, 1))
    assert np.isclose(ang, 0)

    pra = PolynomialRidgeApproximation(degree=1, subspace_dimension=1)
    pra.fit(X, y)
    assert np.all(np.isclose(y, pra(X)))

    ang = scipy.linalg.subspace_angles(pra.U, a.reshape(-1, 1))
    assert np.isclose(ang, 0)
Example #7
import numpy as np
from psdr import PolynomialRidgeApproximation

def test_minimax_gradient():
    np.random.seed(1)
    M = 50
    m = 5
    n = 2
    p = 5

    # Samples
    X = np.random.uniform(-1, 1, size=(M, m))

    # Synthetic function
    a = np.random.randn(m)
    b = np.random.randn(m)
    fX = np.dot(a, X.T)**2 + np.dot(b, X.T)**3

    # Random point
    U, _ = np.linalg.qr(np.random.randn(m, n))

    pra = PolynomialRidgeApproximation(degree=p,
                                       subspace_dimension=n,
                                       scale=False,
                                       maxiter=0)

    # Initialize subspace
    pra.fit(X, fX)
    #pra.set_scale(X, U)
    #pra._fit_fixed_U_inf_norm(X, fX, U)
    #c = pra.coef
    c = np.random.randn(len(pra.Basis(p, dim=n)))

    U_c = np.hstack([U.flatten(), c])

    res = lambda U_c: pra._residual(X, fX, U_c)
    jac = lambda U_c: pra._jacobian(X, fX, U_c)

    print(res(U_c))
    print(jac(U_c))

    err = check_jacobian(U_c, res, jac)
    assert err < 1e-6
Example #8
import numpy as np
from psdr import PolynomialRidgeApproximation

def test_exact():
    np.random.seed(1)
    #	M = 100
    #	m = 10
    #	n = 2
    #	p = 5
    #
    #	# Samples
    #	X = np.random.randn(M,m)
    #
    #	# Synthetic function
    #	a = np.random.randn(m)
    #	b = np.random.randn(m)
    #	fX = np.dot(a,X.T)**2 + np.dot(b, X.T)**3
    M = 1000
    m, n = 10, 2
    p = 3
    X, fX, Uopt = exact_data(M=M, m=m, n=n, p=p)

    # Random point
    U, _ = np.linalg.qr(np.random.randn(m, n))
    # Actual ridge subspace
    #U, _ = np.linalg.qr(np.vstack([a,b]).T)
    #U += Uopt

    for basis in ['legendre', 'arnoldi']:

        pra = PolynomialRidgeApproximation(degree=p,
                                           subspace_dimension=n,
                                           scale=True,
                                           basis=basis,
                                           verbose=True)
        pra.fit(X, fX, U0=U)
        # Because the data is an exact ridge function, we should (I think) converge to the global solution
        for fX1, fX2 in zip(pra(X), fX):
            print("%+10.5e  %+10.5e | %10.5e" % (fX1, fX2, np.abs(fX1 - fX2)))
        assert np.all(np.isclose(pra(X), fX))
Example #9
            try:
                X = np.vstack([X0] + [job.args[0] for job in jobs])
            except ValueError:
                X = X0

            # Normalize everything
            Xdone_norm = total_domain.normalize(Xdone)
            X_norm = total_domain.normalize(X)

            # Now build ridge approximations
            Us = []
            for qoi in qois:
                pra = PolynomialRidgeApproximation(subspace_dimension=1,
                                                   degree=3)
                I = np.isfinite(Ydone[:, qoi])
                pra.fit(Xdone_norm[I, :], Ydone[I, qoi])
                Us.append(pra.U)

            # Now do the sampling
            Xnew_norm = stretch_sample(total_domain_norm,
                                       Us,
                                       X0=X_norm,
                                       M=1,
                                       verbose=True,
                                       enrich=False)
            print "Added %d new points" % Xnew_norm.shape[0]

            Xnew = total_domain.unnormalize(Xnew_norm).reshape(1, -1)

        for x in Xnew:
            i += 1
Example #10
File: fig_err.py  Project: zgrey/PSDR
ks = np.arange(150, 800, 2)
qois = [4, 21, 25]
#qois = [21,25]
for qoi in qois:
    Iall = np.isfinite(Yall[:, qoi])
    #norm = np.linalg.norm(Yall[Iall,qoi])
    norm = (np.nanmax(Yall[Iall, qoi]) - np.nanmin(Yall[Iall, qoi])) * np.sqrt(
        np.sum(Iall))
    err_rand_vec = []
    err_doe_vec = []
    for k in ks:
        I = np.isfinite(Yrand[:, qoi]) & (np.arange(Yrand.shape[0]) < k)
        pra = PolynomialRidgeApproximation(degree=3,
                                           subspace_dimension=1,
                                           n_init=1)
        pra.fit(Xrand_norm[I], Yrand[I, qoi])
        #err_rand = np.mean(np.abs(pra.predict(Xall_norm[Iall]) - Yall[Iall,qoi]))/norm
        err_rand = np.linalg.norm(
            pra.predict(Xall_norm[Iall]) - Yall[Iall, qoi]) / norm
        err_rand_vec.append(err_rand)
        I = np.isfinite(Ydoe[:, qoi]) & (np.arange(Ydoe.shape[0]) < k)
        pra.fit(Xdoe_norm[I], Ydoe[I, qoi])
        err_doe = np.linalg.norm(
            pra.predict(Xall_norm[Iall]) - Yall[Iall, qoi]) / norm
        #err_doe = np.mean(np.abs(pra.predict(Xall_norm[Iall]) - Yall[Iall,qoi]))/norm
        err_doe_vec.append(err_doe)
        print "%4d: err rand %5.2e; doe %5.2e" % (k, err_rand, err_doe)

    pgf = PGF()
    pgf.add('k', ks)
    pgf.add('doe', err_doe_vec)
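
PGF here appears to come from the PSDR codebase and collects named columns of data for plotting with pgfplots. A minimal stand-in consistent with the add calls above (the write method and the tab-delimited format are assumptions):

class PGF:
    # Hypothetical minimal replacement: collect named columns with add()
    # and write them as a tab-delimited table with a header row.
    def __init__(self):
        self.names = []
        self.columns = []

    def add(self, name, values):
        self.names.append(name)
        self.columns.append(list(values))

    def write(self, filename):
        with open(filename, 'w') as f:
            f.write('\t'.join(self.names) + '\n')
            for row in zip(*self.columns):
                f.write('\t'.join('%g' % v for v in row) + '\n')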