Example #1
def test_fit_one():
    X, fX, Uopt = exact_data()

    pra = PolynomialRidgeApproximation(degree=3,
                                       subspace_dimension=1,
                                       norm=1,
                                       verbose=True)
    pra.fit(X, fX)
    assert np.all(np.isclose(pra(X), fX))
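
The exact_data helper used throughout these tests is not defined on this page. A minimal sketch, assuming it draws Gaussian samples and evaluates an exact degree-p ridge function on a random n-dimensional subspace (consistent with the call signature in Example #4), could be:

import numpy as np

def exact_data(M=100, m=10, n=1, p=3):
    # Hypothetical sketch of the test helper; name, defaults, and the
    # particular ridge function are assumptions inferred from usage below.
    X = np.random.randn(M, m)
    Uopt, _ = np.linalg.qr(np.random.randn(m, n))  # exact ridge subspace
    Y = X.dot(Uopt)                                # projected coordinates
    fX = np.sum(Y, axis=1)**p                      # degree-p polynomial of Uopt^T x
    return X, fX, Uopt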
Example #2
def test_profile(degree=3, subspace_dimension=1):
    X, fX, Uopt = exact_data()
    pra = PolynomialRidgeApproximation(degree=degree,
                                       subspace_dimension=subspace_dimension,
                                       norm=2,
                                       verbose=True)
    pra.fit(X, fX)

    Y = pra.U.T.dot(X.T).T
    assert np.all(np.isclose(pra.profile(Y), pra(X)))
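
Here pra.profile evaluates the low-dimensional polynomial along the ridge, so the assertion checks the defining identity of a ridge approximation: pra(x) equals the profile evaluated at the projected point U.T @ x.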
Example #3
def test_affine():
    X = np.random.randn(100, 5)
    a = np.random.randn(5)
    y = X.dot(a)

    pra = PolynomialRidgeApproximation(degree=1,
                                       subspace_dimension=1,
                                       bound='upper')
    pra.fit(X, y)

    assert np.all(np.isclose(y, pra(X)))
Example #4
def test_exact():
    np.random.seed(1)
    M = 1000
    m, n = 10, 2
    p = 3
    X, fX, Uopt = exact_data(M=M, m=m, n=n, p=p)

    # Random initial guess for the ridge subspace
    U, _ = np.linalg.qr(np.random.randn(m, n))

    for basis in ['legendre', 'arnoldi']:

        pra = PolynomialRidgeApproximation(degree=p,
                                           subspace_dimension=n,
                                           scale=True,
                                           basis=basis,
                                           verbose=True)
        pra.fit(X, fX, U0=U)
        # Because the data come from an exact ridge function, the fit should converge to the global solution
        for fX1, fX2 in zip(pra(X), fX):
            print("%+10.5e  %+10.5e | %10.5e" % (fX1, fX2, np.abs(fX1 - fX2)))
        assert np.all(np.isclose(pra(X), fX))
Example #5
def test_minimax_gradient():
    np.random.seed(1)
    M = 50
    m = 5
    n = 2
    p = 5

    # Samples
    X = np.random.uniform(-1, 1, size=(M, m))

    # Synthetic function
    a = np.random.randn(m)
    b = np.random.randn(m)
    fX = np.dot(a, X.T)**2 + np.dot(b, X.T)**3

    # Random initial subspace estimate
    U, _ = np.linalg.qr(np.random.randn(m, n))

    pra = PolynomialRidgeApproximation(degree=p,
                                       subspace_dimension=n,
                                       scale=False)
    # Random polynomial coefficients (rather than fitted ones)
    c = np.random.randn(len(pra.basis))

    U_c = np.hstack([U.flatten(), c])

    res = lambda U_c: pra._residual(X, fX, U_c)
    jac = lambda U_c: pra._jacobian(X, fX, U_c)

    print(res(U_c))
    print(jac(U_c))

    err = check_jacobian(U_c, res, jac)
    assert err < 1e-6
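
check_jacobian is another test helper that does not appear on this page. A plausible finite-difference sketch, assuming it returns the smallest worst-case discrepancy between the analytic Jacobian and central differences over a set of step sizes, is:

import numpy as np

def check_jacobian(x, res, jac, hvec=None):
    # Hypothetical sketch; the signature is inferred from its use above.
    if hvec is None:
        hvec = [10.0**(-k) for k in range(4, 10)]
    J = jac(x)
    err = np.inf
    for h in hvec:
        J_fd = np.zeros_like(J)
        for i in range(len(x)):
            ei = np.zeros(len(x))
            ei[i] = 1.0
            # Central-difference column for coordinate i
            J_fd[:, i] = (res(x + h * ei) - res(x - h * ei)) / (2 * h)
        err = min(err, np.max(np.abs(J - J_fd)))
    return err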
Example #6
def test_affine():
    X = np.random.randn(100, 5)
    a = np.random.randn(5)
    y = X.dot(a)

    pra = PolynomialRidgeApproximation(degree=1,
                                       subspace_dimension=1,
                                       bound='upper')
    pra.fit(X, y)
    assert np.all(np.isclose(y, pra(X)))

    ang = scipy.linalg.subspace_angles(pra.U, a.reshape(-1, 1))
    assert np.isclose(ang, 0)

    pra = PolynomialRidgeApproximation(degree=1, subspace_dimension=1)
    pra.fit(X, y)
    assert np.all(np.isclose(y, pra(X)))

    ang = scipy.linalg.subspace_angles(pra.U, a.reshape(-1, 1))
    assert np.isclose(ang, 0)
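
scipy.linalg.subspace_angles returns the principal angles between the column spans of its two arguments, so an angle of zero here confirms that the recovered one-dimensional subspace pra.U spans the same line as the true direction a.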
Example #7
            np.savetxt('%s_inc.output' % prefix, Ydone)

            # Determine which points are already running
            try:
                X = np.vstack([X0] + [job.args[0] for job in jobs])
            except ValueError:
                X = X0

            # Normalize everything
            Xdone_norm = total_domain.normalize(Xdone)
            X_norm = total_domain.normalize(X)

            # Now build ridge approximations
            Us = []
            for qoi in qois:
                pra = PolynomialRidgeApproximation(subspace_dimension=1,
                                                   degree=3)
                I = np.isfinite(Ydone[:, qoi])
                pra.fit(Xdone_norm[I, :], Ydone[I, qoi])
                Us.append(pra.U)

            # Now do the sampling
            Xnew_norm = stretch_sample(total_domain_norm,
                                       Us,
                                       X0=X_norm,
                                       M=1,
                                       verbose=True,
                                       enrich=False)
            print("Added %d new points" % Xnew_norm.shape[0])

            Xnew = total_domain.unnormalize(Xnew_norm).reshape(1, -1)
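
This excerpt comes from an adaptive sampling loop: finished runs are normalized into the unit domain, a one-dimensional cubic ridge approximation is fit for each quantity of interest, and the resulting subspaces Us steer stretch_sample to place the next design point along the directions the ridges identify as important.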
Example #8
def test_varpro_jacobian():
    np.random.seed(1)
    M = 6
    m = 2
    n = 1
    p = 2

    # Samples
    X = np.random.uniform(-1, 1, size=(M, m))

    # Synthetic function
    a = np.random.randn(m)
    b = np.random.randn(m)
    fX = np.dot(a, X.T)**2 + np.dot(b, X.T)**3

    # Random initial subspace estimate
    U, _ = np.linalg.qr(np.random.randn(m, n))

    U_flat = U.flatten()

    for basis in ['legendre', 'arnoldi']:
        pra = PolynomialRidgeApproximation(degree=p,
                                           subspace_dimension=n,
                                           scale=False,
                                           basis=basis)
        res = lambda U: pra._varpro_residual(X, fX, U)
        jac = lambda U: pra._varpro_jacobian(X, fX, U)

        err = check_jacobian(U_flat, res, jac, hvec=[1e-7])

        assert err < 1e-6

    # Check with scaling on
    pra = PolynomialRidgeApproximation(degree=p,
                                       subspace_dimension=n,
                                       scale=True)
    pra.set_scale(X, U)
    res = lambda U: pra._varpro_residual(X, fX, U)
    jac = lambda U: pra._varpro_jacobian(X, fX, U)

    err = check_jacobian(U_flat, res, jac)
    assert err < 1e-6

    # Check with scaling on for Hermite basis
    pra = PolynomialRidgeApproximation(degree=p,
                                       subspace_dimension=n,
                                       scale=True,
                                       basis='hermite')
    pra.set_scale(X, U)
    res = lambda U: pra._varpro_residual(X, fX, U)
    jac = lambda U: pra._varpro_jacobian(X, fX, U)

    err = check_jacobian(U_flat, res, jac)
    assert err < 1e-6
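
Unlike Example #5, where the residual takes the stacked vector of subspace and coefficients [U, c], the variable projection (VarPro) residual eliminates the polynomial coefficients through an inner least-squares solve, so _varpro_residual and _varpro_jacobian depend on the subspace alone; that is why the flattened U_flat is the only optimization variable here.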
Example #9
File: fig_err.py, Project: zgrey/PSDR
Yall = np.vstack([Yrand, Ydoe[150:]])

ks = np.arange(150, 800, 2)
qois = [4, 21, 25]
for qoi in qois:
    Iall = np.isfinite(Yall[:, qoi])
    norm = (np.nanmax(Yall[Iall, qoi]) - np.nanmin(Yall[Iall, qoi])) * np.sqrt(
        np.sum(Iall))
    err_rand_vec = []
    err_doe_vec = []
    for k in ks:
        I = np.isfinite(Yrand[:, qoi]) & (np.arange(Yrand.shape[0]) < k)
        pra = PolynomialRidgeApproximation(degree=3,
                                           subspace_dimension=1,
                                           n_init=1)
        pra.fit(Xrand_norm[I], Yrand[I, qoi])
        err_rand = np.linalg.norm(
            pra.predict(Xall_norm[Iall]) - Yall[Iall, qoi]) / norm
        err_rand_vec.append(err_rand)
        I = np.isfinite(Ydoe[:, qoi]) & (np.arange(Ydoe.shape[0]) < k)
        pra.fit(Xdoe_norm[I], Ydoe[I, qoi])
        err_doe = np.linalg.norm(
            pra.predict(Xall_norm[Iall]) - Yall[Iall, qoi]) / norm
        err_doe_vec.append(err_doe)
        print("%4d: err rand %5.2e; doe %5.2e" % (k, err_rand, err_doe))

    pgf = PGF()
Example #10
def test_varpro_jacobian():
    np.random.seed(1)
    M = 100
    m = 10
    n = 2
    p = 5

    # Samples
    X = np.random.uniform(-1, 1, size=(M, m))

    # Synthetic function
    a = np.random.randn(m)
    b = np.random.randn(m)
    fX = np.dot(a, X.T)**2 + np.dot(b, X.T)**3

    # Random initial subspace estimate
    U, _ = np.linalg.qr(np.random.randn(m, n))

    U_flat = U.flatten()

    pra = PolynomialRidgeApproximation(degree=p,
                                       subspace_dimension=n,
                                       scale=False)
    res = lambda U: pra._varpro_residual(X, fX, U)
    jac = lambda U: pra._varpro_jacobian(X, fX, U)

    err = check_jacobian(U_flat, res, jac)

    assert err < 1e-6

    # Check with scaling on
    pra = PolynomialRidgeApproximation(degree=p,
                                       subspace_dimension=n,
                                       scale=True)
    pra.set_scale(X, U)
    res = lambda U: pra._varpro_residual(X, fX, U)
    jac = lambda U: pra._varpro_jacobian(X, fX, U)

    err = check_jacobian(U_flat, res, jac)
    assert err < 1e-6

    # Check with scaling on for Hermite basis
    pra = PolynomialRidgeApproximation(degree=p,
                                       subspace_dimension=n,
                                       scale=True,
                                       basis='hermite')
    pra.set_scale(X, U)
    res = lambda U: pra._varpro_residual(X, fX, U)
    jac = lambda U: pra._varpro_jacobian(X, fX, U)

    err = check_jacobian(U_flat, res, jac)
    assert err < 1e-6