Example #1
def plot_projection(X, L, center, ax, pgfname, stretch, **kwargs):
    U = orth(np.atleast_2d(L).T)
    P = np.outer(U, U.T)  # orthogonal projector onto the span of L

    # Shift the center so it has no component along the projection direction;
    # after this, center + P.dot(x) is the projection of x onto the plotted line.
    center = center - P.dot(center)

    # Line we project onto
    line = np.vstack([
        center - stretch * L,
        center + stretch * L,
    ])
    ax.plot(line[:, 0], line[:, 1], **kwargs)
    pgf = PGF()
    pgf.add('x', line[:, 0])
    pgf.add('y', line[:, 1])
    pgf.write(pgfname + '_line.dat')

    # projection lines
    lines = []  # lines from points to the projection axis
    dots = []  # dots on the projection axis
    for x in X:
        lines.append(x)
        lines.append(center + P.dot(x))
        dots.append(lines[-1])
        lines.append(np.nan * np.ones(2))
    lines = np.vstack(lines)
    ax.plot(lines[:, 0], lines[:, 1], ':', **kwargs)
    dots = np.vstack(dots)
    ax.plot(dots[:, 0], dots[:, 1], '.', **kwargs)

    pgf = PGF()
    pgf.add('x', lines[:, 0])
    pgf.add('y', lines[:, 1])
    pgf.write(pgfname + '_lines.dat')

    pgf = PGF()
    pgf.add('x', dots[:, 0])
    pgf.add('y', dots[:, 1])
    pgf.write(pgfname + '_dots.dat')
Example #2
		time_samp[rep, i] = stop_time - start_time
		Hsamp = np.copy(lipschitz.H)
		mismatch_samp[rep, i] = metric(Hsamp)
		print("mismatch: ", mismatch_samp[rep,i], "time :", time_samp[rep,i])	
	
	# Now export the data to PGF
	pgf = PGF()
	pgf.add('M', Mvec)
	p0, p25, p50, p75, p100 = np.percentile(mismatch_samp[:rep+1], [0, 25, 50, 75, 100], axis =0,
		interpolation = 'nearest')
	pgf.add('p0', p0)
	pgf.add('p25', p25)
	pgf.add('p50', p50)
	pgf.add('p75', p75)
	pgf.add('p100', p100)
	pgf.write('data/fig_convergence_samp.dat')
	
	# Now the time part
	pgf = PGF()
	pgf.add('M', Mvec)
	p0, p25, p50, p75, p100 = np.percentile(time_samp[:rep+1], [0, 25, 50, 75, 100], axis =0)
	pgf.add('p0', p0)
	pgf.add('p25', p25)
	pgf.add('p50', p50)
	pgf.add('p75', p75)
	pgf.add('p100', p100)
	pgf.write('data/fig_convergence_time_samp.dat')
	
		
	#for i, N in tqdm(enumerate(Nvec), desc = 'gradient %3d' % rep, total = len(Nvec)):
	for i, N in enumerate(Nvec):
Example #3
    p100 = np.zeros(len(Npoints))
    for k, N in enumerate(Npoints):

        def compute_dist(trial):
            X = alg(N, trial)
            return design_dispersion(X)

        print(f"starting N={N:3d}, alg {name}")
        dists = Parallel(n_jobs=20, verbose=100)(delayed(compute_dist)(i)
                                                 for i in range(Ntrials))
        #		dists = []
        #		for trial in range(Ntrials):
        #			X = alg(fun.domain, N, L, trial)
        #			dist = design_dispersion(X, fun.domain, L)
        #			dists.append(dist)
        #			print(f'M : {N:3d} | dispersion {dist:10.4e}')

        p0[k], p25[k], p50[k], p75[k], p100[k] = np.percentile(
            dists, [0, 25, 50, 75, 100])

        # Write incremental data
        pgf = PGF()
        pgf.add('N', Npoints)
        pgf.add('p0', p0)
        pgf.add('p25', p25)
        pgf.add('p50', p50)
        pgf.add('p75', p75)
        pgf.add('p100', p100)

        pgf.write(f'data/fig_design_rate_{name}.dat')
Example #4
    np.random.seed(0)

    dom = psdr.BoxDomain(-np.ones(2), np.ones(2))

    fig, axes = plt.subplots(1, 2, figsize=(10, 5))

    # Number of samples
    M = 20

    # Latin Hypercube sampling
    X = psdr.latin_hypercube_maximin(dom, M, maxiter=1000)

    pgf = PGF()
    pgf.add('x', X[:, 0])
    pgf.add('y', X[:, 1])
    pgf.write('data/fig_latin_lhs_sample.dat')

    ax = axes[0]
    ax.plot(X[:, 0], X[:, 1], 'k.')
    ax.set_title('Latin Hypercube')

    L = np.array([[1, 0]])
    center = np.array([0, -1.3])
    plot_projection(X,
                    L,
                    center,
                    ax,
                    'data/fig_latin_lhs_hor',
                    stretch=1.2,
                    color='b')
Example #5
    fXg = fun(Xg)
    #uncertain = (ub - lb)/(np.max(fXg) - np.min(fXg))
    rnge = np.max(fXg) - np.min(fXg)

    X_iso = psdr.minimax_lloyd(fun.domain, M)
    X_lip = psdr.minimax_lloyd(fun.domain, M, L=lip_mat.L)

    # Isotropic sampling / scalar Lipschitz
    lb, ub = lip_con.uncertainty(X_iso, fun(X_iso), Xg)
    p = np.percentile(ub - lb, [0, 25, 50, 75, 100])
    print(p)
    pgf = PGF()
    for i, t in enumerate([0, 25, 50, 75, 100]):
        pgf.add('p%d' % t, [p[i]])
    pgf.add('range', [rnge])
    pgf.write('data/tab_sample_%s_scalar_isotropic_uncertainty.dat' % (name))

    # Isotropic sampling / matrix Lipschitz
    lb, ub = lip_mat.uncertainty(X_iso, fun(X_iso), Xg)
    p = np.percentile(ub - lb, [0, 25, 50, 75, 100])
    print(p)
    pgf = PGF()
    for i, t in enumerate([0, 25, 50, 75, 100]):
        pgf.add('p%d' % t, [p[i]])
    pgf.add('range', [rnge])
    pgf.write('data/tab_sample_%s_matrix_isotropic_uncertainty.dat' % (name))

    # Lipschitz-aware sampling / matrix Lipschitz
    lb, ub = lip_mat.uncertainty(X_lip, fun(X_lip), Xg)
    p = np.percentile(ub - lb, [0, 25, 50, 75, 100])
    print(p)
Example #6
    'random \n→ maximin \n→ minimax',
    'coffeehouse \n→ minimax',
    'coffeehouse \n→ maximin \n→ minimax',
    'coffeehouse \n→ maximin \n→ scale \n→ minimax',
])

names = [
    'random_minimax',
    'random_maximin_minimax',
    'coffeehouse_minimax',
    'coffeehouse_maximin_minimax',
    'coffeehouse_maximin_scale_minimax',
]

for coll, name in zip(ax.collections, names):
    x, y = np.array(coll.get_offsets()).T
    print(x)
    print(y)
    pgf = PGF()
    pgf.add('x', x)
    pgf.add('y', y)
    pgf.write('data/fig_initialization_%s.dat' % name)

best = np.min(np.hstack(data))
print(best)
ax.axhline(best, color='k')

ax.set_xlabel('method')
ax.set_ylabel('minimax distance')
plt.show()
Example #7
fX4 = fun(X4)
name4 = 'line'

for X, fX, name in zip([X1, X2, X3, X4], [fX1, fX2, fX3, fX4],
                       [name1, name2, name3, name4]):
    print(" === %10s === " % name)
    lb, ub = Lmat.uncertainty(X, fX, Xt)
    print("average uncertainty", np.mean(ub - lb))
    print("max uncertainty", np.max(ub - lb))

    p = np.percentile(ub - lb, [0, 25, 50, 75, 100])
    pgf = PGF()
    for i, t in enumerate([0, 25, 50, 75, 100]):
        pgf.add('p%d' % t, [p[i]])
    pgf.write('data/fig_sample_Lmat_uncertainty_%s.dat' % name)

    # Lipschitz constant
    lb, ub = Lcon.uncertainty(X, fX, Xt)
    print("average uncertainty", np.mean(ub - lb))
    print("max uncertainty", np.max(ub - lb))

    p = np.percentile(ub - lb, [0, 25, 50, 75, 100])
    pgf = PGF()
    for i, t in enumerate([0, 25, 50, 75, 100]):
        pgf.add('p%d' % t, [p[i]])
    pgf.write('data/fig_sample_Lcon_uncertainty_%s.dat' % name)

    if True:
        Lmat.shadow_uncertainty(
            fun.domain,
Example #8
names.append('otl')

# Borehole
funs.append(psdr.demos.Borehole())
names.append('borehole')

# Wing Weight
funs.append(psdr.demos.WingWeight())
names.append('wing')

act = psdr.ActiveSubspace()
lip = psdr.LipschitzMatrix()

m = max([len(fun.domain) for fun in funs])
pgf = PGF()
pgf.add('i', np.arange(1, m + 1))
for fun, name in zip(funs, names):
    X = fun.domain.sample(int(1e3))
    grads = fun.grad(X)

    act.fit(grads)
    lip.fit(grads=grads)

    ew = np.nan * np.zeros(m)
    ew[0:len(fun.domain)] = scipy.linalg.eigvalsh(act.C)[::-1]
    pgf.add('%s_C' % name, ew)
    ew[0:len(fun.domain)] = scipy.linalg.eigvalsh(lip.H)[::-1]
    pgf.add('%s_H' % name, ew)

pgf.write('data/tab_eigs.dat')
Example #9
for qoi in qois:
    Iall = np.isfinite(Yall[:, qoi])
    #norm = np.linalg.norm(Yall[Iall,qoi])
    norm = (np.nanmax(Yall[Iall, qoi]) - np.nanmin(Yall[Iall, qoi])) * np.sqrt(
        np.sum(Iall))
    err_rand_vec = []
    err_doe_vec = []
    for k in ks:
        I = np.isfinite(Yrand[:, qoi]) & (np.arange(Yrand.shape[0]) < k)
        pra = PolynomialRidgeApproximation(degree=3,
                                           subspace_dimension=1,
                                           n_init=1)
        pra.fit(Xrand_norm[I], Yrand[I, qoi])
        #err_rand = np.mean(np.abs(pra.predict(Xall_norm[Iall]) - Yall[Iall,qoi]))/norm
        err_rand = np.linalg.norm(
            pra.predict(Xall_norm[Iall]) - Yall[Iall, qoi]) / norm
        err_rand_vec.append(err_rand)
        I = np.isfinite(Ydoe[:, qoi]) & (np.arange(Ydoe.shape[0]) < k)
        pra.fit(Xdoe_norm[I], Ydoe[I, qoi])
        err_doe = np.linalg.norm(
            pra.predict(Xall_norm[Iall]) - Yall[Iall, qoi]) / norm
        #err_doe = np.mean(np.abs(pra.predict(Xall_norm[Iall]) - Yall[Iall,qoi]))/norm
        err_doe_vec.append(err_doe)
        print "%4d: err rand %5.2e; doe %5.2e" % (k, err_rand, err_doe)

    pgf = PGF()
    pgf.add('k', ks)
    pgf.add('doe', err_doe_vec)
    pgf.add('rand', err_rand_vec)
    pgf.write('fig_err_qoi%d.dat' % (qoi, ))
Example #10
			try:
				res = prob.solve(warm_start = True)
				if prob.status == cp.OPTIMAL:
					Nsucceed += 1
					dist = np.linalg.norm(z.value - yi)
					if dist < eps:
						Ninside += 1

			except cp.SolverError:
				pass

		if randomize:
			mean = float(Ninside/Nsucceed)
			Ns[i] = int( mean*ngrid)
			Nstd[i] = np.sqrt(mean - mean**2)*float(ngrid) 
		else:
			Ns[i] = Ninside
			Nstd[i] = 0

		print('eps=%5.2g \t N=%12.7e' % (eps, Ns[i]))
		# Now save the data
		pgf = PGF()
		pgf.add('eps', epsilons[:i+1])
		pgf.add('N', Ns[:i+1])
		pgf.add('Nstd', Nstd[:i+1])
		pgf.write('data/fig_covering_%s.dat' % name)


	# Now compute rates
	
Example #11
                for resp_name in responses:
                    resp = responses[resp_name]
                    # Now fit and test response surface
                    try:
                        resp.fit(X, fX)
                        # Record the sup norm error
                        data[resp_name][i, it] = np.max(
                            np.abs(resp(Xt).flatten() - ft.flatten())) / scale
                    except (KeyboardInterrupt, SystemExit):
                        raise
                    except Exception:
                        pass

                    print("\t err %10s: %8.2e" %
                          (resp_name, data[resp_name][i, it]))

            # Now save the data
            for resp_name in data:
                fname = 'fig_sample_%s_%s_%s.dat' % (fun_name, samp_name,
                                                     resp_name)
                pgf = PGF()
                pgf.add('N', Nvec[:i + 1])
                p0, p25, p50, p75, p100 = np.nanpercentile(
                    data[resp_name][:i + 1, :], [0, 25, 50, 75, 100], axis=1)
                pgf.add('p0', p0)
                pgf.add('p25', p25)
                pgf.add('p50', p50)
                pgf.add('p75', p75)
                pgf.add('p100', p100)
                pgf.write('data/' + fname)
Example #12
    plt.clf()

    # Samples to use when estimating dispersion
    #X0 = psdr.maximin_coffeehouse(fun.domain, 5000, L = L, N0 = 50)
    X0 = np.vstack(
        [psdr.random_sample(fun.domain, 5000),
         fun.domain.sample_grid(2)])

    plt.clf()

    # Now perform designs
    for alg, alg_name, M in zip(algs, alg_names, Ms):
        dispersion = []
        # We can get a very sloppy fit with more points
        for i in range(M):
            np.random.seed(i)
            X = alg(fun.domain, Nsamp, L)
            dist = psdr.fill_distance_estimate(fun.domain,
                                               X,
                                               L=L,
                                               X0=np.copy(X0))
            dispersion.append(dist)
            print(f'{alg_name:20s} : {i:4d} dispersion {dist:10.5e}')

        ax = sns.swarmplot(dispersion)
        x, y = np.array(ax.collections[-1].get_offsets()).T
        pgf = PGF()
        pgf.add('x', x)
        pgf.add('y', y)
        pgf.write(f'data/fig_design_{name}_{alg_name}.dat')
Example #13
import numpy as np
from psdr.pgf import PGF

pgf = PGF()
pgf.read('data/fig_covering_mat.dat')
eps = np.array(pgf['eps'])
Ns = np.array(pgf['N'])

slope = np.zeros(eps.shape)
for i in range(len(slope)):
    I = slice(max(0, i - 1), min(len(slope), i + 1))
    xx = np.log10(eps[I])
    yy = np.log10(Ns[I])
    # Fit a line
    p = np.polyfit(xx, yy, 1)
    slope[i] = p[0]

# Median filter
slope_median = np.zeros(slope.shape)
for i in range(len(slope)):
    slope_median[i] = np.median(slope[max(0, i - 3):min(len(slope), i + 3)])

pgf = PGF()
pgf.add('eps', eps)
pgf.add('slope', slope)
pgf.add('median', slope_median)
pgf.write('data/fig_covering_mat_slope.dat')
Example #14
from psdr.pgf import PGF

np.random.seed(0)

m = 2
domain = psdr.BoxDomain(-np.ones(m), np.ones(m))
L = np.array([[1, 0], [-1, 4]]) / 4

print("without Lipschitz")
X = psdr.minimax_lloyd(domain, 9, maxiter = 500)
print(X)
d = psdr.geometry.fill_distance(domain, X)
print(d) 

pgf = PGF()
pgf.add('x', X[:,0])
pgf.add('y', X[:,1])
pgf.write('data/fig_cover_scalar.dat')


print("with Lipschitz")
X = psdr.minimax_lloyd(domain, 3, L = L)
print(X)
d = psdr.geometry.fill_distance(domain, X, L = L)
print(d) 

pgf = PGF()
pgf.add('x', X[:,0])
pgf.add('y', X[:,1])
pgf.write('data/fig_cover_matrix.dat')
Example #15
delta2 = 10.**((np.log10(delta[1:]) + np.log10(delta[0:-1])) / 2.)
slope2 = (np.log10(Ndelta[1:]) - np.log10(Ndelta[0:-1])) / (
    np.log10(delta[1:]) - np.log10(delta[0:-1]))

delta2 = delta2[np.isfinite(slope2)]
slope2 = slope2[np.isfinite(slope2)]

# Median filter
slope_median = np.zeros(slope2.shape)
for i in range(len(slope2)):
    slope_median[i] = np.median(slope2[max(0, i - 3):min(len(slope2), i + 3)])

pgf = PGF()
pgf.add('eps', delta2)
pgf.add('slope', slope2)
pgf.add('median', slope_median)
pgf.write('data/fig_covering_mat_rate.dat')

if False:
    Iplus = slice(5, len(delta))
    Iminus = slice(0, len(delta) - 5)
    delta5 = 10.**((np.log10(delta[Iplus]) + np.log10(delta[Iminus])) / 2.)
    slope5 = (np.log10(Ndelta[Iplus]) - np.log10(Ndelta[Iminus])) / (
        np.log10(delta[Iplus]) - np.log10(delta[Iminus]))

    pgf = PGF()
    pgf.add('delta', delta5)
    pgf.add('slope', slope5)
    pgf.write('data/fig_volume_slope5.dat')
Example #16
u = np.ones((m, 1)) / np.sqrt(m)

proj_domain = psdr.BoxDomain([-np.sqrt(m)], [np.sqrt(m)])

import matplotlib.pyplot as plt

# Get data for drawing plot
XX = np.linspace(-np.sqrt(m), np.sqrt(m), 1000).reshape(-1, 1) @ u.T
fXX = fun(XX)
fig, ax = plt.subplots(1, 2)
ax[0].plot((u.T @ XX.T).flatten(), fXX)

pgf = PGF()
pgf.add('x', (u.T @ XX.T).flatten())
pgf.add('y', fXX)
pgf.write('data/fig_sine_fun.dat')

# plot minimax design
X = psdr.minimax_design_1d(fun.domain, M, L=L[0, :].reshape(1, -1))
minimax_dist = psdr.fill_distance(proj_domain, (u.T @ X.T).T)
ax[0].plot((u.T @ X.T).flatten(), fun(X), 'r.')

pgf = PGF()
pgf.add('x', (u.T @ X.T).flatten())
pgf.add('y', fun(X))
pgf.write('data/fig_sine_minimax.dat')

# Plot a random design
np.random.seed(0)

X = psdr.random_sample(fun.domain, M)
Example #17
ax = axes[1]
ax.set_title('Lipschitz Bounds')

ax.plot(X.flatten(), fX, 'k.')
ax.plot(xx, f(xx), 'b-')
ax.plot(xx, yy_lip, 'r-')
ax.fill_between(xx.flatten(), lb, ub, color='g', alpha=0.5)

axes[0].set_ylim(axes[1].get_ylim())

fig.tight_layout()
plt.show()

# Save PGF data

pgf = PGF()
pgf.add('x', X.flatten())
pgf.add('fx', fX)
pgf.write('data/fig_gp_data.dat')

pgf = PGF()
pgf.add('x', xx.flatten())
pgf.add('fx', f(xx))
pgf.add('gpr', yy_gpr)
pgf.add('gpr_lb', yy_gpr - yy_std)
pgf.add('gpr_ub', yy_gpr + yy_std)
pgf.add('lip_lb', lb)
pgf.add('lip_ub', ub)
pgf.write('data/fig_gp.dat')
Example #18
    Ls = [np.array([[2, 1]]), np.array([[1, 2]])]

    for ax, slack in zip(axes, [1, 0.5]):
        X = []
        for i in range(M):
            x = psdr.seq_maximin_sample(dom,
                                        X,
                                        Ls=Ls,
                                        slack=slack,
                                        Nsamp=int(1e3))
            X.append(x)
        X = np.vstack(X)
        pgf = PGF()
        pgf.add('x', X[:, 0])
        pgf.add('y', X[:, 1])
        pgf.write('data/fig_lock_s%g_sample.dat' % slack)

        ax.plot(X[:, 0], X[:, 1], 'k.')
        ax.set_title('slack=%g' % slack)
        centers = [np.array([1.1, -1.1]), np.array([2, 0])]
        colors = ['b', 'r']
        for i, (L, center, color) in enumerate(zip(Ls, centers, colors)):
            plot_projection(X,
                            L,
                            center,
                            ax,
                            'data/fig_lock_s%g_L%d' % (slack, i),
                            stretch=1.2,
                            color=color)

    for ax in axes:
Example #19
lam3 = np.zeros(epsilon.shape)
lam4 = np.zeros(epsilon.shape)
lam5 = np.zeros(epsilon.shape)
lam6 = np.zeros(epsilon.shape)

for k, eps in enumerate(epsilon):
    L = lipschitz(X, fX, eps)
    rank[k] = np.linalg.matrix_rank(L)
    obj[k] = np.linalg.norm(L, 'fro')
    ew, ev = np.linalg.eigh(L)
    ew = ew[::-1]
    lam1[k] = ew[0]
    lam2[k] = ew[1]
    lam3[k] = ew[2]
    lam4[k] = ew[3]
    lam5[k] = ew[4]
    lam6[k] = ew[5]
    print(f"=====> epsilon {eps:8.2e}, rank {rank[k]:2d}, obj {obj[k]:10.5e}")

pgf = PGF()
pgf.add('epsilon', epsilon)
pgf.add('rank', rank)
pgf.add('obj', obj)
pgf.add('lam1', lam1)
pgf.add('lam2', lam2)
pgf.add('lam3', lam3)
pgf.add('lam4', lam4)
pgf.add('lam5', lam5)
pgf.add('lam6', lam6)
pgf.write('data/fig_epsilon_rank.dat')
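
All of the examples above funnel their results through psdr.pgf.PGF, which collects named columns and dumps them as whitespace-delimited .dat tables that pgfplots can read. For readers without psdr installed, the following is a minimal, self-contained sketch of that column-writer interface (add, write, read, and lookup by name) as inferred from the calls above; it is an assumption about the interface, not the library's actual implementation.

import numpy as np

class SimplePGF:
    """Minimal stand-in for psdr.pgf.PGF (inferred from the usage above, not
    the real implementation): named columns written as a whitespace-delimited
    table with a header row, the format read by pgfplots' addplot table."""

    def __init__(self):
        self.names = []
        self.columns = []

    def add(self, name, column):
        # Every column must have the same length as the first one added
        column = np.atleast_1d(np.asarray(column, dtype=float))
        if self.columns and len(column) != len(self.columns[0]):
            raise ValueError("column length mismatch")
        self.names.append(name)
        self.columns.append(column)

    def write(self, fname):
        # Header row of column names, then one row per sample
        with open(fname, 'w') as f:
            f.write('\t'.join(self.names) + '\n')
            for row in zip(*self.columns):
                f.write('\t'.join('%.15e' % x for x in row) + '\n')

    def read(self, fname):
        # Read back a file written in the same format
        with open(fname) as f:
            self.names = f.readline().split()
            data = np.loadtxt(f, ndmin=2)
        self.columns = [data[:, j] for j in range(data.shape[1])]

    def __getitem__(self, name):
        return self.columns[self.names.index(name)]

# Usage sketch (hypothetical file name): write two columns and read them back
pgf = SimplePGF()
pgf.add('x', np.linspace(0, 1, 5))
pgf.add('y', np.linspace(0, 1, 5) ** 2)
pgf.write('example.dat')

pgf2 = SimplePGF()
pgf2.read('example.dat')
print(pgf2['y'])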