def test_polynomial_bases(self):
    data = helper.load_test_npz('test_points_10_2.npz')
    X = data['X']
    data = helper.load_test_npz('test_poly_bases_3.npz')
    B, I = rs.polynomial_bases(X, 3)
    np.testing.assert_equal(B, data['B'])
    np.testing.assert_equal(I, data['I'])

def test_local_linear_gradients(self):
    data = helper.load_test_npz('train_points_200_2.npz')
    X = data['X'].copy()
    f = 2 - 5 * X[:, 0] + 4 * X[:, 1]
    df = gr.local_linear_gradients(X, f)
    M = df.shape[0]
    np.testing.assert_array_almost_equal(df, np.tile(np.array([-5.0, 4.0]), (M, 1)), decimal=9)

    df = gr.local_linear_gradients(X, f, p=8)
    M = df.shape[0]
    np.testing.assert_array_almost_equal(df, np.tile(np.array([-5.0, 4.0]), (M, 1)), decimal=9)

    f = 2 - np.sin(X[:, 0]) + np.cos(X[:, 1])
    np.random.seed(1234)
    df = gr.local_linear_gradients(X, f)
    # np.savez('data/test_llm_gradients', df=df)
    data = helper.load_test_npz('test_llm_gradients.npz')
    np.testing.assert_equal(df, data['df'])

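# local_linear_gradients estimates gradients by fitting an affine model, via
# least squares, to the p nearest neighbors of each sample point; for an affine
# f the slope is recovered exactly, which is what the assertions above verify.
# A minimal, hypothetical sketch of that idea (not the library routine itself):
def _local_linear_gradient(X, f, x0, p):
    # Fit an affine model to the p nearest neighbors of x0 and return its slope.
    idx = np.argsort(np.sum((X - x0)**2, axis=1))[:p]   # p nearest neighbors of x0
    A = np.hstack((np.ones((p, 1)), X[idx]))            # design matrix [1, x]
    coef = np.linalg.lstsq(A, f[idx])[0]                # least-squares fit
    return coef[1:]                                     # slope = gradient estimate
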
def test_fun_rs_bnd_2d_int(self):
    data = helper.load_test_npz('test_rs_1.npz')
    X, f, df = data['X'], data['f'], data['df']
    np.random.seed(43)
    model = base.ActiveSubspaceReducedModel(bounded_inputs=True)
    model.build_from_interface(X.shape[1], self.quad_fun, avdim=2)
    avg = model.average(20)[0]
    prob, pl, pu = model.probability(0.0, 1.0)
    fstar, xstar = model.minimum()
    if self.writeData:
        np.savez('data/test_base_1_3', avg=avg, prob=prob, pl=pl, pu=pu, xstar=xstar, fstar=fstar)
    data_test = helper.load_test_npz('test_base_1_3.npz')
    np.testing.assert_equal(avg, data_test['avg'])
    np.testing.assert_equal(prob, data_test['prob'])
    np.testing.assert_equal(pl, data_test['pl'])
    np.testing.assert_equal(pu, data_test['pu'])
    np.testing.assert_equal(xstar, data_test['xstar'])
    np.testing.assert_equal(fstar, data_test['fstar'])
    print '\n'
    print 'bnd avg: {:6.4f}'.format(avg)
    print 'bnd prob: {:6.4f}, {:6.4f}, {:6.4f}'.format(pl, prob, pu)
    print 'bnd min: {:6.4f}'.format(fstar)
    print 'bnd xmin: {:6.4f}, {:6.4f}, {:6.4f}'.format(xstar[0,0], xstar[0,1], xstar[0,2])

def test_rs_fun_train_gp_bnd_2d(self):
    data = helper.load_test_npz('test_rs_1.npz')
    X, f, df = data['X'], data['f'], data['df']
    sub = ss.Subspaces()
    sub.compute(df)
    sub.partition(2)
    avd = dom.BoundedActiveVariableDomain(sub)
    avm = dom.BoundedActiveVariableMap(avd)
    asrs = asm.ActiveSubspaceResponseSurface(avm)
    np.random.seed(43)
    asrs.train_with_interface(self.quad_fun, 10)
    XX = np.random.uniform(-1.0, 1.0, size=(10, 3))
    ff, dff, vff = asrs.predict(XX, compgrad=True, compvar=True)
    # np.savez('data/test_rs_7_0', ff=ff, dff=dff, vff=vff)
    data_test = helper.load_test_npz('test_rs_7_0.npz')
    np.testing.assert_equal(ff, data_test['ff'])
    np.testing.assert_equal(asrs(XX), data_test['ff'])
    np.testing.assert_equal(dff, data_test['dff'])
    np.testing.assert_equal(asrs.gradient(XX), data_test['dff'])
    np.testing.assert_equal(vff, data_test['vff'])
    sr = srun.SimulationRunner(self.quad_fun)
    f_true = sr.run(XX)
    dsr = srun.SimulationGradientRunner(self.quad_dfun)
    df_true = dsr.run(XX)
    print '\n'
    print 'fun gp bnd 2d'
    print 'f error: {:6.4e}'.format(np.linalg.norm(ff - f_true) / np.linalg.norm(f_true))
    print 'df error: {:6.4e}'.format(np.linalg.norm(dff - df_true) / np.linalg.norm(df_true))

def test_rs_data_train_pr_ubnd(self):
    data = helper.load_test_npz('test_rs_0.npz')
    X, f, df = data['X'], data['f'], data['df']
    sub = ss.Subspaces()
    sub.compute(df)
    avd = dom.UnboundedActiveVariableDomain(sub)
    avm = dom.UnboundedActiveVariableMap(avd)
    pr = rs.PolynomialRegression()
    asrs = asm.ActiveSubspaceResponseSurface(avm, respsurf=pr)
    asrs.train_with_data(X, f)
    np.random.seed(43)
    XX = np.random.normal(size=(10, 3))
    ff, dff, vff = asrs.predict(XX, compgrad=True, compvar=True)
    # np.savez('data/test_rs_0_2', ff=ff, dff=dff, vff=vff)
    data_test = helper.load_test_npz('test_rs_0_2.npz')
    np.testing.assert_equal(ff, data_test['ff'])
    np.testing.assert_equal(asrs(XX), data_test['ff'])
    np.testing.assert_equal(dff, data_test['dff'])
    np.testing.assert_equal(asrs.gradient(XX), data_test['dff'])
    np.testing.assert_equal(vff, data_test['vff'])
    sr = srun.SimulationRunner(self.quad_fun)
    f_true = sr.run(XX)
    dsr = srun.SimulationGradientRunner(self.quad_dfun)
    df_true = dsr.run(XX)
    print '\n'
    print 'data pr ubnd'
    print 'f error: {:6.4e}'.format(np.linalg.norm(ff - f_true) / np.linalg.norm(f_true))
    print 'df error: {:6.4e}'.format(np.linalg.norm(dff - df_true) / np.linalg.norm(df_true))

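# The response-surface tests above and below share one workflow: estimate the
# active subspace from gradients (ss.Subspaces), wrap it in an active-variable
# domain and map (dom.*), fit a surrogate on the active variables with
# asm.ActiveSubspaceResponseSurface (trained either from data or through the
# simulation interface), and then compare predictions, gradients, and variances
# against stored reference arrays and against the true quadratic test model.
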
def test_exact_polynomial_approximation_1d(self):
    data = helper.load_test_npz('train_points_10_2.npz')
    X = data['X']
    X_1d = X[:, 0].copy()
    f_1d = 2 + 5 * X_1d
    pr = rs.PolynomialRegression(N=1)
    pr.train(X_1d, f_1d)
    data = helper.load_test_npz('test_points_10_2.npz')
    X = data['X']
    X_1d_test = X[:, 0].copy()
    f, df, v = pr.predict(X_1d_test, compgrad=True, compvar=True)
    np.testing.assert_almost_equal(f, 2 + 5 * X_1d_test.reshape((10, 1)), decimal=10)
    np.testing.assert_almost_equal(df, 5 * np.ones((10, 1)), decimal=10)

    f_1d = 2 - 3 * X_1d + 5 * X_1d * X_1d
    pr = rs.PolynomialRegression(N=2)
    pr.train(X_1d, f_1d)
    data = helper.load_test_npz('test_points_10_2.npz')
    X = data['X']
    X_1d_test = X[:, 0].copy()
    f, df, v = pr.predict(X_1d_test, compgrad=True, compvar=True)
    f_test = 2 - 3 * X_1d_test + 5 * X_1d_test * X_1d_test
    df_test = -3 + 10 * X_1d_test
    np.testing.assert_almost_equal(f, f_test.reshape((10, 1)), decimal=10)
    np.testing.assert_almost_equal(df, df_test.reshape((10, 1)), decimal=10)

def test_exact_rbf_approximation_1d(self):
    data = helper.load_test_npz('train_points_10_2.npz')
    X = data['X']
    M = X.shape[0]
    X_1d = X[:, 0].copy().reshape((M, 1))
    f_1d = 2 + 5 * X_1d
    gp = rs.RadialBasisApproximation(N=1)
    gp.train(X_1d, f_1d)
    print 'Rsqr: {:6.4f}'.format(gp.Rsqr)
    data = helper.load_test_npz('test_points_10_2.npz')
    X = data['X']
    M = X.shape[0]
    X_1d_test = X[:, 0].copy().reshape((M, 1))
    f, df = gp.predict(X_1d_test, compgrad=True)
    np.testing.assert_almost_equal(f, 2 + 5 * X_1d_test.reshape((10, 1)), decimal=10)
    np.testing.assert_almost_equal(df, 5 * np.ones((10, 1)), decimal=10)

    f_1d = 2 - 3 * X_1d + 5 * X_1d * X_1d
    gp = rs.RadialBasisApproximation(N=2)
    gp.train(X_1d, f_1d)
    print 'Rsqr: {:6.4f}'.format(gp.Rsqr)
    data = helper.load_test_npz('test_points_10_2.npz')
    X = data['X']
    M = X.shape[0]
    X_1d_test = X[:, 0].copy().reshape((M, 1))
    f, df = gp.predict(X_1d_test, compgrad=True)
    f_test = 2 - 3 * X_1d_test + 5 * X_1d_test * X_1d_test
    df_test = -3 + 10 * X_1d_test
    np.testing.assert_almost_equal(f, f_test.reshape((10, 1)), decimal=10)
    np.testing.assert_almost_equal(df, df_test.reshape((10, 1)), decimal=10)

def test_rs_fun_train_pr_bnd(self):
    data = helper.load_test_npz('test_rs_1.npz')
    X, f, df = data['X'], data['f'], data['df']
    sub = ss.Subspaces()
    sub.compute(df)
    avd = dom.BoundedActiveVariableDomain(sub)
    avm = dom.BoundedActiveVariableMap(avd)
    pr = rs.PolynomialApproximation()
    asrs = asm.ActiveSubspaceResponseSurface(avm, pr)
    np.random.seed(43)
    asrs.train_with_interface(self.quad_fun, 10)
    XX = np.random.uniform(-1.0, 1.0, size=(10, 3))
    ff, dff = asrs.predict(XX, compgrad=True)
    if self.writeData:
        np.savez('data/test_rs_5_1', ff=ff, dff=dff)
    data_test = helper.load_test_npz('test_rs_5_1.npz')
    np.testing.assert_almost_equal(ff, data_test['ff'])
    np.testing.assert_almost_equal(asrs(XX), data_test['ff'])
    np.testing.assert_almost_equal(dff, data_test['dff'])
    np.testing.assert_almost_equal(asrs.gradient(XX), data_test['dff'])
    sr = srun.SimulationRunner(self.quad_fun)
    f_true = sr.run(XX)
    dsr = srun.SimulationGradientRunner(self.quad_dfun)
    df_true = dsr.run(XX)
    print '\n'
    print 'fun pr bnd'
    print 'f error: {:6.4e}'.format(np.linalg.norm(ff - f_true) / np.linalg.norm(f_true))
    print 'df error: {:6.4e}'.format(np.linalg.norm(dff - df_true) / np.linalg.norm(df_true))

def test_polynomial_grad_2d(self):
    data = helper.load_test_npz('train_points_200_2.npz')
    X = data['X']
    X_train = X.copy()
    ff0 = np.cos(X_train[:, 0]).reshape((200, 1))
    ff1 = np.sin(X_train[:, 1]).reshape((200, 1))
    f_2d = ff0 * ff1
    pr = rs.PolynomialApproximation(N=5)
    pr.train(X_train, f_2d)
    data = helper.load_test_npz('test_points_10_2.npz')
    X = data['X']
    X_test = X.copy()
    f0, df0 = pr.predict(X_test, compgrad=True)
    e = 1e-6
    X_testp = X_test.copy()
    X_testp[:, 0] += e
    f1 = pr.predict(X_testp)[0]
    df1_fd = (f1 - f0) / e
    np.testing.assert_almost_equal(df0[:, 0].reshape((10, 1)), df1_fd, decimal=5)
    X_testp = X_test.copy()
    X_testp[:, 1] += e
    f1 = pr.predict(X_testp)[0]
    df2_fd = (f1 - f0) / e
    np.testing.assert_almost_equal(df0[:, 1].reshape((10, 1)), df2_fd, decimal=5)

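# The gradient checks above and in the tests below all use the same first-order
# forward difference, df/dx_j ~ (f(x + e*e_j) - f(x)) / e with e = 1e-6. A
# standalone, hypothetical helper that captures the pattern (not part of the
# library; `predict` stands for any vectorized model, e.g. lambda X: pr.predict(X)[0]):
def _forward_difference(predict, X, e=1e-6):
    # Columnwise forward-difference gradient estimate at the rows of X.
    f0 = predict(X)
    df = np.zeros_like(X)
    for j in range(X.shape[1]):
        Xp = X.copy()
        Xp[:, j] += e                                   # perturb one coordinate
        df[:, j] = ((predict(Xp) - f0) / e).ravel()
    return df
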
def test_rs_data_train_gp_ubnd_2d(self):
    data = helper.load_test_npz('test_rs_0.npz')
    X, f, df = data['X'], data['f'], data['df']
    sub = ss.Subspaces()
    sub.compute(df)
    sub.partition(2)
    avd = dom.UnboundedActiveVariableDomain(sub)
    avm = dom.UnboundedActiveVariableMap(avd)
    asrs = asm.ActiveSubspaceResponseSurface(avm)
    asrs.train_with_data(X, f)
    np.random.seed(43)
    XX = np.random.normal(size=(10, 3))
    ff, dff = asrs.predict(XX, compgrad=True)
    if self.writeData:
        np.savez('data/test_rs_2_0', ff=ff, dff=dff)
    data_test = helper.load_test_npz('test_rs_2_0.npz')
    np.testing.assert_almost_equal(ff, data_test['ff'])
    np.testing.assert_almost_equal(asrs(XX), data_test['ff'])
    np.testing.assert_almost_equal(dff, data_test['dff'])
    np.testing.assert_almost_equal(asrs.gradient(XX), data_test['dff'])
    sr = srun.SimulationRunner(self.quad_fun)
    f_true = sr.run(XX)
    dsr = srun.SimulationGradientRunner(self.quad_dfun)
    df_true = dsr.run(XX)
    print '\n'
    print 'data gp ubnd 2d'
    print 'f error: {:6.4e}'.format(np.linalg.norm(ff - f_true) / np.linalg.norm(f_true))
    print 'df error: {:6.4e}'.format(np.linalg.norm(dff - df_true) / np.linalg.norm(df_true))

def test_gp_grad_2d(self):
    data = helper.load_test_npz('train_points_200_2.npz')
    X = data['X']
    X_train = X.copy()
    ff0 = np.cos(X_train[:, 0]).reshape((200, 1))
    ff1 = np.sin(X_train[:, 1]).reshape((200, 1))
    f_2d = ff0 * ff1
    gp = rs.GaussianProcess(N=5)
    gp.train(X_train, f_2d)
    data = helper.load_test_npz('test_points_10_2.npz')
    X = data['X']
    X_test = X.copy()
    f0, df0, v0 = gp.predict(X_test, compgrad=True, compvar=True)
    e = 1e-6
    X_testp = X_test.copy()
    X_testp[:, 0] += e
    f1 = gp.predict(X_testp)[0]
    df1_fd = (f1 - f0) / e
    np.testing.assert_almost_equal(df0[:, 0].reshape((10, 1)), df1_fd, decimal=5)
    X_testp = X_test.copy()
    X_testp[:, 1] += e
    f1 = gp.predict(X_testp)[0]
    df2_fd = (f1 - f0) / e
    np.testing.assert_almost_equal(df0[:, 1].reshape((10, 1)), df2_fd, decimal=5)

def test_exact_rbf_approximation_2d(self):
    data = helper.load_test_npz('train_points_10_2.npz')
    X = data['X']
    X_train = X.copy()
    f_2d = 2 + 5 * X_train[:, 0] - 4 * X_train[:, 1]
    gp = rs.RadialBasisApproximation(N=1)
    gp.train(X_train, f_2d.reshape((f_2d.size, 1)))
    print 'Rsqr: {:6.4f}'.format(gp.Rsqr)
    data = helper.load_test_npz('test_points_10_2.npz')
    X = data['X']
    X_test = X.copy()
    f, df = gp.predict(X_test, compgrad=True)
    f_test = 2 + 5 * X_test[:, 0] - 4 * X_test[:, 1]
    np.testing.assert_almost_equal(f, f_test.reshape((10, 1)), decimal=10)
    np.testing.assert_almost_equal(df[:, 0].reshape((10, 1)), 5 * np.ones((10, 1)), decimal=10)
    np.testing.assert_almost_equal(df[:, 1].reshape((10, 1)), -4 * np.ones((10, 1)), decimal=10)

    f_2d = 2 - 3 * X_train[:, 1] + 5 * X_train[:, 0] * X_train[:, 1]
    gp = rs.RadialBasisApproximation(N=2)
    gp.train(X_train, f_2d.reshape((f_2d.size, 1)))
    print 'Rsqr: {:6.4f}'.format(gp.Rsqr)
    data = helper.load_test_npz('test_points_10_2.npz')
    X = data['X']
    X_test = X.copy()
    f, df = gp.predict(X_test, compgrad=True)
    f_test = 2 - 3 * X_test[:, 1] + 5 * X_test[:, 0] * X_test[:, 1]
    df1_test = 5 * X_test[:, 1]
    df2_test = -3 + 5 * X_test[:, 0]
    np.testing.assert_almost_equal(f, f_test.reshape((10, 1)), decimal=10)
    np.testing.assert_almost_equal(df[:, 0].reshape((10, 1)), df1_test.reshape((10, 1)), decimal=10)
    np.testing.assert_almost_equal(df[:, 1].reshape((10, 1)), df2_test.reshape((10, 1)), decimal=10)

def test_exact_gp_approximation_1d(self):
    data = helper.load_test_npz('train_points_10_2.npz')
    X = data['X']
    M = X.shape[0]
    X_1d = X[:, 0].copy().reshape((M, 1))
    f_1d = 2 + 5 * X_1d
    gp = rs.GaussianProcess(N=1)
    gp.train(X_1d, f_1d)
    data = helper.load_test_npz('test_points_10_2.npz')
    X = data['X']
    M = X.shape[0]
    X_1d_test = X[:, 0].copy().reshape((M, 1))
    f, df, v = gp.predict(X_1d_test, compgrad=True, compvar=True)
    np.testing.assert_almost_equal(f, 2 + 5 * X_1d_test.reshape((10, 1)), decimal=10)
    np.testing.assert_almost_equal(df, 5 * np.ones((10, 1)), decimal=10)

    f_1d = 2 - 3 * X_1d + 5 * X_1d * X_1d
    gp = rs.GaussianProcess(N=2)
    gp.train(X_1d, f_1d)
    data = helper.load_test_npz('test_points_10_2.npz')
    X = data['X']
    M = X.shape[0]
    X_1d_test = X[:, 0].copy().reshape((M, 1))
    f, df, v = gp.predict(X_1d_test, compgrad=True, compvar=True)
    f_test = 2 - 3 * X_1d_test + 5 * X_1d_test * X_1d_test
    df_test = -3 + 10 * X_1d_test
    np.testing.assert_almost_equal(f, f_test.reshape((10, 1)), decimal=10)
    np.testing.assert_almost_equal(df, df_test.reshape((10, 1)), decimal=10)

def test_gp_as(self):
    data = helper.load_test_npz('test_points_uniform_50_2.npz')
    X_test = data['X'].copy()
    data = helper.load_test_npz('train_points_200_2.npz')
    X_train = data['X'].copy()
    f_train = 2 + 5 * X_train[:, 0] - 4 * X_train[:, 1] + 2 * X_train[:, 0] * X_train[:, 1]
    gp = rs.GaussianProcess(N=1)
    e = np.array([1.0, 0.5, 0.1, 0.05, 0.01])
    gp.train(X_train, f_train, e=e)
    f, df, vf = gp.predict(X_test, compgrad=True, compvar=True)
    data = helper.load_test_npz('test_gp_0.npz')
    np.testing.assert_equal(f, data['f'])
    np.testing.assert_equal(df, data['df'])
    np.testing.assert_equal(vf, data['vf'])

    v = 0.0001 * np.ones(f_train.shape)
    gp.train(X_train, f_train, e=e, v=v)
    f, df, vf = gp.predict(X_test, compgrad=True, compvar=True)
    data = helper.load_test_npz('test_gp_1.npz')
    np.testing.assert_equal(f, data['f'])
    np.testing.assert_equal(df, data['df'])
    np.testing.assert_equal(vf, data['vf'])

def test_exact_gp_approximation_2d(self):
    data = helper.load_test_npz('train_points_10_2.npz')
    X = data['X']
    X_train = X.copy()
    f_2d = 2 + 5 * X_train[:, 0] - 4 * X_train[:, 1]
    gp = rs.GaussianProcess(N=1)
    gp.train(X_train, f_2d.reshape((f_2d.size, 1)))
    data = helper.load_test_npz('test_points_10_2.npz')
    X = data['X']
    X_test = X.copy()
    f, df, v = gp.predict(X_test, compgrad=True, compvar=True)
    f_test = 2 + 5 * X_test[:, 0] - 4 * X_test[:, 1]
    np.testing.assert_almost_equal(f, f_test.reshape((10, 1)), decimal=10)
    np.testing.assert_almost_equal(df[:, 0].reshape((10, 1)), 5 * np.ones((10, 1)), decimal=10)
    np.testing.assert_almost_equal(df[:, 1].reshape((10, 1)), -4 * np.ones((10, 1)), decimal=10)

    f_2d = 2 - 3 * X_train[:, 1] + 5 * X_train[:, 0] * X_train[:, 1]
    gp = rs.GaussianProcess(N=2)
    gp.train(X_train, f_2d.reshape((f_2d.size, 1)))
    data = helper.load_test_npz('test_points_10_2.npz')
    X = data['X']
    X_test = X.copy()
    f, df, v = gp.predict(X_test, compgrad=True, compvar=True)
    f_test = 2 - 3 * X_test[:, 1] + 5 * X_test[:, 0] * X_test[:, 1]
    df1_test = 5 * X_test[:, 1]
    df2_test = -3 + 5 * X_test[:, 0]
    np.testing.assert_almost_equal(f, f_test.reshape((10, 1)), decimal=10)
    np.testing.assert_almost_equal(df[:, 0].reshape((10, 1)), df1_test.reshape((10, 1)), decimal=10)
    np.testing.assert_almost_equal(df[:, 1].reshape((10, 1)), df2_test.reshape((10, 1)), decimal=10)

def test_gp_as(self):
    data = helper.load_test_npz('test_points_uniform_50_2.npz')
    X_test = data['X'].copy()
    data = helper.load_test_npz('train_points_200_2.npz')
    X_train = data['X'].copy()
    f_train = 2 + 5 * X_train[:, 0] - 4 * X_train[:, 1] + 2 * X_train[:, 0] * X_train[:, 1]
    gp = rs.GaussianProcess(N=1)
    e = np.array([1.0, 0.5, 0.1, 0.05, 0.01])
    gp.train(X_train, f_train.reshape((f_train.size, 1)), e=e)
    f, df, vf = gp.predict(X_test, compgrad=True, compvar=True)
    # np.savez('data/test_gp_0', f=f, df=df, vf=vf)
    data = helper.load_test_npz('test_gp_0.npz')
    np.testing.assert_equal(f, data['f'])
    np.testing.assert_equal(df, data['df'])
    np.testing.assert_equal(vf, data['vf'])

    v = 0.0001 * np.ones(f_train.shape)
    gp.train(X_train, f_train.reshape((f_train.size, 1)), e=e, v=v)
    f, df, vf = gp.predict(X_test, compgrad=True, compvar=True)
    # np.savez('data/test_gp_1', f=f, df=df, vf=vf)
    data = helper.load_test_npz('test_gp_1.npz')
    np.testing.assert_equal(f, data['f'])
    np.testing.assert_equal(df, data['df'])
    np.testing.assert_equal(vf, data['vf'])

def test_rbf_as(self):
    data = helper.load_test_npz('test_points_uniform_50_2.npz')
    X_test = data['X'].copy()
    data = helper.load_test_npz('train_points_200_2.npz')
    X_train = data['X'].copy()
    f_train = 2 + 5 * X_train[:, 0] - 4 * X_train[:, 1] + 2 * X_train[:, 0] * X_train[:, 1]
    gp = rs.RadialBasisApproximation(N=1)
    e = np.array([1.0, 0.5, 0.1, 0.05, 0.01])
    gp.train(X_train, f_train.reshape((f_train.size, 1)), e=e)
    f, df = gp.predict(X_test, compgrad=True)
    if self.writeData:
        np.savez('data/test_rbf_0.npz', f=f, df=df)
    data = helper.load_test_npz('test_rbf_0.npz')
    np.testing.assert_almost_equal(f, data['f'])
    np.testing.assert_almost_equal(df, data['df'])

    v = 0.0001 * np.ones(f_train.shape)
    gp.train(X_train, f_train.reshape((f_train.size, 1)), e=e, v=v)
    f, df = gp.predict(X_test, compgrad=True)
    if self.writeData:
        np.savez('data/test_rbf_1.npz', f=f, df=df)
    data = helper.load_test_npz('test_rbf_1.npz')
    np.testing.assert_almost_equal(f, data['f'])
    np.testing.assert_almost_equal(df, data['df'])

def test_rs_ubnd_2d_int(self):
    data = helper.load_test_npz('test_rs_0.npz')
    X, f, df = data['X'], data['f'], data['df']
    np.random.seed(43)
    model = base.ActiveSubspaceReducedModel(3, False)
    model.build_from_data(X, f, df=df, avdim=2)
    avg = model.average(20)[0]
    prob, pl, pu = model.probability(0.0, 1.0)
    fstar, xstar = model.minimum()
    if self.writeData:
        np.savez('data/test_base_0_2', avg=avg, prob=prob, pl=pl, pu=pu, xstar=xstar, fstar=fstar)
    data_test = helper.load_test_npz('test_base_0_2.npz')
    np.testing.assert_almost_equal(avg, data_test['avg'])
    np.testing.assert_almost_equal(prob, data_test['prob'])
    np.testing.assert_almost_equal(pl, data_test['pl'])
    np.testing.assert_almost_equal(pu, data_test['pu'])
    np.testing.assert_almost_equal(xstar, data_test['xstar'])
    np.testing.assert_almost_equal(fstar, data_test['fstar'])
    print '\n'
    print 'ubnd avg: {:6.4f}'.format(avg)
    print 'ubnd prob: {:6.4f}, {:6.4f}, {:6.4f}'.format(pl, prob, pu)
    print 'ubnd min: {:6.4f}'.format(fstar)
    print 'ubnd xmin: {:6.4f}, {:6.4f}, {:6.4f}'.format(xstar[0,0], xstar[0,1], xstar[0,2])

def test_bootstrap_ranges(self):
    data = helper.load_test_npz('test_spec_decomp.npz')
    df, e, W = data['df'], data['e'], data['W']
    np.random.seed(1234)
    e_br, sub_br = ss.bootstrap_ranges(df, e, W, n_boot=100)
    data = helper.load_test_npz('test_spec_br.npz')
    np.testing.assert_equal(e_br, data['e_br'])
    np.testing.assert_equal(sub_br, data['sub_br'])

def test_grad_polynomial_bases(self):
    data = helper.load_test_npz('test_points_10_2.npz')
    X = data['X']
    dB = rs.grad_polynomial_bases(X, 3)
    if self.writeData:
        np.savez('data/test_grad_poly_bases_3.npz', dB=dB)
    data = helper.load_test_npz('test_grad_poly_bases_3.npz')
    np.testing.assert_almost_equal(dB, data['dB'])

def test_bootstrap_ranges_1(self):
    data = helper.load_test_npz('test_spec_decomp_1.npz')
    df0, e0, W0 = data['df'], data['e'], data['W']
    np.random.seed(42)
    e_br, sub_br = ss.bootstrap_ranges(df0, e0, W0, n_boot=100)
    data_br = helper.load_test_npz('test_spec_br_1.npz')
    np.testing.assert_equal(e_br, data_br['e_br'])
    np.testing.assert_equal(sub_br, data_br['sub_br'])

def test_grad_exponential_squared_covariance(self):
    data = helper.load_test_npz('test_points_10_2.npz')
    X1 = data['X']
    X2 = X1.copy()
    dC = rs.grad_exponential_squared_covariance(X1, X2, 1.0, np.array([1.0, 1.0]))
    data = helper.load_test_npz('test_grad_exp_cov.npz')
    np.testing.assert_equal(dC, data['dC'])

def test_polynomial_bases(self):
    data = helper.load_test_npz('test_points_10_2.npz')
    X = data['X']
    B, I = rs.polynomial_bases(X, 3)
    if self.writeData:
        np.savez('data/test_poly_bases_3.npz', B=B, I=I)
    data = helper.load_test_npz('test_poly_bases_3.npz')
    np.testing.assert_almost_equal(B, data['B'])
    np.testing.assert_almost_equal(I, data['I'])

def test_grad_exponential_squared(self):
    data = helper.load_test_npz('test_points_10_2.npz')
    X1 = data['X']
    X2 = X1.copy()
    dC = rs.grad_exponential_squared(X1, X2, 1.0, np.array([1.0, 1.0]))
    if self.writeData:
        np.savez('data/test_grad_exp_cov.npz', dC=dC)
    data = helper.load_test_npz('test_grad_exp_cov.npz')
    np.testing.assert_almost_equal(dC, data['dC'])

def test_bootstrap_ranges_0(self):
    data = helper.load_test_npz('test_spec_decomp_0.npz')
    df0, e0, W0 = data['df'], data['e'], data['W']
    np.random.seed(42)
    e_br, sub_br = ss.bootstrap_ranges(df0, e0, W0, n_boot=100)
    if self.writeData:
        np.savez('data/test_spec_br_0', e_br=e_br, sub_br=sub_br)
    data_br = helper.load_test_npz('test_spec_br_0.npz')
    np.testing.assert_almost_equal(e_br, data_br['e_br'])
    np.testing.assert_almost_equal(sub_br, data_br['sub_br'])

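# bootstrap_ranges resamples the gradient rows with replacement, recomputes the
# spectral decomposition on each replicate, and summarizes the spread of the
# eigenvalues and subspace errors. A rough, hypothetical sketch of the
# eigenvalue part only (the library's bookkeeping differs in detail):
def _bootstrap_eigenvalue_ranges(df, n_boot=100):
    M, m = df.shape
    reps = np.zeros((n_boot, m))
    for i in range(n_boot):
        idx = np.random.randint(0, M, size=M)            # resample gradient rows
        C = np.dot(df[idx].T, df[idx]) / M               # replicate covariance estimate
        reps[i] = np.sort(np.linalg.eigvalsh(C))[::-1]   # descending eigenvalues
    return reps.min(axis=0), reps.max(axis=0)            # per-eigenvalue ranges
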
def test_subspaces_1(self):
    data = helper.load_test_npz('test_spec_decomp_1.npz')
    data_br = helper.load_test_npz('test_spec_br_1.npz')
    df0, e0, W0 = data['df'], data['e'], data['W']
    sub = ss.Subspaces()
    np.random.seed(42)
    sub.compute(df0, n_boot=100)
    np.testing.assert_equal(sub.eigenvalues, e0)
    np.testing.assert_equal(sub.eigenvectors, W0)
    np.testing.assert_equal(sub.e_br, data_br['e_br'])
    np.testing.assert_equal(sub.sub_br, data_br['sub_br'])

def test_bounded_active_variable_domain_0(self):
    data = helper.load_test_npz('test_spec_decomp_0.npz')
    df0, e0, W0 = data['df'], data['e'], data['W']
    sub = ss.Subspaces()
    sub.compute(df0)
    data_bavd = helper.load_test_npz('bavd_0.npz')
    bavd = dom.BoundedActiveVariableDomain(sub)
    np.testing.assert_almost_equal(bavd.vertY, np.dot(bavd.vertX, sub.W1))
    np.testing.assert_equal(bavd.vertY, data_bavd['vertY'])
    np.testing.assert_equal(bavd.vertX, data_bavd['vertX'])
    np.testing.assert_equal(bavd.n, sub.W1.shape[1])
    np.testing.assert_equal(bavd.m, sub.W1.shape[0])

def test_poly_order_2d(self):
    data = helper.load_test_npz('test_points_uniform_50_2.npz')
    X = data['X']
    X_test = X.copy()
    xx = np.linspace(-1.0, 1.0, 21)
    X1, X2 = np.meshgrid(xx, xx)
    X_train = np.hstack((X1.reshape((441, 1)), X2.reshape((441, 1))))
    f_train = np.sin(np.pi * X1.reshape((441, 1))) * np.cos(np.pi * X2.reshape((441, 1)))
    print '\nPOLY 2D ORDER CONVERGENCE\n'
    for N in range(3, 10):
        pr = rs.PolynomialRegression(N=N)
        pr.train(X_train, f_train)
        f, df, v = pr.predict(X_test, compgrad=True, compvar=True)
        f_true = np.sin(np.pi * X[:, 0].reshape((50, 1))) * np.cos(np.pi * X[:, 1].reshape((50, 1)))
        df1_true = np.cos(np.pi * X[:, 1].reshape((50, 1))) * np.pi * np.cos(np.pi * X[:, 0].reshape((50, 1)))
        df2_true = -np.sin(np.pi * X[:, 0].reshape((50, 1))) * np.pi * np.sin(np.pi * X[:, 1].reshape((50, 1)))
        err_f = np.linalg.norm(f - f_true) / np.linalg.norm(f_true)
        err_df1 = np.linalg.norm(df[:, 0].reshape((50, 1)) - df1_true) / np.linalg.norm(df1_true)
        err_df2 = np.linalg.norm(df[:, 1].reshape((50, 1)) - df2_true) / np.linalg.norm(df2_true)
        print 'Order: %d, Error in f: %6.4e, Error in df1: %6.4e, Error in df2: %6.4e' % (N, err_f, err_df1, err_df2)

def test_gp_points_2d(self):
    data = helper.load_test_npz('test_points_uniform_50_2.npz')
    X = data['X']
    X_test = X.copy()
    print '\nGP 2D POINT CONVERGENCE\n'
    for N in range(1, 5):
        xx = np.linspace(-1.0, 1.0, 2**N + 1)
        X1, X2 = np.meshgrid(xx, xx)
        X_train = np.hstack((X1.reshape(((2**N + 1)**2, 1)), X2.reshape(((2**N + 1)**2, 1))))
        f_train = np.sin(np.pi * X1.reshape(((2**N + 1)**2, 1))) * np.cos(np.pi * X2.reshape(((2**N + 1)**2, 1)))
        gp = rs.GaussianProcess(N=2)
        gp.train(X_train, f_train)
        f, df, v = gp.predict(X_test, compgrad=True, compvar=True)
        f_true = np.sin(np.pi * X[:, 0].reshape((50, 1))) * np.cos(np.pi * X[:, 1].reshape((50, 1)))
        df1_true = np.cos(np.pi * X[:, 1].reshape((50, 1))) * np.pi * np.cos(np.pi * X[:, 0].reshape((50, 1)))
        df2_true = -np.sin(np.pi * X[:, 0].reshape((50, 1))) * np.pi * np.sin(np.pi * X[:, 1].reshape((50, 1)))
        err_f = np.linalg.norm(f - f_true) / np.linalg.norm(f_true)
        err_df1 = np.linalg.norm(df[:, 0].reshape((50, 1)) - df1_true) / np.linalg.norm(df1_true)
        err_df2 = np.linalg.norm(df[:, 1].reshape((50, 1)) - df2_true) / np.linalg.norm(df2_true)
        print 'Points: %d, Error in f: %6.4e, Error in df1: %6.4e, Error in df2: %6.4e' % ((2**N + 1)**2, err_f, err_df1, err_df2)

def test_grad_exponential_squared_covariance_fd(self):
    data = helper.load_test_npz('test_points_10_2.npz')
    X1 = data['X']
    X2 = X1.copy()
    C0 = rs.exponential_squared_covariance(X1, X2, 1.0, np.array([1.0, 1.0]))
    dC = rs.grad_exponential_squared_covariance(X1, X2, 1.0, np.array([1.0, 1.0]))
    e = 1e-6
    X2p1 = X2.copy()
    X2p1[:, 0] += e
    C1 = rs.exponential_squared_covariance(X1, X2p1, 1.0, np.array([1.0, 1.0]))
    dC1 = (C1 - C0) / e
    np.testing.assert_array_almost_equal(dC[:, :, 0], dC1, decimal=5)
    X2p2 = X2.copy()
    X2p2[:, 1] += e
    C2 = rs.exponential_squared_covariance(X1, X2p2, 1.0, np.array([1.0, 1.0]))
    dC2 = (C2 - C0) / e
    np.testing.assert_array_almost_equal(dC[:, :, 1], dC2, decimal=5)

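# exponential_squared_covariance evaluates a squared-exponential (RBF) kernel
# between two point sets. For reference only, a generic version of that kernel
# in a common parameterization; the library's exact scaling of sigma and the
# length scales ell may differ from this sketch:
def _sqexp_covariance(X1, X2, sigma, ell):
    D = X1[:, None, :] - X2[None, :, :]                  # pairwise differences
    return sigma * np.exp(-0.5 * np.sum((D / ell)**2, axis=2))
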
def test_jacobi_matrix_5(self):
    if self.writeData:
        J = gq.jacobi_matrix(gq.r_hermite(5))
        np.savez('data/test_jacobi_matrix_5.npz', J=J)
    data = helper.load_test_npz('test_jacobi_matrix_5.npz')
    J = data['J']
    np.testing.assert_almost_equal(gq.jacobi_matrix(gq.r_hermite(5)), J)

def test_rs_diag(self):
    data = helper.load_test_npz('test_rs_0.npz')
    X, f, df = data['X'], data['f'], data['df']
    np.random.seed(43)
    model = base.ActiveSubspaceReducedModel(3, False)
    model.build_from_data(X, f, df=df)
    model.diagnostics()

def test_rs_0(self):
    data = helper.load_test_npz('test_spec_decomp_0.npz')
    df0, e0, W0 = data['df'], data['e'], data['W']
    sub = ss.Subspaces()
    sub.compute(df0)
    avd = dom.UnboundedActiveVariableDomain(sub)
    avm = dom.UnboundedActiveVariableMap(avd)
    asm.ActiveSubspaceResponseSurface(avm)

def test_gauss_hermite_1d_int_arg(self):
    if self.writeData:
        p, w = gq.gauss_hermite(7)
        np.savez('data/test_gh1d_7pts.npz', p=p, w=w)
    data = helper.load_test_npz('test_gh1d_7pts.npz')
    p, w = gq.gauss_hermite(7)
    np.testing.assert_almost_equal(p, data['p'])
    np.testing.assert_almost_equal(w, data['w'])

def test_gauss_hermite_3d(self):
    if self.writeData:
        p, w = gq.gauss_hermite([3, 3, 4])
        np.savez('data/test_gauss_hermite_3d.npz', p=p, w=w)
    data = helper.load_test_npz('test_gauss_hermite_3d.npz')
    p, w = gq.gauss_hermite([3, 3, 4])
    np.testing.assert_almost_equal(p, data['p'])
    np.testing.assert_almost_equal(w, data['w'])

def test_gp_grad_1d(self):
    data = helper.load_test_npz('train_points_10_2.npz')
    X = data['X']
    X_1d = X[:, 0].copy()
    f_1d = np.cos(X_1d)
    gp = rs.GaussianProcess(N=0)
    gp.train(X_1d, f_1d)
    data = helper.load_test_npz('test_points_10_2.npz')
    X = data['X']
    X_1d_test = X[:, 0].copy()
    f0, df0, v0 = gp.predict(X_1d_test, compgrad=True, compvar=True)
    e = 1e-6
    X_1d_testp = X_1d_test.copy() + e
    f1 = gp.predict(X_1d_testp)[0]
    df0_fd = (f1 - f0) / e
    np.testing.assert_almost_equal(df0, df0_fd, decimal=5)

def test_polynomial_grad_1d(self):
    data = helper.load_test_npz('train_points_10_2.npz')
    X = data['X']
    X_1d = X[:, 0].copy()
    f_1d = np.cos(X_1d)
    pr = rs.PolynomialRegression(N=7)
    pr.train(X_1d, f_1d)
    data = helper.load_test_npz('test_points_10_2.npz')
    X = data['X']
    X_1d_test = X[:, 0].copy()
    f0, df0, v0 = pr.predict(X_1d_test, compgrad=True, compvar=True)
    e = 1e-6
    X_1d_testp = X_1d_test.copy() + e
    f1 = pr.predict(X_1d_testp)[0]
    df0_fd = (f1 - f0) / e
    np.testing.assert_almost_equal(df0, df0_fd, decimal=5)

def test_sample_z(self):
    data = helper.load_test_npz('test_spec_decomp_1.npz')
    df0, e0, W0 = data['df'], data['e'], data['W']
    sub = ss.Subspaces()
    sub.compute(df0)
    W1, W2 = sub.W1, sub.W2
    m, n = W1.shape
    np.random.seed(43)
    x = np.random.uniform(-1.0, 1.0, size=(1, m))
    y = np.dot(x, W1).reshape((n, ))
    N = 10
    np.random.seed(42)
    Z = dom.sample_z(N, y, W1, W2)
    data_test = helper.load_test_npz('test_sampling_z_0_0.npz')
    np.testing.assert_almost_equal(Z, data_test['Z'])

def test_grad_polynomial_bases_fd(self):
    data = helper.load_test_npz('test_points_10_2.npz')
    X0 = data['X']
    data = helper.load_test_npz('test_grad_poly_bases_3.npz')
    dB = rs.grad_polynomial_bases(X0, 3)
    e = 1e-6
    B0 = rs.polynomial_bases(X0, 3)[0]
    X1 = X0.copy()
    X1[:, 0] += e
    B1 = rs.polynomial_bases(X1, 3)[0]
    dB1 = (B1 - B0) / e
    np.testing.assert_array_almost_equal(dB[:, :, 0], dB1, decimal=5)
    X2 = X0.copy()
    X2[:, 1] += e
    B2 = rs.polynomial_bases(X2, 3)[0]
    dB2 = (B2 - B0) / e
    np.testing.assert_array_almost_equal(dB[:, :, 1], dB2, decimal=5)

def test_exact_gp_approximation_2d(self):
    data = helper.load_test_npz('train_points_10_2.npz')
    X = data['X']
    X_train = X.copy()
    f_2d = 2 + 5 * X_train[:, 0] - 4 * X_train[:, 1]
    gp = rs.GaussianProcess(N=1)
    gp.train(X_train, f_2d)
    data = helper.load_test_npz('test_points_10_2.npz')
    X = data['X']
    X_test = X.copy()
    f, df, v = gp.predict(X_test, compgrad=True, compvar=True)
    f_test = 2 + 5 * X_test[:, 0] - 4 * X_test[:, 1]
    np.testing.assert_almost_equal(f, f_test.reshape((10, 1)), decimal=10)
    np.testing.assert_almost_equal(df[:, 0].reshape((10, 1)), 5 * np.ones((10, 1)), decimal=10)
    np.testing.assert_almost_equal(df[:, 1].reshape((10, 1)), -4 * np.ones((10, 1)), decimal=10)

    f_2d = 2 - 3 * X_train[:, 1] + 5 * X_train[:, 0] * X_train[:, 1]
    gp = rs.GaussianProcess(N=2)
    gp.train(X_train, f_2d)
    data = helper.load_test_npz('test_points_10_2.npz')
    X = data['X']
    X_test = X.copy()
    f, df, v = gp.predict(X_test, compgrad=True, compvar=True)
    f_test = 2 - 3 * X_test[:, 1] + 5 * X_test[:, 0] * X_test[:, 1]
    df1_test = 5 * X_test[:, 1]
    df2_test = -3 + 5 * X_test[:, 0]
    np.testing.assert_almost_equal(f, f_test.reshape((10, 1)), decimal=10)
    np.testing.assert_almost_equal(df[:, 0].reshape((10, 1)), df1_test.reshape((10, 1)), decimal=10)
    np.testing.assert_almost_equal(df[:, 1].reshape((10, 1)), df2_test.reshape((10, 1)), decimal=10)

def test_poly_order_1d(self):
    data = helper.load_test_npz('test_points_uniform_50_2.npz')
    X = data['X']
    X_1d_test = X[:, 0].copy().reshape((50, 1))
    X_train = np.linspace(-1.0, 1.0, 201).reshape((201, 1))
    f_train = np.sin(np.pi * X_train)
    print '\nPOLY 1D ORDER CONVERGENCE\n'
    for N in range(3, 10):
        pr = rs.PolynomialRegression(N=N)
        pr.train(X_train, f_train)
        f, df, v = pr.predict(X_1d_test, compgrad=True, compvar=True)
        f_true = np.sin(np.pi * X_1d_test)
        err_f = np.linalg.norm(f - f_true) / np.linalg.norm(f_true)
        df_true = np.pi * np.cos(np.pi * X_1d_test)
        err_df = np.linalg.norm(df - df_true) / np.linalg.norm(df_true)
        print 'Order: %d, Error in f: %6.4e, Error in df: %6.4e' % (N, err_f, err_df)

def test_gp_points_1d(self):
    data = helper.load_test_npz('test_points_uniform_50_2.npz')
    X = data['X']
    X_1d_test = X[:, 0].copy().reshape((50, 1))
    print '\nGP 1D POINT CONVERGENCE\n'
    for N in range(1, 9):
        X_train = np.linspace(-1.0, 1.0, 2**N + 1).reshape((2**N + 1, 1))
        f_train = np.sin(np.pi * X_train)
        gp = rs.GaussianProcess(N=0)
        gp.train(X_train, f_train)
        f, df, v = gp.predict(X_1d_test, compgrad=True, compvar=True)
        f_true = np.sin(np.pi * X_1d_test)
        err_f = np.linalg.norm(f - f_true) / np.linalg.norm(f_true)
        df_true = np.pi * np.cos(np.pi * X_1d_test)
        err_df = np.linalg.norm(df - df_true) / np.linalg.norm(df_true)
        print 'Points: %d, Error in f: %6.4e, Error in df: %6.4e' % (2**N + 1, err_f, err_df)

def test_gh1d_7pts(self):
    data = helper.load_test_npz('test_gh1d_7pts.npz')
    p, w = gq.gh1d(7)
    np.testing.assert_equal(p, data['p'])
    np.testing.assert_equal(w, data['w'])

def test_gauss_hermite_3d(self):
    data = helper.load_test_npz('test_gauss_hermite_3d.npz')
    p, w = gq.gauss_hermite([3, 3, 4])
    np.testing.assert_equal(p, data['p'])
    np.testing.assert_equal(w, data['w'])

def test_jacobi_matrix_5(self):
    data = helper.load_test_npz('test_jacobi_matrix_5.npz')
    J = data['J']
    np.testing.assert_equal(gq.jacobi_matrix(gq.r_hermite(5)), J)

def test_grad_polynomial_bases(self):
    data = helper.load_test_npz('test_points_10_2.npz')
    X = data['X']
    data = helper.load_test_npz('test_grad_poly_bases_3.npz')
    dB = rs.grad_polynomial_bases(X, 3)
    np.testing.assert_equal(dB, data['dB'])

def test_full_index_set(self):
    I = rs.full_index_set(7, 3)
    data = helper.load_test_npz('test_full_index_set_7_3.npz')
    np.testing.assert_equal(I, data['I'])

def test_spectral_decomposition(self):
    data = helper.load_test_npz('test_spec_decomp.npz')
    df = data['df']
    e, W = ss.spectral_decomposition(df)
    np.testing.assert_equal(e, data['e'])
    np.testing.assert_equal(W, data['W'])

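# spectral_decomposition estimates the eigenpairs of the gradient covariance
#   C = (1/M) * sum_i grad_f(x_i) grad_f(x_i)^T = W * diag(e) * W^T
# from the M x m matrix of sampled gradients df. A minimal sketch of that
# computation (the library may differ in scaling, ordering, or sign fixes):
def _spectral_decomposition_sketch(df):
    M = df.shape[0]
    C = np.dot(df.T, df) / M              # estimated gradient covariance
    e, W = np.linalg.eigh(C)              # symmetric eigendecomposition
    order = np.argsort(e)[::-1]           # sort eigenvalues in descending order
    return e[order], W[:, order]
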
def test_gauss_hermite_1d_int_arg(self):
    data = helper.load_test_npz('test_gh1d_7pts.npz')
    p, w = gq.gauss_hermite(7)
    np.testing.assert_equal(p, data['p'])
    np.testing.assert_equal(w, data['w'])