def test_models(self):
    """Every candidate model exposes metadata, evaluates, and returns gradients
    of the expected shapes, for both settings of the binary design x[2]."""
    _, models = get()
    for model in models:
        assert isinstance(model.name, str)
        n_out = model.n_outputs
        assert isinstance(n_out, int)
        xbnds = model.x_bounds
        pbnds = model.p_bounds
        assert isinstance(xbnds, np.ndarray)
        assert isinstance(pbnds, np.ndarray)
        # Draw one random design / parameter pair inside the declared bounds.
        x = np.random.uniform(xbnds[:, 0], xbnds[:, 1])
        p = np.random.uniform(pbnds[:, 0], pbnds[:, 1])
        n_par = len(p)
        # Exercise both values of the binary design variable x[2].
        for flag in (0, 1):
            x[2] = flag
            y = model(x, p)
            assert y.shape == (n_out,)
            y, dy = model(x, p, grad=True)
            assert y.shape == (n_out,)
            assert dy.shape == (n_out, n_par)
def test_datagen(self):
    """The data-generating measurement object has sane metadata and produces
    outputs of the right shape for both settings of the binary design x[2]."""
    meas, models = get()
    assert isinstance(meas.truemodel, int)
    assert isinstance(meas.measvar, (float, np.ndarray))
    n_out = models[0].n_outputs
    xbnds = models[0].x_bounds
    # Random design inside the bounds, then both values of x[2].
    x = np.random.uniform(xbnds[:, 0], xbnds[:, 1])
    for flag in (0, 1):
        x[2] = flag
        y = meas(x)
        assert y.shape == (n_out,)
def test_overflow_protection (self):
    """Model index 4 of case study 1 stays numerically bounded at an
    extreme input: value ~1 and a vanishing gradient."""
    model = get()[1][4]
    x = np.array([1., 0.01, 0])
    p = 0.025
    val, grad = model(x, p, grad=True)
    assert 0.999 <= val[0] <= 1.001
    assert -0.001 <= grad[0, 0] <= 0.001
def test_name (self):
    """Loading case study 1 yields a measurement object named 'M2'."""
    case = 1
    meas, _ = get(case)
    assert isinstance(meas.name, str)
    assert meas.name == 'M2'
# NOTE(review): the next five statements appear to be the tail of a plotting
# method defined above this chunk (uses `self`, `ax`, `c`, `mu`, `s2`) —
# original indentation was lost in this view; confirm against the full file.
self.plot_prediction(ax, c, mu, s2)
plt.suptitle( 'Marginal predictive distributions at suggested next designs')
# NOTE(review): True is passed positionally as tight_layout's pad argument —
# confirm this is intended rather than e.g. tight_layout() with defaults.
plt.tight_layout(True)
# Leave room for the suptitle above the axes.
plt.subplots_adjust(top=0.85)
plt.show()


##################
###            ###
###  D E M O   ###
###            ###
##################

print("== Loading case study ==")
# Case study 2: one measurement object and a list of candidate models.
measurement, models = mixing.get(i=2)
print("Number of models: %d" % len(models))

x_bounds = measurement.x_bounds  # Design variable bounds
dim_x = len(x_bounds)            # Number of design variables
measvar = measurement.measvar    # Measurement noise variance
E = 1                            # Number of outputs/target dimensions

# Initial observations: two designs and their measured responses.
X = np.array([[20., 0.50, 0], [20., 0.75, 0]])
Y = np.array([[0.766], [0.845]])

""" Initialise the GP surrogate models """
Ms = []
print("== Initialising models ==")