def test_get_uncertain_value(self):
    """get_uncertain_value on a trained Kriging surrogate should wrap the
    given value in a NormalDistribution with sigma == 0."""
    inputs = array([[0.05], [.25], [0.61], [0.95]])
    targets = array([0.738513784857542, -0.210367746201974,
                     -0.489015457891476, 12.3033138316612])
    surrogate = KrigingSurrogate(inputs, targets)
    expected = NormalDistribution(1., 0.)
    actual = surrogate.get_uncertain_value(1)
    self.assertEqual(actual.mu, expected.mu)
    self.assertEqual(actual.sigma, expected.sigma)
def test_ei_2obj(self):
    """Two-objective expected improvement: EI and PI should match the
    expected values for these two target points."""
    ei = MultiObjExpectedImprovement()
    ei.target = array([[1, 10], [1, -10]])
    ei.current = [NormalDistribution(mu=1, sigma=1),
                  NormalDistribution(mu=0, sigma=1)]
    ei.calc_switch = "EI"
    ei.execute()
    self.assertAlmostEqual([5.0], ei.EI, 1)
    # BUG FIX: the third positional argument of assertEqual is a failure
    # *message*, not a places count -- assertAlmostEqual was intended here.
    self.assertAlmostEqual(0.5, ei.PI, 6)
def test_ei_nobj(self):
    """N-objective probability of improvement for three identical normal
    distributions centered on the target point."""
    ei = MultiObjExpectedImprovement()
    ei.target = array([[1, 1, 1]])
    # NOTE: removed an unused local list of Case objects that was never
    # recorded anywhere -- dead code left over from the CaseSet-based API.
    ei.criteria = ['y1', 'y2', 'y3']
    ei.current = [NormalDistribution(mu=1, sigma=1),
                  NormalDistribution(mu=1, sigma=1),
                  NormalDistribution(mu=1, sigma=1)]
    ei.execute()
    self.assertAlmostEqual(0.875, ei.PI, 1)
def test_reset_y_star_event(self):
    """Changing the target and re-executing should update y_star."""
    ei = MultiObjExpectedImprovement()
    ei.target = array([[1, 1, 1]])
    ei.current = [NormalDistribution(mu=1, sigma=1),
                  NormalDistribution(mu=1, sigma=1),
                  NormalDistribution(mu=1, sigma=1)]
    ei.execute()
    ei.target = array([[2, 2, 2]])
    ei.execute()
    # BUG FIX: the original compared `a.all() == b.all()`, which compares
    # two booleans (both True for any all-nonzero arrays) and could never
    # fail.  Assert element-wise equality instead.
    self.assertTrue((ei.y_star == array([2, 2, 2])).all())
def test_ei_nobj(self):
    """PI for three objectives, all predictions centered on the one
    recorded best case."""
    ei = MultiObjExpectedImprovement(3)
    bests = CaseSet()
    bests.record(Case(outputs=[("y1", 1), ("y2", 1), ("y3", 1)]))
    ei.best_cases = bests
    ei.criteria = ['y1', 'y2', 'y3']
    ei.predicted_values = [NormalDistribution(mu=1, sigma=1)
                           for _ in range(3)]
    ei.execute()
    self.assertAlmostEqual(0.875, ei.PI, 1)
def test_ei_2obj(self):
    """Two-objective EI/PI through the CaseSet-based interface."""
    ei = MultiObjExpectedImprovement()
    bests = CaseSet()
    for case in [Case(outputs=[("y1", 1), ("y2", 10)]),
                 Case(outputs=[("y1", 1), ("y2", -10)])]:
        bests.record(case)
    ei.best_cases = bests
    ei.criteria = ["y1", "y2"]
    ei.predicted_values = [NormalDistribution(mu=1, sigma=1),
                           NormalDistribution(mu=0, sigma=1)]
    ei.calc_switch = "EI"
    ei.execute()
    self.assertAlmostEqual([5.0], ei.EI, 1)
    # BUG FIX: assertEqual's third argument is a failure message, not a
    # places count -- assertAlmostEqual was clearly intended.
    self.assertAlmostEqual(0.5, ei.PI, 6)
def test_ei_calc_switch(self):
    """Requesting EI with more than two objectives must raise ValueError."""
    ei = MultiObjExpectedImprovement()
    ei.target = array([[1, 1, 1]])
    ei.current = [NormalDistribution(mu=1, sigma=1),
                  NormalDistribution(mu=1, sigma=1),
                  NormalDistribution(mu=1, sigma=1)]
    ei.calc_switch = 'EI'
    try:
        ei.execute()
    except ValueError as err:  # `as` form works on Python 2.6+ and 3.x
        self.assertEqual(str(err), ': EI calculations not supported'
                                   ' for more than 2 objectives')
    else:
        # BUG FIX: the original silently passed when no exception was
        # raised at all.
        self.fail('expected ValueError')
def test_bad_criteria(self):
    """Criteria that match no recorded case output must raise ValueError."""
    ei = MultiObjExpectedImprovement(2)
    bests = CaseSet()
    bests.record(Case(outputs=[("y1", 1), ("y2", 1)]))
    ei.best_cases = bests
    ei.criteria = ['y1', 'y3']
    ei.predicted_values = [NormalDistribution(mu=1, sigma=1),
                           NormalDistribution(mu=1, sigma=1)]
    try:
        ei.execute()
    except ValueError as err:  # `as` form works on Python 2.6+ and 3.x
        self.assertEqual(str(err), ": no cases in the provided case_set"
                                   " had output matching the provided criteria, ['y1' 'y3']")
    else:
        # BUG FIX: the original silently passed when no exception was
        # raised at all.
        self.fail('expected ValueError')
def predict(self, new_x):
    """Return a NormalDistribution prediction for the single input point
    *new_x* from the currently trained model (see ISurrogate).
    """
    mean, mse = self.model.predict([new_x])
    return NormalDistribution(mean[0][0], sqrt(abs(mse)))
class MyContainer(Container):
    # Output slot holding an uncertain value.
    uncertain = Slot(NormalDistribution(), iotype="out")

    def __init__(self, *args, **kwargs):
        """Populate the slot with a default-constructed distribution and
        add a dynamically-created Float trait."""
        super(MyContainer, self).__init__(*args, **kwargs)
        self.uncertain = NormalDistribution()
        self.add('dyntrait', Float(9., desc='some desc'))
def test_ei_zero_division(self):
    """With sigma == 0 the EI/PI computation must degrade gracefully to
    zero instead of dividing by zero."""
    improver = ExpectedImprovement()
    improver.target = 1.0
    improver.current = NormalDistribution(mu=1, sigma=0)
    improver.execute()
    self.assertEqual(0, improver.EI)
    self.assertEqual(0, improver.PI)
def test_ei(self):
    """EI/PI for a unit-sigma prediction centered on the target."""
    improver = ExpectedImprovement()
    improver.target = 1.0
    improver.current = NormalDistribution(mu=1, sigma=1)
    improver.execute()
    self.assertAlmostEqual([0.40], improver.EI, 2)
    self.assertAlmostEqual(0.5, improver.PI, 6)
def test_ei_zero_division(self):
    """Zero-sigma prediction through the CaseSet-based interface must
    yield EI == PI == 0 rather than a division error."""
    improver = ExpectedImprovement()
    improver.best_case = CaseSet(Case(outputs=[("y", 1)]))
    improver.criteria = "y"
    improver.predicted_value = NormalDistribution(mu=1, sigma=0)
    improver.execute()
    self.assertEqual(0, improver.EI)
    self.assertEqual(0, improver.PI)
def test_ei(self):
    """EI/PI through the CaseSet-based interface for a unit-sigma
    prediction centered on the best case."""
    improver = ExpectedImprovement()
    improver.best_case = CaseSet(Case(outputs=[("y", 1)]))
    improver.criteria = "y"
    improver.predicted_value = NormalDistribution(mu=1, sigma=1)
    improver.execute()
    self.assertAlmostEqual([0.91], improver.EI, 2)
    self.assertAlmostEqual(0.5, improver.PI, 6)
def test_ei_calc_switch(self):
    """EI with three objectives (CaseSet interface) must raise ValueError."""
    ei = MultiObjExpectedImprovement(3)
    bests = CaseSet()
    bests.record(Case(outputs=[("y1", 1), ("y2", 1), ("y3", 1)]))
    ei.best_cases = bests
    ei.criteria = ['y1', 'y2', 'y3']
    ei.predicted_values = [NormalDistribution(mu=1, sigma=1),
                           NormalDistribution(mu=1, sigma=1),
                           NormalDistribution(mu=1, sigma=1)]
    ei.calc_switch = 'EI'
    try:
        ei.execute()
    except ValueError as err:  # `as` form works on Python 2.6+ and 3.x
        self.assertEqual(str(err), ': EI calculations not supported'
                                   ' for more than 2 objectives')
    else:
        # BUG FIX: the original silently passed when no exception was
        # raised at all.
        self.fail('expected ValueError')
def test_reset_y_star_event(self):
    """Setting reset_y_star and supplying new best cases must update
    y_star on the next execute."""
    ei = MultiObjExpectedImprovement(3)
    bests = CaseSet()
    bests.record(Case(outputs=[("y1", 1), ("y2", 1), ("y3", 1)]))
    ei.best_cases = bests
    ei.criteria = ['y1', 'y2', 'y3']
    ei.predicted_values = [NormalDistribution(mu=1, sigma=1),
                           NormalDistribution(mu=1, sigma=1),
                           NormalDistribution(mu=1, sigma=1)]
    ei.execute()
    bests = CaseSet()
    bests.record(Case(outputs=[("y1", 2), ("y2", 2), ("y3", 2)]))
    ei.best_cases = bests
    ei.reset_y_star = True
    ei.execute()
    # BUG FIX: the original compared `a.all() == b.all()`, which compares
    # two booleans (both True for any all-nonzero arrays) and could never
    # fail.  Assert element-wise equality instead.
    self.assertTrue((ei.y_star == array([2, 2, 2])).all())
def setUp(self):
    """Build 20 cases mixing plain floats with NormalDistribution outputs
    and wrap them in a ListCaseIterator."""
    cases = []
    for idx in range(20):
        case = Case(inputs=[('comp1.x', float(idx)), ('comp1.y', idx * 2.)])
        case._outputs = {'comp1.z': idx * 1.5,
                         'comp2.normal': NormalDistribution(float(idx), 0.5)}
        cases.append(case)
    self.caseiter = ListCaseIterator(cases)
    self.varnames = ['comp2.normal', 'comp1.x', 'comp1.z']
def test_ei_bad_criteria(self):
    """A criteria name absent from the best case must raise ValueError."""
    ei = ExpectedImprovement()
    ei.best_case = CaseSet(Case(outputs=[("y", 1)]))
    ei.criteria = "x"
    ei.predicted_value = NormalDistribution(mu=1, sigma=1)
    try:
        ei.execute()
    except ValueError as err:  # `as` form works on Python 2.6+ and 3.x
        self.assertEqual(str(err), ": best_case did not have an output which "
                                   "matched the criteria, 'x'")
    else:
        # BUG FIX: the original silently passed when no exception was
        # raised at all.
        self.fail('expected ValueError')
def predict(self, new_x):
    """Predict the response at *new_x* with the trained Kriging model.

    Returns a NormalDistribution whose mu is the predicted mean and whose
    sigma is the root-mean-squared prediction error.

    Raises RuntimeError if the surrogate has not been trained.
    """
    if self.m is None:  # untrained surrogate
        raise RuntimeError("KrigingSurrogate has not been trained, so no "
                           "prediction can be made")

    # Correlation vector between the new point and each training point.
    r = zeros(self.n)
    X, Y = self.X, self.Y
    thetas = 10.**self.thetas
    XX = array(X)
    new_x = array(new_x)
    for i in range(self.n):
        r[i] = sum(thetas * (XX[i] - new_x)**2.)
    r = exp(-r)

    one = ones(self.n)
    # Solve for all three right-hand sides at once; each column of the
    # solution feeds a separate term of the prediction / MSE formulas.
    # (This rhs and the f/term1/term2 expressions below were previously
    # duplicated verbatim in both branches; a dead triple-quoted string
    # repeating the LSTSQ branch has also been removed.)
    rhs = vstack([(Y - dot(one, self.mu)), r, one]).T
    if self.R_fact is not None:
        # Cholesky factorization available: cho_solve needs the stored
        # factor transposed, with the lower/upper flag flipped to match.
        R_fact = (self.R_fact[0].T, not self.R_fact[1])
        sol = cho_solve(R_fact, rhs).T
    else:
        # Fall back to least squares on the correlation matrix.
        sol = lstsq(self.R.T, rhs)[0].T

    f = self.mu + dot(r, sol[0])
    term1 = dot(r, sol[1])
    term2 = (1.0 - dot(one, sol[1]))**2. / dot(one, sol[2])

    # abs() guards against a slightly negative MSE from round-off.
    MSE = self.sig2 * (1.0 - term1 + term2)
    RMSE = sqrt(abs(MSE))

    return NormalDistribution(f, RMSE)
def test_pickle_conversion(self):
    """Values stored through the DB recorder must round-trip: floats stay
    floats and NormalDistribution objects are restored intact."""
    recorder = DBCaseRecorder()
    for i in range(10):
        recorder.record(Case(
            inputs=[('comp1.x', i), ('comp1.y', i * 2.)],
            outputs=[('comp1.z', i * 1.5),
                     ('comp2.normal', NormalDistribution(float(i), 0.5))],
            label='case%s' % i))
    for i, case in enumerate(recorder.get_iterator()):
        dist = case['comp2.normal']
        self.assertTrue(isinstance(dist, NormalDistribution))
        self.assertEqual(dist.mu, float(i))
        self.assertEqual(dist.sigma, 0.5)
        self.assertTrue(isinstance(case['comp1.y'], float))
        self.assertEqual(case['comp1.y'], i * 2.)
        self.assertEqual(case['comp1.z'], i * 1.5)
def test_query(self):
    """Selector expressions on the iterator must restrict every returned
    value to the requested range."""
    recorder = DBCaseRecorder()
    for i in range(10):
        recorder.record(Case(
            inputs=[('comp1.x', i), ('comp1.y', i * 2.)],
            outputs=[('comp1.z', i * 1.5),
                     ('comp2.normal', NormalDistribution(float(i), 0.5))],
            label='case%s' % i))
    iterator = recorder.get_iterator()
    iterator.selectors = ["value>=0", "value<3"]
    # NOTE: removed an unused `count` accumulator that was incremented
    # but never asserted -- dead code.
    for case in iterator:
        for name, value in case.items():
            self.assertTrue(value >= 0 and value < 3)
def test_query(self):
    """Selector expressions (register/record interface) must restrict
    every returned value to the requested range."""
    recorder = DBCaseRecorder()
    recorder.register(self, ['comp1.x', 'comp1.y'],
                      ['comp1.z', 'comp2.normal'])
    for i in range(10):
        recorder.record(self, [i, i * 2.],
                        [i * 1.5, NormalDistribution(float(i), 0.5)],
                        None, '', '')
    iterator = recorder.get_iterator()
    iterator.selectors = ["value>=0", "value<3"]
    # NOTE: removed an unused `count` accumulator that was incremented
    # but never asserted -- dead code.
    for case in iterator:
        for value in case.values():
            self.assertTrue(value >= 0 and value < 3)
def test_pickle_conversion(self):
    """Round-trip check through the register/record interface: floats stay
    floats and NormalDistribution objects are restored intact."""
    recorder = DBCaseRecorder()
    recorder.register(self, ['comp1.x', 'comp1.y'],
                      ['comp1.z', 'comp2.normal'])
    for i in range(10):
        recorder.record(self, [i, i * 2.],
                        [i * 1.5, NormalDistribution(float(i), 0.5)],
                        None, '', '')
    for i, case in enumerate(recorder.get_iterator()):
        dist = case['comp2.normal']
        self.assertTrue(isinstance(dist, NormalDistribution))
        self.assertEqual(dist.mu, float(i))
        self.assertEqual(dist.sigma, 0.5)
        self.assertTrue(isinstance(case['comp1.y'], float))
        self.assertEqual(case['comp1.y'], i * 2.)
        self.assertEqual(case['comp1.z'], i * 1.5)
def __init__(self):
    """Set up the container with a default-constructed NormalDistribution
    and a dynamically-added Float trait."""
    super(MyContainer, self).__init__()
    self.uncertain = NormalDistribution()
    # 'dyntrait' is created at runtime rather than declared on the class
    self.add('dyntrait', Float(9., desc='some desc'))
def get_uncertain_value(self, value):
    """Return a NormalDistribution centered on *value* with a standard
    deviation of 0 (i.e. a degenerate, certain distribution)."""
    return NormalDistribution(value, 0.)