def test_parab_subbed_Pcomps(self):
    model = Problem(impl=impl)
    root = model.root = Group()
    root.ln_solver = lin_solver()

    par = root.add('par', ParallelGroup())
    par.add('s1', MP_Point(root=2.0))
    par.add('s2', MP_Point(root=3.0))

    root.add('sumcomp', ExecComp('sum = x1+x2'))
    root.connect('par.s1.c.y', 'sumcomp.x1')
    root.connect('par.s2.c.y', 'sumcomp.x2')

    driver = model.driver = pyOptSparseDriver()
    driver.add_param('par.s1.p.x', low=-100, high=100)
    driver.add_param('par.s2.p.x', low=-100, high=100)
    driver.add_objective('sumcomp.sum')

    model.setup(check=False)
    model.run()

    if not MPI or self.comm.rank == 0:
        assert_rel_error(self, model['par.s1.p.x'], 2.0, 1.e-6)

    if not MPI or self.comm.rank == 1:
        assert_rel_error(self, model['par.s2.p.x'], 3.0, 1.e-6)

def test_converge_diverge_compfd(self):
    prob = Problem(impl=impl)
    prob.root = ConvergeDivergePar()
    prob.root.ln_solver = PetscKSP()

    # Finite-difference comp2 and comp5; each one is under a parallel group.
    prob.root.par1.comp2.fd_options['force_fd'] = True
    prob.root.par2.comp5.fd_options['force_fd'] = True

    prob.setup(check=False)
    prob.run()

    # Make sure the value is fine.
    assert_rel_error(self, prob['comp7.y1'], -102.7, 1e-6)

    indep_list = ['p.x']
    unknown_list = ['comp7.y1']

    J = prob.calc_gradient(indep_list, unknown_list, mode='fwd',
                           return_format='dict')
    assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)

    J = prob.calc_gradient(indep_list, unknown_list, mode='rev',
                           return_format='dict')
    assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)

    J = prob.calc_gradient(indep_list, unknown_list, mode='fd',
                           return_format='dict')
    assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)

def test_parab_FD_subbed_Pcomps(self):
    model = Problem(impl=impl)
    root = model.root = Group()

    par = root.add('par', ParallelGroup())
    par.add('s1', MP_Point(root=2.0))
    par.add('s2', MP_Point(root=3.0))

    root.add('sumcomp', ExecComp('sum = x1+x2'))
    root.connect('par.s1.c.y', 'sumcomp.x1')
    root.connect('par.s2.c.y', 'sumcomp.x2')

    driver = model.driver = pyOptSparseDriver()
    driver.add_param('par.s1.p.x', low=-100, high=100)
    driver.add_param('par.s2.p.x', low=-100, high=100)
    driver.add_objective('sumcomp.sum')

    root.fd_options['force_fd'] = True

    model.setup(check=False)
    model.run()

    if not MPI or self.comm.rank == 0:
        assert_rel_error(self, model['par.s1.p.x'], 2.0, 1.e-6)

    if not MPI or self.comm.rank == 1:
        assert_rel_error(self, model['par.s2.p.x'], 3.0, 1.e-6)

def test_one_dim_bi_fidelity_training(self):
    mm = MultiFiMetaModel(nfi=2)
    mm.add_param('x', 0.)
    surr = MockSurrogate()
    mm.add_output('y', 0., surrogate=surr)

    prob = Problem(Group())
    prob.root.add('mm', mm)
    prob.setup(check=False)

    prob['mm.train:x'] = [0.0, 0.4, 1.0]
    prob['mm.train:x_fi2'] = [0.1, 0.2, 0.3, 0.5, 0.6, 0.7, 0.8,
                              0.9, 0.0, 0.4, 1.0]
    prob['mm.train:y'] = [3.02720998, 0.11477697, 15.82973195]
    prob['mm.train:y_fi2'] = [-9.32828839, -8.31986355, -7.00778837,
                              -4.54535129, -4.0747189, -5.30287702,
                              -4.47456522, 1.85597517, -8.48639501,
                              -5.94261151, 7.91486597]

    expected_xtrain = [np.array([[0.0], [0.4], [1.0]]),
                       np.array([[0.1], [0.2], [0.3], [0.5], [0.6], [0.7],
                                 [0.8], [0.9], [0.0], [0.4], [1.0]])]
    expected_ytrain = [np.array([[3.02720998], [0.11477697], [15.82973195]]),
                       np.array([[-9.32828839], [-8.31986355], [-7.00778837],
                                 [-4.54535129], [-4.0747189], [-5.30287702],
                                 [-4.47456522], [1.85597517], [-8.48639501],
                                 [-5.94261151], [7.91486597]])]

    prob.run()

    np.testing.assert_array_equal(surr.xtrain[0], expected_xtrain[0])
    np.testing.assert_array_equal(surr.xtrain[1], expected_xtrain[1])
    np.testing.assert_array_equal(surr.ytrain[0], expected_ytrain[0])
    np.testing.assert_array_equal(surr.ytrain[1], expected_ytrain[1])

def test_double_arraycomp(self):
    # Mainly testing a bug in the array return for multiple arrays
    group = Group()
    group.add('x_param1', IndepVarComp('x1', np.ones((2))), promotes=['*'])
    group.add('x_param2', IndepVarComp('x2', np.ones((2))), promotes=['*'])
    group.add('mycomp', DoubleArrayComp(), promotes=['*'])

    prob = Problem(impl=impl)
    prob.root = group
    prob.root.ln_solver = PetscKSP()
    prob.setup(check=False)
    prob.run()

    Jbase = group.mycomp.JJ

    J = prob.calc_gradient(['x1', 'x2'], ['y1', 'y2'], mode='fwd',
                           return_format='array')
    diff = np.linalg.norm(J - Jbase)
    assert_rel_error(self, diff, 0.0, 1e-8)

    J = prob.calc_gradient(['x1', 'x2'], ['y1', 'y2'], mode='fd',
                           return_format='array')
    diff = np.linalg.norm(J - Jbase)
    assert_rel_error(self, diff, 0.0, 1e-8)

    J = prob.calc_gradient(['x1', 'x2'], ['y1', 'y2'], mode='rev',
                           return_format='array')
    diff = np.linalg.norm(J - Jbase)
    assert_rel_error(self, diff, 0.0, 1e-8)

def test_fan_in_grouped(self):
    prob = Problem(impl=impl)
    prob.root = FanInGrouped()
    prob.root.ln_solver = PetscKSP()

    param_list = ['p1.x1', 'p2.x2']
    unknown_list = ['comp3.y']

    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(param_list, unknown_list, mode='fwd',
                           return_format='dict')
    assert_rel_error(self, J['comp3.y']['p1.x1'][0][0], -6.0, 1e-6)
    assert_rel_error(self, J['comp3.y']['p2.x2'][0][0], 35.0, 1e-6)

    J = prob.calc_gradient(param_list, unknown_list, mode='rev',
                           return_format='dict')
    assert_rel_error(self, J['comp3.y']['p1.x1'][0][0], -6.0, 1e-6)
    assert_rel_error(self, J['comp3.y']['p2.x2'][0][0], 35.0, 1e-6)

def test_unequal_training_outputs(self):
    meta = MetaModel()
    meta.add_param('x', 0.)
    meta.add_param('y', 0.)
    meta.add_output('f', 0.)
    meta.default_surrogate = FloatKrigingSurrogate()

    prob = Problem(Group())
    prob.root.add('meta', meta)
    prob.setup(check=False)

    prob['meta.train:x'] = [1.0, 1.0, 1.0, 1.0]
    prob['meta.train:y'] = [1.0, 2.0, 3.0, 4.0]
    prob['meta.train:f'] = [1.0, 1.0]

    prob['meta.x'] = 1.0
    prob['meta.y'] = 1.0

    with self.assertRaises(RuntimeError) as cm:
        prob.run()

    expected = "MetaModel: Each variable must have the same number" \
               " of training points. Expected 4 but found" \
               " 2 points for 'f'."
    self.assertEqual(str(cm.exception), expected)

def test_one_dim_one_fidelity_training(self):
    mm = MultiFiMetaModel()
    mm.add_param('x', 0.)
    surr = MockSurrogate()
    mm.add_output('y', 0., surrogate=surr)

    prob = Problem(Group())
    prob.root.add('mm', mm)
    prob.setup(check=False)

    prob['mm.train:x'] = [0.0, 0.4, 1.0]
    prob['mm.train:y'] = [3.02720998, 0.11477697, 15.82973195]

    expected_xtrain = [np.array([[0.0], [0.4], [1.0]])]
    expected_ytrain = [np.array([[3.02720998], [0.11477697], [15.82973195]])]

    prob.run()
    np.testing.assert_array_equal(surr.xtrain, expected_xtrain)
    np.testing.assert_array_equal(surr.ytrain, expected_ytrain)

    expected_xpredict = 0.5
    prob['mm.x'] = expected_xpredict
    prob.run()
    np.testing.assert_array_equal(surr.xpredict, expected_xpredict)

def test_indices(self):
    size = 10

    root = Group()
    root.add('P1', ParamComp('x', np.zeros(size)))
    root.add('C1', ExecComp('y = x * 2.',
                            y=np.zeros(size // 2), x=np.zeros(size // 2)))
    root.add('C2', ExecComp('y = x * 3.',
                            y=np.zeros(size // 2), x=np.zeros(size // 2)))

    root.connect('P1.x', 'C1.x', src_indices=list(range(size // 2)))
    root.connect('P1.x', 'C2.x', src_indices=list(range(size // 2, size)))

    prob = Problem(root)
    prob.setup(check=False)

    root.P1.unknowns['x'][0:size // 2] += 1.0
    root.P1.unknowns['x'][size // 2:size] -= 1.0

    prob.run()

    assert_rel_error(self, root.C1.params['x'], np.ones(size // 2), 0.0001)
    assert_rel_error(self, root.C2.params['x'], -np.ones(size // 2), 0.0001)

def test_parab_FD(self):
    model = Problem(impl=impl)
    root = model.root = Group()

    par = root.add('par', ParallelGroup())
    par.add('c1', Parab1D(root=2.0))
    par.add('c2', Parab1D(root=3.0))

    root.add('p1', ParamComp('x', val=0.0))
    root.add('p2', ParamComp('x', val=0.0))
    root.connect('p1.x', 'par.c1.x')
    root.connect('p2.x', 'par.c2.x')

    root.add('sumcomp', ExecComp('sum = x1+x2'))
    root.connect('par.c1.y', 'sumcomp.x1')
    root.connect('par.c2.y', 'sumcomp.x2')

    driver = model.driver = pyOptSparseDriver()
    driver.add_param('p1.x', low=-100, high=100)
    driver.add_param('p2.x', low=-100, high=100)
    driver.add_objective('sumcomp.sum')

    root.fd_options['force_fd'] = True

    model.setup(check=False)
    model.run()

    if not MPI or self.comm.rank == 0:
        assert_rel_error(self, model['p1.x'], 2.0, 1.e-6)
        assert_rel_error(self, model['p2.x'], 3.0, 1.e-6)

def test_unequal_training_inputs(self):
    meta = MetaModel()
    meta.add_param('x', 0.)
    meta.add_param('y', 0.)
    meta.add_output('f', 0.)
    meta.default_surrogate = FloatKrigingSurrogate()

    prob = Problem(Group())
    prob.root.add('meta', meta)
    prob.setup(check=False)

    prob['meta.train:x'] = [1.0, 1.0, 1.0, 1.0]
    prob['meta.train:y'] = [1.0, 2.0]
    prob['meta.train:f'] = [1.0, 1.0, 1.0, 1.0]

    prob['meta.x'] = 1.0
    prob['meta.y'] = 1.0

    with self.assertRaises(RuntimeError) as cm:
        prob.run()

    expected = "MetaModel: Each variable must have the same number" \
               " of training points. Expected 4 but found" \
               " 2 points for 'y'."
    self.assertEqual(str(cm.exception), expected)

def test_derivatives(self):
    meta = MetaModel()
    meta.add_param('x', 0.)
    meta.add_output('f', 0.)
    meta.default_surrogate = FloatKrigingSurrogate()

    prob = Problem(Group())
    prob.root.add('meta', meta, promotes=['x'])
    prob.root.add('p', IndepVarComp('x', 0.), promotes=['x'])
    prob.setup(check=False)

    prob['meta.train:x'] = [0., .25, .5, .75, 1.]
    prob['meta.train:f'] = [1., .75, .5, .25, 0.]

    prob['x'] = 0.125
    prob.run()

    Jf = prob.calc_gradient(['x'], ['meta.f'], mode='fwd')
    Jr = prob.calc_gradient(['x'], ['meta.f'], mode='rev')

    assert_rel_error(self, Jf[0][0], -1.00011, 1.0e-5)
    assert_rel_error(self, Jr[0][0], -1.00011, 1.0e-5)

    stream = cStringIO()
    prob.check_partial_derivatives(out_stream=stream)

    # use a raw string so the regex backslashes are taken literally
    abs_errors = findall(r'Absolute Error \(.+\) : (.+)', stream.getvalue())
    self.assertTrue(len(abs_errors) > 0)
    for match in abs_errors:
        abs_error = float(match)
        self.assertTrue(abs_error < 1e-6)

def test_array_outputs(self):
    meta = MetaModel()
    meta.add_param('x', np.zeros((2, 2)))
    meta.add_output('y', np.zeros(2,))
    meta.default_surrogate = FloatKrigingSurrogate()

    prob = Problem(Group())
    prob.root.add('meta', meta)
    prob.setup(check=False)

    prob['meta.train:x'] = [
        [[1.0, 1.0], [1.0, 1.0]],
        [[2.0, 1.0], [1.0, 1.0]],
        [[1.0, 2.0], [1.0, 1.0]],
        [[1.0, 1.0], [2.0, 1.0]],
        [[1.0, 1.0], [1.0, 2.0]]
    ]
    prob['meta.train:y'] = [[3.0, 1.0],
                            [2.0, 4.0],
                            [1.0, 7.0],
                            [6.0, -3.0],
                            [-2.0, 3.0]]

    prob['meta.x'] = [[1.0, 2.0], [1.0, 1.0]]
    prob.run()

    assert_rel_error(self, prob['meta.y'], np.array([1.0, 7.0]), .00001)

def test_fan_out_grouped(self):
    prob = Problem(impl=impl)
    prob.root = FanOutGrouped()
    prob.root.ln_solver = PetscKSP()
    prob.setup(check=False)
    prob.run()

    param = 'sub.pgroup.p.x'
    unknown_list = ['sub.comp2.y', 'sub.comp3.y']

    J = prob.calc_gradient([param], unknown_list, mode='fwd',
                           return_format='dict')
    assert_rel_error(self, J[unknown_list[0]][param][0][0], -6.0, 1e-6)
    assert_rel_error(self, J[unknown_list[1]][param][0][0], 15.0, 1e-6)

    J = prob.calc_gradient([param], unknown_list, mode='rev',
                           return_format='dict')
    assert_rel_error(self, J[unknown_list[0]][param][0][0], -6.0, 1e-6)
    assert_rel_error(self, J[unknown_list[1]][param][0][0], 15.0, 1e-6)

def test_converge_diverge_groups(self):
    prob = Problem()
    prob.root = ConvergeDivergeGroups()
    prob.root.ln_solver = ExplicitSolver()
    prob.setup(check=False)
    prob.run()

    # Make sure value is fine.
    assert_rel_error(self, prob['comp7.y1'], -102.7, 1e-6)

    param_list = ['p.x']
    unknown_list = ['comp7.y1']

    J = prob.calc_gradient(param_list, unknown_list, mode='fwd',
                           return_format='dict')
    assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)

    J = prob.calc_gradient(param_list, unknown_list, mode='rev',
                           return_format='dict')
    assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)

    J = prob.calc_gradient(param_list, unknown_list, mode='fd',
                           return_format='dict')
    assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)

def test_double_arraycomp(self):
    # Mainly testing a bug in the array return for multiple arrays
    group = Group()
    group.add('x_param1', ParamComp('x1', np.ones((2))), promotes=['*'])
    group.add('x_param2', ParamComp('x2', np.ones((2))), promotes=['*'])
    group.add('mycomp', DoubleArrayComp(), promotes=['*'])

    prob = Problem(impl=impl)
    prob.root = group
    prob.root.ln_solver = PetscKSP()
    prob.setup(check=False)
    prob.run()

    Jbase = group.mycomp.JJ

    J = prob.calc_gradient(['x1', 'x2'], ['y1', 'y2'], mode='fwd',
                           return_format='array')
    diff = np.linalg.norm(J - Jbase)
    assert_rel_error(self, diff, 0.0, 1e-8)

    J = prob.calc_gradient(['x1', 'x2'], ['y1', 'y2'], mode='fd',
                           return_format='array')
    diff = np.linalg.norm(J - Jbase)
    assert_rel_error(self, diff, 0.0, 1e-8)

    J = prob.calc_gradient(['x1', 'x2'], ['y1', 'y2'], mode='rev',
                           return_format='array')
    diff = np.linalg.norm(J - Jbase)
    assert_rel_error(self, diff, 0.0, 1e-8)

def test_array_inputs(self):
    meta = MetaModel()
    meta.add_param("x", np.zeros((2, 2)))
    meta.add_output("y1", 0.0)
    meta.add_output("y2", 0.0)
    meta.default_surrogate = FloatKrigingSurrogate()

    prob = Problem(Group())
    prob.root.add("meta", meta)
    prob.setup(check=False)

    prob["meta.train:x"] = [
        [[1.0, 1.0], [1.0, 1.0]],
        [[2.0, 1.0], [1.0, 1.0]],
        [[1.0, 2.0], [1.0, 1.0]],
        [[1.0, 1.0], [2.0, 1.0]],
        [[1.0, 1.0], [1.0, 2.0]],
    ]
    prob["meta.train:y1"] = [3.0, 2.0, 1.0, 6.0, -2.0]
    prob["meta.train:y2"] = [1.0, 4.0, 7.0, -3.0, 3.0]

    prob["meta.x"] = [[1.0, 2.0], [1.0, 1.0]]
    prob.run()

    assert_rel_error(self, prob["meta.y1"], 1.0, 0.00001)
    assert_rel_error(self, prob["meta.y2"], 7.0, 0.00001)

def test_simple_array_model2(self):
    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', ExecComp('y = mat.dot(x)',
                                          x=np.zeros((2,)), y=np.zeros((2,)),
                                          mat=np.array([[2., 7.], [5., -3.]])))

    p1 = prob.root.add('p1', ParamComp('x', np.ones([2])))
    prob.root.connect('p1.x', 'comp.x')

    prob.setup(check=False)
    prob.run()

    data = prob.check_partial_derivatives(out_stream=None)

    assert_rel_error(self, data['comp'][('y', 'x')]['abs error'][0], 0.0, 1e-5)
    assert_rel_error(self, data['comp'][('y', 'x')]['abs error'][1], 0.0, 1e-5)
    assert_rel_error(self, data['comp'][('y', 'x')]['abs error'][2], 0.0, 1e-5)
    assert_rel_error(self, data['comp'][('y', 'x')]['rel error'][0], 0.0, 1e-5)
    assert_rel_error(self, data['comp'][('y', 'x')]['rel error'][1], 0.0, 1e-5)
    assert_rel_error(self, data['comp'][('y', 'x')]['rel error'][2], 0.0, 1e-5)

def test_simple_array_model(self):
    prob = Problem()
    prob.root = Group()
    prob.root.add('comp', ExecComp(['y[0]=2.0*x[0]+7.0*x[1]',
                                    'y[1]=5.0*x[0]-3.0*x[1]'],
                                   x=np.zeros([2]), y=np.zeros([2])))

    prob.root.add('p1', ParamComp('x', np.ones([2])))
    prob.root.connect('p1.x', 'comp.x')

    prob.setup(check=False)
    prob.run()

    data = prob.check_partial_derivatives(out_stream=None)

    assert_rel_error(self, data['comp'][('y', 'x')]['abs error'][0], 0.0, 1e-5)
    assert_rel_error(self, data['comp'][('y', 'x')]['abs error'][1], 0.0, 1e-5)
    assert_rel_error(self, data['comp'][('y', 'x')]['abs error'][2], 0.0, 1e-5)
    assert_rel_error(self, data['comp'][('y', 'x')]['rel error'][0], 0.0, 1e-5)
    assert_rel_error(self, data['comp'][('y', 'x')]['rel error'][1], 0.0, 1e-5)
    assert_rel_error(self, data['comp'][('y', 'x')]['rel error'][2], 0.0, 1e-5)

def test_single_diamond_grouped(self):
    prob = Problem()
    prob.root = SingleDiamondGrouped()
    prob.root.ln_solver = ExplicitSolver()
    prob.setup(check=False)
    prob.run()

    param_list = ['p.x']
    unknown_list = ['comp4.y1', 'comp4.y2']

    J = prob.calc_gradient(param_list, unknown_list, mode='fwd',
                           return_format='dict')
    assert_rel_error(self, J['comp4.y1']['p.x'][0][0], 25, 1e-6)
    assert_rel_error(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-6)

    J = prob.calc_gradient(param_list, unknown_list, mode='rev',
                           return_format='dict')
    assert_rel_error(self, J['comp4.y1']['p.x'][0][0], 25, 1e-6)
    assert_rel_error(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-6)

    J = prob.calc_gradient(param_list, unknown_list, mode='fd',
                           return_format='dict')
    assert_rel_error(self, J['comp4.y1']['p.x'][0][0], 25, 1e-6)
    assert_rel_error(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-6)

def test_vector_inputs(self):
    meta = MetaModel()
    meta.add_param('x', np.zeros(4))
    meta.add_output('y1', 0.)
    meta.add_output('y2', 0.)
    meta.default_surrogate = FloatKrigingSurrogate()

    prob = Problem(Group())
    prob.root.add('meta', meta)
    prob.setup(check=False)

    prob['meta.train:x'] = [
        [1.0, 1.0, 1.0, 1.0],
        [2.0, 1.0, 1.0, 1.0],
        [1.0, 2.0, 1.0, 1.0],
        [1.0, 1.0, 2.0, 1.0],
        [1.0, 1.0, 1.0, 2.0]
    ]
    prob['meta.train:y1'] = [3.0, 2.0, 1.0, 6.0, -2.0]
    prob['meta.train:y2'] = [1.0, 4.0, 7.0, -3.0, 3.0]

    prob['meta.x'] = [1.0, 2.0, 1.0, 1.0]
    prob.run()

    assert_rel_error(self, prob['meta.y1'], 1.0, .00001)
    assert_rel_error(self, prob['meta.y2'], 7.0, .00001)

def test_fd_options_meta_step_size(self):

    class MetaParaboloid(Component):
        """ Evaluates the equation f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3 """

        def __init__(self):
            super(MetaParaboloid, self).__init__()

            # Params
            self.add_param('x', 1.0, fd_step_size=1.0e5)
            self.add_param('y', 1.0, fd_step_size=1.0e5)

            # Unknowns
            self.add_output('f_xy', 0.0)

        def solve_nonlinear(self, params, unknowns, resids):
            """f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3
            Optimal solution (minimum): x = 6.6667; y = -7.3333
            """
            x = params['x']
            y = params['y']

            unknowns['f_xy'] = (x - 3.0)**2 + x*y + (y + 4.0)**2 - 3.0

        def jacobian(self, params, unknowns, resids):
            """Analytical derivatives"""
            x = params['x']
            y = params['y']

            J = {}
            J['f_xy', 'x'] = 2.0*x - 6.0 + y
            J['f_xy', 'y'] = 2.0*y + 8.0 + x
            return J

    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', MetaParaboloid())
    prob.root.add('p1', ParamComp('x', 15.0))
    prob.root.add('p2', ParamComp('y', 15.0))
    prob.root.connect('p1.x', 'comp.x')
    prob.root.connect('p2.y', 'comp.y')

    comp.fd_options['force_fd'] = True

    prob.setup(check=False)
    prob.run()

    # Make sure the deliberately bad step size from the variable metadata
    # is used; the derivative should come out far too large with it.
    J = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    self.assertGreater(J['comp.f_xy']['p1.x'][0][0], 1000.0)

def test_math(self):
    prob = Problem(root=Group())
    C1 = prob.root.add('C1', ExecComp('y=sin(x)', x=2.0))

    self.assertTrue('x' in C1._params_dict)
    self.assertTrue('y' in C1._unknowns_dict)

    prob.setup(check=False)
    prob.run()

    assert_rel_error(self, C1.unknowns['y'], math.sin(2.0), 0.00001)

def test_array(self):
    prob = Problem(root=Group())
    C1 = prob.root.add('C1', ExecComp('y=x[1]',
                                      x=np.array([1., 2., 3.]), y=0.0))

    self.assertTrue('x' in C1._params_dict)
    self.assertTrue('y' in C1._unknowns_dict)

    prob.setup(check=False)
    prob.run()

    assert_rel_error(self, C1.unknowns['y'], 2.0, 0.00001)

def test_mixed_type(self):
    prob = Problem(root=Group())
    C1 = prob.root.add('C1', ExecComp('y=numpy.sum(x)',
                                      x=np.arange(10, dtype=float)))

    self.assertTrue('x' in C1._params_dict)
    self.assertTrue('y' in C1._unknowns_dict)

    prob.setup(check=False)
    prob.run()

    assert_rel_error(self, C1.unknowns['y'], 45.0, 0.00001)

def test_array_lhs(self):
    prob = Problem(root=Group())
    C1 = prob.root.add('C1', ExecComp(['y[0]=x[1]', 'y[1]=x[0]'],
                                      x=np.array([1., 2., 3.]),
                                      y=np.array([0., 0.])))

    self.assertTrue('x' in C1._params_dict)
    self.assertTrue('y' in C1._unknowns_dict)

    prob.setup(check=False)
    prob.run()

    assert_rel_error(self, C1.unknowns['y'], np.array([2., 1.]), 0.00001)

def test_sellar_derivs_grouped(self):
    prob = Problem(impl=impl)
    prob.root = SellarDerivativesGrouped()
    prob.root.ln_solver = PetscKSP()
    prob.root.mda.nl_solver.options['atol'] = 1e-12
    prob.setup(check=False)
    prob.run()

    # Just make sure we are at the right answer
    assert_rel_error(self, prob['y1'], 25.58830273, .00001)
    assert_rel_error(self, prob['y2'], 12.05848819, .00001)

    param_list = ['x', 'z']
    unknown_list = ['obj', 'con1', 'con2']

    Jbase = {}
    Jbase['con1'] = {}
    Jbase['con1']['x'] = -0.98061433
    Jbase['con1']['z'] = np.array([-9.61002285, -0.78449158])
    Jbase['con2'] = {}
    Jbase['con2']['x'] = 0.09692762
    Jbase['con2']['z'] = np.array([1.94989079, 1.0775421])
    Jbase['obj'] = {}
    Jbase['obj']['x'] = 2.98061392
    Jbase['obj']['z'] = np.array([9.61001155, 1.78448534])

    J = prob.calc_gradient(param_list, unknown_list, mode='fwd',
                           return_format='dict')
    for key1, val1 in Jbase.items():
        for key2, val2 in val1.items():
            assert_rel_error(self, J[key1][key2], val2, .00001)

    J = prob.calc_gradient(param_list, unknown_list, mode='rev',
                           return_format='dict')
    for key1, val1 in Jbase.items():
        for key2, val2 in val1.items():
            assert_rel_error(self, J[key1][key2], val2, .00001)

    prob.root.fd_options['form'] = 'central'
    J = prob.calc_gradient(param_list, unknown_list, mode='fd',
                           return_format='dict')
    for key1, val1 in Jbase.items():
        for key2, val2 in val1.items():
            assert_rel_error(self, J[key1][key2], val2, .00001)

def test_fan_out_grouped(self):
    prob = Problem(impl=impl)
    prob.root = root = Group()

    root.add('p', IndepVarComp('x', 1.0))
    root.add('comp1', ExecComp(['y=3.0*x']))

    sub = root.add('sub', ParallelGroup())
    sub.add('comp2', ExecComp(['y=-2.0*x']))
    sub.add('comp3', ExecComp(['y=5.0*x']))

    root.add('c2', ExecComp(['y=-x']))
    root.add('c3', ExecComp(['y=3.0*x']))
    root.connect('sub.comp2.y', 'c2.x')
    root.connect('sub.comp3.y', 'c3.x')

    root.connect('comp1.y', 'sub.comp2.x')
    root.connect('comp1.y', 'sub.comp3.x')
    root.connect('p.x', 'comp1.x')

    prob.root.ln_solver = LinearGaussSeidel()
    prob.root.sub.ln_solver = LinearGaussSeidel()

    prob.setup(check=False)
    prob.run()

    param = 'p.x'
    unknown_list = ['sub.comp2.y', 'sub.comp3.y']

    J = prob.calc_gradient([param], unknown_list, mode='fwd',
                           return_format='dict')
    assert_rel_error(self, J[unknown_list[0]][param][0][0], -6.0, 1e-6)
    assert_rel_error(self, J[unknown_list[1]][param][0][0], 15.0, 1e-6)

    J = prob.calc_gradient([param], unknown_list, mode='rev',
                           return_format='dict')
    assert_rel_error(self, J[unknown_list[0]][param][0][0], -6.0, 1e-6)
    assert_rel_error(self, J[unknown_list[1]][param][0][0], 15.0, 1e-6)

    unknown_list = ['c2.y', 'c3.y']

    J = prob.calc_gradient([param], unknown_list, mode='fwd',
                           return_format='dict')
    assert_rel_error(self, J[unknown_list[0]][param][0][0], 6.0, 1e-6)
    assert_rel_error(self, J[unknown_list[1]][param][0][0], 45.0, 1e-6)

    J = prob.calc_gradient([param], unknown_list, mode='rev',
                           return_format='dict')
    assert_rel_error(self, J[unknown_list[0]][param][0][0], 6.0, 1e-6)
    assert_rel_error(self, J[unknown_list[1]][param][0][0], 45.0, 1e-6)

def test_complex_step(self):
    prob = Problem(root=Group())
    C1 = prob.root.add('C1', ExecComp(['y=2.0*x+1.'], x=2.0))

    self.assertTrue('x' in C1._params_dict)
    self.assertTrue('y' in C1._unknowns_dict)

    prob.setup(check=False)
    prob.run()

    assert_rel_error(self, C1.unknowns['y'], 5.0, 0.00001)

    J = C1.jacobian(C1.params, C1.unknowns, C1.resids)
    assert_rel_error(self, J[('y', 'x')], 2.0, 0.00001)

def test_simple(self):
    group = Group()
    group.add('x_param', IndepVarComp('x', 1.0), promotes=['*'])
    group.add('mycomp', SimpleCompDerivMatVec(), promotes=['x', 'y'])

    prob = Problem(impl=impl)
    prob.root = group
    prob.root.ln_solver = PetscKSP()
    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')
    assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)

    J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')
    assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)

def test_simple(self):
    group = Group()
    group.add('x_param', ParamComp('x', 1.0), promotes=['*'])
    group.add('mycomp', SimpleCompDerivMatVec(), promotes=['x', 'y'])

    prob = Problem(impl=impl)
    prob.root = group
    prob.root.ln_solver = PetscKSP()
    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')
    assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)

    J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')
    assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)

def test_simple_jac(self):
    group = Group()
    group.add('x_param', ParamComp('x', 1.0), promotes=['*'])
    group.add('mycomp', ExecComp(['y=2.0*x']), promotes=['x', 'y'])

    prob = Problem()
    prob.root = group
    prob.root.ln_solver = ExplicitSolver()
    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')
    assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)

    J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')
    assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)

def test_array_to_scalar(self):
    root = Group()

    root.add('P1', ParamComp('x', np.array([2., 3.])))
    root.add('C1', SimpleComp())
    root.add('C2', ExecComp('y = x * 3.', y=0., x=0.))

    root.connect('P1.x', 'C1.x', src_indices=[0])
    root.connect('P1.x', 'C2.x', src_indices=[1])

    prob = Problem(root)
    prob.setup(check=False)
    prob.run()

    self.assertAlmostEqual(root.C1.params['x'], 2.)
    self.assertAlmostEqual(root.C2.params['x'], 3.)

def test_simple_jac(self):
    group = Group()
    group.add('x_param', IndepVarComp('x', 1.0), promotes=['*'])
    group.add('mycomp', ExecComp(['y=2.0*x']), promotes=['x', 'y'])

    prob = Problem()
    prob.root = group
    prob.root.ln_solver = DirectSolver()
    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')
    assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)

    J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')
    assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)

def test_complex_step2(self):
    prob = Problem(Group())
    comp = prob.root.add('comp', ExecComp('y=x*x + x*2.0'))
    prob.root.add('p1', ParamComp('x', 2.0))
    prob.root.connect('p1.x', 'comp.x')
    comp.fd_options['force_fd'] = False

    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(['p1.x'], ['comp.y'], mode='fwd',
                           return_format='dict')
    assert_rel_error(self, J['comp.y']['p1.x'], np.array([6.0]), 0.00001)

    J = prob.calc_gradient(['p1.x'], ['comp.y'], mode='rev',
                           return_format='dict')
    assert_rel_error(self, J['comp.y']['p1.x'], np.array([6.0]), 0.00001)

def test_fd_options_form(self):
    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', Paraboloid())
    prob.root.add('p1', ParamComp('x', 15.0))
    prob.root.add('p2', ParamComp('y', 15.0))
    prob.root.connect('p1.x', 'comp.x')
    prob.root.connect('p2.y', 'comp.y')

    comp.fd_options['force_fd'] = True
    comp.fd_options['form'] = 'forward'

    param_list = ['p1.x']
    unknowns_list = ['comp.f_xy']
    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(param_list, unknowns_list, return_format='dict')
    assert_rel_error(self, J['comp.f_xy']['p1.x'][0][0], 39.0, 1e-6)

    # Make sure it gives a good result with a small step size
    comp.fd_options['form'] = 'backward'
    J = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    assert_rel_error(self, J['comp.f_xy']['p1.x'][0][0], 39.0, 1e-6)

    # Make sure it gives a good result with a small step size
    comp.fd_options['form'] = 'central'
    J = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    assert_rel_error(self, J['comp.f_xy']['p1.x'][0][0], 39.0, 1e-6)

    # Now make sure we really are stepping forward and backward
    comp.fd_options['form'] = 'forward'
    comp.fd_options['step_size'] = 1e3
    J = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    self.assertGreater(J['comp.f_xy']['p1.x'][0][0], 0.0)

    comp.fd_options['form'] = 'backward'
    J = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    self.assertLess(J['comp.f_xy']['p1.x'][0][0], 0.0)

    # Central should get pretty close even for the bad step size
    comp.fd_options['form'] = 'central'
    J = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    assert_rel_error(self, J['comp.f_xy']['p1.x'][0][0], 39.0, 1e-1)

def test_subarray_to_promoted_var(self):
    root = Group()

    P = root.add('P', IndepVarComp('x', np.array([1., 2., 3., 4., 5.])))
    G = root.add('G', Group())
    C = root.add('C', SimpleComp())

    A = G.add('A', SimpleArrayComp())
    G2 = G.add('G2', Group())
    A2 = G2.add('A2', SimpleArrayComp())

    root.connect('P.x', 'G.A.x', src_indices=[0, 1])
    root.connect('P.x', 'C.x', src_indices=[2])
    root.connect('P.x', 'G.G2.A2.x', src_indices=[3, 4])

    prob = Problem(root)
    prob.setup(check=False)
    prob.run()

    assert_rel_error(self, root.G.A.params['x'], np.array([1., 2.]), 0.0001)
    self.assertAlmostEqual(root.C.params['x'], 3.)
    assert_rel_error(self, root.G.G2.A2.params['x'],
                     np.array([4., 5.]), 0.0001)

    # now try the same thing with promoted var
    root = Group()

    P = root.add('P', IndepVarComp('x', np.array([1., 2., 3., 4., 5.])))
    G = root.add('G', Group())
    C = root.add('C', SimpleComp())

    A = G.add('A', SimpleArrayComp(), promotes=['x', 'y'])
    G2 = G.add('G2', Group())
    A2 = G2.add('A2', SimpleArrayComp(), promotes=['x', 'y'])

    root.connect('P.x', 'G.x', src_indices=[0, 1])
    root.connect('P.x', 'C.x', src_indices=[2])
    root.connect('P.x', 'G.G2.x', src_indices=[3, 4])

    prob = Problem(root)
    prob.setup(check=False)
    prob.run()

    assert_rel_error(self, root.G.A.params['x'], np.array([1., 2.]), 0.0001)
    self.assertAlmostEqual(root.C.params['x'], 3.)
    assert_rel_error(self, root.G.G2.A2.params['x'],
                     np.array([4., 5.]), 0.0001)

def test_sin_metamodel(self):

    class Sin(Component):
        """ Simple sine calculation. """
        def __init__(self):
            super(Sin, self).__init__()
            self.add_param('x', 0., units="rad")
            self.add_output('f_x', 0.)

        def solve_nonlinear(self, params, unknowns, resids):
            unknowns['f_x'] = .5*sin(params['x'])

    # create a MetaModel for Sin and add it to a Problem
    sin_mm = MetaModel()
    sin_mm.add_param('x', 0.)
    sin_mm.add_output('f_x', 0.)

    prob = Problem(Group())
    prob.root.add('sin_mm', sin_mm)

    # check that missing surrogate is detected in check_setup
    stream = cStringIO()
    prob.setup(out_stream=stream)
    msg = ("No default surrogate model is defined and the "
           "following outputs do not have a surrogate model:\n"
           "['f_x']\n"
           "Either specify a default_surrogate, or specify a "
           "surrogate model for all outputs.")
    self.assertTrue(msg in stream.getvalue())

    # check that output with no specified surrogate gets the default
    sin_mm.default_surrogate = FloatKrigingSurrogate()
    prob.setup(check=False)
    surrogate = prob.root.unknowns.metadata('sin_mm.f_x').get('surrogate')
    self.assertTrue(isinstance(surrogate, FloatKrigingSurrogate),
                    'sin_mm.f_x should get the default surrogate')

    # train the surrogate and check predicted value
    prob['sin_mm.train:x'] = np.linspace(0, 10, 200)
    prob['sin_mm.train:f_x'] = .5*np.sin(prob['sin_mm.train:x'])
    prob['sin_mm.x'] = 2.22
    prob.run()

    self.assertAlmostEqual(prob['sin_mm.f_x'],
                           .5*np.sin(prob['sin_mm.x']), places=5)

def test_simple_in_group_matvec(self):
    group = Group()
    sub = group.add('sub', Group(), promotes=['x', 'y'])
    group.add('x_param', ParamComp('x', 1.0), promotes=['*'])
    sub.add('mycomp', SimpleCompDerivMatVec(), promotes=['x', 'y'])

    prob = Problem()
    prob.root = group
    prob.root.ln_solver = ExplicitSolver()
    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')
    assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)

    J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')
    assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)

def test_linear_system(self):
    root = Group()

    root.add('lin', LinearSystem(3))

    x = np.array([1, 2, -3])
    A = np.array([[5.0, -3.0, 2.0], [1.0, 7.0, -4.0], [1.0, 0.0, 8.0]])
    b = A.dot(x)

    root.add('p1', ParamComp('A', A))
    root.add('p2', ParamComp('b', b))
    root.connect('p1.A', 'lin.A')
    root.connect('p2.b', 'lin.b')

    prob = Problem(root)
    prob.setup(check=False)
    prob.run()

    # Make sure it gets the right answer
    assert_rel_error(self, prob['lin.x'], x, .0001)
    assert_rel_error(self, np.linalg.norm(prob.root.resids.vec), 0.0, 1e-10)

    # Compare against calculated derivs
    Ainv = np.linalg.inv(A)
    dx_dA = np.outer(Ainv, -x).reshape(3, 9)
    dx_db = Ainv

    J = prob.calc_gradient(['p1.A', 'p2.b'], ['lin.x'], mode='fwd',
                           return_format='dict')
    assert_rel_error(self, J['lin.x']['p1.A'], dx_dA, .0001)
    assert_rel_error(self, J['lin.x']['p2.b'], dx_db, .0001)

    J = prob.calc_gradient(['p1.A', 'p2.b'], ['lin.x'], mode='rev',
                           return_format='dict')
    assert_rel_error(self, J['lin.x']['p1.A'], dx_dA, .0001)
    assert_rel_error(self, J['lin.x']['p2.b'], dx_db, .0001)

    J = prob.calc_gradient(['p1.A', 'p2.b'], ['lin.x'], mode='fd',
                           return_format='dict')
    assert_rel_error(self, J['lin.x']['p1.A'], dx_dA, .0001)
    assert_rel_error(self, J['lin.x']['p2.b'], dx_db, .0001)

def test_single_diamond(self):
    prob = Problem(impl=impl)
    prob.root = SingleDiamond()
    prob.root.ln_solver = PetscKSP()
    prob.setup(check=False)
    prob.run()

    indep_list = ['p.x']
    unknown_list = ['comp4.y1', 'comp4.y2']

    J = prob.calc_gradient(indep_list, unknown_list, mode='fwd',
                           return_format='dict')
    assert_rel_error(self, J['comp4.y1']['p.x'][0][0], 25, 1e-6)
    assert_rel_error(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-6)

    J = prob.calc_gradient(indep_list, unknown_list, mode='rev',
                           return_format='dict')
    assert_rel_error(self, J['comp4.y1']['p.x'][0][0], 25, 1e-6)
    assert_rel_error(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-6)

def test_subarray_to_promoted_var(self):
    root = Group()

    P = root.add('P', ParamComp('x', np.array([1., 2., 3.])))
    G = root.add('G', Group())
    C = root.add('C', SimpleComp())
    A = G.add('A', SimpleArrayComp())

    root.connect('P.x', 'G.A.x', src_indices=[0, 1])
    root.connect('P.x', 'C.x', src_indices=[2])

    prob = Problem(root)
    prob.setup(check=False)
    prob.run()

    assert_rel_error(self, root.G.A.params['x'], np.array([1., 2.]), 0.0001)
    self.assertAlmostEqual(root.C.params['x'], 3.)

    # now try the same thing with a promoted var
    root = Group()

    P = root.add('P', ParamComp('x', np.array([1., 2., 3.])))
    G = root.add('G', Group())
    C = root.add('C', SimpleComp())
    A = G.add('A', SimpleArrayComp(), promotes=['x', 'y'])

    root.connect('P.x', 'G.x', src_indices=[0, 1])
    root.connect('P.x', 'C.x', src_indices=[2])

    prob = Problem(root)
    prob.setup(check=False)
    prob.run()

    assert_rel_error(self, root.G.A.params['x'], np.array([1., 2.]), 0.0001)
    self.assertAlmostEqual(root.C.params['x'], 3.)

def test_array2D(self):
    group = Group()
    group.add('x_param', ParamComp('x', np.ones((2, 2))), promotes=['*'])
    group.add('mycomp', ArrayComp2D(), promotes=['x', 'y'])

    prob = Problem()
    prob.root = group
    prob.root.ln_solver = ExplicitSolver()
    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')
    Jbase = prob.root.mycomp._jacobian_cache
    diff = np.linalg.norm(J['y']['x'] - Jbase['y', 'x'])
    assert_rel_error(self, diff, 0.0, 1e-8)

    J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')
    diff = np.linalg.norm(J['y']['x'] - Jbase['y', 'x'])
    assert_rel_error(self, diff, 0.0, 1e-8)

def test_overrides(self):

    class OverrideComp(Component):

        def __init__(self):
            super(OverrideComp, self).__init__()

            # Params
            self.add_param('x', 3.0)

            # Unknowns
            self.add_output('y', 5.5)

        def solve_nonlinear(self, params, unknowns, resids):
            """ Doesn't do much. """
            unknowns['y'] = 7.0 * params['x']

        def apply_linear(self, params, unknowns, dparams, dunknowns,
                         dresids, mode):
            """Should never be called."""
            raise RuntimeError("This should have been overridden by force_fd.")

        def jacobian(self, params, unknowns, resids):
            """Should never be called."""
            raise RuntimeError("This should have been overridden by force_fd.")

    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', OverrideComp())
    prob.root.add('p1', ParamComp('x', 2.0))
    prob.root.connect('p1.x', 'comp.x')

    comp.fd_options['force_fd'] = True

    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(['p1.x'], ['comp.y'], mode='fwd',
                           return_format='dict')
    assert_rel_error(self, J['comp.y']['p1.x'][0][0], 7.0, 1e-6)

def test_warm_start(self):
    # create metamodel with warm_restart = True
    meta = MetaModel()
    meta.add_param('x1', 0.)
    meta.add_param('x2', 0.)
    meta.add_output('y1', 0.)
    meta.add_output('y2', 0.)
    meta.default_surrogate = ResponseSurface()
    meta.warm_restart = True

    # add to problem
    prob = Problem(Group())
    prob.root.add('meta', meta)
    prob.setup(check=False)

    # provide initial training data
    prob['meta.train:x1'] = [1.0, 3.0]
    prob['meta.train:x2'] = [1.0, 4.0]
    prob['meta.train:y1'] = [3.0, 1.0]
    prob['meta.train:y2'] = [1.0, 7.0]

    # run against a data point and check result
    prob['meta.x1'] = 2.0
    prob['meta.x2'] = 3.0
    prob.run()

    assert_rel_error(self, prob['meta.y1'], 1.9085, .001)
    assert_rel_error(self, prob['meta.y2'], 3.9203, .001)

    # Add 3rd training point, moves the estimate for that point
    # back to where it should be.
    prob['meta.train:x1'] = [2.0]
    prob['meta.train:x2'] = [3.0]
    prob['meta.train:y1'] = [2.0]
    prob['meta.train:y2'] = [4.0]

    meta.train = True  # currently need to tell meta to re-train
    prob.run()

    assert_rel_error(self, prob['meta.y1'], 2.0, .00001)
    assert_rel_error(self, prob['meta.y2'], 4.0, .00001)

def test_no_derivatives(self):
    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', ExecComp('y=x*2.0'))
    prob.root.add('p1', ParamComp('x', 2.0))
    prob.root.connect('p1.x', 'comp.x')

    comp.fd_options['force_fd'] = True

    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(['p1.x'], ['comp.y'], mode='fwd',
                           return_format='dict')
    assert_rel_error(self, J['comp.y']['p1.x'][0][0], 2.0, 1e-6)

    J = prob.calc_gradient(['p1.x'], ['comp.y'], mode='rev',
                           return_format='dict')
    assert_rel_error(self, J['comp.y']['p1.x'][0][0], 2.0, 1e-6)

def test_sin_metamodel(self):
    # create a MetaModel for Sin and add it to a Problem
    sin_mm = MetaModel()
    sin_mm.add_param('x', 0.)
    sin_mm.add_output('f_x', 0.)

    prob = Problem(Group())
    prob.root.add('sin_mm', sin_mm)

    # check that missing surrogate is detected in check_setup
    stream = cStringIO()
    prob.setup(out_stream=stream)
    msg = ("No default surrogate model is defined and the "
           "following outputs do not have a surrogate model:\n"
           "['f_x']\n"
           "Either specify a default_surrogate, or specify a "
           "surrogate model for all outputs.")
    self.assertTrue(msg in stream.getvalue())

    # check that output with no specified surrogate gets the default
    sin_mm.default_surrogate = FloatKrigingSurrogate()
    prob.setup(check=False)
    surrogate = prob.root.unknowns.metadata('sin_mm.f_x').get('surrogate')
    self.assertTrue(isinstance(surrogate, FloatKrigingSurrogate),
                    'sin_mm.f_x should get the default surrogate')

    # train the surrogate and check predicted value
    prob['sin_mm.train:x'] = np.linspace(0, 10, 200)
    prob['sin_mm.train:f_x'] = .5 * np.sin(prob['sin_mm.train:x'])
    prob['sin_mm.x'] = 2.22
    prob.run()

    self.assertAlmostEqual(prob['sin_mm.f_x'],
                           .5 * np.sin(prob['sin_mm.x']), places=5)
