def test_double_arraycomp(self):
    # Mainly testing a bug in the array return for multiple arrays
    group = Group()
    group.add('x_param1', IndepVarComp('x1', np.ones((2))), promotes=['*'])
    group.add('x_param2', IndepVarComp('x2', np.ones((2))), promotes=['*'])
    group.add('mycomp', DoubleArrayComp(), promotes=['*'])

    prob = Problem(impl=impl)
    prob.root = group
    prob.root.ln_solver = PetscKSP()
    prob.setup(check=False)
    prob.run()

    Jbase = group.mycomp.JJ

    J = prob.calc_gradient(['x1', 'x2'], ['y1', 'y2'], mode='fwd',
                           return_format='array')
    diff = np.linalg.norm(J - Jbase)
    assert_rel_error(self, diff, 0.0, 1e-8)

    J = prob.calc_gradient(['x1', 'x2'], ['y1', 'y2'], mode='fd',
                           return_format='array')
    diff = np.linalg.norm(J - Jbase)
    assert_rel_error(self, diff, 0.0, 1e-8)

    J = prob.calc_gradient(['x1', 'x2'], ['y1', 'y2'], mode='rev',
                           return_format='array')
    diff = np.linalg.norm(J - Jbase)
    assert_rel_error(self, diff, 0.0, 1e-8)
def __init__(self):
    super(ConvergeDivergeGroups, self).__init__()

    self.add('p', ParamComp('x', 2.0))

    sub1 = self.add('sub1', Group())
    sub1.add('comp1', Comp1())

    sub2 = sub1.add('sub2', Group())
    sub2.add('comp2', Comp2())
    sub2.add('comp3', Comp3())
    sub1.add('comp4', Comp4())

    sub3 = self.add('sub3', Group())
    sub3.add('comp5', Comp5())
    sub3.add('comp6', Comp6())
    self.add('comp7', Comp7())

    self.connect("p.x", "sub1.comp1.x1")
    self.connect('sub1.comp1.y1', 'sub1.sub2.comp2.x1')
    self.connect('sub1.comp1.y2', 'sub1.sub2.comp3.x1')
    self.connect('sub1.sub2.comp2.y1', 'sub1.comp4.x1')
    self.connect('sub1.sub2.comp3.y1', 'sub1.comp4.x2')
    self.connect('sub1.comp4.y1', 'sub3.comp5.x1')
    self.connect('sub1.comp4.y2', 'sub3.comp6.x1')
    self.connect('sub3.comp5.y1', 'comp7.x1')
    self.connect('sub3.comp6.y1', 'comp7.x2')
def test_simple_array_model(self):
    prob = Problem()
    prob.root = Group()
    prob.root.add('comp', ExecComp(['y[0]=2.0*x[0]+7.0*x[1]',
                                    'y[1]=5.0*x[0]-3.0*x[1]'],
                                   x=np.zeros([2]), y=np.zeros([2])))
    prob.root.add('p1', ParamComp('x', np.ones([2])))
    prob.root.connect('p1.x', 'comp.x')

    prob.setup(check=False)
    prob.run()

    data = prob.check_partial_derivatives(out_stream=None)

    assert_rel_error(self, data['comp'][('y', 'x')]['abs error'][0], 0.0, 1e-5)
    assert_rel_error(self, data['comp'][('y', 'x')]['abs error'][1], 0.0, 1e-5)
    assert_rel_error(self, data['comp'][('y', 'x')]['abs error'][2], 0.0, 1e-5)
    assert_rel_error(self, data['comp'][('y', 'x')]['rel error'][0], 0.0, 1e-5)
    assert_rel_error(self, data['comp'][('y', 'x')]['rel error'][1], 0.0, 1e-5)
    assert_rel_error(self, data['comp'][('y', 'x')]['rel error'][2], 0.0, 1e-5)
def test_parab_FD_subbed_Pcomps(self):
    model = Problem(impl=impl)
    root = model.root = Group()
    par = root.add('par', ParallelGroup())

    par.add('s1', MP_Point(root=2.0))
    par.add('s2', MP_Point(root=3.0))

    root.add('sumcomp', ExecComp('sum = x1+x2'))
    root.connect('par.s1.c.y', 'sumcomp.x1')
    root.connect('par.s2.c.y', 'sumcomp.x2')

    driver = model.driver = pyOptSparseDriver()
    driver.add_param('par.s1.p.x', low=-100, high=100)
    driver.add_param('par.s2.p.x', low=-100, high=100)
    driver.add_objective('sumcomp.sum')

    root.fd_options['force_fd'] = True

    model.setup(check=False)
    model.run()

    if not MPI or self.comm.rank == 0:
        assert_rel_error(self, model['par.s1.p.x'], 2.0, 1.e-6)

    if not MPI or self.comm.rank == 1:
        assert_rel_error(self, model['par.s2.p.x'], 3.0, 1.e-6)
def test_simple(self):
    group = Group()
    group.add('x_param', IndepVarComp('x', 1.0), promotes=['*'])
    group.add('mycomp', SimpleCompDerivMatVec(), promotes=['x', 'y'])

    prob = Problem(impl=impl)
    prob.root = group
    prob.root.ln_solver = PetscKSP()
    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')
    assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)

    J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')
    assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)
def test_parab_FD(self):
    model = Problem(impl=impl)
    root = model.root = Group()
    par = root.add('par', ParallelGroup())

    par.add('c1', Parab1D(root=2.0))
    par.add('c2', Parab1D(root=3.0))

    root.add('p1', ParamComp('x', val=0.0))
    root.add('p2', ParamComp('x', val=0.0))
    root.connect('p1.x', 'par.c1.x')
    root.connect('p2.x', 'par.c2.x')

    root.add('sumcomp', ExecComp('sum = x1+x2'))
    root.connect('par.c1.y', 'sumcomp.x1')
    root.connect('par.c2.y', 'sumcomp.x2')

    driver = model.driver = pyOptSparseDriver()
    driver.add_param('p1.x', low=-100, high=100)
    driver.add_param('p2.x', low=-100, high=100)
    driver.add_objective('sumcomp.sum')

    root.fd_options['force_fd'] = True

    model.setup(check=False)
    model.run()

    if not MPI or self.comm.rank == 0:
        assert_rel_error(self, model['p1.x'], 2.0, 1.e-6)
        assert_rel_error(self, model['p2.x'], 3.0, 1.e-6)
def test_simple_jac(self):
    group = Group()
    group.add('x_param', IndepVarComp('x', 1.0), promotes=['*'])
    group.add('mycomp', ExecComp(['y=2.0*x']), promotes=['x', 'y'])

    prob = Problem()
    prob.root = group
    prob.root.ln_solver = DirectSolver()
    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')
    assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)

    J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')
    assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)
def test_simple_array_model2(self):
    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', ExecComp('y = mat.dot(x)',
                                          x=np.zeros((2, )),
                                          y=np.zeros((2, )),
                                          mat=np.array([[2., 7.], [5., -3.]])))
    p1 = prob.root.add('p1', ParamComp('x', np.ones([2])))
    prob.root.connect('p1.x', 'comp.x')

    prob.setup(check=False)
    prob.run()

    data = prob.check_partial_derivatives(out_stream=None)

    assert_rel_error(self, data['comp'][('y', 'x')]['abs error'][0], 0.0, 1e-5)
    assert_rel_error(self, data['comp'][('y', 'x')]['abs error'][1], 0.0, 1e-5)
    assert_rel_error(self, data['comp'][('y', 'x')]['abs error'][2], 0.0, 1e-5)
    assert_rel_error(self, data['comp'][('y', 'x')]['rel error'][0], 0.0, 1e-5)
    assert_rel_error(self, data['comp'][('y', 'x')]['rel error'][1], 0.0, 1e-5)
    assert_rel_error(self, data['comp'][('y', 'x')]['rel error'][2], 0.0, 1e-5)
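# Aside (added illustration, not part of the original test file): for the
# linear model above, y = mat.dot(x), the exact Jacobian dy/dx is `mat`
# itself, which is what check_partial_derivatives compares against a
# finite-difference estimate. A minimal standalone numpy sketch of that
# same comparison:

import numpy as np

mat = np.array([[2., 7.], [5., -3.]])

def model(x):
    return mat.dot(x)

x0 = np.ones(2)
h = 1e-6
J_fd = np.empty((2, 2))
for j in range(2):
    step = np.zeros(2)
    step[j] = h
    # forward difference builds column j of the Jacobian
    J_fd[:, j] = (model(x0 + step) - model(x0)) / h

# for a linear model the FD Jacobian matches `mat` to roundoff
assert np.allclose(J_fd, mat, atol=1e-5)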
def test_unequal_training_outputs(self):
    meta = MetaModel()
    meta.add_param('x', 0.)
    meta.add_param('y', 0.)
    meta.add_output('f', 0.)
    meta.default_surrogate = FloatKrigingSurrogate()

    prob = Problem(Group())
    prob.root.add('meta', meta)
    prob.setup(check=False)

    prob['meta.train:x'] = [1.0, 1.0, 1.0, 1.0]
    prob['meta.train:y'] = [1.0, 2.0, 3.0, 4.0]
    prob['meta.train:f'] = [1.0, 1.0]

    prob['meta.x'] = 1.0
    prob['meta.y'] = 1.0

    with self.assertRaises(RuntimeError) as cm:
        prob.run()

    expected = "MetaModel: Each variable must have the same number" \
               " of training points. Expected 4 but found" \
               " 2 points for 'f'."
    self.assertEqual(str(cm.exception), expected)
def test_simple_in_group_matvec(self):
    group = Group()
    sub = group.add('sub', Group(), promotes=['x', 'y'])
    group.add('x_param', ParamComp('x', 1.0), promotes=['*'])
    sub.add('mycomp', SimpleCompDerivMatVec(), promotes=['x', 'y'])

    prob = Problem()
    prob.root = group
    prob.root.ln_solver = ExplicitSolver()
    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')
    assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)

    J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')
    assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)
def test_simple_matvec_subbed(self):
    group = Group()
    group.add('mycomp', SimpleCompDerivMatVec(), promotes=['x', 'y'])

    prob = Problem()
    prob.root = Group()
    prob.root.add('x_param', ParamComp('x', 1.0), promotes=['*'])
    prob.root.add('sub', group, promotes=['*'])

    prob.root.ln_solver = ExplicitSolver()
    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')
    assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)

    J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')
    assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)
def test_array2D(self):
    group = Group()
    group.add('x_param', IndepVarComp('x', np.ones((2, 2))), promotes=['*'])
    group.add('mycomp', ArrayComp2D(), promotes=['x', 'y'])

    prob = Problem(impl=impl)
    prob.root = group
    prob.root.ln_solver = PetscKSP()
    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')
    Jbase = prob.root.mycomp._jacobian_cache
    diff = np.linalg.norm(J['y']['x'] - Jbase['y', 'x'])
    assert_rel_error(self, diff, 0.0, 1e-8)

    J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')
    diff = np.linalg.norm(J['y']['x'] - Jbase['y', 'x'])
    assert_rel_error(self, diff, 0.0, 1e-8)
def test_math(self):
    prob = Problem(root=Group())
    C1 = prob.root.add('C1', ExecComp('y=sin(x)', x=2.0))

    self.assertTrue('x' in C1._params_dict)
    self.assertTrue('y' in C1._unknowns_dict)

    prob.setup(check=False)
    prob.run()

    assert_rel_error(self, C1.unknowns['y'], math.sin(2.0), 0.00001)
def test_array_to_scalar(self):
    root = Group()

    root.add('P1', ParamComp('x', np.array([2., 3.])))
    root.add('C1', SimpleComp())
    root.add('C2', ExecComp('y = x * 3.', y=0., x=0.))

    root.connect('P1.x', 'C1.x', src_indices=[0, ])
    root.connect('P1.x', 'C2.x', src_indices=[1, ])

    prob = Problem(root)
    prob.setup(check=False)
    prob.run()

    self.assertAlmostEqual(root.C1.params['x'], 2.)
    self.assertAlmostEqual(root.C2.params['x'], 3.)
def test_fd_options_meta_step_size(self):

    class MetaParaboloid(Component):
        """ Evaluates the equation f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3 """

        def __init__(self):
            super(MetaParaboloid, self).__init__()

            # Params
            self.add_param('x', 1.0, fd_step_size=1.0e5)
            self.add_param('y', 1.0, fd_step_size=1.0e5)

            # Unknowns
            self.add_output('f_xy', 0.0)

        def solve_nonlinear(self, params, unknowns, resids):
            """f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3
            Optimal solution (minimum): x = 6.6667; y = -7.3333
            """
            x = params['x']
            y = params['y']

            f_xy = ((x - 3.0)**2 + x * y + (y + 4.0)**2 - 3.0)
            unknowns['f_xy'] = f_xy

        def jacobian(self, params, unknowns, resids):
            """Analytical derivatives"""
            x = params['x']
            y = params['y']

            J = {}
            J['f_xy', 'x'] = (2.0 * x - 6.0 + y)
            J['f_xy', 'y'] = (2.0 * y + 8.0 + x)
            return J

    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', MetaParaboloid())
    prob.root.add('p1', ParamComp('x', 15.0))
    prob.root.add('p2', ParamComp('y', 15.0))
    prob.root.connect('p1.x', 'comp.x')
    prob.root.connect('p2.y', 'comp.y')

    comp.fd_options['force_fd'] = True

    prob.setup(check=False)
    prob.run()

    # Make sure the (deliberately bad) per-variable step size is used;
    # the derivative should be way too high with it.
    J = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    self.assertGreater(J['comp.f_xy']['p1.x'][0][0], 1000.0)
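# Aside (added illustration, not from the test suite): the gradient above is
# "way too high" because the leading forward-difference error is (h/2) * f''.
# For this paraboloid f'' w.r.t. x is exactly 2.0, so the FD slope is off by
# exactly h. A standalone sketch of that effect:

def f(x, y):
    return (x - 3.0)**2 + x * y + (y + 4.0)**2 - 3.0

x0, y0 = 15.0, 15.0
exact = 2.0 * x0 - 6.0 + y0   # analytic df/dx = 39.0

for h in (1e-6, 1.0, 1e5):
    fd = (f(x0 + h, y0) - f(x0, y0)) / h
    # for this quadratic, fd - exact == h, so h = 1e5 gives a wildly
    # wrong slope -- the behavior the assertGreater above checks for
    print(h, fd, fd - exact)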
def __init__(self):
    super(TestProb, self).__init__()

    self.root = root = Group()
    root.add('c1', SimpleArrayComp())
    root.add('p1', ParamComp('p', 1 * np.ones(2)))
    root.connect('p1.p', 'c1.x')

    root.add('ci1', SimpleImplicitComp())
    root.add('pi1', ParamComp('p', 1.))
    root.connect('pi1.p', 'ci1.x')
def test_array(self):
    prob = Problem(root=Group())
    C1 = prob.root.add('C1', ExecComp('y=x[1]',
                                      x=np.array([1., 2., 3.]), y=0.0))

    self.assertTrue('x' in C1._params_dict)
    self.assertTrue('y' in C1._unknowns_dict)

    prob.setup(check=False)
    prob.run()

    assert_rel_error(self, C1.unknowns['y'], 2.0, 0.00001)
def test_prom_conns(self):
    # This test mimics some of the connections found in test_nozzle in
    # pycycle. The bug was that an unknown connected to one parameter
    # (desVars.Ps_exhaust to nozzle.press_calcs.Ps_exhaust) was not being
    # connected to the other parameters ('nozzle.ideal_flow.chem_eq.n2ls.P',
    # 'nozzle.ideal_flow.mach_calc.Ps', and
    # 'nozzle.ideal_flow.props.tp2props.P') that were connected via
    # input-input connections to nozzle.press_calcs.Ps_exhaust.
    prob = Problem(root=Group())
    root = prob.root

    desVars = root.add("desVars", ParamComp('Ps_exhaust', 1.0),
                       promotes=('Ps_exhaust', ))
    nozzle = root.add("nozzle", Group())
    press_calcs = nozzle.add('press_calcs', ExecComp('out=Ps_exhaust'),
                             promotes=('Ps_exhaust', ))
    ideal_flow = nozzle.add("ideal_flow", Group())
    chem_eq = ideal_flow.add('chem_eq', Group(), promotes=('P', ))
    n2ls = chem_eq.add("n2ls", ExecComp('out=P'), promotes=('P', ))
    props = ideal_flow.add("props", Group(), promotes=('P', ))
    tp2props = props.add("tp2props", ExecComp('out=P'), promotes=('P', ))
    mach_calc = ideal_flow.add("mach_calc", ExecComp('out=Ps'),
                               promotes=('Ps', ))

    nozzle.connect('Ps_exhaust', 'ideal_flow.Ps')
    root.connect('Ps_exhaust', 'nozzle.Ps_exhaust')
    ideal_flow.connect('Ps', 'P')

    prob.setup(check=False)

    expected_targets = set(['nozzle.ideal_flow.chem_eq.n2ls.P',
                            'nozzle.press_calcs.Ps_exhaust',
                            'nozzle.ideal_flow.mach_calc.Ps',
                            'nozzle.ideal_flow.props.tp2props.P'])
    self.assertEqual(set(prob.root.connections), expected_targets)

    for tgt in expected_targets:
        self.assertTrue('desVars.Ps_exhaust' in prob.root.connections[tgt])
def setUp(self):
    self.startdir = os.getcwd()
    self.tempdir = tempfile.mkdtemp(prefix='test_extcode-')
    os.chdir(self.tempdir)
    shutil.copy(os.path.join(DIRECTORY, 'external_code_for_testing.py'),
                os.path.join(self.tempdir, 'external_code_for_testing.py'))

    self.extcode = ExternalCodeForTesting()
    self.top = Problem()
    self.top.root = Group()
    self.top.root.add('extcode', self.extcode)
def test_double_arraycomp(self):
    # Mainly testing a bug in the array return for multiple arrays
    group = Group()
    group.add('x_param1', ParamComp('x1', np.ones((2))), promotes=['*'])
    group.add('x_param2', ParamComp('x2', np.ones((2))), promotes=['*'])
    group.add('mycomp', DoubleArrayComp(), promotes=['*'])

    prob = Problem(impl=impl)
    prob.root = group
    prob.root.ln_solver = PetscKSP()
    prob.setup(check=False)
    prob.run()

    Jbase = group.mycomp.JJ

    J = prob.calc_gradient(['x1', 'x2'], ['y1', 'y2'], mode='fwd',
                           return_format='array')
    diff = np.linalg.norm(J - Jbase)
    assert_rel_error(self, diff, 0.0, 1e-8)

    J = prob.calc_gradient(['x1', 'x2'], ['y1', 'y2'], mode='fd',
                           return_format='array')
    diff = np.linalg.norm(J - Jbase)
    assert_rel_error(self, diff, 0.0, 1e-8)

    J = prob.calc_gradient(['x1', 'x2'], ['y1', 'y2'], mode='rev',
                           return_format='array')
    diff = np.linalg.norm(J - Jbase)
    assert_rel_error(self, diff, 0.0, 1e-8)
def test_array_lhs(self):
    prob = Problem(root=Group())
    C1 = prob.root.add('C1', ExecComp(['y[0]=x[1]', 'y[1]=x[0]'],
                                      x=np.array([1., 2., 3.]),
                                      y=np.array([0., 0.])))

    self.assertTrue('x' in C1._params_dict)
    self.assertTrue('y' in C1._unknowns_dict)

    prob.setup(check=False)
    prob.run()

    assert_rel_error(self, C1.unknowns['y'], np.array([2., 1.]), 0.00001)
def test_complex_step(self):
    prob = Problem(root=Group())
    C1 = prob.root.add('C1', ExecComp(['y=2.0*x+1.'], x=2.0))

    self.assertTrue('x' in C1._params_dict)
    self.assertTrue('y' in C1._unknowns_dict)

    prob.setup(check=False)
    prob.run()

    assert_rel_error(self, C1.unknowns['y'], 5.0, 0.00001)

    J = C1.jacobian(C1.params, C1.unknowns, C1.resids)

    assert_rel_error(self, J[('y', 'x')], 2.0, 0.00001)
def test_linear_system(self):
    root = Group()

    root.add('lin', LinearSystem(3))

    x = np.array([1, 2, -3])
    A = np.array([[5.0, -3.0, 2.0], [1.0, 7.0, -4.0], [1.0, 0.0, 8.0]])
    b = A.dot(x)

    root.add('p1', ParamComp('A', A))
    root.add('p2', ParamComp('b', b))
    root.connect('p1.A', 'lin.A')
    root.connect('p2.b', 'lin.b')

    prob = Problem(root)
    prob.setup(check=False)
    prob.run()

    # Make sure it gets the right answer
    assert_rel_error(self, prob['lin.x'], x, .0001)
    assert_rel_error(self, np.linalg.norm(prob.root.resids.vec), 0.0, 1e-10)

    # Compare against calculated derivs
    Ainv = np.linalg.inv(A)
    dx_dA = np.outer(Ainv, -x).reshape(3, 9)
    dx_db = Ainv

    J = prob.calc_gradient(['p1.A', 'p2.b'], ['lin.x'], mode='fwd',
                           return_format='dict')
    assert_rel_error(self, J['lin.x']['p1.A'], dx_dA, .0001)
    assert_rel_error(self, J['lin.x']['p2.b'], dx_db, .0001)

    J = prob.calc_gradient(['p1.A', 'p2.b'], ['lin.x'], mode='rev',
                           return_format='dict')
    assert_rel_error(self, J['lin.x']['p1.A'], dx_dA, .0001)
    assert_rel_error(self, J['lin.x']['p2.b'], dx_db, .0001)

    J = prob.calc_gradient(['p1.A', 'p2.b'], ['lin.x'], mode='fd',
                           return_format='dict')
    assert_rel_error(self, J['lin.x']['p1.A'], dx_dA, .0001)
    assert_rel_error(self, J['lin.x']['p2.b'], dx_db, .0001)
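# Aside (standalone cross-check, not part of the original tests): for
# x = solve(A, b), differentiating A x = b gives dx = A^{-1} db - A^{-1} dA x,
# so dx/db = A^{-1} and, flattening A row-major,
# d x_r / d A_ij = -Ainv[r, i] * x[j], which is exactly what
# np.outer(Ainv, -x).reshape(3, 9) builds above. A quick finite-difference
# verification in plain numpy:

import numpy as np

A = np.array([[5.0, -3.0, 2.0], [1.0, 7.0, -4.0], [1.0, 0.0, 8.0]])
x = np.array([1., 2., -3.])
b = A.dot(x)
Ainv = np.linalg.inv(A)
dx_dA = np.outer(Ainv, -x).reshape(3, 9)

h = 1e-7
J_fd = np.empty((3, 9))
for i in range(3):
    for j in range(3):
        dA = np.zeros((3, 3))
        dA[i, j] = h
        # perturb one entry of A and re-solve; column 3*i+j of the Jacobian
        J_fd[:, 3 * i + j] = (np.linalg.solve(A + dA, b) - x) / h

assert np.allclose(J_fd, dx_dA, atol=1e-4)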
def __init__(self):
    super(SingleDiamondGrouped, self).__init__()

    self.add('p', ParamComp('x', 2.0))

    sub1 = self.add('sub1', Group())
    sub1.add('comp1', Comp1())
    sub1.add('comp2', Comp2())
    sub1.add('comp3', Comp3())
    self.add('comp4', Comp4())

    self.connect("p.x", "sub1.comp1.x1")
    self.connect('sub1.comp1.y1', 'sub1.comp2.x1')
    self.connect('sub1.comp1.y2', 'sub1.comp3.x1')
    self.connect('sub1.comp2.y1', 'comp4.x1')
    self.connect('sub1.comp3.y1', 'comp4.x2')
def test_fd_options_form(self):
    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', Paraboloid())
    prob.root.add('p1', ParamComp('x', 15.0))
    prob.root.add('p2', ParamComp('y', 15.0))
    prob.root.connect('p1.x', 'comp.x')
    prob.root.connect('p2.y', 'comp.y')

    comp.fd_options['force_fd'] = True
    comp.fd_options['form'] = 'forward'

    param_list = ['p1.x']
    unknowns_list = ['comp.f_xy']

    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(param_list, unknowns_list, return_format='dict')
    assert_rel_error(self, J['comp.f_xy']['p1.x'][0][0], 39.0, 1e-6)

    # Make sure it gives a good result with a small stepsize
    comp.fd_options['form'] = 'backward'
    J = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    assert_rel_error(self, J['comp.f_xy']['p1.x'][0][0], 39.0, 1e-6)

    # Make sure it gives a good result with a small stepsize
    comp.fd_options['form'] = 'central'
    J = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    assert_rel_error(self, J['comp.f_xy']['p1.x'][0][0], 39.0, 1e-6)

    # Now, make sure we really are going forward and backward
    comp.fd_options['form'] = 'forward'
    comp.fd_options['step_size'] = 1e3
    J = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    self.assertGreater(J['comp.f_xy']['p1.x'][0][0], 0.0)

    comp.fd_options['form'] = 'backward'
    J = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    self.assertLess(J['comp.f_xy']['p1.x'][0][0], 0.0)

    # Central should get pretty close even for the bad stepsize
    comp.fd_options['form'] = 'central'
    J = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    assert_rel_error(self, J['comp.f_xy']['p1.x'][0][0], 39.0, 1e-1)
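# Aside (plain-Python illustration, not from the test file): with a huge
# step the forward difference overshoots the true slope by about (h/2) * f''
# and the backward difference undershoots by the same amount, while central
# differencing is exact for a quadratic -- which is why the three assertions
# above land where they do. Sketch, assuming Paraboloid is the same
# f = (x-3)^2 + x*y + (y+4)^2 - 3 as in the MetaParaboloid test above:

def f(x, y):
    return (x - 3.0)**2 + x * y + (y + 4.0)**2 - 3.0

x0, y0, h = 15.0, 15.0, 1e3
fwd = (f(x0 + h, y0) - f(x0, y0)) / h            # 39 + h: large positive
bwd = (f(x0, y0) - f(x0 - h, y0)) / h            # 39 - h: large negative
ctr = (f(x0 + h, y0) - f(x0 - h, y0)) / (2 * h)  # exactly 39 for a quadratic

assert fwd > 0.0 and bwd < 0.0
assert abs(ctr - 39.0) < 1e-8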
def test_indices(self):
    size = 10

    root = Group()

    root.add('P1', ParamComp('x', np.zeros(size)))
    root.add('C1', ExecComp('y = x * 2.',
                            y=np.zeros(size // 2), x=np.zeros(size // 2)))
    root.add('C2', ExecComp('y = x * 3.',
                            y=np.zeros(size // 2), x=np.zeros(size // 2)))

    root.connect('P1.x', "C1.x", src_indices=list(range(size // 2)))
    root.connect('P1.x', "C2.x", src_indices=list(range(size // 2, size)))

    prob = Problem(root)
    prob.setup(check=False)

    root.P1.unknowns['x'][0:size // 2] += 1.0
    root.P1.unknowns['x'][size // 2:size] -= 1.0

    prob.run()

    assert_rel_error(self, root.C1.params['x'], np.ones(size // 2), 0.0001)
    assert_rel_error(self, root.C2.params['x'], -np.ones(size // 2), 0.0001)
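# Aside (conceptual sketch, not from the test file): src_indices connects
# the listed elements of the source array to the target, so the two
# connections above behave like plain numpy fancy indexing on P1.x:

import numpy as np

size = 10
x = np.zeros(size)
x[0:size // 2] += 1.0
x[size // 2:size] -= 1.0

c1_x = x[list(range(size // 2))]        # what C1.x receives
c2_x = x[list(range(size // 2, size))]  # what C2.x receives

assert np.allclose(c1_x, np.ones(size // 2))
assert np.allclose(c2_x, -np.ones(size // 2))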
def __init__(self):
    super(FanOutGrouped, self).__init__()

    sub = self.add('sub', ParallelGroup())
    pgroup = sub.add('pgroup', Group())
    pgroup.add('p', ParamComp('x', 1.0))
    pgroup.add('comp1', ExecComp(['y=3.0*x']))
    sub.add('comp2', ExecComp(['y=-2.0*x']))
    sub.add('comp3', ExecComp(['y=5.0*x']))

    self.add('c2', ExecComp(['y=x']))
    self.add('c3', ExecComp(['y=x']))
    self.connect('sub.comp2.y', 'c2.x')
    self.connect('sub.comp3.y', 'c3.x')

    self.connect("sub.pgroup.comp1.y", "sub.comp2.x")
    self.connect("sub.pgroup.comp1.y", "sub.comp3.x")
    self.connect("sub.pgroup.p.x", "sub.pgroup.comp1.x")
def test_complex_step2(self):
    prob = Problem(Group())
    comp = prob.root.add('comp', ExecComp('y=x*x + x*2.0'))
    prob.root.add('p1', ParamComp('x', 2.0))
    prob.root.connect('p1.x', 'comp.x')
    comp.fd_options['force_fd'] = False

    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(['p1.x'], ['comp.y'], mode='fwd',
                           return_format='dict')
    assert_rel_error(self, J['comp.y']['p1.x'], np.array([6.0]), 0.00001)

    J = prob.calc_gradient(['p1.x'], ['comp.y'], mode='rev',
                           return_format='dict')
    assert_rel_error(self, J['comp.y']['p1.x'], np.array([6.0]), 0.00001)
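# Aside (standalone illustration, not from the test file): the "complex
# step" in the test name refers to complex-step differentiation,
# f'(x) ~= Im(f(x + i*h)) / h, which avoids subtractive cancellation and
# stays accurate even for tiny h. For y = x*x + 2x at x = 2 the exact
# derivative is 2x + 2 = 6, matching the gradients asserted above:

def f(x):
    return x * x + x * 2.0

h = 1e-30
deriv = f(2.0 + 1j * h).imag / h
assert abs(deriv - 6.0) < 1e-12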
def test_overrides(self):

    class OverrideComp(Component):

        def __init__(self):
            super(OverrideComp, self).__init__()

            # Params
            self.add_param('x', 3.0)

            # Unknowns
            self.add_output('y', 5.5)

        def solve_nonlinear(self, params, unknowns, resids):
            """ Doesn't do much. """
            unknowns['y'] = 7.0 * params['x']

        def apply_linear(self, params, unknowns, dparams, dunknowns,
                         dresids, mode):
            """Never Call."""
            raise RuntimeError("This should have been overridden by force_fd.")

        def jacobian(self, params, unknowns, resids):
            """Never Call."""
            raise RuntimeError("This should have been overridden by force_fd.")

    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', OverrideComp())
    prob.root.add('p1', ParamComp('x', 2.0))
    prob.root.connect('p1.x', 'comp.x')
    comp.fd_options['force_fd'] = True

    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(['p1.x'], ['comp.y'], mode='fwd',
                           return_format='dict')
    assert_rel_error(self, J['comp.y']['p1.x'][0][0], 7.0, 1e-6)
def test_warm_start(self):
    # create metamodel with warm_restart = True
    meta = MetaModel()
    meta.add_param('x1', 0.)
    meta.add_param('x2', 0.)
    meta.add_output('y1', 0.)
    meta.add_output('y2', 0.)
    meta.default_surrogate = ResponseSurface()
    meta.warm_restart = True

    # add to problem
    prob = Problem(Group())
    prob.root.add('meta', meta)
    prob.setup(check=False)

    # provide initial training data
    prob['meta.train:x1'] = [1.0, 3.0]
    prob['meta.train:x2'] = [1.0, 4.0]
    prob['meta.train:y1'] = [3.0, 1.0]
    prob['meta.train:y2'] = [1.0, 7.0]

    # run against a data point and check result
    prob['meta.x1'] = 2.0
    prob['meta.x2'] = 3.0
    prob.run()

    assert_rel_error(self, prob['meta.y1'], 1.9085, .001)
    assert_rel_error(self, prob['meta.y2'], 3.9203, .001)

    # Add a 3rd training point; this moves the estimate for that point
    # back to where it should be.
    prob['meta.train:x1'] = [2.0]
    prob['meta.train:x2'] = [3.0]
    prob['meta.train:y1'] = [2.0]
    prob['meta.train:y2'] = [4.0]

    meta.train = True  # currently need to tell meta to re-train

    prob.run()
    assert_rel_error(self, prob['meta.y1'], 2.0, .00001)
    assert_rel_error(self, prob['meta.y2'], 4.0, .00001)
def test_simple_jac(self):
    group = Group()
    group.add('x_param', ParamComp('x', 1.0), promotes=['*'])
    group.add('mycomp', ExecComp(['y=2.0*x']), promotes=['x', 'y'])

    prob = Problem()
    prob.root = group
    prob.root.ln_solver = ExplicitSolver()
    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')
    assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)

    J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')
    assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)
def test_no_derivatives(self):
    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', ExecComp('y=x*2.0'))
    prob.root.add('p1', ParamComp('x', 2.0))
    prob.root.connect('p1.x', 'comp.x')
    comp.fd_options['force_fd'] = True

    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(['p1.x'], ['comp.y'], mode='fwd',
                           return_format='dict')
    assert_rel_error(self, J['comp.y']['p1.x'][0][0], 2.0, 1e-6)

    J = prob.calc_gradient(['p1.x'], ['comp.y'], mode='rev',
                           return_format='dict')
    assert_rel_error(self, J['comp.y']['p1.x'][0][0], 2.0, 1e-6)
def test_simple_in_group_matvec(self):
    group = Group()
    sub = group.add('sub', Group(), promotes=['x', 'y'])
    group.add('x_param', ParamComp('x', 1.0), promotes=['*'])
    sub.add('mycomp', SimpleCompDerivMatVec(), promotes=['x', 'y'])

    prob = Problem(impl=impl)
    prob.root = group
    prob.root.ln_solver = PetscKSP()
    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')
    assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)

    J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')
    assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)
def test_sin_metamodel(self):
    # create a MetaModel for Sin and add it to a Problem
    sin_mm = MetaModel()
    sin_mm.add_param('x', 0.)
    sin_mm.add_output('f_x', 0.)

    prob = Problem(Group())
    prob.root.add('sin_mm', sin_mm)

    # check that missing surrogate is detected in check_setup
    stream = cStringIO()
    prob.setup(out_stream=stream)
    msg = ("No default surrogate model is defined and the "
           "following outputs do not have a surrogate model:\n"
           "['f_x']\n"
           "Either specify a default_surrogate, or specify a "
           "surrogate model for all outputs.")
    self.assertTrue(msg in stream.getvalue())

    # check that output with no specified surrogate gets the default
    sin_mm.default_surrogate = FloatKrigingSurrogate()
    prob.setup(check=False)
    surrogate = prob.root.unknowns.metadata('sin_mm.f_x').get('surrogate')
    self.assertTrue(isinstance(surrogate, FloatKrigingSurrogate),
                    'sin_mm.f_x should get the default surrogate')

    # train the surrogate and check predicted value
    prob['sin_mm.train:x'] = np.linspace(0, 10, 200)
    prob['sin_mm.train:f_x'] = .5 * np.sin(prob['sin_mm.train:x'])
    prob['sin_mm.x'] = 2.22
    prob.run()

    self.assertAlmostEqual(prob['sin_mm.f_x'],
                           .5 * np.sin(prob['sin_mm.x']), places=5)
def test_fd_options_step_size(self):
    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', Paraboloid())
    prob.root.add('p1', ParamComp([('x', 15.0), ('y', 15.0)]))
    prob.root.connect('p1.x', 'comp.x')
    prob.root.connect('p1.y', 'comp.y')

    comp.fd_options['force_fd'] = True

    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    assert_rel_error(self, J['comp.f_xy']['p1.x'][0][0], 39.0, 1e-6)

    # Make sure step_size is used
    # Derivative should be way high with this.
    comp.fd_options['step_size'] = 1e5
    J = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    self.assertGreater(J['comp.f_xy']['p1.x'][0][0], 1000.0)
def test_array_outputs(self):
    meta = MetaModel()
    meta.add_param('x', np.zeros((2, 2)))
    meta.add_output('y', np.zeros(2, ))
    meta.default_surrogate = FloatKrigingSurrogate()

    prob = Problem(Group())
    prob.root.add('meta', meta)
    prob.setup(check=False)

    prob['meta.train:x'] = [[[1.0, 1.0], [1.0, 1.0]],
                            [[2.0, 1.0], [1.0, 1.0]],
                            [[1.0, 2.0], [1.0, 1.0]],
                            [[1.0, 1.0], [2.0, 1.0]],
                            [[1.0, 1.0], [1.0, 2.0]]]

    prob['meta.train:y'] = [[3.0, 1.0],
                            [2.0, 4.0],
                            [1.0, 7.0],
                            [6.0, -3.0],
                            [-2.0, 3.0]]

    prob['meta.x'] = [[1.0, 2.0], [1.0, 1.0]]
    prob.run()

    assert_rel_error(self, prob['meta.y'], np.array([1.0, 7.0]), .00001)
def test_vector_inputs(self):
    meta = MetaModel()
    meta.add_param('x', np.zeros(4))
    meta.add_output('y1', 0.)
    meta.add_output('y2', 0.)
    meta.default_surrogate = FloatKrigingSurrogate()

    prob = Problem(Group())
    prob.root.add('meta', meta)
    prob.setup(check=False)

    prob['meta.train:x'] = [[1.0, 1.0, 1.0, 1.0],
                            [2.0, 1.0, 1.0, 1.0],
                            [1.0, 2.0, 1.0, 1.0],
                            [1.0, 1.0, 2.0, 1.0],
                            [1.0, 1.0, 1.0, 2.0]]
    prob['meta.train:y1'] = [3.0, 2.0, 1.0, 6.0, -2.0]
    prob['meta.train:y2'] = [1.0, 4.0, 7.0, -3.0, 3.0]

    prob['meta.x'] = [1.0, 2.0, 1.0, 1.0]
    prob.run()

    assert_rel_error(self, prob['meta.y1'], 1.0, .00001)
    assert_rel_error(self, prob['meta.y2'], 7.0, .00001)
def test_linear_system(self):
    root = Group()

    root.add('lin', LinearSystem(3))

    x = np.array([1, 2, -3])
    A = np.array([[5.0, -3.0, 2.0], [1.0, 7.0, -4.0], [1.0, 0.0, 8.0]])
    b = A.dot(x)

    root.add('p1', IndepVarComp('A', A))
    root.add('p2', IndepVarComp('b', b))
    root.connect('p1.A', 'lin.A')
    root.connect('p2.b', 'lin.b')

    prob = Problem(root)
    prob.setup(check=False)
    prob.run()

    # Make sure it gets the right answer
    assert_rel_error(self, prob['lin.x'], x, .0001)
    assert_rel_error(self, np.linalg.norm(prob.root.resids.vec), 0.0, 1e-10)

    # Compare against calculated derivs
    Ainv = np.linalg.inv(A)
    dx_dA = np.outer(Ainv, -x).reshape(3, 9)
    dx_db = Ainv

    J = prob.calc_gradient(['p1.A', 'p2.b'], ['lin.x'], mode='fwd',
                           return_format='dict')
    assert_rel_error(self, J['lin.x']['p1.A'], dx_dA, .0001)
    assert_rel_error(self, J['lin.x']['p2.b'], dx_db, .0001)

    J = prob.calc_gradient(['p1.A', 'p2.b'], ['lin.x'], mode='rev',
                           return_format='dict')
    assert_rel_error(self, J['lin.x']['p1.A'], dx_dA, .0001)
    assert_rel_error(self, J['lin.x']['p2.b'], dx_db, .0001)

    J = prob.calc_gradient(['p1.A', 'p2.b'], ['lin.x'], mode='fd',
                           return_format='dict')
    assert_rel_error(self, J['lin.x']['p1.A'], dx_dA, .0001)
    assert_rel_error(self, J['lin.x']['p2.b'], dx_db, .0001)
def test_indices_connect_error(self):
    root = Group()

    P = root.add('P', IndepVarComp('x', np.array([1., 2., 3., 4., 5.])))
    G = root.add('G', Group())
    C = root.add('C', SimpleComp())

    A = G.add('A', SimpleArrayComp())

    root.connect('P.x', 'G.A.x', src_indices=[0])
    root.connect('P.x', 'C.x', src_indices=[2, ])

    expected_error_message = py3fix("Size 1 of the indexed sub-part of "
                                    "source 'P.x' must match the size "
                                    "'2' of the target 'G.A.x'")
    prob = Problem(root)
    with self.assertRaises(ConnectError) as cm:
        prob.setup(check=False)

    self.assertEqual(str(cm.exception), expected_error_message)

    # now try the same thing with a promoted var
    root = Group()

    P = root.add('P', IndepVarComp('x', np.array([1., 2., 3., 4., 5.])))
    G = root.add('G', Group())
    C = root.add('C', SimpleComp())

    A = G.add('A', SimpleArrayComp(), promotes=['x', 'y'])

    root.connect('P.x', 'G.x', src_indices=[0, 1, 2])
    root.connect('P.x', 'C.x', src_indices=[2, ])

    expected_error_message = py3fix("Size 3 of the indexed sub-part of "
                                    "source 'P.x' must match the size "
                                    "'2' of the target 'G.x'")
    prob = Problem(root)
    with self.assertRaises(ConnectError) as cm:
        prob.setup(check=False)

    self.assertEqual(str(cm.exception), expected_error_message)
self.connect("nozzle_air.Fl_O:tot:T","tm.nozzle_air_Tt") self.connect("nozzle_air.Fl_O:tot:Cp","tm.nozzle_air_Cp") self.connect("nozzle_air.Fl_O:stat:W","tm.nozzle_air_W") self.connect("bearing_air.Fl_O:tot:T","tm.bearing_air_Tt") self.connect("bearing_air.Fl_O:tot:Cp","tm.bearing_air_Cp") self.connect("bearing_air.Fl_O:stat:W","tm.bearing_air_W") self.connect('tm.ss_temp_residual','tmp_balance.ss_temp_residual') self.connect('tmp_balance.temp_boundary','tm.temp_boundary') #run stand-alone component if __name__ == "__main__": root = Group() root.add('fs', FlowStuff()) prob = Problem(root) prob.root.nl_solver = Newton() prob.root.nl_solver.options['atol'] = 1e-5 prob.root.nl_solver.options['iprint'] = 1 prob.root.nl_solver.options['rtol'] = 1e-5 prob.root.nl_solver.options['maxiter'] = 50 params = ( ('P', 0.3, {'units':'psi'}), ('T', 1500.0, {'units':'degR'}), ('W', 1.0, {'units':'lbm/s'}) )
def test_subarray_to_promoted_var(self):
    root = Group()

    P = root.add('P', ParamComp('x', np.array([1., 2., 3.])))
    G = root.add('G', Group())
    C = root.add('C', SimpleComp())

    A = G.add('A', SimpleArrayComp())  # , promotes=['x', 'y'])

    root.connect('P.x', 'G.A.x', src_indices=[0, 1])
    root.connect('P.x', 'C.x', src_indices=[2, ])

    prob = Problem(root)
    prob.setup(check=False)
    prob.run()

    assert_rel_error(self, root.G.A.params['x'], np.array([1., 2.]), 0.0001)
    self.assertAlmostEqual(root.C.params['x'], 3.)

    # now try the same thing with a promoted var
    root = Group()

    P = root.add('P', ParamComp('x', np.array([1., 2., 3.])))
    G = root.add('G', Group())
    C = root.add('C', SimpleComp())

    A = G.add('A', SimpleArrayComp(), promotes=['x', 'y'])

    root.connect('P.x', 'G.x', src_indices=[0, 1])
    root.connect('P.x', 'C.x', src_indices=[2, ])

    prob = Problem(root)
    prob.setup(check=False)
    prob.run()

    assert_rel_error(self, root.G.A.params['x'], np.array([1., 2.]), 0.0001)
    self.assertAlmostEqual(root.C.params['x'], 3.)