def test_indices_rev(self):
    """Check reverse-mode gradients of c4.y/c5.y w.r.t. the indexed desvar p.x."""
    prob = self.setup_model('rev')

    jac = prob.calc_gradient(['p.x'], ['c4.y', 'c5.y'], mode='rev',
                             return_format='dict')

    assert_rel_error(self, jac['c5.y']['p.x'][0], np.array([20., 25.]), 1e-6)
    assert_rel_error(self, jac['c4.y']['p.x'][0], np.array([8., 0.]), 1e-6)
def test_simple_paraboloid_lower(self):
    """Minimize the paraboloid subject to a lower-bounded constraint c >= 15."""
    problem = Problem()
    model = problem.root = Group()
    model.add('p1', IndepVarComp('x', 50.0), promotes=['*'])
    model.add('p2', IndepVarComp('y', 50.0), promotes=['*'])
    model.add('comp', Paraboloid(), promotes=['*'])
    model.add('con', ExecComp('c = x - y'), promotes=['*'])

    driver = problem.driver = pyOptSparseDriver()
    driver.options['optimizer'] = OPTIMIZER
    if OPTIMIZER == 'SLSQP':
        driver.opt_settings['ACC'] = 1e-9
    driver.options['print_results'] = False
    driver.add_desvar('x', lower=-50.0, upper=50.0)
    driver.add_desvar('y', lower=-50.0, upper=50.0)
    driver.add_objective('f_xy')
    driver.add_constraint('c', lower=15.0)

    problem.setup(check=False)
    problem.run()

    # Minimum should be at (7.166667, -7.833334)
    assert_rel_error(self, problem['x'], 7.16667, 1e-6)
    assert_rel_error(self, problem['y'], -7.833334, 1e-6)
def test_parab_FD(self):
    """Optimize two parallel 1-D parabolas with group-level finite differencing."""
    top = Problem(impl=impl)
    root = top.root = Group()

    par = root.add('par', ParallelGroup())
    par.add('c1', Parab1D(root=2.0))
    par.add('c2', Parab1D(root=3.0))

    root.add('p1', IndepVarComp('x', val=0.0))
    root.add('p2', IndepVarComp('x', val=0.0))
    root.connect('p1.x', 'par.c1.x')
    root.connect('p2.x', 'par.c2.x')

    root.add('sumcomp', ExecComp('sum = x1+x2'))
    root.connect('par.c1.y', 'sumcomp.x1')
    root.connect('par.c2.y', 'sumcomp.x2')

    drv = top.driver = pyOptSparseDriver()
    drv.options['optimizer'] = OPTIMIZER
    drv.options['print_results'] = False
    drv.add_desvar('p1.x', lower=-100, upper=100)
    drv.add_desvar('p2.x', lower=-100, upper=100)
    drv.add_objective('sumcomp.sum')

    root.fd_options['force_fd'] = True

    top.setup(check=False)
    top.run()

    if not MPI or self.comm.rank == 0:
        assert_rel_error(self, top['p1.x'], 2.0, 1.e-6)
        assert_rel_error(self, top['p2.x'], 3.0, 1.e-6)
def test_fan_out_parallel_sets_rev(self):
    """Reverse-mode parallel derivative sets over the FanOutGrouped model."""
    problem = Problem(impl=impl)
    problem.root = FanOutGrouped()
    problem.root.ln_solver = LinearGaussSeidel()
    problem.root.sub.ln_solver = LinearGaussSeidel()

    # need to set mode to rev before setup. Otherwise the sub-vectors
    # for the parallel set vars won't get allocated.
    problem.root.ln_solver.options['mode'] = 'rev'
    problem.root.sub.ln_solver.options['mode'] = 'rev'

    problem.driver.add_desvar('p.x')
    problem.driver.add_constraint('c2.y', upper=0.0)
    problem.driver.add_constraint('c3.y', upper=0.0)
    problem.driver.parallel_derivs(['c2.y', 'c3.y'])

    expected = [('c2.y', 'c3.y')] if MPI else [('c2.y',), ('c3.y',)]
    self.assertEqual(problem.driver.outputs_of_interest(), expected)

    problem.setup(check=False)
    problem.run()

    jac = problem.calc_gradient(['p.x'], ['c2.y', 'c3.y'], mode='rev',
                                return_format='dict')

    assert_rel_error(self, jac['c2.y']['p.x'][0][0], -6.0, 1e-6)
    assert_rel_error(self, jac['c3.y']['p.x'][0][0], 15.0, 1e-6)
def test_parab_subbed_Pcomps(self):
    """Optimize two MP_Point subgroups living inside a ParallelGroup."""
    top = Problem(impl=impl)
    root = top.root = Group()
    root.ln_solver = lin_solver()

    par = root.add('par', ParallelGroup())
    par.add('s1', MP_Point(root=2.0))
    par.add('s2', MP_Point(root=3.0))

    root.add('sumcomp', ExecComp('sum = x1+x2'))
    root.connect('par.s1.c.y', 'sumcomp.x1')
    root.connect('par.s2.c.y', 'sumcomp.x2')

    drv = top.driver = pyOptSparseDriver()
    drv.options['optimizer'] = OPTIMIZER
    drv.options['print_results'] = False
    drv.add_desvar('par.s1.p.x', lower=-100, upper=100)
    drv.add_desvar('par.s2.p.x', lower=-100, upper=100)
    drv.add_objective('sumcomp.sum')

    top.setup(check=False)
    top.run()

    # Under MPI each point is only checked on the rank guarding it.
    if not MPI or self.comm.rank == 0:
        assert_rel_error(self, top['par.s1.p.x'], 2.0, 1.e-6)
    if not MPI or self.comm.rank == 1:
        assert_rel_error(self, top['par.s2.p.x'], 3.0, 1.e-6)
def test_fd_options_form_precedence(self):
    """A per-param FD 'form' setting should win over the component-level form."""

    class MyComp(Component):

        def __init__(self):
            super(MyComp, self).__init__()

            # Params -- x2 carries its own FD form.
            self.add_param("x1", 3.0)
            self.add_param("x2", 3.0, form="central")

            # Unknowns
            self.add_output("y", 5.5)

        def solve_nonlinear(self, params, unknowns, resids):
            """ Doesn't do much. """
            unknowns["y"] = 7.0 * params["x1"] ** 2 + 7.0 * params["x2"] ** 2

    prob = Problem()
    prob.root = Group()
    mycomp = prob.root.add("comp", MyComp())
    prob.root.add("p1", IndepVarComp([("x1", 3.0), ("x2", 3.0)]))
    prob.root.connect("p1.x1", "comp.x1")
    prob.root.connect("p1.x2", "comp.x2")

    mycomp.fd_options["force_fd"] = True
    mycomp.fd_options["form"] = "forward"

    prob.setup(check=False)
    prob.run()

    jac = prob.calc_gradient(["p1.x1", "p1.x2"], ["comp.y"],
                             return_format="dict")

    # Analytic derivative is 42.0 for both; x2's error tolerance is far
    # tighter, consistent with its per-param central form taking effect.
    x1_err = jac["comp.y"]["p1.x1"] - 42.0
    x2_err = jac["comp.y"]["p1.x2"] - 42.0

    assert_rel_error(self, x1_err, 7e-6, 1e-1)
    assert_rel_error(self, x2_err, 5.4e-10, 1e-1)
def test_fan_out_parallel_sets_fwd(self):
    """Forward mode over FanOutGrouped; parallel_derivs is ignored in fwd."""
    problem = Problem(impl=impl)
    problem.root = FanOutGrouped()
    problem.root.ln_solver = LinearGaussSeidel()
    problem.root.sub.ln_solver = LinearGaussSeidel()

    problem.root.ln_solver.options['mode'] = 'fwd'
    problem.root.sub.ln_solver.options['mode'] = 'fwd'

    problem.driver.add_desvar('p.x')
    problem.driver.add_constraint('c2.y', upper=0.0)
    problem.driver.add_constraint('c3.y', upper=0.0)
    problem.driver.parallel_derivs(['c2.y', 'c3.y'])  # ignored in fwd

    expected = [('c2.y', 'c3.y')] if MPI else [('c2.y',), ('c3.y',)]
    self.assertEqual(problem.driver.outputs_of_interest(), expected)

    problem.setup(check=False)
    problem.run()

    jac = problem.calc_gradient(['p.x'], ['c2.y', 'c3.y'], mode='fwd',
                                return_format='dict')

    assert_rel_error(self, jac['c2.y']['p.x'][0][0], -6.0, 1e-6)
    assert_rel_error(self, jac['c3.y']['p.x'][0][0], 15.0, 1e-6)
def test_two_simple(self):
    """Gradient through a distributed comp feeding a serial comp, fwd and rev."""
    size = 3
    root = Group()
    root.add('P', IndepVarComp('x', numpy.ones(size)))
    root.add('C1', DistribExecComp(['y=2.0*x'], arr_size=size,
                                   x=numpy.zeros(size),
                                   y=numpy.zeros(size)))
    root.add('C2', ExecComp(['z=3.0*y'],
                            y=numpy.zeros(size),
                            z=numpy.zeros(size)))

    prob = Problem(impl=impl)
    prob.root = root
    prob.root.ln_solver = LinearGaussSeidel()
    prob.root.connect('P.x', 'C1.x')
    prob.root.connect('C1.y', 'C2.y')

    prob.setup(check=False)
    prob.run()

    # dz/dx = 2 * 3 = 6 on the diagonal, identical in both modes.
    expected = numpy.eye(size) * 6.0
    for mode in ('fwd', 'rev'):
        jac = prob.calc_gradient(['P.x'], ['C2.z'], mode=mode,
                                 return_format='dict')
        assert_rel_error(self, jac['C2.z']['P.x'], expected, 1e-6)
def test_derivatives(self):
    """MetaModel surrogate derivatives in fwd/rev, plus a complex-step partials check."""
    meta = MetaModel()
    meta.add_param('x', 0.)
    meta.add_output('f', 0.)
    meta.default_surrogate = FloatKrigingSurrogate()

    prob = Problem(Group())
    prob.root.add('meta', meta, promotes=['x'])
    prob.root.add('p', IndepVarComp('x', 0.), promotes=['x'])
    prob.setup(check=False)

    prob['meta.train:x'] = [0., .25, .5, .75, 1.]
    prob['meta.train:f'] = [1., .75, .5, .25, 0.]
    prob['x'] = 0.125
    prob.run()

    # Training data is the line f = 1 - x, so df/dx should be -1.
    Jf = prob.calc_gradient(['x'], ['meta.f'], mode='fwd')
    Jr = prob.calc_gradient(['x'], ['meta.f'], mode='rev')
    assert_rel_error(self, Jf[0][0], -1., 1.e-5)
    assert_rel_error(self, Jr[0][0], -1., 1.e-5)

    stream = cStringIO()
    prob.check_partial_derivatives(out_stream=stream,
                                   global_options={'check_type': 'cs'})
    abs_errors = findall('Absolute Error \(.+\) : (.+)', stream.getvalue())
    self.assertTrue(len(abs_errors) > 0)
    for match in abs_errors:
        self.assertTrue(float(match) < 1.e-6)
def test_analysis_error(self):
    """PetscKSP must raise AnalysisError when err_on_maxiter is set and it hits maxiter."""
    prob = Problem(impl=impl)
    prob.root = ConvergeDiverge()
    prob.root.ln_solver = PetscKSP()
    prob.root.ln_solver.options['maxiter'] = 2
    prob.root.ln_solver.options['err_on_maxiter'] = True

    prob.setup(check=False)
    prob.run()

    indep_list = ['p.x']
    unknown_list = ['comp7.y1']

    prob.run()

    # Make sure value is fine.
    assert_rel_error(self, prob['comp7.y1'], -102.7, 1e-6)

    try:
        prob.calc_gradient(indep_list, unknown_list, mode='fwd',
                           return_format='dict')
    except AnalysisError as err:
        self.assertEqual(str(err),
                         "Solve in '': PetscKSP FAILED to converge in 3 iterations")
    else:
        self.fail("expected AnalysisError")
def test_simple_matvec(self):
    """Group-level FD must bypass the component's analytic derivative methods."""

    class VerificationComp(SimpleCompDerivMatVec):

        def linearize(self, params, unknowns, resids):
            raise RuntimeError("Derivative functions on this comp should not run.")

        def apply_linear(self, params, unknowns, dparams, dunknowns,
                         dresids, mode):
            raise RuntimeError("Derivative functions on this comp should not run.")

    sub = Group()
    sub.add('mycomp', VerificationComp())

    prob = Problem()
    prob.root = Group()
    prob.root.add('sub', sub)
    prob.root.add('x_param', IndepVarComp('x', 1.0))
    prob.root.connect('x_param.x', "sub.mycomp.x")

    sub.fd_options['force_fd'] = True

    prob.setup(check=False)
    prob.run()

    # Would raise RuntimeError if the analytic derivs were invoked.
    for mode in ('fwd', 'rev'):
        jac = prob.calc_gradient(['x_param.x'], ['sub.mycomp.y'], mode=mode,
                                 return_format='dict')
        assert_rel_error(self, jac['sub.mycomp.y']['x_param.x'][0][0], 2.0, 1e-6)
def test_solver_init_design(self):
    """init_design should copy the Kona vector into the problem's design vars."""
    seed = self.primals[0]  # pull a vector out of kona memory
    self.driver.init_design(seed)
    for name, idx in self.driver.primal_vars:
        assert_rel_error(self, seed.data[idx[0]:idx[1]],
                         self.prob[name], 1e-10)
def test_prediction(self):
    """Surrogate mean predictions at three probe points."""
    probe_x = np.array([[0.5], [1.5], [2.5]])
    probe_y = np.array([[0.5], [1.], [0.5]])

    for x0, y0 in zip(probe_x, probe_y):
        assert_rel_error(self, self.surrogate.predict(x0), y0, 1e-9)
def test_jacobian(self):
    """Surrogate jacobian values at three probe points."""
    probe_x = np.array([[0.5], [1.5], [2.5]])
    probe_dy = np.array([[1.], [0.], [-1.]])

    for x0, dy0 in zip(probe_x, probe_dy):
        assert_rel_error(self, self.surrogate.jacobian(x0), dy0, 1e-9)
def test_bulk_prediction(self):
    """Vectorized predict over several probe points at once."""
    probe_x = np.array([[0.5], [1.5], [2.5]])
    probe_y = np.array([[0.82893803], [1.72485853], [0.82893803]])

    assert_rel_error(self, self.surrogate.predict(probe_x), probe_y, 1e-8)
def test_bulk_prediction(self):
    """Bulk predictions with n=5 neighbors and dist_eff=3 against
    closed-form expected outputs."""
    test_x = np.array([[1., 0.5],
                       [0.5, 1.0],
                       [1.0, 1.5],
                       [1.5, 1.],
                       [0., 1.],
                       [.5, .5]
                       ])
    # Hand-derived expected weights; presumably distance-based weighting
    # with dist_eff=3 -- verify against the surrogate implementation.
    a = ((16. / (5 * np.sqrt(5.)) + 16. / (13. * np.sqrt(13.))) /
         (16./(5*np.sqrt(5.)) + 16. / (13. * np.sqrt(13.)) + 8.))
    b = 8. / (8. + 16. / (5 * np.sqrt(5.)) + 16. / (13. * np.sqrt(13.)))
    c = (2. + 2./(5.*np.sqrt(5))) / (3. + 2. / (5. * np.sqrt(5)))
    d = 1. / (3. + 2. / (5. * np.sqrt(5)))
    expected_y = np.array([[a, b, 0.5, a],
                           [a, b, 0.5, a],
                           [a, b, 0.5, a],
                           [a, b, 0.5, a],
                           [c, d, 0.5, c],
                           [0.54872067, 0.45127933, 0.5, 0.54872067]
                           ])
    mu = self.surrogate.predict(test_x, n=5, dist_eff=3)
    assert_rel_error(self, mu, expected_y, 1e-6)
def test_jacobian(self):
    """Surrogate jacobian at three probe points using n=3 neighbors."""
    probe_x = np.array([[0.5], [1.5], [2.5]])
    probe_dy = np.array([[1.92797784], [0.06648199], [-1.92797784]])

    for x0, dy0 in zip(probe_x, probe_dy):
        assert_rel_error(self, self.surrogate.jacobian(x0, n=3), dy0, 1e-6)
def test_vector_inputs(self):
    """MetaModel with a 4-vector param and two scalar outputs."""
    meta = MetaModel()
    meta.add_param('x', np.zeros(4))
    meta.add_output('y1', 0.)
    meta.add_output('y2', 0.)
    meta.default_surrogate = FloatKrigingSurrogate()

    prob = Problem(Group())
    prob.root.add('meta', meta)
    prob.setup(check=False)

    prob['meta.train:x'] = [[1.0, 1.0, 1.0, 1.0],
                            [2.0, 1.0, 1.0, 1.0],
                            [1.0, 2.0, 1.0, 1.0],
                            [1.0, 1.0, 2.0, 1.0],
                            [1.0, 1.0, 1.0, 2.0]]
    prob['meta.train:y1'] = [3.0, 2.0, 1.0, 6.0, -2.0]
    prob['meta.train:y2'] = [1.0, 4.0, 7.0, -3.0, 3.0]

    # The query point is the third training point, so the outputs should
    # reproduce its training values.
    prob['meta.x'] = [1.0, 2.0, 1.0, 1.0]
    prob.run()

    assert_rel_error(self, prob['meta.y1'], 1.0, .00001)
    assert_rel_error(self, prob['meta.y2'], 7.0, .00001)
def test_analysis_error(self):
    """ScipyGMRES must raise AnalysisError when err_on_maxiter is set and it hits maxiter."""
    prob = Problem()
    prob.root = ConvergeDiverge()
    prob.root.ln_solver = ScipyGMRES()
    prob.root.ln_solver.options["maxiter"] = 2
    prob.root.ln_solver.options["err_on_maxiter"] = True

    prob.setup(check=False)
    prob.run()

    indep_list = ["p.x"]
    unknown_list = ["comp7.y1"]

    prob.run()

    # Make sure value is fine.
    assert_rel_error(self, prob["comp7.y1"], -102.7, 1e-6)

    try:
        prob.calc_gradient(indep_list, unknown_list, mode="fwd",
                           return_format="dict")
    except AnalysisError as err:
        self.assertEqual(str(err),
                         "Solve in '': ScipyGMRES failed to converge after 2 iterations")
    else:
        self.fail("expected AnalysisError")
def test_array_outputs(self):
    """MetaModel with a 2x2 array param and a length-2 array output."""
    meta = MetaModel()
    meta.add_param('x', np.zeros((2, 2)))
    meta.add_output('y', np.zeros(2,))
    meta.default_surrogate = FloatKrigingSurrogate()

    prob = Problem(Group())
    prob.root.add('meta', meta)
    prob.setup(check=False)

    prob['meta.train:x'] = [[[1.0, 1.0], [1.0, 1.0]],
                            [[2.0, 1.0], [1.0, 1.0]],
                            [[1.0, 2.0], [1.0, 1.0]],
                            [[1.0, 1.0], [2.0, 1.0]],
                            [[1.0, 1.0], [1.0, 2.0]]]
    prob['meta.train:y'] = [[3.0, 1.0],
                            [2.0, 4.0],
                            [1.0, 7.0],
                            [6.0, -3.0],
                            [-2.0, 3.0]]

    # Query the third training point; outputs should match its training row.
    prob['meta.x'] = [[1.0, 2.0], [1.0, 1.0]]
    prob.run()

    assert_rel_error(self, prob['meta.y'], np.array([1.0, 7.0]), .00001)
def test_simple_paraboloid_equality_linear(self):
    """Minimize the paraboloid with a linear equality constraint c == -15."""
    problem = Problem()
    model = problem.root = Group()
    model.add('p1', IndepVarComp('x', 50.0), promotes=['*'])
    model.add('p2', IndepVarComp('y', 50.0), promotes=['*'])
    model.add('comp', Paraboloid(), promotes=['*'])
    model.add('con', ExecComp('c = - x + y'), promotes=['*'])

    driver = problem.driver = pyOptSparseDriver()
    driver.options['optimizer'] = OPTIMIZER
    if OPTIMIZER == 'SLSQP':
        driver.opt_settings['ACC'] = 1e-9
    driver.options['print_results'] = False
    driver.add_desvar('x', lower=-50.0, upper=50.0)
    driver.add_desvar('y', lower=-50.0, upper=50.0)
    driver.add_objective('f_xy')
    driver.add_constraint('c', equals=-15.0, linear=True)
    if OPTIMIZER == 'SNOPT':
        # there is currently a bug in SNOPT, it requires at least one
        # nonlinear inequality constraint, so provide a 'fake' one
        driver.add_constraint('x', lower=-100.0)

    problem.setup(check=False)
    problem.run()

    # Minimum should be at (7.166667, -7.833334)
    assert_rel_error(self, problem['x'], 7.16667, 1e-6)
    assert_rel_error(self, problem['y'], -7.833334, 1e-6)
def test_root_derivs_dict(self):
    """Recorded dict-format derivatives should match the Sellar baseline values."""
    if OPT is None:
        raise unittest.SkipTest("pyoptsparse is not installed")

    if OPTIMIZER is None:
        raise unittest.SkipTest("pyoptsparse is not providing SNOPT or SLSQP")

    prob = Problem()
    prob.root = SellarDerivativesGrouped()

    prob.driver = pyOptSparseDriver()
    prob.driver.options['optimizer'] = 'SLSQP'
    prob.driver.opt_settings['ACC'] = 1e-9
    prob.driver.options['print_results'] = False

    prob.driver.add_desvar('z', lower=np.array([-10.0, 0.0]),
                           upper=np.array([10.0, 10.0]))
    prob.driver.add_desvar('x', lower=0.0, upper=10.0)

    prob.driver.add_objective('obj')
    prob.driver.add_constraint('con1', upper=0.0)
    prob.driver.add_constraint('con2', upper=0.0)

    prob.driver.add_recorder(self.recorder)
    self.recorder.options['record_metadata'] = False
    self.recorder.options['record_derivs'] = True
    prob.setup(check=False)

    prob.run()

    prob.cleanup()

    # Re-read the CSV the recorder wrote into the in-memory stream.
    self.io.seek(0)
    csv_reader = csv.DictReader(self.io)
    rows = [row for row in csv_reader]

    # execution row: no 'Derivatives' cell
    row = rows[0]
    self.assertEqual(row['Derivatives'], '')

    # derivatives row: no 'obj' cell
    row = rows[1]
    self.assertEqual(row['obj'], '')

    # NOTE(review): eval() of the recorded cell -- acceptable here because the
    # test reads its own recorder output, never do this on untrusted data.
    J1 = eval(row['Derivatives'])[0]

    # Baseline Jacobian values for the grouped Sellar problem.
    Jbase = {}
    Jbase['con1'] = {}
    Jbase['con1']['x'] = -0.98061433
    Jbase['con1']['z'] = np.array([-9.61002285, -0.78449158])
    Jbase['con2'] = {}
    Jbase['con2']['x'] = 0.09692762
    Jbase['con2']['z'] = np.array([1.94989079, 1.0775421 ])
    Jbase['obj'] = {}
    Jbase['obj']['x'] = 2.98061392
    Jbase['obj']['z'] = np.array([9.61001155, 1.78448534])

    for key1, val1 in Jbase.items():
        for key2, val2 in val1.items():
            assert_rel_error(self, J1[key1][key2], val2, .00001)
def test_simple_paraboloid_scaled_objective_rev(self):
    """Scaled objective optimized with reverse-mode derivatives."""
    problem = Problem()
    model = problem.root = Group()
    model.add('p1', IndepVarComp('x', 50.0), promotes=['*'])
    model.add('p2', IndepVarComp('y', 50.0), promotes=['*'])
    model.add('comp', Paraboloid(), promotes=['*'])
    model.add('con', ExecComp('c = x - y'), promotes=['*'])

    driver = problem.driver = pyOptSparseDriver()
    driver.options['optimizer'] = OPTIMIZER
    if OPTIMIZER == 'SNOPT':
        driver.opt_settings['Verify level'] = 3
    driver.options['print_results'] = False
    driver.add_desvar('x', lower=-50.0, upper=50.0)
    driver.add_desvar('y', lower=-50.0, upper=50.0)
    driver.add_objective('f_xy', scaler=1/10.)
    driver.add_constraint('c', lower=10.0, upper=11.0)

    model.ln_solver.options['mode'] = 'rev'

    problem.setup(check=False)
    problem.run()

    # The constraint c = x - y should end up at its upper bound.
    assert_rel_error(self, problem['x'] - problem['y'], 11.0, 1e-6)
def test_paraboloid_optimize_unconstrained(self):
    """SLSQP should find the unconstrained paraboloid minimum."""
    top = Problem()
    model = top.root = Group()
    model.add('p1', IndepVarComp('x', 3.0))
    model.add('p2', IndepVarComp('y', -4.0))
    model.add('p', ParaboloidOptUnCon())
    model.connect('p1.x', 'p.x')
    model.connect('p2.y', 'p.y')

    driver = top.driver = ScipyOptimizer()
    driver.options['optimizer'] = 'SLSQP'
    driver.options['disp'] = False
    driver.add_desvar('p1.x', lower=-50, upper=50)
    driver.add_desvar('p2.y', lower=-50, upper=50)
    driver.add_objective('p.f_xy')

    top.setup(check=False)
    top.run()

    assert_rel_error(self, top['p.x'], 6.666667, 1e-6)
    assert_rel_error(self, top['p.y'], -7.333333, 1e-6)
def test_inf_as_desvar_bounds(self):
    """SLSQP must tolerate np.inf desvar bounds without blowing up."""
    # User may use np.inf as a bound. It is unneccessary, but the user
    # may do it anyway, so make sure SLSQP doesn't blow up with it (bug
    # reported by rfalck)
    problem = Problem()
    model = problem.root = Group()
    model.add('p1', IndepVarComp('x', 50.0), promotes=['*'])
    model.add('p2', IndepVarComp('y', 50.0), promotes=['*'])
    model.add('comp', Paraboloid(), promotes=['*'])
    model.add('con', ExecComp('c = - x + y'), promotes=['*'])

    driver = problem.driver = pyOptSparseDriver()
    driver.options['optimizer'] = 'SLSQP'
    driver.opt_settings['ACC'] = 1e-9
    driver.options['print_results'] = False
    driver.add_desvar('x', lower=-np.inf, upper=np.inf)
    driver.add_desvar('y', lower=-50.0, upper=50.0)
    driver.add_objective('f_xy')
    driver.add_constraint('c', upper=-15.0)

    problem.setup(check=False)
    problem.run()

    # Minimum should be at (7.166667, -7.833334)
    assert_rel_error(self, problem['x'], 7.16667, 1e-6)
    assert_rel_error(self, problem['y'], -7.833334, 1e-6)
def test_fan_in_parallel_sets_rev(self):
    """Reverse-mode parallel design-variable sets over FanInGrouped."""
    problem = Problem(impl=impl)
    problem.root = FanInGrouped()
    problem.root.ln_solver = LinearGaussSeidel()
    problem.root.sub.ln_solver = LinearGaussSeidel()
    problem.root.ln_solver.options['mode'] = 'rev'
    problem.root.sub.ln_solver.options['mode'] = 'rev'

    problem.driver.add_desvar('p1.x1')
    problem.driver.add_desvar('p2.x2')
    problem.driver.add_desvar('p3.x3')
    problem.driver.add_objective('comp3.y')
    problem.driver.parallel_derivs(['p1.x1', 'p2.x2'])

    expected = ([('p1.x1', 'p2.x2'), ('p3.x3',)] if MPI
                else [('p1.x1',), ('p2.x2',), ('p3.x3',)])
    self.assertEqual(problem.driver.desvars_of_interest(), expected)

    problem.setup(check=False)
    problem.run()

    jac = problem.calc_gradient(['p1.x1', 'p2.x2'], ['comp3.y'], mode='rev',
                                return_format='dict')
    assert_rel_error(self, jac['comp3.y']['p1.x1'][0][0], -6.0, 1e-6)
    assert_rel_error(self, jac['comp3.y']['p2.x2'][0][0], 35.0, 1e-6)
def test_driver_param_indices_force_fd_shift(self):
    """ Test driver param indices with shifted indices and force_fd=True """
    problem = Problem()
    problem.root = SellarStateConnection()
    problem.root.fd_options['force_fd'] = True

    # Only index 1 of 'z' is exposed as a design variable.
    problem.driver.add_desvar('z', lower=np.array([-10.0, -10.0]),
                              upper=np.array([10.0, 10.0]), indices=[1])
    problem.driver.add_desvar('x', lower=0.0, upper=10.0)

    problem.driver.add_objective('obj')
    problem.driver.add_constraint('con1', upper=0.0)
    problem.driver.add_constraint('con2', upper=0.0)

    problem.setup(check=False)

    problem['z'][1] = 0.0

    problem.run()

    jac = problem.calc_gradient(['x', 'z'], ['obj'], mode='fd',
                                return_format='array')
    assert_rel_error(self, jac[0][1], 1.78402, 1e-3)
def compare_derivatives(self, var_in, var_out, rel_error=False):
    """Compare forward and adjoint analytic gradients against an FD baseline.

    If rel_error is True the deviation is normalized by the FD value
    (NaNs from zero entries mapped to zero).
    """
    model = self.model

    # Numeric (finite difference) baseline
    Jn = model.calc_gradient(var_in, var_out, mode="fd", return_format='array')

    def check_against_fd(J):
        # Max deviation from the FD baseline must be ~0.
        if rel_error:
            diff = np.nan_to_num(abs(J - Jn) / Jn)
        else:
            diff = abs(J - Jn)
        assert_rel_error(self, diff.max(), 0.0, 1e-3)

    # Analytic forward
    Jf = model.calc_gradient(var_in, var_out, mode='fwd', return_format='array')
    check_against_fd(Jf)

    # Analytic adjoint
    Ja = model.calc_gradient(var_in, var_out, mode='rev', return_format='array')
    check_against_fd(Ja)
def test_sellar_specify_linear_solver(self):
    """Newton must use its own ln_solver rather than the crippled root solver."""
    top = Problem()
    top.root = SellarStateConnection()
    top.root.nl_solver = Newton()

    # Use bad settings for this one so that problem doesn't converge.
    # That way, we test that we are really using Newton's Lin Solver
    # instead.
    top.root.ln_solver = ScipyGMRES()
    top.root.ln_solver.options['maxiter'] = 1

    # The good solver
    top.root.nl_solver.ln_solver = ScipyGMRES()

    top.setup(check=False)
    top.run()

    assert_rel_error(self, top['y1'], 25.58830273, .00001)
    assert_rel_error(self, top['state_eq.y2_command'], 12.05848819, .00001)

    # Make sure we aren't iterating like crazy
    self.assertLess(top.root.nl_solver.iter_count, 8)

    # Root's (bad) linear solver should never have been used.
    self.assertEqual(top.root.ln_solver.iter_count, 0)
    self.assertGreater(top.root.nl_solver.ln_solver.iter_count, 0)
def test_parab_FD_subbed_Pcomps(self):
    """Optimize two parallel MP_Point subgroups under group-level FD.

    Mirrors test_parab_subbed_Pcomps but with root-level force_fd. The
    driver setup previously used the deprecated ``add_param(low=, high=)``
    spelling and never selected an optimizer; it now uses
    ``add_desvar(lower=, upper=)`` and the same driver options as the
    sibling pyOptSparse tests.
    """
    model = Problem(impl=impl)
    root = model.root = Group()

    par = root.add('par', ParallelGroup())
    par.add('s1', MP_Point(root=2.0))
    par.add('s2', MP_Point(root=3.0))

    root.add('sumcomp', ExecComp('sum = x1+x2'))
    root.connect('par.s1.c.y', 'sumcomp.x1')
    root.connect('par.s2.c.y', 'sumcomp.x2')

    driver = model.driver = pyOptSparseDriver()
    driver.options['optimizer'] = OPTIMIZER
    driver.options['print_results'] = False
    driver.add_desvar('par.s1.p.x', lower=-100, upper=100)
    driver.add_desvar('par.s2.p.x', lower=-100, upper=100)
    driver.add_objective('sumcomp.sum')

    root.fd_options['force_fd'] = True

    model.setup(check=False)
    model.run()

    # Under MPI each point is only checked on the rank guarding it.
    if not MPI or self.comm.rank == 0:
        assert_rel_error(self, model['par.s1.p.x'], 2.0, 1.e-6)
    if not MPI or self.comm.rank == 1:
        assert_rel_error(self, model['par.s2.p.x'], 3.0, 1.e-6)
def test_simple_matvec_subbed(self):
    """Matrix-vector comp promoted through a subgroup; all three deriv modes."""
    sub = Group()
    sub.add('mycomp', SimpleCompDerivMatVec(), promotes=['x', 'y'])

    prob = Problem(impl=impl)
    prob.root = Group()
    prob.root.add('x_param', IndepVarComp('x', 1.0), promotes=['*'])
    prob.root.add('sub', sub, promotes=['*'])

    prob.root.ln_solver = PetscKSP()
    prob.setup(check=False)
    prob.run()

    for mode in ('fwd', 'rev', 'fd'):
        jac = prob.calc_gradient(['x'], ['y'], mode=mode, return_format='dict')
        assert_rel_error(self, jac['y']['x'][0][0], 2.0, 1e-6)
def test_run(self):
    """Optimize the parallel Sellar super-group with pyOptSparse.

    (A redundant ``top.driver = ScipyOptimizer()`` assignment that was
    immediately overwritten has been removed.)
    """
    nProblems = 4
    top = Problem(impl=impl)
    top.root = SellarDerivativesSuperGroup(nProblems=nProblems)

    top.driver = pyOptSparseDriver()
    if OPTIMIZER == 'SNOPT':
        top.driver.options['optimizer'] = 'SNOPT'
        top.driver.opt_settings['Verify level'] = 0
        top.driver.opt_settings['Print file'] = 'SNOPT_print_petsctest.out'
        top.driver.opt_settings['Summary file'] = 'SNOPT_summary_petsctest.out'
        top.driver.opt_settings['Major iterations limit'] = 1000
    else:
        top.driver.options['optimizer'] = 'SLSQP'

    top.driver.add_desvar('z', lower=np.array([-10.0, 0.0]),
                          upper=np.array([10.0, 10.0]))
    top.driver.add_desvar('x', lower=0.0, upper=10.0)

    top.driver.add_objective('obj')
    top.driver.add_constraint('con1', upper=0.0)
    top.driver.add_constraint('con2', upper=0.0)
    top.root.ln_solver.options['single_voi_relevance_reduction'] = True

    top.setup(check=False)

    # Setting initial values for design variables
    top['x'] = 1.0
    top['z'] = np.array([5.0, 2.0])

    top.run()

    if top.root.comm.rank == 0:
        assert_rel_error(self, top['z'][0], 1.977639, 1.0e-6)
        assert_rel_error(self, top['z'][1], 0.0, 1.0e-6)
        assert_rel_error(self, top['x'], 0.0, 1.0e-6)
def test_array_values(self):
    """degC -> degF conversion of a 2x3 array and its gradients in both modes."""
    prob = Problem()
    prob.root = Group()
    prob.root.add('pc', IndepVarComp('x', np.zeros((2, 3)), units='degC'),
                  promotes=['x'])
    prob.root.add('uc', UnitComp(shape=(2, 3), param_name='x',
                                 out_name='x_out', units='degF'),
                  promotes=['x', 'x_out'])

    prob.setup(check=False)
    prob.run()

    # 0 degC == 32 degF everywhere.
    assert_rel_error(self, prob['x_out'],
                     np.array([[32., 32., 32.], [32., 32., 32.]]), 1e-6)

    # dF/dC = 1.8 on every element.
    for mode in ('fwd', 'rev'):
        jac = prob.calc_gradient(['x'], ['x_out'], mode=mode,
                                 return_format='dict')
        assert_rel_error(self, jac['x_out']['x'], 1.8 * np.eye(6), 1e-6)
def test_fan_out_parallel_sets(self):
    """Reverse mode with three outputs solved as one parallel derivative set."""
    problem = Problem(impl=impl)
    problem.root = FanOut3Grouped()
    problem.root.ln_solver = LinearGaussSeidel()
    problem.root.sub.ln_solver = LinearGaussSeidel()

    # need to set mode to rev before setup. Otherwise the sub-vectors
    # for the parallel set vars won't get allocated.
    problem.root.ln_solver.options['mode'] = 'rev'
    problem.root.sub.ln_solver.options['mode'] = 'rev'

    # Parallel Groups
    problem.driver.add_desvar('p.x')
    problem.driver.add_constraint('c2.y', upper=0.0)
    problem.driver.add_constraint('c3.y', upper=0.0)
    problem.driver.add_constraint('c4.y', upper=0.0)
    problem.driver.parallel_derivs(['c2.y', 'c3.y', 'c4.y'])

    expected = ([('c2.y', 'c3.y', 'c4.y')] if MPI
                else [('c2.y',), ('c3.y',), ('c4.y',)])
    self.assertEqual(problem.driver.outputs_of_interest(), expected)

    problem.setup(check=False)
    problem.run()

    jac = problem.calc_gradient(['p.x'], ['c2.y', 'c3.y', 'c4.y'], mode='rev',
                                return_format='dict')

    assert_rel_error(self, jac['c2.y']['p.x'][0][0], -6.0, 1e-6)
    assert_rel_error(self, jac['c3.y']['p.x'][0][0], 15.0, 1e-6)
    assert_rel_error(self, jac['c4.y']['p.x'][0][0], 33.0, 1e-6)
def test_unit_convert(self):
    """Complex-step param wrappers should see unit-converted values."""
    prob = Problem()
    prob.root = Group()
    prob.root.add('src', SrcComp())
    prob.root.add('tgtF', TgtCompF())
    prob.root.add('tgtC', TgtCompC())
    prob.root.add('tgtK', TgtCompK())
    prob.root.add('px1', IndepVarComp('x1', 100.0), promotes=['x1'])
    prob.root.connect('x1', 'src.x1')
    for tgt in ('tgtF', 'tgtC', 'tgtK'):
        prob.root.connect('src.x2', '%s.x2' % tgt)

    prob.setup(check=False)
    prob.run()

    wrapF = ComplexStepTgtVecWrapper(prob.root.tgtF.params)
    wrapC = ComplexStepTgtVecWrapper(prob.root.tgtC.params)
    wrapK = ComplexStepTgtVecWrapper(prob.root.tgtK.params)

    # 100 degC == 212 degF == 373.15 K
    assert_rel_error(self, wrapF['x2'], 212.0, 1.0e-6)
    assert_rel_error(self, wrapC['x2'], 100.0, 1.0e-6)
    assert_rel_error(self, wrapK['x2'], 373.15, 1.0e-6)
def test_Sellar_state_SLSQP(self):
    """SLSQP on the state-connected Sellar problem."""
    problem = Problem()
    problem.root = SellarStateConnection()

    driver = problem.driver = ScipyOptimizer()
    driver.options['optimizer'] = 'SLSQP'
    driver.options['tol'] = 1.0e-8
    driver.options['disp'] = False

    driver.add_desvar('z', lower=np.array([-10.0, 0.0]),
                      upper=np.array([10.0, 10.0]))
    driver.add_desvar('x', lower=0.0, upper=10.0)
    driver.add_objective('obj')
    driver.add_constraint('con1', upper=0.0)
    driver.add_constraint('con2', upper=0.0)

    problem.setup(check=False)
    problem.run()

    assert_rel_error(self, problem['z'][0], 1.9776, 1e-3)
    assert_rel_error(self, problem['z'][1], 0.0, 1e-3)
    assert_rel_error(self, problem['x'], 0.0, 1e-3)
def test_basic_grouped(self):
    """Unit conversion across subgroup boundaries: values, gradients, metadata."""
    prob = Problem()
    prob.root = Group()
    sub1 = prob.root.add('sub1', Group())
    sub2 = prob.root.add('sub2', Group())
    sub1.add('src', SrcComp())
    sub2.add('tgtF', TgtCompF())
    sub2.add('tgtC', TgtCompC())
    sub2.add('tgtK', TgtCompK())
    prob.root.add('px1', IndepVarComp('x1', 100.0), promotes=['x1'])
    prob.root.connect('x1', 'sub1.src.x1')
    prob.root.connect('sub1.src.x2', 'sub2.tgtF.x2')
    prob.root.connect('sub1.src.x2', 'sub2.tgtC.x2')
    prob.root.connect('sub1.src.x2', 'sub2.tgtK.x2')

    prob.setup(check=False)
    prob.run()

    assert_rel_error(self, prob['sub1.src.x2'], 100.0, 1e-6)
    assert_rel_error(self, prob['sub2.tgtF.x3'], 212.0, 1e-6)
    assert_rel_error(self, prob['sub2.tgtC.x3'], 100.0, 1e-6)
    assert_rel_error(self, prob['sub2.tgtK.x3'], 373.15, 1e-6)

    # Make sure we don't convert equal units
    self.assertEqual(
        prob.root.sub2.params.metadata('tgtC.x2').get('unit_conv'), None)

    indep_list = ['x1']
    unknown_list = ['sub2.tgtF.x3', 'sub2.tgtC.x3', 'sub2.tgtK.x3']

    # Same Jacobian expected from forward, adjoint, and FD.
    for mode in ('fwd', 'rev', 'fd'):
        jac = prob.calc_gradient(indep_list, unknown_list, mode=mode,
                                 return_format='dict')
        assert_rel_error(self, jac['sub2.tgtF.x3']['x1'][0][0], 1.8, 1e-6)
        assert_rel_error(self, jac['sub2.tgtC.x3']['x1'][0][0], 1.0, 1e-6)
        assert_rel_error(self, jac['sub2.tgtK.x3']['x1'][0][0], 1.0, 1e-6)

    # sub1 contains only the source, so it reports no unit conversions.
    stream = cStringIO()
    conv = prob.root.sub1.list_unit_conv(stream=stream)
    self.assertTrue(len(conv) == 0)
# Same test as for KrigingSurrogate... well with predicted test value adjustment

def branin(x):
    """2-D Branin test function."""
    y = (x[1] - (5.1 / (4. * pi**2.)) * x[0]**2. + 5. * x[0] / pi - 6.)**2. + \
        10. * (1. - 1. / (8. * pi)) * cos(x[0]) + 10.
    return y

x = array([[-2., 0.], [-0.5, 1.5], [1., 3.], [8.5, 4.5], [-3.5, 6.],
           [4., 7.5], [-5., 9.], [5.5, 10.5], [10., 12.], [7., 13.5],
           [2.5, 15.]])
y = array([branin(case) for case in x])

krig1 = MultiFiCoKrigingSurrogate()
krig1.train(x, y)

# At a training point the mean reproduces the data and sigma is ~0.
mu, sigma = krig1.predict([-2., 0.])
assert_rel_error(self, mu, branin(x[0]), 1e-5)
assert_rel_error(self, sigma, 0., 1e-5)

mu, sigma = krig1.predict([5., 5.])
assert_rel_error(self, mu, 22, 1)
assert_rel_error(self, sigma, 13, 1)

# Test with theta setting instead of estimation
# BUG FIX: the original trained and predicted with krig1 here, so the
# fixed-theta surrogate krig2 was never actually exercised.
krig2 = MultiFiCoKrigingSurrogate(theta=[0.1])
krig2.train(x, y)

mu, sigma = krig2.predict([-2., 0.])
assert_rel_error(self, mu, branin(x[0]), 1e-5)
assert_rel_error(self, sigma, 0., 1e-5)

mu, sigma = krig2.predict([5., 5.])
def test_array2D_index_connection(self):
    """Gradient through a src_indices connection into a scalar ExecComp."""
    root = Group()
    root.add('x_param', IndepVarComp('x', np.ones((2, 2))), promotes=['*'])
    sub = root.add('sub', Group(), promotes=['*'])
    sub.add('mycomp', ArrayComp2D(), promotes=['x', 'y'])
    root.add('obj', ExecComp('b = a'))
    root.connect('y', 'obj.a', src_indices=[3])

    prob = Problem()
    prob.root = root
    prob.root.ln_solver = LinearGaussSeidel()
    prob.setup(check=False)
    prob.run()

    # obj.a is wired to flat index 3 of y, so d(obj.b)/dx must equal
    # row 3 of the comp's cached dy/dx, element by element.
    for mode in ('fwd', 'rev'):
        jac = prob.calc_gradient(['x'], ['obj.b'], mode=mode,
                                 return_format='dict')
        Jbase = prob.root.sub.mycomp._jacobian_cache
        for col in range(4):
            assert_rel_error(self, Jbase[('y', 'x')][3][col],
                             jac['obj.b']['x'][0][col], 1e-8)
def test_double_diamond_model(self):
    """check_partial_derivatives on the converge-diverge model, plus the
    'comps' filter option and the compact_print output format."""
    prob = Problem()
    prob.root = ConvergeDivergeGroups()

    prob.setup(check=False)
    prob.run()

    data = prob.check_partial_derivatives(out_stream=None)

    # All recorded abs/rel error entries should be ~0 for every pair.
    for key1, val1 in iteritems(data):
        for key2, val2 in iteritems(val1):
            assert_rel_error(self, val2['abs error'][0], 0.0, 1e-5)
            assert_rel_error(self, val2['abs error'][1], 0.0, 1e-5)
            assert_rel_error(self, val2['abs error'][2], 0.0, 1e-5)
            assert_rel_error(self, val2['rel error'][0], 0.0, 1e-5)
            assert_rel_error(self, val2['rel error'][1], 0.0, 1e-5)
            assert_rel_error(self, val2['rel error'][2], 0.0, 1e-5)

    self.assertEqual(len(data), 7)

    # Piggyback a test for the 'comps' option.
    data = prob.check_partial_derivatives(out_stream=None,
                                          comps=['sub1.sub2.comp3', 'comp7'])
    self.assertEqual(len(data), 2)
    self.assertTrue('sub1.sub2.comp3' in data)
    self.assertTrue('comp7' in data)

    # Names that aren't valid comps must raise with both offenders listed.
    with self.assertRaises(RuntimeError) as cm:
        data = prob.check_partial_derivatives(out_stream=None,
                                              comps=['sub1', 'bogus'])

    expected_msg = "The following are not valid comp names: ['bogus', 'sub1']"
    self.assertEqual(str(cm.exception), expected_msg)

    # This is a good test to piggyback the compact_print test
    mystream = StringIO()
    prob.check_partial_derivatives(out_stream=mystream, compact_print=True)

    text = mystream.getvalue()
    expected = "'y1' wrt 'x1' | 8.0000e+00 | 8.0000e+00 | 8.0000e+00 | 2.0013e-06 | 2.0013e-06 | 2.5016e-07 | 2.5016e-07"
    self.assertTrue(expected in text)
def test_unit_conversion(self):
    """Gradients through unit conversions agree across fwd, rev, and fd
    when the whole model is complex-stepped."""
    prob = Problem()
    prob.root = Group()
    prob.root.add('src', SrcComp())
    prob.root.add('tgtF', TgtCompF())
    prob.root.add('tgtC', TgtCompC())
    prob.root.add('tgtK', TgtCompK())
    prob.root.add('px1', IndepVarComp('x1', 100.0), promotes=['x1'])
    prob.root.connect('x1', 'src.x1')
    prob.root.connect('src.x2', 'tgtF.x2')
    prob.root.connect('src.x2', 'tgtC.x2')
    prob.root.connect('src.x2', 'tgtK.x2')

    # Use complex step on the whole model.
    prob.root.deriv_options['type'] = 'cs'

    prob.setup(check=False)
    prob.run()

    indep_list = ['x1']
    unknown_list = ['tgtF.x3', 'tgtC.x3', 'tgtK.x3']

    # degC -> degF carries the 1.8 slope; degC and degK slopes are 1.0.
    for mode in ('fwd', 'rev', 'fd'):
        J = prob.calc_gradient(indep_list, unknown_list, mode=mode,
                               return_format='dict')
        assert_rel_error(self, J['tgtF.x3']['x1'][0][0], 1.8, 1e-6)
        assert_rel_error(self, J['tgtC.x3']['x1'][0][0], 1.0, 1e-6)
        assert_rel_error(self, J['tgtK.x3']['x1'][0][0], 1.0, 1e-6)
def test_scaler_adder_array_int(self):
    """Integer-valued array adders/scalers must be applied element-wise to
    desvars, objectives, constraints, bounds, and the returned gradient."""
    prob = Problem()
    root = prob.root = Group()
    driver = prob.driver = ScaleAddDriverArray()

    root.add('p1', IndepVarComp('x', val=np.array([[1.0, 1.0], [1.0, 1.0]])),
             promotes=['*'])
    root.add('comp', ArrayComp2D(), promotes=['*'])
    root.add('constraint', ExecComp('con = x + y',
                                    x=np.array([[1.0, 1.0], [1.0, 1.0]]),
                                    y=np.array([[1.0, 1.0], [1.0, 1.0]]),
                                    con=np.array([[1.0, 1.0], [1.0, 1.0]])),
             promotes=['*'])

    driver.add_desvar('x', lower=np.array([[-1e5, -1e5], [-1e5, -1e5]]),
                      adder=np.array([[10, 100], [1000, 10000]]),
                      scaler=np.array([[1, 2], [3, 4]]))
    driver.add_objective('y', adder=np.array([[10, 100], [1000, 10000]]),
                         scaler=np.array([[1, 2], [3, 4]]))
    driver.add_constraint('con', upper=np.zeros((2, 2)),
                          adder=np.array([[10, 100], [1000, 10000]]),
                          scaler=np.array([[1, 2], [3, 4]]))

    prob.setup(check=False)
    prob.run()

    # Scaled desvar values seen by the driver: (value + adder) * scaler.
    self.assertEqual(driver.param[0], 11.0)
    self.assertEqual(driver.param[1], 202.0)
    self.assertEqual(driver.param[2], 3003.0)
    self.assertEqual(driver.param[3], 40004.0)
    # Unscaled values the driver wrote back into the model.
    self.assertEqual(prob['x'][0, 0], 12.0)
    self.assertEqual(prob['x'][0, 1], 102.0)
    self.assertEqual(prob['x'][1, 0], 2003.0)
    self.assertEqual(prob['x'][1, 1], 20250.0)
    # Objective scaling, element by element.
    self.assertEqual(driver.obj_scaled[0], (prob['y'][0, 0] + 10.0) * 1.0)
    self.assertEqual(driver.obj_scaled[1], (prob['y'][0, 1] + 100.0) * 2.0)
    self.assertEqual(driver.obj_scaled[2], (prob['y'][1, 0] + 1000.0) * 3.0)
    self.assertEqual(driver.obj_scaled[3], (prob['y'][1, 1] + 10000.0) * 4.0)
    # Lower bounds are scaled the same way as the desvar values.
    self.assertEqual(driver.param_low[0], (-1e5 + 10.0) * 1.0)
    self.assertEqual(driver.param_low[1], (-1e5 + 100.0) * 2.0)
    self.assertEqual(driver.param_low[2], (-1e5 + 1000.0) * 3.0)
    self.assertEqual(driver.param_low[3], (-1e5 + 10000.0) * 4.0)
    conval = prob['x'] + prob['y']
    self.assertEqual(driver.con_scaled[0], (conval[0, 0] + 10.0) * 1.0)
    self.assertEqual(driver.con_scaled[1], (conval[0, 1] + 100.0) * 2.0)
    self.assertEqual(driver.con_scaled[2], (conval[1, 0] + 1000.0) * 3.0)
    self.assertEqual(driver.con_scaled[3], (conval[1, 1] + 10000.0) * 4.0)

    # Gradient of scaled outputs w.r.t. scaled desvars.
    J = driver.calc_gradient(['x'], ['y', 'con'])
    Jbase = np.array([[2., 1., 3., 7.], [4., 2., 6., 5.], [3., 6., 9., 8.],
                      [1., 3., 2., 4.], [3., 1., 3., 7.], [4., 3., 6., 5.],
                      [3., 6., 10., 8.], [1., 3., 2., 5.]])
    assert_rel_error(self, J, Jbase, 1e-6)
def test_1d_1fi_cokriging(self):
    """Single-fidelity use of the co-kriging surrogate on 1-D data."""
    # CoKrigingSurrogate with one fidelity could be used as a KrigingSurrogate
    # Same test as for KrigingSurrogate... well with predicted test value adjustment
    x = array([[0.05], [.25], [0.61], [0.95]])
    y = array([
        0.738513784857542, -0.210367746201974, -0.489015457891476,
        12.3033138316612
    ])
    krig1 = MultiFiCoKrigingSurrogate()
    krig1.train(x, y)
    new_x = array([0.5])

    # At a training point the mean must match the sample with ~zero variance.
    mu, sigma = krig1.predict(x[0])
    assert_rel_error(self, mu, y[0], 1e-4)
    assert_rel_error(self, sigma, 0., 1e-4)

    mu, sigma = krig1.predict(new_x)
    assert_rel_error(self, mu, -2.0279, 1e-3)
    assert_rel_error(self, sigma, 1.3408, 1e-3)

    # Test with theta setting instead of estimation
    krig2 = MultiFiCoKrigingSurrogate(theta=0.1)
    krig2.train(x, y)

    mu, sigma = krig2.predict(x[0])
    assert_rel_error(self, mu, y[0], 1e-4)
    assert_rel_error(self, sigma, .0, 1e-4)

    # Fixed theta changes the interpolant away from the training points.
    mu, sigma = krig2.predict(new_x)
    assert_rel_error(self, mu, -1.2719, 1e-3)
    assert_rel_error(self, sigma, 0.0439, 1e-3)
def test_simple_implicit_run_once(self):
    """check_partial_derivatives must still pass after run_once even when a
    component leaves stale residuals behind."""

    class SIC2(SimpleImplicitComp):

        def solve_nonlinear(self, params, unknowns, resids):
            """ Simple iterative solve. (Babylonian method)."""
            super(SIC2, self).solve_nonlinear(params, unknowns, resids)

            # This mimics a problem with residuals that aren't up-to-date
            # with the solve
            resids['z'] = 999.999

    prob = Problem()
    prob.root = Group()
    prob.root.ln_solver = ScipyGMRES()
    prob.root.add('comp', SIC2())
    prob.root.add('p1', IndepVarComp('x', 0.5))
    prob.root.connect('p1.x', 'comp.x')

    prob.setup(check=False)
    prob.run_once()

    data = prob.check_partial_derivatives(out_stream=None)

    # All fwd/rev/fd error slots should be ~zero for every variable pair.
    for key1, val1 in iteritems(data):
        for key2, val2 in iteritems(val1):
            assert_rel_error(self, val2['abs error'][0], 0.0, 1e-5)
            assert_rel_error(self, val2['abs error'][1], 0.0, 1e-5)
            assert_rel_error(self, val2['abs error'][2], 0.0, 1e-5)
            assert_rel_error(self, val2['rel error'][0], 0.0, 1e-5)
            assert_rel_error(self, val2['rel error'][1], 0.0, 1e-5)
            assert_rel_error(self, val2['rel error'][2], 0.0, 1e-5)
def test_simple_array_model(self):
    """Partials of SimpleArrayComp check out against finite difference."""
    prob = Problem()
    prob.root = Group()
    prob.root.add('comp', SimpleArrayComp())
    prob.root.add('p1', IndepVarComp('x', np.ones([2])))
    prob.root.connect('p1.x', 'comp.x')

    prob.setup(check=False)
    prob.run()

    data = prob.check_partial_derivatives(out_stream=None)

    # Every error slot (fwd, rev, fd) of every variable pair is ~zero.
    for val1 in data.values():
        for val2 in val1.values():
            for err_kind in ('abs error', 'rel error'):
                for idx in range(3):
                    assert_rel_error(self, val2[err_kind][idx], 0.0, 1e-5)
def test_double_diamond_model_complex_step(self):
    """Partials check passes when every component uses complex step."""
    prob = Problem()
    prob.root = ConvergeDivergeGroups()

    # Switch each component in the model over to complex step.
    comps = (prob.root.sub1.comp1,
             prob.root.sub1.sub2.comp2,
             prob.root.sub1.sub2.comp3,
             prob.root.sub1.comp4,
             prob.root.sub3.comp5,
             prob.root.sub3.comp6,
             prob.root.comp7)
    for comp in comps:
        comp.fd_options['form'] = 'complex_step'

    prob.setup(check=False)
    prob.run()

    data = prob.check_partial_derivatives(out_stream=None)

    for key1, val1 in iteritems(data):
        for key2, val2 in iteritems(val1):
            for err_kind in ('abs error', 'rel error'):
                for idx in range(3):
                    assert_rel_error(self, val2[err_kind][idx], 0.0, 1e-5)
def test_full_model_fd_simple_comp_promoted(self):
    """Total-derivative check on a model with a promoted subgroup."""
    prob = Problem()
    prob.root = Group()
    sub = prob.root.add('sub', Group(), promotes=['*'])
    sub.add('comp', SimpleCompDerivMatVec(), promotes=['*'])
    prob.root.add('p1', IndepVarComp('x', 1.0), promotes=['*'])

    prob.setup(check=False)
    prob.run()

    data = prob.check_total_derivatives(out_stream=None)

    for val in data.values():
        for err_kind in ('abs error', 'rel error'):
            for idx in range(3):
                assert_rel_error(self, val[err_kind][idx], 0.0, 1e-5)
def test_simple_implicit(self):
    """Total derivatives of the implicit-component model match FD."""
    prob = Problem()
    prob.root = Group()
    prob.root.ln_solver = ScipyGMRES()
    prob.root.add('comp', SimpleImplicitComp())
    prob.root.add('p1', IndepVarComp('x', 0.5))
    prob.root.connect('p1.x', 'comp.x')

    prob.setup(check=False)
    prob.run()

    data = prob.check_total_derivatives(out_stream=None)

    for val in data.values():
        for err_kind in ('abs error', 'rel error'):
            for idx in range(3):
                assert_rel_error(self, val[err_kind][idx], 0.0, 1e-5)
def test_double_diamond_model(self):
    """Total derivatives of the converge-diverge model match FD."""
    prob = Problem()
    prob.root = ConvergeDivergeGroups()

    prob.setup(check=False)
    prob.run()

    data = prob.check_total_derivatives(out_stream=None)

    for val in data.values():
        for err_kind in ('abs error', 'rel error'):
            for idx in range(3):
                assert_rel_error(self, val[err_kind][idx], 0.0, 1e-5)
def test_basic_implicit_conn(self):
    """Unit conversion through implicit (promoted) connections."""
    prob = Problem()
    prob.root = Group()
    prob.root.add('src', SrcComp(), promotes=['x1', 'x2'])
    prob.root.add('tgtF', TgtCompF(), promotes=['x2'])
    prob.root.add('tgtC', TgtCompC(), promotes=['x2'])
    prob.root.add('tgtK', TgtCompK(), promotes=['x2'])
    prob.root.add('px1', IndepVarComp('x1', 100.0), promotes=['x1'])

    prob.setup(check=False)
    prob.run()

    # 100 degC converts to 212 degF and 373.15 degK.
    assert_rel_error(self, prob['x2'], 100.0, 1e-6)
    assert_rel_error(self, prob['tgtF.x3'], 212.0, 1e-6)
    assert_rel_error(self, prob['tgtC.x3'], 100.0, 1e-6)
    assert_rel_error(self, prob['tgtK.x3'], 373.15, 1e-6)

    # Make sure we don't convert equal units
    self.assertEqual(
        prob.root.params.metadata('tgtC.x2').get('unit_conv'), None)

    indep_list = ['x1']
    unknown_list = ['tgtF.x3', 'tgtC.x3', 'tgtK.x3']

    for mode in ('fwd', 'rev', 'fd'):
        J = prob.calc_gradient(indep_list, unknown_list, mode=mode,
                               return_format='dict')
        assert_rel_error(self, J['tgtF.x3']['x1'][0][0], 1.8, 1e-6)
        assert_rel_error(self, J['tgtC.x3']['x1'][0][0], 1.0, 1e-6)
        assert_rel_error(self, J['tgtK.x3']['x1'][0][0], 1.0, 1e-6)
def test_basic_force_fd_comps(self):
    """Unit conversion still works when every component is forced to FD."""
    prob = Problem()
    prob.root = Group()
    prob.root.add('src', SrcComp())
    prob.root.add('tgtF', TgtCompF())
    prob.root.add('tgtC', TgtCompC())
    prob.root.add('tgtK', TgtCompK())
    prob.root.add('px1', IndepVarComp('x1', 100.0), promotes=['x1'])
    prob.root.connect('x1', 'src.x1')
    prob.root.connect('src.x2', 'tgtF.x2')
    prob.root.connect('src.x2', 'tgtC.x2')
    prob.root.connect('src.x2', 'tgtK.x2')

    # Force finite difference on every component individually.
    prob.root.src.deriv_options['type'] = 'fd'
    prob.root.tgtF.deriv_options['type'] = 'fd'
    prob.root.tgtC.deriv_options['type'] = 'fd'
    prob.root.tgtK.deriv_options['type'] = 'fd'

    prob.setup(check=False)
    prob.run()

    assert_rel_error(self, prob['src.x2'], 100.0, 1e-6)
    assert_rel_error(self, prob['tgtF.x3'], 212.0, 1e-6)
    assert_rel_error(self, prob['tgtC.x3'], 100.0, 1e-6)
    assert_rel_error(self, prob['tgtK.x3'], 373.15, 1e-6)

    # Make sure we don't convert equal units
    self.assertEqual(
        prob.root.params.metadata('tgtC.x2').get('unit_conv'), None)

    indep_list = ['x1']
    unknown_list = ['tgtF.x3', 'tgtC.x3', 'tgtK.x3']

    J = prob.calc_gradient(indep_list, unknown_list, mode='fwd',
                           return_format='dict')
    assert_rel_error(self, J['tgtF.x3']['x1'][0][0], 1.8, 1e-6)
    assert_rel_error(self, J['tgtC.x3']['x1'][0][0], 1.0, 1e-6)
    assert_rel_error(self, J['tgtK.x3']['x1'][0][0], 1.0, 1e-6)

    J = prob.calc_gradient(indep_list, unknown_list, mode='rev',
                           return_format='dict')
    assert_rel_error(self, J['tgtF.x3']['x1'][0][0], 1.8, 1e-6)
    assert_rel_error(self, J['tgtC.x3']['x1'][0][0], 1.0, 1e-6)
    assert_rel_error(self, J['tgtK.x3']['x1'][0][0], 1.0, 1e-6)

    J = prob.calc_gradient(indep_list, unknown_list, mode='fd',
                           return_format='dict')
    assert_rel_error(self, J['tgtF.x3']['x1'][0][0], 1.8, 1e-6)
    assert_rel_error(self, J['tgtC.x3']['x1'][0][0], 1.0, 1e-6)
    assert_rel_error(self, J['tgtK.x3']['x1'][0][0], 1.0, 1e-6)

    # Need to clean up after FD gradient call, so just rerun.
    prob.run()

    # Make sure check partials handles conversion
    data = prob.check_partial_derivatives(out_stream=None)

    for key1, val1 in iteritems(data):
        for key2, val2 in iteritems(val1):
            assert_rel_error(self, val2['abs error'][0], 0.0, 1e-6)
            assert_rel_error(self, val2['abs error'][1], 0.0, 1e-6)
            assert_rel_error(self, val2['abs error'][2], 0.0, 1e-6)
            assert_rel_error(self, val2['rel error'][0], 0.0, 1e-6)
            assert_rel_error(self, val2['rel error'][1], 0.0, 1e-6)
            assert_rel_error(self, val2['rel error'][2], 0.0, 1e-6)

    # The reported conversions should include degC->degF and degC->degK.
    stream = cStringIO()
    conv = prob.root.list_unit_conv(stream=stream)
    self.assertTrue((('src.x2', 'tgtF.x2'), ('degC', 'degF')) in conv)
    self.assertTrue((('src.x2', 'tgtK.x2'), ('degC', 'degK')) in conv)
def test_record_derivs_lists(self):
    """Derivatives recorded to HDF5 during an SLSQP run match the known
    Sellar totals on the first iteration."""
    prob = Problem()
    prob.root = SellarDerivativesGrouped()

    prob.driver = ScipyOptimizer()
    prob.driver.options['optimizer'] = 'SLSQP'
    prob.driver.options['tol'] = 1.0e-8
    prob.driver.options['disp'] = False

    prob.driver.add_desvar('z', lower=np.array([-10.0, 0.0]),
                           upper=np.array([10.0, 10.0]))
    prob.driver.add_desvar('x', lower=0.0, upper=10.0)

    prob.driver.add_objective('obj')
    prob.driver.add_constraint('con1', upper=0.0)
    prob.driver.add_constraint('con2', upper=0.0)

    # self.recorder/self.filename are provided by the test fixture (setUp).
    prob.driver.add_recorder(self.recorder)
    self.recorder.options['record_metadata'] = False
    self.recorder.options['record_derivs'] = True
    prob.setup(check=False)

    prob.run()

    prob.cleanup()

    hdf = h5py.File(self.filename, 'r')

    # First driver iteration should have a successful deriv record.
    deriv_group = hdf['rank0:SLSQP/1']['deriv']
    self.assertEqual(deriv_group.attrs['success'], 1)
    self.assertEqual(deriv_group.attrs['msg'], '')

    J1 = deriv_group['Derivatives']

    assert_rel_error(self, J1[0][0], 9.61001155, .00001)
    assert_rel_error(self, J1[0][1], 1.78448534, .00001)
    assert_rel_error(self, J1[0][2], 2.98061392, .00001)
    assert_rel_error(self, J1[1][0], -9.61002285, .00001)
    assert_rel_error(self, J1[1][1], -0.78449158, .00001)
    assert_rel_error(self, J1[1][2], -0.98061433, .00001)
    assert_rel_error(self, J1[2][0], 1.94989079, .00001)
    assert_rel_error(self, J1[2][1], 1.0775421, .00001)
    assert_rel_error(self, J1[2][2], 0.09692762, .00001)
def test_basics(self):
    """MetaModel basics: surrogate assignment, training-data handling,
    prediction, and re-training after a default-surrogate change."""
    # create a metamodel component
    mm = MetaModel()

    mm.add_param('x1', 0.)
    mm.add_param('x2', 0.)

    mm.add_output('y1', 0.)
    mm.add_output('y2', 0., surrogate=FloatKrigingSurrogate())

    mm.default_surrogate = ResponseSurface()

    # add metamodel to a problem
    prob = Problem(root=Group())
    prob.root.add('mm', mm)
    prob.setup(check=False)

    # check that surrogates were properly assigned
    surrogate = prob.root.unknowns.metadata('mm.y1').get('surrogate')
    self.assertTrue(isinstance(surrogate, ResponseSurface))

    surrogate = prob.root.unknowns.metadata('mm.y2').get('surrogate')
    self.assertTrue(isinstance(surrogate, FloatKrigingSurrogate))

    # populate training data
    prob['mm.train:x1'] = [1.0, 2.0, 3.0]
    prob['mm.train:x2'] = [1.0, 3.0, 4.0]
    prob['mm.train:y1'] = [3.0, 2.0, 1.0]
    prob['mm.train:y2'] = [1.0, 4.0, 7.0]

    # run problem for provided data point and check prediction
    prob['mm.x1'] = 2.0
    prob['mm.x2'] = 3.0

    self.assertTrue(mm.train)  # training will occur before 1st run
    prob.run()

    assert_rel_error(self, prob['mm.y1'], 2.0, .00001)
    assert_rel_error(self, prob['mm.y2'], 4.0, .00001)

    # run problem for interpolated data point and check prediction
    prob['mm.x1'] = 2.5
    prob['mm.x2'] = 3.5

    self.assertFalse(mm.train)  # training will not occur before 2nd run
    prob.run()

    assert_rel_error(self, prob['mm.y1'], 1.5934, .001)

    # change default surrogate, re-setup and check that metamodel re-trains
    mm.default_surrogate = FloatKrigingSurrogate()
    prob.setup(check=False)

    surrogate = prob.root.unknowns.metadata('mm.y1').get('surrogate')
    self.assertTrue(isinstance(surrogate, FloatKrigingSurrogate))

    self.assertTrue(mm.train)  # training will occur after re-setup
    mm.warm_restart = True  # use existing training data

    prob['mm.x1'] = 2.5
    prob['mm.x2'] = 3.5

    prob.run()
    assert_rel_error(self, prob['mm.y1'], 2., 1e-2)
def test_single_diamond_grouped(self):
    """Gradients of the grouped single-diamond model agree in all modes."""
    prob = Problem()
    prob.root = SingleDiamondGrouped()
    prob.root.ln_solver = LinearGaussSeidel()
    prob.setup(check=False)
    prob.run()

    indep_list = ['p.x']
    unknown_list = ['comp4.y1', 'comp4.y2']

    for mode in ('fwd', 'rev', 'fd'):
        J = prob.calc_gradient(indep_list, unknown_list, mode=mode,
                               return_format='dict')
        assert_rel_error(self, J['comp4.y1']['p.x'][0][0], 25, 1e-6)
        assert_rel_error(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-6)
def test_inner_connection(self):
    """Nested src_indices: inner-group connections must compose correctly
    with the src_indices of the outer connection feeding the group."""

    class Squarer(Component):

        def __init__(self, size):
            super(Squarer, self).__init__()
            self.add_param(name='input:x', val=np.zeros(size), desc='x')
            self.add_output(name='output:x2', val=np.zeros(size),
                            desc='x squared')

        def solve_nonlinear(self, params, unknowns, resids):
            unknowns['output:x2'] = params['input:x']**2

    class Cuber(Component):

        def __init__(self, size):
            super(Cuber, self).__init__()
            self.add_param(name='x', val=np.zeros(size), desc='x')
            self.add_output(name='output:x3', val=np.zeros(size),
                            desc='x squared')

        def solve_nonlinear(self, params, unknowns, resids):
            unknowns['output:x3'] = params['x']**3

    class InnerGroup(Group):

        def __init__(self):
            super(InnerGroup, self).__init__()

            self.add('square1', Squarer(5))
            self.add('square2', Squarer(3), promotes=['input:x'])

            # the following connection should result in 'cube1.x' using the
            # same src_indices as 'input:x', which is [2,3,4] from the outer
            # connection
            self.add('cube1', Cuber(3))
            self.connect('input:x', 'cube1.x')

            # the following connection should result in 'cube2.x' using
            # src_indices [0,1] of 'input:x', which corresponds to the
            # src_indices [2,3] from the outer connection
            self.add('cube2', Cuber(2))
            self.connect('input:x', 'cube2.x', src_indices=[0, 1])

            # the following connection should result in 'cube3.x' using
            # src_indices [1,2] of 'square1.input:x', which corresponds to the
            # src_indices [1,2] from the outer connection
            self.add('cube3', Cuber(2))
            self.connect('square1.input:x', 'cube3.x', src_indices=[1, 2])

    class OuterGroup(Group):

        def __init__(self):
            super(OuterGroup, self).__init__()

            iv = IndepVarComp('input:x', np.zeros(5))
            self.add('indep_vars', iv, promotes=['*'])

            self.add('inner', InnerGroup())
            self.connect('input:x', 'inner.square1.input:x')
            self.connect('input:x', 'inner.input:x', src_indices=[2, 3, 4])

    prob = Problem(root=OuterGroup())
    prob.setup(check=False)

    prob['input:x'] = np.array([4., 5., 6., 7., 8.])
    prob.run()

    # Each component should see the slice of the source implied by the
    # composed inner/outer src_indices described above.
    assert_rel_error(self,
                     prob.root.inner.square1.params['input:x'],
                     np.array([4., 5., 6., 7., 8.]), 0.00000001)
    assert_rel_error(self, prob.root.inner.cube1.params['x'],
                     np.array([6., 7., 8.]), 0.00000001)
    assert_rel_error(self, prob.root.inner.cube2.params['x'],
                     np.array([6., 7.]), 0.00000001)
    assert_rel_error(self, prob.root.inner.cube3.params['x'],
                     np.array([5., 6.]), 0.00000001)
def test_sellar_derivs(self):
    """Sellar totals in fwd, rev, and fd (central) against known values."""
    prob = Problem()
    prob.root = SellarDerivatives()
    prob.root.ln_solver = LinearGaussSeidel()
    prob.root.ln_solver.options['maxiter'] = 10
    prob.root.ln_solver.options['atol'] = 1e-12
    prob.root.ln_solver.options['rtol'] = 1e-12
    prob.root.nl_solver.options['atol'] = 1e-12

    prob.setup(check=False)
    prob.run()

    # Just make sure we are at the right answer
    assert_rel_error(self, prob['y1'], 25.58830273, .00001)
    assert_rel_error(self, prob['y2'], 12.05848819, .00001)

    indep_list = ['x', 'z']
    unknown_list = ['obj', 'con1', 'con2']

    # Reference totals for the converged Sellar problem.
    Jbase = {}
    Jbase['con1'] = {}
    Jbase['con1']['x'] = -0.98061433
    Jbase['con1']['z'] = np.array([-9.61002285, -0.78449158])
    Jbase['con2'] = {}
    Jbase['con2']['x'] = 0.09692762
    Jbase['con2']['z'] = np.array([1.94989079, 1.0775421])
    Jbase['obj'] = {}
    Jbase['obj']['x'] = 2.98061392
    Jbase['obj']['z'] = np.array([9.61001155, 1.78448534])

    J = prob.calc_gradient(indep_list, unknown_list, mode='fwd',
                           return_format='dict')
    for key1, val1 in Jbase.items():
        for key2, val2 in val1.items():
            assert_rel_error(self, J[key1][key2], val2, .00001)

    J = prob.calc_gradient(indep_list, unknown_list, mode='rev',
                           return_format='dict')
    for key1, val1 in Jbase.items():
        for key2, val2 in val1.items():
            assert_rel_error(self, J[key1][key2], val2, .00001)

    # Cheat a bit so I can twiddle mode
    OptionsDictionary.locked = False

    prob.root.deriv_options['form'] = 'central'

    J = prob.calc_gradient(indep_list, unknown_list, mode='fd',
                           return_format='dict')
    for key1, val1 in Jbase.items():
        for key2, val2 in val1.items():
            assert_rel_error(self, J[key1][key2], val2, .00001)

    # Obviously this test doesn't do much right now, but I need to verify
    # we don't get a keyerror here.
    J = prob.calc_gradient(indep_list, unknown_list, mode='fd',
                           return_format='array')
def test_basic_grouped_grouped_implicit(self):
    """Unit conversion across two sibling subgroups via promoted 'x2'."""
    prob = Problem()
    root = prob.root = Group()
    sub1 = prob.root.add('sub1', Group(), promotes=['x2'])
    sub2 = prob.root.add('sub2', Group(), promotes=['x2'])
    sub1.add('src', SrcComp(), promotes=['x2'])
    sub2.add('tgtF', TgtCompFMulti(), promotes=['x2'])
    sub2.add('tgtC', TgtCompC(), promotes=['x2'])
    sub2.add('tgtK', TgtCompK(), promotes=['x2'])
    prob.root.add('px1', IndepVarComp('x1', 100.0), promotes=['x1'])
    prob.root.connect('x1', 'sub1.src.x1')

    prob.setup(check=False)
    prob.run()

    # 100 degC converts to 212 degF and 373.15 degK.
    assert_rel_error(self, prob['x2'], 100.0, 1e-6)
    assert_rel_error(self, prob['sub2.tgtF.x3'], 212.0, 1e-6)
    assert_rel_error(self, prob['sub2.tgtC.x3'], 100.0, 1e-6)
    assert_rel_error(self, prob['sub2.tgtK.x3'], 373.15, 1e-6)

    indep_list = ['x1']
    unknown_list = ['sub2.tgtF.x3', 'sub2.tgtC.x3', 'sub2.tgtK.x3']

    for mode in ('fwd', 'rev', 'fd'):
        J = prob.calc_gradient(indep_list, unknown_list, mode=mode,
                               return_format='dict')
        assert_rel_error(self, J['sub2.tgtF.x3']['x1'][0][0], 1.8, 1e-6)
        assert_rel_error(self, J['sub2.tgtC.x3']['x1'][0][0], 1.0, 1e-6)
        assert_rel_error(self, J['sub2.tgtK.x3']['x1'][0][0], 1.0, 1e-6)
def test_simple_implicit_complex_step(self):
    """Complex-step partials of the implicit comp check out against FD."""
    prob = Problem()
    prob.root = Group()
    prob.root.ln_solver = ScipyGMRES()
    prob.root.add('comp', SimpleImplicitComp())
    prob.root.add('p1', IndepVarComp('x', 0.5))
    prob.root.connect('p1.x', 'comp.x')

    # NOTE(review): very large step size — presumably deliberate, since
    # complex step should be insensitive to step size; confirm intent.
    prob.root.comp.fd_options['step_size'] = 1.0e4
    prob.root.comp.fd_options['form'] = 'complex_step'

    prob.setup(check=False)
    prob.run()

    data = prob.check_partial_derivatives(out_stream=None)

    for key1, val1 in iteritems(data):
        for key2, val2 in iteritems(val1):
            assert_rel_error(self, val2['abs error'][0], 0.0, 1e-5)
            assert_rel_error(self, val2['abs error'][1], 0.0, 1e-5)
            assert_rel_error(self, val2['abs error'][2], 0.0, 1e-5)
            assert_rel_error(self, val2['rel error'][0], 0.0, 1e-5)
            assert_rel_error(self, val2['rel error'][1], 0.0, 1e-5)
            assert_rel_error(self, val2['rel error'][2], 0.0, 1e-5)
def test_root_derivs_array(self):
    """Derivatives recorded to SqliteDict during an SLSQP run match the
    known Sellar totals on the first iteration."""
    prob = Problem()
    prob.root = SellarDerivativesGrouped()

    prob.driver = ScipyOptimizer()
    prob.driver.options['optimizer'] = 'SLSQP'
    prob.driver.options['tol'] = 1.0e-8
    prob.driver.options['disp'] = False

    prob.driver.add_desvar('z', lower=np.array([-10.0, 0.0]),
                           upper=np.array([10.0, 10.0]))
    prob.driver.add_desvar('x', lower=0.0, upper=10.0)

    prob.driver.add_objective('obj')
    prob.driver.add_constraint('con1', upper=0.0)
    prob.driver.add_constraint('con2', upper=0.0)

    # self.recorder/self.filename/self.tablename_derivs come from setUp.
    prob.driver.add_recorder(self.recorder)
    self.recorder.options['record_metadata'] = False
    self.recorder.options['record_derivs'] = True
    prob.setup(check=False)

    prob.run()

    prob.cleanup()

    db = SqliteDict(self.filename, self.tablename_derivs, flag='r')
    J1 = db['rank0:SLSQP/1']['Derivatives']

    assert_rel_error(self, J1[0][0], 9.61001155, .00001)
    assert_rel_error(self, J1[0][1], 1.78448534, .00001)
    assert_rel_error(self, J1[0][2], 2.98061392, .00001)
    assert_rel_error(self, J1[1][0], -9.61002285, .00001)
    assert_rel_error(self, J1[1][1], -0.78449158, .00001)
    assert_rel_error(self, J1[1][2], -0.98061433, .00001)
    assert_rel_error(self, J1[2][0], 1.94989079, .00001)
    assert_rel_error(self, J1[2][1], 1.0775421, .00001)
    assert_rel_error(self, J1[2][2], 0.09692762, .00001)
def test_apply_linear_adjoint(self):
    """fwd and rev apply_linear products must agree when indexing dparams."""
    # Make sure we can index into dparams

    class Attitude_Angular(Component):
        """ Calculates angular velocity vector from the satellite's orientation
        matrix and its derivative.
        """

        def __init__(self, n=2):
            super(Attitude_Angular, self).__init__()

            self.n = n

            # Inputs
            self.add_param(
                'O_BI', np.zeros((3, 3, n)), units="ft",
                desc=
                "Rotation matrix from body-fixed frame to Earth-centered "
                "inertial frame over time")

            self.add_param('Odot_BI', np.zeros((3, 3, n)), units="km",
                           desc="First derivative of O_BI over time")

            # Outputs
            self.add_output(
                'w_B', np.zeros((3, n)), units="1/s",
                desc="Angular velocity vector in body-fixed frame over time"
            )

            # Pre-allocated Jacobian storage filled by linearize().
            self.dw_dOdot = np.zeros((n, 3, 3, 3))
            self.dw_dO = np.zeros((n, 3, 3, 3))

        def solve_nonlinear(self, params, unknowns, resids):
            """ Calculate output. """

            O_BI = params['O_BI']
            Odot_BI = params['Odot_BI']
            w_B = unknowns['w_B']

            for i in range(0, self.n):
                w_B[0, i] = np.dot(Odot_BI[2, :, i], O_BI[1, :, i])
                w_B[1, i] = np.dot(Odot_BI[0, :, i], O_BI[2, :, i])
                w_B[2, i] = np.dot(Odot_BI[1, :, i], O_BI[0, :, i])

        def linearize(self, params, unknowns, resids):
            """ Calculate and save derivatives. (i.e., Jacobian) """

            O_BI = params['O_BI']
            Odot_BI = params['Odot_BI']

            for i in range(0, self.n):
                self.dw_dOdot[i, 0, 2, :] = O_BI[1, :, i]
                self.dw_dO[i, 0, 1, :] = Odot_BI[2, :, i]

                self.dw_dOdot[i, 1, 0, :] = O_BI[2, :, i]
                self.dw_dO[i, 1, 2, :] = Odot_BI[0, :, i]

                self.dw_dOdot[i, 2, 1, :] = O_BI[0, :, i]
                self.dw_dO[i, 2, 0, :] = Odot_BI[1, :, i]

        def apply_linear(self, params, unknowns, dparams, dunknowns,
                         dresids, mode):
            """ Matrix-vector product with the Jacobian.
            """
            dw_B = dresids['w_B']

            if mode == 'fwd':
                for k in range(3):
                    for i in range(3):
                        for j in range(3):
                            if 'O_BI' in dparams:
                                dw_B[k, :] += self.dw_dO[:, k, i, j] * \
                                    dparams['O_BI'][i, j, :]
                            if 'Odot_BI' in dparams:
                                dw_B[k, :] += self.dw_dOdot[:, k, i, j] * \
                                    dparams['Odot_BI'][i, j, :]

            else:
                for k in range(3):
                    for i in range(3):
                        for j in range(3):
                            if 'O_BI' in dparams:
                                dparams['O_BI'][i, j, :] += self.dw_dO[:, k, i, j] * \
                                    dw_B[k, :]
                            # NOTE(review): `-= -x` is equivalent to `+= x`;
                            # kept exactly as written.
                            if 'Odot_BI' in dparams:
                                dparams['Odot_BI'][i, j, :] -= -self.dw_dOdot[:, k, i, j] * \
                                    dw_B[k, :]

    prob = Problem()
    root = prob.root = Group()
    prob.root.add('comp', Attitude_Angular(n=5), promotes=['*'])
    prob.root.add('p1', IndepVarComp('O_BI', np.ones((3, 3, 5))),
                  promotes=['*'])
    prob.root.add('p2', IndepVarComp('Odot_BI', np.ones((3, 3, 5))),
                  promotes=['*'])

    prob.setup(check=False)
    prob.run()

    indep_list = ['O_BI', 'Odot_BI']
    unknown_list = ['w_B']
    Jf = prob.calc_gradient(indep_list, unknown_list, mode='fwd',
                            return_format='dict')

    indep_list = ['O_BI', 'Odot_BI']
    unknown_list = ['w_B']
    Jr = prob.calc_gradient(indep_list, unknown_list, mode='rev',
                            return_format='dict')

    # Forward and adjoint products must match for a consistent Jacobian.
    for key, val in iteritems(Jr):
        for key2 in val:
            diff = abs(Jf[key][key2] - Jr[key][key2])
            assert_rel_error(self, diff, 0.0, 1e-10)