def test_sparse_jacobian(self):
    """Verify that sparse (rows/cols) declared partials produce correct totals."""

    class SparsePartialComp(ExplicitComponent):
        def setup(self):
            self.add_input('x', shape=(4, ))
            self.add_output('f', shape=(2, ))

            # Sparse partial pattern: only 4 nonzero entries of the 2x4 Jacobian.
            self.declare_partials(of='f', wrt='x',
                                  rows=[0, 1, 1, 1],
                                  cols=[0, 1, 2, 3])

        def compute_partials(self, inputs, partials):
            # Corresponds to the [(0,0), (1,1), (1,2), (1,3)] entries.
            partials['f', 'x'] = [1., 2., 3., 4.]

    model = Group()
    comp = IndepVarComp()
    comp.add_output('x', np.ones(4))

    model.add_subsystem('input', comp)
    model.add_subsystem('example', SparsePartialComp())
    model.connect('input.x', 'example.x')

    problem = Problem(model=model)
    problem.setup(check=False)
    problem.run_model()
    totals = problem.compute_totals(['example.f'], ['input.x'])

    # Dense equivalent of the sparse declaration above.
    assert_rel_error(self, totals['example.f', 'input.x'],
                     [[1., 0., 0., 0.], [0., 2., 3., 4.]])
def test_vectorized(self):
    """Check against the scipy solver."""
    model = Group()

    # Two right-hand sides stacked (vec_size=2); b built so x is the known solution.
    x = np.array([[1, 2, -3], [2, -1, 4]])
    A = np.array([[5.0, -3.0, 2.0], [1.0, 7.0, -4.0], [1.0, 0.0, 8.0]])
    b = np.einsum('jk,ik->ij', A, x)

    model.add_subsystem('p1', IndepVarComp('A', A))
    model.add_subsystem('p2', IndepVarComp('b', b))
    lingrp = model.add_subsystem('lingrp', Group(), promotes=['*'])
    lingrp.add_subsystem('lin', LinearSystemComp(size=3, vec_size=2))

    model.connect('p1.A', 'lin.A')
    model.connect('p2.b', 'lin.b')

    prob = Problem(model)
    prob.setup()

    lingrp.linear_solver = ScipyKrylov()

    prob.set_solver_print(level=0)
    prob.run_model()

    assert_rel_error(self, prob['lin.x'], x, .0001)
    assert_rel_error(self, prob.model._residuals.get_norm(), 0.0, 1e-10)

    # Residuals of the implicit component should be ~0 at the solution.
    model.run_apply_nonlinear()

    with model._scaled_context_all():
        val = model.lingrp.lin._residuals['x']
        assert_rel_error(self, val, np.zeros((2, 3)), tolerance=1e-8)
def test_feature_vectorized_A(self):
    """Feature doc test: vectorized solve with a different A matrix per row of b."""
    import numpy as np

    from openmdao.api import Group, Problem, IndepVarComp
    from openmdao.api import LinearSystemComp, ScipyKrylov

    model = Group()

    A = np.array([[[5.0, -3.0, 2.0], [1.0, 7.0, -4.0], [1.0, 0.0, 8.0]],
                  [[2.0, 3.0, 4.0], [1.0, -1.0, -2.0], [3.0, 2.0, -2.0]]])
    b = np.array([[-5.0, 2.0, 3.0], [-1.0, 1.0, -3.0]])

    model.add_subsystem('p1', IndepVarComp('A', A))
    model.add_subsystem('p2', IndepVarComp('b', b))
    lingrp = model.add_subsystem('lingrp', Group(), promotes=['*'])
    lingrp.add_subsystem('lin', LinearSystemComp(size=3, vec_size=2, vectorize_A=True))

    model.connect('p1.A', 'lin.A')
    model.connect('p2.b', 'lin.b')

    prob = Problem(model)
    prob.setup()

    lingrp.linear_solver = ScipyKrylov()

    prob.run_model()

    assert_rel_error(self, prob['lin.x'],
                     np.array([[-0.78807947,  0.66887417,  0.47350993],
                               [ 0.7       , -1.8       ,  0.75      ]]),
                     .0001)
def test_vectorized_A(self):
    """Check against the scipy solver."""
    model = Group()

    # One A matrix per right-hand side (vectorize_A=True); b built from known x.
    x = np.array([[1, 2, -3], [2, -1, 4]])
    A = np.array([[[5.0, -3.0, 2.0], [1.0, 7.0, -4.0], [1.0, 0.0, 8.0]],
                  [[2.0, 3.0, 4.0], [1.0, -1.0, -2.0], [3.0, 2.0, -2.0]]])
    b = np.einsum('ijk,ik->ij', A, x)

    model.add_subsystem('p1', IndepVarComp('A', A))
    model.add_subsystem('p2', IndepVarComp('b', b))
    lingrp = model.add_subsystem('lingrp', Group(), promotes=['*'])
    lingrp.add_subsystem('lin', LinearSystemComp(size=3, vec_size=2, vectorize_A=True))

    model.connect('p1.A', 'lin.A')
    model.connect('p2.b', 'lin.b')

    prob = Problem(model)
    prob.setup()

    lingrp.linear_solver = ScipyKrylov()

    prob.set_solver_print(level=0)
    prob.run_model()

    assert_rel_error(self, prob['lin.x'], x, .0001)
    assert_rel_error(self, prob.model._residuals.get_norm(), 0.0, 1e-10)

    # Residuals should vanish at the converged solution.
    model.run_apply_nonlinear()

    with model._scaled_context_all():
        val = model.lingrp.lin._residuals['x']
        assert_rel_error(self, val, np.zeros((2, 3)), tolerance=1e-8)
def test_feature_vectorized(self):
    """Feature doc test: one A matrix shared across multiple right-hand sides."""
    import numpy as np

    from openmdao.api import Group, Problem, IndepVarComp
    from openmdao.api import LinearSystemComp, ScipyKrylov

    model = Group()

    A = np.array([[5.0, -3.0, 2.0], [1.0, 7.0, -4.0], [1.0, 0.0, 8.0]])
    b = np.array([[2.0, -3.0, 4.0], [1.0, 0.0, -1.0]])

    model.add_subsystem('p1', IndepVarComp('A', A))
    model.add_subsystem('p2', IndepVarComp('b', b))
    lingrp = model.add_subsystem('lingrp', Group(), promotes=['*'])
    lingrp.add_subsystem('lin', LinearSystemComp(size=3, vec_size=2))

    model.connect('p1.A', 'lin.A')
    model.connect('p2.b', 'lin.b')

    prob = Problem(model)
    prob.setup()

    lingrp.linear_solver = ScipyKrylov()

    prob.run_model()

    assert_rel_error(self, prob['lin.x'],
                     np.array([[ 0.10596026, -0.16556291,  0.48675497],
                               [ 0.19205298, -0.11258278, -0.14900662]]),
                     .0001)
def test_conflicting_connections(self):
    """Setup must fail when implicit and explicit connections conflict."""
    # verify we get an error if we have conflicting implicit and explicit connections
    root = Group()

    # promoting G1.x will create an implicit connection to G3.x
    # this is a conflict because G3.x (aka G3.C4.x) is already connected
    # to G3.C3.x
    G2 = root.add('G2', Group(), promotes=['x'])  # BAD PROMOTE
    G2.add('C1', IndepVarComp('x', 5.), promotes=['x'])

    G1 = G2.add('G1', Group(), promotes=['x'])
    G1.add('C2', ExecComp('y=x*2.0'), promotes=['x'])

    G3 = root.add('G3', Group(), promotes=['x'])
    G3.add('C3', ExecComp('y=x*2.0'))
    G3.add('C4', ExecComp('y=x*2.0'), promotes=['x'])

    root.connect('G2.G1.C2.y', 'G3.C3.x')
    G3.connect('C3.y', 'x')

    prob = Problem(root)

    try:
        prob.setup(check=False)
    except Exception as error:
        msg = "Target 'G3.C4.x' is connected to multiple unknowns: ['G2.C1.x', 'G3.C3.y']"
        self.assertTrue(msg in str(error))
    else:
        self.fail("Error expected")
def test_array2D_index_connection(self):
    """Gradients through a src_indices connection must pick the indexed Jacobian row."""
    group = Group()
    group.add('x_param', IndepVarComp('x', np.ones((2, 2))), promotes=['*'])
    sub = group.add('sub', Group(), promotes=['*'])
    sub.add('mycomp', ArrayComp2D(), promotes=['x', 'y'])

    # Connect only the flattened element 3 of y to the objective input.
    group.add('obj', ExecComp('b = a'))
    group.connect('y', 'obj.a', src_indices=[3])

    prob = Problem()
    prob.root = group
    prob.root.ln_solver = LinearGaussSeidel()
    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(['x'], ['obj.b'], mode='fwd', return_format='dict')
    Jbase = prob.root.sub.mycomp._jacobian_cache
    # Row 3 of dy/dx is what flows through src_indices=[3].
    assert_rel_error(self, Jbase[('y', 'x')][3][0], J['obj.b']['x'][0][0], 1e-8)
    assert_rel_error(self, Jbase[('y', 'x')][3][1], J['obj.b']['x'][0][1], 1e-8)
    assert_rel_error(self, Jbase[('y', 'x')][3][2], J['obj.b']['x'][0][2], 1e-8)
    assert_rel_error(self, Jbase[('y', 'x')][3][3], J['obj.b']['x'][0][3], 1e-8)

    J = prob.calc_gradient(['x'], ['obj.b'], mode='rev', return_format='dict')
    Jbase = prob.root.sub.mycomp._jacobian_cache
    assert_rel_error(self, Jbase[('y', 'x')][3][0], J['obj.b']['x'][0][0], 1e-8)
    assert_rel_error(self, Jbase[('y', 'x')][3][1], J['obj.b']['x'][0][1], 1e-8)
    assert_rel_error(self, Jbase[('y', 'x')][3][2], J['obj.b']['x'][0][2], 1e-8)
    assert_rel_error(self, Jbase[('y', 'x')][3][3], J['obj.b']['x'][0][3], 1e-8)
def test_guess_nonlinear_resids_read_only(self):
    """Writing to resids inside guess_nonlinear must raise a read-only ValueError."""

    class ImpWithInitial(ImplicitComponent):
        def setup(self):
            self.add_input('x', 3.0)
            self.add_output('y', 4.0)

        def guess_nonlinear(self, inputs, outputs, resids):
            # inputs is read_only, should not be allowed
            resids['y'] = 0.

    group = Group()
    group.add_subsystem('px', IndepVarComp('x', 77.0))
    group.add_subsystem('comp1', ImpWithInitial())
    group.add_subsystem('comp2', ImpWithInitial())
    group.connect('px.x', 'comp1.x')
    group.connect('comp1.y', 'comp2.x')

    group.nonlinear_solver = NewtonSolver()
    group.nonlinear_solver.options['maxiter'] = 1

    prob = Problem(model=group)
    prob.set_solver_print(level=0)
    prob.setup(check=False)

    with self.assertRaises(ValueError) as cm:
        prob.run_model()

    self.assertEqual(str(cm.exception),
                     "Attempt to set value of 'y' in residual vector "
                     "when it is read only.")
def test_linear_system(self):
    """Check against the scipy solver."""
    model = Group()

    # Build b from a known x so the solver's answer can be checked exactly.
    x = np.array([1, 2, -3])
    A = np.array([[5.0, -3.0, 2.0], [1.0, 7.0, -4.0], [1.0, 0.0, 8.0]])
    b = A.dot(x)

    model.add_subsystem('p1', IndepVarComp('A', A))
    model.add_subsystem('p2', IndepVarComp('b', b))
    lingrp = model.add_subsystem('lingrp', Group(), promotes=['*'])
    lingrp.add_subsystem('lin', LinearSystemComp(size=3, partial_type="matrix_free"))

    model.connect('p1.A', 'lin.A')
    model.connect('p2.b', 'lin.b')

    prob = Problem(model)
    prob.setup()

    lingrp.linear_solver = ScipyKrylov()

    prob.set_solver_print(level=0)
    prob.run_model()

    assert_rel_error(self, prob['lin.x'], x, .0001)
    assert_rel_error(self, prob.model._residuals.get_norm(), 0.0, 1e-10)
def test_guess_nonlinear_inputs_read_only_reset(self):
    """read_only flags must be reset after guess_nonlinear raises AnalysisError."""

    class ImpWithInitial(ImplicitComponent):
        def setup(self):
            self.add_input('x', 3.0)
            self.add_output('y', 4.0)

        def guess_nonlinear(self, inputs, outputs, resids):
            raise AnalysisError("It's just a scratch.")

    group = Group()
    group.add_subsystem('px', IndepVarComp('x', 77.0))
    group.add_subsystem('comp1', ImpWithInitial())
    group.add_subsystem('comp2', ImpWithInitial())
    group.connect('px.x', 'comp1.x')
    group.connect('comp1.y', 'comp2.x')

    group.nonlinear_solver = NewtonSolver()
    group.nonlinear_solver.options['maxiter'] = 1

    prob = Problem(model=group)
    prob.set_solver_print(level=0)
    prob.setup(check=False)

    with self.assertRaises(AnalysisError):
        prob.run_model()

    # verify read_only status is reset after AnalysisError
    prob['comp1.x'] = 111.
def test_src_indices(self):
    """Each target must receive only the slice of the source selected by src_indices."""
    size = 10

    root = Group()

    root.add('P1', IndepVarComp('x', np.zeros(size)))
    root.add('C1', ExecComp('y = x * 2.', y=np.zeros(size // 2), x=np.zeros(size // 2)))
    root.add('C2', ExecComp('y = x * 3.', y=np.zeros(size // 2), x=np.zeros(size // 2)))

    # C1 gets the first half of P1.x, C2 the second half.
    root.connect('P1.x', "C1.x", src_indices=list(range(size // 2)))
    root.connect('P1.x', "C2.x", src_indices=list(range(size // 2, size)))

    prob = Problem(root)
    prob.setup(check=False)

    root.P1.unknowns['x'][0:size // 2] += 1.0
    root.P1.unknowns['x'][size // 2:size] -= 1.0

    prob.run()

    assert_rel_error(self, root.C1.params['x'], np.ones(size // 2), 0.0001)
    assert_rel_error(self, root.C2.params['x'], -np.ones(size // 2), 0.0001)
def test_feature_vectorized(self):
    """Feature doc test: one A matrix shared across multiple right-hand sides."""
    import numpy as np

    from openmdao.api import Group, Problem, IndepVarComp
    from openmdao.api import LinearSystemComp, ScipyKrylov

    model = Group()

    A = np.array([[5.0, -3.0, 2.0], [1.0, 7.0, -4.0], [1.0, 0.0, 8.0]])
    b = np.array([[2.0, -3.0, 4.0], [1.0, 0.0, -1.0]])

    model.add_subsystem('p1', IndepVarComp('A', A))
    model.add_subsystem('p2', IndepVarComp('b', b))
    lingrp = model.add_subsystem('lingrp', Group(), promotes=['*'])
    lingrp.add_subsystem('lin', LinearSystemComp(size=3, vec_size=2))

    model.connect('p1.A', 'lin.A')
    model.connect('p2.b', 'lin.b')

    prob = Problem(model)
    prob.setup()

    lingrp.linear_solver = ScipyKrylov()

    prob.run_model()

    assert_rel_error(self, prob['lin.x'],
                     np.array([[0.10596026, -0.16556291, 0.48675497],
                               [0.19205298, -0.11258278, -0.14900662]]),
                     .0001)
def test_feature_vectorized_A(self):
    """Feature doc test: vectorized solve with a different A matrix per row of b."""
    import numpy as np

    from openmdao.api import Group, Problem, IndepVarComp
    from openmdao.api import LinearSystemComp, ScipyKrylov

    model = Group()

    A = np.array([[[5.0, -3.0, 2.0], [1.0, 7.0, -4.0], [1.0, 0.0, 8.0]],
                  [[2.0, 3.0, 4.0], [1.0, -1.0, -2.0], [3.0, 2.0, -2.0]]])
    b = np.array([[-5.0, 2.0, 3.0], [-1.0, 1.0, -3.0]])

    model.add_subsystem('p1', IndepVarComp('A', A))
    model.add_subsystem('p2', IndepVarComp('b', b))
    lingrp = model.add_subsystem('lingrp', Group(), promotes=['*'])
    lingrp.add_subsystem('lin', LinearSystemComp(size=3, vec_size=2, vectorize_A=True))

    model.connect('p1.A', 'lin.A')
    model.connect('p2.b', 'lin.b')

    prob = Problem(model)
    prob.setup()

    lingrp.linear_solver = ScipyKrylov()

    prob.run_model()

    assert_rel_error(self, prob['lin.x'],
                     np.array([[-0.78807947, 0.66887417, 0.47350993],
                               [0.7, -1.8, 0.75]]),
                     .0001)
def test_feature_basic(self):
    """Feature doc test: basic single solve of A x = b with LinearSystemComp."""
    import numpy as np

    from openmdao.api import Group, Problem, IndepVarComp
    from openmdao.api import LinearSystemComp, ScipyKrylov

    model = Group()

    A = np.array([[5.0, -3.0, 2.0], [1.0, 7.0, -4.0], [1.0, 0.0, 8.0]])
    b = np.array([1.0, 2.0, -3.0])

    model.add_subsystem('p1', IndepVarComp('A', A))
    model.add_subsystem('p2', IndepVarComp('b', b))
    lingrp = model.add_subsystem('lingrp', Group(), promotes=['*'])
    lingrp.add_subsystem('lin', LinearSystemComp(size=3))

    model.connect('p1.A', 'lin.A')
    model.connect('p2.b', 'lin.b')

    prob = Problem(model)
    prob.setup()

    lingrp.linear_solver = ScipyKrylov()

    prob.run_model()

    assert_rel_error(self, prob['lin.x'],
                     np.array([0.36423841, -0.00662252, -0.4205298]),
                     .0001)
def test_conflicting_connections(self):
    """Setup must fail with an exact message when implicit/explicit connections conflict."""
    # verify we get an error if we have conflicting implicit and explicit connections
    root = Group()

    # promoting G1.x will create an implicit connection to G3.x
    # this is a conflict because G3.x (aka G3.C4.x) is already connected
    # to G3.C3.x
    G2 = root.add('G2', Group(), promotes=['x'])  # BAD PROMOTE
    G2.add('C1', IndepVarComp('x', 5.), promotes=['x'])

    G1 = G2.add('G1', Group(), promotes=['x'])
    G1.add('C2', ExecComp('y=x*2.0'), promotes=['x'])

    G3 = root.add('G3', Group(), promotes=['x'])
    G3.add('C3', ExecComp('y=x*2.0'))
    G3.add('C4', ExecComp('y=x*2.0'), promotes=['x'])

    root.connect('G2.G1.C2.y', 'G3.C3.x')
    G3.connect('C3.y', 'x')

    prob = Problem(root)

    try:
        prob.setup(check=False)
    except Exception as error:
        msg = "Target 'G3.C4.x' is connected to multiple unknowns: ['G2.C1.x', 'G3.C3.y']"
        self.assertEqual(text_type(error), msg)
    else:
        self.fail("Error expected")
def test_guess_nonlinear_resids_read_only(self):
    """Writing to resids inside guess_nonlinear must raise a read-only ValueError."""

    class ImpWithInitial(ImplicitComponent):
        def setup(self):
            self.add_input('x', 3.0)
            self.add_output('y', 4.0)

        def guess_nonlinear(self, inputs, outputs, resids):
            # inputs is read_only, should not be allowed
            resids['y'] = 0.

    group = Group()
    group.add_subsystem('px', IndepVarComp('x', 77.0))
    group.add_subsystem('comp1', ImpWithInitial())
    group.add_subsystem('comp2', ImpWithInitial())
    group.connect('px.x', 'comp1.x')
    group.connect('comp1.y', 'comp2.x')

    group.nonlinear_solver = NewtonSolver()
    group.nonlinear_solver.options['maxiter'] = 1

    prob = Problem(model=group)
    prob.set_solver_print(level=0)
    prob.setup(check=False)

    with self.assertRaises(ValueError) as cm:
        prob.run_model()

    self.assertEqual(str(cm.exception),
                     "Attempt to set value of 'y' in residual vector "
                     "when it is read only.")
def test(self):
    """Run the generic component check on the Forces component."""
    surfaces = get_default_surfaces()

    group = Group()

    comp = Forces(surfaces=surfaces)

    indep_var_comp = IndepVarComp()

    # Panel widths: one value per spanwise panel (num_y - 1) for each surface.
    indep_var_comp.add_output('wing_widths', val=np.ones((surfaces[0]['num_y'] - 1)), units='m')
    indep_var_comp.add_output('tail_widths', val=np.ones((surfaces[1]['num_y'] - 1)), units='m')
    indep_var_comp.add_output('M', val=.3)

    group.add_subsystem('indep_var_comp', indep_var_comp)
    group.add_subsystem('forces', comp)

    group.connect('indep_var_comp.wing_widths', 'forces.wing_widths')
    group.connect('indep_var_comp.tail_widths', 'forces.tail_widths')
    group.connect('indep_var_comp.M', 'forces.M')

    run_test(self, group)
def test_feature_basic(self):
    """Feature doc test: basic single solve of A x = b with LinearSystemComp."""
    import numpy as np

    from openmdao.api import Group, Problem, IndepVarComp
    from openmdao.api import LinearSystemComp, ScipyKrylov

    model = Group()

    A = np.array([[5.0, -3.0, 2.0], [1.0, 7.0, -4.0], [1.0, 0.0, 8.0]])
    b = np.array([1.0, 2.0, -3.0])

    model.add_subsystem('p1', IndepVarComp('A', A))
    model.add_subsystem('p2', IndepVarComp('b', b))
    lingrp = model.add_subsystem('lingrp', Group(), promotes=['*'])
    lingrp.add_subsystem('lin', LinearSystemComp(size=3))

    model.connect('p1.A', 'lin.A')
    model.connect('p2.b', 'lin.b')

    prob = Problem(model)
    prob.setup()

    lingrp.linear_solver = ScipyKrylov()

    prob.run_model()

    assert_rel_error(self, prob['lin.x'],
                     np.array([0.36423841, -0.00662252, -0.4205298 ]),
                     .0001)
def test_sparse_jacobian(self):
    """Feature doc test: declaring sparse partials via rows/cols."""
    import numpy as np

    from openmdao.api import Problem, Group, IndepVarComp, ExplicitComponent

    class SparsePartialComp(ExplicitComponent):
        def setup(self):
            self.add_input('x', shape=(4,))
            self.add_output('f', shape=(2,))

            # Only 4 of the 8 Jacobian entries are nonzero.
            self.declare_partials(of='f', wrt='x',
                                  rows=[0, 1, 1, 1],
                                  cols=[0, 1, 2, 3])

        def compute_partials(self, inputs, partials):
            # Corresponds to the [(0,0), (1,1), (1,2), (1,3)] entries.
            partials['f', 'x'] = [1., 2., 3., 4.]

    model = Group()
    comp = IndepVarComp()
    comp.add_output('x', np.ones(4))

    model.add_subsystem('input', comp)
    model.add_subsystem('example', SparsePartialComp())
    model.connect('input.x', 'example.x')

    problem = Problem(model=model)
    problem.setup(check=False)
    problem.run_model()
    totals = problem.compute_totals(['example.f'], ['input.x'])

    assert_rel_error(self, totals['example.f', 'input.x'],
                     [[1., 0., 0., 0.], [0., 2., 3., 4.]])
def test_linear_system(self):
    """Check against the scipy solver."""
    model = Group()

    # b is constructed from a known x so the answer can be verified exactly.
    x = np.array([1, 2, -3])
    A = np.array([[5.0, -3.0, 2.0], [1.0, 7.0, -4.0], [1.0, 0.0, 8.0]])
    b = A.dot(x)

    model.add_subsystem('p1', IndepVarComp('A', A))
    model.add_subsystem('p2', IndepVarComp('b', b))
    lingrp = model.add_subsystem('lingrp', Group(), promotes=['*'])
    lingrp.add_subsystem('lin', LinearSystemComp(size=3, partial_type="matrix_free"))

    model.connect('p1.A', 'lin.A')
    model.connect('p2.b', 'lin.b')

    prob = Problem(model)
    prob.setup()

    lingrp.linear_solver = ScipyKrylov()

    prob.set_solver_print(level=0)
    prob.run_model()

    assert_rel_error(self, prob['lin.x'], x, .0001)
    assert_rel_error(self, prob.model._residuals.get_norm(), 0.0, 1e-10)
def _parse_problem_table(self, group: om.Group, table: dict):
    """
    Feeds provided *group*, using definition in provided TOML *table*.

    :param group: the OpenMDAO Group to populate
    :param table: TOML table describing components, sub-groups, connections
                  and plain attributes
    """
    # assert isinstance(table, dict), "table should be a dictionary"

    for key, value in table.items():
        if isinstance(value, dict):
            # value defines a sub-component
            if KEY_COMPONENT_ID in value:
                # It is a non-group component, that should be registered with its ID
                options = value.copy()
                identifier = options.pop(KEY_COMPONENT_ID)

                # Process option values that are relative paths
                conf_dirname = pth.dirname(self._conf_file)
                for name, option_value in options.items():
                    # Heuristic: option names ending with these suffixes hold paths.
                    option_is_path = (
                        name.endswith("file")
                        or name.endswith("path")
                        or name.endswith("dir")
                        or name.endswith("directory")
                        or name.endswith("folder")
                    )
                    if (
                        isinstance(option_value, str)
                        and option_is_path
                        and not pth.isabs(option_value)
                    ):
                        # Make the path absolute, relative to the conf file location.
                        options[name] = pth.join(conf_dirname, option_value)

                sub_component = RegisterOpenMDAOSystem.get_system(identifier, options=options)
                group.add_subsystem(key, sub_component, promotes=["*"])
            else:
                # It is a Group
                sub_component = group.add_subsystem(key, om.Group(), promotes=["*"])
                try:
                    # Recurse into the sub-group definition.
                    self._parse_problem_table(sub_component, value)
                except FASTConfigurationBadOpenMDAOInstructionError as err:
                    # There has been an error while parsing an attribute.
                    # Error is relayed with key added for context
                    raise FASTConfigurationBadOpenMDAOInstructionError(err, key)
        elif key == KEY_CONNECTION_ID and isinstance(value, list):
            # a list of dict currently defines only connections
            for connection_def in value:
                group.connect(connection_def["source"], connection_def["target"])
        else:
            # value is an attribute of current component and will be literally interpreted
            try:
                setattr(group, key, _om_eval(str(value)))  # pylint:disable=eval-used
            except Exception as err:
                raise FASTConfigurationBadOpenMDAOInstructionError(err, key, value)
def test_fd_options(self):
    """Feature doc test: per-variable FD options (form, step) on declare_partials."""
    import numpy as np

    from openmdao.api import Problem, Group, IndepVarComp, ExplicitComponent

    class FDPartialComp(ExplicitComponent):
        def setup(self):
            self.add_input('x', shape=(4, ))
            self.add_input('y', shape=(2, ))
            self.add_input('y2', shape=(2, ))
            self.add_output('f', shape=(2, ))

            # 'y*' glob matches both y and y2; x gets its own FD settings.
            self.declare_partials('f', 'y*', method='fd', form='backward', step=1e-6)
            self.declare_partials('f', 'x', method='fd', form='central', step=1e-4)

        def compute(self, inputs, outputs):
            f = outputs['f']

            x = inputs['x']
            y = inputs['y']

            f[0] = x[0] + y[0]
            f[1] = np.dot([0, 2, 3, 4], x) + y[1]

    model = Group()
    comp = IndepVarComp()
    comp.add_output('x', np.ones(4))
    comp.add_output('y', np.ones(2))

    model.add_subsystem('input', comp)
    model.add_subsystem('example', FDPartialComp())

    model.connect('input.x', 'example.x')
    model.connect('input.y', 'example.y')

    problem = Problem(model=model)
    problem.setup(check=False)
    problem.run_model()
    totals = problem.compute_totals(['example.f'], ['input.x', 'input.y'])

    assert_rel_error(self, totals['example.f', 'input.x'],
                     [[1., 0., 0., 0.], [0., 2., 3., 4.]], tolerance=1e-8)
    assert_rel_error(self, totals['example.f', 'input.y'],
                     [[1., 0.], [0., 1.]], tolerance=1e-8)
def test_multiple_connect_alt(self):
    """Passing a target string as the third connect() arg must raise a helpful TypeError."""
    root = Group()
    C1 = root.add("C1", ExecComp("y=x*2.0"))
    C2 = root.add("C2", ExecComp("y=x*2.0"))
    C3 = root.add("C3", ExecComp("y=x*2.0"))

    with self.assertRaises(TypeError) as err:
        # Third positional arg is src_indices, not another target.
        root.connect("C1.y", "C2.x", "C3.x")

    msg = "src_indices must be an index array, did you mean connect('C1.y', ['C2.x', 'C3.x'])?"

    self.assertEqual(msg, str(err.exception))
def test_multiple_connect_alt(self):
    """Passing a target string as the third connect() arg must raise a helpful TypeError."""
    root = Group()
    C1 = root.add('C1', ExecComp('y=x*2.0'))
    C2 = root.add('C2', ExecComp('y=x*2.0'))
    C3 = root.add('C3', ExecComp('y=x*2.0'))

    with self.assertRaises(TypeError) as err:
        # Third positional arg is src_indices, not another target.
        root.connect('C1.y', 'C2.x', 'C3.x')

    msg = "src_indices must be an index array, did you mean connect('C1.y', ['C2.x', 'C3.x'])?"

    self.assertEqual(msg, str(err.exception))
def test_calc_gradient(self):
    """calc_gradient through a SubProblem must agree with expectedJ in all modes/formats."""
    root = Group()
    root.add('indep', IndepVarComp('x', np.array([1., 1., 1., 1.])))
    root.add('comp', RosenSuzuki())
    root.connect('indep.x', 'comp.x')

    subprob = Problem(root)
    subprob.driver.add_desvar('indep.x', lower=-10, upper=99)
    subprob.driver.add_objective('comp.f')
    subprob.driver.add_constraint('comp.g', upper=0.)

    prob = Problem(root=Group())
    prob.root.add('desvars', IndepVarComp('x', np.ones(4)))
    prob.root.add('subprob', SubProblem(subprob,
                                        params=['indep.x'],
                                        unknowns=['comp.f', 'comp.g']))
    prob.root.connect('desvars.x', 'subprob.indep.x')

    prob.setup(check=False)
    prob.run()

    indep_list = ['desvars.x']
    unknown_list = ['subprob.comp.f', 'subprob.comp.g']

    # check that calc_gradient returns proper dict value when mode is 'fwd'
    J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')
    assert_almost_equal(J['subprob.comp.f']['desvars.x'],
                        expectedJ['subprob.comp.f']['desvars.x'])
    assert_almost_equal(J['subprob.comp.g']['desvars.x'],
                        expectedJ['subprob.comp.g']['desvars.x'])

    # check that calc_gradient returns proper array value when mode is 'fwd'
    J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='array')
    assert_almost_equal(J, expectedJ_array)

    # check that calc_gradient returns proper dict value when mode is 'rev'
    J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')
    assert_almost_equal(J['subprob.comp.f']['desvars.x'],
                        expectedJ['subprob.comp.f']['desvars.x'])
    assert_almost_equal(J['subprob.comp.g']['desvars.x'],
                        expectedJ['subprob.comp.g']['desvars.x'])

    # check that calc_gradient returns proper array value when mode is 'rev'
    J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='array')
    assert_almost_equal(J, expectedJ_array)

    # check that calc_gradient returns proper dict value when mode is 'fd'
    J = prob.calc_gradient(indep_list, unknown_list, mode='fd', return_format='dict')
    assert_almost_equal(J['subprob.comp.f']['desvars.x'],
                        expectedJ['subprob.comp.f']['desvars.x'], decimal=5)
    assert_almost_equal(J['subprob.comp.g']['desvars.x'],
                        expectedJ['subprob.comp.g']['desvars.x'], decimal=5)

    # check that calc_gradient returns proper array value when mode is 'fd'
    J = prob.calc_gradient(indep_list, unknown_list, mode='fd', return_format='array')
    assert_almost_equal(J, expectedJ_array, decimal=5)
def test_calc_gradient_with_qoi_indices(self):
    """calc_gradient must honor constraint `indices`, returning only the selected rows."""
    q_idxs = [0, 2]

    root = Group()
    root.add('parm', IndepVarComp('x', np.array([1., 1., 1., 1.])))
    root.add('comp', RosenSuzuki())
    root.connect('parm.x', 'comp.x')

    prob = Problem(root)
    prob.driver.add_desvar('parm.x', lower=-10, upper=99)
    prob.driver.add_objective('comp.f')
    # Only rows 0 and 2 of the constraint are of interest.
    prob.driver.add_constraint('comp.g', upper=0., indices=q_idxs)

    prob.setup(check=False)
    prob.run()

    indep_list = ['parm.x']
    unknown_list = ['comp.f', 'comp.g']

    # override expected array value to reflect qoi indices
    expectedJ_array = np.concatenate((
        expectedJ['comp.f']['parm.x'],
        expectedJ['comp.g']['parm.x'][q_idxs, :]
    ))

    # check that calc_gradient returns proper dict value when mode is 'fwd'
    J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')
    assert_almost_equal(J['comp.f']['parm.x'], expectedJ['comp.f']['parm.x'])
    assert_almost_equal(J['comp.g']['parm.x'], expectedJ['comp.g']['parm.x'][q_idxs, :])

    # check that calc_gradient returns proper array value when mode is 'fwd'
    J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='array')
    assert_almost_equal(J, expectedJ_array)

    # check that calc_gradient returns proper dict value when mode is 'rev'
    J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')
    assert_almost_equal(J['comp.f']['parm.x'], expectedJ['comp.f']['parm.x'])
    assert_almost_equal(J['comp.g']['parm.x'], expectedJ['comp.g']['parm.x'][q_idxs, :])

    # check that calc_gradient returns proper array value when mode is 'rev'
    J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='array')
    assert_almost_equal(J, expectedJ_array)

    # check that calc_gradient returns proper dict value when mode is 'fd'
    J = prob.calc_gradient(indep_list, unknown_list, mode='fd', return_format='dict')
    assert_almost_equal(J['comp.f']['parm.x'],
                        expectedJ['comp.f']['parm.x'], decimal=5)
    assert_almost_equal(J['comp.g']['parm.x'],
                        expectedJ['comp.g']['parm.x'][q_idxs, :], decimal=5)

    # check that calc_gradient returns proper array value when mode is 'fd'
    J = prob.calc_gradient(indep_list, unknown_list, mode='fd', return_format='array')
    assert_almost_equal(J, expectedJ_array, decimal=5)
def setUp(self):
    """Build and solve a shared two-QuadraticComp model, exposed via global `prob`."""
    from openmdao.api import Group, Problem, IndepVarComp
    from openmdao.core.tests.test_impl_comp import QuadraticComp

    group = Group()

    comp1 = group.add_subsystem('comp1', IndepVarComp())
    comp1.add_output('a', 1.0)
    comp1.add_output('b', 1.0)
    comp1.add_output('c', 1.0)

    sub = group.add_subsystem('sub', Group())
    sub.add_subsystem('comp2', QuadraticComp())
    sub.add_subsystem('comp3', QuadraticComp())

    # Both quadratic comps share the same a, b, c coefficients.
    group.connect('comp1.a', 'sub.comp2.a')
    group.connect('comp1.b', 'sub.comp2.b')
    group.connect('comp1.c', 'sub.comp2.c')

    group.connect('comp1.a', 'sub.comp3.a')
    group.connect('comp1.b', 'sub.comp3.b')
    group.connect('comp1.c', 'sub.comp3.c')

    global prob
    prob = Problem(model=group)
    prob.setup()

    # x^2 - 4x + 3 = 0
    prob['comp1.a'] = 1.
    prob['comp1.b'] = -4.
    prob['comp1.c'] = 3.
    prob.run_model()
def test_array_to_scalar(self):
    """A single-element src_indices connection must feed a scalar input."""
    root = Group()

    root.add('P1', IndepVarComp('x', np.array([2., 3.])))
    root.add('C1', SimpleComp())
    root.add('C2', ExecComp('y = x * 3.', y=0., x=0.))

    # Pick one element of the array source for each scalar target.
    root.connect('P1.x', 'C1.x', src_indices=[0, ])
    root.connect('P1.x', 'C2.x', src_indices=[1, ])

    prob = Problem(root)
    prob.setup(check=False)
    prob.run()

    self.assertAlmostEqual(root.C1.params['x'], 2.)
    self.assertAlmostEqual(root.C2.params['x'], 3.)
def test_multiple_connect(self):
    """connect() with a list of targets must register one connection per target."""
    root = Group()
    C1 = root.add("C1", ExecComp("y=x*2.0"))
    C2 = root.add("C2", ExecComp("y=x*2.0"))
    C3 = root.add("C3", ExecComp("y=x*2.0"))

    root.connect("C1.y", ["C2.x", "C3.x"])

    prob = Problem()
    root._init_sys_data("", prob._probdata)
    params_dict, unknowns_dict = root._setup_variables()

    # verify we get correct connection information
    connections = root._get_explicit_connections()
    expected_connections = {
        "C2.x": [("C1.y", None)],
        "C3.x": [("C1.y", None)],
    }
    self.assertEqual(connections, expected_connections)
def test_linear_system(self):
    """Solve A x = b with the legacy LinearSystem and verify solution and gradients."""
    root = Group()

    lingrp = root.add('lingrp', Group(), promotes=['*'])
    lingrp.add('lin', LinearSystem(3))
    lingrp.ln_solver = ScipyGMRES()

    # b built from a known x so the answer is exact.
    x = np.array([1, 2, -3])
    A = np.array([[5.0, -3.0, 2.0], [1.0, 7.0, -4.0], [1.0, 0.0, 8.0]])
    b = A.dot(x)

    root.add('p1', IndepVarComp('A', A))
    root.add('p2', IndepVarComp('b', b))
    root.connect('p1.A', 'lin.A')
    root.connect('p2.b', 'lin.b')

    prob = Problem(root)
    prob.setup(check=False)
    prob.run()

    # Make sure it gets the right answer
    assert_rel_error(self, prob['lin.x'], x, .0001)
    assert_rel_error(self, np.linalg.norm(prob.root.resids.vec), 0.0, 1e-10)

    # Compare against calculated derivs
    # dx/dA = -A^-1 x (outer product, flattened); dx/db = A^-1.
    Ainv = np.linalg.inv(A)
    dx_dA = np.outer(Ainv, -x).reshape(3, 9)
    dx_db = Ainv

    J = prob.calc_gradient(['p1.A', 'p2.b'], ['lin.x'], mode='fwd', return_format='dict')
    assert_rel_error(self, J['lin.x']['p1.A'], dx_dA, .0001)
    assert_rel_error(self, J['lin.x']['p2.b'], dx_db, .0001)

    J = prob.calc_gradient(['p1.A', 'p2.b'], ['lin.x'], mode='rev', return_format='dict')
    assert_rel_error(self, J['lin.x']['p1.A'], dx_dA, .0001)
    assert_rel_error(self, J['lin.x']['p2.b'], dx_db, .0001)

    J = prob.calc_gradient(['p1.A', 'p2.b'], ['lin.x'], mode='fd', return_format='dict')
    assert_rel_error(self, J['lin.x']['p1.A'], dx_dA, .0001)
    assert_rel_error(self, J['lin.x']['p2.b'], dx_db, .0001)
def test2(self):
    """Run the generic component check on MomentCoefficient with S_ref_total set."""
    surfaces = get_default_surfaces()

    group = Group()

    comp = MomentCoefficient(surfaces=surfaces)

    indep_var_comp = IndepVarComp()
    indep_var_comp.add_output('S_ref_total', val=1e4, units='m**2')

    group.add_subsystem('moment_calc', comp)
    group.add_subsystem('indep_var_comp', indep_var_comp)

    group.connect('indep_var_comp.S_ref_total', 'moment_calc.S_ref_total')

    run_test(self, group)
def test_constraint_on_distrib_output(self):
    """A constraint on a distributed output must raise NotImplementedError at setup."""
    # this test should be removed once distributed outputs are able to be used as constraints
    # this tests a temporary fix for issue #1331
    prob = Problem()
    model = Group()

    model.add_subsystem('dvs', IndepVarComp('x', val=1.0 * np.ones((2, ))))
    model.add_subsystem('distcomp', DistribInputDistribOutputComp(arr_size=2))
    model.add_subsystem('sum', ExecComp('y = sum(x)', x=np.ones((2, ))))

    model.connect('dvs.x', 'distcomp.invec')
    model.connect('distcomp.outvec', 'sum.x')

    prob.model = model

    prob.model.add_design_var('dvs.x', lower=-100, upper=100)
    prob.model.add_objective('sum.y')
    # Constraint on the distributed output should trigger the error.
    prob.model.add_constraint('distcomp.outvec', lower=-10.5)

    with self.assertRaises(NotImplementedError) as context:
        prob.setup()
def test(self):
    """Run the generic component check on RadiusComp fed by the surface mesh."""
    surfaces = get_default_surfaces()

    group = Group()

    comp = RadiusComp(surface=surfaces[0])

    indep_var_comp = IndepVarComp()
    indep_var_comp.add_output('mesh', val=surfaces[0]['mesh'], units='m')

    group.add_subsystem('radius', comp)
    group.add_subsystem('indep_var_comp', indep_var_comp)

    group.connect('indep_var_comp.mesh', 'radius.mesh')

    run_test(self, group)
def test_scalar_guess_func_using_outputs(self):
    """A guess_func reading outputs must steer Newton toward the intended root."""
    # Implicitly solve -(ax^2 + bx) = c using a BalanceComp.
    # For a=1, b=-4 and c=3, there are solutions at x=1 and x=3.
    # Verify that we can set the guess value (and target a solution) based on outputs.
    ind = IndepVarComp()
    ind.add_output('a', 1)
    ind.add_output('b', -4)
    ind.add_output('c', 3)

    lhs = ExecComp('lhs=-(a*x**2+b*x)')
    bal = BalanceComp()

    def guess_function(inputs, outputs, residuals):
        # Guess above/below the midpoint depending on the current output sign.
        if outputs['x'] < 0:
            return 5.
        else:
            return 0.

    bal.add_balance(name='x', rhs_name='c', guess_func=guess_function)

    model = Group()
    model.add_subsystem('ind_comp', ind, promotes_outputs=['a', 'b', 'c'])
    model.add_subsystem('lhs_comp', lhs, promotes_inputs=['a', 'b', 'x'])
    model.add_subsystem('bal_comp', bal, promotes_inputs=['c'], promotes_outputs=['x'])

    model.connect('lhs_comp.lhs', 'bal_comp.lhs:x')

    model.linear_solver = DirectSolver()
    model.nonlinear_solver = NewtonSolver(maxiter=1000, iprint=0)

    prob = Problem(model)
    prob.setup()

    # initial value of 'x' less than zero, guess should steer us to solution of 3.
    prob['bal_comp.x'] = -1
    prob.run_model()
    assert_almost_equal(prob['bal_comp.x'], 3.0, decimal=7)

    # initial value of 'x' greater than zero, guess should steer us to solution of 1.
    prob['bal_comp.x'] = 99
    prob.run_model()
    assert_almost_equal(prob['bal_comp.x'], 1.0, decimal=7)
def test_sparse_jacobian_in_place(self):
    """Feature doc test: filling sparse partials in place via the returned view."""
    import numpy as np

    from openmdao.api import Problem, Group, IndepVarComp, ExplicitComponent

    class SparsePartialComp(ExplicitComponent):
        def setup(self):
            self.add_input('x', shape=(4, ))
            self.add_output('f', shape=(2, ))

            self.declare_partials(of='f', wrt='x',
                                  rows=[0, 1, 1, 1],
                                  cols=[0, 1, 2, 3])

        def compute_partials(self, inputs, partials):
            # Mutate the sparse data array directly, entry by entry.
            pd = partials['f', 'x']

            # Corresponds to the (0, 0) entry
            pd[0] = 1.

            # (1,1) entry
            pd[1] = 2.

            # (1, 2) entry
            pd[2] = 3.

            # (1, 3) entry
            pd[3] = 4

    model = Group()
    comp = IndepVarComp()
    comp.add_output('x', np.ones(4))

    model.add_subsystem('input', comp)
    model.add_subsystem('example', SparsePartialComp())
    model.connect('input.x', 'example.x')

    problem = Problem(model=model)
    problem.setup(check=False)
    problem.run_model()
    totals = problem.compute_totals(['example.f'], ['input.x'])

    assert_rel_error(self, totals['example.f', 'input.x'],
                     [[1., 0., 0., 0.], [0., 2., 3., 4.]])
def test(self):
    # Smoke test: wire a default surface mesh into VLMGeometry and run the
    # shared check harness.
    surfaces = get_default_surfaces()

    group = Group()

    geom_comp = VLMGeometry(surface=surfaces[0])

    ivc = IndepVarComp()
    ivc.add_output('def_mesh', val=surfaces[0]['mesh'], units='m')

    group.add_subsystem('geom', geom_comp)
    group.add_subsystem('indep_var_comp', ivc)
    group.connect('indep_var_comp.def_mesh', 'geom.def_mesh')

    run_test(self, group)
class Sub(Problem):
    """Sub-problem: minimize the Paraboloid subject to a linear constraint."""

    def __init__(self):
        super(Sub, self).__init__()
        self.root = Group()

        # The Paraboloid component under optimization.
        self.root.add('P', Paraboloid())

        # Independent variables feeding the paraboloid and the constraint.
        self.root.add('p1', IndepVarComp('x', 13.0))
        self.root.add('p2', IndepVarComp('y', -14.0))

        # Constraint equation c = x - y.
        self.root.add('con', ExecComp('c = x - y'))

        # Wire the independent variables into the constraint...
        self.root.connect('p1.x', 'con.x')
        self.root.connect('p2.y', 'con.y')
        # ...and into the paraboloid itself.
        self.root.connect('p1.x', 'P.x')
        self.root.connect('p2.y', 'P.y')

        # COBYLA is gradient-free, so no derivatives are required.
        self.driver = ScipyOptimizer()
        self.driver.options['optimizer'] = 'COBYLA'
        self.driver.options['tol'] = 1.0e-4       # termination tolerance (default 1.0e-6)
        self.driver.options['maxiter'] = 200      # iteration cap (default 200)
        self.driver.opt_settings['rhobeg'] = 1.0  # initial step size (default 1.0)
        # self.driver.opt_settings['catol'] = 0.1  # absolute constraint-violation tolerance

        # Design variables, objective, and constraints.
        self.driver.add_desvar('p1.x', lower=-50, upper=50)
        self.driver.add_objective('P.f_xy')
        self.driver.add_constraint('con.c', lower=15.0)
        # Also bound x via a constraint.  NOTE(review): the original inline
        # comment explaining why was truncated in the source.
        self.driver.add_constraint('p1.x', lower=-50.0, upper=50.0)
def test_assert_no_dict_jacobians_exception_not_expected(self):
    # With a dense assembled jacobian on the model and an assembled
    # DirectSolver, the dict-jacobian check should find nothing to flag.
    model = Group(assembled_jac_type='dense')

    indeps = IndepVarComp()
    indeps.add_output('x', 3.0)
    indeps.add_output('y', -4.0)

    model.add_subsystem('des_vars', indeps)
    model.add_subsystem('parab_comp', Paraboloid())

    model.connect('des_vars.x', 'parab_comp.x')
    model.connect('des_vars.y', 'parab_comp.y')

    problem = Problem(model)
    problem.model.linear_solver = DirectSolver(assemble_jac=True)
    problem.setup(check=False)

    assert_no_dict_jacobians(problem.model, include_self=True, recurse=True)
def test_assert_no_dict_jacobians_exception_not_expected(self):
    # NOTE(review): same method name as another test nearby; if both live in
    # one TestCase the later definition shadows the earlier one.
    model = Group()

    des_vars = IndepVarComp()
    des_vars.add_output('x', 3.0)
    des_vars.add_output('y', -4.0)

    model.add_subsystem('des_vars', des_vars)
    model.add_subsystem('parab_comp', Paraboloid())

    model.connect('des_vars.x', 'parab_comp.x')
    model.connect('des_vars.y', 'parab_comp.y')

    prob = Problem(model)
    # Assign a dense jacobian directly via the model's jacobian attribute.
    prob.model.jacobian = DenseJacobian()
    prob.setup(check=False)

    # A dense jacobian everywhere means no dict jacobians should be found.
    assert_no_dict_jacobians(prob.model, include_self=True, recurse=True)
def test_scalar_with_guess_func_additional_input(self):
    # Solve y = x**2 = 4 with a BalanceComp, then verify that seeding the
    # state from an extra 'guess_x' input reduces the Newton iteration count.
    model = Group(assembled_jac_type='dense')

    balance = BalanceComp()
    balance.add_balance('x')
    # Extra input consumed only by the guess function below.
    balance.add_input('guess_x', val=0.0)

    indeps = IndepVarComp()
    indeps.add_output(name='y_tgt', val=4)
    indeps.add_output(name='guess_x', val=2.5)

    exec_comp = ExecComp('y=x**2', x={'value': 1}, y={'value': 1})

    model.add_subsystem(name='ivc', subsys=indeps,
                        promotes_outputs=['y_tgt', 'guess_x'])
    model.add_subsystem(name='exec', subsys=exec_comp)
    model.add_subsystem(name='balance', subsys=balance)

    model.connect('guess_x', 'balance.guess_x')
    model.connect('y_tgt', 'balance.rhs:x')
    model.connect('balance.x', 'exec.x')
    model.connect('exec.y', 'balance.lhs:x')

    model.linear_solver = DirectSolver(assemble_jac=True)
    model.nonlinear_solver = NewtonSolver(maxiter=100, iprint=0)

    prob = Problem(model)
    prob.setup()

    # Baseline run: no guess function.
    prob['balance.x'] = .5
    prob.run_model()
    assert_almost_equal(prob['balance.x'], 2.0, decimal=7)

    iters_no_guess = model.nonlinear_solver._iter_count

    # Same initial value, but now seed the state from 'guess_x'.
    def guess_function(inputs, outputs, resids):
        outputs['x'] = inputs['guess_x']

    balance.options['guess_func'] = guess_function

    prob['balance.x'] = .5
    prob.run_model()
    assert_almost_equal(prob['balance.x'], 2.0, decimal=7)

    iters_with_guess = model.nonlinear_solver._iter_count

    # The guess should make convergence faster.
    self.assertTrue(iters_with_guess < iters_no_guess)
def test_sparse_jacobian_const(self):
    # Constant sparse partials supplied at declaration time, so
    # compute_partials is a no-op.
    import numpy as np
    import scipy as sp

    from openmdao.api import Problem, Group, IndepVarComp, ExplicitComponent

    class SparsePartialComp(ExplicitComponent):
        def setup(self):
            self.add_input('x', shape=(4, ))
            self.add_input('y', shape=(2, ))
            self.add_output('f', shape=(2, ))

            self.declare_partials(of='f', wrt='x',
                                  rows=[0, 1, 1, 1],
                                  cols=[0, 1, 2, 3],
                                  val=[1., 2., 3., 4.])
            self.declare_partials(of='f', wrt='y',
                                  val=sp.sparse.eye(2, format='csc'))

        def compute_partials(self, inputs, partials):
            pass

    model = Group()

    indeps = IndepVarComp()
    indeps.add_output('x', np.ones(4))
    indeps.add_output('y', np.ones(2))

    model.add_subsystem('input', indeps)
    model.add_subsystem('example', SparsePartialComp())

    model.connect('input.x', 'example.x')
    model.connect('input.y', 'example.y')

    problem = Problem(model=model)
    problem.setup(check=False)
    problem.run_model()
    totals = problem.compute_totals(['example.f'], ['input.x', 'input.y'])

    assert_rel_error(self, totals['example.f', 'input.x'],
                     [[1., 0., 0., 0.], [0., 2., 3., 4.]])
    assert_rel_error(self, totals['example.f', 'input.y'],
                     [[1., 0.], [0., 1.]])
def test_fd_options(self):
    # Per-wildcard finite-difference settings: the 'y*' glob covers y and y2
    # with backward differencing, while x gets its own central-difference step.
    import numpy as np

    from openmdao.api import Problem, Group, IndepVarComp, ExplicitComponent

    class FDPartialComp(ExplicitComponent):

        def setup(self):
            self.add_input('x', shape=(4,))
            self.add_input('y', shape=(2,))
            self.add_input('y2', shape=(2,))
            self.add_output('f', shape=(2,))

            self.declare_partials('f', 'y*', method='fd',
                                  form='backward', step=1e-6)
            self.declare_partials('f', 'x', method='fd',
                                  form='central', step=1e-4)

        def compute(self, inputs, outputs):
            f = outputs['f']

            x = inputs['x']
            y = inputs['y']

            f[0] = x[0] + y[0]
            f[1] = np.dot([0, 2, 3, 4], x) + y[1]

    model = Group()

    indeps = IndepVarComp()
    indeps.add_output('x', np.ones(4))
    indeps.add_output('y', np.ones(2))

    model.add_subsystem('input', indeps)
    model.add_subsystem('example', FDPartialComp())

    model.connect('input.x', 'example.x')
    model.connect('input.y', 'example.y')

    problem = Problem(model=model)
    problem.setup(check=False)
    problem.run_model()
    totals = problem.compute_totals(['example.f'], ['input.x', 'input.y'])

    assert_rel_error(self, totals['example.f', 'input.x'],
                     [[1., 0., 0., 0.], [0., 2., 3., 4.]], tolerance=1e-8)
    assert_rel_error(self, totals['example.f', 'input.y'],
                     [[1., 0.], [0., 1.]], tolerance=1e-8)
def test(self):
    # Smoke test: feed a default surface mesh and a t/c distribution into
    # RadiusComp and run the shared check harness.
    surfaces = get_default_surfaces()

    group = Group()

    radius_comp = RadiusComp(surface=surfaces[0])
    ny = surfaces[0]['mesh'].shape[1]

    ivc = IndepVarComp()
    ivc.add_output('mesh', val=surfaces[0]['mesh'], units='m')
    ivc.add_output('t_over_c', val=np.linspace(0.1, 0.5, num=ny - 1))

    group.add_subsystem('radius', radius_comp)
    group.add_subsystem('indep_var_comp', ivc)

    group.connect('indep_var_comp.mesh', 'radius.mesh')
    group.connect('indep_var_comp.t_over_c', 'radius.t_over_c')

    run_test(self, group)
def test_guess_nonlinear_transfer_subbed2(self):
    # Test that data is transferred to a component before calling guess_nonlinear.

    class ImpWithInitial(ImplicitComponent):

        def setup(self):
            self.add_input('x', 3.0)
            self.add_output('y', 4.0)

        def solve_nonlinear(self, inputs, outputs):
            """ Do nothing. """
            pass

        def apply_nonlinear(self, inputs, outputs, resids):
            """ Report a small nonzero residual so the solver iterates. """
            # The original docstring claimed "Do nothing", which was
            # misleading: this residual is what drives the Newton iteration.
            resids['y'] = 1.0e-6

        def guess_nonlinear(self, inputs, outputs, resids):
            # Passthrough: the guess sees the already-transferred input.
            outputs['y'] = inputs['x']

    group = Group()
    sub = Group()

    group.add_subsystem('px', IndepVarComp('x', 77.0))
    sub.add_subsystem('comp1', ImpWithInitial())
    sub.add_subsystem('comp2', ImpWithInitial())
    group.connect('px.x', 'sub.comp1.x')
    group.connect('sub.comp1.y', 'sub.comp2.x')

    group.add_subsystem('sub', sub)

    sub.nonlinear_solver = NewtonSolver()
    sub.nonlinear_solver.options['maxiter'] = 1

    prob = Problem(model=group)
    prob.set_solver_print(level=0)
    prob.setup(check=False)
    prob.run_model()

    # 77.0 must have propagated px -> comp1 -> comp2 via the guesses.
    assert_rel_error(self, prob['sub.comp2.y'], 77., 1e-5)
def test_multiple_connect(self):
    root = Group()
    comp1 = root.add('C1', ExecComp('y=x*2.0'))
    comp2 = root.add('C2', ExecComp('y=x*2.0'))
    comp3 = root.add('C3', ExecComp('y=x*2.0'))

    # One connect call may fan a single source out to several targets.
    root.connect('C1.y', ['C2.x', 'C3.x'])

    prob = Problem()
    root._init_sys_data('', prob._probdata)
    params_dict, unknowns_dict = root._setup_variables()

    # Verify we get correct connection information.
    connections = root._get_explicit_connections()
    expected_connections = {
        'C2.x': [('C1.y', None)],
        'C3.x': [('C1.y', None)],
    }

    self.assertEqual(connections, expected_connections)
def test_multiple_connect(self):
    # NOTE(review): duplicate of another test with the same name; if both
    # live in one TestCase, the later definition shadows the earlier one.
    root = Group()
    root.add('C1', ExecComp('y=x*2.0'))
    root.add('C2', ExecComp('y=x*2.0'))
    root.add('C3', ExecComp('y=x*2.0'))

    # Fan one source out to two targets in a single call.
    root.connect('C1.y', ['C2.x', 'C3.x'])

    prob = Problem()
    root._init_sys_data('', prob._probdata)
    params_dict, unknowns_dict = root._setup_variables()

    # Verify we get correct connection information.
    connections = root._get_explicit_connections()
    expected = {
        'C2.x': [('C1.y', None)],
        'C3.x': [('C1.y', None)],
    }

    self.assertEqual(connections, expected)
def test_pass_through(self):
    group = Group()
    group.add_subsystem('sys1', IndepVarComp('old_length', 1.0,
                                             units='mm', ref=1e5))
    group.add_subsystem('sys2', PassThroughLength())
    group.connect('sys1.old_length', 'sys2.old_length')

    prob = Problem(group)
    prob.setup(check=False)
    prob.set_solver_print(level=0)

    prob['sys1.old_length'] = 3.e5

    prob.final_setup()

    # The set value is readable both through the problem and the raw vector.
    assert_rel_error(self, prob['sys1.old_length'], 3.e5)
    assert_rel_error(self, prob.model._outputs['sys1.old_length'], 3.e5)

    prob.run_model()

    # 3e5 (mm at the source) comes out as 3e-1 downstream — presumably a
    # mm -> km conversion (3e5 mm == 0.3 km); units of new_length are not
    # visible here.
    assert_rel_error(self, prob['sys2.new_length'], 3.e-1)
    assert_rel_error(self, prob.model._outputs['sys2.new_length'], 3.e-1)
def test_duplicate_src_indices(self):
    size = 10

    root = Group()
    root.add('P1', IndepVarComp('x', np.zeros(size // 2)))
    root.add('C1', ExecComp('y = x**2', y=np.zeros(size), x=np.zeros(size)))

    # src_indices [0..4, 0..4]: each source entry feeds two target entries.
    root.connect('P1.x', "C1.x", src_indices=2 * list(range(size // 2)))

    prob = Problem(root)
    prob.setup(check=False)

    prob["P1.x"] = np.arange(5, dtype=float)
    prob.run()

    squared = np.arange(5, dtype=float) ** 2
    expected = np.concatenate((squared, squared))
    assert_almost_equal(prob["C1.y"], expected, decimal=7)
def test_speed(self):
    # Unit conversion happens automatically on the connections.
    indeps = IndepVarComp()
    indeps.add_output('distance', 1., units='km')
    indeps.add_output('time', 1., units='h')

    group = Group()
    group.add_subsystem('c1', indeps)
    group.add_subsystem('c2', SpeedComputationWithUnits())
    group.connect('c1.distance', 'c2.distance')
    group.connect('c1.time', 'c2.time')

    prob = Problem(model=group)
    prob.setup(check=False)
    prob.set_solver_print(level=0)
    prob.run_model()

    assert_rel_error(self, prob['c1.distance'], 1.0)     # units: km
    assert_rel_error(self, prob['c2.distance'], 1000.0)  # units: m
    assert_rel_error(self, prob['c1.time'], 1.0)         # units: h
    assert_rel_error(self, prob['c2.time'], 3600.0)      # units: s
    assert_rel_error(self, prob['c2.speed'], 1.0)        # units: km/h (i.e., kph)
def test_guess_nonlinear(self):

    class ImpWithInitial(QuadraticLinearize):

        def solve_nonlinear(self, inputs, outputs):
            """ Do nothing. """
            pass

        def guess_nonlinear(self, inputs, outputs, resids):
            # Solution at x=1 and x=3. Default value takes us to the x=1 solution. Here
            # we set it to a value that will take us to the x=3 solution.
            outputs['x'] = 5.0

    group = Group()

    group.add_subsystem('pa', IndepVarComp('a', 1.0))
    group.add_subsystem('pb', IndepVarComp('b', 1.0))
    group.add_subsystem('pc', IndepVarComp('c', 1.0))
    group.add_subsystem('comp2', ImpWithInitial())
    group.connect('pa.a', 'comp2.a')
    group.connect('pb.b', 'comp2.b')
    group.connect('pc.c', 'comp2.c')

    prob = Problem(model=group)
    group.nonlinear_solver = NewtonSolver()
    group.nonlinear_solver.options['solve_subsystems'] = True
    group.nonlinear_solver.options['max_sub_solves'] = 1
    group.linear_solver = ScipyKrylov()

    prob.setup(check=False)

    prob['pa.a'] = 1.
    prob['pb.b'] = -4.
    prob['pc.c'] = 3.

    # Making sure that guess_nonlinear is called early enough to eradicate this.
    # np.nan is the canonical spelling; the np.NaN alias was removed in NumPy 2.0.
    prob['comp2.x'] = np.nan

    prob.run_model()
    assert_rel_error(self, prob['comp2.x'], 3.)
def test_linear_system(self):
    # Solve A x = b for a known x, then check derivatives of the solution
    # with respect to A and b in every available gradient mode.
    root = Group()

    lingrp = root.add('lingrp', Group(), promotes=['*'])
    lingrp.add('lin', LinearSystem(3))
    lingrp.ln_solver = ScipyGMRES()

    x = np.array([1, 2, -3])
    A = np.array([[5.0, -3.0, 2.0],
                  [1.0, 7.0, -4.0],
                  [1.0, 0.0, 8.0]])
    b = A.dot(x)

    root.add('p1', IndepVarComp('A', A))
    root.add('p2', IndepVarComp('b', b))

    root.connect('p1.A', 'lin.A')
    root.connect('p2.b', 'lin.b')

    prob = Problem(root)
    prob.setup(check=False)
    prob.run()

    # The solver should recover the x used to construct b.
    assert_rel_error(self, prob['lin.x'], x, .0001)
    assert_rel_error(self, np.linalg.norm(prob.root.resids.vec), 0.0, 1e-10)

    # Analytic derivatives: dx/dA = outer(A^-1, -x), dx/db = A^-1.
    Ainv = np.linalg.inv(A)
    dx_dA = np.outer(Ainv, -x).reshape(3, 9)
    dx_db = Ainv

    for mode in ('fwd', 'rev', 'fd'):
        J = prob.calc_gradient(['p1.A', 'p2.b'], ['lin.x'], mode=mode,
                               return_format='dict')
        assert_rel_error(self, J['lin.x']['p1.A'], dx_dA, .0001)
        assert_rel_error(self, J['lin.x']['p2.b'], dx_db, .0001)
def test_shape(self):
    # Vector-valued balance: solve y = x**2 = 4 elementwise for n entries.
    n = 100

    balance = BalanceComp()
    balance.add_balance('x', shape=(n,))

    targets = IndepVarComp(name='y_tgt', val=4 * np.ones(n))

    exec_comp = ExecComp('y=x**2', x=np.zeros(n), y=np.zeros(n))

    model = Group()
    model.add_subsystem('tgt', targets, promotes_outputs=['y_tgt'])
    model.add_subsystem('exe', exec_comp)
    model.add_subsystem('bal', balance)

    model.connect('y_tgt', 'bal.rhs:x')
    model.connect('bal.x', 'exe.x')
    model.connect('exe.y', 'bal.lhs:x')

    model.linear_solver = DirectSolver(assemble_jac=True)
    model.nonlinear_solver = NewtonSolver(maxiter=100, iprint=0)

    prob = Problem(model)
    prob.setup()

    # Random positive start; Newton should land on x = 2 elementwise.
    prob['bal.x'] = np.random.rand(n)

    prob.run_model()

    assert_almost_equal(prob['bal.x'], 2.0 * np.ones(n), decimal=7)