def test_compute_totals_basic(self):
    """Check analytic total derivatives of the Paraboloid with default solvers."""
    prob = Problem()
    model = prob.model = Group()
    model.add_subsystem('p1', IndepVarComp('x', 0.0), promotes=['x'])
    model.add_subsystem('p2', IndepVarComp('y', 0.0), promotes=['y'])
    model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    # Forward mode.
    prob.setup(check=False, mode='fwd')
    prob.set_solver_print(level=0)
    prob.run_model()

    of = ['f_xy']
    wrt = ['x', 'y']
    totals = prob.compute_totals(of=of, wrt=wrt)

    # At (x, y) = (0, 0): df/dx = 2*(x - 3) + y = -6, df/dy = x + 2*(y + 4) = 8.
    assert_rel_error(self, totals['f_xy', 'x'], [[-6.0]], 1e-6)
    assert_rel_error(self, totals['f_xy', 'y'], [[8.0]], 1e-6)

    # Reverse mode must give the same totals.
    prob.setup(check=False, mode='rev')
    prob.run_model()

    of = ['f_xy']
    wrt = ['x', 'y']
    totals = prob.compute_totals(of=of, wrt=wrt)

    assert_rel_error(self, totals['f_xy', 'x'], [[-6.0]], 1e-6)
    assert_rel_error(self, totals['f_xy', 'y'], [[8.0]], 1e-6)
def test_compute_totals_basic_return_dict(self):
    """Verify that compute_totals honors return_format='dict' in both modes."""
    prob = Problem()
    model = prob.model = Group()
    model.add_subsystem('p1', IndepVarComp('x', 0.0), promotes=['x'])
    model.add_subsystem('p2', IndepVarComp('y', 0.0), promotes=['y'])
    model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    # Forward mode with nested-dict return format.
    prob.setup(check=False, mode='fwd')
    prob.set_solver_print(level=0)
    prob.run_model()

    of = ['f_xy']
    wrt = ['x', 'y']
    totals = prob.compute_totals(of=of, wrt=wrt, return_format='dict')

    assert_rel_error(self, totals['f_xy']['x'], [[-6.0]], 1e-6)
    assert_rel_error(self, totals['f_xy']['y'], [[8.0]], 1e-6)

    # Same check in reverse mode.
    prob.setup(check=False, mode='rev')
    prob.run_model()

    of = ['f_xy']
    wrt = ['x', 'y']
    totals = prob.compute_totals(of=of, wrt=wrt, return_format='dict')

    assert_rel_error(self, totals['f_xy']['x'], [[-6.0]], 1e-6)
    assert_rel_error(self, totals['f_xy']['y'], [[8.0]], 1e-6)
def test_paraboloid_subbed(self):
    """FD-approximated totals across a subgroup must match the analytic values."""
    prob = Problem()
    model = prob.model = Group()
    model.add_subsystem('p1', IndepVarComp('x', 0.0), promotes=['x'])
    model.add_subsystem('p2', IndepVarComp('y', 0.0), promotes=['y'])

    sub = model.add_subsystem('sub', Group(), promotes=['x', 'y', 'f_xy'])
    sub.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    model.linear_solver = ScipyKrylov()
    # Approximate the subgroup's totals via finite difference.
    sub.approx_totals()

    prob.setup(check=False, mode='fwd')
    prob.set_solver_print(level=0)
    prob.run_model()

    totals = prob.compute_totals(of=['f_xy'], wrt=['x', 'y'])

    assert_rel_error(self, totals['f_xy', 'x'], [[-6.0]], 1e-6)
    assert_rel_error(self, totals['f_xy', 'y'], [[8.0]], 1e-6)

    # The subgroup jacobian itself should hold the same FD values.
    Jfd = sub._jacobian
    assert_rel_error(self, Jfd['sub.comp.f_xy', 'sub.comp.x'], [[-6.0]], 1e-6)
    assert_rel_error(self, Jfd['sub.comp.f_xy', 'sub.comp.y'], [[8.0]], 1e-6)

    # 1 output x 2 inputs
    self.assertEqual(len(sub._approx_schemes['fd']._exec_list), 2)
def test_paraboloid_subbed(self, vec_class):
    """Complex-step totals across a subgroup, parameterized over the vector class."""
    if not vec_class:
        raise unittest.SkipTest("PETSc is not installed")

    prob = self.prob
    model = prob.model = Group()
    model.add_subsystem('p1', IndepVarComp('x', 0.0), promotes=['x'])
    model.add_subsystem('p2', IndepVarComp('y', 0.0), promotes=['y'])

    sub = model.add_subsystem('sub', Group(), promotes=['x', 'y', 'f_xy'])
    sub.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    model.linear_solver = ScipyKrylov()
    # Approximate the subgroup's totals with complex step.
    sub.approx_totals(method='cs')

    prob.setup(check=False, mode='fwd', local_vector_class=vec_class)
    prob.set_solver_print(level=0)
    prob.run_model()

    totals = prob.compute_totals(of=['f_xy'], wrt=['x', 'y'])

    assert_rel_error(self, totals['f_xy', 'x'], [[-6.0]], 1e-6)
    assert_rel_error(self, totals['f_xy', 'y'], [[8.0]], 1e-6)

    # The subgroup jacobian should carry the complex-step values as well.
    Jfd = sub._jacobian
    assert_rel_error(self, Jfd['sub.comp.f_xy', 'sub.comp.x'], [[-6.0]], 1e-6)
    assert_rel_error(self, Jfd['sub.comp.f_xy', 'sub.comp.y'], [[8.0]], 1e-6)

    # 1 output x 2 inputs
    self.assertEqual(len(sub._approx_schemes['cs']._exec_list), 2)
def test_unconstrainted(self):
    """Unconstrained COBYLA minimization of the Paraboloid.

    NOTE(review): the method name has a typo ("unconstrainted"); it is kept
    because it is the public test identifier.
    """
    from openmdao.api import Problem, ScipyOptimizeDriver, IndepVarComp

    # We'll use the component that was defined in the last tutorial
    from openmdao.test_suite.components.paraboloid import Paraboloid

    # build the model
    prob = Problem()

    indeps = prob.model.add_subsystem('indeps', IndepVarComp())
    indeps.add_output('x', 3.0)
    indeps.add_output('y', -4.0)

    prob.model.add_subsystem('paraboloid', Paraboloid())

    prob.model.connect('indeps.x', 'paraboloid.x')
    prob.model.connect('indeps.y', 'paraboloid.y')

    # setup the optimization
    prob.driver = ScipyOptimizeDriver()
    prob.driver.options['optimizer'] = 'COBYLA'

    prob.model.add_design_var('indeps.x', lower=-50, upper=50)
    prob.model.add_design_var('indeps.y', lower=-50, upper=50)
    prob.model.add_objective('paraboloid.f_xy')

    prob.setup()
    prob.run_driver()

    # minimum value
    assert_rel_error(self, prob['paraboloid.f_xy'], -27.33333, 1e-6)

    # location of the minimum
    assert_rel_error(self, prob['indeps.x'], 6.6667, 1e-4)
    assert_rel_error(self, prob['indeps.y'], -7.33333, 1e-4)
def test_debug_print_option_totals(self):
    """The 'totals' debug_print option should log each variable being solved."""
    prob = Problem()
    model = prob.model = Group()
    model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
    model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])
    model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])

    prob.set_solver_print(level=0)

    prob.driver = ScipyOptimizeDriver()
    prob.driver.options['optimizer'] = 'SLSQP'
    prob.driver.options['tol'] = 1e-9
    prob.driver.options['disp'] = False
    prob.driver.options['debug_print'] = ['totals']

    model.add_design_var('x', lower=-50.0, upper=50.0)
    model.add_design_var('y', lower=-50.0, upper=50.0)
    model.add_objective('f_xy')
    model.add_constraint('c', upper=-15.0)

    prob.setup(check=False)

    failed, output = run_driver(prob)

    self.assertFalse(failed, "Optimization failed.")

    # Each response should appear in the debug output of the totals solve.
    self.assertTrue('Solving variable: comp.f_xy' in output)
    self.assertTrue('Solving variable: con.c' in output)
def test_index_number(self):
    """Viewer data for a specific recorded case should expose that case's values."""
    prob = om.Problem()
    model = prob.model

    model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
    model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
    model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    model.add_design_var('x', lower=0.0, upper=1.0)
    model.add_design_var('y', lower=0.0, upper=1.0)
    model.add_objective('f_xy')

    prob.driver = om.DOEDriver(om.PlackettBurmanGenerator())
    prob.driver.add_recorder(self.recorder)

    prob.setup()
    prob.run_driver()
    prob.cleanup()

    # Pull viewer data for the case at index 3.
    data_dict = _get_viewer_data(self.filename, case_id=3)

    vals = data_dict['tree']['children'][2]['children']
    x_val = vals[0]['val']
    y_val = vals[1]['val']
    f_xy_val = vals[2]['val']

    # Design-var values are not stored in the tree; only the objective is.
    self.assertEqual(x_val, "N/A")
    self.assertEqual(y_val, "N/A")
    self.assertEqual(f_xy_val, np.array([27.]))
def setup_and_run_simple_problem(self, driver=None, reports=_UNDEFINED, reports_dir=_UNDEFINED):
    """Build, run, and return a simple Paraboloid optimization problem.

    Parameters
    ----------
    driver : Driver or None
        Driver to use; defaults to ScipyOptimizeDriver when None.
    reports : object
        Passed through to the Problem's `reports` argument.
    reports_dir : object
        Passed through to the Problem's `reports_dir` argument.

    Returns
    -------
    Problem
        The problem, after run_driver() and cleanup().
    """
    prob = om.Problem(reports=reports, reports_dir=reports_dir)
    model = prob.model

    model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
    model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
    model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    model.add_design_var('x', lower=0.0, upper=1.0)
    model.add_design_var('y', lower=0.0, upper=1.0)
    model.add_objective('f_xy')

    if driver:
        prob.driver = driver
    else:
        prob.driver = om.ScipyOptimizeDriver()

    # Was `prob.setup(False)`: pass `check` by keyword rather than as a bare
    # positional boolean, matching the rest of the file.
    prob.setup(check=False)
    prob.run_driver()
    prob.cleanup()

    return prob
def test_simple_paraboloid_scaled_objective_fwd(self):
    """Scaled-objective optimization in forward mode.

    Uses ScipyOptimizeDriver (was the deprecated ScipyOptimizer alias) for
    consistency with the reverse-mode twin of this test.
    """
    prob = Problem()
    model = prob.model = Group()

    prob.set_solver_print(level=0)

    model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
    model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])
    model.add_subsystem('con', ExecComp('c = x - y'), promotes=['*'])

    prob.driver = ScipyOptimizeDriver()
    prob.driver.options['optimizer'] = 'SLSQP'
    prob.driver.options['tol'] = 1e-9
    prob.driver.options['disp'] = False

    model.add_design_var('x', lower=-50.0, upper=50.0)
    model.add_design_var('y', lower=-50.0, upper=50.0)
    model.add_objective('f_xy', ref=10.)  # objective scaled by ref=10
    model.add_constraint('c', lower=10.0, upper=11.0)

    prob.setup(check=False, mode='fwd')

    prob.run_driver()

    # Minimum should be at (7.166667, -7.833334)
    assert_rel_error(self, prob['x'] - prob['y'], 11.0, 1e-6)
def test_feature_numpyvec_setup(self):
    """Re-running setup() allows new values to be set and the model re-run."""
    from openmdao.api import Problem, Group, IndepVarComp
    from openmdao.test_suite.components.paraboloid import Paraboloid

    prob = Problem()
    model = prob.model = Group()
    model.add_subsystem('p1', IndepVarComp('x', 0.0), promotes=['x'])
    model.add_subsystem('p2', IndepVarComp('y', 0.0), promotes=['y'])
    model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    prob.setup()

    prob['x'] = 2.
    prob['y'] = 10.
    prob.run_model()
    assert_rel_error(self, prob['f_xy'], 214.0, 1e-6)

    prob['x'] = 0.
    prob['y'] = 0.
    prob.run_model()
    assert_rel_error(self, prob['f_xy'], 22.0, 1e-6)

    # skip the setup error checking
    prob.setup(check=False)
    prob['x'] = 4
    prob['y'] = 8.

    prob.run_model()
    assert_rel_error(self, prob['f_xy'], 174.0, 1e-6)
def test_simple_paraboloid_unconstrained_COBYLA(self):
    """Unconstrained Paraboloid minimization via COBYLA finds the known optimum."""
    prob = Problem()
    model = prob.model = Group()
    model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
    model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])

    prob.set_solver_print(level=0)

    prob.driver = ScipyOptimizeDriver()
    prob.driver.options['optimizer'] = 'COBYLA'
    prob.driver.options['tol'] = 1e-9
    prob.driver.options['disp'] = False

    model.add_design_var('x', lower=-50.0, upper=50.0)
    model.add_design_var('y', lower=-50.0, upper=50.0)
    model.add_objective('f_xy')

    prob.setup(check=False)

    failed = prob.run_driver()

    self.assertFalse(
        failed,
        "Optimization failed, result =\n" + str(prob.driver.result))

    # Analytic minimum of the paraboloid.
    assert_rel_error(self, prob['x'], 6.66666667, 1e-6)
    assert_rel_error(self, prob['y'], -7.3333333, 1e-6)
def test_feature_simple_run_once_compute_totals(self):
    """compute_totals works with both the default (flat) and 'dict' formats."""
    from openmdao.api import Problem, Group, IndepVarComp
    from openmdao.test_suite.components.paraboloid import Paraboloid

    prob = Problem()
    model = prob.model = Group()
    model.add_subsystem('p1', IndepVarComp('x', 3.0))
    model.add_subsystem('p2', IndepVarComp('y', -4.0))
    model.add_subsystem('comp', Paraboloid())

    model.connect('p1.x', 'comp.x')
    model.connect('p2.y', 'comp.y')

    prob.setup()
    prob.run_model()

    # Flat format keys totals by (of, wrt) tuples.
    totals = prob.compute_totals(of=['comp.f_xy'], wrt=['p1.x', 'p2.y'])
    assert_rel_error(self, totals[('comp.f_xy', 'p1.x')][0][0], -4.0)
    assert_rel_error(self, totals[('comp.f_xy', 'p2.y')][0][0], 3.0)

    # Dict format nests wrt names under each of name.
    totals = prob.compute_totals(of=['comp.f_xy'], wrt=['p1.x', 'p2.y'],
                                 return_format='dict')
    assert_rel_error(self, totals['comp.f_xy']['p1.x'][0][0], -4.0)
    assert_rel_error(self, totals['comp.f_xy']['p2.y'][0][0], 3.0)
def test_feature_optimizer(self):
    """Feature demo: drive the Paraboloid to its optimum with COBYLA."""
    from openmdao.api import Problem, Group, IndepVarComp, ScipyOptimizeDriver
    from openmdao.test_suite.components.paraboloid import Paraboloid

    prob = Problem()
    model = prob.model = Group()
    model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
    model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])

    prob.driver = ScipyOptimizeDriver()
    prob.driver.options['optimizer'] = 'COBYLA'
    prob.driver.options['disp'] = True

    model.add_design_var('x', lower=-50.0, upper=50.0)
    model.add_design_var('y', lower=-50.0, upper=50.0)
    model.add_objective('f_xy')

    prob.setup(check=False)
    prob.run_driver()

    assert_rel_error(self, prob['x'], 6.66666667, 1e-6)
    assert_rel_error(self, prob['y'], -7.3333333, 1e-6)
def test_simple_paraboloid_scaled_objective_rev(self):
    """Scaled-objective optimization in reverse mode reaches the constrained optimum."""
    prob = Problem()
    model = prob.model = Group()

    prob.set_solver_print(level=0)

    model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
    model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])
    model.add_subsystem('con', ExecComp('c = x - y'), promotes=['*'])

    prob.driver = ScipyOptimizeDriver()
    prob.driver.options['optimizer'] = 'SLSQP'
    prob.driver.options['tol'] = 1e-9
    prob.driver.options['disp'] = False

    model.add_design_var('x', lower=-50.0, upper=50.0)
    model.add_design_var('y', lower=-50.0, upper=50.0)
    model.add_objective('f_xy', ref=10.)  # objective scaled by ref=10
    model.add_constraint('c', lower=10.0, upper=11.0)

    prob.setup(check=False, mode='rev')

    failed = prob.run_driver()

    self.assertFalse(
        failed,
        "Optimization failed, result =\n" + str(prob.driver.result))

    # The active constraint pins x - y at its upper bound.
    assert_rel_error(self, prob['x'] - prob['y'], 11.0, 1e-6)
def test_simple_paraboloid_equality_linear(self):
    """A linear equality constraint should be honored at the optimum."""
    prob = Problem()
    model = prob.model = Group()
    model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
    model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])
    model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])

    prob.set_solver_print(level=0)

    prob.driver = ScipyOptimizeDriver()
    prob.driver.options['optimizer'] = 'SLSQP'
    prob.driver.options['tol'] = 1e-9
    prob.driver.options['disp'] = False

    model.add_design_var('x', lower=-50.0, upper=50.0)
    model.add_design_var('y', lower=-50.0, upper=50.0)
    model.add_objective('f_xy')
    # Declared linear so the driver treats it as a linear constraint.
    model.add_constraint('c', equals=-15.0, linear=True)

    prob.setup(check=False)

    failed = prob.run_driver()

    self.assertFalse(
        failed,
        "Optimization failed, result =\n" + str(prob.driver.result))

    # Minimum should be at (7.166667, -7.833334)
    assert_rel_error(self, prob['x'], 7.16667, 1e-6)
    assert_rel_error(self, prob['y'], -7.833334, 1e-6)
def test_unsupported_equality(self):
    """COBYLA cannot handle equality constraints; a clear error must be raised."""
    prob = Problem()
    model = prob.model = Group()
    model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
    model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])
    model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])

    prob.set_solver_print(level=0)

    prob.driver = ScipyOptimizeDriver()
    prob.driver.options['optimizer'] = 'COBYLA'
    prob.driver.options['tol'] = 1e-9
    prob.driver.options['disp'] = False

    model.add_design_var('x', lower=-50.0, upper=50.0)
    model.add_design_var('y', lower=-50.0, upper=50.0)
    model.add_objective('f_xy')
    model.add_constraint('c', equals=-15.0)

    prob.setup(check=False)

    with self.assertRaises(Exception) as raises_cm:
        prob.run_driver()

    exception = raises_cm.exception
    msg = "Constraints of type 'eq' not handled by COBYLA."

    self.assertEqual(exception.args[0], msg)
def test_simple_paraboloid_equality(self):
    """Nonlinear equality constraint optimization with SLSQP.

    Uses ScipyOptimizeDriver (was the deprecated ScipyOptimizer alias) for
    consistency with the sibling tests in this file.
    """
    prob = Problem()
    model = prob.model = Group()
    model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
    model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])
    model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])

    prob.set_solver_print(level=0)

    prob.driver = ScipyOptimizeDriver()
    prob.driver.options['optimizer'] = 'SLSQP'
    prob.driver.options['tol'] = 1e-9
    prob.driver.options['disp'] = False

    model.add_design_var('x', lower=-50.0, upper=50.0)
    model.add_design_var('y', lower=-50.0, upper=50.0)
    model.add_objective('f_xy')
    model.add_constraint('c', equals=-15.0)

    prob.setup(check=False)
    prob.run_driver()

    # Minimum should be at (7.166667, -7.833334)
    # (Note, loose tol because of appveyor py3.4 machine.)
    assert_rel_error(self, prob['x'], 7.16667, 1e-4)
    assert_rel_error(self, prob['y'], -7.833334, 1e-4)
def test_debug_print_option(self):
    """Feature demo: enable the driver's debug_print categories and run."""
    from openmdao.api import Problem, Group, IndepVarComp, ScipyOptimizeDriver, ExecComp
    from openmdao.test_suite.components.paraboloid import Paraboloid

    prob = Problem()
    model = prob.model = Group()
    model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
    model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])
    model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])

    prob.set_solver_print(level=0)

    prob.driver = ScipyOptimizeDriver()
    prob.driver.options['optimizer'] = 'SLSQP'
    prob.driver.options['tol'] = 1e-9
    prob.driver.options['disp'] = False

    # Print every available debug category during the run.
    prob.driver.options['debug_print'] = [
        'desvars', 'ln_cons', 'nl_cons', 'objs'
    ]

    model.add_design_var('x', lower=-50.0, upper=50.0)
    model.add_design_var('y', lower=-50.0, upper=50.0)
    model.add_objective('f_xy')
    model.add_constraint('c', upper=-15.0)

    prob.setup(check=False)

    prob.run_driver()
def test_simple_driver_recording(self, m):
    """Driver iteration data sent to the web recorder should match the optimum."""
    self.setup_endpoints(m)
    recorder = WebRecorder(self._accepted_token, suppress_output=True)

    prob = Problem()
    model = prob.model = Group()
    model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
    model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])
    model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])

    model.suppress_solver_output = True

    prob.driver = pyOptSparseDriver()
    prob.driver.add_recorder(recorder)

    # Record everything the driver can capture.
    prob.driver.recording_options['record_desvars'] = True
    prob.driver.recording_options['record_responses'] = True
    prob.driver.recording_options['record_objectives'] = True
    prob.driver.recording_options['record_constraints'] = True

    prob.driver.options['optimizer'] = OPTIMIZER
    if OPTIMIZER == 'SLSQP':
        prob.driver.opt_settings['ACC'] = 1e-9

    model.add_design_var('x', lower=-50.0, upper=50.0)
    model.add_design_var('y', lower=-50.0, upper=50.0)
    model.add_objective('f_xy')
    model.add_constraint('c', upper=-15.0)

    prob.setup(check=False)
    t0, t1 = run_driver(prob)
    prob.cleanup()

    driver_iteration_data = json.loads(self.driver_iteration_data)

    # Final values at the constrained optimum.
    expected_desvars = [
        {'name': 'p1.x', 'values': [7.1666666]},
        {'name': 'p2.y', 'values': [-7.8333333]}
    ]
    expected_objectives = [
        {'name': 'comp.f_xy', 'values': [-27.083333]}
    ]
    expected_constraints = [
        {'name': 'con.c', 'values': [-15.0]}
    ]

    for d in expected_desvars:
        self.assert_array_close(d, driver_iteration_data['desvars'])
    for o in expected_objectives:
        self.assert_array_close(o, driver_iteration_data['objectives'])
    for c in expected_constraints:
        self.assert_array_close(c, driver_iteration_data['constraints'])
def test_full_factorial(self):
    """Parallel full-factorial DOE: cases are distributed round-robin over procs.

    Fix: the original bound `case = cases.get_case(n)` and then ignored it,
    calling `cases.get_case(n)` three more times per iteration; the bound
    case is now used directly.
    """
    prob = Problem()
    model = prob.model

    model.add_subsystem('p1', IndepVarComp('x', 0.0), promotes=['x'])
    model.add_subsystem('p2', IndepVarComp('y', 0.0), promotes=['y'])
    model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    model.add_design_var('x', lower=0.0, upper=1.0)
    model.add_design_var('y', lower=0.0, upper=1.0)
    model.add_objective('f_xy')

    prob.driver = DOEDriver(FullFactorialGenerator(levels=3), parallel=True)
    prob.driver.add_recorder(SqliteRecorder("CASES.db"))

    prob.setup()

    failed, output = run_driver(prob)
    self.assertFalse(failed)

    prob.cleanup()

    # All 9 points of the 3x3 factorial grid with the paraboloid's value.
    expected = {
        0: {'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
        1: {'x': np.array([.5]), 'y': np.array([0.]), 'f_xy': np.array([19.25])},
        2: {'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
        3: {'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},
        4: {'x': np.array([.5]), 'y': np.array([.5]), 'f_xy': np.array([23.75])},
        5: {'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},
        6: {'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
        7: {'x': np.array([.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},
        8: {'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
    }

    size = prob.comm.size
    rank = prob.comm.rank

    # cases will be split across files for each proc
    filename = "CASES.db_%d" % rank

    expect_msg = "Cases from rank %d are being written to %s." % (rank, filename)
    self.assertTrue(expect_msg in output)

    cases = CaseReader(filename).driver_cases

    # cases recorded on this proc
    num_cases = cases.num_cases
    self.assertEqual(num_cases, len(expected)//size+(rank<len(expected)%size))

    for n in range(num_cases):
        case = cases.get_case(n)
        idx = n * size + rank  # index of expected case

        self.assertEqual(case.outputs['x'], expected[idx]['x'])
        self.assertEqual(case.outputs['y'], expected[idx]['y'])
        self.assertEqual(case.outputs['f_xy'], expected[idx]['f_xy'])
def test_debug_print_option(self):
    """All four debug_print categories should appear repeatedly in driver output."""
    prob = Problem()
    model = prob.model = Group()
    model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
    model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])
    model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])

    prob.set_solver_print(level=0)

    prob.driver = ScipyOptimizeDriver()
    prob.driver.options['optimizer'] = 'SLSQP'
    prob.driver.options['tol'] = 1e-9
    prob.driver.options['disp'] = False

    prob.driver.options['debug_print'] = [
        'desvars', 'ln_cons', 'nl_cons', 'objs'
    ]

    model.add_design_var('x', lower=-50.0, upper=50.0)
    model.add_design_var('y', lower=-50.0, upper=50.0)
    model.add_objective('f_xy')
    model.add_constraint('c', upper=-15.0)

    prob.setup(check=False)

    failed, output = run_driver(prob)

    self.assertFalse(failed, "Optimization failed.")

    output = output.split('\n')

    # Headers printed once per iteration — expect more than one of each.
    self.assertTrue(
        output.count("Design Vars") > 1,
        "Should be more than one design vars header printed")
    self.assertTrue(
        output.count("Nonlinear constraints") > 1,
        "Should be more than one nonlinear constraint header printed")
    self.assertTrue(
        output.count("Linear constraints") > 1,
        "Should be more than one linear constraint header printed")
    self.assertTrue(
        output.count("Objectives") > 1,
        "Should be more than one objective header printed")

    # Individual variable dicts printed each iteration as well.
    self.assertTrue(
        len([s for s in output if s.startswith("{'p1.x")]) > 1,
        "Should be more than one p1.x printed")
    self.assertTrue(
        len([s for s in output if "'p2.y'" in s]) > 1,
        "Should be more than one p2.y printed")
    self.assertTrue(
        len([s for s in output if s.startswith("{'con.c")]) > 1,
        "Should be more than one con.c printed")
    self.assertTrue(
        len([s for s in output if s.startswith("{'comp.f_xy")]) > 1,
        "Should be more than one comp.f_xy printed")
def test_latin_hypercube_center(self):
    """LHS with criterion='c' puts one sample at the center of each bucket."""
    samples = 4
    upper = 10.

    prob = Problem()
    model = prob.model

    indep = model.add_subsystem('indep', IndepVarComp())
    indep.add_output('x', 0.0)
    indep.add_output('y', 0.0)

    model.add_subsystem('comp', Paraboloid())

    model.connect('indep.x', 'comp.x')
    model.connect('indep.y', 'comp.y')

    model.add_design_var('indep.x', lower=0., upper=upper)
    model.add_design_var('indep.y', lower=0., upper=upper)
    model.add_objective('comp.f_xy')

    prob.driver = DOEDriver(LatinHypercubeGenerator(samples=samples, criterion='c'))
    prob.driver.add_recorder(SqliteRecorder("CASES.db"))

    prob.setup(check=False)
    prob.run_driver()
    prob.cleanup()

    cases = CaseReader("CASES.db").driver_cases

    self.assertEqual(cases.num_cases, samples)

    # the sample space for each variable (0 to upper) should be divided into
    # equal size buckets and each variable should have a value in each bucket
    bucket_size = upper/samples
    all_buckets = set(range(samples))

    x_buckets_filled = set()
    y_buckets_filled = set()

    # with criterion of 'center', each value should be in the center of it's bucket
    valid_values = [round(bucket_size*(bucket + 1/2), 3) for bucket in all_buckets]

    for n in range(cases.num_cases):
        x = float(cases.get_case(n).outputs['indep.x'])
        y = float(cases.get_case(n).outputs['indep.y'])

        x_buckets_filled.add(int(x/bucket_size))
        y_buckets_filled.add(int(y/bucket_size))

        self.assertTrue(round(x, 3) in valid_values, '%f not in %s' % (x, valid_values))
        self.assertTrue(round(y, 3) in valid_values, '%f not in %s' % (y, valid_values))

    self.assertEqual(x_buckets_filled, all_buckets)
    self.assertEqual(y_buckets_filled, all_buckets)
def test_feature_simple_run_once_input_input(self):
    """One source feeding two promoted inputs gives both components the same x."""
    from openmdao.api import Problem, Group, IndepVarComp
    from openmdao.test_suite.components.paraboloid import Paraboloid

    prob = Problem()
    model = prob.model = Group()
    model.add_subsystem('p1', IndepVarComp('x', 3.0))

    #promote the two inputs to the same name
    model.add_subsystem('comp1', Paraboloid(), promotes_inputs=['x'])
    model.add_subsystem('comp2', Paraboloid(), promotes_inputs=['x'])

    #connect the source to the common name
    model.connect('p1.x', 'x')

    prob.setup()
    prob.run_model()

    assert_rel_error(self, prob['comp1.f_xy'], 13.0)
    assert_rel_error(self, prob['comp2.f_xy'], 13.0)
def __init__(self, **kwargs):
    """Build the standard constrained-Paraboloid model on top of the base Problem."""
    super().__init__(**kwargs)

    model = self.model
    model.add_subsystem('p1', om.IndepVarComp('x', 50.0), promotes=['*'])
    model.add_subsystem('p2', om.IndepVarComp('y', 50.0), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])
    model.add_subsystem('con', om.ExecComp('c = - x + y'), promotes=['*'])

    # Standard optimization setup: two bounded design vars, one objective,
    # one inequality constraint.
    model.add_design_var('x', lower=-50.0, upper=50.0)
    model.add_design_var('y', lower=-50.0, upper=50.0)
    model.add_objective('f_xy')
    model.add_constraint('c', upper=-15.0)
def test_plackett_burman(self):
    """Plackett-Burman DOE visits the four corners of the design space."""
    prob = Problem()
    model = prob.model

    model.add_subsystem('p1', IndepVarComp('x', 0.0), promotes=['x'])
    model.add_subsystem('p2', IndepVarComp('y', 0.0), promotes=['y'])
    model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    model.add_design_var('x', lower=0.0, upper=1.0)
    model.add_design_var('y', lower=0.0, upper=1.0)
    model.add_objective('f_xy')

    prob.driver = DOEDriver(PlackettBurmanGenerator())
    prob.driver.add_recorder(SqliteRecorder("cases.sql"))

    prob.setup(check=False)
    prob.run_driver()
    prob.cleanup()

    # The four corner points and the paraboloid value at each.
    expected = {
        0: {'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
        1: {'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
        2: {'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
        3: {'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
    }

    cases = CaseReader("cases.sql").driver_cases

    self.assertEqual(cases.num_cases, 4)

    for n in range(cases.num_cases):
        outputs = cases.get_case(n).outputs
        self.assertEqual(outputs['x'], expected[n]['x'])
        self.assertEqual(outputs['y'], expected[n]['y'])
        self.assertEqual(outputs['f_xy'], expected[n]['f_xy'])
def test_uniform(self):
    """Uniform DOE with a fixed seed reproduces known sample values."""
    prob = Problem()
    model = prob.model

    model.add_subsystem('p1', IndepVarComp('x', 0.), promotes=['*'])
    model.add_subsystem('p2', IndepVarComp('y', 0.), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])

    model.add_design_var('x', lower=-10, upper=10)
    model.add_design_var('y', lower=-10, upper=10)
    model.add_objective('f_xy')

    prob.driver = DOEDriver(UniformGenerator(num_samples=5, seed=0))
    prob.driver.add_recorder(SqliteRecorder("cases.sql"))

    prob.setup(check=False)
    prob.run_driver()
    prob.cleanup()

    # all values should be between -10 and 10, check expected values for seed = 0
    expected = {
        0: {'x': np.array([0.97627008]), 'y': np.array([4.30378733])},
        1: {'x': np.array([2.05526752]), 'y': np.array([0.89766366])},
        2: {'x': np.array([-1.52690401]), 'y': np.array([2.91788226])},
        3: {'x': np.array([-1.24825577]), 'y': np.array([7.83546002])},
        4: {'x': np.array([9.27325521]), 'y': np.array([-2.33116962])},
    }

    cases = CaseReader("cases.sql").driver_cases

    self.assertEqual(cases.num_cases, 5)

    for n in range(cases.num_cases):
        outputs = cases.get_case(n).outputs
        assert_rel_error(self, outputs['x'], expected[n]['x'], 1e-4)
        assert_rel_error(self, outputs['y'], expected[n]['y'], 1e-4)
def test_ListRecorder():
    """Exercise TopFarmListRecorder with a list-generated DOE.

    Fix: the final loop used `for xyf, k in zip(xyf[0], ...)`, shadowing the
    `xyf` fixture list with its own loop variable — it only worked because
    zip() evaluates `xyf[0]` before the rebinding, and it left `xyf` bound to
    a float afterward. The loop variables are renamed to avoid the shadowing.
    """
    from openmdao.api import Problem, IndepVarComp
    from openmdao.test_suite.components.paraboloid import Paraboloid

    prob = Problem()
    model = prob.model

    model.add_subsystem('p1', IndepVarComp('x', 0.), promotes=['*'])
    model.add_subsystem('p2', IndepVarComp('y', 0.), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])

    model.add_design_var('x', lower=-10, upper=10)
    model.add_design_var('y', lower=-10, upper=10)
    model.add_objective('f_xy')

    # Four (x, y, f_xy) triples; the driver runs the (x, y) pairs.
    xyf = [[0.98, 4.30, 74.1844],
           [2.06, 0.90, 23.7476],
           [-1.53, 2.92, 60.9397],
           [-1.25, 7.84, 145.4481]]
    prob.driver = DOEDriver(ListGenerator([[('x', xy[0]), ('y', xy[1])] for xy in xyf]))

    recorder = TopFarmListRecorder()

    # Exercise the no-op recorder hooks for coverage.
    recorder._initialize_database()
    recorder._cleanup_abs2meta()
    recorder.record_iteration_problem(None, None, None)
    recorder.record_iteration_system(None, None, None)
    recorder.record_iteration_solver(None, None, None)
    recorder.record_viewer_data(None)
    recorder.record_metadata_solver(None)
    recorder.record_derivatives_driver(None, None, None)
    recorder.shutdown()

    prob.driver.add_recorder(recorder)
    prob.driver.recording_options['record_desvars'] = True
    prob.driver.recording_options['includes'] = ['*']
    prob.driver.recording_options['record_inputs'] = True

    prob.setup()
    prob.run_driver()
    prob.cleanup()

    assert recorder.num_cases == 4

    npt.assert_array_equal(recorder.get('counter'), range(1, 5))
    npt.assert_array_equal(recorder['counter'], range(1, 5))
    npt.assert_array_almost_equal(recorder.get(['x', 'y', 'f_xy']), xyf, 4)

    # First case values should round-trip through the recorder.
    for expected_val, key in zip(xyf[0], ['x', 'y', 'f_xy']):
        npt.assert_allclose(recorder[key][0], expected_val)

    with pytest.raises(KeyError, match="missing"):
        recorder.get('missing')
def test_feature_set_indeps(self):
    """Setting independent variables through the problem and running the model."""
    prob = Problem()
    model = prob.model = Group()
    model.add_subsystem('p1', IndepVarComp('x', 0.0), promotes=['x'])
    model.add_subsystem('p2', IndepVarComp('y', 0.0), promotes=['y'])
    model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    prob.setup()

    prob['x'] = 2.
    prob['y'] = 10.

    prob.run_model()
    assert_rel_error(self, prob['f_xy'], 214.0, 1e-6)
def test_full_factorial(self):
    """MPI feature demo: parallel full-factorial DOE writes one case file per rank."""
    from openmdao.api import Problem, IndepVarComp
    from openmdao.test_suite.components.paraboloid import Paraboloid

    from openmdao.api import DOEDriver, FullFactorialGenerator
    from openmdao.api import SqliteRecorder, CaseReader

    from mpi4py import MPI

    prob = Problem()
    model = prob.model

    model.add_subsystem('p1', IndepVarComp('x', 0.0), promotes=['x'])
    model.add_subsystem('p2', IndepVarComp('y', 0.0), promotes=['y'])
    model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    model.add_design_var('x', lower=0.0, upper=1.0)
    model.add_design_var('y', lower=0.0, upper=1.0)
    model.add_objective('f_xy')

    prob.driver = DOEDriver(FullFactorialGenerator(levels=3))
    prob.driver.options['parallel'] = True
    prob.driver.add_recorder(SqliteRecorder("cases.sql"))

    prob.setup()
    prob.run_driver()
    prob.cleanup()

    self.assertEqual(MPI.COMM_WORLD.size, 2)

    # check recorded cases from each case file
    rank = MPI.COMM_WORLD.rank
    filename = "cases.sql_%d" % rank

    # NOTE(review): this assertion is tautological — it compares `filename`
    # against the same expression that just built it; presumably the intent
    # was to verify the per-rank case file exists. TODO confirm.
    self.assertEqual(filename, "cases.sql_%d" % rank)

    cases = CaseReader(filename).driver_cases

    # With 9 cases round-robined over 2 procs, rank 0 gets 5 and rank 1 gets 4.
    self.assertEqual(cases.num_cases, 5 if rank == 0 else 4)

    values = []
    for n in range(cases.num_cases):
        outputs = cases.get_case(n).outputs
        values.append((outputs['x'], outputs['y'], outputs['f_xy']))

    self.assertEqual(
        "\n" + "\n".join([
            "x: %5.2f, y: %5.2f, f_xy: %6.2f" % (x, y, f_xy)
            for x, y, f_xy in values
        ]),
        self.expect_text)
def test_feature_petsc_setup(self):
    """Feature demo: run the Paraboloid model using the PETSc vector class."""
    prob = Problem()
    model = prob.model = Group()
    model.add_subsystem('p1', IndepVarComp('x', 0.0), promotes=['x'])
    model.add_subsystem('p2', IndepVarComp('y', 0.0), promotes=['y'])
    model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    # use PETScVector when using any PETSc linear solvers or running under MPI
    prob.setup(vector_class=PETScVector)

    prob['x'] = 2.
    prob['y'] = 10.

    prob.run_model()
    assert_rel_error(self, prob['f_xy'], 214.0, 1e-6)