def test_debug_print_option_totals(self):
    """Verify that debug_print=['totals'] echoes each total-derivative solve."""
    prob = Problem()
    model = prob.model = Group()

    # Paraboloid objective fed by two independent inputs, plus a linear constraint.
    model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
    model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])
    model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])

    prob.set_solver_print(level=0)

    driver = prob.driver = ScipyOptimizeDriver()
    driver.options['optimizer'] = 'SLSQP'
    driver.options['tol'] = 1e-9
    driver.options['disp'] = False
    driver.options['debug_print'] = ['totals']

    model.add_design_var('x', lower=-50.0, upper=50.0)
    model.add_design_var('y', lower=-50.0, upper=50.0)
    model.add_objective('f_xy')
    model.add_constraint('c', upper=-15.0)

    prob.setup(check=False)

    failed, output = run_driver(prob)

    self.assertFalse(failed, "Optimization failed.")

    # Each response should have been announced while totals were computed.
    for expected_line in ('Solving variable: comp.f_xy',
                          'Solving variable: con.c'):
        self.assertTrue(expected_line in output)
def test_proc_per_model(self):
    # Test that we can run a GA on a distributed component without lockups,
    # with two procs per model, and that parallel case recording works.
    prob = om.Problem()
    model = prob.model

    model.add_subsystem('p', om.IndepVarComp('x', 3.0), promotes=['x'])
    model.add_subsystem('d1', D1(), promotes=['*'])
    model.add_subsystem('d2', D2(), promotes=['*'])
    model.add_subsystem('obj_comp', Summer(), promotes_outputs=['*'])
    model.promotes('obj_comp', inputs=['*'], src_indices=om.slicer[:])

    model.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)
    model.linear_solver = om.LinearBlockGS()

    model.add_design_var('x', lower=-0.5, upper=0.5)
    model.add_objective('obj')

    driver = prob.driver = om.DifferentialEvolutionDriver()
    for opt, val in (('pop_size', 4),
                     ('max_gen', 3),
                     ('run_parallel', True),
                     ('procs_per_model', 2)):
        driver.options[opt] = val

    # also check that parallel recording works
    driver.add_recorder(om.SqliteRecorder("cases.sql"))

    prob.setup()
    prob.set_solver_print(level=0)

    failed, output = run_driver(prob)
    self.assertFalse(failed)

    # we will have run 2 models in parallel on our 4 procs
    num_models = prob.comm.size // driver.options['procs_per_model']
    self.assertEqual(num_models, 2)

    # a separate case file should have been written by rank 0 of each parallel
    # model (the top two global ranks)
    rank = prob.comm.rank
    filename = "cases.sql_%d" % rank

    if rank < num_models:
        expect_msg = "Cases from rank %d are being written to %s." % (rank, filename)
        self.assertTrue(expect_msg in output)

        # check that cases were recorded on this proc
        recorded = om.CaseReader(filename).list_cases('driver')
        self.assertTrue(len(recorded) > 0)
    else:
        self.assertFalse("Cases from rank %d are being written" % rank in output)
        self.assertFalse(os.path.exists(filename))
def test_debug_print_option(self):
    """Verify that debug_print echoes desvars, constraints and objectives
    on every driver iteration."""
    prob = Problem()
    model = prob.model = Group()

    model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
    model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
    model.add_subsystem('comp', Paraboloid(), promotes=['*'])
    model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])

    prob.set_solver_print(level=0)

    driver = prob.driver = ScipyOptimizeDriver()
    driver.options['optimizer'] = 'SLSQP'
    driver.options['tol'] = 1e-9
    driver.options['disp'] = False
    driver.options['debug_print'] = ['desvars', 'ln_cons', 'nl_cons', 'objs']

    model.add_design_var('x', lower=-50.0, upper=50.0)
    model.add_design_var('y', lower=-50.0, upper=50.0)
    model.add_objective('f_xy')
    model.add_constraint('c', upper=-15.0)

    prob.setup(check=False)

    failed, output = run_driver(prob)
    self.assertFalse(failed, "Optimization failed.")

    output = output.split('\n')

    # each category header must appear more than once (once per iteration)
    for header, msg in (
            ("Design Vars",
             "Should be more than one design vars header printed"),
            ("Nonlinear constraints",
             "Should be more than one nonlinear constraint header printed"),
            ("Linear constraints",
             "Should be more than one linear constraint header printed"),
            ("Objectives",
             "Should be more than one objective header printed")):
        self.assertTrue(output.count(header) > 1, msg)

    # ... and so must the printed value of every variable
    for matches, msg in (
            (lambda s: s.startswith("{'p1.x"),
             "Should be more than one p1.x printed"),
            (lambda s: "'p2.y'" in s,
             "Should be more than one p2.y printed"),
            (lambda s: s.startswith("{'con.c"),
             "Should be more than one con.c printed"),
            (lambda s: s.startswith("{'comp.f_xy"),
             "Should be more than one comp.f_xy printed")):
        self.assertTrue(len([s for s in output if matches(s)]) > 1, msg)
def test_full_factorial(self):
    """Run a 3-level full-factorial DOE in parallel and check that each proc
    records its share of the 9 cases (round-robin by rank) to its own
    per-rank SQLite file.

    Fix: inside the verification loop, ``case`` was bound via
    ``cases.get_case(n)`` but never used — each of the three assertions
    re-fetched the case from the reader again.  The bound case is now used
    for all output checks.
    """
    prob = Problem()
    model = prob.model

    model.add_subsystem('p1', IndepVarComp('x', 0.0), promotes=['x'])
    model.add_subsystem('p2', IndepVarComp('y', 0.0), promotes=['y'])
    model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    model.add_design_var('x', lower=0.0, upper=1.0)
    model.add_design_var('y', lower=0.0, upper=1.0)
    model.add_objective('f_xy')

    prob.driver = DOEDriver(FullFactorialGenerator(levels=3), parallel=True)
    prob.driver.add_recorder(SqliteRecorder("CASES.db"))

    prob.setup()

    failed, output = run_driver(prob)
    self.assertFalse(failed)

    prob.cleanup()

    # all 9 points of the 3x3 grid, keyed by global case index
    expected = {
        0: {'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
        1: {'x': np.array([.5]), 'y': np.array([0.]), 'f_xy': np.array([19.25])},
        2: {'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
        3: {'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},
        4: {'x': np.array([.5]), 'y': np.array([.5]), 'f_xy': np.array([23.75])},
        5: {'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},
        6: {'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
        7: {'x': np.array([.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},
        8: {'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
    }

    size = prob.comm.size
    rank = prob.comm.rank

    # cases will be split across files for each proc
    filename = "CASES.db_%d" % rank

    expect_msg = "Cases from rank %d are being written to %s." % (rank, filename)
    self.assertTrue(expect_msg in output)

    # cases recorded on this proc
    cases = CaseReader(filename).driver_cases
    num_cases = cases.num_cases
    self.assertEqual(num_cases,
                     len(expected) // size + (rank < len(expected) % size))

    for n in range(num_cases):
        case = cases.get_case(n)
        idx = n * size + rank  # index of expected case
        for name in ('x', 'y', 'f_xy'):
            self.assertEqual(case.outputs[name], expected[idx][name])
def test_fan_in_grouped_serial(self):
    # run cases on all procs (parallel model will run on single proc)
    doe_parallel = True

    prob = Problem(FanInGrouped())
    model = prob.model

    model.add_design_var('iv.x1', lower=0.0, upper=1.0)
    model.add_design_var('iv.x2', lower=0.0, upper=1.0)
    model.add_objective('c3.y')

    driver = prob.driver = DOEDriver(FullFactorialGenerator(levels=3))
    driver.add_recorder(SqliteRecorder("cases.sql"))
    driver.options['parallel'] = doe_parallel

    prob.setup(check=False)

    failed, output = run_driver(prob)
    self.assertFalse(failed)

    prob.cleanup()

    # all 9 points of the 3x3 grid over (iv.x1, iv.x2)
    expected = {
        0: {'iv.x1': np.array([0.]), 'iv.x2': np.array([0.]), 'c3.y': np.array([0.0])},
        1: {'iv.x1': np.array([.5]), 'iv.x2': np.array([0.]), 'c3.y': np.array([-3.0])},
        2: {'iv.x1': np.array([1.]), 'iv.x2': np.array([0.]), 'c3.y': np.array([-6.0])},
        3: {'iv.x1': np.array([0.]), 'iv.x2': np.array([.5]), 'c3.y': np.array([17.5])},
        4: {'iv.x1': np.array([.5]), 'iv.x2': np.array([.5]), 'c3.y': np.array([14.5])},
        5: {'iv.x1': np.array([1.]), 'iv.x2': np.array([.5]), 'c3.y': np.array([11.5])},
        6: {'iv.x1': np.array([0.]), 'iv.x2': np.array([1.]), 'c3.y': np.array([35.0])},
        7: {'iv.x1': np.array([.5]), 'iv.x2': np.array([1.]), 'c3.y': np.array([32.0])},
        8: {'iv.x1': np.array([1.]), 'iv.x2': np.array([1.]), 'c3.y': np.array([29.0])},
    }

    rank = prob.comm.rank
    # NOTE(review): doe_parallel is a bool here, so True divides as 1 — confirm
    # this mirrors the sibling tests where it is an int proc count.
    size = prob.comm.size // doe_parallel

    # cases will be split across files for each proc up to the number requested
    filename = "cases.sql_%d" % rank

    expect_msg = "Cases from rank %d are being written to %s." % (rank, filename)
    self.assertTrue(expect_msg in output)

    # cases recorded on this proc
    cases = CaseReader(filename).driver_cases
    num_cases = cases.num_cases
    self.assertEqual(num_cases,
                     len(expected) // size + (rank < len(expected) % size))

    for n in range(num_cases):
        idx = n * size + rank  # index of expected case
        outputs = cases.get_case(n).outputs
        for name in ('iv.x1', 'iv.x2', 'c3.y'):
            self.assertEqual(outputs[name], expected[idx][name])

    # total number of cases recorded across all requested procs
    num_cases = prob.comm.allgather(num_cases)
    self.assertEqual(sum(num_cases), len(expected))