def test_assert_no_force_fd_expect_failure(self):
    prob = Problem()
    prob.root = Group()

    paraboloid = Paraboloid()
    prob.root.add('paraboloid', paraboloid, promotes=['x', 'y', 'f_xy'])
    paraboloid.fd_options['force_fd'] = True

    prob.root.add('p1', IndepVarComp('x', 2.0), promotes=['x'])
    prob.root.add('p2', IndepVarComp('y', 2.0), promotes=['y'])

    prob.setup()

    try:
        assert_no_force_fd(prob.root)
    except AssertionError:
        pass
    else:
        self.fail('Expected AssertionError')

def test_driver_records_unknown_types_metadata(self):
    prob = Problem()
    root = prob.root = Group()

    # Need an optimization problem to make sure the is_desvar, is_con,
    # and is_obj metadata is being recorded for the Unknowns.
    root.add('p1', IndepVarComp('x', 50.0))
    root.add('p2', IndepVarComp('y', 50.0))
    root.add('comp', Paraboloid())

    root.connect('p1.x', 'comp.x')
    root.connect('p2.y', 'comp.y')

    prob.driver = ScipyOptimizer()
    prob.driver.options['optimizer'] = 'SLSQP'
    prob.driver.add_desvar('p1.x', lower=-50.0, upper=50.0)
    prob.driver.add_desvar('p2.y', lower=-50.0, upper=50.0)
    prob.driver.add_objective('comp.f_xy')
    prob.driver.options['disp'] = False

    prob.driver.add_recorder(self.recorder)
    self.recorder.options['record_metadata'] = True

    prob.setup(check=False)
    self.recorder.close()

    expected_params = list(iteritems(prob.root.params))
    expected_unknowns = list(iteritems(prob.root.unknowns))
    expected_resids = list(iteritems(prob.root.resids))

    self.assertMetadataRecorded((expected_params, expected_unknowns, expected_resids))

def test_simple_paraboloid_lower(self):
    prob = Problem()
    root = prob.root = Group()

    root.add('p1', IndepVarComp('x', 50.0), promotes=['*'])
    root.add('p2', IndepVarComp('y', 50.0), promotes=['*'])
    root.add('comp', Paraboloid(), promotes=['*'])
    root.add('con', ExecComp('c = x - y'), promotes=['*'])

    prob.driver = pyOptSparseDriver()
    prob.driver.options['optimizer'] = OPTIMIZER
    if OPTIMIZER == 'SLSQP':
        prob.driver.opt_settings['ACC'] = 1e-9
    prob.driver.options['print_results'] = False
    prob.driver.add_desvar('x', lower=-50.0, upper=50.0)
    prob.driver.add_desvar('y', lower=-50.0, upper=50.0)
    prob.driver.add_objective('f_xy')
    prob.driver.add_constraint('c', lower=15.0)

    prob.setup(check=False)
    prob.run()

    # Minimum should be at (7.166667, -7.833334)
    assert_rel_error(self, prob['x'], 7.16667, 1e-6)
    assert_rel_error(self, prob['y'], -7.833334, 1e-6)

def test_inf_as_desvar_bounds(self):
    # A user may pass np.inf as a design variable bound. It is unnecessary,
    # but it can happen, so make sure SLSQP doesn't blow up with it
    # (bug reported by rfalck).
    prob = Problem()
    root = prob.root = Group()

    root.add('p1', IndepVarComp('x', 50.0), promotes=['*'])
    root.add('p2', IndepVarComp('y', 50.0), promotes=['*'])
    root.add('comp', Paraboloid(), promotes=['*'])
    root.add('con', ExecComp('c = - x + y'), promotes=['*'])

    prob.driver = pyOptSparseDriver()
    prob.driver.options['optimizer'] = 'SLSQP'
    prob.driver.opt_settings['ACC'] = 1e-9
    prob.driver.options['print_results'] = False
    prob.driver.add_desvar('x', lower=-np.inf, upper=np.inf)
    prob.driver.add_desvar('y', lower=-50.0, upper=50.0)
    prob.driver.add_objective('f_xy')
    prob.driver.add_constraint('c', upper=-15.0)

    prob.setup(check=False)
    prob.run()

    # Minimum should be at (7.166667, -7.833334)
    assert_rel_error(self, prob['x'], 7.16667, 1e-6)
    assert_rel_error(self, prob['y'], -7.833334, 1e-6)

def test_simple_paraboloid_scaled_objective_rev(self):
    prob = Problem()
    root = prob.root = Group()

    root.add('p1', IndepVarComp('x', 50.0), promotes=['*'])
    root.add('p2', IndepVarComp('y', 50.0), promotes=['*'])
    root.add('comp', Paraboloid(), promotes=['*'])
    root.add('con', ExecComp('c = x - y'), promotes=['*'])

    prob.driver = pyOptSparseDriver()
    prob.driver.options['optimizer'] = OPTIMIZER
    if OPTIMIZER == 'SNOPT':
        prob.driver.opt_settings['Verify level'] = 3
    prob.driver.options['print_results'] = False
    prob.driver.add_desvar('x', lower=-50.0, upper=50.0)
    prob.driver.add_desvar('y', lower=-50.0, upper=50.0)
    prob.driver.add_objective('f_xy', scaler=1/10.)
    prob.driver.add_constraint('c', lower=10.0, upper=11.0)

    root.ln_solver.options['mode'] = 'rev'

    prob.setup(check=False)
    prob.run()

    # Minimum should be at (7.166667, -7.833334)
    assert_rel_error(self, prob['x'] - prob['y'], 11.0, 1e-6)

def test_simple_paraboloid_equality_linear(self):
    prob = Problem()
    root = prob.root = Group()

    root.add('p1', IndepVarComp('x', 50.0), promotes=['*'])
    root.add('p2', IndepVarComp('y', 50.0), promotes=['*'])
    root.add('comp', Paraboloid(), promotes=['*'])
    root.add('con', ExecComp('c = - x + y'), promotes=['*'])

    prob.driver = pyOptSparseDriver()
    prob.driver.options['optimizer'] = OPTIMIZER
    if OPTIMIZER == 'SLSQP':
        prob.driver.opt_settings['ACC'] = 1e-9
    prob.driver.options['print_results'] = False
    prob.driver.add_desvar('x', lower=-50.0, upper=50.0)
    prob.driver.add_desvar('y', lower=-50.0, upper=50.0)
    prob.driver.add_objective('f_xy')
    prob.driver.add_constraint('c', equals=-15.0, linear=True)

    if OPTIMIZER == 'SNOPT':
        # There is currently a bug in SNOPT: it requires at least one
        # nonlinear inequality constraint, so provide a 'fake' one.
        prob.driver.add_constraint('x', lower=-100.0)

    prob.setup(check=False)
    prob.run()

    # Minimum should be at (7.166667, -7.833334)
    assert_rel_error(self, prob['x'], 7.16667, 1e-6)
    assert_rel_error(self, prob['y'], -7.833334, 1e-6)

def test_algorithm_coverage_lhc(self):
    prob = Problem(impl=impl)
    root = prob.root = Group()

    root.add('p1', IndepVarComp('x', 50.0), promotes=['*'])
    root.add('p2', IndepVarComp('y', 50.0), promotes=['*'])
    root.add('comp', Paraboloid(), promotes=['*'])

    prob.driver = LatinHypercubeDriver(100, num_par_doe=self.N_PROCS)
    prob.driver.add_desvar('x', lower=-50.0, upper=50.0)
    prob.driver.add_desvar('y', lower=-50.0, upper=50.0)
    prob.driver.add_objective('f_xy')

    prob.setup(check=False)
    prob.run()

    if MPI:
        runList = prob.driver._distrib_build_runlist()
        expected_runs = 25
    else:
        runList = prob.driver._build_runlist()
        expected_runs = 100

    # Ensure the generated run list is a generator
    self.assertTrue(type(runList) == GeneratorType,
                    "_build_runlist did not return a generator.")

    # Collect the floored sample values into sets
    xSet = set()
    ySet = set()
    countRuns = 0
    for inputLine in runList:
        countRuns += 1
        x, y = dict(inputLine).values()
        xSet.add(np.floor(x))
        ySet.add(np.floor(y))

    # Assert we had the correct number of runs
    self.assertTrue(countRuns == expected_runs,
                    "Incorrect number of runs generated. expected %d but got %d" %
                    (expected_runs, countRuns))

    # Assert all input values are in range [-50, 50]
    valuesInRange = True
    for value in xSet | ySet:
        if value < (-50) or value > 49:
            valuesInRange = False
    self.assertTrue(valuesInRange,
                    "One of the input values was outside the given range.")

    # Assert a single input in each interval [n, n+1] for n in [-50, 49]
    self.assertTrue(len(xSet) == expected_runs,
                    "One of the intervals wasn't covered.")
    self.assertTrue(len(ySet) == expected_runs,
                    "One of the intervals wasn't covered.")

def test_simple_paraboloid_constrained_COBYLA_upper(self):
    prob = Problem()
    root = prob.root = Group()

    root.add('p1', IndepVarComp('x', 50.0), promotes=['*'])
    root.add('p2', IndepVarComp('y', 50.0), promotes=['*'])
    root.add('comp', Paraboloid(), promotes=['*'])
    root.add('con', ExecComp('c = y - x'), promotes=['*'])

    prob.driver = ScipyOptimizer()
    prob.driver.options['optimizer'] = 'COBYLA'
    prob.driver.options['tol'] = 1.0e-8
    prob.driver.add_desvar('x', lower=-50.0, upper=50.0)
    prob.driver.add_desvar('y', lower=-50.0, upper=50.0)
    prob.driver.add_objective('f_xy')
    prob.driver.add_constraint('c', upper=-15.0)
    prob.driver.options['disp'] = False

    prob.setup(check=False)
    prob.run()

    # Minimum should be at (7.166667, -7.833334)
    assert_rel_error(self, prob['x'], 7.16667, 1e-6)
    assert_rel_error(self, prob['y'], -7.833334, 1e-6)

def test_assert_no_force_fd(self):
    prob = Problem()
    prob.root = Group()

    prob.root.add('paraboloid', Paraboloid(), promotes=['x', 'y', 'f_xy'])
    prob.root.add('p1', IndepVarComp('x', 2.0), promotes=['x'])
    prob.root.add('p2', IndepVarComp('y', 2.0), promotes=['y'])

    prob.setup(check=False)

    assert_no_force_fd(prob.root)

def test_array_scaler_bug(self):

    class Paraboloid(Component):

        def __init__(self):
            super(Paraboloid, self).__init__()
            self.add_param('X', val=np.array([0.0, 0.0]))
            self.add_output('f_xy', val=0.0)

        def solve_nonlinear(self, params, unknowns, resids):
            X = params['X']
            x = X[0]
            y = X[1]
            unknowns['f_xy'] = (1000.*x - 3.)**2 + (1000.*x)*(0.01*y) + \
                               (0.01*y + 4.)**2 - 3.

        def linearize(self, params, unknowns, resids):
            """ Jacobian for our paraboloid."""
            X = params['X']
            J = {}
            x = X[0]
            y = X[1]

            J['f_xy', 'X'] = np.array([[2000000.0*x - 6000.0 + 10.0*y,
                                        0.0002*y + 0.08 + 10.0*x]])
            return J

    top = Problem()
    root = top.root = Group()
    root.deriv_options['type'] = 'fd'

    root.add('p1', IndepVarComp('X', np.array([3.0, -4.0])))
    root.add('p', Paraboloid())

    root.connect('p1.X', 'p.X')

    top.driver = ScipyOptimizer()
    top.driver.options['optimizer'] = 'SLSQP'
    top.driver.options['tol'] = 1e-12

    top.driver.add_desvar('p1.X',
                          lower=np.array([-1000.0, -1000.0]),
                          upper=np.array([1000.0, 1000.0]),
                          scaler=np.array([1000., 0.01]))
    top.driver.add_objective('p.f_xy')

    top.setup(check=False)
    top.run()

    # Optimal solution (minimum): x = 6.6667; y = -7.3333
    # Note: this scaling isn't so great, but at least we know it works
    # and the bug is fixed.
    assert_rel_error(self, top['p1.X'][0], 6.666667/1000.0, 1e-3)
    assert_rel_error(self, top['p1.X'][1], -7.333333/0.01, 1e-3)

def test_algorithm_coverage_lhc(self):
    prob = Problem()
    root = prob.root = Group()

    root.add('p1', IndepVarComp('x', 50.0), promotes=['*'])
    root.add('p2', IndepVarComp('y', 50.0), promotes=['*'])
    root.add('comp', Paraboloid(), promotes=['*'])

    prob.driver = LatinHypercubeDriver(100)
    prob.driver.add_desvar('x', lower=-50.0, upper=50.0)
    prob.driver.add_desvar('y', lower=-50.0, upper=50.0)
    prob.driver.add_objective('f_xy')

    prob.setup(check=False)

    runList = prob.driver._build_runlist()
    prob.run()

    # Ensure the generated run list is a generator
    self.assertTrue(type(runList) == GeneratorType,
                    "_build_runlist did not return a generator.")

    # Collect the floored sample values
    xDict = []
    yDict = []
    countRuns = 0
    for inputLine in runList:
        countRuns += 1
        x, y = dict(inputLine).values()
        xDict.append(np.floor(x))
        yDict.append(np.floor(y))

    # Assert we had the correct number of runs
    self.assertTrue(countRuns == 100,
                    "Incorrect number of runs generated.")

    # Assert all input values are in range [-50, 50]
    valuesInRange = True
    for value in xDict + yDict:
        if value < (-50) or value > 49:
            valuesInRange = False
    self.assertTrue(valuesInRange,
                    "One of the input values was outside the given range.")

    # Assert a single input in each interval [n, n+1] for n in [-50, 49]
    self.assertTrue(len(xDict) == 100,
                    "One of the intervals wasn't covered.")
    self.assertTrue(len(yDict) == 100,
                    "One of the intervals wasn't covered.")

def test_scaler_adder(self):

    class ScaleAddDriver(Driver):

        def run(self, problem):
            """ Save away scaled info."""
            params = self.get_desvars()
            param_meta = self.get_desvar_metadata()

            self.set_desvar('x', 0.5)
            problem.root.solve_nonlinear()

            objective = self.get_objectives()
            constraint = self.get_constraints()

            # Stuff we saved should be in the scaled coordinates.
            self.param = params['x']
            self.obj_scaled = objective['f_xy']
            self.con_scaled = constraint['con']
            self.param_high = param_meta['x']['upper']
            self.param_low = param_meta['x']['lower']

    prob = Problem()
    root = prob.root = Group()
    driver = prob.driver = ScaleAddDriver()

    root.add('p1', IndepVarComp([('x', 60000.0, {'desc': 'my x'}),
                                 ('y', 60000.0, {'desc': 'my y'})]),
             promotes=['*'])
    root.add('comp', Paraboloid(), promotes=['*'])
    root.add('constraint', ExecComp('con=f_xy + x + y'), promotes=['*'])

    driver.add_desvar('x', lower=59000.0, upper=61000.0, adder=-60000.0,
                      scaler=1/1000.0)
    driver.add_objective('f_xy', adder=-10890367002.0, scaler=1.0/20)
    driver.add_constraint('con', upper=0.0, adder=-10890487502.0,
                          scaler=1.0/20)

    prob.setup(check=False)
    prob.run()

    self.assertEqual(driver.param_high, 1.0)
    self.assertEqual(driver.param_low, -1.0)
    self.assertEqual(driver.param, 0.0)
    self.assertEqual(prob['x'], 60500.0)
    self.assertEqual(driver.obj_scaled[0], 1.0)
    self.assertEqual(driver.con_scaled[0], 1.0)

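# Sanity check (illustrative helper, not part of the test suite above): the
# magic adder/scaler numbers in test_scaler_adder follow from the driver
# scaling convention, which I believe is scaled = (unscaled + adder) * scaler,
# applied to the standard test Paraboloid f(x, y) = (x-3)**2 + x*y + (y+4)**2 - 3.
def _check_scaler_adder_numbers():
    # Setting the scaled desvar to 0.5 maps to x = 0.5/(1/1000) - (-60000) = 60500;
    # y stays at its initial value of 60000.
    x, y = 60500.0, 60000.0
    f_xy = (x - 3.)**2 + x*y + (y + 4.)**2 - 3.    # 10890367022.0
    con = f_xy + x + y                             # 10890487522.0
    obj_scaled = (f_xy - 10890367002.0) / 20.      # expected 1.0
    con_scaled = (con - 10890487502.0) / 20.       # expected 1.0
    assert obj_scaled == 1.0 and con_scaled == 1.0
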
def test_deriv_options_form(self):
    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', Paraboloid())
    prob.root.add('p1', IndepVarComp('x', 15.0))
    prob.root.add('p2', IndepVarComp('y', 15.0))

    prob.root.connect('p1.x', 'comp.x')
    prob.root.connect('p2.y', 'comp.y')

    comp.deriv_options['type'] = 'fd'
    comp.deriv_options['form'] = 'forward'

    indep_list = ['p1.x']
    unknowns_list = ['comp.f_xy']

    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(indep_list, unknowns_list, return_format='dict')
    assert_rel_error(self, J['comp.f_xy']['p1.x'][0][0], 39.0, 1e-6)

    # Cheat a bit so I can twiddle mode
    OptionsDictionary.locked = False

    # Make sure it gives a good result with a small stepsize
    comp.deriv_options['form'] = 'backward'

    J = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    assert_rel_error(self, J['comp.f_xy']['p1.x'][0][0], 39.0, 1e-6)

    # Make sure it gives a good result with a small stepsize
    comp.deriv_options['form'] = 'central'

    J = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    assert_rel_error(self, J['comp.f_xy']['p1.x'][0][0], 39.0, 1e-6)

    # Now make sure we really are going forward and backward
    comp.deriv_options['form'] = 'forward'
    comp.deriv_options['step_size'] = 1e3

    J = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    self.assertGreater(J['comp.f_xy']['p1.x'][0][0], 0.0)

    comp.deriv_options['form'] = 'backward'

    J = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    self.assertLess(J['comp.f_xy']['p1.x'][0][0], 0.0)

    # Central should get pretty close even for the bad stepsize
    comp.deriv_options['form'] = 'central'

    J = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    assert_rel_error(self, J['comp.f_xy']['p1.x'][0][0], 39.0, 1e-1)

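# Worked check (illustrative helper, not in the original test): assuming the
# standard test Paraboloid f = (x-3)**2 + x*y + (y+4)**2 - 3, the exact
# derivative at x = y = 15 is df/dx = 2*(x-3) + y = 39. With the deliberately
# bad step of 1e3, the three forms behave exactly as the assertions expect:
def _check_fd_forms(h=1.0e3, x=15.0, y=15.0):
    f = lambda x_: (x_ - 3.)**2 + x_*y + (y + 4.)**2 - 3.
    fwd = (f(x + h) - f(x)) / h          # 1039.0 -> positive, as asserted
    bwd = (f(x) - f(x - h)) / h          # -961.0 -> negative, as asserted
    cen = (f(x + h) - f(x - h)) / (2*h)  # 39.0 exactly, since f is quadratic
    return fwd, bwd, cen
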
def test_limit_to_desvar_obj_con(self):
    prob = Problem()
    root = prob.root = Group()
    root.add('p1', IndepVarComp('x', 1.0), promotes=['*'])
    root.add('comp', Paraboloid(), promotes=['*'])

    prob.driver.add_desvar('x')
    prob.driver.add_objective('f_xy')

    prob.setup(check=False)
    prob.run()

    data = prob.check_total_derivatives(out_stream=None)

    self.assertTrue(('f_xy', 'x') in data)
    self.assertTrue(('f_xy', 'y') not in data)

def test_message_no_connections(self):
    p = Problem()
    p.root = Group()
    c = p.root.add('comp', Paraboloid())

    p.setup(check=False)
    p.run_once()

    mystream = StringIO()
    p.check_partial_derivatives(out_stream=mystream)

    text = mystream.getvalue()
    expected = 'Skipping because component has no connected inputs.'
    self.assertTrue(expected in text)

def test_single_comp_paraboloid(self):
    prob = Problem(impl=petsc_impl)
    root = prob.root = Group()

    root.add('p1', IndepVarComp('x', 0.0), promotes=['*'])
    root.add('p2', IndepVarComp('y', 0.0), promotes=['*'])
    root.add('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    root.deriv_options['type'] = 'cs'

    prob.setup(check=False)
    prob.run()

    jac = prob.calc_gradient(['x', 'y'], ['f_xy'])

    # Note: FD cannot reach this accuracy, but CS can.
    assert_rel_error(self, jac[0][0], -6.0, 1e-7)
    assert_rel_error(self, jac[0][1], 8.0, 1e-7)

def test_simple_float(self):
    prob = Problem()
    prob.root = root = Group()
    root.add('x_param', IndepVarComp('x', 17.0), promotes=['x'])
    root.add('y_param', IndepVarComp('y', 19.0), promotes=['y'])
    root.add('mycomp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

    # This step size will give poor FD, but good CS
    root.mycomp.fd_options['step_size'] = 1.0e1
    root.mycomp.fd_options['force_fd'] = True
    root.mycomp.fd_options['form'] = 'complex_step'

    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(['x'], ['f_xy'], mode='fwd', return_format='dict')
    assert_rel_error(self, J['f_xy']['x'][0][0], 47.0, 1e-6)

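# Worked check (illustrative helper, not in the original test): complex step
# avoids subtractive cancellation, and for the quadratic test Paraboloid it is
# exact for any step size, even the huge 1e1 used above. At x = 17, y = 19 the
# analytic derivative is df/dx = 2*(x-3) + y = 47, and Im(f(x + ih))/h
# reproduces it:
def _check_complex_step(h=1.0e1, x=17.0, y=19.0):
    f = lambda x_: (x_ - 3.)**2 + x_*y + (y + 4.)**2 - 3.
    return f(complex(x, h)).imag / h   # 47.0 for this quadratic, any h
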
def test_message_root_is_fd(self):
    p = Problem()
    p.root = Group()
    p.root.add('p1', IndepVarComp('x', 1.0))
    c = p.root.add('comp', Paraboloid())
    p.root.connect('p1.x', 'comp.x')

    p.root.deriv_options['type'] = 'fd'

    p.setup(check=False)

    with self.assertRaises(RuntimeError) as cm:
        p.check_partial_derivatives()

    msg = "You cannot run check_partial_derivatives when option 'type' "
    msg += "in `root` is set to 'fd' or 'cs' because no derivative "
    msg += "vectors are allocated in that case."
    self.assertEqual(str(cm.exception), msg)

def test_message_check_types_are_same(self):
    p = Problem()
    p.root = Group()
    p.root.add('p1', IndepVarComp('x', 1.0))
    c = p.root.add('comp', Paraboloid())
    p.root.connect('p1.x', 'comp.x')

    p.root.comp.deriv_options['type'] = 'fd'
    p.root.comp.deriv_options['check_type'] = 'fd'

    p.setup(check=False)
    p.run_once()

    mystream = StringIO()
    p.check_partial_derivatives(out_stream=mystream)

    text = mystream.getvalue()
    expected = 'Skipping because type == check_type.'
    self.assertTrue(expected in text)

def test_simple_paraboloid_unconstrained_SLSQP(self):
    prob = Problem()
    root = prob.root = Group()

    root.add('p1', IndepVarComp('x', 50.0), promotes=['*'])
    root.add('p2', IndepVarComp('y', 50.0), promotes=['*'])
    root.add('comp', Paraboloid(), promotes=['*'])

    prob.driver = ScipyOptimizer()
    prob.driver.options['optimizer'] = 'SLSQP'
    prob.driver.add_desvar('x', lower=-50.0, upper=50.0)
    prob.driver.add_desvar('y', lower=-50.0, upper=50.0)
    prob.driver.add_objective('f_xy')
    prob.driver.options['disp'] = False

    prob.setup(check=False)
    prob.run()

    # Optimal solution (minimum): x = 6.6667; y = -7.3333
    assert_rel_error(self, prob['x'], 6.666667, 1e-6)
    assert_rel_error(self, prob['y'], -7.333333, 1e-6)

def test_simple_paraboloid_equality(self):
    # Note: this snippet uses the older pre-1.0 OpenMDAO API (ParamComp,
    # add_param with low/high, and ctype constraints) rather than the
    # IndepVarComp/add_desvar API used in the other tests here.
    prob = Problem()
    root = prob.root = Group()

    root.add('p1', ParamComp('x', 50.0), promotes=['*'])
    root.add('p2', ParamComp('y', 50.0), promotes=['*'])
    root.add('comp', Paraboloid(), promotes=['*'])
    root.add('con', ExecComp('c = 15.0 - x + y'), promotes=['*'])

    prob.driver = pyOptSparseDriver()
    prob.driver.add_param('x', low=-50.0, high=50.0)
    prob.driver.add_param('y', low=-50.0, high=50.0)
    prob.driver.add_objective('f_xy')
    prob.driver.add_constraint('c', ctype='ineq')

    prob.setup(check=False)
    prob.run()

    # Minimum should be at (7.166667, -7.833334)
    assert_rel_error(self, prob['x'], 7.16667, 1e-6)
    assert_rel_error(self, prob['y'], -7.833334, 1e-6)

def test_messages(self):
    prob = Problem()
    prob.root = Group()
    prob.root.add('comp', Paraboloid())

    with self.assertRaises(KeyError) as cm:
        prob.root.comp.fd_options['z'] = 1

    # These will all raise deprecation warnings, but will work.
    prob.root.comp.fd_options['force_fd'] = True
    prob.root.comp.fd_options['step_type'] = 'relative'
    prob.root.comp.fd_options['form'] = 'complex_step'
    prob.root.comp.fd_options['extra_check_partials_form'] = 'forward'

    with self.assertRaises(ValueError) as cm:
        prob.root.comp.deriv_options['force_fd'] = True

    msg = "'type' must be one of the following values: '['user', 'fd', 'cs']"
    self.assertTrue(msg in str(cm.exception))

    fd = prob.root.comp.fd_options['force_fd']
    self.assertTrue(fd == True)

def test_fd_options_step_size(self):
    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', Paraboloid())
    prob.root.add('p1', IndepVarComp([('x', 15.0), ('y', 15.0)]))

    prob.root.connect('p1.x', 'comp.x')
    prob.root.connect('p1.y', 'comp.y')

    comp.fd_options['force_fd'] = True

    prob.setup(check=False)
    prob.run()

    J = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    assert_rel_error(self, J['comp.f_xy']['p1.x'][0][0], 39.0, 1e-6)

    # Make sure step_size is used.
    # The derivative should be way high with this.
    comp.fd_options['step_size'] = 1e5

    J = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    self.assertGreater(J['comp.f_xy']['p1.x'][0][0], 1000.0)

def test_simple_paraboloid_unconstrained_SLSQP_bounds(self):
    # Make sure we don't go past upper/lower bounds when they are set.
    prob = Problem()
    root = prob.root = Group()

    root.add('p1', IndepVarComp('x', 50.0), promotes=['*'])
    root.add('p2', IndepVarComp('y', 50.0), promotes=['*'])
    root.add('comp', Paraboloid(), promotes=['*'])
    root.add('obj_comp', ExecComp('obj = -f_xy'), promotes=['*'])

    prob.driver = ScipyOptimizer()
    prob.driver.options['optimizer'] = 'SLSQP'
    prob.driver.add_desvar('x', lower=-50.0, upper=50.0)
    prob.driver.add_desvar('y', lower=-50.0, upper=50.0)
    prob.driver.add_objective('obj')
    prob.driver.options['disp'] = False

    prob.setup(check=False)
    prob.run()

    assert_rel_error(self, prob['x'], 50.0, 1e-6)
    assert_rel_error(self, prob['y'], 50.0, 1e-6)

def test_scaler_adder(self):
    # ScaleAddDriver is the simple Driver subclass defined in the
    # test_scaler_adder variant above.
    prob = Problem()
    root = prob.root = Group()
    driver = prob.driver = ScaleAddDriver()

    root.add('p1', IndepVarComp([('x', 60000.0, {'desc': 'my x'}),
                                 ('y', 60000.0, {'desc': 'my y'})]),
             promotes=['*'])
    root.add('comp', Paraboloid(), promotes=['*'])
    root.add('constraint', ExecComp('con=f_xy + x + y'), promotes=['*'])

    driver.add_desvar('x', lower=59000.0, upper=61000.0, adder=-60000.0,
                      scaler=1/1000.0)
    driver.add_objective('f_xy', adder=-10890367002.0, scaler=1.0/20)
    driver.add_constraint('con', upper=0.0, adder=-10890487502.0,
                          scaler=1.0/20)

    prob.setup(check=False)
    prob.run()

    self.assertEqual(driver.param_high, 1.0)
    self.assertEqual(driver.param_low, -1.0)
    self.assertEqual(driver.param, 0.0)
    self.assertEqual(prob['x'], 60500.0)
    self.assertEqual(driver.obj_scaled[0], 1.0)
    self.assertEqual(driver.con_scaled[0], 1.0)

import sys

# Use a raw string for the Windows path so the backslashes are not treated
# as escape sequences.
sys.path.insert(0, r'C:\Users\gazi\Documents\OpenMDAO')

import openmdao
from openmdao.api import IndepVarComp, Group, Problem, ScipyOptimizer, ExecComp, DumpRecorder
from openmdao.test.paraboloid import Paraboloid
from openmdao.drivers.latinhypercube_driver import OptimizedLatinHypercubeDriver

top = Problem()
root = top.root = Group()

root.add('p1', IndepVarComp('x', 50.0), promotes=['x'])
root.add('p2', IndepVarComp('y', 50.0), promotes=['y'])
root.add('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])

top.driver = OptimizedLatinHypercubeDriver(num_samples=4, seed=0, population=20,
                                           generations=4, norm_method=2)
top.driver.add_desvar('x', lower=-50.0, upper=50.0)
top.driver.add_desvar('y', lower=-50.0, upper=50.0)
top.driver.add_objective('f_xy')

recorder = DumpRecorder('paraboloid')
recorder.options['record_params'] = True
recorder.options['record_unknowns'] = False
recorder.options['record_resids'] = False
top.driver.add_recorder(recorder)
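
# The script above configures the DOE driver and recorder but never executes
# the problem. A minimal sketch of the remaining steps, assuming the standard
# OpenMDAO 1.x Problem API (setup/run/cleanup), would be:
top.setup(check=False)   # finalize connections and allocate vectors
top.run()                # run the optimized-LHC samples through the model
top.cleanup()            # close recorders so the 'paraboloid' dump is flushed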