def test_double_arraycomp(self):
    """Regression test for a bug in the 'array' gradient return format
    when multiple array inputs/outputs are involved."""
    model = Group()
    model.add('x_param1', ParamComp('x1', np.ones((2))), promotes=['*'])
    model.add('x_param2', ParamComp('x2', np.ones((2))), promotes=['*'])
    model.add('mycomp', DoubleArrayComp(), promotes=['*'])

    prob = Problem(impl=impl)
    prob.root = model
    prob.root.ln_solver = PetscKSP()
    prob.setup(check=False)
    prob.run()

    expected = model.mycomp.JJ

    # The stacked Jacobian must match the component's stored one in
    # forward, finite-difference, and reverse modes.
    for mode in ('fwd', 'fd', 'rev'):
        jac = prob.calc_gradient(['x1', 'x2'], ['y1', 'y2'],
                                 mode=mode, return_format='array')
        assert_rel_error(self, np.linalg.norm(jac - expected), 0.0, 1e-8)
def test_double_arraycomp(self):
    """Regression test for a bug in the 'array' gradient return format
    when multiple array inputs/outputs are involved."""
    root = Group()
    root.add('x_param1', IndepVarComp('x1', np.ones((2))), promotes=['*'])
    root.add('x_param2', IndepVarComp('x2', np.ones((2))), promotes=['*'])
    root.add('mycomp', DoubleArrayComp(), promotes=['*'])

    prob = Problem(impl=impl)
    prob.root = root
    prob.root.ln_solver = PetscKSP()
    prob.setup(check=False)
    prob.run()

    truth = root.mycomp.JJ
    ins, outs = ['x1', 'x2'], ['y1', 'y2']

    # Stacked Jacobian must agree with the component's stored matrix
    # in every derivative mode.
    for mode in ('fwd', 'fd', 'rev'):
        full_jac = prob.calc_gradient(ins, outs, mode=mode,
                                      return_format='array')
        assert_rel_error(self, np.linalg.norm(full_jac - truth), 0.0, 1e-8)
def test_simple_array_model(self):
    """Partial-derivative check of a 2x2 linear ExecComp."""
    prob = Problem()
    prob.root = Group()
    prob.root.add('comp',
                  ExecComp(['y[0]=2.0*x[0]+7.0*x[1]',
                            'y[1]=5.0*x[0]-3.0*x[1]'],
                           x=np.zeros([2]), y=np.zeros([2])))
    prob.root.add('p1', ParamComp('x', np.ones([2])))
    prob.root.connect('p1.x', 'comp.x')

    prob.setup(check=False)
    prob.run()

    data = prob.check_partial_derivatives(out_stream=None)

    # All three entries of both error measures should vanish.
    derivs = data['comp'][('y', 'x')]
    for measure in ('abs error', 'rel error'):
        for entry in range(3):
            assert_rel_error(self, derivs[measure][entry], 0.0, 1e-5)
def test_single_diamond_grouped(self):
    """Gradients through the grouped single-diamond topology."""
    prob = Problem()
    prob.root = SingleDiamondGrouped()
    prob.root.ln_solver = ExplicitSolver()
    prob.setup(check=False)
    prob.run()

    inputs = ['p.x']
    outputs = ['comp4.y1', 'comp4.y2']

    # fwd, rev, and fd must all agree with the known derivatives.
    for mode in ('fwd', 'rev', 'fd'):
        jac = prob.calc_gradient(inputs, outputs, mode=mode,
                                 return_format='dict')
        assert_rel_error(self, jac['comp4.y1']['p.x'][0][0], 25, 1e-6)
        assert_rel_error(self, jac['comp4.y2']['p.x'][0][0], -40.5, 1e-6)
def test_fan_in_grouped(self):
    """Fan-in model under PetscKSP: gradients in fwd and rev modes."""
    prob = Problem(impl=impl)
    prob.root = FanInGrouped()
    prob.root.ln_solver = PetscKSP()

    inputs = ['p1.x1', 'p2.x2']
    outputs = ['comp3.y']

    prob.setup(check=False)
    prob.run()

    for mode in ('fwd', 'rev'):
        jac = prob.calc_gradient(inputs, outputs, mode=mode,
                                 return_format='dict')
        assert_rel_error(self, jac['comp3.y']['p1.x1'][0][0], -6.0, 1e-6)
        assert_rel_error(self, jac['comp3.y']['p2.x2'][0][0], 35.0, 1e-6)
def test_converge_diverge_compfd(self):
    """Converge-diverge model with comp2 and comp5 finite-differenced,
    each living under a parallel group."""
    prob = Problem(impl=impl)
    prob.root = ConvergeDivergePar()
    prob.root.ln_solver = PetscKSP()

    # Force FD on one component inside each parallel group.
    prob.root.par1.comp2.fd_options['force_fd'] = True
    prob.root.par2.comp5.fd_options['force_fd'] = True

    prob.setup(check=False)
    prob.run()

    # Converged value sanity check.
    assert_rel_error(self, prob['comp7.y1'], -102.7, 1e-6)

    for mode in ('fwd', 'rev', 'fd'):
        jac = prob.calc_gradient(['p.x'], ['comp7.y1'], mode=mode,
                                 return_format='dict')
        assert_rel_error(self, jac['comp7.y1']['p.x'][0][0], -40.75, 1e-6)
def test_converge_diverge_groups(self):
    """Gradients through the grouped converge-diverge model."""
    prob = Problem()
    prob.root = ConvergeDivergeGroups()
    prob.root.ln_solver = ExplicitSolver()
    prob.setup(check=False)
    prob.run()

    # Converged value sanity check.
    assert_rel_error(self, prob['comp7.y1'], -102.7, 1e-6)

    for mode in ('fwd', 'rev', 'fd'):
        jac = prob.calc_gradient(['p.x'], ['comp7.y1'], mode=mode,
                                 return_format='dict')
        assert_rel_error(self, jac['comp7.y1']['p.x'][0][0], -40.75, 1e-6)
def test_fan_out_grouped(self):
    """Fan-out with the independent variable nested inside a subgroup."""
    prob = Problem(impl=impl)
    prob.root = FanOutGrouped()
    prob.root.ln_solver = PetscKSP()
    prob.setup(check=False)
    prob.run()

    src = 'sub.pgroup.p.x'
    outputs = ['sub.comp2.y', "sub.comp3.y"]

    for mode in ('fwd', 'rev'):
        jac = prob.calc_gradient([src], outputs, mode=mode,
                                 return_format='dict')
        assert_rel_error(self, jac[outputs[0]][src][0][0], -6.0, 1e-6)
        assert_rel_error(self, jac[outputs[1]][src][0][0], 15.0, 1e-6)
def test_simple_deriv_xfer(self):
    # Checks that derivative data transfers (deriv=True) move values
    # between comp3's dp vectors and the subgroup components' du
    # vectors in both 'rev' and 'fwd' directions.
    prob = Problem(impl=impl)
    prob.root = FanInGrouped()
    prob.setup(check=False)

    # Seed the parameter-side derivative vectors on comp3.
    prob.root.comp3.dpmat[None]['x1'] = 7.
    prob.root.comp3.dpmat[None]['x2'] = 11.
    # Reverse transfer: push the seeded params upstream.
    prob.root._transfer_data(mode='rev', deriv=True)

    # Under MPI the test expects comp1's value on rank 0 and comp2's
    # on rank 1; serially both checks run.
    if not MPI or self.comm.rank == 0:
        self.assertEqual(prob.root.sub.comp1.dumat[None]['y'], 7.)
    if not MPI or self.comm.rank == 1:
        self.assertEqual(prob.root.sub.comp2.dumat[None]['y'], 11.)

    # Zero the seeds, confirm they are really zero, then a forward
    # transfer should restore them from the unknown-side vectors.
    prob.root.comp3.dpmat[None]['x1'] = 0.
    prob.root.comp3.dpmat[None]['x2'] = 0.
    self.assertEqual(prob.root.comp3.dpmat[None]['x1'], 0.)
    self.assertEqual(prob.root.comp3.dpmat[None]['x2'], 0.)

    prob.root._transfer_data(mode='fwd', deriv=True)

    self.assertEqual(prob.root.comp3.dpmat[None]['x1'], 7.)
    self.assertEqual(prob.root.comp3.dpmat[None]['x2'], 11.)
def test_simple_array_model2(self):
    """Partial-derivative check of a matrix-vector product ExecComp."""
    prob = Problem()
    prob.root = Group()
    prob.root.add('comp',
                  ExecComp('y = mat.dot(x)',
                           x=np.zeros((2, )), y=np.zeros((2, )),
                           mat=np.array([[2., 7.], [5., -3.]])))
    prob.root.add('p1', ParamComp('x', np.ones([2])))
    prob.root.connect('p1.x', 'comp.x')

    prob.setup(check=False)
    prob.run()

    data = prob.check_partial_derivatives(out_stream=None)

    # All three entries of both error measures should vanish.
    derivs = data['comp'][('y', 'x')]
    for measure in ('abs error', 'rel error'):
        for entry in range(3):
            assert_rel_error(self, derivs[measure][entry], 0.0, 1e-5)
def test_fd_options_meta_step_size(self):
    """A huge per-variable fd_step_size in the param metadata must be
    honored, producing a wildly wrong finite-difference derivative."""

    class MetaParaboloid(Component):
        """Paraboloid with oversized FD steps declared per-param."""

        def __init__(self):
            super(MetaParaboloid, self).__init__()
            # Params carry a deliberately bad step size.
            self.add_param('x', 1.0, fd_step_size=1.0e5)
            self.add_param('y', 1.0, fd_step_size=1.0e5)
            # Unknowns
            self.add_output('f_xy', 0.0)

        def solve_nonlinear(self, params, unknowns, resids):
            """Evaluate f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3."""
            x = params['x']
            y = params['y']
            unknowns['f_xy'] = (x - 3.0)**2 + x*y + (y + 4.0)**2 - 3.0

        def jacobian(self, params, unknowns, resids):
            """Analytic derivatives (bypassed when force_fd is on)."""
            x = params['x']
            y = params['y']
            return {('f_xy', 'x'): 2.0*x - 6.0 + y,
                    ('f_xy', 'y'): 2.0*y + 8.0 + x}

    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', MetaParaboloid())
    prob.root.add('p1', ParamComp('x', 15.0))
    prob.root.add('p2', ParamComp('y', 15.0))
    prob.root.connect('p1.x', 'comp.x')
    prob.root.connect('p2.y', 'comp.y')

    comp.fd_options['force_fd'] = True

    prob.setup(check=False)
    prob.run()

    # With step 1e5 the FD derivative blows up far past the analytic
    # value, proving the per-param metadata step was actually used.
    jac = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    self.assertGreater(jac['comp.f_xy']['p1.x'][0][0], 1000.0)
def test_fd_options_meta_step_size(self):
    """Verify that a bad per-param fd_step_size in metadata is used:
    the resulting FD derivative should be far off the analytic value."""

    class MetaParaboloid(Component):
        """Paraboloid whose params declare an oversized FD step."""

        def __init__(self):
            super(MetaParaboloid, self).__init__()
            # Params with a deliberately huge step size.
            self.add_param('x', 1.0, fd_step_size=1.0e5)
            self.add_param('y', 1.0, fd_step_size=1.0e5)
            # Unknowns
            self.add_output('f_xy', 0.0)

        def solve_nonlinear(self, params, unknowns, resids):
            """f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3."""
            x, y = params['x'], params['y']
            value = (x - 3.0)**2 + x * y + (y + 4.0)**2 - 3.0
            unknowns['f_xy'] = value

        def jacobian(self, params, unknowns, resids):
            """Analytical derivatives (unused under force_fd)."""
            x, y = params['x'], params['y']
            derivs = {}
            derivs['f_xy', 'x'] = 2.0 * x - 6.0 + y
            derivs['f_xy', 'y'] = 2.0 * y + 8.0 + x
            return derivs

    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', MetaParaboloid())
    prob.root.add('p1', ParamComp('x', 15.0))
    prob.root.add('p2', ParamComp('y', 15.0))
    prob.root.connect('p1.x', 'comp.x')
    prob.root.connect('p2.y', 'comp.y')

    comp.fd_options['force_fd'] = True

    prob.setup(check=False)
    prob.run()

    # The bad metadata step should wreck the derivative.
    jac = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    self.assertGreater(jac['comp.f_xy']['p1.x'][0][0], 1000.0)
def test_sellar_derivs_grouped(self):
    """Gradients of the grouped Sellar problem in fwd, rev, and fd."""
    prob = Problem(impl=impl)
    prob.root = SellarDerivativesGrouped()
    prob.root.ln_solver = PetscKSP()
    prob.root.mda.nl_solver.options['atol'] = 1e-12
    prob.setup(check=False)
    prob.run()

    # Converged state sanity checks.
    assert_rel_error(self, prob['y1'], 25.58830273, .00001)
    assert_rel_error(self, prob['y2'], 12.05848819, .00001)

    inputs = ['x', 'z']
    outputs = ['obj', 'con1', 'con2']

    expected = {
        'con1': {'x': -0.98061433,
                 'z': np.array([-9.61002285, -0.78449158])},
        'con2': {'x': 0.09692762,
                 'z': np.array([1.94989079, 1.0775421])},
        'obj': {'x': 2.98061392,
                'z': np.array([9.61001155, 1.78448534])},
    }

    def check(jac):
        # Compare every (of, wrt) pair against the reference values.
        for of, wrt_vals in expected.items():
            for wrt, val in wrt_vals.items():
                assert_rel_error(self, jac[of][wrt], val, .00001)

    check(prob.calc_gradient(inputs, outputs, mode='fwd',
                             return_format='dict'))
    check(prob.calc_gradient(inputs, outputs, mode='rev',
                             return_format='dict'))

    # Central-difference FD should agree as well.
    prob.root.fd_options['form'] = 'central'
    check(prob.calc_gradient(inputs, outputs, mode='fd',
                             return_format='dict'))
def test_simple(self):
    """Derivative of 2x through a single matvec component."""
    model = Group()
    model.add('x_param', IndepVarComp('x', 1.0), promotes=['*'])
    model.add('mycomp', SimpleCompDerivMatVec(), promotes=['x', 'y'])

    prob = Problem(impl=impl)
    prob.root = model
    prob.root.ln_solver = PetscKSP()
    prob.setup(check=False)
    prob.run()

    for mode in ('fwd', 'rev'):
        jac = prob.calc_gradient(['x'], ['y'], mode=mode,
                                 return_format='dict')
        assert_rel_error(self, jac['y']['x'][0][0], 2.0, 1e-6)
def test_simple(self):
    """Derivative of 2x through a single matvec component."""
    root = Group()
    root.add('x_param', ParamComp('x', 1.0), promotes=['*'])
    root.add('mycomp', SimpleCompDerivMatVec(), promotes=['x', 'y'])

    prob = Problem(impl=impl)
    prob.root = root
    prob.root.ln_solver = PetscKSP()
    prob.setup(check=False)
    prob.run()

    # Forward and reverse modes both give dy/dx = 2.
    for direction in ('fwd', 'rev'):
        deriv = prob.calc_gradient(['x'], ['y'], mode=direction,
                                   return_format='dict')
        assert_rel_error(self, deriv['y']['x'][0][0], 2.0, 1e-6)
def test_simple_jac(self):
    """Derivative of y = 2x from an ExecComp under ExplicitSolver."""
    model = Group()
    model.add('x_param', ParamComp('x', 1.0), promotes=['*'])
    model.add('mycomp', ExecComp(['y=2.0*x']), promotes=['x', 'y'])

    prob = Problem()
    prob.root = model
    prob.root.ln_solver = ExplicitSolver()
    prob.setup(check=False)
    prob.run()

    for mode in ('fwd', 'rev'):
        jac = prob.calc_gradient(['x'], ['y'], mode=mode,
                                 return_format='dict')
        assert_rel_error(self, jac['y']['x'][0][0], 2.0, 1e-6)
def test_simple_jac(self):
    """Derivative of y = 2x from an ExecComp under DirectSolver."""
    root = Group()
    root.add('x_param', IndepVarComp('x', 1.0), promotes=['*'])
    root.add('mycomp', ExecComp(['y=2.0*x']), promotes=['x', 'y'])

    prob = Problem()
    prob.root = root
    prob.root.ln_solver = DirectSolver()
    prob.setup(check=False)
    prob.run()

    # dy/dx = 2 in both directions.
    for direction in ('fwd', 'rev'):
        deriv = prob.calc_gradient(['x'], ['y'], mode=direction,
                                   return_format='dict')
        assert_rel_error(self, deriv['y']['x'][0][0], 2.0, 1e-6)
def test_fd_options_form(self):
    """FD 'form' option: all three forms are accurate at the default
    step; forward/backward diverge in sign at a huge step; central
    remains close."""
    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', Paraboloid())
    prob.root.add('p1', ParamComp('x', 15.0))
    prob.root.add('p2', ParamComp('y', 15.0))
    prob.root.connect('p1.x', 'comp.x')
    prob.root.connect('p2.y', 'comp.y')

    comp.fd_options['force_fd'] = True
    comp.fd_options['form'] = 'forward'

    prob.setup(check=False)
    prob.run()

    # All three forms give a good answer at the default small step.
    for form in ('forward', 'backward', 'central'):
        comp.fd_options['form'] = form
        jac = prob.calc_gradient(['p1.x'], ['comp.f_xy'],
                                 return_format='dict')
        assert_rel_error(self, jac['comp.f_xy']['p1.x'][0][0], 39.0, 1e-6)

    # With an enormous step, forward and backward differencing land on
    # opposite sides of the true slope — proving the form is honored.
    comp.fd_options['form'] = 'forward'
    comp.fd_options['step_size'] = 1e3
    jac = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    self.assertGreater(jac['comp.f_xy']['p1.x'][0][0], 0.0)

    comp.fd_options['form'] = 'backward'
    jac = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    self.assertLess(jac['comp.f_xy']['p1.x'][0][0], 0.0)

    # Central differencing stays close even at the bad step size.
    comp.fd_options['form'] = 'central'
    jac = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    assert_rel_error(self, jac['comp.f_xy']['p1.x'][0][0], 39.0, 1e-1)
def test_simple_in_group_matvec(self):
    """Matvec component promoted up through a nested subgroup."""
    model = Group()
    nested = model.add('sub', Group(), promotes=['x', 'y'])
    model.add('x_param', ParamComp('x', 1.0), promotes=['*'])
    nested.add('mycomp', SimpleCompDerivMatVec(), promotes=['x', 'y'])

    prob = Problem()
    prob.root = model
    prob.root.ln_solver = ExplicitSolver()
    prob.setup(check=False)
    prob.run()

    for mode in ('fwd', 'rev'):
        jac = prob.calc_gradient(['x'], ['y'], mode=mode,
                                 return_format='dict')
        assert_rel_error(self, jac['y']['x'][0][0], 2.0, 1e-6)
def test_single_diamond(self):
    """Gradients through the single-diamond topology under PetscKSP."""
    prob = Problem(impl=impl)
    prob.root = SingleDiamond()
    prob.root.ln_solver = PetscKSP()
    prob.setup(check=False)
    prob.run()

    inputs = ['p.x']
    outputs = ['comp4.y1', 'comp4.y2']

    for mode in ('fwd', 'rev'):
        jac = prob.calc_gradient(inputs, outputs, mode=mode,
                                 return_format='dict')
        assert_rel_error(self, jac['comp4.y1']['p.x'][0][0], 25, 1e-6)
        assert_rel_error(self, jac['comp4.y2']['p.x'][0][0], -40.5, 1e-6)
def test_fan_out(self):
    """Fan-out model gradients under DirectSolver."""
    prob = Problem()
    prob.root = FanOut()
    prob.root.ln_solver = DirectSolver()
    prob.setup(check=False)
    prob.run()

    inputs = ['p.x']
    outputs = ['comp2.y', "comp3.y"]

    for mode in ('fwd', 'rev'):
        jac = prob.calc_gradient(inputs, outputs, mode=mode,
                                 return_format='dict')
        assert_rel_error(self, jac['comp2.y']['p.x'][0][0], -6.0, 1e-6)
        assert_rel_error(self, jac['comp3.y']['p.x'][0][0], 15.0, 1e-6)
def test_fan_in_grouped(self):
    """Fan-in model gradients under ExplicitSolver."""
    prob = Problem()
    prob.root = FanInGrouped()
    prob.root.ln_solver = ExplicitSolver()
    prob.setup(check=False)
    prob.run()

    inputs = ['p1.x1', 'p2.x2']
    outputs = ['comp3.y']

    for mode in ('fwd', 'rev'):
        jac = prob.calc_gradient(inputs, outputs, mode=mode,
                                 return_format='dict')
        assert_rel_error(self, jac['comp3.y']['p1.x1'][0][0], -6.0, 1e-6)
        assert_rel_error(self, jac['comp3.y']['p2.x2'][0][0], 35.0, 1e-6)
def test_fan_in(self):
    """Fan-in model gradients under PetscKSP."""
    prob = Problem(impl=impl)
    prob.root = FanIn()
    prob.root.ln_solver = PetscKSP()
    prob.setup(check=False)
    prob.run()

    inputs = ['p1.x1', 'p2.x2']
    outputs = ['comp3.y']

    for mode in ('fwd', 'rev'):
        jac = prob.calc_gradient(inputs, outputs, mode=mode,
                                 return_format='dict')
        assert_rel_error(self, jac['comp3.y']['p1.x1'][0][0], -6.0, 1e-6)
        assert_rel_error(self, jac['comp3.y']['p2.x2'][0][0], 35.0, 1e-6)
def test_no_derivatives(self):
    """Force-FD on a component that supplies no analytic derivatives."""
    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', ExecComp('y=x*2.0'))
    prob.root.add('p1', ParamComp('x', 2.0))
    prob.root.connect('p1.x', 'comp.x')

    comp.fd_options['force_fd'] = True

    prob.setup(check=False)
    prob.run()

    # FD supplies the missing derivative in both modes.
    for mode in ('fwd', 'rev'):
        jac = prob.calc_gradient(['p1.x'], ['comp.y'], mode=mode,
                                 return_format='dict')
        assert_rel_error(self, jac['comp.y']['p1.x'][0][0], 2.0, 1e-6)
def test_fan_out_grouped(self):
    """Grouped fan-out gradients under PetscKSP."""
    prob = Problem(impl=impl)
    prob.root = FanOutGrouped()
    prob.root.ln_solver = PetscKSP()
    prob.setup(check=False)
    prob.run()

    inputs = ['p.x']
    outputs = ['sub.comp2.y', "sub.comp3.y"]

    for mode in ('fwd', 'rev'):
        jac = prob.calc_gradient(inputs, outputs, mode=mode,
                                 return_format='dict')
        assert_rel_error(self, jac['sub.comp2.y']['p.x'][0][0], -6.0, 1e-6)
        assert_rel_error(self, jac['sub.comp3.y']['p.x'][0][0], 15.0, 1e-6)
def test_array2D(self):
    """2D-array Jacobian matches the component's cached one."""
    model = Group()
    model.add('x_param', IndepVarComp('x', np.ones((2, 2))),
              promotes=['*'])
    model.add('mycomp', ArrayComp2D(), promotes=['x', 'y'])

    prob = Problem(impl=impl)
    prob.root = model
    prob.root.ln_solver = PetscKSP()
    prob.setup(check=False)
    prob.run()

    # NOTE: the cache is read after the first calc_gradient, matching
    # the point at which the component's jacobian has been evaluated.
    jac = prob.calc_gradient(['x'], ['y'], mode='fwd',
                             return_format='dict')
    expected = prob.root.mycomp._jacobian_cache
    err = np.linalg.norm(jac['y']['x'] - expected['y', 'x'])
    assert_rel_error(self, err, 0.0, 1e-8)

    jac = prob.calc_gradient(['x'], ['y'], mode='rev',
                             return_format='dict')
    err = np.linalg.norm(jac['y']['x'] - expected['y', 'x'])
    assert_rel_error(self, err, 0.0, 1e-8)
def test_sellar_derivs_grouped(self):
    """Gradients of the grouped Sellar problem in fwd, rev, and fd."""
    prob = Problem(impl=impl)
    prob.root = SellarDerivativesGrouped()
    prob.root.ln_solver = PetscKSP()
    prob.root.mda.nl_solver.options['atol'] = 1e-12
    prob.setup(check=False)
    prob.run()

    # Converged state sanity checks.
    assert_rel_error(self, prob['y1'], 25.58830273, .00001)
    assert_rel_error(self, prob['y2'], 12.05848819, .00001)

    inputs = ['x', 'z']
    outputs = ['obj', 'con1', 'con2']

    reference = {
        'con1': {'x': -0.98061433,
                 'z': np.array([-9.61002285, -0.78449158])},
        'con2': {'x': 0.09692762,
                 'z': np.array([1.94989079, 1.0775421])},
        'obj': {'x': 2.98061392,
                'z': np.array([9.61001155, 1.78448534])},
    }

    jac = prob.calc_gradient(inputs, outputs, mode='fwd',
                             return_format='dict')
    for of, wrt_vals in reference.items():
        for wrt, val in wrt_vals.items():
            assert_rel_error(self, jac[of][wrt], val, .00001)

    jac = prob.calc_gradient(inputs, outputs, mode='rev',
                             return_format='dict')
    for of, wrt_vals in reference.items():
        for wrt, val in wrt_vals.items():
            assert_rel_error(self, jac[of][wrt], val, .00001)

    # Central-difference FD should agree as well.
    prob.root.fd_options['form'] = 'central'
    jac = prob.calc_gradient(inputs, outputs, mode='fd',
                             return_format='dict')
    for of, wrt_vals in reference.items():
        for wrt, val in wrt_vals.items():
            assert_rel_error(self, jac[of][wrt], val, .00001)
def test_array2D(self):
    """2D-array Jacobian matches the component's cached one."""
    root = Group()
    root.add('x_param', ParamComp('x', np.ones((2, 2))), promotes=['*'])
    root.add('mycomp', ArrayComp2D(), promotes=['x', 'y'])

    prob = Problem()
    prob.root = root
    prob.root.ln_solver = ExplicitSolver()
    prob.setup(check=False)
    prob.run()

    # Cache is read after the first gradient call, once the component's
    # jacobian has been evaluated.
    fwd = prob.calc_gradient(['x'], ['y'], mode='fwd',
                             return_format='dict')
    cached = prob.root.mycomp._jacobian_cache
    assert_rel_error(self,
                     np.linalg.norm(fwd['y']['x'] - cached['y', 'x']),
                     0.0, 1e-8)

    rev = prob.calc_gradient(['x'], ['y'], mode='rev',
                             return_format='dict')
    assert_rel_error(self,
                     np.linalg.norm(rev['y']['x'] - cached['y', 'x']),
                     0.0, 1e-8)
def test_overrides(self):
    """force_fd must bypass both apply_linear and jacobian."""

    class OverrideComp(Component):
        """Component whose analytic-derivative hooks must never run."""

        def __init__(self):
            super(OverrideComp, self).__init__()
            # Params
            self.add_param('x', 3.0)
            # Unknowns
            self.add_output('y', 5.5)

        def solve_nonlinear(self, params, unknowns, resids):
            """Simple linear output."""
            unknowns['y'] = 7.0 * params['x']

        def apply_linear(self, params, unknowns, dparams, dunknowns,
                         dresids, mode):
            """Must never be called once force_fd is set."""
            raise RuntimeError(
                "This should have been overridden by force_fd.")

        def jacobian(self, params, unknowns, resids):
            """Must never be called once force_fd is set."""
            raise RuntimeError(
                "This should have been overridden by force_fd.")

    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', OverrideComp())
    prob.root.add('p1', ParamComp('x', 2.0))
    prob.root.connect('p1.x', 'comp.x')

    comp.fd_options['force_fd'] = True

    prob.setup(check=False)
    prob.run()

    # If FD were not forced, this would hit the RuntimeErrors above.
    jac = prob.calc_gradient(['p1.x'], ['comp.y'], mode='fwd',
                             return_format='dict')
    assert_rel_error(self, jac['comp.y']['p1.x'][0][0], 7.0, 1e-6)
def test_simple_array_model2(self):
    """Partial-derivative check of a matrix-vector product ExecComp."""
    prob = Problem()
    prob.root = Group()
    prob.root.add('comp',
                  ExecComp('y = mat.dot(x)',
                           x=np.zeros((2,)),
                           y=np.zeros((2,)),
                           mat=np.array([[2., 7.], [5., -3.]])))
    prob.root.add('p1', ParamComp('x', np.ones([2])))
    prob.root.connect('p1.x', 'comp.x')

    prob.setup(check=False)
    prob.run()

    data = prob.check_partial_derivatives(out_stream=None)

    # fwd/rev/fd comparisons should all be essentially exact.
    errors = data['comp'][('y', 'x')]
    for kind in ('abs error', 'rel error'):
        for idx in range(3):
            assert_rel_error(self, errors[kind][idx], 0.0, 1e-5)
def test_overrides(self):
    """With force_fd on, neither apply_linear nor jacobian may run."""

    class OverrideComp(Component):
        """Raises if its analytic-derivative hooks are ever invoked."""

        def __init__(self):
            super(OverrideComp, self).__init__()
            # Params
            self.add_param('x', 3.0)
            # Unknowns
            self.add_output('y', 5.5)

        def solve_nonlinear(self, params, unknowns, resids):
            """y = 7x."""
            unknowns['y'] = 7.0 * params['x']

        def apply_linear(self, params, unknowns, dparams, dunknowns,
                         dresids, mode):
            """Sentinel: should be bypassed by force_fd."""
            raise RuntimeError("This should have been overridden by force_fd.")

        def jacobian(self, params, unknowns, resids):
            """Sentinel: should be bypassed by force_fd."""
            raise RuntimeError("This should have been overridden by force_fd.")

    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', OverrideComp())
    prob.root.add('p1', ParamComp('x', 2.0))
    prob.root.connect('p1.x', 'comp.x')

    comp.fd_options['force_fd'] = True

    prob.setup(check=False)
    prob.run()

    # Would raise RuntimeError if FD had not replaced the hooks.
    deriv = prob.calc_gradient(['p1.x'], ['comp.y'], mode='fwd',
                               return_format='dict')
    assert_rel_error(self, deriv['comp.y']['p1.x'][0][0], 7.0, 1e-6)
def test_converge_diverge_groups(self):
    """Gradients through the grouped converge-diverge model (PetscKSP)."""
    prob = Problem(impl=impl)
    prob.root = ConvergeDivergeGroups()
    prob.root.ln_solver = PetscKSP()
    prob.setup(check=False)
    prob.run()

    # Converged value sanity check.
    assert_rel_error(self, prob['comp7.y1'], -102.7, 1e-6)

    for mode in ('fwd', 'rev', 'fd'):
        jac = prob.calc_gradient(['p.x'], ['comp7.y1'], mode=mode,
                                 return_format='dict')
        assert_rel_error(self, jac['comp7.y1']['p.x'][0][0], -40.75, 1e-6)
def test_simple_array_model(self):
    """Partial-derivative check of a 2x2 linear ExecComp."""
    prob = Problem()
    prob.root = Group()
    prob.root.add('comp',
                  ExecComp(['y[0]=2.0*x[0]+7.0*x[1]',
                            'y[1]=5.0*x[0]-3.0*x[1]'],
                           x=np.zeros([2]),
                           y=np.zeros([2])))
    prob.root.add('p1', ParamComp('x', np.ones([2])))
    prob.root.connect('p1.x', 'comp.x')

    prob.setup(check=False)
    prob.run()

    data = prob.check_partial_derivatives(out_stream=None)

    # All comparison entries for both error measures should vanish.
    errors = data['comp'][('y', 'x')]
    for kind in ('abs error', 'rel error'):
        for idx in range(3):
            assert_rel_error(self, errors[kind][idx], 0.0, 1e-5)
def openmdao_sellar_converged():
    """Build, optimize (SLSQP), and return a converged Sellar problem."""
    from openmdao.core import Problem
    from openmdao.drivers import ScipyOptimizer

    top = Problem()
    top.root = SellarDerivatives()

    # SLSQP with tight tolerance and no console output.
    top.driver = ScipyOptimizer()
    top.driver.options['optimizer'] = 'SLSQP'
    top.driver.options['tol'] = 1.0e-8
    top.driver.options['disp'] = False

    # Design variables, objective, and constraints of the standard
    # Sellar problem formulation.
    top.driver.add_desvar('z', low=np.array([-10.0, 0.0]),
                          high=np.array([10.0, 10.0]))
    top.driver.add_desvar('x', low=0.0, high=10.0)
    top.driver.add_objective('obj')
    top.driver.add_constraint('con1', upper=0.0)
    top.driver.add_constraint('con2', upper=0.0)

    top.setup(check=False)
    top.run()
    return top
def test_fd_options_step_size(self):
    """fd_options['step_size'] must actually drive the FD step."""
    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', Paraboloid())
    prob.root.add('p1', ParamComp([('x', 15.0), ('y', 15.0)]))
    prob.root.connect('p1.x', 'comp.x')
    prob.root.connect('p1.y', 'comp.y')

    comp.fd_options['force_fd'] = True

    prob.setup(check=False)
    prob.run()

    # Default step gives an accurate derivative.
    jac = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    assert_rel_error(self, jac['comp.f_xy']['p1.x'][0][0], 39.0, 1e-6)

    # A huge step must wreck the derivative, proving it was honored.
    comp.fd_options['step_size'] = 1e5
    jac = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')
    self.assertGreater(jac['comp.f_xy']['p1.x'][0][0], 1000.0)
def test_fan_out(self):
    """Fan-out model gradients under PetscKSP."""
    prob = Problem(impl=impl)
    prob.root = FanOut()
    prob.root.ln_solver = PetscKSP()
    prob.setup(check=False)
    prob.run()

    inputs = ['p.x']
    outputs = ['comp2.y', "comp3.y"]

    for mode in ('fwd', 'rev'):
        jac = prob.calc_gradient(inputs, outputs, mode=mode,
                                 return_format='dict')
        assert_rel_error(self, jac['comp2.y']['p.x'][0][0], -6.0, 1e-6)
        assert_rel_error(self, jac['comp3.y']['p.x'][0][0], 15.0, 1e-6)
def test_converge_diverge(self):
    """Gradients through the converge-diverge model in all modes.

    Fix: the original called ``prob.run()`` twice back-to-back with no
    intervening state change; the redundant second run was removed.
    """
    prob = Problem()
    prob.root = ConvergeDiverge()
    prob.root.ln_solver = ExplicitSolver()
    prob.setup(check=False)
    prob.run()

    # Make sure value is fine.
    assert_rel_error(self, prob['comp7.y1'], -102.7, 1e-6)

    param_list = ['p.x']
    unknown_list = ['comp7.y1']

    for mode in ('fwd', 'rev', 'fd'):
        J = prob.calc_gradient(param_list, unknown_list, mode=mode,
                               return_format='dict')
        assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)
def test_converge_diverge_compfd(self):
    """Converge-diverge with comp2/comp5 finite-differenced; both sit
    inside parallel groups."""
    prob = Problem(impl=impl)
    prob.root = ConvergeDivergePar()
    prob.root.ln_solver = PetscKSP()

    # Force FD on one component in each parallel group.
    prob.root.par1.comp2.fd_options['force_fd'] = True
    prob.root.par2.comp5.fd_options['force_fd'] = True

    prob.setup(check=False)
    prob.run()

    # Converged value sanity check.
    assert_rel_error(self, prob['comp7.y1'], -102.7, 1e-6)

    inputs = ['p.x']
    outputs = ['comp7.y1']

    for mode in ('fwd', 'rev', 'fd'):
        deriv = prob.calc_gradient(inputs, outputs, mode=mode,
                                   return_format='dict')
        assert_rel_error(self, deriv['comp7.y1']['p.x'][0][0],
                         -40.75, 1e-6)
def test_fan_out_grouped(self):
    """Grouped fan-out gradients under ExplicitSolver."""
    prob = Problem()
    prob.root = FanOutGrouped()
    prob.root.ln_solver = ExplicitSolver()
    prob.setup(check=False)
    prob.run()

    inputs = ['p.x']
    outputs = ['sub.comp2.y', "sub.comp3.y"]

    for mode in ('fwd', 'rev'):
        jac = prob.calc_gradient(inputs, outputs, mode=mode,
                                 return_format='dict')
        assert_rel_error(self, jac['sub.comp2.y']['p.x'][0][0], -6.0, 1e-6)
        assert_rel_error(self, jac['sub.comp3.y']['p.x'][0][0], 15.0, 1e-6)
def test_fd_options_meta_form(self):
    """Per-param fd_form metadata: x1 uses forward, x2 uses backward;
    verified via the sign of the FD derivative at a huge step size."""

    class MetaParaboloid(Component):
        """Paraboloid-like component with per-param FD forms."""

        def __init__(self):
            super(MetaParaboloid, self).__init__()
            # Params: each declares its own differencing form.
            self.add_param('x1', 1.0, fd_form='forward')
            self.add_param('x2', 1.0, fd_form='backward')
            self.add_param('y', 1.0)
            # Unknowns
            self.add_output('f_xy', 0.0)

        def solve_nonlinear(self, params, unknowns, resids):
            """Evaluate the test function."""
            x1 = params['x1']
            x2 = params['x2']
            y = params['y']
            unknowns['f_xy'] = ((x1 - 3.0)**2 + (x2 - 3.0)**2 +
                                (x2 + x2)*y + (y + 4.0)**2 - 3.0)

        def jacobian(self, params, unknowns, resids):
            """Analytical derivatives (bypassed by force_fd)."""
            x1 = params['x1']
            x2 = params['x2']
            y = params['y']
            return {('f_xy', 'x1'): 2.0*x1 - 6.0 + x2*y,
                    ('f_xy', 'x2'): 2.0*x2 - 6.0 + x1*y,
                    ('f_xy', 'y'): 2.0*y + 8.0 + x1 + x2}

    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', MetaParaboloid())
    prob.root.add('p11', ParamComp('x1', 15.0))
    prob.root.add('p12', ParamComp('x2', 15.0))
    prob.root.add('p2', ParamComp('y', 15.0))
    prob.root.connect('p11.x1', 'comp.x1')
    prob.root.connect('p12.x2', 'comp.x2')
    prob.root.connect('p2.y', 'comp.y')

    comp.fd_options['force_fd'] = True
    comp.fd_options['step_size'] = 1e3

    prob.setup(check=False)
    prob.run()

    # Forward differencing overshoots positive at the huge step...
    jac = prob.calc_gradient(['p11.x1'], ['comp.f_xy'],
                             return_format='dict')
    self.assertGreater(jac['comp.f_xy']['p11.x1'][0][0], 0.0)

    # ...while backward differencing lands negative.
    jac = prob.calc_gradient(['p12.x2'], ['comp.f_xy'],
                             return_format='dict')
    self.assertLess(jac['comp.f_xy']['p12.x2'][0][0], 0.0)
def test_fd_options_meta_form(self):
    # NOTE(review): this method has the same name as another
    # test_fd_options_meta_form in this class, so at class-construction time
    # one definition shadows the other and only one test actually runs --
    # consider renaming or deleting the duplicate.

    class MetaParaboloid(Component):
        """ Evaluates the equation f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3 """

        def __init__(self):
            super(MetaParaboloid, self).__init__()

            # Params; fd_form metadata selects the FD form used for each param.
            self.add_param('x1', 1.0, fd_form = 'forward')
            self.add_param('x2', 1.0, fd_form = 'backward')
            self.add_param('y', 1.0)

            # Unknowns
            self.add_output('f_xy', 0.0)

        def solve_nonlinear(self, params, unknowns, resids):
            """f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3
            Optimal solution (minimum): x = 6.6667; y = -7.3333
            """
            x1 = params['x1']
            x2 = params['x2']
            y = params['y']

            # NOTE(review): the (x2+x2)*y term does not match the x*y term in
            # the docstring, and jacobian() below is not the analytic
            # derivative of this expression; harmless here because the test
            # sets force_fd=True, but worth confirming it is intentional.
            f_xy = ((x1-3.0)**2 + (x2-3.0)**2 + (x2+x2)*y + (y+4.0)**2 - 3.0)
            unknowns['f_xy'] = f_xy

        def jacobian(self, params, unknowns, resids):
            """Analytical derivatives"""
            x1 = params['x1']
            x2 = params['x2']
            y = params['y']

            J = {}
            J['f_xy', 'x1'] = (2.0*x1 - 6.0 + x2*y)
            J['f_xy', 'x2'] = (2.0*x2 - 6.0 + x1*y)
            J['f_xy', 'y'] = (2.0*y + 8.0 + x1 + x2)
            return J

    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', MetaParaboloid())
    prob.root.add('p11', ParamComp('x1', 15.0))
    prob.root.add('p12', ParamComp('x2', 15.0))
    prob.root.add('p2', ParamComp('y', 15.0))
    prob.root.connect('p11.x1', 'comp.x1')
    prob.root.connect('p12.x2', 'comp.x2')
    prob.root.connect('p2.y', 'comp.y')

    # Force FD with a huge step so the sign of the estimate reflects the
    # difference form applied to each param.
    comp.fd_options['force_fd'] = True
    comp.fd_options['step_size'] = 1e3

    params_list = ['p11.x1']
    unknowns_list = ['comp.f_xy']

    prob.setup(check=False)
    prob.run()

    # x1 uses forward differencing -> positive slope estimate expected.
    J = prob.calc_gradient(params_list, unknowns_list, return_format='dict')
    self.assertGreater(J['comp.f_xy']['p11.x1'][0][0], 0.0)

    # x2 uses backward differencing -> negative slope estimate expected.
    J = prob.calc_gradient(['p12.x2'], unknowns_list, return_format='dict')
    self.assertLess(J['comp.f_xy']['p12.x2'][0][0], 0.0)
def test_fd_options_step_type(self):
    """'absolute' and 'relative' FD step types must yield different derivative
    estimates when the variable scale is far from 1."""

    class ScaledParaboloid(Component):
        """ Evaluates the equation f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3 """

        def __init__(self):
            super(ScaledParaboloid, self).__init__()

            # Params
            self.add_param('x', 1.0)
            self.add_param('y', 1.0)

            # Unknowns
            self.add_output('f_xy', 0.0)

            # Tiny output/input scale makes absolute vs. relative stepping diverge.
            self.scale = 1.0e-6

        def solve_nonlinear(self, params, unknowns, resids):
            """f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3
            Optimal solution (minimum): x = 6.6667; y = -7.3333
            """
            x = params['x']
            y = params['y']

            f_xy = ((x - 3.0)**2 + x * y + (y + 4.0)**2 - 3.0)
            unknowns['f_xy'] = self.scale * f_xy

        def jacobian(self, params, unknowns, resids):
            """Analytical derivatives"""
            x = params['x']
            y = params['y']

            J = {}
            J['f_xy', 'x'] = (2.0 * x - 6.0 + y) * self.scale
            J['f_xy', 'y'] = (2.0 * y + 8.0 + x) * self.scale
            return J

    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', ScaledParaboloid())
    prob.root.add('p1', ParamComp('x', 8.0 * comp.scale))
    prob.root.add('p2', ParamComp('y', 8.0 * comp.scale))
    prob.root.connect('p1.x', 'comp.x')
    prob.root.connect('p2.y', 'comp.y')

    comp.fd_options['force_fd'] = True
    comp.fd_options['step_type'] = 'absolute'

    prob.setup(check=False)
    prob.run()

    J1 = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')

    comp.fd_options['step_type'] = 'relative'
    J2 = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')

    # Couldn't put together a case where one is much worse, so just make sure
    # they are not equal.
    # FIX: assertNotEqual takes (first, second, msg=None); the old call passed
    # a stray leading `self`, so it compared the TestCase object to a float and
    # the assertion could never fail.
    self.assertNotEqual(J1['comp.f_xy']['p1.x'][0][0],
                        J2['comp.f_xy']['p1.x'][0][0])
class TrigMM(Group):
    ''' FloatKriging gives responses as floats '''

    def __init__(self):
        super(TrigMM, self).__init__()

        # Create meta_model for f_x as the response.
        # One MetaModel with two outputs: a plain-float surrogate and one
        # returning a (mean, variance) pair.
        sin_mm = self.add("sin_mm", MetaModel())
        sin_mm.add_param('x', val=0.)
        sin_mm.add_output('f_x:float', val=0., surrogate=FloatKrigingSurrogate())
        sin_mm.add_output('f_x:norm_dist', val=(0.,0.), surrogate=KrigingSurrogate())

# NOTE(review): the statements below appear to be module-level example/demo
# code (original file was whitespace-mangled, so the exact nesting is
# inferred) -- confirm against the upstream source.
from openmdao.core import Problem

prob = Problem()
prob.root = TrigMM()
prob.setup()

# Training data is just set manually. No connected input needed, since
# we're assuming the data is pre-existing.
prob['sin_mm.train:x'] = np.linspace(0,10,20)
prob['sin_mm.train:f_x:float'] = np.sin(prob['sin_mm.train:x'])
# NOTE(review): the norm_dist output is trained on cos(x), not sin(x) --
# presumably intentional to distinguish the two outputs; verify.
prob['sin_mm.train:f_x:norm_dist'] = np.cos(prob['sin_mm.train:x'])

prob['sin_mm.x'] = 2.1  # prediction happens at this value

prob.run()

print('float predicted:', '%3.4f'%prob['sin_mm.f_x:float'])  # predicted value
print('float actual: ', '%3.4f'%np.sin(2.1))
print('norm_dist predicted:', '%3.4f,'%prob['sin_mm.f_x:norm_dist'][0],
      '%3.4e'%prob['sin_mm.f_x:norm_dist'][1])  # predicted value
print('norm_dist actual: ', '%3.4f'%np.cos(2.1))
def test_fd_options_step_type(self):
    """'absolute' and 'relative' FD step types must yield different derivative
    estimates when the variable scale is far from 1."""

    class ScaledParaboloid(Component):
        """ Evaluates the equation f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3 """

        def __init__(self):
            super(ScaledParaboloid, self).__init__()

            # Params
            self.add_param('x', 1.0)
            self.add_param('y', 1.0)

            # Unknowns
            self.add_output('f_xy', 0.0)

            # Tiny output/input scale makes absolute vs. relative stepping diverge.
            self.scale = 1.0e-6

        def solve_nonlinear(self, params, unknowns, resids):
            """f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3
            Optimal solution (minimum): x = 6.6667; y = -7.3333
            """
            x = params['x']
            y = params['y']

            f_xy = ((x-3.0)**2 + x*y + (y+4.0)**2 - 3.0)
            unknowns['f_xy'] = self.scale*f_xy

        def jacobian(self, params, unknowns, resids):
            """Analytical derivatives"""
            x = params['x']
            y = params['y']

            J = {}
            J['f_xy', 'x'] = (2.0*x - 6.0 + y) * self.scale
            J['f_xy', 'y'] = (2.0*y + 8.0 + x) * self.scale
            return J

    prob = Problem()
    prob.root = Group()
    comp = prob.root.add('comp', ScaledParaboloid())
    prob.root.add('p1', ParamComp('x', 8.0*comp.scale))
    prob.root.add('p2', ParamComp('y', 8.0*comp.scale))
    prob.root.connect('p1.x', 'comp.x')
    prob.root.connect('p2.y', 'comp.y')

    comp.fd_options['force_fd'] = True
    comp.fd_options['step_type'] = 'absolute'

    prob.setup(check=False)
    prob.run()

    J1 = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')

    comp.fd_options['step_type'] = 'relative'
    J2 = prob.calc_gradient(['p1.x'], ['comp.f_xy'], return_format='dict')

    # Couldn't put together a case where one is much worse, so just make sure
    # they are not equal.
    # FIX: assertNotEqual takes (first, second, msg=None); the old call passed
    # a stray leading `self`, so it compared the TestCase object to a float and
    # the assertion could never fail.
    self.assertNotEqual(J1['comp.f_xy']['p1.x'][0][0],
                        J2['comp.f_xy']['p1.x'][0][0])