def test_serial_in_parallel(self):
    """A purely serial chain routed through two ParallelGroups should run with no out-of-order warning."""
    top = Problem()
    root = top.model

    root.add_subsystem('p1', IndepVarComp('x', 1.0))

    stage1 = root.add_subsystem('parallel', ParallelGroup())
    stage1.add_subsystem('c1', ExecComp(['y=-2.0*x']))

    stage2 = root.add_subsystem('parallel_copy', ParallelGroup())
    stage2.add_subsystem('comp1', ExecComp(['y=-2.0*x']))

    root.add_subsystem('con', ExecComp('y = 3.0*x'))

    # serial chain: p1 -> parallel.c1 -> parallel_copy.comp1 -> con
    root.connect("p1.x", "parallel.c1.x")
    root.connect('parallel.c1.y', 'parallel_copy.comp1.x')
    root.connect('parallel_copy.comp1.y', 'con.x')

    top.setup(check=True)

    msg = ("The following systems are executed out-of-order:\n"
           " System 'parallel.c2' executes out-of-order with respect to its source systems ['parallel.c1']\n")

    with assert_no_warning(UserWarning, msg):
        top.run_model()
def __init__(self,name,rhs,num_seg,seg_ncn=3,rel_lengths=1):
    """
    Build a collocation phase: one CollocationSegment per segment under a
    ParallelGroup ('segments'), plus one IndepVarComp per EOM state whose
    cardinal-node values are connected into every segment.

    Parameters
    ----------
    name : str
        Name stored on ``self.name``.
    rhs : object
        Right-hand-side model; must expose ``eom_states``, an iterable of
        dicts with at least a 'name' key.
    num_seg : int
        Number of collocation segments to create (named 's0', 's1', ...).
    seg_ncn : int
        NOTE(review): currently unused -- segments are always built with
        num_cardinal_nodes=2; confirm whether this should be passed through.
    rel_lengths : int
        NOTE(review): currently unused -- rel_length=1 is hard-coded below;
        confirm intent.
    """
    super(CollocationPhase,self).__init__()
    self._eom_states = rhs.eom_states
    self.trajectory = None
    self.name = name
    self._segments = []

    # Segments execute side by side under a ParallelGroup.
    parallel_segment_group = ParallelGroup()
    for i in range(num_seg):
        seg_name = 's{0}'.format(i)
        seg = CollocationSegment(index=i, rhs=rhs,
                                 num_cardinal_nodes=2, rel_length=1)
        parallel_segment_group.add(name=seg_name, system=seg)
        self._segments.append(seg)
    self.add(name='segments',system=parallel_segment_group)

    # 3. Add the state and dynamic control param comps and muxing components
    eom_state_names = ['X_c:{0}'.format(state['name']) for state in rhs.eom_states]
    for i,state in enumerate(self._eom_states):
        self.add(
            name='eom_state_ivar_comp_{0}'.format(state['name']),
            system=IndepVarComp(name=eom_state_names[i], val=np.zeros((3))),
            promotes=[eom_state_names[i]])

    # Feed each state's cardinal-node values into every segment's input.
    for i, seg in enumerate(self._segments):
        idxs_states = range(0, 2)
        for state in self._eom_states:
            state_name = state['name']
            self.connect(
                'X_c:{0}'.format(state_name),
                'segments.s{0:d}.X_c:{1}'.format(i, state_name),
                src_indices=idxs_states)
def __init__(self,name,rhs,num_seg,seg_ncn=3,rel_lengths=1):
    """
    Build a collocation phase: one CollocationSegment per segment under a
    ParallelGroup ('segments'), plus one IndepVarComp per EOM state whose
    cardinal-node values are connected into every segment.

    Segment subsystem names here are bare integers ('0', '1', ...), and the
    per-state IndepVarComp names contain a colon
    ('eom_state_ivar_comp:<state>').

    Parameters
    ----------
    name : str
        Name stored on ``self.name``.
    rhs : object
        Right-hand-side model; must expose ``eom_states``, an iterable of
        dicts with at least a 'name' key.
    num_seg : int
        Number of collocation segments to create.
    seg_ncn : int
        NOTE(review): currently unused -- segments are always built with
        num_cardinal_nodes=2; confirm whether this should be passed through.
    rel_lengths : int
        NOTE(review): currently unused -- rel_length=1 is hard-coded below;
        confirm intent.
    """
    super(CollocationPhase,self).__init__()
    self._eom_states = rhs.eom_states
    self.trajectory = None
    self.name = name
    self._segments = []

    # Segments execute side by side under a ParallelGroup.
    parallel_segment_group = ParallelGroup()
    for i in range(num_seg):
        seg_name = '{0}'.format(i)
        seg = CollocationSegment(index=i, rhs=rhs,
                                 num_cardinal_nodes=2, rel_length=1)
        parallel_segment_group.add(name=seg_name, system=seg)
        self._segments.append(seg)
    self.add(name='segments',system=parallel_segment_group)

    # 3. Add the state and dynamic control param comps and muxing components
    eom_state_names = ['X_c:{0}'.format(state['name']) for state in rhs.eom_states]
    for i,state in enumerate(self._eom_states):
        self.add(
            name='eom_state_ivar_comp:{0}'.format(state['name']),
            system=IndepVarComp(name=eom_state_names[i], val=np.zeros((3))),
            promotes=[eom_state_names[i]])

    # Feed each state's cardinal-node values into every segment's input.
    for i, seg in enumerate(self._segments):
        idxs_states = range(0, 2)
        for state in self._eom_states:
            state_name = state['name']
            self.connect(
                'X_c:{0}'.format(state_name),
                'segments.{0:d}.X_c:{1}'.format(i, state_name),
                src_indices=idxs_states)
def __init__(self, comps=None, output_key=None, output_unit=''):
    """
    Group that runs ``comps`` side by side in a ParallelGroup and feeds
    their promoted outputs into a shared objective component.

    Parameters
    ----------
    comps : list or None
        Components to evaluate in parallel. Defaults to an empty list.
        (The original signature used a mutable default ``comps=[]``, which
        is shared across calls in Python; replaced with the None sentinel.
        Callers passing a list are unaffected.)
    output_key : str or None
        Forwarded to the parent constructor -- presumably the name of the
        objective output; confirm against the base class.
    output_unit : str
        Forwarded to the parent constructor.
    """
    comps = [] if comps is None else comps
    super().__init__(comps, output_key, output_unit)

    # all parallel components promote everything so the objective sees them
    parallel = ParallelGroup()
    for i, comp in enumerate(self.comps):
        parallel.add_subsystem('comp_{}'.format(i), comp, promotes=['*'])
    self.add_subsystem('parallel', parallel, promotes=['*'])

    # self.obj_comp is expected to be set by the parent constructor
    self.add_subsystem('objective', self.obj_comp, promotes=['*'])
def test_multipoint_with_coloring(self):
    """
    Multipoint model (one design point per MPI proc) optimized with
    simultaneous-derivative coloring; totals are checked against the
    product of the two per-point matrices.
    """
    size = 10
    num_pts = self.N_PROCS

    np.random.seed(11)  # _get_mat presumably draws random matrices; fix the seed for repeatability

    p = Problem()

    p.driver = pyOptSparseDriver()
    p.driver.options['optimizer'] = OPTIMIZER
    p.driver.options['dynamic_simul_derivs'] = True  # enable derivative coloring
    if OPTIMIZER == 'SNOPT':
        p.driver.opt_settings['Major iterations limit'] = 100
        p.driver.opt_settings['Major feasibility tolerance'] = 1.0E-6
        p.driver.opt_settings['Major optimality tolerance'] = 1.0E-6
        p.driver.opt_settings['iSumm'] = 6

    model = p.model

    # one independent design vector per point
    for i in range(num_pts):
        model.add_subsystem('indep%d' % i, IndepVarComp('x', val=np.ones(size)))
        model.add_design_var('indep%d.x' % i)

    # first parallel stage: maps size -> 5
    par1 = model.add_subsystem('par1', ParallelGroup())
    for i in range(num_pts):
        mat = _get_mat(5, size)
        par1.add_subsystem('comp%d' % i, ExecComp('y=A.dot(x)', A=mat,
                                                  x=np.ones(size), y=np.ones(5)))
        model.connect('indep%d.x' % i, 'par1.comp%d.x' % i)

    # second parallel stage: maps 5 -> size, with a constraint and a norm comp per point
    par2 = model.add_subsystem('par2', ParallelGroup())
    for i in range(num_pts):
        mat = _get_mat(size, 5)
        par2.add_subsystem('comp%d' % i, ExecComp('y=A.dot(x)', A=mat,
                                                  x=np.ones(5), y=np.ones(size)))
        model.connect('par1.comp%d.y' % i, 'par2.comp%d.x' % i)
        par2.add_constraint('comp%d.y' % i, lower=-1.)

        model.add_subsystem('normcomp%d' % i, ExecComp("y=sum(x*x)", x=np.ones(size)))
        model.connect('par2.comp%d.y' % i, 'normcomp%d.x' % i)

    # objective is the sum of the per-point norms
    model.add_subsystem('obj', ExecComp("y=" + '+'.join(['x%d' % i for i in range(num_pts)])))
    for i in range(num_pts):
        model.connect('normcomp%d.y' % i, 'obj.x%d' % i)

    model.add_objective('obj.y')

    p.setup()
    p.run_driver()

    J = p.compute_totals()

    # On the proc that owns point i, d(par2.comp_i.y)/d(indep_i.x) == A2 @ A1.
    for i in range(num_pts):
        vname = 'par2.comp%d.A' % i
        if vname in model._var_abs_names['input']:
            norm = np.linalg.norm(J['par2.comp%d.y'%i,'indep%d.x'%i] -
                                  getattr(par2, 'comp%d'%i)._inputs['A'].dot(
                                      getattr(par1, 'comp%d'%i)._inputs['A']))
            self.assertLess(norm, 1.e-7)
        elif vname not in model._var_allprocs_abs_names['input']:
            self.fail("Can't find variable par2.comp%d.A" % i)
def test_file_diamond(self):
    # connect a source FileRef to two target FileRefs on
    # components running in parallel, and connect the outputs
    # of those components to a common sink component. All filenames
    # are different, so files will actually be copied for each connection.
    if MPI:
        num = self.N_PROCS
    else:
        num = 1  # serial fallback: a single middle component

    prob = Problem(Group(), impl=impl)
    src = prob.root.add("src", FileSrc('src'))
    par = prob.root.add('par', ParallelGroup())
    sink = prob.root.add("sink", FileSink('sink', num))

    # one FileMid per process, each with its own distinct file names
    for i in range(num):
        par.add("mid%d" % i, FileMid('mid%d' % i, 'mid%d' % i))
        prob.root.connect('src.fout', 'par.mid%d.fin' % i)
        prob.root.connect('par.mid%d.fout' % i, 'sink.fin%d' % i)

    prob.setup(check=False)
    prob.run()

    # each sink input file should contain the src line plus its mid's line
    for i in range(num):
        with sink.params['fin%d' % i].open('r') as f:
            self.assertEqual(f.read(), "src\npar.mid%d\n" % i)
def test_parab_FD(self):
    """Finite-difference optimization of two 1-D parabolas evaluated inside a ParallelGroup."""
    prob = Problem(impl=impl)
    top = prob.root = Group()

    grp = top.add('par', ParallelGroup())
    grp.add('c1', Parab1D(root=2.0))
    grp.add('c2', Parab1D(root=3.0))

    top.add('p1', IndepVarComp('x', val=0.0))
    top.add('p2', IndepVarComp('x', val=0.0))
    top.connect('p1.x', 'par.c1.x')
    top.connect('p2.x', 'par.c2.x')

    top.add('sumcomp', ExecComp('sum = x1+x2'))
    top.connect('par.c1.y', 'sumcomp.x1')
    top.connect('par.c2.y', 'sumcomp.x2')

    drv = prob.driver = pyOptSparseDriver()
    drv.options['optimizer'] = OPTIMIZER
    drv.options['print_results'] = False
    drv.add_desvar('p1.x', lower=-100, upper=100)
    drv.add_desvar('p2.x', lower=-100, upper=100)
    drv.add_objective('sumcomp.sum')

    # force finite-difference derivatives for the whole model
    top.deriv_options['type'] = 'fd'

    prob.setup(check=False)
    prob.run()

    # each parabola's minimum lies at its 'root' value
    if not MPI or self.comm.rank == 0:
        assert_rel_error(self, prob['p1.x'], 2.0, 1.e-6)
        assert_rel_error(self, prob['p2.x'], 3.0, 1.e-6)
def test_parab_subbed_Pcomps(self):
    """Optimize two MP_Point subgroups under a ParallelGroup; each rank checks its own point's optimum."""
    prob = Problem(impl=impl)
    top = prob.root = Group()
    top.ln_solver = lin_solver()

    par = top.add('par', ParallelGroup())
    par.add('s1', MP_Point(root=2.0))
    par.add('s2', MP_Point(root=3.0))

    top.add('sumcomp', ExecComp('sum = x1+x2'))
    top.connect('par.s1.c.y', 'sumcomp.x1')
    top.connect('par.s2.c.y', 'sumcomp.x2')

    drv = prob.driver = pyOptSparseDriver()
    drv.options['optimizer'] = OPTIMIZER
    drv.options['print_results'] = False
    drv.add_desvar('par.s1.p.x', lower=-100, upper=100)
    drv.add_desvar('par.s2.p.x', lower=-100, upper=100)
    drv.add_objective('sumcomp.sum')

    prob.setup(check=False)
    prob.run()

    # desvars live on different ranks under MPI; check each where it is local
    if not MPI or self.comm.rank == 0:
        assert_rel_error(self, prob['par.s1.p.x'], 2.0, 1.e-6)

    if not MPI or self.comm.rank == 1:
        assert_rel_error(self, prob['par.s2.p.x'], 3.0, 1.e-6)
def setup(self):
    """
    Setup the Trajectory Group.

    Sets up optional design/input parameters, places all phases side by
    side under a ParallelGroup ('phases') with full promotion, gives each
    phase a DirectSolver, and finally sets up phase linkages if any exist.
    """
    super(Trajectory, self).setup()

    if self.design_parameter_options:
        self._setup_design_parameters()

    if self.input_parameter_options:
        self._setup_input_parameters()

    phases_group = self.add_subsystem('phases', subsys=ParallelGroup(),
                                      promotes_inputs=['*'], promotes_outputs=['*'])

    for name, phs in iteritems(self._phases):
        g = phases_group.add_subsystem(name, phs, **self._phase_add_kwargs[name])
        # DirectSolvers were moved down into the phases for use with MPI
        g.linear_solver = DirectSolver()
        phs.finalize_variables()

    if self._linkages:
        self._setup_linkages()
def test_reraise_analylsis_error(self):
    """An AnalysisError raised inside a ParallelGroup propagates up and is handled by the driver.

    (Method name kept as-is for test discovery, though 'analylsis' is a typo.)
    """
    prob = Problem()
    prob.model = top = Group()

    top.add_subsystem('p1', IndepVarComp('x', 0.5))
    top.add_subsystem('p2', IndepVarComp('x', 3.0))

    par = top.add_subsystem('sub', ParallelGroup())
    par.add_subsystem('c1', AEComp())
    par.add_subsystem('c2', AEComp())
    par.nonlinear_solver = NonlinearBlockJac()

    top.add_subsystem('obj', ExecComp(['val = x1 + x2']))

    for src, tgt in (('p1.x', 'sub.c1.x'),
                     ('p2.x', 'sub.c2.x'),
                     ('sub.c1.y', 'obj.x1'),
                     ('sub.c2.y', 'obj.x2')):
        top.connect(src, tgt)

    prob.driver = AEDriver()

    prob.setup(vector_class=PETScVector, check=False)

    # AEDriver is expected to catch the re-raised AnalysisError and report it
    self.assertTrue(prob.run_driver())
def setup_model(self):
    """Build (but do not set up) a Problem with a two-branch ParallelGroup feeding two serial comps."""
    n = self.asize
    prob = Problem()
    model = prob.model
    model.linear_solver = LinearBlockGS()

    model.add_subsystem('p1', IndepVarComp('x', np.arange(n, dtype=float)+1.0))
    model.add_subsystem('p2', IndepVarComp('x', np.arange(n, dtype=float)+1.0))

    par = model.add_subsystem('G1', ParallelGroup())
    par.linear_solver = LinearBlockGS()
    par.add_subsystem('c1', ExecComp('y = ones(3).T*x.dot(arange(3.,6.))',
                                     x=np.zeros(n), y=np.zeros(n)))
    par.add_subsystem('c2', ExecComp('y = x * 2.0',
                                     x=np.zeros(n), y=np.zeros(n)))

    model.add_subsystem('c3', ExecComp('y = x * 5.0',
                                       x=np.zeros(n), y=np.zeros(n)))
    model.add_subsystem('c4', ExecComp('y = x * 4.0',
                                       x=np.zeros(n), y=np.zeros(n)))

    # fan-out into the parallel group, fan back out to the serial comps
    model.connect('p1.x', 'G1.c1.x')
    model.connect('p2.x', 'G1.c2.x')
    model.connect('G1.c1.y', 'c3.x')
    model.connect('G1.c2.y', 'c4.x')

    return prob
def test_remote_voi(self):
    """Total derivatives w.r.t. variables of interest living on remote procs come back correct."""
    prob = Problem()
    model = prob.model

    par = model.add_subsystem('par', ParallelGroup())
    par.add_subsystem('G1', Mygroup())
    par.add_subsystem('G2', Mygroup())

    model.add_subsystem('Obj', ExecComp('obj=y1+y2'))

    model.connect('par.G1.y', 'Obj.y1')
    model.connect('par.G2.y', 'Obj.y2')

    model.add_objective('Obj.obj')

    prob.driver = pyOptSparseDriver()
    prob.driver.options['optimizer'] = 'SLSQP'

    prob.setup(vector_class=PETScVector)
    prob.run_driver()

    J = prob.compute_totals(of=['Obj.obj', 'par.G1.c', 'par.G2.c'],
                            wrt=['par.G1.x', 'par.G2.x'])

    # objective sees both branches; each branch's 'c' depends only on its own 'x'
    expected = {
        ('Obj.obj', 'par.G1.x'): 2.0,
        ('Obj.obj', 'par.G2.x'): 2.0,
        ('par.G1.c', 'par.G1.x'): 1.0,
        ('par.G1.c', 'par.G2.x'): 0.0,
        ('par.G2.c', 'par.G1.x'): 0.0,
        ('par.G2.c', 'par.G2.x'): 1.0,
    }
    for key, val in expected.items():
        assert_rel_error(self, J[key], np.array([[val]]), 1e-6)
def setup_model(self, mode):
    """
    Build and run a model whose two constraints share a
    parallel_deriv_color ('par_resp') so their derivative solves can
    proceed concurrently; returns the Problem after run_driver.

    Parameters
    ----------
    mode : str
        Derivative solve direction passed to setup ('fwd' or 'rev').
    """
    asize = 3
    prob = Problem()
    root = prob.model
    root.linear_solver = LinearBlockGS()

    p = root.add_subsystem('p', IndepVarComp('x', np.arange(asize, dtype=float)+1.0))

    G1 = root.add_subsystem('G1', ParallelGroup())
    G1.linear_solver = LinearBlockGS()
    c2 = G1.add_subsystem('c2', ExecComp('y = x * 2.0',
                                         x=np.zeros(asize), y=np.zeros(asize)))
    c3 = G1.add_subsystem('c3', ExecComp('y = ones(3).T*x.dot(arange(3.,6.))',
                                         x=np.zeros(asize), y=np.zeros(asize)))

    c4 = root.add_subsystem('c4', ExecComp('y = x * 4.0',
                                           x=np.zeros(asize), y=np.zeros(asize)))
    c5 = root.add_subsystem('c5', ExecComp('y = x * 5.0',
                                           x=np.zeros(asize), y=np.zeros(asize)))

    prob.model.add_design_var('p.x', indices=[1, 2])
    # both constraints share the same color -> parallel derivative solves
    prob.model.add_constraint('c4.y', upper=0.0, indices=[1],
                              parallel_deriv_color='par_resp')
    prob.model.add_constraint('c5.y', upper=0.0, indices=[2],
                              parallel_deriv_color='par_resp')

    root.connect('p.x', 'G1.c2.x')
    root.connect('p.x', 'G1.c3.x')
    root.connect('G1.c2.y', 'c4.x')
    root.connect('G1.c3.y', 'c5.x')

    # NOTE: vector_class comes from enclosing module scope, not this method.
    prob.setup(vector_class=vector_class, check=False, mode=mode)
    prob.run_driver()

    return prob
def test_fan_in(self):
    """Two independent branches in a ParallelGroup fan into one downstream PBOComp."""
    prob = Problem(Group(), impl=impl)
    top = prob.root

    par = top.add('par', ParallelGroup())

    branch1 = par.add('G1', Group())
    branch1.add('A1', IndepVarComp('a', [1., 1., 1., 1., 1.]))
    branch1.add('C1', PBOComp())

    branch2 = par.add('G2', Group())
    branch2.add('B1', IndepVarComp('b', [3., 3., 3., 3., 3.]))
    branch2.add('C2', PBOComp())

    top.add('C3', PBOComp())

    par.connect('G1.A1.a', 'G1.C1.a')
    par.connect('G2.B1.b', 'G2.C2.a')
    top.connect('par.G1.C1.c', 'C3.a')
    top.connect('par.G2.C2.c', 'C3.b')

    prob.setup(check=False)
    prob.run()

    for vname, expected in (('C3.a', [2., 3., 4., 5., 6.]),
                            ('C3.b', [4., 5., 6., 7., 8.]),
                            ('C3.c', [6., 8., 10., 12., 14.]),
                            ('C3.d', [-2., -2., -2., -2., -2.])):
        self.assertEqual(prob[vname], expected)

    # pass-by-object variables occupy no space in the unknowns vector
    self.assertEqual(prob.root.unknowns.vec.size, 0)
def setup_model(self):
    """Build (but do not set up) a Problem whose two parallel branches take differently sized inputs."""
    n = self.asize
    prob = Problem()
    model = prob.model
    model.linear_solver = LinearBlockGS()

    model.add_subsystem('Indep1', IndepVarComp('x', np.arange(n, dtype=float)+1.0))
    model.add_subsystem('Indep2', IndepVarComp('x', np.arange(n+2, dtype=float)+1.0))

    par = model.add_subsystem('G1', ParallelGroup())
    par.linear_solver = LinearBlockGS()
    par.add_subsystem('c1', ExecComp('y = ones(3).T*x.dot(arange(3.,6.))',
                                     x=np.zeros(n), y=np.zeros(n)))
    # c2 takes a longer input and only uses its first n entries
    par.add_subsystem('c2', ExecComp('y = x[:%d] * 2.0' % n,
                                     x=np.zeros(n+2), y=np.zeros(n)))

    model.add_subsystem('Con1', ExecComp('y = x * 5.0',
                                         x=np.zeros(n), y=np.zeros(n)))
    model.add_subsystem('Con2', ExecComp('y = x * 4.0',
                                         x=np.zeros(n), y=np.zeros(n)))

    model.connect('Indep1.x', 'G1.c1.x')
    model.connect('Indep2.x', 'G1.c2.x')
    model.connect('G1.c1.y', 'Con1.x')
    model.connect('G1.c2.y', 'Con2.x')

    return prob
def test_list_states_allprocs(self):
    """_list_states_allprocs should gather implicit states from every proc, in subsystem order."""
    class StateComp(ImplicitComponent):
        # minimal implicit component with a single 2-vector state 'x'

        def initialize(self):
            # fixed, well-conditioned 2x2 system matrix
            self.mtx = np.array([
                [0.99, 0.01],
                [0.01, 0.99],
            ])

        def setup(self):
            self.add_input('rhs', val=np.ones(2))
            self.add_output('x', val=np.zeros(2))

            self.declare_partials(of='*', wrt='*')

        def apply_nonlinear(self, inputs, outputs, residuals):
            # residual of mtx @ x = rhs
            residuals['x'] = self.mtx.dot(outputs['x']) - inputs['rhs']

        def solve_nonlinear(self, inputs, outputs):
            outputs['x'] = np.linalg.solve(self.mtx, inputs['rhs'])

    p = Problem(model=ParallelGroup())
    p.model.add_subsystem('C1', StateComp())
    p.model.add_subsystem('C2', StateComp())
    p.model.add_subsystem('C3', ExecComp('y=2.0*x'))  # explicit comp: contributes no states
    p.model.add_subsystem('C4', StateComp())

    p.setup()
    p.final_setup()

    # only the implicit components' states are reported
    self.assertEqual(p.model._list_states_allprocs(), ['C1.x', 'C2.x', 'C4.x'])
def test_parallel_diamond(self):
    """Diamond topology: one indep feeds two parallel comps whose outputs feed a common sink."""
    size = 3
    prob = Problem(Group(), impl=impl)
    root = prob.root

    root.add('P1', IndepVarComp('x', np.ones(size, float) * 1.1))
    G1 = root.add('G1', ParallelGroup())
    G1.add('C1', ABCDArrayComp(size))
    G1.add('C2', ABCDArrayComp(size))
    root.add('C3', ABCDArrayComp(size))

    root.connect('P1.x', 'G1.C1.a')
    root.connect('P1.x', 'G1.C2.b')
    root.connect('G1.C1.c', 'C3.a')
    root.connect('G1.C2.d', 'C3.b')

    prob.setup(check=False)
    prob.run()

    # C1 (and the sink's gathered inputs) are checked on rank 0
    if not MPI or self.comm.rank == 0:
        assert_rel_error(self, prob.root.G1.C1.unknowns['c'],
                         np.ones(size) * 2.1, 1.e-10)
        assert_rel_error(self, prob.root.G1.C1.unknowns['d'],
                         np.ones(size) * .1, 1.e-10)
        assert_rel_error(self, prob.root.C3.params['a'],
                         np.ones(size) * 2.1, 1.e-10)
        assert_rel_error(self, prob.root.C3.params['b'],
                         np.ones(size) * -.1, 1.e-10)

    # C2 is checked on rank 1 (or serially when MPI is off)
    if not MPI or self.comm.rank == 1:
        assert_rel_error(self, prob.root.G1.C2.unknowns['c'],
                         np.ones(size) * 2.1, 1.e-10)
        assert_rel_error(self, prob.root.G1.C2.unknowns['d'],
                         np.ones(size) * -.1, 1.e-10)
def setUp(self):
    """Fixture: a model whose systems all carry citation strings; it is built but never run."""
    prob = Problem()
    prob.model = Group()
    prob.model.cite = "foobar model"
    prob.model.nonlinear_solver.cite = "foobar nonlinear_solver"
    prob.model.linear_solver.cite = "foobar linear_solver"

    indeps = prob.model.add_subsystem('indeps', IndepVarComp('x', 10),
                                      promotes=['*'])
    indeps.linear_solver = LinearRunOnce()

    par = prob.model.add_subsystem('par', ParallelGroup(), promotes=['*'])

    comp_a = par.add_subsystem('ec', ExecComp('y = 2+3*x'), promotes=['*'])
    # note using newton here makes no sense in reality, but its fine for this
    # test since we never run the model
    comp_a.nonlinear_solver = NewtonSolver()
    comp_a.cite = "foobar exec comp"

    comp_b = par.add_subsystem('c2', ExecComp('y2=x'), promotes=['*'])
    comp_b.cite = 'foobar exec comp'

    self.prob = prob
def setup(self):
    """
    Build a serial chain feeding two slow components under a ParallelGroup;
    both constraints share a parallel_deriv_color so their derivative
    solves can overlap.
    """
    size = 4

    Indep1 = self.add_subsystem(
        'Indep1', IndepVarComp('x', np.arange(size, dtype=float) + 1.0))
    Comp1 = self.add_subsystem('Comp1', SumComp(size))
    pargroup = self.add_subsystem('ParallelGroup1', ParallelGroup())

    # quiet block Gauss-Seidel linear solvers at both levels
    self.linear_solver = LinearBlockGS()
    self.linear_solver.options['iprint'] = -1
    pargroup.linear_solver = LinearBlockGS()
    pargroup.linear_solver.options['iprint'] = -1

    # SlowComp presumably stalls for `delay` seconds per execution -- confirm
    # against its definition; that is what makes serialized vs. parallel
    # derivative solves distinguishable.
    delay = .1
    Con1 = pargroup.add_subsystem('Con1', SlowComp(delay=delay, size=2, mult=2.0))
    Con2 = pargroup.add_subsystem('Con2', SlowComp(delay=delay, size=2, mult=-3.0))

    self.connect('Indep1.x', 'Comp1.x')
    self.connect('Comp1.y', 'ParallelGroup1.Con1.x')
    self.connect('Comp1.y', 'ParallelGroup1.Con2.x')

    # same color on both constraints -> concurrent derivative solves
    color = 'parcon'
    self.add_design_var('Indep1.x')
    self.add_constraint('ParallelGroup1.Con1.y', lower=0.0,
                        parallel_deriv_color=color)
    self.add_constraint('ParallelGroup1.Con2.y', upper=0.0,
                        parallel_deriv_color=color)
def __init__(self, nProblems=0):
    """
    Group containing ``nProblems`` Sellar sub-problems under a ParallelGroup,
    plus a combined objective and the two standard Sellar constraints.

    Parameters
    ----------
    nProblems : int
        Number of Sellar design points to instantiate.

    Notes
    -----
    The objective expression is hard-coded for four design points
    (y1_0..y1_3 / y2_0..y2_3) regardless of ``nProblems`` -- TODO confirm
    callers always pass nProblems=4.
    A leftover debug ``print(nProblems)`` was removed, and the
    ``np.arange`` loop was replaced with the equivalent ``range``.
    """
    super(SellarDerivativesSuperGroup, self).__init__()

    self.add('px', IndepVarComp('x', 1.0), promotes=['*'])
    self.add('pz', IndepVarComp('z', np.array([5.0, 2.0])), promotes=['*'])
    pg = self.add('manySellars', ParallelGroup(), promotes=['*'])

    for problem_id in range(nProblems):
        pg.add('Sellar%i' % problem_id,
               SellarDerivativesSubGroup(problem_id=problem_id), promotes=['*'])

    # combined objective over the four design points
    self.add(
        'obj_cmp',
        ExecComp(
            'obj = (x**2 + z[1] + y1_0 + exp(-y2_0)) + (x**2 + z[1] + y1_1 + exp(-y2_1)) + '
            '(x**2 + z[1] + y1_2 + exp(-y2_2)) + (x**2 + z[1] + y1_3 + exp(-y2_3))',
            z=np.array([0.0, 0.0]), x=0.0,
            y1_0=0.0, y2_0=0.0,
            y1_1=0.0, y2_1=0.0,
            y1_2=0.0, y2_2=0.0,
            y1_3=0.0, y2_3=0.0),
        promotes=['*'])

    # constraints are tied to the first design point only
    self.add('con_cmp1', ExecComp('con1 = 3.16 - y1_0'), promotes=['*'])
    self.add('con_cmp2', ExecComp('con2 = y2_0 - 24.0'), promotes=['*'])
def test_remote_var_access_prom(self):
    """Promoted outputs from a ParallelGroup are settable and readable through the Problem."""
    prob = Problem()

    grp = prob.model.add_subsystem('group', ParallelGroup(),
                                   promotes=['f', 'g'])
    grp.add_subsystem('indep1', IndepVarComp('f'), promotes=['*'])
    grp.add_subsystem('indep2', IndepVarComp('g'), promotes=['*'])

    prob.model.add_subsystem('summ', ExecComp('z = f + g'),
                             promotes=['f', 'g'])
    prob.model.add_subsystem('prod', ExecComp('z = f * g'),
                             promotes=['f', 'g'])

    prob.setup()

    # set via the promoted names even though the indeps live on different procs
    prob['f'] = 4.
    prob['g'] = 5.

    prob.run_model()

    np.testing.assert_almost_equal(prob['summ.z'], 9., decimal=5)
    np.testing.assert_almost_equal(prob['prod.z'], 20., decimal=5)
def test_remote_var_access(self):
    """Outputs computed on remote procs are retrievable via get_val(..., get_remote=True)."""
    prob = Problem()

    grp = prob.model.add_subsystem('group', ParallelGroup())
    grp.add_subsystem('comp1',
                      ExecComp('f = (x-3)**2 + x*y + (y+4)**2 - 3', y=2.0))
    grp.add_subsystem('comp2', ExecComp('g = x*y', y=2.0))

    prob.setup()

    prob['group.comp1.x'] = 4.
    prob['group.comp2.x'] = 5.

    prob.run_model()

    # f(4, 2) = 1 + 8 + 36 - 3 = 42 ; g(5, 2) = 10
    np.testing.assert_almost_equal(
        prob.get_val('group.comp1.f', get_remote=True), 42., decimal=5)
    np.testing.assert_almost_equal(
        prob.get_val('group.comp2.g', get_remote=True), 10., decimal=5)
def setup(self):
    """
    Sellar MDA with the two coupled disciplines placed in a ParallelGroup
    ('cycle') converged by nonlinear block Gauss-Seidel, plus objective and
    constraint components at the top level.
    """
    indeps = self.add_subsystem('indeps', IndepVarComp(), promotes=['*'])
    indeps.add_output('x', 1.0)
    indeps.add_output('z', np.array([5.0, 2.0]))

    # d1/d2 are mutually coupled through y1 and y2
    cycle = self.add_subsystem('cycle', ParallelGroup(), promotes=['*'])
    cycle.add_subsystem('d1', SellarDis1(),
                        promotes_inputs=['x', 'z', 'y2'],
                        promotes_outputs=['y1'])
    cycle.add_subsystem('d2', SellarDis2(),
                        promotes_inputs=['z', 'y1'],
                        promotes_outputs=['y2'])

    # Nonlinear Block Gauss Seidel is a gradient free solver
    cycle.nonlinear_solver = NonlinearBlockGS()

    self.add_subsystem('obj_cmp', ExecComp(
        'obj = x**2 + z[1] + y1 + exp(-y2)',
        z=np.array([0.0, 0.0]), x=0.0),
        promotes=['x', 'z', 'y1', 'y2', 'obj'])

    self.add_subsystem('con_cmp1', ExecComp('con1 = 3.16 - y1'),
                       promotes=['con1', 'y1'])
    self.add_subsystem('con_cmp2', ExecComp('con2 = y2 - 24.0'),
                       promotes=['con2', 'y2'])
def test_size_1_matmat(self):
    """
    Regression test: vectorized derivatives under one parallel_deriv_color
    mixing a size-2 and a size-1 design var used to raise in compute_totals.
    """
    p = Problem()
    indeps = p.model.add_subsystem('indeps', IndepVarComp('x', np.ones(2)))
    indeps.add_output('y', 1.0)  # the scalar (size-1) second design var

    par = p.model.add_subsystem('par', ParallelGroup())
    par.add_subsystem('C1', ExecComp('y=2*x', x=np.zeros(2), y=np.zeros(2)))
    par.add_subsystem('C2', ExecComp('y=3*x'))

    p.model.connect("indeps.x", "par.C1.x")
    p.model.connect("indeps.y", "par.C2.x")

    # same color + vectorize_derivs on differently sized design vars
    p.model.add_design_var('indeps.x', vectorize_derivs=True,
                           parallel_deriv_color='foo')
    p.model.add_design_var('indeps.y', vectorize_derivs=True,
                           parallel_deriv_color='foo')
    par.add_objective('C2.y')
    par.add_constraint('C1.y', lower=0.0)

    p.setup(mode='fwd')
    p.run_model()

    # prior to bug fix, this would raise an exception
    J = p.compute_totals()

    # diagonal block for C1 w.r.t. its own input, zeros for the cross terms
    np.testing.assert_array_equal(J['par.C1.y', 'indeps.x'], np.eye(2)*2.)
    np.testing.assert_array_equal(J['par.C2.y', 'indeps.x'], np.zeros((1, 2)))
    np.testing.assert_array_equal(J['par.C1.y', 'indeps.y'], np.zeros((2, 1)))
    np.testing.assert_array_equal(J['par.C2.y', 'indeps.y'], np.array([[3.]]))
def test_fan_in_grouped_feature(self):
    """Feature example: two indeps feed components in a ParallelGroup whose outputs fan into one comp."""
    from openmdao.api import Problem, IndepVarComp, ParallelGroup, ExecComp, PETScVector

    prob = Problem()
    root = prob.model

    root.add_subsystem('p1', IndepVarComp('x', 1.0))
    root.add_subsystem('p2', IndepVarComp('x', 1.0))

    par = root.add_subsystem('parallel', ParallelGroup())
    par.add_subsystem('c1', ExecComp(['y=-2.0*x']))
    par.add_subsystem('c2', ExecComp(['y=5.0*x']))

    root.add_subsystem('c3', ExecComp(['y=3.0*x1+7.0*x2']))

    for src, tgt in (("parallel.c1.y", "c3.x1"),
                     ("parallel.c2.y", "c3.x2"),
                     ("p1.x", "parallel.c1.x"),
                     ("p2.x", "parallel.c2.x")):
        root.connect(src, tgt)

    prob.setup(check=False, mode='fwd')
    prob.set_solver_print(level=0)
    prob.run_model()

    # 3*(-2) + 7*5 = 29
    assert_rel_error(self, prob['c3.y'], 29.0, 1e-6)
def _build_model(nsubs, min_procs=None, max_procs=None, weights=None, top=None, mode='fwd'):
    """
    Construct and set up a fan-out/fan-in model: one indep feeding ``nsubs``
    parallel ExecComps with the given per-comp processor allocation options,
    all summed into a single objective.

    ``top`` is accepted but not used here -- kept for signature compatibility
    with callers (confirm against call sites).
    """
    if min_procs is None:
        min_procs = [1]*nsubs
    if max_procs is None:
        max_procs = [None]*nsubs
    if weights is None:
        weights = [1.0]*nsubs

    p = Problem()
    model = p.model
    model.add_subsystem('indep', IndepVarComp('x', 1.0))

    par = model.add_subsystem('par', ParallelGroup())
    for i in range(nsubs):
        par.add_subsystem("C%d" % i, ExecComp("y=2.0*x"),
                          min_procs=min_procs[i], max_procs=max_procs[i],
                          proc_weight=weights[i])
        model.connect('indep.x', 'par.C%d.x' % i)

    summands = '+'.join(['x%d' % i for i in range(nsubs)])
    model.add_subsystem('objective', ExecComp("y=%s" % summands))
    for i in range(nsubs):
        model.connect('par.C%d.y' % i, 'objective.x%d' % i)

    model.add_design_var('indep.x')
    model.add_objective('objective.y')

    p.setup(mode=mode, check=False)
    p.final_setup()

    return p
def test_par_multi_src_inds_fail(self):
    """Disjoint src_indices slices of one indep feed different-sized inputs inside a ParallelGroup."""
    p = Problem()
    p.model.add_subsystem('indep', IndepVarComp('x', val=np.ones(10)))

    par = p.model.add_subsystem('par', ParallelGroup())
    par.add_subsystem('C1', ExecComp('y=x*2.', x=np.zeros(7), y=np.zeros(7)))
    par.add_subsystem('C2', ExecComp('y=x*3.', x=np.zeros(3), y=np.zeros(3)))

    # C1 gets the first 7 entries, C2 the last 3
    p.model.connect('indep.x', 'par.C1.x', src_indices=list(range(7)))
    p.model.connect('indep.x', 'par.C2.x', src_indices=list(range(7, 10)))

    p.setup()

    lo = (np.arange(7) + 1.) * 2.
    hi = (np.arange(7, 10) + 1.) * 3.
    p['par.C1.x'] = lo
    p['par.C2.x'] = hi

    p.run_model()

    # setting the inputs pushed the values back up into the source slices
    np.testing.assert_allclose(p['indep.x'][:7], lo)
    np.testing.assert_allclose(p['indep.x'][7:10], hi)
    np.testing.assert_allclose(p['par.C1.x'], lo)
    np.testing.assert_allclose(p['par.C2.x'], hi)
    np.testing.assert_allclose(p['par.C1.y'], lo * 2.)
    np.testing.assert_allclose(p['par.C2.y'], hi * 3.)
def test_single_parallel_group_order(self):
    """
    A connection between two components inside the same ParallelGroup should
    trigger the 'attach a solver' warning and log an out-of-order notice.
    """
    prob = Problem()
    model = prob.model

    model.add_subsystem('p1', IndepVarComp('x', 1.0))
    model.add_subsystem('p2', IndepVarComp('x', 1.0))

    parallel = model.add_subsystem('parallel', ParallelGroup())
    parallel.add_subsystem('c1', ExecComp(['y=-2.0*x']))
    parallel.add_subsystem('c2', ExecComp(['y=5.0*x']))
    # intra-parallel-group connection: the trigger for both warnings below
    parallel.connect('c1.y', 'c2.x')

    model.add_subsystem('c3', ExecComp(['y=3.0*x1+7.0*x2']))

    model.connect("parallel.c1.y", "c3.x1")
    model.connect("parallel.c2.y", "c3.x2")
    model.connect("p1.x", "parallel.c1.x")

    testlogger = TestLogger()
    prob.setup(check=True, mode='fwd', logger=testlogger)

    msg = "Need to attach NonlinearBlockJac, NewtonSolver, or BroydenSolver to 'parallel' when " \
          "connecting components inside parallel groups"
    with assert_warning(UserWarning, msg):
        prob.run_model()

    expected_warning = ("The following systems are executed out-of-order:\n"
                        " System 'parallel.c2' executes out-of-order with respect to its source systems ['parallel.c1']\n")
    testlogger.find_in('warning', expected_warning)
def setUp(self):
    """Fixture: two parallel serial branches, each with its own array comp, constraint, and objective."""
    if OPT is None:
        raise unittest.SkipTest("pyoptsparse is not installed")

    if OPTIMIZER is None:
        raise unittest.SkipTest("pyoptsparse is not providing SNOPT or SLSQP")

    prob = Problem(impl=impl)
    root = prob.root = Group()
    #root.ln_solver = lin_solver()
    root.ln_solver = LinearGaussSeidel()

    par = root.add('par', ParallelGroup())
    par.ln_solver = LinearGaussSeidel()

    # first serial branch: desvar -> array comp -> constraint + objective
    ser1 = par.add('ser1', Group())
    ser1.ln_solver = LinearGaussSeidel()
    ser1.add('p1', IndepVarComp('x', np.zeros([2])), promotes=['x'])
    ser1.add('comp', SimpleArrayComp(), promotes=['x', 'y'])
    ser1.add('con', ExecComp('c = y - 20.0', c=np.array([0.0, 0.0]),
                             y=np.array([0.0, 0.0])), promotes=['c', 'y'])
    ser1.add('obj', ExecComp('o = y[0]', y=np.array([0.0, 0.0])),
             promotes=['y', 'o'])

    # second branch: identical structure, different constraint target (30 vs 20)
    ser2 = par.add('ser2', Group())
    ser2.ln_solver = LinearGaussSeidel()
    ser2.add('p1', IndepVarComp('x', np.zeros([2])), promotes=['x'])
    ser2.add('comp', SimpleArrayComp(), promotes=['x', 'y'])
    ser2.add('con', ExecComp('c = y - 30.0', c=np.array([0.0, 0.0]),
                             y=np.array([0.0, 0.0])), promotes=['c', 'y'])
    ser2.add('obj', ExecComp('o = y[0]', y=np.array([0.0, 0.0])),
             promotes=['o', 'y'])

    # total objective sums the two branch objectives
    root.add('total', ExecComp('obj = x1 + x2'))
    root.connect('par.ser1.o', 'total.x1')
    root.connect('par.ser2.o', 'total.x2')

    prob.driver = pyOptSparseDriver()
    prob.driver.options['optimizer'] = OPTIMIZER
    prob.driver.options['print_results'] = False
    prob.driver.add_desvar('par.ser1.x', lower=-50.0, upper=50.0)
    prob.driver.add_desvar('par.ser2.x', lower=-50.0, upper=50.0)

    prob.driver.add_objective('total.obj')
    prob.driver.add_constraint('par.ser1.c', equals=0.0)
    prob.driver.add_constraint('par.ser2.c', equals=0.0)

    self.prob = prob
def test_is_local(self):
    """
    is_local() must raise before setup() completes, and afterwards report
    locality consistent with which rank owns each side of the ParallelGroup.

    Fix: the rank-0 and rank-1 assertion messages for C2/C1 were copy-paste
    errors ('par.C1 should be remote' on the C2 checks and vice versa);
    messages now name the variable actually being checked. Assertion logic
    is unchanged.
    """
    p = Problem()
    p.model.add_subsystem('indep', IndepVarComp('x', 1.0))
    par = p.model.add_subsystem('par', ParallelGroup())
    par.add_subsystem('C1', ExecComp('y=2*x'))
    par.add_subsystem('C2', ExecComp('y=3*x'))
    p.model.connect('indep.x', ['par.C1.x', 'par.C2.x'])

    # any query before setup() completes is an error
    with self.assertRaises(RuntimeError) as cm:
        loc = p.is_local('indep.x')
    self.assertEqual(str(cm.exception),
                     "is_local('indep.x') was called before setup() completed.")

    with self.assertRaises(RuntimeError) as cm:
        loc = p.is_local('par.C1')
    self.assertEqual(str(cm.exception),
                     "is_local('par.C1') was called before setup() completed.")

    with self.assertRaises(RuntimeError) as cm:
        loc = p.is_local('par.C1.y')
    self.assertEqual(str(cm.exception),
                     "is_local('par.C1.y') was called before setup() completed.")

    with self.assertRaises(RuntimeError) as cm:
        loc = p.is_local('par.C1.x')
    self.assertEqual(str(cm.exception),
                     "is_local('par.C1.x') was called before setup() completed.")

    p.setup()
    p.final_setup()

    # the top-level indep is local on every proc
    self.assertTrue(p.is_local('indep'), 'indep should be local')
    self.assertTrue(p.is_local('indep.x'), 'indep.x should be local')

    # under MPI, C1 lives on rank 0 and C2 on rank 1
    if p.comm.rank == 0:
        self.assertTrue(p.is_local('par.C1'), 'par.C1 should be local')
        self.assertTrue(p.is_local('par.C1.x'), 'par.C1.x should be local')
        self.assertTrue(p.is_local('par.C1.y'), 'par.C1.y should be local')

        self.assertFalse(p.is_local('par.C2'), 'par.C2 should be remote')
        self.assertFalse(p.is_local('par.C2.x'), 'par.C2.x should be remote')
        self.assertFalse(p.is_local('par.C2.y'), 'par.C2.y should be remote')
    else:
        self.assertFalse(p.is_local('par.C1'), 'par.C1 should be remote')
        self.assertFalse(p.is_local('par.C1.x'), 'par.C1.x should be remote')
        self.assertFalse(p.is_local('par.C1.y'), 'par.C1.y should be remote')

        self.assertTrue(p.is_local('par.C2'), 'par.C2 should be local')
        self.assertTrue(p.is_local('par.C2.x'), 'par.C2.x should be local')
        self.assertTrue(p.is_local('par.C2.y'), 'par.C2.y should be local')
def setup(self):
    """
    Build the multipoint CADRE model: load launch data from disk, create
    broadcast parameters, one CADRE instance per design point under a
    ParallelGroup, a state-of-charge constraint per point, and a
    total-downloaded-data objective.
    """
    # Raw data to load is in 'data' directory
    data_path = os.path.dirname(os.path.realpath(__file__))
    data_path = os.path.join(data_path, 'data')

    launch_data = np.loadtxt(data_path + '/Launch/launch1.dat')

    # orbit position and velocity data for each design point
    r_e2b_I0s = launch_data[1::2, 1:]

    # number of days since launch for each design point
    LDs = launch_data[1::2, 0] - 2451545  # 2451545 is presumably the J2000 Julian date -- confirm

    # Create IndepVarComp for broadcast parameters.
    bp = self.add_subsystem('bp', IndepVarComp())
    bp.add_output('cellInstd', np.ones((7, 12)))
    bp.add_output('finAngle', np.pi/4.0, units='rad')
    bp.add_output('antAngle', 0.0, units='rad')

    # CADRE instances go into a Parallel Group
    para = self.add_subsystem('parallel', ParallelGroup(), promotes=['*'])

    # build design points
    names = ['pt%s' % i for i in range(self.npts)]
    for i, name in enumerate(names):
        # Some initial values
        inits = {
            'LD': float(LDs[i]),
            'r_e2b_I0': r_e2b_I0s[i]
        }

        para.add_subsystem(name, CADRE(n=self.n, m=self.m,
                                       initial_inputs=inits))

        # Hook up broadcast inputs
        self.connect('bp.cellInstd', '%s.cellInstd' % name)
        self.connect('bp.finAngle', '%s.finAngle' % name)
        self.connect('bp.antAngle', '%s.antAngle' % name)

        # per-point constraint: initial SOC minus final SOC
        self.add_subsystem('%s_con5' % name, ExecComp('val = SOCi - SOCf'))
        self.connect('%s.SOC' % name, '%s_con5.SOCi' % name,
                     src_indices=[0], flat_src_indices=True)
        self.connect('%s.SOC' % name, '%s_con5.SOCf' % name,
                     src_indices=[self.n-1], flat_src_indices=True)

    # objective: sum of data from all design points
    data_totals = ['%s_DataTot' % name for name in names]
    obj = ''.join([' - %s' % data_tot for data_tot in data_totals])

    meta_dicts = {}
    for dt in data_totals:
        meta_dicts[dt] = {'units': 'Gibyte'}

    self.add_subsystem('obj', ExecComp('val='+obj, **meta_dicts))

    # feed each point's final Data value into the objective
    for name in names:
        self.connect('%s.Data' % name, 'obj.%s_DataTot' % name,
                     src_indices=[self.n-1], flat_src_indices=True)
def test_run(self):
    """Gauss-Seidel on a serial chain placed (atypically) in a ParallelGroup converges in 3 iterations."""
    root = ParallelGroup()
    root.nl_solver = NLGaussSeidel()

    root.add('C1', IndepVarComp('x', 5.))
    for cname in ('C2', 'C3', 'C4'):
        root.add(cname, ExecComp('y=x*2.0'))

    # in-order serial chain C1 -> C2 -> C3 -> C4
    root.connect("C1.x", "C2.x")
    root.connect("C2.y", "C3.x")
    root.connect("C3.y", "C4.x")

    prob = Problem(root)
    prob.setup(check=False)
    prob.run()

    self.assertEqual(root.nl_solver.iter_count, 3)
    self.assertEqual(prob['C4.y'], 40.)  # 5 * 2 * 2 * 2
def setUp(self):
    """Fixture: a ParallelGroup chain wired out of execution order (C2 -> C4 -> C3)."""
    grp = ParallelGroup()
    grp.nl_solver = NLGaussSeidel()

    grp.add('C1', IndepVarComp('x', 5.))
    for cname in ('C2', 'C3', 'C4'):
        grp.add(cname, ExecComp('y=x*2.0'))

    # data flows C1 -> C2 -> C4 -> C3, against the declared C2/C3/C4 order
    grp.connect("C1.x", "C2.x")
    grp.connect("C2.y", "C4.x")
    grp.connect("C4.y", "C3.x")

    self.root = grp