Example #1
    def test_direct_solver_comp(self):
        """
        Test the direct solver on a component.
        """
        for jac in [None, 'csc', 'dense']:
            prob = Problem(model=ImplComp4Test())
            prob.model.nonlinear_solver = NewtonSolver()
            if jac in ('csc', 'dense'):
                prob.model.options['assembled_jac_type'] = jac
            prob.model.linear_solver = DirectSolver(assemble_jac=jac in ('csc','dense'))
            prob.set_solver_print(level=0)

            prob.setup(check=False)

            prob.run_model()
            assert_rel_error(self, prob['y'], [-1., 1.])

            d_inputs, d_outputs, d_residuals = prob.model.get_linear_vectors()

            d_residuals.set_const(2.0)
            d_outputs.set_const(0.0)
            prob.model.run_solve_linear(['linear'], 'fwd')
            result = d_outputs.get_data()
            assert_rel_error(self, result, [-2., 2.])

            d_outputs.set_const(2.0)
            d_residuals.set_const(0.0)
            prob.model.run_solve_linear(['linear'], 'rev')
            result = d_residuals.get_data()
            assert_rel_error(self, result, [2., -2.])
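A note on the convention these checks rely on: in forward mode the linear solve computes d_outputs from d_residuals (solving J @ d_outputs = d_residuals), and in reverse mode it computes d_residuals from d_outputs (solving J.T @ d_residuals = d_outputs). The sketch below illustrates that relationship with plain NumPy; the 2x2 Jacobian is hypothetical, chosen only so the numbers line up with the assertions above, and is not necessarily what ImplComp4Test actually assembles.

import numpy as np

J = np.array([[0.0, 1.0],
              [-1.0, 0.0]])                              # hypothetical Jacobian, illustration only
d_residuals = np.full(2, 2.0)
d_outputs = np.linalg.solve(J, d_residuals)              # fwd: J @ d_outputs = d_residuals -> [-2., 2.]
d_residuals_rev = np.linalg.solve(J.T, np.full(2, 2.0))  # rev: J.T @ d_residuals = d_outputs -> [2., -2.]
print(d_outputs, d_residuals_rev)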
    def test_direct_solver_comp(self):
        """
        Test the direct solver on a component.
        """
        for jac in [None, 'csc', 'dense']:
            prob = Problem(model=ImplComp4Test())
            prob.model.nonlinear_solver = NewtonSolver(solve_subsystems=False)
            if jac in ('csc', 'dense'):
                prob.model.options['assembled_jac_type'] = jac
            prob.model.linear_solver = DirectSolver(assemble_jac=jac in ('csc','dense'))
            prob.set_solver_print(level=0)

            prob.setup()

            prob.run_model()
            assert_near_equal(prob['y'], [-1., 1.])

            d_inputs, d_outputs, d_residuals = prob.model.get_linear_vectors()

            d_residuals.set_val(2.0)
            d_outputs.set_val(0.0)
            prob.model.run_solve_linear('fwd')
            result = d_outputs.asarray()
            assert_near_equal(result, [-2., 2.])

            d_outputs.set_val(2.0)
            d_residuals.set_val(0.0)
            prob.model.run_solve_linear('rev')
            result = d_residuals.asarray()
            assert_near_equal(result, [2., -2.])
    def test_direct_solver_comp(self):
        """
        Test the direct solver on a component.
        """
        for jac in [None, 'csc', 'dense']:
            prob = Problem(model=ImplComp4Test())
            prob.model.nonlinear_solver = NewtonSolver()
            if jac in ('csc', 'dense'):
                prob.model.options['assembled_jac_type'] = jac
            prob.model.linear_solver = DirectSolver(assemble_jac=jac in ('csc','dense'))
            prob.set_solver_print(level=0)

            prob.setup(check=False)

            prob.run_model()
            assert_rel_error(self, prob['y'], [-1., 1.])

            d_inputs, d_outputs, d_residuals = prob.model.get_linear_vectors()

            d_residuals.set_const(2.0)
            d_outputs.set_const(0.0)
            prob.model.run_solve_linear(['linear'], 'fwd')
            result = d_outputs._data
            assert_rel_error(self, result, [-2., 2.])

            d_outputs.set_const(2.0)
            d_residuals.set_const(0.0)
            prob.model.run_solve_linear(['linear'], 'rev')
            result = d_residuals._data
            assert_rel_error(self, result, [2., -2.])
    def test_direct_solver_comp(self):
        """
        Test the direct solver on a component.
        """
        for jac in ['dict', 'coo', 'csr', 'csc', 'dense']:
            prob = Problem(model=ImplComp4Test())
            prob.model.nonlinear_solver = NewtonSolver()
            prob.model.linear_solver = DirectSolver()
            prob.set_solver_print(level=0)

            if jac == 'dict':
                pass
            elif jac == 'csr':
                prob.model.jacobian = CSRJacobian()
            elif jac == 'csc':
                prob.model.jacobian = CSCJacobian()
            elif jac == 'coo':
                prob.model.jacobian = COOJacobian()
            elif jac == 'dense':
                prob.model.jacobian = DenseJacobian()

            prob.setup(check=False)

            if jac == 'coo':
                with self.assertRaises(Exception) as context:
                    prob.run_model()
                self.assertEqual(
                    str(context.exception),
                    "Direct solver is not compatible with matrix type COOMatrix in system ''."
                )
                continue

            prob.run_model()
            assert_rel_error(self, prob['y'], [-1., 1.])

            d_inputs, d_outputs, d_residuals = prob.model.get_linear_vectors()

            d_residuals.set_const(2.0)
            d_outputs.set_const(0.0)
            prob.model.run_solve_linear(['linear'], 'fwd')
            result = d_outputs.get_data()
            assert_rel_error(self, result, [-2., 2.])

            d_outputs.set_const(2.0)
            d_residuals.set_const(0.0)
            prob.model.run_solve_linear(['linear'], 'rev')
            result = d_residuals.get_data()
            assert_rel_error(self, result, [2., -2.])
    def test_shape(self):
        import numpy as np
        from openmdao.api import Group, Problem, IndepVarComp
        from openmdao.components.meta_model_structured import MetaModelStructured

        # create input param training data, of sizes 25, 5, and 10 points resp.
        p1 = np.linspace(0, 100, 25)
        p2 = np.linspace(-10, 10, 5)
        p3 = np.linspace(0, 1, 10)

        # can use meshgrid to create a 3D array of test data
        P1, P2, P3 = np.meshgrid(p1, p2, p3, indexing='ij')
        f = np.sqrt(P1) + P2 * P3

        # verify the shape matches the order and size of the input params
        print(f.shape)
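        # -> (25, 5, 10): one axis per input, in the order p1, p2, p3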


        # Create regular grid interpolator instance
        interp = MetaModelStructured(method='cubic')
        interp.add_input('p1', 0.5, p1)
        interp.add_input('p2', 0.0, p2)
        interp.add_input('p3', 3.14, p3)

        interp.add_output('f', 0.0, f)


        # Set up the OpenMDAO model
        model = Group()
        model.add_subsystem('comp', interp, promotes=["*"])
        prob = Problem(model)
        prob.setup()

        # set inputs
        prob['p1'] = 55.12
        prob['p2'] = -2.14
        prob['p3'] = 0.323

        prob.run_model()

        computed = prob['f']
        actual = 6.73306472

        assert_almost_equal(computed, actual)

        # we can verify all gradients by checking against finite-difference
        prob.check_partials(compact_print=True)
Example #6
    def test_training_derivatives(self):
        import numpy as np
        from openmdao.api import Group, Problem, IndepVarComp
        from openmdao.components.meta_model_structured import MetaModelStructured

        # create input param training data, of sizes 25, 5, and 10 points resp.
        p1 = np.linspace(0, 100, 25)
        p2 = np.linspace(-10, 10, 5)
        p3 = np.linspace(0, 1, 10)

        # can use meshgrid to create a 3D array of test data
        P1, P2, P3 = np.meshgrid(p1, p2, p3, indexing='ij')
        f = np.sqrt(P1) + P2 * P3

        # verify the shape matches the order and size of the input params
        print(f.shape)

        # Create regular grid interpolator instance
        interp = MetaModelStructured(method='cubic',
                                     training_data_gradients=True)
        interp.add_input('p1', 0.5, p1)
        interp.add_input('p2', 0.0, p2)
        interp.add_input('p3', 3.14, p3)

        interp.add_output('f', 0.0, f)

        # Set up the OpenMDAO model
        model = Group()
        model.add_subsystem('comp', interp, promotes=["*"])
        prob = Problem(model)
        prob.setup()

        # set inputs
        prob['p1'] = 55.12
        prob['p2'] = -2.14
        prob['p3'] = 0.323

        prob.run_model()

        computed = prob['f']
        actual = 6.73306472

        assert_almost_equal(computed, actual)

        # we can verify all gradients by checking against finite-difference
        prob.check_partials(compact_print=True)
Example #7
    def test_meta_model_structured_deprecated(self):
        # run same test as above, only with the deprecated component,
        # to ensure we get the warning and the correct answer.
        # self-contained, to be removed when class name goes away.
        import numpy as np
        from openmdao.api import Group, Problem, IndepVarComp
        from openmdao.components.meta_model_structured_comp import MetaModelStructured  # deprecated
        import warnings

        with warnings.catch_warnings(record=True) as w:
            xor_interp = MetaModelStructured(method='slinear')

        self.assertEqual(len(w), 1)
        self.assertTrue(issubclass(w[0].category, DeprecationWarning))
        self.assertEqual(str(w[0].message), "'MetaModelStructured' has been deprecated. Use "
                                            "'MetaModelStructuredComp' instead.")

        # set up inputs and outputs
        xor_interp.add_input('x', 0.0, training_data=np.array([0.0, 1.0]), units=None)
        xor_interp.add_input('y', 1.0, training_data=np.array([0.0, 1.0]), units=None)

        xor_interp.add_output('xor', 1.0, training_data=np.array([[0.0, 1.0], [1.0, 0.0]]), units=None)

        # Set up the OpenMDAO model
        model = Group()
        ivc = IndepVarComp()
        ivc.add_output('x', 0.0)
        ivc.add_output('y', 1.0)
        model.add_subsystem('ivc', ivc, promotes=["*"])
        model.add_subsystem('comp', xor_interp, promotes=["*"])
        prob = Problem(model)
        prob.setup()

        # Now test out a 'fuzzy' XOR
        prob['x'] = 0.9
        prob['y'] = 0.001242

        prob.run_model()

        computed = prob['xor']
        actual = 0.8990064

        assert_almost_equal(computed, actual)

        # we can verify all gradients by checking against finite-difference
        prob.check_partials(compact_print=True)
    def test_direct_solver_comp(self):
        """
        Test the direct solver on a component.
        """
        for jac in ['dict', 'coo', 'csr', 'csc', 'dense']:
            prob = Problem(model=ImplComp4Test())
            prob.model.nonlinear_solver = NewtonSolver()
            prob.model.linear_solver = DirectSolver()
            prob.set_solver_print(level=0)

            if jac == 'dict':
                pass
            elif jac == 'csr':
                prob.model.jacobian = CSRJacobian()
            elif jac == 'csc':
                prob.model.jacobian = CSCJacobian()
            elif jac == 'coo':
                prob.model.jacobian = COOJacobian()
            elif jac == 'dense':
                prob.model.jacobian = DenseJacobian()

            prob.setup(check=False)

            if jac == 'coo':
                with self.assertRaises(Exception) as context:
                    prob.run_model()
                self.assertEqual(str(context.exception),
                                 "Direct solver is not compatible with matrix type COOMatrix in system ''.")
                continue

            prob.run_model()
            assert_rel_error(self, prob['y'], [-1., 1.])

            d_inputs, d_outputs, d_residuals = prob.model.get_linear_vectors()

            d_residuals.set_const(2.0)
            d_outputs.set_const(0.0)
            prob.model.run_solve_linear(['linear'], 'fwd')
            result = d_outputs.get_data()
            assert_rel_error(self, result, [-2., 2.])

            d_outputs.set_const(2.0)
            d_residuals.set_const(0.0)
            prob.model.run_solve_linear(['linear'], 'rev')
            result = d_residuals.get_data()
            assert_rel_error(self, result, [2., -2.])
    def test_training_gradient(self):
        model = Group()
        ivc = IndepVarComp()

        mapdata = SampleMap()

        params = mapdata.param_data
        outs = mapdata.output_data

        ivc.add_output('x', np.array([-0.3, 0.7, 1.2]))
        ivc.add_output('y', np.array([0.14, 0.313, 1.41]))
        ivc.add_output('z', np.array([-2.11, -1.2, 2.01]))

        ivc.add_output('f_train', outs[0]['values'])
        ivc.add_output('g_train', outs[1]['values'])

        comp = MetaModelStructured(training_data_gradients=True,
                                   method='cubic',
                                   num_nodes=3)
        for param in params:
            comp.add_input(param['name'], param['default'], param['values'])

        for out in outs:
            comp.add_output(out['name'], out['default'], out['values'])

        model.add_subsystem('ivc', ivc, promotes=["*"])
        model.add_subsystem('comp',
                            comp,
                            promotes=["*"])


        prob = Problem(model)
        prob.setup()
        prob.run_model()

        val0 = np.array([ 50.26787317, 49.76106232, 19.66117913])
        val1 = np.array([-32.62094041, -31.67449135, -27.46959668])

        tol = 1e-5
        assert_rel_error(self, prob['f'], val0, tol)
        assert_rel_error(self, prob['g'], val1, tol)
        self.run_and_check_derivs(prob)
Example #10
    def test_training_gradient(self):
        model = Group()
        ivc = IndepVarComp()

        mapdata = SampleMap()

        params = mapdata.param_data
        outs = mapdata.output_data

        ivc.add_output('x', np.array([-0.3, 0.7, 1.2]))
        ivc.add_output('y', np.array([0.14, 0.313, 1.41]))
        ivc.add_output('z', np.array([-2.11, -1.2, 2.01]))

        ivc.add_output('f_train', outs[0]['values'])
        ivc.add_output('g_train', outs[1]['values'])

        comp = MetaModelStructuredComp(training_data_gradients=True,
                                       method='cubic',
                                       num_nodes=3)
        for param in params:
            comp.add_input(param['name'], param['default'], param['values'])

        for out in outs:
            comp.add_output(out['name'], out['default'], out['values'])

        model.add_subsystem('ivc', ivc, promotes=["*"])
        model.add_subsystem('comp',
                            comp,
                            promotes=["*"])


        prob = Problem(model)
        prob.setup()
        prob.run_model()

        val0 = np.array([ 50.26787317,  49.76106232,  19.66117913])
        val1 = np.array([-32.62094041, -31.67449135, -27.46959668])

        tol = 1e-5
        assert_rel_error(self, prob['f'], val0, tol)
        assert_rel_error(self, prob['g'], val1, tol)
        self.run_and_check_derivs(prob)
Example #11
    def test_xor(self):
        import numpy as np
        from openmdao.api import Group, Problem, IndepVarComp
        from openmdao.components.meta_model_structured import MetaModelStructured

        # Create regular grid interpolator instance
        xor_interp = MetaModelStructured(method='slinear')

        # set up inputs and outputs
        xor_interp.add_input('x', 0.0, np.array([0.0, 1.0]), units=None)
        xor_interp.add_input('y', 1.0, np.array([0.0, 1.0]), units=None)

        xor_interp.add_output('xor',
                              1.0,
                              np.array([[0.0, 1.0], [1.0, 0.0]]),
                              units=None)

        # Set up the OpenMDAO model
        model = Group()
        ivc = IndepVarComp()
        ivc.add_output('x', 0.0)
        ivc.add_output('y', 1.0)
        model.add_subsystem('ivc', ivc, promotes=["*"])
        model.add_subsystem('comp', xor_interp, promotes=["*"])
        prob = Problem(model)
        prob.setup()

        # Now test out a 'fuzzy' XOR
        prob['x'] = 0.9
        prob['y'] = 0.001242

        prob.run_model()

        computed = prob['xor']
        actual = 0.8990064

        assert_almost_equal(computed, actual)

        # we can verify all gradients by checking against finite-difference
        prob.check_partials(compact_print=True)
    def test_xor(self):
        import numpy as np
        from openmdao.api import Group, Problem, IndepVarComp
        from openmdao.components.meta_model_structured import MetaModelStructured

        # Create regular grid interpolator instance
        xor_interp = MetaModelStructured(method='slinear')

        # set up inputs and outputs
        xor_interp.add_input('x', 0.0, np.array([0.0, 1.0]), units=None)
        xor_interp.add_input('y', 1.0, np.array([0.0, 1.0]), units=None)

        xor_interp.add_output('xor', 1.0, np.array([[0.0, 1.0], [1.0, 0.0]]), units=None)

        # Set up the OpenMDAO model
        model = Group()
        ivc = IndepVarComp()
        ivc.add_output('x', 0.0)
        ivc.add_output('y', 1.0)
        model.add_subsystem('ivc', ivc, promotes=["*"])
        model.add_subsystem('comp', xor_interp, promotes=["*"])
        prob = Problem(model)
        prob.setup()

        # Now test out a 'fuzzy' XOR
        prob['x'] = 0.9
        prob['y'] = 0.001242

        prob.run_model()

        computed = prob['xor']
        actual = 0.8990064

        assert_almost_equal(computed, actual)

        # we can verify all gradients by checking against finite-difference
        prob.check_partials(compact_print=True)
Example #13
    def test_solver_debug_print_feature(self):
        from openmdao.api import Problem, IndepVarComp, NewtonSolver
        from openmdao.test_suite.test_examples.test_circuit_analysis import Circuit

        p = Problem()
        model = p.model

        model.add_subsystem('ground', IndepVarComp('V', 0., units='V'))
        model.add_subsystem('source', IndepVarComp('I', 0.1, units='A'))
        model.add_subsystem('circuit', Circuit())

        model.connect('source.I', 'circuit.I_in')
        model.connect('ground.V', 'circuit.Vg')

        p.setup()

        nl = model.circuit.nonlinear_solver = NewtonSolver()

        nl.options['iprint'] = 2
        nl.options['debug_print'] = True

        # set some poor initial guesses so that we don't converge
        p['circuit.n1.V'] = 10.
        p['circuit.n2.V'] = 1e-3

        opts = {}
        # formatting has changed in numpy 1.14 and beyond.
        if LooseVersion(np.__version__) >= LooseVersion("1.14"):
            opts["legacy"] = '1.13'

        with printoptions(**opts):
            # run the model
            p.run_model()

        with open('rank0_root_0_NLRunOnce_0_circuit_0.dat', 'r') as f:
            self.assertEqual(f.read(), self.expected_data)
Example #14
class ODEIntegrationInterface(object):
    """
    Given a system class, create a callable object with the same signature as that required
    by scipy.integrate.ode::

        f(t, x, *args)

    Internally, this is accomplished by constructing an OpenMDAO problem using the ODE with
    a single node.  The interface populates the values of the time, states, and controls,
    and then calls `run_model()` on the problem.  The state rates generated by the ODE
    are then returned back to scipy ode, which continues the integration.

    Parameters
    ----------
    ode_class : class
        The ODEClass belonging to the phase being simulated.
    time_options : dict of {str: TimeOptionsDictionary}
        The time options for the phase being simulated.
    state_options : dict of {str: StateOptionsDictionary}
        The state options for the phase being simulated.
    control_options : dict of {str: ControlOptionsDictionary}
        The control options for the phase being simulated.
    polynomial_control_options : dict of {str: OptionsDictionary}
        The polynomial control options for the phase being simulated.
    design_parameter_options : dict of {str: DesignParameterOptionsDictionary}
        The design parameter options for the phase being simulated.
    input_parameter_options : dict of {str: InputParameterOptionsDictionary}
        The input parameter options for the phase being simulated.
    traj_parameter_options : dict
        The trajectory parameter options for the phase being simulated.
    ode_init_kwargs : dict
        Keyword argument dictionary passed to the ODE at initialization.
    """
    def __init__(self,
                 ode_class,
                 time_options,
                 state_options,
                 control_options,
                 polynomial_control_options,
                 design_parameter_options,
                 input_parameter_options,
                 traj_parameter_options,
                 ode_init_kwargs=None):

        # Get the state vector.  This isn't necessarily ordered
        # so just pick the default ordering and go with it.
        self.state_options = OrderedDict()
        self.time_options = time_options
        self.control_options = control_options
        self.polynomial_control_options = polynomial_control_options
        self.design_parameter_options = design_parameter_options
        self.input_parameter_options = input_parameter_options
        self.traj_parameter_options = traj_parameter_options
        self.control_interpolants = {}
        self.polynomial_control_interpolants = {}

        pos = 0

        for state, options in iteritems(state_options):
            self.state_options[state] = {
                'rate_source': options['rate_source'],
                'pos': pos,
                'shape': options['shape'],
                'size': np.prod(options['shape']),
                'units': options['units'],
                'targets': options['targets']
            }
            pos += self.state_options[state]['size']

        self._state_vec = np.zeros(pos, dtype=float)
        self._state_rate_vec = np.zeros(pos, dtype=float)

        #
        # Build odeint problem interface
        #
        self.prob = Problem(model=Group())
        model = self.prob.model

        # The time IVC
        ivc = IndepVarComp()
        time_units = self.time_options['units']
        ivc.add_output('time', val=0.0, units=time_units)
        ivc.add_output('time_phase', val=-88.0, units=time_units)
        ivc.add_output('t_initial', val=-99.0, units=time_units)
        ivc.add_output('t_duration', val=-111.0, units=time_units)

        model.add_subsystem('time_input', ivc, promotes_outputs=['*'])

        model.connect(
            'time',
            ['ode.{0}'.format(tgt) for tgt in self.time_options['targets']])

        model.connect('time_phase', [
            'ode.{0}'.format(tgt)
            for tgt in self.time_options['time_phase_targets']
        ])

        model.connect('t_initial', [
            'ode.{0}'.format(tgt)
            for tgt in self.time_options['t_initial_targets']
        ])

        model.connect('t_duration', [
            'ode.{0}'.format(tgt)
            for tgt in self.time_options['t_duration_targets']
        ])

        # The States Comp
        for name, options in iteritems(self.state_options):
            ivc.add_output('states:{0}'.format(name),
                           shape=(1, np.prod(options['shape'])),
                           units=options['units'])

            rate_src = self._get_rate_source_path(name)

            model.connect(
                rate_src,
                'state_rate_collector.state_rates_in:{0}_rate'.format(name))

            if options['targets'] is not None:
                model.connect(
                    'states:{0}'.format(name),
                    ['ode.{0}'.format(tgt) for tgt in options['targets']])

        if self.control_options or self.polynomial_control_options:
            self._interp_comp = \
                ODEIntControlInterpolationComp(time_units=time_units,
                                               control_options=self.control_options,
                                               polynomial_control_options=self.polynomial_control_options)
            self._interp_comp.control_interpolants = self.control_interpolants
            self._interp_comp.polynomial_control_interpolants = self.polynomial_control_interpolants

            model.add_subsystem('indep_controls',
                                self._interp_comp,
                                promotes_outputs=['*'])
            model.connect('time', ['indep_controls.time'])

        if self.control_options:
            for name, options in iteritems(self.control_options):
                if options['targets']:
                    model.connect(
                        'controls:{0}'.format(name),
                        ['ode.{0}'.format(tgt) for tgt in options['targets']])
                if options['rate_targets']:
                    model.connect('control_rates:{0}_rate'.format(name), [
                        'ode.{0}'.format(tgt)
                        for tgt in options['rate_targets']
                    ])
                if options['rate2_targets']:
                    model.connect('control_rates:{0}_rate2'.format(name), [
                        'ode.{0}'.format(tgt)
                        for tgt in options['rate2_targets']
                    ])

        if self.polynomial_control_options:
            for name, options in iteritems(self.polynomial_control_options):
                tgts = options['targets']
                rate_tgts = options['rate_targets']
                rate2_tgts = options['rate2_targets']
                if options['targets']:
                    if isinstance(tgts, string_types):
                        tgts = [tgts]
                    model.connect('polynomial_controls:{0}'.format(name),
                                  ['ode.{0}'.format(tgt) for tgt in tgts])
                if options['rate_targets']:
                    if isinstance(rate_tgts, string_types):
                        rate_tgts = [rate_tgts]
                    model.connect(
                        'polynomial_control_rates:{0}_rate'.format(name),
                        ['ode.{0}'.format(tgt) for tgt in rate_tgts])
                if options['rate2_targets']:
                    if isinstance(rate2_tgts, string_types):
                        rate2_tgts = [rate2_tgts]
                    model.connect(
                        'polynomial_control_rates:{0}_rate2'.format(name),
                        ['ode.{0}'.format(tgt) for tgt in rate2_tgts])

        if self.design_parameter_options:
            for name, options in iteritems(self.design_parameter_options):
                ivc.add_output('design_parameters:{0}'.format(name),
                               shape=options['shape'],
                               units=options['units'])
                if options['targets'] is not None:
                    tgts = options['targets']
                    if isinstance(tgts, string_types):
                        tgts = [tgts]
                    model.connect('design_parameters:{0}'.format(name),
                                  ['ode.{0}'.format(tgt) for tgt in tgts])

        if self.input_parameter_options:
            for name, options in iteritems(self.input_parameter_options):
                ivc.add_output('input_parameters:{0}'.format(name),
                               shape=options['shape'],
                               units=options['units'])
                if options['targets'] is not None:
                    tgts = options['targets']
                    if isinstance(tgts, string_types):
                        tgts = [tgts]
                    model.connect('input_parameters:{0}'.format(name),
                                  ['ode.{0}'.format(tgt) for tgt in tgts])

        if self.traj_parameter_options:
            for name, options in iteritems(self.traj_parameter_options):
                ivc.add_output('traj_parameters:{0}'.format(name),
                               shape=options['shape'],
                               units=options['units'])
                if options['targets'] is not None:
                    tgts = options['targets']
                    if isinstance(tgts, string_types):
                        tgts = [tgts]
                    model.connect('traj_parameters:{0}'.format(name),
                                  ['ode.{0}'.format(tgt) for tgt in tgts])

        # The ODE System
        if ode_class is not None:
            model.add_subsystem('ode',
                                subsys=ode_class(num_nodes=1,
                                                 **ode_init_kwargs))

        # The state rate collector comp
        self.prob.model.add_subsystem(
            'state_rate_collector',
            StateRateCollectorComp(state_options=self.state_options,
                                   time_units=time_options['units']))

        # Flag that is set to true if has_controls is called
        self._has_dynamic_controls = False

    def _get_rate_source_path(self, state_var):
        var = self.state_options[state_var]['rate_source']

        if var == 'time':
            rate_path = 'time'
        elif var == 'time_phase':
            rate_path = 'time_phase'
        elif self.state_options is not None and var in self.state_options:
            rate_path = 'states:{0}'.format(var)
        elif self.control_options is not None and var in self.control_options:
            rate_path = 'controls:{0}'.format(var)
        elif self.polynomial_control_options is not None and var in self.polynomial_control_options:
            rate_path = 'polynomial_controls:{0}'.format(var)
        elif self.design_parameter_options is not None and var in self.design_parameter_options:
            rate_path = 'design_parameters:{0}'.format(var)
        elif self.input_parameter_options is not None and var in self.input_parameter_options:
            rate_path = 'input_parameters:{0}'.format(var)
        elif self.traj_parameter_options is not None and var in self.traj_parameter_options:
            rate_path = 'traj_parameters:{0}'.format(var)
        elif var.endswith('_rate') and self.control_options is not None and \
                var[:-5] in self.control_options:
            rate_path = 'control_rates:{0}'.format(var)
        elif var.endswith('_rate2') and self.control_options is not None and \
                var[:-6] in self.control_options:
            rate_path = 'control_rates:{0}'.format(var)
        elif var.endswith('_rate') and self.polynomial_control_options is not None and \
                var[:-5] in self.polynomial_control_options:
            rate_path = 'polynomial_control_rates:{0}'.format(var)
        elif var.endswith('_rate2') and self.polynomial_control_options is not None and \
                var[:-6] in self.polynomial_control_options:
            rate_path = 'polynomial_control_rates:{0}'.format(var)
        else:
            rate_path = 'ode.{0}'.format(var)

        return rate_path

    def _unpack_state_vec(self, x):
        """
        Given the 1D state vector, extract the values corresponding to
        each state into the ODE integrator's problem states.

        Parameters
        ----------
        x : np.array
            The 1D state vector.

        Returns
        -------
        None

        """
        for state_name, state_options in self.state_options.items():
            pos = state_options['pos']
            size = state_options['size']
            self.prob['states:{0}'.format(state_name)][0, ...] = x[pos:pos + size]

    def _pack_state_rate_vec(self):
        """
        Pack the state rates into a 1D vector for use by scipy.integrate.ode.

        Returns
        -------
        dXdt: np.array
            The 1D state-rate vector.

        """
        for state_name, state_options in self.state_options.items():
            pos = state_options['pos']
            size = state_options['size']
            self._state_rate_vec[pos:pos + size] = \
                np.ravel(self.prob['state_rate_collector.'
                                   'state_rates:{0}_rate'.format(state_name)])
        return self._state_rate_vec

    def _pack_state_vec(self, x_dict):
        """
        Pack the state into a 1D vector for use by scipy.integrate.ode.

        Returns
        -------
        x: np.array
            The 1D state vector.

        """
        self._state_vec[:] = 0.0
        for state_name, state_options in self.state_options.items():
            pos = state_options['pos']
            size = state_options['size']
            self._state_vec[pos:pos + size] = np.ravel(x_dict[state_name])
        return self._state_vec

    def __call__(self, t, x):
        """
        The function interface used by scipy.integrate.ode.

        Parameters
        ----------
        t : float
            The current time, t.
        x : np.array
            The 1D state vector.

        Returns
        -------
        xdot : np.array
            The 1D vector of state time-derivatives.

        """
        self.prob['time'] = t
        self.prob['time_phase'] = t - self.prob['t_initial']
        self._unpack_state_vec(x)
        self.prob.run_model()
        xdot = self._pack_state_rate_vec()
        return xdot
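The object above is intended to be handed directly to scipy.integrate.ode as the right-hand-side callable. Below is a minimal, self-contained sketch of that driving loop; it uses a trivial stand-in function with the same f(t, x) signature instead of a fully configured ODEIntegrationInterface, and the decay rate, step size, and end time are made-up values for illustration.

import numpy as np
from scipy.integrate import ode

def rhs(t, x):
    # stand-in for ODEIntegrationInterface.__call__: return dx/dt for the packed 1D state vector
    return -0.5 * x

solver = ode(rhs)
solver.set_integrator('vode')
solver.set_initial_value(np.array([1.0]), t=0.0)

while solver.successful() and solver.t < 2.0:
    solver.integrate(solver.t + 0.1)
    print(solver.t, solver.y)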
Example #15
    g = p.model

    if 'gmres' in sys.argv:
        from openmdao.solvers.linear.scipy_iter_solver import ScipyKrylov
        p.root.linear_solver = ScipyKrylov()

    g.add_subsystem("P", IndepVarComp('x', numpy.ones(vec_size)))

    g.add_design_var("P.x")

    par = g.add_subsystem("par", ParallelGroup())
    for pt in range(pts):
        ptname = "G%d"%pt
        ptg = par.add_subsystem(ptname, SubGroup())
        #create_dyncomps(ptg, num_comps, 2, 2, 2,
                            #var_factory=lambda: numpy.zeros(vec_size))
        g.connect("P.x", "par.%s.C0.i0" % ptname)

        #cname = ptname + '.' + "C%d"%(num_comps-1)
        #g.add_objective("par.%s.o0" % cname)
        #g.add_constraint("par.%s.o1" % cname, lower=0.0)

    p.setup(vector_class=vec_class)
    p.final_setup()
    p.run_model()
    #
    from openmdao.devtools.debug import max_mem_usage
    print("mem:", max_mem_usage())

    config_summary(p)
Example #16
    des_vars.add_output('MN', 0.5)
    des_vars.add_output('dTs', 0.0, units='degR')

    fc = p1.model.add_subsystem("fc", FlightConditions())

    p1.model.connect('des_vars.W', 'fc.W')
    p1.model.connect('des_vars.alt', 'fc.alt')
    p1.model.connect('des_vars.MN', 'fc.MN')
    p1.model.connect('des_vars.dTs', 'fc.dTs')

    p1.setup()

    # p1.root.list_connections()

    p1['des_vars.alt'] = 17868.79060515557
    p1['des_vars.MN'] = 2.101070288213628
    p1['des_vars.dTs'] = 0.0
    p1['des_vars.W'] = 1.0

    p1.run_model()

    print('Ts_atm: ', p1['fc.ambient.Ts'])
    print('Ts_set: ', p1['fc.Fl_O:stat:T'])
    print('Ps_atm: ', p1['fc.ambient.Ps'])
    print('Ps_set: ', p1['fc.Fl_O:stat:P'])
    print('rhos_atm: ', p1['fc.ambient.rhos'] * 32.175)
    print('rhos_set: ', p1['fc.Fl_O:stat:rho'])
    print('W', p1['fc.Fl_O:stat:W'])

    print('Pt: ', p1['fc.Fl_O:tot:P'])
Example #17
    g = p.model

    if 'gmres' in sys.argv:
        from openmdao.solvers.linear.scipy_iter_solver import ScipyKrylov
        g.linear_solver = ScipyKrylov()

    g.add_subsystem("P", IndepVarComp('x', numpy.ones(vec_size)))

    g.add_design_var("P.x")

    par = g.add_subsystem("par", ParallelGroup())
    for pt in range(pts):
        ptname = "G%d" % pt
        ptg = par.add_subsystem(ptname, SubGroup())
        #create_dyncomps(ptg, num_comps, 2, 2, 2,
        #var_factory=lambda: numpy.zeros(vec_size))
        g.connect("P.x", "par.%s.C0.i0" % ptname)

        #cname = ptname + '.' + "C%d"%(num_comps-1)
        #g.add_objective("par.%s.o0" % cname)
        #g.add_constraint("par.%s.o1" % cname, lower=0.0)

    p.setup()
    p.final_setup()
    p.run_model()
    #
    from openmdao.devtools.memory import max_mem_usage
    print("mem:", max_mem_usage())

    config_summary(p)
Example #18
        y = inputs['y']

        partials['f_xy', 'x'] = 2.0 * x - 6.0 + y
        partials['f_xy', 'y'] = 2.0 * y + 8.0 + x


if __name__ == "__main__":
    from openmdao.core.problem import Problem
    from openmdao.core.group import Group
    from openmdao.core.indepvarcomp import IndepVarComp

    model = Group()
    ivc = IndepVarComp()
    ivc.add_output('x', 3.0)
    ivc.add_output('y', -4.0)
    model.add_subsystem('des_vars', ivc)
    model.add_subsystem('parab_comp', Paraboloid())

    model.connect('des_vars.x', 'parab_comp.x')
    model.connect('des_vars.y', 'parab_comp.y')

    prob = Problem(model)
    prob.setup()
    prob.run_model()
    print(prob['parab_comp.f_xy'])

    prob['des_vars.x'] = 5.0
    prob['des_vars.y'] = -2.0
    prob.run_model()
    print(prob['parab_comp.f_xy'])
Example #19
        x = inputs['x']
        y = inputs['y']

        outputs['f_xy'] = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0
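        # analytic partials of this expression: df/dx = 2*x - 6 + y, df/dy = 2*y + 8 + x
        # (these match the values declared in compute_partials in the previous example)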


if __name__ == "__main__":
    from openmdao.core.problem import Problem
    from openmdao.core.group import Group
    from openmdao.core.indepvarcomp import IndepVarComp

    model = Group()
    ivc = IndepVarComp()
    ivc.add_output('x', 3.0)
    ivc.add_output('y', -4.0)
    model.add_subsystem('des_vars', ivc)
    model.add_subsystem('parab_comp', Paraboloid())

    model.connect('des_vars.x', 'parab_comp.x')
    model.connect('des_vars.y', 'parab_comp.y')

    prob = Problem(model)
    prob.setup()
    prob.run_model()
    print(prob['parab_comp.f_xy'])

    prob['des_vars.x'] = 5.0
    prob['des_vars.y'] = -2.0
    prob.run_model()
    print(prob['parab_comp.f_xy'])
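If the analytic partials shown in the previous example are declared on this Paraboloid as well, they can be spot-checked against finite differences with check_partials, the same call used in the metamodel examples earlier. A minimal sketch, assuming it is appended to the __main__ block above after prob has been set up and run:

    # compare declared partials against finite-difference estimates
    data = prob.check_partials(compact_print=True)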
class MetaModelVisualization(object):
    """
    Top-level container for the Meta Model Visualization.

    Attributes
    ----------
    prob : Problem
        Problem instance used to run the meta model for predictions.
    meta_model : MetaModel
        Reference to the empty meta model component that receives the training data.
    resolution : int
        Number used to calculate the width and height of the contour plot.
    is_structured_meta_model : bool
        Flag signaling whether the meta model is structured or unstructured.
    slider_source : ColumnDataSource
        Data source containing dictionary of sliders
    contour_training_data_source : ColumnDataSource
        Data source containing dictionary of training data points
    bottom_plot_source : ColumnDataSource
        Data source containing data for the bottom subplot
    bottom_plot_scatter_source : ColumnDataSource
        Data source containing scatter point data for the bottom subplot
    right_plot_source : ColumnDataSource
        Data source containing data for the right subplot
    right_plot_scatter_source : ColumnDataSource
        Data source containing scatter point data for the right subplot
    contour_plot_source : ColumnDataSource
        Data source containing data for the contour plot
    input_names : list
        List of input data titles as strings
    output_names : list
        List of output data titles as strings
    training_inputs : dict
        Dictionary of input training data
    x_input_select : Select
        Bokeh Select object containing a list of inputs for the x axis
    y_input_select : Select
        Bokeh Select object containing a list of inputs for the y axis
    output_select : Select
        Bokeh Select object containing a list of inputs for the outputs
    x_input_slider : Slider
        Bokeh Slider object containing a list of input values for the x axis
    y_input_slider : Slider
        Bokeh Slider object containing a list of input values for the y axis
    slider_dict : dict
        Dictionary of slider names and their respective slider objects
    predict_inputs : dict
        Dictionary containing training data points to predict at.
    num_inputs : int
        Number of inputs
    num_outputs : int
        Number of outputs
    limit_range : array
        Array containing the range of each input
    scatter_distance : TextInput
        Text input for user to enter custom value to calculate distance of training points around
        slice line
    right_alphas : array
        Array of points containing alpha values for right plot
    bottom_alphas : array
        Array of points containing alpha values for bottom plot
    dist_range : float
        Value taken from scatter_distance used for calculating distance of training points around
        slice line
    x_index : int
        Value of x axis column
    y_index : int
        Value of y axis column
    output_variable : int
        Value of output axis column
    sliders_and_selects : layout
        Layout containing the sliders and select elements
    doc_layout : layout
        Contains first row of plots
    doc_layout2 : layout
        Contains second row of plots
    Z : array
        A 2D array containing contour plot data
    """

    def __init__(self, model, resolution=50, doc=None):
        """
        Initialize parameters.

        Parameters
        ----------
        model : MetaModelComponent
            Reference to meta model component
        resolution : int
            Value used to calculate the size of contour plot meshgrid
        doc : Document
            The bokeh document to build.
        """
        self.prob = Problem()
        self.resolution = resolution
        logging.getLogger("bokeh").setLevel(logging.ERROR)

        # If the surrogate model coming in is unstructured
        if isinstance(model, MetaModelUnStructuredComp):
            self.is_structured_meta_model = False

            # Create list of input names, check if it has more than one input, then create list
            # of outputs
            self.input_names = [name[0] for name in model._surrogate_input_names]
            if len(self.input_names) < 2:
                raise ValueError('Must have more than one input value')
            self.output_names = [name[0] for name in model._surrogate_output_names]

            # Create reference for unstructured component
            self.meta_model = MetaModelUnStructuredComp(
                default_surrogate=model.options['default_surrogate'])

        # If the surrogate model coming in is structured
        elif isinstance(model, MetaModelStructuredComp):
            self.is_structured_meta_model = True

            self.input_names = [name for name in model._var_rel_names['input']]

            if len(self.input_names) < 2:
                raise ValueError('Must have more than one input value')

            self.output_names = [name for name in model._var_rel_names['output']]

            self.meta_model = MetaModelStructuredComp(
                distributed=model.options['distributed'],
                extrapolate=model.options['extrapolate'],
                method=model.options['method'],
                training_data_gradients=model.options['training_data_gradients'],
                vec_size=1)

        # Pair input list names with their respective data
        self.training_inputs = {}

        self._setup_empty_prob_comp(model)

        # Setup dropdown menus for x/y inputs and the output value
        self.x_input_select = Select(title="X Input:", value=[x for x in self.input_names][0],
                                     options=[x for x in self.input_names])
        self.x_input_select.on_change('value', self._x_input_update)

        self.y_input_select = Select(title="Y Input:", value=[x for x in self.input_names][1],
                                     options=[x for x in self.input_names])
        self.y_input_select.on_change('value', self._y_input_update)

        self.output_select = Select(title="Output:", value=[x for x in self.output_names][0],
                                    options=[x for x in self.output_names])
        self.output_select.on_change('value', self._output_value_update)

        # Create sliders for each input
        self.slider_dict = {}
        self.predict_inputs = {}
        for title, values in self.training_inputs.items():
            slider_data = np.linspace(min(values), max(values), self.resolution)
            self.predict_inputs[title] = slider_data
            # Calculates the distance between slider ticks
            slider_step = slider_data[1] - slider_data[0]
            slider_object = Slider(start=min(values), end=max(values), value=min(values),
                                   step=slider_step, title=str(title))
            self.slider_dict[title] = slider_object

        self._slider_attrs()

        # Length of inputs and outputs
        self.num_inputs = len(self.input_names)
        self.num_outputs = len(self.output_names)

        # Precalculate the problem bounds.
        limits = np.array([[min(value), max(value)] for value in self.training_inputs.values()])
        self.limit_range = limits[:, 1] - limits[:, 0]

        # Positional indices
        self.x_index = 0
        self.y_index = 1
        self.output_variable = self.output_names.index(self.output_select.value)

        # Data sources are filled with initial values
        # Slider Column Data Source
        self.slider_source = ColumnDataSource(data=self.predict_inputs)

        # Contour plot Column Data Source
        self.contour_plot_source = ColumnDataSource(data=dict(
            z=np.random.rand(self.resolution, self.resolution)))
        self.contour_training_data_source = ColumnDataSource(
            data=dict(x=np.repeat(0, self.resolution), y=np.repeat(0, self.resolution)))

        # Bottom plot Column Data Source
        self.bottom_plot_source = ColumnDataSource(data=dict(
            x=np.repeat(0, self.resolution), y=np.repeat(0, self.resolution)))
        self.bottom_plot_scatter_source = ColumnDataSource(data=dict(
            bot_slice_x=np.repeat(0, self.resolution), bot_slice_y=np.repeat(0, self.resolution)))

        # Right plot Column Data Source
        self.right_plot_source = ColumnDataSource(data=dict(
            x=np.repeat(0, self.resolution), y=np.repeat(0, self.resolution)))
        self.right_plot_scatter_source = ColumnDataSource(data=dict(
            right_slice_x=np.repeat(0, self.resolution),
            right_slice_y=np.repeat(0, self.resolution)))

        # Text input to change the distance of reach when searching for nearest data points
        self.scatter_distance = TextInput(value="0.1", title="Scatter Distance")
        self.scatter_distance.on_change('value', self._scatter_input)
        self.dist_range = float(self.scatter_distance.value)

        # Grouping all of the sliders and dropdowns into one column
        sliders = [value for value in self.slider_dict.values()]
        sliders.extend(
            [self.x_input_select, self.y_input_select, self.output_select, self.scatter_distance])
        self.sliders_and_selects = row(
            column(*sliders))

        # Layout creation
        self.doc_layout = row(self._contour_data(), self._right_plot(), self.sliders_and_selects)
        self.doc_layout2 = row(self._bottom_plot())

        if doc is None:
            doc = curdoc()

        doc.add_root(self.doc_layout)
        doc.add_root(self.doc_layout2)
        doc.title = 'Meta Model Visualization'

    def _setup_empty_prob_comp(self, metamodel):
        """
        Take data from the surrogate reference and load it into the new surrogate model attached to the empty Problem.

        Parameters
        ----------
        metamodel : MetaModelComponent
            Reference to meta model component

        """
        # Check for structured or unstructured
        if self.is_structured_meta_model:
            # Loop through the input names
            for idx, name in enumerate(self.input_names):
                # Check for no training data
                try:
                    # Append the input data/titles to a dictionary
                    self.training_inputs[name] = metamodel.inputs[idx]
                    # Also, append the data as an 'add_input' to the model reference
                    self.meta_model.add_input(name, 0.,
                                              training_data=metamodel.inputs[idx])
                except TypeError:
                    msg = "No training data present for one or more parameters"
                    raise TypeError(msg)

            # Add the outputs to the model reference
            for idx, name in enumerate(self.output_names):
                self.meta_model.add_output(
                    name, 0.,
                    training_data=metamodel.training_outputs[name])

        else:
            for name in self.input_names:
                try:
                    self.training_inputs[name] = {
                        title for title in metamodel.options['train:' + str(name)]}
                    self.meta_model.add_input(
                        name, 0.,
                        training_data=[
                            title for title in metamodel.options['train:' + str(name)]])
                except TypeError:
                    msg = "No training data present for one or more parameters"
                    raise TypeError(msg)

            for name in self.output_names:
                self.meta_model.add_output(
                    name, 0.,
                    training_data=[
                        title for title in metamodel.options['train:' + str(name)]])

        # Add the subsystem and setup
        self.prob.model.add_subsystem('interp', self.meta_model)
        self.prob.setup()

    def _slider_attrs(self):
        """
        Assign data to slider objects and callback functions.

        Parameters
        ----------
        None

        """
        for name, slider_object in self.slider_dict.items():
            # Checks if there is a callback previously assigned and then clears it
            if len(slider_object._callbacks) == 1:
                slider_object._callbacks.clear()

            # Check if the name matches the 'x input' title
            if name == self.x_input_select.value:
                # Set the object and add an event handler
                self.x_input_slider = slider_object
                self.x_input_slider.on_change('value', self._scatter_plots_update)

            # Check if the name matches the 'y input' title
            elif name == self.y_input_select.value:
                # Set the object and add an event handler
                self.y_input_slider = slider_object
                self.y_input_slider.on_change('value', self._scatter_plots_update)
            else:
                # If it is not an x or y input then just assign it the event handler
                slider_object.on_change('value', self._update)

    def _make_predictions(self, data):
        """
        Run the given data through the surrogate model contained in prob.

        Parameters
        ----------
        data : dict
            Dictionary containing training points.

        Returns
        -------
        array
            np.stack of predicted points.
        """
        # Create dictionary with an empty list
        outputs = {name: [] for name in self.output_names}

        # Parse dict into shape [n**2, number of inputs] list
        inputs = np.empty([self.resolution**2, self.num_inputs])
        for idx, values in enumerate(data.values()):
            inputs[:, idx] = values.flatten()

        # Check for structured or unstructured
        if self.is_structured_meta_model:
            # Assign each row of the data coming in to a tuple. Loop through the tuple, and append
            # the name of the input and value.
            for idx, tup in enumerate(inputs):
                for name, val in zip(data.keys(), tup):
                    self.prob[self.meta_model.name + '.' + name] = val
                self.prob.run_model()
                # Append the predicted value(s)
                for title in self.output_names:
                    outputs[title].append(
                        np.array(self.prob[self.meta_model.name + '.' + title]))

        else:
            for idx, tup in enumerate(inputs):
                for name, val in zip(data.keys(), tup):
                    self.prob[self.meta_model.name + '.' + name] = val
                self.prob.run_model()
                for title in self.output_names:
                    outputs[title].append(
                        float(self.prob[self.meta_model.name + '.' + title]))

        return stack_outputs(outputs)

    def _contour_data_calcs(self):
        """
        Parse input data into a dictionary to be predicted at.

        Parameters
        ----------
        None

        Returns
        -------
        dict
            Dictionary of training data to be predicted at.
        """
        # Create initial data array of training points
        resolution = self.resolution
        x_data = np.zeros((resolution, resolution, self.num_inputs))

        self._slider_attrs()

        # Broadcast the inputs to every row of x_data array
        x_data[:, :, :] = np.array(self.input_point_list)

        # Find the x/y input titles and match their index positions
        for idx, (title, values) in enumerate(self.slider_source.data.items()):
            if title == self.x_input_select.value:
                self.xlins_mesh = values
                x_index_position = idx
            if title == self.y_input_select.value:
                self.ylins_mesh = values
                y_index_position = idx

        # Make meshgrid from the x/y inputs to be plotted
        X, Y = np.meshgrid(self.xlins_mesh, self.ylins_mesh)
        # Move the x/y inputs to their respective positions in x_data
        x_data[:, :, x_index_position] = X
        x_data[:, :, y_index_position] = Y

        pred_dict = {}
        for idx, title in enumerate(self.slider_source.data):
            pred_dict.update({title: x_data[:, :, idx]})

        return pred_dict

    def _contour_data(self):
        """
        Create a contour plot.

        Parameters
        ----------
        None

        Returns
        -------
        Bokeh Image Plot
        """
        resolution = self.resolution
        # Output data array initialization
        y_data = np.zeros((resolution, resolution, self.num_outputs))
        self.input_point_list = [point.value for point in self.slider_dict.values()]

        # Pass the dict to make predictions and then reshape the output to
        # (resolution, resolution, number of outputs)
        y_data[:, :, :] = self._make_predictions(self._contour_data_calcs()).reshape(
            (resolution, resolution, self.num_outputs))
        # Use the output variable to pull the correct column of data from the predicted
        # data (y_data)
        self.Z = y_data[:, :, self.output_variable]
        # Reshape it to be 2D
        self.Z = self.Z.reshape(resolution, resolution)

        # Update the data source with new data
        self.contour_plot_source.data = dict(z=[self.Z])

        # Min to max of training data
        self.contour_x_range = xlins = self.xlins_mesh
        self.contour_y_range = ylins = self.ylins_mesh

        # Color bar formatting
        color_mapper = LinearColorMapper(
            palette="Viridis11", low=np.amin(self.Z), high=np.amax(self.Z))
        color_bar = ColorBar(color_mapper=color_mapper, ticker=BasicTicker(), label_standoff=12,
                             location=(0, 0))

        # Contour Plot
        self.contour_plot = contour_plot = figure(
            match_aspect=False,
            tooltips=[(self.x_input_select.value, "$x"), (self.y_input_select.value, "$y"),
                      (self.output_select.value, "@z")], tools='')
        contour_plot.x_range.range_padding = 0
        contour_plot.y_range.range_padding = 0
        contour_plot.plot_width = 600
        contour_plot.plot_height = 500
        contour_plot.xaxis.axis_label = self.x_input_select.value
        contour_plot.yaxis.axis_label = self.y_input_select.value
        contour_plot.min_border_left = 0
        contour_plot.add_layout(color_bar, 'right')
        contour_plot.x_range = Range1d(min(xlins), max(xlins))
        contour_plot.y_range = Range1d(min(ylins), max(ylins))
        contour_plot.image(image='z', source=self.contour_plot_source, x=min(xlins), y=min(ylins),
                           dh=(max(ylins) - min(ylins)), dw=(max(xlins) - min(xlins)),
                           palette="Viridis11")

        # Adding training data points overlay to contour plot
        if self.is_structured_meta_model:
            data = self._structured_training_points()
        else:
            data = self._unstructured_training_points()

        if len(data):
            # Add training data points overlay to contour plot
            data = np.array(data)
            if self.is_structured_meta_model:
                self.contour_training_data_source.data = dict(x=data[:, 0], y=data[:, 1],
                                                              z=self.meta_model.training_outputs[
                                                              self.output_select.value].flatten())
            else:
                self.contour_training_data_source.data = dict(x=data[:, 0], y=data[:, 1],
                                                              z=self.meta_model._training_output[
                                                              self.output_select.value])

            training_data_renderer = self.contour_plot.circle(
                x='x', y='y', source=self.contour_training_data_source,
                size=5, color='white', alpha=0.50)

            self.contour_plot.add_tools(HoverTool(renderers=[training_data_renderer], tooltips=[
                (self.x_input_select.value + " (train)", '@x'),
                (self.y_input_select.value + " (train)", '@y'),
                (self.output_select.value + " (train)", '@z'), ]))

        return self.contour_plot

    def _right_plot(self):
        """
        Create the right side subplot to view the projected slice.

        Parameters
        ----------
        None

        Returns
        -------
        Bokeh figure
        """
        # List of the current positions of the sliders
        self.input_point_list = [point.value for point in self.slider_dict.values()]

        # Find the title of the y input and match it with the data
        y_idx = self.y_input_select.value
        y_data = self.predict_inputs[y_idx]
        # Find the position of the x_input slider
        x_value = self.x_input_slider.value

        # Rounds the x_data to match the predict_inputs value
        subplot_value_index = np.where(
            np.around(self.predict_inputs[self.x_input_select.value], 5) ==
            np.around(x_value, 5))[0]

        # Make slice in Z data at the point calculated before and add it to the data source
        z_data = self.Z[:, subplot_value_index].flatten()

        x = z_data
        y = self.slider_source.data[y_idx]

        # Update the data source with new data
        self.right_plot_source.data = dict(x=x, y=y)

        # Create and format figure
        self.right_plot_fig = right_plot_fig = figure(
            plot_width=250, plot_height=500,
            title="{} vs {}".format(y_idx, self.output_select.value), tools="pan")
        right_plot_fig.xaxis.axis_label = self.output_select.value
        right_plot_fig.yaxis.axis_label = y_idx
        right_plot_fig.xaxis.major_label_orientation = math.pi / 9
        right_plot_fig.line(x='x', y='y', source=self.right_plot_source)
        right_plot_fig.x_range.range_padding = 0.1
        right_plot_fig.y_range.range_padding = 0.02

        # Determine distance and alpha opacity of training points
        if self.is_structured_meta_model:
            data = self._structured_training_points(compute_distance=True, source='right')
        else:
            data = self._unstructured_training_points(compute_distance=True, source='right')

        self.right_alphas = 1.0 - data[:, 2] / self.dist_range

        # Training data scatter plot
        scatter_renderer = right_plot_fig.scatter(x=data[:, 3], y=data[:, 1], line_color=None,
                                                  fill_color='#000000',
                                                  fill_alpha=self.right_alphas.tolist())

        right_plot_fig.add_tools(HoverTool(renderers=[scatter_renderer], tooltips=[
            (self.output_select.value + " (train)", '@x'),
            (y_idx + " (train)", '@y'),
        ]))

        span_width = self.dist_range * (max(y_data) - min(y_data))

        # Set the right_plot data source to new values
        self.right_plot_scatter_source.data = dict(
            right_slice_x=np.repeat(x_value, self.resolution), right_slice_y=y_data,
            left_dashed=[i - span_width for i in np.repeat(x_value, self.resolution)],
            right_dashed=[i + span_width for i in np.repeat(x_value, self.resolution)])

        self.contour_plot.line(
            'right_slice_x', 'right_slice_y', source=self.right_plot_scatter_source,
            color='black', line_width=2)
        self.contour_plot.line(
            'left_dashed', 'right_slice_y', line_dash='dashed',
            source=self.right_plot_scatter_source, color='black', line_width=2)
        self.contour_plot.line(
            'right_dashed', 'right_slice_y', line_dash='dashed',
            source=self.right_plot_scatter_source, color='black', line_width=2)

        return self.right_plot_fig

    def _bottom_plot(self):
        """
        Create the bottom subplot to view the projected slice.

        Parameters
        ----------
        None

        Returns
        -------
        Bokeh figure
        """
        # List of the current positions of the sliders
        self.input_point_list = [point.value for point in self.slider_dict.values()]

        # Find the title of the x input and match it with the data
        x_idx = self.x_input_select.value
        x_data = self.predict_inputs[x_idx]
        # Find the position of the y_input slider
        y_value = self.y_input_slider.value

        # Rounds the y_data to match the predict_inputs value
        subplot_value_index = np.where(
            np.around(self.predict_inputs[self.y_input_select.value], 5) ==
            np.around(y_value, 5))[0]

        # Make slice in Z data at the point calculated before and add it to the data source
        z_data = self.Z[subplot_value_index, :].flatten()

        x = self.slider_source.data[x_idx]
        y = z_data

        # Update the data source with new data
        self.bottom_plot_source.data = dict(x=x, y=y)

        # Create and format figure
        self.bottom_plot_fig = bottom_plot_fig = figure(
            plot_width=550, plot_height=250,
            title="{} vs {}".format(x_idx, self.output_select.value), tools="")
        bottom_plot_fig.xaxis.axis_label = x_idx
        bottom_plot_fig.yaxis.axis_label = self.output_select.value
        bottom_plot_fig.line(x='x', y='y', source=self.bottom_plot_source)
        bottom_plot_fig.x_range.range_padding = 0.02
        bottom_plot_fig.y_range.range_padding = 0.1

        # Determine distance and alpha opacity of training points
        if self.is_structured_meta_model:
            data = self._structured_training_points(compute_distance=True)
        else:
            data = self._unstructured_training_points(compute_distance=True)

        self.bottom_alphas = 1.0 - data[:, 2] / self.dist_range

        # Training data scatter plot
        scatter_renderer = bottom_plot_fig.scatter(x=data[:, 0], y=data[:, 3], line_color=None,
                                                   fill_color='#000000',
                                                   fill_alpha=self.bottom_alphas.tolist())

        bottom_plot_fig.add_tools(HoverTool(renderers=[scatter_renderer], tooltips=[
            (x_idx + " (train)", '@x'),
            (self.output_select.value + " (train)", '@y'),
        ]))

        span_width = self.dist_range * (max(x_data) - min(x_data))

        # Set the bottom_plot data source to new values
        self.bottom_plot_scatter_source.data = dict(
            bot_slice_x=x_data, bot_slice_y=np.repeat(y_value, self.resolution),
            upper_dashed=[i + span_width for i in np.repeat(y_value, self.resolution)],
            lower_dashed=[i - span_width for i in np.repeat(y_value, self.resolution)])

        self.contour_plot.line(
            'bot_slice_x', 'bot_slice_y', source=self.bottom_plot_scatter_source, color='black',
            line_width=2)
        self.contour_plot.line(
            'bot_slice_x', 'upper_dashed', line_dash='dashed',
            source=self.bottom_plot_scatter_source, color='black', line_width=2)
        self.contour_plot.line(
            'bot_slice_x', 'lower_dashed', line_dash='dashed',
            source=self.bottom_plot_scatter_source, color='black', line_width=2)

        return self.bottom_plot_fig

    def _unstructured_training_points(self, compute_distance=False, source='bottom'):
        """
        Calculate the training points and return an array containing the position and alpha.

        Parameters
        ----------
        compute_distance : bool
            If true, compute the distance of training points from surrogate line.
        source : str
            Which subplot the method is being called from.

        Returns
        -------
        array
            The array of training points and their alpha opacity with respect to the surrogate line
        """
        # Input training data and output training data
        x_training = self.meta_model._training_input
        training_output = np.squeeze(stack_outputs(self.meta_model._training_output), axis=1)

        # Index of input/output variables
        x_index = self.x_input_select.options.index(self.x_input_select.value)
        y_index = self.y_input_select.options.index(self.y_input_select.value)
        output_variable = self.output_names.index(self.output_select.value)

        # Vertically stack the x/y inputs and then transpose them
        infos = np.vstack((x_training[:, x_index], x_training[:, y_index])).transpose()
        if not compute_distance:
            return infos

        points = x_training.copy()

        # Normalize so each dimension spans [0, 1]
        points = np.divide(points, self.limit_range)
        dist_limit = np.linalg.norm(self.dist_range * self.limit_range)
        scaled_x0 = np.divide(self.input_point_list, self.limit_range)

        # Query the nearest neighbors tree for the closest points to the scaled x0 array
        # Nearest points to x slice
        if x_training.shape[1] < 3:

            tree = cKDTree(points)
            # Query the nearest neighbors tree for the closest points to the scaled x0 array
            dists, idxs = tree.query(
                scaled_x0, k=len(x_training), distance_upper_bound=self.dist_range)

            # kdtree query always returns requested k even if there are not enough valid points
            idx_finite = np.where(np.isfinite(dists))
            dists = dists[idx_finite]
            idxs = idxs[idx_finite]

        else:
            dists, idxs = self._multidimension_input(scaled_x0, points, source=source)

        # data contains:
        # [x_value, y_value, ND-distance, func_value]

        data = np.zeros((len(idxs), 4))
        for dist_index, j in enumerate(idxs):
            data[dist_index, 0:2] = infos[j, :]
            data[dist_index, 2] = dists[dist_index]
            data[dist_index, 3] = training_output[j, output_variable]

        return data

    def _structured_training_points(self, compute_distance=False, source='bottom'):
        """
        Calculate the training points and return an array containing the position and alpha.

        Parameters
        ----------
        compute_distance : bool
            If true, compute the distance of training points from surrogate line.
        source : str
            Which subplot the method is being called from.

        Returns
        -------
        array
            The array of training points and their alpha opacity with respect to the surrogate line
        """
        # Create tuple of the input parameters
        input_dimensions = tuple(self.meta_model.inputs)

        # Input training data and output training data
        x_training = np.array([z for z in product(*input_dimensions)])
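        # Note: itertools.product expands the per-input sample vectors into the full
        # structured grid, e.g. sample vectors [0, 1] and [10, 20] become the four
        # points (0, 10), (0, 20), (1, 10), (1, 20).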
        training_output = self.meta_model.training_outputs[self.output_select.value].flatten()

        # Index of input/output variables
        x_index = self.x_input_select.options.index(self.x_input_select.value)
        y_index = self.y_input_select.options.index(self.y_input_select.value)

        # Vertically stack the x/y inputs and then transpose them
        infos = np.vstack((x_training[:, x_index], x_training[:, y_index])).transpose()
        if not compute_distance:
            return infos

        points = x_training.copy()

        # Normalize so each dimension spans [0, 1]
        points = np.divide(points, self.limit_range)
        self.dist_limit = np.linalg.norm(self.dist_range * self.limit_range)
        scaled_x0 = np.divide(self.input_point_list, self.limit_range)
        # Query the nearest neighbors tree for the closest points to the scaled x0 array
        # Nearest points to x slice

        if x_training.shape[1] < 3:
            x_tree, x_idx = self._two_dimension_input(scaled_x0, points, source=source)
        else:
            x_tree, x_idx = self._multidimension_input(scaled_x0, points, source=source)

        # format for 'data'
        # [x_value, y_value, ND-distance_(x or y), func_value]

        n = len(x_tree)
        data = np.zeros((n, 4))
        for dist_index, j in enumerate(x_idx):
            data[dist_index, 0:2] = infos[j, :]
            data[dist_index, 2] = x_tree[dist_index]
            data[dist_index, 3] = training_output[j]

        return data

    def _two_dimension_input(self, scaled_points, training_points, source='bottom'):
        """
        Calculate the distance of training points to the surrogate line.

        Parameters
        ----------
        scaled_points : array
            Array of normalized slider positions.
        training_points : array
            Array of input training data.
        source : str
            Which subplot the method is being called from.

        Returns
        -------
        x_tree : array
            One-dimensional array of distances for the points that are within the dist range.
        idxs : array
            Indices of the closest points that are within the dist range.
        """
        # Column of the input
        if source == 'right':
            col_idx = self.y_input_select.options.index(self.y_input_select.value)
        else:
            col_idx = self.x_input_select.options.index(self.x_input_select.value)

        # Delete the axis of input from source to predicted 1D distance
        x = np.delete(scaled_points, col_idx, axis=0)
        x_training_points = np.delete(training_points, col_idx, axis=1).flatten()

        # Tree of point distances
        x_tree = np.abs(x - x_training_points)

        # Only return points that are within our distance-viewing parameter.
        idx = np.where(x_tree <= self.dist_range)
        x_tree = x_tree[idx]
        return x_tree, idx[0]

    def _multidimension_input(self, scaled_points, training_points, source='bottom'):
        """
        Calculate the distance of training points to the surrogate line.

        Parameters
        ----------
        scaled_points : array
            Array of normalized slider positions.
        training_points : array
            Array of input training data.
        source : str
            Which subplot the method is being called from.

        Returns
        -------
        dists : array
            Distances of the closest points that are within the dist range.
        idxs : array
            Indices of the closest points that are within the dist range.
        """
        # Column of the input
        if source == 'right':
            col_idx = self.y_input_select.options.index(self.y_input_select.value)

        else:
            col_idx = self.x_input_select.options.index(self.x_input_select.value)

        # Delete the axis of input from source to predicted distance
        x = np.delete(scaled_points, col_idx, axis=0)
        x_training_points = np.delete(training_points, col_idx, axis=1)

        # Tree of point distances
        x_tree = cKDTree(x_training_points)

        # Query the nearest neighbors tree for the closest points to the scaled array
        dists, idx = x_tree.query(x, k=len(x_training_points),
                                  distance_upper_bound=self.dist_range)

        # kdtree query always returns requested k even if there are not enough valid points
        idx_finite = np.where(np.isfinite(dists))
        dists_finite = dists[idx_finite]
        idx = idx[idx_finite]
        return dists_finite, idx

    # Event handler functions
    def _update_all_plots(self):
        self.doc_layout.children[0] = self._contour_data()
        self.doc_layout.children[1] = self._right_plot()
        self.doc_layout2.children[0] = self._bottom_plot()

    def _update_subplots(self):
        self.doc_layout.children[1] = self._right_plot()
        self.doc_layout2.children[0] = self._bottom_plot()

    def _update(self, attr, old, new):
        self._update_all_plots()

    def _scatter_plots_update(self, attr, old, new):
        self._update_subplots()

    def _scatter_input(self, attr, old, new):
        # Text input update function of dist range value
        self.dist_range = float(new)
        self._update_all_plots()

    def _x_input_update(self, attr, old, new):
        # Checks that x and y inputs are not equal to each other
        if new == self.y_input_select.value:
            raise ValueError("Inputs should not equal each other")
        else:
            self.x_input_select.value = new
            self._update_all_plots()

    def _y_input_update(self, attr, old, new):
        # Checks that x and y inputs are not equal to each other
        if new == self.x_input_select.value:
            raise ValueError("Inputs should not equal each other")
        else:
            self.y_input_select.value = new
            self._update_all_plots()

    def _output_value_update(self, attr, old, new):
        self.output_variable = self.output_names.index(new)
        self._update_all_plots()
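
The nearest-neighbor helpers above (`_unstructured_training_points` and `_multidimension_input`) rely on a detail of `scipy.spatial.cKDTree.query`: with `distance_upper_bound` set, the query still returns `k` entries and pads the misses with infinite distances, so the results must be filtered with `np.isfinite` before indexing. A minimal standalone sketch of that pattern (the point values here are made up for illustration):

import numpy as np
from scipy.spatial import cKDTree

# Hypothetical normalized training points in two dimensions.
points = np.array([[0.10, 0.20], [0.40, 0.40], [0.90, 0.90]])
tree = cKDTree(points)

# Ask for all k=len(points) neighbors of a slice location, but cap the search
# radius; neighbors outside the radius come back with dist == inf.
dists, idxs = tree.query([0.15, 0.25], k=len(points), distance_upper_bound=0.2)

# Keep only the valid (finite-distance) neighbors, as the plotting code does.
finite = np.isfinite(dists)
dists, idxs = dists[finite], idxs[finite]
print(dists, idxs)  # only the neighbor inside the 0.2 radius remains
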
Example #21
0
class ScipyODEIntegrator(object):
    """
    Given a system class, create a callable object with the same signature as that required
    by scipy.integrate.ode::

        f(t, x, *args)

    Internally, this is accomplished by constructing an OpenMDAO problem using the ODE with
    a single node.  The interface populates the values of the time, states, and controls,
    and then calls `run_model()` on the problem.  The state rates generated by the ODE
    are then returned to scipy.integrate.ode, which continues the integration.

    Parameters
    ----------
    phase_name : str
        The name of the phase being simulated.
    ode_class : class
        The ODEClass belonging to the phase being simulated.
    time_options : TimeOptionsDictionary
        The time options for the phase being simulated.
    state_options : dict of {str: StateOptionsDictionary}
        The state options for the phase being simulated.
    control_options : dict of {str: ControlOptionsDictionary}
        The control options for the phase being simulated.
    design_parameter_options : dict of {str: DesignParameterOptionsDictionary}
        The design parameter options for the phase being simulated.
    input_parameter_options : dict of {str: InputParameterOptionsDictionary}
        The input parameter options for the phase being simulated.
    ode_init_kwargs : dict
        Keyword argument dictionary passed to the ODE at initialization.
    """
    def __init__(self,
                 phase_name,
                 ode_class,
                 time_options,
                 state_options,
                 control_options,
                 design_parameter_options,
                 input_parameter_options,
                 ode_init_kwargs=None):

        self.phase_name = phase_name
        self.prob = Problem(model=Group())

        # Store the ODE class and default the init kwargs to an empty dict.
        self.ode_class = ode_class
        ode_init_kwargs = ode_init_kwargs if ode_init_kwargs is not None else {}

        # The ODE System
        self.prob.model.add_subsystem('ode',
                                      subsys=ode_class(num_nodes=1,
                                                       **ode_init_kwargs))
        self.ode_options = ode_class.ode_options

        # Get the state vector.  This isn't necessarily ordered
        # so just pick the default ordering and go with it.
        self.state_options = OrderedDict()
        self.time_options = time_options
        self.control_options = control_options
        self.design_parameter_options = design_parameter_options
        self.input_parameter_options = input_parameter_options

        pos = 0

        for state, options in iteritems(state_options):
            self.state_options[state] = {
                'rate_source': options['rate_source'],
                'pos': pos,
                'shape': options['shape'],
                'size': np.prod(options['shape']),
                'units': options['units'],
                'targets': options['targets']
            }
            pos += self.state_options[state]['size']

        self._state_vec = np.zeros(pos, dtype=float)
        self._state_rate_vec = np.zeros(pos, dtype=float)

        # The Time Comp
        self.prob.model.add_subsystem(
            'time_input',
            IndepVarComp('time',
                         val=0.0,
                         units=self.ode_options._time_options['units']),
            promotes_outputs=['time'])

        if self.ode_options._time_options['targets'] is not None:
            self.prob.model.connect('time', [
                'ode.{0}'.format(tgt)
                for tgt in self.ode_options._time_options['targets']
            ])

        # The States Comp
        indep = IndepVarComp()
        for name, options in iteritems(self.state_options):
            indep.add_output('states:{0}'.format(name),
                             shape=(1, np.prod(options['shape'])),
                             units=options['units'])
            if options['targets'] is not None:
                self.prob.model.connect(
                    'states:{0}'.format(name),
                    ['ode.{0}'.format(tgt) for tgt in options['targets']])
            self.prob.model.connect(
                'ode.{0}'.format(options['rate_source']),
                'state_rate_collector.state_rates_in:{0}_rate'.format(name))

        self.prob.model.add_subsystem('indep_states',
                                      subsys=indep,
                                      promotes_outputs=['*'])

        # The Control interpolation comp
        time_units = self.ode_options._time_options['units']
        self._interp_comp = ControlInterpolationComp(
            time_units=time_units, control_options=control_options)

        # The state rate collector comp
        self.prob.model.add_subsystem(
            'state_rate_collector',
            StateRateCollectorComp(state_options=self.state_options,
                                   time_units=time_options['units']))

        # Flag that is set to true if has_controls is called
        self._has_dynamic_controls = False

    def setup(self, check=False):
        """
        Call setup on the ScipyODEIntegrator's problem instance.

        Parameters
        ----------
        check : bool
            If True, run setup on the problem instance with checks enabled.
            Default is False.

        """
        model = self.prob.model

        order = ['time_input', 'indep_states']

        if self.control_options:
            model.add_subsystem('indep_controls',
                                self._interp_comp,
                                promotes_outputs=['*'])

            order += ['indep_controls']
            model.connect('time', ['indep_controls.time'])

            for name, options in iteritems(self.control_options):
                if name in self.ode_options._parameters:
                    targets = self.ode_options._parameters[name]['targets']
                    model.connect('controls:{0}'.format(name),
                                  ['ode.{0}'.format(tgt) for tgt in targets])
                if options['rate_param']:
                    rate_param = options['rate_param']
                    rate_targets = self.ode_options._parameters[rate_param][
                        'targets']
                    model.connect(
                        'control_rates:{0}_rate'.format(name),
                        ['ode.{0}'.format(tgt) for tgt in rate_targets])
                if options['rate2_param']:
                    rate2_param = options['rate2_param']
                    rate2_targets = self.ode_options._parameters[rate2_param][
                        'targets']
                    model.connect(
                        'control_rates:{0}_rate2'.format(name),
                        ['ode.{0}'.format(tgt) for tgt in rate2_targets])

        if self.design_parameter_options:
            ivc = model.add_subsystem('design_params',
                                      IndepVarComp(),
                                      promotes_outputs=['*'])

            order += ['design_params']

            for name, options in iteritems(self.design_parameter_options):
                ivc.add_output('design_parameters:{0}'.format(name),
                               val=np.zeros(options['shape']),
                               units=options['units'])
                if name in self.ode_options._parameters:
                    targets = self.ode_options._parameters[name]['targets']
                    model.connect('design_parameters:{0}'.format(name),
                                  ['ode.{0}'.format(tgt) for tgt in targets])

        if self.input_parameter_options:
            ivc = model.add_subsystem('input_params',
                                      IndepVarComp(),
                                      promotes_outputs=['*'])

            order += ['input_params']

            for name, options in iteritems(self.input_parameter_options):
                ivc.add_output('input_parameters:{0}'.format(name),
                               val=np.zeros(options['shape']),
                               units=options['units'])
                if options['target_param'] in self.ode_options._parameters:
                    targets = self.ode_options._parameters[
                        options['target_param']]['targets']
                    model.connect('input_parameters:{0}'.format(name),
                                  ['ode.{0}'.format(tgt) for tgt in targets])

        order += ['ode', 'state_rate_collector']
        model.set_order(order)

        self.prob.setup(check=check)

    def set_interpolant(self, name, interpolant):
        """
        Set the interpolator to be used for the control of the given name.

        Parameters
        ----------
        name : str
            The name of the control whose interpolant is being specified.
        interpolant : object
            An object that provides interpolation for the control as a function of time.
            The object must have methods `eval(t)` which returns the interpolated value
            of the control at time t, and `eval_deriv(t)` which returns the interpolated
            value of the first time-derivative of the control at time t.
        """
        self._interp_comp.interpolants[name] = interpolant
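
    # A minimal sketch (an assumption, not part of the original code) of an object
    # satisfying the interpolant protocol described above: it only needs to provide
    # eval(t) and eval_deriv(t).
    class _ExampleLinearInterpolant(object):
        """Hypothetical linear-ramp interpolant: value = slope * t + offset."""

        def __init__(self, slope=1.0, offset=0.0):
            self.slope = slope
            self.offset = offset

        def eval(self, t):
            # Interpolated control value at time t.
            return self.slope * t + self.offset

        def eval_deriv(self, t):
            # First time-derivative of the control at time t.
            return self.slope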

    def set_design_param_value(self, name, val, units=None):
        """
        Sets the values of design parameters in the problem, once self.prob has been setup.

        Parameters
        ----------
        name : str
            The name of the design parameter whose value is being set
        val : float or ndarray
            The value of the design parameter
        units : str or None
            The units in which the design parameter value is set.

        """
        self.prob.set_val('design_parameters:{0}'.format(name), val, units)

    def set_input_param_value(self, name, val, units=None):
        """
        Sets the values of input parameters in the problem, once self.prob has been setup.

        Parameters
        ----------
        name : str
            The name of the input parameter whose value is being set.
        val : float or ndarray
            The value of the input parameter.
        units : str or None
            The units in which the input parameter value is set.

        """
        self.prob.set_val('input_parameters:{0}'.format(name), val, units)

    def _unpack_state_vec(self, x):
        """
        Given the state vector in 1D, extract the values corresponding to
        each state into the ODE integrator's problem states.

        Parameters
        ----------
        x : np.array
            The 1D state vector.

        Returns
        -------
        None

        """
        for state_name, state_options in self.state_options.items():
            pos = state_options['pos']
            size = state_options['size']
            self.prob['states:{0}'.format(state_name)][0,
                                                       ...] = x[pos:pos + size]

    def _pack_state_rate_vec(self):
        """
        Pack the state rates into a 1D vector for use by scipy.integrate.ode.

        Returns
        -------
        dXdt: np.array
            The 1D state-rate vector.

        """
        for state_name, state_options in self.state_options.items():
            pos = state_options['pos']
            size = state_options['size']
            self._state_rate_vec[pos:pos + size] = \
                np.ravel(self.prob['state_rate_collector.'
                                   'state_rates:{0}_rate'.format(state_name)])
        return self._state_rate_vec

    def _pack_state_vec(self, x_dict):
        """
        Pack the state into a 1D vector for use by scipy.integrate.ode.

        Parameters
        ----------
        x_dict : dict
            A dictionary keyed by state name containing the state values.

        Returns
        -------
        x: np.array
            The 1D state vector.

        """
        self._state_vec[:] = 0.0
        for state_name, state_options in self.state_options.items():
            pos = state_options['pos']
            size = state_options['size']
            self._state_vec[pos:pos + size] = np.ravel(x_dict[state_name])
        return self._state_vec

    def _f_ode(self, t, x, *args):
        """
        The function interface used by scipy.integrate.ode.

        Parameters
        ----------
        t : float
            The current time, t.
        x : np.array
            The 1D state vector.

        Returns
        -------
        xdot : np.array
            The 1D vector of state time-derivatives.

        """
        self.prob['time'] = t
        self._unpack_state_vec(x)
        self.prob.run_model()
        xdot = self._pack_state_rate_vec()
        return xdot

    def _store_results(self, results, append=False):
        """
        Save the outputs of the integrator's problem object into the given PhaseSimulationResults
        instance.

        Parameters
        ----------
        results : PhaseSimulationResults
            The PhaseSimulationResults object into which results of the integration are
            to be saved.
        """
        model_outputs = self.prob.model.list_outputs(units=True,
                                                     shape=True,
                                                     values=True,
                                                     implicit=True,
                                                     explicit=True,
                                                     out_stream=None)

        for output_name, options in model_outputs:
            prom_name = self.prob.model._var_abs2prom['output'][output_name]
            if prom_name.startswith('time'):
                var_type = 'indep'
                name = 'time'
                shape = (1, )
            elif prom_name.startswith('states:'):
                var_type = 'states'
                name = prom_name.replace('states:', '', 1)
                shape = self.state_options[name]['shape']

            elif prom_name.startswith('controls:'):
                var_type = 'controls'
                name = prom_name.replace('controls:', '', 1)
                shape = self.control_options[name]['shape']

            elif prom_name.startswith('control_rates:'):
                var_type = 'control_rates'
                name = prom_name.replace('control_rates:', '', 1)
                if name.endswith('_rate'):
                    control_name = name[:-5]
                if name.endswith('_rate2'):
                    control_name = name[:-6]
                shape = self.control_options[control_name]['shape']

            elif prom_name.startswith('design_parameters:'):
                var_type = 'design_parameters'
                name = prom_name.replace('design_parameters:', '', 1)
                shape = self.design_parameter_options[name]['shape']

            elif prom_name.startswith('input_parameters:'):
                var_type = 'input_parameters'
                name = prom_name.replace('input_parameters:', '', 1)
                shape = self.input_parameter_options[name]['shape']

            elif prom_name.startswith('ode.'):
                var_type = 'ode'
                name = prom_name.replace('ode.', '', 1)
                shape = options['shape'] if len(
                    options['shape']) == 1 else options['shape'][1:]

            elif prom_name.startswith('state_rate_collector.'):
                # skip this variable since this is just a simulation artifact
                continue
            else:
                raise RuntimeWarning('Unexpected output encountered during '
                                     'simulation: {0}'.format(prom_name))

            if append:
                results.outputs[var_type][name]['value'] = \
                    np.concatenate((results.outputs[var_type][name]['value'],
                                    np.atleast_2d(options['value'])),
                                   axis=0)
            else:
                results.outputs[var_type][name] = {}
                results.outputs[var_type][name]['value'] = np.atleast_2d(
                    options['value']).copy()
                results.outputs[var_type][name]['units'] = options['units']
                results.outputs[var_type][name]['shape'] = shape

    def integrate_times(self,
                        x0_dict,
                        times,
                        integrator='vode',
                        integrator_params=None,
                        observer=None):
        """
        Integrate the RHS with the given initial state, and record the values at the
        specified times.

        Parameters
        ----------
        x0_dict : dict
            A dictionary keyed by state variable name that contains the
            initial values for the states.  If absent the initial value is
            assumed to be zero.
        times : sequence
            The sequence of times at which output for the integration is desired.
        integrator : str
            The integrator to be used by scipy.ode.  This is one of:
            vode, zvode, lsoda, dopri5, or dop853.
        integrator_params : dict, None
            Parameters specific to the chosen integrator.  See the Scipy
            documentation for details.
        observer : callable, str, None
            A callable function to be called at the specified timesteps in
            `integrate_times`.  This can be used to record the integrated trajectory.
            If 'stdout', a StdOutObserver will be used, which outputs all variables
            in the model to standard output.  If 'progress-bar', a ProgressBarObserver
            will be used to report integration progress.
            If None, no observer will be called.

        Returns
        -------
        PhaseSimulationResults
            A dictionary of variables in the RHS and their values at the given times.

        """

        # Prepare the observer
        if observer == 'stdout':
            _observer = StdOutObserver(self)
        elif observer == 'progress-bar':
            _observer = ProgressBarObserver(self,
                                            out_stream=sys.stdout,
                                            t0=times[0],
                                            tf=times[-1])
        else:
            _observer = observer

        int_params = integrator_params if integrator_params else {}

        solver = ode(self._f_ode)

        x0 = self._pack_state_vec(x0_dict)

        solver.set_integrator(integrator, **int_params)
        solver.set_initial_value(x0, times[0])

        delta_times = np.diff(times)

        # Run the Model once to get the initial values of all variables
        self._f_ode(solver.t, solver.y)

        # Prepare the output dictionary
        results = \
            PhaseSimulationResults(time_options=self.time_options,
                                   state_options=self.state_options,
                                   control_options=self.control_options,
                                   design_parameter_options=self.design_parameter_options,
                                   input_parameter_options=self.input_parameter_options)

        self._store_results(results)

        if _observer:
            _observer(solver.t, solver.y, self.prob)

        terminate = False
        for dt in delta_times:

            try:
                solver.integrate(solver.t + dt)
                self._f_ode(solver.t, solver.y)
            except AnalysisError:
                terminate = True

            self._store_results(results, append=True)

            if _observer:
                _observer(solver.t, solver.y, self.prob)

            if terminate:
                break

        return results
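
For reference, the `f(t, x, *args)` callable implemented by `_f_ode` above is the standard right-hand-side signature expected by `scipy.integrate.ode`. A minimal sketch of that interface with a trivial decay ODE, decoupled from OpenMDAO (names and values are illustrative):

import numpy as np
from scipy.integrate import ode

def f(t, x):
    # dx/dt = -x: a stand-in for the OpenMDAO-evaluated state rates.
    return -x

solver = ode(f)
solver.set_integrator('vode')
solver.set_initial_value(np.array([1.0]), 0.0)

times = np.linspace(0.0, 1.0, 11)
for dt in np.diff(times):
    solver.integrate(solver.t + dt)
    print(solver.t, solver.y)  # solver.y approaches exp(-t)
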
Example #22
0
    def _run_nl_ln_drv(self, ndv, nstate, nproc, flag):
        """
        Benchmark a single point.

        Nonlinear solve is always run. Linear Solve and Driver are optional.

        Parameters
        ----------
        ndv : int
            Number of design variables requested.
        nstate : int
            Number of states requested.
        nproc : int
            Number of processors requested.
        flag : bool
            User-assignable flag that may be True or False.
        """
        prob = Problem()

        # User hook pre setup
        self.setup(prob, ndv, nstate, nproc, flag)

        prob.setup(mode=self.mode)

        # User hook post setup
        self.post_setup(prob, ndv, nstate, nproc, flag)

        prob.final_setup()

        # Time Execution
        t0 = time()
        prob.run_model()
        t1 = time() - t0
        print("Nonlinear Execution complete:", t1, 'sec')

        if self.time_driver:
            t4 = time()
            prob.run_driver()
            t5 = time() - t4
            print("Driver Execution complete:", t5, 'sec')
        else:
            t5 = 0.0

        if self.time_linear:
            if self.sub_timing:
                prob.model.linear_solver.time_lu_fact = 0
                prob.model.linear_solver.time_lu_solve = 0
            t2 = time()
            prob.compute_totals(of=self.ln_of,
                                wrt=self.ln_wrt,
                                return_format='dict')
            t3 = time() - t2
            print("Linear Execution complete:", t3, 'sec')
            if self.sub_timing:
                t3a = prob.model.linear_solver.time_lu_fact
                t3b = prob.model.linear_solver.time_lu_solve
                t3c = prob.total_jac.time_linearize_sys
                t3d = prob.total_jac.time_linearize_solver
                t3e = prob.total_jac.time_solve
        else:
            t3 = 0.0

        self.post_run(prob, ndv, nstate, nproc, flag)

        if self.sub_timing and self.time_linear:
            return t1, t3, t5, t3a, t3b, t3c, t3d, t3e
        else:
            return t1, t3, t5
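
The linear timing above hinges on `Problem.compute_totals`, which performs the linear solves for the requested derivatives after the nonlinear run. A minimal self-contained sketch of timing that call (the component and variable names here are invented for illustration):

import numpy as np
from time import time
import openmdao.api as om

prob = om.Problem()
prob.model.add_subsystem('ivc', om.IndepVarComp('x', np.ones(3)), promotes=['*'])
prob.model.add_subsystem('comp', om.ExecComp('y = 2.0 * x', x=np.ones(3), y=np.ones(3)),
                         promotes=['*'])
prob.setup(mode='rev')
prob.run_model()

t0 = time()
totals = prob.compute_totals(of=['y'], wrt=['x'], return_format='dict')
print("Linear Execution complete:", time() - t0, 'sec')
print(totals['y']['x'])  # 3x3 Jacobian with 2.0 on the diagonal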