Example #1
    def __init__(self):
        super(Driver, self).__init__()
        self.recorders = RecordingManager()

        # What this driver supports
        self.supports = OptionsDictionary(read_only=True)
        self.supports.add_option('inequality_constraints', True)
        self.supports.add_option('equality_constraints', True)
        self.supports.add_option('linear_constraints', True)
        self.supports.add_option('multiple_objectives', True)
        self.supports.add_option('two_sided_constraints', True)
        self.supports.add_option('integer_design_vars', True)

        # inheriting Drivers should override this setting and set it to False
        # if they don't use gradients.
        self.supports.add_option('gradients', True)

        # This driver's options
        self.options = OptionsDictionary()

        self._desvars = OrderedDict()
        self._objs = OrderedDict()
        self._cons = OrderedDict()

        self._voi_sets = []
        self._vars_to_record = None

        # We take root during setup
        self.root = None

        self.iter_count = 0
        self.dv_conversions = {}
        self.fn_conversions = {}
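The comment above says inheriting drivers that do not use gradients should flip this flag. A minimal, hypothetical sketch of that override (assuming the read-only supports dictionary still lets the owning driver assign to it from its own __init__, as the comment implies):

class MyDerivativeFreeDriver(Driver):
    def __init__(self):
        super(MyDerivativeFreeDriver, self).__init__()
        # hypothetical subclass: disable the gradient support declared above
        self.supports['gradients'] = False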
Example #2
    def __init__(self, **kwargs):
        """
        Initialize all attributes.

        Parameters
        ----------
        **kwargs : dict
            options dictionary.
        """
        self._system = None
        self._depth = 0
        self._vec_names = None
        self._mode = 'fwd'
        self._iter_count = 0

        self.options = OptionsDictionary()
        self.recording_options = OptionsDictionary()
        self.options.declare('maxiter', types=int, default=10,
                             desc='maximum number of iterations')
        self.options.declare('atol', default=1e-10,
                             desc='absolute error tolerance')
        self.options.declare('rtol', default=1e-10,
                             desc='relative error tolerance')
        self.options.declare('iprint', types=int, default=1,
                             desc='whether to print output')
        self.options.declare('err_on_maxiter', types=bool, default=False,
                             desc="When True, AnalysisError will be raised if we don't converge.")
        # Case recording options
        self.recording_options.declare('record_abs_error', types=bool, default=True,
                                       desc='Set to True to record absolute error at the \
                                       solver level')
        self.recording_options.declare('record_rel_error', types=bool, default=True,
                                       desc='Set to True to record relative error at the \
                                       solver level')
        self.recording_options.declare('record_solver_residuals', types=bool, default=False,
                                       desc='Set to True to record residuals at the solver level')
        self.recording_options.declare('record_metadata', types=bool, desc='Record metadata',
                                       default=True)
        self.recording_options.declare('includes', types=list, default=['*'],
                                       desc='Patterns for variables to include in recording')
        self.recording_options.declare('excludes', types=list, default=[],
                                       desc='Patterns for vars to exclude in recording '
                                       '(processed post-includes)')
        # Case recording related
        self._filtered_vars_to_record = {}
        self._norm0 = 0.0

        # What the solver supports.
        self.supports = OptionsDictionary()
        self.supports.declare('gradients', types=bool, default=False)

        self._declare_options()
        self.options.update(kwargs)

        self.metadata = {}
        self._rec_mgr = RecordingManager()

        self.cite = ""
Example #3
    def __init__(self):
        self.iter_count = 0
        self.options = OptionsDictionary()
        desc = 'Set to 0 to disable printing, set to 1 to print the ' \
               'residual to stdout each iteration, set to 2 to print ' \
               'subiteration residuals as well.'
        self.options.add_option('iprint', 0, values=[0, 1, 2], desc=desc)
        self.recorders = RecordingManager()
        self.local_meta = None
Example #4
    def __init__(self):
        self.iter_count = 0
        self.options = OptionsDictionary()
        desc = 'Set to 0 to disable printing, set to 1 to print the ' \
               'residual to stdout each iteration, set to 2 to print ' \
               'subiteration residuals as well.'
        self.options.add_option('iprint', 0, values=[0, 1, 2], desc=desc)
        self.options.add_option(
            'err_on_maxiter',
            False,
            desc='If True, raise an AnalysisError if not converged at maxiter.'
        )
        self.recorders = RecordingManager()
        self.local_meta = None
Example #5
    def __init__(self):
        self.iter_count = 0
        self.options = OptionsDictionary()
        desc =  "Set to 0 to print only failures, set to 1 to print iteration totals to" + \
                "stdout, set to 2 to print the residual each iteration to stdout," + \
                "or -1 to suppress all printing."

        self.options.add_option('iprint', 0, values=[-1, 0, 1, 2], desc=desc)
        self.options.add_option(
            'err_on_maxiter',
            False,
            desc='If True, raise an AnalysisError if not converged at maxiter.'
        )
        self.recorders = RecordingManager()
        self.local_meta = None
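For context, a hedged sketch of how a solver loop would typically consume 'err_on_maxiter' and 'iprint' (the loop variables are illustrative; AnalysisError is the OpenMDAO exception named in the option description):

if iteration >= maxiter and not converged:
    if self.options['err_on_maxiter']:
        raise AnalysisError("Solver hit maxiter (%d) without converging." % maxiter)
    elif self.options['iprint'] >= 0:
        print("Solver hit maxiter (%d) without converging." % maxiter)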
Example #6
File: driver.py Project: fzahle/OpenMDAO
    def __init__(self):
        super(Driver, self).__init__()
        self.recorders = RecordingManager()

        # What this driver supports
        self.supports = OptionsDictionary(read_only=True)
        self.supports.add_option("inequality_constraints", True)
        self.supports.add_option("equality_constraints", True)
        self.supports.add_option("linear_constraints", True)
        self.supports.add_option("multiple_objectives", True)
        self.supports.add_option("two_sided_constraints", True)
        self.supports.add_option("integer_design_vars", True)

        # This driver's options
        self.options = OptionsDictionary()

        self._desvars = OrderedDict()
        self._objs = OrderedDict()
        self._cons = OrderedDict()

        self._voi_sets = []
        self._vars_to_record = None

        # We take root during setup
        self.root = None

        self.iter_count = 0
        self.dv_conversions = {}
        self.fn_conversions = {}
Example #7
File: solver.py Project: samtx/OpenMDAO
    def __init__(self, **kwargs):
        """
        Initialize all attributes.

        Parameters
        ----------
        **kwargs : dict
            options dictionary.
        """
        self._system = None
        self._depth = 0
        self._vec_names = None
        self._mode = 'fwd'
        self._iter_count = 0

        self.options = OptionsDictionary()
        self.recording_options = OptionsDictionary()
        self.options.declare('maxiter', types=int, default=10,
                             desc='maximum number of iterations')
        self.options.declare('atol', default=1e-10,
                             desc='absolute error tolerance')
        self.options.declare('rtol', default=1e-10,
                             desc='relative error tolerance')
        self.options.declare('iprint', types=int, default=1,
                             desc='whether to print output')
        self.options.declare('err_on_maxiter', types=bool, default=False,
                             desc="When True, AnalysisError will be raised if we don't converge.")
        # Case recording options
        self.recording_options.declare('record_abs_error', types=bool, default=True,
                                       desc='Set to True to record absolute error at the \
                                       solver level')
        self.recording_options.declare('record_rel_error', types=bool, default=True,
                                       desc='Set to True to record relative error at the \
                                       solver level')
        self.recording_options.declare('record_solver_residuals', types=bool, default=False,
                                       desc='Set to True to record residuals at the solver level')
        self.recording_options.declare('record_metadata', types=bool, desc='Record metadata',
                                       default=True)
        self.recording_options.declare('includes', types=list, default=['*'],
                                       desc='Patterns for variables to include in recording')
        self.recording_options.declare('excludes', types=list, default=[],
                                       desc='Patterns for vars to exclude in recording '
                                       '(processed post-includes)')
        # Case recording related
        self._filtered_vars_to_record = {}
        self._norm0 = 0.0

        # What the solver supports.
        self.supports = OptionsDictionary()
        self.supports.declare('gradients', types=bool, default=False)

        self._declare_options()
        self.options.update(kwargs)

        self.metadata = {}
        self._rec_mgr = RecordingManager()

        self.cite = ""
Example #8
    def __init__(self, **kwargs):
        """
        Initialize all attributes.

        Parameters
        ----------
        **kwargs : dict
            options dictionary.
        """
        self._system = None
        self._depth = 0
        self._vec_names = None
        self._mode = 'fwd'
        self._iter_count = 0

        self.options = OptionsDictionary()
        self.options.declare('maxiter',
                             type_=int,
                             default=10,
                             desc='maximum number of iterations')
        self.options.declare('atol',
                             default=1e-10,
                             desc='absolute error tolerance')
        self.options.declare('rtol',
                             default=1e-10,
                             desc='relative error tolerance')
        self.options.declare('iprint',
                             type_=int,
                             default=1,
                             desc='whether to print output')
        self.options.declare(
            'err_on_maxiter',
            type_=bool,
            default=False,
            desc="When True, AnlysisError will be raised if we don't convege.")

        # What the solver supports.
        self.supports = OptionsDictionary()
        self.supports.declare('gradients', type_=bool, default=False)

        self._declare_options()
        self.options.update(kwargs)

        self.metadata = {}
        self._rec_mgr = RecordingManager()
Example #9
    def __init__(self):
        self.iter_count = 0
        self.options = OptionsDictionary()
        desc = 'Set to 0 to disable printing, set to 1 to print the ' \
               'residual to stdout each iteration, set to 2 to print ' \
               'subiteration residuals as well.'
        self.options.add_option('iprint', 0, values=[0, 1, 2], desc=desc)
        self.recorders = RecordingManager()
        self.local_meta = None
Example #10
    def __init__(self):
        self.iter_count = 0
        self.options = OptionsDictionary()
        desc = 'Set to 0 to disable printing, set to 1 to print the ' \
               'residual to stdout each iteration, set to 2 to print ' \
               'subiteration residuals as well.'
        self.options.add_option('iprint', 0, values=[0, 1, 2], desc=desc)
        self.options.add_option('err_on_maxiter', False,
            desc='If True, raise an AnalysisError if not converged at maxiter.')
        self.recorders = RecordingManager()
        self.local_meta = None
Example #11
    def __init__(self):
        self.iter_count = 0
        self.options = OptionsDictionary()
        desc =  "Set to 0 to print only failures, set to 1 to print iteration totals to" + \
                "stdout, set to 2 to print the residual each iteration to stdout," + \
                "or -1 to suppress all printing."

        self.options.add_option('iprint', 0, values=[-1, 0, 1, 2], desc=desc)
        self.options.add_option('err_on_maxiter', False,
            desc='If True, raise an AnalysisError if not converged at maxiter.')
        self.recorders = RecordingManager()
        self.local_meta = None
Example #12
File: driver.py Project: sebasanper/blue
    def __init__(self):
        """
        Initialize the driver.
        """
        self._rec_mgr = RecordingManager()

        self._problem = None
        self._designvars = None
        self._cons = None
        self._objs = None
        self._responses = None
        self.options = OptionsDictionary()

        # What the driver supports.
        self.supports = OptionsDictionary()
        self.supports.declare('inequality_constraints',
                              type_=bool,
                              default=False)
        self.supports.declare('equality_constraints',
                              type_=bool,
                              default=False)
        self.supports.declare('linear_constraints', type_=bool, default=False)
        self.supports.declare('two_sided_constraints',
                              type_=bool,
                              default=False)
        self.supports.declare('multiple_objectives', type_=bool, default=False)
        self.supports.declare('integer_design_vars', type_=bool, default=False)
        self.supports.declare('gradients', type_=bool, default=False)
        self.supports.declare('active_set', type_=bool, default=False)

        self.iter_count = 0
        self.metadata = None
        self._model_viewer_data = None

        # TODO, support these in OpenMDAO blue
        self.supports.declare('integer_design_vars', type_=bool, default=False)

        self.fail = False
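A hedged usage sketch: code that sets up an optimization typically checks these flags before relying on a capability (the driver instance here is assumed):

if not driver.supports['two_sided_constraints']:
    raise RuntimeError("This driver cannot handle two-sided constraints; "
                       "declare separate upper and lower constraints instead.")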
Example #13
    def __init__(self):
        """
        Initialize the driver.
        """
        self._rec_mgr = RecordingManager()
        self._vars_to_record = {
            'desvarnames': set(),
            'responsenames': set(),
            'objectivenames': set(),
            'constraintnames': set(),
            'sysinclnames': set(),
        }

        self._problem = None
        self._designvars = None
        self._cons = None
        self._objs = None
        self._responses = None
        self.options = OptionsDictionary()
        self.recording_options = OptionsDictionary()

        ###########################
        self.options.declare(
            'debug_print',
            types=list,
            is_valid=_is_debug_print_opts_valid,
            desc="List of what type of Driver variables to print at each "
            "iteration. Valid items in list are 'desvars','ln_cons',"
            "'nl_cons','objs'",
            default=[])

        ###########################
        self.recording_options.declare('record_metadata',
                                       types=bool,
                                       desc='Record metadata',
                                       default=True)
        self.recording_options.declare(
            'record_desvars',
            types=bool,
            default=True,
            desc='Set to True to record design variables at the \
                                       driver level')
        self.recording_options.declare(
            'record_responses',
            types=bool,
            default=False,
            desc='Set to True to record responses at the driver level')
        self.recording_options.declare(
            'record_objectives',
            types=bool,
            default=True,
            desc='Set to True to record objectives at the \
                                       driver level')
        self.recording_options.declare(
            'record_constraints',
            types=bool,
            default=True,
            desc='Set to True to record constraints at the \
                                       driver level')
        self.recording_options.declare(
            'includes',
            types=list,
            default=['*'],
            desc='Patterns for variables to include in recording')
        self.recording_options.declare(
            'excludes',
            types=list,
            default=[],
            desc='Patterns for vars to exclude in recording '
            '(processed post-includes)')
        self.recording_options.declare(
            'record_derivatives',
            types=bool,
            default=False,
            desc='Set to True to record derivatives at the driver \
                                       level')
        ###########################

        # What the driver supports.
        self.supports = OptionsDictionary()
        self.supports.declare('inequality_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('equality_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('linear_constraints', types=bool, default=False)
        self.supports.declare('two_sided_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('multiple_objectives', types=bool, default=False)
        self.supports.declare('integer_design_vars', types=bool, default=False)
        self.supports.declare('gradients', types=bool, default=False)
        self.supports.declare('active_set', types=bool, default=False)
        self.supports.declare('simultaneous_derivatives',
                              types=bool,
                              default=False)

        # Debug printing.
        self.debug_print = OptionsDictionary()
        self.debug_print.declare(
            'debug_print',
            types=bool,
            default=False,
            desc='Overall option to turn on Driver debug printing')
        self.debug_print.declare('debug_print_desvars',
                                 types=bool,
                                 default=False,
                                 desc='Print design variables')
        self.debug_print.declare('debug_print_nl_con',
                                 types=bool,
                                 default=False,
                                 desc='Print nonlinear constraints')
        self.debug_print.declare('debug_print_ln_con',
                                 types=bool,
                                 default=False,
                                 desc='Print linear constraints')
        self.debug_print.declare('debug_print_objective',
                                 types=bool,
                                 default=False,
                                 desc='Print objectives')

        self.iter_count = 0
        self.metadata = None
        self._model_viewer_data = None
        self.cite = ""

        # TODO, support these in OpenMDAO
        self.supports.declare('integer_design_vars', types=bool, default=False)

        self._simul_coloring_info = None
        self._res_jacs = {}

        self.fail = False
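For reference, a hedged usage sketch of the list-valued 'debug_print' option declared at the top of this example (prob is an assumed OpenMDAO Problem using this Driver):

prob.driver.options['debug_print'] = ['desvars', 'nl_cons', 'objs']
prob.run_driver()  # the selected quantities are printed each driver iteration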
Example #14
class Driver(object):
    """
    Top-level container for the systems and drivers.

    Attributes
    ----------
    fail : bool
        Reports whether the driver ran successfully.
    iter_count : int
        Keep track of iterations for case recording.
    options : <OptionsDictionary>
        Dictionary with general driver options.
    recording_options : <OptionsDictionary>
        Dictionary with driver recording options.
    cite : str
        Listing of relevant citations that should be referenced when
        publishing work that uses this class.
    _problem : <Problem>
        Pointer to the containing problem.
    supports : <OptionsDictionary>
        Provides a consistent way for drivers to declare what features they support.
    _designvars : dict
        Contains all design variable info.
    _designvars_discrete : list
        List of design variables that are discrete.
    _cons : dict
        Contains all constraint info.
    _objs : dict
        Contains all objective info.
    _responses : dict
        Contains all response info.
    _remote_dvs : dict
        Dict of design variables that are remote on at least one proc. Values are
        (owning rank, size).
    _remote_cons : dict
        Dict of constraints that are remote on at least one proc. Values are
        (owning rank, size).
    _remote_objs : dict
        Dict of objectives that are remote on at least one proc. Values are
        (owning rank, size).
    _rec_mgr : <RecordingManager>
        Object that manages all recorders added to this driver.
    _coloring_info : dict
        Metadata pertaining to total coloring.
    _total_jac_sparsity : dict, str, or None
        Specifies sparsity of sub-jacobians of the total jacobian. Only used by pyOptSparseDriver.
    _res_jacs : dict
        Dict of sparse subjacobians for use with certain optimizers, e.g. pyOptSparseDriver.
    _total_jac : _TotalJacInfo or None
        Cached total jacobian handling object.
    """
    def __init__(self, **kwargs):
        """
        Initialize the driver.

        Parameters
        ----------
        **kwargs : dict of keyword arguments
            Keyword arguments that will be mapped into the Driver options.
        """
        self._rec_mgr = RecordingManager()

        self._problem = None
        self._designvars = None
        self._designvars_discrete = []
        self._cons = None
        self._objs = None
        self._responses = None

        # Driver options
        self.options = OptionsDictionary(parent_name=type(self).__name__)

        self.options.declare(
            'debug_print',
            types=list,
            check_valid=_check_debug_print_opts_valid,
            desc="List of what type of Driver variables to print at each "
            "iteration. Valid items in list are 'desvars', 'ln_cons', "
            "'nl_cons', 'objs', 'totals'",
            default=[])

        # Case recording options
        self.recording_options = OptionsDictionary(
            parent_name=type(self).__name__)

        self.recording_options.declare(
            'record_model_metadata',
            types=bool,
            default=True,
            desc='Record metadata for all Systems in the model')
        self.recording_options.declare(
            'record_desvars',
            types=bool,
            default=True,
            desc='Set to True to record design variables at the '
            'driver level')
        self.recording_options.declare(
            'record_responses',
            types=bool,
            default=False,
            desc='Set to True to record responses at the driver level')
        self.recording_options.declare(
            'record_objectives',
            types=bool,
            default=True,
            desc='Set to True to record objectives at the driver level')
        self.recording_options.declare(
            'record_constraints',
            types=bool,
            default=True,
            desc='Set to True to record constraints at the '
            'driver level')
        self.recording_options.declare(
            'includes',
            types=list,
            default=[],
            desc='Patterns for variables to include in recording')
        self.recording_options.declare(
            'excludes',
            types=list,
            default=[],
            desc='Patterns for vars to exclude in recording '
            '(processed post-includes)')
        self.recording_options.declare(
            'record_derivatives',
            types=bool,
            default=False,
            desc='Set to True to record derivatives at the driver '
            'level')
        self.recording_options.declare(
            'record_inputs',
            types=bool,
            default=True,
            desc='Set to True to record inputs at the driver level')

        # What the driver supports.
        self.supports = OptionsDictionary(parent_name=type(self).__name__)
        self.supports.declare('inequality_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('equality_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('linear_constraints', types=bool, default=False)
        self.supports.declare('two_sided_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('multiple_objectives', types=bool, default=False)
        self.supports.declare('integer_design_vars', types=bool, default=False)
        self.supports.declare('gradients', types=bool, default=False)
        self.supports.declare('active_set', types=bool, default=False)
        self.supports.declare('simultaneous_derivatives',
                              types=bool,
                              default=False)
        self.supports.declare('total_jac_sparsity', types=bool, default=False)

        self.iter_count = 0
        self.cite = ""

        self._coloring_info = coloring_mod._DEF_COMP_SPARSITY_ARGS.copy()
        self._coloring_info['coloring'] = None
        self._coloring_info['dynamic'] = False
        self._coloring_info['static'] = None

        self._total_jac_sparsity = None
        self._res_jacs = {}
        self._total_jac = None

        self.fail = False

        self._declare_options()
        self.options.update(kwargs)

    @property
    def msginfo(self):
        """
        Return info to prepend to messages.

        Returns
        -------
        str
            Info to prepend to messages.
        """
        return type(self).__name__

    def add_recorder(self, recorder):
        """
        Add a recorder to the driver.

        Parameters
        ----------
        recorder : CaseRecorder
           A recorder instance.
        """
        self._rec_mgr.append(recorder)
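
        # Hedged usage sketch (not part of the source): a typical script calls
        #     prob.driver.add_recorder(SqliteRecorder('driver_cases.sql'))
        # where prob is an assumed Problem and SqliteRecorder is the recorder
        # class from openmdao.api.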

    def cleanup(self):
        """
        Clean up resources prior to exit.
        """
        # shut down all recorders
        self._rec_mgr.shutdown()

    def _declare_options(self):
        """
        Declare options before kwargs are processed in the init method.

        This is optionally implemented by subclasses of Driver.
        """
        pass

    def _setup_comm(self, comm):
        """
        Perform any driver-specific setup of communicators for the model.

        Parameters
        ----------
        comm : MPI.Comm or <FakeComm> or None
            The communicator for the Problem.

        Returns
        -------
        MPI.Comm or <FakeComm> or None
            The communicator for the Problem model.
        """
        return comm

    def _setup_driver(self, problem):
        """
        Prepare the driver for execution.

        This is the final thing to run during setup.

        Parameters
        ----------
        problem : <Problem>
            Pointer to the containing problem.
        """
        self._problem = problem
        self._recording_iter = problem._recording_iter
        model = problem.model

        self._total_jac = None

        self._has_scaling = (np.any([
            r['scaler'] is not None for r in itervalues(self._responses)
        ]) or np.any(
            [dv['scaler'] is not None for dv in itervalues(self._designvars)]))

        # Determine if any design variables are discrete.
        self._designvars_discrete = [
            dv for dv in self._designvars if dv in model._discrete_outputs
        ]
        if not self.supports['integer_design_vars'] and len(
                self._designvars_discrete) > 0:
            msg = "Discrete design variables are not supported by this driver: "
            msg += '.'.join(self._designvars_discrete)
            raise RuntimeError(msg)

        con_set = set()
        obj_set = set()
        dv_set = set()

        self._remote_dvs = dv_dict = {}
        self._remote_cons = con_dict = {}
        self._remote_objs = obj_dict = {}

        # Now determine if later we'll need to allgather cons, objs, or desvars.
        if model.comm.size > 1 and model._subsystems_allprocs:
            local_out_vars = set(model._outputs._views)
            remote_dvs = set(self._designvars) - local_out_vars
            remote_cons = set(self._cons) - local_out_vars
            remote_objs = set(self._objs) - local_out_vars
            all_remote_vois = model.comm.allgather(
                (remote_dvs, remote_cons, remote_objs))
            for rem_dvs, rem_cons, rem_objs in all_remote_vois:
                con_set.update(rem_cons)
                obj_set.update(rem_objs)
                dv_set.update(rem_dvs)

            # If we have remote VOIs, pick an owning rank for each and use that
            # to bcast to others later
            owning_ranks = model._owning_rank
            sizes = model._var_sizes['nonlinear']['output']
            for i, vname in enumerate(model._var_allprocs_abs_names['output']):
                owner = owning_ranks[vname]
                if vname in dv_set:
                    dv_dict[vname] = (owner, sizes[owner, i])
                if vname in con_set:
                    con_dict[vname] = (owner, sizes[owner, i])
                if vname in obj_set:
                    obj_dict[vname] = (owner, sizes[owner, i])

        self._remote_responses = self._remote_cons.copy()
        self._remote_responses.update(self._remote_objs)

        # set up simultaneous deriv coloring
        if coloring_mod._use_total_sparsity:
            # reset the coloring
            if self._coloring_info['dynamic'] or self._coloring_info[
                    'static'] is not None:
                self._coloring_info['coloring'] = None

            coloring = self._get_static_coloring()
            if coloring is not None and self.supports[
                    'simultaneous_derivatives']:
                if model._owns_approx_jac:
                    coloring._check_config_partial(model)
                else:
                    coloring._check_config_total(self)
                self._setup_simul_coloring()

    def _check_for_missing_objective(self):
        """
        Check for missing objective and raise error if no objectives found.
        """
        if len(self._objs) == 0:
            msg = "Driver requires objective to be declared"
            raise RuntimeError(msg)

    def _get_vars_to_record(self, recording_options):
        """
        Get variables to record based on recording options.

        Parameters
        ----------
        recording_options : <OptionsDictionary>
            Dictionary with recording options.

        Returns
        -------
        dict
           Dictionary containing lists of variables to record.
        """
        problem = self._problem
        model = problem.model

        if MPI:
            # TODO: Eventually, we think we can get rid of this next check.
            #       But to be safe, we are leaving it in there.
            if not model.is_active():
                raise RuntimeError(
                    "RecordingManager.startup should never be called when "
                    "running in parallel on an inactive System")
            rrank = problem.comm.rank
            rowned = model._owning_rank

        incl = recording_options['includes']
        excl = recording_options['excludes']

        # includes and excludes for outputs are specified using promoted names
        # NOTE: only local var names are in abs2prom, all will be gathered later
        abs2prom = model._var_abs2prom['output']

        all_desvars = {
            n
            for n in self._designvars
            if n in abs2prom and check_path(abs2prom[n], incl, excl, True)
        }
        all_objectives = {
            n
            for n in self._objs
            if n in abs2prom and check_path(abs2prom[n], incl, excl, True)
        }
        all_constraints = {
            n
            for n in self._cons
            if n in abs2prom and check_path(abs2prom[n], incl, excl, True)
        }

        # design variables, objectives and constraints are always in the options
        mydesvars = myobjectives = myconstraints = set()

        if recording_options['record_desvars']:
            if MPI:
                mydesvars = [n for n in all_desvars if rrank == rowned[n]]
            else:
                mydesvars = list(all_desvars)

        if recording_options['record_objectives']:
            if MPI:
                myobjectives = [
                    n for n in all_objectives if rrank == rowned[n]
                ]
            else:
                myobjectives = list(all_objectives)

        if recording_options['record_constraints']:
            if MPI:
                myconstraints = [
                    n for n in all_constraints if rrank == rowned[n]
                ]
            else:
                myconstraints = list(all_constraints)

        filtered_vars_to_record = {
            'des': mydesvars,
            'obj': myobjectives,
            'con': myconstraints
        }

        # responses (if in options)
        if 'record_responses' in recording_options:
            myresponses = set()

            if recording_options['record_responses']:
                myresponses = {
                    n
                    for n in self._responses if n in abs2prom
                    and check_path(abs2prom[n], incl, excl, True)
                }

                if MPI:
                    myresponses = [
                        n for n in myresponses if rrank == rowned[n]
                    ]

            filtered_vars_to_record['res'] = list(myresponses)

        # inputs (if in options)
        if 'record_inputs' in recording_options:
            myinputs = set()

            if recording_options['record_inputs']:
                myinputs = {
                    n
                    for n in model._inputs if check_path(n, incl, excl)
                }

                if MPI:
                    # gather the variables from all ranks to rank 0
                    all_vars = model.comm.gather(myinputs, root=0)
                    if MPI.COMM_WORLD.rank == 0:
                        myinputs = all_vars[-1]
                        for d in all_vars[:-1]:
                            myinputs.update(d)

                    myinputs = [n for n in myinputs if rrank == rowned[n]]

            filtered_vars_to_record['in'] = list(myinputs)

        # system outputs
        myoutputs = set()

        if incl:
            myoutputs = {
                n
                for n in model._outputs
                if n in abs2prom and check_path(abs2prom[n], incl, excl)
            }

            if MPI:
                # gather the variables from all ranks to rank 0
                all_vars = model.comm.gather(myoutputs, root=0)
                if MPI.COMM_WORLD.rank == 0:
                    myoutputs = all_vars[-1]
                    for d in all_vars[:-1]:
                        myoutputs.update(d)

            # de-duplicate
            myoutputs = myoutputs.difference(all_desvars, all_objectives,
                                             all_constraints)

            if MPI:
                myoutputs = [n for n in myoutputs if rrank == rowned[n]]

        filtered_vars_to_record['sys'] = list(myoutputs)

        return filtered_vars_to_record

    def _setup_recording(self):
        """
        Set up case recording.
        """
        self._filtered_vars_to_record = self._get_vars_to_record(
            self.recording_options)

        self._rec_mgr.startup(self)

        # record the system metadata to the recorders attached to this Driver
        if self.recording_options['record_model_metadata']:
            for sub in self._problem.model.system_iter(recurse=True,
                                                       include_self=True):
                self._rec_mgr.record_metadata(sub)

    def _get_voi_val(self,
                     name,
                     meta,
                     remote_vois,
                     driver_scaling=True,
                     ignore_indices=False):
        """
        Get the value of a variable of interest (objective, constraint, or design var).

        This will retrieve the value if the VOI is remote.

        Parameters
        ----------
        name : str
            Name of the variable of interest.
        meta : dict
            Metadata for the variable of interest.
        remote_vois : dict
            Dict containing (owning_rank, size) for all remote vois of a particular
            type (design var, constraint, or objective).
        driver_scaling : bool
            When True, return values that are scaled according to either the adder and scaler or
            the ref and ref0 values that were specified when add_design_var, add_objective, and
            add_constraint were called on the model. Default is True.
        ignore_indices : bool
            Set to True if the full array is desired, not just those indicated by indices.

        Returns
        -------
        float or ndarray
            The value of the named variable of interest.
        """
        model = self._problem.model
        comm = model.comm
        vec = model._outputs._views_flat
        indices = meta['indices']

        if name in remote_vois:
            owner, size = remote_vois[name]
            if owner == comm.rank:
                if indices is None or ignore_indices:
                    val = vec[name].copy()
                else:
                    val = vec[name][indices]
            else:
                if not (indices is None or ignore_indices):
                    size = len(indices)
                val = np.empty(size)

            comm.Bcast(val, root=owner)
        else:
            if name in self._designvars_discrete:
                val = model._discrete_outputs[name]

                # At present, only integers are supported by OpenMDAO drivers.
                # We check the values here.
                valid = True
                msg = "Only integer scalars or ndarrays are supported as values for " + \
                      "discrete variables when used as a design variable. "
                if np.isscalar(val) and not isinstance(val, int):
                    msg += "A value of type '{}' was specified.".format(
                        val.__class__.__name__)
                    valid = False
                elif isinstance(val,
                                np.ndarray) and not np.issubdtype(val[0], int):
                    msg += "An array of type '{}' was specified.".format(
                        val[0].__class__.__name__)
                    valid = False

                if valid is False:
                    raise ValueError(msg)

            elif indices is None or ignore_indices:
                val = vec[name].copy()
            else:
                val = vec[name][indices]

        if self._has_scaling and driver_scaling:
            # Scale design variable values
            adder = meta['adder']
            if adder is not None:
                val += adder

            scaler = meta['scaler']
            if scaler is not None:
                val *= scaler

        return val

    def get_design_var_values(self,
                              filter=None,
                              driver_scaling=True,
                              ignore_indices=False):
        """
        Return the design variable values.

        This is called to gather the initial design variable state.

        Parameters
        ----------
        filter : list
            List of desvar names used by recorders.
        driver_scaling : bool
            When True, return values that are scaled according to either the adder and scaler or
            the ref and ref0 values that were specified when add_design_var, add_objective, and
            add_constraint were called on the model. Default is True.
        ignore_indices : bool
            Set to True if the full array is desired, not just those indicated by indices.

        Returns
        -------
        dict
           Dictionary containing values of each design variable.
        """
        if filter:
            dvs = filter
        else:
            # use all the designvars
            dvs = self._designvars

        return {
            n: self._get_voi_val(n,
                                 self._designvars[n],
                                 self._remote_dvs,
                                 driver_scaling=driver_scaling,
                                 ignore_indices=ignore_indices)
            for n in dvs
        }

    def set_design_var(self, name, value):
        """
        Set the value of a design variable.

        Parameters
        ----------
        name : str
            Global pathname of the design variable.
        value : float or ndarray
            Value for the design variable.
        """
        problem = self._problem

        if (name in self._remote_dvs
                and problem.model._owning_rank[name] != problem.comm.rank):
            return

        meta = self._designvars[name]
        indices = meta['indices']
        if indices is None:
            indices = slice(None)

        if name in self._designvars_discrete:
            problem.model._discrete_outputs[name] = int(value)

        else:
            desvar = problem.model._outputs._views_flat[name]
            desvar[indices] = value

            # Undo driver scaling when setting design var values into model.
            if self._has_scaling:
                scaler = meta['scaler']
                if scaler is not None:
                    desvar[indices] *= 1.0 / scaler

                adder = meta['adder']
                if adder is not None:
                    desvar[indices] -= adder

    def get_response_values(self, filter=None):
        """
        Return response values.

        Parameters
        ----------
        filter : list
            List of response names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each response.
        """
        if filter:
            resps = filter
        else:
            resps = self._responses

        return {
            n: self._get_voi_val(n, self._responses[n], self._remote_objs)
            for n in resps
        }

    def get_objective_values(self,
                             driver_scaling=True,
                             filter=None,
                             ignore_indices=False):
        """
        Return objective values.

        Parameters
        ----------
        driver_scaling : bool
            When True, return values that are scaled according to either the adder and scaler or
            the ref and ref0 values that were specified when add_design_var, add_objective, and
            add_constraint were called on the model. Default is True.
        filter : list
            List of objective names used by recorders.
        ignore_indices : bool
            Set to True if the full array is desired, not just those indicated by indices.

        Returns
        -------
        dict
           Dictionary containing values of each objective.
        """
        if filter:
            objs = filter
        else:
            objs = self._objs

        return {
            n: self._get_voi_val(n,
                                 self._objs[n],
                                 self._remote_objs,
                                 driver_scaling=driver_scaling,
                                 ignore_indices=ignore_indices)
            for n in objs
        }

    def get_constraint_values(self,
                              ctype='all',
                              lintype='all',
                              driver_scaling=True,
                              filter=None,
                              ignore_indices=False):
        """
        Return constraint values.

        Parameters
        ----------
        ctype : string
            Default is 'all'. Optionally return just the inequality constraints
            with 'ineq' or the equality constraints with 'eq'.
        lintype : string
            Default is 'all'. Optionally return just the linear constraints
            with 'linear' or the nonlinear constraints with 'nonlinear'.
        driver_scaling : bool
            When True, return values that are scaled according to either the adder and scaler or
            the ref and ref0 values that were specified when add_design_var, add_objective, and
            add_constraint were called on the model. Default is True.
        filter : list
            List of constraint names used by recorders.
        ignore_indices : bool
            Set to True if the full array is desired, not just those indicated by indices.

        Returns
        -------
        dict
           Dictionary containing values of each constraint.
        """
        if filter is not None:
            cons = filter
        else:
            cons = self._cons

        con_dict = {}
        for name in cons:
            meta = self._cons[name]

            if lintype == 'linear' and not meta['linear']:
                continue

            if lintype == 'nonlinear' and meta['linear']:
                continue

            if ctype == 'eq' and meta['equals'] is None:
                continue

            if ctype == 'ineq' and meta['equals'] is not None:
                continue

            con_dict[name] = self._get_voi_val(name,
                                               meta,
                                               self._remote_cons,
                                               driver_scaling=driver_scaling,
                                               ignore_indices=ignore_indices)

        return con_dict

    def _get_ordered_nl_responses(self):
        """
        Return the names of nonlinear responses in the order used by the driver.

        Default order is objectives followed by nonlinear constraints.  This is used for
        simultaneous derivative coloring and sparsity determination.

        Returns
        -------
        list of str
            The nonlinear response names in order.
        """
        order = list(self._objs)
        order.extend(n for n, meta in iteritems(self._cons)
                     if not ('linear' in meta and meta['linear']))
        return order

    def _update_voi_meta(self, model):
        """
        Collect response and design var metadata from the model and size desvars and responses.

        Parameters
        ----------
        model : System
            The System that represents the entire model.

        Returns
        -------
        int
            Total size of responses, with linear constraints excluded.
        int
            Total size of design vars.
        """
        self._objs = objs = OrderedDict()
        self._cons = cons = OrderedDict()

        self._responses = resps = model.get_responses(recurse=True)
        for name, data in iteritems(resps):
            if data['type'] == 'con':
                cons[name] = data
            else:
                objs[name] = data

        response_size = sum(resps[n]['size']
                            for n in self._get_ordered_nl_responses())

        # Gather up the information for design vars.
        self._designvars = designvars = model.get_design_vars(recurse=True)
        desvar_size = sum(data['size'] for data in itervalues(designvars))

        return response_size, desvar_size

    def run(self):
        """
        Execute this driver.

        The base `Driver` just runs the model. All other drivers overload
        this method.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        with RecordingDebugging(self._get_name(), self.iter_count, self):
            self._problem.model.run_solve_nonlinear()

        self.iter_count += 1
        return False

    def _compute_totals(self,
                        of=None,
                        wrt=None,
                        return_format='flat_dict',
                        global_names=True):
        """
        Compute derivatives of desired quantities with respect to desired inputs.

        All derivatives are returned using driver scaling.

        Parameters
        ----------
        of : list of variable name strings or None
            Variables whose derivatives will be computed. Default is None, which
            uses the driver's objectives and constraints.
        wrt : list of variable name strings or None
            Variables with respect to which the derivatives will be computed.
            Default is None, which uses the driver's desvars.
        return_format : string
            Format to return the derivatives. Default is a 'flat_dict', which
            returns them in a dictionary whose keys are tuples of form (of, wrt). For
            the scipy optimizer, 'array' is also supported.
        global_names : bool
            Set to True when passing in global names to skip some translation steps.

        Returns
        -------
        derivs : object
            Derivatives in form requested by 'return_format'.
        """
        problem = self._problem
        total_jac = self._total_jac
        debug_print = 'totals' in self.options['debug_print'] and (
            not MPI or MPI.COMM_WORLD.rank == 0)

        if debug_print:
            header = 'Driver total derivatives for iteration: ' + str(
                self.iter_count)
            print(header)
            print(len(header) * '-' + '\n')

        if problem.model._owns_approx_jac:
            self._recording_iter.stack.append(('_compute_totals_approx', 0))

            try:
                if total_jac is None:
                    total_jac = _TotalJacInfo(problem,
                                              of,
                                              wrt,
                                              global_names,
                                              return_format,
                                              approx=True,
                                              debug_print=debug_print)

                    # Don't cache linear constraint jacobian
                    if not total_jac.has_lin_cons:
                        self._total_jac = total_jac

                    totals = total_jac.compute_totals_approx(initialize=True)
                else:
                    totals = total_jac.compute_totals_approx()
            finally:
                self._recording_iter.stack.pop()

        else:
            if total_jac is None:
                total_jac = _TotalJacInfo(problem,
                                          of,
                                          wrt,
                                          global_names,
                                          return_format,
                                          debug_print=debug_print)

                # don't cache linear constraint jacobian
                if not total_jac.has_lin_cons:
                    self._total_jac = total_jac

            self._recording_iter.stack.append(('_compute_totals', 0))

            try:
                totals = total_jac.compute_totals()
            finally:
                self._recording_iter.stack.pop()

        if self._rec_mgr._recorders and self.recording_options[
                'record_derivatives']:
            metadata = create_local_meta(self._get_name())
            total_jac.record_derivatives(self, metadata)

        return totals

    def record_iteration(self):
        """
        Record an iteration of the current Driver.
        """
        if not self._rec_mgr._recorders:
            return

        # Get the data to record (collective calls that get across all ranks)
        opts = self.recording_options
        filt = self._filtered_vars_to_record

        if opts['record_desvars']:
            des_vars = self.get_design_var_values(driver_scaling=False,
                                                  ignore_indices=True)
        else:
            des_vars = {}

        if opts['record_objectives']:
            obj_vars = self.get_objective_values(driver_scaling=False,
                                                 ignore_indices=True)
        else:
            obj_vars = {}

        if opts['record_constraints']:
            con_vars = self.get_constraint_values(driver_scaling=False,
                                                  ignore_indices=True)
        else:
            con_vars = {}

        if opts['record_responses']:
            # res_vars = self.get_response_values()  # not really working yet
            res_vars = {}
        else:
            res_vars = {}

        des_vars = {name: des_vars[name] for name in filt['des']}
        obj_vars = {name: obj_vars[name] for name in filt['obj']}
        con_vars = {name: con_vars[name] for name in filt['con']}
        # res_vars = {name: res_vars[name] for name in filt['res']}

        model = self._problem.model

        names = model._outputs._names
        views = model._outputs._views
        sys_vars = {name: views[name] for name in names if name in filt['sys']}

        if self.recording_options['record_inputs']:
            names = model._inputs._names
            views = model._inputs._views
            in_vars = {
                name: views[name]
                for name in names if name in filt['in']
            }
        else:
            in_vars = {}

        if MPI:
            des_vars = self._gather_vars(model, des_vars)
            res_vars = self._gather_vars(model, res_vars)
            obj_vars = self._gather_vars(model, obj_vars)
            con_vars = self._gather_vars(model, con_vars)
            sys_vars = self._gather_vars(model, sys_vars)
            in_vars = self._gather_vars(model, in_vars)

        outs = {}
        if not MPI or model.comm.rank == 0:
            outs.update(des_vars)
            outs.update(res_vars)
            outs.update(obj_vars)
            outs.update(con_vars)
            outs.update(sys_vars)

        data = {'out': outs, 'in': in_vars}

        metadata = create_local_meta(self._get_name())

        self._rec_mgr.record_iteration(self, data, metadata)

    def _gather_vars(self, root, local_vars):
        """
        Gather and return only variables listed in `local_vars` from the `root` System.

        Parameters
        ----------
        root : <System>
            the root System for the Problem
        local_vars : dict
            local variable names and values

        Returns
        -------
        dct : dict
            variable names and values.
        """
        # if trace:
        #     debug("gathering vars for recording in %s" % root.pathname)
        all_vars = root.comm.gather(local_vars, root=0)
        # if trace:
        #     debug("DONE gathering rec vars for %s" % root.pathname)

        if root.comm.rank == 0:
            dct = all_vars[-1]
            for d in all_vars[:-1]:
                dct.update(d)
            return dct

    def _get_name(self):
        """
        Get name of current Driver.

        Returns
        -------
        str
            Name of current Driver.
        """
        return "Driver"

    def set_total_jac_sparsity(self, sparsity):
        """
        Set the sparsity of sub-jacobians of the total jacobian.

        Note: This currently will have no effect if you are not using the pyOptSparseDriver.

        Parameters
        ----------
        sparsity : str or dict

            ::

                # Sparsity is a nested dictionary where the outer keys are response
                # names, the inner keys are design variable names, and the value is a tuple of
                # the form (row_list, col_list, shape).
                {
                    resp1: {
                        dv1: (rows, cols, shape),  # for sub-jac d_resp1/d_dv1
                        dv2: (rows, cols, shape),
                          ...
                    },
                    resp2: {
                        ...
                    }
                    ...
                }
        """
        if self.supports['total_jac_sparsity']:
            self._total_jac_sparsity = sparsity
        else:
            raise RuntimeError(
                "Driver '%s' does not support setting of total jacobian sparsity."
                % self._get_name())

    def declare_coloring(self,
                         num_full_jacs=coloring_mod._DEF_COMP_SPARSITY_ARGS['num_full_jacs'],
                         tol=coloring_mod._DEF_COMP_SPARSITY_ARGS['tol'],
                         orders=coloring_mod._DEF_COMP_SPARSITY_ARGS['orders'],
                         perturb_size=coloring_mod._DEF_COMP_SPARSITY_ARGS['perturb_size'],
                         min_improve_pct=coloring_mod._DEF_COMP_SPARSITY_ARGS['min_improve_pct'],
                         show_summary=coloring_mod._DEF_COMP_SPARSITY_ARGS['show_summary'],
                         show_sparsity=coloring_mod._DEF_COMP_SPARSITY_ARGS['show_sparsity']):
        """
        Set options for total deriv coloring.

        Parameters
        ----------
        num_full_jacs : int
            Number of times to repeat partial jacobian computation when computing sparsity.
        tol : float
            Tolerance used to determine if an array entry is nonzero during sparsity determination.
        orders : int
            Number of orders above and below the tolerance to check during the tolerance sweep.
        perturb_size : float
            Size of input/output perturbation during generation of sparsity.
        min_improve_pct : float
            If coloring does not improve (decrease) the number of solves more than the given
            percentage, coloring will not be used.
        show_summary : bool
            If True, display summary information after generating coloring.
        show_sparsity : bool
            If True, display sparsity with coloring info after generating coloring.
        """
        self._coloring_info['num_full_jacs'] = num_full_jacs
        self._coloring_info['tol'] = tol
        self._coloring_info['orders'] = orders
        self._coloring_info['perturb_size'] = perturb_size
        self._coloring_info['min_improve_pct'] = min_improve_pct
        if self._coloring_info['static'] is None:
            self._coloring_info['dynamic'] = True
        else:
            self._coloring_info['dynamic'] = False
        self._coloring_info['coloring'] = None
        self._coloring_info['show_summary'] = show_summary
        self._coloring_info['show_sparsity'] = show_sparsity
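
    def _example_declare_coloring_usage(self):
        # Editor's illustration, not part of the original source: request
        # dynamic total-derivative coloring with three sparsity-sampling
        # passes, a wider tolerance sweep, and a summary printout.  The
        # numeric values are illustrative, not recommendations.
        self.declare_coloring(num_full_jacs=3, tol=1e-25, orders=20,
                              min_improve_pct=5., show_summary=True,
                              show_sparsity=False)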

    def use_fixed_coloring(self, coloring=coloring_mod._STD_COLORING_FNAME):
        """
        Tell the driver to use a precomputed coloring.

        Parameters
        ----------
        coloring : str
            A coloring filename.  If no arg is passed, filename will be determined
            automatically.

        """
        if self.supports['simultaneous_derivatives']:
            if coloring_mod._force_dyn_coloring and coloring is coloring_mod._STD_COLORING_FNAME:
                # force the generation of a dynamic coloring this time
                self._coloring_info['dynamic'] = True
                self._coloring_info['static'] = None
            else:
                self._coloring_info['static'] = coloring
                self._coloring_info['dynamic'] = False

            self._coloring_info['coloring'] = None
        else:
            raise RuntimeError(
                "Driver '%s' does not support simultaneous derivatives." %
                self._get_name())

    def set_simul_deriv_color(self, coloring):
        """
        See use_fixed_coloring. This method is deprecated.

        Parameters
        ----------
        coloring : str or Coloring
            Information about simultaneous coloring for design vars and responses.  If a
            string, then coloring is assumed to be the name of a file that contains the
            coloring information in pickle format. Otherwise it must be a Coloring object.
            See the docstring for Coloring for details.

        """
        warn_deprecation(
            "set_simul_deriv_color is deprecated.  Use use_fixed_coloring instead."
        )
        self.use_fixed_coloring(coloring)

    def _setup_tot_jac_sparsity(self):
        """
        Set up total jacobian subjac sparsity.

        Drivers that can use subjac sparsity should override this.
        """
        pass

    def _get_static_coloring(self):
        """
        Get the Coloring for this driver.

        If necessary, load the Coloring from a file.

        Returns
        -------
        Coloring or None
            The pre-existing or loaded Coloring, or None
        """
        info = self._coloring_info
        static = info['static']

        if isinstance(static, coloring_mod.Coloring):
            coloring = static
            info['coloring'] = coloring
        else:
            coloring = info['coloring']

        if coloring is not None:
            return coloring

        if static is coloring_mod._STD_COLORING_FNAME or isinstance(
                static, string_types):
            if static is coloring_mod._STD_COLORING_FNAME:
                fname = self._get_total_coloring_fname()
            else:
                fname = static
            print("loading total coloring from file %s" % fname)
            coloring = info['coloring'] = coloring_mod.Coloring.load(fname)
            info.update(coloring._meta)
            return coloring

    def _get_total_coloring_fname(self):
        return os.path.join(self._problem.options['coloring_dir'],
                            'total_coloring.pkl')

    def _setup_simul_coloring(self):
        """
        Set up metadata for coloring of total derivative solution.

        If set_coloring was called with a filename, load the coloring file.
        """
        # command line simul_coloring uses this env var to turn pre-existing coloring off
        if not coloring_mod._use_total_sparsity:
            return

        problem = self._problem
        if not problem.model._use_derivatives:
            simple_warning(
                "Derivatives are turned off.  Skipping simul deriv coloring.")
            return

        total_coloring = self._get_static_coloring()
        if total_coloring is None:
            return

        if total_coloring._rev and problem._orig_mode not in ('rev', 'auto'):
            revcol = total_coloring._rev[0][0]
            if revcol:
                raise RuntimeError(
                    "Simultaneous coloring does reverse solves but mode has "
                    "been set to '%s'" % problem._orig_mode)
        if total_coloring._fwd and problem._orig_mode not in ('fwd', 'auto'):
            fwdcol = total_coloring._fwd[0][0]
            if fwdcol:
                raise RuntimeError(
                    "Simultaneous coloring does forward solves but mode has "
                    "been set to '%s'" % problem._orig_mode)

    def _pre_run_model_debug_print(self):
        """
        Optionally print some debugging information before the model runs.
        """
        debug_opt = self.options['debug_print']
        if not debug_opt or debug_opt == ['totals']:
            return

        if not MPI or MPI.COMM_WORLD.rank == 0:
            header = 'Driver debug print for iter coord: {}'.format(
                self._recording_iter.get_formatted_iteration_coordinate())
            print(header)
            print(len(header) * '-')

        if 'desvars' in debug_opt:
            desvar_vals = self.get_design_var_values(driver_scaling=False,
                                                     ignore_indices=True)
            if not MPI or MPI.COMM_WORLD.rank == 0:
                print("Design Vars")
                if desvar_vals:

                    # Print desvars in their original (non-flattened) shapes.
                    meta = self._problem.model._var_allprocs_abs2meta
                    for name in desvar_vals:
                        shape = meta[name]['shape']
                        desvar_vals[name] = desvar_vals[name].reshape(shape)
                    pprint.pprint(desvar_vals)
                else:
                    print("None")
                print()

        sys.stdout.flush()

    def _post_run_model_debug_print(self):
        """
        Optionally print some debugging information after the model runs.
        """
        if 'nl_cons' in self.options['debug_print']:
            cons = self.get_constraint_values(lintype='nonlinear',
                                              driver_scaling=False)
            if not MPI or MPI.COMM_WORLD.rank == 0:
                print("Nonlinear constraints")
                if cons:
                    pprint.pprint(cons)
                else:
                    print("None")
                print()

        if 'ln_cons' in self.options['debug_print']:
            cons = self.get_constraint_values(lintype='linear',
                                              driver_scaling=False)
            if not MPI or MPI.COMM_WORLD.rank == 0:
                print("Linear constraints")
                if cons:
                    pprint.pprint(cons)
                else:
                    print("None")
                print()

        if 'objs' in self.options['debug_print']:
            objs = self.get_objective_values(driver_scaling=False)
            if not MPI or MPI.COMM_WORLD.rank == 0:
                print("Objectives")
                if objs:
                    pprint.pprint(objs)
                else:
                    print("None")
                print()

        sys.stdout.flush()
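
    def _example_enable_debug_print(self):
        # Editor's illustration, not part of the original source: the two
        # debug-print methods above only emit output for the categories named
        # in the driver's 'debug_print' option, so debugging would typically
        # be switched on like this before running the driver:
        self.options['debug_print'] = ['desvars', 'nl_cons', 'ln_cons', 'objs']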
Example #15
0
    def __init__(self, **kwargs):
        """
        Initialize the driver.

        Parameters
        ----------
        **kwargs : dict of keyword arguments
            Keyword arguments that will be mapped into the Driver options.
        """
        self._rec_mgr = RecordingManager()
        self._vars_to_record = {
            'desvarnames': set(),
            'responsenames': set(),
            'objectivenames': set(),
            'constraintnames': set(),
            'sysinclnames': set(),
        }

        self._problem = None
        self._designvars = None
        self._cons = None
        self._objs = None
        self._responses = None

        # Driver options
        self.options = OptionsDictionary()

        self.options.declare(
            'debug_print',
            types=list,
            is_valid=_is_debug_print_opts_valid,
            desc="List of what type of Driver variables to print at each "
            "iteration. Valid items in list are 'desvars', 'ln_cons', "
            "'nl_cons', 'objs'",
            default=[])

        # Case recording options
        self.recording_options = OptionsDictionary()

        self.recording_options.declare('record_metadata',
                                       types=bool,
                                       default=True,
                                       desc='Record metadata')
        self.recording_options.declare(
            'record_desvars',
            types=bool,
            default=True,
            desc='Set to True to record design variables at the '
            'driver level')
        self.recording_options.declare(
            'record_responses',
            types=bool,
            default=False,
            desc='Set to True to record responses at the driver level')
        self.recording_options.declare(
            'record_objectives',
            types=bool,
            default=True,
            desc='Set to True to record objectives at the driver level')
        self.recording_options.declare(
            'record_constraints',
            types=bool,
            default=True,
            desc='Set to True to record constraints at the '
            'driver level')
        self.recording_options.declare(
            'includes',
            types=list,
            default=['*'],
            desc='Patterns for variables to include in recording')
        self.recording_options.declare(
            'excludes',
            types=list,
            default=[],
            desc='Patterns for vars to exclude in recording '
            '(processed post-includes)')
        self.recording_options.declare(
            'record_derivatives',
            types=bool,
            default=False,
            desc='Set to True to record derivatives at the driver '
            'level')
        self.recording_options.declare(
            'record_inputs',
            types=bool,
            default=True,
            desc='Set to True to record inputs at the driver level')

        # What the driver supports.
        self.supports = OptionsDictionary()
        self.supports.declare('inequality_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('equality_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('linear_constraints', types=bool, default=False)
        self.supports.declare('two_sided_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('multiple_objectives', types=bool, default=False)
        self.supports.declare('integer_design_vars', types=bool, default=False)
        self.supports.declare('gradients', types=bool, default=False)
        self.supports.declare('active_set', types=bool, default=False)
        self.supports.declare('simultaneous_derivatives',
                              types=bool,
                              default=False)
        self.supports.declare('total_jac_sparsity', types=bool, default=False)

        self.iter_count = 0
        self._model_viewer_data = None
        self.cite = ""

        self._simul_coloring_info = None
        self._total_jac_sparsity = None
        self._res_jacs = {}
        self._total_jac = None

        self.fail = False

        self._declare_options()
        self.options.update(kwargs)
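
# Editor's usage sketch, not part of the original source.  Because the
# __init__ above calls self._declare_options() before applying **kwargs, a
# subclass normally declares its own options there and flips the 'supports'
# flags for the features it implements.  The base class is assumed to be the
# OpenMDAO Driver; `MyDriver` and its 'maxiter' option are hypothetical names
# used only for illustration.
class MyDriver(Driver):

    def _declare_options(self):
        self.options.declare('maxiter', types=int, default=100,
                             desc='hypothetical iteration limit')

    def __init__(self, **kwargs):
        super(MyDriver, self).__init__(**kwargs)
        self.supports['gradients'] = True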
Example #16
0
class Solver(object):
    """
    Base solver class.

    This class is subclassed by NonlinearSolver and LinearSolver,
    which are in turn subclassed by actual solver implementations.

    Attributes
    ----------
    _system : <System>
        Pointer to the owning system.
    _depth : int
        How many subsolvers deep this solver is (0 means not a subsolver).
    _vec_names : [str, ...]
        List of right-hand-side (RHS) vector names.
    _mode : str
        'fwd' or 'rev', applicable to linear solvers only.
    _iter_count : int
        Number of iterations for the current invocation of the solver.
    _rec_mgr : <RecordingManager>
        object that manages all recorders added to this solver
    cite : str
        Listing of relevant citations that should be referenced when
        publishing work that uses this class.
    options : <OptionsDictionary>
        Options dictionary.
    recording_options : <OptionsDictionary>
        Recording options dictionary.
    supports : <OptionsDictionary>
        Options dictionary describing what features are supported by this
        solver.
    _filtered_vars_to_record : Dict
        Dict of list of var names to record
    _norm0 : float
        Normalization factor
    _problem_meta : dict
        Problem level metadata.
    """

    # Object to store some formatting for iprint that is shared across all solvers.
    SOLVER = 'base_solver'

    def __init__(self, **kwargs):
        """
        Initialize all attributes.

        Parameters
        ----------
        **kwargs : dict of keyword arguments
            Keyword arguments that will be mapped into the Solver options.
        """
        self._system = None
        self._depth = 0
        self._vec_names = None
        self._mode = 'fwd'
        self._iter_count = 0
        self._problem_meta = None

        # Solver options
        self.options = OptionsDictionary(parent_name=self.msginfo)
        self.options.declare('maxiter',
                             types=int,
                             default=10,
                             desc='maximum number of iterations')
        self.options.declare('atol',
                             default=1e-10,
                             desc='absolute error tolerance')
        self.options.declare('rtol',
                             default=1e-10,
                             desc='relative error tolerance')
        self.options.declare('iprint',
                             types=int,
                             default=1,
                             desc='whether to print output')
        self.options.declare(
            'err_on_non_converge',
            types=bool,
            default=False,
            desc="When True, AnalysisError will be raised if we don't converge."
        )

        # Case recording options
        self.recording_options = OptionsDictionary(parent_name=self.msginfo)
        self.recording_options.declare(
            'record_abs_error',
            types=bool,
            default=True,
            desc='Set to True to record absolute error at the solver level')
        self.recording_options.declare(
            'record_rel_error',
            types=bool,
            default=True,
            desc='Set to True to record relative error at the solver level')
        self.recording_options.declare(
            'record_inputs',
            types=bool,
            default=True,
            desc='Set to True to record inputs at the solver level')
        self.recording_options.declare(
            'record_outputs',
            types=bool,
            default=True,
            desc='Set to True to record outputs at the solver level')
        self.recording_options.declare(
            'record_solver_residuals',
            types=bool,
            default=False,
            desc='Set to True to record residuals at the solver level')
        self.recording_options.declare(
            'record_metadata',
            types=bool,
            default=True,
            desc='Deprecated. Recording of metadata will always be done',
            deprecation="The recording option, record_metadata, on Solver is "
                        "deprecated. Recording of metadata will always be done")
        self.recording_options.declare(
            'includes',
            types=list,
            default=['*'],
            desc="Patterns for variables to include in recording. "
                 "Paths are relative to solver's Group. Uses fnmatch wildcards")
        self.recording_options.declare(
            'excludes',
            types=list,
            default=[],
            desc="Patterns for vars to exclude in recording "
                 "(processed post-includes). Paths are relative to solver's "
                 "Group. Uses fnmatch wildcards")
        # Case recording related
        self._filtered_vars_to_record = {}
        self._norm0 = 0.0

        # What the solver supports.
        self.supports = OptionsDictionary(parent_name=self.msginfo)
        self.supports.declare('gradients', types=bool, default=False)
        self.supports.declare('implicit_components', types=bool, default=False)

        self._declare_options()
        self.options.update(kwargs)

        self._rec_mgr = RecordingManager()

        self.cite = ""

    @property
    def msginfo(self):
        """
        Return info to prepend to messages.

        Returns
        -------
        str
            Info to prepend to messages.
        """
        if self._system is None:
            return type(self).__name__
        return '{} in {}'.format(type(self).__name__, self._system().msginfo)

    @property
    def _recording_iter(self):
        if self._problem_meta is None:
            raise RuntimeError(
                f"{self.msginfo}: Can't access recording_iter because "
                "_setup_solvers has not been called.")
        return self._problem_meta['recording_iter']

    @property
    def _solver_info(self):
        if self._problem_meta is None:
            raise RuntimeError(
                f"{self.msginfo}: Can't access solver_info because _setup_solvers "
                "has not been called.")
        return self._problem_meta['solver_info']

    def _assembled_jac_solver_iter(self):
        """
        Return an empty generator of lin solvers using assembled jacs.
        """
        for i in ():
            yield

    def add_recorder(self, recorder):
        """
        Add a recorder to the solver's RecordingManager.

        Parameters
        ----------
        recorder : <CaseRecorder>
           A recorder instance to be added to RecManager.
        """
        if MPI:
            raise RuntimeError(
                "Recording of Solvers when running parallel code is not supported yet"
            )
        self._rec_mgr.append(recorder)
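
    def _example_add_recorder_usage(self):
        # Editor's illustration, not part of the original source: attach a
        # case recorder to this solver and also record residuals.
        # SqliteRecorder is assumed to be importable from openmdao.api in the
        # version this snippet comes from; under MPI add_recorder raises.
        from openmdao.api import SqliteRecorder

        self.recording_options['record_solver_residuals'] = True
        self.add_recorder(SqliteRecorder('solver_cases.sql'))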

    def _declare_options(self):
        """
        Declare options before kwargs are processed in the init method.

        This is optionally implemented by subclasses of Solver.
        """
        pass

    def _setup_solvers(self, system, depth):
        """
        Assign system instance, set depth, and optionally perform setup.

        Parameters
        ----------
        system : <System>
            pointer to the owning system.
        depth : int
            depth of the current system (already incremented).
        """
        self._system = weakref.ref(system)
        self._depth = depth
        self._problem_meta = system._problem_meta

        if system.pathname:
            parent_name = self.msginfo
            self.options._parent_name = parent_name
            self.recording_options._parent_name = parent_name
            self.supports._parent_name = parent_name

        if isinstance(self, LinearSolver) and not system._use_derivatives:
            return

        self._rec_mgr.startup(self)

        myoutputs = myresiduals = myinputs = []
        incl = self.recording_options['includes']
        excl = self.recording_options['excludes']

        # doesn't matter if we're a linear or nonlinear solver.  The names for
        # inputs, outputs, and residuals are the same for both the 'linear' and 'nonlinear'
        # vectors.
        if system.pathname:
            incl = ['.'.join((system.pathname, i)) for i in incl]
            excl = ['.'.join((system.pathname, i)) for i in excl]

        if self.recording_options['record_solver_residuals']:
            myresiduals = [
                n for n in system._residuals._abs_iter()
                if check_path(n, incl, excl)
            ]

        if self.recording_options['record_outputs']:
            myoutputs = [
                n for n in system._outputs._abs_iter()
                if check_path(n, incl, excl)
            ]

        if self.recording_options['record_inputs']:
            myinputs = [
                n for n in system._inputs._abs_iter()
                if check_path(n, incl, excl)
            ]

        self._filtered_vars_to_record = {
            'input': myinputs,
            'output': myoutputs,
            'residual': myresiduals
        }

    def _set_solver_print(self, level=2, type_='all'):
        """
        Control printing for solvers and subsolvers in the model.

        Parameters
        ----------
        level : int
            iprint level. Set to 2 to print residuals each iteration; set to 1
            to print just the iteration totals; set to 0 to disable all printing
            except for failures, and set to -1 to disable all printing including failures.
        type_ : str
            Type of solver to set: 'LN' for linear, 'NL' for nonlinear, or 'all' for all.
        """
        self.options['iprint'] = level

    def _mpi_print(self, iteration, abs_res, rel_res):
        """
        Print residuals from an iteration.

        Parameters
        ----------
        iteration : int
            iteration counter, 0-based.
        abs_res : float
            current absolute residual norm.
        rel_res : float
            current relative residual norm.
        """
        if (self.options['iprint'] == 2
                and (self._system().comm.rank == 0
                     or os.environ.get('USE_PROC_FILES'))):

            prefix = self._solver_info.prefix
            solver_name = self.SOLVER

            if prefix.endswith('precon:'):
                solver_name = solver_name[3:]

            print_str = prefix + solver_name
            print_str += ' %d ; %.9g %.9g' % (iteration, abs_res, rel_res)
            print(print_str)

    def _mpi_print_header(self):
        """
        Print header text before solving.
        """
        if (self.options['iprint'] > 0
                and (self._system().comm.rank == 0
                     or os.environ.get('USE_PROC_FILES'))):

            pathname = self._system().pathname
            if pathname:
                nchar = len(pathname)
                prefix = self._solver_info.prefix
                header = prefix + "\n"
                header += prefix + nchar * "=" + "\n"
                header += prefix + pathname + "\n"
                header += prefix + nchar * "="
                print(header)

    def _iter_initialize(self):
        """
        Perform any necessary pre-processing operations.

        Returns
        -------
        float
            initial error.
        float
            error at the first iteration.
        """
        pass

    def _run_apply(self):
        """
        Run the appropriate apply method on the system.
        """
        pass

    def _linearize(self):
        """
        Perform any required linearization operations such as matrix factorization.
        """
        pass

    def _linearize_children(self):
        """
        Return a flag that is True when we need to call linearize on our subsystems' solvers.

        Returns
        -------
        boolean
            Flag for indicating child linearization.
        """
        return True

    def __str__(self):
        """
        Return a string representation of the solver.

        Returns
        -------
        str
            String representation of the solver.
        """
        return self.SOLVER

    def record_iteration(self, **kwargs):
        """
        Record an iteration of the current Solver.

        Parameters
        ----------
        **kwargs : dict
            Keyword arguments (used for abs and rel error).
        """
        if not self._rec_mgr._recorders:
            return

        metadata = create_local_meta(self.SOLVER)

        # Get the data
        data = {
            'abs': kwargs.get('abs') if self.recording_options['record_abs_error'] else None,
            'rel': kwargs.get('rel') if self.recording_options['record_rel_error'] else None,
            'input': {},
            'output': {},
            'residual': {}
        }

        system = self._system()
        vec_name = 'nonlinear' if isinstance(self, NonlinearSolver) else 'linear'
        filt = self._filtered_vars_to_record
        parallel = self._rec_mgr._check_parallel() if system.comm.size > 1 else False

        if self.recording_options['record_outputs']:
            data['output'] = system._retrieve_data_of_kind(
                filt, 'output', vec_name, parallel)

        if self.recording_options['record_inputs']:
            data['input'] = system._retrieve_data_of_kind(
                filt, 'input', vec_name, parallel)

        if self.recording_options['record_solver_residuals']:
            data['residual'] = system._retrieve_data_of_kind(
                filt, 'residual', vec_name, parallel)

        self._rec_mgr.record_iteration(self, data, metadata)

    def cleanup(self):
        """
        Clean up resources prior to exit.
        """
        # shut down all recorders
        self._rec_mgr.shutdown()

    def _set_complex_step_mode(self, active):
        """
        Turn on or off complex stepping mode.

        Recurses to turn on or off complex stepping mode in all subsystems and their vectors.

        Parameters
        ----------
        active : bool
            Complex mode flag; set to True prior to commencing complex step.
        """
        pass

    def _disallow_distrib_solve(self):
        """
        Raise an exception if our system or any subsystems are distributed or non-local.
        """
        s = self._system()
        if s.comm.size == 1:
            return

        from openmdao.core.group import Group
        if s._has_distrib_vars or (isinstance(s, Group)
                                   and s._contains_parallel_group):
            msg = "{} linear solver in {} cannot be used in or above a ParallelGroup or a " + \
                "distributed component."
            raise RuntimeError(msg.format(type(self).__name__, s.msginfo))
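
# Editor's usage sketch, not part of the original source.  Concrete solvers
# inherit the options declared in Solver.__init__ above, so an instance
# (here called `solver`, assumed to be a nonlinear Solver subclass such as a
# Newton-type solver) would typically be configured like this:
def configure_solver(solver):
    solver.options['maxiter'] = 50
    solver.options['atol'] = 1e-8
    solver.options['rtol'] = 1e-8
    solver.options['iprint'] = 2   # print residuals every iteration
    solver.recording_options['record_solver_residuals'] = True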
Example #17
0
    def __init__(self):
        """
        Initialize the driver.
        """
        self._rec_mgr = RecordingManager()
        self._vars_to_record = {
            'desvarnames': set(),
            'responsenames': set(),
            'objectivenames': set(),
            'constraintnames': set(),
            'sysinclnames': set(),
        }

        self._problem = None
        self._designvars = None
        self._cons = None
        self._objs = None
        self._responses = None
        self.options = OptionsDictionary()
        self.recording_options = OptionsDictionary()

        ###########################
        self.recording_options.declare('record_desvars', types=bool, default=True,
                                       desc='Set to True to record design variables at the '
                                            'driver level')
        self.recording_options.declare('record_responses', types=bool, default=False,
                                       desc='Set to True to record responses at the driver level')
        self.recording_options.declare('record_objectives', types=bool, default=True,
                                       desc='Set to True to record objectives at the driver level')
        self.recording_options.declare('record_constraints', types=bool, default=True,
                                       desc='Set to True to record constraints at the driver level')
        self.recording_options.declare('includes', types=list, default=[],
                                       desc='Patterns for variables to include in recording. '
                                            'Uses fnmatch wildcards')
        self.recording_options.declare('excludes', types=list, default=[],
                                       desc='Patterns for vars to exclude in recording '
                                            '(processed post-includes). Uses fnmatch wildcards')
        self.recording_options.declare('record_derivatives', types=bool, default=False,
                                       desc='Set to True to record derivatives at the driver level')
        ###########################

        # What the driver supports.
        self.supports = OptionsDictionary()
        self.supports.declare('inequality_constraints', types=bool, default=False)
        self.supports.declare('equality_constraints', types=bool, default=False)
        self.supports.declare('linear_constraints', types=bool, default=False)
        self.supports.declare('two_sided_constraints', types=bool, default=False)
        self.supports.declare('multiple_objectives', types=bool, default=False)
        self.supports.declare('integer_design_vars', types=bool, default=False)
        self.supports.declare('gradients', types=bool, default=False)
        self.supports.declare('active_set', types=bool, default=False)
        self.supports.declare('simultaneous_derivatives', types=bool, default=False)
        self.supports.declare('distributed_design_vars', types=bool, default=False)

        self.iter_count = 0
        self._model_viewer_data = None
        self.cite = ""

        self._res_jacs = {}

        self.fail = False
Example #18
0
class Driver(object):
    """ Base class for drivers in OpenMDAO. Drivers can only be placed in a
    Problem, and every problem has a Driver. Driver is the simplest driver that
    runs (solves using solve_nonlinear) a problem once.
    """

    def __init__(self):
        super(Driver, self).__init__()
        self.recorders = RecordingManager()

        # What this driver supports
        self.supports = OptionsDictionary(read_only=True)
        self.supports.add_option('inequality_constraints', True)
        self.supports.add_option('equality_constraints', True)
        self.supports.add_option('linear_constraints', True)
        self.supports.add_option('multiple_objectives', True)
        self.supports.add_option('two_sided_constraints', True)
        self.supports.add_option('integer_design_vars', True)

        # inheriting Drivers should override this setting and set it to False
        # if they don't use gradients.
        self.supports.add_option('gradients', True)

        # This driver's options
        self.options = OptionsDictionary()

        self._desvars = OrderedDict()
        self._objs = OrderedDict()
        self._cons = OrderedDict()

        self._voi_sets = []
        self._vars_to_record = None

        # We take root during setup
        self.root = None

        self.iter_count = 0
        self.dv_conversions = {}
        self.fn_conversions = {}

    def _setup(self):
        """ Updates metadata for params, constraints and objectives, and
        checks for errors. Also determines all variables that need to be
        gathered for case recording.
        """
        root = self.root
        desvars = OrderedDict()
        objs = OrderedDict()
        cons = OrderedDict()

        if self.__class__ is Driver:
            has_gradients = False
        else:
            has_gradients = self.supports['gradients']

        item_tups = [
            ('Parameter', self._desvars, desvars),
            ('Objective', self._objs, objs),
            ('Constraint', self._cons, cons)
        ]

        for item_name, item, newitem in item_tups:
            for name, meta in iteritems(item):

                # Check validity of variable
                if name not in root.unknowns:
                    msg = "{} '{}' not found in unknowns."
                    msg = msg.format(item_name, name)
                    raise ValueError(msg)

                rootmeta = root.unknowns.metadata(name)
                if name in self._desvars:
                    rootmeta['is_desvar'] = True
                if name in self._objs:
                    rootmeta['is_objective'] = True
                if name in self._cons:
                    rootmeta['is_constraint'] = True

                if MPI and 'src_indices' in rootmeta:
                    raise ValueError("'%s' is a distributed variable and may "
                                     "not be used as a design var, objective, "
                                     "or constraint." % name)

                if has_gradients and rootmeta.get('pass_by_obj'):
                    if 'optimizer' in self.options:
                        oname = self.options['optimizer']
                    else:
                        oname = self.__class__.__name__
                    raise RuntimeError("%s '%s' is a 'pass_by_obj' variable "
                                       "and can't be used with a gradient "
                                       "based driver of type '%s'." %
                                       (item_name, name, oname))

                # Size is useful metadata to save
                if 'indices' in meta:
                    meta['size'] = len(meta['indices'])
                else:
                    meta['size'] = rootmeta['size']
                newitem[name] = meta

        self._desvars = desvars
        self._objs = objs
        self._cons = cons

        # Cache scalers for derivative calculation

        self.dv_conversions = OrderedDict()
        for name, meta in iteritems(desvars):
            scaler = meta.get('scaler')
            if isinstance(scaler, np.ndarray):
                if all(scaler == 1.0):
                    continue
            elif scaler == 1.0:
                continue

            self.dv_conversions[name] = np.reciprocal(scaler)

        self.fn_conversions = OrderedDict()
        for name, meta in chain(iteritems(objs), iteritems(cons)):
            scaler = meta.get('scaler')
            if isinstance(scaler, np.ndarray):
                if all(scaler == 1.0):
                    continue
            elif scaler == 1.0:
                continue

            self.fn_conversions[name] = scaler

    def _setup_communicators(self, comm, parent_dir):
        """
        Assign a communicator to the root `System`.

        Args
        ----
        comm : an MPI communicator (real or fake)
            The communicator being offered by the Problem.

        parent_dir : str
            Absolute directory of the Problem.
        """
        self.root._setup_communicators(comm, parent_dir)

    def get_req_procs(self):
        """
        Returns
        -------
        tuple
            A tuple of the form (min_procs, max_procs), indicating the
            min and max processors usable by this `Driver`.
        """
        return self.root.get_req_procs()

    def cleanup(self):
        """ Clean up resources prior to exit. """
        self.recorders.close()

    def _map_voi_indices(self):
        poi_indices = OrderedDict()
        qoi_indices = OrderedDict()
        for name, meta in chain(iteritems(self._cons), iteritems(self._objs)):
            # set indices of interest
            if 'indices' in meta:
                qoi_indices[name] = meta['indices']

        for name, meta in iteritems(self._desvars):
            # set indices of interest
            if 'indices' in meta:
                poi_indices[name] = meta['indices']

        return poi_indices, qoi_indices

    def _of_interest(self, voi_list):
        """Return a list of tuples, with the given voi_list organized
        into tuples based on the previously defined grouping of VOIs.
        """
        vois = []
        remaining = set(voi_list)
        for voi_set in self._voi_sets:
            vois.append([])

        for i, voi_set in enumerate(self._voi_sets):
            for v in voi_list:
                if v in voi_set:
                    vois[i].append(v)
                    remaining.remove(v)

        vois = [tuple(x) for x in vois if x]

        for v in voi_list:
            if v in remaining:
                vois.append((v,))

        return vois

    def desvars_of_interest(self):
        """
        Returns
        -------
        list of tuples of str
            The list of design vars, organized into tuples according to
            previously defined VOI groups.
        """
        return self._of_interest(self._desvars)

    def outputs_of_interest(self):
        """
        Returns
        -------
        list of tuples of str
            The list of constraints and objectives, organized into tuples
            according to previously defined VOI groups.
        """
        return self._of_interest(list(chain(self._objs, self._cons)))

    def parallel_derivs(self, vnames):
        """
        Specifies that the named variables of interest are to be grouped
        together so that their derivatives can be solved for concurrently.

        Args
        ----
        vnames : iter of str
            The names of variables of interest that are to be grouped.
        """
        #make sure all vnames are desvars, constraints, or objectives
        for n in vnames:
            if not (n in self._desvars or n in self._objs or n in self._cons):
                raise RuntimeError("'%s' is not a param, objective, or "
                                   "constraint" % n)
        for grp in self._voi_sets:
            for vname in vnames:
                if vname in grp:
                    msg = "'%s' cannot be added to VOI set %s because it " + \
                          "already exists in VOI set: %s"
                    raise RuntimeError(msg % (vname, tuple(vnames), grp))

        param_intsect = set(vnames).intersection(self._desvars.keys())

        if param_intsect and len(param_intsect) != len(vnames):
            raise RuntimeError("%s cannot be grouped because %s are design "
                               "vars and %s are not." %
                               (vnames, list(param_intsect),
                                list(set(vnames).difference(param_intsect))))

        if MPI:
            self._voi_sets.append(tuple(vnames))
        else:
            warnings.warn("parallel derivs %s specified but not running under MPI")

    def add_recorder(self, recorder):
        """
        Adds a recorder to the driver.

        Args
        ----
        recorder : BaseRecorder
           A recorder instance.
        """
        self.recorders.append(recorder)

    def add_desvar(self, name, lower=None, upper=None,
                   low=None, high=None,
                   indices=None, adder=0.0, scaler=1.0):
        """
        Adds a design variable to this driver.

        Args
        ----
        name : string
           Name of the design variable in the root system.

        lower : float or ndarray, optional
            Lower boundary for the param

        upper : float or ndarray, optional
            Upper boundary for the param

        indices : iter of int, optional
            If a param is an array, these indicate which entries are of
            interest for derivatives.

        adder : float or ndarray, optional
            Value to add to the model value to get the scaled value. Adder
            is first in precedence.

        scaler : float or ndarray, optional
            Value to multiply the model value by to get the scaled value. Scaler
            is second in precedence.
        """

        if name in self._desvars:
            msg = "Desvar '{}' already exists."
            raise RuntimeError(msg.format(name))

        if low is not None or high is not None:
            warnings.simplefilter('always', DeprecationWarning)
            warnings.warn("'low' and 'high' are deprecated. "
                          "Use 'lower' and 'upper' instead.",
                          DeprecationWarning,stacklevel=2)
            warnings.simplefilter('ignore', DeprecationWarning)
            if low is not None and lower is None:
                lower = low
            if high is not None and upper is None:
                upper = high

        if isinstance(lower, np.ndarray):
            lower = lower.flatten()
        elif lower is None or lower == -float('inf'):
            lower = -sys.float_info.max

        if isinstance(upper, np.ndarray):
            upper = upper.flatten()
        elif upper is None or upper == float('inf'):
            upper = sys.float_info.max

        if isinstance(adder, np.ndarray):
            adder = adder.flatten().astype('float')
        else:
            adder = float(adder)

        if isinstance(scaler, np.ndarray):
            scaler = scaler.flatten().astype('float')
        else:
            scaler = float(scaler)

        # Scale the lower and upper values
        lower = (lower + adder)*scaler
        upper = (upper + adder)*scaler

        param = OrderedDict()
        param['lower'] = lower
        param['upper'] = upper
        param['adder'] = adder
        param['scaler'] = scaler
        if indices:
            param['indices'] = np.array(indices, dtype=int)

        self._desvars[name] = param
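
    def _example_add_desvar_scaling(self):
        # Editor's illustration, not part of the original source: add_desvar
        # stores driver-scaled bounds, computed as (bound + adder) * scaler.
        # With adder=0.0 and scaler=2.0, lower=1.0 and upper=3.0 are stored
        # as 2.0 and 6.0 respectively.
        self.add_desvar('x', lower=1.0, upper=3.0, adder=0.0, scaler=2.0)
        meta = self.get_desvar_metadata()['x']
        assert meta['lower'] == 2.0 and meta['upper'] == 6.0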

    def add_param(self, name, lower=None, upper=None, indices=None, adder=0.0,
                  scaler=1.0):
        """
        Deprecated.  Use ``add_desvar`` instead.
        """
        warnings.simplefilter('always', DeprecationWarning)
        warnings.warn("Driver.add_param() is deprecated. Use add_desvar() instead.",
                      DeprecationWarning,stacklevel=2)
        warnings.simplefilter('ignore', DeprecationWarning)

        self.add_desvar(name, lower=lower, upper=upper, indices=indices, adder=adder,
                        scaler=scaler)

    def get_desvars(self):
        """ Returns a dict of possibly distributed design variables.

        Returns
        -------
        dict
            Keys are the param object names, and the values are the param
            values.
        """
        desvars = OrderedDict()

        for key, meta in iteritems(self._desvars):
            desvars[key] = self._get_distrib_var(key, meta, 'design var')

        return desvars

    def _get_distrib_var(self, name, meta, voi_type):
        uvec = self.root.unknowns
        comm = self.root.comm
        nproc = comm.size
        iproc = comm.rank

        if nproc > 1:
            owner = self.root._owning_ranks[name]
            if iproc == owner:
                flatval = uvec._dat[name].val
            else:
                flatval = None
        else:
            owner = 0
            flatval = uvec._dat[name].val

        if 'indices' in meta and not (nproc > 1 and owner != iproc):
            # Make sure our indices are valid
            try:
                flatval = flatval[meta['indices']]
            except IndexError:
                msg = "Index for {} '{}' is out of bounds. "
                msg += "Requested index: {}, "
                msg += "shape: {}."
                raise IndexError(msg.format(voi_type, name, meta['indices'],
                                            uvec.metadata(name)['shape']))

        if nproc > 1:
            # TODO: use Bcast for improved performance
            if trace:
                debug("%s.driver._get_distrib_var bcast: val=%s" % (self.root.pathname, flatval))
            flatval = comm.bcast(flatval, root=owner)
            if trace:
                debug("%s.driver._get_distrib_var bcast DONE" % self.root.pathname)

        scaler = meta['scaler']
        adder = meta['adder']

        if isinstance(scaler, np.ndarray) or isinstance(adder, np.ndarray) \
           or scaler != 1.0 or adder != 0.0:
            return (flatval + adder)*scaler
        else:
            return flatval

    def get_desvar_metadata(self):
        """ Returns a dict of design variable metadata.

        Returns
        -------
        dict
            Keys are the design variable names, and the values are the
            design variable metadata dictionaries.
        """
        return self._desvars

    def set_desvar(self, name, value):
        """ Sets a design variable.

        Args
        ----
        name : string
            Name of the design variable in the root system.

        value : ndarray or float
            Value to assign to the design variable.
        """
        val = self.root.unknowns._dat[name].val
        if not isinstance(val, _ByObjWrapper) and \
                       self.root.unknowns._dat[name].val.size == 0:
            return

        meta = self._desvars[name]
        scaler = meta['scaler']
        adder = meta['adder']
        if isinstance(scaler, np.ndarray) or isinstance(adder, np.ndarray) \
           or scaler != 1.0 or adder != 0.0:
            value = value/scaler - adder

        # Only set the indices we requested when we set the design variable.
        idx = meta.get('indices')
        if idx is not None:
            self.root.unknowns[name][idx] = value
        else:
            self.root.unknowns[name] = value

    def add_objective(self, name, indices=None, adder=0.0, scaler=1.0):
        """ Adds an objective to this driver.

        Args
        ----
        name : string
            Promoted pathname of the output that will serve as the objective.

        indices : iter of int, optional
            If an objective is an array, these indicate which entries are of
            interest for derivatives.

        adder : float or ndarray, optional
            Value to add to the model value to get the scaled value. Adder
            is first in precedence.

        scaler : float or ndarray, optional
            Value to multiply the model value by to get the scaled value. Scaler
            is second in precedence.
        """
        if len(self._objs) > 0 and not self.supports["multiple_objectives"]:
            raise RuntimeError("Attempted to add multiple objectives to a "
                               "driver that does not support multiple "
                               "objectives.")

        if name in self._objs:
            msg = "Objective '{}' already exists."
            raise RuntimeError(msg.format(name))

        if isinstance(adder, np.ndarray):
            adder = adder.flatten().astype('float')
        else:
            adder = float(adder)

        if isinstance(scaler, np.ndarray):
            scaler = scaler.flatten().astype('float')
        else:
            scaler = float(scaler)

        obj = OrderedDict()
        obj['adder'] = adder
        obj['scaler'] = scaler
        if indices:
            obj['indices'] = indices
            if len(indices) > 1 and not self.supports['multiple_objectives']:
                raise RuntimeError("Multiple objective indices specified for "
                                   "variable '%s', but driver '%s' doesn't "
                                   "support multiple objectives." %
                                   (name, type(self).__name__))
        self._objs[name] = obj

    def get_objectives(self, return_type='dict'):
        """ Gets all objectives of this driver.

        Args
        ----
        return_type : string
            Set to 'dict' to return a dictionary, or set to 'array' to return a
            flat ndarray.

        Returns
        -------
        dict (for return_type 'dict')
            Key is the objective name string, value is an ndarray with the values.

        ndarray (for return_type 'array')
            Array containing all objective values in the order they were added.
        """
        objs = OrderedDict()

        for key, meta in iteritems(self._objs):
            objs[key] = self._get_distrib_var(key, meta, 'objective')

        return objs

    def add_constraint(self, name, lower=None, upper=None, equals=None,
                       linear=False, jacs=None, indices=None, adder=0.0,
                       scaler=1.0):
        """ Adds a constraint to this driver. For inequality constraints,
        `lower` or `upper` must be specified. For equality constraints, `equals`
        must be specified.

        Args
        ----
        name : string
            Promoted pathname of the output that will serve as the quantity to
            constrain.

        lower : float or ndarray, optional
             Constrain the quantity to be greater than or equal to this value.

        upper : float or ndarray, optional
             Constrain the quantity to be less than or equal to this value.

        equals : float or ndarray, optional
             Constrain the quantity to be equal to this value.

        linear : bool, optional
            Set to True if this constraint is linear with respect to all design
            variables so that it can be calculated once and cached.

        jacs : dict of functions, optional
            Dictionary of user-defined functions that return the flattened
            Jacobian of this constraint with respect to the design vars of
            this driver, as indicated by the dictionary keys. Default is None
            to let OpenMDAO calculate all derivatives. Note, this is currently
            unsupported

        indices : iter of int, optional
            If a constraint is an array, these indicate which entries are of
            interest for derivatives.

        adder : float or ndarray, optional
            Value to add to the model value to get the scaled value. Adder
            is first in precedence.

        scaler : float or ndarray, optional
            Value to multiply the model value by to get the scaled value. Scaler
            is second in precedence.
        """

        if name in self._cons:
            msg = "Constraint '{}' already exists."
            raise RuntimeError(msg.format(name))

        if equals is not None and (lower is not None or upper is not None):
            msg = "Constraint '{}' cannot be both equality and inequality."
            raise RuntimeError(msg.format(name))
        if equals is not None and self.supports['equality_constraints'] is False:
            msg = "Driver does not support equality constraint '{}'."
            raise RuntimeError(msg.format(name))
        if equals is None and self.supports['inequality_constraints'] is False:
            msg = "Driver does not support inequality constraint '{}'."
            raise RuntimeError(msg.format(name))
        if lower is not None and upper is not None and self.supports['two_sided_constraints'] is False:
            msg = "Driver does not support 2-sided constraint '{}'."
            raise RuntimeError(msg.format(name))
        if lower is None and upper is None and equals is None:
            msg = "Constraint '{}' needs to define lower, upper, or equals."
            raise RuntimeError(msg.format(name))

        if isinstance(scaler, np.ndarray):
            scaler = scaler.flatten().astype('float')
        else:
            scaler = float(scaler)

        if isinstance(adder, np.ndarray):
            adder = adder.flatten().astype('float')
        else:
            adder = float(adder)

        if isinstance(lower, np.ndarray):
            lower = lower.flatten()
        if isinstance(upper, np.ndarray):
            upper = upper.flatten()
        if isinstance(equals, np.ndarray):
            equals = equals.flatten()

        # Scale the lower and upper values
        if lower is not None:
            lower = (lower + adder)*scaler
        if upper is not None:
            upper = (upper + adder)*scaler
        if equals is not None:
            equals = (equals + adder)*scaler

        con = OrderedDict()
        con['lower'] = lower
        con['upper'] = upper
        con['equals'] = equals
        con['linear'] = linear
        con['adder'] = adder
        con['scaler'] = scaler
        con['jacs'] = jacs

        if indices:
            con['indices'] = indices
        self._cons[name] = con
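
    def _example_add_constraint_usage(self):
        # Editor's illustration, not part of the original source: a one-sided
        # inequality constraint whose upper bound is stored in driver-scaled
        # form, (10.0 + 0.0) * 0.5 = 5.0.  Passing both `equals` and `upper`
        # for the same constraint would raise a RuntimeError instead.
        self.add_constraint('g', upper=10.0, scaler=0.5)
        con = self.get_constraint_metadata()['g']
        assert con['upper'] == 5.0 and con['equals'] is None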

    def get_constraints(self, ctype='all', lintype='all'):
        """ Gets all constraints for this driver.

        Args
        ----
        ctype : string
            Default is 'all'. Optionally return just the inequality constraints
            with 'ineq' or the equality constraints with 'eq'.

        lintype : string
            Default is 'all'. Optionally return just the linear constraints
            with 'linear' or the nonlinear constraints with 'nonlinear'.

        Returns
        -------
        dict
            Key is the constraint name string, value is an ndarray with the values.
        """
        cons = OrderedDict()

        for key, meta in iteritems(self._cons):

            if lintype == 'linear' and meta['linear'] is False:
                continue

            if lintype == 'nonlinear' and meta['linear']:
                continue

            if ctype == 'eq' and meta['equals'] is None:
                continue

            if ctype == 'ineq' and meta['equals'] is not None:
                continue

            cons[key] = self._get_distrib_var(key, meta, 'constraint')

        return cons

    def get_constraint_metadata(self):
        """ Returns a dict of constraint metadata.

        Returns
        -------
        dict
            Keys are the constraint names, and the values are the constraint
            metadata dictionaries.
        """
        return self._cons

    def run(self, problem):
        """ Runs the driver. This function should be overridden when inheriting.

        Args
        ----
        problem : `Problem`
            Our parent `Problem`.
        """
        self.run_once(problem)

    def run_once(self, problem):
        """ Runs root's solve_nonlinear one time

        Args
        ----
        problem : `Problem`
            Our parent `Problem`.
        """
        system = problem.root

        # Metadata Setup
        self.iter_count += 1
        metadata = self.metadata = create_local_meta(None, 'Driver')
        system.ln_solver.local_meta = metadata
        update_local_meta(metadata, (self.iter_count,))

        # Solve the system once and record results.
        with system._dircontext:
            system.solve_nonlinear(metadata=metadata)

        self.recorders.record_iteration(system, metadata)

    def calc_gradient(self, indep_list, unknown_list, mode='auto',
                      return_format='array', sparsity=None):
        """ Returns the scaled gradient for the system that is contained in
        self.root, scaled by all scalers that were specified when the desvars
        and constraints were added.

        Args
        ----
        indep_list : list of strings
            List of independent variable names that derivatives are to
            be calculated with respect to. All params must have an IndepVarComp.

        unknown_list : list of strings
            List of output or state names that derivatives are to
            be calculated for. All must be valid unknowns in OpenMDAO.

        mode : string, optional
            Derivative direction, can be 'fwd', 'rev', 'fd', or 'auto'.
            Default is 'auto', which uses mode specified on the linear solver
            in root.

        return_format : string, optional
            Format for the derivatives, can be 'array' or 'dict'.

        sparsity : dict, optional
            Dictionary that gives the relevant design variables for each
            constraint. This option is only supported in the `dict` return
            format.

        Returns
        -------
        ndarray or dict
            Jacobian of unknowns with respect to params.
        """

        J = self._problem.calc_gradient(indep_list, unknown_list, mode=mode,
                                        return_format=return_format,
                                        dv_scale=self.dv_conversions,
                                        cn_scale=self.fn_conversions,
                                        sparsity=sparsity)

        self.recorders.record_derivatives(J, self.metadata)
        return J

    def generate_docstring(self):
        """
        Generates a numpy-style docstring for a user-created Driver class.

        Returns
        -------
        docstring : str
                string that contains a basic numpy docstring.
        """
        #start the docstring off
        docstring = '    \"\"\"\n'

        #Put options into docstring
        firstTime = 1

        for key, value in sorted(vars(self).items()):
            if type(value)==OptionsDictionary:
                if key == "supports":
                    continue
                if firstTime:  #start of Options docstring
                    docstring += '\n    Options\n    -------\n'
                    firstTime = 0
                docstring += value._generate_docstring(key)

        #finish up docstring
        docstring += '\n    \"\"\"\n'
        return docstring
Example #19
class SolverBase(object):
    """ Common base class for Linear and Nonlinear solver. Should not be used
    by users. Always inherit from `LinearSolver` or `NonlinearSolver`."""
    def __init__(self):
        self.iter_count = 0
        self.options = OptionsDictionary()
        desc = 'Set to 0 to disable printing, set to 1 to print the ' \
               'residual to stdout each iteration, set to 2 to print ' \
               'subiteration residuals as well.'
        self.options.add_option('iprint', 0, values=[0, 1, 2], desc=desc)
        self.recorders = RecordingManager()
        self.local_meta = None

    def setup(self, sub):
        """ Solvers override to define post-setup initiailzation.

        Args
        ----
        sub: `System`
            System that owns this solver.
        """
        pass

    def cleanup(self):
        """ Clean up resources prior to exit. """
        self.recorders.close()

    def print_norm(self,
                   solver_string,
                   pathname,
                   iteration,
                   res,
                   res0,
                   msg=None,
                   indent=0,
                   solver='NL'):
        """ Prints out the norm of the residual in a neat readable format.

        Args
        ----
        solver_string: string
            Unique string to identify your solver type (e.g., 'LN_GS' or
            'NEWTON').

        pathname: str
            Parent system pathname.

        iteration: int
            Current iteration number

        res: float
            Absolute residual value.

        res0: float
            Baseline initial residual for relative comparison.

        msg: string, optional
            Message that indicates convergence.

        indent: int, optional
            Additional indentation levels for subiterations.

        solver: string, optional
            Solver type if not LN or NL (mostly for line search operations.)
        """
        if pathname == '':
            name = 'root'
        else:
            name = 'root.' + pathname

        # Find indentation level
        level = pathname.count('.')
        # No indentation for driver; the top solver has no indentation.
        level = level + indent

        indent = '   ' * level
        if msg is not None:
            form = indent + '[%s] %s: %s   %d | %s'
            print(form % (name, solver, solver_string, iteration, msg))
            return

        form = indent + '[%s] %s: %s   %d | %.9g %.9g'
        print(form % (name, solver, solver_string, iteration, res, res / res0))

    def print_all_convergence(self):
        """ Turns on iprint for this solver and all subsolvers. Override if
        your solver has subsolvers."""
        self.options['iprint'] = 1

    def generate_docstring(self):
        """
        Generates a numpy-style docstring for a user-created System class.

        Returns
        -------
        docstring : str
                string that contains a basic numpy docstring.

        """
        #start the docstring off
        docstring = '    \"\"\"\n'

        #Put options into docstring
        firstTime = 1
        #for py3.4, items from vars must come out in same order.
        from collections import OrderedDict
        v = OrderedDict(sorted(vars(self).items()))
        for key, value in v.items():
            if type(value) == OptionsDictionary:
                if firstTime:  #start of Options docstring
                    docstring += '\n    Options\n    -------\n'
                    firstTime = 0
                for (name, val) in sorted(value.items()):
                    docstring += "    " + key + "['"
                    docstring += name + "']"
                    docstring += " :  " + type(val).__name__
                    docstring += "("
                    if type(val).__name__ == 'str': docstring += "'"
                    docstring += str(val)
                    if type(val).__name__ == 'str': docstring += "'"
                    docstring += ")\n"

                    desc = value._options[name]['desc']
                    if (desc):
                        docstring += "        " + desc + "\n"

        #finish up docstring
        docstring += '\n    \"\"\"\n'
        return docstring
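
The residual line printed by print_norm above follows the pattern '[name] solver: solver_string   iteration | abs rel', indented three spaces per level of nesting under root. A minimal standalone sketch of that formatting, with a hypothetical pathname and residual values:

def format_norm(solver_string, pathname, iteration, res, res0, indent=0, solver='NL'):
    name = 'root' if pathname == '' else 'root.' + pathname
    level = pathname.count('.') + indent
    return '   ' * level + '[%s] %s: %s   %d | %.9g %.9g' % (
        name, solver, solver_string, iteration, res, res / res0)

print(format_norm('NEWTON', 'sub.comp', 3, 1.2e-05, 1.0))
# prints:    [root.sub.comp] NL: NEWTON   3 | 1.2e-05 1.2e-05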
Example #20
    def __init__(self, **kwargs):
        """
        Initialize the driver.

        Parameters
        ----------
        **kwargs : dict of keyword arguments
            Keyword arguments that will be mapped into the Driver options.
        """
        self._rec_mgr = RecordingManager()

        self._problem = None
        self._designvars = None
        self._designvars_discrete = []
        self._cons = None
        self._objs = None
        self._responses = None

        # Driver options
        self.options = OptionsDictionary(parent_name=type(self).__name__)

        self.options.declare(
            'debug_print',
            types=list,
            check_valid=_check_debug_print_opts_valid,
            desc="List of what type of Driver variables to print at each "
            "iteration. Valid items in list are 'desvars', 'ln_cons', "
            "'nl_cons', 'objs', 'totals'",
            default=[])

        # Case recording options
        self.recording_options = OptionsDictionary(
            parent_name=type(self).__name__)

        self.recording_options.declare(
            'record_model_metadata',
            types=bool,
            default=True,
            desc='Deprecated. Recording of model metadata will always '
            'be done',
            deprecation="The recording option, record_model_metadata, "
            "on Driver is "
            "deprecated. Recording of model metadata will always "
            "be done",
        )
        self.recording_options.declare(
            'record_desvars',
            types=bool,
            default=True,
            desc='Set to True to record design variables at the '
            'driver level')
        self.recording_options.declare(
            'record_responses',
            types=bool,
            default=False,
            desc='Set True to record constraints and objectives at the '
            'driver level')

        self.recording_options.declare(
            'record_objectives',
            types=bool,
            default=True,
            desc='Set to True to record objectives at the driver level')
        self.recording_options.declare(
            'record_constraints',
            types=bool,
            default=True,
            desc='Set to True to record constraints at the '
            'driver level')
        self.recording_options.declare(
            'includes',
            types=list,
            default=[],
            desc='Patterns for variables to include in recording. '
            'Uses fnmatch wildcards')
        self.recording_options.declare(
            'excludes',
            types=list,
            default=[],
            desc='Patterns for vars to exclude in recording '
            '(processed post-includes). Uses fnmatch wildcards')
        self.recording_options.declare(
            'record_derivatives',
            types=bool,
            default=False,
            desc='Set to True to record derivatives at the driver '
            'level')
        self.recording_options.declare(
            'record_inputs',
            types=bool,
            default=True,
            desc='Set to True to record inputs at the driver level')
        self.recording_options.declare(
            'record_outputs',
            types=bool,
            default=True,
            desc='Set True to record outputs at the '
            'driver level.')
        self.recording_options.declare(
            'record_residuals',
            types=bool,
            default=False,
            desc='Set True to record residuals at the '
            'driver level.')

        # What the driver supports.
        self.supports = OptionsDictionary(parent_name=type(self).__name__)
        self.supports.declare('inequality_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('equality_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('linear_constraints', types=bool, default=False)
        self.supports.declare('two_sided_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('multiple_objectives', types=bool, default=False)
        self.supports.declare('integer_design_vars', types=bool, default=True)
        self.supports.declare('gradients', types=bool, default=False)
        self.supports.declare('active_set', types=bool, default=False)
        self.supports.declare('simultaneous_derivatives',
                              types=bool,
                              default=False)
        self.supports.declare('total_jac_sparsity', types=bool, default=False)

        self.iter_count = 0
        self.cite = ""

        self._coloring_info = coloring_mod._get_coloring_meta()

        self._total_jac_sparsity = None
        self._res_jacs = {}
        self._total_jac = None

        self.fail = False

        self._declare_options()
        self.options.update(kwargs)
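
The closing options.update(kwargs) call is what maps constructor keyword arguments onto the declared options. A minimal sketch of the same pattern with a dict-backed stand-in for OptionsDictionary (the class and option names below are invented for illustration):

class MiniOptions(dict):
    """Dict-backed stand-in: declare a default, then let update() override it."""
    def declare(self, name, default=None, **ignored):
        self[name] = default

class MiniDriver(object):
    def __init__(self, **kwargs):
        self.options = MiniOptions()
        self.options.declare('debug_print', types=list, default=[])
        self.options.update(kwargs)   # user-supplied kwargs override the declared defaults

drv = MiniDriver(debug_print=['desvars', 'objs'])
print(drv.options['debug_print'])   # ['desvars', 'objs']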
Example #21
class SolverBase(object):
    """ Common base class for Linear and Nonlinear solver. Should not be used
    by users. Always inherit from `LinearSolver` or `NonlinearSolver`."""
    def __init__(self):
        self.iter_count = 0
        self.options = OptionsDictionary()
        desc =  "Set to 0 to print only failures, set to 1 to print iteration totals to" + \
                "stdout, set to 2 to print the residual each iteration to stdout," + \
                "or -1 to suppress all printing."

        self.options.add_option('iprint', 0, values=[-1, 0, 1, 2], desc=desc)
        self.options.add_option(
            'err_on_maxiter',
            False,
            desc='If True, raise an AnalysisError if not converged at maxiter.'
        )
        self.recorders = RecordingManager()
        self.local_meta = None

    def setup(self, sub):
        """ Solvers override to define post-setup initiailzation.

        Args
        ----
        sub: `System`
            System that owns this solver.
        """
        pass

    def cleanup(self):
        """ Clean up resources prior to exit. """
        self.recorders.close()

    def print_norm(self,
                   solver_string,
                   system,
                   iteration,
                   res,
                   res0,
                   msg=None,
                   indent=0,
                   solver='NL',
                   u_norm=None):
        """ Prints out the norm of the residual in a neat readable format.

        Args
        ----
        solver_string: string
            Unique string to identify your solver type (e.g., 'LN_GS' or
            'NEWTON').

        system: system
            Parent system, which contains pathname and the preconditioning flag.

        iteration: int
            Current iteration number

        res: float
            Norm of the absolute residual value.

        res0: float
            Norm of the baseline initial residual for relative comparison.

        msg: string, optional
            Message that indicates convergence.

        indent: int, optional
            Additional indentation levels for subiterations.

        solver: string, optional
            Solver type if not LN or NL (mostly for line search operations.)

        u_norm: float, optional
            Norm of the u vector, if applicable.
        """

        pathname = system.pathname
        if pathname == '':
            name = 'root'
        else:
            name = 'root.' + pathname

        # Find indentation level
        level = name.count('.')
        # No indentation for driver; the top solver has no indentation.
        level = level + indent

        indent = '   ' * level

        if system._probdata.precon_level > 0:
            solver_string = 'PRECON:' + solver_string
            indent += '  ' * system._probdata.precon_level

        if msg is not None:
            form = indent + '[%s] %s: %s   %d | %s'

            if u_norm:
                form += ' (%s)' % u_norm

            print(form % (name, solver, solver_string, iteration, msg))
            return

        form = indent + '[%s] %s: %s   %d | %.9g %.9g'

        if u_norm:
            form += ' (%s)' % u_norm

        print(form % (name, solver, solver_string, iteration, res, res / res0))

    def print_all_convergence(self, level=2):
        """ Turns on iprint for this solver and all subsolvers. Override if
        your solver has subsolvers.

        Args
        ----
        level : int(2)
            iprint level. Set to 2 to print residuals each iteration; set to 1
            to print just the iteration totals.
        """
        self.options['iprint'] = level

    def generate_docstring(self):
        """
        Generates a numpy-style docstring for a user-created System class.

        Returns
        -------
        docstring : str
                string that contains a basic numpy docstring.

        """
        #start the docstring off
        docstrings = ['    \"\"\"']

        #Put options into docstring
        firstTime = 1

        for key, value in sorted(vars(self).items()):
            if type(value) == OptionsDictionary:
                if firstTime:  #start of Options docstring
                    docstrings.extend(['', '    Options', '    -------'])
                    firstTime = 0
                docstrings.append(value._generate_docstring(key))

        #finish up docstring
        docstrings.extend(['    \"\"\"', ''])
        return '\n'.join(docstrings)
Example #22
File: driver.py  Project: sebasanper/blue
class Driver(object):
    """
    Top-level container for the systems and drivers.

    Attributes
    ----------
    fail : bool
        Reports whether the driver ran successfully.
    iter_count : int
        Keep track of iterations for case recording.
    metadata : list
        List of metadata
    options : <OptionsDictionary>
        Dictionary with general driver options.
    _problem : <Problem>
        Pointer to the containing problem.
    supports : <OptionsDictionary>
        Provides a consistent way for drivers to declare what features they support.
    _designvars : dict
        Contains all design variable info.
    _cons : dict
        Contains all constraint info.
    _objs : dict
        Contains all objective info.
    _responses : dict
        Contains all response info.
    _rec_mgr : <RecordingManager>
        Object that manages all recorders added to this driver.
    _model_viewer_data : dict
        Structure of model, used to make n2 diagram.
    _remote_dvs : dict
        Dict of design variables that are remote on at least one proc. Values are
        (owning rank, size).
    _remote_cons : dict
        Dict of constraints that are remote on at least one proc. Values are
        (owning rank, size).
    _remote_objs : dict
        Dict of objectives that are remote on at least one proc. Values are
        (owning rank, size).
    _remote_responses : dict
        A combined dict containing entries from _remote_cons and _remote_objs.
    """
    def __init__(self):
        """
        Initialize the driver.
        """
        self._rec_mgr = RecordingManager()

        self._problem = None
        self._designvars = None
        self._cons = None
        self._objs = None
        self._responses = None
        self.options = OptionsDictionary()

        # What the driver supports.
        self.supports = OptionsDictionary()
        self.supports.declare('inequality_constraints',
                              type_=bool,
                              default=False)
        self.supports.declare('equality_constraints',
                              type_=bool,
                              default=False)
        self.supports.declare('linear_constraints', type_=bool, default=False)
        self.supports.declare('two_sided_constraints',
                              type_=bool,
                              default=False)
        self.supports.declare('multiple_objectives', type_=bool, default=False)
        self.supports.declare('integer_design_vars', type_=bool, default=False)
        self.supports.declare('gradients', type_=bool, default=False)
        self.supports.declare('active_set', type_=bool, default=False)

        self.iter_count = 0
        self.metadata = None
        self._model_viewer_data = None

        # TODO, support these in Openmdao blue
        self.supports.declare('integer_design_vars', type_=bool, default=False)

        self.fail = False

    def add_recorder(self, recorder):
        """
        Add a recorder to the driver.

        Parameters
        ----------
        recorder : BaseRecorder
           A recorder instance.
        """
        self._rec_mgr.append(recorder)

    def cleanup(self):
        """
        Clean up resources prior to exit.
        """
        self._rec_mgr.close()

    def _setup_driver(self, problem):
        """
        Prepare the driver for execution.

        This is the final thing to run during setup.

        Parameters
        ----------
        problem : <Problem>
            Pointer to the containing problem.
        """
        self._problem = problem
        model = problem.model

        self._objs = objs = OrderedDict()
        self._cons = cons = OrderedDict()
        self._responses = model.get_responses(recurse=True)
        for name, data in iteritems(self._responses):
            if data['type'] == 'con':
                cons[name] = data
            else:
                objs[name] = data

        # Gather up the information for design vars.
        self._designvars = model.get_design_vars(recurse=True)

        con_set = set()
        obj_set = set()
        dv_set = set()

        self._remote_dvs = dv_dict = {}
        self._remote_cons = con_dict = {}
        self._remote_objs = obj_dict = {}

        # Now determine if later we'll need to allgather cons, objs, or desvars.
        if model.comm.size > 1 and model._subsystems_allprocs:
            local_out_vars = set(model._outputs._views)
            remote_dvs = set(self._designvars) - local_out_vars
            remote_cons = set(self._cons) - local_out_vars
            remote_objs = set(self._objs) - local_out_vars
            all_remote_vois = model.comm.allgather(
                (remote_dvs, remote_cons, remote_objs))
            for rem_dvs, rem_cons, rem_objs in all_remote_vois:
                con_set.update(rem_cons)
                obj_set.update(rem_objs)
                dv_set.update(rem_dvs)

            # If we have remote VOIs, pick an owning rank for each and use that
            # to bcast to others later
            owning_ranks = model._owning_rank['output']
            sizes = model._var_sizes['nonlinear']['output']
            for i, vname in enumerate(model._var_allprocs_abs_names['output']):
                owner = owning_ranks[vname]
                if vname in dv_set:
                    dv_dict[vname] = (owner, sizes[owner, i])
                if vname in con_set:
                    con_dict[vname] = (owner, sizes[owner, i])
                if vname in obj_set:
                    obj_dict[vname] = (owner, sizes[owner, i])

        self._remote_responses = self._remote_cons.copy()
        self._remote_responses.update(self._remote_objs)

        self._rec_mgr.startup(self)
        if self._rec_mgr._recorders:
            from openmdao.devtools.problem_viewer.problem_viewer import _get_viewer_data
            self._model_viewer_data = _get_viewer_data(problem)
        self._rec_mgr.record_metadata(self)

    def _get_voi_val(self, name, meta, remote_vois):
        """
        Get the value of a variable of interest (objective, constraint, or design var).

        This will retrieve the value if the VOI is remote.

        Parameters
        ----------
        name : str
            Name of the variable of interest.
        meta : dict
            Metadata for the variable of interest.
        remote_vois : dict
            Dict containing (owning_rank, size) for all remote vois of a particular
            type (design var, constraint, or objective).

        Returns
        -------
        float or ndarray
            The value of the named variable of interest.
        """
        model = self._problem.model
        comm = model.comm
        vec = model._outputs._views_flat
        indices = meta['indices']

        if name in remote_vois:
            owner, size = remote_vois[name]
            if owner == comm.rank:
                if indices is None:
                    val = vec[name].copy()
                else:
                    val = vec[name][indices]
            else:
                if indices is not None:
                    size = len(indices)
                val = np.empty(size)
            comm.Bcast(val, root=owner)
        else:
            if indices is None:
                val = vec[name].copy()
            else:
                val = vec[name][indices]

        # Scale design variable values
        adder = meta['adder']
        if adder is not None:
            val += adder

        scaler = meta['scaler']
        if scaler is not None:
            val *= scaler

        return val

    def get_design_var_values(self, filter=None):
        """
        Return the design variable values.

        This is called to gather the initial design variable state.

        Parameters
        ----------
        filter : list
            List of desvar names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each design variable.
        """
        if filter:
            dvs = filter
        else:
            # use all the designvars
            dvs = self._designvars

        return {
            n: self._get_voi_val(n, self._designvars[n], self._remote_dvs)
            for n in dvs
        }

    def set_design_var(self, name, value):
        """
        Set the value of a design variable.

        Parameters
        ----------
        name : str
            Global pathname of the design variable.
        value : float or ndarray
            Value for the design variable.
        """
        if (name in self._remote_dvs
                and self._problem.model._owning_rank['output'][name] !=
                self._problem.comm.rank):
            return

        meta = self._designvars[name]
        indices = meta['indices']
        if indices is None:
            indices = slice(None)

        desvar = self._problem.model._outputs._views_flat[name]
        desvar[indices] = value

        # Scale design variable values
        scaler = meta['scaler']
        if scaler is not None:
            desvar[indices] *= 1.0 / scaler

        adder = meta['adder']
        if adder is not None:
            desvar[indices] -= adder
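
# A standalone sketch of the scaling round trip implied above: _get_voi_val gives the
# driver (model_value + adder) * scaler, so set_design_var divides by the scaler and
# then subtracts the adder when writing back. All values below are hypothetical.
import numpy as np

adder, scaler = -3.0, 10.0
model_val = np.array([5.0])

driver_val = (model_val + adder) * scaler        # value as seen by the driver -> 20.0

new_driver_val = np.array([30.0])                # value chosen by an optimizer
new_model_val = new_driver_val / scaler - adder  # write-back path -> 6.0
print(driver_val, new_model_val)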

    def get_response_values(self, filter=None):
        """
        Return response values.

        Parameters
        ----------
        filter : list
            List of response names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each response.
        """
        # TODO: finish this method when we have a driver that requires it.
        return {}

    def get_objective_values(self, filter=None):
        """
        Return objective values.

        Parameters
        ----------
        filter : list
            List of objective names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each objective.
        """
        if filter:
            objs = filter
        else:
            objs = self._objs

        return {
            n: self._get_voi_val(n, self._objs[n], self._remote_objs)
            for n in objs
        }

    def get_constraint_values(self, ctype='all', lintype='all', filter=None):
        """
        Return constraint values.

        Parameters
        ----------
        ctype : string
            Default is 'all'. Optionally return just the inequality constraints
            with 'ineq' or the equality constraints with 'eq'.

        lintype : string
            Default is 'all'. Optionally return just the linear constraints
            with 'linear' or the nonlinear constraints with 'nonlinear'.

        filter : list
            List of constraint names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each constraint.
        """
        if filter is not None:
            cons = filter
        else:
            cons = self._cons

        con_dict = {}
        for name in cons:
            meta = self._cons[name]

            if lintype == 'linear' and not meta['linear']:
                continue

            if lintype == 'nonlinear' and meta['linear']:
                continue

            if ctype == 'eq' and meta['equals'] is None:
                continue

            if ctype == 'ineq' and meta['equals'] is not None:
                continue

            con_dict[name] = self._get_voi_val(name, meta, self._remote_cons)

        return con_dict

    def run(self):
        """
        Execute this driver.

        The base `Driver` just runs the model. All other drivers overload
        this method.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        with Recording(self._get_name(), self.iter_count, self) as rec:
            failure_flag = self._problem.model._solve_nonlinear()

        self.iter_count += 1
        return failure_flag

    def _compute_totals(self,
                        of=None,
                        wrt=None,
                        return_format='flat_dict',
                        global_names=True):
        """
        Compute derivatives of desired quantities with respect to desired inputs.

        All derivatives are returned using driver scaling.

        Parameters
        ----------
        of : list of variable name strings or None
            Variables whose derivatives will be computed. Default is None, which
            uses the driver's objectives and constraints.
        wrt : list of variable name strings or None
            Variables with respect to which the derivatives will be computed.
            Default is None, which uses the driver's desvars.
        return_format : string
            Format to return the derivatives. Default is a 'flat_dict', which
            returns them in a dictionary whose keys are tuples of form (of, wrt). For
            the scipy optimizer, 'array' is also supported.
        global_names : bool
            Set to True when passing in global names to skip some translation steps.

        Returns
        -------
        derivs : object
            Derivatives in form requested by 'return_format'.
        """
        prob = self._problem

        # Compute the derivatives in dict format...
        if prob.model._owns_approx_jac:
            derivs = prob._compute_totals_approx(of=of,
                                                 wrt=wrt,
                                                 return_format='dict',
                                                 global_names=global_names)
        else:
            derivs = prob._compute_totals(of=of,
                                          wrt=wrt,
                                          return_format='dict',
                                          global_names=global_names)

        # ... then convert to whatever the driver needs.
        if return_format == 'dict':

            for okey, oval in iteritems(derivs):
                for ikey, val in iteritems(oval):

                    imeta = self._designvars[ikey]
                    ometa = self._responses[okey]

                    iscaler = imeta['scaler']
                    oscaler = ometa['scaler']

                    # Scale response side
                    if oscaler is not None:
                        val[:] = (oscaler * val.T).T

                    # Scale design var side
                    if iscaler is not None:
                        val *= 1.0 / iscaler

        elif return_format == 'array':

            # Use sizes pre-computed in derivs for ease
            osize = 0
            isize = 0
            do_wrt = True
            islices = {}
            oslices = {}
            for okey, oval in iteritems(derivs):
                if do_wrt:
                    for ikey, val in iteritems(oval):
                        istart = isize
                        isize += val.shape[1]
                        islices[ikey] = slice(istart, isize)
                    do_wrt = False
                ostart = osize
                osize += oval[ikey].shape[0]
                oslices[okey] = slice(ostart, osize)

            new_derivs = np.zeros((osize, isize))

            relevant = prob.model._relevant

            # Apply driver ref/ref0 and position subjac into array jacobian.
            for okey, oval in iteritems(derivs):
                oscaler = self._responses[okey]['scaler']
                for ikey, val in iteritems(oval):
                    if okey in relevant[ikey] or ikey in relevant[okey]:
                        iscaler = self._designvars[ikey]['scaler']

                        # Scale response side
                        if oscaler is not None:
                            val[:] = (oscaler * val.T).T

                        # Scale design var side
                        if iscaler is not None:
                            val *= 1.0 / iscaler

                        new_derivs[oslices[okey], islices[ikey]] = val

            derivs = new_derivs

        else:
            msg = "Derivative scaling by the driver only supports the 'dict' format at present."
            raise RuntimeError(msg)

        return derivs
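
# A standalone numpy sketch of the driver scaling applied above: responses (rows) are
# multiplied by their scalers and design variables (columns) are divided by theirs.
# The 2x2 Jacobian and the scaler values are made up.
import numpy as np

J = np.array([[2.0, 4.0],
              [6.0, 8.0]])
oscaler = np.array([10.0, 100.0])   # per-response (row) scalers
iscaler = np.array([2.0, 4.0])      # per-design-var (column) scalers

J_scaled = (oscaler * J.T).T        # scale the response side
J_scaled *= 1.0 / iscaler           # scale the design-variable side
print(J_scaled)                     # [[ 10.  10.] [300. 200.]]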

    def record_iteration(self):
        """
        Record an iteration of the current Driver.
        """
        metadata = create_local_meta(self._get_name())
        self._rec_mgr.record_iteration(self, metadata)

    def _get_name(self):
        """
        Get name of current Driver.

        Returns
        -------
        str
            Name of current Driver.
        """
        return "Driver"
Example #23
class SolverBase(object):
    """ Common base class for Linear and Nonlinear solver. Should not be used
    by users. Always inherit from `LinearSolver` or `NonlinearSolver`."""

    def __init__(self):
        self.iter_count = 0
        self.options = OptionsDictionary()
        desc = (
            "Set to 0 to disable printing, set to 1 to print the "
            "residual to stdout each iteration, set to 2 to print "
            "subiteration residuals as well."
        )
        self.options.add_option("iprint", 0, values=[0, 1, 2], desc=desc)
        self.recorders = RecordingManager()
        self.local_meta = None

    def setup(self, sub):
        """ Solvers override to define post-setup initiailzation.

        Args
        ----
        sub: `System`
            System that owns this solver.
        """
        pass

    def cleanup(self):
        """ Clean up resources prior to exit. """
        self.recorders.close()

    def print_norm(self, solver_string, pathname, iteration, res, res0, msg=None, indent=0, solver="NL"):
        """ Prints out the norm of the residual in a neat readable format.

        Args
        ----
        solver_string: string
            Unique string to identify your solver type (e.g., 'LN_GS' or
            'NEWTON').

        pathname: str
            Parent system pathname.

        iteration: int
            Current iteration number

        res: float
            Absolute residual value.

        res0: float
            Baseline initial residual for relative comparison.

        msg: string, optional
            Message that indicates convergence.

        indent: int, optional
            Additional indentation levels for subiterations.

        solver: string, optional
            Solver type if not LN or NL (mostly for line search operations.)
        """
        if pathname == "":
            name = "root"
        else:
            name = "root." + pathname

        # Find indentation level
        level = pathname.count(".")
        # No indentation for driver; the top solver has no indentation.
        level = level + indent

        indent = "   " * level
        if msg is not None:
            form = indent + "[%s] %s: %s   %d | %s"
            print(form % (name, solver, solver_string, iteration, msg))
            return

        form = indent + "[%s] %s: %s   %d | %.9g %.9g"
        print(form % (name, solver, solver_string, iteration, res, res / res0))

    def print_all_convergence(self):
        """ Turns on iprint for this solver and all subsolvers. Override if
        your solver has subsolvers."""
        self.options["iprint"] = 1

    def generate_docstring(self):
        """
        Generates a numpy-style docstring for a user-created System class.

        Returns
        -------
        docstring : str
                string that contains a basic numpy docstring.

        """
        # start the docstring off
        docstring = '    """\n'

        # Put options into docstring
        firstTime = 1
        # for py3.4, items from vars must come out in same order.
        from collections import OrderedDict

        v = OrderedDict(sorted(vars(self).items()))
        for key, value in v.items():
            if type(value) == OptionsDictionary:
                if firstTime:  # start of Options docstring
                    docstring += "\n    Options\n    -------\n"
                    firstTime = 0
                for (name, val) in sorted(value.items()):
                    docstring += "    " + key + "['"
                    docstring += name + "']"
                    docstring += " :  " + type(val).__name__
                    docstring += "("
                    if type(val).__name__ == "str":
                        docstring += "'"
                    docstring += str(val)
                    if type(val).__name__ == "str":
                        docstring += "'"
                    docstring += ")\n"

                    desc = value._options[name]["desc"]
                    if desc:
                        docstring += "        " + desc + "\n"

        # finish up docstring
        docstring += '\n    """\n'
        return docstring
Example #24
File: driver.py  Project: samtx/OpenMDAO
class Driver(object):
    """
    Top-level container for the systems and drivers.

    Options
    -------
    options['debug_print'] :  list of strings([])
        Indicates what variables to print at each iteration. The valid options are:
            'desvars', 'ln_cons', 'nl_cons', and 'objs'.
    recording_options['record_metadata'] :  bool(True)
        Tells recorder whether to record variable attribute metadata.
    recording_options['record_desvars'] :  bool(True)
        Tells recorder whether to record the desvars of the Driver.
    recording_options['record_responses'] :  bool(False)
        Tells recorder whether to record the responses of the Driver.
    recording_options['record_objectives'] :  bool(True)
        Tells recorder whether to record the objectives of the Driver.
    recording_options['record_constraints'] :  bool(True)
        Tells recorder whether to record the constraints of the Driver.
    recording_options['includes'] :  list of strings("*")
        Patterns for variables to include in recording.
    recording_options['excludes'] :  list of strings('')
        Patterns for variables to exclude in recording (processed after includes).


    Attributes
    ----------
    fail : bool
        Reports whether the driver ran successfully.
    iter_count : int
        Keep track of iterations for case recording.
    metadata : list
        List of metadata
    options : <OptionsDictionary>
        Dictionary with general driver options.
    recording_options : <OptionsDictionary>
        Dictionary with driver recording options.
    debug_print : <OptionsDictionary>
        Dictionary with debugging printing options.
    cite : str
        Listing of relevant citations that should be referenced when
        publishing work that uses this class.
    _problem : <Problem>
        Pointer to the containing problem.
    supports : <OptionsDictionary>
        Provides a consistent way for drivers to declare what features they support.
    _designvars : dict
        Contains all design variable info.
    _cons : dict
        Contains all constraint info.
    _objs : dict
        Contains all objective info.
    _responses : dict
        Contains all response info.
    _rec_mgr : <RecordingManager>
        Object that manages all recorders added to this driver.
    _vars_to_record : dict
        Dict of lists of var names indicating what to record.
    _model_viewer_data : dict
        Structure of model, used to make n2 diagram.
    _remote_dvs : dict
        Dict of design variables that are remote on at least one proc. Values are
        (owning rank, size).
    _remote_cons : dict
        Dict of constraints that are remote on at least one proc. Values are
        (owning rank, size).
    _remote_objs : dict
        Dict of objectives that are remote on at least one proc. Values are
        (owning rank, size).
    _remote_responses : dict
        A combined dict containing entries from _remote_cons and _remote_objs.
    _simul_coloring_info : tuple of dicts
        A data structure describing coloring for simultaneous derivs.
    _res_jacs : dict
        Dict of sparse subjacobians for use with certain optimizers, e.g. pyOptSparseDriver.
    """

    def __init__(self):
        """
        Initialize the driver.
        """
        self._rec_mgr = RecordingManager()
        self._vars_to_record = {
            'desvarnames': set(),
            'responsenames': set(),
            'objectivenames': set(),
            'constraintnames': set(),
            'sysinclnames': set(),
        }

        self._problem = None
        self._designvars = None
        self._cons = None
        self._objs = None
        self._responses = None
        self.options = OptionsDictionary()
        self.recording_options = OptionsDictionary()

        ###########################
        self.options.declare('debug_print', types=list, is_valid=_is_debug_print_opts_valid,
                             desc="List of what type of Driver variables to print at each "
                             "iteration. Valid items in list are 'desvars','ln_cons',"
                             "'nl_cons','objs'",
                             default=[])

        ###########################
        self.recording_options.declare('record_metadata', types=bool, desc='Record metadata',
                                       default=True)
        self.recording_options.declare('record_desvars', types=bool, default=True,
                                       desc='Set to True to record design variables at the \
                                       driver level')
        self.recording_options.declare('record_responses', types=bool, default=False,
                                       desc='Set to True to record responses at the driver level')
        self.recording_options.declare('record_objectives', types=bool, default=True,
                                       desc='Set to True to record objectives at the \
                                       driver level')
        self.recording_options.declare('record_constraints', types=bool, default=True,
                                       desc='Set to True to record constraints at the \
                                       driver level')
        self.recording_options.declare('includes', types=list, default=['*'],
                                       desc='Patterns for variables to include in recording')
        self.recording_options.declare('excludes', types=list, default=[],
                                       desc='Patterns for vars to exclude in recording '
                                       '(processed post-includes)')
        self.recording_options.declare('record_derivatives', types=bool, default=False,
                                       desc='Set to True to record derivatives at the driver \
                                       level')
        ###########################

        # What the driver supports.
        self.supports = OptionsDictionary()
        self.supports.declare('inequality_constraints', types=bool, default=False)
        self.supports.declare('equality_constraints', types=bool, default=False)
        self.supports.declare('linear_constraints', types=bool, default=False)
        self.supports.declare('two_sided_constraints', types=bool, default=False)
        self.supports.declare('multiple_objectives', types=bool, default=False)
        self.supports.declare('integer_design_vars', types=bool, default=False)
        self.supports.declare('gradients', types=bool, default=False)
        self.supports.declare('active_set', types=bool, default=False)
        self.supports.declare('simultaneous_derivatives', types=bool, default=False)

        # Debug printing.
        self.debug_print = OptionsDictionary()
        self.debug_print.declare('debug_print', types=bool, default=False,
                                 desc='Overall option to turn on Driver debug printing')
        self.debug_print.declare('debug_print_desvars', types=bool, default=False,
                                 desc='Print design variables')
        self.debug_print.declare('debug_print_nl_con', types=bool, default=False,
                                 desc='Print nonlinear constraints')
        self.debug_print.declare('debug_print_ln_con', types=bool, default=False,
                                 desc='Print linear constraints')
        self.debug_print.declare('debug_print_objective', types=bool, default=False,
                                 desc='Print objectives')

        self.iter_count = 0
        self.metadata = None
        self._model_viewer_data = None
        self.cite = ""

        # TODO, support these in OpenMDAO
        self.supports.declare('integer_design_vars', types=bool, default=False)

        self._simul_coloring_info = None
        self._res_jacs = {}

        self.fail = False

    def add_recorder(self, recorder):
        """
        Add a recorder to the driver.

        Parameters
        ----------
        recorder : BaseRecorder
           A recorder instance.
        """
        self._rec_mgr.append(recorder)

    def cleanup(self):
        """
        Clean up resources prior to exit.
        """
        self._rec_mgr.close()

    def _setup_driver(self, problem):
        """
        Prepare the driver for execution.

        This is the final thing to run during setup.

        Parameters
        ----------
        problem : <Problem>
            Pointer to the containing problem.
        """
        self._problem = problem
        model = problem.model

        self._objs = objs = OrderedDict()
        self._cons = cons = OrderedDict()
        self._responses = model.get_responses(recurse=True)
        response_size = 0
        for name, data in iteritems(self._responses):
            if data['type'] == 'con':
                cons[name] = data
            else:
                objs[name] = data
            response_size += data['size']

        # Gather up the information for design vars.
        self._designvars = model.get_design_vars(recurse=True)
        desvar_size = np.sum(data['size'] for data in itervalues(self._designvars))

        if ((problem._mode == 'fwd' and desvar_size > response_size) or
                (problem._mode == 'rev' and response_size > desvar_size)):
            warnings.warn("Inefficient choice of derivative mode.  You chose '%s' for a "
                          "problem with %d design variables and %d response variables "
                          "(objectives and constraints)." %
                          (problem._mode, desvar_size, response_size), RuntimeWarning)

        self._has_scaling = (
            np.any([r['scaler'] is not None for r in self._responses.values()]) or
            np.any([dv['scaler'] is not None for dv in self._designvars.values()])
        )

        con_set = set()
        obj_set = set()
        dv_set = set()

        self._remote_dvs = dv_dict = {}
        self._remote_cons = con_dict = {}
        self._remote_objs = obj_dict = {}

        # Now determine if later we'll need to allgather cons, objs, or desvars.
        if model.comm.size > 1 and model._subsystems_allprocs:
            local_out_vars = set(model._outputs._views)
            remote_dvs = set(self._designvars) - local_out_vars
            remote_cons = set(self._cons) - local_out_vars
            remote_objs = set(self._objs) - local_out_vars
            all_remote_vois = model.comm.allgather(
                (remote_dvs, remote_cons, remote_objs))
            for rem_dvs, rem_cons, rem_objs in all_remote_vois:
                con_set.update(rem_cons)
                obj_set.update(rem_objs)
                dv_set.update(rem_dvs)

            # If we have remote VOIs, pick an owning rank for each and use that
            # to bcast to others later
            owning_ranks = model._owning_rank['output']
            sizes = model._var_sizes['nonlinear']['output']
            for i, vname in enumerate(model._var_allprocs_abs_names['output']):
                owner = owning_ranks[vname]
                if vname in dv_set:
                    dv_dict[vname] = (owner, sizes[owner, i])
                if vname in con_set:
                    con_dict[vname] = (owner, sizes[owner, i])
                if vname in obj_set:
                    obj_dict[vname] = (owner, sizes[owner, i])

        self._remote_responses = self._remote_cons.copy()
        self._remote_responses.update(self._remote_objs)

        # Case recording setup
        mydesvars = myobjectives = myconstraints = myresponses = set()
        mysystem_outputs = set()
        incl = self.recording_options['includes']
        excl = self.recording_options['excludes']
        rec_desvars = self.recording_options['record_desvars']
        rec_objectives = self.recording_options['record_objectives']
        rec_constraints = self.recording_options['record_constraints']
        rec_responses = self.recording_options['record_responses']

        all_desvars = {n for n in self._designvars
                       if check_path(n, incl, excl, True)}
        all_objectives = {n for n in self._objs
                          if check_path(n, incl, excl, True)}
        all_constraints = {n for n in self._cons
                           if check_path(n, incl, excl, True)}
        if rec_desvars:
            mydesvars = all_desvars

        if rec_objectives:
            myobjectives = all_objectives

        if rec_constraints:
            myconstraints = all_constraints

        if rec_responses:
            myresponses = {n for n in self._responses
                           if check_path(n, incl, excl, True)}

        # get the includes that were requested for this Driver recording
        if incl:
            prob = self._problem
            root = prob.model
            # The my* variables are sets

            # First gather all of the desired outputs
            # The following might only be the local vars if MPI
            mysystem_outputs = {n for n in root._outputs
                                if check_path(n, incl, excl)}

            # If MPI, and on rank 0, need to gather up all the variables
            #    even those not local to rank 0
            if MPI:
                all_vars = root.comm.gather(mysystem_outputs, root=0)
                if MPI.COMM_WORLD.rank == 0:
                    mysystem_outputs = all_vars[-1]
                    for d in all_vars[:-1]:
                        mysystem_outputs.update(d)

            # de-duplicate mysystem_outputs
            mysystem_outputs = mysystem_outputs.difference(all_desvars, all_objectives,
                                                           all_constraints)

        if MPI:  # filter based on who owns the variables
            # TODO Eventually, we think we can get rid of this next check. But to be safe,
            #       we are leaving it in there.
            if not model.is_active():
                raise RuntimeError(
                    "RecordingManager.startup should never be called when "
                    "running in parallel on an inactive System")
            rrank = self._problem.comm.rank  # root ( aka model ) rank.
            rowned = model._owning_rank['output']
            mydesvars = [n for n in mydesvars if rrank == rowned[n]]
            myresponses = [n for n in myresponses if rrank == rowned[n]]
            myobjectives = [n for n in myobjectives if rrank == rowned[n]]
            myconstraints = [n for n in myconstraints if rrank == rowned[n]]
            mysystem_outputs = [n for n in mysystem_outputs if rrank == rowned[n]]

        self._filtered_vars_to_record = {
            'des': mydesvars,
            'obj': myobjectives,
            'con': myconstraints,
            'res': myresponses,
            'sys': mysystem_outputs,
        }

        self._rec_mgr.startup(self)
        if self._rec_mgr._recorders:
            from openmdao.devtools.problem_viewer.problem_viewer import _get_viewer_data
            self._model_viewer_data = _get_viewer_data(problem)
        if self.recording_options['record_metadata']:
            self._rec_mgr.record_metadata(self)

        # set up simultaneous deriv coloring
        if self._simul_coloring_info and self.supports['simultaneous_derivatives']:
            if problem._mode == 'fwd':
                self._setup_simul_coloring(problem._mode)
            else:
                raise RuntimeError("simultaneous derivs are currently not supported in rev mode.")

    def _get_voi_val(self, name, meta, remote_vois):
        """
        Get the value of a variable of interest (objective, constraint, or design var).

        This will retrieve the value if the VOI is remote.

        Parameters
        ----------
        name : str
            Name of the variable of interest.
        meta : dict
            Metadata for the variable of interest.
        remote_vois : dict
            Dict containing (owning_rank, size) for all remote vois of a particular
            type (design var, constraint, or objective).

        Returns
        -------
        float or ndarray
            The value of the named variable of interest.
        """
        model = self._problem.model
        comm = model.comm
        vec = model._outputs._views_flat
        indices = meta['indices']

        if name in remote_vois:
            owner, size = remote_vois[name]
            if owner == comm.rank:
                if indices is None:
                    val = vec[name].copy()
                else:
                    val = vec[name][indices]
            else:
                if indices is not None:
                    size = len(indices)
                val = np.empty(size)
            comm.Bcast(val, root=owner)
        else:
            if indices is None:
                val = vec[name].copy()
            else:
                val = vec[name][indices]

        if self._has_scaling:
            # Scale design variable values
            adder = meta['adder']
            if adder is not None:
                val += adder

            scaler = meta['scaler']
            if scaler is not None:
                val *= scaler

        return val

    def get_design_var_values(self, filter=None):
        """
        Return the design variable values.

        This is called to gather the initial design variable state.

        Parameters
        ----------
        filter : list
            List of desvar names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each design variable.
        """
        if filter:
            dvs = filter
        else:
            # use all the designvars
            dvs = self._designvars

        return {n: self._get_voi_val(n, self._designvars[n], self._remote_dvs) for n in dvs}

    def set_design_var(self, name, value):
        """
        Set the value of a design variable.

        Parameters
        ----------
        name : str
            Global pathname of the design variable.
        value : float or ndarray
            Value for the design variable.
        """
        if (name in self._remote_dvs and
                self._problem.model._owning_rank['output'][name] != self._problem.comm.rank):
            return

        meta = self._designvars[name]
        indices = meta['indices']
        if indices is None:
            indices = slice(None)

        desvar = self._problem.model._outputs._views_flat[name]
        desvar[indices] = value

        if self._has_scaling:
            # Scale design variable values
            scaler = meta['scaler']
            if scaler is not None:
                desvar[indices] *= 1.0 / scaler

            adder = meta['adder']
            if adder is not None:
                desvar[indices] -= adder

    def get_response_values(self, filter=None):
        """
        Return response values.

        Parameters
        ----------
        filter : list
            List of response names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each response.
        """
        if filter:
            resps = filter
        else:
            resps = self._responses

        return {n: self._get_voi_val(n, self._responses[n], self._remote_objs) for n in resps}

    def get_objective_values(self, filter=None):
        """
        Return objective values.

        Parameters
        ----------
        filter : list
            List of objective names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each objective.
        """
        if filter:
            objs = filter
        else:
            objs = self._objs

        return {n: self._get_voi_val(n, self._objs[n], self._remote_objs) for n in objs}

    def get_constraint_values(self, ctype='all', lintype='all', filter=None):
        """
        Return constraint values.

        Parameters
        ----------
        ctype : string
            Default is 'all'. Optionally return just the inequality constraints
            with 'ineq' or the equality constraints with 'eq'.
        lintype : string
            Default is 'all'. Optionally return just the linear constraints
            with 'linear' or the nonlinear constraints with 'nonlinear'.
        filter : list
            List of constraint names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each constraint.
        """
        if filter is not None:
            cons = filter
        else:
            cons = self._cons

        con_dict = {}
        for name in cons:
            meta = self._cons[name]

            if lintype == 'linear' and not meta['linear']:
                continue

            if lintype == 'nonlinear' and meta['linear']:
                continue

            if ctype == 'eq' and meta['equals'] is None:
                continue

            if ctype == 'ineq' and meta['equals'] is not None:
                continue

            con_dict[name] = self._get_voi_val(name, meta, self._remote_cons)

        return con_dict
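    # Usage sketch (illustrative, not from the original source): a driver can
    # pull just the subsets it needs, e.g.
    #     eq_cons  = self.get_constraint_values(ctype='eq', lintype='nonlinear')
    #     lin_ineq = self.get_constraint_values(ctype='ineq', lintype='linear')
    # each call returns a {constraint_name: value} dict containing only the
    # matching constraints.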

    def run(self):
        """
        Execute this driver.

        The base `Driver` just runs the model. All other drivers overload
        this method.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        with RecordingDebugging(self._get_name(), self.iter_count, self) as rec:
            failure_flag, _, _ = self._problem.model._solve_nonlinear()

        self.iter_count += 1
        return failure_flag

    def _dict2array_jac(self, derivs):
        """
        Convert a nested dict of total derivatives into a single dense 2D array.

        Rows are ordered by response ('of') names and columns by design var ('wrt')
        names; (of, wrt) pairs that are not relevant to each other are left as zeros.

        Parameters
        ----------
        derivs : dict of dicts
            Total derivatives keyed as derivs[of][wrt].

        Returns
        -------
        ndarray
            The derivatives assembled into a single 2D array.
        """
        osize = 0
        isize = 0
        do_wrt = True
        islices = {}
        oslices = {}
        for okey, oval in iteritems(derivs):
            if do_wrt:
                for ikey, val in iteritems(oval):
                    istart = isize
                    isize += val.shape[1]
                    islices[ikey] = slice(istart, isize)
                do_wrt = False
            ostart = osize
            osize += oval[ikey].shape[0]
            oslices[okey] = slice(ostart, osize)

        new_derivs = np.zeros((osize, isize))

        relevant = self._problem.model._relevant

        for okey, odict in iteritems(derivs):
            for ikey, val in iteritems(odict):
                if okey in relevant[ikey] or ikey in relevant[okey]:
                    new_derivs[oslices[okey], islices[ikey]] = val

        return new_derivs

    def _compute_totals(self, of=None, wrt=None, return_format='flat_dict', global_names=True):
        """
        Compute derivatives of desired quantities with respect to desired inputs.

        All derivatives are returned using driver scaling.

        Parameters
        ----------
        of : list of variable name strings or None
            Variables whose derivatives will be computed. Default is None, which
            uses the driver's objectives and constraints.
        wrt : list of variable name strings or None
            Variables with respect to which the derivatives will be computed.
            Default is None, which uses the driver's desvars.
        return_format : string
            Format to return the derivatives. Default is a 'flat_dict', which
            returns them in a dictionary whose keys are tuples of form (of, wrt). For
            the scipy optimizer, 'array' is also supported.
        global_names : bool
            Set to True when passing in global names to skip some translation steps.

        Returns
        -------
        derivs : object
            Derivatives in form requested by 'return_format'.
        """
        prob = self._problem

        # Compute the derivatives in dict format...
        if prob.model._owns_approx_jac:
            derivs = prob._compute_totals_approx(of=of, wrt=wrt, return_format='dict',
                                                 global_names=global_names)
        else:
            derivs = prob._compute_totals(of=of, wrt=wrt, return_format='dict',
                                          global_names=global_names)

        # ... then convert to whatever the driver needs.
        if return_format in ('dict', 'array'):
            if self._has_scaling:
                for okey, odict in iteritems(derivs):
                    for ikey, val in iteritems(odict):

                        iscaler = self._designvars[ikey]['scaler']
                        oscaler = self._responses[okey]['scaler']

                        # Scale response side
                        if oscaler is not None:
                            val[:] = (oscaler * val.T).T

                        # Scale design var side
                        if iscaler is not None:
                            val *= 1.0 / iscaler
        else:
            raise RuntimeError("Derivative scaling by the driver only supports the 'dict' and "
                               "'array' formats at present.")

        if return_format == 'array':
            derivs = self._dict2array_jac(derivs)

        return derivs
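    # Note on the scaling above (illustrative, not from the original source):
    # responses carry a scaler on the 'of' side and design vars on the 'wrt'
    # side, so each total-derivative block is transformed as
    #     d(of_scaled)/d(wrt_scaled) = oscaler * J_unscaled / iscaler
    # which is exactly what the row-wise (oscaler * val.T).T and the
    # column-wise val *= 1.0 / iscaler operations apply.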

    def record_iteration(self):
        """
        Record an iteration of the current Driver.
        """
        if not self._rec_mgr._recorders:
            return

        metadata = create_local_meta(self._get_name())

        # Get the data to record
        data = {}
        if self.recording_options['record_desvars']:
            # collective call that gets across all ranks
            desvars = self.get_design_var_values()
        else:
            desvars = {}

        if self.recording_options['record_responses']:
            # responses = self.get_response_values() # not really working yet
            responses = {}
        else:
            responses = {}

        if self.recording_options['record_objectives']:
            objectives = self.get_objective_values()
        else:
            objectives = {}

        if self.recording_options['record_constraints']:
            constraints = self.get_constraint_values()
        else:
            constraints = {}

        desvars = {name: desvars[name]
                   for name in self._filtered_vars_to_record['des']}
        # responses not working yet
        # responses = {name: responses[name] for name in self._filtered_vars_to_record['res']}
        objectives = {name: objectives[name]
                      for name in self._filtered_vars_to_record['obj']}
        constraints = {name: constraints[name]
                       for name in self._filtered_vars_to_record['con']}

        if self.recording_options['includes']:
            root = self._problem.model
            outputs = root._outputs
            # inputs, outputs, residuals = root.get_nonlinear_vectors()
            sysvars = {}
            for name, value in iteritems(outputs._names):
                if name in self._filtered_vars_to_record['sys']:
                    sysvars[name] = value
        else:
            sysvars = {}

        if MPI:
            root = self._problem.model
            desvars = self._gather_vars(root, desvars)
            responses = self._gather_vars(root, responses)
            objectives = self._gather_vars(root, objectives)
            constraints = self._gather_vars(root, constraints)
            sysvars = self._gather_vars(root, sysvars)

        data['des'] = desvars
        data['res'] = responses
        data['obj'] = objectives
        data['con'] = constraints
        data['sys'] = sysvars

        self._rec_mgr.record_iteration(self, data, metadata)

    def _gather_vars(self, root, local_vars):
        """
        Gather and return only variables listed in `local_vars` from the `root` System.

        Parameters
        ----------
        root : <System>
            the root System for the Problem
        local_vars : dict
            local variable names and values

        Returns
        -------
        dct : dict
            variable names and values.
        """
        # if trace:
        #     debug("gathering vars for recording in %s" % root.pathname)
        all_vars = root.comm.gather(local_vars, root=0)
        # if trace:
        #     debug("DONE gathering rec vars for %s" % root.pathname)

        if root.comm.rank == 0:
            dct = all_vars[-1]
            for d in all_vars[:-1]:
                dct.update(d)
            return dct

    def _get_name(self):
        """
        Get name of current Driver.

        Returns
        -------
        str
            Name of current Driver.
        """
        return "Driver"

    def set_simul_deriv_color(self, simul_info):
        """
        Set the coloring for simultaneous derivatives.

        Parameters
        ----------
        simul_info : str or ({dv1: colors, ...}, {resp1: {dv1: {0: [res_idxs, dv_idxs]} ...} ...})
            Information about simultaneous coloring for design vars and responses.  If a string,
            then simul_info is assumed to be the name of a file that contains the coloring
            information in JSON format.
        """
        if self.supports['simultaneous_derivatives']:
            self._simul_coloring_info = simul_info
        else:
            raise RuntimeError("Driver '%s' does not support simultaneous derivatives." %
                               self._get_name())

    def _setup_simul_coloring(self, mode='fwd'):
        """
        Set up metadata for simultaneous derivative solution.

        Parameters
        ----------
        mode : str
            Derivative direction, either 'fwd' or 'rev'.
        """
        if mode == 'rev':
            raise NotImplementedError("Simultaneous derivatives are currently not supported "
                                      "in 'rev' mode")

        # command line simul_coloring uses this env var to turn pre-existing coloring off
        if not _use_simul_coloring:
            return

        prom2abs = self._problem.model._var_allprocs_prom2abs_list['output']

        if isinstance(self._simul_coloring_info, string_types):
            with open(self._simul_coloring_info, 'r') as f:
                self._simul_coloring_info = json.load(f)

        coloring, maps = self._simul_coloring_info
        for dv, colors in iteritems(coloring):
            if dv not in self._designvars:
                # convert name from promoted to absolute
                dv = prom2abs[dv][0]
            self._designvars[dv]['simul_deriv_color'] = colors

        for res, dvdict in iteritems(maps):
            if res not in self._responses:
                # convert name from promoted to absolute
                res = prom2abs[res][0]
            self._responses[res]['simul_map'] = dvdict

            # iterate over a copy of the items since keys may be replaced below
            for dv, col_dict in list(dvdict.items()):
                col_dict = {int(k): v for k, v in iteritems(col_dict)}
                if dv not in self._designvars:
                    # convert name from promoted to absolute and replace dictionary key
                    del dvdict[dv]
                    dv = prom2abs[dv][0]
                dvdict[dv] = col_dict

    def _pre_run_model_debug_print(self):
        """
        Optionally print some debugging information before the model runs.
        """
        if not self.options['debug_print']:
            return

        if not MPI or MPI.COMM_WORLD.rank == 0:
            header = 'Driver debug print for iter coord: {}'.format(
                get_formatted_iteration_coordinate())
            print(header)
            print(len(header) * '-')

        if 'desvars' in self.options['debug_print']:
            desvar_vals = self.get_design_var_values()
            if not MPI or MPI.COMM_WORLD.rank == 0:
                print("Design Vars")
                if desvar_vals:
                    for name, value in iteritems(desvar_vals):
                        print("{}: {}".format(name, repr(value)))
                else:
                    print("None")
                print()

    def _post_run_model_debug_print(self):
        """
        Optionally print some debugging information after the model runs.
        """
        if 'nl_cons' in self.options['debug_print']:
            cons = self.get_constraint_values(lintype='nonlinear')
            if not MPI or MPI.COMM_WORLD.rank == 0:
                print("Nonlinear constraints")
                if cons:
                    for name, value in iteritems(cons):
                        print("{}: {}".format(name, repr(value)))
                else:
                    print("None")
                print()

        if 'ln_cons' in self.options['debug_print']:
            cons = self.get_constraint_values(lintype='linear')
            if not MPI or MPI.COMM_WORLD.rank == 0:
                print("Linear constraints")
                if cons:
                    for name, value in iteritems(cons):
                        print("{}: {}".format(name, repr(value)))
                else:
                    print("None")
                print()

        if 'objs' in self.options['debug_print']:
            objs = self.get_objective_values()
            if not MPI or MPI.COMM_WORLD.rank == 0:
                print("Objectives")
                if objs:
                    for name, value in iteritems(objs):
                        print("{}: {}".format(name, repr(value)))
                else:
                    print("None")
                print()
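The debug-print blocks above are driven entirely by the driver's `debug_print` option. A minimal usage sketch follows (the optimizer choice and model contents are placeholders; it assumes a problem whose model already defines design variables, an objective, and constraints where indicated):

import openmdao.api as om

prob = om.Problem()
# ... build prob.model and call add_design_var/add_objective/add_constraint ...

prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP')
prob.driver.options['debug_print'] = ['desvars', 'nl_cons', 'objs']

prob.setup()
prob.run_driver()   # prints the selected blocks once per driver iteration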
Example #25
0
File: driver.py Project: samtx/OpenMDAO
    def __init__(self):
        """
        Initialize the driver.
        """
        self._rec_mgr = RecordingManager()
        self._vars_to_record = {
            'desvarnames': set(),
            'responsenames': set(),
            'objectivenames': set(),
            'constraintnames': set(),
            'sysinclnames': set(),
        }

        self._problem = None
        self._designvars = None
        self._cons = None
        self._objs = None
        self._responses = None
        self.options = OptionsDictionary()
        self.recording_options = OptionsDictionary()

        ###########################
        self.options.declare('debug_print', types=list, is_valid=_is_debug_print_opts_valid,
                             desc="List of what type of Driver variables to print at each "
                             "iteration. Valid items in list are 'desvars','ln_cons',"
                             "'nl_cons','objs'",
                             default=[])

        ###########################
        self.recording_options.declare('record_metadata', types=bool, desc='Record metadata',
                                       default=True)
        self.recording_options.declare('record_desvars', types=bool, default=True,
                                       desc='Set to True to record design variables at the \
                                       driver level')
        self.recording_options.declare('record_responses', types=bool, default=False,
                                       desc='Set to True to record responses at the driver level')
        self.recording_options.declare('record_objectives', types=bool, default=True,
                                       desc='Set to True to record objectives at the \
                                       driver level')
        self.recording_options.declare('record_constraints', types=bool, default=True,
                                       desc='Set to True to record constraints at the \
                                       driver level')
        self.recording_options.declare('includes', types=list, default=['*'],
                                       desc='Patterns for variables to include in recording')
        self.recording_options.declare('excludes', types=list, default=[],
                                       desc='Patterns for vars to exclude in recording '
                                       '(processed post-includes)')
        self.recording_options.declare('record_derivatives', types=bool, default=False,
                                       desc='Set to True to record derivatives at the driver \
                                       level')
        ###########################

        # What the driver supports.
        self.supports = OptionsDictionary()
        self.supports.declare('inequality_constraints', types=bool, default=False)
        self.supports.declare('equality_constraints', types=bool, default=False)
        self.supports.declare('linear_constraints', types=bool, default=False)
        self.supports.declare('two_sided_constraints', types=bool, default=False)
        self.supports.declare('multiple_objectives', types=bool, default=False)
        self.supports.declare('integer_design_vars', types=bool, default=False)
        self.supports.declare('gradients', types=bool, default=False)
        self.supports.declare('active_set', types=bool, default=False)
        self.supports.declare('simultaneous_derivatives', types=bool, default=False)

        # Debug printing.
        self.debug_print = OptionsDictionary()
        self.debug_print.declare('debug_print', types=bool, default=False,
                                 desc='Overall option to turn on Driver debug printing')
        self.debug_print.declare('debug_print_desvars', types=bool, default=False,
                                 desc='Print design variables')
        self.debug_print.declare('debug_print_nl_con', types=bool, default=False,
                                 desc='Print nonlinear constraints')
        self.debug_print.declare('debug_print_ln_con', types=bool, default=False,
                                 desc='Print linear constraints')
        self.debug_print.declare('debug_print_objective', types=bool, default=False,
                                 desc='Print objectives')

        self.iter_count = 0
        self.metadata = None
        self._model_viewer_data = None
        self.cite = ""

        # TODO, support these in OpenMDAO
        self.supports.declare('integer_design_vars', types=bool, default=False)

        self._simul_coloring_info = None
        self._res_jacs = {}

        self.fail = False
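The `supports` dictionary declared above is how concrete drivers advertise which features they can handle; the base class defaults everything to False and subclasses flip the relevant flags after calling the base constructor. A minimal sketch of that pattern (the subclass name is hypothetical, and the usual openmdao.api alias is assumed):

import openmdao.api as om

class MyGradientFreeDriver(om.Driver):
    """Hypothetical subclass; only illustrates toggling the 'supports' flags."""

    def __init__(self):
        super(MyGradientFreeDriver, self).__init__()
        # this driver handles both constraint types but declares no gradients
        self.supports['inequality_constraints'] = True
        self.supports['equality_constraints'] = True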
Example #26
0
    def __init__(self, **kwargs):
        """
        Initialize the driver.

        Parameters
        ----------
        **kwargs : dict of keyword arguments
            Keyword arguments that will be mapped into the Driver options.
        """
        self._rec_mgr = RecordingManager()

        self._problem = None
        self._designvars = None
        self._designvars_discrete = []
        self._cons = None
        self._objs = None
        self._responses = None

        # Driver options
        self.options = OptionsDictionary(parent_name=type(self).__name__)

        self.options.declare(
            'debug_print',
            types=list,
            check_valid=_check_debug_print_opts_valid,
            desc="List of what type of Driver variables to print at each "
            "iteration. Valid items in list are 'desvars', 'ln_cons', "
            "'nl_cons', 'objs', 'totals'",
            default=[])

        # Case recording options
        self.recording_options = OptionsDictionary(
            parent_name=type(self).__name__)

        self.recording_options.declare(
            'record_model_metadata',
            types=bool,
            default=True,
            desc='Record metadata for all Systems in the model')
        self.recording_options.declare(
            'record_desvars',
            types=bool,
            default=True,
            desc='Set to True to record design variables at the '
            'driver level')
        self.recording_options.declare(
            'record_responses',
            types=bool,
            default=False,
            desc='Set to True to record responses at the driver level')
        self.recording_options.declare(
            'record_objectives',
            types=bool,
            default=True,
            desc='Set to True to record objectives at the driver level')
        self.recording_options.declare(
            'record_constraints',
            types=bool,
            default=True,
            desc='Set to True to record constraints at the '
            'driver level')
        self.recording_options.declare(
            'includes',
            types=list,
            default=[],
            desc='Patterns for variables to include in recording')
        self.recording_options.declare(
            'excludes',
            types=list,
            default=[],
            desc='Patterns for vars to exclude in recording '
            '(processed post-includes)')
        self.recording_options.declare(
            'record_derivatives',
            types=bool,
            default=False,
            desc='Set to True to record derivatives at the driver '
            'level')
        self.recording_options.declare(
            'record_inputs',
            types=bool,
            default=True,
            desc='Set to True to record inputs at the driver level')

        # What the driver supports.
        self.supports = OptionsDictionary(parent_name=type(self).__name__)
        self.supports.declare('inequality_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('equality_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('linear_constraints', types=bool, default=False)
        self.supports.declare('two_sided_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('multiple_objectives', types=bool, default=False)
        self.supports.declare('integer_design_vars', types=bool, default=False)
        self.supports.declare('gradients', types=bool, default=False)
        self.supports.declare('active_set', types=bool, default=False)
        self.supports.declare('simultaneous_derivatives',
                              types=bool,
                              default=False)
        self.supports.declare('total_jac_sparsity', types=bool, default=False)

        self.iter_count = 0
        self.cite = ""

        self._coloring_info = coloring_mod._DEF_COMP_SPARSITY_ARGS.copy()
        self._coloring_info['coloring'] = None
        self._coloring_info['dynamic'] = False
        self._coloring_info['static'] = None

        self._total_jac_sparsity = None
        self._res_jacs = {}
        self._total_jac = None

        self.fail = False

        self._declare_options()
        self.options.update(kwargs)
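Because `_declare_options()` runs from the constructor before `self.options.update(kwargs)`, a subclass can declare its own options and have constructor keyword arguments mapped straight into them. A minimal sketch (the subclass and option names are hypothetical):

import openmdao.api as om

class SamplingDriver(om.Driver):
    """Hypothetical subclass illustrating the _declare_options / kwargs pattern."""

    def _declare_options(self):
        # called from Driver.__init__ before the kwargs are applied
        self.options.declare('samples', types=int, default=50,
                             desc='Number of points to evaluate')

driver = SamplingDriver(samples=200, debug_print=['desvars'])
print(driver.options['samples'])      # -> 200
print(driver.options['debug_print'])  # -> ['desvars']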
Example #27
0
    def __init__(self, **kwargs):
        """
        Initialize the driver.

        Parameters
        ----------
        **kwargs : dict of keyword arguments
            Keyword arguments that will be mapped into the Driver options.
        """
        self._rec_mgr = RecordingManager()
        self._vars_to_record = {
            'desvarnames': set(),
            'responsenames': set(),
            'objectivenames': set(),
            'constraintnames': set(),
            'sysinclnames': set(),
        }

        self._problem = None
        self._designvars = None
        self._cons = None
        self._objs = None
        self._responses = None

        # Driver options
        self.options = OptionsDictionary()

        self.options.declare('debug_print', types=list, check_valid=_check_debug_print_opts_valid,
                             desc="List of what type of Driver variables to print at each "
                                  "iteration. Valid items in list are 'desvars', 'ln_cons', "
                                  "'nl_cons', 'objs', 'totals'",
                             default=[])

        # Case recording options
        self.recording_options = OptionsDictionary()

        self.recording_options.declare('record_model_metadata', types=bool, default=True,
                                       desc='Record metadata for all Systems in the model')
        self.recording_options.declare('record_desvars', types=bool, default=True,
                                       desc='Set to True to record design variables at the '
                                            'driver level')
        self.recording_options.declare('record_responses', types=bool, default=False,
                                       desc='Set to True to record responses at the driver level')
        self.recording_options.declare('record_objectives', types=bool, default=True,
                                       desc='Set to True to record objectives at the driver level')
        self.recording_options.declare('record_constraints', types=bool, default=True,
                                       desc='Set to True to record constraints at the '
                                            'driver level')
        self.recording_options.declare('includes', types=list, default=[],
                                       desc='Patterns for variables to include in recording')
        self.recording_options.declare('excludes', types=list, default=[],
                                       desc='Patterns for vars to exclude in recording '
                                            '(processed post-includes)')
        self.recording_options.declare('record_derivatives', types=bool, default=False,
                                       desc='Set to True to record derivatives at the driver '
                                            'level')
        self.recording_options.declare('record_inputs', types=bool, default=True,
                                       desc='Set to True to record inputs at the driver level')

        # What the driver supports.
        self.supports = OptionsDictionary()
        self.supports.declare('inequality_constraints', types=bool, default=False)
        self.supports.declare('equality_constraints', types=bool, default=False)
        self.supports.declare('linear_constraints', types=bool, default=False)
        self.supports.declare('two_sided_constraints', types=bool, default=False)
        self.supports.declare('multiple_objectives', types=bool, default=False)
        self.supports.declare('integer_design_vars', types=bool, default=False)
        self.supports.declare('gradients', types=bool, default=False)
        self.supports.declare('active_set', types=bool, default=False)
        self.supports.declare('simultaneous_derivatives', types=bool, default=False)
        self.supports.declare('total_jac_sparsity', types=bool, default=False)

        self.iter_count = 0
        self._model_viewer_data = None
        self.cite = ""

        self._simul_coloring_info = None
        self._total_jac_sparsity = None
        self._res_jacs = {}
        self._total_jac = None

        self.fail = False

        self._declare_options()
        self.options.update(kwargs)
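The recording options declared above only take effect once a recorder is attached to the driver. A minimal sketch of wiring one up (the SQLite filename and the exclude pattern are placeholders; it assumes a model with design variables and an objective has been built where indicated):

import openmdao.api as om

prob = om.Problem()
# ... build prob.model and declare design vars / objective / constraints ...
prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP')

prob.driver.add_recorder(om.SqliteRecorder('driver_cases.sql'))
prob.driver.recording_options['record_desvars'] = True
prob.driver.recording_options['includes'] = ['*']        # promoted-name patterns
prob.driver.recording_options['excludes'] = ['*_aux']    # applied after includes

prob.setup()
prob.run_driver()
prob.cleanup()   # shuts recorders down via Driver.cleanup()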
Example #28
0
class Driver(object):
    """
    Top-level container for the systems and drivers.

    Attributes
    ----------
    fail : bool
        Reports whether the driver ran successfully.
    iter_count : int
        Keep track of iterations for case recording.
    options : <OptionsDictionary>
        Dictionary with general driver options.
    recording_options : <OptionsDictionary>
        Dictionary with driver recording options.
    cite : str
        Listing of relevant citations that should be referenced when
        publishing work that uses this class.
    _problem : weakref to <Problem>
        Pointer to the containing problem.
    supports : <OptionsDictionary>
        Provides a consistent way for drivers to declare what features they support.
    _designvars : dict
        Contains all design variable info.
    _designvars_discrete : list
        List of design variables that are discrete.
    _dist_driver_vars : dict
        Dict of constraints that are distributed outputs. Key is rank, values are
        (local indices, local sizes).
    _cons : dict
        Contains all constraint info.
    _objs : dict
        Contains all objective info.
    _responses : dict
        Contains all response info.
    _remote_dvs : dict
        Dict of design variables that are remote on at least one proc. Values are
        (owning rank, size).
    _remote_cons : dict
        Dict of constraints that are remote on at least one proc. Values are
        (owning rank, size).
    _remote_objs : dict
        Dict of objectives that are remote on at least one proc. Values are
        (owning rank, size).
    _rec_mgr : <RecordingManager>
        Object that manages all recorders added to this driver.
    _coloring_info : dict
        Metadata pertaining to total coloring.
    _total_jac_sparsity : dict, str, or None
        Specifies sparsity of sub-jacobians of the total jacobian. Only used by pyOptSparseDriver.
    _res_jacs : dict
        Dict of sparse subjacobians for use with certain optimizers, e.g. pyOptSparseDriver.
    _total_jac : _TotalJacInfo or None
        Cached total jacobian handling object.
    """
    def __init__(self, **kwargs):
        """
        Initialize the driver.

        Parameters
        ----------
        **kwargs : dict of keyword arguments
            Keyword arguments that will be mapped into the Driver options.
        """
        self._rec_mgr = RecordingManager()

        self._problem = None
        self._designvars = None
        self._designvars_discrete = []
        self._cons = None
        self._objs = None
        self._responses = None

        # Driver options
        self.options = OptionsDictionary(parent_name=type(self).__name__)

        self.options.declare(
            'debug_print',
            types=list,
            check_valid=_check_debug_print_opts_valid,
            desc="List of what type of Driver variables to print at each "
            "iteration. Valid items in list are 'desvars', 'ln_cons', "
            "'nl_cons', 'objs', 'totals'",
            default=[])

        # Case recording options
        self.recording_options = OptionsDictionary(
            parent_name=type(self).__name__)

        self.recording_options.declare(
            'record_model_metadata',
            types=bool,
            default=True,
            desc='Deprecated. Recording of model metadata will always '
            'be done',
            deprecation="The recording option, record_model_metadata, "
            "on Driver is "
            "deprecated. Recording of model metadata will always "
            "be done",
        )
        self.recording_options.declare(
            'record_desvars',
            types=bool,
            default=True,
            desc='Set to True to record design variables at the '
            'driver level')
        self.recording_options.declare(
            'record_responses',
            types=bool,
            default=False,
            desc='Set True to record constraints and objectives at the '
            'driver level')

        self.recording_options.declare(
            'record_objectives',
            types=bool,
            default=True,
            desc='Set to True to record objectives at the driver level')
        self.recording_options.declare(
            'record_constraints',
            types=bool,
            default=True,
            desc='Set to True to record constraints at the '
            'driver level')
        self.recording_options.declare(
            'includes',
            types=list,
            default=[],
            desc='Patterns for variables to include in recording. '
            'Uses fnmatch wildcards')
        self.recording_options.declare(
            'excludes',
            types=list,
            default=[],
            desc='Patterns for vars to exclude in recording '
            '(processed post-includes). Uses fnmatch wildcards')
        self.recording_options.declare(
            'record_derivatives',
            types=bool,
            default=False,
            desc='Set to True to record derivatives at the driver '
            'level')
        self.recording_options.declare(
            'record_inputs',
            types=bool,
            default=True,
            desc='Set to True to record inputs at the driver level')
        self.recording_options.declare(
            'record_outputs',
            types=bool,
            default=True,
            desc='Set True to record outputs at the '
            'driver level.')
        self.recording_options.declare(
            'record_residuals',
            types=bool,
            default=False,
            desc='Set True to record residuals at the '
            'driver level.')

        # What the driver supports.
        self.supports = OptionsDictionary(parent_name=type(self).__name__)
        self.supports.declare('inequality_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('equality_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('linear_constraints', types=bool, default=False)
        self.supports.declare('two_sided_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('multiple_objectives', types=bool, default=False)
        self.supports.declare('integer_design_vars', types=bool, default=True)
        self.supports.declare('gradients', types=bool, default=False)
        self.supports.declare('active_set', types=bool, default=False)
        self.supports.declare('simultaneous_derivatives',
                              types=bool,
                              default=False)
        self.supports.declare('total_jac_sparsity', types=bool, default=False)

        self.iter_count = 0
        self.cite = ""

        self._coloring_info = coloring_mod._get_coloring_meta()

        self._total_jac_sparsity = None
        self._res_jacs = {}
        self._total_jac = None

        self.fail = False

        self._declare_options()
        self.options.update(kwargs)

    @property
    def msginfo(self):
        """
        Return info to prepend to messages.

        Returns
        -------
        str
            Info to prepend to messages.
        """
        return type(self).__name__

    def add_recorder(self, recorder):
        """
        Add a recorder to the driver.

        Parameters
        ----------
        recorder : CaseRecorder
           A recorder instance.
        """
        self._rec_mgr.append(recorder)

    def cleanup(self):
        """
        Clean up resources prior to exit.
        """
        # shut down all recorders
        self._rec_mgr.shutdown()

    def _declare_options(self):
        """
        Declare options before kwargs are processed in the init method.

        This is optionally implemented by subclasses of Driver.
        """
        pass

    def _setup_comm(self, comm):
        """
        Perform any driver-specific setup of communicators for the model.

        Parameters
        ----------
        comm : MPI.Comm or <FakeComm> or None
            The communicator for the Problem.

        Returns
        -------
        MPI.Comm or <FakeComm> or None
            The communicator for the Problem model.
        """
        return comm

    def _setup_driver(self, problem):
        """
        Prepare the driver for execution.

        This is the final thing to run during setup.

        Parameters
        ----------
        problem : <Problem>
            Pointer to the containing problem.
        """
        self._problem = weakref.ref(problem)
        model = problem.model

        self._total_jac = None

        self._has_scaling = (np.any(
            [r['total_scaler'] is not None for r in self._responses.values()])
                             or np.any([
                                 dv['total_scaler'] is not None
                                 for dv in self._designvars.values()
                             ]))

        # Determine if any design variables are discrete.
        self._designvars_discrete = [
            name for name, meta in self._designvars.items()
            if meta['ivc_source'] in model._discrete_outputs
        ]
        if not self.supports['integer_design_vars'] and len(
                self._designvars_discrete) > 0:
            msg = "Discrete design variables are not supported by this driver: "
            msg += ', '.join(self._designvars_discrete)
            raise RuntimeError(msg)

        con_set = set()
        obj_set = set()
        dv_set = set()

        self._remote_dvs = remote_dv_dict = {}
        self._remote_cons = remote_con_dict = {}
        self._dist_driver_vars = dist_dict = {}
        self._remote_objs = remote_obj_dict = {}

        src_design_vars = prom2ivc_src_dict(self._designvars)
        src_cons = prom2ivc_src_dict(self._cons)
        src_objs = prom2ivc_src_dict(self._objs)
        responses = prom2ivc_src_dict(self._responses)

        # Now determine if later we'll need to allgather cons, objs, or desvars.
        if model.comm.size > 1 and model._subsystems_allprocs:
            local_out_vars = set(model._outputs._abs_iter())
            remote_dvs = set(src_design_vars) - local_out_vars
            remote_cons = set(src_cons) - local_out_vars
            remote_objs = set(src_objs) - local_out_vars

            all_remote_vois = model.comm.allgather(
                (remote_dvs, remote_cons, remote_objs))
            for rem_dvs, rem_cons, rem_objs in all_remote_vois:
                con_set.update(rem_cons)
                obj_set.update(rem_objs)
                dv_set.update(rem_dvs)

            # If we have remote VOIs, pick an owning rank for each and use that
            # to bcast to others later
            owning_ranks = model._owning_rank
            sizes = model._var_sizes['nonlinear']['output']
            abs2meta = model._var_allprocs_abs2meta
            rank = model.comm.rank
            nprocs = model.comm.size
            for i, vname in enumerate(model._var_allprocs_abs_names['output']):
                if vname in responses:
                    indices = responses[vname].get('indices')
                elif vname in src_design_vars:
                    indices = src_design_vars[vname].get('indices')
                else:
                    continue

                if abs2meta[vname]['distributed']:

                    idx = model._var_allprocs_abs2idx['nonlinear'][vname]
                    dist_sizes = model._var_sizes['nonlinear']['output'][:,
                                                                         idx]
                    total_dist_size = np.sum(dist_sizes)

                    # Determine which indices are on our proc.
                    offsets = sizes2offsets(dist_sizes)

                    if indices is not None:
                        indices = convert_neg(indices, total_dist_size)
                        true_sizes = np.zeros(nprocs, dtype=INT_DTYPE)
                        for irank in range(nprocs):
                            dist_inds = indices[np.logical_and(
                                indices >= offsets[irank], indices <
                                (offsets[irank] + dist_sizes[irank]))]
                            if irank == rank:
                                local_indices = dist_inds - offsets[rank]
                                distrib_indices = dist_inds

                            true_sizes[irank] = dist_inds.size
                        dist_dict[vname] = (local_indices, true_sizes,
                                            distrib_indices)
                    else:
                        dist_dict[vname] = (_full_slice, dist_sizes,
                                            slice(
                                                offsets[rank], offsets[rank] +
                                                dist_sizes[rank]))

                else:
                    owner = owning_ranks[vname]
                    sz = sizes[owner, i]

                    if vname in dv_set:
                        remote_dv_dict[vname] = (owner, sz)
                    if vname in con_set:
                        remote_con_dict[vname] = (owner, sz)
                    if vname in obj_set:
                        remote_obj_dict[vname] = (owner, sz)

        self._remote_responses = self._remote_cons.copy()
        self._remote_responses.update(self._remote_objs)

        # set up simultaneous deriv coloring
        if coloring_mod._use_total_sparsity:
            # reset the coloring
            if self._coloring_info['dynamic'] or self._coloring_info[
                    'static'] is not None:
                self._coloring_info['coloring'] = None

            coloring = self._get_static_coloring()
            if coloring is not None and self.supports[
                    'simultaneous_derivatives']:
                if model._owns_approx_jac:
                    coloring._check_config_partial(model)
                else:
                    coloring._check_config_total(self)
                self._setup_simul_coloring()

    def _check_for_missing_objective(self):
        """
        Check for missing objective and raise error if no objectives found.
        """
        if len(self._objs) == 0:
            msg = "Driver requires objective to be declared"
            raise RuntimeError(msg)

    def _get_vars_to_record(self, recording_options):
        """
        Get variables to record based on recording options.

        Parameters
        ----------
        recording_options : <OptionsDictionary>
            Dictionary with recording options.

        Returns
        -------
        dict
           Dictionary containing lists of variables to record.
        """
        problem = self._problem()
        model = problem.model

        incl = recording_options['includes']
        excl = recording_options['excludes']

        # includes and excludes for outputs are specified using promoted names
        abs2prom = model._var_allprocs_abs2prom['output']

        # 1. If record_outputs is True, get the set of outputs
        # 2. Filter those using includes and excludes to get the baseline set of variables to record
        # 3. Add or remove from that set any desvars, objs, and cons based on the recording
        #    options of those

        # includes and excludes for outputs are specified using _promoted_ names
        # vectors are keyed on absolute name, discretes on relative/promoted name
        myinputs = myoutputs = myresiduals = []

        if recording_options['record_outputs']:
            myoutputs = sorted([
                n for n, prom in abs2prom.items()
                if check_path(prom, incl, excl)
            ])

            model_outs = model._outputs

            if model._var_discrete['output']:
                # if we have discrete outputs then residual name set doesn't match output one
                if recording_options['record_residuals']:
                    myresiduals = [
                        n for n in myoutputs if model_outs._contains_abs(n)
                    ]
            elif recording_options['record_residuals']:
                myresiduals = myoutputs

        elif recording_options['record_residuals']:
            myresiduals = [
                n for n in model._residuals._abs_iter()
                if check_path(abs2prom[n], incl, excl)
            ]

        myoutputs = set(myoutputs)
        if recording_options['record_desvars']:
            myoutputs.update(self._designvars)
        if recording_options['record_objectives'] or recording_options[
                'record_responses']:
            myoutputs.update(self._objs)
        if recording_options['record_constraints'] or recording_options[
                'record_responses']:
            myoutputs.update(self._cons)

        # inputs (if in options). inputs use _absolute_ names for includes/excludes
        if 'record_inputs' in recording_options:
            if recording_options['record_inputs']:
                # sort the results since _var_allprocs_abs2prom isn't ordered
                myinputs = sorted([
                    n for n in model._var_allprocs_abs2prom['input']
                    if check_path(n, incl, excl)
                ])

        vars2record = {
            'input': myinputs,
            'output': list(myoutputs),
            'residual': myresiduals
        }

        return vars2record

    def _setup_recording(self):
        """
        Set up case recording.
        """
        self._filtered_vars_to_record = self._get_vars_to_record(
            self.recording_options)

        self._rec_mgr.startup(self)

    def _get_voi_val(self,
                     name,
                     meta,
                     remote_vois,
                     driver_scaling=True,
                     rank=None):
        """
        Get the value of a variable of interest (objective, constraint, or design var).

        This will retrieve the value if the VOI is remote.

        Parameters
        ----------
        name : str
            Name of the variable of interest.
        meta : dict
            Metadata for the variable of interest.
        remote_vois : dict
            Dict containing (owning_rank, size) for all remote vois of a particular
            type (design var, constraint, or objective).
        driver_scaling : bool
            When True, return values that are scaled according to either the adder and scaler or
            the ref and ref0 values that were specified when add_design_var, add_objective, and
            add_constraint were called on the model. Default is True.
        rank : int or None
            If not None, gather value to this rank only.

        Returns
        -------
        float or ndarray
            The value of the named variable of interest.
        """
        model = self._problem().model
        comm = model.comm
        get = model._outputs._abs_get_val
        distributed_vars = self._dist_driver_vars
        indices = meta['indices']

        if meta.get('ivc_source') is not None:
            src_name = meta['ivc_source']
        else:
            src_name = name

        if MPI:
            distributed = comm.size > 0 and src_name in distributed_vars
        else:
            distributed = False

        if src_name in remote_vois:
            owner, size = remote_vois[src_name]
            # if var is distributed or only gathering to one rank
            # TODO - support distributed var under a parallel group.
            if owner is None or rank is not None:
                val = model.get_val(src_name,
                                    get_remote=True,
                                    rank=rank,
                                    flat=True)
                if indices is not None:
                    val = val[indices]
            else:
                if owner == comm.rank:
                    if indices is None:
                        val = get(name).copy()
                    else:
                        val = get(name)[indices]
                else:
                    if indices is not None:
                        size = len(indices)
                    val = np.empty(size)

                comm.Bcast(val, root=owner)

        elif distributed:
            local_val = model.get_val(src_name, flat=True)
            local_indices, sizes, _ = distributed_vars[src_name]
            if local_indices is not None:
                local_val = local_val[local_indices]
            offsets = np.zeros(sizes.size, dtype=INT_DTYPE)
            offsets[1:] = np.cumsum(sizes[:-1])
            val = np.zeros(np.sum(sizes))
            comm.Allgatherv(local_val, [val, sizes, offsets, MPI.DOUBLE])

        else:
            if name in self._designvars_discrete:
                val = model._discrete_outputs[src_name]

                # At present, only integers are supported by OpenMDAO drivers.
                # We check the values here.
                msg = "Only integer scalars or ndarrays are supported as values for " + \
                      "discrete variables when used as a design variable. "
                if np.isscalar(val) and not isinstance(val, (int, np.integer)):
                    msg += "A value of type '{}' was specified.".format(
                        val.__class__.__name__)
                    raise ValueError(msg)
                elif isinstance(val, np.ndarray) and not np.issubdtype(
                        val[0], np.integer):
                    msg += "An array of type '{}' was specified.".format(
                        val[0].__class__.__name__)
                    raise ValueError(msg)

            elif indices is None:
                val = get(src_name).copy()
            else:
                val = get(src_name)[indices]

        if self._has_scaling and driver_scaling:
            # Scale design variable values
            adder = meta['total_adder']
            if adder is not None:
                val += adder

            scaler = meta['total_scaler']
            if scaler is not None:
                val *= scaler

        return val
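    # Note on driver scaling (illustrative, not from the original source): when
    # ref/ref0 are given to add_design_var/add_objective/add_constraint, OpenMDAO
    # builds total_adder = -ref0 and total_scaler = 1.0 / (ref - ref0), so the
    # value handed to the driver is
    #     scaled = (unscaled + adder) * scaler
    # e.g. ref0=10.0, ref=20.0 gives adder=-10.0, scaler=0.1, and an unscaled
    # value of 15.0 is reported as 0.5.  set_design_var() below applies the
    # inverse (divide by scaler, then subtract adder) when writing back.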

    def get_design_var_values(self):
        """
        Return the design variable values.

        Returns
        -------
        dict
           Dictionary containing values of each design variable.
        """
        return {
            n: self._get_voi_val(n, dv, self._remote_dvs)
            for n, dv in self._designvars.items()
        }

    def set_design_var(self, name, value):
        """
        Set the value of a design variable.

        Parameters
        ----------
        name : str
            Global pathname of the design variable.
        value : float or ndarray
            Value for the design variable.
        """
        problem = self._problem()
        meta = self._designvars[name]

        src_name = meta['ivc_source']

        # if the value is not local, don't set the value
        if (src_name in self._remote_dvs
                and problem.model._owning_rank[src_name] != problem.comm.rank):
            return

        indices = meta['indices']
        if indices is None:
            indices = _full_slice

        if name in self._designvars_discrete:

            # Note, drivers set values here and generally should know it is setting an integer.
            # However, the DOEdriver may pull a non-integer value from its generator, so we
            # convert it.
            if isinstance(value, float):
                value = int(value)
            elif isinstance(value, np.ndarray):
                if isinstance(problem.model._discrete_outputs[src_name], int):
                    # Setting an integer value with a 1D array - don't want to convert to array.
                    value = int(value)
                else:
                    value = value.astype(int)

            problem.model._discrete_outputs[src_name] = value

        elif problem.model._outputs._contains_abs(src_name):
            desvar = problem.model._outputs._abs_get_val(src_name)
            if src_name in self._dist_driver_vars:
                loc_idxs, _, dist_idxs = self._dist_driver_vars[src_name]
            else:
                loc_idxs = indices
                dist_idxs = _full_slice

            desvar[loc_idxs] = np.atleast_1d(value)[dist_idxs]

            # Undo driver scaling when setting design var values into model.
            if self._has_scaling:
                scaler = meta['total_scaler']
                if scaler is not None:
                    desvar[loc_idxs] *= 1.0 / scaler

                adder = meta['total_adder']
                if adder is not None:
                    desvar[loc_idxs] -= adder

    def get_objective_values(self, driver_scaling=True):
        """
        Return objective values.

        Parameters
        ----------
        driver_scaling : bool
            When True, return values that are scaled according to either the adder and scaler or
            the ref and ref0 values that were specified when add_design_var, add_objective, and
            add_constraint were called on the model. Default is True.

        Returns
        -------
        dict
           Dictionary containing values of each objective.
        """
        return {
            n: self._get_voi_val(n,
                                 obj,
                                 self._remote_objs,
                                 driver_scaling=driver_scaling)
            for n, obj in self._objs.items()
        }

    def get_constraint_values(self,
                              ctype='all',
                              lintype='all',
                              driver_scaling=True):
        """
        Return constraint values.

        Parameters
        ----------
        ctype : string
            Default is 'all'. Optionally return just the inequality constraints
            with 'ineq' or the equality constraints with 'eq'.
        lintype : string
            Default is 'all'. Optionally return just the linear constraints
            with 'linear' or the nonlinear constraints with 'nonlinear'.
        driver_scaling : bool
            When True, return values that are scaled according to either the adder and scaler or
            the ref and ref0 values that were specified when add_design_var, add_objective, and
            add_constraint were called on the model. Default is True.

        Returns
        -------
        dict
           Dictionary containing values of each constraint.
        """
        con_dict = {}
        for name, meta in self._cons.items():
            if lintype == 'linear' and not meta['linear']:
                continue

            if lintype == 'nonlinear' and meta['linear']:
                continue

            if ctype == 'eq' and meta['equals'] is None:
                continue

            if ctype == 'ineq' and meta['equals'] is not None:
                continue

            con_dict[name] = self._get_voi_val(name,
                                               meta,
                                               self._remote_cons,
                                               driver_scaling=driver_scaling)

        return con_dict

    def _get_ordered_nl_responses(self):
        """
        Return the names of nonlinear responses in the order used by the driver.

        Default order is objectives followed by nonlinear constraints.  This is used for
        simultaneous derivative coloring and sparsity determination.

        Returns
        -------
        list of str
            The nonlinear response names in order.
        """
        order = list(self._objs)
        order.extend(n for n, meta in self._cons.items()
                     if not ('linear' in meta and meta['linear']))
        return order

    def _update_voi_meta(self, model):
        """
        Collect response and design var metadata from the model and size desvars and responses.

        Parameters
        ----------
        model : System
            The System that represents the entire model.

        Returns
        -------
        int
            Total size of responses, with linear constraints excluded.
        int
            Total size of design vars.
        """
        self._objs = objs = OrderedDict()
        self._cons = cons = OrderedDict()

        model._setup_driver_units()

        self._responses = resps = model.get_responses(recurse=True,
                                                      use_prom_ivc=True)
        for name, data in resps.items():
            if data['type'] == 'con':
                cons[name] = data
            else:
                objs[name] = data

        response_size = sum(resps[n]['size']
                            for n in self._get_ordered_nl_responses())

        # Gather up the information for design vars.
        self._designvars = designvars = model.get_design_vars(
            recurse=True, use_prom_ivc=True)
        desvar_size = sum(data['size'] for data in designvars.values())

        return response_size, desvar_size

    def run(self):
        """
        Execute this driver.

        The base `Driver` just runs the model. All other drivers overload
        this method.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        with RecordingDebugging(self._get_name(), self.iter_count, self):
            self._problem().model.run_solve_nonlinear()

        self.iter_count += 1
        return False
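
    # Usage sketch (illustration only): drivers are normally run through the Problem
    # rather than directly, e.g.
    #
    #     prob.setup()
    #     failed = prob.run_driver()   # invokes this run() inside a recording context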

    @property
    def _recording_iter(self):
        return self._problem()._metadata['recording_iter']

    def _compute_totals(self,
                        of=None,
                        wrt=None,
                        return_format='flat_dict',
                        global_names=None,
                        use_abs_names=True):
        """
        Compute derivatives of desired quantities with respect to desired inputs.

        All derivatives are returned using driver scaling.

        Parameters
        ----------
        of : list of variable name strings or None
            Variables whose derivatives will be computed. Default is None, which
            uses the driver's objectives and constraints.
        wrt : list of variable name strings or None
            Variables with respect to which the derivatives will be computed.
            Default is None, which uses the driver's desvars.
        return_format : string
            Format to return the derivatives. Default is a 'flat_dict', which
            returns them in a dictionary whose keys are tuples of form (of, wrt). For
            the scipy optimizer, 'array' is also supported.
        global_names : bool
            Deprecated.  Use 'use_abs_names' instead.
        use_abs_names : bool
            Set to True when passing in absolute names to skip some translation steps.

        Returns
        -------
        derivs : object
            Derivatives in form requested by 'return_format'.
        """
        problem = self._problem()
        total_jac = self._total_jac
        debug_print = 'totals' in self.options['debug_print'] and (
            not MPI or problem.comm.rank == 0)

        if debug_print:
            header = 'Driver total derivatives for iteration: ' + str(
                self.iter_count)
            print(header)
            print(len(header) * '-' + '\n')

        if global_names is not None:
            warn_deprecation(
                "'global_names' is deprecated in calls to _compute_totals. "
                "Use 'use_abs_names' instead.")
            use_abs_names = global_names

        if problem.model._owns_approx_jac:
            self._recording_iter.push(('_compute_totals_approx', 0))

            try:
                if total_jac is None:
                    total_jac = _TotalJacInfo(problem,
                                              of,
                                              wrt,
                                              use_abs_names,
                                              return_format,
                                              approx=True,
                                              debug_print=debug_print)

                    # Don't cache linear constraint jacobian
                    if not total_jac.has_lin_cons:
                        self._total_jac = total_jac

                    totals = total_jac.compute_totals_approx(initialize=True)
                else:
                    totals = total_jac.compute_totals_approx()
            finally:
                self._recording_iter.pop()

        else:
            if total_jac is None:
                total_jac = _TotalJacInfo(problem,
                                          of,
                                          wrt,
                                          use_abs_names,
                                          return_format,
                                          debug_print=debug_print)

                # don't cache linear constraint jacobian
                if not total_jac.has_lin_cons:
                    self._total_jac = total_jac

            self._recording_iter.push(('_compute_totals', 0))

            try:
                totals = total_jac.compute_totals()
            finally:
                self._recording_iter.pop()

        if self._rec_mgr._recorders and self.recording_options[
                'record_derivatives']:
            metadata = create_local_meta(self._get_name())
            total_jac.record_derivatives(self, metadata)

        return totals
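
    # Usage sketch (illustrative names): optimizer-facing drivers typically call this
    # after running the model, e.g.
    #
    #     totals = self._compute_totals(of=['obj', 'con1'], wrt=['x', 'z'],
    #                                   return_format='flat_dict')
    #     dobj_dx = totals['obj', 'x']   # driver-scaled total derivative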

    def record_iteration(self):
        """
        Record an iteration of the current Driver.
        """
        record_iteration(self, self._problem(), self._get_name())

    def _get_recorder_metadata(self, case_name):
        """
        Return metadata from the latest iteration for use in the recorder.

        Parameters
        ----------
        case_name : str
            Name of current case.

        Returns
        -------
        dict
            Metadata dictionary for the recorder.
        """
        return create_local_meta(case_name)

    def _get_name(self):
        """
        Get name of current Driver.

        Returns
        -------
        str
            Name of current Driver.
        """
        return "Driver"

    def declare_coloring(self,
                         num_full_jacs=coloring_mod._DEF_COMP_SPARSITY_ARGS['num_full_jacs'],
                         tol=coloring_mod._DEF_COMP_SPARSITY_ARGS['tol'],
                         orders=coloring_mod._DEF_COMP_SPARSITY_ARGS['orders'],
                         perturb_size=coloring_mod._DEF_COMP_SPARSITY_ARGS['perturb_size'],
                         min_improve_pct=coloring_mod._DEF_COMP_SPARSITY_ARGS['min_improve_pct'],
                         show_summary=coloring_mod._DEF_COMP_SPARSITY_ARGS['show_summary'],
                         show_sparsity=coloring_mod._DEF_COMP_SPARSITY_ARGS['show_sparsity']):
        """
        Set options for total deriv coloring.

        Parameters
        ----------
        num_full_jacs : int
            Number of times to repeat partial jacobian computation when computing sparsity.
        tol : float
            Tolerance used to determine if an array entry is nonzero during sparsity determination.
        orders : int
            Number of orders above and below the tolerance to check during the tolerance sweep.
        perturb_size : float
            Size of input/output perturbation during generation of sparsity.
        min_improve_pct : float
            If coloring does not improve (decrease) the number of solves more than the given
            percentage, coloring will not be used.
        show_summary : bool
            If True, display summary information after generating coloring.
        show_sparsity : bool
            If True, display sparsity with coloring info after generating coloring.
        """
        self._coloring_info['num_full_jacs'] = num_full_jacs
        self._coloring_info['tol'] = tol
        self._coloring_info['orders'] = orders
        self._coloring_info['perturb_size'] = perturb_size
        self._coloring_info['min_improve_pct'] = min_improve_pct
        if self._coloring_info['static'] is None:
            self._coloring_info['dynamic'] = True
        else:
            self._coloring_info['dynamic'] = False
        self._coloring_info['coloring'] = None
        self._coloring_info['show_summary'] = show_summary
        self._coloring_info['show_sparsity'] = show_sparsity
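
    # Usage sketch (illustrative values): enable dynamic total-derivative coloring on
    # a driver that supports simultaneous derivatives, e.g.
    #
    #     prob.driver.declare_coloring(num_full_jacs=3, tol=1e-25,
    #                                  show_summary=True, show_sparsity=False)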

    def use_fixed_coloring(self, coloring=coloring_mod._STD_COLORING_FNAME):
        """
        Tell the driver to use a precomputed coloring.

        Parameters
        ----------
        coloring : str
            A coloring filename.  If no arg is passed, filename will be determined
            automatically.
        """
        if self.supports['simultaneous_derivatives']:
            if coloring_mod._force_dyn_coloring and coloring is coloring_mod._STD_COLORING_FNAME:
                # force the generation of a dynamic coloring this time
                self._coloring_info['dynamic'] = True
                self._coloring_info['static'] = None
            else:
                self._coloring_info['static'] = coloring
                self._coloring_info['dynamic'] = False

            self._coloring_info['coloring'] = None
        else:
            raise RuntimeError(
                "Driver '%s' does not support simultaneous derivatives." %
                self._get_name())
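
    # Usage sketch (illustrative path): reuse a previously generated coloring instead
    # of recomputing it, e.g.
    #
    #     prob.driver.use_fixed_coloring()                        # standard filename
    #     prob.driver.use_fixed_coloring('coloring_files/total_coloring.pkl')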

    def _setup_tot_jac_sparsity(self, coloring=None):
        """
        Set up total jacobian subjac sparsity.

        Drivers that can use subjac sparsity should override this.

        Parameters
        ----------
        coloring : Coloring or None
            Current coloring.
        """
        pass

    def _get_static_coloring(self):
        """
        Get the Coloring for this driver.

        If necessary, load the Coloring from a file.

        Returns
        -------
        Coloring or None
            The pre-existing or loaded Coloring, or None.
        """
        info = self._coloring_info
        static = info['static']

        if isinstance(static, coloring_mod.Coloring):
            coloring = static
            info['coloring'] = coloring
        else:
            coloring = info['coloring']

        if coloring is not None:
            return coloring

        if static is coloring_mod._STD_COLORING_FNAME or isinstance(
                static, str):
            if static is coloring_mod._STD_COLORING_FNAME:
                fname = self._get_total_coloring_fname()
            else:
                fname = static
            print("loading total coloring from file %s" % fname)
            coloring = info['coloring'] = coloring_mod.Coloring.load(fname)
            info.update(coloring._meta)
            return coloring

    def _get_total_coloring_fname(self):
        return os.path.join(self._problem().options['coloring_dir'],
                            'total_coloring.pkl')

    def _setup_simul_coloring(self):
        """
        Set up metadata for coloring of total derivative solution.

        If set_coloring was called with a filename, load the coloring file.
        """
        # command line simul_coloring uses this env var to turn pre-existing coloring off
        if not coloring_mod._use_total_sparsity:
            return

        problem = self._problem()
        if not problem.model._use_derivatives:
            simple_warning(
                "Derivatives are turned off.  Skipping simul deriv coloring.")
            return

        total_coloring = self._get_static_coloring()

        if total_coloring._rev and problem._orig_mode not in ('rev', 'auto'):
            revcol = total_coloring._rev[0][0]
            if revcol:
                raise RuntimeError(
                    "Simultaneous coloring does reverse solves but mode has "
                    "been set to '%s'" % problem._orig_mode)
        if total_coloring._fwd and problem._orig_mode not in ('fwd', 'auto'):
            fwdcol = total_coloring._fwd[0][0]
            if fwdcol:
                raise RuntimeError(
                    "Simultaneous coloring does forward solves but mode has "
                    "been set to '%s'" % problem._orig_mode)

    def _pre_run_model_debug_print(self):
        """
        Optionally print some debugging information before the model runs.
        """
        debug_opt = self.options['debug_print']
        rank = self._problem().comm.rank
        if not debug_opt or debug_opt == ['totals']:
            return

        if not MPI or rank == 0:
            header = 'Driver debug print for iter coord: {}'.format(
                self._recording_iter.get_formatted_iteration_coordinate())
            print(header)
            print(len(header) * '-')

        if 'desvars' in debug_opt:
            model = self._problem().model
            desvar_vals = {
                n: model.get_val(n, get_remote=True, rank=0)
                for n in self._designvars
            }
            if not MPI or rank == 0:
                print("Design Vars")
                if desvar_vals:
                    pprint.pprint(desvar_vals)
                else:
                    print("None")
                print()

        sys.stdout.flush()

    def _post_run_model_debug_print(self):
        """
        Optionally print some debugging information after the model runs.
        """
        rank = self._problem().comm.rank

        if 'nl_cons' in self.options['debug_print']:
            cons = self.get_constraint_values(lintype='nonlinear',
                                              driver_scaling=False)
            if not MPI or rank == 0:
                print("Nonlinear constraints")
                if cons:
                    pprint.pprint(cons)
                else:
                    print("None")
                print()

        if 'ln_cons' in self.options['debug_print']:
            cons = self.get_constraint_values(lintype='linear',
                                              driver_scaling=False)
            if not MPI or rank == 0:
                print("Linear constraints")
                if cons:
                    pprint.pprint(cons)
                else:
                    print("None")
                print()

        if 'objs' in self.options['debug_print']:
            objs = self.get_objective_values(driver_scaling=False)
            if not MPI or rank == 0:
                print("Objectives")
                if objs:
                    pprint.pprint(objs)
                else:
                    print("None")
                print()

        sys.stdout.flush()
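
    # Usage sketch (illustration only): the debug printouts above are enabled through
    # the driver options, e.g.
    #
    #     prob.driver.options['debug_print'] = ['desvars', 'nl_cons', 'objs', 'totals']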
Example #29
0
class SolverBase(object):
    """ Common base class for Linear and Nonlinear solver. Should not be used
    by users. Always inherit from `LinearSolver` or `NonlinearSolver`."""

    def __init__(self):
        self.iter_count = 0
        self.options = OptionsDictionary()
        desc = 'Set to 0 to disable printing, set to 1 to print the ' \
               'residual to stdout each iteration, set to 2 to print ' \
               'subiteration residuals as well.'
        self.options.add_option('iprint', 0, values=[0, 1, 2], desc=desc)
        self.options.add_option('err_on_maxiter', False,
            desc='If True, raise an AnalysisError if not converged at maxiter.')
        self.recorders = RecordingManager()
        self.local_meta = None

    def setup(self, sub):
        """ Solvers override to define post-setup initiailzation.

        Args
        ----
        sub: `System`
            System that owns this solver.
        """
        pass

    def cleanup(self):
        """ Clean up resources prior to exit. """
        self.recorders.close()

    def print_norm(self, solver_string, pathname, iteration, res, res0,
                   msg=None, indent=0, solver='NL', u_norm=None):
        """ Prints out the norm of the residual in a neat readable format.

        Args
        ----
        solver_string: string
            Unique string to identify your solver type (e.g., 'LN_GS' or
            'NEWTON').

        pathname: string
            Parent system pathname.

        iteration: int
            Current iteration number

        res: float
            Norm of the absolute residual value.

        res0: float
            Norm of the baseline initial residual for relative comparison.

        msg: string, optional
            Message that indicates convergence.

        indent: int, optional
            Additional indentation levels for subiterations.

        solver: string, optional
            Solver type if not LN or NL (mostly for line search operations.)

        u_norm: float, optional
            Norm of the u vector, if applicable.
        """
        if pathname == '':
            name = 'root'
        else:
            name = 'root.' + pathname

        # Find indentation level
        level = pathname.count('.')
        # No indentation for the driver; the top-level solver also has none.
        level = level + indent

        indent = '   ' * level
        if msg is not None:
            form = indent + '[%s] %s: %s   %d | %s'

            if u_norm:
                form += ' (%s)' % u_norm

            print(form % (name, solver, solver_string, iteration, msg))
            return

        form = indent + '[%s] %s: %s   %d | %.9g %.9g'

        if u_norm:
            form += ' (%s)' % u_norm

        print(form % (name, solver, solver_string, iteration, res, res/res0))
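
        # Example output (illustrative): for pathname='sub', solver='NL',
        # solver_string='NEWTON', iteration=3, res=1e-4, res0=1.0, this prints
        # roughly:  [root.sub] NL: NEWTON   3 | 0.0001 0.0001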

    def print_all_convergence(self):
        """ Turns on iprint for this solver and all subsolvers. Override if
        your solver has subsolvers."""
        self.options['iprint'] = 1

    def generate_docstring(self):
        """
        Generates a numpy-style docstring for a user-created System class.

        Returns
        -------
        docstring : str
                string that contains a basic numpy docstring.

        """
        # start the docstring off
        docstrings = ['    \"\"\"']

        # Put options into docstring
        first_time = True

        for key, value in sorted(vars(self).items()):
            if isinstance(value, OptionsDictionary):
                if first_time:  # start of Options docstring
                    docstrings.extend(['', '    Options', '    -------'])
                    first_time = False
                docstrings.append(value._generate_docstring(key))

        # finish up docstring
        docstrings.extend(['    \"\"\"', ''])
        return '\n'.join(docstrings)
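

# Usage sketch (illustration only; MyNonlinearSolver is a hypothetical subclass):
#
#     solver = MyNonlinearSolver()
#     print(solver.generate_docstring())   # emits a numpy-style Options skeleton
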
class ExperimentalDriver(object):
    """
    A fake driver class used for doc generation testing.

    Attributes
    ----------
    fail : bool
        Reports whether the driver ran successfully.
    iter_count : int
        Keep track of iterations for case recording.
    options : <OptionsDictionary>
        Dictionary with general driver options.
    recording_options : <OptionsDictionary>
        Dictionary with driver recording options.
    cite : str
        Listing of relevant citations that should be referenced when
        publishing work that uses this class.
    _problem : <Problem>
        Pointer to the containing problem.
    supports : <OptionsDictionary>
        Provides a consistent way for drivers to declare what features they support.
    _designvars : dict
        Contains all design variable info.
    _cons : dict
        Contains all constraint info.
    _objs : dict
        Contains all objective info.
    _responses : dict
        Contains all response info.
    _rec_mgr : <RecordingManager>
        Object that manages all recorders added to this driver.
    _vars_to_record : dict
        Dict of lists of var names indicating what to record.
    _model_viewer_data : dict
        Structure of model, used to make n2 diagram.
    _remote_dvs : dict
        Dict of design variables that are remote on at least one proc. Values are
        (owning rank, size).
    _remote_cons : dict
        Dict of constraints that are remote on at least one proc. Values are
        (owning rank, size).
    _remote_objs : dict
        Dict of objectives that are remote on at least one proc. Values are
        (owning rank, size).
    _remote_responses : dict
        A combined dict containing entries from _remote_cons and _remote_objs.
    _total_coloring : tuple of dicts
        A data structure describing coloring for simultaneous derivs.
    _res_jacs : dict
        Dict of sparse subjacobians for use with certain optimizers, e.g. pyOptSparseDriver.
    """

    def __init__(self):
        """
        Initialize the driver.
        """
        self._rec_mgr = RecordingManager()
        self._vars_to_record = {
            'desvarnames': set(),
            'responsenames': set(),
            'objectivenames': set(),
            'constraintnames': set(),
            'sysinclnames': set(),
        }

        self._problem = None
        self._designvars = None
        self._cons = None
        self._objs = None
        self._responses = None
        self.options = OptionsDictionary()
        self.recording_options = OptionsDictionary()

        ###########################
        self.recording_options.declare('record_desvars', types=bool, default=True,
                                       desc='Set to True to record design variables at the \
                                       driver level')
        self.recording_options.declare('record_responses', types=bool, default=False,
                                       desc='Set to True to record responses at the driver level')
        self.recording_options.declare('record_objectives', types=bool, default=True,
                                       desc='Set to True to record objectives at the \
                                       driver level')
        self.recording_options.declare('record_constraints', types=bool, default=True,
                                       desc='Set to True to record constraints at the \
                                       driver level')
        self.recording_options.declare('includes', types=list, default=[],
                                       desc='Patterns for variables to include in recording. \
                                       Uses fnmatch wildcards')
        self.recording_options.declare('excludes', types=list, default=[],
                                       desc='Patterns for vars to exclude in recording '
                                       '(processed post-includes). Uses fnmatch wildcards')
        self.recording_options.declare('record_derivatives', types=bool, default=False,
                                       desc='Set to True to record derivatives at the driver \
                                       level')
        ###########################

        # What the driver supports.
        self.supports = OptionsDictionary()
        self.supports.declare('inequality_constraints', types=bool, default=False)
        self.supports.declare('equality_constraints', types=bool, default=False)
        self.supports.declare('linear_constraints', types=bool, default=False)
        self.supports.declare('two_sided_constraints', types=bool, default=False)
        self.supports.declare('multiple_objectives', types=bool, default=False)
        self.supports.declare('integer_design_vars', types=bool, default=False)
        self.supports.declare('gradients', types=bool, default=False)
        self.supports.declare('active_set', types=bool, default=False)
        self.supports.declare('simultaneous_derivatives', types=bool, default=False)
        self.supports.declare('distributed_design_vars', types=bool, default=False)

        self.iter_count = 0
        self.options = None
        self._model_viewer_data = None
        self.cite = ""

        self._res_jacs = {}

        self.fail = False

    def add_recorder(self, recorder):
        """
        Add a recorder to the driver.

        Parameters
        ----------
        recorder : CaseRecorder
           A recorder instance.
        """
        self._rec_mgr.append(recorder)

    def cleanup(self):
        """
        Clean up resources prior to exit.
        """
        self._rec_mgr.close()

    def _setup_driver(self, problem):
        """
        Prepare the driver for execution.

        This is the final thing to run during setup.

        Parameters
        ----------
        problem : <Problem>
            Pointer to the containing problem.
        """
        pass

    def _get_voi_val(self, name, meta, remote_vois):
        """
        Get the value of a variable of interest (objective, constraint, or design var).

        This will retrieve the value if the VOI is remote.

        Parameters
        ----------
        name : str
            Name of the variable of interest.
        meta : dict
            Metadata for the variable of interest.
        remote_vois : dict
            Dict containing (owning_rank, size) for all remote vois of a particular
            type (design var, constraint, or objective).

        Returns
        -------
        float or ndarray
            The value of the named variable of interest.
        """
        model = self._problem.model
        comm = model.comm
        vec = model._outputs._views_flat
        indices = meta['indices']

        if name in remote_vois:
            owner, size = remote_vois[name]
            if owner == comm.rank:
                if indices is None:
                    val = vec[name].copy()
                else:
                    val = vec[name][indices]
            else:
                if indices is not None:
                    size = indices.indexed_src_size
                val = np.empty(size)
            comm.Bcast(val, root=owner)
        else:
            if indices is None:
                val = vec[name].copy()
            else:
                val = vec[name][indices]

        if self._has_scaling:
            # Scale design variable values
            adder = meta['adder']
            if adder is not None:
                val += adder

            scaler = meta['scaler']
            if scaler is not None:
                val *= scaler

        return val
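
    # Scaling sketch (illustrative numbers): for a design var with adder=2.0 and
    # scaler=0.5 whose physical value is 4.0, the driver-visible value is
    # (4.0 + 2.0) * 0.5 = 3.0.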

    def get_design_var_values(self, filter=None):
        """
        Return the design variable values.

        This is called to gather the initial design variable state.

        Parameters
        ----------
        filter : list
            List of desvar names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each design variable.
        """
        if filter:
            dvs = filter
        else:
            # use all the designvars
            dvs = self._designvars

        return {n: self._get_voi_val(n, self._designvars[n], self._remote_dvs) for n in dvs}

    def set_design_var(self, name, value):
        """
        Set the value of a design variable.

        Parameters
        ----------
        name : str
            Global pathname of the design variable.
        value : float or ndarray
            Value for the design variable.
        """
        if (name in self._remote_dvs and
                self._problem.model._owning_rank['output'][name] != self._problem.comm.rank):
            return

        meta = self._designvars[name]
        indices = meta['indices']
        if indices is None:
            indices = slice(None)

        desvar = self._problem.model._outputs._views_flat[name]
        desvar[indices] = value

        if self._has_scaling:
            # Scale design variable values
            scaler = meta['scaler']
            if scaler is not None:
                desvar[indices] *= 1.0 / scaler

            adder = meta['adder']
            if adder is not None:
                desvar[indices] -= adder
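
    # Inverse-scaling sketch (illustrative numbers): if the driver sets 3.0 for a
    # design var declared with scaler=0.5 and adder=2.0, the model receives
    # 3.0 / 0.5 - 2.0 = 4.0, undoing the (value + adder) * scaler transform.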

    def get_response_values(self, filter=None):
        """
        Return response values.

        Parameters
        ----------
        filter : list
            List of response names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each response.
        """
        if filter:
            resps = filter
        else:
            resps = self._responses

        return {n: self._get_voi_val(n, self._responses[n], self._remote_objs) for n in resps}

    def get_objective_values(self, filter=None):
        """
        Return objective values.

        Parameters
        ----------
        filter : list
            List of objective names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each objective.
        """
        if filter:
            objs = filter
        else:
            objs = self._objs

        return {n: self._get_voi_val(n, self._objs[n], self._remote_objs) for n in objs}

    def get_constraint_values(self, ctype='all', lintype='all', filter=None):
        """
        Return constraint values.

        Parameters
        ----------
        ctype : str
            Default is 'all'. Optionally return just the inequality constraints
            with 'ineq' or the equality constraints with 'eq'.
        lintype : str
            Default is 'all'. Optionally return just the linear constraints
            with 'linear' or the nonlinear constraints with 'nonlinear'.
        filter : list
            List of constraint names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each constraint.
        """
        if filter is not None:
            cons = filter
        else:
            cons = self._cons

        con_dict = {}
        for name in cons:
            meta = self._cons[name]

            if lintype == 'linear' and not meta['linear']:
                continue

            if lintype == 'nonlinear' and meta['linear']:
                continue

            if ctype == 'eq' and meta['equals'] is None:
                continue

            if ctype == 'ineq' and meta['equals'] is not None:
                continue

            con_dict[name] = self._get_voi_val(name, meta, self._remote_cons)

        return con_dict

    def run(self):
        """
        Execute this driver.

        The base `Driver` just runs the model. All other drivers overload
        this method.

        Returns
        -------
        bool
            Failure flag; True if failed to converge, False if successful.
        """
        with Recording(self._get_name(), self.iter_count, self) as rec:
            self._problem.model.run_solve_nonlinear()

        self.iter_count += 1
        return False

    def _dict2array_jac(self, derivs):
        osize = 0
        isize = 0
        do_wrt = True
        islices = {}
        oslices = {}
        for okey, oval in derivs.items():
            if do_wrt:
                for ikey, val in oval.items():
                    istart = isize
                    isize += val.shape[1]
                    islices[ikey] = slice(istart, isize)
                do_wrt = False
            ostart = osize
            osize += oval[ikey].shape[0]
            oslices[okey] = slice(ostart, osize)

        new_derivs = np.zeros((osize, isize))

        relevant = self._problem.model._relevant

        for okey, odict in derivs.items():
            for ikey, val in odict.items():
                if okey in relevant[ikey] or ikey in relevant[okey]:
                    new_derivs[oslices[okey], islices[ikey]] = val

        return new_derivs
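
    # Shape sketch (illustrative): for derivs = {'obj': {'x': 1x2, 'z': 1x3},
    # 'con': {'x': 2x2, 'z': 2x3}}, the result is a dense (1 + 2) x (2 + 3) array
    # with each sub-jacobian placed in its (of, wrt) block; blocks whose (of, wrt)
    # pair is not relevant are left as zeros.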

    def _compute_totals(self, of=None, wrt=None, return_format='flat_dict', use_abs_names=True):
        """
        Compute derivatives of desired quantities with respect to desired inputs.

        All derivatives are returned using driver scaling.

        Parameters
        ----------
        of : list of variable name str or None
            Variables whose derivatives will be computed. Default is None, which
            uses the driver's objectives and constraints.
        wrt : list of variable name str or None
            Variables with respect to which the derivatives will be computed.
            Default is None, which uses the driver's desvars.
        return_format : str
            Format to return the derivatives. Default is a 'flat_dict', which
            returns them in a dictionary whose keys are tuples of form (of, wrt). For
            the scipy optimizer, 'array' is also supported.
        use_abs_names : bool
            Set to True when passing in global names to skip some translation steps.

        Returns
        -------
        derivs : object
            Derivatives in form requested by 'return_format'.
        """
        prob = self._problem

        # Compute the derivatives in dict format...
        if prob.model._owns_approx_jac:
            derivs = prob._compute_totals_approx(of=of, wrt=wrt, return_format='dict',
                                                 use_abs_names=use_abs_names)
        else:
            derivs = prob._compute_totals(of=of, wrt=wrt, return_format='dict',
                                          use_abs_names=use_abs_names)

        # ... then convert to whatever the driver needs.
        if return_format in ('dict', 'array'):
            if self._has_scaling:
                for okey, odict in derivs.items():
                    for ikey, val in odict.items():

                        iscaler = self._designvars[ikey]['scaler']
                        oscaler = self._responses[okey]['scaler']

                        # Scale response side
                        if oscaler is not None:
                            val[:] = (oscaler * val.T).T

                        # Scale design var side
                        if iscaler is not None:
                            val *= 1.0 / iscaler
        else:
            raise RuntimeError("Derivative scaling by the driver only supports the 'dict' and "
                               "'array' formats at present.")

        if return_format == 'array':
            derivs = self._dict2array_jac(derivs)

        return derivs

    def record_iteration(self):
        """
        Record an iteration of the current Driver.
        """
        if not self._rec_mgr._recorders:
            return

        metadata = create_local_meta(self._get_name())

        # Get the data to record
        data = {}
        if self.recording_options['record_desvars']:
            # collective call that gets across all ranks
            desvars = self.get_design_var_values()
        else:
            desvars = {}

        if self.recording_options['record_responses']:
            # responses = self.get_response_values() # not really working yet
            responses = {}
        else:
            responses = {}

        if self.recording_options['record_objectives']:
            objectives = self.get_objective_values()
        else:
            objectives = {}

        if self.recording_options['record_constraints']:
            constraints = self.get_constraint_values()
        else:
            constraints = {}

        desvars = {name: desvars[name]
                   for name in self._filtered_vars_to_record['des']}
        # responses not working yet
        # responses = {name: responses[name] for name in self._filtered_vars_to_record['res']}
        objectives = {name: objectives[name]
                      for name in self._filtered_vars_to_record['obj']}
        constraints = {name: constraints[name]
                       for name in self._filtered_vars_to_record['con']}

        if self.recording_options['includes']:
            root = self._problem.model
            outputs = root._outputs
            # inputs, outputs, residuals = root.get_nonlinear_vectors()
            sysvars = {}
            for name, value in outputs._names.items():
                if name in self._filtered_vars_to_record['sys']:
                    sysvars[name] = value
        else:
            sysvars = {}

        if MPI:
            root = self._problem.model
            desvars = self._gather_vars(root, desvars)
            responses = self._gather_vars(root, responses)
            objectives = self._gather_vars(root, objectives)
            constraints = self._gather_vars(root, constraints)
            sysvars = self._gather_vars(root, sysvars)

        data['des'] = desvars
        data['res'] = responses
        data['obj'] = objectives
        data['con'] = constraints
        data['sys'] = sysvars

        self._rec_mgr.record_iteration(self, data, metadata)

    def _gather_vars(self, root, local_vars):
        """
        Gather and return only variables listed in `local_vars` from the `root` System.

        Parameters
        ----------
        root : <System>
            the root System for the Problem
        local_vars : dict
            local variable names and values

        Returns
        -------
        dct : dict
            variable names and values.
        """
        # if trace:
        #     debug("gathering vars for recording in %s" % root.pathname)
        all_vars = root.comm.gather(local_vars, root=0)
        # if trace:
        #     debug("DONE gathering rec vars for %s" % root.pathname)

        if root.comm.rank == 0:
            dct = all_vars[-1]
            for d in all_vars[:-1]:
                dct.update(d)
            return dct

    def _get_name(self):
        """
        Get name of current Driver.

        Returns
        -------
        str
            Name of current Driver.
        """
        return "Driver"
Example #31
0
class Driver(object):
    """
    Top-level container for the systems and drivers.

    Attributes
    ----------
    fail : bool
        Reports whether the driver ran successfully.
    iter_count : int
        Keep track of iterations for case recording.
    options : <OptionsDictionary>
        Dictionary with general driver options.
    recording_options : <OptionsDictionary>
        Dictionary with driver recording options.
    cite : str
        Listing of relevant citations that should be referenced when
        publishing work that uses this class.
    _problem : <Problem>
        Pointer to the containing problem.
    supports : <OptionsDictionary>
        Provides a consistent way for drivers to declare what features they support.
    _designvars : dict
        Contains all design variable info.
    _cons : dict
        Contains all constraint info.
    _objs : dict
        Contains all objective info.
    _responses : dict
        Contains all response info.
    _rec_mgr : <RecordingManager>
        Object that manages all recorders added to this driver.
    _vars_to_record : dict
        Dict of lists of var names indicating what to record.
    _model_viewer_data : dict
        Structure of model, used to make n2 diagram.
    _simul_coloring_info : tuple of dicts
        A data structure describing coloring for simultaneous derivs.
    _total_jac_sparsity : dict, str, or None
        Specifies sparsity of sub-jacobians of the total jacobian. Only used by pyOptSparseDriver.
    _res_jacs : dict
        Dict of sparse subjacobians for use with certain optimizers, e.g. pyOptSparseDriver.
    _total_jac : _TotalJacInfo or None
        Cached total jacobian handling object.
    """

    def __init__(self, **kwargs):
        """
        Initialize the driver.

        Parameters
        ----------
        **kwargs : dict of keyword arguments
            Keyword arguments that will be mapped into the Driver options.
        """
        self._rec_mgr = RecordingManager()
        self._vars_to_record = {
            'desvarnames': set(),
            'responsenames': set(),
            'objectivenames': set(),
            'constraintnames': set(),
            'sysinclnames': set(),
        }

        self._problem = None
        self._designvars = None
        self._cons = None
        self._objs = None
        self._responses = None

        # Driver options
        self.options = OptionsDictionary()

        self.options.declare('debug_print', types=list, check_valid=_check_debug_print_opts_valid,
                             desc="List of what type of Driver variables to print at each "
                                  "iteration. Valid items in list are 'desvars', 'ln_cons', "
                                  "'nl_cons', 'objs', 'totals'",
                             default=[])

        # Case recording options
        self.recording_options = OptionsDictionary()

        self.recording_options.declare('record_model_metadata', types=bool, default=True,
                                       desc='Record metadata for all Systems in the model')
        self.recording_options.declare('record_desvars', types=bool, default=True,
                                       desc='Set to True to record design variables at the '
                                            'driver level')
        self.recording_options.declare('record_responses', types=bool, default=False,
                                       desc='Set to True to record responses at the driver level')
        self.recording_options.declare('record_objectives', types=bool, default=True,
                                       desc='Set to True to record objectives at the driver level')
        self.recording_options.declare('record_constraints', types=bool, default=True,
                                       desc='Set to True to record constraints at the '
                                            'driver level')
        self.recording_options.declare('includes', types=list, default=[],
                                       desc='Patterns for variables to include in recording')
        self.recording_options.declare('excludes', types=list, default=[],
                                       desc='Patterns for vars to exclude in recording '
                                            '(processed post-includes)')
        self.recording_options.declare('record_derivatives', types=bool, default=False,
                                       desc='Set to True to record derivatives at the driver '
                                            'level')
        self.recording_options.declare('record_inputs', types=bool, default=True,
                                       desc='Set to True to record inputs at the driver level')

        # What the driver supports.
        self.supports = OptionsDictionary()
        self.supports.declare('inequality_constraints', types=bool, default=False)
        self.supports.declare('equality_constraints', types=bool, default=False)
        self.supports.declare('linear_constraints', types=bool, default=False)
        self.supports.declare('two_sided_constraints', types=bool, default=False)
        self.supports.declare('multiple_objectives', types=bool, default=False)
        self.supports.declare('integer_design_vars', types=bool, default=False)
        self.supports.declare('gradients', types=bool, default=False)
        self.supports.declare('active_set', types=bool, default=False)
        self.supports.declare('simultaneous_derivatives', types=bool, default=False)
        self.supports.declare('total_jac_sparsity', types=bool, default=False)

        self.iter_count = 0
        self._model_viewer_data = None
        self.cite = ""

        self._simul_coloring_info = None
        self._total_jac_sparsity = None
        self._res_jacs = {}
        self._total_jac = None

        self.fail = False

        self._declare_options()
        self.options.update(kwargs)

    def add_recorder(self, recorder):
        """
        Add a recorder to the driver.

        Parameters
        ----------
        recorder : CaseRecorder
           A recorder instance.
        """
        self._rec_mgr.append(recorder)

    def cleanup(self):
        """
        Clean up resources prior to exit.
        """
        # shut down all recorders
        self._rec_mgr.shutdown()

    def _declare_options(self):
        """
        Declare options before kwargs are processed in the init method.

        This is optionally implemented by subclasses of Driver.
        """
        pass

    def _setup_comm(self, comm):
        """
        Perform any driver-specific setup of communicators for the model.

        Parameters
        ----------
        comm : MPI.Comm or <FakeComm> or None
            The communicator for the Problem.

        Returns
        -------
        MPI.Comm or <FakeComm> or None
            The communicator for the Problem model.
        """
        return comm

    def _setup_driver(self, problem):
        """
        Prepare the driver for execution.

        This is the final thing to run during setup.

        Parameters
        ----------
        problem : <Problem>
            Pointer to the containing problem.
        """
        self._problem = problem
        self._recording_iter = problem._recording_iter
        model = problem.model

        self._total_jac = None

        self._has_scaling = (
            np.any([r['scaler'] is not None for r in itervalues(self._responses)]) or
            np.any([dv['scaler'] is not None for dv in itervalues(self._designvars)])
        )

        con_set = set()
        obj_set = set()
        dv_set = set()

        self._remote_dvs = dv_dict = {}
        self._remote_cons = con_dict = {}
        self._remote_objs = obj_dict = {}

        # Now determine if later we'll need to allgather cons, objs, or desvars.
        if model.comm.size > 1 and model._subsystems_allprocs:
            local_out_vars = set(model._outputs._views)
            remote_dvs = set(self._designvars) - local_out_vars
            remote_cons = set(self._cons) - local_out_vars
            remote_objs = set(self._objs) - local_out_vars
            all_remote_vois = model.comm.allgather(
                (remote_dvs, remote_cons, remote_objs))
            for rem_dvs, rem_cons, rem_objs in all_remote_vois:
                con_set.update(rem_cons)
                obj_set.update(rem_objs)
                dv_set.update(rem_dvs)

            # If we have remote VOIs, pick an owning rank for each and use that
            # to bcast to others later
            owning_ranks = model._owning_rank
            sizes = model._var_sizes['nonlinear']['output']
            for i, vname in enumerate(model._var_allprocs_abs_names['output']):
                owner = owning_ranks[vname]
                if vname in dv_set:
                    dv_dict[vname] = (owner, sizes[owner, i])
                if vname in con_set:
                    con_dict[vname] = (owner, sizes[owner, i])
                if vname in obj_set:
                    obj_dict[vname] = (owner, sizes[owner, i])

        self._remote_responses = self._remote_cons.copy()
        self._remote_responses.update(self._remote_objs)

        # set up simultaneous deriv coloring
        if (coloring_mod._use_sparsity and self._simul_coloring_info and
                self.supports['simultaneous_derivatives']):
            self._setup_simul_coloring()

    def _get_vars_to_record(self, recording_options):
        """
        Get variables to record based on recording options.

        Parameters
        ----------
        recording_options : <OptionsDictionary>
            Dictionary with recording options.

        Returns
        -------
        dict
           Dictionary containing lists of variables to record.
        """
        problem = self._problem
        model = problem.model

        if MPI:
            # TODO: Eventually, we think we can get rid of this next check.
            #       But to be safe, we are leaving it in there.
            if not model.is_active():
                raise RuntimeError("RecordingManager.startup should never be called when "
                                   "running in parallel on an inactive System")
            rrank = problem.comm.rank
            rowned = model._owning_rank

        incl = recording_options['includes']
        excl = recording_options['excludes']

        # includes and excludes for outputs are specified using promoted names
        # NOTE: only local var names are in abs2prom, all will be gathered later
        abs2prom = model._var_abs2prom['output']

        all_desvars = {n for n in self._designvars
                       if n in abs2prom and check_path(abs2prom[n], incl, excl, True)}
        all_objectives = {n for n in self._objs
                          if n in abs2prom and check_path(abs2prom[n], incl, excl, True)}
        all_constraints = {n for n in self._cons
                           if n in abs2prom and check_path(abs2prom[n], incl, excl, True)}

        # design variables, objectives and constraints are always in the options
        mydesvars = myobjectives = myconstraints = set()

        if recording_options['record_desvars']:
            if MPI:
                mydesvars = [n for n in all_desvars if rrank == rowned[n]]
            else:
                mydesvars = list(all_desvars)

        if recording_options['record_objectives']:
            if MPI:
                myobjectives = [n for n in all_objectives if rrank == rowned[n]]
            else:
                myobjectives = list(all_objectives)

        if recording_options['record_constraints']:
            if MPI:
                myconstraints = [n for n in all_constraints if rrank == rowned[n]]
            else:
                myconstraints = list(all_constraints)

        filtered_vars_to_record = {
            'des': mydesvars,
            'obj': myobjectives,
            'con': myconstraints
        }

        # responses (if in options)
        if 'record_responses' in recording_options:
            myresponses = set()

            if recording_options['record_responses']:
                myresponses = {n for n in self._responses
                               if n in abs2prom and check_path(abs2prom[n], incl, excl, True)}

                if MPI:
                    myresponses = [n for n in myresponses if rrank == rowned[n]]

            filtered_vars_to_record['res'] = list(myresponses)

        # inputs (if in options)
        if 'record_inputs' in recording_options:
            myinputs = set()

            if recording_options['record_inputs']:
                myinputs = {n for n in model._inputs if check_path(n, incl, excl)}

                if MPI:
                    # gather the variables from all ranks to rank 0
                    all_vars = model.comm.gather(myinputs, root=0)
                    if MPI.COMM_WORLD.rank == 0:
                        myinputs = all_vars[-1]
                        for d in all_vars[:-1]:
                            myinputs.update(d)

                    myinputs = [n for n in myinputs if rrank == rowned[n]]

            filtered_vars_to_record['in'] = list(myinputs)

        # system outputs (if the options being processed are for the driver itself)
        if recording_options is self.recording_options:
            myoutputs = set()

            if incl:
                myoutputs = {n for n in model._outputs
                             if n in abs2prom and check_path(abs2prom[n], incl, excl)}

                if MPI:
                    # gather the variables from all ranks to rank 0
                    all_vars = model.comm.gather(myoutputs, root=0)
                    if MPI.COMM_WORLD.rank == 0:
                        myoutputs = all_vars[-1]
                        for d in all_vars[:-1]:
                            myoutputs.update(d)

                # de-duplicate
                myoutputs = myoutputs.difference(all_desvars, all_objectives, all_constraints)

                if MPI:
                    myoutputs = [n for n in myoutputs if rrank == rowned[n]]

            filtered_vars_to_record['sys'] = list(myoutputs)

        return filtered_vars_to_record
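
    # Pattern sketch (illustrative names): with recording_options['includes'] set to
    # ['comp1.*'] and ['excludes'] set to ['*.y2'], an output promoted as 'comp1.y1'
    # is recorded while 'comp1.y2' and 'comp2.y1' are filtered out.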

    def _setup_recording(self):
        """
        Set up case recording.
        """
        self._filtered_vars_to_record = self._get_vars_to_record(self.recording_options)

        self._rec_mgr.startup(self)

        # record the system metadata to the recorders attached to this Driver
        if self.recording_options['record_model_metadata']:
            for sub in self._problem.model.system_iter(recurse=True, include_self=True):
                self._rec_mgr.record_metadata(sub)

    def _get_voi_val(self, name, meta, remote_vois, unscaled=False, ignore_indices=False):
        """
        Get the value of a variable of interest (objective, constraint, or design var).

        This will retrieve the value if the VOI is remote.

        Parameters
        ----------
        name : str
            Name of the variable of interest.
        meta : dict
            Metadata for the variable of interest.
        remote_vois : dict
            Dict containing (owning_rank, size) for all remote vois of a particular
            type (design var, constraint, or objective).
        unscaled : bool
            Set to True if unscaled (physical) design variables are desired.
        ignore_indices : bool
            Set to True if the full array is desired, not just those indicated by indices.

        Returns
        -------
        float or ndarray
            The value of the named variable of interest.
        """
        model = self._problem.model
        comm = model.comm
        vec = model._outputs._views_flat
        indices = meta['indices']

        if name in remote_vois:
            owner, size = remote_vois[name]
            if owner == comm.rank:
                if indices is None or ignore_indices:
                    val = vec[name].copy()
                else:
                    val = vec[name][indices]
            else:
                if not (indices is None or ignore_indices):
                    size = len(indices)
                val = np.empty(size)

            comm.Bcast(val, root=owner)
        else:
            if indices is None or ignore_indices:
                val = vec[name].copy()
            else:
                val = vec[name][indices]

        if self._has_scaling and not unscaled:
            # Scale design variable values
            adder = meta['adder']
            if adder is not None:
                val += adder

            scaler = meta['scaler']
            if scaler is not None:
                val *= scaler

        return val

    def get_design_var_values(self, filter=None, unscaled=False, ignore_indices=False):
        """
        Return the design variable values.

        This is called to gather the initial design variable state.

        Parameters
        ----------
        filter : list
            List of desvar names used by recorders.
        unscaled : bool
            Set to True if unscaled (physical) design variables are desired.
        ignore_indices : bool
            Set to True if the full array is desired, not just those indicated by indices.

        Returns
        -------
        dict
           Dictionary containing values of each design variable.
        """
        if filter:
            dvs = filter
        else:
            # use all the designvars
            dvs = self._designvars

        return {n: self._get_voi_val(n, self._designvars[n], self._remote_dvs, unscaled=unscaled,
                                     ignore_indices=ignore_indices) for n in dvs}

    def set_design_var(self, name, value):
        """
        Set the value of a design variable.

        Parameters
        ----------
        name : str
            Global pathname of the design variable.
        value : float or ndarray
            Value for the design variable.
        """
        problem = self._problem

        if (name in self._remote_dvs and
                problem.model._owning_rank[name] != problem.comm.rank):
            return

        meta = self._designvars[name]
        indices = meta['indices']
        if indices is None:
            indices = slice(None)

        desvar = problem.model._outputs._views_flat[name]
        desvar[indices] = value

        if self._has_scaling:
            # Scale design variable values
            scaler = meta['scaler']
            if scaler is not None:
                desvar[indices] *= 1.0 / scaler

            adder = meta['adder']
            if adder is not None:
                desvar[indices] -= adder

    def get_response_values(self, filter=None):
        """
        Return response values.

        Parameters
        ----------
        filter : list
            List of response names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each response.
        """
        if filter:
            resps = filter
        else:
            resps = self._responses

        return {n: self._get_voi_val(n, self._responses[n], self._remote_objs) for n in resps}

    def get_objective_values(self, unscaled=False, filter=None, ignore_indices=False):
        """
        Return objective values.

        Parameters
        ----------
        unscaled : bool
            Set to True if unscaled (physical) design variables are desired.
        filter : list
            List of objective names used by recorders.
        ignore_indices : bool
            Set to True if the full array is desired, not just those indicated by indices.

        Returns
        -------
        dict
           Dictionary containing values of each objective.
        """
        if filter:
            objs = filter
        else:
            objs = self._objs

        return {n: self._get_voi_val(n, self._objs[n], self._remote_objs, unscaled=unscaled,
                                     ignore_indices=ignore_indices)
                for n in objs}

    def get_constraint_values(self, ctype='all', lintype='all', unscaled=False, filter=None,
                              ignore_indices=False):
        """
        Return constraint values.

        Parameters
        ----------
        ctype : string
            Default is 'all'. Optionally return just the inequality constraints
            with 'ineq' or the equality constraints with 'eq'.
        lintype : string
            Default is 'all'. Optionally return just the linear constraints
            with 'linear' or the nonlinear constraints with 'nonlinear'.
        unscaled : bool
            Set to True if unscaled (physical) constraint values are desired.
        filter : list
            List of constraint names used by recorders.
        ignore_indices : bool
            Set to True if the full array is desired, not just those indicated by indices.

        Returns
        -------
        dict
           Dictionary containing values of each constraint.
        """
        if filter is not None:
            cons = filter
        else:
            cons = self._cons

        con_dict = {}
        for name in cons:
            meta = self._cons[name]

            if lintype == 'linear' and not meta['linear']:
                continue

            if lintype == 'nonlinear' and meta['linear']:
                continue

            if ctype == 'eq' and meta['equals'] is None:
                continue

            if ctype == 'ineq' and meta['equals'] is not None:
                continue

            con_dict[name] = self._get_voi_val(name, meta, self._remote_cons, unscaled=unscaled,
                                               ignore_indices=ignore_indices)

        return con_dict
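    # Minimal usage sketch (hypothetical `driver` instance); the ctype/lintype filters
    # above combine, so this returns only the nonlinear equality constraints, unscaled:
    #
    #     eq_nl_cons = driver.get_constraint_values(ctype='eq', lintype='nonlinear',
    #                                               unscaled=True)
    #     for name, val in eq_nl_cons.items():
    #         print(name, val)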

    def _get_ordered_nl_responses(self):
        """
        Return the names of nonlinear responses in the order used by the driver.

        Default order is objectives followed by nonlinear constraints.  This is used for
        simultaneous derivative coloring and sparsity determination.

        Returns
        -------
        list of str
            The nonlinear response names in order.
        """
        order = list(self._objs)
        order.extend(n for n, meta in iteritems(self._cons)
                     if not ('linear' in meta and meta['linear']))
        return order

    def _update_voi_meta(self, model):
        """
        Collect response and design var metadata from the model and size desvars and responses.

        Parameters
        ----------
        model : System
            The System that represents the entire model.

        Returns
        -------
        int
            Total size of responses, with linear constraints excluded.
        int
            Total size of design vars.
        """
        self._objs = objs = OrderedDict()
        self._cons = cons = OrderedDict()

        self._responses = resps = model.get_responses(recurse=True)
        for name, data in iteritems(resps):
            if data['type'] == 'con':
                cons[name] = data
            else:
                objs[name] = data

        response_size = sum(resps[n]['size'] for n in self._get_ordered_nl_responses())

        # Gather up the information for design vars.
        self._designvars = designvars = model.get_design_vars(recurse=True)
        desvar_size = sum(data['size'] for data in itervalues(designvars))

        return response_size, desvar_size

    def run(self):
        """
        Execute this driver.

        The base `Driver` just runs the model. All other drivers overload
        this method.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        with RecordingDebugging(self._get_name(), self.iter_count, self):
            self._problem.model._solve_nonlinear()

        self.iter_count += 1
        return False

    def _compute_totals(self, of=None, wrt=None, return_format='flat_dict', global_names=True):
        """
        Compute derivatives of desired quantities with respect to desired inputs.

        All derivatives are returned using driver scaling.

        Parameters
        ----------
        of : list of variable name strings or None
            Variables whose derivatives will be computed. Default is None, which
            uses the driver's objectives and constraints.
        wrt : list of variable name strings or None
            Variables with respect to which the derivatives will be computed.
            Default is None, which uses the driver's desvars.
        return_format : string
            Format to return the derivatives. Default is a 'flat_dict', which
            returns them in a dictionary whose keys are tuples of form (of, wrt). For
            the scipy optimizer, 'array' is also supported.
        global_names : bool
            Set to True when passing in global names to skip some translation steps.

        Returns
        -------
        derivs : object
            Derivatives in form requested by 'return_format'.
        """
        problem = self._problem
        total_jac = self._total_jac
        debug_print = 'totals' in self.options['debug_print'] and (not MPI or
                                                                   MPI.COMM_WORLD.rank == 0)

        if debug_print:
            header = 'Driver total derivatives for iteration: ' + str(self.iter_count)
            print(header)
            print(len(header) * '-' + '\n')

        if problem.model._owns_approx_jac:
            self._recording_iter.stack.append(('_compute_totals_approx', 0))

            try:
                if total_jac is None:
                    total_jac = _TotalJacInfo(problem, of, wrt, global_names,
                                              return_format, approx=True, debug_print=debug_print)
                    self._total_jac = total_jac
                    totals = total_jac.compute_totals_approx(initialize=True)
                else:
                    totals = total_jac.compute_totals_approx()
            finally:
                self._recording_iter.stack.pop()

        else:
            if total_jac is None:
                total_jac = _TotalJacInfo(problem, of, wrt, global_names, return_format,
                                          debug_print=debug_print)

            # don't cache linear constraint jacobian
            if not total_jac.has_lin_cons:
                self._total_jac = total_jac

            self._recording_iter.stack.append(('_compute_totals', 0))

            try:
                totals = total_jac.compute_totals()
            finally:
                self._recording_iter.stack.pop()

        if self._rec_mgr._recorders and self.recording_options['record_derivatives']:
            metadata = create_local_meta(self._get_name())
            total_jac.record_derivatives(self, metadata)

        return totals
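    # Sketch of the default 'flat_dict' return format (variable names are illustrative):
    # keys are (of, wrt) tuples and each value is the corresponding total-derivative
    # sub-jacobian array.
    #
    #     totals = driver._compute_totals(of=['obj', 'con1'], wrt=['x'])
    #     d_obj_dx = totals['obj', 'x']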

    def record_iteration(self):
        """
        Record an iteration of the current Driver.
        """
        if not self._rec_mgr._recorders:
            return

        # Get the data to record (collective calls that get across all ranks)
        opts = self.recording_options
        filt = self._filtered_vars_to_record

        if opts['record_desvars']:
            des_vars = self.get_design_var_values(unscaled=True, ignore_indices=True)
        else:
            des_vars = {}

        if opts['record_objectives']:
            obj_vars = self.get_objective_values(unscaled=True, ignore_indices=True)
        else:
            obj_vars = {}

        if opts['record_constraints']:
            con_vars = self.get_constraint_values(unscaled=True, ignore_indices=True)
        else:
            con_vars = {}

        if opts['record_responses']:
            # res_vars = self.get_response_values()  # not really working yet
            res_vars = {}
        else:
            res_vars = {}

        des_vars = {name: des_vars[name] for name in filt['des']}
        obj_vars = {name: obj_vars[name] for name in filt['obj']}
        con_vars = {name: con_vars[name] for name in filt['con']}
        # res_vars = {name: res_vars[name] for name in filt['res']}

        model = self._problem.model

        names = model._outputs._names
        views = model._outputs._views
        sys_vars = {name: views[name] for name in names if name in filt['sys']}

        if self.recording_options['record_inputs']:
            names = model._inputs._names
            views = model._inputs._views
            in_vars = {name: views[name] for name in names if name in filt['in']}
        else:
            in_vars = {}

        if MPI:
            des_vars = self._gather_vars(model, des_vars)
            res_vars = self._gather_vars(model, res_vars)
            obj_vars = self._gather_vars(model, obj_vars)
            con_vars = self._gather_vars(model, con_vars)
            sys_vars = self._gather_vars(model, sys_vars)
            in_vars = self._gather_vars(model, in_vars)

        outs = {}
        if not MPI or model.comm.rank == 0:
            outs.update(des_vars)
            outs.update(res_vars)
            outs.update(obj_vars)
            outs.update(con_vars)
            outs.update(sys_vars)

        data = {
            'out': outs,
            'in': in_vars
        }

        metadata = create_local_meta(self._get_name())

        self._rec_mgr.record_iteration(self, data, metadata)

    def _gather_vars(self, root, local_vars):
        """
        Gather and return only variables listed in `local_vars` from the `root` System.

        Parameters
        ----------
        root : <System>
            the root System for the Problem
        local_vars : dict
            local variable names and values

        Returns
        -------
        dct : dict
            variable names and values.
        """
        # if trace:
        #     debug("gathering vars for recording in %s" % root.pathname)
        all_vars = root.comm.gather(local_vars, root=0)
        # if trace:
        #     debug("DONE gathering rec vars for %s" % root.pathname)

        if root.comm.rank == 0:
            dct = all_vars[-1]
            for d in all_vars[:-1]:
                dct.update(d)
            return dct

    def _get_name(self):
        """
        Get name of current Driver.

        Returns
        -------
        str
            Name of current Driver.
        """
        return "Driver"

    def set_simul_deriv_color(self, simul_info):
        """
        Set the coloring (and possibly the sub-jac sparsity) for simultaneous total derivatives.

        Parameters
        ----------
        simul_info : str or dict

            ::

                # Information about simultaneous coloring for design vars and responses.  If a
                # string, then simul_info is assumed to be the name of a file that contains the
                # coloring information in JSON format.  If a dict, the structure looks like this:

                {
                "fwd": [
                    # First, a list of column index lists, each index list representing columns
                    # having the same color, except for the very first index list, which contains
                    # indices of all columns that are not colored.
                    [
                        [i1, i2, i3, ...]    # list of non-colored columns
                        [ia, ib, ...]    # list of columns in first color
                        [ic, id, ...]    # list of columns in second color
                           ...           # remaining color lists, one list of columns per color
                    ],

                    # Next is a list of lists, one for each column, containing the nonzero rows for
                    # that column.  If a column is not colored, then it will have a None entry
                    # instead of a list.
                    [
                        [r1, rn, ...]   # list of nonzero rows for column 0
                        None,           # column 1 is not colored
                        [ra, rb, ...]   # list of nonzero rows for column 2
                            ...
                    ],
                ],
                # This example is not a bidirectional coloring, so the opposite direction, "rev"
                # in this case, has an empty row index list.  It could also be removed entirely.
                "rev": [[[]], []],
                "sparsity":
                    # The sparsity entry can be absent, indicating that no sparsity structure is
                    # specified, or it can be a nested dictionary where the outer keys are response
                    # names, the inner keys are design variable names, and the value is a tuple of
                    # the form (row_list, col_list, shape).
                    {
                        resp1_name: {
                            dv1_name: (rows, cols, shape),  # for sub-jac d_resp1/d_dv1
                            dv2_name: (rows, cols, shape),
                              ...
                        },
                        resp2_name: {
                            ...
                        }
                        ...
                    }
                }

        """
        if self.supports['simultaneous_derivatives']:
            self._simul_coloring_info = simul_info
        else:
            raise RuntimeError("Driver '%s' does not support simultaneous derivatives." %
                               self._get_name())
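    # Hedged example of a `simul_info` dict that follows the structure documented above
    # (column/row indices are illustrative only):
    #
    #     simul_info = {
    #         'fwd': [
    #             [[0], [1, 3], [2, 4]],             # col 0 uncolored, then two colors
    #             [None, [0, 2], [1], [0, 1], [2]],  # nonzero rows for each of the 5 cols
    #         ],
    #         'rev': [[[]], []],                     # no reverse coloring in this example
    #     }
    #     driver.set_simul_deriv_color(simul_info)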

    def set_total_jac_sparsity(self, sparsity):
        """
        Set the sparsity of sub-jacobians of the total jacobian.

        Note: This currently will have no effect if you are not using the pyOptSparseDriver.

        Parameters
        ----------
        sparsity : str or dict

            ::

                # Sparsity is a nested dictionary where the outer keys are response
                # names, the inner keys are design variable names, and the value is a tuple of
                # the form (row_list, col_list, shape).
                {
                    resp1: {
                        dv1: (rows, cols, shape),  # for sub-jac d_resp1/d_dv1
                        dv2: (rows, cols, shape),
                          ...
                    },
                    resp2: {
                        ...
                    }
                    ...
                }
        """
        if self.supports['total_jac_sparsity']:
            self._total_jac_sparsity = sparsity
        else:
            raise RuntimeError("Driver '%s' does not support setting of total jacobian sparsity." %
                               self._get_name())
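    # Hedged example of a `sparsity` dict following the structure documented above
    # (response/desvar names, indices, and shapes are illustrative only):
    #
    #     sparsity = {
    #         'con1': {
    #             'x': ([0, 1, 1], [0, 0, 2], (2, 3)),  # rows, cols, shape of d_con1/d_x
    #         },
    #     }
    #     driver.set_total_jac_sparsity(sparsity)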

    def _setup_simul_coloring(self):
        """
        Set up metadata for simultaneous derivative solution.
        """
        # command line simul_coloring uses this env var to turn pre-existing coloring off
        if not coloring_mod._use_sparsity:
            return

        problem = self._problem
        if not problem.model._use_derivatives:
            simple_warning("Derivatives are turned off.  Skipping simul deriv coloring.")
            return

        if isinstance(self._simul_coloring_info, string_types):
            with open(self._simul_coloring_info, 'r') as f:
                self._simul_coloring_info = coloring_mod._json2coloring(json.load(f))

        if 'rev' in self._simul_coloring_info and problem._orig_mode not in ('rev', 'auto'):
            revcol = self._simul_coloring_info['rev'][0][0]
            if revcol:
                raise RuntimeError("Simultaneous coloring does reverse solves but mode has "
                                   "been set to '%s'" % problem._orig_mode)
        if 'fwd' in self._simul_coloring_info and problem._orig_mode not in ('fwd', 'auto'):
            fwdcol = self._simul_coloring_info['fwd'][0][0]
            if fwdcol:
                raise RuntimeError("Simultaneous coloring does forward solves but mode has "
                                   "been set to '%s'" % problem._orig_mode)

        # simul_coloring_info can contain data for either fwd, rev, or both, along with optional
        # sparsity patterns
        if 'sparsity' in self._simul_coloring_info:
            sparsity = self._simul_coloring_info['sparsity']
            del self._simul_coloring_info['sparsity']
        else:
            sparsity = None

        if sparsity is not None and self._total_jac_sparsity is not None:
            raise RuntimeError("Total jac sparsity was set in both _simul_coloring_info"
                               " and _total_jac_sparsity.")
        self._total_jac_sparsity = sparsity

    def _pre_run_model_debug_print(self):
        """
        Optionally print some debugging information before the model runs.
        """
        debug_opt = self.options['debug_print']
        if not debug_opt or debug_opt == ['totals']:
            return

        if not MPI or MPI.COMM_WORLD.rank == 0:
            header = 'Driver debug print for iter coord: {}'.format(
                self._recording_iter.get_formatted_iteration_coordinate())
            print(header)
            print(len(header) * '-')

        if 'desvars' in debug_opt:
            desvar_vals = self.get_design_var_values(unscaled=True, ignore_indices=True)
            if not MPI or MPI.COMM_WORLD.rank == 0:
                print("Design Vars")
                if desvar_vals:
                    pprint.pprint(desvar_vals)
                else:
                    print("None")
                print()

        sys.stdout.flush()

    def _post_run_model_debug_print(self):
        """
        Optionally print some debugging information after the model runs.
        """
        if 'nl_cons' in self.options['debug_print']:
            cons = self.get_constraint_values(lintype='nonlinear', unscaled=True)
            if not MPI or MPI.COMM_WORLD.rank == 0:
                print("Nonlinear constraints")
                if cons:
                    pprint.pprint(cons)
                else:
                    print("None")
                print()

        if 'ln_cons' in self.options['debug_print']:
            cons = self.get_constraint_values(lintype='linear', unscaled=True)
            if not MPI or MPI.COMM_WORLD.rank == 0:
                print("Linear constraints")
                if cons:
                    pprint.pprint(cons)
                else:
                    print("None")
                print()

        if 'objs' in self.options['debug_print']:
            objs = self.get_objective_values(unscaled=True)
            if not MPI or MPI.COMM_WORLD.rank == 0:
                print("Objectives")
                if objs:
                    pprint.pprint(objs)
                else:
                    print("None")
                print()

        sys.stdout.flush()
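    # Minimal sketch of turning on the driver debug printing used by the two methods
    # above; the list entries mirror the keys checked in the code:
    #
    #     driver.options['debug_print'] = ['desvars', 'nl_cons', 'ln_cons', 'objs', 'totals']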
Example #32
class Solver(object):
    """
    Base solver class.

    This class is subclassed by NonlinearSolver and LinearSolver,
    which are in turn subclassed by actual solver implementations.

    Attributes
    ----------
    _system : <System>
        Pointer to the owning system.
    _depth : int
        How many subsolvers deep this solver is (0 means not a subsolver).
    _vec_names : [str, ...]
        List of right-hand-side (RHS) vector names.
    _mode : str
        'fwd' or 'rev', applicable to linear solvers only.
    _iter_count : int
        Number of iterations for the current invocation of the solver.
    _rec_mgr : <RecordingManager>
        Object that manages all recorders added to this solver.
    cite : str
        Listing of relevant citations that should be referenced when
        publishing work that uses this class.
    options : <OptionsDictionary>
        Options dictionary.
    recording_options : <OptionsDictionary>
        Recording options dictionary.
    supports : <OptionsDictionary>
        Options dictionary describing what features are supported by this
        solver.
    _filtered_vars_to_record : dict
        Dict of lists of var names to record.
    _norm0 : float
        Normalization factor.
    _solver_info : SolverInfo
        A stack-like object shared by all Solvers in the model.
    """

    # Object to store some formatting for iprint that is shared across all solvers.
    SOLVER = 'base_solver'

    def __init__(self, **kwargs):
        """
        Initialize all attributes.

        Parameters
        ----------
        **kwargs : dict of keyword arguments
            Keyword arguments that will be mapped into the Solver options.
        """
        self._system = None
        self._depth = 0
        self._vec_names = None
        self._mode = 'fwd'
        self._iter_count = 0
        self._solver_info = None

        # Solver options
        self.options = OptionsDictionary(parent_name=self.msginfo)
        self.options.declare('maxiter', types=int, default=10,
                             desc='maximum number of iterations')
        self.options.declare('atol', default=1e-10,
                             desc='absolute error tolerance')
        self.options.declare('rtol', default=1e-10,
                             desc='relative error tolerance')
        self.options.declare('iprint', types=int, default=1,
                             desc='whether to print output')
        self.options.declare('err_on_maxiter', types=bool, default=None, allow_none=True,
                             desc="Deprecated. Use 'err_on_non_converge'.")
        self.options.declare('err_on_non_converge', types=bool, default=False,
                             desc="When True, AnalysisError will be raised if we don't converge.")

        # Case recording options
        self.recording_options = OptionsDictionary(parent_name=self.msginfo)
        self.recording_options.declare('record_abs_error', types=bool, default=True,
                                       desc='Set to True to record absolute error at the \
                                       solver level')
        self.recording_options.declare('record_rel_error', types=bool, default=True,
                                       desc='Set to True to record relative error at the \
                                       solver level')
        self.recording_options.declare('record_inputs', types=bool, default=True,
                                       desc='Set to True to record inputs at the solver level')
        self.recording_options.declare('record_outputs', types=bool, default=True,
                                       desc='Set to True to record outputs at the solver level')
        self.recording_options.declare('record_solver_residuals', types=bool, default=False,
                                       desc='Set to True to record residuals at the solver level')
        self.recording_options.declare('record_metadata', types=bool, desc='Record metadata',
                                       default=True)
        self.recording_options.declare('includes', types=list, default=['*'],
                                       desc='Patterns for variables to include in recording')
        self.recording_options.declare('excludes', types=list, default=[],
                                       desc='Patterns for vars to exclude in recording '
                                            '(processed post-includes)')
        # Case recording related
        self._filtered_vars_to_record = {}
        self._norm0 = 0.0

        # What the solver supports.
        self.supports = OptionsDictionary(parent_name=self.msginfo)
        self.supports.declare('gradients', types=bool, default=False)
        self.supports.declare('implicit_components', types=bool, default=False)

        self._declare_options()
        self.options.update(kwargs)

        self._rec_mgr = RecordingManager()

        self.cite = ""

    @property
    def msginfo(self):
        """
        Return info to prepend to messages.

        Returns
        -------
        str
            Info to prepend to messages.
        """
        if self._system is None:
            return type(self).__name__
        return '{} in {}'.format(type(self).__name__, self._system().msginfo)

    def _assembled_jac_solver_iter(self):
        """
        Return an empty generator of lin solvers using assembled jacs.
        """
        for i in ():
            yield

    def add_recorder(self, recorder):
        """
        Add a recorder to the solver's RecordingManager.

        Parameters
        ----------
        recorder : <CaseRecorder>
           A recorder instance to be added to RecManager.
        """
        if MPI:
            raise RuntimeError(
                "Recording of Solvers when running parallel code is not supported yet")
        self._rec_mgr.append(recorder)

    def _declare_options(self):
        """
        Declare options before kwargs are processed in the init method.

        This is optionally implemented by subclasses of Solver.
        """
        pass

    def _setup_solvers(self, system, depth):
        """
        Assign system instance, set depth, and optionally perform setup.

        Parameters
        ----------
        system : <System>
            pointer to the owning system.
        depth : int
            depth of the current system (already incremented).
        """
        self._system = weakref.ref(system)
        self._depth = depth
        self._solver_info = system._solver_info
        self._recording_iter = system._recording_iter

        if system.pathname:
            parent_name = self.msginfo
            self.options._parent_name = parent_name
            self.recording_options._parent_name = parent_name
            self.supports._parent_name = parent_name

        if isinstance(self, LinearSolver) and not system._use_derivatives:
            return

        self._rec_mgr.startup(self)
        self._rec_mgr.record_metadata(self)

        myoutputs = myresiduals = myinputs = set()
        incl = self.recording_options['includes']
        excl = self.recording_options['excludes']

        if self.recording_options['record_solver_residuals']:
            if isinstance(self, NonlinearSolver):
                residuals = system._residuals
            else:  # it's a LinearSolver
                residuals = system._vectors['residual']['linear']

            myresiduals = {n for n in residuals._names if check_path(n, incl, excl)}

        if self.recording_options['record_outputs']:
            if isinstance(self, NonlinearSolver):
                outputs = system._outputs
            else:  # it's a LinearSolver
                outputs = system._vectors['output']['linear']

            myoutputs = {n for n in outputs._names if check_path(n, incl, excl)}

        if self.recording_options['record_inputs']:
            if isinstance(self, NonlinearSolver):
                inputs = system._inputs
            else:
                inputs = system._vectors['input']['linear']

            myinputs = {n for n in inputs._names if check_path(n, incl, excl)}

        self._filtered_vars_to_record = {
            'in': myinputs,
            'out': myoutputs,
            'res': myresiduals
        }

        # Raise a deprecation warning for changed option.
        if 'err_on_maxiter' in self.options and self.options['err_on_maxiter'] is not None:
            self.options['err_on_non_converge'] = self.options['err_on_maxiter']
            warn_deprecation("The 'err_on_maxiter' option provides backwards compatibility "
                             "with earlier version of OpenMDAO; use options['err_on_non_converge'] "
                             "instead.")

    def _set_solver_print(self, level=2, type_='all'):
        """
        Control printing for solvers and subsolvers in the model.

        Parameters
        ----------
        level : int
            iprint level. Set to 2 to print residuals each iteration; set to 1
            to print just the iteration totals; set to 0 to disable all printing
            except for failures, and set to -1 to disable all printing including failures.
        type_ : str
            Type of solver to set: 'LN' for linear, 'NL' for nonlinear, or 'all' for all.
        """
        self.options['iprint'] = level
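    # Quick reference for the iprint levels handled above (set through the option):
    #
    #     solver.options['iprint'] = 2   # residuals printed every iteration
    #     solver.options['iprint'] = 1   # iteration totals only
    #     solver.options['iprint'] = 0   # failures only
    #     solver.options['iprint'] = -1  # silence everything, including failures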

    def _mpi_print(self, iteration, abs_res, rel_res):
        """
        Print residuals from an iteration.

        Parameters
        ----------
        iteration : int
            iteration counter, 0-based.
        abs_res : float
            current absolute residual norm.
        rel_res : float
            current relative residual norm.
        """
        if (self.options['iprint'] == 2 and self._system().comm.rank == 0):

            prefix = self._solver_info.prefix
            solver_name = self.SOLVER

            if prefix.endswith('precon:'):
                solver_name = solver_name[3:]

            print_str = prefix + solver_name
            print_str += ' %d ; %.9g %.9g' % (iteration, abs_res, rel_res)
            print(print_str)

    def _mpi_print_header(self):
        """
        Print header text before solving.
        """
        pass

    def _solve(self):
        """
        Run the iterative solver.
        """
        maxiter = self.options['maxiter']
        atol = self.options['atol']
        rtol = self.options['rtol']
        iprint = self.options['iprint']

        self._mpi_print_header()

        self._iter_count = 0
        norm0, norm = self._iter_initialize()

        self._norm0 = norm0

        self._mpi_print(self._iter_count, norm, norm / norm0)

        while self._iter_count < maxiter and norm > atol and norm / norm0 > rtol:
            with Recording(type(self).__name__, self._iter_count, self) as rec:
                self._single_iteration()
                self._iter_count += 1
                self._run_apply()
                norm = self._iter_get_norm()
                # With solvers, we want to record the norm AFTER the call, but the call needs to
                # be wrapped in the with for stack purposes, so we locally assign  norm & norm0
                # into the class.
                rec.abs = norm
                rec.rel = norm / norm0

            if norm0 == 0:
                norm0 = 1
            self._mpi_print(self._iter_count, norm, norm / norm0)

        system = self._system()
        if system.comm.rank == 0 or os.environ.get('USE_PROC_FILES'):
            prefix = self._solver_info.prefix + self.SOLVER

            # Solver terminated early because a Nan in the norm doesn't satisfy the while-loop
            # conditionals.
            if np.isinf(norm) or np.isnan(norm):
                msg = "Solver '{}' on system '{}': residuals contain 'inf' or 'NaN' after {} " + \
                      "iterations."
                if iprint > -1:
                    print(prefix + msg.format(self.SOLVER, system.pathname,
                                              self._iter_count))

                # Raise AnalysisError if requested.
                if self.options['err_on_non_converge']:
                    raise AnalysisError(msg.format(self.SOLVER, system.pathname,
                                                   self._iter_count))

            # Solver hit maxiter without meeting desired tolerances.
            elif (norm > atol and norm / norm0 > rtol):
                msg = "Solver '{}' on system '{}' failed to converge in {} iterations."

                if iprint > -1:
                    print(prefix + msg.format(self.SOLVER, system.pathname,
                                              self._iter_count))

                # Raise AnalysisError if requested.
                if self.options['err_on_non_converge']:
                    raise AnalysisError(msg.format(self.SOLVER, system.pathname,
                                                   self._iter_count))

            # Solver converged
            elif iprint == 1:
                print(prefix + ' Converged in {} iterations'.format(self._iter_count))
            elif iprint == 2:
                print(prefix + ' Converged')
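    # Worked sketch of the convergence test in the while-loop above; iteration stops as
    # soon as either tolerance is satisfied (numbers are illustrative):
    #
    #     norm0 = 4.0                 # initial residual norm from _iter_initialize
    #     norm = 3.2e-11              # current residual norm
    #     atol = rtol = 1e-10
    #     norm > atol                 # False: absolute tolerance met
    #     norm / norm0 > rtol         # 8e-12 > 1e-10 is False: relative tolerance met
    #     # (norm > atol and norm / norm0 > rtol) is False, so the loop exits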

    def _iter_initialize(self):
        """
        Perform any necessary pre-processing operations.

        Returns
        -------
        float
            initial error.
        float
            error at the first iteration.
        """
        pass

    def _run_apply(self):
        """
        Run the appropriate apply method on the system.
        """
        pass

    def _linearize(self):
        """
        Perform any required linearization operations such as matrix factorization.
        """
        pass

    def _linearize_children(self):
        """
        Return a flag that is True when we need to call linearize on our subsystems' solvers.

        Returns
        -------
        boolean
            Flag for indicating child linearization.
        """
        return True

    def __str__(self):
        """
        Return a string representation of the solver.

        Returns
        -------
        str
            String representation of the solver.
        """
        return self.SOLVER

    def record_iteration(self, **kwargs):
        """
        Record an iteration of the current Solver.

        Parameters
        ----------
        **kwargs : dict
            Keyword arguments (used for abs and rel error).
        """
        if not self._rec_mgr._recorders:
            return

        metadata = create_local_meta(self.SOLVER)

        # Get the data
        data = {}

        if self.recording_options['record_abs_error']:
            data['abs'] = kwargs.get('abs')
        else:
            data['abs'] = None

        if self.recording_options['record_rel_error']:
            data['rel'] = kwargs.get('rel')
        else:
            data['rel'] = None

        system = self._system()
        if isinstance(self, NonlinearSolver):
            outputs = system._outputs
            inputs = system._inputs
            residuals = system._residuals
        else:  # it's a LinearSolver
            outputs = system._vectors['output']['linear']
            inputs = system._vectors['input']['linear']
            residuals = system._vectors['residual']['linear']

        if self.recording_options['record_outputs']:
            data['o'] = {}
            if 'out' in self._filtered_vars_to_record:
                for out in self._filtered_vars_to_record['out']:
                    if out in outputs._names:
                        data['o'][out] = outputs._views[out]
            else:
                data['o'] = outputs
        else:
            data['o'] = None

        if self.recording_options['record_inputs']:
            data['i'] = {}
            if 'in' in self._filtered_vars_to_record:
                for inp in self._filtered_vars_to_record['in']:
                    if inp in inputs._names:
                        data['i'][inp] = inputs._views[inp]
            else:
                data['i'] = inputs
        else:
            data['i'] = None

        if self.recording_options['record_solver_residuals']:
            data['r'] = {}
            if 'res' in self._filtered_vars_to_record:
                for res in self._filtered_vars_to_record['res']:
                    if res in residuals._names:
                        data['r'][res] = residuals._views[res]
            else:
                data['r'] = residuals
        else:
            data['r'] = None

        self._rec_mgr.record_iteration(self, data, metadata)

    def cleanup(self):
        """
        Clean up resources prior to exit.
        """
        # shut down all recorders
        self._rec_mgr.shutdown()

    def _set_complex_step_mode(self, active):
        """
        Turn on or off complex stepping mode.

        Recurses to turn on or off complex stepping mode in all subsystems and their vectors.

        Parameters
        ----------
        active : bool
            Complex mode flag; set to True prior to commencing complex step.
        """
        pass
Example #33
class Solver(object):
    """
    Base solver class.

    This class is subclassed by NonlinearSolver and LinearSolver,
    which are in turn subclassed by actual solver implementations.

    Attributes
    ----------
    _system : <System>
        Pointer to the owning system.
    _depth : int
        How many subsolvers deep this solver is (0 means not a subsolver).
    _vec_names : [str, ...]
        List of right-hand-side (RHS) vector names.
    _mode : str
        'fwd' or 'rev', applicable to linear solvers only.
    _iter_count : int
        Number of iterations for the current invocation of the solver.
    _rec_mgr : <RecordingManager>
        Object that manages all recorders added to this solver.
    _solver_info : <SolverInfo>
        Object to store some formatting for iprint that is shared across all solvers.
    cite : str
        Listing of relevant citations that should be referenced when
        publishing work that uses this class.
    options : <OptionsDictionary>
        Options dictionary.
    recording_options : <OptionsDictionary>
        Recording options dictionary.
    metadata : dict
        Dictionary holding data about this solver.
    supports : <OptionsDictionary>
        Options dictionary describing what features are supported by this
        solver.
    _filtered_vars_to_record : dict
        Dict of lists of var names to record.
    _norm0 : float
        Normalization factor.
    """

    SOLVER = 'base_solver'
    _solver_info = SolverInfo()

    def __init__(self, **kwargs):
        """
        Initialize all attributes.

        Parameters
        ----------
        **kwargs : dict
            options dictionary.
        """
        self._system = None
        self._depth = 0
        self._vec_names = None
        self._mode = 'fwd'
        self._iter_count = 0

        self.options = OptionsDictionary()
        self.recording_options = OptionsDictionary()
        self.options.declare('maxiter', types=int, default=10,
                             desc='maximum number of iterations')
        self.options.declare('atol', default=1e-10,
                             desc='absolute error tolerance')
        self.options.declare('rtol', default=1e-10,
                             desc='relative error tolerance')
        self.options.declare('iprint', types=int, default=1,
                             desc='whether to print output')
        self.options.declare('err_on_maxiter', types=bool, default=False,
                             desc="When True, AnalysisError will be raised if we don't converge.")
        # Case recording options
        self.recording_options.declare('record_abs_error', types=bool, default=True,
                                       desc='Set to True to record absolute error at the \
                                       solver level')
        self.recording_options.declare('record_rel_error', types=bool, default=True,
                                       desc='Set to True to record relative error at the \
                                       solver level')
        self.recording_options.declare('record_solver_residuals', types=bool, default=False,
                                       desc='Set to True to record residuals at the solver level')
        self.recording_options.declare('record_metadata', types=bool, desc='Record metadata',
                                       default=True)
        self.recording_options.declare('includes', types=list, default=['*'],
                                       desc='Patterns for variables to include in recording')
        self.recording_options.declare('excludes', types=list, default=[],
                                       desc='Patterns for vars to exclude in recording '
                                       '(processed post-includes)')
        # Case recording related
        self._filtered_vars_to_record = {}
        self._norm0 = 0.0

        # What the solver supports.
        self.supports = OptionsDictionary()
        self.supports.declare('gradients', types=bool, default=False)

        self._declare_options()
        self.options.update(kwargs)

        self.metadata = {}
        self._rec_mgr = RecordingManager()

        self.cite = ""

    def add_recorder(self, recorder):
        """
        Add a recorder to the solver's RecordingManager.

        Parameters
        ----------
        recorder : <BaseRecorder>
           A recorder instance to be added to RecManager.
        """
        if MPI:
            raise RuntimeError(
                "Recording of Solvers when running parallel code is not supported yet")
        self._rec_mgr.append(recorder)

    def _declare_options(self):
        """
        Declare options before kwargs are processed in the init method.

        This is optionally implemented by subclasses of Solver.
        """
        pass

    def _setup_solvers(self, system, depth):
        """
        Assign system instance, set depth, and optionally perform setup.

        Parameters
        ----------
        system : <System>
            pointer to the owning system.
        depth : int
            depth of the current system (already incremented).
        """
        self._system = system
        self._depth = depth
        self._rec_mgr.startup(self)
        self._rec_mgr.record_metadata(self)

        myoutputs = myresiduals = set()
        incl = self.recording_options['includes']
        excl = self.recording_options['excludes']

        if self.recording_options['record_solver_residuals']:
            if isinstance(self, NonlinearSolver):
                residuals = self._system._residuals
            else:  # it's a LinearSolver
                residuals = self._system._vectors['residual']['linear']

            myresiduals = {n for n in residuals._names
                           if check_path(n, incl, excl)}

        if isinstance(self, NonlinearSolver):
            outputs = self._system._outputs
        else:  # it's a LinearSolver
            outputs = self._system._vectors['output']['linear']

        myoutputs = {n for n in outputs._names
                     if check_path(n, incl, excl)}

        self._filtered_vars_to_record = {
            'out': myoutputs,
            'res': myresiduals
        }

    def _set_solver_print(self, level=2, type_='all'):
        """
        Control printing for solvers and subsolvers in the model.

        Parameters
        ----------
        level : int
            iprint level. Set to 2 to print residuals each iteration; set to 1
            to print just the iteration totals; set to 0 to disable all printing
            except for failures, and set to -1 to disable all printing including failures.
        type_ : str
            Type of solver to set: 'LN' for linear, 'NL' for nonlinear, or 'all' for all.
        """
        self.options['iprint'] = level

    def _mpi_print(self, iteration, abs_res, rel_res):
        """
        Print residuals from an iteration.

        Parameters
        ----------
        iteration : int
            iteration counter, 0-based.
        abs_res : float
            current absolute residual norm.
        rel_res : float
            current relative residual norm.
        """
        if (self.options['iprint'] == 2 and self._system.comm.rank == 0):

            prefix = self._solver_info.prefix
            solver_name = self.SOLVER

            if prefix.endswith('precon:'):
                solver_name = solver_name[3:]

            print_str = prefix + solver_name
            print_str += ' %d ; %.9g %.9g' % (iteration, abs_res, rel_res)
            print(print_str)

    def _mpi_print_header(self):
        """
        Print header text before solving.
        """
        pass

    def _run_iterator(self):
        """
        Run the iterative solver.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        float
            absolute error.
        float
            relative error.
        """
        maxiter = self.options['maxiter']
        atol = self.options['atol']
        rtol = self.options['rtol']
        iprint = self.options['iprint']

        self._mpi_print_header()

        self._iter_count = 0
        norm0, norm = self._iter_initialize()

        self._norm0 = norm0

        self._mpi_print(self._iter_count, norm, norm / norm0)

        while self._iter_count < maxiter and \
                norm > atol and norm / norm0 > rtol:
            with Recording(type(self).__name__, self._iter_count, self) as rec:
                self._iter_execute()
                self._iter_count += 1
                self._run_apply()
                norm = self._iter_get_norm()
                # With solvers, we want to record the norm AFTER the call, but the call needs to
                # be wrapped in the with for stack purposes, so we locally assign  norm & norm0
                # into the class.
                rec.abs = norm
                rec.rel = norm / norm0

            if norm0 == 0:
                norm0 = 1
            self._mpi_print(self._iter_count, norm, norm / norm0)

        fail = (np.isinf(norm) or np.isnan(norm) or
                (norm > atol and norm / norm0 > rtol))

        if self._system.comm.rank == 0 or os.environ.get('USE_PROC_FILES'):
            prefix = self._solver_info.prefix + self.SOLVER
            if fail:
                if iprint > -1:
                    msg = ' Failed to Converge in {} iterations'.format(self._iter_count)
                    print(prefix + msg)

                # Raise AnalysisError if requested.
                if self.options['err_on_maxiter']:
                    msg = "Solver '{}' on system '{}' failed to converge."
                    raise AnalysisError(msg.format(self.SOLVER, self._system.pathname))

            elif iprint == 1:
                print(prefix + ' Converged in {} iterations'.format(self._iter_count))
            elif iprint == 2:
                print(prefix + ' Converged')

        return fail, norm, norm / norm0

    def _iter_initialize(self):
        """
        Perform any necessary pre-processing operations.

        Returns
        -------
        float
            initial error.
        float
            error at the first iteration.
        """
        pass

    def _iter_execute(self):
        """
        Perform the operations in the iteration loop.
        """
        pass

    def _run_apply(self):
        """
        Run the appropriate apply method on the system.
        """
        pass

    def _iter_get_norm(self):
        """
        Return the norm of the residual.

        Returns
        -------
        float
            norm.
        """
        pass

    def _linearize(self):
        """
        Perform any required linearization operations such as matrix factorization.
        """
        pass

    def _linearize_children(self):
        """
        Return a flag that is True when we need to call linearize on our subsystems' solvers.

        Returns
        -------
        boolean
            Flag for indicating child linearization.
        """
        return True

    def solve(self):
        """
        Run the solver.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        float
            absolute error.
        float
            relative error.
        """
        pass

    def __str__(self):
        """
        Return a string representation of the solver.

        Returns
        -------
        str
            String representation of the solver.
        """
        return self.SOLVER

    def record_iteration(self, **kwargs):
        """
        Record an iteration of the current Solver.

        Parameters
        ----------
        **kwargs : dict
            Keyword arguments (used for abs and rel error).
        """
        if not self._rec_mgr._recorders:
            return

        metadata = create_local_meta(self.SOLVER)

        # Get the data
        data = {}
        # if self.options['record_abs_error'] or self.options['record_rel_error']:
        #     norm = self._iter_get_norm()

        if self.recording_options['record_abs_error']:
            # data['abs'] = norm
            data['abs'] = kwargs.get('abs')
        else:
            data['abs'] = None

        if self.recording_options['record_rel_error']:
            # data['rel'] = norm / self._norm0
            data['rel'] = kwargs.get('rel')
        else:
            data['rel'] = None

        if isinstance(self, NonlinearSolver):
            outputs = self._system._outputs
        else:  # it's a LinearSolver
            outputs = self._system._vectors['output']['linear']

        data['o'] = {}
        if 'out' in self._filtered_vars_to_record:
            for out in self._filtered_vars_to_record['out']:
                if out in outputs._names:
                    data['o'][out] = outputs._names[out]
        else:
            data['o'] = outputs

        if self.recording_options['record_solver_residuals']:

            if isinstance(self, NonlinearSolver):
                residuals = self._system._residuals
            else:  # it's a LinearSolver
                residuals = self._system._vectors['residual']['linear']

            data['r'] = {}
            if 'res' in self._filtered_vars_to_record:
                for res in self._filtered_vars_to_record['res']:
                    if res in residuals._names:
                        data['r'][res] = residuals._names[res]
            else:
                data['r'] = residuals
        else:
            data['r'] = None

        self._rec_mgr.record_iteration(self, data, metadata)
Example #34
    def __init__(self):
        """
        Initialize the driver.
        """
        self._rec_mgr = RecordingManager()
        self._vars_to_record = {
            'desvarnames': set(),
            'responsenames': set(),
            'objectivenames': set(),
            'constraintnames': set(),
            'sysinclnames': set(),
        }

        self._problem = None
        self._designvars = None
        self._cons = None
        self._objs = None
        self._responses = None
        self.options = OptionsDictionary()
        self.recording_options = OptionsDictionary()

        ###########################
        self.recording_options.declare('record_metadata', types=bool, desc='Record metadata',
                                       default=True)
        self.recording_options.declare('record_desvars', types=bool, default=True,
                                       desc='Set to True to record design variables at the \
                                       driver level')
        self.recording_options.declare('record_responses', types=bool, default=False,
                                       desc='Set to True to record responses at the driver level')
        self.recording_options.declare('record_objectives', types=bool, default=True,
                                       desc='Set to True to record objectives at the \
                                       driver level')
        self.recording_options.declare('record_constraints', types=bool, default=True,
                                       desc='Set to True to record constraints at the \
                                       driver level')
        self.recording_options.declare('includes', types=list, default=[],
                                       desc='Patterns for variables to include in recording')
        self.recording_options.declare('excludes', types=list, default=[],
                                       desc='Patterns for vars to exclude in recording '
                                       '(processed post-includes)')
        self.recording_options.declare('record_derivatives', types=bool, default=False,
                                       desc='Set to True to record derivatives at the driver \
                                       level')
        ###########################

        # What the driver supports.
        self.supports = OptionsDictionary()
        self.supports.declare('inequality_constraints', types=bool, default=False)
        self.supports.declare('equality_constraints', types=bool, default=False)
        self.supports.declare('linear_constraints', types=bool, default=False)
        self.supports.declare('two_sided_constraints', types=bool, default=False)
        self.supports.declare('multiple_objectives', types=bool, default=False)
        self.supports.declare('integer_design_vars', types=bool, default=False)
        self.supports.declare('gradients', types=bool, default=False)
        self.supports.declare('active_set', types=bool, default=False)
        self.supports.declare('simultaneous_derivatives', types=bool, default=False)

        self.iter_count = 0
        self.options = None
        self._model_viewer_data = None
        self.cite = ""

        # TODO, support these in OpenMDAO
        self.supports.declare('integer_design_vars', types=bool, default=False)

        self._simul_coloring_info = None
        self._res_jacs = {}

        self.fail = False
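
        # Minimal sketch of adjusting the recording options declared above on a driver
        # instance; recorder attachment itself is assumed to happen elsewhere, and the
        # exclude pattern is illustrative only:
        #
        #     driver.recording_options['record_desvars'] = True
        #     driver.recording_options['record_derivatives'] = True
        #     driver.recording_options['excludes'] = ['*_implicit*']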
Example #35
class Solver(object):
    """
    Base solver class.

    This class is subclassed by NonlinearSolver and LinearSolver,
    which are in turn subclassed by actual solver implementations.

    Attributes
    ----------
    _system : <System>
        Pointer to the owning system.
    _depth : int
        How many subsolvers deep this solver is (0 means not a subsolver).
    _vec_names : [str, ...]
        List of right-hand-side (RHS) vector names.
    _mode : str
        'fwd' or 'rev', applicable to linear solvers only.
    _iter_count : int
        Number of iterations for the current invocation of the solver.
    _rec_mgr : <RecordingManager>
        Object that manages all recorders added to this solver.
    cite : str
        Listing of relevant citations that should be referenced when
        publishing work that uses this class.
    options : <OptionsDictionary>
        Options dictionary.
    recording_options : <OptionsDictionary>
        Recording options dictionary.
    supports : <OptionsDictionary>
        Options dictionary describing what features are supported by this
        solver.
    _filtered_vars_to_record : dict
        Dict of lists of var names to record.
    _norm0 : float
        Normalization factor.
    _solver_info : SolverInfo
        A stack-like object shared by all Solvers in the model.
    """

    # Object to store some formatting for iprint that is shared across all solvers.
    SOLVER = 'base_solver'

    def __init__(self, **kwargs):
        """
        Initialize all attributes.

        Parameters
        ----------
        **kwargs : dict of keyword arguments
            Keyword arguments that will be mapped into the Solver options.
        """
        self._system = None
        self._depth = 0
        self._vec_names = None
        self._mode = 'fwd'
        self._iter_count = 0
        self._solver_info = None

        # Solver options
        self.options = OptionsDictionary()
        self.options.declare('maxiter', types=int, default=10,
                             desc='maximum number of iterations')
        self.options.declare('atol', default=1e-10,
                             desc='absolute error tolerance')
        self.options.declare('rtol', default=1e-10,
                             desc='relative error tolerance')
        self.options.declare('iprint', types=int, default=1,
                             desc='whether to print output')
        self.options.declare('err_on_maxiter', types=bool, default=False,
                             desc="When True, AnalysisError will be raised if we don't converge.")

        # Case recording options
        self.recording_options = OptionsDictionary()
        self.recording_options.declare('record_abs_error', types=bool, default=True,
                                       desc='Set to True to record absolute error at the \
                                       solver level')
        self.recording_options.declare('record_rel_error', types=bool, default=True,
                                       desc='Set to True to record relative error at the \
                                       solver level')
        self.recording_options.declare('record_inputs', types=bool, default=True,
                                       desc='Set to True to record inputs at the solver level')
        self.recording_options.declare('record_outputs', types=bool, default=True,
                                       desc='Set to True to record outputs at the solver level')
        self.recording_options.declare('record_solver_residuals', types=bool, default=False,
                                       desc='Set to True to record residuals at the solver level')
        self.recording_options.declare('record_metadata', types=bool, desc='Record metadata',
                                       default=True)
        self.recording_options.declare('includes', types=list, default=['*'],
                                       desc='Patterns for variables to include in recording')
        self.recording_options.declare('excludes', types=list, default=[],
                                       desc='Patterns for vars to exclude in recording '
                                            '(processed post-includes)')
        # Case recording related
        self._filtered_vars_to_record = {}
        self._norm0 = 0.0

        # What the solver supports.
        self.supports = OptionsDictionary()
        self.supports.declare('gradients', types=bool, default=False)
        self.supports.declare('implicit_components', types=bool, default=False)

        self._declare_options()
        self.options.update(kwargs)

        self._rec_mgr = RecordingManager()

        self.cite = ""

    def _assembled_jac_solver_iter(self):
        """
        Return an empty generator of lin solvers using assembled jacs.
        """
        for i in ():
            yield

    def add_recorder(self, recorder):
        """
        Add a recorder to the solver's RecordingManager.

        Parameters
        ----------
        recorder : <CaseRecorder>
            A recorder instance to be added to the RecordingManager.
        """
        if MPI:
            raise RuntimeError(
                "Recording of Solvers when running parallel code is not supported yet")
        self._rec_mgr.append(recorder)

    def _declare_options(self):
        """
        Declare options before kwargs are processed in the init method.

        This is optionally implemented by subclasses of Solver.
        """
        pass

    def _setup_solvers(self, system, depth):
        """
        Assign system instance, set depth, and optionally perform setup.

        Parameters
        ----------
        system : <System>
            pointer to the owning system.
        depth : int
            depth of the current system (already incremented).
        """
        self._system = system
        self._depth = depth
        self._solver_info = system._solver_info
        self._recording_iter = system._recording_iter

        if isinstance(self, LinearSolver) and not system._use_derivatives:
            return

        self._rec_mgr.startup(self)
        self._rec_mgr.record_metadata(self)

        myoutputs = myresiduals = myinputs = set()
        incl = self.recording_options['includes']
        excl = self.recording_options['excludes']

        if self.recording_options['record_solver_residuals']:
            if isinstance(self, NonlinearSolver):
                residuals = system._residuals
            else:  # it's a LinearSolver
                residuals = system._vectors['residual']['linear']

            myresiduals = {n for n in residuals._names if check_path(n, incl, excl)}

        if self.recording_options['record_outputs']:
            if isinstance(self, NonlinearSolver):
                outputs = system._outputs
            else:  # it's a LinearSolver
                outputs = system._vectors['output']['linear']

            myoutputs = {n for n in outputs._names if check_path(n, incl, excl)}

        if self.recording_options['record_inputs']:
            if isinstance(self, NonlinearSolver):
                inputs = system._inputs
            else:
                inputs = system._vectors['input']['linear']

            myinputs = {n for n in inputs._names if check_path(n, incl, excl)}

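        # Cache the filtered name sets so record_iteration() can reuse them without re-matching patterns.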
        self._filtered_vars_to_record = {
            'in': myinputs,
            'out': myoutputs,
            'res': myresiduals
        }

    def _set_solver_print(self, level=2, type_='all'):
        """
        Control printing for solvers and subsolvers in the model.

        Parameters
        ----------
        level : int
            iprint level. Set to 2 to print residuals each iteration; set to 1
            to print just the iteration totals; set to 0 to disable all printing
            except for failures, and set to -1 to disable all printing including failures.
        type_ : str
            Type of solver to set: 'LN' for linear, 'NL' for nonlinear, or 'all' for all.
        """
        self.options['iprint'] = level

    def _mpi_print(self, iteration, abs_res, rel_res):
        """
        Print residuals from an iteration.

        Parameters
        ----------
        iteration : int
            iteration counter, 0-based.
        abs_res : float
            current absolute residual norm.
        rel_res : float
            current relative residual norm.
        """
        if (self.options['iprint'] == 2 and self._system.comm.rank == 0):

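            # Build a line of the form "<prefix><solver name> <iteration> ; <abs_res> <rel_res>";
            # the prefix comes from the shared SolverInfo stack and reflects solver nesting.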
            prefix = self._solver_info.prefix
            solver_name = self.SOLVER

            if prefix.endswith('precon:'):
                solver_name = solver_name[3:]

            print_str = prefix + solver_name
            print_str += ' %d ; %.9g %.9g' % (iteration, abs_res, rel_res)
            print(print_str)

    def _mpi_print_header(self):
        """
        Print header text before solving.
        """
        pass

    def _solve(self):
        """
        Run the iterative solver.
        """
        maxiter = self.options['maxiter']
        atol = self.options['atol']
        rtol = self.options['rtol']
        iprint = self.options['iprint']

        self._mpi_print_header()

        self._iter_count = 0
        norm0, norm = self._iter_initialize()

        self._norm0 = norm0

        self._mpi_print(self._iter_count, norm, norm / norm0)

        while self._iter_count < maxiter and norm > atol and norm / norm0 > rtol:
            with Recording(type(self).__name__, self._iter_count, self) as rec:
                self._single_iteration()
                self._iter_count += 1
                self._run_apply()
                norm = self._iter_get_norm()
                # With solvers, we want to record the norm AFTER the call, but the call
                # needs to be wrapped in the "with" block for stack purposes, so we
                # assign norm and norm0 onto the recording context here.
                rec.abs = norm
                rec.rel = norm / norm0

            if norm0 == 0:
                norm0 = 1
            self._mpi_print(self._iter_count, norm, norm / norm0)

        if self._system.comm.rank == 0 or os.environ.get('USE_PROC_FILES'):
            prefix = self._solver_info.prefix + self.SOLVER
            if np.isinf(norm) or np.isnan(norm) or (norm > atol and norm / norm0 > rtol):
                if iprint > -1:
                    msg = ' Failed to Converge in {} iterations'.format(self._iter_count)
                    print(prefix + msg)

                # Raise AnalysisError if requested.
                if self.options['err_on_maxiter']:
                    msg = "Solver '{}' on system '{}' failed to converge."
                    raise AnalysisError(msg.format(self.SOLVER, self._system.pathname))

            elif iprint == 1:
                print(prefix + ' Converged in {} iterations'.format(self._iter_count))
            elif iprint == 2:
                print(prefix + ' Converged')

    def _iter_initialize(self):
        """
        Perform any necessary pre-processing operations.

        Returns
        -------
        float
            initial error.
        float
            error at the first iteration.
        """
        pass

    def _run_apply(self):
        """
        Run the appropriate apply method on the system.
        """
        pass

    def _linearize(self):
        """
        Perform any required linearization operations such as matrix factorization.
        """
        pass

    def _linearize_children(self):
        """
        Return a flag that is True when we need to call linearize on our subsystems' solvers.

        Returns
        -------
        boolean
            Flag for indicating child linearization.
        """
        return True

    def __str__(self):
        """
        Return a string representation of the solver.

        Returns
        -------
        str
            String representation of the solver.
        """
        return self.SOLVER

    def record_iteration(self, **kwargs):
        """
        Record an iteration of the current Solver.

        Parameters
        ----------
        **kwargs : dict
            Keyword arguments (used for abs and rel error).
        """
        if not self._rec_mgr._recorders:
            return

        metadata = create_local_meta(self.SOLVER)

        # Get the data
        data = {}
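        # The recorded dict uses keys 'abs'/'rel' for error norms and 'o', 'i', 'r' for
        # outputs, inputs, and residuals; entries are None when the matching option is off.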

        if self.recording_options['record_abs_error']:
            data['abs'] = kwargs.get('abs')
        else:
            data['abs'] = None

        if self.recording_options['record_rel_error']:
            data['rel'] = kwargs.get('rel')
        else:
            data['rel'] = None

        system = self._system
        if isinstance(self, NonlinearSolver):
            outputs = system._outputs
            inputs = system._inputs
            residuals = system._residuals
        else:  # it's a LinearSolver
            outputs = system._vectors['output']['linear']
            inputs = system._vectors['input']['linear']
            residuals = system._vectors['residual']['linear']

        if self.recording_options['record_outputs']:
            data['o'] = {}
            if 'out' in self._filtered_vars_to_record:
                for out in self._filtered_vars_to_record['out']:
                    if out in outputs._names:
                        data['o'][out] = outputs._views[out]
            else:
                data['o'] = outputs
        else:
            data['o'] = None

        if self.recording_options['record_inputs']:
            data['i'] = {}
            if 'in' in self._filtered_vars_to_record:
                for inp in self._filtered_vars_to_record['in']:
                    if inp in inputs._names:
                        data['i'][inp] = inputs._views[inp]
            else:
                data['i'] = inputs
        else:
            data['i'] = None

        if self.recording_options['record_solver_residuals']:
            data['r'] = {}
            if 'res' in self._filtered_vars_to_record:
                for res in self._filtered_vars_to_record['res']:
                    if res in residuals._names:
                        data['r'][res] = residuals._views[res]
            else:
                data['r'] = residuals
        else:
            data['r'] = None

        self._rec_mgr.record_iteration(self, data, metadata)

    def cleanup(self):
        """
        Clean up resources prior to exit.
        """
        # shut down all recorders
        self._rec_mgr.shutdown()

    def _set_complex_step_mode(self, active):
        """
        Turn on or off complex stepping mode.

        Recurses to turn on or off complex stepping mode in all subsystems and their vectors.

        Parameters
        ----------
        active : bool
            Complex mode flag; set to True prior to commencing complex step.
        """
        pass
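

# ---------------------------------------------------------------------------
# Illustration only (not part of the library source above): a minimal,
# self-contained sketch of the convergence test that Solver._solve() applies
# each iteration.  The loop keeps iterating while the residual norm exceeds
# the absolute tolerance 'atol' AND the norm relative to the initial norm
# exceeds the relative tolerance 'rtol'.  Names and values here are made up
# purely for illustration.
def _converged(norm, norm0, atol=1e-10, rtol=1e-10):
    """Return True once either the absolute or the relative criterion is met."""
    return norm <= atol or norm / norm0 <= rtol


# Starting from an initial residual norm norm0:
assert not _converged(1e-3, 1.0)   # neither criterion met yet
assert _converged(5e-11, 1.0)      # absolute criterion met (5e-11 <= 1e-10)
assert _converged(5e-7, 1e4)       # relative criterion met (5e-7 / 1e4 <= 1e-10)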
Example #36
0
class Driver(object):
    """
    Top-level container for the systems and drivers.

    Attributes
    ----------
    fail : bool
        Reports whether the driver ran successfully.
    iter_count : int
        Keep track of iterations for case recording.
    options : <OptionsDictionary>
        Dictionary with general driver options.
    recording_options : <OptionsDictionary>
        Dictionary with driver recording options.
    debug_print : <OptionsDictionary>
        Dictionary with debugging printing options.
    cite : str
        Listing of relevant citations that should be referenced when
        publishing work that uses this class.
    _problem : <Problem>
        Pointer to the containing problem.
    supports : <OptionsDictionary>
        Provides a consistent way for drivers to declare what features they support.
    _designvars : dict
        Contains all design variable info.
    _cons : dict
        Contains all constraint info.
    _objs : dict
        Contains all objective info.
    _responses : dict
        Contains all response info.
    _rec_mgr : <RecordingManager>
        Object that manages all recorders added to this driver.
    _vars_to_record : dict
        Dict of lists of var names indicating what to record.
    _model_viewer_data : dict
        Structure of model, used to make n2 diagram.
    _remote_dvs : dict
        Dict of design variables that are remote on at least one proc. Values are
        (owning rank, size).
    _remote_cons : dict
        Dict of constraints that are remote on at least one proc. Values are
        (owning rank, size).
    _remote_objs : dict
        Dict of objectives that are remote on at least one proc. Values are
        (owning rank, size).
    _remote_responses : dict
        A combined dict containing entries from _remote_cons and _remote_objs.
    _simul_coloring_info : tuple of dicts
        A data structure describing coloring for simultaneous derivs.
    _total_jac_sparsity : dict, str, or None
        Specifies sparsity of sub-jacobians of the total jacobian. Only used by pyOptSparseDriver.
    _res_jacs : dict
        Dict of sparse subjacobians for use with certain optimizers, e.g. pyOptSparseDriver.
    _total_jac : _TotalJacInfo or None
        Cached total jacobian handling object.
    """
    def __init__(self, **kwargs):
        """
        Initialize the driver.

        Parameters
        ----------
        **kwargs : dict of keyword arguments
            Keyword arguments that will be mapped into the Driver options.
        """
        self._rec_mgr = RecordingManager()
        self._vars_to_record = {
            'desvarnames': set(),
            'responsenames': set(),
            'objectivenames': set(),
            'constraintnames': set(),
            'sysinclnames': set(),
        }

        self._problem = None
        self._designvars = None
        self._cons = None
        self._objs = None
        self._responses = None

        # Driver options
        self.options = OptionsDictionary()

        self.options.declare(
            'debug_print',
            types=list,
            is_valid=_is_debug_print_opts_valid,
            desc="List of what type of Driver variables to print at each "
            "iteration. Valid items in list are 'desvars', 'ln_cons', "
            "'nl_cons', 'objs'",
            default=[])

        # Case recording options
        self.recording_options = OptionsDictionary()

        self.recording_options.declare('record_metadata',
                                       types=bool,
                                       default=True,
                                       desc='Record metadata')
        self.recording_options.declare(
            'record_desvars',
            types=bool,
            default=True,
            desc='Set to True to record design variables at the '
            'driver level')
        self.recording_options.declare(
            'record_responses',
            types=bool,
            default=False,
            desc='Set to True to record responses at the driver level')
        self.recording_options.declare(
            'record_objectives',
            types=bool,
            default=True,
            desc='Set to True to record objectives at the driver level')
        self.recording_options.declare(
            'record_constraints',
            types=bool,
            default=True,
            desc='Set to True to record constraints at the '
            'driver level')
        self.recording_options.declare(
            'includes',
            types=list,
            default=['*'],
            desc='Patterns for variables to include in recording')
        self.recording_options.declare(
            'excludes',
            types=list,
            default=[],
            desc='Patterns for vars to exclude in recording '
            '(processed post-includes)')
        self.recording_options.declare(
            'record_derivatives',
            types=bool,
            default=False,
            desc='Set to True to record derivatives at the driver '
            'level')
        self.recording_options.declare(
            'record_inputs',
            types=bool,
            default=True,
            desc='Set to True to record inputs at the driver level')

        # What the driver supports.
        self.supports = OptionsDictionary()
        self.supports.declare('inequality_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('equality_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('linear_constraints', types=bool, default=False)
        self.supports.declare('two_sided_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('multiple_objectives', types=bool, default=False)
        self.supports.declare('integer_design_vars', types=bool, default=False)
        self.supports.declare('gradients', types=bool, default=False)
        self.supports.declare('active_set', types=bool, default=False)
        self.supports.declare('simultaneous_derivatives',
                              types=bool,
                              default=False)
        self.supports.declare('total_jac_sparsity', types=bool, default=False)
        # TODO, support these in OpenMDAO
        self.supports.declare('integer_design_vars', types=bool, default=False)

        self.iter_count = 0
        self._model_viewer_data = None
        self.cite = ""

        self._simul_coloring_info = None
        self._total_jac_sparsity = None
        self._res_jacs = {}
        self._total_jac = None

        self.fail = False

        self._declare_options()
        self.options.update(kwargs)

    def add_recorder(self, recorder):
        """
        Add a recorder to the driver.

        Parameters
        ----------
        recorder : BaseRecorder
           A recorder instance.
        """
        self._rec_mgr.append(recorder)

    def cleanup(self):
        """
        Clean up resources prior to exit.
        """
        self._rec_mgr.close()

    def _declare_options(self):
        """
        Declare options before kwargs are processed in the init method.

        This is optionally implemented by subclasses of Driver.
        """
        pass

    def _setup_comm(self, comm):
        """
        Perform any driver-specific setup of communicators for the model.

        Parameters
        ----------
        comm : MPI.Comm or <FakeComm> or None
            The communicator for the Problem.

        Returns
        -------
        MPI.Comm or <FakeComm> or None
            The communicator for the Problem model.
        """
        return comm

    def _setup_driver(self, problem):
        """
        Prepare the driver for execution.

        This is the final thing to run during setup.

        Parameters
        ----------
        problem : <Problem>
            Pointer to the containing problem.
        """
        self._problem = problem
        model = problem.model

        self._total_jac = None

        self._objs = objs = OrderedDict()
        self._cons = cons = OrderedDict()

        self._responses = model.get_responses(recurse=True)
        response_size = 0
        for name, data in iteritems(self._responses):
            if data['type'] == 'con':
                cons[name] = data
            else:
                objs[name] = data
            response_size += data['size']

        # Gather up the information for design vars.
        self._designvars = model.get_design_vars(recurse=True)
        self._has_scaling = (
            np.any([r['scaler'] is not None for r in self._responses.values()])
            or np.any(
                [dv['scaler'] is not None
                 for dv in self._designvars.values()]))

        con_set = set()
        obj_set = set()
        dv_set = set()

        self._remote_dvs = dv_dict = {}
        self._remote_cons = con_dict = {}
        self._remote_objs = obj_dict = {}

        # Now determine if later we'll need to allgather cons, objs, or desvars.
        if model.comm.size > 1 and model._subsystems_allprocs:
            local_out_vars = set(model._outputs._views)
            remote_dvs = set(self._designvars) - local_out_vars
            remote_cons = set(self._cons) - local_out_vars
            remote_objs = set(self._objs) - local_out_vars
            all_remote_vois = model.comm.allgather(
                (remote_dvs, remote_cons, remote_objs))
            for rem_dvs, rem_cons, rem_objs in all_remote_vois:
                con_set.update(rem_cons)
                obj_set.update(rem_objs)
                dv_set.update(rem_dvs)

            # If we have remote VOIs, pick an owning rank for each and use that
            # to bcast to others later
            owning_ranks = model._owning_rank
            sizes = model._var_sizes['nonlinear']['output']
            for i, vname in enumerate(model._var_allprocs_abs_names['output']):
                owner = owning_ranks[vname]
                if vname in dv_set:
                    dv_dict[vname] = (owner, sizes[owner, i])
                if vname in con_set:
                    con_dict[vname] = (owner, sizes[owner, i])
                if vname in obj_set:
                    obj_dict[vname] = (owner, sizes[owner, i])

        self._remote_responses = self._remote_cons.copy()
        self._remote_responses.update(self._remote_objs)

        # set up case recording
        self._setup_recording()

        desvar_size = np.sum(data['size']
                             for data in itervalues(self._designvars))

        # set up simultaneous deriv coloring
        if (coloring_mod._use_sparsity and self._simul_coloring_info
                and self.supports['simultaneous_derivatives']):
            if problem._mode == 'fwd':
                self._setup_simul_coloring(problem._mode)
            else:
                raise RuntimeError(
                    "simultaneous derivs are currently not supported in rev mode."
                )

        # if we're using simultaneous derivatives then our effective design var size is less
        # than the full design var size
        if self._simul_coloring_info:
            col_lists = self._simul_coloring_info[0]
            if col_lists:
                desvar_size = len(col_lists[0])
                desvar_size += len(col_lists) - 1

        if ((problem._mode == 'fwd' and desvar_size > response_size)
                or (problem._mode == 'rev' and response_size > desvar_size)):
            warnings.warn(
                "Inefficient choice of derivative mode.  You chose '%s' for a "
                "problem with %d design variables and %d response variables "
                "(objectives and constraints)." %
                (problem._mode, desvar_size, response_size), RuntimeWarning)

    def _setup_recording(self):
        """
        Set up case recording.
        """
        problem = self._problem
        model = problem.model

        mydesvars = myobjectives = myconstraints = myresponses = set()
        myinputs = set()
        mysystem_outputs = set()

        incl = self.recording_options['includes']
        excl = self.recording_options['excludes']
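        # check_path() is assumed to do glob-style pattern matching: a name is kept only
        # when it matches one of the 'includes' patterns and none of the 'excludes' patterns.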

        rec_desvars = self.recording_options['record_desvars']
        rec_objectives = self.recording_options['record_objectives']
        rec_constraints = self.recording_options['record_constraints']
        rec_responses = self.recording_options['record_responses']
        rec_inputs = self.recording_options['record_inputs']

        all_desvars = {
            n
            for n in self._designvars if check_path(n, incl, excl, True)
        }
        all_objectives = {
            n
            for n in self._objs if check_path(n, incl, excl, True)
        }
        all_constraints = {
            n
            for n in self._cons if check_path(n, incl, excl, True)
        }
        if rec_desvars:
            mydesvars = all_desvars

        if rec_objectives:
            myobjectives = all_objectives

        if rec_constraints:
            myconstraints = all_constraints

        if rec_responses:
            myresponses = {
                n
                for n in self._responses if check_path(n, incl, excl, True)
            }

        # get the includes that were requested for this Driver recording
        if incl:
            # The my* variables are sets

            # First gather all of the desired outputs
            # The following might only be the local vars if MPI
            mysystem_outputs = {
                n
                for n in model._outputs if check_path(n, incl, excl)
            }

            # If MPI, and on rank 0, need to gather up all the variables
            #    even those not local to rank 0
            if MPI:
                all_vars = model.comm.gather(mysystem_outputs, root=0)
                if MPI.COMM_WORLD.rank == 0:
                    mysystem_outputs = all_vars[-1]
                    for d in all_vars[:-1]:
                        mysystem_outputs.update(d)

            # de-duplicate mysystem_outputs
            mysystem_outputs = mysystem_outputs.difference(
                all_desvars, all_objectives, all_constraints)

        if rec_inputs:
            prob = self._problem
            root = prob.model
            myinputs = {n for n in root._inputs if check_path(n, incl, excl)}

            if MPI:
                all_vars = root.comm.gather(myinputs, root=0)
                if MPI.COMM_WORLD.rank == 0:
                    myinputs = all_vars[-1]
                    for d in all_vars[:-1]:
                        myinputs.update(d)

        if MPI:  # filter based on who owns the variables
            # TODO Eventually, we think we can get rid of this next check. But to be safe,
            #       we are leaving it in there.
            if not model.is_active():
                raise RuntimeError(
                    "RecordingManager.startup should never be called when "
                    "running in parallel on an inactive System")
            rrank = problem.comm.rank
            rowned = model._owning_rank
            mydesvars = [n for n in mydesvars if rrank == rowned[n]]
            myresponses = [n for n in myresponses if rrank == rowned[n]]
            myobjectives = [n for n in myobjectives if rrank == rowned[n]]
            myconstraints = [n for n in myconstraints if rrank == rowned[n]]
            mysystem_outputs = [
                n for n in mysystem_outputs if rrank == rowned[n]
            ]
            myinputs = [n for n in myinputs if rrank == rowned[n]]

        self._filtered_vars_to_record = {
            'des': mydesvars,
            'obj': myobjectives,
            'con': myconstraints,
            'res': myresponses,
            'sys': mysystem_outputs,
            'in': myinputs
        }

        self._rec_mgr.startup(self)
        if self._rec_mgr._recorders:
            from openmdao.devtools.problem_viewer.problem_viewer import _get_viewer_data
            self._model_viewer_data = _get_viewer_data(problem)
        if self.recording_options['record_metadata']:
            self._rec_mgr.record_metadata(self)

    def _get_voi_val(self,
                     name,
                     meta,
                     remote_vois,
                     unscaled=False,
                     ignore_indices=False):
        """
        Get the value of a variable of interest (objective, constraint, or design var).

        This will retrieve the value if the VOI is remote.

        Parameters
        ----------
        name : str
            Name of the variable of interest.
        meta : dict
            Metadata for the variable of interest.
        remote_vois : dict
            Dict containing (owning_rank, size) for all remote vois of a particular
            type (design var, constraint, or objective).
        unscaled : bool
            Set to True if unscaled (physical) design variables are desired.
        ignore_indices : bool
            Set to True if the full array is desired, not just those indicated by indices.

        Returns
        -------
        float or ndarray
            The value of the named variable of interest.
        """
        model = self._problem.model
        comm = model.comm
        vec = model._outputs._views_flat
        indices = meta['indices']

        if name in remote_vois:
            owner, size = remote_vois[name]
            if owner == comm.rank:
                if indices is None or ignore_indices:
                    val = vec[name].copy()
                else:
                    val = vec[name][indices]
            else:
                if indices is not None:
                    size = len(indices)
                val = np.empty(size)
            comm.Bcast(val, root=owner)
        else:
            if indices is None or ignore_indices:
                val = vec[name].copy()
            else:
                val = vec[name][indices]

        if self._has_scaling and not unscaled:
            # Scale the variable-of-interest value.
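            # Driver scaling is applied elementwise as: scaled = (physical + adder) * scaler.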
            adder = meta['adder']
            if adder is not None:
                val += adder

            scaler = meta['scaler']
            if scaler is not None:
                val *= scaler

        return val

    def get_design_var_values(self,
                              filter=None,
                              unscaled=False,
                              ignore_indices=False):
        """
        Return the design variable values.

        This is called to gather the initial design variable state.

        Parameters
        ----------
        filter : list
            List of desvar names used by recorders.
        unscaled : bool
            Set to True if unscaled (physical) design variables are desired.
        ignore_indices : bool
            Set to True if the full array is desired, not just those indicated by indices.

        Returns
        -------
        dict
           Dictionary containing values of each design variable.
        """
        if filter:
            dvs = filter
        else:
            # use all the designvars
            dvs = self._designvars

        return {
            n: self._get_voi_val(n,
                                 self._designvars[n],
                                 self._remote_dvs,
                                 unscaled=unscaled,
                                 ignore_indices=ignore_indices)
            for n in dvs
        }

    def set_design_var(self, name, value):
        """
        Set the value of a design variable.

        Parameters
        ----------
        name : str
            Global pathname of the design variable.
        value : float or ndarray
            Value for the design variable.
        """
        if (name in self._remote_dvs and self._problem.model._owning_rank[name]
                != self._problem.comm.rank):
            return

        meta = self._designvars[name]
        indices = meta['indices']
        if indices is None:
            indices = slice(None)

        desvar = self._problem.model._outputs._views_flat[name]
        desvar[indices] = value

        if self._has_scaling:
            # Scale design variable values
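            # This inverts the scaling in _get_voi_val(): physical = scaled / scaler - adder.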
            scaler = meta['scaler']
            if scaler is not None:
                desvar[indices] *= 1.0 / scaler

            adder = meta['adder']
            if adder is not None:
                desvar[indices] -= adder

    def get_response_values(self, filter=None):
        """
        Return response values.

        Parameters
        ----------
        filter : list
            List of response names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each response.
        """
        if filter:
            resps = filter
        else:
            resps = self._responses

        return {
            n: self._get_voi_val(n, self._responses[n], self._remote_objs)
            for n in resps
        }

    def get_objective_values(self, unscaled=False, filter=None):
        """
        Return objective values.

        Parameters
        ----------
        unscaled : bool
            Set to True if unscaled (physical) design variables are desired.
        filter : list
            List of objective names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each objective.
        """
        if filter:
            objs = filter
        else:
            objs = self._objs

        return {
            n: self._get_voi_val(n,
                                 self._objs[n],
                                 self._remote_objs,
                                 unscaled=unscaled)
            for n in objs
        }

    def get_constraint_values(self,
                              ctype='all',
                              lintype='all',
                              unscaled=False,
                              filter=None):
        """
        Return constraint values.

        Parameters
        ----------
        ctype : string
            Default is 'all'. Optionally return just the inequality constraints
            with 'ineq' or the equality constraints with 'eq'.
        lintype : string
            Default is 'all'. Optionally return just the linear constraints
            with 'linear' or the nonlinear constraints with 'nonlinear'.
        unscaled : bool
            Set to True if unscaled (physical) design variables are desired.
        filter : list
            List of constraint names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each constraint.
        """
        if filter is not None:
            cons = filter
        else:
            cons = self._cons

        con_dict = {}
        for name in cons:
            meta = self._cons[name]

            if lintype == 'linear' and not meta['linear']:
                continue

            if lintype == 'nonlinear' and meta['linear']:
                continue

            if ctype == 'eq' and meta['equals'] is None:
                continue

            if ctype == 'ineq' and meta['equals'] is not None:
                continue

            con_dict[name] = self._get_voi_val(name,
                                               meta,
                                               self._remote_cons,
                                               unscaled=unscaled)

        return con_dict

    def _get_ordered_nl_responses(self):
        """
        Return the names of nonlinear responses in the order used by the driver.

        Default order is objectives followed by nonlinear constraints.  This is used for
        simultaneous derivative coloring and sparsity determination.

        Returns
        -------
        list of str
            The nonlinear response names in order.
        """
        order = list(self._objs)
        order.extend(n for n, meta in iteritems(self._cons)
                     if not ('linear' in meta and meta['linear']))
        return order

    def run(self):
        """
        Execute this driver.

        The base `Driver` just runs the model. All other drivers overload
        this method.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        with RecordingDebugging(self._get_name(), self.iter_count,
                                self) as rec:
            failure_flag, _, _ = self._problem.model._solve_nonlinear()

        self.iter_count += 1
        return failure_flag

    def _compute_totals(self,
                        of=None,
                        wrt=None,
                        return_format='flat_dict',
                        global_names=True):
        """
        Compute derivatives of desired quantities with respect to desired inputs.

        All derivatives are returned using driver scaling.

        Parameters
        ----------
        of : list of variable name strings or None
            Variables whose derivatives will be computed. Default is None, which
            uses the driver's objectives and constraints.
        wrt : list of variable name strings or None
            Variables with respect to which the derivatives will be computed.
            Default is None, which uses the driver's desvars.
        return_format : string
            Format to return the derivatives. Default is a 'flat_dict', which
            returns them in a dictionary whose keys are tuples of form (of, wrt). For
            the scipy optimizer, 'array' is also supported.
        global_names : bool
            Set to True when passing in global names to skip some translation steps.

        Returns
        -------
        derivs : object
            Derivatives in form requested by 'return_format'.
        """
        total_jac = self._total_jac
        debug_print = 'totals' in self.options['debug_print'] and (
            not MPI or MPI.COMM_WORLD.rank == 0)

        if debug_print:
            header = 'Driver total derivatives for iteration: ' + str(
                self.iter_count)
            print(header)
            print(len(header) * '-' + '\n')

        if self._problem.model._owns_approx_jac:
            if total_jac is None:
                self._total_jac = total_jac = _TotalJacInfo(
                    self._problem,
                    of,
                    wrt,
                    global_names,
                    return_format,
                    approx=True,
                    debug_print=debug_print)
            return total_jac.compute_totals_approx()
        else:
            if total_jac is None:
                total_jac = _TotalJacInfo(self._problem,
                                          of,
                                          wrt,
                                          global_names,
                                          return_format,
                                          debug_print=debug_print)

            # don't cache linear constraint jacobian
            if not total_jac.has_lin_cons:
                self._total_jac = total_jac

            return total_jac.compute_totals()

    def record_iteration(self):
        """
        Record an iteration of the current Driver.
        """
        if not self._rec_mgr._recorders:
            return

        # Get the data to record (collective calls that get across all ranks)
        opts = self.recording_options
        filt = self._filtered_vars_to_record

        if opts['record_desvars']:
            des_vars = self.get_design_var_values()
        else:
            des_vars = {}

        if opts['record_objectives']:
            obj_vars = self.get_objective_values()
        else:
            obj_vars = {}

        if opts['record_constraints']:
            con_vars = self.get_constraint_values()
        else:
            con_vars = {}

        if opts['record_responses']:
            # res_vars = self.get_response_values()  # not really working yet
            res_vars = {}
        else:
            res_vars = {}

        des_vars = {name: des_vars[name] for name in filt['des']}
        obj_vars = {name: obj_vars[name] for name in filt['obj']}
        con_vars = {name: con_vars[name] for name in filt['con']}
        # res_vars = {name: res_vars[name] for name in filt['res']}

        model = self._problem.model

        sys_vars = {}
        in_vars = {}
        outputs = model._outputs
        inputs = model._inputs
        views = outputs._views
        views_in = inputs._views
        sys_vars = {
            name: views[name]
            for name in outputs._names if name in filt['sys']
        }
        if self.recording_options['record_inputs']:
            in_vars = {
                name: views_in[name]
                for name in inputs._names if name in filt['in']
            }

        if MPI:
            des_vars = self._gather_vars(model, des_vars)
            res_vars = self._gather_vars(model, res_vars)
            obj_vars = self._gather_vars(model, obj_vars)
            con_vars = self._gather_vars(model, con_vars)
            sys_vars = self._gather_vars(model, sys_vars)
            in_vars = self._gather_vars(model, in_vars)

        outs = {}
        if not MPI or model.comm.rank == 0:
            outs.update(des_vars)
            outs.update(res_vars)
            outs.update(obj_vars)
            outs.update(con_vars)
            outs.update(sys_vars)

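        # Recorders receive one dict: recorded output values under 'out', input values under 'in'.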
        data = {'out': outs, 'in': in_vars}

        metadata = create_local_meta(self._get_name())

        self._rec_mgr.record_iteration(self, data, metadata)

    def _gather_vars(self, root, local_vars):
        """
        Gather and return only variables listed in `local_vars` from the `root` System.

        Parameters
        ----------
        root : <System>
            the root System for the Problem
        local_vars : dict
            local variable names and values

        Returns
        -------
        dct : dict
            variable names and values.
        """
        # if trace:
        #     debug("gathering vars for recording in %s" % root.pathname)
        all_vars = root.comm.gather(local_vars, root=0)
        # if trace:
        #     debug("DONE gathering rec vars for %s" % root.pathname)

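        # On rank 0, merge the gathered per-rank dicts; other ranks implicitly return None.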
        if root.comm.rank == 0:
            dct = all_vars[-1]
            for d in all_vars[:-1]:
                dct.update(d)
            return dct

    def _get_name(self):
        """
        Get name of current Driver.

        Returns
        -------
        str
            Name of current Driver.
        """
        return "Driver"

    def set_simul_deriv_color(self, simul_info):
        """
        Set the coloring (and possibly the sub-jac sparsity) for simultaneous total derivatives.

        Parameters
        ----------
        simul_info : str or tuple

            ::

                # Information about simultaneous coloring for design vars and responses.  If a
                # string, then simul_info is assumed to be the name of a file that contains the
                # coloring information in JSON format.  If a tuple, the structure looks like this:

                (
                    # First, a list of column index lists, each index list representing columns
                    # having the same color, except for the very first index list, which contains
                    # indices of all columns that are not colored.
                    [
                        [i1, i2, i3, ...]    # list of non-colored columns
                        [ia, ib, ...]    # list of columns in first color
                        [ic, id, ...]    # list of columns in second color
                           ...           # remaining color lists, one list of columns per color
                    ],

                    # Next is a list of lists, one for each column, containing the nonzero rows for
                    # that column.  If a column is not colored, then it will have a None entry
                    # instead of a list.
                    [
                        [r1, rn, ...]   # list of nonzero rows for column 0
                        None,           # column 1 is not colored
                        [ra, rb, ...]   # list of nonzero rows for column 2
                            ...
                    ],

                    # The last tuple entry can be None, indicating that no sparsity structure is
                    # specified, or it can be a nested dictionary where the outer keys are response
                    # names, the inner keys are design variable names, and the value is a tuple of
                    # the form (row_list, col_list, shape).
                    {
                        resp1_name: {
                            dv1_name: (rows, cols, shape),  # for sub-jac d_resp1/d_dv1
                            dv2_name: (rows, cols, shape),
                              ...
                        },
                        resp2_name: {
                            ...
                        }
                        ...
                    }
                )

        """
        if self.supports['simultaneous_derivatives']:
            self._simul_coloring_info = simul_info
        else:
            raise RuntimeError(
                "Driver '%s' does not support simultaneous derivatives." %
                self._get_name())

    def set_total_jac_sparsity(self, sparsity):
        """
        Set the sparsity of sub-jacobians of the total jacobian.

        Note: This currently will have no effect if you are not using the pyOptSparseDriver.

        Parameters
        ----------
        sparsity : str or dict

            ::

                # Sparsity is a nested dictionary where the outer keys are response
                # names, the inner keys are design variable names, and the value is a tuple of
                # the form (row_list, col_list, shape).
                {
                    resp1: {
                        dv1: (rows, cols, shape),  # for sub-jac d_resp1/d_dv1
                        dv2: (rows, cols, shape),
                          ...
                    },
                    resp2: {
                        ...
                    }
                    ...
                }
        """
        if self.supports['total_jac_sparsity']:
            self._total_jac_sparsity = sparsity
        else:
            raise RuntimeError(
                "Driver '%s' does not support setting of total jacobian sparsity."
                % self._get_name())

    def _setup_simul_coloring(self, mode='fwd'):
        """
        Set up metadata for simultaneous derivative solution.

        Parameters
        ----------
        mode : str
            Derivative direction, either 'fwd' or 'rev'.
        """
        if mode == 'rev':
            raise NotImplementedError(
                "Simultaneous derivatives are currently not supported "
                "in 'rev' mode")

        # command line simul_coloring uses this env var to turn pre-existing coloring off
        if not coloring_mod._use_sparsity:
            return

        if isinstance(self._simul_coloring_info, string_types):
            with open(self._simul_coloring_info, 'r') as f:
                self._simul_coloring_info = json.load(f)

        tup = self._simul_coloring_info
        column_lists, row_map = tup[:2]
        if len(tup) > 2:
            sparsity = tup[2]
            if self._total_jac_sparsity is not None:
                raise RuntimeError(
                    "Total jac sparsity was set in both _simul_coloring_info"
                    " and _total_jac_sparsity.")
            self._total_jac_sparsity = sparsity

        self._simul_coloring_info = column_lists, row_map

    def _pre_run_model_debug_print(self):
        """
        Optionally print some debugging information before the model runs.
        """
        debug_opt = self.options['debug_print']
        if not debug_opt or debug_opt == ['totals']:
            return

        if not MPI or MPI.COMM_WORLD.rank == 0:
            header = 'Driver debug print for iter coord: {}'.format(
                get_formatted_iteration_coordinate())
            print(header)
            print(len(header) * '-')

        if 'desvars' in debug_opt:
            desvar_vals = self.get_design_var_values(unscaled=True,
                                                     ignore_indices=True)
            if not MPI or MPI.COMM_WORLD.rank == 0:
                print("Design Vars")
                if desvar_vals:
                    pprint.pprint(desvar_vals)
                else:
                    print("None")
                print()

        sys.stdout.flush()

    def _post_run_model_debug_print(self):
        """
        Optionally print some debugging information after the model runs.
        """
        if 'nl_cons' in self.options['debug_print']:
            cons = self.get_constraint_values(lintype='nonlinear',
                                              unscaled=True)
            if not MPI or MPI.COMM_WORLD.rank == 0:
                print("Nonlinear constraints")
                if cons:
                    pprint.pprint(cons)
                else:
                    print("None")
                print()

        if 'ln_cons' in self.options['debug_print']:
            cons = self.get_constraint_values(lintype='linear', unscaled=True)
            if not MPI or MPI.COMM_WORLD.rank == 0:
                print("Linear constraints")
                if cons:
                    pprint.pprint(cons)
                else:
                    print("None")
                print()

        if 'objs' in self.options['debug_print']:
            objs = self.get_objective_values(unscaled=True)
            if not MPI or MPI.COMM_WORLD.rank == 0:
                print("Objectives")
                if objs:
                    pprint.pprint(objs)
                else:
                    print("None")
                print()

        sys.stdout.flush()
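

# ---------------------------------------------------------------------------
# Illustration only (not part of the library source above): a hand-written
# example of the coloring tuple whose structure is documented in
# Driver.set_simul_deriv_color().  All index values below are made up; only
# the nesting structure matters.
example_simul_info = (
    # 1) column index lists: the first list holds the non-colored columns,
    #    then one list of column indices per color.
    [
        [0],        # non-colored columns
        [1, 3],     # columns sharing the first color
        [2, 4],     # columns sharing the second color
    ],
    # 2) per-column nonzero-row lists; a non-colored column gets None instead.
    [
        None,       # column 0 is not colored
        [0, 2],     # nonzero rows for column 1
        [1],        # nonzero rows for column 2
        [0],        # nonzero rows for column 3
        [1, 2],     # nonzero rows for column 4
    ],
    # 3) optional sub-jacobian sparsity dict; None means none is specified.
    None,
)
# A driver that declares supports['simultaneous_derivatives'] could accept it:
#     driver.set_simul_deriv_color(example_simul_info)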
Example #37
0
class ExperimentalDriver(object):
    """
    A fake driver class used for doc generation testing.

    Attributes
    ----------
    fail : bool
        Reports whether the driver ran successfully.
    iter_count : int
        Keep track of iterations for case recording.
    options : <OptionsDictionary>
        Dictionary with general driver options.
    recording_options : <OptionsDictionary>
        Dictionary with driver recording options.
    cite : str
        Listing of relevant citations that should be referenced when
        publishing work that uses this class.
    _problem : <Problem>
        Pointer to the containing problem.
    supports : <OptionsDictionary>
        Provides a consistent way for drivers to declare what features they support.
    _designvars : dict
        Contains all design variable info.
    _cons : dict
        Contains all constraint info.
    _objs : dict
        Contains all objective info.
    _responses : dict
        Contains all response info.
    _rec_mgr : <RecordingManager>
        Object that manages all recorders added to this driver.
    _vars_to_record : dict
        Dict of lists of var names indicating what to record.
    _model_viewer_data : dict
        Structure of model, used to make n2 diagram.
    _remote_dvs : dict
        Dict of design variables that are remote on at least one proc. Values are
        (owning rank, size).
    _remote_cons : dict
        Dict of constraints that are remote on at least one proc. Values are
        (owning rank, size).
    _remote_objs : dict
        Dict of objectives that are remote on at least one proc. Values are
        (owning rank, size).
    _remote_responses : dict
        A combined dict containing entries from _remote_cons and _remote_objs.
    _total_coloring : tuple of dicts
        A data structure describing coloring for simultaneous derivs.
    _res_jacs : dict
        Dict of sparse subjacobians for use with certain optimizers, e.g. pyOptSparseDriver.
    """
    def __init__(self):
        """
        Initialize the driver.
        """
        self._rec_mgr = RecordingManager()
        self._vars_to_record = {
            'desvarnames': set(),
            'responsenames': set(),
            'objectivenames': set(),
            'constraintnames': set(),
            'sysinclnames': set(),
        }

        self._problem = None
        self._designvars = None
        self._cons = None
        self._objs = None
        self._responses = None
        self.options = OptionsDictionary()
        self.recording_options = OptionsDictionary()

        ###########################
        self.recording_options.declare('record_metadata',
                                       types=bool,
                                       desc='Record metadata',
                                       default=True)
        self.recording_options.declare(
            'record_desvars',
            types=bool,
            default=True,
            desc='Set to True to record design variables at the \
                                       driver level')
        self.recording_options.declare(
            'record_responses',
            types=bool,
            default=False,
            desc='Set to True to record responses at the driver level')
        self.recording_options.declare(
            'record_objectives',
            types=bool,
            default=True,
            desc='Set to True to record objectives at the \
                                       driver level')
        self.recording_options.declare(
            'record_constraints',
            types=bool,
            default=True,
            desc='Set to True to record constraints at the \
                                       driver level')
        self.recording_options.declare(
            'includes',
            types=list,
            default=[],
            desc='Patterns for variables to include in recording')
        self.recording_options.declare(
            'excludes',
            types=list,
            default=[],
            desc='Patterns for vars to exclude in recording '
            '(processed post-includes)')
        self.recording_options.declare(
            'record_derivatives',
            types=bool,
            default=False,
            desc='Set to True to record derivatives at the driver \
                                       level')
        ###########################

        # What the driver supports.
        self.supports = OptionsDictionary()
        self.supports.declare('inequality_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('equality_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('linear_constraints', types=bool, default=False)
        self.supports.declare('two_sided_constraints',
                              types=bool,
                              default=False)
        self.supports.declare('multiple_objectives', types=bool, default=False)
        self.supports.declare('integer_design_vars', types=bool, default=False)
        self.supports.declare('gradients', types=bool, default=False)
        self.supports.declare('active_set', types=bool, default=False)
        self.supports.declare('simultaneous_derivatives',
                              types=bool,
                              default=False)

        self.iter_count = 0
        self.options = None
        self._model_viewer_data = None
        self.cite = ""

        # TODO, support these in OpenMDAO
        self.supports.declare('integer_design_vars', types=bool, default=False)

        self._res_jacs = {}

        self.fail = False

    def add_recorder(self, recorder):
        """
        Add a recorder to the driver.

        Parameters
        ----------
        recorder : CaseRecorder
           A recorder instance.
        """
        self._rec_mgr.append(recorder)

    def cleanup(self):
        """
        Clean up resources prior to exit.
        """
        self._rec_mgr.close()

    def _setup_driver(self, problem):
        """
        Prepare the driver for execution.

        This is the final thing to run during setup.

        Parameters
        ----------
        problem : <Problem>
            Pointer to the containing problem.
        """
        self._problem = problem
        model = problem.model

        self._objs = objs = OrderedDict()
        self._cons = cons = OrderedDict()
        self._responses = model.get_responses(recurse=True)
        response_size = 0
        for name, data in iteritems(self._responses):
            if data['type'] == 'con':
                cons[name] = data
            else:
                objs[name] = data
            response_size += data['size']

        # Gather up the information for design vars.
        self._designvars = model.get_design_vars(recurse=True)
        desvar_size = np.sum(data['size']
                             for data in itervalues(self._designvars))

        if ((problem._mode == 'fwd' and desvar_size > response_size)
                or (problem._mode == 'rev' and response_size > desvar_size)):
            warnings.warn(
                "Inefficient choice of derivative mode.  You chose '%s' for a "
                "problem with %d design variables and %d response variables "
                "(objectives and constraints)." %
                (problem._mode, desvar_size, response_size), RuntimeWarning)

        self._has_scaling = (
            np.any([r['scaler'] is not None for r in self._responses.values()])
            or np.any(
                [dv['scaler'] is not None
                 for dv in self._designvars.values()]))

        con_set = set()
        obj_set = set()
        dv_set = set()

        self._remote_dvs = dv_dict = {}
        self._remote_cons = con_dict = {}
        self._remote_objs = obj_dict = {}

        # Now determine if later we'll need to allgather cons, objs, or desvars.
        if model.comm.size > 1 and model._subsystems_allprocs:
            local_out_vars = set(model._outputs._views)
            remote_dvs = set(self._designvars) - local_out_vars
            remote_cons = set(self._cons) - local_out_vars
            remote_objs = set(self._objs) - local_out_vars
            all_remote_vois = model.comm.allgather(
                (remote_dvs, remote_cons, remote_objs))
            for rem_dvs, rem_cons, rem_objs in all_remote_vois:
                con_set.update(rem_cons)
                obj_set.update(rem_objs)
                dv_set.update(rem_dvs)

            # If we have remote VOIs, pick an owning rank for each and use that
            # to bcast to others later
            owning_ranks = model._owning_rank['output']
            sizes = model._var_sizes['nonlinear']['output']
            for i, vname in enumerate(model._var_allprocs_abs_names['output']):
                owner = owning_ranks[vname]
                if vname in dv_set:
                    dv_dict[vname] = (owner, sizes[owner, i])
                if vname in con_set:
                    con_dict[vname] = (owner, sizes[owner, i])
                if vname in obj_set:
                    obj_dict[vname] = (owner, sizes[owner, i])

        self._remote_responses = self._remote_cons.copy()
        self._remote_responses.update(self._remote_objs)

        # Case recording setup
        mydesvars = myobjectives = myconstraints = myresponses = set()
        mysystem_outputs = set()
        incl = self.recording_options['includes']
        excl = self.recording_options['excludes']
        rec_desvars = self.recording_options['record_desvars']
        rec_objectives = self.recording_options['record_objectives']
        rec_constraints = self.recording_options['record_constraints']
        rec_responses = self.recording_options['record_responses']

        # includes and excludes for outputs are specified using promoted names
        # NOTE: only local var names are in abs2prom, all will be gathered later
        abs2prom = model._var_abs2prom['output']

        all_desvars = {
            n
            for n in self._designvars
            if n in abs2prom and check_path(abs2prom[n], incl, excl, True)
        }
        all_objectives = {
            n
            for n in self._objs
            if n in abs2prom and check_path(abs2prom[n], incl, excl, True)
        }
        all_constraints = {
            n
            for n in self._cons
            if n in abs2prom and check_path(abs2prom[n], incl, excl, True)
        }
        if rec_desvars:
            mydesvars = all_desvars

        if rec_objectives:
            myobjectives = all_objectives

        if rec_constraints:
            myconstraints = all_constraints

        if rec_responses:
            myresponses = {
                n
                for n in self._responses
                if n in abs2prom and check_path(abs2prom[n], incl, excl, True)
            }

        # get the includes that were requested for this Driver recording
        if incl:
            prob = self._problem
            root = prob.model
            # The my* variables are sets

            # First gather all of the desired outputs
            # The following might only be the local vars if MPI
            mysystem_outputs = {
                n
                for n in root._outputs
                if n in abs2prom and check_path(abs2prom[n], incl, excl)
            }

            # If MPI, and on rank 0, need to gather up all the variables
            #    even those not local to rank 0
            if MPI:
                all_vars = root.comm.gather(mysystem_outputs, root=0)
                if MPI.COMM_WORLD.rank == 0:
                    mysystem_outputs = all_vars[-1]
                    for d in all_vars[:-1]:
                        mysystem_outputs.update(d)

            # de-duplicate mysystem_outputs
            mysystem_outputs = mysystem_outputs.difference(
                all_desvars, all_objectives, all_constraints)

        if MPI:  # filter based on who owns the variables
            # TODO Eventually, we think we can get rid of this next check. But to be safe,
            #       we are leaving it in there.
            if not model.is_active():
                raise RuntimeError(
                    "RecordingManager.startup should never be called when "
                    "running in parallel on an inactive System")
            rrank = self._problem.comm.rank  # root ( aka model ) rank.
            rowned = model._owning_rank['output']
            mydesvars = [n for n in mydesvars if rrank == rowned[n]]
            myresponses = [n for n in myresponses if rrank == rowned[n]]
            myobjectives = [n for n in myobjectives if rrank == rowned[n]]
            myconstraints = [n for n in myconstraints if rrank == rowned[n]]
            mysystem_outputs = [
                n for n in mysystem_outputs if rrank == rowned[n]
            ]

        self._filtered_vars_to_record = {
            'des': mydesvars,
            'obj': myobjectives,
            'con': myconstraints,
            'res': myresponses,
            'sys': mysystem_outputs,
        }

        self._rec_mgr.startup(self)

    def _get_voi_val(self, name, meta, remote_vois):
        """
        Get the value of a variable of interest (objective, constraint, or design var).

        This will retrieve the value if the VOI is remote.

        Parameters
        ----------
        name : str
            Name of the variable of interest.
        meta : dict
            Metadata for the variable of interest.
        remote_vois : dict
            Dict containing (owning_rank, size) for all remote vois of a particular
            type (design var, constraint, or objective).

        Returns
        -------
        float or ndarray
            The value of the named variable of interest.
        """
        model = self._problem.model
        comm = model.comm
        vec = model._outputs._views_flat
        indices = meta['indices']

        if name in remote_vois:
            owner, size = remote_vois[name]
            if owner == comm.rank:
                if indices is None:
                    val = vec[name].copy()
                else:
                    val = vec[name][indices]
            else:
                if indices is not None:
                    size = len(indices)
                val = np.empty(size)
            comm.Bcast(val, root=owner)
        else:
            if indices is None:
                val = vec[name].copy()
            else:
                val = vec[name][indices]

        if self._has_scaling:
            # Scale design variable values
            adder = meta['adder']
            if adder is not None:
                val += adder

            scaler = meta['scaler']
            if scaler is not None:
                val *= scaler

        return val
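    # Worked illustration of the scaling above (values are hypothetical): for a
    # design variable whose model value is 5.0 with adder=-2.0 and scaler=0.5,
    # the driver sees (5.0 + (-2.0)) * 0.5 = 1.5.  set_design_var() below
    # applies the inverse transform when pushing values back into the model.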

    def get_design_var_values(self, filter=None):
        """
        Return the design variable values.

        This is called to gather the initial design variable state.

        Parameters
        ----------
        filter : list
            List of desvar names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each design variable.
        """
        if filter:
            dvs = filter
        else:
            # use all the designvars
            dvs = self._designvars

        return {
            n: self._get_voi_val(n, self._designvars[n], self._remote_dvs)
            for n in dvs
        }
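    # Hedged usage sketch: a subclass typically calls this once at the start of
    # its run() to get the initial point, e.g.
    #
    #     dv_vals = self.get_design_var_values()   # {name: driver-scaled value}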

    def set_design_var(self, name, value):
        """
        Set the value of a design variable.

        Parameters
        ----------
        name : str
            Global pathname of the design variable.
        value : float or ndarray
            Value for the design variable.
        """
        if (name in self._remote_dvs
                and self._problem.model._owning_rank['output'][name] !=
                self._problem.comm.rank):
            return

        meta = self._designvars[name]
        indices = meta['indices']
        if indices is None:
            indices = slice(None)

        desvar = self._problem.model._outputs._views_flat[name]
        desvar[indices] = value

        if self._has_scaling:
            # Scale design variable values
            scaler = meta['scaler']
            if scaler is not None:
                desvar[indices] *= 1.0 / scaler

            adder = meta['adder']
            if adder is not None:
                desvar[indices] -= adder

    def get_response_values(self, filter=None):
        """
        Return response values.

        Parameters
        ----------
        filter : list
            List of response names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each response.
        """
        if filter:
            resps = filter
        else:
            resps = self._responses

        return {
            n: self._get_voi_val(n, self._responses[n], self._remote_responses)
            for n in resps
        }

    def get_objective_values(self, filter=None):
        """
        Return objective values.

        Parameters
        ----------
        filter : list
            List of objective names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each objective.
        """
        if filter:
            objs = filter
        else:
            objs = self._objs

        return {
            n: self._get_voi_val(n, self._objs[n], self._remote_objs)
            for n in objs
        }

    def get_constraint_values(self, ctype='all', lintype='all', filter=None):
        """
        Return constraint values.

        Parameters
        ----------
        ctype : string
            Default is 'all'. Optionally return just the inequality constraints
            with 'ineq' or the equality constraints with 'eq'.
        lintype : string
            Default is 'all'. Optionally return just the linear constraints
            with 'linear' or the nonlinear constraints with 'nonlinear'.
        filter : list
            List of constraint names used by recorders.

        Returns
        -------
        dict
           Dictionary containing values of each constraint.
        """
        if filter is not None:
            cons = filter
        else:
            cons = self._cons

        con_dict = {}
        for name in cons:
            meta = self._cons[name]

            if lintype == 'linear' and not meta['linear']:
                continue

            if lintype == 'nonlinear' and meta['linear']:
                continue

            if ctype == 'eq' and meta['equals'] is None:
                continue

            if ctype == 'ineq' and meta['equals'] is not None:
                continue

            con_dict[name] = self._get_voi_val(name, meta, self._remote_cons)

        return con_dict
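    # Hedged usage sketch: an optimizer-like subclass might query subsets, e.g.
    #
    #     eq_cons  = self.get_constraint_values(ctype='eq')
    #     lin_cons = self.get_constraint_values(lintype='linear')
    #
    # Each call returns a {name: value} dict filtered on the constraint metadata.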

    def run(self):
        """
        Execute this driver.

        The base `Driver` just runs the model. All other drivers overload
        this method.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        with Recording(self._get_name(), self.iter_count, self) as rec:
            self._problem.model.run_solve_nonlinear()

        self.iter_count += 1
        return False

    def _dict2array_jac(self, derivs):
        """
        Convert a nested dict of sub-jacobians {of: {wrt: array}} into a
        single dense 2D array, leaving zeros for irrelevant of/wrt pairs.
        """
        osize = 0
        isize = 0
        do_wrt = True
        islices = {}
        oslices = {}
        for okey, oval in iteritems(derivs):
            if do_wrt:
                # Column slices are computed from the first 'of' entry only;
                # every 'of' entry is assumed to hold the same 'wrt' keys.
                for ikey, val in iteritems(oval):
                    istart = isize
                    isize += val.shape[1]
                    islices[ikey] = slice(istart, isize)
                do_wrt = False
            # Row size comes from this 'of' entry's last 'wrt' block.
            ostart = osize
            osize += oval[ikey].shape[0]
            oslices[okey] = slice(ostart, osize)

        new_derivs = np.zeros((osize, isize))

        relevant = self._problem.model._relevant

        for okey, odict in iteritems(derivs):
            for ikey, val in iteritems(odict):
                if okey in relevant[ikey] or ikey in relevant[okey]:
                    new_derivs[oslices[okey], islices[ikey]] = val

        return new_derivs
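    # Hedged illustration of the layout built above: for
    # derivs = {'f': {'x': Jfx, 'y': Jfy}, 'g': {'x': Jgx, 'y': Jgy}} the
    # result is the dense block matrix
    #
    #     [[Jfx, Jfy],
    #      [Jgx, Jgy]]
    #
    # with row blocks ordered by the 'of' keys, column blocks by the 'wrt'
    # keys, and any block whose of/wrt pair is not relevant left as zeros.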

    def _compute_totals(self,
                        of=None,
                        wrt=None,
                        return_format='flat_dict',
                        global_names=True):
        """
        Compute derivatives of desired quantities with respect to desired inputs.

        All derivatives are returned using driver scaling.

        Parameters
        ----------
        of : list of variable name strings or None
            Variables whose derivatives will be computed. Default is None, which
            uses the driver's objectives and constraints.
        wrt : list of variable name strings or None
            Variables with respect to which the derivatives will be computed.
            Default is None, which uses the driver's desvars.
        return_format : string
            Format to return the derivatives. Default is a 'flat_dict', which
            returns them in a dictionary whose keys are tuples of form (of, wrt). For
            the scipy optimizer, 'array' is also supported.
        global_names : bool
            Set to True when passing in global names to skip some translation steps.

        Returns
        -------
        derivs : object
            Derivatives in form requested by 'return_format'.
        """
        prob = self._problem

        # Compute the derivatives in dict format...
        if prob.model._owns_approx_jac:
            derivs = prob._compute_totals_approx(of=of,
                                                 wrt=wrt,
                                                 return_format='dict',
                                                 global_names=global_names)
        else:
            derivs = prob._compute_totals(of=of,
                                          wrt=wrt,
                                          return_format='dict',
                                          global_names=global_names)

        # ... then convert to whatever the driver needs.
        if return_format in ('dict', 'array'):
            if self._has_scaling:
                for okey, odict in iteritems(derivs):
                    for ikey, val in iteritems(odict):

                        iscaler = self._designvars[ikey]['scaler']
                        oscaler = self._responses[okey]['scaler']

                        # Scale response side
                        if oscaler is not None:
                            val[:] = (oscaler * val.T).T

                        # Scale design var side
                        if iscaler is not None:
                            val *= 1.0 / iscaler
        else:
            raise RuntimeError(
                "Derivative scaling by the driver only supports the 'dict' and "
                "'array' formats at present.")

        if return_format == 'array':
            derivs = self._dict2array_jac(derivs)

        return derivs
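    # Hedged usage sketch: a gradient-based subclass might request the scaled
    # total Jacobian as a single array, e.g.
    #
    #     J = self._compute_totals(of=list(self._responses),
    #                              wrt=list(self._designvars),
    #                              return_format='array')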

    def record_iteration(self):
        """
        Record an iteration of the current Driver.
        """
        if not self._rec_mgr._recorders:
            return

        metadata = create_local_meta(self._get_name())

        # Get the data to record
        data = {}
        if self.recording_options['record_desvars']:
            # collective call that gets across all ranks
            desvars = self.get_design_var_values()
        else:
            desvars = {}

        if self.recording_options['record_responses']:
            # responses = self.get_response_values() # not really working yet
            responses = {}
        else:
            responses = {}

        if self.recording_options['record_objectives']:
            objectives = self.get_objective_values()
        else:
            objectives = {}

        if self.recording_options['record_constraints']:
            constraints = self.get_constraint_values()
        else:
            constraints = {}

        desvars = {
            name: desvars[name]
            for name in self._filtered_vars_to_record['des']
        }
        # responses not working yet
        # responses = {name: responses[name] for name in self._filtered_vars_to_record['res']}
        objectives = {
            name: objectives[name]
            for name in self._filtered_vars_to_record['obj']
        }
        constraints = {
            name: constraints[name]
            for name in self._filtered_vars_to_record['con']
        }

        if self.recording_options['includes']:
            root = self._problem.model
            outputs = root._outputs
            # inputs, outputs, residuals = root.get_nonlinear_vectors()
            sysvars = {}
            for name, value in iteritems(outputs._names):
                if name in self._filtered_vars_to_record['sys']:
                    sysvars[name] = value
        else:
            sysvars = {}

        if MPI:
            root = self._problem.model
            desvars = self._gather_vars(root, desvars)
            responses = self._gather_vars(root, responses)
            objectives = self._gather_vars(root, objectives)
            constraints = self._gather_vars(root, constraints)
            sysvars = self._gather_vars(root, sysvars)

        data['des'] = desvars
        data['res'] = responses
        data['obj'] = objectives
        data['con'] = constraints
        data['sys'] = sysvars

        self._rec_mgr.record_iteration(self, data, metadata)

    def _gather_vars(self, root, local_vars):
        """
        Gather and return only variables listed in `local_vars` from the `root` System.

        Parameters
        ----------
        root : <System>
            the root System for the Problem
        local_vars : dict
            local variable names and values

        Returns
        -------
        dct : dict
            variable names and values.
        """
        # if trace:
        #     debug("gathering vars for recording in %s" % root.pathname)
        all_vars = root.comm.gather(local_vars, root=0)
        # if trace:
        #     debug("DONE gathering rec vars for %s" % root.pathname)

        if root.comm.rank == 0:
            dct = all_vars[-1]
            for d in all_vars[:-1]:
                dct.update(d)
            return dct

    def _get_name(self):
        """
        Get name of current Driver.

        Returns
        -------
        str
            Name of current Driver.
        """
        return "Driver"
Example #38
0
    def __init__(self, **kwargs):
        """
        Initialize all attributes.

        Parameters
        ----------
        **kwargs : dict of keyword arguments
            Keyword arguments that will be mapped into the Solver options.
        """
        self._system = None
        self._depth = 0
        self._vec_names = None
        self._mode = 'fwd'
        self._iter_count = 0
        self._problem_meta = None

        # Solver options
        self.options = OptionsDictionary(parent_name=self.msginfo)
        self.options.declare('maxiter',
                             types=int,
                             default=10,
                             desc='maximum number of iterations')
        self.options.declare('atol',
                             default=1e-10,
                             desc='absolute error tolerance')
        self.options.declare('rtol',
                             default=1e-10,
                             desc='relative error tolerance')
        self.options.declare('iprint',
                             types=int,
                             default=1,
                             desc='whether to print output')
        self.options.declare(
            'err_on_non_converge',
            types=bool,
            default=False,
            desc="When True, AnalysisError will be raised if we don't converge."
        )

        # Case recording options
        self.recording_options = OptionsDictionary(parent_name=self.msginfo)
        self.recording_options.declare(
            'record_abs_error',
            types=bool,
            default=True,
            desc='Set to True to record absolute error at the \
                                       solver level')
        self.recording_options.declare(
            'record_rel_error',
            types=bool,
            default=True,
            desc='Set to True to record relative error at the \
                                       solver level')
        self.recording_options.declare(
            'record_inputs',
            types=bool,
            default=True,
            desc='Set to True to record inputs at the solver level')
        self.recording_options.declare(
            'record_outputs',
            types=bool,
            default=True,
            desc='Set to True to record outputs at the solver level')
        self.recording_options.declare(
            'record_solver_residuals',
            types=bool,
            default=False,
            desc='Set to True to record residuals at the solver level')
        self.recording_options.declare(
            'record_metadata',
            types=bool,
            desc='Deprecated. Recording '
            'of metadata will always be done',
            deprecation="The recording option, record_metadata, on "
            "Solver is "
            "deprecated. Recording of metadata will always be done",
            default=True)
        self.recording_options.declare(
            'includes',
            types=list,
            default=['*'],
            desc="Patterns for variables to include in recording. \
                                       Paths are relative to solver's Group. \
                                       Uses fnmatch wildcards")
        self.recording_options.declare(
            'excludes',
            types=list,
            default=[],
            desc="Patterns for vars to exclude in recording. \
                                       (processed post-includes) \
                                       Paths are relative to solver's Group. \
                                       Uses fnmatch wildcards")
        # Case recording related
        self._filtered_vars_to_record = {}
        self._norm0 = 0.0

        # What the solver supports.
        self.supports = OptionsDictionary(parent_name=self.msginfo)
        self.supports.declare('gradients', types=bool, default=False)
        self.supports.declare('implicit_components', types=bool, default=False)

        self._declare_options()
        self.options.update(kwargs)

        self._rec_mgr = RecordingManager()

        self.cite = ""
Example #39
0
class Driver(object):
    """ Base class for drivers in OpenMDAO. Drivers can only be placed in a
    Problem, and every problem has a Driver. Driver is the simplest driver that
    runs (solves using solve_nonlinear) a problem once.
    """

    def __init__(self):
        super(Driver, self).__init__()
        self.recorders = RecordingManager()

        # What this driver supports
        self.supports = OptionsDictionary(read_only=True)
        self.supports.add_option("inequality_constraints", True)
        self.supports.add_option("equality_constraints", True)
        self.supports.add_option("linear_constraints", True)
        self.supports.add_option("multiple_objectives", True)
        self.supports.add_option("two_sided_constraints", True)
        self.supports.add_option("integer_design_vars", True)

        # This driver's options
        self.options = OptionsDictionary()

        self._desvars = OrderedDict()
        self._objs = OrderedDict()
        self._cons = OrderedDict()

        self._voi_sets = []
        self._vars_to_record = None

        # We take root during setup
        self.root = None

        self.iter_count = 0

    def _setup(self, root):
        """ Updates metadata for params, constraints and objectives, and
        checks for errors. Also determines all variables that need to be
        gathered for case recording.
        """
        self.root = root

        desvars = OrderedDict()
        objs = OrderedDict()
        cons = OrderedDict()

        item_tups = [
            ("Parameter", self._desvars, desvars),
            ("Objective", self._objs, objs),
            ("Constraint", self._cons, cons),
        ]

        for item_name, item, newitem in item_tups:
            for name, meta in iteritems(item):
                rootmeta = root.unknowns.metadata(name)

                if MPI and "src_indices" in rootmeta:  # pragma: no cover
                    raise ValueError(
                        "'%s' is a distributed variable and may "
                        "not be used as a design var, objective, "
                        "or constraint." % name
                    )

                # Check validity of variable
                if name not in root.unknowns:
                    msg = "{} '{}' not found in unknowns."
                    msg = msg.format(item_name, name)
                    raise ValueError(msg)

                # Size is useful metadata to save
                if "indices" in meta:
                    meta["size"] = len(meta["indices"])
                else:
                    meta["size"] = rootmeta["size"]

                newitem[name] = meta

        self._desvars = desvars
        self._objs = objs
        self._cons = cons

    def _map_voi_indices(self):
        poi_indices = {}
        qoi_indices = {}
        for name, meta in chain(iteritems(self._cons), iteritems(self._objs)):
            # set indices of interest
            if "indices" in meta:
                qoi_indices[name] = meta["indices"]

        for name, meta in iteritems(self._desvars):
            # set indices of interest
            if "indices" in meta:
                poi_indices[name] = meta["indices"]

        return poi_indices, qoi_indices

    def _of_interest(self, voi_list):
        """Return a list of tuples, with the given voi_list organized
        into tuples based on the previously defined grouping of VOIs.
        """
        vois = []
        remaining = set(voi_list)
        for voi_set in self._voi_sets:
            vois.append([])

        for i, voi_set in enumerate(self._voi_sets):
            for v in voi_list:
                if v in voi_set:
                    vois[i].append(v)
                    remaining.remove(v)

        vois = [tuple(x) for x in vois if x]

        for v in voi_list:
            if v in remaining:
                vois.append((v,))

        return vois

    def desvars_of_interest(self):
        """
        Returns
        -------
        list of tuples of str
            The list of design vars, organized into tuples according to
            previously defined VOI groups.
        """
        return self._of_interest(self._desvars)

    def outputs_of_interest(self):
        """
        Returns
        -------
        list of tuples of str
            The list of constraints and objectives, organized into tuples
            according to previously defined VOI groups.
        """
        return self._of_interest(list(chain(self._objs, self._cons)))

    def parallel_derivs(self, vnames):
        """
        Specifies that the named variables of interest are to be grouped
        together so that their derivatives can be solved for concurrently.

        Args
        ----
        vnames : iter of str
            The names of variables of interest that are to be grouped.
        """
        # make sure all vnames are desvars, constraints, or objectives
        found = set()
        for n in vnames:
            if not (n in self._desvars or n in self._objs or n in self._cons):
                raise RuntimeError("'%s' is not a param, objective, or " "constraint" % n)
        for grp in self._voi_sets:
            for vname in vnames:
                if vname in grp:
                    msg = "'%s' cannot be added to VOI set %s because it " + "already exists in VOI set: %s"
                    raise RuntimeError(msg % (vname, tuple(vnames), grp))

        param_intsect = set(vnames).intersection(self._desvars.keys())

        if param_intsect and len(param_intsect) != len(vnames):
            raise RuntimeError(
                "%s cannot be grouped because %s are design "
                "vars and %s are not." % (vnames, list(param_intsect), list(set(vnames).difference(param_intsect)))
            )

        if MPI:  # pragma: no cover
            self._voi_sets.append(tuple(vnames))
        else:
            warnings.warn("parallel derivs %s specified but not running under MPI")

    def add_recorder(self, recorder):
        """
        Adds a recorder to the driver.

        Args
        ----
        recorder : BaseRecorder
           A recorder instance.
        """
        self.recorders.append(recorder)

    def add_desvar(self, name, low=None, high=None, indices=None, adder=0.0, scaler=1.0):
        """
        Adds a parameter to this driver.

        Args
        ----
        name : string
           Name of the IndepVarComp in the root system.

        low : float or ndarray, optional
            Lower boundary for the param

        high : float or ndarray, optional
            Upper boundary for the param

        indices : iter of int, optional
            If a param is an array, these indicate which entries are of
            interest for derivatives.

        adder : float or ndarray, optional
            Value to add to the model value to get the scaled value. Adder
            is first in precedence.

        scaler : float or ndarray, optional
            value to multiply the model value to get the scaled value. Scaler
            is second in precedence.
        """

        if low is None:
            low = -1e99
        elif isinstance(low, np.ndarray):
            low = low.flatten()

        if high is None:
            high = 1e99
        elif isinstance(high, np.ndarray):
            high = high.flatten()

        if isinstance(adder, np.ndarray):
            adder = adder.flatten()
        if isinstance(scaler, np.ndarray):
            scaler = scaler.flatten()

        # Scale the low and high values
        low = (low + adder) * scaler
        high = (high + adder) * scaler

        param = {}
        param["low"] = low
        param["high"] = high
        param["adder"] = adder
        param["scaler"] = scaler
        if indices:
            param["indices"] = np.array(indices, dtype=int)

        self._desvars[name] = param
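    # Hedged usage sketch (variable name is illustrative):
    #
    #     top.driver.add_desvar('p.x', low=-10.0, high=10.0, scaler=0.1)
    #
    # Note that low and high are stored already shifted and scaled, so the
    # optimizer works entirely in scaled space.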

    def add_param(self, name, low=None, high=None, indices=None, adder=0.0, scaler=1.0):
        """
        Deprecated.  Use ``add_desvar`` instead.
        """
        warnings.simplefilter("always", DeprecationWarning)
        warnings.warn("Driver.add_param() is deprecated. Use add_desvar() instead.", DeprecationWarning, stacklevel=2)
        warnings.simplefilter("ignore", DeprecationWarning)

        self.add_desvar(name, low=low, high=high, indices=indices, adder=adder, scaler=scaler)

    def get_desvars(self):
        """ Returns a dict of possibly distributed parameters.

        Returns
        -------
        dict
            Keys are the param object names, and the values are the param
            values.
        """
        uvec = self.root.unknowns
        desvars = OrderedDict()

        for key, meta in iteritems(self._desvars):
            desvars[key] = self._get_distrib_var(key, meta, "design var")

        return desvars

    def _get_distrib_var(self, name, meta, voi_type):
        uvec = self.root.unknowns
        comm = self.root.comm
        nproc = comm.size
        iproc = comm.rank

        if nproc > 1:
            owner = self.root._owning_ranks[name]
            if iproc == owner:
                flatval = uvec.flat[name]
            else:
                flatval = None
        else:
            owner = 0
            flatval = uvec.flat[name]

        if "indices" in meta and not (nproc > 1 and owner != iproc):
            # Make sure our indices are valid
            try:
                flatval = flatval[meta["indices"]]
            except IndexError:
                msg = "Index for {} '{}' is out of bounds. "
                msg += "Requested index: {}, "
                msg += "shape: {}."
                raise IndexError(msg.format(voi_type, name, meta["indices"], uvec.metadata(name)["shape"]))

        if nproc > 1:
            flatval = comm.bcast(flatval, root=owner)

        scaler = meta["scaler"]
        adder = meta["adder"]

        if isinstance(scaler, np.ndarray) or isinstance(adder, np.ndarray) or scaler != 1.0 or adder != 0.0:
            return (flatval + adder) * scaler
        else:
            return flatval

    def get_desvar_metadata(self):
        """ Returns a dict of parameter metadata.

        Returns
        -------
        dict
            Keys are the param object names, and the values are the param
            metadata dictionaries.
        """
        return self._desvars

    def set_desvar(self, name, value):
        """ Sets a parameter.

        Args
        ----
        name : string
           Name of the IndepVarComp in the root system.

        value : ndarray or float
            Value to set the parameter to.
        """
        if self.root.unknowns.flat[name].size == 0:
            return

        scaler = self._desvars[name]["scaler"]
        adder = self._desvars[name]["adder"]
        if isinstance(scaler, np.ndarray) or isinstance(adder, np.ndarray) or scaler != 1.0 or adder != 0.0:
            value = value / scaler - adder
        else:
            value = value

        # Only set the indices we requested when we set the parameter.
        idx = self._desvars[name].get("indices")
        if idx is not None:
            self.root.unknowns[name][idx] = value
        else:
            self.root.unknowns[name] = value

    def add_objective(self, name, indices=None, adder=0.0, scaler=1.0):
        """ Adds an objective to this driver.

        Args
        ----
        name : string
            Promoted pathname of the output that will serve as the objective.

        indices : iter of int, optional
            If an objective is an array, these indicate which entries are of
            interest for derivatives.

        adder : float or ndarray, optional
            Value to add to the model value to get the scaled value. Adder
            is first in precedence.

        scaler : float or ndarray, optional
            value to multiply the model value to get the scaled value. Scaler
            is second in precedence.
        """

        if isinstance(adder, np.ndarray):
            adder = adder.flatten()
        if isinstance(scaler, np.ndarray):
            scaler = scaler.flatten()

        obj = {}
        obj["adder"] = adder
        obj["scaler"] = scaler
        if indices:
            obj["indices"] = indices
            if len(indices) > 1 and not self.supports["multiple_objectives"]:
                raise RuntimeError(
                    "Multiple objective indices specified for "
                    "variable '%s', but driver '%s' doesn't "
                    "support multiple objectives." % (name, self.pathname)
                )
        self._objs[name] = obj

    def get_objectives(self, return_type="dict"):
        """ Gets all objectives of this driver.

        Args
        ----
        return_type : string
            Set to 'dict' to return a dictionary, or set to 'array' to return a
            flat ndarray.

        Returns
        -------
        dict (for return_type 'dict')
            Key is the objective name string, value is an ndarray with the values.

        ndarray (for return_type 'array')
            Array containing all objective values in the order they were added.
        """
        uvec = self.root.unknowns
        objs = OrderedDict()

        for key, meta in iteritems(self._objs):
            objs[key] = self._get_distrib_var(key, meta, "objective")

        return objs

    def add_constraint(
        self, name, lower=None, upper=None, equals=None, linear=False, jacs=None, indices=None, adder=0.0, scaler=1.0
    ):
        """ Adds a constraint to this driver. For inequality constraints,
        `lower` or `upper` must be specified. For equality constraints, `equals`
        must be specified.

        Args
        ----
        name : string
            Promoted pathname of the output that will serve as the quantity to
            constrain.

        lower : float or ndarray, optional
             Constrain the quantity to be greater than this value.

        upper : float or ndarray, optional
             Constrain the quantity to be less than this value.

        equals : float or ndarray, optional
             Constrain the quantity to be equal to this value.

        linear : bool, optional
            Set to True if this constraint is linear with respect to all design
            variables so that it can be calculated once and cached.

        jacs : dict of functions, optional
            Dictionary of user-defined functions that return the flattened
            Jacobian of this constraint with respect to the design vars of
            this driver, as indicated by the dictionary keys. Default is None
            to let OpenMDAO calculate all derivatives. Note, this is currently
            unsupported.

        indices : iter of int, optional
            If a constraint is an array, these indicate which entries are of
            interest for derivatives.

        adder : float or ndarray, optional
            Value to add to the model value to get the scaled value. Adder
            is first in precedence.

        scaler : float or ndarray, optional
            value to multiply the model value to get the scaled value. Scaler
            is second in precedence.
        """

        if equals is not None and (lower is not None or upper is not None):
            msg = "Constraint '{}' cannot be both equality and inequality."
            raise RuntimeError(msg.format(name))
        if equals is not None and self.supports["equality_constraints"] is False:
            msg = "Driver does not support equality constraint '{}'."
            raise RuntimeError(msg.format(name))
        if equals is None and self.supports["inequality_constraints"] is False:
            msg = "Driver does not support inequality constraint '{}'."
            raise RuntimeError(msg.format(name))
        if lower is not None and upper is not None and self.supports["two_sided_constraints"] is False:
            msg = "Driver does not support 2-sided constraint '{}'."
            raise RuntimeError(msg.format(name))
        if lower is None and upper is None and equals is None:
            msg = "Constraint '{}' needs to define lower, upper, or equals."
            raise RuntimeError(msg.format(name))

        if isinstance(scaler, np.ndarray):
            scaler = scaler.flatten()
        if isinstance(adder, np.ndarray):
            adder = adder.flatten()
        if isinstance(lower, np.ndarray):
            lower = lower.flatten()
        if isinstance(upper, np.ndarray):
            upper = upper.flatten()
        if isinstance(equals, np.ndarray):
            equals = equals.flatten()

        con = {}
        con["lower"] = lower
        con["upper"] = upper
        con["equals"] = equals
        con["linear"] = linear
        con["adder"] = adder
        con["scaler"] = scaler
        con["jacs"] = jacs

        if indices:
            con["indices"] = indices
        self._cons[name] = con
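    # Hedged usage sketch (names are illustrative): inequality constraints set
    # lower and/or upper, equality constraints set equals, and mixing the two
    # for one variable raises RuntimeError:
    #
    #     top.driver.add_constraint('con1', upper=0.0)
    #     top.driver.add_constraint('con2', equals=3.0, scaler=2.0)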

    def get_constraints(self, ctype="all", lintype="all"):
        """ Gets all constraints for this driver.

        Args
        ----
        ctype : string
            Default is 'all'. Optionally return just the inequality constraints
            with 'ineq' or the equality constraints with 'eq'.

        lintype : string
            Default is 'all'. Optionally return just the linear constraints
            with 'linear' or the nonlinear constraints with 'nonlinear'.

        Returns
        -------
        dict
            Key is the constraint name string, value is an ndarray with the values.
        """
        uvec = self.root.unknowns
        cons = OrderedDict()

        for key, meta in iteritems(self._cons):

            if lintype == "linear" and meta["linear"] == False:
                continue

            if lintype == "nonlinear" and meta["linear"]:
                continue

            if ctype == "eq" and meta["equals"] is None:
                continue

            if ctype == "ineq" and meta["equals"] is not None:
                continue

            scaler = meta["scaler"]
            adder = meta["adder"]

            cons[key] = self._get_distrib_var(key, meta, "constraint")

        return cons

    def get_constraint_metadata(self):
        """ Returns a dict of constraint metadata.

        Returns
        -------
        dict
            Keys are the constraint object names, and the values are the
            constraint metadata dictionaries.
        """
        return self._cons

    def run(self, problem):
        """ Runs the driver. This function should be overriden when inheriting.

        Args
        ----
        problem : `Problem`
            Our parent `Problem`.
        """
        system = problem.root

        # Metadata Setup
        self.iter_count += 1
        metadata = create_local_meta(None, "Driver")
        system.ln_solver.local_meta = metadata
        update_local_meta(metadata, (self.iter_count,))

        # Solve the system once and record results.
        system.solve_nonlinear(metadata=metadata)

        self.recorders.record(system, metadata)

    def generate_docstring(self):
        """
        Generates a numpy-style docstring for a user-created Driver class.

        Returns
        -------
        docstring : str
                string that contains a basic numpy docstring.
        """
        # start the docstring off
        docstring = '    """\n'

        # Put options into docstring
        from openmdao.core.options import OptionsDictionary

        firstTime = 1
        # for py3.4, items from vars must come out in same order.
        v = OrderedDict(sorted(vars(self).items()))
        for key, value in v.items():
            if type(value) == OptionsDictionary:
                if key == "supports":
                    continue
                if firstTime:  # start of Options docstring
                    docstring += "\n    Options\n    -------\n"
                    firstTime = 0
                for (name, val) in sorted(value.items()):
                    docstring += "    " + key + "['"
                    docstring += name + "']"
                    docstring += " :  " + type(val).__name__
                    docstring += "("
                    if type(val).__name__ == "str":
                        docstring += "'"
                    docstring += str(val)
                    if type(val).__name__ == "str":
                        docstring += "'"
                    docstring += ")\n"

                    desc = value._options[name]["desc"]
                    if desc:
                        docstring += "        " + desc + "\n"
        # finish up docstring
        docstring += '\n    """\n'
        return docstring
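
    # Hedged usage sketch: generate_docstring() is normally called on an
    # instance of a user-written subclass and the returned text pasted into
    # that class definition, e.g.
    #
    #     print(MyDriver().generate_docstring())   # MyDriver is hypothetical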