Example #1
def _timing_cmd(options, user_args):
    """
    Implement the 'openmdao timing' command.

    Parameters
    ----------
    options : argparse Namespace
        Command line options.
    user_args : list of str
        Args to be passed to the user script.
    """
    if not options.funcs:
        options.funcs = _default_timer_methods.copy()

    filename = _to_filename(options.file[0])
    if filename.endswith('.py'):
        hooks._register_hook('setup',
                             'Problem',
                             pre=partial(_set_timer_setup_hook, options))

        # register an atexit function to write out all of the timing data
        atexit.register(partial(_postprocess, options))

        with timing_context(not options.use_context):
            _load_and_exec(options.file[0], user_args)

    else:  # assume file is a pickle file
        if options.use_context:
            issue_warning(
                f"Since given file '{options.file[0]}' is not a python script, the "
                "'--use_context' option is ignored.")
        _show_view(options.file[0], options)
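
A command-line sketch of how this is typically invoked; the script name is illustrative, and only the `--use_context` flag is taken from the code and docstrings in these examples:

openmdao timing my_model_script.py                 # time the whole script, report at exit
openmdao timing --use_context my_model_script.py   # only time code inside timing_context()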
Example #2
    def _setup_simul_coloring(self):
        """
        Set up metadata for coloring of total derivative solution.

        If set_coloring was called with a filename, load the coloring file.
        """
        # command line simul_coloring uses this env var to turn pre-existing coloring off
        if not coloring_mod._use_total_sparsity:
            return

        problem = self._problem()
        if not problem.model._use_derivatives:
            issue_warning("Derivatives are turned off.  Skipping simul deriv coloring.",
                          category=DerivativesWarning)
            return

        total_coloring = self._get_static_coloring()

        if total_coloring._rev and problem._orig_mode not in ('rev', 'auto'):
            revcol = total_coloring._rev[0][0]
            if revcol:
                raise RuntimeError("Simultaneous coloring does reverse solves but mode has "
                                   "been set to '%s'" % problem._orig_mode)
        if total_coloring._fwd and problem._orig_mode not in ('fwd', 'auto'):
            fwdcol = total_coloring._fwd[0][0]
            if fwdcol:
                raise RuntimeError("Simultaneous coloring does forward solves but mode has "
                                   "been set to '%s'" % problem._orig_mode)
Example #3
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class or exception info tuple (exception class, exception instance, traceback)
        This exception class is used to create the exception to be raised, or an exception info
        tuple from a previously raised exception that is to be re-raised, contingent on the value
        of 'err'.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    if (err is None and ignore_errors()) or err is False:
        issue_warning(msg, category=category)
    else:
        if isinstance(exc, tuple):
            raise exc[0](msg).with_traceback(exc[2])
        else:
            raise exc(msg)
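
A small usage sketch based only on the docstring above (the defining module is not shown here, so the import is omitted):

conditional_error("invalid connection found", exc=ValueError, err=False)  # issues a UserWarning
conditional_error("invalid connection found", exc=ValueError, err=True)   # raises ValueError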
Example #4
def _print_violations(outputs, lower, upper):
    """
    Print out which variables exceed their bounds.

    Parameters
    ----------
    outputs : <Vector>
        Vector containing the outputs.
    lower : <Vector>
        Vector containing the lower bounds.
    upper : <Vector>
        Vector containing the upper bounds.
    """
    start = end = 0
    for name, val in outputs._abs_item_iter():
        end += val.size
        if upper is not None and any(val > upper[start:end]):
            msg = (f"'{name}' exceeds upper bounds\n  Val: {val}\n  Upper: {upper[start:end]}\n")
            issue_warning(msg, category=SolverWarning)

        if lower is not None and any(val < lower[start:end]):
            msg = (f"'{name}' exceeds lower bounds\n  Val: {val}\n  Lower: {lower[start:end]}\n")
            issue_warning(msg, category=SolverWarning)

        start = end
Example #5
    def compute_approx_col_iter(self, system, under_cs=False):
        """
        Execute the system to compute the approximate sub-Jacobians.

        Parameters
        ----------
        system : System
            System on which the execution is run.
        under_cs : bool
            True if we're currently under complex step at a higher level.

        Yields
        ------
        int
            column index
        ndarray
            solution array corresponding to the jacobian column at the given column index
        """
        if not self._wrt_meta:
            return

        if system.under_complex_step:

            # If we are nested under another complex step, then warn and swap to FD.
            if not self._fd:
                from openmdao.approximation_schemes.finite_difference import FiniteDifference

                issue_warning(
                    "Nested complex step detected. Finite difference will be used.",
                    prefix=system.pathname,
                    category=DerivativesWarning)

                fd = self._fd = FiniteDifference()
                empty = {}
                for wrt in self._wrt_meta:
                    fd.add_approximation(wrt, system, empty)

            yield from self._fd.compute_approx_col_iter(system)
            return

        saved_inputs = system._inputs._get_data().copy()
        system._inputs._data.imag[:] = 0.0
        saved_outputs = system._outputs.asarray(copy=True)
        system._outputs._data.imag[:] = 0.0
        saved_resids = system._residuals.asarray(copy=True)
        system._residuals._data.imag[:] = 0.0

        # Turn on complex step.
        system._set_complex_step_mode(True)

        try:
            yield from self._compute_approx_col_iter(system, under_cs=True)
        finally:
            # Turn off complex step.
            system._set_complex_step_mode(False)

        system._inputs.set_val(saved_inputs)
        system._outputs.set_val(saved_outputs)
        system._residuals.set_val(saved_resids)
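
A hedged sketch of the situation this code guards against: complex step requested for a component's partials while the enclosing model also approximates totals with complex step, which triggers the warning and the finite-difference fallback (names are illustrative):

import openmdao.api as om

class SquareComp(om.ExplicitComponent):
    def setup(self):
        self.add_input('x', 1.0)
        self.add_output('y', 1.0)
        self.declare_partials('y', 'x', method='cs')  # component-level complex step

    def compute(self, inputs, outputs):
        outputs['y'] = 3.0 * inputs['x'] ** 2

prob = om.Problem()
prob.model.add_subsystem('comp', SquareComp(), promotes=['*'])
prob.model.approx_totals(method='cs')  # model-level complex step -> nested CS at run time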
Example #6
    def __call__(self, idx, src_shape=None, flat_src=False):
        """
        Return an Indexer instance based on the passed indices/slices.

        Parameters
        ----------
        idx : int, ndarray, slice, or tuple
            Some sort of index/indices/slice.
        src_shape : tuple or None
            Source shape if known.
        flat_src : bool
            If True, indices are into a flat source.

        Returns
        -------
        Indexer
            The Indexer instance we created based on the args.
        """
        if idx is ...:
            idxer = EllipsisIndexer((idx, ), flat_src=flat_src)
        elif isinstance(idx, int):
            idxer = IntIndexer(idx, flat_src=flat_src)
        elif isinstance(idx, slice):
            idxer = SliceIndexer(idx, flat_src=flat_src)

        elif isinstance(idx, tuple):
            multi = len(idx) > 1
            for i in idx:
                if i is ...:
                    multi = len(idx) > 2  # ... doesn't count toward limit of dimensions
                    idxer = EllipsisIndexer(idx, flat_src=flat_src)
                    break
            else:
                idxer = MultiIndexer(idx, flat_src=flat_src)
            if flat_src and multi:
                raise RuntimeError(
                    "Can't use a multdimensional index into a flat source.")
        else:
            arr = np.atleast_1d(idx)
            if arr.ndim == 1:
                idxer = ArrayIndexer(arr, flat_src=flat_src)
            else:
                issue_warning(
                    "Using a non-tuple sequence for multidimensional indexing is "
                    "deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the "
                    "future this will be interpreted as an array index, "
                    "`arr[np.array(seq)]`, which will result either in an error or a "
                    "different result.")
                idxer = MultiIndexer(tuple(idx), flat_src=flat_src)

        if src_shape is not None:
            if flat_src:
                src_shape = (np.prod(src_shape, dtype=int), )
            idxer.set_src_shape(src_shape)

        return idxer
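
A usage sketch of the factory above; the import path is an assumption:

import numpy as np
from openmdao.utils.indexer import indexer  # import path is an assumption

idx = indexer(slice(2, 7), src_shape=(10,))         # handled by SliceIndexer
idx2 = indexer(np.array([0, 3, 5]), flat_src=True)  # handled by ArrayIndexer (flat source)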
Example #7
    def record_metadata_system(self, system, run_number=None):
        """
        Record system metadata.

        Parameters
        ----------
        system : System
            The System for which to record metadata.
        run_number : int or None
            Number indicating which run the metadata is associated with.
            None for the first run, 1 for the second, etc.
        """
        if self._record_metadata and self.metadata_connection:

            scaling_vecs, user_options = self._get_metadata_system(system)

            if scaling_vecs is None:
                return

            scaling_factors = pickle.dumps(scaling_vecs, self._pickle_version)

            # try to pickle the metadata, report if it failed
            try:
                pickled_metadata = pickle.dumps(user_options,
                                                self._pickle_version)
            except Exception:
                try:
                    for key, values in user_options._dict.items():
                        pickle.dumps(values, self._pickle_version)
                except Exception:
                    pickled_metadata = pickle.dumps(OptionsDictionary(),
                                                    self._pickle_version)
                    msg = f"Trying to record option '{key}' which cannot be pickled on this " \
                          "system. Set option 'recordable' to False. Skipping recording options " \
                          "for this system."
                    issue_warning(msg,
                                  prefix=system.msginfo,
                                  category=CaseRecorderWarning)

            path = system.pathname
            if not path:
                path = 'root'

            scaling_factors = sqlite3.Binary(zlib.compress(scaling_factors))
            pickled_metadata = sqlite3.Binary(zlib.compress(pickled_metadata))

            if run_number is None:
                name = path
            else:
                name = META_KEY_SEP.join([path, str(run_number)])

            with self.metadata_connection as m:
                m.execute(
                    "INSERT INTO system_metadata"
                    "(id, scaling_factors, component_metadata) "
                    "VALUES(?,?,?)", (name, scaling_factors, pickled_metadata))
Example #8
def _get_used_before_calc_subs(group, input_srcs):
    """
    Return Systems that are executed out of dataflow order.

    Parameters
    ----------
    group : <Group>
        The Group where we're checking subsystem order.
    input_srcs : {}
        dict containing variable abs names for sources of the inputs.
        This describes all variable connections, either explicit or implicit,
        in the entire model.

    Returns
    -------
    dict
        A dict mapping names of target Systems to a set of names of their
        source Systems that execute after them.
    """
    parallel_solver = {}
    allsubs = group._subsystems_allprocs
    for sub, i in allsubs.values():
        if hasattr(sub, '_mpi_proc_allocator') and sub._mpi_proc_allocator.parallel:
            parallel_solver[sub.name] = sub.nonlinear_solver.SOLVER

    glen = len(group.pathname.split('.')) if group.pathname else 0

    ubcs = defaultdict(set)
    for tgt_abs, src_abs in input_srcs.items():
        if src_abs is not None:
            iparts = tgt_abs.split('.')
            oparts = src_abs.split('.')
            src_sys = oparts[glen]
            tgt_sys = iparts[glen]
            hierarchy_check = oparts[glen + 1] == iparts[glen + 1]

            if (src_sys in parallel_solver and tgt_sys in parallel_solver
                    and (parallel_solver[src_sys]
                         not in ["NL: NLBJ", "NL: Newton", "BROYDEN"])
                    and src_sys == tgt_sys and not hierarchy_check):
                msg = f"Need to attach NonlinearBlockJac, NewtonSolver, or BroydenSolver " \
                      f"to '{src_sys}' when connecting components inside parallel groups"
                issue_warning(msg, category=SetupWarning)
                ubcs[tgt_abs.rsplit('.', 1)[0]].add(src_abs.rsplit('.', 1)[0])
            if (src_sys in allsubs and tgt_sys in allsubs
                    and (allsubs[src_sys].index > allsubs[tgt_sys].index)):
                ubcs[tgt_sys].add(src_sys)

    return ubcs
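
A minimal sketch of the fix the warning above suggests: give a ParallelGroup an iterative nonlinear solver when components inside it are connected to each other (component names are illustrative):

import openmdao.api as om

model = om.Group()
par = model.add_subsystem('par', om.ParallelGroup())
par.add_subsystem('c1', om.ExecComp('y = 2.0*x'))
par.add_subsystem('c2', om.ExecComp('y = x + 1.0'))
par.connect('c1.y', 'c2.x')                  # connection inside the parallel group
par.nonlinear_solver = om.NonlinearBlockJac()
par.linear_solver = om.LinearBlockJac()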
Example #9
    def _chk_shape_dims(self, flat_src, iname, oname, prefix):
        if self._orig_src_shape is None or flat_src or not self._check_dims:
            return

        if len(self._orig_src_shape) > len(shape2tuple(self.shape)):
            issue_warning(
                f"connecting source '{oname}' of dimension {len(self._orig_src_shape)} "
                f"to '{iname}' using src_indices of dimension {len(self.shape)} without "
                "setting `flat_src_indices=True`.  The source is currently treated as "
                "flat, but this automatic flattening is deprecated and will be removed "
                "in a future release.  To keep the old behavior, set `flat_src_indices`"
                "=True in the connect(), promotes(), or add_input() call.",
                category=OMDeprecationWarning,
                prefix=prefix)
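
The deprecation message above asks for an explicit `flat_src_indices=True`; a sketch of that call (variable names are illustrative):

# Treat src_indices as indices into the flattened source array.
model.connect('src.y', 'sink.x', src_indices=[0, 2, 4], flat_src_indices=True)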
Example #10
    def _setup_partials(self):
        """
        Check that all partials are declared.
        """
        if not self._manual_decl_partials:
            meta = self._var_rel2meta
            decl_partials = super().declare_partials
            for i, (outs, tup) in enumerate(self._exprs_info):
                vs, funcs = tup
                ins = sorted(set(vs).difference(outs))
                for out in sorted(outs):
                    for inp in ins:
                        if self.options['has_diag_partials']:
                            ival = meta[inp]['val']
                            iarray = isinstance(ival, ndarray) and ival.size > 1
                            oval = meta[out]['val']
                            if iarray and isinstance(oval, ndarray) and oval.size > 1:
                                if oval.size != ival.size:
                                    raise RuntimeError(
                                        "%s: has_diag_partials is True but partial(%s, %s) "
                                        "is not square (shape=(%d, %d))." %
                                        (self.msginfo, out, inp, oval.size, ival.size))
                                # partial will be declared as diagonal
                                inds = np.arange(oval.size, dtype=INT_DTYPE)
                            else:
                                inds = None
                            decl_partials(of=out, wrt=inp, rows=inds, cols=inds)
                        else:
                            decl_partials(of=out, wrt=inp)

        super()._setup_partials()
        if self._manual_decl_partials:
            undeclared = []
            for i, (outs, tup) in enumerate(self._exprs_info):
                vs, funcs = tup
                ins = sorted(set(vs).difference(outs))
                for out in sorted(outs):
                    out = '.'.join((self.pathname, out)) if self.pathname else out
                    for inp in ins:
                        inp = '.'.join((self.pathname, inp)) if self.pathname else inp
                        if (out, inp) not in self._subjacs_info:
                            undeclared.append((out, inp))
            if undeclared:
                idx = len(self.pathname) + 1 if self.pathname else 0
                undeclared = ', '.join([' wrt '.join((f"'{of[idx:]}'", f"'{wrt[idx:]}'"))
                                        for of, wrt in undeclared])
                issue_warning(f"The following partial derivatives have not been "
                              f"declared so they are assumed to be zero: [{undeclared}].",
                              prefix=self.msginfo, category=DerivativesWarning)
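
A short sketch of the `has_diag_partials` option handled above, assuming this is ExecComp's partial setup; sizes are illustrative:

import numpy as np
import openmdao.api as om

# Elementwise expression: the Jacobian of y with respect to x is diagonal, so declare it that way.
comp = om.ExecComp('y = 3.0*x + 2.0', x=np.ones(5), y=np.ones(5), has_diag_partials=True)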
Example #11
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    err : bool
        If None, use ignore_errors(), otherwise use value of err to determine whether to
        raise an exception (err=True) or issue a warning (err=False).
    """
    if (err is None and ignore_errors()) or err is False:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
Example #12
def _show_view(timing_file, options):
    # given a timing file, display based on options.view
    view = options.view.lower()

    if view == 'text':
        for f in options.funcs:
            ret = view_MPI_timing(timing_file, method=f, out_stream=sys.stdout)
            if ret is None:
                issue_warning(
                    f"Could find no children of a ParallelGroup running method '{f}'."
                )
    elif view == 'browser' or view == 'no_browser':
        view_timing(timing_file,
                    outfile='timing_report.html',
                    show_browser=view == 'browser')
    elif view == 'dump':
        view_timing_dump(timing_file, out_stream=sys.stdout)
    elif view == 'none':
        pass
    else:
        issue_warning(f"Viewing option '{view}' ignored. Valid options are "
                      f"{_view_options}.")
Example #13
def timing_context(active=True):
    """
    Context manager to set whether timing is active or not.

    Note that this will only work if the --use_context arg is passed to the `openmdao timing`
    command line tool.  Otherwise it will be ignored and the entire python script will be
    timed.

    Parameters
    ----------
    active : bool
        Indicates if timing is active or inactive.

    Yields
    ------
    nothing
    """
    global _timing_active, _total_time

    active = bool(active)
    ignore = _timing_active and active
    if ignore:
        issue_warning(
            "Timing is already active outside of this timing_context, so it will be "
            "ignored.")

    start_time = perf_counter()

    save = _timing_active
    _timing_active = active
    try:
        yield
    finally:
        _timing_active = save
        if active and not ignore:
            _total_time += perf_counter() - start_time
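
A usage sketch; the import path is an assumption, and per the docstring the context only takes effect when the script is run under `openmdao timing --use_context`:

from openmdao.visualization.timing_viewer.timer import timing_context  # assumed path

# prob is an already-built Problem; setup code here is excluded from timing
with timing_context(True):
    prob.run_model()  # only this call contributes to the recorded timings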
Example #14
    def _initialize_database(self):
        """
        Initialize the database.
        """
        if MPI:
            rank = MPI.COMM_WORLD.rank
            if self._parallel and self._record_on_proc:
                filepath = '%s_%d' % (self._filepath, rank)
                print(
                    "Note: SqliteRecorder is running on multiple processors. "
                    "Cases from rank %d are being written to %s." %
                    (rank, filepath))
                if rank == 0:
                    metadata_filepath = f'{self._filepath}_meta'
                    print(
                        f"Note: Metadata is being recorded separately as {metadata_filepath}."
                    )
                    try:
                        os.remove(metadata_filepath)
                        issue_warning(
                            'The existing case recorder metadata file, '
                            f'{metadata_filepath}, is being overwritten.',
                            category=UserWarning)
                    except OSError:
                        pass
                    self.metadata_connection = sqlite3.connect(
                        metadata_filepath)
                else:
                    self._record_metadata = False
            elif rank == 0:
                filepath = self._filepath
            else:
                filepath = None
        else:
            filepath = self._filepath

        if filepath:
            try:
                os.remove(filepath)
                issue_warning(
                    f'The existing case recorder file, {filepath},'
                    ' is being overwritten.',
                    category=UserWarning)
            except OSError:
                pass

            self.connection = sqlite3.connect(filepath)
            if self._record_metadata and self.metadata_connection is None:
                self.metadata_connection = self.connection

            with self.connection as c:
                # used to keep track of the order of the case records across all case tables
                c.execute(
                    "CREATE TABLE global_iterations(id INTEGER PRIMARY KEY, "
                    "record_type TEXT, rowid INT, source TEXT)")

                c.execute(
                    "CREATE TABLE driver_iterations(id INTEGER PRIMARY KEY, "
                    "counter INT, iteration_coordinate TEXT, timestamp REAL, "
                    "success INT, msg TEXT, inputs TEXT, outputs TEXT, residuals TEXT)"
                )
                c.execute(
                    "CREATE TABLE driver_derivatives(id INTEGER PRIMARY KEY, "
                    "counter INT, iteration_coordinate TEXT, timestamp REAL, "
                    "success INT, msg TEXT, derivatives BLOB)")
                c.execute(
                    "CREATE INDEX driv_iter_ind on driver_iterations(iteration_coordinate)"
                )

                c.execute(
                    "CREATE TABLE problem_cases(id INTEGER PRIMARY KEY, "
                    "counter INT, case_name TEXT, timestamp REAL, "
                    "success INT, msg TEXT, inputs TEXT, outputs TEXT, residuals TEXT, "
                    "jacobian BLOB, abs_err REAL, rel_err REAL)")
                c.execute(
                    "CREATE INDEX prob_name_ind on problem_cases(case_name)")

                c.execute(
                    "CREATE TABLE system_iterations(id INTEGER PRIMARY KEY, "
                    "counter INT, iteration_coordinate TEXT, timestamp REAL, "
                    "success INT, msg TEXT, inputs TEXT, outputs TEXT, residuals TEXT)"
                )
                c.execute(
                    "CREATE INDEX sys_iter_ind on system_iterations(iteration_coordinate)"
                )

                c.execute(
                    "CREATE TABLE solver_iterations(id INTEGER PRIMARY KEY, "
                    "counter INT, iteration_coordinate TEXT, timestamp REAL, "
                    "success INT, msg TEXT, abs_err REAL, rel_err REAL, "
                    "solver_inputs TEXT, solver_output TEXT, solver_residuals TEXT)"
                )
                c.execute(
                    "CREATE INDEX solv_iter_ind on solver_iterations(iteration_coordinate)"
                )

            if self._record_metadata:
                with self.metadata_connection as m:
                    m.execute(
                        "CREATE TABLE metadata(format_version INT, openmdao_version TEXT, "
                        "abs2prom BLOB, prom2abs BLOB, abs2meta BLOB, var_settings BLOB,"
                        "conns BLOB)")
                    m.execute(
                        "INSERT INTO metadata(format_version, openmdao_version, abs2prom,"
                        " prom2abs) VALUES(?,?,?,?)",
                        (format_version, openmdao_version, None, None))
                    m.execute(
                        "CREATE TABLE driver_metadata(id TEXT PRIMARY KEY, "
                        "model_viewer_data TEXT)")
                    m.execute(
                        "CREATE TABLE system_metadata(id TEXT PRIMARY KEY, "
                        "scaling_factors BLOB, component_metadata BLOB)")
                    m.execute(
                        "CREATE TABLE solver_metadata(id TEXT PRIMARY KEY, "
                        "solver_options BLOB, solver_class TEXT)")

        self._database_initialized = True
        if MPI is not None:
            MPI.COMM_WORLD.barrier()
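
For context, a minimal recorder setup that ends up running this initialization; the file name is illustrative:

import openmdao.api as om

prob = om.Problem()
# ... build the model ...
prob.driver.add_recorder(om.SqliteRecorder('cases.sql'))
prob.setup()
prob.run_driver()
prob.cleanup()  # shuts down the recorder and closes the database connections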
Example #15
    def _solve(self):
        """
        Run the iterative solver.
        """
        maxiter = self.options['maxiter']
        atol = self.options['atol']
        rtol = self.options['rtol']
        iprint = self.options['iprint']
        stall_limit = self.options['stall_limit']
        stall_tol = self.options['stall_tol']

        self._mpi_print_header()

        self._iter_count = 0
        norm0, norm = self._iter_initialize()

        self._norm0 = norm0

        self._mpi_print(self._iter_count, norm, norm / norm0)

        stalled = False
        stall_count = 0
        if stall_limit > 0:
            stall_norm = norm0

        while self._iter_count < maxiter and norm > atol and norm / norm0 > rtol and not stalled:
            with Recording(type(self).__name__, self._iter_count, self) as rec:

                if stall_count == 3 and not self.linesearch.options['print_bound_enforce']:

                    self.linesearch.options['print_bound_enforce'] = True

                    if self._system().pathname:
                        pathname = f"{self._system().pathname}."
                    else:
                        pathname = ""

                    msg = (
                        f"Your model has stalled three times and may be violating the bounds. "
                        f"In the future, turn on print_bound_enforce in your solver options "
                        f"here: \n{pathname}nonlinear_solver.linesearch.options"
                        f"['print_bound_enforce']=True. "
                        f"\nThe bound(s) being violated now are:\n")
                    issue_warning(msg, category=SolverWarning)

                    self._single_iteration()
                    self.linesearch.options['print_bound_enforce'] = False
                else:
                    self._single_iteration()

                self._iter_count += 1
                self._run_apply()
                norm = self._iter_get_norm()

                # Save the norm values in the context manager so they can also be recorded.
                rec.abs = norm
                if norm0 == 0:
                    norm0 = 1
                rec.rel = norm / norm0

                # Check if convergence is stalled.
                if stall_limit > 0:
                    rel_norm = rec.rel
                    norm_diff = np.abs(stall_norm - rel_norm)
                    if norm_diff <= stall_tol:
                        stall_count += 1
                        if stall_count >= stall_limit:
                            stalled = True
                    else:
                        stall_count = 0
                        stall_norm = rel_norm

            self._mpi_print(self._iter_count, norm, norm / norm0)

        system = self._system()

        # flag for the print statements. we only print on root if USE_PROC_FILES is not set to True
        print_flag = system.comm.rank == 0 or os.environ.get('USE_PROC_FILES')

        prefix = self._solver_info.prefix + self.SOLVER

        # Solver terminated early because a Nan in the norm doesn't satisfy the while-loop
        # conditionals.
        if np.isinf(norm) or np.isnan(norm):
            msg = "Solver '{}' on system '{}': residuals contain 'inf' or 'NaN' after {} " + \
                  "iterations."
            if iprint > -1 and print_flag:
                print(
                    prefix +
                    msg.format(self.SOLVER, system.pathname, self._iter_count))

            # Raise AnalysisError if requested.
            if self.options['err_on_non_converge']:
                raise AnalysisError(
                    msg.format(self.SOLVER, system.pathname, self._iter_count))

        # Solver hit maxiter without meeting desired tolerances.
        # Or solver stalled.
        elif (norm > atol and norm / norm0 > rtol) or stalled:

            if stalled:
                msg = "Solver '{}' on system '{}' stalled after {} iterations."
            else:
                msg = "Solver '{}' on system '{}' failed to converge in {} iterations."

            if iprint > -1 and print_flag:
                print(
                    prefix +
                    msg.format(self.SOLVER, system.pathname, self._iter_count))

            # Raise AnalysisError if requested.
            if self.options['err_on_non_converge']:
                raise AnalysisError(
                    msg.format(self.SOLVER, system.pathname, self._iter_count))

        # Solver converged
        elif iprint == 1 and print_flag:
            print(prefix +
                  ' Converged in {} iterations'.format(self._iter_count))
        elif iprint == 2 and print_flag:
            print(prefix + ' Converged')
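
A sketch of the solver options read at the top of `_solve`, shown here on a NewtonSolver; the values are illustrative:

import openmdao.api as om

newton = om.NewtonSolver(solve_subsystems=False)
newton.options['maxiter'] = 20
newton.options['atol'] = 1e-10
newton.options['rtol'] = 1e-10
newton.options['stall_limit'] = 3             # flag a stall after 3 non-improving iterations
newton.options['stall_tol'] = 1e-12
newton.options['err_on_non_converge'] = True  # raise AnalysisError instead of only printing
newton.linesearch = om.BoundsEnforceLS()
model.nonlinear_solver = newton               # attach to an existing Group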
Example #16
def trace_mpi(fname='mpi_trace', skip=(), flush=True):
    """
    Dump traces to the specified filename<.rank> showing openmdao and mpi/petsc calls.

    Parameters
    ----------
    fname : str
        Name of the trace file(s).  <.rank> will be appended to the name on each rank.
    skip : set-like
        Collection of function names to skip.
    flush : bool
        If True, flush print buffer after every print call.
    """
    if MPI is None:
        issue_warning("MPI is not active.  Trace aborted.", category=MPIWarning)
        return
    if sys.getprofile() is not None:
        raise RuntimeError("another profile function is already active.")

    my_fname = fname + '.' + str(MPI.COMM_WORLD.rank)

    outfile = open(my_fname, 'w')

    stack = []

    _c_map = {
        'c_call': '(c) -->',
        'c_return': '(c) <--',
        'c_exception': '(c_exception)',
    }


    def _print_c_func(frame, arg, typestr):
        s = str(arg)
        if 'mpi4py' in s or 'petsc4py' in s:
            c = arg.__self__.__class__
            print('   ' * len(stack), typestr, "%s.%s.%s" %
                    (c.__module__, c.__name__, arg.__name__),
                    "%s:%d" % (frame.f_code.co_filename, frame.f_code.co_firstlineno),
                    file=outfile, flush=True)


    def _mpi_trace_callback(frame, event, arg):
        pname = None
        commsize = ''
        if event == 'call':
            if 'openmdao' in frame.f_code.co_filename:
                if frame.f_code.co_name in skip:
                    return
                if 'self' in frame.f_locals:
                    try:
                        pname = frame.f_locals['self'].msginfo
                    except:
                        pass
                    try:
                        commsize = frame.f_locals['self'].comm.size
                    except:
                        pass
                if pname is not None:
                    if not stack or pname != stack[-1][0]:
                        stack.append([pname, 1])
                        print('   ' * len(stack), commsize, pname, file=outfile, flush=flush)
                    else:
                        stack[-1][1] += 1
                print('   ' * len(stack), '-->', frame.f_code.co_name, "%s:%d" %
                      (frame.f_code.co_filename, frame.f_code.co_firstlineno),
                      file=outfile, flush=flush)
        elif event == 'return':
            if 'openmdao' in frame.f_code.co_filename:
                if frame.f_code.co_name in skip:
                    return
                if 'self' in frame.f_locals:
                    try:
                        pname = frame.f_locals['self'].msginfo
                    except:
                        pass
                    try:
                        commsize = frame.f_locals['self'].comm.size
                    except:
                        pass
                print('   ' * len(stack), '<--', frame.f_code.co_name, "%s:%d" %
                      (frame.f_code.co_filename, frame.f_code.co_firstlineno),
                      file=outfile, flush=flush)
                if pname is not None and stack and pname == stack[-1][0]:
                    stack[-1][1] -= 1
                    if stack[-1][1] < 1:
                        stack.pop()
                        if stack:
                            print('   ' * len(stack), commsize, stack[-1][0], file=outfile,
                                  flush=flush)
        else:
            _print_c_func(frame, arg, _c_map[event])

    sys.setprofile(_mpi_trace_callback)
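
A usage sketch; the import path is an assumption:

from openmdao.devtools.debug import trace_mpi  # assumed location

# Run under MPI, e.g. `mpirun -n 4 python run_script.py`; each rank writes mpi_trace.<rank>.
trace_mpi(fname='mpi_trace', skip={'_mpi_print'}, flush=True)
# ... build and run the Problem; openmdao plus mpi4py/petsc4py calls are traced ...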
Example #17
    def run(self):
        """
        Optimize the problem using selected Scipy optimizer.

        Returns
        -------
        bool
            Failure flag; True if failed to converge, False if successful.
        """
        problem = self._problem()
        opt = self.options['optimizer']
        model = problem.model
        self.iter_count = 0
        self._total_jac = None

        self._check_for_missing_objective()

        # Initial Run
        with RecordingDebugging(self._get_name(), self.iter_count, self) as rec:
            model.run_solve_nonlinear()
            self.iter_count += 1

        self._con_cache = self.get_constraint_values()
        desvar_vals = self.get_design_var_values()
        self._dvlist = list(self._designvars)

        # maxiter and disp get passed into scipy with all the other options.
        if 'maxiter' not in self.opt_settings:  # lets you override the value in options
            self.opt_settings['maxiter'] = self.options['maxiter']
        self.opt_settings['disp'] = self.options['disp']

        # Size Problem
        ndesvar = 0
        for desvar in self._designvars.values():
            size = desvar['global_size'] if desvar['distributed'] else desvar['size']
            ndesvar += size
        x_init = np.empty(ndesvar)

        # Initial Design Vars
        i = 0
        use_bounds = (opt in _bounds_optimizers)
        if use_bounds:
            bounds = []
        else:
            bounds = None

        for name, meta in self._designvars.items():
            size = meta['global_size'] if meta['distributed'] else meta['size']
            x_init[i:i + size] = desvar_vals[name]
            i += size

            # Bounds if our optimizer supports them
            if use_bounds:
                meta_low = meta['lower']
                meta_high = meta['upper']
                for j in range(size):

                    if isinstance(meta_low, np.ndarray):
                        p_low = meta_low[j]
                    else:
                        p_low = meta_low

                    if isinstance(meta_high, np.ndarray):
                        p_high = meta_high[j]
                    else:
                        p_high = meta_high

                    bounds.append((p_low, p_high))

        if use_bounds and (opt in _supports_new_style) and _use_new_style:
            # For 'trust-constr' it is better to use the new type bounds, because it seems to work
            # better (for the current examples in the tests) with the "keep_feasible" option
            try:
                from scipy.optimize import Bounds
                from scipy.optimize._constraints import old_bound_to_new
            except ImportError:
                msg = ('The "trust-constr" optimizer is supported for SciPy 1.1.0 and above. '
                       'The installed version is {}')
                raise ImportError(msg.format(scipy_version))

            # Convert "old-style" bounds to "new_style" bounds
            lower, upper = old_bound_to_new(bounds)  # tuple, tuple
            keep_feasible = self.opt_settings.get('keep_feasible_bounds', True)
            bounds = Bounds(lb=lower, ub=upper, keep_feasible=keep_feasible)

        # Constraints
        constraints = []
        i = 1  # start at 1 since row 0 is the objective.  Constraints start at row 1.
        lin_i = 0  # counter for linear constraint jacobian
        lincons = []  # list of linear constraints
        self._obj_and_nlcons = list(self._objs)

        if opt in _constraint_optimizers:
            for name, meta in self._cons.items():
                if meta['indices'] is not None:
                    meta['size'] = size = meta['indices'].size
                else:
                    size = meta['global_size'] if meta['distributed'] else meta['size']
                upper = meta['upper']
                lower = meta['lower']
                equals = meta['equals']
                if opt in _gradient_optimizers and 'linear' in meta and meta['linear']:
                    lincons.append(name)
                    self._con_idx[name] = lin_i
                    lin_i += size
                else:
                    self._obj_and_nlcons.append(name)
                    self._con_idx[name] = i
                    i += size

                # In scipy constraint optimizers take constraints in two separate formats

                # Type of constraints is list of NonlinearConstraint
                if opt in _supports_new_style and _use_new_style:
                    try:
                        from scipy.optimize import NonlinearConstraint
                    except ImportError:
                        msg = ('The "trust-constr" optimizer is supported for SciPy 1.1.0 and'
                               'above. The installed version is {}')
                        raise ImportError(msg.format(scipy_version))

                    if equals is not None:
                        lb = ub = equals
                    else:
                        lb = lower
                        ub = upper
                    # Loop over every index separately,
                    # because scipy calls each constraint by index.
                    for j in range(size):
                        # Double-sided constraints are accepted by the algorithm
                        args = [name, False, j]
                        # TODO linear constraint if meta['linear']
                        # TODO add option for Hessian
                        con = NonlinearConstraint(
                            fun=signature_extender(WeakMethodWrapper(self, '_con_val_func'),
                                                   args),
                            lb=lb, ub=ub,
                            jac=signature_extender(WeakMethodWrapper(self, '_congradfunc'), args))
                        constraints.append(con)
                else:  # Type of constraints is list of dict
                    # Loop over every index separately,
                    # because scipy calls each constraint by index.
                    for j in range(size):
                        con_dict = {}
                        if meta['equals'] is not None:
                            con_dict['type'] = 'eq'
                        else:
                            con_dict['type'] = 'ineq'
                        con_dict['fun'] = WeakMethodWrapper(self, '_confunc')
                        if opt in _constraint_grad_optimizers:
                            con_dict['jac'] = WeakMethodWrapper(self, '_congradfunc')
                        con_dict['args'] = [name, False, j]
                        constraints.append(con_dict)

                        if isinstance(upper, np.ndarray):
                            upper = upper[j]

                        if isinstance(lower, np.ndarray):
                            lower = lower[j]

                        dblcon = (upper < INF_BOUND) and (lower > -INF_BOUND)

                        # Add extra constraint if double-sided
                        if dblcon:
                            dcon_dict = {}
                            dcon_dict['type'] = 'ineq'
                            dcon_dict['fun'] = WeakMethodWrapper(self, '_confunc')
                            if opt in _constraint_grad_optimizers:
                                dcon_dict['jac'] = WeakMethodWrapper(self, '_congradfunc')
                            dcon_dict['args'] = [name, True, j]
                            constraints.append(dcon_dict)

            # precalculate gradients of linear constraints
            if lincons:
                self._lincongrad_cache = self._compute_totals(of=lincons, wrt=self._dvlist,
                                                              return_format='array')
            else:
                self._lincongrad_cache = None

        # Provide gradients for optimizers that support it
        if opt in _gradient_optimizers:
            jac = self._gradfunc
        else:
            jac = None

        # Hessian calculation method for optimizers, which require it
        if opt in _hessian_optimizers:
            if 'hess' in self.opt_settings:
                hess = self.opt_settings.pop('hess')
            else:
                # Defaults to BFGS, if not in opt_settings
                from scipy.optimize import BFGS
                hess = BFGS()
        else:
            hess = None

        # compute dynamic simul deriv coloring if option is set
        if coloring_mod._use_total_sparsity:
            if ((self._coloring_info['coloring'] is None and self._coloring_info['dynamic'])):
                coloring_mod.dynamic_total_coloring(self, run_model=False,
                                                    fname=self._get_total_coloring_fname())

                # if the improvement wasn't large enough, turn coloring off
                info = self._coloring_info
                if info['coloring'] is not None:
                    pct = info['coloring']._solves_info()[-1]
                    if info['min_improve_pct'] > pct:
                        info['coloring'] = info['static'] = None
                        msg = f"Coloring was deactivated.  Improvement of {pct:.1f}% was less " \
                              f"than min allowed ({info['min_improve_pct']:.1f}%)."
                        issue_warning(msg, prefix=self.msginfo, category=DerivativesWarning)

        # optimize
        try:
            if opt in _optimizers:
                if self._problem().comm.rank != 0:
                    self.opt_settings['disp'] = False

                result = minimize(self._objfunc, x_init,
                                  # args=(),
                                  method=opt,
                                  jac=jac,
                                  hess=hess,
                                  # hessp=None,
                                  bounds=bounds,
                                  constraints=constraints,
                                  tol=self.options['tol'],
                                  # callback=None,
                                  options=self.opt_settings)
            elif opt == 'basinhopping':
                from scipy.optimize import basinhopping

                def fun(x):
                    return self._objfunc(x), jac(x)

                if 'minimizer_kwargs' not in self.opt_settings:
                    self.opt_settings['minimizer_kwargs'] = {"method": "L-BFGS-B", "jac": True}
                self.opt_settings.pop('maxiter')  # It does not have this argument

                def accept_test(f_new, x_new, f_old, x_old):
                    # Used to implement bounds besides the original functionality
                    if bounds is not None:
                        bound_check = all([b[0] <= xi <= b[1] for xi, b in zip(x_new, bounds)])
                        user_test = self.opt_settings.pop('accept_test', None)  # callable
                        # has to satisfy both the bounds and the acceptance test defined by the
                        # user
                        if user_test is not None:
                            test_res = user_test(f_new, x_new, f_old, x_old)
                            if test_res == 'force accept':
                                return test_res
                            else:  # result is boolean
                                return bound_check and test_res
                        else:  # no user acceptance test, check only the bounds
                            return bound_check
                    else:
                        return True

                result = basinhopping(fun, x_init,
                                      accept_test=accept_test,
                                      **self.opt_settings)
            elif opt == 'dual_annealing':
                from scipy.optimize import dual_annealing
                self.opt_settings.pop('disp')  # It does not have this argument
                # There is no "options" param, so "opt_settings" can be used to set the (many)
                # keyword arguments
                result = dual_annealing(self._objfunc,
                                        bounds=bounds,
                                        **self.opt_settings)
            elif opt == 'differential_evolution':
                from scipy.optimize import differential_evolution
                # There is no "options" param, so "opt_settings" can be used to set the (many)
                # keyword arguments
                result = differential_evolution(self._objfunc,
                                                bounds=bounds,
                                                **self.opt_settings)
            elif opt == 'shgo':
                from scipy.optimize import shgo
                kwargs = dict()
                for option in ('minimizer_kwargs', 'sampling_method', 'n', 'iters'):
                    if option in self.opt_settings:
                        kwargs[option] = self.opt_settings[option]
                # Set the Jacobian and the Hessian to the value calculated in OpenMDAO
                if 'minimizer_kwargs' not in kwargs or kwargs['minimizer_kwargs'] is None:
                    kwargs['minimizer_kwargs'] = {}
                kwargs['minimizer_kwargs'].setdefault('jac', jac)
                kwargs['minimizer_kwargs'].setdefault('hess', hess)
                # Objective function tolerance
                self.opt_settings['f_tol'] = self.options['tol']
                result = shgo(self._objfunc,
                              bounds=bounds,
                              constraints=constraints,
                              options=self.opt_settings,
                              **kwargs)
            else:
                msg = 'Optimizer "{}" is not implemented yet. Choose from: {}'
                raise NotImplementedError(msg.format(opt, _all_optimizers))

        # If an exception was swallowed in one of our callbacks, we want to raise it
        # rather than the cryptic message from scipy.
        except Exception as msg:
            if self._exc_info is not None:
                self._reraise()
            else:
                raise

        if self._exc_info is not None:
            self._reraise()

        self.result = result

        if hasattr(result, 'success'):
            self.fail = False if result.success else True
            if self.fail:
                if self._problem().comm.rank == 0:
                    print('Optimization FAILED.')
                    print(result.message)
                    print('-' * 35)

            elif self.options['disp']:
                if self._problem().comm.rank == 0:
                    print('Optimization Complete')
                    print('-' * 35)
        else:
            self.fail = True  # It is not known, so the worst option is assumed
            if self._problem().comm.rank == 0:
                print('Optimization Complete (success not known)')
                print(result.message)
                print('-' * 35)

        return self.fail
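
A minimal driver setup that exercises `run()` above; the unconstrained paraboloid model and all values are illustrative:

import openmdao.api as om

prob = om.Problem()
prob.model.add_subsystem('comp',
                         om.ExecComp('f = (x - 3.0)**2 + x*y + (y + 4.0)**2 - 3.0'),
                         promotes=['*'])
prob.model.add_design_var('x', lower=-50.0, upper=50.0)
prob.model.add_design_var('y', lower=-50.0, upper=50.0)
prob.model.add_objective('f')

prob.driver = om.ScipyOptimizeDriver(optimizer='SLSQP', maxiter=100, tol=1e-8, disp=True)

prob.setup()
prob.set_val('x', 3.0)
prob.set_val('y', -4.0)
failed = prob.run_driver()  # returns the `fail` flag computed at the end of run()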
Example #18
def _get_viewer_data(data_source, case_id=None):
    """
    Get the data needed by the N2 viewer as a dictionary.

    Parameters
    ----------
    data_source : <Problem> or <Group> or str
        A Problem or Group or case recorder filename containing the model or model data.
        If the case recorder file from a parallel run has separate metadata, the
        filenames can be specified with a comma, e.g.: case.sql_0,case.sql_meta

    case_id : int or str or None
        Case name or index of case in SQL file.

    Returns
    -------
    dict
        A dictionary containing information about the model for use by the viewer.
    """
    if isinstance(data_source, Problem):
        root_group = data_source.model

        if not isinstance(root_group, Group):
            issue_warning(
                "The model is not a Group, viewer data is unavailable.")
            return {}

        driver = data_source.driver
        driver_name = driver.__class__.__name__
        driver_type = 'doe' if isinstance(driver, DOEDriver) else 'optimization'

        driver_options = {
            key: _serialize_single_option(driver.options._dict[key])
            for key in driver.options
        }

        if driver_type == 'optimization' and hasattr(driver, 'opt_settings'):
            driver_opt_settings = driver.opt_settings
        else:
            driver_opt_settings = None

    elif isinstance(data_source, Group):
        if not data_source.pathname:  # root group
            root_group = data_source
            driver_name = None
            driver_type = None
            driver_options = None
            driver_opt_settings = None
        else:
            # this function only makes sense when it is at the root
            issue_warning(
                f"Viewer data is not available for sub-Group '{data_source.pathname}'."
            )
            return {}

    elif isinstance(data_source, str):
        if ',' in data_source:
            filenames = data_source.split(',')
            cr = CaseReader(filenames[0], metadata_filename=filenames[1])
        else:
            cr = CaseReader(data_source)

        data_dict = cr.problem_metadata

        if case_id is not None:
            cases = cr.get_case(case_id)
            print(f"Using source: {cases.source}\nCase: {cases.name}")

            def recurse(children, stack):
                for child in children:
                    # if 'val' in child
                    if child['type'] == 'subsystem':
                        if child['name'] != '_auto_ivc':
                            stack.append(child['name'])
                            recurse(child['children'], stack)
                            stack.pop()
                    elif child['type'] == 'input':
                        if cases.inputs is None:
                            child['val'] = 'N/A'
                        else:
                            path = child['name'] if not stack else '.'.join(
                                stack + [child['name']])
                            child['val'] = cases.inputs[path]
                    elif child['type'] == 'output':
                        if cases.outputs is None:
                            child['val'] = 'N/A'
                        else:
                            path = child['name'] if not stack else '.'.join(
                                stack + [child['name']])
                            try:
                                child['val'] = cases.outputs[path]
                            except KeyError:
                                child['val'] = 'N/A'

            recurse(data_dict['tree']['children'], [])

        # Delete the variables key since it's not used in N2
        if 'variables' in data_dict:
            del data_dict['variables']

        # Older recordings might not have this.
        if 'md5_hash' not in data_dict:
            data_dict['md5_hash'] = None

        return data_dict

    else:
        raise TypeError(
            f"Viewer data is not available for '{data_source}'."
            "The source must be a Problem, model or the filename of a recording."
        )

    data_dict = {}
    data_dict['tree'] = _get_tree_dict(root_group)
    data_dict['md5_hash'] = root_group._generate_md5_hash()

    connections_list = []

    sys_idx = {}  # map of pathnames to index of pathname in list (systems in cycles only)

    G = root_group.compute_sys_graph(comps_only=True)

    scc = nx.strongly_connected_components(G)

    strongdict = {}

    for i, strong_comp in enumerate(scc):
        for c in strong_comp:
            strongdict[c] = i  # associate each comp with a strongly connected component

        if len(strong_comp) > 1:
            # these IDs are only used when back edges are present
            for name in strong_comp:
                sys_idx[name] = len(sys_idx)

    comp_orders = {
        name: i
        for i, name in enumerate(root_group._ordered_comp_name_iter())
    }

    # 1 is added to the indices of all edges in the matrix so that we can use 0 entries to
    # indicate that there is no connection.
    matrix = np.zeros((len(comp_orders), len(comp_orders)), dtype=np.int32)
    edge_ids = []
    for i, edge in enumerate(G.edges()):
        src, tgt = edge
        if strongdict[src] == strongdict[tgt]:
            matrix[comp_orders[src], comp_orders[tgt]] = i + 1  # bump edge index by 1
            edge_ids.append((sys_idx[src], sys_idx[tgt]))
        else:
            edge_ids.append(None)

    for edge_i, (src, tgt) in enumerate(G.edges()):
        if strongdict[src] == strongdict[tgt]:
            start = comp_orders[src]
            end = comp_orders[tgt]
            # get a view here so we can remove this edge from submat temporarily to eliminate
            # an 'if' check inside the nested list comprehension for edges_list
            rem = matrix[start:start + 1, end:end + 1]
            rem[0, 0] = 0

            if end < start:
                start, end = end, start

            submat = matrix[start:end + 1, start:end + 1]
            nz = submat[submat > 0]

            rem[0, 0] = edge_i + 1  # put removed edge back

            if nz.size > 1:
                nz -= 1  # convert back to correct edge index
                edges_list = [edge_ids[i] for i in nz]
                for vsrc, vtgtlist in G.get_edge_data(src, tgt)['conns'].items():
                    for vtgt in vtgtlist:
                        connections_list.append({
                            'src': vsrc,
                            'tgt': vtgt,
                            'cycle_arrows': edges_list
                        })
                continue

        for vsrc, vtgtlist in G.get_edge_data(src, tgt)['conns'].items():
            for vtgt in vtgtlist:
                connections_list.append({'src': vsrc, 'tgt': vtgt})

    data_dict['sys_pathnames_list'] = list(sys_idx)
    data_dict['connections_list'] = connections_list
    data_dict['abs2prom'] = root_group._var_abs2prom

    data_dict['driver'] = {
        'name': driver_name,
        'type': driver_type,
        'options': driver_options,
        'opt_settings': driver_opt_settings
    }
    data_dict['design_vars'] = root_group.get_design_vars(use_prom_ivc=False)
    data_dict['responses'] = root_group.get_responses()

    data_dict['declare_partials_list'] = _get_declare_partials(root_group)

    return data_dict
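
The public entry point that consumes this viewer data is the N2 diagram generator; a short sketch:

import openmdao.api as om

# From a live, set-up Problem:
om.n2(prob, outfile='n2.html', show_browser=False)

# Or from a case recorder file produced by an earlier run:
om.n2('cases.sql', outfile='n2_from_case.html', show_browser=False)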
Example #19
    def train(self, x, y):
        """
        Train the surrogate model with the given set of inputs and outputs.

        Parameters
        ----------
        x : array-like
            Training input locations.
        y : array-like
            Model responses at given inputs.
        """
        super().train(x, y)
        x, y = np.atleast_2d(x, y)

        cache = self.options['training_cache']

        if cache:
            data_hash = md5()  # nosec: hashed content not sensitive
            data_hash.update(x.flatten())
            data_hash.update(y.flatten())
            training_data_hash = data_hash.hexdigest()
            cache_hash = ''

        if cache and os.path.exists(cache):

            with np.load(cache, allow_pickle=False) as data:
                try:
                    self.n_samples = data['n_samples']
                    self.n_dims = data['n_dims']
                    self.X = np.array(data['X'])
                    self.Y = np.array(data['Y'])
                    self.X_mean = np.array(data['X_mean'])
                    self.Y_mean = np.array(data['Y_mean'])
                    self.X_std = np.array(data['X_std'])
                    self.Y_std = np.array(data['Y_std'])
                    self.thetas = np.array(data['thetas'])
                    self.alpha = np.array(data['alpha'])
                    self.U = np.array(data['U'])
                    self.S_inv = np.array(data['S_inv'])
                    self.Vh = np.array(data['Vh'])
                    self.sigma2 = np.array(data['sigma2'])
                    cache_hash = str(data['hash'])
                except KeyError as e:
                    msg = (
                        "An error occurred while loading KrigingSurrogate Cache: %s. "
                        "Ignoring and training from scratch.")
                    issue_warning(msg % str(e), category=CacheWarning)

            # if the loaded data passes the hash check with the current training data, we exit
            if cache_hash == training_data_hash:
                return

        # Training fallthrough
        self.n_samples, self.n_dims = x.shape

        if self.n_samples <= 1:
            raise ValueError(
                'KrigingSurrogate requires at least 2 training points.')

        # Normalize the data
        X_mean = np.mean(x, axis=0)
        X_std = np.std(x, axis=0)
        Y_mean = np.mean(y, axis=0)
        Y_std = np.std(y, axis=0)

        X_std[X_std == 0.] = 1.
        Y_std[Y_std == 0.] = 1.

        X = (x - X_mean) / X_std
        Y = (y - Y_mean) / Y_std

        self.X = X
        self.Y = Y
        self.X_mean, self.X_std = X_mean, X_std
        self.Y_mean, self.Y_std = Y_mean, Y_std

        def _calcll(thetas):
            """Calculate loglike (callback function)."""
            loglike = self._calculate_reduced_likelihood_params(
                np.exp(thetas))[0]
            return -loglike

        bounds = [(np.log(1e-5), np.log(1e5)) for _ in range(self.n_dims)]

        options = {'eps': 1e-3}

        if cache:
            # Enable logging since we expect the model to take a long time to train
            options['disp'] = True
            options['iprint'] = 2

        optResult = minimize(_calcll,
                             1e-1 * np.ones(self.n_dims),
                             method='slsqp',
                             options=options,
                             bounds=bounds)

        if not optResult.success:
            raise ValueError(
                f'Kriging Hyper-parameter optimization failed: {optResult.message}'
            )

        self.thetas = np.exp(optResult.x)
        _, params = self._calculate_reduced_likelihood_params()
        self.alpha = params['alpha']
        self.U = params['U']
        self.S_inv = params['S_inv']
        self.Vh = params['Vh']
        self.sigma2 = params['sigma2']

        # Save data to cache if specified
        if cache:
            data = {
                'n_samples': self.n_samples,
                'n_dims': self.n_dims,
                'X': self.X,
                'Y': self.Y,
                'X_mean': self.X_mean,
                'Y_mean': self.Y_mean,
                'X_std': self.X_std,
                'Y_std': self.Y_std,
                'thetas': self.thetas,
                'alpha': self.alpha,
                'U': self.U,
                'S_inv': self.S_inv,
                'Vh': self.Vh,
                'sigma2': self.sigma2,
                'hash': training_data_hash
            }

            if not os.path.exists(cache) or cache_hash != training_data_hash:
                with open(cache, 'wb') as f:
                    np.savez_compressed(f, **data)
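
A hedged usage sketch of the training cache exercised above, assuming the standard MetaModelUnStructuredComp / KrigingSurrogate API; the cache filename is arbitrary. The first run trains the surrogate and writes the .npz file, and later runs with identical training data reload the hyperparameters instead of re-optimizing them.

import numpy as np
import openmdao.api as om

x_train = np.linspace(0.0, 10.0, 25)

mm = om.MetaModelUnStructuredComp()
mm.add_input('x', 0.0, training_data=x_train)
mm.add_output('y', 0.0, training_data=np.sin(x_train),
              surrogate=om.KrigingSurrogate(training_cache='kriging_cache.npz'))

prob = om.Problem()
prob.model.add_subsystem('mm', mm)
prob.setup()

prob.set_val('mm.x', 2.5)
prob.run_model()             # training happens on the first execution
print(prob.get_val('mm.y'))  # should be close to sin(2.5)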
Example #20
    def list_inputs(self,
                    val=True,
                    prom_name=False,
                    units=False,
                    shape=False,
                    desc=False,
                    hierarchical=True,
                    print_arrays=False,
                    tags=None,
                    includes=None,
                    excludes=None,
                    out_stream=_DEFAULT_OUT_STREAM,
                    values=None):
        """
        Return and optionally log a list of input names and other optional information.

        Parameters
        ----------
        val : bool, optional
            When True, display/return input values. Default is True.
        prom_name : bool, optional
            When True, display/return the promoted name of the variable.
            Default is False.
        units : bool, optional
            When True, display/return units. Default is False.
        shape : bool, optional
            When True, display/return the shape of the value. Default is False.
        desc : bool, optional
            When True, display/return description. Default is False.
        hierarchical : bool, optional
            When True, human readable output shows variables in hierarchical format.
        print_arrays : bool, optional
            When False, in the columnar display, just display norm of any ndarrays with size > 1.
            The norm is surrounded by vertical bars to indicate that it is a norm.
            When True, also display full values of the ndarray below the row. Format is affected
            by the values set with numpy.set_printoptions.
            Default is False.
        tags : str or list of strs
            User defined tags that can be used to filter what gets listed. Only inputs with the
            given tags will be listed.
            Default is None, which means there will be no filtering based on tags.
        includes : str, iter of str, or None
            Glob patterns for pathnames to include in the check. Default is None, which
            includes all.
        excludes : str, iter of str, or None
            Glob patterns for pathnames to exclude from the check. Default is None, which
            excludes nothing.
        out_stream : file-like object
            Where to send human readable output. Default is sys.stdout.
            Set to None to suppress.
        values : bool, optional
            This argument has been deprecated and will be removed in 4.0.

        Returns
        -------
        list
            List of input names and other optional information about those inputs.
        """
        meta = self._abs2meta
        inputs = []

        if values is not None:
            issue_warning("'values' is deprecated and will be removed in 4.0. "
                          "Use 'val' instead.")
        else:
            values = val

        if isinstance(includes, str):
            includes = [
                includes,
            ]

        if isinstance(excludes, str):
            excludes = [
                excludes,
            ]

        if self.inputs is not None:
            for var_name in self.inputs.absolute_names():
                # Filter based on tags
                if tags and not (make_set(tags)
                                 & make_set(meta[var_name]['tags'])):
                    continue

                var_name_prom = self._abs2prom['input'][var_name]

                if not match_prom_or_abs(var_name, var_name_prom, includes,
                                         excludes):
                    continue

                val = self.inputs[var_name]

                var_meta = {}
                if values:
                    var_meta['val'] = val
                    var_meta['value'] = val
                if prom_name:
                    var_meta['prom_name'] = var_name_prom
                if units:
                    var_meta['units'] = meta[var_name]['units']
                if shape:
                    var_meta['shape'] = val.shape
                if desc:
                    var_meta['desc'] = meta[var_name]['desc']

                inputs.append((var_name, var_meta))

        if out_stream:
            if self.inputs:
                self._write_table('input', inputs, hierarchical, print_arrays,
                                  out_stream)
            else:
                ostream = sys.stdout if out_stream is _DEFAULT_OUT_STREAM else out_stream
                ostream.write(
                    'WARNING: Inputs not recorded. Make sure your recording ' +
                    'settings have record_inputs set to True\n')

        return inputs
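
A hedged sketch of calling list_inputs on a recorded case, assuming the usual CaseReader workflow; 'cases.sql' is a hypothetical recording file.

import openmdao.api as om

cr = om.CaseReader('cases.sql')
case = cr.get_case(cr.list_cases()[-1])   # most recent recorded case

# prints a table to stdout and also returns (name, metadata) pairs
inputs = case.list_inputs(val=True, units=True, shape=True, prom_name=True)
for name, meta in inputs:
    print(name, meta['prom_name'], meta['val'], meta['units'])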
Example #21
    def _initialize_database(self, comm):
        """
        Initialize the database.

        Parameters
        ----------
        comm : MPI.Comm or <FakeComm> or None
            The communicator for the recorder (should be the comm for the Problem).
        """
        filepath = None

        if MPI and comm and comm.size > 1:
            if self._record_on_proc:
                if not self._parallel:
                    # recording only on this proc
                    filepath = self._filepath
                else:
                    # recording on multiple procs, so a separate file for each recording proc
                    # plus a file for the common metadata, written by the lowest recording rank
                    rank = comm.rank
                    filepath = f"{self._filepath}_{rank}"
                    print("Note: SqliteRecorder is running on multiple processors. "
                          f"Cases from rank {rank} are being written to {filepath}.")
                    if rank == min(self._recording_ranks):
                        metadata_filepath = f'{self._filepath}_meta'
                        print("Note: Metadata is being recorded separately as "
                              f"{metadata_filepath}.")
                        try:
                            os.remove(metadata_filepath)
                            issue_warning("The existing case recorder metadata file, "
                                          f"{metadata_filepath}, is being overwritten.",
                                          category=UserWarning)
                        except OSError:
                            pass
                        self.metadata_connection = sqlite3.connect(metadata_filepath)
                    else:
                        self._record_metadata = False
        else:
            # no MPI or comm size == 1
            filepath = self._filepath

        if filepath:
            try:
                os.remove(filepath)
                issue_warning(f'The existing case recorder file, {filepath},'
                              ' is being overwritten.', category=UserWarning)
            except OSError:
                pass

            self.connection = sqlite3.connect(filepath)
            if self._record_metadata and self.metadata_connection is None:
                self.metadata_connection = self.connection

            with self.connection as c:
                # used to keep track of the order of the case records across all case tables
                c.execute("CREATE TABLE global_iterations(id INTEGER PRIMARY KEY, "
                          "record_type TEXT, rowid INT, source TEXT)")

                c.execute("CREATE TABLE driver_iterations(id INTEGER PRIMARY KEY, "
                          "counter INT, iteration_coordinate TEXT, timestamp REAL, "
                          "success INT, msg TEXT, inputs TEXT, outputs TEXT, residuals TEXT)")
                c.execute("CREATE TABLE driver_derivatives(id INTEGER PRIMARY KEY, "
                          "counter INT, iteration_coordinate TEXT, timestamp REAL, "
                          "success INT, msg TEXT, derivatives BLOB)")
                c.execute("CREATE INDEX driv_iter_ind on driver_iterations(iteration_coordinate)")

                c.execute("CREATE TABLE problem_cases(id INTEGER PRIMARY KEY, "
                          "counter INT, case_name TEXT, timestamp REAL, "
                          "success INT, msg TEXT, inputs TEXT, outputs TEXT, residuals TEXT, "
                          "jacobian BLOB, abs_err REAL, rel_err REAL)")
                c.execute("CREATE INDEX prob_name_ind on problem_cases(case_name)")

                c.execute("CREATE TABLE system_iterations(id INTEGER PRIMARY KEY, "
                          "counter INT, iteration_coordinate TEXT, timestamp REAL, "
                          "success INT, msg TEXT, inputs TEXT, outputs TEXT, residuals TEXT)")
                c.execute("CREATE INDEX sys_iter_ind on system_iterations(iteration_coordinate)")

                c.execute("CREATE TABLE solver_iterations(id INTEGER PRIMARY KEY, "
                          "counter INT, iteration_coordinate TEXT, timestamp REAL, "
                          "success INT, msg TEXT, abs_err REAL, rel_err REAL, "
                          "solver_inputs TEXT, solver_output TEXT, solver_residuals TEXT)")
                c.execute("CREATE INDEX solv_iter_ind on solver_iterations(iteration_coordinate)")

            if self._record_metadata:
                with self.metadata_connection as m:
                    m.execute("CREATE TABLE metadata(format_version INT, openmdao_version TEXT, "
                              "abs2prom BLOB, prom2abs BLOB, abs2meta BLOB, var_settings BLOB,"
                              "conns BLOB)")
                    m.execute("INSERT INTO metadata(format_version, openmdao_version, abs2prom,"
                              " prom2abs) VALUES(?,?,?,?)", (format_version, openmdao_version,
                                                             None, None))
                    m.execute("CREATE TABLE driver_metadata(id TEXT PRIMARY KEY, "
                              "model_viewer_data TEXT)")
                    m.execute("CREATE TABLE system_metadata(id TEXT PRIMARY KEY, "
                              "scaling_factors BLOB, component_metadata BLOB)")
                    m.execute("CREATE TABLE solver_metadata(id TEXT PRIMARY KEY, "
                              "solver_options BLOB, solver_class TEXT)")

        self._database_initialized = True
        if MPI and comm and comm.size > 1:
            comm.barrier()
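
A hedged sketch of attaching an SqliteRecorder, which is what triggers the database initialization above; under MPI, each recording rank writes '<file>_<rank>' plus a shared '<file>_meta' file, as handled in the code. The model and filename are made up for illustration.

import openmdao.api as om

prob = om.Problem()
prob.model.add_subsystem('comp', om.ExecComp('y = 2.0 * x'), promotes=['*'])

recorder = om.SqliteRecorder('cases.sql')   # hypothetical filename
prob.driver.add_recorder(recorder)          # record driver iterations
prob.model.add_recorder(recorder)           # also record system iterations

prob.setup()
prob.set_val('x', 3.0)
prob.run_driver()
prob.cleanup()   # closes the recorder so the database is flushed to disk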
Example #22
    def _setup_solvers(self, system, depth):
        """
        Assign system instance, set depth, and optionally perform setup.

        Parameters
        ----------
        system : <System>
            Pointer to the owning system.
        depth : int
            Depth of the current system (already incremented).
        """
        super()._setup_solvers(system, depth)
        self._recompute_jacobian = True
        self._computed_jacobians = 0
        iproc = system.comm.rank

        rank = MPI.COMM_WORLD.rank if MPI is not None else 0
        self._disallow_discrete_outputs()

        if self.linear_solver is not None:
            self.linear_solver._setup_solvers(system, self._depth + 1)
        else:
            self.linear_solver = system.linear_solver

        if self.linesearch is not None:
            self.linesearch._setup_solvers(system, self._depth + 1)
            self.linesearch._do_subsolve = True

        # this check is incorrect (for broyden) and needs to be done differently.
        # self._disallow_distrib_solve()

        states = self.options['state_vars']
        prom2abs = system._var_allprocs_prom2abs_list['output']

        # Check names of states.
        bad_names = [name for name in states if name not in prom2abs]
        if len(bad_names) > 0:
            msg = "{}: The following variable names were not found: {}"
            raise ValueError(msg.format(self.msginfo, ', '.join(bad_names)))

        # Size linear system
        if len(states) > 0:
            # User has specified states, so we must size them.
            n = 0
            meta = system._var_allprocs_abs2meta['output']

            for i, name in enumerate(states):
                size = meta[prom2abs[name][0]]['global_size']
                self._idx[name] = (n, n + size)
                n += size
        else:
            # Full system size.
            self._full_inverse = True
            n = np.sum(system._owned_sizes)

        self.size = n
        self.Gm = np.empty((n, n))
        self.xm = np.empty((n, ))
        self.fxm = np.empty((n, ))
        self.delta_xm = None
        self.delta_fxm = None

        if self._full_inverse:

            # Can only use DirectSolver here.
            from openmdao.solvers.linear.direct import DirectSolver
            if not isinstance(self.linear_solver, DirectSolver):
                msg = "{}: Linear solver must be DirectSolver when solving the full model."
                raise ValueError(msg.format(self.msginfo))

            return

        # Always look for states that aren't being solved so we can warn the user.
        def sys_recurse(system, all_states):
            subs = system._subsystems_myproc
            if len(subs) == 0:

                # Skip implicit components that appear to solve themselves.
                from openmdao.core.implicitcomponent import ImplicitComponent
                if overrides_method('solve_nonlinear', system,
                                    ImplicitComponent):
                    return

                all_states.extend(system._list_states())

            else:
                for subsys in subs:
                    sub_nl = subsys.nonlinear_solver
                    if sub_nl and sub_nl.supports['implicit_components']:
                        continue
                    sys_recurse(subsys, all_states)

        all_states = []
        sys_recurse(system, all_states)
        all_states = [
            system._var_abs2prom['output'][name] for name in all_states
        ]

        missing = set(all_states).difference(states)
        if len(missing) > 0:
            msg = "The following states are not covered by a solver, and may have been " + \
                  "omitted from the BroydenSolver 'state_vars': "
            msg += ', '.join(sorted(missing))
            issue_warning(msg, category=SetupWarning)
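
A hedged, self-contained sketch of configuring a BroydenSolver with an explicit 'state_vars' list, which is the sizing path shown above; the ScalarImplicit component is made up for illustration, and FD partials are declared so the companion DirectSolver can form a Jacobian.

import openmdao.api as om


class ScalarImplicit(om.ImplicitComponent):
    """Single state x satisfying the residual x**2 - a = 0 (no internal solve)."""

    def setup(self):
        self.add_input('a', val=4.0)
        self.add_output('x', val=1.0)

    def setup_partials(self):
        self.declare_partials('x', ['x', 'a'], method='fd')

    def apply_nonlinear(self, inputs, outputs, residuals):
        residuals['x'] = outputs['x'] ** 2 - inputs['a']


prob = om.Problem()
prob.model.add_subsystem('comp', ScalarImplicit(), promotes=['*'])

bd = prob.model.nonlinear_solver = om.BroydenSolver()
bd.options['state_vars'] = ['x']   # promoted names of the states Broyden should drive
bd.options['maxiter'] = 20
bd.linear_solver = om.DirectSolver()

prob.setup()
prob.run_model()
print(prob.get_val('x'))           # should be close to 2.0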
Example #23
    def list_outputs(self,
                     explicit=True,
                     implicit=True,
                     val=True,
                     prom_name=False,
                     residuals=False,
                     residuals_tol=None,
                     units=False,
                     shape=False,
                     bounds=False,
                     scaling=False,
                     desc=False,
                     hierarchical=True,
                     print_arrays=False,
                     tags=None,
                     includes=None,
                     excludes=None,
                     list_autoivcs=False,
                     out_stream=_DEFAULT_OUT_STREAM,
                     values=None):
        """
        Return and optionally log a list of output names and other optional information.

        Parameters
        ----------
        explicit : bool, optional
            Include outputs from explicit components. Default is True.
        implicit : bool, optional
            Include outputs from implicit components. Default is True.
        val : bool, optional
            When True, display/return output values. Default is True.
        prom_name : bool, optional
            When True, display/return the promoted name of the variable.
            Default is False.
        residuals : bool, optional
            When True, display/return residual values. Default is False.
        residuals_tol : float, optional
            If set, limits the output of list_outputs to only variables where
            the norm of the resids array is greater than the given 'residuals_tol'.
            Default is None.
        units : bool, optional
            When True, display/return units. Default is False.
        shape : bool, optional
            When True, display/return the shape of the value. Default is False.
        bounds : bool, optional
            When True, display/return bounds (lower and upper). Default is False.
        scaling : bool, optional
            When True, display/return scaling (ref, ref0, and res_ref). Default is False.
        desc : bool, optional
            When True, display/return description. Default is False.
        hierarchical : bool, optional
            When True, human readable output shows variables in hierarchical format.
        print_arrays : bool, optional
            When False, in the columnar display, just display norm of any ndarrays with size > 1.
            The norm is surrounded by vertical bars to indicate that it is a norm.
            When True, also display full values of the ndarray below the row. Format is affected
            by the values set with numpy.set_printoptions.
            Default is False.
        tags : str or list of strs
            User defined tags that can be used to filter what gets listed. Only outputs with the
            given tags will be listed.
            Default is None, which means there will be no filtering based on tags.
        includes : str, iter of str, or None
            Glob patterns for pathnames to include in the check. Default is None, which
            includes all.
        excludes : str, iter of str, or None
            Glob patterns for pathnames to exclude from the check. Default is None, which
            excludes nothing.
        list_autoivcs : bool
            If True, include auto_ivc outputs in the listing.  Defaults to False.
        out_stream : file-like
            Where to send human readable output. Default is sys.stdout.
            Set to None to suppress.
        values : bool, optional
            This argument has been deprecated and will be removed in 4.0.

        Returns
        -------
        list
            List of output names and other optional information about those outputs.
        """
        meta = self._abs2meta
        expl_outputs = []
        impl_outputs = []

        if values is not None:
            issue_warning("'values' is deprecated and will be removed in 4.0. "
                          "Use 'val' instead.")
        else:
            values = val

        if isinstance(includes, str):
            includes = [
                includes,
            ]

        if isinstance(excludes, str):
            excludes = [
                excludes,
            ]

        for var_name in self.outputs.absolute_names():
            if not list_autoivcs and var_name.startswith('_auto_ivc.'):
                continue

            # Filter based on tags
            if tags and not (make_set(tags)
                             & make_set(meta[var_name]['tags'])):
                continue

            var_name_prom = self._abs2prom['output'][var_name]

            if not match_prom_or_abs(var_name, var_name_prom, includes,
                                     excludes):
                continue

            # check if residuals were recorded, skip if within specified tolerance
            if residuals and self.residuals and var_name in self.residuals.absolute_names():
                resids = self.residuals[var_name]
                if residuals_tol and np.linalg.norm(resids) < residuals_tol:
                    continue
            else:
                resids = 'Not Recorded'

            val = self.outputs[var_name]

            var_meta = {}
            if values:
                var_meta['val'] = val
                var_meta['value'] = val
            if prom_name:
                var_meta['prom_name'] = var_name_prom
            if residuals:
                var_meta['resids'] = resids
            if units:
                var_meta['units'] = meta[var_name]['units']
            if shape:
                var_meta['shape'] = val.shape
            if bounds:
                var_meta['lower'] = meta[var_name]['lower']
                var_meta['upper'] = meta[var_name]['upper']
            if scaling:
                var_meta['ref'] = meta[var_name]['ref']
                var_meta['ref0'] = meta[var_name]['ref0']
                var_meta['res_ref'] = meta[var_name]['res_ref']
            if desc:
                var_meta['desc'] = meta[var_name]['desc']
            if meta[var_name]['explicit']:
                expl_outputs.append((var_name, var_meta))
            else:
                impl_outputs.append((var_name, var_meta))

        if out_stream:
            if not self.outputs:
                ostream = sys.stdout if out_stream is _DEFAULT_OUT_STREAM else out_stream
                ostream.write(
                    'WARNING: Outputs not recorded. Make sure your recording '
                    + 'settings have record_outputs set to True\n')
            if explicit:
                self._write_table('explicit', expl_outputs, hierarchical,
                                  print_arrays, out_stream)
            if implicit:
                self._write_table('implicit', impl_outputs, hierarchical,
                                  print_arrays, out_stream)

        if explicit and implicit:
            return expl_outputs + impl_outputs
        elif explicit:
            return expl_outputs
        elif implicit:
            return impl_outputs
        else:
            raise RuntimeError(
                'You have excluded both Explicit and Implicit components.')
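
A hedged sketch of listing recorded outputs with residual filtering and bounds, again assuming the CaseReader workflow and a hypothetical 'cases.sql'.

import openmdao.api as om

cr = om.CaseReader('cases.sql')
case = cr.get_case(cr.list_cases()[-1])

# only keep outputs whose residual norm exceeds 1e-6; include bounds and units
outputs = case.list_outputs(residuals=True, residuals_tol=1e-6,
                            bounds=True, units=True, prom_name=True)
for name, meta in outputs:
    print(name, meta['resids'], meta.get('lower'), meta.get('upper'))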
Example #24
    def _setup_partials(self):
        """
        Process all partials and approximations that the user declared.

        Metamodel needs to declare its partials after inputs and outputs are known.
        """
        super()._setup_partials()

        vec_size = self.options['vec_size']
        if vec_size > 1:
            vec_arange = np.arange(vec_size)

            # Sparse specification of partials for vectorized models.
            for wrt, n_wrt in self._surrogate_input_names:
                for of, shape_of in self._surrogate_output_names:
                    n_of = shape_to_len(shape_of)
                    rows = np.repeat(np.arange(n_of), n_wrt)
                    cols = np.tile(np.arange(n_wrt), n_of)
                    repeat = np.repeat(vec_arange, len(rows))
                    rows = np.tile(rows, vec_size) + repeat * n_of
                    cols = np.tile(cols, vec_size) + repeat * n_wrt

                    dct = {
                        'rows': rows,
                        'cols': cols,
                        'dependent': True,
                    }
                    self._declare_partials(of=of, wrt=wrt, dct=dct)
        else:
            dct = {
                'val': None,
                'dependent': True,
            }
            # Dense specification of partials for non-vectorized models.
            self._declare_partials(
                of=tuple([name[0] for name in self._surrogate_output_names]),
                wrt=tuple([name[0] for name in self._surrogate_input_names]),
                dct=dct)

        # Support for user declaring fd partials in a child class and assigning new defaults.
        # We want a warning for all partials that were not explicitly declared.
        declared_partials = set([
            key for key, dct in self._subjacs_info.items()
            if 'method' in dct and dct['method']
        ])

        # Gather undeclared fd partials on surrogates that don't support analytic derivatives.
        # While we do this, declare the missing ones.
        non_declared_partials = []
        for of, _ in self._surrogate_output_names:
            surrogate = self._metadata(of).get('surrogate')
            if surrogate and not overrides_method('linearize', surrogate,
                                                  SurrogateModel):
                wrt_list = [name[0] for name in self._surrogate_input_names]
                self._approx_partials(of=of, wrt=wrt_list, method='fd')

                for wrt in wrt_list:
                    abs_key = rel_key2abs_key(self, (of, wrt))
                    if abs_key not in declared_partials:
                        non_declared_partials.append(abs_key)

        if non_declared_partials:
            self._get_approx_scheme('fd')

            msg = "Because the MetaModelUnStructuredComp '{}' uses a surrogate " \
                  "which does not define a linearize method,\nOpenMDAO will use " \
                  "finite differences to compute derivatives. Some of the derivatives " \
                  "will be computed\nusing default finite difference " \
                  "options because they were not explicitly declared.\n".format(self.name)
            msg += "The derivatives computed using the defaults are:\n"
            for abs_key in non_declared_partials:
                msg += "    {}, {}\n".format(*abs_key)
            issue_warning(msg, category=DerivativesWarning)
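
A standalone numpy sketch of the block-diagonal sparsity pattern built above for vectorized metamodels; the sizes are illustrative only. Each of the vec_size copies of the dense (n_of x n_wrt) block is offset along the diagonal.

import numpy as np

vec_size, n_of, n_wrt = 3, 2, 2
vec_arange = np.arange(vec_size)

rows = np.repeat(np.arange(n_of), n_wrt)       # [0 0 1 1]
cols = np.tile(np.arange(n_wrt), n_of)         # [0 1 0 1]
repeat = np.repeat(vec_arange, len(rows))      # [0 0 0 0 1 1 1 1 2 2 2 2]
rows = np.tile(rows, vec_size) + repeat * n_of
cols = np.tile(cols, vec_size) + repeat * n_wrt

# visualize the pattern: three 2x2 blocks along the diagonal
J = np.zeros((vec_size * n_of, vec_size * n_wrt), dtype=int)
J[rows, cols] = 1
print(J)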
Example #25
    def run(self):
        """
        Execute pyOptSparse.

        Note that pyOpt controls the execution, and the individual optimizers
        (e.g., SNOPT) control the iteration.

        Returns
        -------
        bool
            Failure flag; True if failed to converge, False if successful.
        """
        problem = self._problem()
        model = problem.model
        relevant = model._relevant
        self.pyopt_solution = None
        self._total_jac = None
        self.iter_count = 0
        fwd = problem._mode == 'fwd'
        optimizer = self.options['optimizer']
        self._quantities = []

        self._check_for_missing_objective()
        self._check_jac = self.options['singular_jac_behavior'] in [
            'error', 'warn'
        ]

        # Only need initial run if we have linear constraints or if we are using an optimizer that
        # doesn't perform one initially.
        con_meta = self._cons
        model_ran = False
        if optimizer in run_required or np.any(
            [con['linear'] for con in self._cons.values()]):
            with RecordingDebugging(self._get_name(), self.iter_count,
                                    self) as rec:
                # Initial Run
                model.run_solve_nonlinear()
                rec.abs = 0.0
                rec.rel = 0.0
                model_ran = True
            self.iter_count += 1

        # compute dynamic simul deriv coloring or just sparsity if option is set
        if c_mod._use_total_sparsity:
            coloring = None
            if self._coloring_info['coloring'] is None and self._coloring_info[
                    'dynamic']:
                coloring = c_mod.dynamic_total_coloring(
                    self,
                    run_model=not model_ran,
                    fname=self._get_total_coloring_fname())

            if coloring is not None:
                # if the improvement wasn't large enough, don't use coloring
                pct = coloring._solves_info()[-1]
                info = self._coloring_info
                if info['min_improve_pct'] > pct:
                    info['coloring'] = info['static'] = None
                    msg = f"Coloring was deactivated.  Improvement of {pct:.1f}% was less " \
                          f"than min allowed ({info['min_improve_pct']:.1f}%)."
                    issue_warning(msg,
                                  prefix=self.msginfo,
                                  category=DerivativesWarning)

        comm = None if isinstance(problem.comm, FakeComm) else problem.comm
        opt_prob = Optimization(self.options['title'],
                                WeakMethodWrapper(self, '_objfunc'),
                                comm=comm)

        # Add all design variables
        dv_meta = self._designvars
        self._indep_list = indep_list = list(dv_meta)
        input_vals = self.get_design_var_values()

        for name, meta in dv_meta.items():
            size = meta['global_size'] if meta['distributed'] else meta['size']
            opt_prob.addVarGroup(name,
                                 size,
                                 type='c',
                                 value=input_vals[name],
                                 lower=meta['lower'],
                                 upper=meta['upper'])

        if not hasattr(pyoptsparse, '__version__') or \
           LooseVersion(pyoptsparse.__version__) < LooseVersion('2.5.1'):
            opt_prob.finalizeDesignVariables()
        else:
            opt_prob.finalize()

        # Add all objectives
        objs = self.get_objective_values()
        for name in objs:
            opt_prob.addObj(name)
            self._quantities.append(name)

        # Calculate and save derivatives for any linear constraints.
        lcons = [key for (key, con) in con_meta.items() if con['linear']]
        if len(lcons) > 0:
            _lin_jacs = self._compute_totals(of=lcons,
                                             wrt=indep_list,
                                             return_format='dict')
            # convert all of our linear constraint jacs to COO format. Otherwise pyoptsparse will
            # do it for us and we'll end up with a fully dense COO matrix and very slow evaluation
            # of linear constraints!
            for jacdct in _lin_jacs.values():
                for n, subjac in jacdct.items():
                    if isinstance(subjac, np.ndarray):
                        # we can safely use coo_matrix to automatically convert the ndarray
                        # since our linear constraint jacs are constant, so zeros won't become
                        # nonzero during the optimization.
                        mat = coo_matrix(subjac)
                        if mat.row.size > 0:
                            # convert to 'coo' format here to avoid an emphatic warning
                            # by pyoptsparse.
                            jacdct[n] = {
                                'coo': [mat.row, mat.col, mat.data],
                                'shape': mat.shape
                            }

        # Add all equality constraints
        for name, meta in con_meta.items():
            if meta['equals'] is None:
                continue
            size = meta['global_size'] if meta['distributed'] else meta['size']
            lower = upper = meta['equals']
            path = meta['source'] if meta['alias'] is not None else name
            if fwd:
                wrt = [
                    v for v in indep_list
                    if path in relevant[dv_meta[v]['source']]
                ]
            else:
                rels = relevant[path]
                wrt = [v for v in indep_list if dv_meta[v]['source'] in rels]

            if meta['linear']:
                jac = {w: _lin_jacs[name][w] for w in wrt}
                opt_prob.addConGroup(name,
                                     size,
                                     lower=lower,
                                     upper=upper,
                                     linear=True,
                                     wrt=wrt,
                                     jac=jac)
            else:
                if name in self._res_subjacs:
                    resjac = self._res_subjacs[name]
                    jac = {n: resjac[dv_meta[n]['source']] for n in wrt}
                else:
                    jac = None

                opt_prob.addConGroup(name,
                                     size,
                                     lower=lower,
                                     upper=upper,
                                     wrt=wrt,
                                     jac=jac)
                self._quantities.append(name)

        # Add all inequality constraints
        for name, meta in con_meta.items():
            if meta['equals'] is not None:
                continue
            size = meta['global_size'] if meta['distributed'] else meta['size']

            # Bounds - double sided is supported
            lower = meta['lower']
            upper = meta['upper']

            path = meta['source'] if meta['alias'] is not None else name

            if fwd:
                wrt = [
                    v for v in indep_list
                    if path in relevant[dv_meta[v]['source']]
                ]
            else:
                rels = relevant[path]
                wrt = [v for v in indep_list if dv_meta[v]['source'] in rels]

            if meta['linear']:
                jac = {w: _lin_jacs[name][w] for w in wrt}
                opt_prob.addConGroup(name,
                                     size,
                                     upper=upper,
                                     lower=lower,
                                     linear=True,
                                     wrt=wrt,
                                     jac=jac)
            else:
                if name in self._res_subjacs:
                    resjac = self._res_subjacs[name]
                    jac = {n: resjac[dv_meta[n]['source']] for n in wrt}
                else:
                    jac = None
                opt_prob.addConGroup(name,
                                     size,
                                     upper=upper,
                                     lower=lower,
                                     wrt=wrt,
                                     jac=jac)
                self._quantities.append(name)

        # Instantiate the requested optimizer
        try:
            _tmp = __import__('pyoptsparse', globals(), locals(), [optimizer],
                              0)
            opt = getattr(_tmp, optimizer)()

        except Exception as err:
            # Change whatever pyopt gives us to an ImportError, give it a readable message,
            # but raise with the original traceback.
            msg = "Optimizer %s is not available in this installation." % optimizer
            raise ImportError(msg).with_traceback(err.__traceback__)

        # Process any default optimizer-specific settings.
        if optimizer in DEFAULT_OPT_SETTINGS:
            for name, value in DEFAULT_OPT_SETTINGS[optimizer].items():
                if name not in self.opt_settings:
                    self.opt_settings[name] = value

        # Set optimization options
        for option, value in self.opt_settings.items():
            opt.setOption(option, value)

        # Print the pyoptsparse optimization problem summary before running the optimization.
        # This allows users to confirm their optimization setup.
        if self.options['print_opt_prob']:
            if not MPI or model.comm.rank == 0:
                print(opt_prob)

        self._exc_info = None
        try:

            # Execute the optimization problem
            if self.options['gradient_method'] == 'pyopt_fd':

                # Use pyOpt's internal finite difference
                # TODO: Need to get this from OpenMDAO
                # fd_step = problem.model.deriv_options['step_size']
                fd_step = 1e-6
                sol = opt(opt_prob,
                          sens='FD',
                          sensStep=fd_step,
                          storeHistory=self.hist_file,
                          hotStart=self.hotstart_file)

            elif self.options['gradient_method'] == 'snopt_fd':
                if self.options['optimizer'] == 'SNOPT':

                    # Use SNOPT's internal finite difference
                    # TODO: Need to get this from OpenMDAO
                    # fd_step = problem.model.deriv_options['step_size']
                    fd_step = 1e-6
                    sol = opt(opt_prob,
                              sens=None,
                              sensStep=fd_step,
                              storeHistory=self.hist_file,
                              hotStart=self.hotstart_file)

                else:
                    msg = "SNOPT's internal finite difference can only be used with SNOPT"
                    self._exc_info = (Exception, Exception(msg), None)
            else:

                # Use OpenMDAO's differentiator for the gradient
                sol = opt(opt_prob,
                          sens=WeakMethodWrapper(self, '_gradfunc'),
                          storeHistory=self.hist_file,
                          hotStart=self.hotstart_file)

        except Exception:
            if not self._exc_info:
                raise

        if self._exc_info:
            if self._exc_info[2] is None:
                raise self._exc_info[1]
            raise self._exc_info[1].with_traceback(self._exc_info[2])

        # Print results
        if self.options['print_results']:
            if not MPI or model.comm.rank == 0:
                print(sol)

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        dv_dict = sol.getDVs()
        for name in indep_list:
            self.set_design_var(name, dv_dict[name])

        with RecordingDebugging(self._get_name(), self.iter_count,
                                self) as rec:
            try:
                model.run_solve_nonlinear()
            except AnalysisError:
                model._clear_iprint()

            rec.abs = 0.0
            rec.rel = 0.0
        self.iter_count += 1

        # Save the most recent solution.
        self.pyopt_solution = sol

        try:
            exit_status = sol.optInform['value']
            self.fail = False

            # These are various failed statuses.
            if optimizer == 'IPOPT':
                if exit_status not in {0, 1}:
                    self.fail = True
            elif exit_status > 2:
                self.fail = True

        except KeyError:
            # optimizers other than pySNOPT may not populate this dict
            pass

        # revert signal handler to cached version
        sigusr = self.options['user_terminate_signal']
        if sigusr is not None:
            signal.signal(sigusr, self._signal_cache)
            self._signal_cache = None  # to prevent memory leak test from failing

        return self.fail
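
A hedged configuration sketch for the run() method above, assuming pyoptsparse with SLSQP is installed; the Paraboloid test component and the opt_settings value shown are just one plausible setup.

import openmdao.api as om
from openmdao.test_suite.components.paraboloid import Paraboloid

prob = om.Problem()
prob.model.add_subsystem('p', Paraboloid(), promotes=['*'])
prob.model.add_design_var('x', lower=-50.0, upper=50.0)
prob.model.add_design_var('y', lower=-50.0, upper=50.0)
prob.model.add_objective('f_xy')

driver = prob.driver = om.pyOptSparseDriver()
driver.options['optimizer'] = 'SLSQP'
driver.options['print_results'] = True
driver.opt_settings['MAXIT'] = 50     # passed straight through to the optimizer

prob.setup()
failed = prob.run_driver()            # returns the fail flag computed in run()
print(failed, prob.get_val('x'), prob.get_val('y'), prob.get_val('f_xy'))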
Example #26
def view_connections(root,
                     outfile='connections.html',
                     show_browser=True,
                     show_values=True,
                     precision=6,
                     title=None):
    """
    Generate a self-contained html file containing a detailed connection viewer.

    Optionally pops up a web browser to view the file.

    Parameters
    ----------
    root : System or Problem
        The root for the desired tree.

    outfile : str, optional
        The name of the output html file.  Defaults to 'connections.html'.

    show_browser : bool, optional
        If True, pop up a browser to view the generated html file.
        Defaults to True.

    show_values : bool, optional
        If True, retrieve the values and display them.

    precision : int, optional
        Sets the precision for displaying array values.

    title : str, optional
        Sets the title of the web page.
    """
    if MPI and MPI.COMM_WORLD.rank != 0:
        return

    # since people are used to passing the Problem as the first arg to
    # the N2 diagram function, allow them to pass a Problem here as well.
    if isinstance(root, Problem):
        system = root.model
    else:
        system = root

    connections = system._problem_meta['model_ref']()._conn_global_abs_in2out

    src2tgts = defaultdict(list)
    units = defaultdict(lambda: '')
    for io in ('input', 'output'):
        for n, data in system._var_allprocs_abs2meta[io].items():
            u = data.get('units', '')
            if u is not None:
                units[n] = u

    vals = {}

    prefix = system.pathname + '.' if system.pathname else ''
    all_vars = {}
    for io in ('input', 'output'):
        all_vars[io] = chain(system._var_abs2meta[io].items(),
                             [(prefix + n, m)
                              for n, m in system._var_discrete[io].items()])

    if show_values and system._outputs is None:
        issue_warning(
            "Values will not be shown because final_setup has not been called yet.",
            prefix=system.msginfo)

    with printoptions(precision=precision, suppress=True, threshold=10000):

        for t, meta in all_vars['input']:
            s = connections[t]
            if show_values and system._outputs is not None:
                if s.startswith('_auto_ivc.'):
                    val = system.get_val(t,
                                         flat=True,
                                         get_remote=True,
                                         from_src=False)
                else:
                    # if there's a unit conversion, express the value in the
                    # units of the target
                    if units[t] and s in system._outputs:
                        val = system.get_val(t,
                                             flat=True,
                                             units=units[t],
                                             get_remote=True)
                    else:
                        val = system.get_val(t, flat=True, get_remote=True)
            else:
                val = ''

            src2tgts[s].append(t)

            vals[t] = val

    NOCONN = '[NO CONNECTION]'
    vals[NOCONN] = ''

    src_systems = set()
    tgt_systems = set()
    for s, _ in all_vars['output']:
        parts = s.split('.')
        for i in range(len(parts)):
            src_systems.add('.'.join(parts[:i]))

    for t, _ in all_vars['input']:
        parts = t.split('.')
        for i in range(len(parts)):
            tgt_systems.add('.'.join(parts[:i]))

    src_systems = [{'name': n} for n in sorted(src_systems)]
    src_systems.insert(1, {'name': NOCONN})
    tgt_systems = [{'name': n} for n in sorted(tgt_systems)]
    tgt_systems.insert(1, {'name': NOCONN})

    tprom = system._var_allprocs_abs2prom['input']
    sprom = system._var_allprocs_abs2prom['output']

    table = []
    idx = 1  # unique ID for use by Tabulator
    for tgt, src in connections.items():
        usrc = units[src]
        utgt = units[tgt]
        if usrc != utgt:
            # prepend these with '!' so they'll be colored red
            if usrc:
                usrc = '!' + units[src]
            if utgt:
                utgt = '!' + units[tgt]

        row = {
            'id': idx,
            'src': src,
            'sprom': sprom[src],
            'sunits': usrc,
            'val': _val2str(vals[tgt]),
            'tunits': utgt,
            'tprom': tprom[tgt],
            'tgt': tgt
        }
        table.append(row)
        idx += 1

    # add rows for unconnected sources
    for src, _ in all_vars['output']:
        if src not in src2tgts:
            if show_values:
                v = _val2str(system._abs_get_val(src))
            else:
                v = ''
            row = {
                'id': idx,
                'src': src,
                'sprom': sprom[src],
                'sunits': units[src],
                'val': v,
                'tunits': '',
                'tprom': NOCONN,
                'tgt': NOCONN
            }
            table.append(row)
            idx += 1

    if title is None:
        title = ''

    data = {
        'title': title,
        'table': table,
        'show_values': show_values,
    }

    viewer = 'connect_table.html'

    code_dir = os.path.dirname(os.path.abspath(__file__))
    libs_dir = os.path.join(os.path.dirname(code_dir), 'common', 'libs')
    style_dir = os.path.join(os.path.dirname(code_dir), 'common', 'style')

    with open(os.path.join(code_dir, viewer), "r") as f:
        template = f.read()

    with open(os.path.join(libs_dir, 'tabulator.min.js'), "r") as f:
        tabulator_src = f.read()

    with open(os.path.join(style_dir, 'tabulator.min.css'), "r") as f:
        tabulator_style = f.read()

    jsontxt = json.dumps(data)

    with open(outfile, 'w') as f:
        s = template.replace("<connection_data>", jsontxt)
        s = s.replace("<tabulator_src>", tabulator_src)
        s = s.replace("<tabulator_style>", tabulator_style)
        f.write(s)

    if notebook:
        # display in Jupyter Notebook
        if not colab:
            display(IFrame(src=outfile, width=1000, height=1000))
        else:
            display(HTML(outfile))

    elif show_browser:
        # open it up in the browser
        from openmdao.utils.webview import webview
        webview(outfile)
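
A hedged usage sketch of the viewer above: after setup (and ideally final_setup, so values are available), the connection table can be written to a standalone html file. The two ExecComps are made up for illustration.

import openmdao.api as om

prob = om.Problem()
prob.model.add_subsystem('c1', om.ExecComp('y = 2.0 * x'), promotes_outputs=['y'])
prob.model.add_subsystem('c2', om.ExecComp('z = 3.0 * y'), promotes_inputs=['y'])

prob.setup()
prob.final_setup()   # without this, values are omitted and a warning is issued

om.view_connections(prob, outfile='connections.html', show_browser=False,
                    show_values=True, title='example connections')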