Example #1
    def _setup_partials(self):
        """
        Check that all partials are declared.
        """
        if not self._manual_decl_partials:
            meta = self._var_rel2meta
            decl_partials = super().declare_partials
            for i, (outs, tup) in enumerate(self._exprs_info):
                vs, funcs = tup
                ins = sorted(set(vs).difference(outs))
                for out in sorted(outs):
                    for inp in ins:
                        if self.options['has_diag_partials']:
                            ival = meta[inp]['value']
                            iarray = isinstance(ival,
                                                ndarray) and ival.size > 1
                            oval = meta[out]['value']
                            if iarray and isinstance(
                                    oval, ndarray) and oval.size > 1:
                                if oval.size != ival.size:
                                    raise RuntimeError(
                                        "%s: has_diag_partials is True but partial(%s, %s) "
                                        "is not square (shape=(%d, %d))." %
                                        (self.msginfo, out, inp, oval.size,
                                         ival.size))
                                # partial will be declared as diagonal
                                inds = np.arange(oval.size, dtype=int)
                            else:
                                inds = None
                            decl_partials(of=out,
                                          wrt=inp,
                                          rows=inds,
                                          cols=inds)
                        else:
                            decl_partials(of=out, wrt=inp)

        super()._setup_partials()
        if self._manual_decl_partials:
            undeclared = []
            for i, (outs, tup) in enumerate(self._exprs_info):
                vs, funcs = tup
                ins = sorted(set(vs).difference(outs))
                for out in sorted(outs):
                    out = '.'.join(
                        (self.pathname, out)) if self.pathname else out
                    for inp in ins:
                        inp = '.'.join(
                            (self.pathname, inp)) if self.pathname else inp
                        if (out, inp) not in self._subjacs_info:
                            undeclared.append((out, inp))
            if undeclared:
                idx = len(self.pathname) + 1 if self.pathname else 0
                undeclared = ', '.join([
                    ' wrt '.join((f"'{of[idx:]}'", f"'{wrt[idx:]}'"))
                    for of, wrt in undeclared
                ])
                simple_warning(
                    f"{self.msginfo}: The following partial derivatives have not been "
                    f"declared so they are assumed to be zero: [{undeclared}]."
                )
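
A minimal standalone sketch (not OpenMDAO code) of the diagonal pattern declared above: passing rows == cols == arange(n) tells the framework that only the Jacobian diagonal is nonzero, i.e. each output element depends only on the matching input element.

import numpy as np

n = 4
inds = np.arange(n, dtype=int)
# rows=inds, cols=inds is the sparsity of a diagonal Jacobian: the k-th
# partial value is stored at position (inds[k], inds[k]).
J = np.zeros((n, n))
J[inds, inds] = np.array([1.0, 2.0, 3.0, 4.0])  # hypothetical partial values
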
Example #2
def _print_violations(outputs, lower, upper):
    """
    Print out which variables exceed their bounds.

    Parameters
    ----------
    outputs : <Vector>
        Vector containing the outputs.
    lower : <Vector>
        Vector containing the lower bounds.
    upper : <Vector>
        Vector containing the upper bounds.
    """
    start = end = 0
    for name, val in outputs._abs_item_iter():
        end += val.size
        if upper is not None and any(val > upper[start:end]):
            msg = (
                f"'{name}' exceeds upper bounds\n  Val: {val}\n  Upper: {upper[start:end]}\n"
            )
            simple_warning(msg)

        if lower is not None and any(val < lower[start:end]):
            msg = (
                f"'{name}' exceeds lower bounds\n  Val: {val}\n  Lower: {lower[start:end]}\n"
            )
            simple_warning(msg)

        start = end
Example #3
    def initialize(self):
        """
        Initialize the component.
        """
        if not make_interp_spline:
            msg = "'MetaModelStructuredComp' requires scipy>=0.19, but the currently" \
                  " installed version is %s." % scipy_version
            simple_warning(msg)

        self.options.declare(
            'extrapolate',
            types=bool,
            default=False,
            desc='Sets whether extrapolation should be performed '
            'when an input is out of bounds.')
        self.options.declare(
            'training_data_gradients',
            types=bool,
            default=False,
            desc='Sets whether gradients with respect to output '
            'training data should be computed.')
        self.options.declare('vec_size',
                             types=int,
                             default=1,
                             desc='Number of points to evaluate at once.')
        self.options.declare('method',
                             values=('cubic', 'slinear', 'quintic'),
                             default="cubic",
                             desc='Spline interpolation order.')
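
A hedged sketch of how options declared this way are typically read back later; setup() is the usual OpenMDAO hook, but the body below is illustrative only.

    def setup(self):
        # Each declared option is validated on assignment and reads like a dict.
        vec_size = self.options['vec_size']  # int, defaults to 1
        method = self.options['method']      # one of 'cubic', 'slinear', 'quintic'
        if self.options['extrapolate']:
            pass  # enable out-of-bounds evaluation here
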
Example #4
def dynamic_simul_coloring(driver, run_model=True, do_sparsity=False, show_jac=False):
    """
    Compute simultaneous deriv coloring during runtime.

    Parameters
    ----------
    driver : <Driver>
        The driver performing the optimization.
    run_model : bool
        If True, call run_model before computing coloring.
    do_sparsity : bool
        If True, setup the total jacobian sparsity (needed by pyOptSparseDriver).
    show_jac : bool
        If True, display a visualization of the colored jacobian.
    """
    problem = driver._problem
    if not problem.model._use_derivatives:
        simple_warning("Derivatives have been turned off. Skipping dynamic simul coloring.")
        return

    driver._total_jac = None

    # save the coloring.json file for later inspection
    with open("coloring.json", "w") as f:
        coloring = get_simul_meta(problem,
                                  repeats=driver.options['dynamic_derivs_repeats'],
                                  tol=1.e-15, include_sparsity=do_sparsity,
                                  setup=False, run_model=run_model, show_jac=show_jac, stream=f)
    driver.set_simul_deriv_color(coloring)
    driver._setup_simul_coloring()
    if do_sparsity:
        driver._setup_tot_jac_sparsity()

    simul_coloring_summary(coloring, stream=sys.stdout)
Example #5
def get_code(reference, hide_doc_string=False):
    """
    Return the source code of the given reference path to a function.

    Parameters
    ----------
    reference : str
        Dot path of desired function.
    hide_doc_string : bool
        Option to hide the docstring.

    Returns
    -------
    IPython.display.Code
        Source code of the given class or function.
    """
    obj = inspect.getsource(_get_object_from_reference(reference))

    if hide_doc_string:
        obj = obj.split('"""')
        del obj[1]
        obj = ''.join(obj)

    if ipy:
        return Code(obj, language='python')
    else:
        simple_warning(
            "IPython is not installed. Run `pip install openmdao[notebooks]` or "
            "`pip install openmdao[docs]` to upgrade.")
Example #6
def record_system_options(problem):
    """
    Record the system options for all systems in the model.

    Parameters
    ----------
    problem : Problem
        The problem for which all its systems' options are to be recorded.
    """
    # get all recorders in the problem
    recorders = set(_get_all_recorders(problem))
    if recorders:
        if problem._system_options_recorded:
            simple_warning(
                "The model is being run again, if the options or scaling of any "
                "components has changed then only their new values will be recorded."
            )
        else:
            problem._system_options_recorded = True

        for recorder in recorders:
            for sub in problem.model.system_iter(recurse=True,
                                                 include_self=True):
                if problem._run_counter >= 1:
                    recorder.record_metadata_system(sub, problem._run_counter)
                else:
                    recorder.record_metadata_system(sub)
Example #8
    def _setup_simul_coloring(self):
        """
        Set up metadata for coloring of total derivative solution.

        If set_coloring was called with a filename, load the coloring file.
        """
        # command line simul_coloring uses this env var to turn pre-existing coloring off
        if not coloring_mod._use_total_sparsity:
            return

        problem = self._problem()
        if not problem.model._use_derivatives:
            simple_warning(
                "Derivatives are turned off.  Skipping simul deriv coloring.")
            return

        total_coloring = self._get_static_coloring()

        if total_coloring._rev and problem._orig_mode not in ('rev', 'auto'):
            revcol = total_coloring._rev[0][0]
            if revcol:
                raise RuntimeError(
                    "Simultaneous coloring does reverse solves but mode has "
                    "been set to '%s'" % problem._orig_mode)
        if total_coloring._fwd and problem._orig_mode not in ('fwd', 'auto'):
            fwdcol = total_coloring._fwd[0][0]
            if fwdcol:
                raise RuntimeError(
                    "Simultaneous coloring does forward solves but mode has "
                    "been set to '%s'" % problem._orig_mode)
Example #9
def dynamic_sparsity(driver):
    """
    Compute deriv sparsity during runtime.

    Parameters
    ----------
    driver : <Driver>
        The driver performing the optimization.
    """
    problem = driver._problem
    if not problem.model._use_derivatives:
        simple_warning(
            "Derivatives have been turned off. Skipping dynamic sparsity computation."
        )
        return

    driver._total_jac = None
    repeats = driver.options['dynamic_derivs_repeats']

    # save the sparsity.json file for later inspection
    with open("sparsity.json", "w") as f:
        sparsity = get_sparsity(problem,
                                mode=problem._mode,
                                repeats=repeats,
                                stream=f)

    driver.set_total_jac_sparsity(sparsity)
    driver._setup_tot_jac_sparsity()
Example #10
    def add_approximation(self, abs_key, system, kwargs, vector=None):
        """
        Use this approximation scheme to approximate the derivative d(of)/d(wrt).

        Parameters
        ----------
        abs_key : tuple(str,str)
            Absolute name pairing of (of, wrt) for the derivative.
        system : System
            Containing System.
        kwargs : dict
            Additional keyword arguments, to be interpreted by sub-classes.
        vector : ndarray or None
            Direction for difference when using directional derivatives.
        """
        options = self.DEFAULT_OPTIONS.copy()
        options.update(kwargs)
        options['vector'] = vector

        wrt = abs_key[1]
        if wrt in self._wrt_meta:
            simple_warning(
                f"{system.msginfo}: overriding previous approximation defined for "
                f"'{wrt}'.")
        self._wrt_meta[wrt] = options
        self._reset()  # force later regen of approx_groups
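
A hedged usage sketch, assuming a subclass whose DEFAULT_OPTIONS understands finite-difference settings such as 'form' and 'step'; the names scheme, comp, 'comp.y', and 'comp.x' are hypothetical.

scheme.add_approximation(('comp.y', 'comp.x'), comp,
                         {'form': 'forward', 'step': 1e-6})
# Calling again with the same wrt name ('comp.x') would trigger the
# override warning above and replace the stored options.
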
Example #11
def show_options_table(reference, recording_options=False):
    """
    Return the options table of the given reference path.

    Parameters
    ----------
    reference : str
        Dot path of desired class or function.

    recording_options : bool
        If True, display recording options instead of options.

    Returns
    -------
    IPython.display
        Options table of the given class or function.
    """
    obj = _get_object_from_reference(reference)()

    if ipy:
        if not hasattr(obj, "options"):
            return display(HTML(obj.to_table(fmt='html')))
        elif not recording_options:
            return display(HTML(obj.options.to_table(fmt='html')))
        else:
            return display(HTML(obj.recording_options.to_table(fmt='html')))
    else:
        simple_warning(
            "IPython is not installed. Run `pip install openmdao[notebooks]` or "
            "`pip install openmdao[docs]` to upgrade.")
Example #12
    def get_case(self, case_id, cache=False):
        """
        Get a case from the database.

        Parameters
        ----------
        case_id : str or int
            The string-identifier of the case to be retrieved or the index of the case.
        cache : bool
            If True, case will be cached for faster access by key.

        Returns
        -------
        Case
            The specified case from the table.
        """
        # check to see if we've already cached this case
        if isinstance(case_id, int):
            case_id = self._get_iteration_coordinate(case_id)

        # if we've already cached this case, return the cached instance
        if case_id in self._cases:
            return self._cases[case_id]

        # we don't have it, so fetch it
        with sqlite3.connect(self._filename) as con:
            con.row_factory = sqlite3.Row
            cur = con.cursor()
            cur.execute("SELECT * FROM %s WHERE %s='%s'" %
                        (self._table_name, self._index_name, case_id))
            row = cur.fetchone()

        con.close()

        # if found, extract the data and optionally cache the Case
        if row is not None:
            if self._format_version >= 5:
                source = self._get_row_source(row['id'])

                # check for situations where parsing the iter coord doesn't work correctly
                iter_source = self._get_source(row[self._index_name])
                if iter_source != source:
                    simple_warning('Mismatched source for %d: %s = %s vs %s' %
                                   (row['id'], row[self._index_name],
                                    iter_source, source))
            else:
                source = self._get_source(row[self._index_name])

            case = Case(source, row, self._prom2abs, self._abs2prom,
                        self._abs2meta, self._var_info, self._format_version)

            # cache it if requested
            if cache:
                self._cases[case_id] = case

            return case
        else:
            return None
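
A hedged usage sketch against a reader object exposing this method; cr is hypothetical.

case = cr.get_case(0, cache=True)  # by index, resolved to an iteration coordinate
same = cr.get_case(0)              # second lookup is served from the cache
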
Example #13
    def _setup_solvers(self, system, depth):
        """
        Assign system instance, set depth, and optionally perform setup.

        Parameters
        ----------
        system : System
            pointer to the owning system.
        depth : int
            depth of the current system (already incremented).
        """
        super(LinesearchSolver, self)._setup_solvers(system, depth)
        if system._has_bounds:
            abs2meta = system._var_abs2meta
            start = end = 0
            for abs_name, val in system._outputs._abs_val_iter():
                end += val.size
                meta = abs2meta[abs_name]
                var_lower = meta['lower']
                var_upper = meta['upper']

                if var_lower is None and var_upper is None:
                    start = end
                    continue

                ref0 = meta['ref0']
                ref = meta['ref']

                if not np.isscalar(ref0):
                    ref0 = ref0.ravel()
                if not np.isscalar(ref):
                    ref = ref.ravel()

                if var_lower is not None:
                    if self._lower_bounds is None:
                        self._lower_bounds = np.full(len(system._outputs),
                                                     -np.inf)
                    if not np.isscalar(var_lower):
                        var_lower = var_lower.ravel()
                    self._lower_bounds[start:end] = (var_lower - ref0) / (ref -
                                                                          ref0)

                if var_upper is not None:
                    if self._upper_bounds is None:
                        self._upper_bounds = np.full(len(system._outputs),
                                                     np.inf)
                    if not np.isscalar(var_upper):
                        var_upper = var_upper.ravel()
                    self._upper_bounds[start:end] = (var_upper - ref0) / (ref -
                                                                          ref0)

                start = end
        else:
            simple_warning(
                f"{self.msginfo}: linesearch is active but no bounds have been set."
            )
            self._lower_bounds = self._upper_bounds = None
Example #14
    def record_metadata_system(self, recording_requester, run_counter=None):
        """
        Record system metadata.

        Parameters
        ----------
        recording_requester : System
            The System that would like to record its metadata.
        run_counter : int or None
            The number of times run_driver or run_model has been called.
        """
        if self.connection:

            scaling_vecs, user_options = self._get_metadata_system(
                recording_requester)

            if scaling_vecs is None:
                return

            scaling_factors = pickle.dumps(scaling_vecs, self._pickle_version)

            # try to pickle the metadata, report if it failed
            try:
                pickled_metadata = pickle.dumps(user_options,
                                                self._pickle_version)
            except Exception:
                try:
                    for key, values in user_options._dict.items():
                        pickle.dumps(values, self._pickle_version)
                except Exception:
                    pickled_metadata = pickle.dumps(OptionsDictionary(),
                                                    self._pickle_version)
                    simple_warning(
                        "Trying to record option '%s' which cannot be pickled on system "
                        "%s. Set 'recordable' to False. Skipping recording options for "
                        "this system." % (key, recording_requester.msginfo))

            path = recording_requester.pathname
            if not path:
                path = 'root'

            scaling_factors = sqlite3.Binary(scaling_factors)
            pickled_metadata = sqlite3.Binary(pickled_metadata)

            # Need to use OR IGNORE in here because if the user does run_driver more than once
            #   the current OpenMDAO code will call this function each time and there will be
            #   SQL errors for "UNIQUE constraint failed: system_metadata.id"
            # Future versions of OpenMDAO will handle this better.
            if run_counter is None:
                name = path
            else:
                name = "{}_{}".format(path, str(run_counter))
            with self.connection as c:
                c.execute(
                    "INSERT OR IGNORE INTO system_metadata"
                    "(id, scaling_factors, component_metadata) "
                    "VALUES(?,?,?)", (name, scaling_factors, pickled_metadata))
Example #15
def _get_used_before_calc_subs(group, input_srcs):
    """
    Return Systems that are executed out of dataflow order.

    Parameters
    ----------
    group : <Group>
        The Group where we're checking subsystem order.
    input_srcs : {}
        dict containing variable abs names for sources of the inputs.
        This describes all variable connections, either explicit or implicit,
        in the entire model.

    Returns
    -------
    dict
        A dict mapping names of target Systems to a set of names of their
        source Systems that execute after them.
    """
    sub2i = {}
    parallel_solver = {}
    for i, sub in enumerate(group._subsystems_allprocs):
        if hasattr(sub,
                   '_mpi_proc_allocator') and sub._mpi_proc_allocator.parallel:
            parallel_solver[sub.name] = sub.nonlinear_solver.SOLVER

        sub2i[sub.name] = i

    glen = len(group.pathname.split('.')) if group.pathname else 0

    ubcs = defaultdict(set)
    for tgt_abs, src_abs in input_srcs.items():
        if src_abs is not None:
            iparts = tgt_abs.split('.')
            oparts = src_abs.split('.')
            src_sys = oparts[glen]
            tgt_sys = iparts[glen]
            hierarchy_check = oparts[glen + 1] == iparts[glen + 1]

            if (src_sys in parallel_solver and tgt_sys in parallel_solver
                    and (parallel_solver[src_sys]
                         not in ["NL: NLBJ", "NL: Newton", "BROYDEN"])
                    and src_sys == tgt_sys and not hierarchy_check):
                simple_warning(
                    "Need to attach NonlinearBlockJac, NewtonSolver, or BroydenSolver "
                    "to '%s' when connecting components inside parallel "
                    "groups" % (src_sys))
                ubcs[tgt_abs.rsplit('.', 1)[0]].add(src_abs.rsplit('.', 1)[0])
            if (src_sys in sub2i and tgt_sys in sub2i
                    and (sub2i[src_sys] > sub2i[tgt_sys])):
                ubcs[tgt_sys].add(src_sys)

    return ubcs
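
A hedged illustration of the return value (system names hypothetical): if a group runs [c1, c2] in that order, but the connection c2.y -> c1.x feeds c1 from the later-running c2, then c1 is "used before calc".

# _get_used_before_calc_subs(group, input_srcs) would then contain:
#     {'c1': {'c2'}}
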
Example #16
    def record_metadata_system(self, system, run_number=None):
        """
        Record system metadata.

        Parameters
        ----------
        system : System
            The System for which to record metadata.
        run_number : int or None
            Number indicating which run the metadata is associated with.
            None for the first run, 1 for the second, etc.
        """
        if self._record_metadata and self.metadata_connection:

            scaling_vecs, user_options = self._get_metadata_system(system)

            if scaling_vecs is None:
                return

            scaling_factors = pickle.dumps(scaling_vecs, self._pickle_version)

            # try to pickle the metadata, report if it failed
            try:
                pickled_metadata = pickle.dumps(user_options,
                                                self._pickle_version)
            except Exception:
                try:
                    for key, values in user_options._dict.items():
                        pickle.dumps(values, self._pickle_version)
                except Exception:
                    pickled_metadata = pickle.dumps(OptionsDictionary(),
                                                    self._pickle_version)
                    simple_warning(
                        "Trying to record option '%s' which cannot be pickled on system "
                        "%s. Set 'recordable' to False. Skipping recording options for "
                        "this system." % (key, system.msginfo))

            path = system.pathname
            if not path:
                path = 'root'

            scaling_factors = sqlite3.Binary(zlib.compress(scaling_factors))
            pickled_metadata = sqlite3.Binary(zlib.compress(pickled_metadata))

            if run_number is None:
                name = path
            else:
                name = META_KEY_SEP.join([path, str(run_number)])

            with self.metadata_connection as m:
                m.execute(
                    "INSERT INTO system_metadata"
                    "(id, scaling_factors, component_metadata) "
                    "VALUES(?,?,?)", (name, scaling_factors, pickled_metadata))
Example #17
    def compute_approximations(self, system, jac, total=False):
        """
        Execute the system to compute the approximate sub-Jacobians.

        Parameters
        ----------
        system : System
            System on which the execution is run.
        jac : dict-like
            Approximations are stored in the given dict-like object.
        total : bool
            If True total derivatives are being approximated, else partials.
        """
        if not self._wrt_meta:
            return

        if system.under_complex_step:

            # If we are nested under another complex step, then warn and swap to FD.
            if not self._fd:
                from openmdao.approximation_schemes.finite_difference import FiniteDifference

                msg = "Nested complex step detected. Finite difference will be used for '%s'."
                simple_warning(msg % system.pathname)

                fd = self._fd = FiniteDifference()
                empty = {}
                for wrt in self._wrt_meta:
                    fd.add_approximation(wrt, system, empty)

            self._fd.compute_approximations(system, jac, total=total)
            return

        saved_inputs = system._inputs._get_data().copy()
        system._inputs._data.imag[:] = 0.0
        saved_outputs = system._outputs.asarray(copy=True)
        system._outputs._data.imag[:] = 0.0
        saved_resids = system._residuals.asarray(copy=True)
        system._residuals._data.imag[:] = 0.0

        # Turn on complex step.
        system._set_complex_step_mode(True)

        try:
            self._compute_approximations(system, jac, total, under_cs=True)
        finally:
            # Turn off complex step.
            system._set_complex_step_mode(False)

        system._inputs.set_val(saved_inputs)
        system._outputs.set_val(saved_outputs)
        system._residuals.set_val(saved_resids)
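
The save/zero/restore of the imaginary parts above exists because the scheme relies on the complex-step method; a minimal standalone illustration of the underlying identity:

import numpy as np

def f(x):
    return np.exp(x) * np.sin(x)

h = 1e-30
x0 = 0.7
# f'(x) ~= Im(f(x + i*h)) / h -- no subtractive cancellation, so h can be tiny.
deriv = f(x0 + 1j * h).imag / h
exact = np.exp(x0) * (np.sin(x0) + np.cos(x0))  # analytic check
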
Example #18
def notebook_mode():
    """
    Check if the environment is interactive and if tabulate is installed.

    Returns
    -------
    bool
        True if the environment is an interactive notebook.
    """
    if ipy and tabulate is None:
        simple_warning("Tabulate is not installed. Run `pip install openmdao[notebooks]` to "
                       "install required dependencies. Using ASCII for outputs.")
    return ipy
Example #19
    def record_metadata_system(self, recording_requester):
        """
        Record system metadata.

        Parameters
        ----------
        recording_requester : System
            The System that would like to record its metadata.
        """
        if self.connection:
            scaling_vecs, user_options = self._get_metadata_system(
                recording_requester)

            if scaling_vecs is None:
                return

            scaling_factors = pickle.dumps(scaling_vecs, self._pickle_version)

            # try to pickle the metadata, report if it failed
            try:
                pickled_metadata = pickle.dumps(user_options,
                                                self._pickle_version)
            except Exception:
                try:
                    for key, values in user_options._dict.items():
                        pickle.dumps(values, self._pickle_version)
                except Exception:
                    pickled_metadata = pickle.dumps(OptionsDictionary(),
                                                    self._pickle_version)
                    simple_warning(
                        "Trying to record option '%s' which cannot be pickled on system "
                        "%s. Set 'recordable' to False. Skipping recording options for "
                        "this system." % (key, recording_requester.msginfo))

            path = recording_requester.pathname
            if not path:
                path = 'root'

            scaling_factors = sqlite3.Binary(scaling_factors)
            pickled_metadata = sqlite3.Binary(pickled_metadata)

            with self.connection as c:
                # Because we can have a recorder attached to multiple Systems,
                #   and because we are now recording System metadata recursively,
                #   we can store System metadata multiple times. Need to ignore when that happens
                #   so we don't get database errors. So use OR IGNORE
                c.execute(
                    "INSERT OR IGNORE INTO system_metadata"
                    "(id, scaling_factors, component_metadata) "
                    "VALUES(?,?,?)", (path, scaling_factors, pickled_metadata))
Example #20
    def record_metadata_system(self, recording_requester):
        """
        Record system metadata.

        Parameters
        ----------
        recording_requester : System
            The System that would like to record its metadata.
        """
        if self.connection:
            scaling_vecs, user_options = self._get_metadata_system(
                recording_requester)

            if scaling_vecs is None:
                return

            scaling_factors = pickle.dumps(scaling_vecs, self._pickle_version)

            # try to pickle the metadata, report if it failed
            try:
                pickled_metadata = pickle.dumps(user_options,
                                                self._pickle_version)
            except Exception:
                pickled_metadata = pickle.dumps(OptionsDictionary(),
                                                self._pickle_version)
                simple_warning(
                    "Trying to record options which cannot be pickled "
                    "on system with name: %s. Use the 'options_excludes' "
                    "recording option on system objects to avoid attempting "
                    "to record options which cannot be pickled. Skipping "
                    "recording options for this system." %
                    recording_requester.name, RuntimeWarning)

            path = recording_requester.pathname
            if not path:
                path = 'root'

            scaling_factors = sqlite3.Binary(scaling_factors)
            pickled_metadata = sqlite3.Binary(pickled_metadata)

            with self.connection as c:
                # Because we can have a recorder attached to multiple Systems,
                #   and because we are now recording System metadata recursively,
                #   we can store System metadata multiple times. Need to ignore when that happens
                #   so we don't get database errors. So use OR IGNORE
                c.execute(
                    "INSERT OR IGNORE INTO system_metadata"
                    "(id, scaling_factors, component_metadata) "
                    "VALUES(?,?,?)", (path, scaling_factors, pickled_metadata))
Example #21
    def _setup_simul_coloring(self):
        """
        Set up metadata for simultaneous derivative solution.
        """
        # command line simul_coloring uses this env var to turn pre-existing coloring off
        if not coloring_mod._use_sparsity:
            return

        problem = self._problem
        if not problem.model._use_derivatives:
            simple_warning(
                "Derivatives are turned off.  Skipping simul deriv coloring.")
            return

        if isinstance(self._simul_coloring_info, string_types):
            with open(self._simul_coloring_info, 'r') as f:
                self._simul_coloring_info = coloring_mod._json2coloring(
                    json.load(f))

        if 'rev' in self._simul_coloring_info and problem._orig_mode not in (
                'rev', 'auto'):
            revcol = self._simul_coloring_info['rev'][0][0]
            if revcol:
                raise RuntimeError(
                    "Simultaneous coloring does reverse solves but mode has "
                    "been set to '%s'" % problem._orig_mode)
        if 'fwd' in self._simul_coloring_info and problem._orig_mode not in (
                'fwd', 'auto'):
            fwdcol = self._simul_coloring_info['fwd'][0][0]
            if fwdcol:
                raise RuntimeError(
                    "Simultaneous coloring does forward solves but mode has "
                    "been set to '%s'" % problem._orig_mode)

        # simul_coloring_info can contain data for either fwd, rev, or both, along with optional
        # sparsity patterns
        if 'sparsity' in self._simul_coloring_info:
            sparsity = self._simul_coloring_info['sparsity']
            del self._simul_coloring_info['sparsity']
        else:
            sparsity = None

        if sparsity is not None and self._total_jac_sparsity is not None:
            raise RuntimeError(
                "Total jac sparsity was set in both _simul_coloring_info"
                " and _total_jac_sparsity.")
        self._total_jac_sparsity = sparsity
Example #22
def show_options_table(reference, recording_options=False):
    """
    Return the options table of the given reference path.

    Parameters
    ----------
    reference : str or object
        Dot path of desired class or function or an instance.

    recording_options : bool
        If True, display recording options instead of options.

    Returns
    -------
    IPython.display
        Options table of the given class or function.
    """
    if isinstance(reference, str):
        obj = _get_object_from_reference(reference)()
    else:
        obj = reference

    if ipy:
        if not hasattr(obj, "options"):
            html = obj.to_table(fmt='html')
        elif not recording_options:
            html = obj.options.to_table(fmt='html')
        else:
            html = obj.recording_options.to_table(fmt='html')

        # Jupyter notebook imposes right justification, so we have to enforce what we want:
        # - Center table headers
        # - Left justify table columns
        # - Limit column width so there is adequate width left for the deprecation message
        style = '<{tag} style="text-align:{align}; max-width:{width}; overflow-wrap:break-word;">'

        cols = html.count('<th>')                 # there could be 5 or 6 columns
        width = '300px' if cols > 5 else '600px'  # limit width depending on number of columns

        html = html.replace('<th>', style.format(tag='th', align='center', width=width))
        html = html.replace('<td>', style.format(tag='td', align='left', width=width))

        return display(HTML(html))
    else:
        simple_warning("IPython is not installed. Run `pip install openmdao[notebooks]` or "
                       "`pip install openmdao[docs]` to upgrade.")
Example #23
    def record_metadata_system(self, recording_requester):
        """
        Record system metadata.

        Parameters
        ----------
        recording_requester : System
            The System that would like to record its metadata.
        """
        if self.connection:
            scaling_vecs, user_options = self._get_metadata_system(recording_requester)

            if scaling_vecs is None:
                return

            scaling_factors = pickle.dumps(scaling_vecs, self._pickle_version)

            # try to pickle the metadata, report if it failed
            try:
                pickled_metadata = pickle.dumps(user_options, self._pickle_version)
            except Exception:
                pickled_metadata = pickle.dumps(OptionsDictionary(), self._pickle_version)
                simple_warning("Trying to record options which cannot be pickled "
                               "on system with name: %s. Use the 'options_excludes' "
                               "recording option on system objects to avoid attempting "
                               "to record options which cannot be pickled. Skipping "
                               "recording options for this system." % recording_requester.name,
                               RuntimeWarning)

            path = recording_requester.pathname
            if not path:
                path = 'root'

            scaling_factors = sqlite3.Binary(scaling_factors)
            pickled_metadata = sqlite3.Binary(pickled_metadata)

            with self.connection as c:
                # Because we can have a recorder attached to multiple Systems,
                #   and because we are now recording System metadata recursively,
                #   we can store System metadata multiple times. Need to ignore when that happens
                #   so we don't get database errors. So use OR IGNORE
                c.execute("INSERT OR IGNORE INTO system_metadata"
                          "(id, scaling_factors, component_metadata) "
                          "VALUES(?,?,?)", (path, scaling_factors, pickled_metadata))
Example #24
    def compute_approximations(self, system, jac, total=False):
        """
        Execute the system to compute the approximate sub-Jacobians.

        Parameters
        ----------
        system : System
            System on which the execution is run.
        jac : dict-like
            Approximations are stored in the given dict-like object.
        total : bool
            If True total derivatives are being approximated, else partials.
        """
        if not self._exec_dict:
            return

        if system.under_complex_step:

            # If we are nested under another complex step, then warn and swap to FD.
            if not self._fd:
                from openmdao.approximation_schemes.finite_difference import FiniteDifference

                msg = "Nested complex step detected. Finite difference will be used for '%s'."
                simple_warning(msg % system.pathname)

                fd = self._fd = FiniteDifference()
                empty = {}
                for lst in itervalues(self._exec_dict):
                    for apprx in lst:
                        fd.add_approximation(apprx[0], empty)

            self._fd.compute_approximations(system, jac, total=total)
            return

        # Turn on complex step.
        system._set_complex_step_mode(True)

        self._compute_approximations(system, jac, total, under_cs=True)

        # Turn off complex step.
        system._set_complex_step_mode(False)
Example #25
def notebook_mode():
    """
    Check if the environment is interactive and if tabulate is installed.

    Returns
    -------
    bool
        True if the environment is an interactive notebook.
    """
    ipy = False
    try:
        from IPython import get_ipython
        ipy = get_ipython() is not None
    except ImportError:
        pass

    if ipy and tabulate is None:
        simple_warning(
            "Tabulate is not installed. Run `pip install openmdao[notebooks]` to "
            "install required dependencies. Using ASCII for outputs.")
    return ipy
Example #26
    def _setup_simul_coloring(self):
        """
        Set up metadata for simultaneous derivative solution.
        """
        # command line simul_coloring uses this env var to turn pre-existing coloring off
        if not coloring_mod._use_sparsity:
            return

        problem = self._problem
        if not problem.model._use_derivatives:
            simple_warning("Derivatives are turned off.  Skipping simul deriv coloring.")
            return

        if isinstance(self._simul_coloring_info, string_types):
            with open(self._simul_coloring_info, 'r') as f:
                self._simul_coloring_info = coloring_mod._json2coloring(json.load(f))

        if 'rev' in self._simul_coloring_info and problem._orig_mode not in ('rev', 'auto'):
            revcol = self._simul_coloring_info['rev'][0][0]
            if revcol:
                raise RuntimeError("Simultaneous coloring does reverse solves but mode has "
                                   "been set to '%s'" % problem._orig_mode)
        if 'fwd' in self._simul_coloring_info and problem._orig_mode not in ('fwd', 'auto'):
            fwdcol = self._simul_coloring_info['fwd'][0][0]
            if fwdcol:
                raise RuntimeError("Simultaneous coloring does forward solves but mode has "
                                   "been set to '%s'" % problem._orig_mode)

        # simul_coloring_info can contain data for either fwd, rev, or both, along with optional
        # sparsity patterns
        if 'sparsity' in self._simul_coloring_info:
            sparsity = self._simul_coloring_info['sparsity']
            del self._simul_coloring_info['sparsity']
        else:
            sparsity = None

        if sparsity is not None and self._total_jac_sparsity is not None:
            raise RuntimeError("Total jac sparsity was set in both _simul_coloring_info"
                               " and _total_jac_sparsity.")
        self._total_jac_sparsity = sparsity
Example #27
def show_options_table(reference):
    """
    Return the options table of the given reference path.

    Parameters
    ----------
    reference : str
        Dot path of desired class or function.

    Returns
    -------
    IPython.display
        Options table of the given class or function.
    """
    obj = _get_object_from_reference(reference)()

    if ipy:
        return display(HTML(obj.options.to_table(fmt='html')))
    else:
        simple_warning(
            "IPython is not installed. Run `pip install openmdao[notebooks]` or "
            "`pip install openmdao[docs]` to upgrade.")
Example #28
def dynamic_sparsity(driver):
    """
    Compute deriv sparsity during runtime.

    Parameters
    ----------
    driver : <Driver>
        The driver performing the optimization.
    """
    problem = driver._problem
    if not problem.model._use_derivatives:
        simple_warning("Derivatives have been turned off. Skipping dynamic sparsity computation.")
        return

    driver._total_jac = None
    repeats = driver.options['dynamic_derivs_repeats']

    # save the sparsity.json file for later inspection
    with open("sparsity.json", "w") as f:
        sparsity = get_sparsity(problem, mode=problem._mode, repeats=repeats, stream=f)

    driver.set_total_jac_sparsity(sparsity)
    driver._setup_tot_jac_sparsity()
Example #29
    def add_approximation(self, abs_key, system, kwargs, vector=None):
        """
        Use this approximation scheme to approximate the derivative d(of)/d(wrt).

        Parameters
        ----------
        abs_key : tuple(str,str)
            Absolute name pairing of (of, wrt) for the derivative.
        system : System
            Containing System.
        kwargs : dict
            Additional keyword arguments, to be interpreted by sub-classes.
        vector : ndarray or None
            Direction for difference when using directional derivatives.
        """
        options = self.DEFAULT_OPTIONS.copy()
        options.update(kwargs)

        if options['order'] is None:
            form = options['form']
            if form in DEFAULT_ORDER:
                options['order'] = DEFAULT_ORDER[options['form']]
            else:
                raise ValueError(
                    "{}: '{}' is not a valid form of finite difference; must be "
                    "one of {}".format(system.msginfo, form,
                                       list(DEFAULT_ORDER.keys())))

        options['vector'] = vector
        wrt = abs_key[1]
        if wrt in self._wrt_meta:
            simple_warning(
                f"{system.msginfo}: overriding previous approximation defined for "
                f"'{wrt}'.")
        self._wrt_meta[wrt] = options
        self._reset()  # force later regen of approx_groups
Example #30
    def _setup_partials(self, recurse=True):
        """
        Process all partials and approximations that the user declared.

        Metamodel needs to declare its partials after inputs and outputs are known.

        Parameters
        ----------
        recurse : bool
            Whether to call this method in subsystems.
        """
        super(MetaModelUnStructuredComp, self)._setup_partials()

        vec_size = self.options['vec_size']
        if vec_size > 1:
            # Sparse specification of partials for vectorized models.
            for wrt, n_wrt in self._surrogate_input_names:
                for of, shape_of in self._surrogate_output_names:

                    n_of = np.prod(shape_of)
                    rows = np.repeat(np.arange(n_of), n_wrt)
                    cols = np.tile(np.arange(n_wrt), n_of)
                    nnz = len(rows)
                    rows = np.tile(rows, vec_size) + np.repeat(
                        np.arange(vec_size), nnz) * n_of
                    cols = np.tile(cols, vec_size) + np.repeat(
                        np.arange(vec_size), nnz) * n_wrt

                    self._declare_partials(of=of,
                                           wrt=wrt,
                                           rows=rows,
                                           cols=cols)

        else:
            # Dense specification of partials for non-vectorized models.
            self._declare_partials(
                of=[name[0] for name in self._surrogate_output_names],
                wrt=[name[0] for name in self._surrogate_input_names])

            # warn the user that if they don't explicitly set options for fd,
            #   the defaults will be used
            # get a list of approximated partials
            declared_partials = set()
            for of, wrt, method, fd_options in self._approximated_partials:
                pattern_matches = self._find_partial_matches(of, wrt)
                for of_bundle, wrt_bundle in product(*pattern_matches):
                    of_pattern, of_matches = of_bundle
                    wrt_pattern, wrt_matches = wrt_bundle
                    for rel_key in product(of_matches, wrt_matches):
                        abs_key = rel_key2abs_key(self, rel_key)
                        declared_partials.add(abs_key)
            non_declared_partials = []
            for of, n_of in self._surrogate_output_names:
                has_derivs = False
                surrogate = self._metadata(of).get('surrogate')
                if surrogate:
                    has_derivs = overrides_method('linearize', surrogate,
                                                  SurrogateModel)
                if not has_derivs:
                    for wrt, n_wrt in self._surrogate_input_names:
                        abs_key = rel_key2abs_key(self, (of, wrt))
                        if abs_key not in declared_partials:
                            non_declared_partials.append(abs_key)
            if non_declared_partials:
                msg = "Because the MetaModelUnStructuredComp '{}' uses a surrogate " \
                      "which does not define a linearize method,\nOpenMDAO will use " \
                      "finite differences to compute derivatives. Some of the derivatives " \
                      "will be computed\nusing default finite difference " \
                      "options because they were not explicitly declared.\n".format(self.name)
                msg += "The derivatives computed using the defaults are:\n"
                for abs_key in non_declared_partials:
                    msg += "    {}, {}\n".format(*abs_key)
                simple_warning(msg, RuntimeWarning)

            for out_name, out_shape in self._surrogate_output_names:
                surrogate = self._metadata(out_name).get('surrogate')
                if surrogate and not overrides_method('linearize', surrogate,
                                                      SurrogateModel):
                    self._approx_partials(
                        of=out_name,
                        wrt=[name[0] for name in self._surrogate_input_names],
                        method='fd')
                    if "fd" not in self._approx_schemes:
                        self._approx_schemes['fd'] = FiniteDifference()
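
A small standalone check of the sparsity pattern built in the vectorized branch above, using assumed sizes vec_size=2, n_of=2, n_wrt=3:

import numpy as np

vec_size, n_of, n_wrt = 2, 2, 3
rows = np.repeat(np.arange(n_of), n_wrt)   # [0 0 0 1 1 1]
cols = np.tile(np.arange(n_wrt), n_of)     # [0 1 2 0 1 2]
nnz = len(rows)
rows = np.tile(rows, vec_size) + np.repeat(np.arange(vec_size), nnz) * n_of
cols = np.tile(cols, vec_size) + np.repeat(np.arange(vec_size), nnz) * n_wrt
# One dense n_of x n_wrt block per evaluation point, laid out block-diagonally,
# so different points never couple in the Jacobian.
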
Example #31
def _get_viewer_data(data_source):
    """
    Get the data needed by the N2 viewer as a dictionary.

    Parameters
    ----------
    data_source : <Problem> or <Group> or str
        A Problem or Group or case recorder file name containing the model or model data.

    Returns
    -------
    dict
        A dictionary containing information about the model for use by the viewer.
    """
    if isinstance(data_source, Problem):
        root_group = data_source.model

        if not isinstance(root_group, Group):
            simple_warning(
                "The model is not a Group, viewer data is unavailable.")
            return {}

        driver = data_source.driver
        driver_name = driver.__class__.__name__
        driver_type = 'doe' if isinstance(driver,
                                          DOEDriver) else 'optimization'
        driver_options = {k: driver.options[k] for k in driver.options}
        driver_opt_settings = None
        if driver_type == 'optimization' and 'opt_settings' in dir(driver):
            driver_opt_settings = driver.opt_settings

    elif isinstance(data_source, Group):
        if not data_source.pathname:  # root group
            root_group = data_source
            driver_name = None
            driver_type = None
            driver_options = None
            driver_opt_settings = None
        else:
            # this function only makes sense when it is at the root
            return {}

    elif isinstance(data_source, str):
        return CaseReader(data_source, pre_load=False).problem_metadata

    else:
        raise TypeError(
            '_get_viewer_data only accepts Problems, Groups or filenames')

    data_dict = {}
    comp_exec_idx = [0]  # list so pass by ref
    orders = {}
    data_dict['tree'] = _get_tree_dict(root_group, orders, comp_exec_idx)

    connections_list = []

    sys_pathnames_list = []  # list of pathnames of systems found in cycles
    sys_pathnames_dict = {}  # map of pathnames to index of pathname in list

    G = root_group.compute_sys_graph(comps_only=True)

    scc = nx.strongly_connected_components(G)

    for strong_comp in scc:
        if len(strong_comp) > 1:
            # these IDs are only used when back edges are present
            sys_pathnames_list.extend(strong_comp)
            for name in strong_comp:
                sys_pathnames_dict[name] = len(sys_pathnames_dict)

        for src, tgt in G.edges(strong_comp):
            if src in strong_comp and tgt in strong_comp:
                exe_src = orders[src]
                exe_tgt = orders[tgt]
                if exe_tgt < exe_src:
                    exe_low = exe_tgt
                    exe_high = exe_src
                else:
                    exe_low = exe_src
                    exe_high = exe_tgt

                edges_list = [
                    (sys_pathnames_dict[s], sys_pathnames_dict[t])
                    for s, t in G.edges(strong_comp)
                    if exe_low <= orders[s] <= exe_high
                    and exe_low <= orders[t] <= exe_high
                    and not (s == src and t == tgt)
                ]
                for vsrc, vtgtlist in iteritems(
                        G.get_edge_data(src, tgt)['conns']):
                    for vtgt in vtgtlist:
                        connections_list.append({
                            'src': vsrc,
                            'tgt': vtgt,
                            'cycle_arrows': edges_list
                        })
            else:  # edge is out of the SCC
                for vsrc, vtgtlist in iteritems(
                        G.get_edge_data(src, tgt)['conns']):
                    for vtgt in vtgtlist:
                        connections_list.append({'src': vsrc, 'tgt': vtgt})

    data_dict['sys_pathnames_list'] = sys_pathnames_list
    data_dict['connections_list'] = connections_list
    data_dict['abs2prom'] = root_group._var_abs2prom

    data_dict['driver'] = {
        'name': driver_name,
        'type': driver_type,
        'options': driver_options,
        'opt_settings': driver_opt_settings
    }
    data_dict['design_vars'] = root_group.get_design_vars()
    data_dict['responses'] = root_group.get_responses()

    data_dict['declare_partials_list'] = _get_declare_partials(root_group)

    return data_dict
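
A hedged mini-example of the cycle detection used above (requires networkx; the graph is hypothetical):

import networkx as nx

G = nx.DiGraph([('a', 'b'), ('b', 'c'), ('c', 'a'), ('c', 'd')])
cycles = [s for s in nx.strongly_connected_components(G) if len(s) > 1]
# cycles == [{'a', 'b', 'c'}]; 'd' is downstream but not part of any cycle.
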
Example #32
    def _setup_solvers(self, system, depth):
        """
        Assign system instance, set depth, and optionally perform setup.

        Parameters
        ----------
        system : <System>
            Pointer to the owning system.
        depth : int
            Depth of the current system (already incremented).
        """
        super(BroydenSolver, self)._setup_solvers(system, depth)
        self._recompute_jacobian = True
        self._computed_jacobians = 0

        self._disallow_discrete_outputs()

        if self.linear_solver is not None:
            self.linear_solver._setup_solvers(self._system, self._depth + 1)
        else:
            self.linear_solver = system.linear_solver

        if self.linesearch is not None:
            self.linesearch._setup_solvers(self._system, self._depth + 1)
            self.linesearch._do_subsolve = True

        states = self.options['state_vars']
        prom = system._var_allprocs_prom2abs_list['output']

        # Check names of states.
        bad_names = [name for name in states if name not in prom]
        if len(bad_names) > 0:
            msg = "The following variable names were not found: {}"
            raise ValueError(msg.format(', '.join(bad_names)))

        # Size linear system
        outputs = system._outputs
        if len(states) > 0:
            n = 0
            for name in states:
                size = len(outputs[name])
                self._idx[name] = (n, n + size)
                n += size

        else:
            self._full_inverse = True
            n = len(outputs._data)

        self.n = n
        self.Gm = np.empty((n, n))
        self.xm = np.empty((n, ))
        self.fxm = np.empty((n, ))
        self.delta_xm = None
        self.delta_fxm = None

        if self._full_inverse:

            # Can only use DirectSolver here.
            from openmdao.solvers.linear.direct import DirectSolver
            if not isinstance(self.linear_solver, DirectSolver):
                msg = "Linear solver must be DirectSolver when solving the full model."
                raise ValueError(msg)

            return

        # Always look for states that aren't being solved so we can warn the user.
        def sys_recurse(system, all_states):
            subs = system._subsystems_myproc
            if len(subs) == 0:

                # Skip implicit components that appear to solve themselves.
                from openmdao.core.implicitcomponent import ImplicitComponent
                if overrides_method('solve_nonlinear', system, ImplicitComponent):
                    return

                all_states.extend(system._list_states())

            else:
                for subsys in subs:
                    sub_nl = subsys.nonlinear_solver
                    if sub_nl and sub_nl.supports['implicit_components']:
                        continue
                    sys_recurse(subsys, all_states)

        all_states = []
        sys_recurse(system, all_states)
        all_states = [system._var_abs2prom['output'][name] for name in all_states]

        missing = set(all_states).difference(states)
        if len(missing) > 0:
            msg = "The following states are not covered by a solver, and may have been " + \
                  "omitted from the BroydenSolver 'state_vars': "
            msg += ', '.join(sorted(missing))
            simple_warning(msg)
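
A hedged illustration of the state bookkeeping above: with state_vars ['x', 'y'] of sizes 2 and 3, the sizing loop would produce

# self._idx == {'x': (0, 2), 'y': (2, 5)} and self.n == 5, so each state
# owns a contiguous slice of the concatenated vectors xm and fxm.
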
Example #33
def _get_viewer_data(data_source):
    """
    Get the data needed by the N2 viewer as a dictionary.

    Parameters
    ----------
    data_source : <Problem> or <Group> or str
        A Problem or Group or case recorder file name containing the model or model data.

    Returns
    -------
    dict
        A dictionary containing information about the model for use by the viewer.
    """
    if isinstance(data_source, Problem):
        root_group = data_source.model

        if not isinstance(root_group, Group):
            simple_warning(
                "The model is not a Group, viewer data is unavailable.")
            return {}

        driver = data_source.driver
        driver_name = driver.__class__.__name__
        driver_type = 'doe' if isinstance(driver,
                                          DOEDriver) else 'optimization'
        driver_options = {k: driver.options[k] for k in driver.options}
        driver_opt_settings = None
        if driver_type == 'optimization' and 'opt_settings' in dir(driver):
            driver_opt_settings = driver.opt_settings

    elif isinstance(data_source, Group):
        if not data_source.pathname:  # root group
            root_group = data_source
            driver_name = None
            driver_type = None
            driver_options = None
            driver_opt_settings = None
        else:
            # this function only makes sense when it is at the root
            return {}

    elif isinstance(data_source, str):
        return CaseReader(data_source, pre_load=False).problem_metadata

    else:
        raise TypeError(
            '_get_viewer_data only accepts Problems, Groups or filenames')

    data_dict = {}
    comp_exec_idx = [0]  # list so pass by ref
    comp_exec_orders = {}
    data_dict['tree'] = _get_tree_dict(root_group, comp_exec_orders,
                                       comp_exec_idx)

    connections_list = []

    sys_pathnames_list = []  # list of pathnames of systems found in cycles
    sys_pathnames_dict = {}  # map of pathnames to index of pathname in list

    # sort to make deterministic for testing
    sorted_abs_input2src = OrderedDict(
        sorted(root_group._conn_global_abs_in2out.items()))
    root_group._conn_global_abs_in2out = sorted_abs_input2src

    G = root_group.compute_sys_graph(comps_only=True)
    scc = nx.strongly_connected_components(G)
    scc_list = [s for s in scc if len(s) > 1]

    for in_abs, out_abs in iteritems(sorted_abs_input2src):
        if out_abs is None:
            continue

        src_subsystem = out_abs.rsplit('.', 1)[0]
        tgt_subsystem = in_abs.rsplit('.', 1)[0]
        src_to_tgt_str = src_subsystem + ' ' + tgt_subsystem

        count = 0
        edges_list = []

        for li in scc_list:
            if src_subsystem in li and tgt_subsystem in li:
                count += 1
                if count > 1:
                    raise ValueError('Count greater than 1')

                exe_tgt = comp_exec_orders[tgt_subsystem]
                exe_src = comp_exec_orders[src_subsystem]
                exe_low = min(exe_tgt, exe_src)
                exe_high = max(exe_tgt, exe_src)

                subg = G.subgraph(
                    n for n in li
                    if exe_low <= comp_exec_orders[n] <= exe_high)
                for edge in subg.edges():
                    edge_str = ' '.join(edge)
                    if edge_str != src_to_tgt_str:
                        src, tgt = edge

                        # add src & tgt to pathnames list & dict if not already there
                        for pathname in edge:
                            if pathname not in sys_pathnames_dict:
                                sys_pathnames_list.append(pathname)
                                sys_pathnames_dict[pathname] = len(
                                    sys_pathnames_list) - 1

                        # replace src & tgt pathnames with indices into pathname list
                        src = sys_pathnames_dict[src]
                        tgt = sys_pathnames_dict[tgt]

                        edges_list.append([src, tgt])

        if edges_list:
            edges_list.sort()  # make deterministic so same .html file will be produced each run
            connections_list.append(
                dict([('src', out_abs), ('tgt', in_abs),
                      ('cycle_arrows', edges_list)]))
        else:
            connections_list.append(dict([('src', out_abs), ('tgt', in_abs)]))

    data_dict['sys_pathnames_list'] = sys_pathnames_list
    data_dict['connections_list'] = connections_list
    data_dict['abs2prom'] = root_group._var_abs2prom

    data_dict['driver'] = {
        'name': driver_name,
        'type': driver_type,
        'options': driver_options,
        'opt_settings': driver_opt_settings
    }
    data_dict['design_vars'] = root_group.get_design_vars()
    data_dict['responses'] = root_group.get_responses()

    data_dict['declare_partials_list'] = _get_declare_partials(root_group)

    return data_dict
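
For reference, a hedged sketch of the three accepted call forms, assuming a
previously set-up Problem named prob and an existing recording file
'cases.sql':

data = _get_viewer_data(prob)           # from a Problem
data = _get_viewer_data(prob.model)     # from the root Group
data = _get_viewer_data('cases.sql')    # from a case recorder file name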
Example #34
    def _setup_partials(self, recurse=True):
        """
        Process all partials and approximations that the user declared.

        Metamodel needs to declare its partials after inputs and outputs are known.

        Parameters
        ----------
        recurse : bool
            Whether to call this method in subsystems.
        """
        super(MetaModelUnStructuredComp, self)._setup_partials()

        vec_size = self.options['vec_size']
        if vec_size > 1:
            # Sparse specification of partials for vectorized models.
            for wrt, n_wrt in self._surrogate_input_names:
                for of, shape_of in self._surrogate_output_names:

                    n_of = np.prod(shape_of)
                    rows = np.repeat(np.arange(n_of), n_wrt)
                    cols = np.tile(np.arange(n_wrt), n_of)
                    nnz = len(rows)
                    rows = np.tile(rows, vec_size) + np.repeat(np.arange(vec_size), nnz) * n_of
                    cols = np.tile(cols, vec_size) + np.repeat(np.arange(vec_size), nnz) * n_wrt

                    self._declare_partials(of=of, wrt=wrt, rows=rows, cols=cols)

        else:
            # Dense specification of partials for non-vectorized models.
            self._declare_partials(of=[name[0] for name in self._surrogate_output_names],
                                   wrt=[name[0] for name in self._surrogate_input_names])

            # warn the user that if they don't explicitly set options for fd,
            #   the defaults will be used
            # get a list of approximated partials
            declared_partials = set()
            for of, wrt, method, fd_options in self._approximated_partials:
                pattern_matches = self._find_partial_matches(of, wrt)
                for of_bundle, wrt_bundle in product(*pattern_matches):
                    of_pattern, of_matches = of_bundle
                    wrt_pattern, wrt_matches = wrt_bundle
                    for rel_key in product(of_matches, wrt_matches):
                        abs_key = rel_key2abs_key(self, rel_key)
                        declared_partials.add(abs_key)
            non_declared_partials = []
            for of, n_of in self._surrogate_output_names:
                has_derivs = False
                surrogate = self._metadata(of).get('surrogate')
                if surrogate:
                    has_derivs = overrides_method('linearize', surrogate, SurrogateModel)
                if not has_derivs:
                    for wrt, n_wrt in self._surrogate_input_names:
                        abs_key = rel_key2abs_key(self, (of, wrt))
                        if abs_key not in declared_partials:
                            non_declared_partials.append(abs_key)
            if non_declared_partials:
                msg = "Because the MetaModelUnStructuredComp '{}' uses a surrogate " \
                      "which does not define a linearize method,\nOpenMDAO will use " \
                      "finite differences to compute derivatives. Some of the derivatives " \
                      "will be computed\nusing default finite difference " \
                      "options because they were not explicitly declared.\n".format(self.name)
                msg += "The derivatives computed using the defaults are:\n"
                for abs_key in non_declared_partials:
                    msg += "    {}, {}\n".format(*abs_key)
                simple_warning(msg, RuntimeWarning)

            for out_name, out_shape in self._surrogate_output_names:
                surrogate = self._metadata(out_name).get('surrogate')
                if surrogate and not overrides_method('linearize', surrogate, SurrogateModel):
                    self._approx_partials(of=out_name,
                                          wrt=[name[0] for name in self._surrogate_input_names],
                                          method='fd')
                    if "fd" not in self._approx_schemes:
                        self._approx_schemes['fd'] = FiniteDifference()
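
A worked check of the sparsity pattern built in the vec_size > 1 branch
above, using small illustrative sizes:

import numpy as np

n_of, n_wrt, vec_size = 2, 3, 2
rows = np.repeat(np.arange(n_of), n_wrt)   # [0 0 0 1 1 1]
cols = np.tile(np.arange(n_wrt), n_of)     # [0 1 2 0 1 2]
nnz = len(rows)
rows = np.tile(rows, vec_size) + np.repeat(np.arange(vec_size), nnz) * n_of
cols = np.tile(cols, vec_size) + np.repeat(np.arange(vec_size), nnz) * n_wrt
# rows -> [0 0 0 1 1 1 2 2 2 3 3 3]
# cols -> [0 1 2 0 1 2 3 4 5 3 4 5]
# One dense n_of x n_wrt block per vectorized point, block-diagonal overall.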
Example #35
    def run(self):
        """
        Execute pyOptSparse.

        Note that pyOpt controls the execution, and the individual optimizers
        (e.g., SNOPT) control the iteration.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        problem = self._problem()
        model = problem.model
        relevant = model._relevant
        self.pyopt_solution = None
        self._total_jac = None
        self.iter_count = 0
        fwd = problem._mode == 'fwd'
        optimizer = self.options['optimizer']
        self._quantities = []

        self._check_for_missing_objective()

        # Only need initial run if we have linear constraints or if we are using an optimizer that
        # doesn't perform one initially.
        con_meta = self._cons
        model_ran = False
        if optimizer in run_required or np.any(
            [con['linear'] for con in self._cons.values()]):
            with RecordingDebugging(self._get_name(), self.iter_count,
                                    self) as rec:
                # Initial Run
                model.run_solve_nonlinear()
                rec.abs = 0.0
                rec.rel = 0.0
                model_ran = True
            self.iter_count += 1

        # compute dynamic simul deriv coloring or just sparsity if option is set
        if c_mod._use_total_sparsity:
            coloring = None
            if self._coloring_info['coloring'] is None and self._coloring_info[
                    'dynamic']:
                coloring = c_mod.dynamic_total_coloring(
                    self,
                    run_model=not model_ran,
                    fname=self._get_total_coloring_fname())

            if coloring is not None:
                # if the improvement wasn't large enough, don't use coloring
                pct = coloring._solves_info()[-1]
                info = self._coloring_info
                if info['min_improve_pct'] > pct:
                    info['coloring'] = info['static'] = None
                    simple_warning(
                        "%s: Coloring was deactivated.  Improvement of %.1f%% was less "
                        "than min allowed (%.1f%%)." %
                        (self.msginfo, pct, info['min_improve_pct']))

        comm = None if isinstance(problem.comm, FakeComm) else problem.comm
        opt_prob = Optimization(self.options['title'],
                                weak_method_wrapper(self, '_objfunc'),
                                comm=comm)

        # Add all design variables
        param_meta = self._designvars
        self._indep_list = indep_list = list(param_meta)
        param_vals = self.get_design_var_values()

        for name, meta in param_meta.items():
            opt_prob.addVarGroup(name,
                                 meta['size'],
                                 type='c',
                                 value=param_vals[name],
                                 lower=meta['lower'],
                                 upper=meta['upper'])

        opt_prob.finalizeDesignVariables()

        # Add all objectives
        objs = self.get_objective_values()
        for name in objs:
            opt_prob.addObj(name)
            self._quantities.append(name)

        # Calculate and save derivatives for any linear constraints.
        lcons = [key for (key, con) in con_meta.items() if con['linear']]
        if len(lcons) > 0:
            _lin_jacs = self._compute_totals(of=lcons,
                                             wrt=indep_list,
                                             return_format='dict')
            # convert all of our linear constraint jacs to COO format. Otherwise pyoptsparse will
            # do it for us and we'll end up with a fully dense COO matrix and very slow evaluation
            # of linear constraints!
            to_remove = []
            for jacdct in _lin_jacs.values():
                for n, subjac in jacdct.items():
                    if isinstance(subjac, np.ndarray):
                        # we can safely use coo_matrix to automatically convert the ndarray
                        # since our linear constraint jacs are constant, so zeros won't become
                        # nonzero during the optimization.
                        mat = coo_matrix(subjac)
                        if mat.row.size > 0:
                            # convert to 'coo' format here to avoid an emphatic warning
                            # by pyoptsparse.
                            jacdct[n] = {
                                'coo': [mat.row, mat.col, mat.data],
                                'shape': mat.shape
                            }

        # Add all equality constraints
        for name, meta in con_meta.items():
            if meta['equals'] is None:
                continue
            size = meta['size']
            lower = upper = meta['equals']
            if fwd:
                wrt = [v for v in indep_list if name in relevant[v]]
            else:
                rels = relevant[name]
                wrt = [v for v in indep_list if v in rels]

            if meta['linear']:
                jac = {w: _lin_jacs[name][w] for w in wrt}
                opt_prob.addConGroup(name,
                                     size,
                                     lower=lower,
                                     upper=upper,
                                     linear=True,
                                     wrt=wrt,
                                     jac=jac)
            else:
                if name in self._res_jacs:
                    resjac = self._res_jacs[name]
                    jac = {n: resjac[n] for n in wrt}
                else:
                    jac = None
                opt_prob.addConGroup(name,
                                     size,
                                     lower=lower,
                                     upper=upper,
                                     wrt=wrt,
                                     jac=jac)
                self._quantities.append(name)

        # Add all inequality constraints
        for name, meta in con_meta.items():
            if meta['equals'] is not None:
                continue
            size = meta['size']

            # Bounds - double sided is supported
            lower = meta['lower']
            upper = meta['upper']

            if fwd:
                wrt = [v for v in indep_list if name in relevant[v]]
            else:
                rels = relevant[name]
                wrt = [v for v in indep_list if v in rels]

            if meta['linear']:
                jac = {w: _lin_jacs[name][w] for w in wrt}
                opt_prob.addConGroup(name,
                                     size,
                                     upper=upper,
                                     lower=lower,
                                     linear=True,
                                     wrt=wrt,
                                     jac=jac)
            else:
                if name in self._res_jacs:
                    resjac = self._res_jacs[name]
                    jac = {n: resjac[n] for n in wrt}
                else:
                    jac = None
                opt_prob.addConGroup(name,
                                     size,
                                     upper=upper,
                                     lower=lower,
                                     wrt=wrt,
                                     jac=jac)
                self._quantities.append(name)

        # Instantiate the requested optimizer
        try:
            _tmp = __import__('pyoptsparse', globals(), locals(), [optimizer],
                              0)
            opt = getattr(_tmp, optimizer)()

        except Exception as err:
            # Change whatever pyopt gives us to an ImportError with a readable
            # message, chaining from the original error to keep its traceback.
            msg = "Optimizer %s is not available in this installation." % optimizer
            raise ImportError(msg) from err

        # Process any default optimizer-specific settings.
        if optimizer in DEFAULT_OPT_SETTINGS:
            for name, value in DEFAULT_OPT_SETTINGS[optimizer].items():
                if name not in self.opt_settings:
                    self.opt_settings[name] = value

        # Set optimization options
        for option, value in self.opt_settings.items():
            opt.setOption(option, value)

        # Execute the optimization problem
        if self.options['gradient method'] == 'pyopt_fd':

            # Use pyOpt's internal finite difference
            # TODO: Need to get this from OpenMDAO
            # fd_step = problem.model.deriv_options['step_size']
            fd_step = 1e-6
            sol = opt(opt_prob,
                      sens='FD',
                      sensStep=fd_step,
                      storeHistory=self.hist_file,
                      hotStart=self.hotstart_file)

        elif self.options['gradient method'] == 'snopt_fd':
            if self.options['optimizer'] == 'SNOPT':

                # Use SNOPT's internal finite difference
                # TODO: Need to get this from OpenMDAO
                # fd_step = problem.model.deriv_options['step_size']
                fd_step = 1e-6
                sol = opt(opt_prob,
                          sens=None,
                          sensStep=fd_step,
                          storeHistory=self.hist_file,
                          hotStart=self.hotstart_file)

            else:
                raise Exception(
                    "SNOPT's internal finite difference can only be used with SNOPT"
                )
        else:

            # Use OpenMDAO's differentiator for the gradient
            sol = opt(opt_prob,
                      sens=weak_method_wrapper(self, '_gradfunc'),
                      storeHistory=self.hist_file,
                      hotStart=self.hotstart_file)

        # Print results
        if self.options['print_results']:
            print(sol)

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        dv_dict = sol.getDVs()
        for name in indep_list:
            self.set_design_var(name, dv_dict[name])

        with RecordingDebugging(self._get_name(), self.iter_count,
                                self) as rec:
            model.run_solve_nonlinear()
            rec.abs = 0.0
            rec.rel = 0.0
        self.iter_count += 1

        # Save the most recent solution.
        self.pyopt_solution = sol

        try:
            exit_status = sol.optInform['value']
            self.fail = False

            # These are various failed statuses.
            if optimizer == 'IPOPT':
                if exit_status not in {0, 1}:
                    self.fail = True
            elif exit_status > 2:
                self.fail = True

        except KeyError:
            # optimizers other than pySNOPT may not populate this dict
            pass

        # revert signal handler to cached version
        sigusr = self.options['user_teriminate_signal']
        if sigusr is not None:
            signal.signal(sigusr, self._signal_cache)
            self._signal_cache = None  # to prevent memory leak test from failing

        return self.fail
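
An illustrative sketch of the COO conversion run() performs for linear
constraint jacobians; the array values are made up:

import numpy as np
from scipy.sparse import coo_matrix

subjac = np.array([[0., 2., 0.],
                   [0., 0., 3.]])
mat = coo_matrix(subjac)  # keeps only the nonzero entries
jac_entry = {'coo': [mat.row, mat.col, mat.data], 'shape': mat.shape}
# mat.row -> [0 1], mat.col -> [1 2], mat.data -> [2. 3.]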
Example #37
def _get_viewer_data(data_source):
    """
    Get the data needed by the N2 viewer as a dictionary.

    Parameters
    ----------
    data_source : <Problem> or <Group> or str
        A Problem or Group or case recorder file name containing the model or model data.

    Returns
    -------
    dict
        A dictionary containing information about the model for use by the viewer.
    """
    if isinstance(data_source, Problem):
        root_group = data_source.model
        if not isinstance(root_group, Group):
            simple_warning("The model is not a Group, viewer data is unavailable.")
            return {}

    elif isinstance(data_source, Group):
        if not data_source.pathname:  # root group
            root_group = data_source
        else:
            # this function only makes sense when it is at the root
            return {}

    elif isinstance(data_source, str):
        check_valid_sqlite3_db(data_source)
        import sqlite3
        con = sqlite3.connect(data_source, detect_types=sqlite3.PARSE_DECLTYPES)
        cur = con.cursor()
        cur.execute("SELECT format_version FROM metadata")
        row = cur.fetchone()
        format_version = row[0]

        cur.execute("SELECT model_viewer_data FROM driver_metadata;")
        model_text = cur.fetchone()

        from six import PY2, PY3
        if row is not None:
            if format_version >= 3:
                return json.loads(model_text[0])
            elif format_version in (1, 2):
                if PY2:
                    import cPickle
                    return cPickle.loads(str(model_text[0]))
                if PY3:
                    import pickle
                    return pickle.loads(model_text[0])

    else:
        raise TypeError('_get_viewer_data only accepts Problems, Groups or filenames')

    data_dict = {}
    comp_exec_idx = [0]  # list so pass by ref
    comp_exec_orders = {}
    data_dict['tree'] = _get_tree_dict(root_group, comp_exec_orders, comp_exec_idx)

    connections_list = []

    # sort to make deterministic for testing
    sorted_abs_input2src = OrderedDict(sorted(root_group._conn_global_abs_in2out.items()))
    root_group._conn_global_abs_in2out = sorted_abs_input2src
    G = root_group.compute_sys_graph(comps_only=True)
    scc = nx.strongly_connected_components(G)
    scc_list = [s for s in scc if len(s) > 1]
    for in_abs, out_abs in iteritems(sorted_abs_input2src):
        if out_abs is None:
            continue
        src_subsystem = out_abs.rsplit('.', 1)[0]
        tgt_subsystem = in_abs.rsplit('.', 1)[0]
        src_to_tgt_str = src_subsystem + ' ' + tgt_subsystem

        count = 0
        edges_list = []
        for li in scc_list:
            if src_subsystem in li and tgt_subsystem in li:
                count += 1
                if count > 1:
                    raise ValueError('Count greater than 1')

                exe_tgt = comp_exec_orders[tgt_subsystem]
                exe_src = comp_exec_orders[src_subsystem]
                exe_low = min(exe_tgt, exe_src)
                exe_high = max(exe_tgt, exe_src)

                subg = G.subgraph(n for n in li if exe_low <= comp_exec_orders[n] <= exe_high)
                for edge in subg.edges():
                    edge_str = ' '.join(edge)
                    if edge_str != src_to_tgt_str:
                        edges_list.append(edge_str)

        if edges_list:
            edges_list.sort()  # make deterministic so same .html file will be produced each run
            connections_list.append(OrderedDict([('src', out_abs), ('tgt', in_abs),
                                                 ('cycle_arrows', edges_list)]))
        else:
            connections_list.append(OrderedDict([('src', out_abs), ('tgt', in_abs)]))

    data_dict['connections_list'] = connections_list

    data_dict['abs2prom'] = root_group._var_abs2prom

    return data_dict
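
A hedged standalone sketch of the recorder-file branch above, assuming a
file 'cases.sql' written with format_version >= 3:

import json
import sqlite3

con = sqlite3.connect('cases.sql', detect_types=sqlite3.PARSE_DECLTYPES)
cur = con.cursor()
cur.execute("SELECT model_viewer_data FROM driver_metadata;")
viewer_data = json.loads(cur.fetchone()[0])
con.close()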
Example #38
    def _setup_procs(self, pathname, comm, mode):
        """
        Execute first phase of the setup process.

        Distribute processors, assign pathnames, and call setup on the component.

        Parameters
        ----------
        pathname : str
            Global name of the system, including the path.
        comm : MPI.Comm or <FakeComm>
            MPI communicator object.
        mode : string
            Derivatives calculation mode, 'fwd' for forward, and 'rev' for
            reverse (adjoint). Default is 'rev'.
        """
        self.pathname = pathname

        orig_comm = comm
        if self._num_par_fd > 1:
            if comm.size > 1:
                comm = self._setup_par_fd_procs(comm)
            elif not MPI:
                msg = ("'%s': MPI is not active but num_par_fd = %d. No parallel finite difference "
                       "will be performed." % (self.pathname, self._num_par_fd))
                simple_warning(msg)

        self.comm = comm
        self._mode = mode
        self._subsystems_proc_range = []

        # Clear out old variable information so that we can call setup on the component.
        self._var_rel_names = {'input': [], 'output': []}
        self._var_rel2meta = {}
        self._design_vars = OrderedDict()
        self._responses = OrderedDict()

        self._static_mode = False
        self._var_rel2meta.update(self._static_var_rel2meta)
        for type_ in ['input', 'output']:
            self._var_rel_names[type_].extend(self._static_var_rel_names[type_])
        self._design_vars.update(self._static_design_vars)
        self._responses.update(self._static_responses)
        self.setup()

        # check to make sure that if num_par_fd > 1 that this system is actually doing FD.
        # Unfortunately we have to do this check after system setup has been called because that's
        # when declare_partials generally happens, so we raise an exception here instead of just
        # resetting the value of num_par_fd (because the comm has already been split and possibly
        # used by the system setup).
        if self._num_par_fd > 1 and orig_comm.size > 1 and not (self._owns_approx_jac or
                                                                self._approximated_partials):
            raise RuntimeError("'%s': num_par_fd is > 1 but no FD is active." % self.pathname)

        self._static_mode = True

        if self.options['distributed']:
            if self._distributed_vector_class is not None:
                self._vector_class = self._distributed_vector_class
            else:
                simple_warning("The 'distributed' option is set to True for Component %s, "
                               "but there is no distributed vector implementation (MPI/PETSc) "
                               "available. The default non-distributed vectors will be used."
                               % pathname)
                self._vector_class = self._local_vector_class
        else:
            self._vector_class = self._local_vector_class
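
A minimal sketch of the 'distributed' option handled at the end of
_setup_procs, assuming the OpenMDAO 2.x/3.0-era pattern of setting it in
initialize(); MyDistComp is hypothetical:

import openmdao.api as om

class MyDistComp(om.ExplicitComponent):
    def initialize(self):
        # Request distributed vectors; _setup_procs falls back to local
        # vectors with a warning if no MPI/PETSc implementation is available.
        self.options['distributed'] = True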
Example #39
    def compute_approximations(self, system, jac, total=False):
        """
        Execute the system to compute the approximate sub-Jacobians.

        Parameters
        ----------
        system : System
            System on which the execution is run.
        jac : dict-like
            Approximations are stored in the given dict-like object.
        total : bool
            If True total derivatives are being approximated, else partials.
        """
        if len(self._exec_list) == 0:
            return

        if system.under_complex_step:

            # If we are nested under another complex step, then warn and swap to FD.
            if not self._fd:
                from openmdao.approximation_schemes.finite_difference import FiniteDifference

                msg = "Nested complex step detected. Finite difference will be used for '%s'."
                simple_warning(msg % system.pathname)

                fd = self._fd = FiniteDifference()
                for item in self._exec_list:
                    fd.add_approximation(item[0:2], {})

            self._fd.compute_approximations(system, jac, total=total)
            return

        if total:
            current_vec = system._outputs
        else:
            current_vec = system._residuals

        # Clean vector for results
        results_clone = current_vec._clone(True)

        # Turn on complex step.
        system._set_complex_step_mode(True)
        results_clone.set_complex_step_mode(True)

        # To support driver src_indices, we need to override some checks in Jacobian, but do it
        # selectively.
        uses_src_indices = (system._owns_approx_of_idx or system._owns_approx_wrt_idx) and \
            not isinstance(jac, dict)

        use_parallel_fd = system._num_par_fd > 1 and (system._full_comm is not None and
                                                      system._full_comm.size > 1)
        num_par_fd = system._num_par_fd if use_parallel_fd else 1
        is_parallel = use_parallel_fd or system.comm.size > 1

        results = defaultdict(list)
        iproc = system.comm.rank
        owns = system._owning_rank
        mycomm = system._full_comm if use_parallel_fd else system.comm

        fd_count = 0
        approx_groups = self._get_approx_groups(system)
        for tup in approx_groups:
            wrt, delta, fact, in_idx, in_size, outputs = tup
            for i_count, idx in enumerate(in_idx):
                if fd_count % num_par_fd == system._par_fd_id:
                    # Run the complex step
                    result = self._run_point_complex(system, wrt, idx, delta, results_clone, total)

                    if is_parallel:
                        for of, _, out_idx in outputs:
                            if owns[of] == iproc:
                                results[(of, wrt)].append(
                                    (i_count, result._views_flat[of][out_idx].imag.copy()))
                    else:
                        for of, subjac, out_idx in outputs:
                            subjac[:, i_count] = result._views_flat[of][out_idx].imag

                fd_count += 1

        if is_parallel:
            results = _gather_jac_results(mycomm, results)

        for wrt, _, fact, _, _, outputs in approx_groups:
            for of, subjac, _ in outputs:
                key = (of, wrt)
                if is_parallel:
                    for i, result in results[key]:
                        subjac[:, i] = result

                subjac *= fact
                if uses_src_indices:
                    jac._override_checks = True
                    jac[key] = subjac
                    jac._override_checks = False
                else:
                    jac[key] = subjac

        # Turn off complex step.
        system._set_complex_step_mode(False)
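
The identity behind the .imag extraction above: for an analytic f,
df/dx ~ Im(f(x + i*h)) / h with no subtractive cancellation, so the step h
can be made tiny. A quick numeric check:

import numpy as np

def f(x):
    return np.exp(x) * np.sin(x)

x, h = 1.3, 1e-30
deriv_cs = f(x + 1j * h).imag / h
deriv_exact = np.exp(x) * (np.sin(x) + np.cos(x))
print(abs(deriv_cs - deriv_exact))  # ~0.0 to machine precision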
Example #40
def _get_viewer_data(data_source):
    """
    Get the data needed by the N2 viewer as a dictionary.

    Parameters
    ----------
    data_source : <Problem> or <Group> or str
        A Problem or Group or case recorder file name containing the model or model data.

    Returns
    -------
    dict
        A dictionary containing information about the model for use by the viewer.
    """
    if isinstance(data_source, Problem):
        root_group = data_source.model

        if not isinstance(root_group, Group):
            simple_warning("The model is not a Group, viewer data is unavailable.")
            return {}

        driver = data_source.driver
        driver_name = driver.__class__.__name__
        driver_type = 'doe' if isinstance(driver, DOEDriver) else 'optimization'
        driver_options = {k: driver.options[k] for k in driver.options}
        driver_opt_settings = None
        if driver_type == 'optimization' and 'opt_settings' in dir(driver):
            driver_opt_settings = driver.opt_settings

    elif isinstance(data_source, Group):
        if not data_source.pathname:  # root group
            root_group = data_source
            driver_name = None
            driver_type = None
            driver_options = None
            driver_opt_settings = None
        else:
            # this function only makes sense when it is at the root
            return {}

    elif isinstance(data_source, str):
        return CaseReader(data_source, pre_load=False).problem_metadata

    else:
        raise TypeError('_get_viewer_data only accepts Problems, Groups or filenames')

    data_dict = {}
    comp_exec_idx = [0]  # list so pass by ref
    comp_exec_orders = {}
    data_dict['tree'] = _get_tree_dict(root_group, comp_exec_orders, comp_exec_idx)

    connections_list = []

    sys_pathnames_list = []  # list of pathnames of systems found in cycles
    sys_pathnames_dict = {}  # map of pathnames to index of pathname in list

    # sort to make deterministic for testing
    sorted_abs_input2src = OrderedDict(sorted(root_group._conn_global_abs_in2out.items()))
    root_group._conn_global_abs_in2out = sorted_abs_input2src

    G = root_group.compute_sys_graph(comps_only=True)
    scc = nx.strongly_connected_components(G)
    scc_list = [s for s in scc if len(s) > 1]

    for in_abs, out_abs in iteritems(sorted_abs_input2src):
        if out_abs is None:
            continue

        src_subsystem = out_abs.rsplit('.', 1)[0]
        tgt_subsystem = in_abs.rsplit('.', 1)[0]
        src_to_tgt_str = src_subsystem + ' ' + tgt_subsystem

        count = 0
        edges_list = []

        for li in scc_list:
            if src_subsystem in li and tgt_subsystem in li:
                count += 1
                if count > 1:
                    raise ValueError('Count greater than 1')

                exe_tgt = comp_exec_orders[tgt_subsystem]
                exe_src = comp_exec_orders[src_subsystem]
                exe_low = min(exe_tgt, exe_src)
                exe_high = max(exe_tgt, exe_src)

                subg = G.subgraph(n for n in li if exe_low <= comp_exec_orders[n] <= exe_high)
                for edge in subg.edges():
                    edge_str = ' '.join(edge)
                    if edge_str != src_to_tgt_str:
                        src, tgt = edge

                        # add src & tgt to pathnames list & dict if not already there
                        for pathname in edge:
                            if pathname not in sys_pathnames_dict:
                                sys_pathnames_list.append(pathname)
                                sys_pathnames_dict[pathname] = len(sys_pathnames_list) - 1

                        # replace src & tgt pathnames with indices into pathname list
                        src = sys_pathnames_dict[src]
                        tgt = sys_pathnames_dict[tgt]

                        edges_list.append([src, tgt])

        if edges_list:
            edges_list.sort()  # make deterministic so same .html file will be produced each run
            connections_list.append(dict([('src', out_abs), ('tgt', in_abs),
                                          ('cycle_arrows', edges_list)]))
        else:
            connections_list.append(dict([('src', out_abs), ('tgt', in_abs)]))

    data_dict['sys_pathnames_list'] = sys_pathnames_list
    data_dict['connections_list'] = connections_list
    data_dict['abs2prom'] = root_group._var_abs2prom

    data_dict['driver'] = {'name': driver_name, 'type': driver_type,
                           'options': driver_options, 'opt_settings': driver_opt_settings}
    data_dict['design_vars'] = root_group.get_design_vars()
    data_dict['responses'] = root_group.get_responses()

    return data_dict