Example 1
def _print_violations(outputs, lower, upper):
    """
    Print out which variables exceed their bounds.

    Parameters
    ----------
    outputs : <Vector>
        Vector containing the outputs.
    lower : <Vector>
        Vector containing the lower bounds.
    upper : <Vector>
        Vector containing the upper bounds.
    """
    start = end = 0
    for name, val in outputs._abs_item_iter():
        end += val.size
        if upper is not None and any(val > upper[start:end]):
            msg = (
                f"'{name}' exceeds upper bounds\n  Val: {val}\n  Upper: {upper[start:end]}\n"
            )
            issue_warning(msg, category=SolverWarning)

        if lower is not None and any(val < lower[start:end]):
            msg = (
                f"'{name}' exceeds lower bounds\n  Val: {val}\n  Lower: {lower[start:end]}\n"
            )
            issue_warning(msg, category=SolverWarning)

        start = end
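A minimal standalone sketch of the same slice-and-compare pattern, using the plain warnings module in place of OpenMDAO's issue_warning and a dict of numpy arrays standing in for the <Vector> API (both substitutions are assumptions for illustration):

import warnings

import numpy as np

def print_violations(outputs, lower, upper):
    """Warn about entries of each named array that exceed the flat bounds."""
    start = end = 0
    for name, val in outputs.items():
        end += val.size
        if upper is not None and np.any(val > upper[start:end]):
            warnings.warn(f"'{name}' exceeds upper bounds: {val} > {upper[start:end]}")
        if lower is not None and np.any(val < lower[start:end]):
            warnings.warn(f"'{name}' exceeds lower bounds: {val} < {lower[start:end]}")
        start = end

print_violations({'x': np.array([0.5, 2.0])},
                 lower=np.array([0.0, 0.0]), upper=np.array([1.0, 1.0]))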
Example 2
    def record_metadata_system(self, system, run_number=None):
        """
        Record system metadata.

        Parameters
        ----------
        system : System
            The System for which to record metadata.
        run_number : int or None
            Number indicating which run the metadata is associated with.
            None for the first run, 1 for the second, etc.
        """
        if self._record_metadata and self.metadata_connection:

            scaling_vecs, user_options = self._get_metadata_system(system)

            if scaling_vecs is None:
                return

            scaling_factors = pickle.dumps(scaling_vecs, self._pickle_version)

            # try to pickle the metadata; if it fails, find the offending
            # option, record an empty OptionsDictionary instead, and warn
            try:
                pickled_metadata = pickle.dumps(user_options, self._pickle_version)
            except Exception:
                bad_key = None
                for key, values in user_options._dict.items():
                    try:
                        pickle.dumps(values, self._pickle_version)
                    except Exception:
                        bad_key = key
                        break
                pickled_metadata = pickle.dumps(OptionsDictionary(), self._pickle_version)
                msg = f"Trying to record option '{bad_key}' which cannot be pickled on this " \
                      "system. Set option 'recordable' to False. Skipping recording options " \
                      "for this system."
                issue_warning(msg,
                              prefix=system.msginfo,
                              category=CaseRecorderWarning)

            path = system.pathname
            if not path:
                path = 'root'

            scaling_factors = sqlite3.Binary(zlib.compress(scaling_factors))
            pickled_metadata = sqlite3.Binary(zlib.compress(pickled_metadata))

            if run_number is None:
                name = path
            else:
                name = META_KEY_SEP.join([path, str(run_number)])

            with self.metadata_connection as m:
                m.execute(
                    "INSERT INTO system_metadata"
                    "(id, scaling_factors, component_metadata) "
                    "VALUES(?,?,?)", (name, scaling_factors, pickled_metadata))
Example 3
    def compute_approx_col_iter(self, system, total=False, under_cs=False):
        """
        Execute the system to compute the approximate sub-Jacobians.

        Parameters
        ----------
        system : System
            System on which the execution is run.
        total : bool
            If True total derivatives are being approximated, else partials.
        under_cs : bool
            True if we're currently under complex step at a higher level.
        """
        if not self._wrt_meta:
            return

        if system.under_complex_step:

            # If we are nested under another complex step, then warn and swap to FD.
            if not self._fd:
                from openmdao.approximation_schemes.finite_difference import FiniteDifference

                issue_warning("Nested complex step detected. Finite difference will be used.",
                              prefix=system.pathname, category=DerivativesWarning)

                fd = self._fd = FiniteDifference()
                empty = {}
                for wrt in self._wrt_meta:
                    fd.add_approximation(wrt, system, empty)

            yield from self._fd.compute_approx_col_iter(system, total=total)
            return

        saved_inputs = system._inputs._get_data().copy()
        system._inputs._data.imag[:] = 0.0
        saved_outputs = system._outputs.asarray(copy=True)
        system._outputs._data.imag[:] = 0.0
        saved_resids = system._residuals.asarray(copy=True)
        system._residuals._data.imag[:] = 0.0

        # Turn on complex step.
        system._set_complex_step_mode(True)

        try:
            yield from self._compute_approx_col_iter(system, total, under_cs=True)
        finally:
            # Turn off complex step.
            system._set_complex_step_mode(False)

        system._inputs.set_val(saved_inputs)
        system._outputs.set_val(saved_outputs)
        system._residuals.set_val(saved_resids)
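Nesting fails because the complex-step trick uses the imaginary part as its perturbation channel, so it cannot be applied twice; that is why the code above zeroes the imaginary parts and falls back to finite difference. A standalone sketch of the underlying approximation (not OpenMDAO code):

import numpy as np

def complex_step_deriv(f, x, h=1e-30):
    # df/dx ~ Im(f(x + i*h)) / h; no subtractive cancellation, so h can be tiny
    return np.imag(f(x + 1j * h)) / h

print(complex_step_deriv(np.sin, 1.0))  # ~= cos(1.0)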
Example 4
def _get_used_before_calc_subs(group, input_srcs):
    """
    Return Systems that are executed out of dataflow order.

    Parameters
    ----------
    group : <Group>
        The Group where we're checking subsystem order.
    input_srcs : dict
        Dict mapping each input's absolute variable name to the absolute name
        of its source.  This describes all variable connections, either
        explicit or implicit, in the entire model.

    Returns
    -------
    dict
        A dict mapping names of target Systems to a set of names of their
        source Systems that execute after them.
    """
    parallel_solver = {}
    allsubs = group._subsystems_allprocs
    for sub, i in allsubs.values():
        if hasattr(sub, '_mpi_proc_allocator') and sub._mpi_proc_allocator.parallel:
            parallel_solver[sub.name] = sub.nonlinear_solver.SOLVER

    glen = len(group.pathname.split('.')) if group.pathname else 0

    ubcs = defaultdict(set)
    for tgt_abs, src_abs in input_srcs.items():
        if src_abs is not None:
            iparts = tgt_abs.split('.')
            oparts = src_abs.split('.')
            src_sys = oparts[glen]
            tgt_sys = iparts[glen]
            hierarchy_check = oparts[glen + 1] == iparts[glen + 1]

            if (src_sys in parallel_solver and tgt_sys in parallel_solver
                    and (parallel_solver[src_sys]
                         not in ["NL: NLBJ", "NL: Newton", "BROYDEN"])
                    and src_sys == tgt_sys and not hierarchy_check):
                msg = f"Need to attach NonlinearBlockJac, NewtonSolver, or BroydenSolver " \
                      f"to '{src_sys}' when connecting components inside parallel groups"
                issue_warning(msg, category=SetupWarning)
                ubcs[tgt_abs.rsplit('.', 1)[0]].add(src_abs.rsplit('.', 1)[0])
            if (src_sys in allsubs and tgt_sys in allsubs
                    and (allsubs[src_sys].index > allsubs[tgt_sys].index)):
                ubcs[tgt_sys].add(src_sys)

    return ubcs
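The core check reduces to comparing execution indices of the source and target subsystems. A toy sketch with a hand-built connection dict standing in for input_srcs (names are illustrative):

from collections import defaultdict

# execution order of top-level subsystems (name -> index)
order = {'comp1': 0, 'comp2': 1}
# input -> source connections; comp1 consumes a value computed after it runs
input_srcs = {'comp1.x': 'comp2.y'}

ubcs = defaultdict(set)
for tgt, src in input_srcs.items():
    src_sys, tgt_sys = src.split('.')[0], tgt.split('.')[0]
    if order[src_sys] > order[tgt_sys]:
        ubcs[tgt_sys].add(src_sys)

print(dict(ubcs))  # {'comp1': {'comp2'}}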
Example 5
def conditional_error(msg, exc=RuntimeError, category=UserWarning):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        This exception class is used to create the exception to be raised.
    category : warning class
        This category is the class of warning to be issued.
    """
    if ignore_errors():
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
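A self-contained sketch of the same raise-or-warn switch, with a module-level flag standing in for OpenMDAO's ignore_errors() (an assumption for illustration):

import warnings

_ignore_errors = False

def conditional_error(msg, exc=RuntimeError, category=UserWarning):
    """Warn if errors are being ignored, otherwise raise."""
    if _ignore_errors:
        warnings.warn(msg, category=category)
    else:
        raise exc(msg)

try:
    conditional_error("setup problem detected", exc=ValueError)
except ValueError as err:
    print(f"raised: {err}")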
Example 6
    def _solve(self):
        """
        Run the iterative solver.
        """
        maxiter = self.options['maxiter']
        atol = self.options['atol']
        rtol = self.options['rtol']
        iprint = self.options['iprint']
        stall_limit = self.options['stall_limit']
        stall_tol = self.options['stall_tol']

        self._mpi_print_header()

        self._iter_count = 0
        norm0, norm = self._iter_initialize()

        self._norm0 = norm0

        self._mpi_print(self._iter_count, norm, norm / norm0)

        stalled = False
        stall_count = 0
        if stall_limit > 0:
            stall_norm = norm0

        while self._iter_count < maxiter and norm > atol and norm / norm0 > rtol and not stalled:
            with Recording(type(self).__name__, self._iter_count, self) as rec:

                if stall_count == 3 and not self.linesearch.options['print_bound_enforce']:

                    self.linesearch.options['print_bound_enforce'] = True

                    if self._system().pathname:
                        pathname = f"{self._system().pathname}."
                    else:
                        pathname = ""

                    msg = (
                        f"Your model has stalled three times and may be violating the bounds. "
                        f"In the future, turn on print_bound_enforce in your solver options "
                        f"here: \n{pathname}nonlinear_solver.linesearch.options"
                        f"['print_bound_enforce']=True. "
                        f"\nThe bound(s) being violated now are:\n")
                    issue_warning(msg, category=SolverWarning)

                    self._single_iteration()
                    self.linesearch.options['print_bound_enforce'] = False
                else:
                    self._single_iteration()

                self._iter_count += 1
                self._run_apply()
                norm = self._iter_get_norm()

                # Save the norm values in the context manager so they can also be recorded.
                rec.abs = norm
                if norm0 == 0:
                    norm0 = 1
                rec.rel = norm / norm0

                # Check if convergence is stalled.
                if stall_limit > 0:
                    rel_norm = rec.rel
                    norm_diff = np.abs(stall_norm - rel_norm)
                    if norm_diff <= stall_tol:
                        stall_count += 1
                        if stall_count >= stall_limit:
                            stalled = True
                    else:
                        stall_count = 0
                        stall_norm = rel_norm

            self._mpi_print(self._iter_count, norm, norm / norm0)

        system = self._system()

        # flag for the print statements. we only print on root if USE_PROC_FILES is not set to True
        print_flag = system.comm.rank == 0 or os.environ.get('USE_PROC_FILES')

        prefix = self._solver_info.prefix + self.SOLVER

        # Solver terminated early because a NaN in the norm doesn't satisfy the while-loop
        # conditions.
        if np.isinf(norm) or np.isnan(norm):
            msg = "Solver '{}' on system '{}': residuals contain 'inf' or 'NaN' after {} " + \
                  "iterations."
            if iprint > -1 and print_flag:
                print(
                    prefix +
                    msg.format(self.SOLVER, system.pathname, self._iter_count))

            # Raise AnalysisError if requested.
            if self.options['err_on_non_converge']:
                raise AnalysisError(
                    msg.format(self.SOLVER, system.pathname, self._iter_count))

        # Solver hit maxiter without meeting desired tolerances.
        # Or solver stalled.
        elif (norm > atol and norm / norm0 > rtol) or stalled:

            if stalled:
                msg = "Solver '{}' on system '{}' stalled after {} iterations."
            else:
                msg = "Solver '{}' on system '{}' failed to converge in {} iterations."

            if iprint > -1 and print_flag:
                print(
                    prefix +
                    msg.format(self.SOLVER, system.pathname, self._iter_count))

            # Raise AnalysisError if requested.
            if self.options['err_on_non_converge']:
                raise AnalysisError(
                    msg.format(self.SOLVER, system.pathname, self._iter_count))

        # Solver converged
        elif iprint == 1 and print_flag:
            print(prefix +
                  ' Converged in {} iterations'.format(self._iter_count))
        elif iprint == 2 and print_flag:
            print(prefix + ' Converged')
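The stall logic is independent of the solver: track the relative norm, count consecutive iterations where it changes by less than stall_tol, and stop once the count reaches stall_limit. A toy fixed-point iteration showing just that bookkeeping (tolerances are illustrative):

import numpy as np

def solve(g, x0, maxiter=100, atol=1e-10, rtol=1e-10, stall_limit=3, stall_tol=1e-12):
    x = x0
    norm0 = norm = abs(g(x) - x) or 1.0
    it, stall_count, stall_norm, stalled = 0, 0, 1.0, False
    while it < maxiter and norm > atol and norm / norm0 > rtol and not stalled:
        x = g(x)                       # one fixed-point iteration
        norm = abs(g(x) - x)
        it += 1
        rel_norm = norm / norm0
        if abs(stall_norm - rel_norm) <= stall_tol:  # no progress this iteration
            stall_count += 1
            stalled = stall_count >= stall_limit
        else:
            stall_count, stall_norm = 0, rel_norm
    return x, it, stalled

print(solve(np.cos, 1.0))  # converges toward the fixed point ~0.739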
Example 7
    def train(self, x, y):
        """
        Train the surrogate model with the given set of inputs and outputs.

        Parameters
        ----------
        x : array-like
            Training input locations.
        y : array-like
            Model responses at given inputs.
        """
        super().train(x, y)
        x, y = np.atleast_2d(x, y)

        cache = self.options['training_cache']

        if cache:
            data_hash = md5()
            data_hash.update(x.flatten())
            data_hash.update(y.flatten())
            training_data_hash = data_hash.hexdigest()
            cache_hash = ''

        if cache and os.path.exists(cache):

            with np.load(cache, allow_pickle=False) as data:
                try:
                    self.n_samples = data['n_samples']
                    self.n_dims = data['n_dims']
                    self.X = np.array(data['X'])
                    self.Y = np.array(data['Y'])
                    self.X_mean = np.array(data['X_mean'])
                    self.Y_mean = np.array(data['Y_mean'])
                    self.X_std = np.array(data['X_std'])
                    self.Y_std = np.array(data['Y_std'])
                    self.thetas = np.array(data['thetas'])
                    self.alpha = np.array(data['alpha'])
                    self.U = np.array(data['U'])
                    self.S_inv = np.array(data['S_inv'])
                    self.Vh = np.array(data['Vh'])
                    self.sigma2 = np.array(data['sigma2'])
                    cache_hash = str(data['hash'])
                except KeyError as e:
                    msg = (
                        "An error occurred while loading KrigingSurrogate Cache: %s. "
                        "Ignoring and training from scratch.")
                    issue_warning(msg % str(e), category=CacheWarning)

            # if the loaded data passes the hash check with the current training data, we exit
            if cache_hash == training_data_hash:
                return

        # Training fallthrough
        self.n_samples, self.n_dims = x.shape

        if self.n_samples <= 1:
            raise ValueError(
                'KrigingSurrogate requires at least 2 training points.')

        # Normalize the data
        X_mean = np.mean(x, axis=0)
        X_std = np.std(x, axis=0)
        Y_mean = np.mean(y, axis=0)
        Y_std = np.std(y, axis=0)

        X_std[X_std == 0.] = 1.
        Y_std[Y_std == 0.] = 1.

        X = (x - X_mean) / X_std
        Y = (y - Y_mean) / Y_std

        self.X = X
        self.Y = Y
        self.X_mean, self.X_std = X_mean, X_std
        self.Y_mean, self.Y_std = Y_mean, Y_std

        def _calcll(thetas):
            """Calculate loglike (callback function)."""
            loglike = self._calculate_reduced_likelihood_params(
                np.exp(thetas))[0]
            return -loglike

        bounds = [(np.log(1e-5), np.log(1e5)) for _ in range(self.n_dims)]

        options = {'eps': 1e-3}

        if cache:
            # Enable logging since we expect the model to take long to train
            options['disp'] = True
            options['iprint'] = 2

        optResult = minimize(_calcll,
                             1e-1 * np.ones(self.n_dims),
                             method='slsqp',
                             options=options,
                             bounds=bounds)

        if not optResult.success:
            raise ValueError(
                f'Kriging Hyper-parameter optimization failed: {optResult.message}'
            )

        self.thetas = np.exp(optResult.x)
        _, params = self._calculate_reduced_likelihood_params()
        self.alpha = params['alpha']
        self.U = params['U']
        self.S_inv = params['S_inv']
        self.Vh = params['Vh']
        self.sigma2 = params['sigma2']

        # Save data to cache if specified
        if cache:
            data = {
                'n_samples': self.n_samples,
                'n_dims': self.n_dims,
                'X': self.X,
                'Y': self.Y,
                'X_mean': self.X_mean,
                'Y_mean': self.Y_mean,
                'X_std': self.X_std,
                'Y_std': self.Y_std,
                'thetas': self.thetas,
                'alpha': self.alpha,
                'U': self.U,
                'S_inv': self.S_inv,
                'Vh': self.Vh,
                'sigma2': self.sigma2,
                'hash': training_data_hash
            }

            if not os.path.exists(cache) or cache_hash != training_data_hash:
                with open(cache, 'wb') as f:
                    np.savez_compressed(f, **data)
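The cache validation above reduces to: hash the training arrays, store the hash alongside the fitted arrays in the .npz file, and skip retraining on a match. A minimal standalone sketch (file name and fields are illustrative):

import os
from hashlib import md5

import numpy as np

x = np.linspace(0.0, 1.0, 10).reshape(-1, 1)
y = np.sin(x)

h = md5()
h.update(x.flatten())
h.update(y.flatten())
data_hash = h.hexdigest()

cache = 'model_cache.npz'
if os.path.exists(cache):
    with np.load(cache, allow_pickle=False) as data:
        if str(data['hash']) == data_hash:
            print('cache hit, skipping training')
# ... otherwise train, then save the fit plus the hash for next time
np.savez_compressed(cache, coef=np.ones(3), hash=data_hash)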
Example 8
    def _setup_partials(self):
        """
        Process all partials and approximations that the user declared.

        Metamodel needs to declare its partials after inputs and outputs are known.
        """
        super()._setup_partials()

        vec_size = self.options['vec_size']
        if vec_size > 1:
            vec_arange = np.arange(vec_size)

            # Sparse specification of partials for vectorized models.
            for wrt, n_wrt in self._surrogate_input_names:
                for of, shape_of in self._surrogate_output_names:
                    n_of = np.prod(shape_of)
                    rows = np.repeat(np.arange(n_of), n_wrt)
                    cols = np.tile(np.arange(n_wrt), n_of)
                    repeat = np.repeat(vec_arange, len(rows))
                    rows = np.tile(rows, vec_size) + repeat * n_of
                    cols = np.tile(cols, vec_size) + repeat * n_wrt

                    dct = {
                        'rows': rows,
                        'cols': cols,
                        'dependent': True,
                    }
                    self._declare_partials(of=of, wrt=wrt, dct=dct)
        else:
            dct = {
                'value': None,
                'dependent': True,
            }
            # Dense specification of partials for non-vectorized models.
            self._declare_partials(
                of=tuple([name[0] for name in self._surrogate_output_names]),
                wrt=tuple([name[0] for name in self._surrogate_input_names]),
                dct=dct)

        # Support for user declaring fd partials in a child class and assigning new defaults.
        # We want a warning for all partials that were not explicitly declared.
        declared_partials = {
            key for key, dct in self._subjacs_info.items()
            if 'method' in dct and dct['method']
        }

        # Gather undeclared fd partials on surrogates that don't support analytic derivatives.
        # While we do this, declare the missing ones.
        non_declared_partials = []
        for of, _ in self._surrogate_output_names:
            surrogate = self._metadata(of).get('surrogate')
            if surrogate and not overrides_method('linearize', surrogate,
                                                  SurrogateModel):
                wrt_list = [name[0] for name in self._surrogate_input_names]
                self._approx_partials(of=of, wrt=wrt_list, method='fd')

                for wrt in wrt_list:
                    abs_key = rel_key2abs_key(self, (of, wrt))
                    if abs_key not in declared_partials:
                        non_declared_partials.append(abs_key)

        if non_declared_partials:
            self._get_approx_scheme('fd')

            msg = "Because the MetaModelUnStructuredComp '{}' uses a surrogate " \
                  "which does not define a linearize method,\nOpenMDAO will use " \
                  "finite differences to compute derivatives. Some of the derivatives " \
                  "will be computed\nusing default finite difference " \
                  "options because they were not explicitly declared.\n".format(self.name)
            msg += "The derivatives computed using the defaults are:\n"
            for abs_key in non_declared_partials:
                msg += "    {}, {}\n".format(*abs_key)
            issue_warning(msg, category=DerivativesWarning)
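The rows/cols construction above builds a block-diagonal sparsity: each of the vec_size points gets its own dense n_of x n_wrt block, offset along both axes. A small standalone check of that index arithmetic:

import numpy as np

vec_size, n_of, n_wrt = 2, 2, 3
vec_arange = np.arange(vec_size)

rows = np.repeat(np.arange(n_of), n_wrt)   # [0 0 0 1 1 1]
cols = np.tile(np.arange(n_wrt), n_of)     # [0 1 2 0 1 2]
repeat = np.repeat(vec_arange, len(rows))  # [0]*6 + [1]*6
rows = np.tile(rows, vec_size) + repeat * n_of
cols = np.tile(cols, vec_size) + repeat * n_wrt

# the second block is shifted by (n_of, n_wrt): a block-diagonal pattern
print(rows)  # [0 0 0 1 1 1 2 2 2 3 3 3]
print(cols)  # [0 1 2 0 1 2 3 4 5 3 4 5]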
Example 9
    def _setup_solvers(self, system, depth):
        """
        Assign system instance, set depth, and optionally perform setup.

        Parameters
        ----------
        system : <System>
            Pointer to the owning system.
        depth : int
            Depth of the current system (already incremented).
        """
        super()._setup_solvers(system, depth)
        self._recompute_jacobian = True
        self._computed_jacobians = 0
        iproc = system.comm.rank

        rank = MPI.COMM_WORLD.rank if MPI is not None else 0
        self._disallow_discrete_outputs()

        if self.linear_solver is not None:
            self.linear_solver._setup_solvers(system, self._depth + 1)
        else:
            self.linear_solver = system.linear_solver

        if self.linesearch is not None:
            self.linesearch._setup_solvers(system, self._depth + 1)
            self.linesearch._do_subsolve = True

        # this check is incorrect (for broyden) and needs to be done differently.
        # self._disallow_distrib_solve()

        states = self.options['state_vars']
        prom2abs = system._var_allprocs_prom2abs_list['output']

        # Check names of states.
        bad_names = [name for name in states if name not in prom2abs]
        if len(bad_names) > 0:
            msg = "{}: The following variable names were not found: {}"
            raise ValueError(msg.format(self.msginfo, ', '.join(bad_names)))

        # Size linear system
        if len(states) > 0:
            # User has specified states, so we must size them.
            n = 0
            meta = system._var_allprocs_abs2meta['output']

            for name in states:
                size = meta[prom2abs[name][0]]['global_size']
                self._idx[name] = (n, n + size)
                n += size
        else:
            # Full system size.
            self._full_inverse = True
            n = np.sum(system._owned_sizes)

        self.size = n
        self.Gm = np.empty((n, n))
        self.xm = np.empty((n, ))
        self.fxm = np.empty((n, ))
        self.delta_xm = None
        self.delta_fxm = None

        if self._full_inverse:

            # Can only use DirectSolver here.
            from openmdao.solvers.linear.direct import DirectSolver
            if not isinstance(self.linear_solver, DirectSolver):
                msg = "{}: Linear solver must be DirectSolver when solving the full model."
                raise ValueError(msg.format(self.msginfo))

            return

        # Always look for states that aren't being solved so we can warn the user.
        def sys_recurse(system, all_states):
            subs = system._subsystems_myproc
            if len(subs) == 0:

                # Skip implicit components that appear to solve themselves.
                from openmdao.core.implicitcomponent import ImplicitComponent
                if overrides_method('solve_nonlinear', system,
                                    ImplicitComponent):
                    return

                all_states.extend(system._list_states())

            else:
                for subsys in subs:
                    sub_nl = subsys.nonlinear_solver
                    if sub_nl and sub_nl.supports['implicit_components']:
                        continue
                    sys_recurse(subsys, all_states)

        all_states = []
        sys_recurse(system, all_states)
        all_states = [
            system._var_abs2prom['output'][name] for name in all_states
        ]

        missing = set(all_states).difference(states)
        if len(missing) > 0:
            msg = "The following states are not covered by a solver, and may have been " + \
                  "omitted from the BroydenSolver 'state_vars': "
            msg += ', '.join(sorted(missing))
            issue_warning(msg, category=SetupWarning)
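The sizing loop simply assigns each named state a contiguous slice of the Broyden work vectors. A standalone sketch of that bookkeeping with made-up state sizes:

import numpy as np

state_sizes = {'x': 2, 'T': 1, 'p': 3}

idx = {}
n = 0
for name, size in state_sizes.items():
    idx[name] = (n, n + size)
    n += size

xm = np.empty(n)
print(idx)  # {'x': (0, 2), 'T': (2, 3), 'p': (3, 6)}
print(xm[slice(*idx['p'])].shape)  # (3,)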
Example 10
    def _setup_partials(self):
        """
        Check that all partials are declared.
        """
        if not self._manual_decl_partials:
            meta = self._var_rel2meta
            decl_partials = super().declare_partials
            for outs, tup in self._exprs_info:
                vs, funcs = tup
                ins = sorted(set(vs).difference(outs))
                for out in sorted(outs):
                    for inp in ins:
                        if self.options['has_diag_partials']:
                            ival = meta[inp]['value']
                            iarray = isinstance(ival, ndarray) and ival.size > 1
                            oval = meta[out]['value']
                            if iarray and isinstance(oval, ndarray) and oval.size > 1:
                                if oval.size != ival.size:
                                    raise RuntimeError(
                                        "%s: has_diag_partials is True but partial(%s, %s) "
                                        "is not square (shape=(%d, %d))." %
                                        (self.msginfo, out, inp, oval.size,
                                         ival.size))
                                # partial will be declared as diagonal
                                inds = np.arange(oval.size, dtype=INT_DTYPE)
                            else:
                                inds = None
                            decl_partials(of=out,
                                          wrt=inp,
                                          rows=inds,
                                          cols=inds)
                        else:
                            decl_partials(of=out, wrt=inp)

        super()._setup_partials()
        if self._manual_decl_partials:
            undeclared = []
            for outs, tup in self._exprs_info:
                vs, funcs = tup
                ins = sorted(set(vs).difference(outs))
                for out in sorted(outs):
                    out = f'{self.pathname}.{out}' if self.pathname else out
                    for inp in ins:
                        inp = f'{self.pathname}.{inp}' if self.pathname else inp
                        if (out, inp) not in self._subjacs_info:
                            undeclared.append((out, inp))
            if undeclared:
                idx = len(self.pathname) + 1 if self.pathname else 0
                undeclared = ', '.join(f"'{of[idx:]}' wrt '{wrt[idx:]}'"
                                       for of, wrt in undeclared)
                issue_warning(
                    f"The following partial derivatives have not been "
                    f"declared so they are assumed to be zero: [{undeclared}].",
                    prefix=self.msginfo,
                    category=DerivativesWarning)
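With has_diag_partials, each square subjacobian is declared with rows=cols=arange(size), i.e. only the diagonal is stored. A sketch of why that is the right sparsity for an elementwise expression:

import numpy as np

# y = 2*x + 1 acts elementwise, so dy_i/dx_j == 0 whenever i != j
size = 4
inds = np.arange(size)

# dense Jacobian of the elementwise expression: 2*I
J = np.diag(np.full(size, 2.0))

# the diagonal declaration stores size values instead of size**2
print(J[inds, inds])  # [2. 2. 2. 2.]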
Example 11
def _get_viewer_data(data_source, case_id=None):
    """
    Get the data needed by the N2 viewer as a dictionary.

    Parameters
    ----------
    data_source : <Problem> or <Group> or str
        A Problem or Group or case recorder filename containing the model or model data.
        If the case recorder file from a parallel run has separate metadata, the
        filenames can be specified with a comma, e.g.: case.sql_0,case.sql_meta

    case_id : int or str or None
        Case name or index of case in SQL file.

    Returns
    -------
    dict
        A dictionary containing information about the model for use by the viewer.
    """
    if isinstance(data_source, Problem):
        root_group = data_source.model

        if not isinstance(root_group, Group):
            issue_warning("The model is not a Group, viewer data is unavailable.")
            return {}

        driver = data_source.driver
        driver_name = driver.__class__.__name__
        driver_type = 'doe' if isinstance(driver, DOEDriver) else 'optimization'

        driver_options = {key: _serialize_single_option(driver.options._dict[key])
                          for key in driver.options}

        if driver_type == 'optimization' and 'opt_settings' in dir(driver):
            driver_opt_settings = driver.opt_settings
        else:
            driver_opt_settings = None

    elif isinstance(data_source, Group):
        if not data_source.pathname:  # root group
            root_group = data_source
            driver_name = None
            driver_type = None
            driver_options = None
            driver_opt_settings = None
        else:
            # this function only makes sense when it is at the root
            issue_warning(f"Viewer data is not available for sub-Group '{data_source.pathname}'.")
            return {}

    elif isinstance(data_source, str):
        if ',' in data_source:
            filenames = data_source.split(',')
            cr = CaseReader(filenames[0], metadata_filename=filenames[1])
        else:
            cr = CaseReader(data_source)

        data_dict = cr.problem_metadata

        if case_id is not None:
            cases = cr.get_case(case_id)
            print(f"Using source: {cases.source}\nCase: {cases.name}")

            def recurse(children, stack):
                for child in children:
                    if child['type'] == 'subsystem':
                        if child['name'] != '_auto_ivc':
                            stack.append(child['name'])
                            recurse(child['children'], stack)
                            stack.pop()
                    elif child['type'] == 'input':
                        if cases.inputs is None:
                            child['value'] = 'N/A'
                        else:
                            path = child['name'] if not stack else '.'.join(stack + [child['name']])
                            child['value'] = cases.inputs[path]
                    elif child['type'] == 'output':
                        if cases.outputs is None:
                            child['value'] = 'N/A'
                        else:
                            path = child['name'] if not stack else '.'.join(stack + [child['name']])
                            try:
                                child['value'] = cases.outputs[path]
                            except KeyError:
                                child['value'] = 'N/A'
            recurse(data_dict['tree']['children'], [])

        # Delete the variables key since it's not used in N2
        if 'variables' in data_dict:
            del data_dict['variables']

        # Older recordings might not have this.
        if 'md5_hash' not in data_dict:
            data_dict['md5_hash'] = None

        return data_dict

    else:
        raise TypeError(f"Viewer data is not available for '{data_source}'."
                        "The source must be a Problem, model or the filename of a recording.")

    data_dict = {}
    comp_exec_idx = [0]  # list so pass by ref
    orders = {}
    data_dict['tree'] = _get_tree_dict(root_group, orders, comp_exec_idx)
    data_dict['md5_hash'] = root_group._generate_md5_hash()

    connections_list = []

    sys_pathnames_list = []  # list of pathnames of systems found in cycles
    sys_pathnames_dict = {}  # map of pathnames to index of pathname in list

    G = root_group.compute_sys_graph(comps_only=True)

    scc = nx.strongly_connected_components(G)

    for strong_comp in scc:
        if len(strong_comp) > 1:
            # these IDs are only used when back edges are present
            sys_pathnames_list.extend(strong_comp)
            for name in strong_comp:
                sys_pathnames_dict[name] = len(sys_pathnames_dict)

        for src, tgt in G.edges(strong_comp):
            if src in strong_comp and tgt in strong_comp:
                if src in orders:
                    exe_src = orders[src]
                else:
                    exe_src = orders[src] = -1
                if tgt in orders:
                    exe_tgt = orders[tgt]
                else:
                    exe_tgt = orders[tgt] = -1

                if exe_tgt < exe_src:
                    exe_low = exe_tgt
                    exe_high = exe_src
                else:
                    exe_low = exe_src
                    exe_high = exe_tgt

                edges_list = [
                    (sys_pathnames_dict[s], sys_pathnames_dict[t]) for s, t in G.edges(strong_comp)
                    if s in orders and exe_low <= orders[s] <= exe_high and t in orders and
                    exe_low <= orders[t] <= exe_high and
                    not (s == src and t == tgt) and t in sys_pathnames_dict
                ]
                for vsrc, vtgtlist in G.get_edge_data(src, tgt)['conns'].items():
                    for vtgt in vtgtlist:
                        connections_list.append({'src': vsrc, 'tgt': vtgt,
                                                 'cycle_arrows': edges_list})
            else:  # edge is out of the SCC
                for vsrc, vtgtlist in G.get_edge_data(src, tgt)['conns'].items():
                    for vtgt in vtgtlist:
                        connections_list.append({'src': vsrc, 'tgt': vtgt})

    data_dict['sys_pathnames_list'] = sys_pathnames_list
    data_dict['connections_list'] = connections_list
    data_dict['abs2prom'] = root_group._var_abs2prom

    data_dict['driver'] = {
        'name': driver_name,
        'type': driver_type,
        'options': driver_options,
        'opt_settings': driver_opt_settings
    }
    data_dict['design_vars'] = root_group.get_design_vars(use_prom_ivc=False)
    data_dict['responses'] = root_group.get_responses()

    data_dict['declare_partials_list'] = _get_declare_partials(root_group)

    return data_dict
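The cycle detection relies on networkx.strongly_connected_components: any strongly connected component with more than one node contains a feedback loop, and those connections get the 'cycle_arrows' entries above. A toy demo (assumes networkx is installed):

import networkx as nx

G = nx.DiGraph()
G.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'a'),  # three-node cycle
                  ('c', 'd')])                         # feed-forward edge

for scc in nx.strongly_connected_components(G):
    if len(scc) > 1:
        print('cycle:', sorted(scc))  # cycle: ['a', 'b', 'c']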
Example 12
def trace_mpi(fname='mpi_trace', skip=(), flush=True):
    """
    Dump traces to the specified filename<.rank> showing openmdao and mpi/petsc calls.

    Parameters
    ----------
    fname : str
        Name of the trace file(s).  <.rank> will be appended to the name on each rank.
    skip : set-like
        Collection of function names to skip.
    flush : bool
        If True, flush print buffer after every print call.
    """
    if MPI is None:
        issue_warning("MPI is not active.  Trace aborted.", category=MPIWarning)
        return
    if sys.getprofile() is not None:
        raise RuntimeError("another profile function is already active.")

    my_fname = fname + '.' + str(MPI.COMM_WORLD.rank)

    outfile = open(my_fname, 'w')

    stack = []

    _c_map = {
        'c_call': '(c) -->',
        'c_return': '(c) <--',
        'c_exception': '(c_exception)',
    }


    def _print_c_func(frame, arg, typestr):
        s = str(arg)
        if 'mpi4py' in s or 'petsc4py' in s:
            c = arg.__self__.__class__
            print('   ' * len(stack), typestr, "%s.%s.%s" %
                    (c.__module__, c.__name__, arg.__name__),
                    "%s:%d" % (frame.f_code.co_filename, frame.f_code.co_firstlineno),
                    file=outfile, flush=True)


    def _mpi_trace_callback(frame, event, arg):
        pname = None
        commsize = ''
        if event == 'call':
            if 'openmdao' in frame.f_code.co_filename:
                if frame.f_code.co_name in skip:
                    return
                if 'self' in frame.f_locals:
                    try:
                        pname = frame.f_locals['self'].msginfo
                    except Exception:
                        pass
                    try:
                        commsize = frame.f_locals['self'].comm.size
                    except Exception:
                        pass
                if pname is not None:
                    if not stack or pname != stack[-1][0]:
                        stack.append([pname, 1])
                        print('   ' * len(stack), commsize, pname, file=outfile, flush=flush)
                    else:
                        stack[-1][1] += 1
                print('   ' * len(stack), '-->', frame.f_code.co_name, "%s:%d" %
                      (frame.f_code.co_filename, frame.f_code.co_firstlineno),
                      file=outfile, flush=flush)
        elif event == 'return':
            if 'openmdao' in frame.f_code.co_filename:
                if frame.f_code.co_name in skip:
                    return
                if 'self' in frame.f_locals:
                    try:
                        pname = frame.f_locals['self'].msginfo
                    except Exception:
                        pass
                    try:
                        commsize = frame.f_locals['self'].comm.size
                    except Exception:
                        pass
                print('   ' * len(stack), '<--', frame.f_code.co_name, "%s:%d" %
                      (frame.f_code.co_filename, frame.f_code.co_firstlineno),
                      file=outfile, flush=flush)
                if pname is not None and stack and pname == stack[-1][0]:
                    stack[-1][1] -= 1
                    if stack[-1][1] < 1:
                        stack.pop()
                        if stack:
                            print('   ' * len(stack), commsize, stack[-1][0], file=outfile,
                                  flush=flush)
        else:
            _print_c_func(frame, arg, _c_map[event])

    sys.setprofile(_mpi_trace_callback)
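A cut-down version of the same hook, showing how little is needed to trace Python and C-level calls with sys.setprofile (the filename filter is illustrative):

import sys

def tracer(frame, event, arg):
    if event == 'call' and 'myproject' in frame.f_code.co_filename:
        print('-->', frame.f_code.co_name,
              f"{frame.f_code.co_filename}:{frame.f_code.co_firstlineno}")
    elif event == 'c_call':
        print('(c) -->', arg.__name__)

sys.setprofile(tracer)
sorted([3, 1, 2])      # triggers a c_call event for the builtin
sys.setprofile(None)   # always restore/clear the hook when done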
Example 13
    def run(self):
        """
        Execute pyOptSparse.

        Note that pyOpt controls the execution, and the individual optimizers
        (e.g., SNOPT) control the iteration.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        problem = self._problem()
        model = problem.model
        relevant = model._relevant
        self.pyopt_solution = None
        self._total_jac = None
        self.iter_count = 0
        fwd = problem._mode == 'fwd'
        optimizer = self.options['optimizer']
        self._quantities = []

        self._check_for_missing_objective()
        self._check_jac = self.options['singular_jac_behavior'] in ('error', 'warn')

        # Only need initial run if we have linear constraints or if we are using an optimizer that
        # doesn't perform one initially.
        con_meta = self._cons
        model_ran = False
        if optimizer in run_required or np.any(
            [con['linear'] for con in self._cons.values()]):
            with RecordingDebugging(self._get_name(), self.iter_count,
                                    self) as rec:
                # Initial Run
                model.run_solve_nonlinear()
                rec.abs = 0.0
                rec.rel = 0.0
                model_ran = True
            self.iter_count += 1

        # compute dynamic simul deriv coloring or just sparsity if option is set
        if c_mod._use_total_sparsity:
            coloring = None
            if self._coloring_info['coloring'] is None and self._coloring_info['dynamic']:
                coloring = c_mod.dynamic_total_coloring(
                    self,
                    run_model=not model_ran,
                    fname=self._get_total_coloring_fname())

            if coloring is not None:
                # if the improvement wasn't large enough, don't use coloring
                pct = coloring._solves_info()[-1]
                info = self._coloring_info
                if info['min_improve_pct'] > pct:
                    info['coloring'] = info['static'] = None
                    msg = f"Coloring was deactivated.  Improvement of {pct:.1f}% was less " \
                          f"than min allowed ({info['min_improve_pct']:.1f}%)."
                    issue_warning(msg,
                                  prefix=self.msginfo,
                                  category=DerivativesWarning)

        comm = None if isinstance(problem.comm, FakeComm) else problem.comm
        opt_prob = Optimization(self.options['title'],
                                weak_method_wrapper(self, '_objfunc'),
                                comm=comm)

        # Add all design variables
        input_meta = self._designvars
        self._indep_list = indep_list = list(input_meta)
        input_vals = self.get_design_var_values()

        for name, meta in input_meta.items():
            size = meta['global_size'] if meta['distributed'] else meta['size']
            opt_prob.addVarGroup(name,
                                 size,
                                 type='c',
                                 value=input_vals[name],
                                 lower=meta['lower'],
                                 upper=meta['upper'])

        opt_prob.finalizeDesignVariables()

        # Add all objectives
        objs = self.get_objective_values()
        for name in objs:
            opt_prob.addObj(name)
            self._quantities.append(name)

        # Calculate and save derivatives for any linear constraints.
        lcons = [key for (key, con) in con_meta.items() if con['linear']]
        if len(lcons) > 0:
            _lin_jacs = self._compute_totals(of=lcons,
                                             wrt=indep_list,
                                             return_format='dict')
            # convert all of our linear constraint jacs to COO format. Otherwise pyoptsparse will
            # do it for us and we'll end up with a fully dense COO matrix and very slow evaluation
            # of linear constraints!
            to_remove = []
            for jacdct in _lin_jacs.values():
                for n, subjac in jacdct.items():
                    if isinstance(subjac, np.ndarray):
                        # we can safely use coo_matrix to automatically convert the ndarray
                        # since our linear constraint jacs are constant, so zeros won't become
                        # nonzero during the optimization.
                        mat = coo_matrix(subjac)
                        if mat.row.size > 0:
                            # convert to 'coo' format here to avoid an emphatic warning
                            # by pyoptsparse.
                            jacdct[n] = {
                                'coo': [mat.row, mat.col, mat.data],
                                'shape': mat.shape
                            }

        # Add all equality constraints
        for name, meta in con_meta.items():
            if meta['equals'] is None:
                continue
            size = meta['global_size'] if meta['distributed'] else meta['size']
            lower = upper = meta['equals']
            if fwd:
                wrt = [
                    v for v in indep_list
                    if name in relevant[input_meta[v]['ivc_source']]
                ]
            else:
                rels = relevant[name]
                wrt = [
                    v for v in indep_list
                    if input_meta[v]['ivc_source'] in rels
                ]

            if meta['linear']:
                jac = {w: _lin_jacs[name][w] for w in wrt}
                opt_prob.addConGroup(name,
                                     size,
                                     lower=lower,
                                     upper=upper,
                                     linear=True,
                                     wrt=wrt,
                                     jac=jac)
            else:
                if name in self._res_jacs:
                    resjac = self._res_jacs[name]
                    jac = {n: resjac[input_meta[n]['ivc_source']] for n in wrt}
                else:
                    jac = None
                opt_prob.addConGroup(name,
                                     size,
                                     lower=lower,
                                     upper=upper,
                                     wrt=wrt,
                                     jac=jac)
                self._quantities.append(name)

        # Add all inequality constraints
        for name, meta in con_meta.items():
            if meta['equals'] is not None:
                continue
            size = meta['global_size'] if meta['distributed'] else meta['size']

            # Bounds - double sided is supported
            lower = meta['lower']
            upper = meta['upper']

            if fwd:
                wrt = [
                    v for v in indep_list
                    if name in relevant[input_meta[v]['ivc_source']]
                ]
            else:
                rels = relevant[name]
                wrt = [
                    v for v in indep_list
                    if input_meta[v]['ivc_source'] in rels
                ]

            if meta['linear']:
                jac = {w: _lin_jacs[name][w] for w in wrt}
                opt_prob.addConGroup(name,
                                     size,
                                     upper=upper,
                                     lower=lower,
                                     linear=True,
                                     wrt=wrt,
                                     jac=jac)
            else:
                if name in self._res_jacs:
                    resjac = self._res_jacs[name]
                    jac = {n: resjac[input_meta[n]['ivc_source']] for n in wrt}
                else:
                    jac = None
                opt_prob.addConGroup(name,
                                     size,
                                     upper=upper,
                                     lower=lower,
                                     wrt=wrt,
                                     jac=jac)
                self._quantities.append(name)

        # Instantiate the requested optimizer
        try:
            _tmp = __import__('pyoptsparse', globals(), locals(), [optimizer], 0)
            opt = getattr(_tmp, optimizer)()

        except Exception as err:
            # Change whatever pyopt gives us to an ImportError with a readable
            # message, chaining the original traceback.
            msg = "Optimizer %s is not available in this installation." % optimizer
            raise ImportError(msg) from err

        # Process any default optimizer-specific settings.
        if optimizer in DEFAULT_OPT_SETTINGS:
            for name, value in DEFAULT_OPT_SETTINGS[optimizer].items():
                if name not in self.opt_settings:
                    self.opt_settings[name] = value

        # Set optimization options
        for option, value in self.opt_settings.items():
            opt.setOption(option, value)

        self._exc_info = None
        try:

            # Execute the optimization problem
            if self.options['gradient method'] == 'pyopt_fd':

                # Use pyOpt's internal finite difference
                # TODO: Need to get this from OpenMDAO
                # fd_step = problem.model.deriv_options['step_size']
                fd_step = 1e-6
                sol = opt(opt_prob,
                          sens='FD',
                          sensStep=fd_step,
                          storeHistory=self.hist_file,
                          hotStart=self.hotstart_file)

            elif self.options['gradient method'] == 'snopt_fd':
                if self.options['optimizer'] == 'SNOPT':

                    # Use SNOPT's internal finite difference
                    # TODO: Need to get this from OpenMDAO
                    # fd_step = problem.model.deriv_options['step_size']
                    fd_step = 1e-6
                    sol = opt(opt_prob,
                              sens=None,
                              sensStep=fd_step,
                              storeHistory=self.hist_file,
                              hotStart=self.hotstart_file)

                else:
                    msg = "SNOPT's internal finite difference can only be used with SNOPT"
                    self._exc_info = Exception(msg)
            else:

                # Use OpenMDAO's differentiator for the gradient
                sol = opt(opt_prob,
                          sens=weak_method_wrapper(self, '_gradfunc'),
                          storeHistory=self.hist_file,
                          hotStart=self.hotstart_file)

        except Exception:
            if not self._exc_info:
                raise

        if self._exc_info:
            raise self._exc_info

        # Print results
        if self.options['print_results']:
            print(sol)

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        dv_dict = sol.getDVs()
        for name in indep_list:
            self.set_design_var(name, dv_dict[name])

        with RecordingDebugging(self._get_name(), self.iter_count,
                                self) as rec:
            try:
                model.run_solve_nonlinear()
            except AnalysisError:
                model._clear_iprint()

            rec.abs = 0.0
            rec.rel = 0.0
        self.iter_count += 1

        # Save the most recent solution.
        self.pyopt_solution = sol

        try:
            exit_status = sol.optInform['value']
            self.fail = False

            # These are various failed statuses.
            if optimizer == 'IPOPT':
                if exit_status not in {0, 1}:
                    self.fail = True
            elif exit_status > 2:
                self.fail = True

        except KeyError:
            # optimizers other than pySNOPT may not populate this dict
            pass

        # revert signal handler to cached version
        sigusr = self.options['user_terminate_signal']
        if sigusr is not None:
            signal.signal(sigusr, self._signal_cache)
            self._signal_cache = None  # to prevent memory leak test from failing

        return self.fail
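The linear-constraint handling uses scipy.sparse.coo_matrix to strip structural zeros once, up front, instead of letting pyoptsparse densify the jacobian on every evaluation. A standalone sketch of that dense-to-COO step:

import numpy as np
from scipy.sparse import coo_matrix

subjac = np.array([[1.0, 0.0, 0.0],
                   [0.0, 0.0, 2.0]])

mat = coo_matrix(subjac)
# only the nonzeros survive, as parallel (row, col, data) arrays
print(mat.row, mat.col, mat.data)  # [0 1] [0 2] [1. 2.]

jac_entry = {'coo': [mat.row, mat.col, mat.data], 'shape': mat.shape}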
Example 14
def view_connections(root,
                     outfile='connections.html',
                     show_browser=True,
                     show_values=True,
                     precision=6,
                     title=None):
    """
    Generate a self-contained html file containing a detailed connection viewer.

    Optionally pops up a web browser to view the file.

    Parameters
    ----------
    root : System or Problem
        The root for the desired tree.

    outfile : str, optional
        The name of the output html file.  Defaults to 'connections.html'.

    show_browser : bool, optional
        If True, pop up a browser to view the generated html file.
        Defaults to True.

    show_values : bool, optional
        If True, retrieve the values and display them.

    precision : int, optional
        Sets the precision for displaying array values.

    title : str, optional
        Sets the title of the web page.
    """
    if MPI and MPI.COMM_WORLD.rank != 0:
        return

    # since people will be used to passing the Problem as the first arg to
    # the N2 diagram funct, allow them to pass a Problem here as well.
    if isinstance(root, Problem):
        system = root.model
    else:
        system = root

    connections = system._problem_meta['model_ref']()._conn_global_abs_in2out

    src2tgts = defaultdict(list)
    units = defaultdict(lambda: '')
    for io in ('input', 'output'):
        for n, data in system._var_allprocs_abs2meta[io].items():
            u = data.get('units', '')
            if u is not None:
                units[n] = u

    vals = {}

    prefix = system.pathname + '.' if system.pathname else ''
    all_vars = {}
    for io in ('input', 'output'):
        all_vars[io] = chain(system._var_abs2meta[io].items(),
                             [(prefix + n, m)
                              for n, m in system._var_discrete[io].items()])

    if show_values and system._outputs is None:
        issue_warning(
            "Values will not be shown because final_setup has not been called yet.",
            prefix=system.msginfo)

    with printoptions(precision=precision, suppress=True, threshold=10000):

        for t, meta in all_vars['input']:
            s = connections[t]
            if show_values and system._outputs is not None:
                if s.startswith('_auto_ivc.'):
                    val = system.get_val(t,
                                         flat=True,
                                         get_remote=True,
                                         from_src=False)
                else:
                    # if there's a unit conversion, express the value in the
                    # units of the target
                    if units[t] and s in system._outputs:
                        val = system.get_val(t, flat=True, units=units[t],
                                             get_remote=True)
                    else:
                        val = system.get_val(t, flat=True, get_remote=True)
            else:
                val = ''

            src2tgts[s].append(t)

            vals[t] = val

    NOCONN = '[NO CONNECTION]'
    vals[NOCONN] = ''

    src_systems = set()
    tgt_systems = set()
    for s, _ in all_vars['output']:
        parts = s.split('.')
        for i in range(len(parts)):
            src_systems.add('.'.join(parts[:i]))

    for t, _ in all_vars['input']:
        parts = t.split('.')
        for i in range(len(parts)):
            tgt_systems.add('.'.join(parts[:i]))

    src_systems = [{'name': n} for n in sorted(src_systems)]
    src_systems.insert(1, {'name': NOCONN})
    tgt_systems = [{'name': n} for n in sorted(tgt_systems)]
    tgt_systems.insert(1, {'name': NOCONN})

    tprom = system._var_allprocs_abs2prom['input']
    sprom = system._var_allprocs_abs2prom['output']

    table = []
    idx = 1  # unique ID for use by Tabulator
    for tgt, src in connections.items():
        usrc = units[src]
        utgt = units[tgt]
        if usrc != utgt:
            # prepend these with '!' so they'll be colored red
            if usrc:
                usrc = '!' + units[src]
            if utgt:
                utgt = '!' + units[tgt]

        row = {
            'id': idx,
            'src': src,
            'sprom': sprom[src],
            'sunits': usrc,
            'val': _val2str(vals[tgt]),
            'tunits': utgt,
            'tprom': tprom[tgt],
            'tgt': tgt
        }
        table.append(row)
        idx += 1

    # add rows for unconnected sources
    for src, _ in all_vars['output']:
        if src not in src2tgts:
            if show_values:
                v = _val2str(system._abs_get_val(src))
            else:
                v = ''
            row = {
                'id': idx,
                'src': src,
                'sprom': sprom[src],
                'sunits': units[src],
                'val': v,
                'tunits': '',
                'tprom': NOCONN,
                'tgt': NOCONN
            }
            table.append(row)
            idx += 1

    if title is None:
        title = ''

    data = {
        'title': title,
        'table': table,
        'show_values': show_values,
    }

    viewer = 'connect_table.html'

    code_dir = os.path.dirname(os.path.abspath(__file__))
    libs_dir = os.path.join(os.path.dirname(code_dir), 'common', 'libs')
    style_dir = os.path.join(os.path.dirname(code_dir), 'common', 'style')

    with open(os.path.join(code_dir, viewer), "r") as f:
        template = f.read()

    with open(os.path.join(libs_dir, 'tabulator.min.js'), "r") as f:
        tabulator_src = f.read()

    with open(os.path.join(style_dir, 'tabulator.min.css'), "r") as f:
        tabulator_style = f.read()

    jsontxt = json.dumps(data)

    with open(outfile, 'w') as f:
        s = template.replace("<connection_data>", jsontxt)
        s = s.replace("<tabulator_src>", tabulator_src)
        s = s.replace("<tabulator_style>", tabulator_style)
        f.write(s)

    if notebook:
        # display in Jupyter Notebook
        if not colab:
            display(IFrame(src=outfile, width=1000, height=1000))
        else:
            display(HTML(outfile))

    elif show_browser:
        # open it up in the browser
        from openmdao.utils.webview import webview
        webview(outfile)
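The viewer above builds a standalone HTML report by splicing JSON data and the
vendored Tabulator JS/CSS into literal placeholder tokens in a template file.
A minimal sketch of that substitution pattern, using a hypothetical template
containing a <report_data> token (not the actual OpenMDAO template or token
names):

import json

def render_report(template_path, outfile, data):
    # read the HTML template containing the literal placeholder token
    with open(template_path, "r") as f:
        template = f.read()

    # serialize the payload and splice it into the template text
    html = template.replace("<report_data>", json.dumps(data))

    with open(outfile, "w") as f:
        f.write(html)

# e.g. render_report('report_template.html', 'report.html', {'title': 'demo'})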
Example no. 15
    def run(self):
        """
        Optimize the problem using selected Scipy optimizer.

        Returns
        -------
        bool
            Failure flag; True if failed to converge, False if successful.
        """
        problem = self._problem()
        opt = self.options['optimizer']
        model = problem.model
        self.iter_count = 0
        self._total_jac = None

        self._check_for_missing_objective()

        # Initial Run
        with RecordingDebugging(self._get_name(), self.iter_count,
                                self) as rec:
            model.run_solve_nonlinear()
            self.iter_count += 1

        self._con_cache = self.get_constraint_values()
        desvar_vals = self.get_design_var_values()
        self._dvlist = list(self._designvars)

        # maxiter and disp get passed into scipy with all the other options.
        if 'maxiter' not in self.opt_settings:  # lets you override the value in options
            self.opt_settings['maxiter'] = self.options['maxiter']
        self.opt_settings['disp'] = self.options['disp']

        # Size Problem
        ndesvar = 0
        for desvar in self._designvars.values():
            size = desvar['global_size'] if desvar['distributed'] else desvar['size']
            ndesvar += size
        x_init = np.empty(ndesvar)

        # Initial Design Vars
        i = 0
        use_bounds = (opt in _bounds_optimizers)
        if use_bounds:
            bounds = []
        else:
            bounds = None

        for name, meta in self._designvars.items():
            size = meta['global_size'] if meta['distributed'] else meta['size']
            x_init[i:i + size] = desvar_vals[name]
            i += size

            # Bounds if our optimizer supports them
            if use_bounds:
                meta_low = meta['lower']
                meta_high = meta['upper']
                for j in range(size):

                    if isinstance(meta_low, np.ndarray):
                        p_low = meta_low[j]
                    else:
                        p_low = meta_low

                    if isinstance(meta_high, np.ndarray):
                        p_high = meta_high[j]
                    else:
                        p_high = meta_high

                    bounds.append((p_low, p_high))

        if use_bounds and (opt in _supports_new_style) and _use_new_style:
            # For 'trust-constr' it is better to use the new-style bounds, because they work
            # better with the "keep_feasible" option (for the current examples in the tests)
            try:
                from scipy.optimize import Bounds
                from scipy.optimize._constraints import old_bound_to_new
            except ImportError:
                msg = (
                    'The "trust-constr" optimizer is supported for SciPy 1.1.0 and above. '
                    'The installed version is {}')
                raise ImportError(msg.format(scipy_version))

            # Convert "old-style" bounds to "new-style" bounds
            lower, upper = old_bound_to_new(bounds)  # tuple, tuple
            keep_feasible = self.opt_settings.get('keep_feasible_bounds', True)
            bounds = Bounds(lb=lower, ub=upper, keep_feasible=keep_feasible)

        # Constraints
        constraints = []
        i = 1  # row 0 is the objective, so constraint rows start at 1
        lin_i = 0  # counter for linear constraint jacobian
        lincons = []  # list of linear constraints
        self._obj_and_nlcons = list(self._objs)

        if opt in _constraint_optimizers:
            for name, meta in self._cons.items():
                size = meta['global_size'] if meta['distributed'] else meta['size']
                upper = meta['upper']
                lower = meta['lower']
                equals = meta['equals']
                if opt in _gradient_optimizers and 'linear' in meta and meta['linear']:
                    lincons.append(name)
                    self._con_idx[name] = lin_i
                    lin_i += size
                else:
                    self._obj_and_nlcons.append(name)
                    self._con_idx[name] = i
                    i += size

                # SciPy constraint optimizers take constraints in one of two
                # separate formats

                # New style: constraints given as a list of NonlinearConstraint
                if opt in _supports_new_style and _use_new_style:
                    try:
                        from scipy.optimize import NonlinearConstraint
                    except ImportError:
                        msg = (
                            'The "trust-constr" optimizer is supported for SciPy 1.1.0 and '
                            'above. The installed version is {}')
                        raise ImportError(msg.format(scipy_version))

                    if equals is not None:
                        lb = ub = equals
                    else:
                        lb = lower
                        ub = upper
                    # Loop over every index separately,
                    # because scipy calls each constraint by index.
                    for j in range(size):
                        # Double-sided constraints are accepted by the algorithm
                        args = [name, False, j]
                        # TODO linear constraint if meta['linear']
                        # TODO add option for Hessian
                        con = NonlinearConstraint(
                            fun=signature_extender(
                                weak_method_wrapper(self, '_con_val_func'), args),
                            lb=lb,
                            ub=ub,
                            jac=signature_extender(
                                weak_method_wrapper(self, '_congradfunc'), args))
                        constraints.append(con)
                else:  # Type of constraints is list of dict
                    # Loop over every index separately,
                    # because scipy calls each constraint by index.
                    for j in range(size):
                        con_dict = {}
                        if meta['equals'] is not None:
                            con_dict['type'] = 'eq'
                        else:
                            con_dict['type'] = 'ineq'
                        con_dict['fun'] = weak_method_wrapper(self, '_confunc')
                        if opt in _constraint_grad_optimizers:
                            con_dict['jac'] = weak_method_wrapper(
                                self, '_congradfunc')
                        con_dict['args'] = [name, False, j]
                        constraints.append(con_dict)

                        # take the per-index bound without clobbering the full
                        # arrays, which are still needed on later iterations
                        upper_j = upper[j] if isinstance(upper, np.ndarray) else upper
                        lower_j = lower[j] if isinstance(lower, np.ndarray) else lower

                        dblcon = (upper_j < openmdao.INF_BOUND) and (
                            lower_j > -openmdao.INF_BOUND)

                        # Add extra constraint if double-sided
                        if dblcon:
                            dcon_dict = {}
                            dcon_dict['type'] = 'ineq'
                            dcon_dict['fun'] = weak_method_wrapper(
                                self, '_confunc')
                            if opt in _constraint_grad_optimizers:
                                dcon_dict['jac'] = weak_method_wrapper(
                                    self, '_congradfunc')
                            dcon_dict['args'] = [name, True, j]
                            constraints.append(dcon_dict)

            # precalculate gradients of linear constraints
            if lincons:
                self._lincongrad_cache = self._compute_totals(
                    of=lincons, wrt=self._dvlist, return_format='array')
            else:
                self._lincongrad_cache = None

        # Provide gradients for optimizers that support it
        if opt in _gradient_optimizers:
            jac = self._gradfunc
        else:
            jac = None

        # Hessian calculation method for optimizers that require it
        if opt in _hessian_optimizers:
            if 'hess' in self.opt_settings:
                hess = self.opt_settings.pop('hess')
            else:
                # Defaults to BFGS, if not in opt_settings
                from scipy.optimize import BFGS
                hess = BFGS()
        else:
            hess = None

        # compute dynamic simul deriv coloring if option is set
        if coloring_mod._use_total_sparsity:
            if (self._coloring_info['coloring'] is None
                    and self._coloring_info['dynamic']):
                coloring_mod.dynamic_total_coloring(
                    self,
                    run_model=False,
                    fname=self._get_total_coloring_fname())

                # if the improvement wasn't large enough, turn coloring off
                info = self._coloring_info
                if info['coloring'] is not None:
                    pct = info['coloring']._solves_info()[-1]
                    if info['min_improve_pct'] > pct:
                        info['coloring'] = info['static'] = None
                        msg = f"Coloring was deactivated.  Improvement of {pct:.1f}% was less " \
                              f"than min allowed ({info['min_improve_pct']:.1f}%)."
                        issue_warning(msg,
                                      prefix=self.msginfo,
                                      category=DerivativesWarning)

        # optimize
        try:
            if opt in _optimizers:
                result = minimize(
                    self._objfunc,
                    x_init,
                    # args=(),
                    method=opt,
                    jac=jac,
                    hess=hess,
                    # hessp=None,
                    bounds=bounds,
                    constraints=constraints,
                    tol=self.options['tol'],
                    # callback=None,
                    options=self.opt_settings)
            elif opt == 'basinhopping':
                from scipy.optimize import basinhopping

                def fun(x):
                    return self._objfunc(x), jac(x)

                if 'minimizer_kwargs' not in self.opt_settings:
                    self.opt_settings['minimizer_kwargs'] = {
                        "method": "L-BFGS-B",
                        "jac": True
                    }
                # basinhopping does not accept a 'maxiter' argument
                self.opt_settings.pop('maxiter')

                # pull any user-supplied acceptance test out of opt_settings up
                # front; popping it inside the callback would discard it after
                # the first iteration and also pass it to basinhopping twice
                user_test = self.opt_settings.pop('accept_test', None)  # callable

                def accept_test(f_new, x_new, f_old, x_old):
                    # Used to implement bounds besides the original functionality
                    if bounds is None:
                        return True
                    bound_check = all(b[0] <= xi <= b[1]
                                      for xi, b in zip(x_new, bounds))
                    if user_test is not None:
                        # has to satisfy both the bounds and the acceptance
                        # test defined by the user
                        test_res = user_test(f_new, x_new, f_old, x_old)
                        if test_res == 'force accept':
                            return test_res
                        return bound_check and test_res  # result is boolean
                    # no user acceptance test, check only the bounds
                    return bound_check

                result = basinhopping(fun,
                                      x_init,
                                      accept_test=accept_test,
                                      **self.opt_settings)
            elif opt == 'dual_annealing':
                from scipy.optimize import dual_annealing
                self.opt_settings.pop('disp')  # dual_annealing does not accept this argument
                # There is no "options" param, so "opt_settings" can be used to set the (many)
                # keyword arguments
                result = dual_annealing(self._objfunc,
                                        bounds=bounds,
                                        **self.opt_settings)
            elif opt == 'differential_evolution':
                from scipy.optimize import differential_evolution
                # There is no "options" param, so "opt_settings" can be used to set the (many)
                # keyword arguments
                result = differential_evolution(self._objfunc,
                                                bounds=bounds,
                                                **self.opt_settings)
            elif opt == 'shgo':
                from scipy.optimize import shgo
                kwargs = {}
                for option in ('minimizer_kwargs', 'sampling_method', 'n',
                               'iters'):
                    if option in self.opt_settings:
                        kwargs[option] = self.opt_settings[option]
                # Set the Jacobian and the Hessian to the value calculated in OpenMDAO
                if kwargs.get('minimizer_kwargs') is None:
                    kwargs['minimizer_kwargs'] = {}
                kwargs['minimizer_kwargs'].setdefault('jac', jac)
                kwargs['minimizer_kwargs'].setdefault('hess', hess)
                # Objective function tolerance
                self.opt_settings['f_tol'] = self.options['tol']
                result = shgo(self._objfunc,
                              bounds=bounds,
                              constraints=constraints,
                              options=self.opt_settings,
                              **kwargs)
            else:
                msg = 'Optimizer "{}" is not implemented yet. Choose from: {}'
                raise NotImplementedError(msg.format(opt, _all_optimizers))

        # If an exception was swallowed in one of our callbacks, we want to raise it
        # rather than the cryptic message from scipy.
        except Exception:
            if self._exc_info is not None:
                self._reraise()
            else:
                raise

        if self._exc_info is not None:
            self._reraise()

        self.result = result

        if hasattr(result, 'success'):
            self.fail = not result.success
            if self.fail:
                print('Optimization FAILED.')
                print(result.message)
                print('-' * 35)

            elif self.options['disp']:
                print('Optimization Complete')
                print('-' * 35)
        else:
            self.fail = True  # success is not known, so assume the worst
            print('Optimization Complete (success not known)')
            print(result.message)
            print('-' * 35)

        return self.fail
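For reference, the dict-style constraints assembled above follow the plain
scipy.optimize.minimize convention: inequalities are expressed as fun(x) >= 0,
equalities use type 'eq', and a double-sided bound is split into two one-sided
entries, which is what the dblcon branch does. A small self-contained sketch of
that convention with toy functions (illustrative only, not driver internals):

import numpy as np
from scipy.optimize import Bounds, minimize

# minimize (x0 - 1)^2 + (x1 - 2)^2  subject to  0 <= x0 + x1 <= 3
def lower_side(x):
    return x[0] + x[1]          # x0 + x1 >= 0  ->  fun(x) >= 0

def upper_side(x):
    return 3.0 - (x[0] + x[1])  # x0 + x1 <= 3  ->  3 - (x0 + x1) >= 0

constraints = [
    {'type': 'ineq', 'fun': lower_side},
    {'type': 'ineq', 'fun': upper_side},  # extra entry for the double-sided bound
]

# new-style bounds object, analogous to the old_bound_to_new conversion above
bounds = Bounds(lb=[-5.0, -5.0], ub=[5.0, 5.0])

result = minimize(lambda x: (x[0] - 1.0) ** 2 + (x[1] - 2.0) ** 2,
                  np.array([0.0, 0.0]), method='SLSQP',
                  bounds=bounds, constraints=constraints)
print(result.x, result.success)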