Example #1
    def run(self):
        """
        Color the model.
        """
        if coloring_mod._use_total_sparsity:
            if self._coloring_info['coloring'] is None and self._coloring_info['dynamic']:
                coloring_mod.dynamic_total_coloring(self, run_model=True,
                                                    fname=self._get_total_coloring_fname())
                self._setup_tot_jac_sparsity()
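
This first example is just the dynamic-coloring hook shared by the drivers below. A brief hedged sketch of how the hook is typically reached from user code (the prob Problem object and its configuration are assumptions, not part of the snippet):

# Hedged sketch: declare_coloring() marks the driver's coloring info as
# dynamic, so the run() hook above computes a total coloring during the
# first model evaluation.
prob.driver.declare_coloring()
prob.setup()
prob.run_driver()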
Example #2
    def run(self):
        """
        Execute pyOptSparse.

        Note that pyOpt controls the execution, and the individual optimizers
        (e.g., SNOPT) control the iteration.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        problem = self._problem()
        model = problem.model
        relevant = model._relevant
        self.pyopt_solution = None
        self._total_jac = None
        self.iter_count = 0
        fwd = problem._mode == 'fwd'
        optimizer = self.options['optimizer']
        self._quantities = []

        self._check_for_missing_objective()

        # Only need initial run if we have linear constraints or if we are using an optimizer that
        # doesn't perform one initially.
        con_meta = self._cons
        model_ran = False
        if optimizer in run_required or np.any(
            [con['linear'] for con in self._cons.values()]):
            with RecordingDebugging(self._get_name(), self.iter_count,
                                    self) as rec:
                # Initial Run
                model.run_solve_nonlinear()
                rec.abs = 0.0
                rec.rel = 0.0
                model_ran = True
            self.iter_count += 1

        # compute dynamic simul deriv coloring or just sparsity if option is set
        if c_mod._use_total_sparsity:
            coloring = None
            if self._coloring_info['coloring'] is None and self._coloring_info[
                    'dynamic']:
                coloring = c_mod.dynamic_total_coloring(
                    self,
                    run_model=not model_ran,
                    fname=self._get_total_coloring_fname())

            if coloring is not None:
                # if the improvement wasn't large enough, don't use coloring
                pct = coloring._solves_info()[-1]
                info = self._coloring_info
                if info['min_improve_pct'] > pct:
                    info['coloring'] = info['static'] = None
                    simple_warning(
                        "%s: Coloring was deactivated.  Improvement of %.1f%% was less "
                        "than min allowed (%.1f%%)." %
                        (self.msginfo, pct, info['min_improve_pct']))

        comm = None if isinstance(problem.comm, FakeComm) else problem.comm
        opt_prob = Optimization(self.options['title'],
                                weak_method_wrapper(self, '_objfunc'),
                                comm=comm)

        # Add all design variables
        param_meta = self._designvars
        self._indep_list = indep_list = list(param_meta)
        param_vals = self.get_design_var_values()

        for name, meta in param_meta.items():
            opt_prob.addVarGroup(name,
                                 meta['size'],
                                 type='c',
                                 value=param_vals[name],
                                 lower=meta['lower'],
                                 upper=meta['upper'])

        opt_prob.finalizeDesignVariables()

        # Add all objectives
        objs = self.get_objective_values()
        for name in objs:
            opt_prob.addObj(name)
            self._quantities.append(name)

        # Calculate and save derivatives for any linear constraints.
        lcons = [key for (key, con) in con_meta.items() if con['linear']]
        if len(lcons) > 0:
            _lin_jacs = self._compute_totals(of=lcons,
                                             wrt=indep_list,
                                             return_format='dict')
            # convert all of our linear constraint jacs to COO format. Otherwise pyoptsparse will
            # do it for us and we'll end up with a fully dense COO matrix and very slow evaluation
            # of linear constraints!
            for jacdct in _lin_jacs.values():
                for n, subjac in jacdct.items():
                    if isinstance(subjac, np.ndarray):
                        # we can safely use coo_matrix to automatically convert the ndarray
                        # since our linear constraint jacs are constant, so zeros won't become
                        # nonzero during the optimization.
                        mat = coo_matrix(subjac)
                        if mat.row.size > 0:
                            # convert to 'coo' format here to avoid an emphatic warning
                            # by pyoptsparse.
                            jacdct[n] = {
                                'coo': [mat.row, mat.col, mat.data],
                                'shape': mat.shape
                            }
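                            # Illustration (assumed values, not produced by
                            # this code): a dense 2x3 subjac
                            #   [[1., 0., 2.],
                            #    [0., 0., 3.]]
                            # becomes {'coo': [row, col, data], 'shape': (2, 3)}
                            # with row=[0, 0, 1], col=[0, 2, 2] and
                            # data=[1., 2., 3.], so only nonzeros are stored.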

        # Add all equality constraints
        for name, meta in con_meta.items():
            if meta['equals'] is None:
                continue
            size = meta['size']
            lower = upper = meta['equals']
            if fwd:
                wrt = [v for v in indep_list if name in relevant[v]]
            else:
                rels = relevant[name]
                wrt = [v for v in indep_list if v in rels]

            if meta['linear']:
                jac = {w: _lin_jacs[name][w] for w in wrt}
                opt_prob.addConGroup(name,
                                     size,
                                     lower=lower,
                                     upper=upper,
                                     linear=True,
                                     wrt=wrt,
                                     jac=jac)
            else:
                if name in self._res_jacs:
                    resjac = self._res_jacs[name]
                    jac = {n: resjac[n] for n in wrt}
                else:
                    jac = None
                opt_prob.addConGroup(name,
                                     size,
                                     lower=lower,
                                     upper=upper,
                                     wrt=wrt,
                                     jac=jac)
                self._quantities.append(name)

        # Add all inequality constraints
        for name, meta in con_meta.items():
            if meta['equals'] is not None:
                continue
            size = meta['size']

            # Bounds; double-sided constraints are supported
            lower = meta['lower']
            upper = meta['upper']

            if fwd:
                wrt = [v for v in indep_list if name in relevant[v]]
            else:
                rels = relevant[name]
                wrt = [v for v in indep_list if v in rels]

            if meta['linear']:
                jac = {w: _lin_jacs[name][w] for w in wrt}
                opt_prob.addConGroup(name,
                                     size,
                                     upper=upper,
                                     lower=lower,
                                     linear=True,
                                     wrt=wrt,
                                     jac=jac)
            else:
                if name in self._res_jacs:
                    resjac = self._res_jacs[name]
                    jac = {n: resjac[n] for n in wrt}
                else:
                    jac = None
                opt_prob.addConGroup(name,
                                     size,
                                     upper=upper,
                                     lower=lower,
                                     wrt=wrt,
                                     jac=jac)
                self._quantities.append(name)

        # Instantiate the requested optimizer
        try:
            _tmp = __import__('pyoptsparse', globals(), locals(), [optimizer],
                              0)
            opt = getattr(_tmp, optimizer)()

        except Exception as err:
            # Change whatever pyopt gives us to an ImportError with a readable
            # message, chaining the original exception so its traceback is
            # preserved.
            msg = "Optimizer %s is not available in this installation." % optimizer
            raise ImportError(msg) from err

        # Process any default optimizer-specific settings.
        if optimizer in DEFAULT_OPT_SETTINGS:
            for name, value in DEFAULT_OPT_SETTINGS[optimizer].items():
                if name not in self.opt_settings:
                    self.opt_settings[name] = value

        # Set optimization options
        for option, value in self.opt_settings.items():
            opt.setOption(option, value)

        # Execute the optimization problem
        if self.options['gradient method'] == 'pyopt_fd':

            # Use pyOpt's internal finite difference
            # TODO: Need to get this from OpenMDAO
            # fd_step = problem.model.deriv_options['step_size']
            fd_step = 1e-6
            sol = opt(opt_prob,
                      sens='FD',
                      sensStep=fd_step,
                      storeHistory=self.hist_file,
                      hotStart=self.hotstart_file)

        elif self.options['gradient method'] == 'snopt_fd':
            if self.options['optimizer'] == 'SNOPT':

                # Use SNOPT's internal finite difference
                # TODO: Need to get this from OpenMDAO
                # fd_step = problem.model.deriv_options['step_size']
                fd_step = 1e-6
                sol = opt(opt_prob,
                          sens=None,
                          sensStep=fd_step,
                          storeHistory=self.hist_file,
                          hotStart=self.hotstart_file)

            else:
                raise Exception(
                    "SNOPT's internal finite difference can only be used with SNOPT"
                )
        else:

            # Use OpenMDAO's differentiator for the gradient
            sol = opt(opt_prob,
                      sens=weak_method_wrapper(self, '_gradfunc'),
                      storeHistory=self.hist_file,
                      hotStart=self.hotstart_file)

        # Print results
        if self.options['print_results']:
            print(sol)

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        dv_dict = sol.getDVs()
        for name in indep_list:
            self.set_design_var(name, dv_dict[name])

        with RecordingDebugging(self._get_name(), self.iter_count,
                                self) as rec:
            model.run_solve_nonlinear()
            rec.abs = 0.0
            rec.rel = 0.0
        self.iter_count += 1

        # Save the most recent solution.
        self.pyopt_solution = sol

        try:
            exit_status = sol.optInform['value']
            self.fail = False

            # These are various failed statuses.
            if optimizer == 'IPOPT':
                if exit_status not in {0, 1}:
                    self.fail = True
            elif exit_status > 2:
                self.fail = True

        except KeyError:
            # optimizers other than pySNOPT may not populate this dict
            pass

        # revert signal handler to cached version
        sigusr = self.options['user_terminate_signal']
        if sigusr is not None:
            signal.signal(sigusr, self._signal_cache)
            self._signal_cache = None  # to prevent memory leak test from failing

        return self.fail
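
For orientation, a hedged usage sketch showing how this run() method is normally reached (assumptions: OpenMDAO with pyOptSparse installed; the paraboloid ExecComp is illustrative boilerplate, not taken from the code above):

import openmdao.api as om

prob = om.Problem()
prob.model.add_subsystem('comp',
                         om.ExecComp('f = (x - 3.0)**2 + x*y + (y + 4.0)**2 - 3.0'),
                         promotes=['*'])

prob.driver = om.pyOptSparseDriver()
prob.driver.options['optimizer'] = 'SLSQP'
prob.driver.options['print_results'] = True

prob.model.add_design_var('x', lower=-50.0, upper=50.0)
prob.model.add_design_var('y', lower=-50.0, upper=50.0)
prob.model.add_objective('f')

prob.setup()
prob.run_driver()  # dispatches to the run() method above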
Example #3
    def run(self):
        """
        Optimize the problem using the selected SciPy optimizer.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        problem = self._problem
        opt = self.options['optimizer']
        model = problem.model
        self.iter_count = 0
        self._total_jac = None

        self._check_for_missing_objective()

        # Initial Run
        with RecordingDebugging(self._get_name(), self.iter_count,
                                self) as rec:
            model.run_solve_nonlinear()
            self.iter_count += 1

        self._con_cache = self.get_constraint_values()
        desvar_vals = self.get_design_var_values()
        self._dvlist = list(self._designvars)

        # maxiter and disp get passed to scipy with all the other options.
        self.opt_settings['maxiter'] = self.options['maxiter']
        self.opt_settings['disp'] = self.options['disp']

        # Size Problem
        nparam = 0
        for param in itervalues(self._designvars):
            nparam += param['size']
        x_init = np.empty(nparam)

        # Initial Design Vars
        i = 0
        use_bounds = (opt in _bounds_optimizers)
        if use_bounds:
            bounds = []
        else:
            bounds = None

        for name, meta in iteritems(self._designvars):
            size = meta['size']
            x_init[i:i + size] = desvar_vals[name]
            i += size

            # Bounds if our optimizer supports them
            if use_bounds:
                meta_low = meta['lower']
                meta_high = meta['upper']
                for j in range(size):

                    if isinstance(meta_low, np.ndarray):
                        p_low = meta_low[j]
                    else:
                        p_low = meta_low

                    if isinstance(meta_high, np.ndarray):
                        p_high = meta_high[j]
                    else:
                        p_high = meta_high

                    bounds.append((p_low, p_high))

        if use_bounds and (opt in _supports_new_style) and _use_new_style:
            # For 'trust-constr' it is better to use the new type bounds, because it seems to work
            # better (for the current examples in the tests) with the "keep_feasible" option
            try:
                from scipy.optimize import Bounds
                from scipy.optimize._constraints import old_bound_to_new
            except ImportError:
                msg = (
                    'The "trust-constr" optimizer is supported for SciPy 1.1.0 and above. '
                    'The installed version is {}')
                raise ImportError(msg.format(scipy_version))

            # Convert "old-style" bounds to "new_style" bounds
            lower, upper = old_bound_to_new(bounds)  # tuple, tuple
            keep_feasible = self.opt_settings.get('keep_feasible_bounds', True)
            bounds = Bounds(lb=lower, ub=upper, keep_feasible=keep_feasible)
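            # For reference (assumed values, not from this code):
            # old_bound_to_new([(0., 1.), (-5., None)]) returns the stacked
            # arrays lower=[0., -5.] and upper=[1., inf] (None maps to
            # +/-inf), which Bounds then wraps in a single object.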

        # Constraints
        constraints = []
        i = 1  # start at 1 since row 0 is the objective.  Constraints start at row 1.
        lin_i = 0  # counter for linear constraint jacobian
        lincons = []  # list of linear constraints
        self._obj_and_nlcons = list(self._objs)

        if opt in _constraint_optimizers:
            for name, meta in iteritems(self._cons):
                size = meta['size']
                upper = meta['upper']
                lower = meta['lower']
                equals = meta['equals']
                if 'linear' in meta and meta['linear']:
                    lincons.append(name)
                    self._con_idx[name] = lin_i
                    lin_i += size
                else:
                    self._obj_and_nlcons.append(name)
                    self._con_idx[name] = i
                    i += size

                # SciPy's constraint optimizers take constraints in two different formats.

                # Type of constraints is list of NonlinearConstraint
                if opt in _supports_new_style and _use_new_style:
                    try:
                        from scipy.optimize import NonlinearConstraint
                    except ImportError:
                        msg = (
                            'The "trust-constr" optimizer is supported for SciPy 1.1.0 '
                            'and above. The installed version is {}')
                        raise ImportError(msg.format(scipy_version))

                    if equals is not None:
                        lb = ub = equals
                    else:
                        lb = lower
                        ub = upper
                    # Loop over every index separately,
                    # because scipy calls each constraint by index.
                    for j in range(size):
                        # Double-sided constraints are accepted by the algorithm
                        args = [name, False, j]
                        # TODO linear constraint if meta['linear']
                        # TODO add option for Hessian
                        con = NonlinearConstraint(
                            fun=signature_extender(self._con_val_func, args),
                            lb=lb,
                            ub=ub,
                            jac=signature_extender(self._congradfunc, args))
                        constraints.append(con)
                else:  # Type of constraints is list of dict
                    # Loop over every index separately,
                    # because scipy calls each constraint by index.
                    for j in range(size):
                        con_dict = {}
                        if meta['equals'] is not None:
                            con_dict['type'] = 'eq'
                        else:
                            con_dict['type'] = 'ineq'
                        con_dict['fun'] = self._confunc
                        if opt in _constraint_grad_optimizers:
                            con_dict['jac'] = self._congradfunc
                        con_dict['args'] = [name, False, j]
                        constraints.append(con_dict)

                        # Use per-index bound values without overwriting the
                        # full bound arrays needed by later indices.
                        upper_j = upper[j] if isinstance(upper, np.ndarray) else upper
                        lower_j = lower[j] if isinstance(lower, np.ndarray) else lower

                        dblcon = (upper_j < openmdao.INF_BOUND) and (
                            lower_j > -openmdao.INF_BOUND)

                        # Add extra constraint if double-sided
                        if dblcon:
                            dcon_dict = {}
                            dcon_dict['type'] = 'ineq'
                            dcon_dict['fun'] = self._confunc
                            if opt in _constraint_grad_optimizers:
                                dcon_dict['jac'] = self._congradfunc
                            dcon_dict['args'] = [name, True, j]
                            constraints.append(dcon_dict)
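                        # Reading of the args triple (inferred from context):
                        # the name selects the constraint, j selects the row,
                        # and the middle bool marks the extra entry added for
                        # the second side of a double-sided bound.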

            # precalculate gradients of linear constraints
            if lincons:
                self._lincongrad_cache = self._compute_totals(
                    of=lincons, wrt=self._dvlist, return_format='array')
            else:
                self._lincongrad_cache = None

        # Provide gradients for optimizers that support it
        if opt in _gradient_optimizers:
            jac = self._gradfunc
        else:
            jac = None

        # Hessian calculation method for optimizers that require it
        if opt in _hessian_optimizers:
            if 'hess' in self.opt_settings:
                hess = self.opt_settings.pop('hess')
            else:
                # Defaults to BFGS, if not in opt_settings
                from scipy.optimize import BFGS
                hess = BFGS()
        else:
            hess = None

        # compute dynamic simul deriv coloring if option is set
        if coloring_mod._use_total_sparsity:
            if self._coloring_info['coloring'] is coloring_mod._DYN_COLORING:
                coloring_mod.dynamic_total_coloring(
                    self,
                    run_model=False,
                    fname=self._get_total_coloring_fname())
            elif self.options['dynamic_simul_derivs']:
                warn_deprecation(
                    "The 'dynamic_simul_derivs' option has been deprecated. Call "
                    "the 'declare_coloring' function instead.")
                coloring_mod.dynamic_total_coloring(
                    self,
                    run_model=False,
                    fname=self._get_total_coloring_fname())

        # optimize
        try:
            if opt in _optimizers:
                result = minimize(
                    self._objfunc,
                    x_init,
                    # args=(),
                    method=opt,
                    jac=jac,
                    hess=hess,
                    # hessp=None,
                    bounds=bounds,
                    constraints=constraints,
                    tol=self.options['tol'],
                    # callback=None,
                    options=self.opt_settings)
            elif opt == 'basinhopping':
                from scipy.optimize import basinhopping

                def fun(x):
                    return self._objfunc(x), jac(x)

                if 'minimizer_kwargs' not in self.opt_settings:
                    self.opt_settings['minimizer_kwargs'] = {
                        "method": "L-BFGS-B",
                        "jac": True
                    }
                self.opt_settings.pop('maxiter')  # basinhopping does not accept this argument

                # Pop any user-supplied acceptance test up front; leaving it
                # in opt_settings would pass 'accept_test' to basinhopping a
                # second time via **self.opt_settings below.
                user_test = self.opt_settings.pop('accept_test', None)  # callable

                def accept_test(f_new, x_new, f_old, x_old):
                    # Used to implement bounds besides the original functionality
                    if bounds is not None:
                        bound_check = all(
                            b[0] <= xi <= b[1] for xi, b in zip(x_new, bounds))
                        # has to satisfy both the bounds and the acceptance
                        # test defined by the user
                        if user_test is not None:
                            test_res = user_test(f_new, x_new, f_old, x_old)
                            if test_res == 'force accept':
                                return test_res
                            else:  # result is boolean
                                return bound_check and test_res
                        else:  # no user acceptance test, check only the bounds
                            return bound_check
                    else:
                        return True

                result = basinhopping(fun,
                                      x_init,
                                      accept_test=accept_test,
                                      **self.opt_settings)
            elif opt == 'dual_annealing':
                from scipy.optimize import dual_annealing
                self.opt_settings.pop('disp')  # dual_annealing does not accept this argument
                # There is no "options" param, so "opt_settings" can be used to set the (many)
                # keyword arguments
                result = dual_annealing(self._objfunc,
                                        bounds=bounds,
                                        **self.opt_settings)
            elif opt == 'differential_evolution':
                from scipy.optimize import differential_evolution
                # There is no "options" param, so "opt_settings" can be used to set the (many)
                # keyword arguments
                result = differential_evolution(self._objfunc,
                                                bounds=bounds,
                                                **self.opt_settings)
            elif opt == 'shgo':
                from scipy.optimize import shgo
                kwargs = dict()
                for param in ('minimizer_kwargs', 'sampling_method', 'n',
                              'iters'):
                    if param in self.opt_settings:
                        kwargs[param] = self.opt_settings[param]
                # Set the Jacobian and the Hessian to the value calculated in OpenMDAO
                if 'minimizer_kwargs' not in kwargs or kwargs[
                        'minimizer_kwargs'] is None:
                    kwargs['minimizer_kwargs'] = {}
                kwargs['minimizer_kwargs'].setdefault('jac', jac)
                kwargs['minimizer_kwargs'].setdefault('hess', hess)
                # Objective function tolerance
                self.opt_settings['f_tol'] = self.options['tol']
                result = shgo(self._objfunc,
                              bounds=bounds,
                              constraints=constraints,
                              options=self.opt_settings,
                              **kwargs)
            else:
                msg = 'Optimizer "{}" is not implemented yet. Choose from: {}'
                raise NotImplementedError(msg.format(opt, _all_optimizers))

        # If an exception was swallowed in one of our callbacks, we want to raise it
        # rather than the cryptic message from scipy.
        except Exception as msg:
            if self._exc_info is not None:
                self._reraise()
            else:
                raise

        if self._exc_info is not None:
            self._reraise()

        self.result = result

        if hasattr(result, 'success'):
            self.fail = not result.success
            if self.fail:
                print('Optimization FAILED.')
                print(result.message)
                print('-' * 35)

            elif self.options['disp']:
                print('Optimization Complete')
                print('-' * 35)
        else:
            self.fail = True  # It is not known, so the worst option is assumed
            print('Optimization Complete (success not known)')
            print(result.message)
            print('-' * 35)

        return self.fail
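
A comparable hedged sketch for this SciPy driver (same illustrative paraboloid as above; SLSQP supports the bounds and constraints this method assembles):

import openmdao.api as om

prob = om.Problem()
prob.model.add_subsystem('comp',
                         om.ExecComp('f = (x - 3.0)**2 + x*y + (y + 4.0)**2 - 3.0'),
                         promotes=['*'])

prob.driver = om.ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
prob.driver.options['maxiter'] = 100
prob.driver.options['tol'] = 1e-8
prob.driver.options['disp'] = True

prob.model.add_design_var('x', lower=-50.0, upper=50.0)
prob.model.add_design_var('y', lower=-50.0, upper=50.0)
prob.model.add_objective('f')

prob.setup()
failed = prob.run_driver()  # returns the fail flag computed above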
Example #4
def view_driver_scaling(driver,
                        outfile='driver_scaling_report.html',
                        show_browser=True,
                        title=None,
                        jac=True):
    """
    Generate a self-contained html file containing a detailed driver scaling report.

    Optionally pops up a web browser to view the file.

    Parameters
    ----------
    driver : Driver
        The driver used for the scaling report.
    outfile : str, optional
        The name of the output html file.  Defaults to 'driver_scaling_report.html'.
    show_browser : bool, optional
        If True, pop up a browser to view the generated html file.
        Defaults to True.
    title : str, optional
        Sets the title of the web page.
    jac : bool
        If True, show jacobian information.

    Returns
    -------
    dict
        Data used to generate the html file.
    """
    if MPI and MPI.COMM_WORLD.rank != 0:
        return

    dv_table = []
    con_table = []
    obj_table = []

    dv_vals = driver.get_design_var_values(get_remote=True)
    obj_vals = driver.get_objective_values(driver_scaling=True)
    con_vals = driver.get_constraint_values(driver_scaling=True)

    mod_meta = driver._problem().model._var_allprocs_abs2meta['output']

    if driver._problem()._metadata['setup_status'] < _SetupStatus.POST_FINAL_SETUP:
        raise RuntimeError(
            "Driver scaling report cannot be generated before calling final_setup "
            "on the Problem.")

    default = ''

    idx = 1  # unique ID for use by Tabulator

    # set up design vars table data
    for name, meta in driver._designvars.items():
        scaler = meta['total_scaler']
        adder = meta['total_adder']
        ref = meta['ref']
        ref0 = meta['ref0']
        lower = meta['lower']
        upper = meta['upper']

        dval = dv_vals[name]
        mval = _unscale(dval, scaler, adder, default)

        if dval.size == 1:
            index = meta['indices']
            if index is not None:
                index = index[0]
            index = _getdef(index, '')
        else:
            index = ''

        dct = {
            'id': idx,
            'name': name,
            'size': meta['size'],
            'driver_val': _get_val_and_size(dval),
            'driver_units': _getdef(meta['units'], default),
            'model_val': _get_val_and_size(mval),
            'model_units': _getdef(mod_meta[meta['ivc_source']]['units'],
                                   default),
            'ref': _get_val_and_size(ref, default),
            'ref0': _get_val_and_size(ref0, default),
            'scaler': _get_val_and_size(scaler, default),
            'adder': _get_val_and_size(adder, default),
            'lower': _get_val_and_size(lower, default),  # scaled
            'upper': _get_val_and_size(upper, default),  # scaled
            'index': index,
        }

        dv_table.append(dct)

        _add_child_rows(dct,
                        mval,
                        dval,
                        scaler=scaler,
                        adder=adder,
                        ref=ref,
                        ref0=ref0,
                        lower=lower,
                        upper=upper,
                        inds=meta['indices'])

        idx += 1

    # set up constraints table data
    for name, meta in driver._cons.items():
        scaler = meta['total_scaler']
        adder = meta['total_adder']
        ref = meta['ref']
        ref0 = meta['ref0']
        lower = meta['lower']
        upper = meta['upper']
        equals = meta['equals']

        print(name, "EQUALS:", _get_val_and_size(meta['equals'], default))

        dval = con_vals[name]
        mval = _unscale(dval, scaler, adder, default)

        if dval.size == 1:
            index = meta['indices']
            if index is not None:
                index = index[0]
            index = _getdef(index, '')
        else:
            index = ''

        dct = {
            'id': idx,
            'name': name,
            'size': meta['size'],
            'index': index,
            'driver_val': _get_val_and_size(dval),
            'driver_units': _getdef(meta['units'], default),
            'model_val': _get_val_and_size(mval),
            'model_units': _getdef(mod_meta[meta['ivc_source']]['units'],
                                   default),
            'ref': _get_val_and_size(meta['ref'], default),
            'ref0': _get_val_and_size(meta['ref0'], default),
            'scaler': _get_val_and_size(scaler, default),
            'adder': _get_val_and_size(adder, default),
            'lower': _get_val_and_size(meta['lower'], default),  # scaled
            'upper': _get_val_and_size(meta['upper'], default),  # scaled
            'equals': _get_val_and_size(meta['equals'], default),  # scaled
            'linear': meta['linear'],
        }

        con_table.append(dct)
        _add_child_rows(dct,
                        mval,
                        dval,
                        scaler=scaler,
                        adder=adder,
                        ref=ref,
                        ref0=ref0,
                        lower=lower,
                        upper=upper,
                        equals=equals,
                        inds=meta['indices'])

        idx += 1

    # set up objectives table data
    for name, meta in driver._objs.items():
        scaler = meta['total_scaler']
        adder = meta['total_adder']
        ref = meta['ref']
        ref0 = meta['ref0']

        dval = obj_vals[name]
        mval = _unscale(dval, scaler, adder, default)

        if dval.size == 1:
            index = meta['indices']
            if index is not None:
                index = index[0]
            index = _getdef(index, '')
        else:
            index = ''

        dct = {
            'id': idx,
            'name': name,
            'size': meta['size'],
            'index': index,
            'driver_val': _get_val_and_size(dval),
            'driver_units': _getdef(meta['units'], default),
            'model_val': _get_val_and_size(mval),
            'model_units': _getdef(mod_meta[meta['ivc_source']]['units'],
                                   default),
            'ref': _get_val_and_size(meta['ref'], default),
            'ref0': _get_val_and_size(meta['ref0'], default),
            'scaler': _get_val_and_size(scaler, default),
            'adder': _get_val_and_size(adder, default),
        }

        obj_table.append(dct)
        _add_child_rows(dct,
                        mval,
                        dval,
                        scaler=scaler,
                        adder=adder,
                        ref=ref,
                        ref0=ref0,
                        inds=meta['indices'])

        idx += 1

    data = {
        'title': _getdef(title, ''),
        'dv_table': dv_table,
        'con_table': con_table,
        'obj_table': obj_table,
        'oflabels': [],
        'wrtlabels': [],
        'var_mat_list': [],
        'linear': {
            'oflabels': [],
        }
    }

    if jac and not driver._problem().model._use_derivatives:
        print("\nCan't display jacobian because derivatives are turned off.\n")
        jac = False

    if jac:
        coloring = driver._get_static_coloring()
        if coloring_mod._use_total_sparsity:
            if coloring is None and driver._coloring_info['dynamic']:
                coloring = coloring_mod.dynamic_total_coloring(driver)

        # assemble data for jacobian visualization
        data['oflabels'] = driver._get_ordered_nl_responses()
        data['wrtlabels'] = list(dv_vals)

        totals = driver._compute_totals(of=data['oflabels'],
                                        wrt=data['wrtlabels'],
                                        return_format='array')

        data['linear'] = lindata = {}
        lindata['oflabels'] = [
            n for n, meta in driver._cons.items() if meta['linear']
        ]
        lindata['wrtlabels'] = data['wrtlabels']  # needs to mimic data structure

        # check for separation of linear constraints
        if lindata['oflabels']:
            if set(lindata['oflabels']).intersection(data['oflabels']):
                # linear cons are found in data['oflabels'] so they're not separated
                lindata['oflabels'] = []
                lindata['wrtlabels'] = []

        full_response_vals = con_vals.copy()
        full_response_vals.update(obj_vals)
        response_vals = {n: full_response_vals[n] for n in data['oflabels']}

        _compute_jac_view_info(totals, data, dv_vals, response_vals, coloring)

        if lindata['oflabels']:
            # prevent reuse of nonlinear totals
            save = driver._total_jac
            driver._total_jac = None

            lintotals = driver._compute_totals(of=lindata['oflabels'],
                                               wrt=data['wrtlabels'],
                                               return_format='array')
            lin_response_vals = {
                n: full_response_vals[n]
                for n in lindata['oflabels']
            }
            driver._total_jac = save

            _compute_jac_view_info(lintotals, lindata, dv_vals,
                                   lin_response_vals, None)

    if driver._problem().comm.rank == 0:

        viewer = 'scaling_table.html'

        code_dir = os.path.dirname(os.path.abspath(__file__))
        libs_dir = os.path.join(os.path.dirname(code_dir), 'common', 'libs')
        style_dir = os.path.join(os.path.dirname(code_dir), 'common', 'style')

        with open(os.path.join(code_dir, viewer), "r") as f:
            template = f.read()

        with open(os.path.join(libs_dir, 'tabulator.min.js'), "r") as f:
            tabulator_src = f.read()

        with open(os.path.join(style_dir, 'tabulator.min.css'), "r") as f:
            tabulator_style = f.read()

        with open(os.path.join(libs_dir, 'd3.v6.min.js'), "r") as f:
            d3_src = f.read()

        jsontxt = json.dumps(data, default=default_noraise)

        with open(outfile, 'w') as f:
            s = template.replace("<tabulator_src>", tabulator_src)
            s = s.replace("<tabulator_style>", tabulator_style)
            s = s.replace("<d3_src>", d3_src)
            s = s.replace("<scaling_data>", jsontxt)
            f.write(s)

        if show_browser:
            webview(outfile)

    return data
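
A hedged usage sketch for the report function (assumptions: view_driver_scaling is imported from the module that ships the scaling_table.html template, and prob is an already-configured Problem with a driver, design vars, and responses):

prob.setup()
prob.final_setup()  # setup_status must reach POST_FINAL_SETUP first

data = view_driver_scaling(prob.driver,
                           outfile='driver_scaling_report.html',
                           show_browser=False,
                           title='My model scaling',
                           jac=True)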
Example #5
    def run(self):
        """
        Optimize the problem using the selected NLopt optimizer.
        """
        problem = self._problem()
        opt = self.options["optimizer"]
        model = problem.model
        self.iter_count = 0
        self._total_jac = None

        self._check_for_missing_objective()

        # Initial Run
        with RecordingDebugging(self._get_name(), self.iter_count,
                                self) as rec:
            model.run_solve_nonlinear()
            self.iter_count += 1

        self._con_cache = self.get_constraint_values()
        desvar_vals = self.get_design_var_values()
        self._dvlist = list(self._designvars)

        # Size Problem
        nparam = 0
        for param in self._designvars.values():
            nparam += param["size"]
        x_init = np.empty(nparam)

        # Initialize the NLopt problem with the method and number of design vars
        opt_prob = nlopt.opt(optimizer_methods[opt], int(nparam))

        # Initial Design Vars
        i = 0
        use_bounds = opt in _bounds_optimizers
        if use_bounds:
            bounds = []
        else:
            bounds = None

        # Loop through all OpenMDAO design variables and process their bounds
        for name, meta in self._designvars.items():
            size = meta["size"]
            x_init[i:i + size] = desvar_vals[name]
            i += size

            # Bounds if our optimizer supports them
            if use_bounds:
                meta_low = meta["lower"]
                meta_high = meta["upper"]
                for j in range(size):

                    if isinstance(meta_low, np.ndarray):
                        p_low = meta_low[j]
                    else:
                        p_low = meta_low

                    if isinstance(meta_high, np.ndarray):
                        p_high = meta_high[j]
                    else:
                        p_high = meta_high

                    bounds.append((p_low, p_high))

        # Actually add the bounds to the optimization problem.
        if bounds is not None:
            lower_bound, upper_bound = zip(*bounds)
            lower = np.array(
                [x if x is not None else -np.inf for x in lower_bound])
            upper = np.array(
                [x if x is not None else np.inf for x in upper_bound])
            opt_prob.set_lower_bounds(lower)
            opt_prob.set_upper_bounds(upper)

        # Constraints
        i = 1  # start at 1 since row 0 is the objective.  Constraints start at row 1.
        lin_i = 0  # counter for linear constraint jacobian
        lincons = []  # list of linear constraints
        self._obj_and_nlcons = list(self._objs)

        # Process and add constraints to the optimization problem.
        if opt in _constraint_optimizers:
            for name, meta in self._cons.items():
                size = meta["global_size"] if meta["distributed"] else meta[
                    "size"]
                upper = meta["upper"]
                lower = meta["lower"]
                equals = meta["equals"]

                if opt in _gradient_optimizers and "linear" in meta and meta[
                        "linear"]:
                    lincons.append(name)
                    self._con_idx[name] = lin_i
                    lin_i += size
                else:
                    self._obj_and_nlcons.append(name)
                    self._con_idx[name] = i
                    i += size

                # Loop over every index separately, because it's easier to
                # define each constraint by index.
                for j in range(size):

                    # Add an equality constraint directly if the optimizer supports it
                    if equals is not None:
                        args = [name, False, j]
                        try:
                            opt_prob.add_equality_constraint(
                                signature_extender(
                                    weak_method_wrapper(self, "_confunc"),
                                    args))
                        except ValueError:
                            msg = (
                                "The selected optimizer, {}, does not support"
                                + " equality constraints. Select from {}.")
                            raise NotImplementedError(
                                msg.format(opt, _eq_constraint_optimizers))

                    else:
                        # Double-sided constraints are accepted by the algorithm
                        args = [name, False, j]
                        opt_prob.add_inequality_constraint(
                            signature_extender(
                                weak_method_wrapper(self, "_confunc"), args))

                        # Use per-index bound values without overwriting the
                        # full bound arrays needed by later indices.
                        upper_j = upper[j] if isinstance(upper, np.ndarray) else upper
                        lower_j = lower[j] if isinstance(lower, np.ndarray) else lower

                        dblcon = (upper_j < openmdao.INF_BOUND) and (
                            lower_j > -openmdao.INF_BOUND)

                        # Add extra constraint if double-sided
                        if dblcon:
                            args = [name, True, j]
                            opt_prob.add_inequality_constraint(
                                signature_extender(
                                    weak_method_wrapper(self, "_confunc"),
                                    args))

            # precalculate gradients of linear constraints
            if lincons:
                self._lincongrad_cache = self._compute_totals(
                    of=lincons, wrt=self._dvlist, return_format="array")
            else:
                self._lincongrad_cache = None

        # compute dynamic simul deriv coloring if option is set
        if coloring_mod._use_total_sparsity:
            if (self._coloring_info["coloring"] is None
                    and self._coloring_info["dynamic"]):
                coloring_mod.dynamic_total_coloring(
                    self,
                    run_model=False,
                    fname=self._get_total_coloring_fname())

                # if the improvement wasn't large enough, turn coloring off
                info = self._coloring_info
                if info["coloring"] is not None:
                    pct = info["coloring"]._solves_info()[-1]
                    if info["min_improve_pct"] > pct:
                        info["coloring"] = info["static"] = None
                        simple_warning(
                            "%s: Coloring was deactivated.  Improvement of %.1f%% was "
                            "less than min allowed (%.1f%%)." %
                            (self.msginfo, pct, info["min_improve_pct"]))

        # Finalize the optimization problem setup and actually perform optimization
        try:
            if opt in _optimizers:
                opt_prob.set_min_objective(self._objfunc)
                opt_prob.set_ftol_rel(self.options["tol"])
                opt_prob.set_maxeval(int(self.options["maxiter"]))
                opt_prob.set_maxtime(self.options["maxtime"])
                opt_prob.optimize(x_init)
                self.result = opt_prob.last_optimize_result()

            else:
                msg = 'Optimizer "{}" is not implemented yet. Choose from: {}'
                raise NotImplementedError(msg.format(opt, _optimizers))

        # If an exception was swallowed in one of our callbacks, we want to raise it
        except Exception as msg:
            if self._exc_info is not None:
                self._reraise()
            else:
                raise

        if self._exc_info is not None:
            self._reraise()
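
Finally, a hedged sketch for this NLopt-based driver (assumptions: the NLoptDriver class is importable from its package, follows the standard OpenMDAO driver interface, and 'LD_SLSQP' is a valid key of its optimizer_methods mapping):

import openmdao.api as om

prob = om.Problem()
prob.model.add_subsystem('comp',
                         om.ExecComp('f = (x - 3.0)**2 + x*y + (y + 4.0)**2 - 3.0'),
                         promotes=['*'])

prob.driver = NLoptDriver()  # hypothetical import; see assumptions above
prob.driver.options['optimizer'] = 'LD_SLSQP'
prob.driver.options['maxiter'] = 200
prob.driver.options['tol'] = 1e-8

prob.model.add_design_var('x', lower=-50.0, upper=50.0)
prob.model.add_design_var('y', lower=-50.0, upper=50.0)
prob.model.add_objective('f')

prob.setup()
prob.run_driver()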