Example #1
    def _run_case(self, case):
        """
        Run case, save exception info and mark the metadata if the case fails.

        Parameters
        ----------
        case : list
            list of name, value tuples for the design variables.
        """
        metadata = {}

        for dv_name, dv_val in case:
            self.set_design_var(dv_name, dv_val)

        with RecordingDebugging(self._name, self.iter_count, self) as rec:
            try:
                failure_flag, _, _ = self._problem.model._solve_nonlinear()
                metadata['success'] = not failure_flag
                metadata['msg'] = ''
            except AnalysisError:
                metadata['success'] = 0
                metadata['msg'] = traceback.format_exc()
            except Exception:
                metadata['success'] = 0
                metadata['msg'] = traceback.format_exc()
                print(metadata['msg'])

            # save reference to metadata for use in record_iteration
            self._metadata = metadata
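A minimal stand-alone sketch of the `case` format this method expects; the variable names and values below are hypothetical and only illustrate the list-of-tuples structure described in the docstring:

    import numpy as np

    # A case is a list of (design-variable name, value) tuples.
    case = [
        ('x', np.array([1.0])),        # hypothetical scalar design variable
        ('z', np.array([5.0, 2.0])),   # hypothetical 2-element design variable
    ]

    # A DOE-style driver would typically call driver._run_case(case) for each
    # generated case; here we only unpack the tuples to show the structure.
    for dv_name, dv_val in case:
        print(dv_name, dv_val)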
Example #2
    def _objfunc(self, x_new, grad):
        """
        Evaluate and return the objective function.

        Model is executed here.

        Parameters
        ----------
        x_new : ndarray
            Array containing parameter values at new design point.
        grad : ndarray
            Empty array that is modified in-place with gradient information for
            the new design point.

        Returns
        -------
        float
            Value of the objective function evaluated at the new design point.
        """
        model = self._problem().model

        try:

            # Pass in new parameters
            i = 0
            for name, meta in self._designvars.items():
                size = meta["size"]
                self.set_design_var(name, x_new[i:i + size])
                i += size

            with RecordingDebugging(self._get_name(), self.iter_count,
                                    self) as rec:
                self.iter_count += 1

                # This is the actual model evaluation for OpenMDAO
                model.run_solve_nonlinear()

            # Get the objective function evaluations
            f_new = list(self.get_objective_values().values())[0]

            self._con_cache = self.get_constraint_values()

        except Exception as msg:
            self._exc_info = msg

        try:
            if grad.size > 0:
                self._grad_cache = self._compute_totals(
                    of=self._obj_and_nlcons,
                    wrt=self._dvlist,
                    return_format="array")
                grad[:] = self._grad_cache[0, :]

        except Exception as msg:
            self._exc_info = msg

        return float(f_new)
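The `(x_new, grad)` signature above follows the NLopt-style convention in which the gradient array is filled in place only when the optimizer requests it (`grad.size > 0`). A self-contained toy callback with the same contract, assuming nothing beyond NumPy:

    import numpy as np

    def toy_objfunc(x_new, grad):
        """Toy objective with the same (x, grad) in-place gradient contract."""
        if grad.size > 0:
            grad[:] = 2.0 * x_new          # gradient of sum(x**2), written in place
        return float(np.sum(x_new ** 2))

    x = np.array([1.0, 2.0])
    g = np.empty_like(x)                   # gradient requested
    print(toy_objfunc(x, g), g)            # -> 5.0 [2. 4.]
    print(toy_objfunc(x, np.empty(0)))     # gradient not requested -> 5.0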
Example #3
    def run(self):
        """
        Execute the driver algorithm.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        self._check_for_missing_objective()

        # Size design variables.
        desvars = self._designvars
        desvar_vals = self.get_design_var_values()

        count = 0
        for name, meta in desvars.items():
            size = meta['size']
            self._desvar_idx[name] = (count, count + size)
            count += size

        lower_bound = np.empty((count, ))
        upper_bound = np.empty((count, ))
        x0 = np.empty(count)

        # Figure out bounds vectors and initial design vars
        for name, meta in desvars.items():
            i, j = self._desvar_idx[name]
            lower_bound[i:j] = meta['lower']
            upper_bound[i:j] = meta['upper']
            x0[i:j] = desvar_vals[name]

        model_mpi = None
        comm = self._problem().comm
        if self._concurrent_pop_size > 0:
            model_mpi = (self._concurrent_pop_size, self._concurrent_color)
        elif not self.options['run_parallel']:
            comm = None

        xopt, fopt = self.options['algorithm'].execute(x0, lower_bound, upper_bound,
                                                       self.objective_callback,
                                                       comm, model_mpi)

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        for name in desvars:
            i, j = self._desvar_idx[name]
            val = xopt[i:j]
            self.set_design_var(name, val)

        with RecordingDebugging(self._get_name(), self.iter_count, self) as rec:
            self._problem().model.run_solve_nonlinear()
            rec.abs = 0.0
            rec.rel = 0.0
        self.iter_count += 1

        return False
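The `_desvar_idx` bookkeeping above maps each design-variable name to a slice of one flat vector. A small sketch of the same packing pattern, using made-up variable names, sizes, and bounds:

    import numpy as np

    # Hypothetical design-variable metadata and current values.
    desvars = {'x': {'size': 1, 'lower': -10.0, 'upper': 10.0},
               'z': {'size': 2, 'lower': 0.0, 'upper': 5.0}}
    desvar_vals = {'x': np.array([3.0]), 'z': np.array([1.0, 2.0])}

    desvar_idx = {}
    count = 0
    for name, meta in desvars.items():
        desvar_idx[name] = (count, count + meta['size'])
        count += meta['size']

    x0 = np.empty(count)
    lower_bound = np.empty(count)
    upper_bound = np.empty(count)
    for name, meta in desvars.items():
        i, j = desvar_idx[name]
        x0[i:j] = desvar_vals[name]
        lower_bound[i:j] = meta['lower']
        upper_bound[i:j] = meta['upper']

    print(desvar_idx)   # {'x': (0, 1), 'z': (1, 3)}
    print(x0)           # [3. 1. 2.]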
Example #4
    def run(self):
        """
        Execute the CMA-ES algorithm.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        self._check_for_missing_objective()

        # Size design variables.
        desvars = self._designvars
        desvar_vals = self.get_design_var_values()

        count = 0
        for name, meta in desvars.items():
            size = meta['size']
            self._desvar_idx[name] = (count, count + size)
            count += size

        lower_bound = np.empty((count, ))
        upper_bound = np.empty((count, ))
        x0 = np.empty(count)

        # Figure out bounds vectors and initial design vars
        for name, meta in desvars.items():
            i, j = self._desvar_idx[name]
            lower_bound[i:j] = meta['lower']
            upper_bound[i:j] = meta['upper']
            x0[i:j] = desvar_vals[name]

        self.CMAOptions['bounds'] = [lower_bound, upper_bound]

        desvar_new, obj = self._cmaes.execute(x0, self.options['sigma0'],
                                              self.CMAOptions)

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        for name in desvars:
            i, j = self._desvar_idx[name]
            val = desvar_new[i:j]
            self.set_design_var(name, val)

        with RecordingDebugging(self._get_name(), self.iter_count,
                                self) as rec:
            self._problem().model.run_solve_nonlinear()
            rec.abs = 0.0
            rec.rel = 0.0
        self.iter_count += 1

        return False
Example #5
    def objective_callback(self, x, icase):
        """
        Evaluate problem objective at the requested point.

        Parameters
        ----------
        x : ndarray
            Value of design variables.
        icase : int
            Case number, used for identification when run in parallel.

        Returns
        -------
        float
            Objective value
        bool
            Success flag, True if successful
        int
            Case number, used for identification when run in parallel.
        """
        model = self._problem.model
        success = 1

        for name in self._designvars:
            i, j = self._desvar_idx[name]
            self.set_design_var(name, x[i:j])

        # Execute the model
        with RecordingDebugging('SimpleGA', self.iter_count, self) as rec:
            self.iter_count += 1
            try:
                model._solve_nonlinear()

            # Tell the optimizer that this is a bad point.
            except AnalysisError:
                model._clear_iprint()
                success = 0

            for name, val in iteritems(self.get_objective_values()):
                obj = val
                break

            # Record after getting obj to assure they have
            # been gathered in MPI.
            rec.abs = 0.0
            rec.rel = 0.0

        # print("Functions calculated")
        # print(x)
        # print(obj)
        return obj, success, icase
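The callback returns an `(objective, success, case number)` triple so that evaluations can be matched up again when run in parallel. A toy stand-in with the same contract, evaluated over a hypothetical population:

    import numpy as np

    def toy_objective_callback(x, icase):
        """Stand-in with the same (x, icase) -> (obj, success, icase) contract."""
        return float(np.sum((x - 1.0) ** 2)), 1, icase

    population = np.random.rand(4, 3)      # hypothetical GA population
    results = [toy_objective_callback(x, i) for i, x in enumerate(population)]
    for obj, success, icase in results:
        print(icase, obj, success)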
Example #6
    def _objfunc(self, x_new):
        """
        Evaluate and return the objective function.

        Model is executed here.

        Parameters
        ----------
        x_new : ndarray
            Array containing parameter values at new design point.

        Returns
        -------
        float
            Value of the objective function evaluated at the new design point.
        """
        model = self._problem().model

        try:

            # Pass in new parameters
            i = 0
            if MPI:
                model.comm.Bcast(x_new, root=0)
            for name, meta in self._designvars.items():
                size = meta['size']
                self.set_design_var(name, x_new[i:i + size])
                i += size

            with RecordingDebugging(self._get_name(), self.iter_count,
                                    self) as rec:
                self.iter_count += 1
                model.run_solve_nonlinear()

            # Get the objective function evaluations
            for obj in self.get_objective_values().values():
                f_new = obj
                break

            self._con_cache = self.get_constraint_values()

        except Exception as msg:
            self._exc_info = msg
            return 0

        # print("Functions calculated")
        # print('   xnew', x_new)
        # print('   fnew', f_new)

        return f_new
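When run under MPI, the design vector is broadcast from rank 0 so that every process evaluates the same point. A minimal mpi4py sketch of that `Bcast` pattern (assumes mpi4py is installed and the script is launched with MPI, e.g. `mpiexec -n 2 python script.py`):

    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    x_new = np.empty(3)
    if comm.rank == 0:
        x_new[:] = [0.1, 0.2, 0.3]   # only rank 0 starts with the new point

    # In-place buffer broadcast, mirroring model.comm.Bcast(x_new, root=0) above.
    comm.Bcast(x_new, root=0)
    print(comm.rank, x_new)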
Example #7
    def _objfunc(self, x_new):
        """
        Evaluate and return the objective function.

        Model is executed here.

        Parameters
        ----------
        x_new : ndarray
            Array containing parameter values at new design point.

        Returns
        -------
        float
            Value of the objective function evaluated at the new design point.
        """
        model = self._problem.model

        try:

            # Pass in new parameters
            i = 0
            for name, meta in iteritems(self._designvars):
                size = meta['size']
                self.set_design_var(name, x_new[i:i + size])
                i += size

            with RecordingDebugging(self.options['optimizer'], self.iter_count,
                                    self) as rec:
                self.iter_count += 1
                model._solve_nonlinear()

            # Get the objective function evaluations
            for name, obj in iteritems(self.get_objective_values()):
                f_new = obj
                break

            self._con_cache = self.get_constraint_values()

        except Exception as msg:
            self._exc_info = sys.exc_info()
            return 0

        # print("Functions calculated")
        # print(x_new)
        # print(f_new)

        return f_new
Example #8
    def evaluate_individual(self, individual):
        self.evaluation_metadata.update({"generation": individual.generation})

        for (name, value) in self.toolbox.ind_to_dvs(individual).items():
            self.set_design_var(name, value)

        with RecordingDebugging(self._get_name(), self.iter_count, self):
            self._problem().model.run_solve_nonlinear()

        self.iter_count += 1

        return ObjectiveValueWithConstraintViolation(
            objectives=tuple(
                chain.from_iterable(
                    x.flat for x in self.get_objective_values().values())),
            constraint_violation=constraint_violation(
                values=self.get_constraint_values(),
                meta=self._problem().model.get_constraints(),
            ),
        )
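The `chain.from_iterable(... .flat ...)` expression flattens every objective array into a single tuple regardless of shape. A self-contained illustration with hypothetical objective values:

    from itertools import chain

    import numpy as np

    # Hypothetical dict as returned by get_objective_values().
    objective_values = {'f1': np.array([1.5]), 'f2': np.array([[2.0, 3.0]])}

    objectives = tuple(chain.from_iterable(
        x.flat for x in objective_values.values()))
    print(objectives)   # (1.5, 2.0, 3.0)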
Example #9
    def _run_case(self, case):
        """
        Run case, save exception info and mark the metadata if the case fails.

        Parameters
        ----------
        case : list
            list of name, value tuples for the design variables.
        """
        metadata = {}

        for dv_name, dv_val in case:
            try:
                msg = None
                if isinstance(dv_val, np.ndarray):
                    self.set_design_var(dv_name, dv_val.flatten())
                else:
                    self.set_design_var(dv_name, dv_val)
            except ValueError as err:
                msg = "Error assigning %s = %s: " % (dv_name,
                                                     dv_val) + str(err)
            finally:
                if msg:
                    raise ValueError(msg)

        with RecordingDebugging(self._get_name(), self.iter_count,
                                self) as rec:
            try:
                self._problem().model.run_solve_nonlinear()
                metadata['success'] = 1
                metadata['msg'] = ''
            except AnalysisError:
                metadata['success'] = 0
                metadata['msg'] = traceback.format_exc()
            except Exception:
                metadata['success'] = 0
                metadata['msg'] = traceback.format_exc()
                print(metadata['msg'])

            # save reference to metadata for use in record_iteration
            self._metadata = metadata
Example #10
    def run(self):
        """
        Optimize the problem using selected Scipy optimizer.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        problem = self._problem
        opt = self.options['optimizer']
        model = problem.model
        self.iter_count = 0
        self._total_jac = None

        self._check_for_missing_objective()

        # Initial Run
        with RecordingDebugging(self._get_name(), self.iter_count,
                                self) as rec:
            model.run_solve_nonlinear()
            self.iter_count += 1

        self._con_cache = self.get_constraint_values()
        desvar_vals = self.get_design_var_values()
        self._dvlist = list(self._designvars)

        # maxiter and disp get passed into scipy with all the other options.
        self.opt_settings['maxiter'] = self.options['maxiter']
        self.opt_settings['disp'] = self.options['disp']

        # Size Problem
        nparam = 0
        for param in itervalues(self._designvars):
            nparam += param['size']
        x_init = np.empty(nparam)

        # Initial Design Vars
        i = 0
        use_bounds = (opt in _bounds_optimizers)
        if use_bounds:
            bounds = []
        else:
            bounds = None

        for name, meta in iteritems(self._designvars):
            size = meta['size']
            x_init[i:i + size] = desvar_vals[name]
            i += size

            # Bounds if our optimizer supports them
            if use_bounds:
                meta_low = meta['lower']
                meta_high = meta['upper']
                for j in range(size):

                    if isinstance(meta_low, np.ndarray):
                        p_low = meta_low[j]
                    else:
                        p_low = meta_low

                    if isinstance(meta_high, np.ndarray):
                        p_high = meta_high[j]
                    else:
                        p_high = meta_high

                    bounds.append((p_low, p_high))

        if use_bounds and (opt in _supports_new_style) and _use_new_style:
            # For 'trust-constr' it is better to use the new type bounds, because it seems to work
            # better (for the current examples in the tests) with the "keep_feasible" option
            try:
                from scipy.optimize import Bounds
                from scipy.optimize._constraints import old_bound_to_new
            except ImportError:
                msg = (
                    'The "trust-constr" optimizer is supported for SciPy 1.1.0 and above. '
                    'The installed version is {}')
                raise ImportError(msg.format(scipy_version))

            # Convert "old-style" bounds to "new_style" bounds
            lower, upper = old_bound_to_new(bounds)  # tuple, tuple
            keep_feasible = self.opt_settings.get('keep_feasible_bounds', True)
            bounds = Bounds(lb=lower, ub=upper, keep_feasible=keep_feasible)

        # Constraints
        constraints = []
        i = 1  # start at 1 since row 0 is the objective.  Constraints start at row 1.
        lin_i = 0  # counter for linear constraint jacobian
        lincons = []  # list of linear constraints
        self._obj_and_nlcons = list(self._objs)

        if opt in _constraint_optimizers:
            for name, meta in iteritems(self._cons):
                size = meta['size']
                upper = meta['upper']
                lower = meta['lower']
                equals = meta['equals']
                if 'linear' in meta and meta['linear']:
                    lincons.append(name)
                    self._con_idx[name] = lin_i
                    lin_i += size
                else:
                    self._obj_and_nlcons.append(name)
                    self._con_idx[name] = i
                    i += size

                # In scipy, constraint optimizers take constraints in two separate formats

                # Type of constraints is list of NonlinearConstraint
                if opt in _supports_new_style and _use_new_style:
                    try:
                        from scipy.optimize import NonlinearConstraint
                    except ImportError:
                        msg = (
                            'The "trust-constr" optimizer is supported for SciPy 1.1.0 and '
                            'above. The installed version is {}')
                        raise ImportError(msg.format(scipy_version))

                    if equals is not None:
                        lb = ub = equals
                    else:
                        lb = lower
                        ub = upper
                    # Loop over every index separately,
                    # because scipy calls each constraint by index.
                    for j in range(size):
                        # Double-sided constraints are accepted by the algorithm
                        args = [name, False, j]
                        # TODO linear constraint if meta['linear']
                        # TODO add option for Hessian
                        con = NonlinearConstraint(
                            fun=signature_extender(self._con_val_func, args),
                            lb=lb,
                            ub=ub,
                            jac=signature_extender(self._congradfunc, args))
                        constraints.append(con)
                else:  # Type of constraints is list of dict
                    # Loop over every index separately,
                    # because scipy calls each constraint by index.
                    for j in range(size):
                        con_dict = {}
                        if meta['equals'] is not None:
                            con_dict['type'] = 'eq'
                        else:
                            con_dict['type'] = 'ineq'
                        con_dict['fun'] = self._confunc
                        if opt in _constraint_grad_optimizers:
                            con_dict['jac'] = self._congradfunc
                        con_dict['args'] = [name, False, j]
                        constraints.append(con_dict)

                        if isinstance(upper, np.ndarray):
                            upper = upper[j]

                        if isinstance(lower, np.ndarray):
                            lower = lower[j]

                        dblcon = (upper < openmdao.INF_BOUND) and (
                            lower > -openmdao.INF_BOUND)

                        # Add extra constraint if double-sided
                        if dblcon:
                            dcon_dict = {}
                            dcon_dict['type'] = 'ineq'
                            dcon_dict['fun'] = self._confunc
                            if opt in _constraint_grad_optimizers:
                                dcon_dict['jac'] = self._congradfunc
                            dcon_dict['args'] = [name, True, j]
                            constraints.append(dcon_dict)

            # precalculate gradients of linear constraints
            if lincons:
                self._lincongrad_cache = self._compute_totals(
                    of=lincons, wrt=self._dvlist, return_format='array')
            else:
                self._lincongrad_cache = None

        # Provide gradients for optimizers that support it
        if opt in _gradient_optimizers:
            jac = self._gradfunc
        else:
            jac = None

        # Hessian calculation method for optimizers, which require it
        if opt in _hessian_optimizers:
            if 'hess' in self.opt_settings:
                hess = self.opt_settings.pop('hess')
            else:
                # Defaults to BFGS, if not in opt_settings
                from scipy.optimize import BFGS
                hess = BFGS()
        else:
            hess = None

        # compute dynamic simul deriv coloring if option is set
        if coloring_mod._use_total_sparsity:
            if self._coloring_info['coloring'] is coloring_mod._DYN_COLORING:
                coloring_mod.dynamic_total_coloring(
                    self,
                    run_model=False,
                    fname=self._get_total_coloring_fname())
            elif self.options['dynamic_simul_derivs']:
                warn_deprecation(
                    "The 'dynamic_simul_derivs' option has been deprecated. Call "
                    "the 'declare_coloring' function instead.")
                coloring_mod.dynamic_total_coloring(
                    self,
                    run_model=False,
                    fname=self._get_total_coloring_fname())

        # optimize
        try:
            if opt in _optimizers:
                result = minimize(
                    self._objfunc,
                    x_init,
                    # args=(),
                    method=opt,
                    jac=jac,
                    hess=hess,
                    # hessp=None,
                    bounds=bounds,
                    constraints=constraints,
                    tol=self.options['tol'],
                    # callback=None,
                    options=self.opt_settings)
            elif opt == 'basinhopping':
                from scipy.optimize import basinhopping

                def fun(x):
                    return self._objfunc(x), jac(x)

                if 'minimizer_kwargs' not in self.opt_settings:
                    self.opt_settings['minimizer_kwargs'] = {
                        "method": "L-BFGS-B",
                        "jac": True
                    }
                self.opt_settings.pop(
                    'maxiter')  # It does not have this argument

                def accept_test(f_new, x_new, f_old, x_old):
                    # Used to implement bounds besides the original functionality
                    if bounds is not None:
                        bound_check = all([
                            b[0] <= xi <= b[1] for xi, b in zip(x_new, bounds)
                        ])
                        user_test = self.opt_settings.pop('accept_test',
                                                          None)  # callable
                        # has to satisfy both the bounds and the acceptance test defined by the
                        # user
                        if user_test is not None:
                            test_res = user_test(f_new, x_new, f_old, x_old)
                            if test_res == 'force accept':
                                return test_res
                            else:  # result is boolean
                                return bound_check and test_res
                        else:  # no user acceptance test, check only the bounds
                            return bound_check
                    else:
                        return True

                result = basinhopping(fun,
                                      x_init,
                                      accept_test=accept_test,
                                      **self.opt_settings)
            elif opt == 'dual_annealing':
                from scipy.optimize import dual_annealing
                self.opt_settings.pop('disp')  # It does not have this argument
                # There is no "options" param, so "opt_settings" can be used to set the (many)
                # keyword arguments
                result = dual_annealing(self._objfunc,
                                        bounds=bounds,
                                        **self.opt_settings)
            elif opt == 'differential_evolution':
                from scipy.optimize import differential_evolution
                # There is no "options" param, so "opt_settings" can be used to set the (many)
                # keyword arguments
                result = differential_evolution(self._objfunc,
                                                bounds=bounds,
                                                **self.opt_settings)
            elif opt == 'shgo':
                from scipy.optimize import shgo
                kwargs = dict()
                for param in ('minimizer_kwargs', 'sampling_method', 'n',
                              'iters'):
                    if param in self.opt_settings:
                        kwargs[param] = self.opt_settings[param]
                # Set the Jacobian and the Hessian to the value calculated in OpenMDAO
                if 'minimizer_kwargs' not in kwargs or kwargs[
                        'minimizer_kwargs'] is None:
                    kwargs['minimizer_kwargs'] = {}
                kwargs['minimizer_kwargs'].setdefault('jac', jac)
                kwargs['minimizer_kwargs'].setdefault('hess', hess)
                # Objective function tolerance
                self.opt_settings['f_tol'] = self.options['tol']
                result = shgo(self._objfunc,
                              bounds=bounds,
                              constraints=constraints,
                              options=self.opt_settings,
                              **kwargs)
            else:
                msg = 'Optimizer "{}" is not implemented yet. Choose from: {}'
                raise NotImplementedError(msg.format(opt, _all_optimizers))

        # If an exception was swallowed in one of our callbacks, we want to raise it
        # rather than the cryptic message from scipy.
        except Exception as msg:
            if self._exc_info is not None:
                self._reraise()
            else:
                raise

        if self._exc_info is not None:
            self._reraise()

        self.result = result

        if hasattr(result, 'success'):
            self.fail = not result.success
            if self.fail:
                print('Optimization FAILED.')
                print(result.message)
                print('-' * 35)

            elif self.options['disp']:
                print('Optimization Complete')
                print('-' * 35)
        else:
            self.fail = True  # It is not known, so the worst option is assumed
            print('Optimization Complete (success not known)')
            print(result.message)
            print('-' * 35)

        return self.fail
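For reference, the dict-style constraints and old-style `(lower, upper)` bound pairs assembled by this driver are the same structures `scipy.optimize.minimize` accepts directly. A small stand-alone sketch with a toy objective and constraint (not part of the driver itself):

    import numpy as np
    from scipy.optimize import minimize

    def objective(x):
        return (x[0] - 1.0) ** 2 + (x[1] - 2.0) ** 2

    # 'ineq' means fun(x) >= 0, so this encodes x0 + x1 >= 1.
    constraints = [{'type': 'ineq', 'fun': lambda x: x[0] + x[1] - 1.0}]
    bounds = [(-5.0, 5.0), (-5.0, 5.0)]     # old-style (lower, upper) pairs

    result = minimize(objective, np.zeros(2), method='SLSQP',
                      bounds=bounds, constraints=constraints,
                      options={'maxiter': 100, 'disp': False})
    print(result.x, result.success)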
Example #11
    def run(self):
        """
        Execute the genetic algorithm.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        model = self._problem().model

        pop_size = self.options['pop_size']
        max_gen = self.options['max_gen']

        self._check_for_missing_objective()

        # Size design variables.
        desvars = self._designvars
        desvar_vals = self.get_design_var_values()

        count = 0
        for name, meta in desvars.items():
            size = meta['size']
            self._desvar_idx[name] = (count, count + size)
            count += size

        lower_bound = np.empty((count, ))
        upper_bound = np.empty((count, ))
        x0 = np.empty(count)

        # Figure out bounds vectors and initial design vars
        for name, meta in desvars.items():
            i, j = self._desvar_idx[name]
            lower_bound[i:j] = meta['lower']
            upper_bound[i:j] = meta['upper']
            x0[i:j] = desvar_vals[name]

        abs2prom = model._var_abs2prom['output']

        # Automatic population size.
        if pop_size == 0:
            pop_size = 20 * count

        self.CMAOptions['bounds'] = [lower_bound, upper_bound]

        if self._randomstate:
            self.CMAOptions['seed'] = self._randomstate

        res = cma.fmin(self.objective_callback, x0, self.options['sigma0'],
                       options=self.CMAOptions)

        desvar_new = res[0]

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        for name in desvars:
            i, j = self._desvar_idx[name]
            val = desvar_new[i:j]
            self.set_design_var(name, val)

        with RecordingDebugging(self._get_name(), self.iter_count, self) as rec:
            model.run_solve_nonlinear()
            rec.abs = 0.0
            rec.rel = 0.0
        self.iter_count += 1

        return False
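A minimal stand-alone sketch of the `cma.fmin` call used above, assuming the pycma package is installed; the toy objective and the exact option values are illustrative only:

    import cma

    def sphere(x):
        return sum(xi ** 2 for xi in x)

    # 'bounds', 'seed' and 'verbose' are standard CMAOptions keys per pycma docs.
    options = {'bounds': [[-5.0, -5.0], [5.0, 5.0]], 'seed': 1, 'verbose': -9}
    res = cma.fmin(sphere, [1.0, 2.0], 0.5, options=options)
    print(res[0])   # best design vector found, analogous to desvar_new above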
Example #12
    def run(self):
        """
        Execute the differential evolution algorithm.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        model = self._problem().model
        de = self._de

        de.strategy = EvolutionStrategy(self.options["strategy"])
        de.f = self.options["Pm"]
        de.cr = self.options["Pc"]
        de.adaptivity = self.options["adaptivity"]
        de.n_pop = self.options["pop_size"]
        de.max_gen = self.options["max_gen"]
        de.tolx = self.options["tolx"]
        de.tolf = self.options["tolf"]
        de.tolc = self.options["tolc"]

        self._check_for_missing_objective()

        # Size design variables.
        desvars = self._designvars
        desvar_vals = self.get_design_var_values()

        count = 0
        for name, meta in desvars.items():
            if name in self._designvars_discrete:
                val = desvar_vals[name]
                if np.isscalar(val):
                    size = 1
                else:
                    size = len(val)
            else:
                size = meta["size"]
            self._desvar_idx[name] = (count, count + size)
            count += size

        bounds = []
        x0 = np.empty(count)

        # Figure out bounds vectors and initial design vars
        for name, meta in desvars.items():
            i, j = self._desvar_idx[name]
            lb = meta["lower"]
            if isinstance(lb, float):
                lb = [lb] * (j - i)
            ub = meta["upper"]
            if isinstance(ub, float):
                ub = [ub] * (j - i)
            for k in range(j - i):
                bounds += [(lb[k], ub[k])]
            x0[i:j] = desvar_vals[name]

        de.init(self.objective_callback, bounds,
                self.options["initial_population"])
        if rank == 0 and self.options["show_progress"]:
            print(progress_string(de))
        if self.options["generation_callback"] is not None:
            self.options["generation_callback"](de)

        gen_iter = de
        if rank == 0 and self.options["show_progress"] and tqdm is not None:
            gen_iter = tqdm(gen_iter, total=self.options["max_gen"])

        for generation in gen_iter:
            if rank == 0 and self.options["show_progress"]:
                print(progress_string(generation))
                if self.options["generation_callback"] is not None:
                    self.options["generation_callback"](de)

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        best = de.pop[0]
        for name in desvars:
            i, j = self._desvar_idx[name]
            val = best[i:j]
            self.set_design_var(name, val)

        with RecordingDebugging(self._get_name(), self.iter_count,
                                self) as rec:
            model.run_solve_nonlinear()
            rec.abs = 0.0
            rec.rel = 0.0
        self.iter_count += 1

        return False
Example #13
    def run(self):
        """
        Execute the genetic algorithm.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        model = self._problem.model
        ga = self._ga

        ga.elite = self.options['elitism']
        pop_size = self.options['pop_size']
        max_gen = self.options['max_gen']
        user_bits = self.options['bits']
        Pm = self.options['Pm']  # if None, it will be calculated in execute_ga()
        Pc = self.options['Pc']

        # Size design variables.
        desvars = self._designvars
        count = 0
        for name, meta in iteritems(desvars):
            size = meta['size']
            self._desvar_idx[name] = (count, count + size)
            count += size

        lower_bound = np.empty((count, ))
        upper_bound = np.empty((count, ))
        outer_bound = np.full((count, ), np.inf)
        bits = np.empty((count, ), dtype=np.int_)
        x0 = np.empty(count)

        desvar_vals = self.get_design_var_values()

        # Figure out bounds vectors and initial design vars
        for name, meta in iteritems(desvars):
            i, j = self._desvar_idx[name]
            lower_bound[i:j] = meta['lower']
            upper_bound[i:j] = meta['upper']
            x0[i:j] = desvar_vals[name]

        # Bits of resolution
        abs2prom = model._var_abs2prom['output']

        for name, meta in iteritems(desvars):
            i, j = self._desvar_idx[name]
            prom_name = abs2prom[name]

            if name in user_bits:
                val = user_bits[name]

            elif prom_name in user_bits:
                val = user_bits[prom_name]

            else:
                # If the user does not declare a bits for this variable, we assume they want it to
                # be encoded as an integer. Encoding requires a power of 2 in the range, so we need
                # to pad additional values above the upper range, and adjust accordingly. Design
                # points with values above the upper bound will be discarded by the GA.
                log_range = np.log2(upper_bound[i:j] - lower_bound[i:j] + 1)
                if log_range % 2 > 0:
                    val = np.ceil(log_range)
                    outer_bound[i:j] = upper_bound[i:j]
                    upper_bound[i:j] = 2**np.ceil(log_range) - 1 + lower_bound[i:j]
                else:
                    val = log_range

            bits[i:j] = val

        # Automatic population size.
        if pop_size == 0:
            pop_size = 4 * np.sum(bits)

        desvar_new, obj, nfit = ga.execute_ga(x0, lower_bound, upper_bound, outer_bound,
                                              bits, pop_size, max_gen,
                                              self._randomstate, Pm, Pc)

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        for name in desvars:
            i, j = self._desvar_idx[name]
            val = desvar_new[i:j]
            self.set_design_var(name, val)

        with RecordingDebugging('SimpleGA', self.iter_count, self) as rec:
            model._solve_nonlinear()
            rec.abs = 0.0
            rec.rel = 0.0
        self.iter_count += 1

        return False
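A worked numeric sketch of the bit-sizing logic described in the comment above: when the integer range is not a power of two, the upper bound is padded so the encoding fills a whole number of bits, and the original bound is kept as an outer bound for screening (the numbers here are made up):

    import numpy as np

    lower, upper = 0, 9                        # 10 integer values: 0..9
    log_range = np.log2(upper - lower + 1)     # ~3.32, not a whole number of bits

    bits = int(np.ceil(log_range))             # 4 bits -> 16 encodable values
    outer_bound = upper                        # real bound kept for screening
    padded_upper = 2 ** bits - 1 + lower       # 15; encoded values 10..15 discarded

    print(bits, padded_upper, outer_bound)     # 4 15 9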
Example #14
    def run(self):
        """
        Execute pyOptsparse.

        Note that pyOpt controls the execution, and the individual optimizers
        (e.g., SNOPT) control the iteration.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        problem = self._problem()
        model = problem.model
        relevant = model._relevant
        self.pyopt_solution = None
        self._total_jac = None
        self.iter_count = 0
        fwd = problem._mode == 'fwd'
        optimizer = self.options['optimizer']
        self._quantities = []

        self._check_for_missing_objective()

        # Only need initial run if we have linear constraints or if we are using an optimizer that
        # doesn't perform one initially.
        con_meta = self._cons
        model_ran = False
        if optimizer in run_required or np.any(
            [con['linear'] for con in self._cons.values()]):
            with RecordingDebugging(self._get_name(), self.iter_count,
                                    self) as rec:
                # Initial Run
                model.run_solve_nonlinear()
                rec.abs = 0.0
                rec.rel = 0.0
                model_ran = True
            self.iter_count += 1

        # compute dynamic simul deriv coloring or just sparsity if option is set
        if c_mod._use_total_sparsity:
            coloring = None
            if self._coloring_info['coloring'] is None and self._coloring_info[
                    'dynamic']:
                coloring = c_mod.dynamic_total_coloring(
                    self,
                    run_model=not model_ran,
                    fname=self._get_total_coloring_fname())

            if coloring is not None:
                # if the improvement wasn't large enough, don't use coloring
                pct = coloring._solves_info()[-1]
                info = self._coloring_info
                if info['min_improve_pct'] > pct:
                    info['coloring'] = info['static'] = None
                    simple_warning(
                        "%s: Coloring was deactivated.  Improvement of %.1f%% was less "
                        "than min allowed (%.1f%%)." %
                        (self.msginfo, pct, info['min_improve_pct']))

        comm = None if isinstance(problem.comm, FakeComm) else problem.comm
        opt_prob = Optimization(self.options['title'],
                                weak_method_wrapper(self, '_objfunc'),
                                comm=comm)

        # Add all design variables
        param_meta = self._designvars
        self._indep_list = indep_list = list(param_meta)
        param_vals = self.get_design_var_values()

        for name, meta in param_meta.items():
            opt_prob.addVarGroup(name,
                                 meta['size'],
                                 type='c',
                                 value=param_vals[name],
                                 lower=meta['lower'],
                                 upper=meta['upper'])

        opt_prob.finalizeDesignVariables()

        # Add all objectives
        objs = self.get_objective_values()
        for name in objs:
            opt_prob.addObj(name)
            self._quantities.append(name)

        # Calculate and save derivatives for any linear constraints.
        lcons = [key for (key, con) in con_meta.items() if con['linear']]
        if len(lcons) > 0:
            _lin_jacs = self._compute_totals(of=lcons,
                                             wrt=indep_list,
                                             return_format='dict')
            # convert all of our linear constraint jacs to COO format. Otherwise pyoptsparse will
            # do it for us and we'll end up with a fully dense COO matrix and very slow evaluation
            # of linear constraints!
            to_remove = []
            for jacdct in _lin_jacs.values():
                for n, subjac in jacdct.items():
                    if isinstance(subjac, np.ndarray):
                        # we can safely use coo_matrix to automatically convert the ndarray
                        # since our linear constraint jacs are constant, so zeros won't become
                        # nonzero during the optimization.
                        mat = coo_matrix(subjac)
                        if mat.row.size > 0:
                            # convert to 'coo' format here to avoid an emphatic warning
                            # by pyoptsparse.
                            jacdct[n] = {
                                'coo': [mat.row, mat.col, mat.data],
                                'shape': mat.shape
                            }

        # Add all equality constraints
        for name, meta in con_meta.items():
            if meta['equals'] is None:
                continue
            size = meta['size']
            lower = upper = meta['equals']
            if fwd:
                wrt = [v for v in indep_list if name in relevant[v]]
            else:
                rels = relevant[name]
                wrt = [v for v in indep_list if v in rels]

            if meta['linear']:
                jac = {w: _lin_jacs[name][w] for w in wrt}
                opt_prob.addConGroup(name,
                                     size,
                                     lower=lower,
                                     upper=upper,
                                     linear=True,
                                     wrt=wrt,
                                     jac=jac)
            else:
                if name in self._res_jacs:
                    resjac = self._res_jacs[name]
                    jac = {n: resjac[n] for n in wrt}
                else:
                    jac = None
                opt_prob.addConGroup(name,
                                     size,
                                     lower=lower,
                                     upper=upper,
                                     wrt=wrt,
                                     jac=jac)
                self._quantities.append(name)

        # Add all inequality constraints
        for name, meta in con_meta.items():
            if meta['equals'] is not None:
                continue
            size = meta['size']

            # Bounds - double sided is supported
            lower = meta['lower']
            upper = meta['upper']

            if fwd:
                wrt = [v for v in indep_list if name in relevant[v]]
            else:
                rels = relevant[name]
                wrt = [v for v in indep_list if v in rels]

            if meta['linear']:
                jac = {w: _lin_jacs[name][w] for w in wrt}
                opt_prob.addConGroup(name,
                                     size,
                                     upper=upper,
                                     lower=lower,
                                     linear=True,
                                     wrt=wrt,
                                     jac=jac)
            else:
                if name in self._res_jacs:
                    resjac = self._res_jacs[name]
                    jac = {n: resjac[n] for n in wrt}
                else:
                    jac = None
                opt_prob.addConGroup(name,
                                     size,
                                     upper=upper,
                                     lower=lower,
                                     wrt=wrt,
                                     jac=jac)
                self._quantities.append(name)

        # Instantiate the requested optimizer
        try:
            _tmp = __import__('pyoptsparse', globals(), locals(), [optimizer],
                              0)
            opt = getattr(_tmp, optimizer)()

        except Exception as err:
            # Change whatever pyopt gives us to an ImportError, give it a readable message,
            # but raise with the original traceback.
            msg = "Optimizer %s is not available in this installation." % optimizer
            raise ImportError(msg)

        # Process any default optimizer-specific settings.
        if optimizer in DEFAULT_OPT_SETTINGS:
            for name, value in DEFAULT_OPT_SETTINGS[optimizer].items():
                if name not in self.opt_settings:
                    self.opt_settings[name] = value

        # Set optimization options
        for option, value in self.opt_settings.items():
            opt.setOption(option, value)

        # Execute the optimization problem
        if self.options['gradient method'] == 'pyopt_fd':

            # Use pyOpt's internal finite difference
            # TODO: Need to get this from OpenMDAO
            # fd_step = problem.model.deriv_options['step_size']
            fd_step = 1e-6
            sol = opt(opt_prob,
                      sens='FD',
                      sensStep=fd_step,
                      storeHistory=self.hist_file,
                      hotStart=self.hotstart_file)

        elif self.options['gradient method'] == 'snopt_fd':
            if self.options['optimizer'] == 'SNOPT':

                # Use SNOPT's internal finite difference
                # TODO: Need to get this from OpenMDAO
                # fd_step = problem.model.deriv_options['step_size']
                fd_step = 1e-6
                sol = opt(opt_prob,
                          sens=None,
                          sensStep=fd_step,
                          storeHistory=self.hist_file,
                          hotStart=self.hotstart_file)

            else:
                raise Exception(
                    "SNOPT's internal finite difference can only be used with SNOPT"
                )
        else:

            # Use OpenMDAO's differentiator for the gradient
            sol = opt(opt_prob,
                      sens=weak_method_wrapper(self, '_gradfunc'),
                      storeHistory=self.hist_file,
                      hotStart=self.hotstart_file)

        # Print results
        if self.options['print_results']:
            print(sol)

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        dv_dict = sol.getDVs()
        for name in indep_list:
            self.set_design_var(name, dv_dict[name])

        with RecordingDebugging(self._get_name(), self.iter_count,
                                self) as rec:
            model.run_solve_nonlinear()
            rec.abs = 0.0
            rec.rel = 0.0
        self.iter_count += 1

        # Save the most recent solution.
        self.pyopt_solution = sol

        try:
            exit_status = sol.optInform['value']
            self.fail = False

            # These are various failed statuses.
            if optimizer == 'IPOPT':
                if exit_status not in {0, 1}:
                    self.fail = True
            elif exit_status > 2:
                self.fail = True

        except KeyError:
            # optimizers other than pySNOPT may not populate this dict
            pass

        # revert signal handler to cached version
        sigusr = self.options['user_teriminate_signal']
        if sigusr is not None:
            signal.signal(sigusr, self._signal_cache)
            self._signal_cache = None  # to prevent memory leak test from failing

        return self.fail
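The COO conversion applied to the linear-constraint Jacobians above can be reproduced stand-alone with `scipy.sparse.coo_matrix`; the dense sub-Jacobian below is made up for illustration:

    import numpy as np
    from scipy.sparse import coo_matrix

    subjac = np.array([[0.0, 2.0, 0.0],
                       [1.0, 0.0, 0.0]])       # hypothetical dense linear jac

    mat = coo_matrix(subjac)
    coo_entry = {'coo': [mat.row, mat.col, mat.data], 'shape': mat.shape}
    print(coo_entry['shape'])                  # (2, 3)
    print(mat.row, mat.col, mat.data)          # [0 1] [1 0] [2. 1.]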
Example #15
    def run(self):
        """
        Execute the genetic algorithm.

        Returns
        -------
        bool
            Failure flag; True if failed to converge, False if successful.
        """
        model = self._problem().model
        ga = self._ga

        ga.elite = self.options['elitism']
        ga.gray_code = self.options['gray']
        ga.cross_bits = self.options['cross_bits']
        pop_size = self.options['pop_size']
        max_gen = self.options['max_gen']
        user_bits = self.options['bits']
        compute_pareto = self.options['compute_pareto']

        Pm = self.options['Pm']  # if None, it will be calculated in execute_ga()
        Pc = self.options['Pc']

        self._check_for_missing_objective()

        if compute_pareto:
            self._ga.nobj = len(self._objs)

        # Size design variables.
        desvars = self._designvars
        desvar_vals = self.get_design_var_values()

        count = 0
        for name, meta in desvars.items():
            if name in self._designvars_discrete:
                val = desvar_vals[name]
                if np.ndim(val) == 0:
                    size = 1
                else:
                    size = len(val)
            else:
                size = meta['size']
            self._desvar_idx[name] = (count, count + size)
            count += size

        lower_bound = np.empty((count, ))
        upper_bound = np.empty((count, ))
        outer_bound = np.full((count, ), np.inf)
        bits = np.empty((count, ), dtype=np.int_)
        x0 = np.empty(count)

        # Figure out bounds vectors and initial design vars
        for name, meta in desvars.items():
            i, j = self._desvar_idx[name]
            lower_bound[i:j] = meta['lower']
            upper_bound[i:j] = meta['upper']
            x0[i:j] = desvar_vals[name]

        # Bits of resolution
        abs2prom = model._var_abs2prom['output']

        for name, meta in desvars.items():
            i, j = self._desvar_idx[name]

            if name in abs2prom:
                prom_name = abs2prom[name]
            else:
                prom_name = name

            if name in user_bits:
                val = user_bits[name]

            elif prom_name in user_bits:
                val = user_bits[prom_name]

            else:
                # If the user does not declare a bits for this variable, we assume they want it to
                # be encoded as an integer. Encoding requires a power of 2 in the range, so we need
                # to pad additional values above the upper range, and adjust accordingly. Design
                # points with values above the upper bound will be discarded by the GA.
                log_range = np.log2(upper_bound[i:j] - lower_bound[i:j] + 1)
                val = log_range  # default case -- no padding required
                mask = log_range % 2 > 0  # mask for vars requiring padding
                val[mask] = np.ceil(log_range[mask])
                outer_bound[i:j][mask] = upper_bound[i:j][mask]
                upper_bound[i:j][mask] = 2**np.ceil(
                    log_range[mask]) - 1 + lower_bound[i:j][mask]

            bits[i:j] = val

        # Automatic population size.
        if pop_size == 0:
            pop_size = 4 * np.sum(bits)

        desvar_new, obj, nfit = ga.execute_ga(x0, lower_bound, upper_bound,
                                              outer_bound, bits, pop_size,
                                              max_gen, self._randomstate, Pm,
                                              Pc)

        if compute_pareto:
            # Just save the non-dominated points.
            self.desvar_nd = desvar_new
            self.obj_nd = obj

        else:
            # Pull optimal parameters back into framework and re-run, so that
            # framework is left in the right final state
            for name in desvars:
                i, j = self._desvar_idx[name]
                val = desvar_new[i:j]
                self.set_design_var(name, val)

            with RecordingDebugging(self._get_name(), self.iter_count,
                                    self) as rec:
                model.run_solve_nonlinear()
                rec.abs = 0.0
                rec.rel = 0.0
            self.iter_count += 1

        return False
Example #16
    def run(self):
        """
        Execute the genetic algorithm.

        Returns
        -------
        bool
            Failure flag; True if failed to converge, False if successful.
        """
        model = self._problem().model
        ga = self._ga

        pop_size = self.options['pop_size']
        max_gen = self.options['max_gen']
        F = self.options['F']
        Pc = self.options['Pc']

        self._check_for_missing_objective()

        # Size design variables.
        desvars = self._designvars
        desvar_vals = self.get_design_var_values()

        count = 0
        for name, meta in desvars.items():
            size = meta['size']
            self._desvar_idx[name] = (count, count + size)
            count += size

        lower_bound = np.empty((count, ))
        upper_bound = np.empty((count, ))
        x0 = np.empty(count)

        # Figure out bounds vectors and initial design vars
        for name, meta in desvars.items():
            i, j = self._desvar_idx[name]
            lower_bound[i:j] = meta['lower']
            upper_bound[i:j] = meta['upper']
            x0[i:j] = desvar_vals[name]

        # Automatic population size.
        if pop_size == 0:
            pop_size = 20 * count

        desvar_new, obj, nfit = ga.execute_ga(x0, lower_bound, upper_bound,
                                              pop_size, max_gen,
                                              self._randomstate, F, Pc)

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        for name in desvars:
            i, j = self._desvar_idx[name]
            val = desvar_new[i:j]
            self.set_design_var(name, val)

        with RecordingDebugging(self._get_name(), self.iter_count,
                                self) as rec:
            model.run_solve_nonlinear()
            rec.abs = 0.0
            rec.rel = 0.0
        self.iter_count += 1

        return False
Example #17
    def run(self):
        """
        Optimize the problem using selected Scipy optimizer.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        problem = self._problem
        opt = self.options['optimizer']
        model = problem.model
        self.iter_count = 0
        self._total_jac = None

        # Initial Run
        with RecordingDebugging(self.options['optimizer'], self.iter_count,
                                self) as rec:
            model._solve_nonlinear()
            self.iter_count += 1

        self._con_cache = self.get_constraint_values()
        desvar_vals = self.get_design_var_values()
        self._dvlist = list(self._designvars)

        # maxiter and disp get passed into scipy with all the other options.
        self.opt_settings['maxiter'] = self.options['maxiter']
        self.opt_settings['disp'] = self.options['disp']

        # Size Problem
        nparam = 0
        for param in itervalues(self._designvars):
            nparam += param['size']
        x_init = np.empty(nparam)

        # Initial Design Vars
        i = 0
        use_bounds = (opt in _bounds_optimizers)
        if use_bounds:
            bounds = []
        else:
            bounds = None

        for name, meta in iteritems(self._designvars):
            size = meta['size']
            x_init[i:i + size] = desvar_vals[name]
            i += size

            # Bounds if our optimizer supports them
            if use_bounds:
                meta_low = meta['lower']
                meta_high = meta['upper']
                for j in range(size):

                    if isinstance(meta_low, np.ndarray):
                        p_low = meta_low[j]
                    else:
                        p_low = meta_low

                    if isinstance(meta_high, np.ndarray):
                        p_high = meta_high[j]
                    else:
                        p_high = meta_high

                    bounds.append((p_low, p_high))

        # Constraints
        constraints = []
        i = 1  # start at 1 since row 0 is the objective.  Constraints start at row 1.
        lin_i = 0  # counter for linear constraint jacobian
        lincons = []  # list of linear constraints
        self._obj_and_nlcons = list(self._objs)

        if opt in _constraint_optimizers:
            for name, meta in iteritems(self._cons):
                size = meta['size']
                upper = meta['upper']
                lower = meta['lower']
                equals = meta['equals']
                if 'linear' in meta and meta['linear']:
                    lincons.append(name)
                    self._con_idx[name] = lin_i
                    lin_i += size
                else:
                    self._obj_and_nlcons.append(name)
                    self._con_idx[name] = i
                    i += size

                # In scipy, constraint optimizers take constraints in two separate formats
                if opt in ['trust-constr']:
                    # Type of constraints is list of NonlinearConstraint
                    try:
                        from scipy.optimize import NonlinearConstraint
                    except ImportError:
                        msg = (
                            'The "trust-constr" optimizer is supported for SciPy 1.1.0 and '
                            'above. The installed version is {}')
                        raise ImportError(msg.format(scipy_version))

                    if equals is not None:
                        lb = ub = equals
                    else:
                        lb = lower
                        ub = upper
                    # Loop over every index separately,
                    # because scipy calls each constraint by index.
                    for j in range(size):
                        # Double-sided constraints are accepted by the algorithm
                        args = [name, False, j]
                        # TODO linear constraint if meta['linear']
                        # TODO add option for Hessian
                        con = NonlinearConstraint(
                            fun=signature_extender(self._con_val_func, args),
                            lb=lb,
                            ub=ub,
                            jac=signature_extender(self._congradfunc, args))
                        constraints.append(con)
                else:  # Type of constraints is list of dict
                    # Loop over every index separately,
                    # because scipy calls each constraint by index.
                    for j in range(size):
                        con_dict = {}
                        if meta['equals'] is not None:
                            con_dict['type'] = 'eq'
                        else:
                            con_dict['type'] = 'ineq'
                        con_dict['fun'] = self._confunc
                        if opt in _constraint_grad_optimizers:
                            con_dict['jac'] = self._congradfunc
                        con_dict['args'] = [name, False, j]
                        constraints.append(con_dict)

                        if isinstance(upper, np.ndarray):
                            upper = upper[j]

                        if isinstance(lower, np.ndarray):
                            lower = lower[j]

                        dblcon = (upper < openmdao.INF_BOUND) and (
                            lower > -openmdao.INF_BOUND)

                        # Add extra constraint if double-sided
                        if dblcon:
                            dcon_dict = {}
                            dcon_dict['type'] = 'ineq'
                            dcon_dict['fun'] = self._confunc
                            if opt in _constraint_grad_optimizers:
                                dcon_dict['jac'] = self._congradfunc
                            dcon_dict['args'] = [name, True, j]
                            constraints.append(dcon_dict)

            # precalculate gradients of linear constraints
            if lincons:
                self._lincongrad_cache = self._compute_totals(
                    of=lincons, wrt=self._dvlist, return_format='array')
            else:
                self._lincongrad_cache = None

        # Provide gradients for optimizers that support it
        if opt in _gradient_optimizers:
            jac = self._gradfunc
        else:
            jac = None

        # Hessian calculation method for optimizers that require it
        if opt in _hessian_optimizers:
            if 'hess' in self.opt_settings:
                hess = self.opt_settings.pop('hess')
            else:
                # Defaults to BFGS if not given in opt_settings
                from scipy.optimize import BFGS
                hess = BFGS()
        else:
            hess = None

        # compute dynamic simul deriv coloring if option is set
        if coloring_mod._use_sparsity and self.options['dynamic_simul_derivs']:
            coloring_mod.dynamic_simul_coloring(self,
                                                run_model=False,
                                                do_sparsity=False)

        # optimize
        try:
            result = minimize(
                self._objfunc,
                x_init,
                # args=(),
                method=opt,
                jac=jac,
                hess=hess,
                # hessp=None,
                bounds=bounds,
                constraints=constraints,
                tol=self.options['tol'],
                # callback=None,
                options=self.opt_settings)

        # If an exception was swallowed in one of our callbacks, we want to raise it
        # rather than the cryptic message from scipy.
        except Exception as msg:
            if self._exc_info is not None:
                self._reraise()
            else:
                raise

        if self._exc_info is not None:
            self._reraise()

        self.result = result

        if hasattr(result, 'success'):
            self.fail = not result.success
            if self.fail:
                print('Optimization FAILED.')
                print(result.message)
                print('-' * 35)

            elif self.options['disp']:
                print('Optimization Complete')
                print('-' * 35)
        else:
            self.fail = True  # It is not known, so the worst option is assumed
            print('Optimization Complete (success not known)')
            print(result.message)
            print('-' * 35)

        return self.fail
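# --- Usage sketch (not part of the driver code above) ------------------------
# A minimal, hypothetical example of exercising a driver like the one above,
# assuming it is OpenMDAO's ScipyOptimizeDriver (or equivalent) in a recent
# OpenMDAO release.  The paraboloid model, bounds, and option values are made up.
import openmdao.api as om

prob = om.Problem()
prob.model.add_subsystem('parab',
                         om.ExecComp(['f = (x - 3.0)**2 + x*y + (y + 4.0)**2 - 3.0',
                                      'g = x + y']),
                         promotes=['*'])

prob.model.add_design_var('x', lower=-50.0, upper=50.0)
prob.model.add_design_var('y', lower=-50.0, upper=50.0)
prob.model.add_objective('f')
prob.model.add_constraint('g', upper=0.0)

prob.driver = om.ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
prob.driver.options['maxiter'] = 100   # forwarded into opt_settings, as shown above
prob.driver.options['tol'] = 1e-8

prob.setup()
failed = prob.run_driver()             # ends up calling the run() method shown above
print(failed, prob.get_val('x'), prob.get_val('y'))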
Ejemplo n.º 18
    def objective_callback(self, x):
        r"""
        Evaluate problem objective at the requested point.

        Parameters
        ----------
        x : ndarray
            Value of design variables.

        Returns
        -------
        f : ndarray
            Objective values
        g : ndarray
            Constraint values
        """
        model = self._problem().model

        for name in self._designvars:
            i, j = self._desvar_idx[name]
            self.set_design_var(name, x[i:j])

        # a very large number, but smaller than the result of nan_to_num in Numpy
        almost_inf = openmdao.INF_BOUND

        # Execute the model
        with RecordingDebugging(self._get_name(), self.iter_count,
                                self) as rec:
            self.iter_count += 1
            try:
                model.run_solve_nonlinear()

            # Tell the optimizer that this is a bad point.
            except AnalysisError:
                model._clear_iprint()

            # Get the objective functions' values
            f = np.array(list(self.get_objective_values().values()))

            # Get the constraint violations
            g = np.array([])
            # Temporarily ignore divide-by-zero warnings.
            with np.errstate(divide="ignore"):
                for name, val in self.get_constraint_values().items():
                    con = self._cons[name]
                    # Unused fields will be either None or a very large number
                    # All constraints will be converted into standard <= 0 form.
                    if (con["lower"] is not None) and np.any(
                            con["lower"] > -almost_inf):
                        g = np.append(
                            g,
                            np.where(
                                con["lower"] == 0,
                                con["lower"] - val,
                                1 - val / con["lower"],
                            ).flatten(),
                        )
                    elif (con["upper"]
                          is not None) and np.any(con["upper"] < almost_inf):
                        g = np.append(
                            g,
                            np.where(
                                con["upper"] == 0,
                                val - con["upper"],
                                val / con["upper"] - 1,
                            ).flatten(),
                        )
                    elif (con["equals"] is not None) and np.any(
                            np.abs(con["equals"]) < almost_inf):
                        g = np.append(
                            g,
                            np.where(
                                con["equals"] == 0,
                                np.abs(con["equals"] - val),
                                np.abs(1 - val / con["equals"]),
                            ).flatten() - self.options["tolc"],
                        )

            # Record after getting obj to assure they have
            # been gathered in MPI.
            rec.abs = 0.0
            rec.rel = 0.0

        return f.flatten(), g.flatten()
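# --- Standalone sketch (not part of the driver class above) ------------------
# The g vector assembled in objective_callback() turns every bound into a
# "<= 0" residual, dividing by the bound whenever it is nonzero.  The values
# below are hypothetical and only illustrate the two np.where branches above.
import numpy as np

val = np.array([2.0, 8.0])      # hypothetical constraint values
lower, upper = 3.0, 6.0         # hypothetical bounds

# Lower bound: satisfied when val >= lower  ->  1 - val/lower <= 0.
g_lower = np.where(lower == 0, lower - val, 1.0 - val / lower)

# Upper bound: satisfied when val <= upper  ->  val/upper - 1 <= 0.
g_upper = np.where(upper == 0, val - upper, val / upper - 1.0)

print(g_lower)   # ~[ 0.33, -1.67] -> first entry violates the lower bound
print(g_upper)   # ~[-0.67,  0.33] -> second entry violates the upper bound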
    def run(self):
        """
        Optimize the problem using the selected NLopt optimizer.
        """
        problem = self._problem()
        opt = self.options["optimizer"]
        model = problem.model
        self.iter_count = 0
        self._total_jac = None

        self._check_for_missing_objective()

        # Initial Run
        with RecordingDebugging(self._get_name(), self.iter_count,
                                self) as rec:
            model.run_solve_nonlinear()
            self.iter_count += 1

        self._con_cache = self.get_constraint_values()
        desvar_vals = self.get_design_var_values()
        self._dvlist = list(self._designvars)

        # Size Problem
        nparam = 0
        for param in self._designvars.values():
            nparam += param["size"]
        x_init = np.empty(nparam)

        # Initialize the NLopt problem with the method and number of design vars
        opt_prob = nlopt.opt(optimizer_methods[opt], int(nparam))

        # Initial Design Vars
        i = 0
        use_bounds = opt in _bounds_optimizers
        if use_bounds:
            bounds = []
        else:
            bounds = None

        # Loop through all OpenMDAO design variables and process their bounds
        for name, meta in self._designvars.items():
            size = meta["size"]
            x_init[i:i + size] = desvar_vals[name]
            i += size

            # Bounds if our optimizer supports them
            if use_bounds:
                meta_low = meta["lower"]
                meta_high = meta["upper"]
                for j in range(size):

                    if isinstance(meta_low, np.ndarray):
                        p_low = meta_low[j]
                    else:
                        p_low = meta_low

                    if isinstance(meta_high, np.ndarray):
                        p_high = meta_high[j]
                    else:
                        p_high = meta_high

                    bounds.append((p_low, p_high))

        # Actually add the bounds to the optimization problem.
        if bounds is not None:
            lower_bound, upper_bound = zip(*bounds)
            lower = np.array(
                [x if x is not None else -np.inf for x in lower_bound])
            upper = np.array(
                [x if x is not None else np.inf for x in upper_bound])
            opt_prob.set_lower_bounds(lower)
            opt_prob.set_upper_bounds(upper)

        # Constraints
        i = 1  # start at 1 since row 0 is the objective.  Constraints start at row 1.
        lin_i = 0  # counter for linear constraint jacobian
        lincons = []  # list of linear constraints
        self._obj_and_nlcons = list(self._objs)

        # Process and add constraints to the optimization problem.
        if opt in _constraint_optimizers:
            for name, meta in self._cons.items():
                size = meta["global_size"] if meta["distributed"] else meta[
                    "size"]
                upper = meta["upper"]
                lower = meta["lower"]
                equals = meta["equals"]

                if opt in _gradient_optimizers and "linear" in meta and meta[
                        "linear"]:
                    lincons.append(name)
                    self._con_idx[name] = lin_i
                    lin_i += size
                else:
                    self._obj_and_nlcons.append(name)
                    self._con_idx[name] = i
                    i += size

                # Loop over every index separately,
                # because it's easier to define each
                # constraint by index.
                for j in range(size):

                    # Add equality constraints directly; optimizers without support raise ValueError below.
                    if equals is not None:
                        args = [name, False, j]
                        try:
                            opt_prob.add_equality_constraint(
                                signature_extender(
                                    weak_method_wrapper(self, "_confunc"),
                                    args))
                        except ValueError:
                            msg = (
                                "The selected optimizer, {}, does not support"
                                + " equality constraints. Select from {}.")
                            raise NotImplementedError(
                                msg.format(opt, _eq_constraint_optimizers))

                    else:
                        # Double-sided constraints are accepted by the algorithm
                        args = [name, False, j]
                        opt_prob.add_inequality_constraint(
                            signature_extender(
                                weak_method_wrapper(self, "_confunc"), args))

                        if isinstance(upper, np.ndarray):
                            upper = upper[j]

                        if isinstance(lower, np.ndarray):
                            lower = lower[j]

                        dblcon = (upper < openmdao.INF_BOUND) and (
                            lower > -openmdao.INF_BOUND)

                        # Add extra constraint if double-sided
                        if dblcon:
                            args = [name, True, j]
                            opt_prob.add_inequality_constraint(
                                signature_extender(
                                    weak_method_wrapper(self, "_confunc"),
                                    args))

            # precalculate gradients of linear constraints
            if lincons:
                self._lincongrad_cache = self._compute_totals(
                    of=lincons, wrt=self._dvlist, return_format="array")
            else:
                self._lincongrad_cache = None

        # compute dynamic simul deriv coloring if option is set
        if coloring_mod._use_total_sparsity:
            if (self._coloring_info["coloring"] is None
                    and self._coloring_info["dynamic"]):
                coloring_mod.dynamic_total_coloring(
                    self,
                    run_model=False,
                    fname=self._get_total_coloring_fname())

                # if the improvement wasn't large enough, turn coloring off
                info = self._coloring_info
                if info["coloring"] is not None:
                    pct = info["coloring"]._solves_info()[-1]
                    if info["min_improve_pct"] > pct:
                        info["coloring"] = info["static"] = None
                        simple_warning(
                            "%s: Coloring was deactivated.  Improvement of %.1f%% was "
                            "less than min allowed (%.1f%%)." %
                            (self.msginfo, pct, info["min_improve_pct"]))

        # Finalize the optimization problem setup and actually perform optimization
        try:
            if opt in _optimizers:
                opt_prob.set_min_objective(self._objfunc)
                opt_prob.set_ftol_rel(self.options["tol"])
                opt_prob.set_maxeval(int(self.options["maxiter"]))
                opt_prob.set_maxtime(self.options["maxtime"])
                opt_prob.optimize(x_init)
                self.result = opt_prob.last_optimize_result()

            else:
                msg = 'Optimizer "{}" is not implemented yet. Choose from: {}'
                raise NotImplementedError(msg.format(opt, _optimizers))

        # If an exception was swallowed in one of our callbacks, we want to raise it
        except Exception as msg:
            if self._exc_info is not None:
                self._reraise()
            else:
                raise

        if self._exc_info is not None:
            self._reraise()
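# --- Standalone sketch of the underlying NLopt calls --------------------------
# Stripped of the driver machinery, the run() above follows the standard nlopt
# Python pattern.  This minimal, hypothetical example uses the same calls
# (nlopt.opt, bounds, set_min_objective, ftol/maxeval, optimize) on a made-up
# quadratic objective.
import numpy as np
import nlopt

def objfunc(x, grad):
    # NLopt fills `grad` in place when the chosen algorithm needs gradients.
    if grad.size > 0:
        grad[:] = 2.0 * (x - 1.0)
    return float(np.sum((x - 1.0) ** 2))

opt_prob = nlopt.opt(nlopt.LD_SLSQP, 2)           # gradient-based SLSQP, 2 design vars
opt_prob.set_lower_bounds(np.array([-10.0, -10.0]))
opt_prob.set_upper_bounds(np.array([10.0, 10.0]))
opt_prob.set_min_objective(objfunc)
opt_prob.set_ftol_rel(1e-8)
opt_prob.set_maxeval(200)

x_opt = opt_prob.optimize(np.zeros(2))
print(x_opt, opt_prob.last_optimize_result())     # e.g. [1. 1.] and a result code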
Ejemplo n.º 20
    def _objfunc(self, dv_dict):
        """
        Compute the objective function and constraints.

        This function is passed to pyOpt's Optimization object and is called
        from its optimizers.

        Parameters
        ----------
        dv_dict : dict
            Dictionary of design variable values.

        Returns
        -------
        func_dict : dict
            Dictionary of all functional variables evaluated at design point.

        fail : int
            0 for successful function evaluation
            1 for unsuccessful function evaluation
        """
        model = self._problem.model
        fail = 0

        try:
            for name in self._indep_list:
                self.set_design_var(name, dv_dict[name])

            # print("Setting DV")
            # print(dv_dict)

            # Execute the model
            with RecordingDebugging(self._get_name(), self.iter_count, self) as rec:
                self.iter_count += 1
                try:
                    model.run_solve_nonlinear()

                # Let the optimizer try to handle the error
                except AnalysisError:
                    model._clear_iprint()
                    fail = 1

                func_dict = self.get_objective_values()
                func_dict.update(self.get_constraint_values(lintype='nonlinear'))

                # Record after getting obj and constraint to assure they have
                # been gathered in MPI.
                rec.abs = 0.0
                rec.rel = 0.0

        except Exception as msg:
            tb = traceback.format_exc()

            # Exceptions seem to be swallowed by the C code, so this
            # should give the user more info than the dreaded "segfault"
            print("Exception: %s" % str(msg))
            print(70 * "=", tb, 70 * "=")
            fail = 1
            func_dict = {}

        # print("Functions calculated")
        # print(dv_dict)

        return func_dict, fail
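# --- Standalone sketch of the pyOptSparse callback contract -------------------
# Outside of OpenMDAO, pyOptSparse expects the same (func_dict, fail) return
# convention from its objective callback, as the docstring above describes.
# The names, bounds, and toy objective below are made up for illustration.
import numpy as np
from pyoptsparse import Optimization, OPT

def objfunc(dv_dict):
    x = dv_dict['x']
    funcs = {'obj': np.sum(x ** 2),   # objective value
             'con': x[0] + x[1]}      # constraint value
    fail = 0                          # 1 would flag a failed evaluation
    return funcs, fail

opt_prob = Optimization('toy problem', objfunc)
opt_prob.addVarGroup('x', 2, 'c', value=[1.0, 1.0], lower=-10.0, upper=10.0)
opt_prob.addObj('obj')
opt_prob.addConGroup('con', 1, upper=1.0)

opt = OPT('SLSQP')
sol = opt(opt_prob, sens='FD')        # finite-difference gradients
print(sol)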
Ejemplo n.º 21
    def run(self):
        """
        Execute the genetic algorithm.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        model = self._problem.model
        ga = self._ga

        # Size design variables.
        desvars = self._designvars
        count = 0
        for name, meta in iteritems(desvars):
            size = meta['size']
            self._desvar_idx[name] = (count, count + size)
            count += size

        lower_bound = np.empty((count, ))
        upper_bound = np.empty((count, ))

        # Figure out bounds vectors.
        for name, meta in iteritems(desvars):
            i, j = self._desvar_idx[name]
            lower_bound[i:j] = meta['lower']
            upper_bound[i:j] = meta['upper']

        ga.elite = self.options['elitism']
        pop_size = self.options['pop_size']
        max_gen = self.options['max_gen']
        user_bits = self.options['bits']

        # Bits of resolution
        bits = np.ceil(np.log2(upper_bound - lower_bound + 1)).astype(int)
        prom2abs = model._var_allprocs_prom2abs_list['output']

        for name, val in iteritems(user_bits):
            try:
                i, j = self._desvar_idx[name]
            except KeyError:
                abs_name = prom2abs[name][0]
                i, j = self._desvar_idx[abs_name]

            bits[i:j] = val

        # Automatic population size.
        if pop_size == 0:
            pop_size = 4 * np.sum(bits)

        desvar_new, obj, nfit = ga.execute_ga(lower_bound, upper_bound, bits,
                                              pop_size, max_gen,
                                              self._randomstate)

        # Pull optimal parameters back into the framework and re-run, so that
        # the framework is left in the right final state.
        for name in desvars:
            i, j = self._desvar_idx[name]
            val = desvar_new[i:j]
            self.set_design_var(name, val)

        with RecordingDebugging('SimpleGA', self.iter_count, self) as rec:
            model._solve_nonlinear()
            rec.abs = 0.0
            rec.rel = 0.0
        self.iter_count += 1

        return False
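# --- Worked sketch of the bit-sizing used above --------------------------------
# bits = ceil(log2(upper - lower + 1)) gives just enough bits to enumerate every
# integer step between the bounds.  The bounds below are hypothetical.
import numpy as np

lower_bound = np.array([0.0, -5.0])
upper_bound = np.array([10.0, 5.0])

bits = np.ceil(np.log2(upper_bound - lower_bound + 1)).astype(int)
print(bits)              # [4 4]  -> 11 possible values need 4 bits each

# Automatic population size used above when pop_size == 0.
print(4 * np.sum(bits))  # 32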
Ejemplo n.º 22
    def objective_callback(self, x, icase):
        r"""
        Evaluate problem objective at the requested point.

        In case of multi-objective optimization, a simple weighted sum method is used:

        .. math::

           f = (\sum_{k=1}^{N_f} w_k \cdot f_k)^a

        where :math:`N_f` is the number of objectives and :math:`a>0` is an exponential
        weight. Choosing :math:`a=1` is equivalent to the conventional weighted sum method.

        The weights given in the options are normalized, so:

        .. math::

            \sum_{k=1}^{N_f} w_k = 1

        If one of the objectives :math:`f_k` is not a scalar, its elements will have the same
        weights, and it will be normalized by the length of the vector.

        Takes into account constraints with a penalty function.

        All constraints are converted to the form of :math:`g_i(x) \leq 0` for
        inequality constraints and :math:`h_i(x) = 0` for equality constraints.
        The constraint vector for inequality constraints is the following:

        .. math::

           g = [g_1, g_2  \dots g_N], g_i \in R^{N_{g_i}}

           h = [h_1, h_2  \dots h_N], h_i \in R^{N_{h_i}}

        The number of all constraints:

        .. math::

           N_g = \sum_{i=1}^N N_{g_i},  N_h = \sum_{i=1}^N N_{h_i}

        The fitness function is constructed with the penalty parameter :math:`p`
        and the exponent :math:`\kappa`:

        .. math::

           \Phi(x) = f(x) + p \cdot \sum_{k=1}^{N^g}(\delta_k \cdot g_k)^{\kappa}
           + p \cdot \sum_{k=1}^{N^h}|h_k|^{\kappa}

        where :math:`\delta_k = 0` if :math:`g_k` is satisfied, 1 otherwise

        .. note::

            The values of :math:`\kappa` and :math:`p` can be defined as driver options.

        Parameters
        ----------
        x : ndarray
            Value of design variables.
        icase : int
            Case number, used for identification when run in parallel.

        Returns
        -------
        float
            Objective value.
        bool
            Success flag, True if successful.
        int
            Case number, used for identification when run in parallel.
        """
        model = self._problem().model
        success = 1

        objs = self.get_objective_values()
        nr_objectives = len(objs)

        # Single objective only if there is exactly one objective and it has a single element.
        if nr_objectives > 1:
            is_single_objective = False
        else:
            for obj in objs.values():
                is_single_objective = len(obj) == 1
                break

        obj_exponent = self.options['multi_obj_exponent']
        if self.options['multi_obj_weights']:  # not empty
            obj_weights = self.options['multi_obj_weights']
        else:
            # Same weight for all objectives, if not specified
            obj_weights = {name: 1. for name in objs.keys()}
        sum_weights = sum(obj_weights.values())

        for name in self._designvars:
            i, j = self._desvar_idx[name]
            self.set_design_var(name, x[i:j])

        # a very large number, but smaller than the result of nan_to_num in Numpy
        almost_inf = INF_BOUND

        # Execute the model
        with RecordingDebugging(self._get_name(), self.iter_count,
                                self) as rec:
            self.iter_count += 1
            try:
                model.run_solve_nonlinear()

            # Tell the optimizer that this is a bad point.
            except AnalysisError:
                model._clear_iprint()
                success = 0

            obj_values = self.get_objective_values()
            if is_single_objective:  # Single objective optimization
                for i in obj_values.values():
                    obj = i  # First and only value in the dict

            elif self.options['compute_pareto']:
                obj = np.array([val for val in obj_values.values()]).flatten()

            else:  # Multi-objective optimization with weighted sums
                weighted_objectives = np.array([])
                for name, val in obj_values.items():
                    # element-wise multiplication with scalar
                    # takes the average, if an objective is a vector
                    try:
                        weighted_obj = val * obj_weights[name] / val.size
                    except KeyError:
                        msg = ('Name "{}" in "multi_obj_weights" option '
                               'is not an absolute name of an objective.')
                        raise KeyError(msg.format(name))
                    weighted_objectives = np.hstack(
                        (weighted_objectives, weighted_obj))

                obj = sum(weighted_objectives / sum_weights)**obj_exponent

            # Parameters of the penalty method
            penalty = self.options['penalty_parameter']
            exponent = self.options['penalty_exponent']

            if penalty == 0:
                fun = obj
            else:
                constraint_violations = np.array([])
                for name, val in self.get_constraint_values().items():
                    con = self._cons[name]
                    # Unused fields will be either None or a very large number
                    if (con['lower'] is not None) and np.any(
                            con['lower'] > -almost_inf):
                        diff = val - con['lower']
                        violation = np.array(
                            [0. if d >= 0 else abs(d) for d in diff])
                    elif (con['upper']
                          is not None) and np.any(con['upper'] < almost_inf):
                        diff = val - con['upper']
                        violation = np.array(
                            [0. if d <= 0 else abs(d) for d in diff])
                    elif (con['equals'] is not None) and np.any(
                            np.abs(con['equals']) < almost_inf):
                        diff = val - con['equals']
                        violation = np.absolute(diff)
                    constraint_violations = np.hstack(
                        (constraint_violations, violation))
                fun = obj + penalty * sum(
                    np.power(constraint_violations, exponent))
            # Record after getting obj to assure they have
            # been gathered in MPI.
            rec.abs = 0.0
            rec.rel = 0.0

        # print("Functions calculated")
        # print(x)
        # print(obj)
        return fun, success, icase
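# --- Standalone numeric sketch of the fitness described in the docstring ------
# Hypothetical objective values, weights, and a single violated inequality
# constraint, run through the weighted-sum and penalty formulas given above.
import numpy as np

objs = {'f1': np.array([2.0]), 'f2': np.array([4.0])}
weights = {'f1': 1.0, 'f2': 3.0}
a = 1.0                                             # multi_obj_exponent

sum_w = sum(weights.values())
weighted = np.hstack([val * weights[k] / val.size for k, val in objs.items()])
f = np.sum(weighted / sum_w) ** a                   # (1*2 + 3*4) / 4 = 3.5

g = np.array([0.2])                                 # violated g <= 0 constraint
p, kappa = 10.0, 2.0                                # penalty parameter and exponent
delta = (g > 0).astype(float)                       # 1 where violated, 0 otherwise
phi = f + p * np.sum((delta * g) ** kappa)          # 3.5 + 10 * 0.2**2 = 3.9
print(f, phi)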
    def _objfunc(self, point):
        """
        Evaluate and return the objective function and the constraints.

        This function is called by SEGOMOE.

        Parameters
        ----------
        point : numpy.ndarray
            Point at which to evaluate the model.

        Returns
        -------
        func_dict : dict
            Dictionary of all functional variables evaluated at design point.
        fail : int
            0 for successful function evaluation
            1 for unsuccessful function evaluation
        """
        fail = False
        res = np.zeros(1 + self._n_mapped_con)
        if version.parse(OPENMDAO_VERSION) > version.parse("2.9.1"):
            model = self._problem().model
        else:
            model = self._problem.model

        try:
            # Pass in new parameters
            i = 0

            for name, meta in self._designvars.items():
                size = meta["size"]
                self.set_design_var(name, point[i : i + size])
                i += size

            # Execute the model
            with RecordingDebugging(
                self.options["optimizer"], self.iter_count, self
            ) as _:
                self.iter_count += 1
                try:
                    model._solve_nonlinear()

                # Let the optimizer try to handle the error
                except AnalysisError:
                    model._clear_iprint()
                    fail = True

            # Get the objective function evaluation - single obj support
            for name, obj in self.get_objective_values().items():
                res[0] = obj

            # Get the constraint evaluations:
            for name, con_res in self.get_constraint_values().items():
                # Make sure con_res is array_like
                con_res = to_list(con_res, len(self._map_con[name]))
                # Perform mapping
                for i, con_index in enumerate(self._map_con[name]):
                    if isinstance(con_index, list):
                        # Double sided inequality constraint -> duplicate response
                        for k in con_index:
                            res[k] = con_res[i]
                    else:
                        res[con_index] = con_res[i]

        except Exception as msg:
            tb = traceback.format_exc()

            # Exceptions seem to be swallowed by the C code, so this
            # should give the user more info than the dreaded "segfault"
            print("Exception: %s" % str(msg))
            print(70 * "=", tb, 70 * "=")
            fail = True

        return res, fail
Ejemplo n.º 24
    def _objfunc(self, dv_dict):
        """
        Compute the objective function and constraints.

        This function is passed to pyOpt's Optimization object and is called
        from its optimizers.

        Parameters
        ----------
        dv_dict : dict
            Dictionary of design variable values.

        Returns
        -------
        func_dict : dict
            Dictionary of all functional variables evaluated at design point.

        fail : int
            0 for successful function evaluation
            1 for unsuccessful function evaluation
        """
        model = self._problem().model
        fail = 0

        # Note: we place our handler as late as possible so that codes that run in the
        # workflow can place their own handlers.
        sigusr = self.options['user_teriminate_signal']
        if sigusr is not None and self._signal_cache is None:
            self._signal_cache = signal.getsignal(sigusr)
            signal.signal(sigusr, self._signal_handler)

        try:
            for name in self._indep_list:
                self.set_design_var(name, dv_dict[name])

            # print("Setting DV")
            # print(dv_dict)

            # Check if we caught a termination signal while SNOPT was running.
            if self._user_termination_flag:
                func_dict = self.get_objective_values()
                func_dict.update(
                    self.get_constraint_values(lintype='nonlinear'))
                return func_dict, 2

            # Execute the model
            with RecordingDebugging(self._get_name(), self.iter_count,
                                    self) as rec:
                self.iter_count += 1
                try:
                    self._in_user_function = True
                    model.run_solve_nonlinear()

                # Let the optimizer try to handle the error
                except AnalysisError:
                    model._clear_iprint()
                    fail = 1

                # User requested termination
                except UserRequestedException:
                    model._clear_iprint()
                    fail = 2

                func_dict = self.get_objective_values()
                func_dict.update(
                    self.get_constraint_values(lintype='nonlinear'))

                # Record after getting obj and constraint to assure they have
                # been gathered in MPI.
                rec.abs = 0.0
                rec.rel = 0.0

        except Exception as msg:
            tb = traceback.format_exc()

            # Exceptions seem to be swallowed by the C code, so this
            # should give the user more info than the dreaded "segfault"
            print("Exception: %s" % str(msg))
            print(70 * "=", tb, 70 * "=")
            fail = 1
            func_dict = {}

        # print("Functions calculated")
        # print(dv_dict)

        self._in_user_function = False
        return func_dict, fail
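# --- Standalone sketch of the signal hand-off used above ----------------------
# The driver saves whatever handler was previously installed and replaces it
# with its own so a user can request a clean stop.  SIGUSR1 below is a
# hypothetical choice (Unix only); the real signal comes from the driver's
# 'user_teriminate_signal' option.
import signal

def _signal_handler(signum, frame):
    print("termination requested; finishing the current iteration")

previous = signal.getsignal(signal.SIGUSR1)    # remember the old handler
signal.signal(signal.SIGUSR1, _signal_handler)
# ... run the optimization ...
signal.signal(signal.SIGUSR1, previous)        # restore it afterwards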
    def run(self, path_hs="", eq_tol={}, ieq_tol={}):
        """
        Optimize the problem using SEGOMOE.

        Parameters
        ----------
        path_hs : str, optional
            Path to a directory storing hot-start data.
        eq_tol : dict, optional
            Dictionary defining specific tolerances for equality constraints,
            {'[groupName]': [tol]}. Default tol = 1e-5.
        ieq_tol : dict, optional
            Dictionary defining specific tolerances for inequality constraints,
            {'[groupName]': [tol]}. Default tol = 1e-5.
        """
        if version.parse(OPENMDAO_VERSION) > version.parse("2.9.1"):
            model = self._problem().model
        else:
            model = self._problem.model
        path_hs = ""
        self.eq_tol = eq_tol
        self.ieq_tol = ieq_tol
        self.iter_count = 0
        self.name = "onera_optimizer_segomoe"
        self._sego_vars = []
        self._sego_cons = []
        self._map_con = {}
        self._n_mapped_con = 0

        # Initial Run
        with RecordingDebugging(
            self.options["optimizer"], self.iter_count, self
        ) as rec:
            model._solve_nonlinear()
            rec.abs = 0.0
            rec.rel = 0.0
        self.iter_count += 1

        # Format design variables to suit segomoe implementation
        self._initialize_vars()

        # Format constraints to suit segomoe implementation
        self._initialize_cons()

        # Format option dictionary to suit SEGO implementation
        optim_settings = {}
        for opt, opt_dict in get_sego_options().items():
            optim_settings[opt] = opt_dict["default"]
        n_iter = self.opt_settings["maxiter"]
        optim_settings.update(
            {k: v for k, v in self.opt_settings.items() if k != "maxiter"}
        )

        # In OpenMDAO context, obj and constraints are always evaluated together
        optim_settings["grouped_eval"] = True

        # default model
        mod_obj = {"corr": "squared_exponential", "regr": "constant", "normalize": True}

        dim = 0
        for name, meta in self._designvars.items():
            dim += meta["size"]
        print("Designvars dimension: ", dim)
        if dim > 10:
            n_components = 3
            mod_obj["n_components"] = n_components
            mod_obj["type"] = "KrigKPLS" if dim > 20 else "KrigKPLSK"
        else:
            n_components = dim
            mod_obj["type"] = "Krig"
        mod_obj["theta0"] = [1.0] * n_components
        mod_obj["thetaL"] = [0.1] * n_components
        mod_obj["thetaU"] = [10.0] * n_components

        optim_settings["model_type"] = {"obj": mod_obj, "con": mod_obj}

        print(optim_settings)
        # Instantiate a SEGO optimizer
        sego = Sego(
            self._objfunc,
            self._sego_vars,
            const=self._sego_cons,
            optim_settings=optim_settings,
            path_hs=path_hs,
            comm=self.comm,
        )

        # Run the optim
        # exit_flag, x_best, obj_best, dt_opt = sego.run_optim(
        exit_flag, x_best, _, _ = sego.run_optim(n_iter=n_iter)

        # Set optimal parameters
        i = 0
        for name, meta in self._designvars.items():
            size = meta["size"]
            self.set_design_var(name, x_best[i : i + size])
            i += size

        with RecordingDebugging(
            self.options["optimizer"], self.iter_count, self
        ) as rec:
            model._solve_nonlinear()
            rec.abs = 0.0
            rec.rel = 0.0
        self.iter_count += 1

        self.exit_flag = (exit_flag == ExitStatus.valid_sol) or (
            exit_flag == ExitStatus.solution_reached
        )

        return self.exit_flag
Ejemplo n.º 26
    def objective_callback(self, x, icase):
        r"""
        Evaluate problem objective at the requested point.

        Takes into account constraints with a penalty function.

        All constraints are converted to the form of :math:`g(x)_i \leq 0` for
        inequality constraints and :math:`h(x)_i = 0` for equality constraints.
        The constraint vector for inequality constraints is the following:

        .. math::

           g = [g_1, g_2  \dots g_N], g_i \in R^{N_{g_i}}
           h = [h_1, h_2  \dots h_N], h_i \in R^{N_{h_i}}

        The number of all constraints:

        .. math::

           N_g = \sum_{i=1}^N N_{g_i},  N_h = \sum_{i=1}^N N_{h_i}

        The fitness function is constructed with the penalty parameter :math:`p`
        and the exponent :math:`\kappa`:

        .. math::

           \Phi(x) = f(x) + p \cdot \sum_{k=1}^{N^g}(\delta_k \cdot g_k^{\kappa})
           + p \cdot \sum_{k=1}^{N^h}|h_k|^{\kappa}

        where :math:`\delta_k = 0` if :math:`g_k` is satisfied, 1 otherwise

        .. note::

            The values of :math:`\kappa` and :math:`p` can be defined as driver options.

        Parameters
        ----------
        x : ndarray
            Value of design variables.
        icase : int
            Case number, used for identification when run in parallel.

        Returns
        -------
        float
            Objective value
        bool
            Success flag, True if successful
        int
            Case number, used for identification when run in parallel.
        """
        model = self._problem.model
        success = 1
        # self._update_voi_meta(model)  # by O.P.  # FIXME test

        for name in self._designvars:
            i, j = self._desvar_idx[name]
            self.set_design_var(name, x[i:j])

        # Execute the model
        with RecordingDebugging('SimpleGA', self.iter_count, self) as rec:
            self.iter_count += 1
            try:
                model._solve_nonlinear()

            # Tell the optimizer that this is a bad point.
            except AnalysisError:
                model._clear_iprint()
                success = 0

            for name, val in iteritems(self.get_objective_values()):
                obj = val
                break

            # Parameters of the penalty method
            penalty = self.options['penalty_parameter']
            exponent = self.options['penalty_exponent']

            if penalty == 0:
                fun = obj
            else:
                constraint_violations = np.array([])
                for name, val in iteritems(self.get_constraint_values()):
                    con = self._cons[name]
                    # Unused fields will be either None or a very large number
                    if (con['lower'] is not None) and np.isfinite(
                            con['lower']):
                        diff = val - con['lower']
                        violation = np.array(
                            [0. if d >= 0 else abs(d) for d in diff])
                    elif (con['upper'] is not None) and np.isfinite(
                            con['upper']):
                        diff = val - con['upper']
                        violation = np.array(
                            [0. if d <= 0 else abs(d) for d in diff])
                    elif (con['equals'] is not None) and np.isfinite(
                            con['equals']):
                        diff = val - con['equals']
                        violation = np.absolute(diff)
                    constraint_violations = np.hstack(
                        (constraint_violations, violation))
                fun = obj + penalty * sum(
                    np.power(constraint_violations, exponent))
            # Record after getting obj to assure they have
            # been gathered in MPI.
            rec.abs = 0.0
            rec.rel = 0.0

        # print("Functions calculated")
        # print(x)
        # print(obj)
        return fun, success, icase
Ejemplo n.º 27
    def run(self):
        """
        Execute pyOptSparse.

        Note that pyOpt controls the execution, and the individual optimizers
        (e.g., SNOPT) control the iteration.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        problem = self._problem
        model = problem.model
        relevant = model._relevant
        self.pyopt_solution = None
        self._total_jac = None
        self.iter_count = 0
        fwd = problem._mode == 'fwd'
        optimizer = self.options['optimizer']

        # Only need initial run if we have linear constraints or if we are using an optimizer that
        # doesn't perform one initially.
        con_meta = self._cons
        if optimizer in run_required or np.any([con['linear'] for con in itervalues(self._cons)]):
            with RecordingDebugging(optimizer, self.iter_count, self) as rec:
                # Initial Run
                model._solve_nonlinear()
                rec.abs = 0.0
                rec.rel = 0.0
            self.iter_count += 1

        # compute dynamic simul deriv coloring or just sparsity if option is set
        if coloring_mod._use_sparsity:
            if self.options['dynamic_simul_derivs']:
                coloring_mod.dynamic_simul_coloring(self, do_sparsity=True)
            elif self.options['dynamic_derivs_sparsity']:
                coloring_mod.dynamic_sparsity(self)

        opt_prob = Optimization(self.options['title'], self._objfunc)

        # Add all design variables
        param_meta = self._designvars
        self._indep_list = indep_list = list(param_meta)
        param_vals = self.get_design_var_values()

        for name, meta in iteritems(param_meta):
            opt_prob.addVarGroup(name, meta['size'], type='c',
                                 value=param_vals[name],
                                 lower=meta['lower'], upper=meta['upper'])

        opt_prob.finalizeDesignVariables()

        # Add all objectives
        objs = self.get_objective_values()
        for name in objs:
            opt_prob.addObj(name)
            self._quantities.append(name)

        # Calculate and save derivatives for any linear constraints.
        lcons = [key for (key, con) in iteritems(con_meta) if con['linear']]
        if len(lcons) > 0:
            _lin_jacs = self._compute_totals(of=lcons, wrt=indep_list, return_format='dict')
            # convert all of our linear constraint jacs to COO format. Otherwise pyoptsparse will
            # do it for us and we'll end up with a fully dense COO matrix and very slow evaluation
            # of linear constraints!
            to_remove = []
            for oname, jacdct in iteritems(_lin_jacs):
                for n, subjac in iteritems(jacdct):
                    if isinstance(subjac, np.ndarray):
                        # we can safely use coo_matrix to automatically convert the ndarray
                        # since our linear constraint jacs are constant, so zeros won't become
                        # nonzero during the optimization.
                        mat = coo_matrix(subjac)
                        if mat.row.size > 0:
                            # convert to 'coo' format here to avoid an emphatic warning
                            # by pyoptsparse.
                            jacdct[n] = {'coo': [mat.row, mat.col, mat.data], 'shape': mat.shape}

        # Add all equality constraints
        for name, meta in iteritems(con_meta):
            if meta['equals'] is None:
                continue
            size = meta['size']
            lower = upper = meta['equals']
            if fwd:
                wrt = [v for v in indep_list if name in relevant[v]]
            else:
                rels = relevant[name]
                wrt = [v for v in indep_list if v in rels]

            if meta['linear']:
                jac = {w: _lin_jacs[name][w] for w in wrt}
                opt_prob.addConGroup(name, size, lower=lower, upper=upper,
                                     linear=True, wrt=wrt, jac=jac)
            else:
                if name in self._res_jacs:
                    resjac = self._res_jacs[name]
                    jac = {n: resjac[n] for n in wrt}
                else:
                    jac = None
                opt_prob.addConGroup(name, size, lower=lower, upper=upper, wrt=wrt, jac=jac)
                self._quantities.append(name)

        # Add all inequality constraints
        for name, meta in iteritems(con_meta):
            if meta['equals'] is not None:
                continue
            size = meta['size']

            # Bounds - double sided is supported
            lower = meta['lower']
            upper = meta['upper']

            if fwd:
                wrt = [v for v in indep_list if name in relevant[v]]
            else:
                rels = relevant[name]
                wrt = [v for v in indep_list if v in rels]

            if meta['linear']:
                jac = {w: _lin_jacs[name][w] for w in wrt}
                opt_prob.addConGroup(name, size, upper=upper, lower=lower,
                                     linear=True, wrt=wrt, jac=jac)
            else:
                if name in self._res_jacs:
                    resjac = self._res_jacs[name]
                    jac = {n: resjac[n] for n in wrt}
                else:
                    jac = None
                opt_prob.addConGroup(name, size, upper=upper, lower=lower, wrt=wrt, jac=jac)
                self._quantities.append(name)

        # Instantiate the requested optimizer
        try:
            _tmp = __import__('pyoptsparse', globals(), locals(), [optimizer], 0)
            opt = getattr(_tmp, optimizer)()

        except Exception as err:
            # Change whatever pyopt gives us to an ImportError, give it a readable message,
            # but raise with the original traceback.
            msg = "Optimizer %s is not available in this installation." % optimizer
            reraise(ImportError, ImportError(msg), sys.exc_info()[2])

        # Set optimization options
        for option, value in self.opt_settings.items():
            opt.setOption(option, value)

        # Execute the optimization problem
        if self.options['gradient method'] == 'pyopt_fd':

            # Use pyOpt's internal finite difference
            # TODO: Need to get this from OpenMDAO
            # fd_step = problem.root.deriv_options['step_size']
            fd_step = 1e-6
            sol = opt(opt_prob, sens='FD', sensStep=fd_step, storeHistory=self.hist_file,
                      hotStart=self.hotstart_file)

        elif self.options['gradient method'] == 'snopt_fd':
            if self.options['optimizer'] == 'SNOPT':

                # Use SNOPT's internal finite difference
                # TODO: Need to get this from OpenMDAO
                # fd_step = problem.root.deriv_options['step_size']
                fd_step = 1e-6
                sol = opt(opt_prob, sens=None, sensStep=fd_step, storeHistory=self.hist_file,
                          hotStart=self.hotstart_file)

            else:
                msg = "SNOPT's internal finite difference can only be used with SNOPT"
                raise Exception(msg)
        else:

            # Use OpenMDAO's differentiator for the gradient
            sol = opt(opt_prob, sens=self._gradfunc, storeHistory=self.hist_file,
                      hotStart=self.hotstart_file)

        # Print results
        if self.options['print_results']:
            print(sol)

        # Pull optimal parameters back into the framework and re-run, so that
        # the framework is left in the right final state.
        dv_dict = sol.getDVs()
        for name in indep_list:
            val = dv_dict[name]
            self.set_design_var(name, val)

        with RecordingDebugging(self.options['optimizer'], self.iter_count, self) as rec:
            model._solve_nonlinear()
            rec.abs = 0.0
            rec.rel = 0.0
        self.iter_count += 1

        # Save the most recent solution.
        self.pyopt_solution = sol
        try:
            exit_status = sol.optInform['value']
            self.fail = False

            # These are various failed statuses.
            if exit_status > 2:
                self.fail = True

        except KeyError:
            # optimizers other than pySNOPT may not populate this dict
            pass

        return self.fail
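# --- Standalone sketch of the 'coo' Jacobian format used above -----------------
# The linear-constraint Jacobians are handed to pyOptSparse as a sparse
# dictionary built from scipy's coo_matrix, mirroring the conversion in the
# run() method above.  The dense sub-Jacobian below is made up for illustration.
import numpy as np
from scipy.sparse import coo_matrix

subjac = np.array([[1.0, 0.0, 2.0],
                   [0.0, 0.0, 3.0]])

mat = coo_matrix(subjac)
pyoptsparse_jac = {'coo': [mat.row, mat.col, mat.data], 'shape': mat.shape}
print(pyoptsparse_jac)   # rows [0 0 1], cols [0 2 2], data [1. 2. 3.], shape (2, 3)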