Example #1
    def record_iteration(self, **kwargs):
        """
        Record an iteration of the current Solver.

        Parameters
        ----------
        **kwargs : dict
            Keyword arguments (used for abs and rel error).
        """
        metadata = create_local_meta(self.SOLVER)
        self._rec_mgr.record_iteration(self, metadata, **kwargs)
Example #2
    def _get_recorder_metadata(self, case_name):
        """
        Return metadata from the latest iteration for use in the recorder.

        Parameters
        ----------
        case_name : str
            Name of current case.

        Returns
        -------
        dict
            Metadata dictionary for the recorder.
        """
        return create_local_meta(case_name)
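
Every example on this page calls create_local_meta with a single name (a solver type, a case name, or a driver name) and hands the result to the recorder manager. For orientation, here is a minimal sketch of what such a factory could return, inferred purely from these call sites; the field names are illustrative assumptions, not taken from the library source.

    from time import time

    def create_local_meta(name):
        # Hypothetical sketch inferred from the call sites in these examples:
        # the recorder manager only needs a small dict identifying who asked
        # for the record plus some bookkeeping. Field names are assumptions.
        return {
            'name': name,         # e.g. self.SOLVER, case_name, self._get_name()
            'timestamp': time(),  # when this iteration was recorded
            'success': 1,         # 1 until an error is flagged
            'msg': '',            # optional status message
        }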
Example #3
    def record_iteration(self, **kwargs):
        """
        Record an iteration of the current Solver.

        Parameters
        ----------
        **kwargs : dict
            Keyword arguments (used for abs and rel error).
        """
        if not self._rec_mgr._recorders:
            return

        metadata = create_local_meta(self.SOLVER)

        # Get the data
        data = {
            'abs': kwargs.get('abs') if self.recording_options['record_abs_error'] else None,
            'rel': kwargs.get('rel') if self.recording_options['record_rel_error'] else None,
            'input': {},
            'output': {},
            'residual': {}
        }

        system = self._system()
        vec_name = 'nonlinear' if isinstance(self, NonlinearSolver) else 'linear'
        filt = self._filtered_vars_to_record
        parallel = self._rec_mgr._check_parallel() if system.comm.size > 1 else False

        if self.recording_options['record_outputs']:
            data['output'] = system._retrieve_data_of_kind(
                filt, 'output', vec_name, parallel)

        if self.recording_options['record_inputs']:
            data['input'] = system._retrieve_data_of_kind(
                filt, 'input', vec_name, parallel)

        if self.recording_options['record_solver_residuals']:
            data['residual'] = system._retrieve_data_of_kind(
                filt, 'residual', vec_name, parallel)

        self._rec_mgr.record_iteration(self, data, metadata)
Example #4
    def record_iteration(self):
        """
        Record an iteration of the current Driver.
        """
        if not self._rec_mgr._recorders:
            return

        # Get the data to record (collective calls that get across all ranks)
        opts = self.recording_options
        filt = self._filtered_vars_to_record

        if opts['record_desvars']:
            des_vars = self.get_design_var_values()
        else:
            des_vars = {}

        if opts['record_objectives']:
            obj_vars = self.get_objective_values()
        else:
            obj_vars = {}

        if opts['record_constraints']:
            con_vars = self.get_constraint_values()
        else:
            con_vars = {}

        if opts['record_responses']:
            # res_vars = self.get_response_values()  # not really working yet
            res_vars = {}
        else:
            res_vars = {}

        des_vars = {name: des_vars[name] for name in filt['des']}
        obj_vars = {name: obj_vars[name] for name in filt['obj']}
        con_vars = {name: con_vars[name] for name in filt['con']}
        # res_vars = {name: res_vars[name] for name in filt['res']}

        model = self._problem.model

        in_vars = {}
        outputs = model._outputs
        inputs = model._inputs
        views = outputs._views
        views_in = inputs._views
        sys_vars = {
            name: views[name]
            for name in outputs._names if name in filt['sys']
        }
        if self.recording_options['record_inputs']:
            in_vars = {
                name: views_in[name]
                for name in inputs._names if name in filt['in']
            }

        if MPI:
            des_vars = self._gather_vars(model, des_vars)
            res_vars = self._gather_vars(model, res_vars)
            obj_vars = self._gather_vars(model, obj_vars)
            con_vars = self._gather_vars(model, con_vars)
            sys_vars = self._gather_vars(model, sys_vars)
            in_vars = self._gather_vars(model, in_vars)

        outs = {}
        if not MPI or model.comm.rank == 0:
            outs.update(des_vars)
            outs.update(res_vars)
            outs.update(obj_vars)
            outs.update(con_vars)
            outs.update(sys_vars)

        data = {'out': outs, 'in': in_vars}

        metadata = create_local_meta(self._get_name())

        self._rec_mgr.record_iteration(self, data, metadata)
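
Examples #3 and #4 show that what gets recorded is controlled entirely by the recording_options flags. The sketch below shows how those flags are typically set before a run, assuming the public openmdao.api entry points (model construction omitted).

    import openmdao.api as om

    prob = om.Problem()
    # ... build prob.model here ...

    driver = prob.driver
    driver.add_recorder(om.SqliteRecorder('cases.sql'))

    # Each flag corresponds to one branch of record_iteration above.
    driver.recording_options['record_desvars'] = True
    driver.recording_options['record_objectives'] = True
    driver.recording_options['record_constraints'] = True
    driver.recording_options['record_inputs'] = False

    prob.setup()
    prob.run_driver()  # record_iteration fires once per driver iteration
    prob.cleanup()     # closes the attached recorders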
Example #5
    def record_iteration(self, **kwargs):
        """
        Record an iteration of the current Solver.

        Parameters
        ----------
        **kwargs : dict
            Keyword arguments (used for abs and rel error).
        """
        if not self._rec_mgr._recorders:
            return

        metadata = create_local_meta(self.SOLVER)

        # Get the data
        data = {}

        if self.recording_options['record_abs_error']:
            data['abs'] = kwargs.get('abs')
        else:
            data['abs'] = None

        if self.recording_options['record_rel_error']:
            data['rel'] = kwargs.get('rel')
        else:
            data['rel'] = None

        system = self._system()
        if isinstance(self, NonlinearSolver):
            outputs = system._outputs
            inputs = system._inputs
            residuals = system._residuals
        else:  # it's a LinearSolver
            outputs = system._vectors['output']['linear']
            inputs = system._vectors['input']['linear']
            residuals = system._vectors['residual']['linear']

        if self.recording_options['record_outputs']:
            data['o'] = {}
            if 'out' in self._filtered_vars_to_record:
                for out in self._filtered_vars_to_record['out']:
                    if out in outputs._names:
                        data['o'][out] = outputs._views[out]
            else:
                data['o'] = outputs
        else:
            data['o'] = None

        if self.recording_options['record_inputs']:
            data['i'] = {}
            if 'in' in self._filtered_vars_to_record:
                for inp in self._filtered_vars_to_record['in']:
                    if inp in inputs._names:
                        data['i'][inp] = inputs._views[inp]
            else:
                data['i'] = inputs
        else:
            data['i'] = None

        if self.recording_options['record_solver_residuals']:
            data['r'] = {}
            if 'res' in self._filtered_vars_to_record:
                for res in self._filtered_vars_to_record['res']:
                    if res in residuals._names:
                        data['r'][res] = residuals._views[res]
            else:
                data['r'] = residuals
        else:
            data['r'] = None

        self._rec_mgr.record_iteration(self, data, metadata)
Example #6
    def record_iteration(self):
        """
        Record an iteration of the current Driver.
        """
        if not self._rec_mgr._recorders:
            return

        metadata = create_local_meta(self._get_name())

        # Get the data to record
        data = {}
        if self.recording_options['record_desvars']:
            # collective call that gets across all ranks
            desvars = self.get_design_var_values()
        else:
            desvars = {}

        if self.recording_options['record_responses']:
            # responses = self.get_response_values() # not really working yet
            responses = {}
        else:
            responses = {}

        if self.recording_options['record_objectives']:
            objectives = self.get_objective_values()
        else:
            objectives = {}

        if self.recording_options['record_constraints']:
            constraints = self.get_constraint_values()
        else:
            constraints = {}

        desvars = {name: desvars[name]
                   for name in self._filtered_vars_to_record['des']}
        # responses not working yet
        # responses = {name: responses[name] for name in self._filtered_vars_to_record['res']}
        objectives = {name: objectives[name]
                      for name in self._filtered_vars_to_record['obj']}
        constraints = {name: constraints[name]
                       for name in self._filtered_vars_to_record['con']}

        if self.recording_options['includes']:
            root = self._problem.model
            outputs = root._outputs
            # inputs, outputs, residuals = root.get_nonlinear_vectors()
            sysvars = {}
            for name, value in outputs._names.items():
                if name in self._filtered_vars_to_record['sys']:
                    sysvars[name] = value
        else:
            sysvars = {}

        if MPI:
            root = self._problem.model
            desvars = self._gather_vars(root, desvars)
            responses = self._gather_vars(root, responses)
            objectives = self._gather_vars(root, objectives)
            constraints = self._gather_vars(root, constraints)
            sysvars = self._gather_vars(root, sysvars)

        data['des'] = desvars
        data['res'] = responses
        data['obj'] = objectives
        data['con'] = constraints
        data['sys'] = sysvars

        self._rec_mgr.record_iteration(self, data, metadata)
Example #7
    def _compute_totals(self,
                        of=None,
                        wrt=None,
                        return_format='flat_dict',
                        global_names=None,
                        use_abs_names=True):
        """
        Compute derivatives of desired quantities with respect to desired inputs.

        All derivatives are returned using driver scaling.

        Parameters
        ----------
        of : list of variable name strings or None
            Variables whose derivatives will be computed. Default is None, which
            uses the driver's objectives and constraints.
        wrt : list of variable name strings or None
            Variables with respect to which the derivatives will be computed.
            Default is None, which uses the driver's desvars.
        return_format : string
            Format to return the derivatives. Default is a 'flat_dict', which
            returns them in a dictionary whose keys are tuples of form (of, wrt). For
            the scipy optimizer, 'array' is also supported.
        global_names : bool
            Deprecated.  Use 'use_abs_names' instead.
        use_abs_names : bool
            Set to True when passing in absolute names to skip some translation steps.

        Returns
        -------
        derivs : object
            Derivatives in form requested by 'return_format'.
        """
        problem = self._problem()
        total_jac = self._total_jac
        debug_print = ('totals' in self.options['debug_print'] and
                       (not MPI or problem.comm.rank == 0))

        if debug_print:
            header = 'Driver total derivatives for iteration: ' + str(self.iter_count)
            print(header)
            print(len(header) * '-' + '\n')

        if global_names is not None:
            warn_deprecation(
                "'global_names' is deprecated in calls to _compute_totals. "
                "Use 'use_abs_names' instead.")
            use_abs_names = global_names

        if problem.model._owns_approx_jac:
            self._recording_iter.push(('_compute_totals_approx', 0))

            try:
                if total_jac is None:
                    total_jac = _TotalJacInfo(problem,
                                              of,
                                              wrt,
                                              use_abs_names,
                                              return_format,
                                              approx=True,
                                              debug_print=debug_print)

                    # Don't cache linear constraint jacobian
                    if not total_jac.has_lin_cons:
                        self._total_jac = total_jac

                    totals = total_jac.compute_totals_approx(initialize=True)
                else:
                    totals = total_jac.compute_totals_approx()
            finally:
                self._recording_iter.pop()

        else:
            if total_jac is None:
                total_jac = _TotalJacInfo(problem,
                                          of,
                                          wrt,
                                          use_abs_names,
                                          return_format,
                                          debug_print=debug_print)

                # don't cache linear constraint jacobian
                if not total_jac.has_lin_cons:
                    self._total_jac = total_jac

            self._recording_iter.push(('_compute_totals', 0))

            try:
                totals = total_jac.compute_totals()
            finally:
                self._recording_iter.pop()

        if self._rec_mgr._recorders and self.recording_options['record_derivatives']:
            metadata = create_local_meta(self._get_name())
            total_jac.record_derivatives(self, metadata)

        return totals
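
Example #7's _compute_totals is the driver-side entry point for total derivatives; user code normally reaches the same machinery through the public Problem.compute_totals. A short sketch of that call, where 'obj', 'con1', 'x', and 'z' are placeholder variable names standing in for whatever the model defines:

    # In 'flat_dict' format the keys are (of, wrt) tuples, as the
    # docstring above describes.
    totals = prob.compute_totals(of=['obj', 'con1'], wrt=['x', 'z'],
                                 return_format='flat_dict')
    print(totals['obj', 'x'])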
Example #8
    def record_iteration(self, **kwargs):
        """
        Record an iteration of the current Solver.

        Parameters
        ----------
        **kwargs : dict
            Keyword arguments (used for abs and rel error).
        """
        if not self._rec_mgr._recorders:
            return

        metadata = create_local_meta(self.SOLVER)

        # Get the data
        data = {}

        if self.recording_options['record_abs_error']:
            data['abs'] = kwargs.get('abs')
        else:
            data['abs'] = None

        if self.recording_options['record_rel_error']:
            data['rel'] = kwargs.get('rel')
        else:
            data['rel'] = None

        system = self._system
        if isinstance(self, NonlinearSolver):
            outputs = system._outputs
            inputs = system._inputs
            residuals = system._residuals
        else:  # it's a LinearSolver
            outputs = system._vectors['output']['linear']
            inputs = system._vectors['input']['linear']
            residuals = system._vectors['residual']['linear']

        if self.recording_options['record_outputs']:
            data['o'] = {}
            if 'out' in self._filtered_vars_to_record:
                for out in self._filtered_vars_to_record['out']:
                    if out in outputs._names:
                        data['o'][out] = outputs._views[out]
            else:
                data['o'] = outputs
        else:
            data['o'] = None

        if self.recording_options['record_inputs']:
            data['i'] = {}
            if 'in' in self._filtered_vars_to_record:
                for inp in self._filtered_vars_to_record['in']:
                    if inp in inputs._names:
                        data['i'][inp] = inputs._views[inp]
            else:
                data['i'] = inputs
        else:
            data['i'] = None

        if self.recording_options['record_solver_residuals']:
            data['r'] = {}
            if 'res' in self._filtered_vars_to_record:
                for res in self._filtered_vars_to_record['res']:
                    if res in residuals._names:
                        data['r'][res] = residuals._views[res]
            else:
                data['r'] = residuals
        else:
            data['r'] = None

        self._rec_mgr.record_iteration(self, data, metadata)
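
The solver variants gate the 'abs'/'rel' errors and the output/input/residual vectors through the solver's own recording_options. A companion sketch for attaching a recorder at the solver level, again assuming the public openmdao.api interface:

    import openmdao.api as om

    prob = om.Problem()
    # ... build prob.model here ...

    solver = prob.model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)
    solver.add_recorder(om.SqliteRecorder('solver_cases.sql'))

    # These flags select the data assembled in record_iteration above.
    solver.recording_options['record_abs_error'] = True
    solver.recording_options['record_rel_error'] = True
    solver.recording_options['record_solver_residuals'] = True

    prob.setup()
    prob.run_model()
    prob.cleanup()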
Example #9
    def record_iteration(self):
        """
        Record an iteration of the current Driver.
        """
        metadata = create_local_meta(self._get_name())
        self._rec_mgr.record_iteration(self, metadata)
Example #10
    def record_iteration(self):
        """
        Record an iteration of the current Driver.
        """
        if not self._rec_mgr._recorders:
            return

        metadata = create_local_meta(self._get_name())

        # Get the data to record
        data = {}
        if self.recording_options['record_desvars']:
            # collective call that gets across all ranks
            desvars = self.get_design_var_values()
        else:
            desvars = {}

        if self.recording_options['record_responses']:
            # responses = self.get_response_values() # not really working yet
            responses = {}
        else:
            responses = {}

        if self.recording_options['record_objectives']:
            objectives = self.get_objective_values()
        else:
            objectives = {}

        if self.recording_options['record_constraints']:
            constraints = self.get_constraint_values()
        else:
            constraints = {}

        desvars = {name: desvars[name]
                   for name in self._filtered_vars_to_record['des']}
        # responses not working yet
        # responses = {name: responses[name] for name in self._filtered_vars_to_record['res']}
        objectives = {name: objectives[name]
                      for name in self._filtered_vars_to_record['obj']}
        constraints = {name: constraints[name]
                       for name in self._filtered_vars_to_record['con']}

        if self.recording_options['includes']:
            root = self._problem.model
            outputs = root._outputs
            # inputs, outputs, residuals = root.get_nonlinear_vectors()
            sysvars = {}
            for name, value in iteritems(outputs._names):
                if name in self._filtered_vars_to_record['sys']:
                    sysvars[name] = value
        else:
            sysvars = {}

        if MPI:
            root = self._problem.model
            desvars = self._gather_vars(root, desvars)
            responses = self._gather_vars(root, responses)
            objectives = self._gather_vars(root, objectives)
            constraints = self._gather_vars(root, constraints)
            sysvars = self._gather_vars(root, sysvars)

        data['des'] = desvars
        data['res'] = responses
        data['obj'] = objectives
        data['con'] = constraints
        data['sys'] = sysvars

        self._rec_mgr.record_iteration(self, data, metadata)
Example #11
    def run(self):
        """
        Execute pyOptsparse.

        Note that pyOpt controls the execution, and the individual optimizers
        (e.g., SNOPT) control the iteration.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        problem = self._problem
        model = problem.model
        relevant = model._relevant
        self.pyopt_solution = None
        self.iter_count = 0
        fwd = problem._mode == 'fwd'

        # Metadata Setup
        self.metadata = create_local_meta(self.options['optimizer'])

        with Recording(self.options['optimizer'], self.iter_count,
                       self) as rec:
            # Initial Run
            model._solve_nonlinear()
            rec.abs = 0.0
            rec.rel = 0.0
        self.iter_count += 1

        opt_prob = Optimization(self.options['title'], self._objfunc)

        # Add all design variables
        param_meta = self._designvars
        self._indep_list = indep_list = list(param_meta)
        param_vals = self.get_design_var_values()

        for name, meta in iteritems(param_meta):
            opt_prob.addVarGroup(name,
                                 meta['size'],
                                 type='c',
                                 value=param_vals[name],
                                 lower=meta['lower'],
                                 upper=meta['upper'])

        opt_prob.finalizeDesignVariables()

        # Add all objectives
        objs = self.get_objective_values()
        for name in objs:
            opt_prob.addObj(name)
            self._quantities.append(name)

        # Calculate and save derivatives for any linear constraints.
        con_meta = self._cons
        lcons = [
            key for (key, con) in iteritems(con_meta) if con['linear'] is True
        ]
        if len(lcons) > 0:
            _lin_jacs = problem._compute_totals(of=lcons,
                                                wrt=indep_list,
                                                return_format='dict')

        # Add all equality constraints
        self.active_tols = {}
        eqcons = OrderedDict((key, con) for (key, con) in iteritems(con_meta)
                             if con['equals'] is not None)
        for name, meta in iteritems(eqcons):
            size = meta['size']
            lower = upper = meta['equals']
            if fwd:
                wrt = [v for v in indep_list if name in relevant[v]]
            else:
                rels = relevant[name]
                wrt = [v for v in indep_list if v in rels]

            if meta['linear']:
                opt_prob.addConGroup(name,
                                     size,
                                     lower=lower,
                                     upper=upper,
                                     linear=True,
                                     wrt=wrt,
                                     jac=_lin_jacs[name])
            else:
                opt_prob.addConGroup(name,
                                     size,
                                     lower=lower,
                                     upper=upper,
                                     wrt=wrt)
                self._quantities.append(name)

        # Add all inequality constraints
        iqcons = OrderedDict((key, con) for (key, con) in iteritems(con_meta)
                             if con['equals'] is None)
        for name, meta in iteritems(iqcons):
            size = meta['size']

            # Bounds - double sided is supported
            lower = meta['lower']
            upper = meta['upper']

            if fwd:
                wrt = [v for v in indep_list if name in relevant[v]]
            else:
                rels = relevant[name]
                wrt = [v for v in indep_list if v in rels]

            if meta['linear']:
                opt_prob.addConGroup(name,
                                     size,
                                     upper=upper,
                                     lower=lower,
                                     linear=True,
                                     wrt=wrt,
                                     jac=_lin_jacs[name])
            else:
                opt_prob.addConGroup(name,
                                     size,
                                     upper=upper,
                                     lower=lower,
                                     wrt=wrt)
                self._quantities.append(name)

        # Instantiate the requested optimizer
        optimizer = self.options['optimizer']
        try:
            _tmp = __import__('pyoptsparse', globals(), locals(), [optimizer],
                              0)
            opt = getattr(_tmp, optimizer)()

        except ImportError:
            msg = "Optimizer %s is not available in this installation." % optimizer
            raise ImportError(msg)

        # Set optimization options
        for option, value in self.opt_settings.items():
            opt.setOption(option, value)

        self.opt_prob = opt_prob
        self.opt = opt

        # Execute the optimization problem
        if self.options['gradient method'] == 'pyopt_fd':

            # Use pyOpt's internal finite difference
            # TODO: Need to get this from OpenMDAO
            # fd_step = problem.root.deriv_options['step_size']
            fd_step = 1e-6
            sol = opt(opt_prob,
                      sens='FD',
                      sensStep=fd_step,
                      storeHistory=self.hist_file,
                      hotStart=self.hotstart_file)

        elif self.options['gradient method'] == 'snopt_fd':
            if self.options['optimizer'] == 'SNOPT':

                # Use SNOPT's internal finite difference
                # TODO: Need to get this from OpenMDAO
                # fd_step = problem.root.deriv_options['step_size']
                fd_step = 1e-6
                sol = opt(opt_prob,
                          sens=None,
                          sensStep=fd_step,
                          storeHistory=self.hist_file,
                          hotStart=self.hotstart_file)

            else:
                msg = "SNOPT's internal finite difference can only be used with SNOPT"
                raise Exception(msg)
        else:

            # Use OpenMDAO's differentiator for the gradient
            sol = opt(opt_prob,
                      sens=self._gradfunc,
                      storeHistory=self.hist_file,
                      hotStart=self.hotstart_file)

        # Print results
        if self.options['print_results']:
            print(sol)

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        dv_dict = sol.getDVs()
        for name in indep_list:
            val = dv_dict[name]
            self.set_design_var(name, val)

        with Recording(self.options['optimizer'], self.iter_count,
                       self) as rec:
            model._solve_nonlinear()
            rec.abs = 0.0
            rec.rel = 0.0
        self.iter_count += 1

        # Save the most recent solution.
        self.pyopt_solution = sol
        try:
            exit_status = sol.optInform['value']
            self.fail = False

            # These are various failed statuses.
            if exit_status > 2:
                self.fail = True

        except KeyError:
            # Nothing is here, so something bad happened!
            self.fail = True

        return self.fail
Example #12
    def run(self):
        """
        Execute pyOptsparse.

        Note that pyOpt controls the execution, and the individual optimizers
        (e.g., SNOPT) control the iteration.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        problem = self._problem
        model = problem.model
        relevant = model._relevant
        self.pyopt_solution = None
        self.iter_count = 0
        fwd = problem._mode == 'fwd'

        # Metadata Setup
        self.metadata = create_local_meta(self.options['optimizer'])

        with RecordingDebugging(self.options['optimizer'], self.iter_count, self) as rec:
            # Initial Run
            model._solve_nonlinear()
            rec.abs = 0.0
            rec.rel = 0.0
        self.iter_count += 1

        opt_prob = Optimization(self.options['title'], self._objfunc)

        # Add all design variables
        param_meta = self._designvars
        self._indep_list = indep_list = list(param_meta)
        param_vals = self.get_design_var_values()

        for name, meta in iteritems(param_meta):
            opt_prob.addVarGroup(name, meta['size'], type='c',
                                 value=param_vals[name],
                                 lower=meta['lower'], upper=meta['upper'])

        opt_prob.finalizeDesignVariables()

        # Add all objectives
        objs = self.get_objective_values()
        for name in objs:
            opt_prob.addObj(name)
            self._quantities.append(name)

        # Calculate and save derivatives for any linear constraints.
        con_meta = self._cons
        lcons = [key for (key, con) in iteritems(con_meta) if con['linear'] is True]
        if len(lcons) > 0:
            _lin_jacs = self._compute_totals(of=lcons, wrt=indep_list, return_format='dict')
            # convert all of our linear constraint jacs to COO format. Otherwise pyoptsparse will
            # do it for us and we'll end up with a fully dense COO matrix and very slow evaluation
            # of linear constraints!
            to_remove = []
            for oname, jacdct in iteritems(_lin_jacs):
                for n, subjac in iteritems(jacdct):
                    if isinstance(subjac, np.ndarray):
                        # we can safely use coo_matrix to automatically convert the ndarray
                        # since our linear constraint jacs are constant, so zeros won't become
                        # nonzero during the optimization.
                        mat = coo_matrix(subjac)
                        if mat.row.size > 0:
                            jacdct[n] = {'coo': [mat.row, mat.col, mat.data], 'shape': mat.shape}

        # Add all equality constraints
        self.active_tols = {}
        eqcons = OrderedDict((key, con) for (key, con) in iteritems(con_meta)
                             if con['equals'] is not None)
        for name, meta in iteritems(eqcons):
            size = meta['size']
            lower = upper = meta['equals']
            if fwd:
                wrt = [v for v in indep_list if name in relevant[v]]
            else:
                rels = relevant[name]
                wrt = [v for v in indep_list if v in rels]

            if meta['linear']:
                jac = {w: _lin_jacs[name][w] for w in wrt}
                opt_prob.addConGroup(name, size, lower=lower, upper=upper,
                                     linear=True, wrt=wrt, jac=jac)
            else:
                if name in self._res_jacs:
                    jac = self._res_jacs[name]
                else:
                    jac = None
                opt_prob.addConGroup(name, size, lower=lower, upper=upper, wrt=wrt, jac=jac)
                self._quantities.append(name)

        # Add all inequality constraints
        iqcons = OrderedDict((key, con) for (key, con) in iteritems(con_meta)
                             if con['equals'] is None)
        for name, meta in iteritems(iqcons):
            size = meta['size']

            # Bounds - double sided is supported
            lower = meta['lower']
            upper = meta['upper']

            if fwd:
                wrt = [v for v in indep_list if name in relevant[v]]
            else:
                rels = relevant[name]
                wrt = [v for v in indep_list if v in rels]

            if meta['linear']:
                jac = {w: _lin_jacs[name][w] for w in wrt}
                opt_prob.addConGroup(name, size, upper=upper, lower=lower,
                                     linear=True, wrt=wrt, jac=jac)
            else:
                if name in self._res_jacs:
                    jac = self._res_jacs[name]
                else:
                    jac = None
                opt_prob.addConGroup(name, size, upper=upper, lower=lower, wrt=wrt, jac=jac)
                self._quantities.append(name)

        # Instantiate the requested optimizer
        optimizer = self.options['optimizer']
        try:
            _tmp = __import__('pyoptsparse', globals(), locals(), [optimizer], 0)
            opt = getattr(_tmp, optimizer)()

        except ImportError:
            msg = "Optimizer %s is not available in this installation." % optimizer
            raise ImportError(msg)

        # Set optimization options
        for option, value in self.opt_settings.items():
            opt.setOption(option, value)

        self.opt_prob = opt_prob
        self.opt = opt

        # Execute the optimization problem
        if self.options['gradient method'] == 'pyopt_fd':

            # Use pyOpt's internal finite difference
            # TODO: Need to get this from OpenMDAO
            # fd_step = problem.root.deriv_options['step_size']
            fd_step = 1e-6
            sol = opt(opt_prob, sens='FD', sensStep=fd_step, storeHistory=self.hist_file,
                      hotStart=self.hotstart_file)

        elif self.options['gradient method'] == 'snopt_fd':
            if self.options['optimizer'] == 'SNOPT':

                # Use SNOPT's internal finite difference
                # TODO: Need to get this from OpenMDAO
                # fd_step = problem.root.deriv_options['step_size']
                fd_step = 1e-6
                sol = opt(opt_prob, sens=None, sensStep=fd_step, storeHistory=self.hist_file,
                          hotStart=self.hotstart_file)

            else:
                msg = "SNOPT's internal finite difference can only be used with SNOPT"
                raise Exception(msg)
        else:

            # Use OpenMDAO's differentiator for the gradient
            sol = opt(opt_prob, sens=self._gradfunc, storeHistory=self.hist_file,
                      hotStart=self.hotstart_file)

        # Print results
        if self.options['print_results']:
            print(sol)

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        dv_dict = sol.getDVs()
        for name in indep_list:
            val = dv_dict[name]
            self.set_design_var(name, val)

        with RecordingDebugging(self.options['optimizer'], self.iter_count, self) as rec:
            model._solve_nonlinear()
            rec.abs = 0.0
            rec.rel = 0.0
        self.iter_count += 1

        # Save the most recent solution.
        self.pyopt_solution = sol
        try:
            exit_status = sol.optInform['value']
            self.fail = False

            # These are various failed statuses.
            if exit_status > 2:
                self.fail = True

        except KeyError:
            # optimizers other than pySNOPT may not populate this dict
            pass

        return self.fail
Example #13
    def record_iteration(self):
        """
        Record an iteration of the current Driver.
        """
        if not self._rec_mgr._recorders:
            return

        # Get the data to record (collective calls that get across all ranks)
        opts = self.recording_options
        filt = self._filtered_vars_to_record

        if opts['record_desvars']:
            des_vars = self.get_design_var_values(unscaled=True, ignore_indices=True)
        else:
            des_vars = {}

        if opts['record_objectives']:
            obj_vars = self.get_objective_values(unscaled=True, ignore_indices=True)
        else:
            obj_vars = {}

        if opts['record_constraints']:
            con_vars = self.get_constraint_values(unscaled=True, ignore_indices=True)
        else:
            con_vars = {}

        if opts['record_responses']:
            # res_vars = self.get_response_values()  # not really working yet
            res_vars = {}
        else:
            res_vars = {}

        des_vars = {name: des_vars[name] for name in filt['des']}
        obj_vars = {name: obj_vars[name] for name in filt['obj']}
        con_vars = {name: con_vars[name] for name in filt['con']}
        # res_vars = {name: res_vars[name] for name in filt['res']}

        model = self._problem.model

        names = model._outputs._names
        views = model._outputs._views
        sys_vars = {name: views[name] for name in names if name in filt['sys']}

        if self.recording_options['record_inputs']:
            names = model._inputs._names
            views = model._inputs._views
            in_vars = {name: views[name] for name in names if name in filt['in']}
        else:
            in_vars = {}

        if MPI:
            des_vars = self._gather_vars(model, des_vars)
            res_vars = self._gather_vars(model, res_vars)
            obj_vars = self._gather_vars(model, obj_vars)
            con_vars = self._gather_vars(model, con_vars)
            sys_vars = self._gather_vars(model, sys_vars)
            in_vars = self._gather_vars(model, in_vars)

        outs = {}
        if not MPI or model.comm.rank == 0:
            outs.update(des_vars)
            outs.update(res_vars)
            outs.update(obj_vars)
            outs.update(con_vars)
            outs.update(sys_vars)

        data = {
            'out': outs,
            'in': in_vars
        }

        metadata = create_local_meta(self._get_name())

        self._rec_mgr.record_iteration(self, data, metadata)
Example #14
    def _compute_totals(self, of=None, wrt=None, return_format='flat_dict', global_names=True):
        """
        Compute derivatives of desired quantities with respect to desired inputs.

        All derivatives are returned using driver scaling.

        Parameters
        ----------
        of : list of variable name strings or None
            Variables whose derivatives will be computed. Default is None, which
            uses the driver's objectives and constraints.
        wrt : list of variable name strings or None
            Variables with respect to which the derivatives will be computed.
            Default is None, which uses the driver's desvars.
        return_format : string
            Format to return the derivatives. Default is a 'flat_dict', which
            returns them in a dictionary whose keys are tuples of form (of, wrt). For
            the scipy optimizer, 'array' is also supported.
        global_names : bool
            Set to True when passing in global names to skip some translation steps.

        Returns
        -------
        derivs : object
            Derivatives in form requested by 'return_format'.
        """
        problem = self._problem
        total_jac = self._total_jac
        debug_print = 'totals' in self.options['debug_print'] and (not MPI or
                                                                   MPI.COMM_WORLD.rank == 0)

        if debug_print:
            header = 'Driver total derivatives for iteration: ' + str(self.iter_count)
            print(header)
            print(len(header) * '-' + '\n')

        if problem.model._owns_approx_jac:
            self._recording_iter.stack.append(('_compute_totals_approx', 0))

            try:
                if total_jac is None:
                    total_jac = _TotalJacInfo(problem, of, wrt, global_names,
                                              return_format, approx=True, debug_print=debug_print)
                    self._total_jac = total_jac
                    totals = total_jac.compute_totals_approx(initialize=True)
                else:
                    totals = total_jac.compute_totals_approx()
            finally:
                self._recording_iter.stack.pop()

        else:
            if total_jac is None:
                total_jac = _TotalJacInfo(problem, of, wrt, global_names, return_format,
                                          debug_print=debug_print)

            # don't cache linear constraint jacobian
            if not total_jac.has_lin_cons:
                self._total_jac = total_jac

            self._recording_iter.stack.append(('_compute_totals', 0))

            try:
                totals = total_jac.compute_totals()
            finally:
                self._recording_iter.stack.pop()

        if self._rec_mgr._recorders and self.recording_options['record_derivatives']:
            metadata = create_local_meta(self._get_name())
            total_jac.record_derivatives(self, metadata)

        return totals
Example #15
    def run(self):
        """
        Execute pyOptsparse.

        Note that pyOpt controls the execution, and the individual optimizers
        (e.g., SNOPT) control the iteration.

        Returns
        -------
        boolean
            Failure flag; True if failed to converge, False if successful.
        """
        problem = self._problem
        model = problem.model
        relevant = model._relevant
        self.pyopt_solution = None
        self.iter_count = 0
        fwd = problem._mode == 'fwd'

        # Metadata Setup
        self.metadata = create_local_meta(self.options['optimizer'])

        opt_prob = Optimization(self.options['title'], self._objfunc)

        # Add all design variables
        param_meta = self._designvars
        self._indep_list = indep_list = list(param_meta)
        param_vals = self.get_design_var_values()

        for name, meta in iteritems(param_meta):
            opt_prob.addVarGroup(name,
                                 meta['size'],
                                 type='c',
                                 value=param_vals[name],
                                 lower=meta['lower'],
                                 upper=meta['upper'])

        opt_prob.finalizeDesignVariables()

        # Add all objectives
        objs = self.get_objective_values()
        for name in objs:
            opt_prob.addObj(name)
            self._quantities.append(name)

        # Calculate and save derivatives for any linear constraints.
        con_meta = self._cons
        lcons = [
            key for (key, con) in iteritems(con_meta) if con['linear'] is True
        ]
        if len(lcons) > 0:
            _lin_jacs = self._compute_totals(of=lcons,
                                             wrt=indep_list,
                                             return_format='dict')
            # convert all of our linear constraint jacs to COO format. Otherwise pyoptsparse will
            # do it for us and we'll end up with a fully dense COO matrix and very slow evaluation
            # of linear constraints!
            to_remove = []
            for oname, jacdct in iteritems(_lin_jacs):
                for n, subjac in iteritems(jacdct):
                    if isinstance(subjac, np.ndarray):
                        # we can safely use coo_matrix to automatically convert the ndarray
                        # since our linear constraint jacs are constant, so zeros won't become
                        # nonzero during the optimization.
                        mat = coo_matrix(subjac)
                        if mat.row.size > 0:
                            jacdct[n] = {
                                'coo': [mat.row, mat.col, mat.data],
                                'shape': mat.shape
                            }

        # Add all equality constraints
        self.active_tols = {}
        eqcons = OrderedDict((key, con) for (key, con) in iteritems(con_meta)
                             if con['equals'] is not None)
        for name, meta in iteritems(eqcons):
            size = meta['size']
            lower = upper = meta['equals']
            if fwd:
                wrt = [v for v in indep_list if name in relevant[v]]
            else:
                rels = relevant[name]
                wrt = [v for v in indep_list if v in rels]

            if meta['linear']:
                jac = {w: _lin_jacs[name][w] for w in wrt}
                opt_prob.addConGroup(name,
                                     size,
                                     lower=lower,
                                     upper=upper,
                                     linear=True,
                                     wrt=wrt,
                                     jac=jac)
            else:
                if name in self._res_jacs:
                    jac = self._res_jacs[name]
                else:
                    jac = None
                opt_prob.addConGroup(name,
                                     size,
                                     lower=lower,
                                     upper=upper,
                                     wrt=wrt,
                                     jac=jac)
                self._quantities.append(name)

        # Add all inequality constraints
        iqcons = OrderedDict((key, con) for (key, con) in iteritems(con_meta)
                             if con['equals'] is None)
        for name, meta in iteritems(iqcons):
            size = meta['size']

            # Bounds - double sided is supported
            lower = meta['lower']
            upper = meta['upper']

            if fwd:
                wrt = [v for v in indep_list if name in relevant[v]]
            else:
                rels = relevant[name]
                wrt = [v for v in indep_list if v in rels]

            if meta['linear']:
                jac = {w: _lin_jacs[name][w] for w in wrt}
                opt_prob.addConGroup(name,
                                     size,
                                     upper=upper,
                                     lower=lower,
                                     linear=True,
                                     wrt=wrt,
                                     jac=jac)
            else:
                if name in self._res_jacs:
                    jac = self._res_jacs[name]
                else:
                    jac = None
                opt_prob.addConGroup(name,
                                     size,
                                     upper=upper,
                                     lower=lower,
                                     wrt=wrt,
                                     jac=jac)
                self._quantities.append(name)

        # Instantiate the requested optimizer
        optimizer = self.options['optimizer']
        try:
            _tmp = __import__('pyoptsparse', globals(), locals(), [optimizer],
                              0)
            opt = getattr(_tmp, optimizer)()

        except ImportError:
            msg = "Optimizer %s is not available in this installation." % optimizer
            raise ImportError(msg)

        # Set optimization options
        for option, value in self.opt_settings.items():
            opt.setOption(option, value)

        # Execute the optimization problem
        if self.options['gradient method'] == 'pyopt_fd':

            # Use pyOpt's internal finite difference
            # TODO: Need to get this from OpenMDAO
            # fd_step = problem.root.deriv_options['step_size']
            fd_step = 1e-6
            sol = opt(opt_prob,
                      sens='FD',
                      sensStep=fd_step,
                      storeHistory=self.hist_file,
                      hotStart=self.hotstart_file)

        elif self.options['gradient method'] == 'snopt_fd':
            if self.options['optimizer'] == 'SNOPT':

                # Use SNOPT's internal finite difference
                # TODO: Need to get this from OpenMDAO
                # fd_step = problem.root.deriv_options['step_size']
                fd_step = 1e-6
                sol = opt(opt_prob,
                          sens=None,
                          sensStep=fd_step,
                          storeHistory=self.hist_file,
                          hotStart=self.hotstart_file)

            else:
                msg = "SNOPT's internal finite difference can only be used with SNOPT"
                raise Exception(msg)
        else:

            # Use OpenMDAO's differentiator for the gradient
            sol = opt(opt_prob,
                      sens=self._gradfunc,
                      storeHistory=self.hist_file,
                      hotStart=self.hotstart_file)

        # Print results
        if self.options['print_results']:
            print(sol)

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        dv_dict = sol.getDVs()
        for name in indep_list:
            val = dv_dict[name]
            self.set_design_var(name, val)

        with RecordingDebugging(self.options['optimizer'], self.iter_count,
                                self) as rec:
            model._solve_nonlinear()
            rec.abs = 0.0
            rec.rel = 0.0
        self.iter_count += 1

        # Save the most recent solution.
        self.pyopt_solution = sol
        try:
            exit_status = sol.optInform['value']
            self.fail = False

            # These are various failed statuses.
            if exit_status > 2:
                self.fail = True

        except KeyError:
            # optimizers other than pySNOPT may not populate this dict
            pass

        return self.fail