Example 1
    def solve(self, params, unknowns, resids, system, metadata=None):
        """ Executes each item in the system hierarchy sequentially.

        Args
        ----
        params : `VecWrapper`
            `VecWrapper` containing parameters. (p)

        unknowns : `VecWrapper`
            `VecWrapper` containing outputs and states. (u)

        resids : `VecWrapper`
            `VecWrapper` containing residuals. (r)

        system : `System`
            Parent `System` object.

        metadata : dict, optional
            Dictionary containing execution metadata (e.g. iteration coordinate).
        """

        self.iter_count += 1
        # Metadata setup
        local_meta = create_local_meta(metadata, system.name)
        system.ln_solver.local_meta = local_meta
        update_local_meta(local_meta, (self.iter_count,))

        system.children_solve_nonlinear(local_meta)

        for recorder in self.recorders:
            recorder.raw_record(system.params, system.unknowns, system.resids, local_meta)
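
This is a run-once style solver: each child system is executed exactly once per call, and the iteration coordinate is recorded for any attached recorders. A minimal usage sketch, assuming this is OpenMDAO 1.x's RunOnce solver (the import path and model are assumptions):

    # Minimal sketch, assuming the OpenMDAO 1.x API; the model itself is
    # hypothetical and elided.
    from openmdao.api import Problem, Group
    from openmdao.solvers.run_once import RunOnce

    prob = Problem(root=Group())
    # ... add and connect components on prob.root here ...
    prob.root.nl_solver = RunOnce()  # the solve() above runs each child once
    prob.setup()
    prob.run()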
Example 2
    def run(self, problem):
        """ Runs the driver. This function should be overriden when inheriting.

        Args
        ----
        problem : `Problem`
            Our parent `Problem`.
        """
        system = problem.root

        # Metadata Setup
        self.iter_count += 1
        metadata = create_local_meta(None, 'Driver')
        system.ln_solver.local_meta = metadata
        update_local_meta(metadata, (self.iter_count,))

        # Solve the system once and record results.
        system.solve_nonlinear(metadata=metadata)
        for recorder in self.recorders:
            recorder.raw_record(system.params, system.unknowns, system.resids, metadata)
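
This base run() evaluates the model exactly once and records the result; drivers that iterate (optimizers, DOE drivers) override it, as the docstring says. A sketch of such an override, assuming the OpenMDAO 1.x Driver base class and the record_util helpers used above (the class name and import paths are assumptions):

    # Minimal sketch, assuming OpenMDAO 1.x; `TwoRunDriver` is hypothetical.
    from openmdao.api import Driver
    from openmdao.util.record_util import create_local_meta, update_local_meta

    class TwoRunDriver(Driver):
        """Runs the model twice, e.g. to exercise restart behavior."""
        def run(self, problem):
            for _ in range(2):
                self.iter_count += 1
                metadata = create_local_meta(None, 'TwoRunDriver')
                update_local_meta(metadata, (self.iter_count,))
                problem.root.solve_nonlinear(metadata=metadata)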
Example 3
    def run(self, problem):
        """pyOpt execution. Note that pyOpt controls the execution, and the
        individual optimizers (i.e., SNOPT) control the iteration.

        Args
        ----
        problem : `Problem`
            Our parent `Problem`.
        """

        self.pyopt_solution = None
        rel = problem.root._relevance

        # Metadata Setup
        self.metadata = create_local_meta(None, self.options['optimizer'])
        self.iter_count = 0
        update_local_meta(self.metadata, (self.iter_count,))

        # Initial Run
        problem.root.solve_nonlinear(metadata=self.metadata)

        opt_prob = Optimization(self.options['title'], self.objfunc)

        # Add all parameters
        param_meta = self.get_param_metadata()
        param_list = list(param_meta.keys())
        param_vals = self.get_params()
        for name, meta in param_meta.items():
            opt_prob.addVarGroup(name, meta['size'], type='c',
                                 value=param_vals[name],
                                 lower=meta['low'], upper=meta['high'])

        # Add all objectives
        objs = self.get_objectives()
        self.quantities = list(objs.keys())
        for name in objs:
            opt_prob.addObj(name)

        # Calculate and save gradient for any linear constraints.
        lcons = self.get_constraints(lintype='linear').values()
        if len(lcons) > 0:
            self.lin_jacs = problem.calc_gradient(param_list, lcons,
                                                  return_format='dict')
            #print("Linear Gradient")
            #print(self.lin_jacs)

        # Add all equality constraints
        econs = self.get_constraints(ctype='eq', lintype='nonlinear')
        con_meta = self.get_constraint_metadata()
        self.quantities += list(econs.keys())
        for name in econs:
            size = con_meta[name]['size']
            lower = np.zeros((size))
            upper = np.zeros((size))

            # Sparsify Jacobian via relevance
            wrt = rel.relevant[name].intersection(param_list)

            if con_meta[name]['linear'] is True:
                opt_prob.addConGroup(name, size, lower=lower, upper=upper,
                                     linear=True, wrt=wrt,
                                     jac=self.lin_jacs[name])
            else:
                opt_prob.addConGroup(name, size, lower=lower, upper=upper,
                                     wrt=wrt)

        # Add all inequality constraints
        incons = self.get_constraints(ctype='ineq', lintype='nonlinear')
        self.quantities += list(incons.keys())
        for name in incons:
            size = con_meta[name]['size']
            upper = np.zeros((size))

            # Sparsify Jacobian via relevance
            wrt = rel.relevant[name].intersection(param_list)

            if con_meta[name]['linear'] is True:
                opt_prob.addConGroup(name, size, upper=upper, linear=True,
                                     wrt=wrt, jac=self.lin_jacs[name])
            else:
                opt_prob.addConGroup(name, size, upper=upper, wrt=wrt)

        # TODO: Support double-sided constraints in openMDAO
        # Add all double_sided constraints
        #for name, con in self.get_2sided_constraints().items():
            #size = con_meta[name]['size']
            #upper = con.high * np.ones((size))
            #lower = con.low * np.ones((size))
            #name = '%s.out0' % con.pcomp_name
            #if con.linear is True:
                #opt_prob.addConGroup(name,
                #size, upper=upper, lower=lower,
                                     #linear=True, wrt=param_list,
                                     #jac=self.lin_jacs[name])
            #else:
                #opt_prob.addConGroup(name,
                #                     size, upper=upper, lower=lower)

        # Instantiate the requested optimizer
        optimizer = self.options['optimizer']
        try:
            import pyoptsparse
            opt_class = getattr(pyoptsparse, optimizer)
        except (ImportError, AttributeError):
            msg = "Optimizer %s is not available in this installation." % \
                   optimizer
            raise ImportError(msg)

        opt = opt_class()

        # Set optimization options
        for option, value in self.opt_settings.items():
            opt.setOption(option, value)

        self._problem = problem

        # Execute the optimization problem
        if self.options['pyopt_diff'] is True:
            # Use pyOpt's internal finite difference
            fd_step = problem.root.fd_options['step_size']
            sol = opt(opt_prob, sens='FD', sensStep=fd_step)
        else:
            # Use OpenMDAO's differentiator for the gradient
            sol = opt(opt_prob, sens=self.gradfunc)

        self._problem = None

        # Print results
        if self.options['print_results'] is True:
            print(sol)

        # Pull optimal parameters back into framework and re-run, so that
        # framework is left in the right final state
        dv_dict = sol.getDVs()
        for name in self.get_params():
            val = dv_dict[name]
            self.set_param(name, val)

        problem.root.solve_nonlinear(metadata=self.metadata)

        # Save the most recent solution.
        self.pyopt_solution = sol
        try:
            exit_status = sol.optInform['value']
            self.exit_flag = 1
            if exit_status > 2:  # bad
                self.exit_flag = 0
        except KeyError:  # nothing is here, so something bad happened!
            self.exit_flag = 0
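
In normal use this run() is not called directly: the driver is attached to a Problem, given design variables, an objective, and constraints, and prob.run() does the rest. A sketch under the param-era OpenMDAO 1.x API shown above (later 1.x releases renamed add_param to add_desvar; the toy model is an assumption):

    # Minimal sketch, assuming OpenMDAO 1.x with pyoptsparse installed.
    from openmdao.api import Problem, Group, IndepVarComp, ExecComp
    from openmdao.drivers.pyoptsparse_driver import pyOptSparseDriver

    prob = Problem(root=Group())
    prob.root.add('p', IndepVarComp('x', 3.0))
    prob.root.add('comp', ExecComp('f = (x - 2.0)**2'))
    prob.root.connect('p.x', 'comp.x')

    prob.driver = pyOptSparseDriver()
    prob.driver.options['optimizer'] = 'SLSQP'
    prob.driver.add_param('p.x', low=-10.0, high=10.0)
    prob.driver.add_objective('comp.f')

    prob.setup()
    prob.run()
    print(prob['p.x'])  # should approach 2.0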
Example 4
    def solve(self, params, unknowns, resids, system, metadata=None):
        """ Solves the system using a Netwon's Method.

        Args
        ----
        params : `VecWrapper`
            `VecWrapper` containing parameters. (p)

        unknowns : `VecWrapper`
            `VecWrapper` containing outputs and states. (u)

        resids : `VecWrapper`
            `VecWrapper` containing residuals. (r)

        system : `System`
            Parent `System` object.

        metadata : dict, optional
            Dictionary containing execution metadata (e.g. iteration coordinate).
        """

        atol = self.options['atol']
        rtol = self.options['rtol']
        maxiter = self.options['maxiter']
        ls_atol = self.options['ls_atol']
        ls_rtol = self.options['ls_rtol']
        ls_maxiter = self.options['ls_maxiter']
        alpha = self.options['alpha']

        # Metadata setup
        self.iter_count = 0
        ls_itercount = 0
        local_meta = create_local_meta(metadata, system.name)
        system.ln_solver.local_meta = local_meta
        update_local_meta(local_meta, (self.iter_count, ls_itercount))

        # Perform an initial run to propagate sources to targets.
        system.children_solve_nonlinear(local_meta)
        system.apply_nonlinear(params, unknowns, resids)

        f_norm = resids.norm()
        f_norm0 = f_norm

        if self.options['iprint'] > 0:
            self.print_norm('NEWTON', local_meta, 0, f_norm, f_norm0)

        arg = system.drmat[None]
        result = system.dumat[None]

        alpha_base = alpha
        while self.iter_count < maxiter and f_norm > atol and \
                f_norm/f_norm0 > rtol:

            # Linearize Model
            system.jacobian(params, unknowns, resids)

            # Calculate direction to take step
            arg.vec[:] = resids.vec[:]
            system.solve_linear(system.dumat, system.drmat, [None], mode='fwd')

            unknowns.vec[:] += alpha*result.vec[:]

            # Metadata update
            self.iter_count += 1
            ls_itercount = 0
            update_local_meta(local_meta, (self.iter_count, ls_itercount))

            # Just evaluate the model with the new points
            system.children_solve_nonlinear(local_meta)
            system.apply_nonlinear(params, unknowns, resids, local_meta)

            for recorder in self.recorders:
                recorder.raw_record(params, unknowns, resids, local_meta)

            f_norm = resids.norm()
            if self.options['iprint'] > 0:
                self.print_norm('NEWTON', local_meta, self.iter_count, f_norm, f_norm0)

            # Backtracking Line Search
            while ls_itercount < ls_maxiter and \
                    f_norm > ls_atol and \
                    f_norm/f_norm0 > ls_rtol:

                alpha *= 0.5
                unknowns.vec[:] -= alpha*result.vec[:]
                ls_itercount += 1

                # Metadata update
                update_local_meta(local_meta, (self.iter_count, ls_itercount))

                # Just evaluate the model with the new points
                system.children_solve_nonlinear(local_meta)
                system.apply_nonlinear(params, unknowns, resids, local_meta)

                for recorder in self.recorders:
                    recorder.raw_record(params, unknowns, resids, local_meta)

                f_norm = resids.norm()
                if self.options['iprint'] > 1:
                    self.print_norm('BK_TKG', local_meta, ls_itercount, f_norm,
                                    f_norm/f_norm0, indent=1, solver='LS')

            # Reset backtracking
            alpha = alpha_base

            for recorder in self.recorders:
                recorder.raw_record(params, unknowns, resids, local_meta)

        # Need to make sure the whole workflow is executed at the final
        # point, not just evaluated.
        #self.iter_count += 1
        #update_local_meta(local_meta, (self.iter_count, 0))
        #system.children_solve_nonlinear(local_meta)

        if self.options['iprint'] > 0:
            self.print_norm('NEWTON', local_meta, self.iter_count, f_norm,
                            f_norm0, msg='Converged')
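
Each outer iteration linearizes the model, solves the linear system for a step du, applies u += alpha*du, and then halves alpha while the step is unacceptable; each halving retracts half of the remaining step, so after k halvings the net update is (alpha/2**k)*du. A standalone NumPy analogue of the same scheme (not the OpenMDAO API; the acceptance test here is the simpler "residual must not grow" rule rather than the ls_atol/ls_rtol tolerances above):

    # Plain-NumPy sketch of damped Newton with backtracking.
    import numpy as np

    def newton_backtrack(residual, jacobian, u, atol=1e-10, rtol=1e-10,
                         maxiter=20, alpha0=1.0, ls_maxiter=10):
        r = residual(u)
        f_norm0 = f_norm = np.linalg.norm(r)
        for _ in range(maxiter):
            if f_norm <= atol or f_norm / f_norm0 <= rtol:
                break
            f_prev = f_norm
            du = np.linalg.solve(jacobian(u), -r)  # Newton step: J*du = -r
            alpha = alpha0
            u = u + alpha * du
            r = residual(u)
            f_norm = np.linalg.norm(r)
            ls_it = 0
            # Backtrack: each halving leaves u = u_old + (alpha0/2**k)*du,
            # the same bookkeeping as the solver above.
            while ls_it < ls_maxiter and f_norm > f_prev:
                alpha *= 0.5
                u = u - alpha * du
                r = residual(u)
                f_norm = np.linalg.norm(r)
                ls_it += 1
        return u

    # Example: solve x**2 = 2 starting from x = 1.
    root = newton_backtrack(lambda u: np.array([u[0]**2 - 2.0]),
                            lambda u: np.array([[2.0 * u[0]]]),
                            np.array([1.0]))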
Example 5
    def solve(self, params, unknowns, resids, system, metadata=None):
        """ Solves the system using Gauss Seidel.

        Args
        ----
        params : `VecWrapper`
            `VecWrapper` containing parameters. (p)

        unknowns : `VecWrapper`
            `VecWrapper` containing outputs and states. (u)

        resids : `VecWrapper`
            `VecWrapper` containing residuals. (r)

        system : `System`
            Parent `System` object.

        metadata : dict, optional
            Dictionary containing execution metadata (e.g. iteration coordinate).
        """

        atol = self.options['atol']
        rtol = self.options['rtol']
        maxiter = self.options['maxiter']
        iprint = self.options['iprint']

        # Initial run
        self.iter_count = 1

        # Metadata setup
        local_meta = create_local_meta(metadata, system.name)
        system.ln_solver.local_meta = local_meta
        update_local_meta(local_meta, (self.iter_count,))

        # Initial Solve
        system.children_solve_nonlinear(local_meta)

        for recorder in self.recorders:
            recorder.raw_record(params, unknowns, resids, local_meta)

        # Bail early if the user wants to.
        if maxiter == 1:
            return

        resids = system.resids

        # Evaluate Norm
        system.apply_nonlinear(params, unknowns, resids)
        normval = resids.norm()
        basenorm = normval if normval > atol else 1.0

        if self.options['iprint'] > 0:
            self.print_norm('NLN_GS', local_meta, 0, normval, basenorm)

        while self.iter_count < maxiter and \
                normval > atol and \
                normval/basenorm > rtol:

            # Metadata update
            self.iter_count += 1
            update_local_meta(local_meta, (self.iter_count,))

            # Runs an iteration
            system.children_solve_nonlinear(local_meta)
            for recorder in self.recorders:
                recorder.raw_record(params, unknowns, resids, local_meta)

            # Evaluate Norm
            system.apply_nonlinear(params, unknowns, resids)
            normval = resids.norm()

            if self.options['iprint'] > 0:
                self.print_norm('NLN_GS', local_meta, self.iter_count, normval,
                                basenorm)

        if self.options['iprint'] > 0:
            self.print_norm('NLN_GS', local_meta, self.iter_count, normval,
                            basenorm, msg='Converged')
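
The loop body is just "run every child in sequence, then measure the residual norm", i.e. nonlinear block Gauss-Seidel, the standard fixed-point iteration for converging a group with feedback coupling. Attaching it looks like this sketch, assuming this is OpenMDAO 1.x's NLGaussSeidel class (import path and group are assumptions):

    # Minimal sketch, assuming OpenMDAO 1.x; components with a feedback
    # loop would be added to `coupled` before setup.
    from openmdao.api import Group, NLGaussSeidel

    coupled = Group()
    coupled.nl_solver = NLGaussSeidel()
    coupled.nl_solver.options['atol'] = 1e-10
    coupled.nl_solver.options['rtol'] = 1e-10
    coupled.nl_solver.options['maxiter'] = 100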
Example 6
    def run(self, problem):
        """Optimize the problem using your choice of Scipy optimizer.

        Args
        ----
        problem : `Problem`
            Our parent `Problem`.
        """

        # Metadata Setup
        opt = self.options['optimizer']
        self.metadata = create_local_meta(None, opt)
        self.iter_count = 0
        update_local_meta(self.metadata, (self.iter_count,))

        # Initial Run
        problem.root.solve_nonlinear(metadata=self.metadata)

        pmeta = self.get_param_metadata()
        self.objs = list(self.get_objectives().keys())
        con_meta = self.get_constraint_metadata()
        self.cons = list(con_meta.keys())

        self.opt_settings['maxiter'] = self.options['maxiter']
        self.opt_settings['disp'] = self.options['disp']

        # Size Problem
        nparam = 0
        for param in pmeta.values():
            nparam += param['size']
        x_init = np.zeros(nparam)

        # Initial Parameters
        i = 0
        use_bounds = (opt in _bounds_optimizers)
        if use_bounds:
            bounds = []
        else:
            bounds = None

        for name, val in self.get_params().items():
            size = pmeta[name]['size']
            x_init[i:i+size] = val
            i += size

            # Bounds if our optimizer supports them
            if use_bounds:
                meta_low = pmeta[name]['low']
                meta_high = pmeta[name]['high']
                for j in range(0, size):

                    if isinstance(meta_low, np.ndarray):
                        p_low = meta_low[j]
                    else:
                        p_low = meta_low

                    if isinstance(meta_high, np.ndarray):
                        p_high = meta_high[j]
                    else:
                        p_high = meta_high

                    bounds.append((p_low, p_high))

        # Constraints
        constraints = []
        i = 0
        if opt in _constraint_optimizers:
            for name, meta in con_meta.items():
                size = meta['size']
                for j in range(0, size):
                    con_dict = {}
                    con_dict['type'] = meta['ctype']
                    con_dict['fun'] = self.confunc
                    if opt in _constraint_grad_optimizers:
                        con_dict['jac'] = self.congradfunc
                    con_dict['args'] = [name, j]
                    constraints.append(con_dict)
                self.con_idx[name] = i
                i += size

        # Provide gradients for optimizers that support it
        if opt in _gradient_optimizers:
            jac = self.gradfunc
        else:
            jac = None

        # optimize
        self._problem = problem
        result = minimize(self.objfunc, x_init,
                          #args=(),
                          method=opt,
                          jac=jac,
                          #hess=None,
                          #hessp=None,
                          bounds=bounds,
                          constraints=constraints,
                          tol=self.options['tol'],
                          #callback=None,
                          options=self.opt_settings)

        self._problem = None
        self.result = result

        print('Optimization Complete')
        print('-'*35)
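
Everything above funnels into a single scipy.optimize.minimize call: a flat initial vector, one (low, high) pair per scalar design variable, and one constraint dict per scalar constraint row. A standalone call with the same shape of arguments (the toy objective and constraint are assumptions):

    # Standalone sketch of the scipy.optimize.minimize call this driver builds.
    import numpy as np
    from scipy.optimize import minimize

    def objfunc(x):
        return (x[0] - 2.0)**2 + (x[1] + 1.0)**2

    # 'ineq' means fun(x) >= 0 for scipy's SLSQP/COBYLA.
    constraints = [{'type': 'ineq', 'fun': lambda x: 10.0 - x[0] - x[1]}]

    result = minimize(objfunc, np.zeros(2),
                      method='SLSQP',
                      bounds=[(-10.0, 10.0), (-10.0, 10.0)],
                      constraints=constraints,
                      tol=1e-8,
                      options={'maxiter': 200, 'disp': False})
    print(result.x)  # approximately [2., -1.]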