Example #1
    def optimize_openopt(self, omega_min, lower_bounds, upper_bounds, max_power, fluid_type):

        # Objective: the force resolution of the candidate cantilever (scaled to keep the solver well-conditioned)
        def optimize_force_resolution(x):
            c = cantilever_divingboard(self.freq_min, self.freq_max, *x)
            return c.force_resolution()*1e12
        
        # We always want power dissipation to be less than max_power
        def enforce_max_power(x):
            c = cantilever_divingboard(self.freq_min, self.freq_max, *x)
            return c.power_dissipation() - max_power

        # We always want the natural frequency to be greater than the minimum allowed
        def enforce_minimum_natural_frequency(x):
            c = cantilever_divingboard(self.freq_min, self.freq_max, *x)

            if fluid_type == 'vacuum':
                return omega_min - c.omega_vacuum_hz()
            elif fluid_type == 'water':
                return omega_min - c.omega_damped_hz()
            else:
                # Unknown fluid type: only require a positive vacuum frequency
                return -c.omega_vacuum_hz()
        
        # Our initial guess is based upon the existing cantilever dimensions
        initial_guess = (self.l, self.w, self.t, 
                            self.l_pr_ratio, self.w_pr_ratio, self.w_gap_ratio, self.t_pr_ratio, 
                            self.V_bias, self.N)
                        
        # Set up the solver
        p = NLP(optimize_force_resolution, initial_guess)
        p.lb = lower_bounds
        p.ub = upper_bounds
        # Characteristic scales of (l, w, t, l_pr_ratio, w_pr_ratio, w_gap_ratio, t_pr_ratio, V_bias, N)
        p.scale = (1e-6, 1e-6, 1e-9, 1, 1, 1, 1, 1e2, 1e19)
                 
        # c(x) <= 0 constraints (enable these if the solver supports nonlinear constraints)
        # p.c = (enforce_minimum_natural_frequency,
        #        enforce_max_power)

        # Solving parameters
        p.ftol = 1e-12 # one of the stopping criteria (default 1e-6)
        p.xtol = 1e-12 # one of the stopping criteria (default 1e-6)
        p.gradtol = 1e-7 # gradient tolerance
        p.contol = 1e-7 # constraint tolerance
        
        # Limits on how long the solver may run
        p.maxIter = 1e4
        p.maxFunEvals = 1e8
        p.maxTime = 1e4
        
        # p.plot = 1
        # p.debug=1
        
        # Solve with ALGENCAN (requires the external ALGENCAN bindings to be installed)
        r = p.solve('algencan')
        
        # Store the optimized design back on the cantilever's attributes
        (self.l, self.w, self.t,
            self.l_pr_ratio, self.w_pr_ratio, self.w_gap_ratio, self.t_pr_ratio,
            self.V_bias, self.N) = r.xf
        
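A minimal, self-contained sketch of the same OpenOpt pattern used above: minimize an objective under box bounds plus a c(x) <= 0 nonlinear constraint, then read the optimum from r.xf. The quadratic objective, the toy constraint, and the variable names here are illustrative assumptions, not part of the original cantilever model.

    from openopt import NLP
    import numpy as np

    def objective(x):
        # toy stand-in for the force-resolution objective: a smooth bowl with minimum at x = (1, 1)
        return np.sum((x - 1.0) ** 2)

    def constraint(x):
        # OpenOpt nonlinear constraints use the c(x) <= 0 convention
        return x[0] + x[1] - 3.0

    p = NLP(objective, np.array([0.5, 0.5]),
            lb=np.zeros(2), ub=10.0 * np.ones(2),
            c=constraint)
    p.ftol = 1e-8
    p.maxIter = 1000
    r = p.solve('ralg')   # 'ralg' ships with OpenOpt; 'algencan' needs the external solver
    print(r.xf, r.ff)     # optimal point and objective value
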
Example #2
    def max_log_marginal_likelihood(self, hyp_initial_guess, maxiter=1,
            optimization_algorithm="scipy_cg", ftol=1.0e-3, fixedHypers=None,
            use_gradient=False, logscale=False):
        """
        Set up the optimization problem in order to maximize
        the log_marginal_likelihood.

        :Parameters:

          parametric_model : Classifier
            the actual parametric model to be optimized.

          hyp_initial_guess : numpy.ndarray
            set of hyperparameters' initial values where to start
            optimization.

          optimization_algorithm : string
            actual name of the optimization algorithm. See
            http://scipy.org/scipy/scikits/wiki/NLP
            for a comprehensive/updated list of available NLP solvers.
            (Defaults to 'scipy_cg')

          ftol : float
            threshold for the stopping criterion of the solver,
            which is mapped in OpenOpt NLP.ftol
            (Defaults to 1.0e-3)

          fixedHypers : numpy.ndarray (boolean array)
            boolean vector of the same size as hyp_initial_guess;
            'False' means that the corresponding hyperparameter must
            be kept fixed (so not optimized).
            (Defaults to None, which means all hyperparameters are free, i.e. all True)

        NOTE: the maximization of log_marginal_likelihood is a non-linear
        optimization problem (NLP). This fact is confirmed by Dmitrey,
        author of OpenOpt.
        """
        self.problem = None
        self.use_gradient = use_gradient
        self.logscale = logscale # use log-scale on hyperparameters to enhance numerical stability
        self.optimization_algorithm = optimization_algorithm
        self.hyp_initial_guess = N.array(hyp_initial_guess)
        self.hyp_initial_guess_log = N.log(self.hyp_initial_guess)
        if fixedHypers is None:
            fixedHypers = N.zeros(self.hyp_initial_guess.shape[0],dtype=bool)
            pass
        self.freeHypers = N.logical_not(fixedHypers) # hyperparameters that are actually optimized
        if self.logscale:
            self.hyp_running_guess = self.hyp_initial_guess_log.copy()
        else:
            self.hyp_running_guess = self.hyp_initial_guess.copy()
            pass
        self.f_last_x = None

        def f(x):
            """
            Wrapper to the log_marginal_likelihood to be
            maximized.
            """
            # XXX EO: since some OpenOpt NLP solvers do not
            # implement lower bounds, the hyperparameter bounds are
            # implemented inside PyMVPA (see dmitrey's post on
            # [SciPy-user] 20080628).
            #
            # XXX EO: OpenOpt does not implement optimization of a
            # subset of the hyperparameters, so it is implemented here.
            #
            # XXX EO: OpenOpt does not implement a logarithmic scale
            # for the hyperparameters (to enhance numerical
            # stability), so it is implemented here.
            self.f_last_x = x.copy()
            self.hyp_running_guess[self.freeHypers] = x
            # REMOVE print "guess:",self.hyp_running_guess,x
            try:
                if self.logscale:
                    self.parametric_model.set_hyperparameters(N.exp(self.hyp_running_guess))
                else:
                    self.parametric_model.set_hyperparameters(self.hyp_running_guess)
                    pass
            except InvalidHyperparameterError:
                if __debug__: debug("MOD_SEL","WARNING: invalid hyperparameters!")
                return -N.inf
            try:
                self.parametric_model.train(self.dataset)
            except (N.linalg.linalg.LinAlgError, SL.basic.LinAlgError, ValueError):
                # Note that ValueError could be raised when Cholesky gets Inf or Nan.
                if __debug__: debug("MOD_SEL", "WARNING: Cholesky failed! Invalid hyperparameters!")
                return -N.inf
            log_marginal_likelihood = self.parametric_model.compute_log_marginal_likelihood()
            # REMOVE print log_marginal_likelihood
            return log_marginal_likelihood

        def df(x):
            """
            Proxy to the log_marginal_likelihood first
            derivative. Necessary for OpenOpt when using derivatives.
            """
            self.hyp_running_guess[self.freeHypers] = x
            # REMOVE print "df guess:",self.hyp_running_guess,x
            # XXX EO: Most of the following lines can be skipped if
            # df() is computed just after f() with the same
            # hyperparameters. The partial results obtained during f()
            # are what is needed for df(). For now, in order to avoid
            # hard-to-trace bugs, we keep this redundancy. A
            # deeper check of how OpenOpt works, or memoization,
            # should solve this issue.
            try:
                if self.logscale:
                    self.parametric_model.set_hyperparameters(N.exp(self.hyp_running_guess))
                else:
                    self.parametric_model.set_hyperparameters(self.hyp_running_guess)
                    pass
            except InvalidHyperparameterError:
                if __debug__: debug("MOD_SEL", "WARNING: invalid hyperparameters!")
                return -N.inf
            # Check whether it is possible to avoid recomputing work
            # already done in f(). According to tests and information
            # collected from the OpenOpt developers, it is quite
            # unexpected for the following test to succeed:
            if N.any(x!=self.f_last_x):
                if __debug__: debug("MOD_SEL","UNEXPECTED: recomputing train+log_marginal_likelihood.")
                try:
                    self.parametric_model.train(self.dataset)
                except (N.linalg.linalg.LinAlgError, SL.basic.LinAlgError, ValueError):
                    if __debug__: debug("MOD_SEL", "WARNING: Cholesky failed! Invalid hyperparameters!")
                    # XXX EO: which value for the gradient to return to
                    # OpenOpt when hyperparameters are wrong?
                    return N.zeros(x.size)
                log_marginal_likelihood = self.parametric_model.compute_log_marginal_likelihood() # recompute what's needed (to be safe) REMOVE IN FUTURE!
                pass
            if self.logscale:
                gradient_log_marginal_likelihood = self.parametric_model.compute_gradient_log_marginal_likelihood_logscale()
            else:
                gradient_log_marginal_likelihood = self.parametric_model.compute_gradient_log_marginal_likelihood()
                pass
            # REMOVE print "grad:",gradient_log_marginal_likelihood
            return gradient_log_marginal_likelihood[self.freeHypers]


        if self.logscale:
            # vector of hyperparameters' values where to start the search
            x0 = self.hyp_initial_guess_log[self.freeHypers]
        else:
            x0 = self.hyp_initial_guess[self.freeHypers]
            pass
        self.contol = 1.0e-20 # Constraint tolerance level
        # XXX EO: is it necessary to use contol when self.logscale is
        # True and there is no lb? Ask dmitrey.
        if self.use_gradient:
            # actual instance of the OpenOpt non-linear problem
            self.problem = NLP(f, x0, df=df, contol=self.contol, goal='maximum')
        else:
            self.problem = NLP(f, x0, contol=self.contol, goal='maximum')
            pass
        self.problem.name = "Max LogMargLikelihood"
        if not self.logscale:
            # set lower bound for hyperparameters: avoid negative
            # hyperparameters. Note: problem.n is the size of
            # hyperparameters' vector
            self.problem.lb = N.zeros(self.problem.n)+self.contol
            pass
        # max number of iterations for the optimizer.
        self.problem.maxIter = maxiter
        # check whether the derivative of log_marginal_likelihood converged to
        # zero before ending optimization
        self.problem.checkdf = True
        # set increment of log_marginal_likelihood under which the optimizer stops
        self.problem.ftol = ftol
        self.problem.iprint = _openopt_debug()
        return self.problem
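
A minimal sketch of the same OpenOpt maximization setup, outside of PyMVPA: build an NLP with goal='maximum', supply an analytic gradient via df, and keep the variables positive through a lower bound. The toy log-likelihood, its gradient, and the choice of the 'ralg' solver are illustrative assumptions, not part of the original model-selection code.

    from openopt import NLP
    import numpy as np

    def log_like(theta):
        # toy stand-in for a log marginal likelihood, maximal at theta = e
        return -np.sum((np.log(theta) - 1.0) ** 2)

    def grad_log_like(theta):
        # analytic gradient of log_like, passed to OpenOpt as df
        return -2.0 * (np.log(theta) - 1.0) / theta

    problem = NLP(log_like, np.ones(2), df=grad_log_like, goal='maximum')
    problem.lb = np.zeros(2) + 1e-20   # keep the hyperparameters strictly positive
    problem.ftol = 1e-3                # stop once improvements fall below this threshold
    problem.maxIter = 100
    result = problem.solve('ralg')
    print(result.xf, result.ff)        # optimized hyperparameters and the maximized value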