Example #1
    def findMin(self, inffunc, meanfunc, covfunc, likfunc, x, y):
        hypInArray = self.convert_to_array(meanfunc, covfunc, likfunc)
        opt = bfgs(self.nlml, hypInArray, self.dnlml, (inffunc, meanfunc, covfunc, likfunc, x, y), maxiter=100, disp=False, full_output=True)
        optimalHyp = opt[0]
        funcValue  = opt[1]
        warnFlag   = opt[6]
        if warnFlag == 1:
            print("Maximum number of iterations exceeded.")
        elif warnFlag == 2:
            print("Gradient and/or function calls not changing.")

        if self.searchConfig:
            searchRange = self.searchConfig.meanRange + self.searchConfig.covRange + self.searchConfig.likRange 
            if not (self.searchConfig.max_trails or self.searchConfig.min_threshold):
                raise Exception('Specify at least one of the stop conditions')
            while True:
                self.trailsCounter += 1                 # increase counter
                for i in range(hypInArray.shape[0]):    # random init of hyp
                    hypInArray[i] = np.random.uniform(low=searchRange[i][0], high=searchRange[i][1])
                # keep this trial if its value is better than the optimal min value
                thisopt = bfgs(self.nlml, hypInArray, self.dnlml, (inffunc, meanfunc, covfunc, likfunc, x, y), maxiter=100, disp=False, full_output=True)
                if thisopt[1] < funcValue:
                    funcValue  = thisopt[1]
                    optimalHyp = thisopt[0]
                if self.searchConfig.max_trails and self.trailsCounter > self.searchConfig.max_trails:          # exceeded max_trails
                    return optimalHyp, funcValue
                if self.searchConfig.min_threshold and funcValue <= self.searchConfig.min_threshold:            # reached provided minimum
                    return optimalHyp, funcValue

        return optimalHyp, funcValue
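
Note on the tuple indexing above: assuming bfgs here is scipy.optimize.fmin_bfgs, full_output=True makes it return (xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag), which is why the code reads opt[0], opt[1], and opt[6]. A minimal sketch on a toy quadratic; f and fprime are stand-ins for nlml/dnlml:

import numpy as np
from scipy.optimize import fmin_bfgs as bfgs

def f(x):       # toy stand-in for the negative log marginal likelihood
    return float(np.sum((x - 3.0) ** 2))

def fprime(x):  # its analytic gradient, playing the role of dnlml
    return 2.0 * (x - 3.0)

opt = bfgs(f, np.zeros(2), fprime, maxiter=100, disp=False, full_output=True)
optimalHyp, funcValue, warnFlag = opt[0], opt[1], opt[6]
print(optimalHyp, funcValue, warnFlag)   # approx [3. 3.], ~0.0, 0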
Example #2
File: opt.py Project: mathDR/gpts
    def findMin(self, x, y):
        meanfunc = self.model.meanfunc
        covfunc = self.model.covfunc
        likfunc = self.model.likfunc
        inffunc = self.model.inffunc
        hypInArray = self._convert_to_array()

        try:
            opt = bfgs(self._nlml, hypInArray, self._dnlml, maxiter=100, disp=False, full_output=True)
            optimalHyp = deepcopy(opt[0])
            funcValue  = opt[1]
            warnFlag   = opt[6]
            if warnFlag == 1:
                print("Maximum number of iterations exceeded.")
            elif warnFlag == 2:
                print("Gradient and/or function calls not changing.")
        except Exception:
            self.errorCounter += 1
            if not self.searchConfig:
                raise Exception("Cannot use BFGS. Try other hyperparameters.")
        self.trailsCounter += 1

        if self.searchConfig:
            searchRange = self.searchConfig.meanRange + self.searchConfig.covRange + self.searchConfig.likRange 
            if not (self.searchConfig.num_restarts or self.searchConfig.min_threshold):
                raise Exception('Specify at least one of the stop conditions')
            while True:
                self.trailsCounter += 1                 # increase counter
                for i in range(hypInArray.shape[0]):    # random init of hyp
                    hypInArray[i] = np.random.uniform(low=searchRange[i][0], high=searchRange[i][1])
                # keep this trial if its value is better than the optimal min value
                try:
                    thisopt = bfgs(self._nlml, hypInArray, self._dnlml, maxiter=100, disp=False, full_output=True)
                    if thisopt[1] < funcValue:
                        funcValue  = thisopt[1]
                        optimalHyp = thisopt[0]
                except Exception:
                    self.errorCounter += 1
                if self.searchConfig.num_restarts and self.errorCounter > self.searchConfig.num_restarts/2:
                    print("[BFGS] %d out of %d trials failed during optimization" % (self.errorCounter, self.trailsCounter))
                    raise Exception("Over half of the trials failed for BFGS")
                if self.searchConfig.num_restarts and self.trailsCounter > self.searchConfig.num_restarts-1:    # exceeded num_restarts
                    print("[BFGS] %d out of %d trials failed during optimization" % (self.errorCounter, self.trailsCounter))
                    return optimalHyp, funcValue
                if self.searchConfig.min_threshold and funcValue <= self.searchConfig.min_threshold:           # reached provided minimum
                    print("[BFGS] %d out of %d trials failed during optimization" % (self.errorCounter, self.trailsCounter))
                    return optimalHyp, funcValue

        return optimalHyp, funcValue
Example #3
def min_wrapper(hyp, F, Flag, *varargin):
    # Utilize scipy.optimize functions to minimize the negative log marginal likelihood.  This is REALLY inefficient!
    x = convert_to_array(hyp)

    if Flag == 'CG':
        aa = cg(nlml, x, dnlml, (F,hyp,varargin), maxiter=100, disp=False, full_output=True)
        x = aa[0]; fx = aa[1]; funcCalls = aa[2]; gradcalls = aa[3]
        if aa[4] == 1:
            print("Maximum number of iterations exceeded.")
        elif aa[4] == 2:
            print("Gradient and/or function calls not changing.")
        gvals = dnlml(x,F,hyp,varargin)
        return convert_to_class(x,hyp), fx, gvals, funcCalls

    elif Flag == 'BFGS':
        # Use BFGS
        aa = bfgs(nlml, x, dnlml, (F,hyp,varargin), maxiter=100, disp=True, full_output=True)
        x = aa[0]; fvals = aa[1]; gvals = aa[2]; Bopt = aa[3]; funcCalls = aa[4]; gradcalls = aa[5]
        if aa[6] == 1:
            print("Maximum number of iterations exceeded.")
        elif aa[6] == 2:
            print("Gradient and/or function calls not changing.")
        return convert_to_class(x,hyp), fvals, gvals, funcCalls

    else:
        raise Exception('Incorrect usage of optimization flag in min_wrapper')
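
The 'CG' branch unpacks a shorter tuple: assuming cg is scipy.optimize.fmin_cg, full_output=True returns (xopt, fopt, func_calls, grad_calls, warnflag), matching aa[0]..aa[4] above. A minimal sketch with toy stand-ins for nlml/dnlml:

import numpy as np
from scipy.optimize import fmin_cg as cg

f = lambda x: float(np.sum(x ** 2))   # toy objective
fprime = lambda x: 2.0 * x            # toy gradient

aa = cg(f, np.ones(3), fprime, maxiter=100, disp=False, full_output=True)
x, fx, funcCalls, gradcalls, warnflag = aa
print(x, fx, warnflag)                # approx [0. 0. 0.], ~0.0, 0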
Example #4
 def train(self,eps,data,nIter = None):
     if nIter is None:
         nIter = 15000
     if self.layer == 'bottom':
         states = np.tile(data,(1,2))
     else:
         states = data
     params = np.concatenate((self.weights.ravel(),self.biasv,self.biash))
     params,f,d = bfgs(flow,params,fprime=gradFlow,args=(eps,states,self.n_visible,self.n_hidden),iprint=0,maxiter = nIter)
     num = self.n_visible*self.n_hidden
     self.weights = params[:num].reshape(self.n_visible,self.n_hidden)
     self.biasv = params[num:num+self.n_visible]
     num += self.n_visible
     self.biash = params[num:]
     self.constrainWeights()
     if self.layer == 'bottom':
         weights = self.weights[:int(self.n_visible/2)]
         biasv = self.biasv[:int(self.n_visible/2)]
         biash = self.biash
     elif self.layer == 'middle':
         weights = self.weights/2.
         biasv = self.biasv
         biash = self.biash
     elif self.layer == 'top':
         weights = self.weights[:,:int(self.n_hidden/2)]
         biasv = self.biasv
         biash = self.biash[:int(self.n_hidden/2)]
     else:
         raise ValueError
     return (weights,biasv,biash)
Example #5
 def train(self, eps, data, nIter=None):
     if nIter is None:
         nIter = 15000
     if self.layer == 'bottom':
         states = np.tile(data, (1, 2))
     else:
         states = data
     params = np.concatenate((self.weights.ravel(), self.biasv, self.biash))
     params, f, d = bfgs(flow,
                         params,
                         fprime=gradFlow,
                         args=(eps, states, self.n_visible, self.n_hidden),
                         iprint=0,
                         maxiter=nIter)
     num = self.n_visible * self.n_hidden
     self.weights = params[:num].reshape(self.n_visible, self.n_hidden)
     self.biasv = params[num:num + self.n_visible]
     num += self.n_visible
     self.biash = params[num:]
     self.constrainWeights()
     if self.layer == 'bottom':
         weights = self.weights[:int(self.n_visible / 2)]
         biasv = self.biasv[:int(self.n_visible / 2)]
         biash = self.biash
     elif self.layer == 'middle':
         weights = self.weights / 2.
         biasv = self.biasv
         biash = self.biash
     elif self.layer == 'top':
         weights = self.weights[:, :int(self.n_hidden / 2)]
         biasv = self.biasv
         biash = self.biash[:int(self.n_hidden / 2)]
     else:
         raise ValueError
     return (weights, biasv, biash)
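
Here the 3-tuple unpacking params, f, d implies bfgs is scipy.optimize.fmin_l_bfgs_b, which returns the minimizer, the final objective value, and an information dict. A minimal sketch with hypothetical stand-ins for flow/gradFlow:

import numpy as np
from scipy.optimize import fmin_l_bfgs_b as bfgs

def flow(params, eps):      # hypothetical stand-in for the flow objective
    return float(np.sum((params - eps) ** 2))

def gradFlow(params, eps):  # its gradient
    return 2.0 * (params - eps)

params, f, d = bfgs(flow, np.zeros(4), fprime=gradFlow, args=(1.5,), iprint=-1, maxiter=100)
print(params, f, d['warnflag'])   # approx [1.5 1.5 1.5 1.5], ~0.0, 0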
Example #6
def min_wrapper(hyp, F, Flag, *varargin):
    # Utilize scipy.optimize functions to minimize the negative log marginal likelihood.  This is REALLY inefficient!
    x = convert_to_array(hyp)  # Converts the hyperparameter class to an array

    if Flag == 'CG':
        aa = cg(nlml,
                x,
                dnlml, (F, hyp, varargin),
                maxiter=100,
                disp=True,
                full_output=True)
        x = aa[0]
        fx = aa[1]
        funcCalls = aa[2]
        gradcalls = aa[3]
        if aa[4] == 1:
            print("Maximum number of iterations exceeded.")
        elif aa[4] == 2:
            print("Gradient and/or function calls not changing.")
        gvals = dnlml(x, F, hyp, varargin)
        return convert_to_class(x, hyp), fx, gvals, funcCalls

    elif Flag == 'BFGS':
        # Use BFGS
        aa = bfgs(nlml,
                  x,
                  dnlml, (F, hyp, varargin),
                  maxiter=100,
                  disp=False,
                  full_output=True)
        x = aa[0]
        fvals = aa[1]
        gvals = aa[2]
        Bopt = aa[3]
        funcCalls = aa[4]
        gradcalls = aa[5]
        if aa[6] == 1:
            print("Maximum number of iterations exceeded.")
        elif aa[6] == 2:
            print("Gradient and/or function calls not changing.")
        return convert_to_class(x, hyp), fvals, gvals, funcCalls

    elif Flag == 'SCG':
        # Use SCG
        aa = scg(x, nlml, dnlml, (F, hyp, varargin), niters=40)
        x = aa[0]
        fvals = aa[1]
        gvals = dnlml(x, F, hyp, varargin)
        return convert_to_class(x, hyp), fvals, gvals

    else:
        raise Exception('Incorrect usage of optimization flag in min_wrapper')
Example #7
File: PMF_self.py Project: neerajg/sdap
def mf_solver(rows, cols, vals, rank, reg, M, N, Su, Sv, Su_root, Sv_root):
    w0 = np.random.randn((M+N)*rank)
    Su_root = sp.coo_matrix((Su_root,(range(M),range(M))),shape=(M,M))
    Sv_root = sp.coo_matrix((Sv_root,(range(N),range(N))),shape=(N,N))
    Su = sp.coo_matrix((Su,(range(M),range(M))),shape=(M,M))
    Sv = sp.coo_matrix((Sv,(range(N),range(N))),shape=(N,N))
    
    # bfgs is called without fprime or approx_grad, so f_and_g must return
    # the pair (objective, gradient).
    w = bfgs(func=f_and_g, x0=w0, args=(reg, rows, cols, vals, M, N, rank, Su, Sv, Su_root, Sv_root), maxfun=500)[0]
    W = w.reshape((M+N, rank))
    U = W[:M]
    V = W[M:]
    
    return U, V
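
A minimal sketch of that calling convention, assuming bfgs is scipy.optimize.fmin_l_bfgs_b: when neither fprime nor approx_grad is given, the objective must return the value and gradient together. The f_and_g below is a hypothetical stand-in:

import numpy as np
from scipy.optimize import fmin_l_bfgs_b as bfgs

def f_and_g(w, target):            # hypothetical joint objective/gradient
    r = w - target
    return float(np.dot(r, r)), 2.0 * r

w = bfgs(func=f_and_g, x0=np.zeros(3), args=(np.ones(3),), maxfun=500)[0]
print(w)                           # approx [1. 1. 1.]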
Example #8
File: gp.py Project: chaitan3/GPEXP
    def chooseParams(self, paramLowerBounds, paramUpperBounds, startValues, costFunction, maxiter=40, useLastParams=True):
        
        if NLOPT is True:
            local_opt = nlopt.opt(nlopt.LN_COBYLA, len(startValues))

            local_opt.set_xtol_rel(1e-3)
            local_opt.set_ftol_rel(1e-3)
            local_opt.set_ftol_abs(1e-3)
            local_opt.set_maxtime(10)
            local_opt.set_maxeval(50*len(startValues))
               
            local_opt.set_lower_bounds(paramLowerBounds)
            local_opt.set_upper_bounds(paramUpperBounds)

            try:
                local_opt.set_min_objective(costFunction)       
                sol = local_opt.optimize(startValues)
            except nlopt.RoundoffLimited:
                if useLastParams:
                    return costFunction.last_x_value, costFunction.last_f_value
                else:
                    return startValues, None
            return sol, local_opt.last_optimum_value()
        else:
            maxeval = 100
            bounds = list(zip(paramLowerBounds, paramUpperBounds))
            objFunc = lambda x: costFunction(x, np.empty(0))
            #sol = slsqp(objFunc, np.array(startValues), bounds=bounds,
            #            iter=maxeval)

            #print "startValues ", len(startValues), len(paramLowerBounds)
            def const(x):
                good = 1.0
                for ii in range(len(x)):
                    if (x[ii] < paramLowerBounds[ii]):
                        return -1.0
                    elif (x[ii] > paramUpperBounds[ii]):
                        return -1.0
                return good

            #sol = cobyla(objFunc, np.array(startValues), cons=(const), maxfun=maxeval)
            sol_bfgs = bfgs(objFunc, np.array(startValues), bounds=bounds, approx_grad=True, factr=1e10, maxfun=maxiter)
            sol = sol_bfgs[0]
            #print "sol ", np.round(sol,4)
            val = objFunc(sol)
            return sol, val
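
In the fallback branch, approx_grad=True tells fmin_l_bfgs_b to estimate the gradient by finite differences, and bounds boxes each coordinate. A minimal sketch of that call, with a toy cost function:

import numpy as np
from scipy.optimize import fmin_l_bfgs_b as bfgs

objFunc = lambda x: float(np.sum((x - 2.0) ** 2))  # toy cost, optimum at 2.0
bounds = [(0.0, 1.0), (0.0, 5.0)]                  # (low, high) per coordinate

sol_bfgs = bfgs(objFunc, np.zeros(2), bounds=bounds, approx_grad=True, factr=1e10, maxfun=40)
print(sol_bfgs[0])   # approx [1. 2.]; the first coordinate stops at its upper bound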
Example #9
def min_wrapper(hyp, F, Flag, *varargin):
    # Utilize scipy.optimize functions, scg.py, or minimize.py to
    # minimize the negative log marginal likelihood.
    
    x = convert_to_array(hyp)   # convert the hyperparameter class to an array

    if Flag == 'CG':
        aa = cg(nlml, x, dnlml, (F,hyp,varargin), maxiter=100, disp=False, full_output=True)
        x = aa[0]; fopt = aa[1]; funcCalls = aa[2]; gradcalls = aa[3]
        if aa[4] == 1:
            print("Maximum number of iterations exceeded.")
        elif aa[4] == 2:
            print("Gradient and/or function calls not changing.")
        gopt = dnlml(x,F,hyp,varargin)
        return convert_to_class(x,hyp), fopt, gopt, funcCalls

    elif Flag == 'BFGS':
        # Use BFGS
        aa = bfgs(nlml, x, dnlml, (F,hyp,varargin), maxiter=100, disp=False, full_output=True)
        x = aa[0]; fopt = aa[1]; gopt = aa[2]; Bopt = aa[3]; funcCalls = aa[4]; gradcalls = aa[5]
        if aa[6] == 1:
            print("Maximum number of iterations exceeded.")
        elif aa[6] == 2:
            print("Gradient and/or function calls not changing.")
        if isinstance(fopt, ndarray):
            fopt = fopt[0]
        return convert_to_class(x,hyp), fopt, gopt, funcCalls

    elif Flag == 'SCG':
        # use scg.py
        aa   = scg(x, nlml, dnlml, (F,hyp,varargin), niters = 100)
        hyp  = convert_to_class(aa[0],hyp)
        fopt = aa[1][-1]
        gopt = dnlml(aa[0],F,hyp,varargin)
        return hyp, fopt, gopt, len(aa[1])

    elif Flag == 'Minimize':
        # use minimize.py
        aa   = run(x, nlml, dnlml, (F,hyp,varargin), maxnumfuneval=-100)
        hyp  = convert_to_class(aa[0],hyp)
        fopt = aa[1][-1]
        gopt = dnlml(aa[0],F,hyp,varargin)
        return hyp, fopt, gopt, len(aa[1])

    else:
        raise Exception('Incorrect usage of optimization flag in min_wrapper')
Example #10
def gp_train(gp, X, y, R=None, w=None, Flag=None):
    ''' gp_train() returns the learnt hyperparameters.
    Following chapter 5.4.1 in Rasmussen and Williams: GPs for ML (2006).
    The original MATLAB implementation of the optimizer used, minimize.m,
    is copyright (C) 1999 - 2006, Carl Edward Rasmussen.
    The Python versions used here are from scipy.optimize.

    Inputs R and w are needed for XGP regression! '''

    # Build the parameter list that we will optimize
    theta = np.concatenate((gp['meantheta'],gp['covtheta']))
    if Flag == 'CG':
        aa = cg(nlml, theta, dnlml, [gp,X,y,R,w], maxiter=100, disp=False, full_output=True)
        theta = aa[0]; fvals = aa[1]; funcCalls = aa[2]; gradcalls = aa[3]
        gvals = dnlml(theta, gp, X, y, R, w)
        if aa[4] == 1:
            print("Maximum number of iterations exceeded.")
        elif aa[4] == 2:
            print("Gradient and/or function calls not changing.")
        mt = len(gp['meantheta'])
        gp['meantheta'] = theta[:mt]
        gp['covtheta']  = theta[mt:]
        return gp, fvals, gvals, funcCalls
    elif Flag == 'BFGS':
        # Use BFGS
        #aa = bfgs(nlml, theta, dnlml, [gp,X,y,R,w], maxiter=100, disp=False, full_output=True)
        aa = bfgs(nlml, theta, dnlml, [gp,X,y,R,w], maxiter=100, disp=True, full_output=True)
        theta = aa[0]; fvals = aa[1]; gvals = aa[2]; Bopt = aa[3]; funcCalls = aa[4]; gradcalls = aa[5]
        if aa[6] == 1:
            print("Maximum number of iterations exceeded.")
        elif aa[6] == 2:
            print("Gradient and/or function calls not changing.")
        mt = len(gp['meantheta'])
        gp['meantheta'] = theta[:mt]
        gp['covtheta']  = theta[mt:]
        return gp, fvals, gvals, funcCalls
    elif Flag == 'SCG':
        theta, listF = scg.scg(theta, nlml, dnlml, [gp,X,y,R,w], niters=100)
        mt = len(gp['meantheta'])
        gp['meantheta'] = theta[:mt]
        gp['covtheta']  = theta[mt:]
        return gp, listF 
    else:
        raise Exception("Need to specify a method for optimization in gp_train")
Example #11
    def train(self, inp, iterations, with_bfgs=False, grad_check_freq=None):

        def rcst(x):
            v = self._rcost(inp, params.roll(x, self.indim, self.hdim))
            print('rcst says: %s' % str(v))
            return v

        def rcstprime(x):
            return self._sparse_ae_cost_unrolled(inp, params.roll(x, self.indim, self.hdim))

        if with_bfgs:
            x0 = self._netp.unroll()
            mn,val,d = bfgs(rcst,
                            x0,
                            fprime = rcstprime,
                            factr=100,
                            maxfun=800,
                            disp=1)
            print(val)
            print(d['task'])
            print(d['warnflag'])
            print(d['grad'].sum())
            print(d['funcalls'])
            with open('autoencoder2.dat','wb') as f:
                cPickle.dump(mn,f)
            return mn
        else:
            for i in range(iterations):
                if grad_check_freq:
                    gc = i and not i % grad_check_freq
                else:
                    gc = False
                grads = self._sparse_ae_cost(inp,self._netp, check_grad = gc)
                # leave out cost, the first item
                # in the grads tuple
                self._update(inp,*grads[1:])
            return self._netp.unroll()
Example #12
    def train(self, inp, iterations, with_bfgs=False, grad_check_freq=None):
        def rcst(x):
            v = self._rcost(inp, params.roll(x, self.indim, self.hdim))
            print('rcst says: %s' % str(v))
            return v

        def rcstprime(x):
            return self._sparse_ae_cost_unrolled(
                inp, params.roll(x, self.indim, self.hdim))

        if with_bfgs:
            x0 = self._netp.unroll()
            mn, val, d = bfgs(rcst,
                              x0,
                              fprime=rcstprime,
                              factr=100,
                              maxfun=800,
                              disp=1)
            print(val)
            print(d['task'])
            print(d['warnflag'])
            print(d['grad'].sum())
            print(d['funcalls'])
            with open('autoencoder2.dat', 'wb') as f:
                cPickle.dump(mn, f)
            return mn
        else:
            for i in range(iterations):
                if grad_check_freq:
                    gc = i and not i % grad_check_freq
                else:
                    gc = False
                grads = self._sparse_ae_cost(inp, self._netp, check_grad=gc)
                # leave out cost, the first item
                # in the grads tuple
                self._update(inp, *grads[1:])
            return self._netp.unroll()
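
The keys printed from d above ('task', 'warnflag', 'grad', 'funcalls') belong to fmin_l_bfgs_b's information dict. A minimal sketch with toy stand-ins for rcst/rcstprime:

import numpy as np
from scipy.optimize import fmin_l_bfgs_b as bfgs

rcst = lambda x: float(np.sum(x ** 2))   # toy reconstruction cost
rcstprime = lambda x: 2.0 * x            # its gradient

mn, val, d = bfgs(rcst, np.ones(5), fprime=rcstprime, factr=100, maxfun=800)
print(val, d['warnflag'], d['funcalls'])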
Example #13
File: opt.py Project: s-bear/pyGPs
    def findMin(self, x, y, numIters=100):
        meanfunc = self.model.meanfunc
        covfunc = self.model.covfunc
        likfunc = self.model.likfunc
        inffunc = self.model.inffunc
        hypInArray = self._convert_to_array()

        try:
            opt = bfgs(self._nlml,
                       hypInArray,
                       self._dnlml,
                       maxiter=numIters,
                       disp=False,
                       full_output=True)
            optimalHyp = deepcopy(opt[0])
            funcValue = opt[1]
            warnFlag = opt[6]
            if warnFlag == 1:
                print("Maximum number of iterations exceeded.")
            elif warnFlag == 2:
                print("Gradient and/or function calls not changing.")
        except Exception:
            self.errorCounter += 1
            if not self.searchConfig:
                raise Exception("Cannot learn hyperparameters using BFGS.")
        self.trailsCounter += 1

        if self.searchConfig:
            searchRange = self.searchConfig.meanRange + self.searchConfig.covRange + self.searchConfig.likRange
            if not (self.searchConfig.num_restarts
                    or self.searchConfig.min_threshold):
                raise Exception('Specify at least one of the stop conditions')
            while True:
                self.trailsCounter += 1  # increase counter
                for i in range(hypInArray.shape[0]):  # random init of hyp
                    hypInArray[i] = np.random.uniform(low=searchRange[i][0],
                                                      high=searchRange[i][1])
                # keep this trial if its value is better than the optimal min value
                try:
                    thisopt = bfgs(self._nlml,
                                   hypInArray,
                                   self._dnlml,
                                   maxiter=100,
                                   disp=False,
                                   full_output=True)
                    if thisopt[1] < funcValue:
                        funcValue = thisopt[1]
                        optimalHyp = thisopt[0]
                except Exception:
                    self.errorCounter += 1
                if self.searchConfig.num_restarts and self.errorCounter > old_div(
                        self.searchConfig.num_restarts, 2):
                    print(
                        "[BFGS] %d out of %d trials failed during optimization"
                        % (self.errorCounter, self.trailsCounter))
                    raise Exception("Over half of the trials failed for BFGS")
                if self.searchConfig.num_restarts and self.trailsCounter > self.searchConfig.num_restarts - 1:  # exceeded num_restarts
                    print(
                        "[BFGS] %d out of %d trials failed during optimization"
                        % (self.errorCounter, self.trailsCounter))
                    return optimalHyp, funcValue
                if self.searchConfig.min_threshold and funcValue <= self.searchConfig.min_threshold:  # reached provided minimum
                    print(
                        "[BFGS] %d out of %d trials failed during optimization"
                        % (self.errorCounter, self.trailsCounter))
                    return optimalHyp, funcValue

        return optimalHyp, funcValue
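
The random-restart loop above reduces to a simple pattern: draw each hyperparameter uniformly from its search range, run BFGS, and keep the best minimum. A minimal self-contained sketch; nlml, dnlml, and searchRange below are toy stand-ins for the pyGPs attributes:

import numpy as np
from scipy.optimize import fmin_bfgs as bfgs

nlml = lambda h: float((h[0] - 1.0) ** 2 + (h[1] + 2.0) ** 2)
dnlml = lambda h: np.array([2.0 * (h[0] - 1.0), 2.0 * (h[1] + 2.0)])
searchRange = [(-5.0, 5.0), (-5.0, 5.0)]   # one (low, high) pair per hyperparameter

funcValue, optimalHyp = np.inf, None
for _ in range(10):                        # num_restarts
    h0 = np.array([np.random.uniform(lo, hi) for lo, hi in searchRange])
    opt = bfgs(nlml, h0, dnlml, maxiter=100, disp=False, full_output=True)
    if opt[1] < funcValue:                 # keep the best minimum so far
        funcValue, optimalHyp = opt[1], opt[0]
print(optimalHyp, funcValue)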
Example #14
    def begin(self, startValues, lbounds=[], rbounds=[], maxiter=10, disp=1):

        if NLOPT is True:
            #print "here "
            local_opt = nlopt.opt(nlopt.LN_COBYLA, len(startValues[0])*self.nDims)
            #local_opt = nlopt.opt(nlopt.LN_NELDERMEAD, len(startValues[0])*self.nDims)
            local_opt.set_ftol_rel(1e-3)
            local_opt.set_xtol_rel(1e-3)
            local_opt.set_ftol_abs(1e-5)
            #local_opt.set_maxtime(120);
            local_opt.set_maxtime(40)
            local_opt.set_maxeval(200)
            
            if len(lbounds)==0:
                local_opt.set_lower_bounds(-100.0*np.ones((len(startValues[0])*self.nDims)))
                local_opt.set_upper_bounds(100.0* np.ones((len(startValues[0])*self.nDims)))
            else:
                local_opt.set_lower_bounds(lbounds)
                local_opt.set_upper_bounds(rbounds)

            #local_opt.add_inequality_constraint(self.constraint, 0.0)
            opt = copy.copy(local_opt)
            opt.set_min_objective(self.objFunc)
            sol = []
            obj = np.zeros((len(startValues)))
            for ii in range(len(startValues)):
                #print "Start objective ", #self.objFunc(startValues[ii].reshape((len(startValues[ii])*self.nDims)))   
                pts = startValues[ii].reshape((len(startValues[ii])*self.nDims))
                sol.append(opt.optimize(pts))
                obj[ii] = self.objFunc(sol[ii], np.zeros((0))) 
            
            indBest = np.argmin(obj)    
            #print sol
            endVals = np.reshape(sol[indBest], (len(sol[indBest])//self.nDims, self.nDims))
           
            return endVals        
        else:
            if len(lbounds)==0:
                lb = -100.0*np.ones((len(startValues[0])*self.nDims))
                ub =  100.0*np.ones((len(startValues[0])*self.nDims))
                bounds = list(zip(lb, ub))
            else:
                bounds = list(zip(lbounds-1e-12, rbounds+1e-12))
            
            #print bounds

            sol = []
            obj = np.zeros((len(startValues)))
            optResults = np.zeros((len(startValues)))
            def const(x):
                good = 1.0
                for ii in range(len(x)):
                    if (x[ii] < bounds[ii][0]):
                        return -1.0
                    elif (x[ii] > bounds[ii][1]):
                        return -1.0
                return good

            for ii in range(len(startValues)):

                #nIters = 0
                def objFunc(x):
                    print(x.shape)
                    #nIters = nIters + 1
                    #print nIters
                    return self.objFunc(x, np.empty(0))
                
                sval = startValues[ii].reshape((len(startValues[ii])*self.nDims))
                #pts = slsqp(objFunc, \
                #    startValues[ii].reshape((len(startValues[ii])*self.nDims)), \
                #    bounds=bounds, acc=1e-3, iter=maxiter)
        
                sol_bfgs = bfgs(objFunc, sval, bounds=bounds, approx_grad=True, factr=1e12, pgtol=1e-3, maxfun=maxiter)
                pts = sol_bfgs[0]
                #pts = cobyla(objFunc, sval, cons=(const), maxfun=maxiter, disp=2)
                #pts = minimize(objFunc, np.array(startValues), method='Nelder-Mead')#,bounds=bounds)
                sol.append(pts)
                obj[ii] = objFunc(pts)
            
            indBest = np.argmin(obj)    
            endVals = np.reshape(sol[indBest], \
                    (len(sol[indBest])//self.nDims, self.nDims))
            return endVals
Example #15
def bfgs_update(C, u, z, rho, x0):
    n = len(x0)
    args = (C, z, u, rho, n)
    # bfgs(func, x0, args, bounds, callback): with no fprime or approx_grad,
    # l2_log must return the pair (objective, gradient).
    return bfgs(l2_log, x0, args=args, bounds=None, callback=callback)
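
A minimal sketch of this call, assuming bfgs is scipy.optimize.fmin_l_bfgs_b: the objective returns (value, gradient), and callback receives the current iterate once per iteration. l2_log_toy below is a hypothetical stand-in for l2_log:

import numpy as np
from scipy.optimize import fmin_l_bfgs_b as bfgs

def l2_log_toy(x, target):   # hypothetical stand-in for l2_log
    r = x - target
    return float(np.dot(r, r)), 2.0 * r

def callback(xk):            # called after each iteration
    print("iterate:", xk)

x, f, d = bfgs(l2_log_toy, np.zeros(2), args=(np.ones(2),), bounds=None, callback=callback)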
Example #16
        with _callback.open() as output:
            print("stableloss: {:.2e}".format(stableloss),
                  "  dataloss: {:.2e}".format(dataloss),
                  "  sparseloss: {:.2e}".format(sparseloss),
                  "momentloss: {:.2e}".format(momentloss),
                  file=output)
        return None

    callbackhookhandle = callback.register_hook(callbackhook)
    if block == 0:
        callback.save(nfi.flat_param, 'start')
    try:
        # optimize
        xopt = bfgs(nfi.f,
                    nfi.flat_param,
                    nfi.fprime,
                    gtol=2e-16,
                    maxiter=maxiter,
                    callback=callback)
        # xopt,f,d = lbfgsb(nfi.f, nfi.flat_param, nfi.fprime, m=maxiter, callback=callback, factr=1e7, pgtol=1e-8,maxiter=maxiter,iprint=0)
        np.set_printoptions(precision=2, linewidth=90)
        print("convolution moment and kernels")
        for k in range(max_order + 1):
            for j in range(k + 1):
                print(
                    (model.__getattr__('fd' + str(j) +
                                       str(k - j)).moment).data.cpu().numpy())
                print(
                    (model.__getattr__('fd' + str(j) +
                                       str(k - j)).kernel).data.cpu().numpy())
        for p in model.expr_params():
            print("SymNet parameters")
Example #17
def min_wrapper(hyp, F, Flag, *varargin):
    # Utilize scipy.optimize functions, scg.py, or minimize.py to
    # minimize the negative log marginal likelihood.

    x = convert_to_array(hyp)  # convert the hyperparameter class to an array

    if Flag == 'CG':
        aa = cg(nlml,
                x,
                dnlml, (F, hyp, varargin),
                maxiter=100,
                disp=False,
                full_output=True)
        x = aa[0]
        fopt = aa[1]
        funcCalls = aa[2]
        gradcalls = aa[3]
        if aa[4] == 1:
            print("Maximum number of iterations exceeded.")
        elif aa[4] == 2:
            print("Gradient and/or function calls not changing.")
        gopt = dnlml(x, F, hyp, varargin)
        return convert_to_class(x, hyp), fopt, gopt, funcCalls

    elif Flag == 'BFGS':
        # Use BFGS
        aa = bfgs(nlml,
                  x,
                  dnlml, (F, hyp, varargin),
                  maxiter=100,
                  disp=False,
                  full_output=True)
        x = aa[0]
        fopt = aa[1]
        gopt = aa[2]
        Bopt = aa[3]
        funcCalls = aa[4]
        gradcalls = aa[5]
        if aa[6] == 1:
            print("Maximum number of iterations exceeded.")
        elif aa[6] == 2:
            print("Gradient and/or function calls not changing.")
        if isinstance(fopt, ndarray):
            fopt = fopt[0]
        return convert_to_class(x, hyp), fopt, gopt, funcCalls

    elif Flag == 'SCG':
        # use scg.py
        aa = scg(x, nlml, dnlml, (F, hyp, varargin), niters=100)
        hyp = convert_to_class(aa[0], hyp)
        fopt = aa[1][-1]
        gopt = dnlml(aa[0], F, hyp, varargin)
        return hyp, fopt, gopt, len(aa[1])

    elif Flag == 'Minimize':
        # use minimize.py
        aa = run(x, nlml, dnlml, (F, hyp, varargin), maxnumfuneval=-100)
        hyp = convert_to_class(aa[0], hyp)
        fopt = aa[1][-1]
        gopt = dnlml(aa[0], F, hyp, varargin)
        return hyp, fopt, gopt, len(aa[1])

    else:
        raise Exception('Incorrect usage of optimization flag in min_wrapper')
Example #18
    def begin(self, startValues, lbounds=[], rbounds=[], maxiter=10, disp=1):

        if NLOPT is True:
            #print "here "
            local_opt = nlopt.opt(nlopt.LN_COBYLA,
                                  len(startValues[0]) * self.nDims)
            #local_opt = nlopt.opt(nlopt.LN_NELDERMEAD, len(startValues[0])*self.nDims)
            local_opt.set_ftol_rel(1e-3)
            local_opt.set_xtol_rel(1e-3)
            local_opt.set_ftol_abs(1e-3)
            #local_opt.set_ftol_abs(1e-5)

            #local_opt.set_maxtime(120);
            local_opt.set_maxtime(40)
            local_opt.set_maxeval(200)

            if len(lbounds) == 0:
                local_opt.set_lower_bounds(-100.0 * np.ones(
                    (len(startValues[0]) * self.nDims)))
                local_opt.set_upper_bounds(100.0 * np.ones(
                    (len(startValues[0]) * self.nDims)))
            else:
                local_opt.set_lower_bounds(lbounds)
                local_opt.set_upper_bounds(rbounds)

            #local_opt.add_inequality_constraint(self.constraint, 0.0)
            opt = copy.copy(local_opt)
            opt.set_min_objective(self.objFunc)
            sol = []
            obj = np.zeros((len(startValues)))
            for ii in range(len(startValues)):
                #print "Start objective ", #self.objFunc(startValues[ii].reshape((len(startValues[ii])*self.nDims)))
                pts = startValues[ii].reshape(
                    (len(startValues[ii]) * self.nDims))
                sol.append(opt.optimize(pts))
                obj[ii] = self.objFunc(sol[ii], np.zeros((0)))

            indBest = np.argmin(obj)
            #print sol
            endVals = np.reshape(sol[indBest],
                                 (len(sol[indBest]) // self.nDims, self.nDims))

            return endVals
        else:
            if len(lbounds) == 0:
                lb = -100.0 * np.ones((len(startValues[0]) * self.nDims))
                ub = 100.0 * np.ones((len(startValues[0]) * self.nDims))
                bounds = list(zip(lb, ub))
            else:
                bounds = list(zip(lbounds - 1e-12, rbounds + 1e-12))

            #print bounds

            sol = []
            obj = np.zeros((len(startValues)))
            optResults = np.zeros((len(startValues)))

            def const(x):
                good = 1.0
                for ii in range(len(x)):
                    if (x[ii] < bounds[ii][0]):
                        return -1.0
                    elif (x[ii] > bounds[ii][1]):
                        return -1.0
                return good

            for ii in range(len(startValues)):

                #nIters = 0
                def objFunc(x):
                    print(x.shape)
                    #nIters = nIters + 1
                    #print nIters
                    return self.objFunc(x, np.empty(0))

                sval = startValues[ii].reshape(
                    (len(startValues[ii]) * self.nDims))
                #pts = slsqp(objFunc, \
                #    startValues[ii].reshape((len(startValues[ii])*self.nDims)), \
                #    bounds=bounds, acc=1e-3, iter=maxiter)

                sol_bfgs = bfgs(objFunc,
                                sval,
                                bounds=bounds,
                                approx_grad=True,
                                factr=1e12,
                                pgtol=1e-3,
                                maxfun=maxiter)
                pts = sol_bfgs[0]
                #pts = cobyla(objFunc, sval, cons=(const), maxfun=maxiter, disp=2)
                #pts = minimize(objFunc, np.array(startValues), method='Nelder-Mead')#,bounds=bounds)
                sol.append(pts)
                obj[ii] = objFunc(pts)

            indBest = np.argmin(obj)
            endVals = np.reshape(sol[indBest], \
                    (len(sol[indBest])//self.nDims, self.nDims))
            return endVals
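
The two-argument convention objFunc(x, np.empty(0)) above comes from NLopt: an NLopt objective has the signature f(x, grad), where grad is written in place by gradient-based algorithms and ignored by derivative-free ones such as LN_COBYLA. A minimal sketch of the NLOPT branch with a toy objective:

import nlopt
import numpy as np

def objFunc(x, grad):                  # NLopt-style objective; grad unused by COBYLA
    return float(np.sum((x - 0.5) ** 2))

opt = nlopt.opt(nlopt.LN_COBYLA, 2)    # derivative-free local optimizer
opt.set_min_objective(objFunc)
opt.set_lower_bounds(-100.0 * np.ones(2))
opt.set_upper_bounds(100.0 * np.ones(2))
opt.set_ftol_rel(1e-3)
sol = opt.optimize(np.zeros(2))
print(sol, opt.last_optimum_value())   # approx [0.5 0.5], ~0.0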