def test_grad(self):
        hyper = opt_hyper(self.gpr, {'covar':self.logtheta})[0]
        hyperparams = hyper
#        X0 = param_dict_to_list(hyperparams)
#        Ifilter_x = {'covar':SP.ones(len(X0),dtype='bool')}

        def f(x):
            # map the flat covariance parameter vector back into the
            # hyperparameter dict before evaluating the marginal likelihood
            x_ = hyperparams.copy()
            x_['covar'] = x
            rv = self.gpr.LML(x_)
            #LG.debug("L("+str(x_)+")=="+str(rv))
            if SP.isnan(rv):
                return 1E6
            return rv
        
        def df(x):
            # same mapping as in f(): x carries the covariance parameters
            x_ = hyperparams.copy()
            x_['covar'] = x
            rv = self.gpr.LMLgrad(x_)
            #convert to list
            rv = param_dict_to_list(rv)
            #LG.debug("dL("+str(x_)+")=="+str(rv))
            if SP.isnan(rv).any():
                In = SP.isnan(rv)
                rv[In] = 1E6
            return rv#[Ifilter_x]
    
        self.assertAlmostEqual(OPT.check_grad(f,df,hyper['covar']),10**-4,3)
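The assertion relies on scipy.optimize.check_grad, which returns the 2-norm of the difference between the analytic gradient and a finite-difference approximation of it. A minimal, self-contained sketch on a toy quadratic (toy_f and toy_df are illustrative names, not part of the test suite):

import numpy as np
from scipy import optimize as OPT

def toy_f(x):
    # simple smooth objective: f(x) = sum(x_i^2)
    return np.sum(x ** 2)

def toy_df(x):
    # analytic gradient of the objective: df/dx_i = 2*x_i
    return 2.0 * x

x0 = np.array([0.3, -1.2, 0.7])
# check_grad returns ||analytic - finite-difference||_2; values close to zero
# mean the gradient implementation is consistent with the objective
err = OPT.check_grad(toy_f, toy_df, x0)
print(err)  # expected to be around 1e-6 or smaller for this smooth problem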
Example 2
    def test_grad(self):
        hyper = opt_hyper(self.gpr, {'covar': self.logtheta})[0]
        hyperparams = hyper

        #        X0 = param_dict_to_list(hyperparams)
        #        Ifilter_x = {'covar':SP.ones(len(X0),dtype='bool')}

        def f(x):
            # map the flat covariance parameter vector back into the
            # hyperparameter dict before evaluating the marginal likelihood
            x_ = hyperparams.copy()
            x_['covar'] = x
            rv = self.gpr.LML(x_)
            #LG.debug("L("+str(x_)+")=="+str(rv))
            if SP.isnan(rv):
                return 1E6
            return rv

        def df(x):
            # same mapping as in f(): x carries the covariance parameters
            x_ = hyperparams.copy()
            x_['covar'] = x
            rv = self.gpr.LMLgrad(x_)
            #convert to list
            rv = param_dict_to_list(rv)
            #LG.debug("dL("+str(x_)+")=="+str(rv))
            if SP.isnan(rv).any():
                In = SP.isnan(rv)
                rv[In] = 1E6
            return rv  #[Ifilter_x]

        self.assertAlmostEqual(OPT.check_grad(f, df, hyper['covar']), 10**-4,
                               3)
Example 3
def optHyper(gpr,logtheta,Ifilter=None,priors=None,maxiter=100,gradcheck=False):
    """optimize hyperparemters of gp gpr starting from gpr
    optHyper(gpr,logtheta,filter=None,prior=None)
    gpr: GP regression classe
    logtheta: starting piont for optimization
    Ifilter  : filter index vector
    prior   : non-default prior, otherwise assume first index amplitude, last noise, rest:lengthscales
    """
    if priors is None:        # use a very crude default prior if we don't get anything else:
        priors = defaultPriors(gpr,logtheta)

    def fixlogtheta(logtheta,limit=1E3):
        """make a valid logtheta which is non-infinite and non-0"""
        rv      = logtheta.copy()
        I_upper = logtheta>limit
        I_lower = logtheta<-limit
        rv[I_upper] = +limit
        rv[I_lower] = -limit
        return rv

    def checklogtheta(logtheta,limit=1E3):
        """make a valid logtheta which is non-infinite and non-0"""
        I_upper = logtheta>limit
        I_lower = logtheta<-limit
        return not (I_upper.any() or I_lower.any())
        
    #TODO: mean-function
    def f(logtheta):
        #logtheta_ = fixlogtheta(logtheta)
        logtheta_ = logtheta
        if not checklogtheta(logtheta):
            print logtheta
            #make the optimizer/sampler search somewhere else
            return 1E6

        rv =  gpr.lMl(logtheta_,lml=True,dlml=False,priors=priors)
        LG.debug("L("+str(logtheta_)+")=="+str(rv))
        if isnan(rv):
            return 1E6
        return rv
    def df(logtheta):
        #logtheta_ = fixlogtheta(logtheta)
        logtheta_ = logtheta
        if not checklogtheta(logtheta):
            #make the optimizer/sampler search somewhere else
            print logtheta
            return zeros_like(logtheta_)
        rv =  gpr.lMl(logtheta_,lml=False,dlml=True,priors=priors)
        LG.debug("dL("+str(logtheta_)+")=="+str(rv))
        #mask out filtered dimensions
        if Ifilter is not None:
            rv = rv*Ifilter
        if isnan(rv).any():
            In = isnan(rv)
            rv[In] = 1E6
        return rv

    plotit = False  # set to True to plot the objective along one dimension for debugging
    if plotit:
        X = arange(0.001,0.05,0.001)
        Y = zeros(size(X))
        dY = zeros(size(X))
        k=2
        theta = logtheta.copy()  # work on a copy so logtheta is not modified
        for i in range(len(X)):
            theta[k] = log(X[i])
            Y[i] = f(theta)
            dY[i] = df(theta)[k]
        plot(X,Y)
        hold(True)
        plot(X,dY)
        show()

    #start-parameters
    theta0 = logtheta

    LG.info("startparameters for opt:"+str(exp(logtheta)))
    if gradcheck:
        LG.info("check_grad:" + str(OPT.check_grad(f,df,theta0)))
        raw_input()
    LG.info("start optimization")
    #opt_params=OPT.fmin_cg (f, theta0, fprime = df, args = (), gtol = 1.0000000000000001e-005, maxiter =maxiter, full_output = 1, disp = 1, retall = 0)
    #opt_params=OPT.fmin_ncg (f, theta0, fprime = df, fhess_p=None, fhess=None, args=(), avextol=1.0000000000000001e-04, epsilon=1.4901161193847656e-08, maxiter=maxiter, full_output=1, disp=1, retall=0)

    opt_params=OPT.fmin_bfgs(f, theta0, fprime=df, args=(), gtol=1.0000000000000001e-04, norm=inf, epsilon=1.4901161193847656e-08, maxiter=maxiter, full_output=1, disp=(0), retall=0)

    rv = opt_params[0]
    LG.info("old parameters:")
    LG.info(str(exp(logtheta)))
    LG.info("optimized parameters:")
    LG.info(str(exp(rv)))
    LG.info("grad:"+str(df(rv)))
    return rv
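optHyper only keeps the first element of the tuple returned by fmin_bfgs. A self-contained sketch on a toy quadratic (toy_f, toy_df and the theta values are illustrative) showing why opt_params[0] is the optimized parameter vector when full_output=1:

import numpy as np
from scipy import optimize as OPT

def toy_f(x):
    # toy objective with its minimum at (1, -2)
    return (x[0] - 1.0) ** 2 + (x[1] + 2.0) ** 2

def toy_df(x):
    # analytic gradient of the toy objective
    return np.array([2.0 * (x[0] - 1.0), 2.0 * (x[1] + 2.0)])

theta0 = np.zeros(2)
opt_params = OPT.fmin_bfgs(toy_f, theta0, fprime=toy_df, gtol=1e-4,
                           maxiter=100, full_output=1, disp=0)
# with full_output=1 the result is a tuple: xopt, fopt, final gradient,
# inverse-Hessian approximation, call counts and a warning flag
xopt = opt_params[0]
print(xopt)  # approximately [1.0, -2.0]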
Example 4
def optHyper(gpr,
             logtheta,
             Ifilter=None,
             priors=None,
             maxiter=100,
             gradcheck=False):
    """optimize hyperparemters of gp gpr starting from gpr
    optHyper(gpr,logtheta,filter=None,prior=None)
    gpr: GP regression classe
    logtheta: starting piont for optimization
    Ifilter  : filter index vector
    prior   : non-default prior, otherwise assume first index amplitude, last noise, rest:lengthscales
    """
    if priors is None:  # use a very crude default prior if we don't get anything else:
        priors = defaultPriors(gpr, logtheta)

    def fixlogtheta(logtheta, limit=1E3):
        """make a valid logtheta which is non-infinite and non-0"""
        rv = logtheta.copy()
        I_upper = logtheta > limit
        I_lower = logtheta < -limit
        rv[I_upper] = +limit
        rv[I_lower] = -limit
        return rv

    def checklogtheta(logtheta, limit=1E3):
        """make a valid logtheta which is non-infinite and non-0"""
        I_upper = logtheta > limit
        I_lower = logtheta < -limit
        return not (I_upper.any() or I_lower.any())

    #TODO: mean-function
    def f(logtheta):
        #logtheta_ = fixlogtheta(logtheta)
        logtheta_ = logtheta
        if not checklogtheta(logtheta):
            print logtheta
            #make the optimizer/sampler search somewhere else
            return 1E6

        rv = gpr.lMl(logtheta_, lml=True, dlml=False, priors=priors)
        LG.debug("L(" + str(logtheta_) + ")==" + str(rv))
        if isnan(rv):
            return 1E6
        return rv

    def df(logtheta):
        #logtheta_ = fixlogtheta(logtheta)
        logtheta_ = logtheta
        if not checklogtheta(logtheta):
            #make the optimizer/sampler search somewhere else
            print logtheta
            return zeros_like(logtheta_)
        rv = gpr.lMl(logtheta_, lml=False, dlml=True, priors=priors)
        LG.debug("dL(" + str(logtheta_) + ")==" + str(rv))
        #mask out filtered dimensions
        if Ifilter is not None:
            rv = rv * Ifilter
        if isnan(rv).any():
            In = isnan(rv)
            rv[In] = 1E6
        return rv

    plotit = False  # set to True to plot the objective along one dimension for debugging
    if plotit:
        X = arange(0.001, 0.05, 0.001)
        Y = zeros(size(X))
        dY = zeros(size(X))
        k = 2
        theta = logtheta.copy()  # work on a copy so logtheta is not modified
        for i in range(len(X)):
            theta[k] = log(X[i])
            Y[i] = f(theta)
            dY[i] = df(theta)[k]
        plot(X, Y)
        hold(True)
        plot(X, dY)
        show()

    #start-parameters
    theta0 = logtheta

    LG.info("startparameters for opt:" + str(exp(logtheta)))
    if gradcheck:
        LG.info("check_grad:" + str(OPT.check_grad(f, df, theta0)))
        raw_input()
    LG.info("start optimization")
    #opt_params=OPT.fmin_cg (f, theta0, fprime = df, args = (), gtol = 1.0000000000000001e-005, maxiter =maxiter, full_output = 1, disp = 1, retall = 0)
    #opt_params=OPT.fmin_ncg (f, theta0, fprime = df, fhess_p=None, fhess=None, args=(), avextol=1.0000000000000001e-04, epsilon=1.4901161193847656e-08, maxiter=maxiter, full_output=1, disp=1, retall=0)

    opt_params = OPT.fmin_bfgs(f,
                               theta0,
                               fprime=df,
                               args=(),
                               gtol=1.0000000000000001e-04,
                               norm=inf,
                               epsilon=1.4901161193847656e-08,
                               maxiter=maxiter,
                               full_output=1,
                               disp=(0),
                               retall=0)

    rv = opt_params[0]
    LG.info("old parameters:")
    LG.info(str(exp(logtheta)))
    LG.info("optimized parameters:")
    LG.info(str(exp(rv)))
    LG.info("grad:" + str(df(rv)))
    return rv
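The Ifilter argument masks gradient entries so that the corresponding hyperparameters stay at their starting values during the BFGS run. A minimal, self-contained illustration of that masking trick on a toy problem (toy_f, toy_df and Ifilter_toy are illustrative names, not part of optHyper):

import numpy as np
from scipy import optimize as OPT

def toy_f(x):
    # toy objective whose unconstrained minimum is at (3, 5)
    return (x[0] - 3.0) ** 2 + (x[1] - 5.0) ** 2

Ifilter_toy = np.array([1.0, 0.0])  # 1 = optimize this dimension, 0 = keep it fixed

def toy_df(x):
    grad = np.array([2.0 * (x[0] - 3.0), 2.0 * (x[1] - 5.0)])
    # zeroing a gradient component makes BFGS leave that coordinate untouched
    return grad * Ifilter_toy

x0 = np.array([0.0, 0.0])
xopt = OPT.fmin_bfgs(toy_f, x0, fprime=toy_df, disp=0)
print(xopt)  # x[0] moves to ~3.0 while x[1] stays at its start value 0.0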