Example n. 1
 def find_min_h_brent(self, B, dtau_init, tol=5E-2, skipIfLower=False, 
                      taus=[], hs=[], trybracket=True):
     def f(tau, *args):
         if tau == 0:
             return self.h.real
             
         try:
             i = taus.index(tau)
             return hs[i]
         except ValueError:
             for s in range(self.q):
                 self.A[s] = A0[s] - tau * B[s]
             
             self.calc_lr()
             self.calc_AA()
             
             h = self.expect_2s(self.h_nn)
             
             print (tau, h.real)
             
             res = h.real
             
             taus.append(tau)
             hs.append(res)
             
             return res
     
     A0 = self.A.copy()
     
     if skipIfLower:
         if f(dtau_init) < self.h.real:
             return dtau_init
     
     fb_brack = (dtau_init * 0.9, dtau_init * 1.1)
     if trybracket:
         brack = (dtau_init * 0.1, dtau_init, dtau_init * 2.0)
     else:
         brack = fb_brack
             
     try:
         tau_opt = opti.brent(f, 
                            brack=brack, 
                            tol=tol,
                            maxiter=20)
     except ValueError:
         print("Bracketing attempt failed...")
         tau_opt = opti.brent(f, 
                            brack=fb_brack, 
                            tol=tol,
                            maxiter=20)
     
     self.A = A0
     
     return tau_opt
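A note on the pattern above (an editorial sketch, not part of the source): optimize.brent raises ValueError when a 3-point bracket does not satisfy f(xb) < f(xa) and f(xb) < f(xc), which is exactly what the try/except fallback to the 2-point bracket fb_brack handles. A stripped-down illustration:

from scipy import optimize

def f(x):
    return (x - 0.03) ** 2

brack = (0.001, 0.01, 0.02)   # invalid triple: f(0.01) is not below f(0.02)
fb_brack = (0.009, 0.011)     # fallback: plain downhill search from two points
try:
    tau_opt = optimize.brent(f, brack=brack, maxiter=20)
except ValueError:
    print("Bracketing attempt failed...")
    tau_opt = optimize.brent(f, brack=fb_brack, maxiter=20)
print(tau_opt)  # ~0.03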
Example n. 2
    def test_brent(self):
        x = optimize.brent(self.fun)
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.brent(self.fun, brack=(-3, -2))
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.brent(self.fun, full_output=True)
        assert_allclose(x[0], self.solution, atol=1e-6)

        x = optimize.brent(self.fun, brack=(-15, -1, 15))
        assert_allclose(x, self.solution, atol=1e-6)
Example n. 3
    def test_brent(self):
        """ brent algorithm
        """
        x = optimize.brent(lambda x: (x-1.5)**2-0.8)
        err1 = abs(x - 1.5)
        x = optimize.brent(lambda x: (x-1.5)**2-0.8, brack = (-3,-2))
        err2 = abs(x - 1.5)
        x = optimize.brent(lambda x: (x-1.5)**2-0.8, full_output=True)
        err3 = abs(x[0] - 1.5)
        x = optimize.brent(lambda x: (x-1.5)**2-0.8, brack = (-15,-1,15))
        err4 = abs(x - 1.5)

        assert max((err1,err2,err3,err4)) < 1e-6
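The two bracket forms exercised in these tests behave differently, a point worth spelling out since both appear throughout this page: a 2-tuple brack is only a pair of starting points from which brent searches downhill, while a 3-tuple (xa, xb, xc) must already bracket the minimum with f(xb) < f(xa) and f(xb) < f(xc). A minimal check:

from scipy import optimize

f = lambda x: (x - 1.5) ** 2 - 0.8

# 2-tuple: starting interval only; brent walks downhill from it
print(optimize.brent(f, brack=(-3, -2)))       # -> ~1.5

# 3-tuple: must already bracket the minimum
print(optimize.brent(f, brack=(-15, -1, 15)))  # -> ~1.5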
Example n. 4
 def _volOpt(self, volumeExpansion):
     '''
        volumeExpansion -  percent volume expansion relative to equilibrium
        at which the optimization is performed
     '''        
     ibrav = self.lattice0.ibrav
     if ibrav < 1 or ibrav > 4:
         raise Exception("This lattice type is not implemented")
     
     a0 = self.lattice0.a
     c0 = self.lattice0.c
     
     if ibrav == 4:
     # will find optimal a and c of hexagonal lattice of fixed volume:
         volume = a0*a0*c0
         volume = volume + volume*volumeExpansion/100.
         # initial assumption: all lattice parameters expand equally in percent
         c = c0*(1.+volumeExpansion/100.)**(1./3.)
 
         # percent of c to bracket around for the minimum
         # search (does not have to guarantee the minimum is inside)
         prcntC = 0.2 
 
         brentOut = brent(self._getHexEnergy, (volume,), (c-c*prcntC/100, \
                               c+c*prcntC/100), tol = 1.e-7, full_output = 1)
         print(brentOut)
         c = brentOut[0]
         energy = brentOut[1]            
         # relax structure at optimized parameters to get optimized atomic positions
         relax_energy = self._relax( a = numpy.sqrt(volume/c), c = c )
         
     if ibrav > 0 and ibrav < 4:
         aExpansion = (1.+volumeExpansion/100.)**(1./3.)
         a = a0 * (1.+volumeExpansion/100.)**(1./3.)
         volume = a0*a0*c0
         volume = volume + volume*volumeExpansion/100.
         prcntA = 0.2 
         brentOut = brent(self._getCubicEnergy, (volume,), (a-a*prcntA/100, \
                               a+a*prcntA/100), tol = 1.e-7, full_output = 1)
         energy = brentOut[1]
         
     print("Double check: Brent energy = %f, Relax energy = %f" % (energy, relax_energy))  # NOTE: relax_energy is only set in the ibrav == 4 branch above
     
     os.system('cp ' + self.pw.setting.get('pwOutput') + ' ' +  \
                          str(volumeExpansion) + self.pw.setting.get('pwOutput'))
     os.system('cp ' + self.pw.setting.get('pwInput') + ' ' +  \
                           str(volumeExpansion) + self.pw.setting.get('pwInput'))            
         
     return self.pw.input.toString(), energy
Example n. 5
def single_var_maximize_ln_L(data, ln_L_func, low_param, high_param):
    '''Takes `data` (the data set).
    `ln_L_func` is a function that should take (data, parameter_value)
    and return a log-likelihood.
    `low_param` and `high_param` should be 2 numbers (with high_param > low_param)
        that can be used in the call to find a set of points that bracket the
        optimum.
    '''
    def scipy_ln_likelihood(x):
        '''SciPy minimizes functions. We want to maximize the likelihood. This
        function adapts our ln_likelihood function to the minimization context
        by returning the negative log-likelihood.
    
        We use this function with SciPy's minimization routine (minimizing the
        negative log-likelihood will maximize the log-likelihood).
        '''
        ln_L = ln_L_func(data, x)
        if VERBOSE:
            sys.stderr.write('In wrapper around ln_L_func with param = {m} lnL = {l}\n'.format(m=x, l=ln_L))
        return -ln_L
    # Now that we have wrapped the scipy_ln_likelihood, we can find an 
    #       approximation to the solution
    mle = optimize.brent(scipy_ln_likelihood,
                         brack=(low_param, high_param),
                         tol=1e-8,
                         full_output=False)
    return mle
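The negate-and-minimize pattern above can be exercised end to end with a toy model. The sketch below is an editorial addition; the normal-mean likelihood is an assumption for illustration, not part of the source:

import numpy as np
from scipy import optimize

def normal_mean_ln_L(data, mu):
    # log-likelihood of a N(mu, 1) model, up to an additive constant
    return -0.5 * np.sum((np.asarray(data) - mu) ** 2)

data = [4.8, 5.1, 5.3, 4.9]
# brent minimizes, so hand it the negative log-likelihood
mle = optimize.brent(lambda mu: -normal_mean_ln_L(data, mu), brack=(0.0, 10.0))
print(mle)  # close to the sample mean, ~5.025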
Example n. 6
def optdelta(UY,UX,S,ldeltanull=None,numintervals=100,ldeltamin=-10.0,ldeltamax=10.0):
    """find the optimal delta"""
    if ldeltanull is None:
        nllgrid=SP.ones(numintervals+1)*SP.inf;
        ldeltagrid=SP.arange(numintervals+1)/(numintervals*1.0)*(ldeltamax-ldeltamin)+ldeltamin;
        nllmin=SP.inf;
        for i in SP.arange(numintervals+1):
            nllgrid[i]=nLLeval(ldeltagrid[i],UY,UX,S);
            if nllgrid[i]<nllmin:
                nllmin=nllgrid[i];
                ldeltaopt_glob=ldeltagrid[i];
        foundMin=False
        for i in SP.arange(numintervals-1)+1:
            continue  # NOTE: this 'continue' disables the Brent refinement below
            ee = 1E-8
            #carry out brent optimization within the interval
            if ((nllgrid[i-1]-nllgrid[i])>ee) and ((nllgrid[i+1]-nllgrid[i])>ee):
                foundMin = True
                ldeltaopt,nllopt,iter,funcalls = OPT.brent(nLLeval,(UY,UX,S),(ldeltagrid[i-1],ldeltagrid[i],ldeltagrid[i+1]),full_output=True);
                if nllopt<nllmin:
                    nllmin=nllopt;
                    ldeltaopt_glob=ldeltaopt;
    else:
        ldeltaopt_glob=ldeltanull;
    return ldeltaopt_glob;
Example n. 7
    def getMax(self,H, X=None,REML=False):
 
        """
           Helper function for .fit(...).
           This function takes a set of LLs computed over a grid and finds possible regions 
           containing a maximum.  Within these regions, a Brent search is performed to find the 
           optimum.
  
        """
        n = len(self.LLs)
        HOpt = []
        for i in range(1,n-2):
            if self.LLs[i-1] < self.LLs[i] and self.LLs[i] > self.LLs[i+1]: 
                HOpt.append(optimize.brent(self.LL_brent,args=(X,REML),brack=(H[i-1],H[i+1])))
                if np.isnan(HOpt[-1]):
                    HOpt[-1] = H[i-1]  # fall back to the bracket edge if Brent returns NaN

        if len(HOpt) > 1: 
            if self.verbose:
                sys.stderr.write("NOTE: Found multiple optima.  Returning first...\n")
            return HOpt[0]
        elif len(HOpt) == 1:
            return HOpt[0]
        elif self.LLs[0] > self.LLs[n-1]:
            return H[0]
        else:
            return H[n-1]
Example n. 8
def _solve_equivalent_width(abundance, coeffs, wavelength, stellar_parameters,
    tol=1.48e-08, maxiter=500):

    f = lambda ew: (abundance - \
        np.dot(_abundance_predictors(ew, wavelength, stellar_parameters), coeffs))**2

    return op.brent(f, brack=[0, 300], tol=tol, maxiter=maxiter)
Example n. 9
def maximize_mu_ln_L_over_all_sigma(data, ln_L_func, low_param, high_param):
    '''Takes `data` (the data set)
    `ln_L_func` is a function that should take the (data, parameter_value)
    and return a log-likelihood.
    `low_param` and `high_param` should be 2 numbers (with high_param > low_param)
        that can be used in the call to find a set of points that bracket the
        optimum.
    '''
    def mu_scipy_ln_likelihood(x):
        '''Here we perform an optimization to find the MLE of sigma
        for our current value of mu'''
        # a variance is sort of an average of the squared differences
        #   between the mean and the variates.
        # So a good guess at a bracketing range is the smallest absolute value
        #   of a difference and the largest absolute value of the difference
        sigma_mle = maximize_ln_L_for_fixed_mu(data, ln_L_func, x)
        ln_L = ln_L_func(data, x, sigma_mle)
        if VERBOSE:
            sys.stderr.write('for mu={m} nuisance sigma_mle is {s} lnL = {l}\n'.format(m=x, s=sigma_mle, l=ln_L))
        return -ln_L
    # Now that we have wrapped the scipy_ln_likelihood, we can find an 
    #       approximation to the solution
    mle = optimize.brent(mu_scipy_ln_likelihood,
                         brack=(low_param, high_param),
                         tol=1e-8,
                         full_output=False)
    return mle
Example n. 10
def boxcox(x,lmbda=None,alpha=None):
    """Return a positive dataset transformed by a Box-Cox power transformation.

    If lmbda is not None, do the transformation for that value.

    If lmbda is None, find the lambda that maximizes the log-likelihood
    function and return it as the second output argument.

    If alpha is not None, return the 100(1-alpha)% confidence interval for
    lambda as the third output argument.
    """
    if any(x < 0):
        raise ValueError("Data must be positive.")
    if lmbda is not None:  # single transformation
        lmbda = lmbda*(x==x)
        y = where(lmbda == 0, log(x), (x**lmbda - 1)/lmbda)
        return y
    # Otherwise find the lmbda that maximizes the log-likelihood function.
    def tempfunc(lmb, data):  # function to minimize
        return -boxcox_llf(lmb,data)
    lmax = optimize.brent(tempfunc, brack=(-2.0,2.0),args=(x,))
    y = boxcox(x, lmax)
    if alpha is None:
        return y, lmax
    # Otherwise find confidence interval
    interval = _boxcox_conf_interval(x, lmax, alpha)
    return y, lmax, interval
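The same maximize-by-negating idiom can be reproduced with the public scipy.stats.boxcox_llf (a sketch added here for illustration; it assumes only NumPy and SciPy):

import numpy as np
from scipy import optimize, stats

rng = np.random.default_rng(0)
data = rng.lognormal(size=500)  # positive, right-skewed data

# maximize the Box-Cox log-likelihood by minimizing its negative, as above
lmax = optimize.brent(lambda lmb: -stats.boxcox_llf(lmb, data), brack=(-2.0, 2.0))
print(lmax)  # should land near 0 for lognormal data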
Example n. 11
def optdelta(X,Y,K,ldeltanull=None,numintervals=100,ldeltamin=-5.0,ldeltamax=5.0):
    """find the optimal delta"""
    if ldeltanull is None:
        nllgrid=SP.ones(numintervals+1)*SP.inf;
        #nllgridf=SP.ones(numintervals+1)*SP.inf;
        ldeltagrid=SP.arange(numintervals+1)/(numintervals*1.0)*(ldeltamax-ldeltamin)+ldeltamin;
        nllmin=SP.inf;
        ldeltamin=None;
        for i in SP.arange(numintervals+1):
            #nllgridf[i]=nLLevalf(ldeltagrid[i],UY,UX,S);
            nllgrid[i]=nLLeval(ldeltagrid[i],X,Y,K);
            if nllgrid[i]<nllmin:
                nllmin=nllgrid[i];
                ldeltamin=ldeltagrid[i];
        for i in SP.arange(numintervals-1)+1:
            ee=1E-8
            if ( ((nllgrid[i-1]-nllgrid[i])>ee) and (nllgrid[i+1]-nllgrid[i])>ee):
                #search within brent if needed
                ldeltaopt,nllopt,iter,funcalls = OPT.brent(nLLeval,(X,Y,K),(ldeltagrid[i-1],ldeltagrid[i],ldeltagrid[i+1]),full_output=True);
                if nllopt<nllmin:
                    nllmin=nllopt;
                    ldeltamin=ldeltaopt;
    else:
        ldeltamin=ldeltanull;
        #nllminf=nLLevalf(ldeltamin,UY,UX,S);
        nllmin=nLLeval(ldeltamin,X,Y,K);
        #assert SP.absolute(nllmin-nllminf)<1E-5, 'outch'
    return nllmin,ldeltamin;
Example n. 12
def hexVolOpt(a0, c0_a0, volumeExpansion):
    '''provide the initial (equilibrium) lattice parameters a0 and c0/a0, and \
    the desired volume expansion (in percent) at which one should run \
    the optimization '''
    qe = PWCalc('config.ini')
    qe.pw.input.parse()
    if qe.pw.input.structure.lattice.ibrav != 4:
        raise Exception("The lattice must be hexagonal")
#   Initial(equilibrium) volume:
    c0 = a0*c0_a0
    volume = a0*a0*c0

    volume = volume + volume*volumeExpansion/100.
#   initial assumption: all lattice parameters expand equally in percent
    cExpansion = (1.+volumeExpansion/100.)**(1./3.)
    c = c0*cExpansion

    prcntC = 0.2 # percent of c to bracket around for the minimum search (does not have to guarantee the minimum is inside)

    brentOut = brent(getHexEnergy, (volume, qe), (c-c*prcntC/100, c+c*prcntC/100), tol = 1.e-7, full_output = 1)
    print(brentOut)
    c = brentOut[0]
    energy = brentOut[1]
    a = np.sqrt(volume/c)
    os.system('cp ' + qe.pw.setting.pwscfOutput + ' ' +  str(c) + qe.pw.setting.pwscfOutput)
    os.system('cp ' + qe.pw.setting.pwscfInput + ' ' +  str(c) + qe.pw.setting.pwscfInput)
    return a, c/a, energy
Example n. 13
def minimize1D(f, evalgrid = None, nGrid=10, minval=0.0, maxval = 0.99999, verbose=False, brent=True,check_boundaries = True, resultgrid=None):
    '''
    minimize a function f(x) in the grid between minval and maxval.
    The function will be evaluated on a grid and then all triplets,
    where the inner value is smaller than the two outer values are optimized by
    Brent's algorithm.
    --------------------------------------------------------------------------
    Input:
    f(x)    : callable target function
    evalgrid: 1-D array prespecified grid of x-values
    nGrid   : number of x-grid points to evaluate f(x)
    minval  : minimum x-value for optimization of f(x)
    maxval  : maximum x-value for optimization of f(x)
    brent   : boolean indicator whether to do Brent search or not.
              (default: True)
    --------------------------------------------------------------------------
    Output list:
    [xopt, f(xopt)]
    xopt    : x-value at the optimum
    f(xopt) : function value at the optimum
    --------------------------------------------------------------------------
    '''
    #evaluate the target function on a grid:
    if verbose: print("evaluating target function on a grid")
    if evalgrid is not None and brent:# if brent we need to sort the input values
        i_sort = evalgrid.argsort()
        evalgrid = evalgrid[i_sort]
    if resultgrid is None:
        [evalgrid,resultgrid] = evalgrid1D(f, evalgrid = evalgrid, nGrid=nGrid, minval=minval, maxval = maxval  )

    i_currentmin=resultgrid.argmin()
    minglobal = (evalgrid[i_currentmin],resultgrid[i_currentmin])
    if brent:#do Brent search in addition to rest?
        if check_boundaries:
            if verbose: print("checking grid point boundaries to see if further search is required")
            if resultgrid[0]<resultgrid[1]:#if the outer boundary point is a local optimum expand search bounded between the grid points
                if verbose: print("resultgrid[0]<resultgrid[1]--> outer boundary point is a local optimum expand search bounded between the grid points")
                minlocal = opt.fminbound(f,evalgrid[0],evalgrid[1],full_output=True)
                if minlocal[1]<minglobal[1]:
                    if verbose: print("found a new minimum during grid search")
                    minglobal=minlocal[0:2]
            if resultgrid[-1]<resultgrid[-2]:#if the outer boundary point is a local optimum expand search bounded between the grid points
                if verbose: print("resultgrid[-1]<resultgrid[-2]-->outer boundary point is a local optimum expand search bounded between the grid points")
                minlocal = opt.fminbound(f,evalgrid[-2],evalgrid[-1],full_output=True)
                if minlocal[1]<minglobal[1]:
                    if verbose: print("found a new minimum during grid search")
                    minglobal=minlocal[0:2]
        if verbose: print("exploring triplets with brent search")
        onebrent=False
        for i in range(resultgrid.shape[0]-2):#if any triplet is found, where the inner point is a local optimum expand search
            if (resultgrid[i+1]<resultgrid[i+2]) and (resultgrid[i+1]<resultgrid[i]):
                onebrent=True
                if verbose: print("found triplet to explore")
                minlocal = opt.brent(f,brack = (evalgrid[i],evalgrid[i+1],evalgrid[i+2]),full_output=True)
                if minlocal[1]<minglobal[1]:
                    minglobal=minlocal[0:2]
                    if verbose: print("found new minimum from brent search")
    return minglobal
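The grid-then-triplet strategy described in the docstring recurs in several examples on this page (see also the train_nullmodel variants). A compact, dependency-free rendering of the idea, assuming nothing beyond NumPy and SciPy:

import numpy as np
from scipy import optimize

def grid_then_brent(f, lo, hi, n=50):
    # evaluate f on a grid, then Brent-refine every bracketing triplet
    xs = np.linspace(lo, hi, n)
    fs = np.array([f(x) for x in xs])
    best_x, best_f = xs[fs.argmin()], fs.min()
    for i in range(1, n - 1):
        if fs[i] < fs[i - 1] and fs[i] < fs[i + 1]:  # inner point of a valid bracket
            x, fx, _, _ = optimize.brent(f, brack=(xs[i - 1], xs[i], xs[i + 1]),
                                         full_output=True)
            if fx < best_f:
                best_x, best_f = x, fx
    return best_x, best_f

print(grid_then_brent(lambda x: np.sin(x) + 0.1 * x, -10, 10))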
Example n. 14
def find_sgd_step_size0(
    model, X, y,
    initial_range=DEFAULT_INITIAL_RANGE,
    tolerance=DEFAULT_TOLERANCE, brent_output=DEFAULT_BRENT_OUTPUT):
    """Use a Brent line search to find the best step size

    Parameters
    ----------
    model: BinaryASGD
        Instance of a BinaryASGD

    X: array, shape = [n_samples, n_features]
        Array of features

    y: array, shape = [n_samples]
        Array of labels in (-1, 1)

    initial_range: tuple of float
        Initial range for the sgd_step_size0 search (low, high)

    tolerance: float
        Convergence tolerance passed to the Brent search

    Returns
    -------
    best_sgd_step_size0: float
        Optimal sgd_step_size0 given `X` and `y`.
    """
    # -- stupid scipy calls some sizes twice!?
    _cache = {}

    def eval_size0(log2_size0):
        try:
            return _cache[log2_size0]
        except KeyError:
            pass
        other = copy.deepcopy(model)
        current_step_size = 2 ** log2_size0
        other.sgd_step_size0 = current_step_size
        other.sgd_step_size = current_step_size
        other.partial_fit(X, y)
        # Hack: asgd is lower variance than sgd, but it's tuned to work
        # well asymptotically, not after just a few examples
        weights = .5 * (other.asgd_weights + other.sgd_weights)
        bias = .5 * (other.asgd_bias + other.sgd_bias)

        margin = y * (np.dot(X, weights) + bias)
        l2_cost = other.l2_regularization * (weights ** 2).sum()
        rval = np.maximum(0, 1 - margin).mean() + l2_cost
        _cache[log2_size0] = rval
        return rval

    best_sgd_step_size0 = optimize.brent(
        eval_size0, brack=np.log2(initial_range), tol=tolerance)

    return best_sgd_step_size0
Example n. 15
def _linesearch_brent(func, p, xi, tol=1e-3):
    """Line-search algorithm using Brent's method.

    Find the minimum of the function ``func(p + alpha*xi)``.
    """
    def myfunc(alpha):
        return func(p + alpha * xi)
    alpha_min, fret, iter, num = brent(myfunc, full_output=1, tol=tol)
    xi = alpha_min*xi
    return np.squeeze(fret), p+xi
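As a quick illustration (an editorial addition, not from the source), the helper can be exercised on a simple quadratic; it just minimizes the 1-D restriction of func along the direction xi. This assumes `brent` has been imported from scipy.optimize, as the snippet above expects:

import numpy as np

def func(v):
    return np.sum((v - np.array([1.0, 2.0])) ** 2)

p = np.zeros(2)            # starting point
xi = np.array([1.0, 2.0])  # search direction
fret, p_new = _linesearch_brent(func, p, xi, tol=1e-6)
print(fret, p_new)         # ~0.0 at approximately [1.0, 2.0]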
Example n. 16
def _minimize(bpf, N, func=min, debug=False):
    x0, x1 = bpf.bounds()
    xs = np.linspace(x0, x1, N)
    from scipy.optimize import brent
    mins = [brent(bpf, brack=(xs[i], xs[-1 - i])) for i in range(int(len(xs) * 0.5 + 0.5))]  # xs[-1 - i] avoids the degenerate bracket xs[-0] == xs[0]
    mins2 = [(bpf(m), m) for m in mins]  # if x0 <= m <= x1]
    if debug:
        print(mins2)
    if mins2:
        return float(func(mins2)[1])
    return None
Example n. 17
def _fit_maxharm_brent(v,fmax,t):
  ns=float(len(v))
  b=(fmax-1./ns,fmax,fmax+1./ns)
  try:
    fmax=brent(_maxfreq_ftomin,args=(v,t),brack=b,tol=1e-12)
  except ValueError:
    print("maxharm brent error")
    #print [(bb,_maxfreq_ftomin(bb,v,t)) for bb in b]
  coef=sum(v*exp(-2j*pi*fmax*t))/ns
  a,p=abs(coef)*2,angle(coef)
  res=sqrt(sum( (v - a*cos(2*pi*fmax*t+p))**2)/sum(v**2))/ns
  return fmax,a,p,res
Example n. 18
def append_levels( ax, ay, chi2levels, show_min=False, yax_min=0.0, **kwargs):
    axes       = kwargs.pop( 'axes', plt.gca() )
    text_sigma = kwargs.pop( 'text_sigma', True )
    textoffset = kwargs.pop( 'offset', 1 )
    flip = kwargs.pop( 'flip', False )
    topts = kwargs.pop( 'textopts', {} )
    interpolation = kwargs.pop( 'interpolation', 'linear' )
    imin = numpy.argmin( ay )
    xmin, ymin = ax[imin], ay[imin]

    from scipy.interpolate import interp1d
    from scipy.optimize import brentq, brent
    fcn = interp1d( ax, ay, kind=interpolation)

    if xmin==ax[0] or xmin==ax[-1]:
        brack = ( ax[0], ax[-1] )
    else:
        brack = ( ax[0], xmin, ax[-1] )
    print( 'bracket: ', brack )
    xmin = brent( fcn, brack=brack )
    ymin = fcn(xmin)
    if show_min:
        axes.vlines( [ xmin ], yax_min, ymin, **kwargs )
    levels = []
    for sign in chi2levels:
        level = ymin + sign**2
        try:
            x1 = brentq( lambda x: fcn(x)-level, ax[0], xmin )
        except ValueError:
            x1=0.0
        try:
            x2 = brentq( lambda x: fcn(x)-level, xmin, ax[-1] )
        except ValueError:
            print( 'Skip level for', sign )
            continue
        #print( 'Level', sign, level, x1, x2 )

        levels.append( [ x1, x2 ] )

        if flip:
            axes.hlines( [ x1, x2 ], yax_min, level, **kwargs )
            axes.vlines( level, x1, x2, **kwargs )
            if text_sigma:
                axes.text( level+textoffset, xmin, '%i$\sigma$'%sign, va='center', **topts )
        else:
            axes.vlines( [ x1, x2 ], yax_min, level, **kwargs )
            axes.hlines( level, x1, x2, **kwargs )
            if text_sigma:
                axes.text( (x1+x2)*0.5, level+textoffset, '%i$\sigma$'%sign, ha='center', va='bottom', **topts  )

    return levels
Example n. 19
def train_nullmodel(y,K,numintervals=100,ldeltamin=-5,ldeltamax=5,debug=False):
    """
    train random effects model:
    min_{delta}  1/2 (n_s*log(2*pi) + logdet(K + delta*I) + 1/ss * y^T (K + delta*I)^{-1} y)

    Input:
    y: phenotype:  n_s x 1
    K: kinship matrix: n_s x n_s
    numintervals: number of intervals for delta linesearch
    ldeltamin: minimal delta value (log-space)
    ldeltamax: maximal delta value (log-space)
    """
    if debug:
        print('... train null model')
        
    n_s = y.shape[0]

    # rotate data
    S,U = LA.eigh(K)
    Uy = SP.dot(U.T,y)

    # grid search
    nllgrid=SP.ones(numintervals+1)*SP.inf
    ldeltagrid=SP.arange(numintervals+1)/(numintervals*1.0)*(ldeltamax-ldeltamin)+ldeltamin
    nllmin=SP.inf
    for i in SP.arange(numintervals+1):
        nllgrid[i]=nLLeval(ldeltagrid[i],Uy,S);
        
    # find minimum
    nllmin = nllgrid.min()  # use the same variable the refinement below compares against
    ldeltaopt_glob = ldeltagrid[nllgrid.argmin()]

    # more accurate search around the minimum of the grid search
    for i in SP.arange(numintervals-1)+1:
        if (nllgrid[i]<nllgrid[i-1] and nllgrid[i]<nllgrid[i+1]):
            ldeltaopt,nllopt,iter,funcalls = OPT.brent(nLLeval,(Uy,S),(ldeltagrid[i-1],ldeltagrid[i],ldeltagrid[i+1]),full_output=True);
            if nllopt<nllmin:
                nllmin=nllopt;
                ldeltaopt_glob=ldeltaopt;

    monitor = {}
    monitor['nllgrid'] = nllgrid
    monitor['ldeltagrid'] = ldeltagrid
    monitor['ldeltaopt'] = ldeltaopt_glob
    monitor['nllopt'] = nllmin
    
    return S,U,ldeltaopt_glob,monitor
Example n. 20
def boxcox(x,lmbda=None,alpha=None):
    """
    Return a positive dataset transformed by a Box-Cox power transformation.

    Parameters
    ----------
    x : ndarray
        Input array.
    lmbda : {None, scalar}, optional
        If `lmbda` is not None, do the transformation for that value.

        If `lmbda` is None, find the lambda that maximizes the log-likelihood
        function and return it as the second output argument.
    alpha : {None, float}, optional
        If `alpha` is not None, return the ``100 * (1-alpha)%`` confidence
        interval for `lmbda` as the third output argument.

        If `alpha` is not None it must be between 0.0 and 1.0.

    Returns
    -------
    boxcox : ndarray
        Box-Cox power transformed array.
    maxlog : float, optional
        If the `lmbda` parameter is None, the second returned argument is
        the lambda that maximizes the log-likelihood function.
    (min_ci, max_ci) : tuple of float, optional
        If `lmbda` parameter is None and `alpha` is not None, this returned
        tuple of floats represents the minimum and maximum confidence limits
        given `alpha`.

    """
    if any(x < 0):
        raise ValueError("Data must be positive.")
    if lmbda is not None:  # single transformation
        lmbda = lmbda*(x == x)
        y = where(lmbda == 0, log(x), (x**lmbda - 1)/lmbda)
        return y

    # Otherwise find the lmbda that maximizes the log-likelihood function.
    def tempfunc(lmb, data):  # function to minimize
        return -boxcox_llf(lmb,data)
    lmax = optimize.brent(tempfunc, brack=(-2.0,2.0),args=(x,))
    y = boxcox(x, lmax)
    if alpha is None:
        return y, lmax
    # Otherwise find confidence interval
    interval = _boxcox_conf_interval(x, lmax, alpha)
    return y, lmax, interval
Example n. 21
def train_nullmodel(y, K, numintervals=500, ldeltamin=-5, ldeltamax=5, scale=0, S=None, U=None, REML=False):
    """
    train random effects model:
    min_{delta}  1/2 (n_s*log(2*pi) + logdet(K + delta*I) + 1/ss * y^T (K + delta*I)^{-1} y)

    Input:
    y: phenotype:  n_s x 1
    K: kinship matrix: n_s x n_s
    numintervals: number of intervals for delta linesearch
    ldeltamin: minimal delta value (log-space)
    ldeltamax: maximal delta value (log-space)
    """
    ldeltamin += scale
    ldeltamax += scale

    n_s = y.shape[0]

    # rotate data
    if S is None or U is None:
        S, U = linalg.eigh(K)

    Uy = scipy.dot(U.T, y)

    # grid search
    nllgrid = scipy.ones(numintervals + 1) * scipy.inf
    ldeltagrid = scipy.arange(numintervals + 1) / (numintervals * 1.0) * (ldeltamax - ldeltamin) + ldeltamin
    nllmin = scipy.inf
    for i in scipy.arange(numintervals + 1):
        nllgrid[i] = nLLeval(ldeltagrid[i], Uy, S, REML=REML)

    # find minimum
    nllmin = nllgrid.min()
    ldeltaopt_glob = ldeltagrid[nllgrid.argmin()]

    # more accurate search around the minimum of the grid search

    for i in scipy.arange(numintervals - 1) + 1:
        if (nllgrid[i] < nllgrid[i - 1] and nllgrid[i] < nllgrid[i + 1]):
            ldeltaopt, nllopt, iter, funcalls = opt.brent(nLLeval, (Uy, S, REML),
                                                          (ldeltagrid[i - 1], ldeltagrid[i], ldeltagrid[i + 1]),
                                                          full_output=True)
            if nllopt < nllmin:
                nllmin = nllopt
                ldeltaopt_glob = ldeltaopt

    return S, U, ldeltaopt_glob
Example n. 22
def _correlate_images(im1, im2, method='brent'):
    shape = im1.shape
    f1 = fft2(im1)
    f1[0, 0] = 0
    f2 = fft2(im2)
    f2[0, 0] = 0
    ir = np.real(ifft2((f1 * f2.conjugate())))
    t0, t1 = np.unravel_index(np.argmax(ir), shape)
    if t0 >= shape[0]/2:
        t0 -= shape[0]
    if t1 >= shape[1]/2:
        t1 -= shape[1]
    if method == 'brent':
        newim2 = ndimage.shift(im2, (t0, t1))
        refine = optimize.brent(cost_function, args=(im1, newim2),
                        brack=[-1, 1], tol=1.e-2) 
    return t1 + refine
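The cost_function used above is defined elsewhere in that project. A plausible stand-in (an assumption for illustration, not the original) scores a candidate sub-pixel shift along axis 1 by the mean squared difference after shifting:

import numpy as np
from scipy import ndimage

def cost_function(shift, im1, im2):
    # mean squared difference between im1 and im2 shifted along axis 1 (assumed form)
    return np.mean((im1 - ndimage.shift(im2, (0, shift))) ** 2)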
Example n. 23
def minimize(args):
    f_index, g_index = args
    pol_f_index = f_index[-2:]
    pol_g_index = g_index[-2:]
    data = dataFromShared()
    f = data[f_index]
    g = data[g_index]
    df, dg = dArraysFromShared()
    df = df[pol_f_index]
    dg = dg[pol_g_index]    
    fmin = lambda tau: -correlate(tau, f, g, df, dg) 
    appr = -f.argmax() + g.argmax()
    halfwidth = np.sum(f < f.max() / 2) / 2
    try: 
        return brent(fmin, brack=(appr - halfwidth, appr, appr + halfwidth))
    except ValueError:
        return -123.4
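The helpers (dataFromShared, correlate, and friends) live elsewhere in that project, but the core idea, bracketing an integer lag estimate with its half-width and letting brent refine it, can be shown self-contained. The toy neg_corr below is an assumption for illustration:

import numpy as np
from scipy.optimize import brent

def neg_corr(tau, f, g):
    # negative correlation of f with g advanced by a (possibly fractional) lag tau
    n = np.arange(len(f))
    return -np.dot(f, np.interp(n + tau, n, g, left=0.0, right=0.0))

n = np.arange(200.0)
f = np.exp(-0.5 * ((n - 80) / 5.0) ** 2)   # peak at 80
g = np.exp(-0.5 * ((n - 95) / 5.0) ** 2)   # same peak shifted to 95
appr = float(-f.argmax() + g.argmax())     # rough lag estimate: 15
halfwidth = 5.0
lag = brent(lambda t: neg_corr(t, f, g), brack=(appr - halfwidth, appr, appr + halfwidth))
print(lag)  # ~15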
Example n. 24
 def calcu0(self,E,Lz):
     """
     NAME:
        calcu0
     PURPOSE:
        calculate the minimum of the u potential
     INPUT:
        E - energy
        Lz - angular momentum
     OUTPUT:
        u0
     HISTORY:
        2012-11-29 - Written - Bovy (IAS)
     """                           
     logu0= optimize.brent(_u0Eq,
                           args=(self._delta,self._pot,
                                 E,Lz**2./2.))
     return numpy.exp(logu0)
Example n. 25
    def compute_z(self, cosmology=None):
        """
        The redshift for this distance assuming its physical distance is
        a luminosity distance.

        Parameters
        ----------
        cosmology : `~astropy.cosmology.cosmology` or None
            The cosmology to assume for this calculation, or None to use the
            current cosmology.

        """
        from ..cosmology import luminosity_distance
        from scipy import optimize

        # FIXME: array: need to make this calculation more vector-friendly

        f = lambda z, d, cos: (luminosity_distance(z, cos).value - d) ** 2
        return optimize.brent(f, (self.Mpc, cosmology))
Example n. 26
def boxcox_normmax(x,brack=(-1.0,1.0)):
    N = len(x)
    # compute uniform median statistics
    Ui = zeros(N)*1.0
    Ui[-1] = 0.5**(1.0/N)
    Ui[0] = 1-Ui[-1]
    i = arange(2,N)
    Ui[1:-1] = (i-0.3175)/(N+0.365)
    # this function computes the x-axis values of the probability plot
    #  and computes a linear regression (including the correlation)
    #  and returns 1-r so that a minimization function maximizes the
    #  correlation
    xvals = distributions.norm.ppf(Ui)
    def tempfunc(lmbda, xvals, samps):
        y = boxcox(samps,lmbda)
        yvals = sort(y)
        r, prob  = stats.pearsonr(xvals, yvals)
        return 1-r
    return optimize.brent(tempfunc, brack=brack, args=(xvals, x))
Example n. 27
def best_nsr_init_points(lin, stresscalc=None, seg_len=0.01, num_subsegs=10): #{{{
    """
    Given a lineament and a stresscalc object, find the best latitude at which
    to initiate tensile cracking when generating NSR doppelgangers.  Find one
    latitude for each value of lin.bs, and return both the lons and the lats
    found.

    Also returns max_length, which is the target length for the doppelgangers,
    based on the length of the best fit great circle segment representing lin.

    """
    from scipy.optimize import brent

    if stresscalc is None:
        stresscalc=lin.stresscalc

    # Figure out where to initiate the formation of the doppelganger:
    ep1_lon, ep1_lat, ep2_lon, ep2_lat = lin.bfgcseg_endpoints()
    mp_lon, mp_lat = spherical_midpoint(ep1_lon, ep1_lat, ep2_lon, ep2_lat)

    # Set the max_length of the doppelganger to be the length of the best
    # fit great circle, and not the prototype, since NSR features are almost
    # perfectly straight.
    max_length = spherical_distance(ep1_lon, ep1_lat, ep2_lon, ep2_lat)

    # The simplest thing to do now, in choosing the initiation point is
    # just to use (mp_lon, mp_lat), but that point won't always be very
    # close to the mapped feature.  The separation between the initiation
    # point and the mapped lineament ends up being a strong determinant of
    # how good of a fit can be attained ultimately for those features which
    # fit NSR well.  This isn't really acceptable.  One way to get around
    # this would be to use the longitude of the bfgc, and try several
    # different latitudes, choosing the one which generates the best
    # doppelganger.
    
    # Now we use the Brent scalar function optimizer to search to find the
    # right latitude for each of those longitudes.
    init_lats = []
    for b in lin.bs:
        init_lats.append(brent(mhd_by_lat, args=(mp_lon, stresscalc, seg_len, num_subsegs, lin, max_length, b), full_output=1)[0])

    return(lin.bs+mp_lon, init_lats, max_length)
Example n. 28
def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'):
    """Returns the shape parameter that maximizes the probability plot
    correlation coefficient for the given data to a one-parameter
    family of distributions.

    See also ppcc_plot
    """
    try:
        ppf_func = eval('distributions.%s.ppf' % dist)
    except AttributeError:
        raise ValueError("%s is not a valid distribution with a ppf." % dist)
    """
    res = inspect.getargspec(ppf_func)
    if not ('loc' == res[0][-2] and 'scale' == res[0][-1] and \
            0.0==res[-1][-2] and 1.0==res[-1][-1]):
        raise ValueError("Function does not have default location "
              "and scale parameters\n  that are 0.0 and 1.0 respectively.")
    if (1 < len(res[0])-len(res[-1])-1) or \
       (1 > len(res[0])-3):
        raise ValueError("Must be a one-parameter family.")
    """
    N = len(x)
    # compute uniform median statistics
    Ui = zeros(N) * 1.0
    Ui[-1] = 0.5**(1.0 / N)
    Ui[0] = 1 - Ui[-1]
    i = arange(2, N)
    Ui[1:-1] = (i - 0.3175) / (N + 0.365)
    osr = sort(x)

    # this function computes the x-axis values of the probability plot
    #  and computes a linear regression (including the correlation)
    #  and returns 1-r so that a minimization function maximizes the
    #  correlation
    def tempfunc(shape, mi, yvals, func):
        xvals = func(mi, shape)
        r, prob = stats.pearsonr(xvals, yvals)
        return 1 - r

    return optimize.brent(tempfunc, brack=brack, args=(Ui, osr, ppf_func))
Example n. 29
    def train_nullmodel(self,
                        y,
                        K,
                        S=None,
                        U=None,
                        numintervals=500,
                        ldeltamin=-5,
                        ldeltamax=5,
                        scale=0,
                        mode='lmm',
                        p=1):
        ldeltamin += scale
        ldeltamax += scale

        if S is None or U is None:
            S, U = linalg.eigh(K)

        Uy = scipy.dot(U.T, y)

        # grid search
        nllgrid = scipy.ones(numintervals + 1) * scipy.inf
        ldeltagrid = scipy.arange(numintervals + 1) / (numintervals * 1.0) * (
            ldeltamax - ldeltamin) + ldeltamin
        for i in scipy.arange(numintervals + 1):
            nllgrid[i] = nLLeval(ldeltagrid[i], Uy,
                                 S)  # the method is in helpingMethods

        nllmin = nllgrid.min()
        ldeltaopt_glob = ldeltagrid[nllgrid.argmin()]

        for i in scipy.arange(numintervals - 1) + 1:
            if (nllgrid[i] < nllgrid[i - 1] and nllgrid[i] < nllgrid[i + 1]):
                ldeltaopt, nllopt, iter, funcalls = opt.brent(
                    nLLeval, (Uy, S),
                    (ldeltagrid[i - 1], ldeltagrid[i], ldeltagrid[i + 1]),
                    full_output=True)
                if nllopt < nllmin:
                    nllmin = nllopt
                    ldeltaopt_glob = ldeltaopt
        return S, U, ldeltaopt_glob
Example n. 30
def optdelta(X,
             Y,
             K,
             ldeltanull=None,
             numintervals=100,
             ldeltamin=-5.0,
             ldeltamax=5.0):
    """find the optimal delta"""
    if ldeltanull is None:
        nllgrid = SP.ones(numintervals + 1) * SP.inf
        #nllgridf=SP.ones(numintervals+1)*SP.inf;
        ldeltagrid = SP.arange(numintervals + 1) / (numintervals * 1.0) * (
            ldeltamax - ldeltamin) + ldeltamin
        nllmin = SP.inf
        ldeltamin = None
        for i in SP.arange(numintervals + 1):
            #nllgridf[i]=nLLevalf(ldeltagrid[i],UY,UX,S);
            nllgrid[i] = nLLeval(ldeltagrid[i], X, Y, K)
            if nllgrid[i] < nllmin:
                nllmin = nllgrid[i]
                ldeltamin = ldeltagrid[i]
        for i in SP.arange(numintervals - 1) + 1:
            ee = 1E-8
            if (((nllgrid[i - 1] - nllgrid[i]) > ee)
                    and (nllgrid[i + 1] - nllgrid[i]) > ee):
                #search within brent if needed
                ldeltaopt, nllopt, iter, funcalls = OPT.brent(
                    nLLeval, (X, Y, K),
                    (ldeltagrid[i - 1], ldeltagrid[i], ldeltagrid[i + 1]),
                    full_output=True)
                if nllopt < nllmin:
                    nllmin = nllopt
                    ldeltamin = ldeltaopt
    else:
        ldeltamin = ldeltanull
        #nllminf=nLLevalf(ldeltamin,UY,UX,S);
        nllmin = nLLeval(ldeltamin, X, Y, K)
        #assert SP.absolute(nllmin-nllminf)<1E-5, 'outch'
    return nllmin, ldeltamin
Example n. 31
def pos(data,x0,*args):
    c = getSpeedOfLight()
    re = getEarthRadius()
    
    data[:,:3] = data[:,:3]/re    # Precondition the data
    data[:,3]  = data[:,3]*c/re
    d = -grad(x0,data)
    r = d                         # Initial conditions
    tol = 1.e-8                   
    
    newx = 10.                    # needed to make iteration work
    oldx = 9.
    while abs(linalg.norm(newx) - linalg.norm(oldx)) > tol: # stopping criterion; norm of r won't work
        alpha = brent(fline,args=(makeObjective,x0,d,data)) # calculate alpha for new search direction 
        oldx = x0 
        newx = x0 + alpha*d                                 # new guess
        x0 = newx
        newr = -grad(x0,data)                              
        beta = dot(newr,newr)/dot(r,r)
        d = newr + beta*d                                   # new search direction
        r = newr    
    return x0[:3]*re
Example n. 32
def energy_maxmass(eos, lowenergy, highenergy=None):
    ''' Determine properties of the maximum mass star supported by the EOS.
    Returns (central energy density in g/cm^3, mass in Solar masses, and
    radius in km  for the star). 
    
    Include a lower bound on the energy to
    avoid convergence problems especially with crusts.  About 10**14 g/cm^3
    works well for most neutron stars.

    Optional params: 

      highenergy = upper bound for search. If this is not specified, use
      the limiting enthalpy range of the EOS. 
      '''
    if highenergy is None:
        highenergy = eos.energy(eos.enthrange[1])
    # Calculate the maximum mass energy density
    massen = lambda x: 10.0 - profile(eos, x)[2, -1]
    maxen = optimize.brent(massen, brack=(lowenergy, highenergy))
    # Calculate the maximum mass model
    maxprofile = profile(eos, maxen)[:, -1]
    return maxen, maxprofile[2] / Solarmass_km, maxprofile[1]
Example n. 34
def newton_raphson(x, y, maxcnt=100, eps=1e-5):
    params_cnt = 2
    params = np.random.rand(params_cnt)
    params_old = params + 10
    cnt = 0
    while (stability(np.abs(params - params_old), eps) and cnt < maxcnt):
        grad = np.zeros(params_cnt)
        hes = np.zeros((params_cnt, params_cnt))
        for i in range(len(x)):  # iterate over the x argument (the original referenced an undefined global X)
            grad += derivative(x[i], y[i], params)
            hes += hessian(x[i], y[i], params)
        # find best step gamma
        f = lambda val: SSE(x, y, params - val * np.linalg.pinv(hes).dot(grad))
        gamma = optimize.brent(f)

        params_old = params.copy()
        # make iteration as difference of old params value and
        #  step * gradient * inverse(hessian)
        params -= np.linalg.pinv(hes).dot(grad) * gamma
        cnt += 1
    print("iteration cnt: ", cnt)
    return params
Example n. 36
    def compute_z(self, cosmology=None):
        """
        The redshift for this distance assuming its physical distance is
        a luminosity distance.

        Parameters
        ----------
        cosmology : ``Cosmology`` or `None`
            The cosmology to assume for this calculation, or `None` to use the
            current cosmology (see `astropy.cosmology` for details).

        Returns
        -------
        z : float
            The redshift of this distance given the provided ``cosmology``.
        """
        from ..cosmology import luminosity_distance
        from scipy import optimize

        # FIXME: array: need to make this calculation more vector-friendly

        f = lambda z, d, cos: (luminosity_distance(z, cos).value - d) ** 2
        return optimize.brent(f, (self.Mpc, cosmology))
Example n. 38
def descent(f, a_0, e, X):  # steepest descent
    print("Initial point", (a_0[0], a_0[1]), "tolerance", e)
    g = grad(f, X)
    n = len(X)
    x_0 = a_0
    d = sub_v(g, x_0, X)
    xs = [[x_0[i]] for i in range(n)]
    k = 0
    while norm(d, X) >= e:

        def l(x):
            return sub_s(f, [x_0[i] - x * d[i] for i in range(n)], X)

        a = optimize.brent(l)
        for i in range(n):
            x_0[i] -= a * d[i]
        d = sub_v(g, x_0, X)
        for i in range(n):
            xs[i].append(x_0[i])
        k += 1
    print("Number of iterations", k)
    print("Stopping point", (x_0[0], x_0[1]))
    return x_0, xs
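grad, sub_v, sub_s, and norm above are project-specific symbolic helpers. A minimal NumPy-only sketch of the same idea, steepest descent where optimize.brent chooses the step length, might look like this (an editorial sketch, not the original code):

import numpy as np
from scipy import optimize

def descent_np(f, grad_f, x0, tol=1e-6, max_iter=1000):
    # steepest descent; the step length along -grad is chosen by Brent's method
    x = np.asarray(x0, dtype=float)
    for k in range(max_iter):
        d = grad_f(x)
        if np.linalg.norm(d) < tol:
            break
        a = optimize.brent(lambda alpha: f(x - alpha * d))  # 1-D line search
        x = x - a * d
    return x, k

f = lambda v: (v[0] - 1) ** 2 + 10 * (v[1] + 2) ** 2
grad_f = lambda v: np.array([2 * (v[0] - 1), 20 * (v[1] + 2)])
print(descent_np(f, grad_f, [0.0, 0.0]))  # converges near (1, -2)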
Example n. 39
   def getMax(self,H, X=None,REML=False):

      """
      Helper function for .fit(...).
      This function takes a set of LLs computed over a grid and finds possible regions
      containing a maximum.  Within these regions, a Brent search is performed to find the
      optimum.
      """
      n = len(self.LLs)
      HOpt = []
      for i in range(1,n-2):
         if self.LLs[i-1] < self.LLs[i] and self.LLs[i] > self.LLs[i+1]: 
            HOpt.append(optimize.brent(self.LL_brent,args=(X,REML),brack=(H[i-1],H[i+1])))
            if np.isnan(HOpt[-1]): HOpt[-1] = H[i-1]
            #if np.isnan(HOpt[-1]): HOpt[-1] = self.LLs[i-1]
            #if np.isnan(HOpt[-1][0]): HOpt[-1][0] = [self.LLs[i-1]]

      if len(HOpt) > 1: 
         if self.verbose: sys.stderr.write("NOTE: Found multiple optima.  Returning first...\n")
         return HOpt[0]
      elif len(HOpt) == 1: return HOpt[0]
      elif self.LLs[0] > self.LLs[n-1]: return H[0]
      else: return H[n-1]
Example n. 40
def optdelta(UY,
             UX,
             S,
             ldeltanull=None,
             numintervals=100,
             ldeltamin=-10.0,
             ldeltamax=10.0):
    """find the optimal delta"""
    if ldeltanull is None:
        nllgrid = SP.ones(numintervals + 1) * SP.inf
        ldeltagrid = SP.arange(numintervals + 1) / (numintervals * 1.0) * (
            ldeltamax - ldeltamin) + ldeltamin
        nllmin = SP.inf
        for i in SP.arange(numintervals + 1):
            nllgrid[i] = nLLeval(ldeltagrid[i], UY, UX, S)
            if nllgrid[i] < nllmin:
                nllmin = nllgrid[i]
                ldeltaopt_glob = ldeltagrid[i]
        foundMin = False
        for i in SP.arange(numintervals - 1) + 1:
            continue  # NOTE: this 'continue' disables the Brent refinement below
            ee = 1E-8
            #carry out brent optimization within the interval
            if ((nllgrid[i - 1] - nllgrid[i]) > ee) and (
                (nllgrid[i + 1] - nllgrid[i]) > ee):
                foundMin = True
                ldeltaopt, nllopt, iter, funcalls = OPT.brent(
                    nLLeval, (UY, UX, S),
                    (ldeltagrid[i - 1], ldeltagrid[i], ldeltagrid[i + 1]),
                    full_output=True)
                if nllopt < nllmin:
                    nllmin = nllopt
                    ldeltaopt_glob = ldeltaopt
    else:
        ldeltaopt_glob = ldeltanull
    return ldeltaopt_glob
Example n. 41
    def fit(self, x, y):
        '''
        Args:
            x (ndarray): an array of shape (n,d) for n points, d features NOT incl constant
            y (ndarray): an array of shape (n,) for n points        
        Returns:
            ndarray: update and return self.beta using squared loss and norm-1 regularization                     
        '''
        self.beta = self.fit_ridge(x, y, self.lamda)

        def objective(b, i):
            new_beta = np.concatenate((self.beta[:i], np.array([b]), self.beta[i+1:]))
            return np.linalg.norm(y - x.dot(new_beta))**2 + self.lamda * np.linalg.norm(new_beta, ord=1)

        has_change, count, tol = True, 0, 1e-4
        while has_change and count < 1e4:
            has_change = False
            for i in range(self.beta.size):
                prev = self.beta[i]
                self.beta[i] = brent(objective, args=(i,))                 
                if abs(prev - self.beta[i]) > tol:
                    has_change = True
            count += 1
        return self.beta
Example n. 42
def train_nullmodel(y,K,numintervals=100,ldeltamin=-5,ldeltamax=5,debug=False):
    if debug:
        print('... train null model')
        
    n_s = y.shape[0]

    # rotate data
    S,U = LA.eigh(K)
    Uy = SP.dot(U.T,y)

    # grid search
    nllgrid=SP.ones(numintervals+1)*SP.inf
    ldeltagrid=SP.arange(numintervals+1)/(numintervals*1.0)*(ldeltamax-ldeltamin)+ldeltamin
    nllmin=SP.inf
    for i in SP.arange(numintervals+1):
        nllgrid[i]=nLLeval(ldeltagrid[i],Uy,S);
        
    # find minimum
    nllmin = nllgrid.min()  # use the same variable the refinement below compares against
    ldeltaopt_glob = ldeltagrid[nllgrid.argmin()]

    # more accurate search around the minimum of the grid search
    for i in SP.arange(numintervals-1)+1:
        if (nllgrid[i]<nllgrid[i-1] and nllgrid[i]<nllgrid[i+1]):
            ldeltaopt,nllopt,iter,funcalls = OPT.brent(nLLeval,(Uy,S),(ldeltagrid[i-1],ldeltagrid[i],ldeltagrid[i+1]),full_output=True);
            if nllopt<nllmin:
                nllmin=nllopt;
                ldeltaopt_glob=ldeltaopt;

    monitor = {}
    monitor['nllgrid'] = nllgrid
    monitor['ldeltagrid'] = ldeltagrid
    monitor['ldeltaopt'] = ldeltaopt_glob
    monitor['nllopt'] = nllmin
    
    return S,U,ldeltaopt_glob,monitor
Example n. 43
def optimize_threshold(X_pre, n_pre, n_post, delta):
    '''Returns the optimal threshold c

    Parameters
    ----------
    X_pre : Vector of n_pre integers.
        Independently distributed random variables
    n_pre : int
        Length of X_pre vector
    n_post : int
        Length of X_post vector
    delta : int
        1 - Confidence level for the threshold

    Returns
    -------
    float
        Optimal c threshold
    '''
    c_optim, fval, iters_, funcalls = brent(c_like,
                                            (X_pre, n_pre, n_post, delta),
                                            brack=(0, 1000),
                                            full_output=True)
    return c_optim
Example n. 44
def main():
    res = optimize.brent(propagation_function,
                         brack=(0, 1e-5, 5e-5),
                         tol=1e-3,
                         full_output=True)
    print("Output:", res)
    plt.figure('dE vs dTheta')
    plt.plot(np.array(minimizationArray)[:, 0] * 1e6,
             np.array(minimizationArray)[:, 1],
             'ro',
             ls='')
    plt.grid()
    axes = plt.gca()
    axes.set_xlabel("$d\Theta$, $\mu$rad")
    axes.set_ylabel("$\Delta$E, eV")
    plt.savefig("dE_vs_dTheta.png")

    plt.figure('Flux vs dTheta')
    plt.plot(np.array(minimizationArray)[:, 0] * 1e6,
             np.array(minimizationArray)[:, 2],
             'go',
             ls='')
    plt.grid()
    axes = plt.gca()
    axes.set_xlabel("$d\Theta$, $\mu$rad")
    axes.set_ylabel("Flux, photons/s")
    plt.savefig("Flux_vs_dTheta.png")

    plt.figure('Convergence')
    plt.plot(np.arange(len(minimizationArray)),
             np.array(minimizationArray)[:, 1], '-bo')
    axes = plt.gca()
    axes.set_xlabel("Iteration Nr.")
    axes.set_ylabel("$\Delta$E, eV")
    plt.savefig("Convergence.png")
    plt.show()
Example n. 45
                    tk = 1.0
                phreds = annot[2]
                qGenoData.append([
                    tk, [float(phreds[0]),
                         float(phreds[1]),
                         float(phreds[2])]
                ])
            else:
                qGenoData.append('missing')
        q0sumLL = sumLLq(0.0 + sm_val, qGenoData)
        q1sumLL = sumLLq(1.0 - sm_val, qGenoData)
        qMsumLL = sumLLq(qEst, qGenoData)

        if (qMsumLL > q0sumLL) and (qMsumLL > q1sumLL):
            bracket = (0.0, qEst, 1.0)
            MLE = optimize.brent(scipyLLq, brack=bracket)

        elif q0sumLL >= qMsumLL:
            MLE = 0.0
            newlyfixed += 1

        elif q1sumLL >= qMsumLL:
            MLE = 1.0
            newlyfixed += 1
        negLL = -sumLLq(MLE, qGenoData)

        sitecounter += 1
        outfile.write(contig + '\t' + position + '\t' + str(count) + '\t' +
                      str(qEst) + '\t' + str(MLE) + '\n')
print('sites that now appear fixed: ', str(newlyfixed))
outfile.close()
Example n. 46
"""
Function minimization using
scipy

Mar-07-2016
Gopi Subramanian
"""

from scipy import optimize
import numpy as np
import matplotlib.pyplot as plt


# Define a function
def afunction(x):
    return -np.exp(-(x - .7)**2)


# Generate some input
vals = np.arange(-5, 5, 0.5)
fvals = [afunction(x) for x in vals]

# Plot the function
plt.figure(2)
plt.plot(fvals)

plt.show()

print(optimize.brent(afunction))

# Alternatively
print(optimize.minimize_scalar(afunction))
Example n. 47
def minimize1D(f,
               evalgrid=None,
               nGrid=10,
               minval=0.0,
               maxval=0.99999,
               verbose=False,
               brent=True,
               check_boundaries=True,
               resultgrid=None,
               return_grid=False):
    '''
    minimize a function f(x) in the grid between minval and maxval.
    The function will be evaluated on a grid and then all triplets,
    where the inner value is smaller than the two outer values are optimized by
    Brent's algorithm.
    --------------------------------------------------------------------------
    Input:
    f(x)    : callable target function
    evalgrid: 1-D array prespecified grid of x-values
    nGrid   : number of x-grid points to evaluate f(x)
    minval  : minimum x-value for optimization of f(x)
    maxval  : maximum x-value for optimization of f(x)
    brent   : boolean indicator whether to do Brent search or not.
              (default: True)
    --------------------------------------------------------------------------
    Output list:
    [xopt, f(xopt)]
    xopt    : x-value at the optimum
    f(xopt) : function value at the optimum
    --------------------------------------------------------------------------
    '''
    #evaluate the target function on a grid:
    if verbose: print("evaluating target function on a grid")
    if evalgrid is not None and brent:  # if brent we need to sort the input values
        i_sort = evalgrid.argsort()
        evalgrid = evalgrid[i_sort]
    if resultgrid is None:
        [evalgrid, resultgrid] = evalgrid1D(f,
                                            evalgrid=evalgrid,
                                            nGrid=nGrid,
                                            minval=minval,
                                            maxval=maxval)

    i_currentmin = resultgrid.argmin()
    minglobal = (evalgrid[i_currentmin], resultgrid[i_currentmin])
    if brent:  #do Brent search in addition to rest?
        if check_boundaries:
            if verbose:
                print("checking grid point boundaries to see if further search is required")
            # if the outer boundary point is a local optimum, expand the search
            # bounded between the grid points
            if resultgrid[0] < resultgrid[1]:
                if verbose:
                    print("resultgrid[0]<resultgrid[1] --> outer boundary point is a local optimum; expand search bounded between the grid points")
                minlocal = opt.fminbound(f, evalgrid[0], evalgrid[1], full_output=True)
                if minlocal[1] < minglobal[1]:
                    if verbose: print("found a new minimum during grid search")
                    minglobal = minlocal[0:2]
            if resultgrid[-1] < resultgrid[-2]:
                if verbose:
                    print("resultgrid[-1]<resultgrid[-2] --> outer boundary point is a local optimum; expand search bounded between the grid points")
                minlocal = opt.fminbound(f, evalgrid[-2], evalgrid[-1], full_output=True)
                if minlocal[1] < minglobal[1]:
                    if verbose: print("found a new minimum during grid search")
                    minglobal = minlocal[0:2]
        if verbose: print("exploring triplets with brent search")
        onebrent = False
        # if any triplet is found where the inner point is a local optimum, expand the search
        for i in range(resultgrid.shape[0] - 2):
            if (resultgrid[i + 1] < resultgrid[i + 2]) and (resultgrid[i + 1] < resultgrid[i]):
                onebrent = True
                if verbose: print("found triplet to explore")
                minlocal = opt.brent(f,
                                     brack=(evalgrid[i], evalgrid[i + 1], evalgrid[i + 2]),
                                     full_output=True)
                if minlocal[1] < minglobal[1]:
                    minglobal = minlocal[0:2]
                    if verbose: print("found new minimum from brent search")
    if return_grid:
        return (minglobal[0], minglobal[1], evalgrid, resultgrid)
    else:
        return minglobal
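
The function above implements a common pattern: evaluate on a grid, then refine with a bounded or bracketed Brent search. A minimal self-contained sketch of the same pattern written directly against scipy (the name grid_then_brent and the test function are illustrative, not part of the source above):

import numpy as np
import scipy.optimize as opt

def grid_then_brent(f, minval, maxval, nGrid=20):
    evalgrid = np.linspace(minval, maxval, nGrid)
    resultgrid = np.array([f(x) for x in evalgrid])
    i = resultgrid.argmin()
    best = (evalgrid[i], resultgrid[i])
    # refine only if the grid minimum is an interior point, so that a
    # valid Brent bracket (left, mid, right) exists
    if 0 < i < nGrid - 1:
        xopt, fopt, _, _ = opt.brent(f,
                                     brack=(evalgrid[i - 1], evalgrid[i],
                                            evalgrid[i + 1]),
                                     full_output=True)
        if fopt < best[1]:
            best = (xopt, fopt)
    return best

print(grid_then_brent(lambda x: (x - 3.7)**2, 0.0, 10.0))  # ~(3.7, 0.0)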
Esempio n. 48
0
    def _mle_opt(i, brck):
        def _eval_mle(lmb, data):
            # Function to minimize
            return -_yj_llf(data, lmb)

        return optimize.brent(_eval_mle, brack=brck, args=(i,))
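
This fragment is the internal MLE step behind SciPy's Yeo-Johnson transform (`i` is the data array, passed through to _eval_mle via args). A hedged sketch of the equivalent public API, assuming scipy >= 1.2 (the random data is arbitrary):

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
data = rng.exponential(size=500)

lmbda = stats.yeojohnson_normmax(data)           # Brent search over lambda
print(lmbda, stats.yeojohnson_llf(lmbda, data))  # log-likelihood at the optimum
xt, lmbda_fit = stats.yeojohnson(data)           # transform + fitted lambda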
Esempio n. 49
0
import numpy as np
from scipy.optimize import brent, brentq


def minerr_transit_fit(flux, sigma, model):
    r"""
    Optimum scaled transit depth for data with lower bounds on errors

    Find the value of the scaling factor s that provides the best fit of the
    model m = 1 + s*(model-1) to the normalised input fluxes. It is assumed
    that the nominal standard error(s) provided in sigma are lower bounds to
    the true standard errors on the flux measurements. [1]_ The probability
    distribution for the true standard errors is assumed to be

    .. math::
        P(\sigma_{\rm true} | \sigma) = \sigma/\sigma_{\rm true}^2 

    :param flux: Array of normalised flux measurements

    :param sigma: Lower bound(s) on standard error for flux - array or scalar

    :param model: Transit model to be scaled

    :returns: s, sigma_s

.. rubric:: References
.. [1] Sivia, D.S. & Skilling, J., Data Analysis - A Bayesian Tutorial, 2nd
   ed., section 8.3.1

    """
    N = len(flux)
    if N < 3:
        return np.nan, np.nan

    def _negloglike(s, flux, sigma, model):
        model = 1 + s * (model - 1)
        Rsq = ((model - flux) / sigma)**2
        # In the limit Rsq -> 0, log-likelihood -> log(0.5)
        x = np.full_like(Rsq, np.log(0.5))
        _j = Rsq > np.finfo(0.0).eps
        x[_j] = np.log((1 - np.exp(-0.5 * Rsq[_j])) / Rsq[_j])
        return -np.sum(x)

    def _loglikediff(s, loglike_0, flux, sigma, model):
        return loglike_0 + _negloglike(s, flux, sigma, model)

    if np.min(model) == 1:
        return 0, 0
    # Bracket the minimum of _negloglike
    s_min = 0
    fa = _negloglike(s_min, flux, sigma, model)
    s_mid = 1
    fb = _negloglike(s_mid, flux, sigma, model)
    #print('s_min, fa, s_mid, fb',s_min, fa, s_mid, fb)
    if fb < fa:
        s_max = 2
        fc = _negloglike(s_max, flux, sigma, model)
        while fc < fb:
            s_max = 2 * s_max
            fc = _negloglike(s_max, flux, sigma, model)
    else:
        s_max = s_mid
        fc = fb
        s_mid = 0.5
        fb = _negloglike(s_mid, flux, sigma, model)
        while fb > fa:
            if s_mid < 2**-16:
                return 0, 0
            s_mid = 0.5 * s_mid
            fb = _negloglike(s_mid, flux, sigma, model)

    #print('s_min, fa, s_mid, fb, s_max, fc',s_min, fa, s_mid, fb, s_max, fc)
    s_opt, _f, _, _ = brent(_negloglike,
                            args=(flux, sigma, model),
                            brack=(s_min, s_mid, s_max),
                            full_output=True)
    loglike_0 = -_f - 0.5
    s_hi = s_max
    f_hi = _loglikediff(s_hi, loglike_0, flux, sigma, model)
    while f_hi < 0:
        s_hi = 2 * s_hi
        f_hi = _loglikediff(s_hi, loglike_0, flux, sigma, model)
    s_hi = brentq(_loglikediff,
                  s_opt,
                  s_hi,
                  args=(loglike_0, flux, sigma, model))
    s_err = s_hi - s_opt
    #print('s_opt,  s_err',s_opt, s_err)
    return s_opt, s_err
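
A hedged usage sketch for minerr_transit_fit with synthetic data; the box-shaped transit, the injected depth scaling (s = 0.8) and the noise level are arbitrary choices:

rng = np.random.default_rng(1)
t = np.linspace(-0.1, 0.1, 200)
model = np.where(np.abs(t) < 0.05, 0.99, 1.0)    # 1% box-shaped transit
flux = 1 + 0.8 * (model - 1) + rng.normal(0, 2e-4, t.size)
sigma = np.full_like(flux, 2e-4)

s, s_err = minerr_transit_fit(flux, sigma, model)
print(s, s_err)   # s should come out near the injected 0.8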
Esempio n. 50
0
import numpy as np
from scipy import optimize

def f(x):
    return 4*x**3 + (x-2)**2 + x**4

# fmin_bfgs can be used to find a (local) minimum of the function:
x_min = optimize.fmin_bfgs(f, -2)
# note: an iterative method, so the result depends on the starting point
print(x_min)

>>>
Optimization terminated successfully.
         Current function value: -3.506641
         Iterations: 5
         Function evaluations: 24
         Gradient evaluations: 8
[-2.67298151]

# these optimizers can also be called directly
optimize.brent(f)
=> 0.46961743402759754
optimize.fminbound(f, -4, 2)
=> -2.6729822917513886


# ---------- Root finding ----------
omega_c = 3.0
def f(omega):
    # a transcendental equation: resonance frequencies of a low-Q SQUID terminated microwave resonator
    return np.tan(2*np.pi*omega) - omega_c/omega

optimize.fsolve(f, 0.1)

=> array([ 0.23743014])
# beware: an iterative root finder returns only the one root nearest the start, while tan(2*pi*omega) - omega_c/omega has many roots
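
A hedged sketch of one way around the single-root limitation: scan a grid for sign changes, skip the tan() poles, and refine each genuine crossing with brentq (the grid spacing and the pole-rejection threshold are arbitrary choices):

from scipy.optimize import brentq

grid = np.arange(0.01, 3.0, 0.001)
vals = f(grid)
roots = []
for a, b, fa, fb in zip(grid[:-1], grid[1:], vals[:-1], vals[1:]):
    # a sign flip with huge |f| on either side is a tan() pole, not a root
    if fa * fb < 0 and abs(fa) < 50 and abs(fb) < 50:
        roots.append(brentq(f, a, b))
print(roots)   # first entry ~0.23743, matching fsolve above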
Esempio n. 51
0
 def brent_optimize():
     # _eval_mle, brck and i come from the enclosing scope
     return optimize.brent(_eval_mle, brack=brck, args=(i, ))
Esempio n. 52
0
        yy2 = logistic_offset_p(xx, popti)
        if (not (noplot)):
            nxx = 100
            xx2 = np.arange(nxx) / float(nxx - 1)
            yy2 = logistic_offset_p(xx2, popti)
            line2 = plt.plot(xx2, yy2)
            plt.setp(line2,
                     linestyle='-',
                     linewidth=lwidth,
                     color=lcol1,
                     marker='None',
                     label=str2tex('$L$', usetex=usetex))

        # steepest curvature
        x_scaled = opt.brent(
            mcurvature,  # minimizes
            args=(dlogistic, d2logistic, popti[0], popti[1], popti[2]),
            brack=(xx[0], xx[-1]))
        curvatures = logistic_offset_p(x_scaled, popti)
        gx_scaled = curvatures  # g(n_thresh)
        mumax = np.amax(ee_masked[:, iobj])
        if (logistic_offset_p(x_scaled, popti) > 0.2) or (x_scaled < xx[0]):
            x_scaled = xx[0]
            gx_scaled = np.min(ee_masked[:, iobj]) / mumax
        cutoff1 = gx_scaled  # in 0-1 range # g(n_thresh)
        cutoff_obj[
            iobj] = cutoff1 * mumax  # in EE range  # g(n_thresh)*mu_thresh
        x33 = np.arange(0.0125, 1.03, 0.0345)
        y33 = np.array([curvatures for ii in range(np.shape(x33)[0])])
        line6 = plt.plot(x33, y33)
        plt.setp(line6,
                 linestyle='None',
Esempio n. 53
0
 def optimizer(func, args):
     # `brack` comes from the enclosing scope
     return optimize.brent(func, brack=brack, args=args)
Esempio n. 54
0
 def bisect_losses(self, fn, max_l):
     ck = self.get_ck()
     fstar = lambda eta: ck * fn(eta) + eta
     # (0, max_l) is a two-point starting interval; brent expands it itself
     return sopt.brent(fstar, brack=(0, max_l))
Esempio n. 55
0
File: eee.py Progetto: Mazzol/pyeee
def eee(func, *args, **kwargs):
    """
    Parameter screening using Efficient/Sequential Elementary Effects of
    Cuntz, Mai et al. (Water Res Research, 2015).

    Note, the input function must be callable as `func(x)`.

    Parameters
    ----------
    func : callable
        Python function callable as `func(x)` with `x` the function parameters.
    lb : array_like
        Lower bounds of parameters.
    ub : array_like
        Upper bounds of parameters.
    x0 : array_like, optional
        Parameter values used with `mask==0`.
    mask : array_like, optional
        Include (1,True) or exclude (0,False) parameters in screening (default: include all parameters).
    ntfirst : int, optional
        Number of trajectories in first step of sequential elementary effects (default: 5).
    ntlast : int, optional
        Number of trajectories in last step of sequential elementary effects (default: 5).
    nsteps : int, optional
        Number of intervals for each trajectory (default: 6)
    weight : boolean, optional
        If False, use the arithmetic mean mu* for each parameter if function has multiple outputs,
        such as the mean mu* of each time step of a time series (default).

        If True, return weighted mean mu*, weighted by sd.
    seed : int or array_like
        Seed for numpy's random number generator (default: None).
    processes : int, optional
        The number of processes to use to evaluate objective function and constraints (default: 1).
    pool : `schwimmbad` pool object, optional
        Generic map function used from module `schwimmbad <https://schwimmbad.readthedocs.io/en/latest/>`_,
        which provides, serial, multiprocessor, and MPI mapping functions (default: None).

        The pool is chosen with:

            schwimmbad.choose_pool(mpi=True/False, processes=processes).

        The pool will be chosen automatically if pool is None.

        MPI pools can only be opened and closed once. If you want to use screening several
        times in one program, then you have to choose the pool, pass it to eee,
        and later close the pool in the calling program.

    verbose : int, optional
        Print progress report during execution if verbose>0 (default: 0).
    logfile : File handle or logfile name
        File name of possible log file (default: None = no logfile will be written).
    plotfile : Plot file name
        File name of possible plot file with fit of logistic function to mu* of first trajectories
        (default: None = no plot produced).

    Returns
    -------
    mask : ndarray
        (len(lb),) Mask with 1=informative and 0=uninformative model parameters, to be used with '&' on input mask.

    See Also
    --------
    :func:`~pyeee.screening.screening` : Elementary Effects, same as

    :func:`~pyeee.screening.ee` : Elementary Effects

    Examples
    --------
    >>> import numpy as np
    >>> import pyeee
    >>> seed = 1023
    >>> np.random.seed(seed=seed)
    >>> npars = 10
    >>> lb    = np.zeros(npars)
    >>> ub    = np.ones(npars)
    >>> ntfirst = 10
    >>> ntlast  = 5
    >>> nsteps  = 6
    >>> out = pyeee.eee(pyeee.K, lb, ub, x0=None, mask=None, ntfirst=ntfirst, ntlast=ntlast, nsteps=nsteps)
    >>> print(np.where(out)[0] + 1)
    [1 2 3 4 6]


    History
    -------
    Written,  Matthias Cuntz, Nov 2017
    Modified, Matthias Cuntz, Jan 2018 - weight
                              Nov 2019 - plotfile, numpy docstring format
              Matthias Cuntz, Jan 2020 - x0 optional
                                       - verbose keyword
                                       - distinguish iterable and array_like parameter types
              Matthias Cuntz, Feb 2020 - ntsteps -> nsteps
                                       - check if logfile is string instead of checking for file handle
    """
    # Get keyword arguments
    # This allows mixing keyword arguments of eee and keyword arguments to be passed to optimiser.
    # The mixed syntax eee(func, *args, logfile=None, **kwargs) is only working in Python 3
    # so need a workaround in Python 2, i.e. read all as keyword args and take out the keywords for eee.
    x0        = kwargs.pop('x0', None)
    mask      = kwargs.pop('mask', None)
    ntfirst   = kwargs.pop('ntfirst', 5)
    ntlast    = kwargs.pop('ntlast', 5)
    nsteps    = kwargs.pop('nsteps', 6)
    weight    = kwargs.pop('weight', False)
    seed      = kwargs.pop('seed', None)
    processes = kwargs.pop('processes', 1)
    pool      = kwargs.pop('pool', None)
    verbose   = kwargs.pop('verbose', 0)
    logfile   = kwargs.pop('logfile', None)
    plotfile  = kwargs.pop('plotfile', None)

    # Set up MPI if available
    try:
        from mpi4py import MPI
        comm  = MPI.COMM_WORLD
        csize = comm.Get_size()
        crank = comm.Get_rank()
    except ImportError:
        comm  = None
        csize = 1
        crank = 0

    # Logfile
    if crank == 0:
        if logfile is None:
            lfile = None
        else:
            # haswrite = getattr(logfile, "write", None)
            # if haswrite is None:
            #     lfile = open(logfile, "w")
            # else:
            #     if not callable(haswrite):
            #         lfile = logfile
            #     else:
            #         raise InputError('x0 must be given if mask is set')
            if isinstance(logfile, str):
                lfile = open(logfile, "w")
            else:
                lfile = logfile
    else:
        lfile = None

    # Start
    if crank == 0:
        if (verbose > 0):
            tee('Start screening in eee.', file=lfile)
        else:
            if lfile is not None:
                print('Start screening in eee.', file=lfile)

    # Check
    assert len(args) == 2, 'lb and ub must be given as arguments.'
    lb, ub = args[:2]
    npara = len(lb)
    if crank == 0:
        assert len(lb) == len(ub), 'Lower and upper bounds do not have the same size.'
    lb = np.array(lb)
    ub = np.array(ub)

    # mask
    if mask is None:
        ix0    = np.ones(npara)
        imask  = np.ones(npara, dtype=bool)
        iimask = np.arange(npara, dtype=int)
        nmask  = npara
    else:
        if x0 is None:
            raise InputError('x0 must be given if mask is set')
        ix0    = np.copy(x0)
        imask  = np.copy(mask)
        iimask = np.where(imask)[0]
        nmask  = iimask.size
        if nmask == 0:
            if crank == 0:
                if (verbose > 0):
                    tee('\nAll parameters masked, nothing to do.', file=lfile)
                    tee('Finished screening in eee.', file=lfile)
                else:
                    if lfile is not None:
                        print('\nAll parameters masked, nothing to do.', file=lfile)
                        print('Finished screening in eee.', file=lfile)
                if logfile is not None: lfile.close()
            # Return all true
            if mask is None:
                return np.ones(len(lb), dtype=bool)
            else:
                return mask
    if crank == 0:
        if (verbose > 0):
            tee('\nScreen unmasked parameters: ', nmask, iimask+1, file=lfile)
        else:
            if lfile is not None:
                print('\nScreen unmasked parameters: ', nmask, iimask+1, file=lfile)

    # Seed random number generator
    if seed is not None: np.random.seed(seed=seed)  # same on all ranks because trajectories are sampled on all ranks

    # Choose the right mapping function: single, multiprocessor or mpi
    if pool is None:
        import schwimmbad
        ipool = schwimmbad.choose_pool(mpi=(csize > 1), processes=processes)
    else:
        ipool = pool

    # Step 1 of Cuntz et al. (2015) - first screening with ntfirst trajectories, calc mu*
    res = screening( # returns (npara,3) with mu*, mu, std if nt>1
        func, lb, ub, ntfirst,
        x0=ix0, mask=imask,
        nsteps=nsteps, ntotal=10*ntfirst,
        processes=processes, pool=ipool,
        verbose=0)
    if res.ndim > 2:
        if weight:
            mustar = np.sum(res[:, iimask, 2] * res[:, iimask, 0],axis=0) / np.sum(res[:, iimask, 2], axis=0)
        else:
            mustar = np.mean(res[:, iimask, 0], axis=0)
    else:
        mustar = res[iimask, 0]

    # Step 2 of Cuntz et al. (2015) - calc eta*
    mumax  = np.amax(mustar)
    xx     = np.arange(nmask) / float(nmask-1)
    iisort = np.argsort(mustar)
    yy     = mustar[iisort] / mumax

    if crank == 0:
        if (verbose > 0):
            tee('\nSorted means of absolute elementary effects (mu*): ', mustar[iisort], file=lfile)
            tee('Normalised mu* = eta*: ', yy, file=lfile)
            tee('Corresponding to parameters: ', iimask[iisort] + 1, file=lfile)
        else:
            if lfile is not None:
                print('\nSorted means of absolute elementary effects (mu*): ', mustar[iisort], file=lfile)
                print('Normalised mu* = eta*: ', yy, file=lfile)
                print('Corresponding to parameters: ', iimask[iisort] + 1, file=lfile)

    # Step 3.1 of Cuntz et al. (2015) - fit logistic function
    #               [y-max,    steepness,                       inflection point, offset]
    pini = np.array([yy.max(), (yy.max() - yy.min()) / xx.max(), 0.5 * xx.max(), yy.min()])
    plogistic, f, d = opt.fmin_l_bfgs_b(cost_square,
                                        pini,
                                        args=(logistic_offset_p, xx, yy),
                                        approx_grad=1,
                                        bounds=[(None, None), (None, None), (None, None), (None, None)],
                                        iprint=0,
                                        disp=0)

    # Step 3.2 of Cuntz et al. (2015) - determine point of steepest curvature -> eta*_thresh
    def mcurvature(*args, **kwargs):
        return -curvature(*args, **kwargs)

    x_K = opt.brent(mcurvature,                     # x_K
                    args=(dlogistic, d2logistic, plogistic[0], plogistic[1], plogistic[2]),
                    brack=(xx[0], xx[-1]))
    curvmax    = logistic_offset_p(x_K, plogistic)  # L(x_K)
    eta_thresh = curvmax                            # eta*_thresh = L(x_K) # in range 0-1
    if (curvmax > 0.2) or (x_K < xx[0]):
        x_K        = xx[0]                          # x_K = min(x)
        eta_thresh = np.min(mustar) / mumax         # eta*_thresh = min(mu*)/max(mu*)
    mu_thresh = eta_thresh * mumax                  # mu*_thresh = eta*_thresh*max(mu*)

    if crank == 0:
        if (verbose > 0):
            tee('\nThreshold eta*_thresh, mu*_thresh: ', eta_thresh, mu_thresh, file=lfile)
            tee('L(x_K): ', logistic_offset_p(x_K, plogistic), file=lfile)
            tee('p_opt of L: ', plogistic, file=lfile)
        else:
            if lfile is not None:
                print('\nThreshold eta*_thresh, mu*_thresh: ', eta_thresh, mu_thresh, file=lfile)
                print('L(x_K): ', logistic_offset_p(x_K, plogistic), file=lfile)
                print('p_opt of L: ', plogistic, file=lfile)

    # Plot first mu* of elementary effects with logistic function and threshold
    if crank == 0:
        if plotfile is not None:
            try:
                import matplotlib as mpl
                mpl.use('Agg')
                import matplotlib.pyplot as plt
                mpl.rcParams['font.family'] = 'sans-serif'
                mpl.rcParams['font.sans-serif'] = 'Arial'  # Arial, Verdana
                mpl.rc('savefig', dpi=300, format='png')
                if npara > 99:
                    mpl.rc('font', size=8)
                else:
                    mpl.rc('font', size=11)
                fig = plt.figure()
                sub = plt.subplot(111)
                yy = mustar[iisort]
                line1 = sub.plot(xx, yy, 'ro')
                nn = 1000
                xx2 = xx.min() + np.arange(nn) / float(nn - 1) * (xx.max() - xx.min())
                yy2 = logistic_offset_p(xx2, plogistic) * mumax
                line2 = sub.plot(xx2, yy2, 'b-')
                xmin, xmax = sub.get_xlim()
                line3 = sub.plot([xmin, xmax], [mu_thresh, mu_thresh], 'k-')
                if npara > 99:
                    xnames = ['{:03d}'.format(i) for i in iimask[iisort] + 1]
                else:
                    xnames = ['{:02d}'.format(i) for i in iimask[iisort] + 1]
                plt.setp(sub, xticks=xx, xticklabels=xnames)
                plt.setp(sub, xlabel='Parameter')
                plt.setp(sub, ylabel=r'$\mu*$')
                fig.savefig(plotfile, transparent=False, bbox_inches='tight', pad_inches=0.035)
                plt.close(fig)
            except ImportError:
                pass

    # Step 4 of Cuntz et al. (2015) - Discard from next steps all parameters with
    #                                 eta* >= eta*_thresh, i.e. mu* >= mu*_thresh
    imask[iimask] = imask[iimask] & (mustar < mu_thresh)

    if np.all(~imask):
        if crank == 0:
            if (verbose > 0):
                tee('\nNo more parameters to screen, i.e. all (unmasked) parameters are informative.', file=lfile)
                tee('Finished screening in eee.', file=lfile)
            else:
                if lfile is not None:
                    print('\nNo more parameters to screen, i.e. all (unmasked) parameters are informative.', file=lfile)
                    print('Finished screening in eee.', file=lfile)
            _cleanup(lfile, pool, ipool)
        # Return all true
        if mask is None:
            return np.ones(len(lb), dtype=bool)
        else:
            return mask

    # Step 5 and 6 of Cuntz et al. (2015) - Next trajectory with remaining parameters.
    #                                       Discard all parameters with |EE| >= mu*_thresh
    #                                     - Repeat until no |EE| >= mu*_thresh
    niter = 1
    donext = True
    while donext:
        if crank == 0:
            if (verbose > 0):
                tee('\nParameters remaining for iteration ', niter, ':', np.where(imask)[0] + 1, file=lfile)
            else:
                if lfile is not None:
                    print('\nParameters remaining for iteration ', niter, ':', np.where(imask)[0] + 1, file=lfile)
        iimask = np.where(imask)[0]
        res = screening( # returns EE(parameters) if nt=1
            func, lb, ub, 1,
            x0=ix0, mask=imask,
            nsteps=nsteps, ntotal=10,
            processes=processes, pool=ipool,
            verbose=0)

        # absolute EE
        if res.ndim > 2:
            if weight:
                mustar = np.sum(res[:, iimask, 2] * res[:, iimask, 0], axis=0) / np.sum(res[:, iimask, 2], axis=0)
            else:
                mustar = np.mean(res[:, iimask, 0], axis=0)
        else:
            mustar = res[iimask, 0]

        if crank == 0:
            if (verbose > 0):
                tee('Absolute elementary effects |EE|: ', mustar, file=lfile)
            else:
                if lfile is not None:
                    print('Absolute elementary effects |EE|: ', mustar, file=lfile)

        imask[iimask] = imask[iimask] & (mustar < mu_thresh)

        if np.all(~imask):
            if crank == 0:
                if (verbose > 0):
                    tee('\nNo more parameters to screen, i.e. all (unmasked) parameters are informative.', file=lfile)
                    tee('Finished screening in eee.', file=lfile)
                else:
                    if lfile is not None:
                        print('\nNo more parameters to screen, i.e. all (unmasked) parameters are informative.', file=lfile)
                        print('Finished screening in eee.', file=lfile)
                _cleanup(lfile, pool, ipool)
            # Return all true
            if mask is None:
                return np.ones(len(lb), dtype=bool)
            else:
                return mask

        # Step 6 of Cuntz et al. (2015) - Repeat until no |EE| >= mu*_thresh
        if np.all(mustar < mu_thresh): donext = False

        niter += 1

    # Step 7 of Cuntz et al. (2015) - last screening with ntlast trajectories
    #                                 all parameters with mu* < mu*_thresh are final noninformative parameters
    if crank == 0:
        if (verbose > 0):
            tee('\nParameters remaining for last screening:', np.where(imask)[0] + 1, file=lfile)
        else:
            if lfile is not None:
                print('\nParameters remaining for last screening:', np.where(imask)[0] + 1, file=lfile)

    iimask = np.where(imask)[0]

    res = screening( # (npara,3) with mu*, mu, std if nt>1
        func, lb, ub, ntlast,
        x0=ix0, mask=imask,
        nsteps=nsteps, ntotal=10 * ntlast,
        processes=processes, pool=ipool,
        verbose=0)
    if res.ndim > 2:
        if weight:
            mustar = np.sum(res[:, iimask, 2] * res[:, iimask, 0], axis=0) / np.sum(res[:, iimask, 2], axis=0)
        else:
            mustar = np.mean(res[:, iimask, 0], axis=0)
    else:
        mustar = res[iimask, 0]

    if crank == 0:
        if ntlast > 1:
            if (verbose > 0):
                tee('Final mu*: ', mustar, file=lfile)
            else:
                if lfile is not None:
                    print('Final mu*: ', mustar, file=lfile)
        else:
            if (verbose > 0):
                tee('Final absolute elementary effects |EE|: ', mustar, file=lfile)
            else:
                if lfile is not None:
                    print('Final absolute elementary effects |EE|: ', mustar, file=lfile)

    imask[iimask] = imask[iimask] & (mustar < mu_thresh)

    if np.all(~imask):
        if crank == 0:
            if (verbose > 0):
                tee('\nNo more parameters left after screening, i.e. all (unmasked) parameters are informative.',
                    file=lfile)
                tee('Finished screening in eee.', file=lfile)
            else:
                if lfile is not None:
                    print('\nNo more parameters left after screening, i.e. all (unmasked) parameters are informative.',
                    file=lfile)
                    print('Finished screening in eee.', file=lfile)
            _cleanup(lfile, pool, ipool)
        # Return all true
        if mask is None:
            return np.ones(len(lb), dtype=bool)
        else:
            return mask

    # Return mask with unmasked informative model parameters (to be used with 'and' on initial mask)
    if mask is None:
        out = ~imask
    else:
        out = (~imask) & mask  # (true where now zero, i.e. were masked or informative) and (initial mask)

    if crank == 0:
        if (verbose > 0):
            tee('\nFinal informative parameters:', np.sum(out), np.where(out)[0] + 1, file=lfile)
            tee('Final noninformative parameters:', np.sum(imask), np.where(imask)[0] + 1, file=lfile)
            tee('\nFinished screening in eee.', file=lfile)
        else:
            if lfile is not None:
                print('\nFinal informative parameters:', np.sum(out), np.where(out)[0] + 1, file=lfile)
                print('Final noninformative parameters:', np.sum(imask), np.where(imask)[0] + 1, file=lfile)
                print('\nFinished screening in eee.', file=lfile)
        # Close logfile and pool
        _cleanup(lfile, pool, ipool)

    return out
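
Steps 3.1-3.2 above reduce to: fit a logistic to the sorted, normalised mu* values, then locate the point of steepest curvature by minimising the negative curvature with a Brent search. A self-contained sketch with simple stand-ins for pyeee's logistic_offset_p / curvature helpers (all names below are illustrative, not pyeee's API):

import numpy as np
import scipy.optimize as opt

def logistic_offset(x, L, k, x0, off):
    return L / (1.0 + np.exp(-k * (x - x0))) + off

def num_curvature(x, p, h=1e-4):
    # curvature kappa = f'' / (1 + f'^2)**1.5, via central differences
    f = lambda z: logistic_offset(z, *p)
    d1 = (f(x + h) - f(x - h)) / (2 * h)
    d2 = (f(x + h) - 2 * f(x) + f(x - h)) / h**2
    return d2 / (1.0 + d1**2)**1.5

# synthetic sorted, normalised mu* values (eta*)
xx = np.linspace(0.0, 1.0, 10)
yy = logistic_offset(xx, 0.9, 12.0, 0.6, 0.05)

p, _ = opt.curve_fit(logistic_offset, xx, yy, p0=[yy.max(), 10.0, 0.5, yy.min()])
x_K = opt.brent(lambda x: -num_curvature(x, p), brack=(xx[0], xx[-1]))
eta_thresh = logistic_offset(x_K, *p)
print(x_K, eta_thresh)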
Esempio n. 56
0
def gpt_phasing(path_to_input_file,
                path_to_gpt_bin="",
                path_to_phasing_dist=None,
                verbose=False,
                debug_flag=False):

    settings = {}

    if (verbose == True):
        print("\nPhasing: " + path_to_input_file)

    # Interpret input arguments
    split_input_file_path = path_to_input_file.split('/')
    gpt_input_filename = split_input_file_path[-1]

    phase_input_filename = gpt_input_filename.replace('.in', '.temp.in')
    finished_phase_input_filename = gpt_input_filename.replace(
        '.in', '.phased.in')

    path_to_input_file = ''
    for x in range(len(split_input_file_path) - 1):
        path_to_input_file = path_to_input_file + split_input_file_path[x] + '/'

    gpt_input_text = readinfile(path_to_input_file + gpt_input_filename)

    # Replace the usual input distribution with a single particle at the centroid
    if (path_to_phasing_dist):
        dist_line_index = find_lines_containing(gpt_input_text, "setfile")[0]
        gpt_input_text[
            dist_line_index] = f'setfile("beam", "{path_to_phasing_dist}");'

    # Find all lines marked for phasing
    amplitude_flag_indices = find_lines_containing(gpt_input_text,
                                                   "phasing_amplitude_")
    amplitude_flag_indices = sort_lines_by_first_integer(
        gpt_input_text, amplitude_flag_indices)

    amplitude_indices = []
    desired_amplitude = []
    for index in amplitude_flag_indices:
        variable_name = get_variable_with_string_value(gpt_input_text[index])

        amp_index = find_line_with_variable_name(gpt_input_text, variable_name)
        amplitude_indices.append(amp_index)
        desired_amplitude.append(
            get_variable_on_line(gpt_input_text, amp_index))
        settings[variable_name] = desired_amplitude[-1]

    oncrest_flag_indices = find_lines_containing(gpt_input_text,
                                                 "phasing_on_crest_")
    oncrest_flag_indices = sort_lines_by_first_integer(gpt_input_text,
                                                       oncrest_flag_indices)

    oncrest_indices = []
    oncrest_names = []
    for index in oncrest_flag_indices:
        variable_name = get_variable_with_string_value(gpt_input_text[index])
        oncrest_names.append(variable_name)
        oncrest_index = find_line_with_variable_name(gpt_input_text,
                                                     variable_name)
        oncrest_indices.append(oncrest_index)
        settings[variable_name] = 0

    relative_flag_indices = find_lines_containing(gpt_input_text,
                                                  "phasing_relative_")
    relative_flag_indices = sort_lines_by_first_integer(
        gpt_input_text, relative_flag_indices)

    relative_indices = []
    desired_relative_phase = []
    for index in relative_flag_indices:
        variable_name = get_variable_with_string_value(gpt_input_text[index])
        rel_index = find_line_with_variable_name(gpt_input_text, variable_name)
        relative_indices.append(rel_index)
        desired_relative_phase.append(
            get_variable_on_line(gpt_input_text, rel_index))
        settings[variable_name] = desired_relative_phase[-1]

    gamma_flag_indices = find_lines_containing(gpt_input_text,
                                               "phasing_gamma_")
    gamma_flag_indices = sort_lines_by_first_integer(gpt_input_text,
                                                     gamma_flag_indices)

    gamma_indices = []
    gamma_names = []
    for index in gamma_flag_indices:
        variable_name = get_variable_with_string_value(gpt_input_text[index])
        gamma_names.append(variable_name)
        gamma_index = find_line_with_variable_name(gpt_input_text,
                                                   variable_name)
        gamma_indices.append(gamma_index)
        settings[variable_name] = 1

    # Set up phasing input file
    phase_input_text = gpt_input_text

    initial_space_charge = get_variable_by_name(phase_input_text,
                                                'space_charge')
    initial_couplers_on = get_variable_by_name(phase_input_text, 'couplers_on')
    initial_viewscreens_on = get_variable_by_name(phase_input_text,
                                                  'viewscreens_on')

    phase_input_text = set_variable_by_name(phase_input_text, 'auto_phase', 1,
                                            True)
    phase_input_text = set_variable_by_name(phase_input_text, 'space_charge',
                                            0, False)
    phase_input_text = set_variable_by_name(phase_input_text, 'couplers_on', 0,
                                            False)
    phase_input_text = set_variable_by_name(phase_input_text, 'viewscreens_on',
                                            0, False)

    #print(gamma_names,oncrest_names)

    # turn off all cavities
    for index in amplitude_indices:
        phase_input_text = set_variable_on_line(phase_input_text, index, 0.0)

    # set relative phases to zero
    for index in relative_indices:
        phase_input_text = set_variable_on_line(phase_input_text, index, 0.0)

    # set gammas to one
    for index in gamma_indices:
        phase_input_text = set_variable_on_line(phase_input_text, index, 1.0)

    # phase the cavities

    phase_step = 20
    phase_test = numpy.arange(0, 360, phase_step)

    if (verbose == True):
        print(" ")

    for cav_ii in range(len(amplitude_indices)):

        if desired_amplitude[cav_ii] > 0:

            # Tell script which cavity we are phasing

            phase_input_text = set_variable_by_name(phase_input_text,
                                                    'cavity_phasing_index',
                                                    cav_ii, False)

            # turn on the cavity
            phase_input_text = set_variable_on_line(phase_input_text,
                                                    amplitude_indices[cav_ii],
                                                    desired_amplitude[cav_ii])

            gamma_test = []
            for phase in phase_test:

                gamma = run_gpt_phase(
                    phase, path_to_gpt_bin, phase_input_text,
                    path_to_input_file + phase_input_filename,
                    oncrest_indices[cav_ii], debug_flag)

                gamma_test.append(gamma)

            gamma_test_indices = numpy.argsort(gamma_test)

            best_phase = phase_test[gamma_test_indices[-1]]
            left_bound = best_phase - phase_step
            right_bound = best_phase + phase_step

            bracket = [left_bound, best_phase, right_bound]

            if (verbose == True):
                print("Cavity " + str(cav_ii) + ": Bracketed between " +
                      str(left_bound) + " and " + str(right_bound))

            if (numpy.std(gamma_test) == 0):
                if (gamma_test[0] == 1.0):
                    sys.exit(
                        "ERROR: No particles reached a screen for any attempted phase."
                    )
                else:
                    sys.exit("ERROR: Gamma did not depend on cavity " +
                             str(cav_ii) + " phase, gamma = " +
                             str(gamma_test[0]))

            brent_output = sp.brent(
                func=neg_run_gpt_phase,
                args=(path_to_gpt_bin, phase_input_text,
                      path_to_input_file + phase_input_filename,
                      oncrest_indices[cav_ii], debug_flag),
                brack=bracket,
                tol=1.0e-5,
                full_output=1,
                maxiter=1000)

            best_phase = brent_output[0]
            best_gamma = -brent_output[1]

            phase_input_text = set_variable_on_line(phase_input_text,
                                                    oncrest_indices[cav_ii],
                                                    best_phase)
            phase_input_text = set_variable_on_line(
                phase_input_text, relative_indices[cav_ii],
                desired_relative_phase[cav_ii])

            final_gamma = run_gpt(path_to_gpt_bin, phase_input_text,
                                  path_to_input_file + phase_input_filename,
                                  debug_flag)

            if (len(gamma_indices) > 0):
                phase_input_text = set_variable_on_line(
                    phase_input_text, gamma_indices[cav_ii], final_gamma)

            if (verbose == True):
                print("Cavity " + str(cav_ii) + ": Best phase = " +
                      str(best_phase) + ", final gamma = " + str(final_gamma))
                print(" ")

        else:

            best_phase = 0.0
            phase_input_text = set_variable_on_line(phase_input_text,
                                                    oncrest_indices[cav_ii],
                                                    best_phase)
            phase_input_text = set_variable_on_line(
                phase_input_text, relative_indices[cav_ii],
                desired_relative_phase[cav_ii])

            final_gamma = run_gpt(path_to_gpt_bin, phase_input_text,
                                  path_to_input_file + phase_input_filename,
                                  debug_flag)
            if (len(gamma_indices) > 0):
                phase_input_text = set_variable_on_line(
                    phase_input_text, gamma_indices[cav_ii], final_gamma)

            if (verbose == True):
                print("Skipping: Cavity " + str(cav_ii) + ": Best phase = " +
                      str(best_phase) + ", final gamma = " + str(final_gamma))
                print(" ")

        settings[oncrest_names[cav_ii]] = best_phase
        settings[gamma_names[cav_ii]] = final_gamma

    # Put back in the original settings, turn off phasing flags, set reference gamma
    phase_input_text = set_variable_by_name(phase_input_text, 'auto_phase', 0,
                                            True)
    phase_input_text = set_variable_by_name(phase_input_text, 'space_charge',
                                            initial_space_charge, False)
    phase_input_text = set_variable_by_name(phase_input_text, 'couplers_on',
                                            initial_couplers_on, False)
    phase_input_text = set_variable_by_name(phase_input_text, 'viewscreens_on',
                                            initial_viewscreens_on, False)

    # Write phased input file
    writeinfile(phase_input_text,
                path_to_input_file + finished_phase_input_filename)

    # Delete temporary input file
    trashclean(path_to_input_file + phase_input_filename, True)

    return (finished_phase_input_filename, settings)
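
A hedged usage sketch; the input-file path and GPT binary location below are assumptions for illustration:

phased_file, settings = gpt_phasing("sims/injector.in",
                                    path_to_gpt_bin="/opt/gpt/bin/",
                                    verbose=True)
print(phased_file)            # e.g. "injector.phased.in"
for name, value in settings.items():
    print(name, value)        # fitted phases, amplitudes and reference gammas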
Esempio n. 57
0
"""
  Author   : Yuxing Yan
  Date     : 6/6/2017
  email    : [email protected]
             [email protected]
"""

import numpy as np
from scipy import optimize
import matplotlib.pyplot as plt
# define a function
a = 3.4
b = 2.0
c = 0.8


def f(x):
    return a - b * np.exp(-(x - c)**2)


x = np.arange(-3, 3, 0.1)
y = f(x)
plt.title("y=a-b*exp(-(x-c)^2)")
plt.xlabel("x")
plt.ylabel("y")
plt.plot(x, y)
plt.show()

# find the minimum
solution = optimize.brent(f)
print(solution)
Esempio n. 58
0
    def main_fast(self, tair, par, vpd, wind, pressure, Ca):
        """
        Version as above but using a solver for Tleaf, rather than iterating

        Parameters:
        ----------
        tair : float
            air temperature (deg C)
        par : float
            Photosynthetically active radiation (umol m-2 s-1)
        vpd : float
            Vapour pressure deficit (kPa, needs to be in Pa, see conversion
            below)
        wind : float
            wind speed (m s-1)
        pressure : float
            air pressure (using constant) (Pa)
        Ca : float
            ambient CO2 concentration

        Returns:
        --------
        An : float
            net leaf assimilation (umol m-2 s-1)
        gs : float
            stomatal conductance (mol m-2 s-1)
        et : float
            transpiration (mol H2O m-2 s-1)
        """

        F = FarquharC3(theta_J=0.85,
                       peaked_Jmax=True,
                       peaked_Vcmax=True,
                       model_Q10=True,
                       gs_model=self.gs_model,
                       gamma=self.gamma,
                       g0=self.g0,
                       g1=self.g1,
                       D0=self.D0,
                       alpha=self.alpha)
        P = PenmanMonteith(self.leaf_width, self.leaf_absorptance)

        # set initial values
        dleaf = vpd
        dair = vpd
        Cs = Ca
        Tleaf = tair
        Tleaf_K = Tleaf + c.DEG_2_KELVIN

        (An, gsc) = F.calc_photosynthesis(Cs=Cs,
                                          Tleaf=Tleaf_K,
                                          Par=par,
                                          Jmax25=self.Jmax25,
                                          Vcmax25=self.Vcmax25,
                                          Q10=self.Q10,
                                          Eaj=self.Eaj,
                                          Eav=self.Eav,
                                          deltaSj=self.deltaSj,
                                          deltaSv=self.deltaSv,
                                          Rd25=self.Rd25,
                                          Hdv=self.Hdv,
                                          Hdj=self.Hdj,
                                          vpd=dleaf)

        # Solve new Tleaf
        from scipy import optimize
        Tleaf = optimize.brent(self.fx,
                               brack=(Tleaf - 15, Tleaf + 15),
                               args=(P, Tleaf, gsc, par, vpd, pressure, wind))
        #print(Tleaf)
        Tleaf_K = Tleaf + c.DEG_2_KELVIN
        (An, gsc) = F.calc_photosynthesis(Cs=Cs,
                                          Tleaf=Tleaf_K,
                                          Par=par,
                                          Jmax25=self.Jmax25,
                                          Vcmax25=self.Vcmax25,
                                          Q10=self.Q10,
                                          Eaj=self.Eaj,
                                          Eav=self.Eav,
                                          deltaSj=self.deltaSj,
                                          deltaSv=self.deltaSv,
                                          Rd25=self.Rd25,
                                          Hdv=self.Hdv,
                                          Hdj=self.Hdj,
                                          vpd=dleaf)

        # Clunky, but I can't be arsed to rewrite; need to get the other vars
        # back
        (et, le_et, gbH,
         gw) = self.calc_leaf_temp_solved(P, Tleaf, tair, gsc, par, vpd,
                                          pressure, wind)

        gbc = gbH * c.GBH_2_GBC
        Cs = Ca - An / gbc  # boundary layer of leaf
        if et == 0.0 or gw == 0.0:
            dleaf = dair
        else:
            dleaf = (et * pressure / gw) * c.PA_2_KPA  # kPa

        gsw = gsc * c.GSC_2_GSW

        return (An, gsw, et, le_et)
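
The key step above hands an energy-balance residual to optimize.brent with a bracket around air temperature. A toy stand-in illustrating the same pattern (the linearised "losses" are invented for illustration, not the model's real fx()):

from scipy import optimize

def energy_residual_sq(tleaf, tair):
    absorbed = 400.0                          # W m-2, assumed forcing
    emitted = 5.0 * (tleaf - tair) + 380.0    # toy linearised losses
    return (absorbed - emitted)**2

tair = 20.0
tleaf = optimize.brent(energy_residual_sq,
                       brack=(tair - 15, tair + 15),
                       args=(tair,))
print(tleaf)   # 24.0 for this toy balance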
Esempio n. 59
0
    def update(self, params):

        if self.model.weights is not None:
            warnings.warn(
                "weights not implemented for autoregressive cov_struct, using unweighted covariance estimate"
            )

        endog = self.model.endog_li
        time = self.model.time_li

        # Only need to compute this once
        if self.designx is not None:
            designx = self.designx
        else:
            designx = []
            for i in range(self.model.num_group):

                ngrp = len(endog[i])
                if ngrp == 0:
                    continue

                # Loop over pairs of observations within a cluster
                for j1 in range(ngrp):
                    for j2 in range(j1):
                        designx.append(
                            self.dist_func(time[i][j1, :], time[i][j2, :]))

            designx = np.array(designx)
            self.designx = designx

        scale = self.model.estimate_scale()
        varfunc = self.model.family.variance
        cached_means = self.model.cached_means

        # Weights
        var = 1. - self.dep_params**(2 * designx)
        var /= 1. - self.dep_params**2
        wts = 1. / var
        wts /= wts.sum()

        residmat = []
        for i in range(self.model.num_group):

            expval, _ = cached_means[i]
            stdev = np.sqrt(scale * varfunc(expval))
            resid = (endog[i] - expval) / stdev

            ngrp = len(resid)
            for j1 in range(ngrp):
                for j2 in range(j1):
                    residmat.append([resid[j1], resid[j2]])

        residmat = np.array(residmat)

        # Need to minimize this
        def fitfunc(a):
            dif = residmat[:, 0] - (a**designx) * residmat[:, 1]
            return np.dot(dif**2, wts)

        # Left bracket point
        b_lft, f_lft = 0., fitfunc(0.)

        # Center bracket point
        b_ctr, f_ctr = 0.5, fitfunc(0.5)
        while f_ctr > f_lft:
            b_ctr /= 2
            f_ctr = fitfunc(b_ctr)
            if b_ctr < 1e-8:
                self.dep_params = 0
                return

        # Right bracket point
        b_rgt, f_rgt = 0.75, fitfunc(0.75)
        while f_rgt < f_ctr:
            b_rgt = b_rgt + (1. - b_rgt) / 2
            f_rgt = fitfunc(b_rgt)
            if b_rgt > 1. - 1e-6:
                raise ValueError(
                    "Autoregressive: unable to find right bracket")

        from scipy.optimize import brent
        self.dep_params = brent(fitfunc, brack=[b_lft, b_ctr, b_rgt])
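
The bracket construction above (shrink the centre point until it dips below the left end, grow the right point until the function turns back up) is a reusable pattern for a parameter confined to [0, 1). A generic sketch with an arbitrary test function:

from scipy.optimize import brent

def bracketed_brent(f, b_lft=0.0, b_ctr=0.5, b_rgt=0.75):
    f_lft, f_ctr = f(b_lft), f(b_ctr)
    while f_ctr > f_lft:
        b_ctr /= 2
        f_ctr = f(b_ctr)
        if b_ctr < 1e-8:
            return b_lft                      # minimum sits at the left edge
    f_rgt = f(b_rgt)
    while f_rgt < f_ctr:
        b_rgt = b_rgt + (1.0 - b_rgt) / 2     # approach 1 from below
        f_rgt = f(b_rgt)
        if b_rgt > 1.0 - 1e-6:
            raise ValueError("unable to find right bracket")
    return brent(f, brack=[b_lft, b_ctr, b_rgt])

print(bracketed_brent(lambda a: (a - 0.3)**2))   # ~0.3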
Esempio n. 60
0
def _opt_1d(func, grad, hess, model, start, L1_wt, tol, check_step=True):
    """
    One-dimensional helper for elastic net.

    Parameters
    ----------
    func : function
        A smooth function of a single variable to be optimized
        with L1 penalty.
    grad : function
        The gradient of `func`.
    hess : function
        The Hessian of `func`.
    model : statsmodels model
        The model being fit.
    start : real
        A starting value for the function argument
    L1_wt : non-negative real
        The weight for the L1 penalty function.
    tol : non-negative real
        A convergence threshold.
    check_step : bool
        If True, check that the first step is an improvement and
        use bisection if it is not.  If False, return after the
        first step regardless.

    Notes
    -----
    ``func``, ``grad``, and ``hess`` have argument signature (x,
    model), where ``x`` is a point in the parameter space and
    ``model`` is the model being fit.

    If the log-likelihood for the model is exactly quadratic, the
    global minimum is returned in one step.  Otherwise numerical
    bisection is used.

    Returns
    -------
    The argmin of the objective function.
    """

    # Overview:
    # We want to minimize L(x) + L1_wt*abs(x), where L() is a smooth
    # loss function that includes the log-likelihood and L2 penalty.
    # This is a 1-dimensional optimization.  If L(x) is exactly
    # quadratic we can solve for the argmin exactly.  Otherwise we
    # approximate L(x) with a quadratic function Q(x) and try to use
    # the minimizer of Q(x) + L1_wt*abs(x).  But if this yields an
    # uphill step for the actual target function L(x) + L1_wt*abs(x),
    # then we fall back to an expensive line search.  The line search
    # is never needed for OLS.

    x = start
    f = func(x, model)
    b = grad(x, model)
    c = hess(x, model)
    d = b - c * x

    # The optimum is achieved by hard thresholding to zero
    if L1_wt > np.abs(d):
        return 0.

    # x + h is the minimizer of Q(x) + L1_wt*abs(x)
    if d >= 0:
        h = (L1_wt - b) / c
    elif d < 0:
        h = -(L1_wt + b) / c
    else:
        return np.nan

    # If the new point is not uphill for the target function, take it
    # and return.  This check is a bit expensive and unnecessary for
    # OLS.
    if not check_step:
        return x + h
    f1 = func(x + h, model) + L1_wt * np.abs(x + h)
    if f1 <= f + L1_wt * np.abs(x) + 1e-10:
        return x + h

    # Fallback for models where the loss is not quadratic
    from scipy.optimize import brent
    x_opt = brent(func, args=(model, ), brack=(x - 1, x + 1), tol=tol)
    return x_opt
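
A hedged usage sketch for _opt_1d on an exactly quadratic loss, where the analytic answer is the soft-threshold value max(0, 3 - L1_wt) = 2:

import numpy as np

func = lambda x, model: 0.5 * (x - 3.0)**2
grad = lambda x, model: x - 3.0
hess = lambda x, model: 1.0

x_opt = _opt_1d(func, grad, hess, model=None, start=0.0,
                L1_wt=1.0, tol=1e-8)
print(x_opt)   # 2.0, reached in a single Newton-style step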