Example 1
    def Optimize(self,model,gradient='exact',maxnumlinesearch=50,random_starts=2,verbose=False):
        
        self.gradient = gradient                     
        
        print('----------------------------------------------')
        print('          Optimizing Hyperparameters          ')
        print('---------------------------------------------- \n')
        
        print('DETAILS: ')
        print('--> Performing %i non-linear CG(s) with random initialization(s)' %(random_starts+1))
        print('--> Maximum number of line searches =  %i' %(maxnumlinesearch))
        
        if gradient == 'exact':
            print('--> Calculation of gradients: exact gradients')
        else:    
            print('--> Calculation of gradients: Forward Finite-Differencing')
        if model.M>5000:
            print('--> Gradients computed in parallel on %i cores \n' %(multiprocessing.cpu_count()))
        else:
            print('--> Gradients computed on 1 core \n')
        
        # initialize optimum parameters        
        opthyp,optML,i = CG.minimize(self,model,maxnumlinesearch=maxnumlinesearch, maxnumfuneval=None, red=1.0, verbose=verbose) 
        optrankfix = self.rank_fix
        print('CG run #1 done')
        # Discourage very small characteristic lengths        
        #if opthyp[1]>1.5:
        #    optML[-1] = np.inf 
        
        # Rerun optimization with random initializations
        for j in range(random_starts):
            self.hyp = 0.1*np.random.randint(-40,10,size=(len(opthyp)-1,1))
            self.hyp = np.vstack((self.hyp,np.random.randint(-2,6)))
            # Both branches of the original interpolate check used the same
            # formula, so a single assignment suffices.
            self.rank_fix = (math.exp(-self.hyp[0])**2)/1e6
            print(self.hyp)
            hyp, ML, i = CG.minimize(self,model,maxnumlinesearch=maxnumlinesearch, maxnumfuneval=None, red=1.0, verbose=verbose) 

            # If the new optimum is better than the last, save it.
            if ML[-1] != -np.inf and ML[-1] < optML[-1]: #and hyp[1]<1.5:
                optML = ML
                opthyp = hyp
                optrankfix = self.rank_fix
            print('--> CG run #%i done:' %(j+2)) 
            print('--> Optimum Marginal Likelihood: %e \n\n' %(ML[-1]))
        
        
        print('**********************************************')
        print('       Global Optimum Hyperparameters:        ')
        print(opthyp)
        print('Global Optimum Marginal Likelihood: ')
        print(optML[-1])
        print('********************************************** \n\n')
        # Save global optimum to the Kernel object
        self.hyp = opthyp
        self.rank_fix = optrankfix 
           
        return  
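The method above is a random-restart strategy: one CG run from the current hyperparameters, then random_starts more runs from random initializations, keeping the best marginal likelihood. A minimal, self-contained sketch of that pattern, where local_opt and sample_init are hypothetical stand-ins for CG.minimize and the random hyperparameter draw:

import numpy as np

def optimize_with_restarts(local_opt, sample_init, n_restarts):
    # First run, then n_restarts random restarts; keep the best objective.
    best_x, best_f = local_opt(sample_init())
    for _ in range(n_restarts):
        x, f = local_opt(sample_init())
        if np.isfinite(f) and f < best_f:
            best_x, best_f = x, f
    return best_x, best_f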
Example 2
def inexactNewtonCGiter(action_of_hessian_on_vector,
                        eval_obj_func,
                        m,
                        g,
                        n,
                        convergence_level,
                        c1=1.e-4,
                        alpha_init=1.0):
    '''
    action_of_hessian_on_vector - Function that returns the action of the nxn Hessian H on any given n-vector x
    eval_obj_func - Evaluates the objective function; takes m as input
    m - Parameters
    g - Gradient
    n - Problem dimension
    convergence_level - 0 for linear, 1 for superlinear, 2 for quadratic
    c1 - Parameter to quantify sufficient descent
    alpha_init - Initial guess for alpha, which decides the step length
    '''

    p = CG.linear_CG(action_of_hessian_on_vector, m, g, n, convergence_level)
    alpha = line_search.backtrackingArmijoLineSearch(eval_obj_func,
                                                     m,
                                                     p,
                                                     g,
                                                     c1=c1)

    m_new = m + alpha * p

    return m_new
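For reference, a minimal sketch of the Armijo backtracking rule that line_search.backtrackingArmijoLineSearch presumably implements; the project's function may differ in signature and details:

import numpy as np

def backtracking_armijo(eval_obj_func, m, p, g, c1=1e-4, alpha_init=1.0,
                        shrink=0.5, max_iter=50):
    # Shrink alpha until f(m + alpha*p) <= f(m) + c1*alpha*(g . p),
    # the sufficient-decrease (Armijo) condition.
    f0 = eval_obj_func(m)
    slope = float(np.dot(np.ravel(g), np.ravel(p)))  # directional derivative
    alpha = alpha_init
    for _ in range(max_iter):
        if eval_obj_func(m + alpha * p) <= f0 + c1 * alpha * slope:
            break
        alpha *= shrink
    return alpha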
Example 3
def Gaussian_Kron(W,x,y,hyp,rank_fix):
    # Initialize variables    
    N,M = W.shape
    D = len(x)
    sigma = math.exp(-hyp[0]/D)   # signal scale (hyperparameters stored as negative logs)
    s = math.exp(-hyp[-1])        # noise scale

    # set a flag to know if the CG converged
    flag=False
    
    # iterate 5 times or until CG converged to desired accuracy
    for i in range(5):
        # initialize list for dimensional gram matrices, Eigenvalues, and Eigenvectors
        K = []
        E = []
        
        # Calculate and stack K, Q, and E in each dimension.
        for d in range(D):
            xd = x[d].reshape(-1,1)
            K.append((sigma**2.0)*np.exp(-(np.sum(xd**2.0,1).reshape(-1,1)+np.sum(xd**2.0,1)-2*np.dot(xd,xd.T))/(2.0*(math.exp(-hyp[d+1]))**2)))  
            E.append(np.real(np.linalg.eigh(K[-1])[0]))
        
        # Calculate eigenvalues of the inducing points
        '''
        L = E[0]
        for d in xrange(1,D):
            L = np.kron(L,E[d])
        
        L = np.sort(L,kind='mergesort')
        '''
        L,ind = KU.largest_eigs(E,N,M)
        
        # Approximate to eigenvalues of KSKI by a factor M/N    
        L = (float(N)/M)*L
        
        # Calculate approximate log|KSKI| from L and s    
        complexity = np.sum(np.log(L + (s**2 + rank_fix )*np.ones((N,1))))
    
        # Calculate alpha by Linear CG method
        alpha = CG.Linear_CG(W,K,y,s,rank_fix,tolerance=1e-3,maxiter=2000)
        
        # If the CG does not converge, increase the rank fixing term to give better conditioning
        if alpha[1] != 0:
            print('CG failed, increasing the rank correction term.')
            rank_fix = 100*rank_fix
        else:
            flag = True
            
        # if CG succeeded, return alpha. Else reiterate with a larger rank_fix term.
        if flag:
            break
    alpha = alpha[0]
    
    # Get negative log likelihood (objective function to be minimized)
    return 0.5*(np.dot(y.T,alpha)[0][0] + complexity + N*np.log(2*math.pi)),rank_fix    
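The commented-out block above relies on the identity that the eigenvalues of a Kronecker product are all pairwise products of the factors' eigenvalues, which is why the per-dimension eigendecompositions suffice. A quick numerical check of that identity:

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((3, 3)); A = A @ A.T   # symmetric factors
B = rng.standard_normal((4, 4)); B = B @ B.T
eig_kron = np.sort(np.kron(np.linalg.eigvalsh(A), np.linalg.eigvalsh(B)))
eig_direct = np.sort(np.linalg.eigvalsh(np.kron(A, B)))
assert np.allclose(eig_kron, eig_direct)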
Example 4
    def KISSGP(self):

        if self.noise:
            noise = self.kernel.hyp[-1]
        else:
            noise = math.log(1e6)
        self.Kuu = self.kernel.Kuu(self.grid.x)

        alpha = CG.Linear_CG(self.W,
                             self.Kuu,
                             self.y,
                             math.exp(-noise),
                             self.kernel.rank_fix,
                             tolerance=1e-5,
                             maxiter=5000)
        print(alpha[1])  # CG convergence flag (nonzero means CG did not converge)
        self.alpha = alpha[0]
        return
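In KISS-GP/SKI the kernel matrix is approximated as K ≈ W·Kuu·Wᵀ, so each matrix-vector product inside the linear CG solve reduces to two interpolation multiplies around a Kuu multiply. A dense-toy sketch of that matvec, assuming CG.Linear_CG does something equivalent while exploiting sparsity in W and the structure of Kuu:

import numpy as np

def ski_matvec(W, Kuu, noise_var, v):
    # (W Kuu W^T + noise_var * I) v, without forming the N x N matrix.
    return W @ (Kuu @ (W.T @ v)) + noise_var * v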
Example 5
def singleEuler(nu, u, v, Gu, Gv, dx, dy, dt, Nx, Ny, c1, c2):
    '''
    Advance (u, v) by one explicit Euler substep: refresh the ghost cells,
    add the convective/viscous terms Gu, Gv, then project the velocity onto
    a divergence-free field by solving a pressure Poisson system with CG.
    '''

    updGhosts(u, v)

    CalG(nu, u, v, dx, dy, Gu, Gv, c1)

    u += c2 * dt * Gu
    v += c2 * dt * Gv

    b = numpy.zeros((Nx+2, Ny+2))
    b[1:-1, 1:-1] = (u[2:-1, 1:-1] - u[1:-2, 1:-1]) / dx + \
                    (v[1:-1, 2:-1] - v[1:-1, 1:-2]) / dy

    p, Nitr = CG(Nx, Ny, numpy.zeros((Nx+2, Ny+2)),
                               b, dx, dy, 1e-15, 'N', refP=0)

    u[2:-2, 1:-1] -= (p[2:-1, 1:-1] - p[1:-2, 1:-1]) / dx
    v[1:-1, 2:-2] -= (p[1:-1, 2:-1] - p[1:-1, 1:-2]) / dy

    return u, v, p, Gu, Gv
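A small sanity check for the projection step above: after subtracting the pressure gradient, the discrete divergence of (u, v) should be near zero in the interior. A sketch under the same staggered-grid indexing assumptions as the snippet:

import numpy

def max_divergence(u, v, dx, dy):
    # Same interior-divergence stencil used to build b above.
    div = (u[2:-1, 1:-1] - u[1:-2, 1:-1]) / dx + \
          (v[1:-1, 2:-1] - v[1:-1, 1:-2]) / dy
    return float(numpy.abs(div).max())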
Example 6
    def setMBS(self, mbs):
            
        self.mbs = mbs

        parity = 1
        occPos = 0
        empPos = 0
        self.occList = []
        self.nextEmptyList = []
        self.emptyList = []

        # Loop through each of the single-particle states (sps):
        for s in xrange(self.nStates):
            self.parity[s] = parity # set the parity

            if self.mbs & (0x1 << s): # if s is an occupied state

                if self.vsList[s]:  # if it is a valid state for the operator
                    self.occList.append(s)
                    self.nextEmptyList.append(empPos)
                    occPos = occPos + 1

                parity = -parity # if occupied, change the next state parity

            elif self.vsList[s]:
                self.emptyList.append(s) # mark unoccupied
                empPos = empPos + 1

        # So, we need to find n = p electrons choose p electron states
        # p electrons is nParticles - f electrons
        # p states is fixed (well, based on the vsList)
        # see discussion in __init__

        if self.optStates > 0:
            #print self.optStates, occPos
            return int(CG.binomialCoeff(self.optStates, self.nParticles - occPos))
        else:
            return 1
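The return value counts combinations through CG.binomialCoeff. Assuming that helper computes the ordinary binomial coefficient, a minimal stand-in looks like this:

def binomial_coeff(n, k):
    # n choose k with exact integer arithmetic.
    if k < 0 or k > n:
        return 0
    k = min(k, n - k)
    result = 1
    for i in range(k):
        result = result * (n - i) // (i + 1)
    return result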
Example 7
def optimize():

    try:
        tolerance = math.pow(10, -1 *
                             int(entry4.get()))  # user-specified tolerance threshold
    except ValueError:
        tolerance = 5 * math.pow(10, -1 * 8)

    if numberOfFunctions < 5:
        number = numberOfFunctions
    else:
        number = 5

    chooseFunctionsAutomatic(number)
    iteration = 0
    x = np.zeros(dim)
    p = CG(hesse_f_N(x), -1 * grad_f(x), 10 * dim, math.pow(10, -5), x)
    F = f(x)
    Grad = grad_f(x)

    alpha = 1  # Armijo backtracking line search
    while f(x + (alpha * p)) > F + 0.5 * alpha * np.dot(Grad, p):
        alpha = 0.5 * alpha

    xold = np.copy(x)  # back up x
    x = x + alpha * p  # update x
    F = f(x)
    Grad = grad_f(x)

    iteration = 1

    while ((np.linalg.norm(Grad) > tolerance * (1 + (abs(F)))) and
           (abs(np.linalg.norm(Grad) - np.linalg.norm(grad_f(xold)))) > math.pow(10, -7)) \
            or (abs(F - f(xold)) > math.pow(10, -8) * (1 + abs(F)))\
            or (np.linalg.norm(xold - x) > math.pow(10, -8) * (1 + np.linalg.norm(x))):

        if np.linalg.norm(xold -
                          x) <= math.pow(10, -6) * (1 + np.linalg.norm(x)):
            if number < numberOfFunctions:
                addFunctionsAutomatic()
                number += 1
            else:
                break

        p = CG(hesse_f_N(x), -1 * Grad, 10 * dim, math.pow(10, -4), x)

        alpha = 1  # Armijo backtracking line search
        while f(x + (alpha * p)) > F + 0.5 * alpha * np.dot(Grad, p):
            alpha = 0.5 * alpha
        xold = np.copy(x)  # back up x
        x = x + alpha * p  # update x
        F = f(x)
        Grad = grad_f(x)
        iteration += 1

    print("Iterationen: " + str(iteration - 1))
    print("Anzahl gewählter Funktionen: " + str(number))
    print("Minimierer x der Funktionen ist: " + str(x))
    print("Der Fehler im Gradienten liegt bei: " + str(np.linalg.norm(Grad)))

    print("f(x) = " + str(F))
    print("Norm Gradient: " + str(np.linalg.norm(Grad)))
    print("Toleranz Gradient: " + str(tolerance * (1 + abs(F))))
    print("Unterschied Norm Gradient: " +
          str(abs(np.linalg.norm(Grad) - np.linalg.norm(Grad))))
    print("Toleranz Unterschied Gradient: " + str((10**-7)))
    print("Unterschied f: " + str(abs(F - f(xold))))
    print("Tolleranz f: " + str((10**-8) * (1 + abs(F))))
    print("Unterschied x: " + str(np.linalg.norm(xold - x)))
    print("Tolleranz x: " + str(math.pow(10, -8) * (1 + np.linalg.norm(x))))
Example 8
def exact_Gaussian_grad2(W,x,y,hyp,rank_fix):

        
    # Initialize variables    
    N,M = W.shape
    D = len(x)
    P = len(hyp)
    sigma = math.exp(-hyp[0]/D)   # signal scale (hyperparameters stored as negative logs)
    s = math.exp(-hyp[-1])        # noise scale

    # set a flag to know if the CG converged
    flag=False
    
    # iterate 5 times or until CG converged to desired accuracy
    for i in range(5):
        # initialize list for dimensional gram matrices, Eigenvalues, Eigenvectors, and gradients
        K  = []
        E  = []
        Q  = []
  
        
        # Calculate and stack K, Q, and E in each dimension.
        for d in range(D):
            xd = x[d].reshape(-1,1)
            K.append((sigma**2.0)*np.exp(-(np.sum(xd**2.0,1).reshape(-1,1)+np.sum(xd**2.0,1) \
                                        -2*np.dot(xd,xd.T))/(2.0*(math.exp(-hyp[d+1]))**2)))  
            e,q = np.linalg.eigh(K[-1])
            E.append(e)
            Q.append(q)
        
        # get N largest eigenvalues
        L,ind = KU.largest_eigs(E,N,M)
        
        # Approximate to eigenvalues of KSKI by a factor M/N    
        L = (float(N)/M)*L
        
        # Calculate approximate log|KSKI| from L and s    
        complexity = np.sum(np.log(L + (s**2 + rank_fix)*np.ones((N,1))))
        
        # Calculate alpha by Linear CG method
        alpha = CG.Linear_CG(W,K,y,s,rank_fix,tolerance=1e-3,maxiter=2000)
        
        # If the CG does not converge, increase the rank fixing term to give better conditioning
        if alpha[1] != 0:
            print('CG failed, increasing the rank correction term.')
            rank_fix = 100*rank_fix
        else:
            flag = True
            
        # if CG succeeded, return alpha. Else reiterate with a larger rank_fix term.
        if flag:
            break
    alpha = alpha[0]
    
    # calculate gradients of Kuu and then the gradient of the likelihood
    grad = np.zeros((P,1))
    
    # If M > 5000, compute gradients in parallel (below roughly 5000 points the
    # communication overhead outweighs the benefit)
    if M>5000:
        l = [(i,K,L,W,Q,alpha,ind,rank_fix,x,hyp) for i in range(P)]
        pool = Pool()
        res = pool.imap(d_ARD, l)
        i=0
        for g in res:
            grad[i] = g
            i+=1
        pool.close()
        pool.join()   # wait for workers to finish instead of killing them
    # Else compute gradients sequentially        
    else:
        for p in range(P):
            grad[p] = d_ARD((p,K,L,W,Q,alpha,ind,rank_fix,x,hyp))
            
    func = 0.5*(np.dot(y.T,alpha)[0][0] + complexity + N*np.log(2*math.pi))
    print(func)  # report the negative log likelihood at this evaluation

    # Get negative log likelihood (objective function to be minimized)
    return grad,func,rank_fix
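Example 1 offers forward finite differencing as the alternative to these exact gradients. A minimal sketch of that fallback scheme (assuming a flat parameter vector), not the project's implementation:

import numpy as np

def forward_fd_grad(f, x, h=1e-6):
    # grad_i f(x) ~ (f(x + h*e_i) - f(x)) / h
    f0 = f(x)
    g = np.zeros_like(x, dtype=float)
    for i in range(x.size):
        xp = x.copy()
        xp[i] += h
        g[i] = (f(xp) - f0) / h
    return g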
Example 9
            if indRows[i] != iVert :
                spCoefs[i] = 0.
            else :
                spCoefs[i] = 1.
        # Zero out the coefficients on row iVert
        # (always keeping 1 on the diagonal)
        for iCol in xrange(nb_verts):
            if iCol != iVert :
                for ptRow in xrange(begCols[iCol],begCols[iCol+1]):
                    if indRows[ptRow] == iVert :
                        spCoefs[ptRow] = 0.
                        
# Then define the matrix as a CSC matrix:
spMatrix = sparse.csc_matrix((spCoefs, indRows, begCols))

# Visualize the right-hand side:
VS.view( coords, elt2verts, b, title = "right-hand side", visuMesh = False )

# Then solve the linear system with a conjugate gradient:
x0 = np.zeros(nb_verts, np.double)
for i in xrange(nb_verts):
    if coords[i,3]>0:
        x0[i] = g(coords[i,0],coords[i,1])
sol, iter, epsilon = CG.solve( spMatrix, x0, b, 100, 1.E-6 )
# Print the relative error of the solution and the number of
# iterations needed to converge
print "Erreur relative : {}".format(epsilon)
print "Iteration pour converger : {}".format(iter)
# Visualize the solution:
VS.view( coords, elt2verts, sol, title = "Solution", visuMesh=False )
Example 10
            thresholdValue = cv2.getTrackbarPos('filterThresh', 'image')
            thresh = cv2.threshold(blurred, thresholdValue, 255,
                                   cv2.THRESH_BINARY)[1]
            testArray = [(lower - thrs / 2).tolist(),
                         (lower + thrs / 2).tolist(),
                         lowerBound.tolist(),
                         upperBound.tolist(), thresholdValue]

            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[1]

            areas = 1
            if cnts is not None:
                areas = int(len(cnts))
                CG.win()
            splotch = np.zeros((1, areas), dtype=np.uint8)

            if cnts is None:
                CG.runGame()

            # Loop over the contours.
            # This is the block that detects green.
            try:
                for i, c in enumerate(cnts, 0):
                    print(cnts)
                    M = cv2.moments(c)
                    splotch[0][i] = int(M["m00"])

                try:
                    max1 = np.argmax(splotch)
Example 11
def uncon(func, x0, max_iter, tol):

    method = "BFGS_LS"
    mode = 0  # solve mode
    #mode = 1 # analysis mode

    # get the obj function and gradient

    f, g = func()

    n = x0.shape[0]

    if (method == "BFGS_LS"):

        V0 = np.matrix(np.eye(n))

        p = BFGS.BFGS(f, g, x0, V0, n, mode)

        if (mode == 0):

            x, J = p.optimize(tol)

        elif (mode == 1):

            x_list, n_iter_list, log_g_norm_list = p.optimize(tol)

    elif (method == "BFGS_TR"):

        B_0 = np.matrix(np.eye(n))

        Delta_0 = 1.0
        Delta_max = 10.0

        if (mode == 0):

            x, J = TR.trustRegion(Delta_0, Delta_max, tol, \
             x0, B_0, f, g, mode)

        elif (mode == 1):

            x_list, n_iter_list, log_g_norm_list = TR.trustRegion(Delta_0, Delta_max, tol, \
             x0, B_0, f, g, mode)

    elif (method == "CG"):

        p = CG.CG(f, g, x0, mode)
        if (mode == 0):

            x, J = p.optimize(tol)

        elif (mode == 1):

            x_list, n_iter_list, log_g_norm_list = p.optimize(tol)

    if (mode == 0):

        return x, J

    elif (mode == 1):

        return x_list, n_iter_list, log_g_norm_list
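A hedged usage sketch for uncon: the call f, g = func() suggests func returns objective and gradient callables. rosenbrock below is an invented example problem, and running it requires the project's BFGS module:

import numpy as np

def rosenbrock():
    def f(x):
        return (1.0 - x[0])**2 + 100.0 * (x[1] - x[0]**2)**2
    def g(x):
        return np.array([-2.0*(1.0 - x[0]) - 400.0*x[0]*(x[1] - x[0]**2),
                         200.0*(x[1] - x[0]**2)])
    return f, g

x_opt, J = uncon(rosenbrock, np.array([-1.0, 1.0]), max_iter=1000, tol=1e-6)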
Example 12
def genFermionStateTable(nFermions):

    plist = Plist.AtomicOrbitalPlist()
    plist.readPlist()
    nStates = plist.aoList.nStates

    debugFlag = True

    # In real situations, more than 32 states will overflow memory
    # But in small situations, this is allowed
    if nStates > 32:
        print '*** WARNING: *** nStates is larger than 32!! Continuing ... '
        

    # The number of fermions cannot be more than the number of states
    if nFermions > nStates:
        raise Exception('Number of particles exceeds the number of states! '+str(nFermions)+' '+str(nStates))
        return

    # The dimension is the total number of many-body states
    dim = CG.binomialCoeff(nStates, nFermions)
    print '<------------------------ State Table -------------------->'
    print 'Fermions = ', nFermions, ' ... States = ', nStates, '... Dimension = ', dim

    # Create an empty python dictionary
    stateTableDict = dict()
    stateTable = []

    # initialize the first state 
    # the low nFermions bits are set to 1
    state = Bit.BitStr(nStates)
    for ferm in range(nFermions):
        state.set(ferm)
        
    # cursor is the position of the highest bit in this group of ones
    cursor = nFermions - 1
    # ones is the total number of ones in this group of ones
    ones = nFermions

    # --------------------------------------------------------------------
    # Loop through each many-body state
    # --------------------------------------------------------------------
    for mbState in range(dim):

        stateTableDict[state.value] = mbState # set the key & value
        stateTable.append(state.value)
        if dim == 1:
            break

        # move the high bit of the group up by one (cursor needs no update
        # here: the branches below recompute it)
        state.clear(cursor)
        state.set(cursor+1)
        ones = ones - 1 # decrement the number of ones in the group
        
        if ones > 0:
            # --------------------------------------------------------------------
            # move all of the lower ones down
            # --------------------------------------------------------------------
            for sbState in range(cursor):
                if sbState < ones:
                    state.set(sbState)
                else:
                    state.clear(sbState)
            # --------------------------------------------------------------------
            cursor = ones - 1  # move the cursor to end of group

        else: # no more ones in the group
            # --------------------------------------------------------------------
            # find the next one to move
            for sbState in range(cursor+1,nStates):
                if not state.get(sbState):
                    cursor = sbState - 1
                    break
                else:
                    ones = ones + 1
            # --------------------------------------------------------------------
            # end for sbState in range(cursor+1,nStates+1):
            # --------------------------------------------------------------------

    f = open('./stateTable.pkl', 'w')
    pickle.dump(stateTable, f)
    f.close()

    f = open('./stateTableDict.pkl','w')
    pickle.dump(stateTableDict, f)
    f.close()

    del stateTable
    del stateTableDict

    # --------------------------------------------------------------------
    # Dump out the table for debugging
    # --------------------------------------------------------------------
    if debugFlag:
        # Read in the state table pickles
        f = open('stateTable.pkl', 'r')
        dbgStateTable = pickle.load(f)
        f.close()

        f = open('stateTableDict.pkl', 'r')
        dbgStateTableDict = pickle.load(f)
        f.close()

        for rStateIndex, rState in enumerate(dbgStateTable):
            state = Bit.BitStr(nStates, rState)
            print 'State %d = %s' % (rStateIndex, state.display())
            if rStateIndex != dbgStateTableDict[rState]:
                raise Exception('Mismatch between state index and dictionary!'+str(rStateIndex))
                
            

    f = open('./mbDimension.pkl','w')
    pickle.dump(dim, f)
    f.close()

    f = open('./nParticles.pkl','w')
    pickle.dump(nFermions, f)
    f.close()

    print '<------------------------ End State Table -------------------->'
    print
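The bit-walking loop above enumerates every nStates-bit integer with exactly nFermions set bits, in increasing order. A compact, independent cross-check of that enumeration using itertools:

from itertools import combinations

def all_fermion_states(n_states, n_fermions):
    # Every choice of n_fermions occupied bits out of n_states positions.
    return sorted(sum(1 << b for b in bits)
                  for bits in combinations(range(n_states), n_fermions))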