def init_geom(self, position_file, unitcell_file):
    """initialise the geometry."""
    if self.Def["PBC"] == 1:
        PBCs = True
    else:
        PBCs = False

    # If build geometry is turned on then build the geometry
    if self.Def['build_geom'] == 1:
        a, crys_err = self.calculate_crystal_sep()
        # if the calculation of the crystal separation a is successful:
        if crys_err == 0:
            mycry = Crystal(a=a, lattice="cubic")
            mycry.populateUnitCell(self.Def['crystal'], geom_filename=position_file,
                                   uc_filename=unitcell_file, nx=self.Def['nx'],
                                   ny=self.Def['ny'], nz=self.Def['nz'], PBCs=PBCs)

    # Read in the geometry from file
    NAtom, Pos, AtomType = ReadGeom(position_file)

    # If PBCs are turned on then read in the unit cell
    if PBCs:
        a1, a2, a3 = ReadUnitCell(unitcell_file)
    else:
        a1 = np.array((0.0, 0.0, 0.0))
        a2 = np.array((0.0, 0.0, 0.0))
        a3 = np.array((0.0, 0.0, 0.0))

    # Write out the geometry
    WriteXYZ(self, NAtom, '', AtomType, Pos)

    # Transfer geometry to the JobClass
    self.NAtom = NAtom
    self.Pos = Pos
    self.UnitCell = [a1, a2, a3]
    self.AtomType = AtomType
    self.NOrb = [self.Model.atomic[self.AtomType[a]]['NOrbitals'] for a in range(self.NAtom)]

    verboseprint(self.Def['verbose'], "Atom positions:")
    verboseprint(self.Def['verbose'], self.Pos)

def optimisation_routine2(self, num_rho):
    """
    Optimisation routine where we try to solve for the norm squared of the
    optimal density matrix with the constraint that the sum of the
    coefficients is equal to one. To include the constraint we set up the
    problem:

        minimise: alpha_i M_ij alpha_j - lambda (sum_i alpha_i - 1)

    where M_ij = Tr(R_i^dag R_j). We then differentiate with respect to
    alpha_k and set to zero to minimise:

        2 M alpha - lambda = 0

    We solve this equation. We have to add a buffer row and column to
    include lambda as well as the constraint that the sum of alpha is equal
    to one. We absorb the 2 into lambda:

        {M_11  M_12  ...  -1    {alpha_1        {0
         M_21  M_22  ...  -1     alpha_2         0
          .     .          .        .            .
          .     .          .        .      =     .
          .     .          .        .            .
         -1    -1    ...   0}    lambda}        -1}

    """
    small = 1e-10
    verboseprint(self.Job.Def['extraverbose'], "optimisation_routine2")
    # If there is only one density matrix the solution is simple.
    if num_rho == 1:
        return np.array([1.0], dtype='double'), 0

    alpha = np.zeros(num_rho+1, dtype='double')
    Mmat = np.matrix(np.zeros((num_rho+1, num_rho+1), dtype='complex'))
    # make all the elements -1
    Mmat.fill(-1.0)
    # replace the bottom right hand corner by 0
    Mmat[-1, -1] = 0.0
    # calculate the rest of the Mmat.
    for i in range(num_rho):
        Mmat[i, i] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[i]).H)
        for j in range(i+1, num_rho):
            Mmat[i, j] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[j]).H)
            # if np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])) != Mmat[i, j].conj():
            #     print "Mmat[%i,%i] = %f. Mmat[%i,%i].conj() = %f." % (j, i, np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])), i, j, Mmat[i, j].conj())
            Mmat[j, i] = Mmat[i, j].conj()

    # if abs(np.linalg.det(Mmat)) < small:
    #     return alpha, 1

    RHS = np.zeros(num_rho+1, dtype='double')
    RHS[-1] = -1.0
    alpha = np.linalg.solve(Mmat, RHS)
    myscale = abs(np.sum(alpha) - alpha[-1])
    if abs(myscale - 1.0) > small:
        print("ERROR: optimisation_routine2 -- sum alpha = %f. alpha must sum to 1.0." % myscale)
        print(alpha)
        return alpha, 1

    # if successful then return result and no error code.
    return alpha, 0

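# A minimal standalone sketch (not part of the class, illustrative names and data
# only) of the bordered linear system that optimisation_routine2 solves, for the
# hypothetical case of two residue matrices. It builds M_ij = Tr(R_i^dag R_j),
# borders the matrix with -1s, solves the system, and checks that the constraint
# row has enforced sum(alpha) = 1.
def _example_bordered_solve():
    import numpy as np
    rng = np.random.default_rng(0)
    residues = [rng.standard_normal((4, 4)) for _ in range(2)]
    num_rho = len(residues)
    Mmat = -np.ones((num_rho + 1, num_rho + 1), dtype='complex')
    Mmat[-1, -1] = 0.0
    for i in range(num_rho):
        for j in range(num_rho):
            Mmat[i, j] = np.trace(residues[i] @ residues[j].conj().T)
    RHS = np.zeros(num_rho + 1)
    RHS[-1] = -1.0
    solution = np.linalg.solve(Mmat, RHS)
    alpha, lam = solution[:-1], solution[-1]
    # The last row of the bordered matrix encodes -sum(alpha) = -1.
    assert abs(np.sum(alpha) - 1.0) < 1e-10
    return alpha, lam
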
def GR_Pulay(self, scf_iteration):
    """
    This is the guaranteed reduction Pulay mixing scheme proposed in:

        Bowler, D. R., and M. J. Gillan. "An efficient and robust technique
        for achieving self consistency in electronic structure calculations."
        Chemical Physics Letters 325.4 (2000): 473-476.

    If the number of density matrices to be used, num_rho, is 1, it reduces
    to linear mixing.

    The scf_iteration is a required input because when scf_iteration is less
    than num_rho then scf_iteration is the number of density matrices that
    should be used.

    The output is an updated self.rhotot to be used in the construction of
    the Fock matrix. Also, self.inputrho, self.outputrho and self.residue
    are updated for the next iteration.
    """
    num_rho = self.Job.Def['num_rho']
    # If the number of scf iterations is less than num_rho replace it by
    # the number of scf iterations (as there will only be that number of
    # density matrices).
    if scf_iteration < num_rho:
        num_rho = scf_iteration

    # Shift along the density and residue matrices
    for ii in range(num_rho-1):
        self.inputrho[num_rho - 1 - ii] = np.copy(self.inputrho[num_rho - 2 - ii])
        self.outputrho[num_rho - 1 - ii] = np.copy(self.outputrho[num_rho - 2 - ii])
        self.residue[num_rho - 1 - ii] = np.copy(self.residue[num_rho - 2 - ii])

    # Add in the new density and residue matrices
    self.inputrho[0] = self.rhotot
    self.outputrho[0] = self.rho
    self.residue[0] = self.rho - self.rhotot

    # Calculate the values of alpha to minimise the residue
    alpha, igo = self.optimisation_routine(num_rho)
    if igo == 1:
        print("WARNING: Unable to optimise alpha for combining density matrices. Proceeding using guess.")
        # Guess for alpha is just 1.0 divided by the number of density matrices
        alpha = np.zeros((num_rho), dtype='double')
        alpha.fill(1.0/num_rho)
    verboseprint(self.Job.Def['extraverbose'], "alpha: ", alpha)

    # Create an optimised rhotot and an optimised rho and do linear mixing to make next input matrix
    self.rhotot = sum(alpha[i]*self.inputrho[i] for i in range(num_rho))
    self.rho = sum(alpha[i]*self.outputrho[i] for i in range(num_rho))
    self.linear_mixing()

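# A toy, self-contained sketch of the combination step that GR_Pulay performs once
# the alphas are known: the stored input and output density matrices are combined
# with the same weights, and the optimised pair is then linearly mixed to form the
# next input matrix. The matrices, alpha values and mixing parameter below are made
# up purely for illustration; the real mixing step lives in the class method
# linear_mixing(), which is defined elsewhere in this code base.
def _example_pulay_combination():
    import numpy as np
    inputrho = [np.diag([1.0, 0.0]), np.diag([0.8, 0.2])]   # stored input densities
    outputrho = [np.diag([0.9, 0.1]), np.diag([0.7, 0.3])]  # corresponding outputs
    alpha = np.array([0.6, 0.4])                            # weights from the optimiser
    rhotot = sum(alpha[i]*inputrho[i] for i in range(len(alpha)))
    rho = sum(alpha[i]*outputrho[i] for i in range(len(alpha)))
    # Linear mixing of the optimised pair (illustrative mixing parameter).
    scf_mix = 0.1
    next_input = rhotot + scf_mix*(rho - rhotot)
    return next_input
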
def optimisation_routine1(self, num_rho):
    """
    Optimisation routine where we try to solve for the norm squared of the
    optimal density matrix with the constraint that the sum of the
    coefficients is equal to one. To include the constraint we set up the
    problem:

        minimise: alpha_i M_ij alpha_j - lambda (sum_i alpha_i - 1)

    where M_ij = Tr(R_i^dag R_j). We then differentiate with respect to
    alpha_k and set to zero to minimise:

        2 M alpha = lambda

    We solve this equation for lambda = 1. We then can simply scale alpha,
    such that sum_i alpha_i = 1, which is equivalent to having solved for a
    different lambda.
    """
    verboseprint(self.Job.Def['extraverbose'], "optimisation_routine")
    small = 1e-14
    # If there is only one density matrix the solution is simple.
    if num_rho == 1:
        return np.array([1.0], dtype='double'), 0

    alpha = np.zeros(num_rho, dtype='double')
    Mmat = np.matrix(np.zeros((num_rho, num_rho), dtype='complex'))
    lamb = 0.5*np.ones(num_rho, dtype='double')
    for i in range(num_rho):
        Mmat[i, i] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[i]).H)
        for j in range(i+1, num_rho):
            Mmat[i, j] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[j]).H)
            # if np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])) != Mmat[i, j].conj():
            #     print "Mmat[%i,%i] = %f. Mmat[%i,%i].conj() = %f." % (j, i, np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])), i, j, Mmat[i, j].conj())
            Mmat[j, i] = Mmat[i, j].conj()

    # if np.linalg.det(Mmat) < small:
    #     return alpha, 1

    alpha = np.linalg.solve(Mmat, lamb)
    myscale = np.sum(alpha)
    if myscale == 0:
        print("ERROR: alpha summed to 0 in optimisation_routine. Cannot be scaled to 1.")
        print(alpha)
        return alpha, 1
    else:
        alpha = alpha/myscale
    return alpha, 0

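# A minimal sketch (illustrative values only, not part of the class) of the strategy
# used by optimisation_routine1: solve M alpha = 0.5 (i.e. 2 M alpha = lambda with
# lambda = 1) and then rescale alpha so that it sums to one. For a nonsingular M this
# gives the same weights as the bordered Lagrange-multiplier system solved in
# optimisation_routine2.
def _example_rescaled_solve():
    import numpy as np
    Mmat = np.array([[2.0, 0.5],
                     [0.5, 1.0]])        # stand-in for M_ij = Tr(R_i^dag R_j)
    lamb = 0.5*np.ones(2)
    alpha = np.linalg.solve(Mmat, lamb)
    alpha = alpha/np.sum(alpha)          # enforce sum_i alpha_i = 1
    return alpha
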
def print_results(Job):
    verboseprint(Job.Def['verbose'], "Energy eigenvalues: ")
    verboseprint(Job.Def['verbose'], Job.e)
    if Job.Def['Hamiltonian'] == "collinear":
        verboseprint(Job.Def['extraverbose'], "Mulliken charges: ", Job.Hamilton.q)
        verboseprint(Job.Def['extraverbose'], (Job.Hamilton.s).T)
    # Write out information about the simulation if it is specified in the job definition
    print("\n\n\nSuccessfully completed calculation!")
    WriteSimulationResults(Job)

def McWeeny_iterations(self, rho):
    """
    Iterations of the McWeeny scheme for the inputted rho.
    Return a True/False flag that indicates convergence, the number of
    iterations required to reach convergence, the error and the converged
    density matrix.
    """
    converge_flag = False
    for ii in range(self.Job.Def['McWeeny_max_loops']):
        # McWeeny transformation: rho -> 3*rho^2 - 2*rho^3
        rho = 3*np.dot(rho, rho) - 2*np.dot(rho, np.dot(rho, rho))
        err = self.idempotency_error(rho)
        verboseprint(self.Job.Def['extraverbose'], "McWeeny iteration: ", ii, "; Idempotency error = ", err)
        if err < self.Job.Def['McWeeny_tol']:
            converge_flag = True
            return converge_flag, ii, err, rho
        # Check to make sure that the error hasn't become a nan.
        elif np.isnan(err):
            return converge_flag, ii, err, rho

    # if it gets to this statement then it probably hasn't converged.
    return converge_flag, ii, err, rho

def McWeeny(self):
    """
    Make the density matrix idempotent using the McWeeny transformation,
    R. McWeeny, Rev. Mod. Phys. (1960):

        rho_n+1 = 3*rho_n^2 - 2*rho_n^3

    """
    if self.Job.isNoncollinearHami:
        rho_temp = self.rhotot
    else:
        rho_temp = self.rho

    # Make sure that it isn't already idempotent
    err_orig = self.idempotency_error(rho_temp)
    if err_orig < self.Job.Def['McWeeny_tol']:
        # if already idempotent then don't do anything, just exit function
        return

    flag, iterations, err, rho_temp = self.McWeeny_iterations(rho_temp)
    # Check that the number of electrons hasn't changed
    num_electrons = sum(rho_temp[i, i] for i in range(len(rho_temp)))
    if abs(num_electrons - self.NElectrons) > self.Job.Def['McWeeny_tol']:
        verboseprint(self.Job.Def['verbose'],
                     "McWeeny transformation changed the number of electrons, "
                     "difference = ", num_electrons - self.NElectrons)
        # Turn off using the McWeeny transformation as once it doesn't work it seems to not work again.
        self.Job.Def["McWeeny"] = 0
        return

    # if the flag is false it means that the idempotency error was not reduced below the tolerance
    if not flag:
        # if the iterations did not converge but the idempotency error has
        # gotten smaller then print a warning but treat it as a success.
        if err < err_orig:
            verboseprint(self.Job.Def['verbose'],
                         "Max iterations, {} reached. Idempotency error = {}".format(iterations, err))
            flag = True
        else:
            verboseprint(self.Job.Def['verbose'],
                         "McWeeny transformation unsuccessful. Proceeding using input density matrix.")
            # Turn off using the McWeeny transformation as once it doesn't work it seems to not work again.
            self.Job.Def["McWeeny"] = 0

    # if this is going to be treated like a success then reassign rho_temp.
    if flag:
        if self.Job.isNoncollinearHami:
            self.rhotot = rho_temp
        else:
            self.rho = rho_temp

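# A small, self-contained demonstration (toy numbers, not part of the class) of the
# McWeeny purification map rho -> 3*rho^2 - 2*rho^3 driving a nearly idempotent
# matrix towards idempotency: eigenvalues close to 1 are pushed to 1 and eigenvalues
# close to 0 are pushed to 0, so the trace (electron count) only changes by the
# amount the starting eigenvalues differ from 0 or 1, which is what the electron
# number check above guards against.
def _example_mcweeny_purification():
    import numpy as np
    rho = np.diag([0.95, 0.85, 0.10, 0.05])   # eigenvalues close to, but not at, 0 and 1
    for _ in range(5):
        rho = 3*np.dot(rho, rho) - 2*np.dot(rho, np.dot(rho, rho))
    # After a few iterations the eigenvalues are essentially 0 or 1.
    err = np.linalg.norm(np.dot(rho, rho) - rho)
    return err, np.trace(rho)
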
def PerformSelfConsistency(Job):
    # Make the Fock matrix self-consistent
    SCFerror = 1.0e+99
    # flag to indicate if self-consistency has been obtained.
    SCFflag = False
    # if it's a noncollinear Hamiltonian we need to initialise rhotot
    if Job.isNoncollinearHami:
        Job.Electron.rhotot = Job.Electron.rho

    max_loops = int(Job.Def['scf_max_loops'])
    assert max_loops > 0, ("Performing self consistency calculation requires positive "
                           f"value of scf_max_loops. Current value is: {max_loops}")
    for ii in range(max_loops):
        #
        # Build the Fock matrix (adds the density matrix dependent terms)
        Job.Hamilton.buildFock(Job)
        #
        # Diagonalise the Fock matrix
        Job.e, Job.psi = np.linalg.eigh(Job.Hamilton.fock)
        #
        # Occupy the orbitals according to the Fermi distribution
        Job.Electron.occupy(Job.e, Job.Def['el_kT'], Job.Def['mu_tol'], Job.Def['mu_max_loops'])
        #
        # Build the density matrix
        Job.Electron.densitymatrix()

        if Job.isNoncollinearHami:
            # Compare the difference between the new and the old on-site density matrix elements
            SCFerror = Job.Electron.SCFerror()
            verboseprint(Job.Def['verbose'], 'SCF loop = ', ii+1, '; SCF error = ', SCFerror)
            # Check if the SCF error is still larger than the tolerance
            if SCFerror > Job.Def['scf_tol']:
                # Update the density matrix by linear mixing
                # Job.Electron.linear_mixing()
                Job.Electron.GR_Pulay(ii+1)
            else:
                SCFflag = True
                break

            if Job.Def['McWeeny'] == 1:
                Job.Electron.McWeeny()

            verboseprint(Job.Def['extraverbose'], "number of electrons = "+str(Job.Electron.electronspersite().sum()))
            verboseprint(Job.Def['extraverbose'], "output rho idempotency error is: ", Job.Electron.idempotency_error(Job.Electron.rho))
            verboseprint(Job.Def['extraverbose'], "input rho idempotency error is: ", Job.Electron.idempotency_error(Job.Electron.rhotot))
            verboseprint(Job.Def['extraverbose'], "SCF charges = ", Job.Hamilton.q)
            verboseprint(Job.Def['extraverbose'], "Magnetic moments = ", Job.Electron.spinpersite().T)

        else:
            #
            # Compute the net charge on each site
            q = Job.Electron.chargepersite()
            #
            # Compute the net spin on each site
            s = Job.Electron.spinpersite()
            #
            # Compute the error in the charges, and update the charges and spin
            SCFerror = math.sqrt(np.vdot(q-Job.Hamilton.q, q-Job.Hamilton.q) / Job.NAtom)
            verboseprint(Job.Def['verbose'], 'SCF loop = ', ii+1, '; SCF error = ', SCFerror)
            verboseprint(Job.Def['extraverbose'], 'SCF charges = ', Job.Hamilton.q)
            #
            # If SCF is on, check if the SCF error is still larger than the tolerance.
            if SCFerror > Job.Def['scf_tol']:
                #
                # Update the input charges and spins
                Job.Hamilton.q = Job.Hamilton.q + Job.Def['scf_mix'] * (q-Job.Hamilton.q)
                Job.Hamilton.s = Job.Hamilton.s + Job.Def['scf_mix'] * (s-Job.Hamilton.s)
            #
            # If SCF error is smaller than or equal to the tolerance then leave loop
            else:
                SCFflag = True
                break

            # if the McWeeny flag is on then purify the density matrix
            if Job.Def['McWeeny'] == 1:
                Job.Electron.McWeeny()

    # Print out number of SCF loops taken
    verboseprint(Job.Def['verbose'], "Number of SCF loops: ", ii+1)
    # if self-consistency is not obtained then raise an error and exit.
    if SCFflag is False:
        raise SelfConsistencyError(
            ("ERROR: Self-consistency not obtained within maximum number of "
             "cycles: {}".format(max_loops)))

    return SCFflag

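# Hedged usage sketch: a thin driver wrapper (hypothetical name) showing how a
# caller might invoke PerformSelfConsistency and react to a failure to converge.
# The Job object is assumed to have been fully initialised elsewhere in the code
# base before this point.
def _example_run_scf(Job):
    try:
        return PerformSelfConsistency(Job)
    except SelfConsistencyError as err:
        # Report the failure and let the driver decide how to proceed.
        print(err)
        return False
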