def residue(self, X, so_prop_ints):
    prop_a = [core.triplet(self.Co[0], x, self.Cv[0], True, False, False) for x in so_prop_ints]
    prop_b = [core.triplet(self.Co[1], x, self.Cv[1], True, False, False) for x in so_prop_ints]
    return np.array([X[0].vector_dot(u[0]) + X[1].vector_dot(u[1]) for u in zip(prop_a, prop_b)])
def residue(self, X, so_prop_ints):
    # return zeros if spin multiplicity of GS and ES differ
    if not self.singlet and (self.mult_gs == 1):
        return np.zeros(len(so_prop_ints))
    prop = [core.triplet(self.Co, x, self.Cv, True, False, False) for x in so_prop_ints]
    return np.sqrt(2.0) * np.array([X.vector_dot(u) for u in prop])
def _compute_fxc(PQrho, half_Saux, halfp_Saux, x_alpha, rho_thresh=1.e-8):
    """
    Computes the gridless (P|fxc|Q) ALDA tensor.
    """
    naux = PQrho.shape[0]

    # Level it out
    PQrho_lvl = core.triplet(half_Saux, PQrho, half_Saux, False, False, False)

    # Rotate into a diagonal basis
    rho = core.Vector("rho eigenvalues", naux)
    U = core.Matrix("rho eigenvectors", naux, naux)
    PQrho_lvl.diagonalize(U, rho, core.DiagonalizeOrder.Ascending)

    # "Gridless DFT"
    mask = rho.np < rho_thresh  # Values too small cause singularities
    rho.np[mask] = rho_thresh

    dft_size = rho.shape[0]

    inp = {"RHO_A": rho}
    out = {
        "V": core.Vector(dft_size),
        "V_RHO_A": core.Vector(dft_size),
        "V_RHO_A_RHO_A": core.Vector(dft_size)
    }

    func_x = core.LibXCFunctional('XC_LDA_X', True)
    func_x.compute_functional(inp, out, dft_size, 2)
    out["V_RHO_A_RHO_A"].scale(1.0 - x_alpha)

    func_c = core.LibXCFunctional('XC_LDA_C_VWN', True)
    func_c.compute_functional(inp, out, dft_size, 2)

    out["V_RHO_A_RHO_A"].np[mask] = 0

    # Rotate back
    Ul = U.clone()
    Ul.np[:] *= out["V_RHO_A_RHO_A"].np
    tmp = core.doublet(Ul, U, False, True)

    # Undo the leveling
    return core.triplet(halfp_Saux, tmp, halfp_Saux, False, False, False)
def _compute_fxc(PQrho, half_Saux, halfp_Saux, rho_thresh=1.e-8):
    """
    Computes the gridless (P|fxc|Q) ALDA tensor.
    """
    naux = PQrho.shape[0]

    # Level it out
    PQrho_lvl = core.triplet(half_Saux, PQrho, half_Saux, False, False, False)

    # Rotate into a diagonal basis
    rho = core.Vector("rho eigenvalues", naux)
    U = core.Matrix("rho eigenvectors", naux, naux)
    PQrho_lvl.diagonalize(U, rho, core.DiagonalizeOrder.Ascending)

    # "Gridless DFT"
    mask = rho.np < rho_thresh  # Values too small cause singularities
    rho.np[mask] = rho_thresh

    dft_size = rho.shape[0]

    inp = {"RHO_A": rho}
    out = {
        "V": core.Vector(dft_size),
        "V_RHO_A": core.Vector(dft_size),
        "V_RHO_A_RHO_A": core.Vector(dft_size)
    }

    func_x = core.LibXCFunctional('XC_LDA_X', True)
    func_x.compute_functional(inp, out, dft_size, 2)

    func_c = core.LibXCFunctional('XC_LDA_C_VWN', True)
    func_c.compute_functional(inp, out, dft_size, 2)

    out["V_RHO_A_RHO_A"].np[mask] = 0

    # Rotate back
    Ul = U.clone()
    Ul.np[:] *= out["V_RHO_A_RHO_A"].np
    tmp = core.doublet(Ul, U, False, True)

    # Undo the leveling
    return core.triplet(halfp_Saux, tmp, halfp_Saux, False, False, False)
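# Illustration (not the Psi4 API): the two routines above apply a scalar kernel to the
# auxiliary-basis density by working in its eigenbasis.  A minimal NumPy sketch of the
# same "level -> diagonalize -> apply kernel -> un-level" pattern, with a placeholder
# kernel() standing in for the LibXC second derivative:
import numpy as np

def apply_scalar_kernel(PQrho, S_half, S_phalf, kernel, thresh=1.e-8):
    # Level out the metric, then diagonalize
    lvl = S_half @ PQrho @ S_half
    rho, U = np.linalg.eigh(lvl)
    mask = rho < thresh
    rho = np.where(mask, thresh, rho)
    # Evaluate the kernel on the eigenvalues, zeroing the masked (near-singular) values
    k = kernel(rho)
    k[mask] = 0.0
    # Rotate back and undo the leveling
    tmp = (U * k) @ U.T
    return S_phalf @ tmp @ S_phalf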
def _core_triplet(A, B, C, transA, transB, transC):
    """Multiply three matrices together.

    .. deprecated:: 1.4
       Use :py:func:`psi4.core.triplet` instead.

    """
    warnings.warn(
        "Using `psi4.core.Matrix.triplet` instead of `psi4.core.triplet` is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return core.triplet(A, B, C, transA, transB, transC)
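# Migration sketch for the deprecation above (illustrative shapes only, assuming
# psi4.core is imported as `core`):
A = core.Matrix(2, 3)
B = core.Matrix(3, 3)
C = core.Matrix(2, 3)
# core.triplet(A, B, C, transA, transB, transC) forms the product with optional
# transposes; requesting C**T here gives a (2 x 2) result and replaces the
# deprecated core.Matrix.triplet bound method one-for-one.
D = core.triplet(A, B, C, False, False, True)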
def orthogonalize(C, S):
    nbf, nocc = C.shape

    eigenvectors = core.Matrix(nocc, nocc)
    eigvals = core.Vector(nocc)
    sqrt_eigvals = core.Vector(nocc)

    CTSC = core.triplet(C, S, C, True, False, False)
    CTSC.diagonalize(eigenvectors, eigvals, core.DiagonalizeOrder.Ascending)

    orthonormal = core.doublet(C, eigenvectors, False, False)

    sqrt_eigvals.np[:] = np.sqrt(eigvals.np)
    orthonormal.np[:, :] /= sqrt_eigvals.np[np.newaxis, :]

    return orthonormal
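# A minimal NumPy analogue of orthogonalize() above (a sketch, not the Psi4
# implementation): diagonalize C^T S C and rescale the rotated columns so that
# the result satisfies C'^T S C' = 1.
import numpy as np

def orthogonalize_np(C, S):
    w, V = np.linalg.eigh(C.T @ S @ C)
    return (C @ V) / np.sqrt(w)[np.newaxis, :]

# Quick self-check with a random positive-definite "overlap" metric:
rng = np.random.default_rng(0)
X = rng.standard_normal((5, 5))
S = X @ X.T + 5 * np.eye(5)
C = rng.standard_normal((5, 3))
Cp = orthogonalize_np(C, S)
assert np.allclose(Cp.T @ S @ Cp, np.eye(3))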
def _ROHF_orbital_gradient(self, save_fock: bool, max_diis_vectors: int) -> float:
    # Only the inact-act, inact-vir, and act-vir rotations are non-redundant
    dim_zero = core.Dimension(self.nirrep(), "Zero Dim")
    noccpi = self.doccpi() + self.soccpi()
    row_slice = core.Slice(dim_zero, noccpi)
    col_slice = core.Slice(self.doccpi(), self.nmopi())
    MOgradient = self.moFeff().get_block(row_slice, col_slice)

    # Zero the active-active block
    for h in range(MOgradient.nirrep()):
        socc = self.soccpi()[h]
        docc = self.doccpi()[h]
        MOgradient.nph[h][docc:docc + socc, 0:socc] = 0

    # Grab inact-act and act-vir orbs
    # Ct is (nmo x nmo), not the (nso x nmo) you would expect
    row_slice = core.Slice(dim_zero, self.nmopi())
    col_slice = core.Slice(dim_zero, noccpi)
    Cia = self.Ct().get_block(row_slice, col_slice)
    col_slice = core.Slice(self.doccpi(), self.nmopi())
    Cav = self.Ct().get_block(row_slice, col_slice)

    # Back transform MOgradient
    gradient = core.triplet(Cia, MOgradient, Cav, False, False, True)

    if save_fock:
        if not self.initialized_diis_manager_:
            self.diis_manager_ = core.DIISManager(max_diis_vectors, "HF DIIS vector",
                                                  RemovalPolicy.LargestError, StoragePolicy.OnDisk)
            self.diis_manager_.set_error_vector_size(gradient)
            self.diis_manager_.set_vector_size(self.soFeff())
            self.initialized_diis_manager_ = True

        self.diis_manager_.add_entry(gradient, self.soFeff())

    if self.options().get_bool("DIIS_RMS_ERROR"):
        return gradient.rms()
    else:
        return gradient.absmax()
def __call__(self, mol1_wfn, mol2_wfn):
    nbf = self.p.dimer_basis.nbf()
    nocc = mol1_wfn.nalpha() + mol2_wfn.nalpha()

    # Take the occupied orbitals from the two HF monomer wavefunctions
    # and pack them (block diagonal) into the dimer basis set.
    m1_OCC = mol1_wfn.Ca_subset('SO', 'OCC')
    m2_OCC = mol2_wfn.Ca_subset('SO', 'OCC')

    C = core.Matrix(nbf, nocc)
    C.np[:mol1_wfn.nso(), :mol1_wfn.nalpha()] = m1_OCC.np[:, :]
    C.np[-mol2_wfn.nso():, -mol2_wfn.nalpha():] = m2_OCC.np[:, :]

    C = orthogonalize(C, self.p.dimer_S)

    # At this point, it should be the case that
    #     C.T * S * C == I
    np.testing.assert_array_almost_equal(
        core.triplet(C, self.p.dimer_S, C, True, False, False), np.eye(nocc))

    self.jk.C_clear()
    self.jk.C_left_add(C)
    self.jk.compute()

    J = self.jk.J()[0]
    K = self.jk.K()[0]
    D = self.jk.D()[0]

    # 2T + 2V + 2J - K
    FH = J.clone()
    FH.zero()
    FH.axpy(2, self.p.dimer_T)
    FH.axpy(2, self.p.dimer_V)
    FH.axpy(2, J)
    FH.axpy(-1, K)

    energy = FH.vector_dot(D) + self.p.dimer_basis.molecule().nuclear_repulsion_energy()
    hl = energy - (mol1_wfn.energy() + mol2_wfn.energy())
    return hl
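# Note on the energy expression evaluated above: with D built from the orthogonalized
# occupied block and FH collecting 2T + 2V + 2J - K, the quantity computed is the
# standard closed-shell expression
#     E = sum_{mu,nu} D_{mu,nu} (2T + 2V + 2J - K)_{mu,nu} + E_nuc,
# and the returned `hl` subtracts the two monomer SCF energies, i.e. a
# Heitler-London-type interaction energy (interpretation inferred from the name).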
def build_sapt_jk_cache(wfn_A, wfn_B, jk, do_print=True):
    """
    Constructs the DCBS cache data required to compute ELST/EXCH/IND
    """

    core.print_out("\n ==> Preparing SAPT Data Cache <== \n\n")
    jk.print_header()

    cache = {}
    cache["wfn_A"] = wfn_A
    cache["wfn_B"] = wfn_B

    # First grab the orbitals
    cache["Cocc_A"] = wfn_A.Ca_subset("AO", "OCC")
    cache["Cvir_A"] = wfn_A.Ca_subset("AO", "VIR")

    cache["Cocc_B"] = wfn_B.Ca_subset("AO", "OCC")
    cache["Cvir_B"] = wfn_B.Ca_subset("AO", "VIR")

    cache["eps_occ_A"] = wfn_A.epsilon_a_subset("AO", "OCC")
    cache["eps_vir_A"] = wfn_A.epsilon_a_subset("AO", "VIR")

    cache["eps_occ_B"] = wfn_B.epsilon_a_subset("AO", "OCC")
    cache["eps_vir_B"] = wfn_B.epsilon_a_subset("AO", "VIR")

    # Build the densities as HF takes an extra "step"
    cache["D_A"] = core.doublet(cache["Cocc_A"], cache["Cocc_A"], False, True)
    cache["D_B"] = core.doublet(cache["Cocc_B"], cache["Cocc_B"], False, True)

    cache["P_A"] = core.doublet(cache["Cvir_A"], cache["Cvir_A"], False, True)
    cache["P_B"] = core.doublet(cache["Cvir_B"], cache["Cvir_B"], False, True)

    # Potential ints
    mints = core.MintsHelper(wfn_A.basisset())
    cache["V_A"] = mints.ao_potential()
    # cache["V_A"].axpy(1.0, wfn_A.Va())

    mints = core.MintsHelper(wfn_B.basisset())
    cache["V_B"] = mints.ao_potential()
    # cache["V_B"].axpy(1.0, wfn_B.Va())

    # Anything else we might need
    cache["S"] = wfn_A.S().clone()

    # J and K matrices
    jk.C_clear()

    # Normal J/K for Monomer A
    jk.C_left_add(wfn_A.Ca_subset("SO", "OCC"))
    jk.C_right_add(wfn_A.Ca_subset("SO", "OCC"))

    # Normal J/K for Monomer B
    jk.C_left_add(wfn_B.Ca_subset("SO", "OCC"))
    jk.C_right_add(wfn_B.Ca_subset("SO", "OCC"))

    # K_O J/K
    C_O_A = core.triplet(cache["D_B"], cache["S"], cache["Cocc_A"], False, False, False)
    jk.C_left_add(C_O_A)
    jk.C_right_add(cache["Cocc_A"])

    jk.compute()

    # Clone them as the JK object will overwrite.
    cache["J_A"] = jk.J()[0].clone()
    cache["K_A"] = jk.K()[0].clone()

    cache["J_B"] = jk.J()[1].clone()
    cache["K_B"] = jk.K()[1].clone()

    cache["J_O"] = jk.J()[2].clone()
    cache["K_O"] = jk.K()[2].clone()
    cache["K_O"].transpose_this()

    monA_nr = wfn_A.molecule().nuclear_repulsion_energy()
    monB_nr = wfn_B.molecule().nuclear_repulsion_energy()
    dimer_nr = wfn_A.molecule().extract_subsets([1, 2]).nuclear_repulsion_energy()

    cache["nuclear_repulsion_energy"] = dimer_nr - monA_nr - monB_nr

    return cache
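# Hedged usage sketch: the cache built above is consumed by the exchange() and
# induction() routines later in this file.  Assuming two converged monomer SCF
# wavefunctions (wfn_A, wfn_B) in the dimer-centered basis and a JK object built
# on that same basis, the flow would look roughly like:
#
#     jk = core.JK.build(wfn_A.basisset())
#     jk.initialize()
#     cache = build_sapt_jk_cache(wfn_A, wfn_B, jk)
#     exch = exchange(cache, jk)              # E10 exchange terms
#     ind = induction(cache, jk, Sinf=True)   # E20 induction / exchange-induction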
def induction(cache, jk, do_print=True, maxiter=12, conv=1.e-8, do_response=True, Sinf=False, sapt_jk_B=None):
    """
    Compute Ind20 and Exch-Ind20 quantities from a SAPT cache and JK object.
    """
    if do_print:
        core.print_out("\n ==> E20 Induction <== \n\n")

    # Build Induction and Exchange-Induction potentials
    S = cache["S"]

    D_A = cache["D_A"]
    V_A = cache["V_A"]
    J_A = cache["J_A"]
    K_A = cache["K_A"]

    D_B = cache["D_B"]
    V_B = cache["V_B"]
    J_B = cache["J_B"]
    K_B = cache["K_B"]

    K_O = cache["K_O"]
    J_O = cache["J_O"]

    jk.C_clear()

    jk.C_left_add(core.Matrix.chain_dot(D_B, S, cache["Cocc_A"]))
    jk.C_right_add(cache["Cocc_A"])

    jk.C_left_add(core.Matrix.chain_dot(D_B, S, D_A, S, cache["Cocc_B"]))
    jk.C_right_add(cache["Cocc_B"])

    jk.C_left_add(core.Matrix.chain_dot(D_A, S, D_B, S, cache["Cocc_A"]))
    jk.C_right_add(cache["Cocc_A"])

    jk.compute()

    J_Ot, J_P_B, J_P_A = jk.J()
    K_Ot, K_P_B, K_P_A = jk.K()

    # Exch-Ind Potential A
    EX_A = K_B.clone()
    EX_A.scale(-1.0)
    EX_A.axpy(-2.0, J_O)
    EX_A.axpy(1.0, K_O)
    EX_A.axpy(2.0, J_P_B)

    EX_A.axpy(-1.0, core.Matrix.chain_dot(S, D_B, V_A))
    EX_A.axpy(-2.0, core.Matrix.chain_dot(S, D_B, J_A))
    EX_A.axpy(1.0, core.Matrix.chain_dot(S, D_B, K_A))
    EX_A.axpy(1.0, core.Matrix.chain_dot(S, D_B, S, D_A, V_B))
    EX_A.axpy(2.0, core.Matrix.chain_dot(S, D_B, S, D_A, J_B))
    EX_A.axpy(1.0, core.Matrix.chain_dot(S, D_B, V_A, D_B, S))
    EX_A.axpy(2.0, core.Matrix.chain_dot(S, D_B, J_A, D_B, S))
    EX_A.axpy(-1.0, core.Matrix.chain_dot(S, D_B, K_O, trans=[False, False, True]))
    EX_A.axpy(-1.0, core.Matrix.chain_dot(V_B, D_B, S))
    EX_A.axpy(-2.0, core.Matrix.chain_dot(J_B, D_B, S))
    EX_A.axpy(1.0, core.Matrix.chain_dot(K_B, D_B, S))
    EX_A.axpy(1.0, core.Matrix.chain_dot(V_B, D_A, S, D_B, S))
    EX_A.axpy(2.0, core.Matrix.chain_dot(J_B, D_A, S, D_B, S))
    EX_A.axpy(-1.0, core.Matrix.chain_dot(K_O, D_B, S))

    EX_A = core.Matrix.chain_dot(cache["Cocc_A"], EX_A, cache["Cvir_A"], trans=[True, False, False])

    # Exch-Ind Potential B
    EX_B = K_A.clone()
    EX_B.scale(-1.0)
    EX_B.axpy(-2.0, J_O)
    EX_B.axpy(1.0, K_O.transpose())
    EX_B.axpy(2.0, J_P_A)

    EX_B.axpy(-1.0, core.Matrix.chain_dot(S, D_A, V_B))
    EX_B.axpy(-2.0, core.Matrix.chain_dot(S, D_A, J_B))
    EX_B.axpy(1.0, core.Matrix.chain_dot(S, D_A, K_B))
    EX_B.axpy(1.0, core.Matrix.chain_dot(S, D_A, S, D_B, V_A))
    EX_B.axpy(2.0, core.Matrix.chain_dot(S, D_A, S, D_B, J_A))
    EX_B.axpy(1.0, core.Matrix.chain_dot(S, D_A, V_B, D_A, S))
    EX_B.axpy(2.0, core.Matrix.chain_dot(S, D_A, J_B, D_A, S))
    EX_B.axpy(-1.0, core.Matrix.chain_dot(S, D_A, K_O))
    EX_B.axpy(-1.0, core.Matrix.chain_dot(V_A, D_A, S))
    EX_B.axpy(-2.0, core.Matrix.chain_dot(J_A, D_A, S))
    EX_B.axpy(1.0, core.Matrix.chain_dot(K_A, D_A, S))
    EX_B.axpy(1.0, core.Matrix.chain_dot(V_A, D_B, S, D_A, S))
    EX_B.axpy(2.0, core.Matrix.chain_dot(J_A, D_B, S, D_A, S))
    EX_B.axpy(-1.0, core.Matrix.chain_dot(K_O, D_A, S, trans=[True, False, False]))

    EX_B = core.Matrix.chain_dot(cache["Cocc_B"], EX_B, cache["Cvir_B"], trans=[True, False, False])

    # Build electrostatic potential
    w_A = cache["V_A"].clone()
    w_A.axpy(2.0, cache["J_A"])

    w_B = cache["V_B"].clone()
    w_B.axpy(2.0, cache["J_B"])

    w_B_MOA = core.triplet(cache["Cocc_A"], w_B, cache["Cvir_A"], True, False, False)
    w_A_MOB = core.triplet(cache["Cocc_B"], w_A, cache["Cvir_B"], True, False, False)

    # Do uncoupled
    core.print_out(" => Uncoupled Induction <= \n\n")

    unc_x_B_MOA = w_B_MOA.clone()
    unc_x_B_MOA.np[:] /= (cache["eps_occ_A"].np.reshape(-1, 1) - cache["eps_vir_A"].np)
    unc_x_A_MOB = w_A_MOB.clone()
    unc_x_A_MOB.np[:] /= (cache["eps_occ_B"].np.reshape(-1, 1) - cache["eps_vir_B"].np)

    unc_ind_ab = 2.0 * unc_x_B_MOA.vector_dot(w_B_MOA)
    unc_ind_ba = 2.0 * unc_x_A_MOB.vector_dot(w_A_MOB)
    unc_indexch_ab = 2.0 * unc_x_B_MOA.vector_dot(EX_A)
    unc_indexch_ba = 2.0 * unc_x_A_MOB.vector_dot(EX_B)

    ret = {}
    ret["Ind20,u (A<-B)"] = unc_ind_ab
    ret["Ind20,u (A->B)"] = unc_ind_ba
    ret["Ind20,u"] = unc_ind_ab + unc_ind_ba
    ret["Exch-Ind20,u (A<-B)"] = unc_indexch_ab
    ret["Exch-Ind20,u (A->B)"] = unc_indexch_ba
    ret["Exch-Ind20,u"] = unc_indexch_ba + unc_indexch_ab

    plist = [
        "Ind20,u (A<-B)", "Ind20,u (A->B)", "Ind20,u", "Exch-Ind20,u (A<-B)", "Exch-Ind20,u (A->B)",
        "Exch-Ind20,u"
    ]

    if do_print:
        for name in plist:
            # core.set_variable(name, ret[name])
            core.print_out(print_sapt_var(name, ret[name], short=True))
            core.print_out("\n")

    # Exch-Ind without S^2
    if Sinf:
        nocc_A = cache["Cocc_A"].shape[1]
        nocc_B = cache["Cocc_B"].shape[1]
        SAB = core.triplet(cache["Cocc_A"], cache["S"], cache["Cocc_B"], True, False, False)
        num_occ = nocc_A + nocc_B

        Sab = core.Matrix(num_occ, num_occ)
        Sab.np[:nocc_A, nocc_A:] = SAB.np
        Sab.np[nocc_A:, :nocc_A] = SAB.np.T
        Sab.np[np.diag_indices_from(Sab.np)] += 1
        Sab.power(-1.0, 1.e-14)

        Tmo_AA = core.Matrix.from_array(Sab.np[:nocc_A, :nocc_A])
        Tmo_BB = core.Matrix.from_array(Sab.np[nocc_A:, nocc_A:])
        Tmo_AB = core.Matrix.from_array(Sab.np[:nocc_A, nocc_A:])

        T_A = core.triplet(cache["Cocc_A"], Tmo_AA, cache["Cocc_A"], False, False, True)
        T_B = core.triplet(cache["Cocc_B"], Tmo_BB, cache["Cocc_B"], False, False, True)
        T_AB = core.triplet(cache["Cocc_A"], Tmo_AB, cache["Cocc_B"], False, False, True)

        sT_A = core.Matrix.chain_dot(cache["Cvir_A"], unc_x_B_MOA, Tmo_AA, cache["Cocc_A"],
                                     trans=[False, True, False, True])
        sT_B = core.Matrix.chain_dot(cache["Cvir_B"], unc_x_A_MOB, Tmo_BB, cache["Cocc_B"],
                                     trans=[False, True, False, True])
        sT_AB = core.Matrix.chain_dot(cache["Cvir_A"], unc_x_B_MOA, Tmo_AB, cache["Cocc_B"],
                                      trans=[False, True, False, True])
        sT_BA = core.Matrix.chain_dot(cache["Cvir_B"], unc_x_A_MOB, Tmo_AB, cache["Cocc_A"],
                                      trans=[False, True, True, True])

        jk.C_clear()

        jk.C_left_add(core.Matrix.chain_dot(cache["Cocc_A"], Tmo_AA))
        jk.C_right_add(cache["Cocc_A"])

        jk.C_left_add(core.Matrix.chain_dot(cache["Cocc_B"], Tmo_BB))
        jk.C_right_add(cache["Cocc_B"])

        jk.C_left_add(core.Matrix.chain_dot(cache["Cocc_A"], Tmo_AB))
        jk.C_right_add(cache["Cocc_B"])

        jk.compute()

        J_AA_inf, J_BB_inf, J_AB_inf = jk.J()
        K_AA_inf, K_BB_inf, K_AB_inf = jk.K()

        # A <- B
        EX_AA_inf = V_B.clone()
        EX_AA_inf.axpy(-1.00, core.Matrix.chain_dot(S, T_AB, V_B, trans=[False, True, False]))
        EX_AA_inf.axpy(-1.00, core.Matrix.chain_dot(S, T_B, V_B))
        EX_AA_inf.axpy(2.00, J_AB_inf)
        EX_AA_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_AB, J_AB_inf, trans=[False, True, False]))
        EX_AA_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_B, J_AB_inf))
        EX_AA_inf.axpy(2.00, J_BB_inf)
        EX_AA_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_AB, J_BB_inf, trans=[False, True, False]))
        EX_AA_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_B, J_BB_inf))
        EX_AA_inf.axpy(-1.00, K_AB_inf.transpose())
        EX_AA_inf.axpy(1.00, core.Matrix.chain_dot(S, T_AB, K_AB_inf, trans=[False, True, True]))
        EX_AA_inf.axpy(1.00, core.Matrix.chain_dot(S, T_B, K_AB_inf, trans=[False, False, True]))
        EX_AA_inf.axpy(-1.00, K_BB_inf)
        EX_AA_inf.axpy(1.00, core.Matrix.chain_dot(S, T_AB, K_BB_inf, trans=[False, True, False]))
        EX_AA_inf.axpy(1.00, core.Matrix.chain_dot(S, T_B, K_BB_inf))

        EX_AB_inf = V_A.clone()
        EX_AB_inf.axpy(-1.00, core.Matrix.chain_dot(S, T_AB, V_A, trans=[False, True, False]))
        EX_AB_inf.axpy(-1.00, core.Matrix.chain_dot(S, T_B, V_A))
        EX_AB_inf.axpy(2.00, J_AA_inf)
        EX_AB_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_AB, J_AA_inf, trans=[False, True, False]))
        EX_AB_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_B, J_AA_inf))
        EX_AB_inf.axpy(2.00, J_AB_inf)
        EX_AB_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_AB, J_AB_inf, trans=[False, True, False]))
        EX_AB_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_B, J_AB_inf))
        EX_AB_inf.axpy(-1.00, K_AA_inf)
        EX_AB_inf.axpy(1.00, core.Matrix.chain_dot(S, T_AB, K_AA_inf, trans=[False, True, False]))
        EX_AB_inf.axpy(1.00, core.Matrix.chain_dot(S, T_B, K_AA_inf))
        EX_AB_inf.axpy(-1.00, K_AB_inf)
        EX_AB_inf.axpy(1.00, core.Matrix.chain_dot(S, T_AB, K_AB_inf, trans=[False, True, False]))
        EX_AB_inf.axpy(1.00, core.Matrix.chain_dot(S, T_B, K_AB_inf))

        # B <- A
        EX_BB_inf = V_A.clone()
        EX_BB_inf.axpy(-1.00, core.Matrix.chain_dot(S, T_AB, V_A))
        EX_BB_inf.axpy(-1.00, core.Matrix.chain_dot(S, T_A, V_A))
        EX_BB_inf.axpy(2.00, J_AB_inf)
        EX_BB_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_AB, J_AB_inf))
        EX_BB_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_A, J_AB_inf))
        EX_BB_inf.axpy(2.00, J_AA_inf)
        EX_BB_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_AB, J_AA_inf))
        EX_BB_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_A, J_AA_inf))
        EX_BB_inf.axpy(-1.00, K_AB_inf)
        EX_BB_inf.axpy(1.00, core.Matrix.chain_dot(S, T_AB, K_AB_inf))
        EX_BB_inf.axpy(1.00, core.Matrix.chain_dot(S, T_A, K_AB_inf))
        EX_BB_inf.axpy(-1.00, K_AA_inf)
        EX_BB_inf.axpy(1.00, core.Matrix.chain_dot(S, T_AB, K_AA_inf))
        EX_BB_inf.axpy(1.00, core.Matrix.chain_dot(S, T_A, K_AA_inf))

        EX_BA_inf = V_B.clone()
        EX_BA_inf.axpy(-1.00, core.Matrix.chain_dot(S, T_AB, V_B))
        EX_BA_inf.axpy(-1.00, core.Matrix.chain_dot(S, T_A, V_B))
        EX_BA_inf.axpy(2.00, J_BB_inf)
        EX_BA_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_AB, J_BB_inf))
        EX_BA_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_A, J_BB_inf))
        EX_BA_inf.axpy(2.00, J_AB_inf)
        EX_BA_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_AB, J_AB_inf))
        EX_BA_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_A, J_AB_inf))
        EX_BA_inf.axpy(-1.00, K_BB_inf)
        EX_BA_inf.axpy(1.00, core.Matrix.chain_dot(S, T_AB, K_BB_inf))
        EX_BA_inf.axpy(1.00, core.Matrix.chain_dot(S, T_A, K_BB_inf))
        EX_BA_inf.axpy(-1.00, K_AB_inf.transpose())
        EX_BA_inf.axpy(1.00, core.Matrix.chain_dot(S, T_AB, K_AB_inf, trans=[False, False, True]))
        EX_BA_inf.axpy(1.00, core.Matrix.chain_dot(S, T_A, K_AB_inf, trans=[False, False, True]))

        unc_ind_ab_total = 2.0 * (sT_A.vector_dot(EX_AA_inf) + sT_AB.vector_dot(EX_AB_inf))
        unc_ind_ba_total = 2.0 * (sT_B.vector_dot(EX_BB_inf) + sT_BA.vector_dot(EX_BA_inf))
        unc_indexch_ab_inf = unc_ind_ab_total - unc_ind_ab
        unc_indexch_ba_inf = unc_ind_ba_total - unc_ind_ba

        ret["Exch-Ind20,u (A<-B) (S^inf)"] = unc_indexch_ab_inf
        ret["Exch-Ind20,u (A->B) (S^inf)"] = unc_indexch_ba_inf
        ret["Exch-Ind20,u (S^inf)"] = unc_indexch_ba_inf + unc_indexch_ab_inf

        if do_print:
            for name in plist[3:]:
                name = name + ' (S^inf)'
                core.print_out(print_sapt_var(name, ret[name], short=True))
                core.print_out("\n")

    # Do coupled
    if do_response:
        core.print_out("\n => Coupled Induction <= \n\n")

        x_B_MOA, x_A_MOB = _sapt_cpscf_solve(cache, jk, w_B_MOA, w_A_MOB, 20, 1.e-6, sapt_jk_B=sapt_jk_B)

        ind_ab = 2.0 * x_B_MOA.vector_dot(w_B_MOA)
        ind_ba = 2.0 * x_A_MOB.vector_dot(w_A_MOB)
        indexch_ab = 2.0 * x_B_MOA.vector_dot(EX_A)
        indexch_ba = 2.0 * x_A_MOB.vector_dot(EX_B)

        ret["Ind20,r (A<-B)"] = ind_ab
        ret["Ind20,r (A->B)"] = ind_ba
        ret["Ind20,r"] = ind_ab + ind_ba
        ret["Exch-Ind20,r (A<-B)"] = indexch_ab
        ret["Exch-Ind20,r (A->B)"] = indexch_ba
        ret["Exch-Ind20,r"] = indexch_ba + indexch_ab

        if do_print:
            core.print_out("\n")
            for name in plist:
                name = name.replace(",u", ",r")
                # core.set_variable(name, ret[name])
                core.print_out(print_sapt_var(name, ret[name], short=True))
                core.print_out("\n")

        # Exch-Ind without S^2
        if Sinf:
            cT_A = core.Matrix.chain_dot(cache["Cvir_A"], x_B_MOA, Tmo_AA, cache["Cocc_A"],
                                         trans=[False, True, False, True])
            cT_B = core.Matrix.chain_dot(cache["Cvir_B"], x_A_MOB, Tmo_BB, cache["Cocc_B"],
                                         trans=[False, True, False, True])
            cT_AB = core.Matrix.chain_dot(cache["Cvir_A"], x_B_MOA, Tmo_AB, cache["Cocc_B"],
                                          trans=[False, True, False, True])
            cT_BA = core.Matrix.chain_dot(cache["Cvir_B"], x_A_MOB, Tmo_AB, cache["Cocc_A"],
                                          trans=[False, True, True, True])

            ind_ab_total = 2.0 * (cT_A.vector_dot(EX_AA_inf) + cT_AB.vector_dot(EX_AB_inf))
            ind_ba_total = 2.0 * (cT_B.vector_dot(EX_BB_inf) + cT_BA.vector_dot(EX_BA_inf))
            indexch_ab_inf = ind_ab_total - ind_ab
            indexch_ba_inf = ind_ba_total - ind_ba

            ret["Exch-Ind20,r (A<-B) (S^inf)"] = indexch_ab_inf
            ret["Exch-Ind20,r (A->B) (S^inf)"] = indexch_ba_inf
            ret["Exch-Ind20,r (S^inf)"] = indexch_ba_inf + indexch_ab_inf

            if do_print:
                for name in plist[3:]:
                    name = name.replace(",u", ",r") + ' (S^inf)'
                    core.print_out(print_sapt_var(name, ret[name], short=True))
                    core.print_out("\n")

    return ret
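# The uncoupled Ind20 pieces above reduce to a simple sum-over-states expression,
#     Ind20,u(A<-B) = 2 * sum_{ia} w_ia^2 / (eps_i - eps_a),
# which a few lines of NumPy reproduce (toy numbers, not Psi4 data):
import numpy as np

def ind20_uncoupled(w_ov, eps_occ, eps_vir):
    # x_ia = w_ia / (eps_i - eps_a), then contract with w again
    x_ov = w_ov / (eps_occ[:, None] - eps_vir[None, :])
    return 2.0 * np.sum(x_ov * w_ov)

w = np.array([[0.01, 0.02], [0.00, 0.03]])
e_occ = np.array([-0.5, -0.4])
e_vir = np.array([0.1, 0.3])
print(ind20_uncoupled(w, e_occ, e_vir))  # negative, as expected for a stabilizing induction term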
def exchange(cache, jk, do_print=True):
    """
    Computes the E10 exchange (S^2 and S^inf) from a build_sapt_jk_cache datacache.
    """
    if do_print:
        core.print_out("\n ==> E10 Exchange <== \n\n")

    # Build potentials
    h_A = cache["V_A"].clone()
    h_A.axpy(2.0, cache["J_A"])
    h_A.axpy(-1.0, cache["K_A"])

    h_B = cache["V_B"].clone()
    h_B.axpy(2.0, cache["J_B"])
    h_B.axpy(-1.0, cache["K_B"])

    w_A = cache["V_A"].clone()
    w_A.axpy(2.0, cache["J_A"])

    w_B = cache["V_B"].clone()
    w_B.axpy(2.0, cache["J_B"])

    # Build inverse exchange metric
    nocc_A = cache["Cocc_A"].shape[1]
    nocc_B = cache["Cocc_B"].shape[1]
    SAB = core.triplet(cache["Cocc_A"], cache["S"], cache["Cocc_B"], True, False, False)
    num_occ = nocc_A + nocc_B

    Sab = core.Matrix(num_occ, num_occ)
    Sab.np[:nocc_A, nocc_A:] = SAB.np
    Sab.np[nocc_A:, :nocc_A] = SAB.np.T
    Sab.np[np.diag_indices_from(Sab.np)] += 1
    Sab.power(-1.0, 1.e-14)
    Sab.np[np.diag_indices_from(Sab.np)] -= 1.0

    Tmo_AA = core.Matrix.from_array(Sab.np[:nocc_A, :nocc_A])
    Tmo_BB = core.Matrix.from_array(Sab.np[nocc_A:, nocc_A:])
    Tmo_AB = core.Matrix.from_array(Sab.np[:nocc_A, nocc_A:])

    T_A = np.dot(cache["Cocc_A"], Tmo_AA).dot(cache["Cocc_A"].np.T)
    T_B = np.dot(cache["Cocc_B"], Tmo_BB).dot(cache["Cocc_B"].np.T)
    T_AB = np.dot(cache["Cocc_A"], Tmo_AB).dot(cache["Cocc_B"].np.T)

    S = cache["S"]
    D_A = cache["D_A"]
    P_A = cache["P_A"]
    D_B = cache["D_B"]
    P_B = cache["P_B"]

    # Compute the J and K matrices
    jk.C_clear()

    jk.C_left_add(cache["Cocc_A"])
    jk.C_right_add(core.doublet(cache["Cocc_A"], Tmo_AA, False, False))

    jk.C_left_add(cache["Cocc_B"])
    jk.C_right_add(core.doublet(cache["Cocc_A"], Tmo_AB, False, False))

    jk.C_left_add(cache["Cocc_A"])
    jk.C_right_add(core.Matrix.chain_dot(P_B, S, cache["Cocc_A"]))

    jk.compute()

    JT_A, JT_AB, Jij = jk.J()
    KT_A, KT_AB, Kij = jk.K()

    # Start S^2
    Exch_s2 = 0.0

    tmp = core.Matrix.chain_dot(D_A, S, D_B, S, P_A)
    Exch_s2 -= 2.0 * w_B.vector_dot(tmp)

    tmp = core.Matrix.chain_dot(D_B, S, D_A, S, P_B)
    Exch_s2 -= 2.0 * w_A.vector_dot(tmp)

    tmp = core.Matrix.chain_dot(P_A, S, D_B)
    Exch_s2 -= 2.0 * Kij.vector_dot(tmp)

    if do_print:
        core.print_out(print_sapt_var("Exch10(S^2) ", Exch_s2, short=True))
        core.print_out("\n")

    # Start S^inf
    Exch10 = 0.0
    Exch10 -= 2.0 * np.vdot(cache["D_A"], cache["K_B"])
    Exch10 += 2.0 * np.vdot(T_A, h_B.np)
    Exch10 += 2.0 * np.vdot(T_B, h_A.np)
    Exch10 += 2.0 * np.vdot(T_AB, h_A.np + h_B.np)
    Exch10 += 4.0 * np.vdot(T_B, JT_AB.np - 0.5 * KT_AB.np)
    Exch10 += 4.0 * np.vdot(T_A, JT_AB.np - 0.5 * KT_AB.np)
    Exch10 += 4.0 * np.vdot(T_B, JT_A.np - 0.5 * KT_A.np)
    Exch10 += 4.0 * np.vdot(T_AB, JT_AB.np - 0.5 * KT_AB.np.T)

    if do_print:
        core.set_variable("Exch10", Exch10)
        core.print_out(print_sapt_var("Exch10", Exch10, short=True))
        core.print_out("\n")

    return {"Exch10(S^2)": Exch_s2, "Exch10": Exch10}
def _so_to_mo(self, X):
    """Transform (C_occ)^T X C_vir"""
    return [core.triplet(self.Co[i], X[i], self.Cv[i], True, False, False) for i in (0, 1)]
def cpscf_linear_response(wfn, *args, **kwargs):
    """
    Compute the static properties from a reference wavefunction. The currently implemented properties are
      - dipole polarizability
      - quadrupole polarizability

    Parameters
    ----------
    wfn : psi4 wavefunction
        The reference wavefunction.
    args : list
        The list of arguments. For each argument, such as ``dipole polarizability``, the corresponding
        response is returned. The user may also choose to pass a list or tuple of custom vectors.
    kwargs : dict
        Options that control how the response is computed. The following options are supported
        (with default values):
          - ``conv_tol``: 1e-5
          - ``max_iter``: 10
          - ``print_lvl``: 2

    Returns
    -------
    responses : list
        The list of responses.
    """
    mints = core.MintsHelper(wfn.basisset())

    # list of dictionaries to control response calculations, count how many user-supplied vectors we have
    complete_dict = []
    n_user = 0

    for arg in args:

        # for each string keyword, append the appropriate dictionary (vide supra) to our list
        if isinstance(arg, str):
            ret = property_dicts.get(arg)
            if ret:
                complete_dict.append(ret)
            else:
                raise ValidationError('Do not understand {}. Abort.'.format(arg))

        # the user passed a list of vectors. absorb them into a dictionary
        elif isinstance(arg, tuple) or isinstance(arg, list):
            complete_dict.append({
                'name': 'User Vectors',
                'length': len(arg),
                'vectors': arg,
                'vector names': ['User Vector {}_{}'.format(n_user, i) for i in range(len(arg))]
            })
            n_user += len(arg)

        # single vector passed. stored in a dictionary as a list of length 1 (can be handled as the case above that way)
        # note: the length is set to '0' to designate that it was not really passed as a list
        else:
            complete_dict.append({
                'name': 'User Vector',
                'length': 0,
                'vectors': [arg],
                'vector names': ['User Vector {}'.format(n_user)]
            })
            n_user += 1

    # vectors will be passed to the cphf solver, vector_names stores the corresponding names
    vectors = []
    vector_names = []

    # construct the list of vectors. for the keywords, fetch the appropriate tensors from MintsHelper
    for prop in complete_dict:
        if 'User' in prop['name']:
            for name, vec in zip(prop['vector names'], prop['vectors']):
                vectors.append(vec)
                vector_names.append(name)
        else:
            tmp_vectors = prop['mints_function'](mints)
            for tmp in tmp_vectors:
                tmp.scale(-2.0)  # RHF only
                vectors.append(tmp)
                vector_names.append(tmp.name)

    # do we have any vectors to work with?
    if len(vectors) == 0:
        raise ValidationError('I have no vectors to work with. Aborting.')

    # print information on module, vectors that will be used
    _print_header(complete_dict, n_user)

    # fetch wavefunction information
    nbf = wfn.nmo()
    ndocc = wfn.nalpha()
    nvirt = nbf - ndocc

    c_occ = wfn.Ca_subset("AO", "OCC")
    c_vir = wfn.Ca_subset("AO", "VIR")

    # the vectors need to be in the MO basis. if they have the shape nbf x nbf, transform.
    for i in range(len(vectors)):
        shape = vectors[i].shape

        if shape == (nbf, nbf):
            vectors[i] = core.triplet(c_occ, vectors[i], c_vir, True, False, False)

        # verify that this vector already has the correct shape
        elif shape != (ndocc, nvirt):
            raise ValidationError('ERROR: "{}" has an unrecognized shape. Must be either ({}, {}) or ({}, {})'.format(
                vector_names[i], nbf, nbf, ndocc, nvirt))

    # compute response vectors for each input vector
    params = [kwargs.pop("conv_tol", 1.e-5), kwargs.pop("max_iter", 10), kwargs.pop("print_lvl", 2)]

    responses = wfn.cphf_solve(vectors, *params)

    # zip vectors, responses for easy access
    vectors = {k: v for k, v in zip(vector_names, vectors)}
    responses = {k: v for k, v in zip(vector_names, responses)}

    # compute response values, format output
    output = []
    for prop in complete_dict:

        # try to replicate the data structure of the input
        if 'User' in prop['name']:
            if prop['length'] == 0:
                output.append(responses[prop['vector names'][0]])
            else:
                buf = []
                for name in prop['vector names']:
                    buf.append(responses[name])
                output.append(buf)

        else:
            names = prop['vector names']
            dim = len(names)
            buf = np.zeros((dim, dim))
            for i, i_name in enumerate(names):
                for j, j_name in enumerate(names):
                    buf[i, j] = -1.0 * vectors[i_name].vector_dot(responses[j_name])
            output.append(buf)

    _print_output(complete_dict, output)

    return output
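# Hedged usage sketch for cpscf_linear_response(); the exact property keyword is
# whatever key property_dicts defines for the dipole polarizability (not shown in
# this file), so 'DIPOLE' below is only a placeholder:
#
#     _, wfn = psi4.energy('scf', return_wfn=True)
#     alpha, = cpscf_linear_response(wfn, 'DIPOLE', conv_tol=1e-6, max_iter=20, print_lvl=1)
#
# The returned element is a square array of response values assembled from the
# vector/response dot products above.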
def df_fdds_dispersion(primary, auxiliary, cache, leg_points=10, leg_lambda=0.3, do_print=True):

    rho_thresh = core.get_option("SAPT", "SAPT_FDDS_V2_RHO_CUTOFF")

    if do_print:
        core.print_out("\n ==> E20 Dispersion (CHF FDDS) <== \n\n")
        core.print_out(" Legendre Points:  % 10d\n" % leg_points)
        core.print_out(" Lambda Shift:     % 10.3f\n" % leg_lambda)
        core.print_out(" Fxc Kernel:       % 10s\n" % "ALDA")
        core.print_out(" (P|Fxc|Q) Thresh: % 8.3e\n" % rho_thresh)

    # Build object
    df_matrix_keys = ["Cocc_A", "Cvir_A", "Cocc_B", "Cvir_B"]
    fdds_matrix_cache = {key: cache[key] for key in df_matrix_keys}

    df_vector_keys = ["eps_occ_A", "eps_vir_A", "eps_occ_B", "eps_vir_B"]
    fdds_vector_cache = {key: cache[key] for key in df_vector_keys}

    fdds_obj = core.FDDS_Dispersion(primary, auxiliary, fdds_matrix_cache, fdds_vector_cache)

    # Aux Densities
    D = fdds_obj.project_densities([cache["D_A"], cache["D_B"]])

    # Temps
    half_Saux = fdds_obj.aux_overlap().clone()
    half_Saux.power(-0.5, 1.e-12)

    halfp_Saux = fdds_obj.aux_overlap().clone()
    halfp_Saux.power(0.5, 1.e-12)

    # Builds potentials
    W_A = fdds_obj.metric().clone()
    W_A.axpy(1.0, _compute_fxc(D[0], half_Saux, halfp_Saux, rho_thresh=rho_thresh))

    W_B = fdds_obj.metric().clone()
    W_B.axpy(1.0, _compute_fxc(D[1], half_Saux, halfp_Saux, rho_thresh=rho_thresh))

    # Nuke the densities
    del D

    metric = fdds_obj.metric()
    metric_inv = fdds_obj.metric_inv()

    # Integrate
    core.print_out("\n => Time Integration <= \n\n")

    val_pack = ("Omega", "Weight", "Disp20,u", "Disp20", "time [s]")
    core.print_out("% 12s % 12s % 14s % 14s % 10s\n" % val_pack)
    # print("% 12s % 12s % 14s % 14s % 10s" % val_pack)

    start_time = time.time()
    total_uc = 0
    total_c = 0
    for point, weight in zip(*np.polynomial.legendre.leggauss(leg_points)):
        omega = leg_lambda * (1.0 - point) / (1.0 + point)
        lambda_scale = ((2.0 * leg_lambda) / (point + 1.0)**2)

        # Monomer A
        X_A = fdds_obj.form_unc_amplitude("A", omega)

        # Coupled A
        X_A_coupled = X_A.clone()
        XSW_A = core.triplet(X_A, metric_inv, W_A, False, False, False)
        amplitude_inv = metric.clone()
        amplitude_inv.axpy(1.0, XSW_A)
        nremoved = 0
        amplitude = amplitude_inv.pseudoinverse(1.e-13, nremoved)
        amplitude.transpose_this()  # Why is this coming out transposed?
        X_A_coupled.axpy(-1.0, core.triplet(XSW_A, amplitude, X_A, False, False, False))
        del XSW_A, amplitude

        # Monomer B
        X_B = fdds_obj.form_unc_amplitude("B", omega)
        # print(np.linalg.norm(X_B))

        # Coupled B
        X_B_coupled = X_B.clone()
        XSW_B = core.triplet(X_B, metric_inv, W_B, False, False, False)
        amplitude_inv = metric.clone()
        amplitude_inv.axpy(1.0, XSW_B)
        amplitude = amplitude_inv.pseudoinverse(1.e-13, nremoved)
        amplitude.transpose_this()  # Why is this coming out transposed?
        X_B_coupled.axpy(-1.0, core.triplet(XSW_B, amplitude, X_B, False, False, False))
        del XSW_B, amplitude

        # Make sure the results are symmetrized
        for tensor in [X_A, X_B, X_A_coupled, X_B_coupled]:
            tensor.add(tensor.transpose())
            tensor.scale(0.5)

        # Combine
        tmp_uc = core.triplet(metric_inv, X_A, metric_inv, False, False, False)
        value_uc = tmp_uc.vector_dot(X_B)
        del tmp_uc

        tmp_c = core.triplet(metric_inv, X_A_coupled, metric_inv, False, False, False)
        value_c = tmp_c.vector_dot(X_B_coupled)
        del tmp_c

        # Tally
        total_uc += value_uc * weight * lambda_scale
        total_c += value_c * weight * lambda_scale

        if do_print:
            tmp_disp_unc = value_uc * weight * lambda_scale
            tmp_disp = value_c * weight * lambda_scale
            fdds_time = time.time() - start_time
            val_pack = (omega, weight, tmp_disp_unc, tmp_disp, fdds_time)
            core.print_out("% 12.3e % 12.3e % 14.3e % 14.3e %10d\n" % val_pack)
            # print("% 12.3e % 12.3e % 14.3e % 14.3e %10d" % val_pack)

    Disp20_uc = -1.0 / (2.0 * np.pi) * total_uc
    Disp20_c = -1.0 / (2.0 * np.pi) * total_c

    core.print_out("\n")
    core.print_out(print_sapt_var("Disp20,u", Disp20_uc, short=True) + "\n")
    core.print_out(print_sapt_var("Disp20", Disp20_c, short=True) + "\n")

    return {"Disp20,FDDS (unc)": Disp20_uc, "Disp20": Disp20_c}
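# The frequency integration above is a Gauss-Legendre quadrature on t in (-1, 1)
# mapped to omega in (0, inf) via omega = leg_lambda * (1 - t) / (1 + t), with
# Jacobian 2 * leg_lambda / (1 + t)^2 (the lambda_scale factor).  A standalone
# check of that transform, integrating 1/(1 + omega^2) whose exact value is pi/2:
import numpy as np

def casimir_polder_quadrature(f, npoints=10, lam=0.3):
    total = 0.0
    for t, w in zip(*np.polynomial.legendre.leggauss(npoints)):
        omega = lam * (1.0 - t) / (1.0 + t)
        jac = 2.0 * lam / (1.0 + t) ** 2
        total += w * jac * f(omega)
    return total

print(casimir_polder_quadrature(lambda w: 1.0 / (1.0 + w * w)), np.pi / 2)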
def _so_to_mo(self, X):
    """Transform (C_occ)^T X C_vir"""
    return core.triplet(self.Co, X, self.Cv, True, False, False)
def mcscf_solver(ref_wfn):

    # Build CIWavefunction
    core.prepare_options_for_module("DETCI")
    ciwfn = core.CIWavefunction(ref_wfn)

    # Hush a lot of CI output
    ciwfn.set_print(0)

    # Begin with a normal two-step
    step_type = 'Initial CI'
    total_step = core.Matrix("Total step", ciwfn.get_dimension('OA'), ciwfn.get_dimension('AV'))
    start_orbs = ciwfn.get_orbitals("ROT").clone()
    ciwfn.set_orbitals("ROT", start_orbs)

    # Grab da options
    mcscf_orb_grad_conv = core.get_option("DETCI", "MCSCF_R_CONVERGENCE")
    mcscf_e_conv = core.get_option("DETCI", "MCSCF_E_CONVERGENCE")
    mcscf_max_macroiteration = core.get_option("DETCI", "MCSCF_MAXITER")
    mcscf_type = core.get_option("DETCI", "MCSCF_TYPE")
    mcscf_d_file = core.get_option("DETCI", "CI_FILE_START") + 3
    mcscf_nroots = core.get_option("DETCI", "NUM_ROOTS")
    mcscf_wavefunction_type = core.get_option("DETCI", "WFN")
    mcscf_ndet = ciwfn.ndet()
    mcscf_nuclear_energy = ciwfn.molecule().nuclear_repulsion_energy()
    mcscf_steplimit = core.get_option("DETCI", "MCSCF_MAX_ROT")
    mcscf_rotate = core.get_option("DETCI", "MCSCF_ROTATE")

    # DIIS info
    mcscf_diis_start = core.get_option("DETCI", "MCSCF_DIIS_START")
    mcscf_diis_freq = core.get_option("DETCI", "MCSCF_DIIS_FREQ")
    mcscf_diis_error_type = core.get_option("DETCI", "MCSCF_DIIS_ERROR_TYPE")
    mcscf_diis_max_vecs = core.get_option("DETCI", "MCSCF_DIIS_MAX_VECS")

    # One-step info
    mcscf_target_conv_type = core.get_option("DETCI", "MCSCF_ALGORITHM")
    mcscf_so_start_grad = core.get_option("DETCI", "MCSCF_SO_START_GRAD")
    mcscf_so_start_e = core.get_option("DETCI", "MCSCF_SO_START_E")
    mcscf_current_step_type = 'Initial CI'

    # Start with SCF energy and other params
    scf_energy = ciwfn.variable("HF TOTAL ENERGY")
    eold = scf_energy
    norb_iter = 1
    converged = False
    ah_step = False
    qc_step = False
    approx_integrals_only = True

    # Fake info to start with the initial diagonalization
    ediff = 1.e-4
    orb_grad_rms = 1.e-3

    # Grab needed objects
    diis_obj = solvers.DIIS(mcscf_diis_max_vecs)
    mcscf_obj = ciwfn.mcscf_object()

    # Execute the rotate command
    for rot in mcscf_rotate:
        if len(rot) != 4:
            raise p4util.PsiException("Each element of the MCSCF rotate command requires 4 arguments (irrep, orb1, orb2, theta).")

        irrep, orb1, orb2, theta = rot
        if irrep > ciwfn.Ca().nirrep():
            raise p4util.PsiException("MCSCF_ROTATE: Expression %s irrep number is larger than the number of irreps" % (str(rot)))

        if max(orb1, orb2) > ciwfn.Ca().coldim()[irrep]:
            raise p4util.PsiException("MCSCF_ROTATE: Expression %s orbital number exceeds number of orbitals in irrep" % (str(rot)))

        theta = np.deg2rad(theta)

        x = ciwfn.Ca().nph[irrep][:, orb1].copy()
        y = ciwfn.Ca().nph[irrep][:, orb2].copy()

        xp = np.cos(theta) * x - np.sin(theta) * y
        yp = np.sin(theta) * x + np.cos(theta) * y

        ciwfn.Ca().nph[irrep][:, orb1] = xp
        ciwfn.Ca().nph[irrep][:, orb2] = yp

    # Limited RAS functionality
    if core.get_local_option("DETCI", "WFN") == "RASSCF" and mcscf_target_conv_type != "TS":
        core.print_out("\n Warning! Only the TS algorithm for RASSCF wavefunction is currently supported.\n")
        core.print_out(" Switching to the TS algorithm.\n\n")
        mcscf_target_conv_type = "TS"

    # Print out headers
    if mcscf_type == "CONV":
        mtype = " @MCSCF"
        core.print_out("\n ==> Starting MCSCF iterations <==\n\n")
        core.print_out(" Iter Total Energy Delta E Orb RMS CI RMS NCI NORB\n")
    elif mcscf_type == "DF":
        mtype = " @DF-MCSCF"
        core.print_out("\n ==> Starting DF-MCSCF iterations <==\n\n")
        core.print_out(" Iter Total Energy Delta E Orb RMS CI RMS NCI NORB\n")
    else:
        mtype = " @AO-MCSCF"
        core.print_out("\n ==> Starting AO-MCSCF iterations <==\n\n")
        core.print_out(" Iter Total Energy Delta E Orb RMS CI RMS NCI NORB\n")

    # Iterate !
    for mcscf_iter in range(1, mcscf_max_macroiteration + 1):

        # Transform integrals, diagonalize H
        ciwfn.transform_mcscf_integrals(approx_integrals_only)
        nci_iter = ciwfn.diag_h(abs(ediff) * 1.e-2, orb_grad_rms * 1.e-3)

        # After the first diag we need to switch to READ
        ciwfn.set_ci_guess("DFILE")

        ciwfn.form_opdm()
        ciwfn.form_tpdm()
        ci_grad_rms = ciwfn.variable("DETCI AVG DVEC NORM")

        # Update MCSCF object
        Cocc = ciwfn.get_orbitals("DOCC")
        Cact = ciwfn.get_orbitals("ACT")
        Cvir = ciwfn.get_orbitals("VIR")
        opdm = ciwfn.get_opdm(-1, -1, "SUM", False)
        tpdm = ciwfn.get_tpdm("SUM", True)
        mcscf_obj.update(Cocc, Cact, Cvir, opdm, tpdm)

        current_energy = ciwfn.variable("MCSCF TOTAL ENERGY")

        orb_grad_rms = mcscf_obj.gradient_rms()
        ediff = current_energy - eold

        # Print iterations
        print_iteration(mtype, mcscf_iter, current_energy, ediff, orb_grad_rms, ci_grad_rms,
                        nci_iter, norb_iter, mcscf_current_step_type)
        eold = current_energy

        if mcscf_current_step_type == 'Initial CI':
            mcscf_current_step_type = 'TS'

        # Check convergence
        if (orb_grad_rms < mcscf_orb_grad_conv) and (abs(ediff) < abs(mcscf_e_conv)) and \
                (mcscf_iter > 3) and not qc_step:
            core.print_out("\n %s has converged!\n\n" % mtype)
            converged = True
            break

        # Which orbital convergence are we doing?
        if ah_step:
            converged, norb_iter, step = ah_iteration(mcscf_obj, print_micro=False)
            norb_iter += 1

            if converged:
                mcscf_current_step_type = 'AH'
            else:
                core.print_out(" !Warning. Augmented Hessian did not converge. Taking an approx step.\n")
                step = mcscf_obj.approx_solve()
                mcscf_current_step_type = 'TS, AH failure'
        else:
            step = mcscf_obj.approx_solve()
            step_type = 'TS'

        maxstep = step.absmax()
        if maxstep > mcscf_steplimit:
            core.print_out(' Warning! Maxstep = %4.2f, scaling to %4.2f\n' % (maxstep, mcscf_steplimit))
            step.scale(mcscf_steplimit / maxstep)

        xstep = total_step.clone()
        total_step.add(step)

        # Do or add DIIS
        if (mcscf_iter >= mcscf_diis_start) and ("TS" in mcscf_current_step_type):

            # Figure out DIIS error vector
            if mcscf_diis_error_type == "GRAD":
                error = core.triplet(ciwfn.get_orbitals("OA"), mcscf_obj.gradient(),
                                     ciwfn.get_orbitals("AV"), False, False, True)
            else:
                error = step

            diis_obj.add(total_step, error)

            if not (mcscf_iter % mcscf_diis_freq):
                total_step = diis_obj.extrapolate()
                mcscf_current_step_type = 'TS, DIIS'

        # Build the rotation by continuous updates
        if mcscf_iter == 1:
            totalU = mcscf_obj.form_rotation_matrix(total_step)
        else:
            xstep.axpy(-1.0, total_step)
            xstep.scale(-1.0)
            Ustep = mcscf_obj.form_rotation_matrix(xstep)
            totalU = core.doublet(totalU, Ustep, False, False)

        # Build the rotation directly (not recommended)
        # orbs_mat = mcscf_obj.Ck(start_orbs, total_step)

        # Finally rotate and set orbitals
        orbs_mat = core.doublet(start_orbs, totalU, False, False)
        ciwfn.set_orbitals("ROT", orbs_mat)

        # Figure out what the next step should be
        if (orb_grad_rms < mcscf_so_start_grad) and (abs(ediff) < abs(mcscf_so_start_e)) and \
                (mcscf_iter >= 2):
            if mcscf_target_conv_type == 'AH':
                approx_integrals_only = False
                ah_step = True
            elif mcscf_target_conv_type == 'OS':
                approx_integrals_only = False
                mcscf_current_step_type = 'OS, Prep'
                break
            else:
                continue
        #raise p4util.PsiException("")

    # If we converged do not do onestep
    if converged or (mcscf_target_conv_type != 'OS'):
        one_step_iters = []

    # If we are not converged load in Dvec and build iters array
    else:
        one_step_iters = range(mcscf_iter + 1, mcscf_max_macroiteration + 1)
        dvec = ciwfn.D_vector()
        dvec.init_io_files(True)
        dvec.read(0, 0)
        dvec.symnormalize(1.0, 0)

        ci_grad = ciwfn.new_civector(1, mcscf_d_file + 1, True, True)
        ci_grad.set_nvec(1)
        ci_grad.init_io_files(True)

    # Loop for onestep
    for mcscf_iter in one_step_iters:

        # Transform integrals and update the MCSCF object
        ciwfn.transform_mcscf_integrals(ciwfn.H(), False)
        ciwfn.form_opdm()
        ciwfn.form_tpdm()

        # Update MCSCF object
        Cocc = ciwfn.get_orbitals("DOCC")
        Cact = ciwfn.get_orbitals("ACT")
        Cvir = ciwfn.get_orbitals("VIR")
        opdm = ciwfn.get_opdm(-1, -1, "SUM", False)
        tpdm = ciwfn.get_tpdm("SUM", True)
        mcscf_obj.update(Cocc, Cact, Cvir, opdm, tpdm)

        orb_grad_rms = mcscf_obj.gradient_rms()

        # Warning! Does not work for SA-MCSCF
        current_energy = mcscf_obj.current_total_energy()
        current_energy += mcscf_nuclear_energy

        ciwfn.set_variable("CI ROOT %d TOTAL ENERGY" % 1, current_energy)
        ciwfn.set_variable("CURRENT ENERGY", current_energy)
        ciwfn.set_energy(current_energy)

        docc_energy = mcscf_obj.current_docc_energy()
        ci_energy = mcscf_obj.current_ci_energy()

        # Compute CI gradient
        ciwfn.sigma(dvec, ci_grad, 0, 0)
        ci_grad.scale(2.0, 0)
        ci_grad.axpy(-2.0 * ci_energy, dvec, 0, 0)

        ci_grad_rms = ci_grad.norm(0)
        orb_grad_rms = mcscf_obj.gradient().rms()

        ediff = current_energy - eold

        print_iteration(mtype, mcscf_iter, current_energy, ediff, orb_grad_rms, ci_grad_rms,
                        nci_iter, norb_iter, mcscf_current_step_type)
        mcscf_current_step_type = 'OS'

        eold = current_energy

        if (orb_grad_rms < mcscf_orb_grad_conv) and (abs(ediff) < abs(mcscf_e_conv)):
            core.print_out("\n %s has converged!\n\n" % mtype)
            converged = True
            break

        # Take a step
        converged, norb_iter, nci_iter, step = qc_iteration(dvec, ci_grad, ciwfn, mcscf_obj)

        # Rotate integrals to new frame
        total_step.add(step)
        orbs_mat = mcscf_obj.Ck(ciwfn.get_orbitals("ROT"), step)
        ciwfn.set_orbitals("ROT", orbs_mat)

    core.print_out(mtype + " Final Energy: %20.15f\n" % current_energy)

    # Die if we did not converge
    if (not converged):
        if core.get_global_option("DIE_IF_NOT_CONVERGED"):
            raise p4util.PsiException("MCSCF: Iterations did not converge!")
        else:
            core.print_out("\nWarning! MCSCF iterations did not converge!\n\n")

    # Print out CI vector information
    if mcscf_target_conv_type == 'OS':
        dvec.close_io_files()
        ci_grad.close_io_files()

    # For orbital invariant methods we transform the orbitals to the natural or
    # semicanonical basis. Frozen doubly occupied and virtual orbitals are not
    # modified.
    if core.get_option("DETCI", "WFN") == "CASSCF":
        # Do we diagonalize the opdm?
        if core.get_option("DETCI", "NAT_ORBS"):
            ciwfn.ci_nat_orbs()
        else:
            ciwfn.semicanonical_orbs()

        # Retransform integrals and update CI coeffs., OPDM, and TPDM
        ciwfn.transform_mcscf_integrals(approx_integrals_only)
        nci_iter = ciwfn.diag_h(abs(ediff) * 1.e-2, orb_grad_rms * 1.e-3)

        ciwfn.set_ci_guess("DFILE")

        ciwfn.form_opdm()
        ciwfn.form_tpdm()

    proc_util.print_ci_results(ciwfn, "MCSCF", scf_energy, current_energy, print_opdm_no=True)

    # Set final energy
    ciwfn.set_variable("CURRENT ENERGY", ciwfn.variable("MCSCF TOTAL ENERGY"))
    ciwfn.set_energy(ciwfn.variable("MCSCF TOTAL ENERGY"))

    # What do we need to cleanup?
    if core.get_option("DETCI", "MCSCF_CI_CLEANUP"):
        ciwfn.cleanup_ci()
    if core.get_option("DETCI", "MCSCF_DPD_CLEANUP"):
        ciwfn.cleanup_dpd()

    del diis_obj
    del mcscf_obj

    return ciwfn
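The MCSCF_ROTATE handling above applies a 2x2 Givens rotation to a pair of orbital columns. A minimal NumPy sketch of that rotation, assuming a plain ndarray of MO coefficients in place of a psi4 core.Matrix (illustrative only, not part of the solver):

# Illustrative sketch, not psi4 code: rotate two MO columns by a user-supplied angle.
import numpy as np

def rotate_orbital_pair(C, orb1, orb2, theta_deg):
    """Rotate columns orb1 and orb2 of C by theta_deg degrees (returns a copy)."""
    C = np.array(C, dtype=float, copy=True)
    theta = np.deg2rad(theta_deg)
    x, y = C[:, orb1].copy(), C[:, orb2].copy()
    C[:, orb1] = np.cos(theta) * x - np.sin(theta) * y
    C[:, orb2] = np.sin(theta) * x + np.cos(theta) * y
    return C

# The rotation is orthogonal, so an orthonormal MO set stays orthonormal:
C = np.linalg.qr(np.random.rand(6, 4))[0]
C_rot = rotate_orbital_pair(C, 1, 2, 30.0)
assert np.allclose(C_rot.T @ C_rot, np.eye(4))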
def _so_to_mo(self, X):
    """Transform (C_occ)^T X C_vir"""
    return [core.triplet(self.Co[i], X[i], self.Cv[i], True, False, False) for i in (0, 1)]
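For reference, a NumPy-only sketch of the occupied-virtual transform that _so_to_mo performs, assuming plain ndarrays for the alpha/beta coefficient blocks (an illustration, not the psi4 implementation):

# Sketch: (C_occ)^T X C_vir for each spin block, using hypothetical ndarray inputs.
import numpy as np

def so_to_mo_numpy(Co, Cv, X):
    """Return [Co[s].T @ X[s] @ Cv[s] for each spin s], i.e. the occ x vir MO block."""
    return [Co[s].T @ X[s] @ Cv[s] for s in (0, 1)]

nbf, nocc, nvir = 8, 3, 5
Co = [np.random.rand(nbf, nocc) for _ in range(2)]
Cv = [np.random.rand(nbf, nvir) for _ in range(2)]
X = [np.random.rand(nbf, nbf) for _ in range(2)]
assert so_to_mo_numpy(Co, Cv, X)[0].shape == (nocc, nvir)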
def cpscf_linear_response(wfn, *args, **kwargs):
    """
    Compute the static properties from a reference wavefunction. The currently
    implemented properties are
      - dipole polarizability
      - quadrupole polarizability

    Parameters
    ----------
    wfn : psi4 wavefunction
        The reference wavefunction.
    args : list
        The list of arguments. For each argument, such as ``dipole polarizability``,
        the corresponding response is returned.
    kwargs : dict
        Options that control how the response is computed. The following options are
        supported (with default values):
          - ``conv_tol``: 1e-5
          - ``max_iter``: 10
          - ``print_lvl``: 2

    Returns
    -------
    responses : list
        The list of response tensors.
    """
    mints = core.MintsHelper(wfn.basisset())

    # list of dictionaries to control response calculations
    complete_dict = []

    for arg in args:
        # for each string keyword, append the appropriate dictionary (vide supra) to our list
        if not isinstance(arg, str):
            # TODO: better to raise TypeError?
            raise ValidationError("Property name must be of type string.")
        ret = property_dicts.get(arg)
        if ret:
            complete_dict.append(ret)
        else:
            raise ValidationError(f"Do not understand '{arg}'.")

    # vectors will be passed to the cphf solver, vector_names stores the corresponding names
    vectors = []
    vector_names = []
    restricted = wfn.same_a_b_orbs()

    # construct the list of vectors. for the keywords, fetch the appropriate tensors from MintsHelper
    for prop in complete_dict:
        tmp_vectors = prop['mints_function'](mints)
        for tmp in tmp_vectors:
            tmp.scale(-1.0)
            vectors.append(tmp)
            vector_names.append(tmp.name)

    # do we have any vectors to work with?
    if len(vectors) == 0:
        raise ValidationError('No vectors to work with. Aborting.')

    # print information on module, vectors that will be used
    _print_header(complete_dict)

    nbf = wfn.basisset().nbf()
    Co = [wfn.Ca_subset("AO", "OCC"), wfn.Cb_subset("AO", "OCC")]
    Cv = [wfn.Ca_subset("AO", "VIR"), wfn.Cb_subset("AO", "VIR")]

    vectors_transformed = []
    for vector in vectors:
        if vector.shape != (nbf, nbf):
            raise ValidationError(f"Vector must be of shape ({nbf}, {nbf}) for transformation"
                                  " to the SO basis.")
        v_a = core.triplet(Co[0], vector, Cv[0], True, False, False)
        vectors_transformed.append(v_a)
        if not restricted:
            v_b = core.triplet(Co[1], vector, Cv[1], True, False, False)
            vectors_transformed.append(v_b)

    # compute response vectors for each input vector
    params = [kwargs.pop("conv_tol", 1.e-5), kwargs.pop("max_iter", 10), kwargs.pop("print_lvl", 2)]

    responses_list = wfn.cphf_solve(vectors_transformed, *params)

    # zip vectors, responses for easy access
    if restricted:
        vectors = {f"{k}_a": v for k, v in zip(vector_names, vectors_transformed)}
        responses = {f"{k}_a": v for k, v in zip(vector_names, responses_list)}
    else:
        vectors = {f"{k}_a": v for k, v in zip(vector_names, vectors_transformed[::2])}
        vectors.update({f"{k}_b": v for k, v in zip(vector_names, vectors_transformed[1::2])})
        responses = {f"{k}_a": v for k, v in zip(vector_names, responses_list[::2])}
        responses.update({f"{k}_b": v for k, v in zip(vector_names, responses_list[1::2])})

    # compute response values, format output
    output = []
    pref = -4.0 if restricted else -2.0
    for prop in complete_dict:
        names = prop['vector names']
        dim = len(names)

        buf = np.zeros((dim, dim))

        for i, i_name in enumerate(names):
            for j, j_name in enumerate(names):
                buf[i, j] = pref * vectors[f"{i_name}_a"].vector_dot(responses[f"{j_name}_a"])
                if not restricted:
                    buf[i, j] += pref * vectors[f"{i_name}_b"].vector_dot(responses[f"{j_name}_b"])
        output.append(buf)

    _print_output(complete_dict, output)

    return output
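The final contraction in cpscf_linear_response builds each property tensor element as pref * x_i . r_j, with pref = -4.0 for a restricted reference and -2.0 otherwise, where x_i is a transformed perturbation vector and r_j the corresponding CPHF response. A NumPy-only sketch of that assembly, using made-up occ x vir arrays in place of psi4 core.Matrix objects:

# Sketch only: assemble a response tensor from hypothetical perturbation/response arrays.
import numpy as np

def response_tensor(perturbations, responses, restricted=True):
    pref = -4.0 if restricted else -2.0
    dim = len(perturbations)
    buf = np.zeros((dim, dim))
    for i, x in enumerate(perturbations):
        for j, r in enumerate(responses):
            # same contraction as Matrix.vector_dot: sum of elementwise products
            buf[i, j] = pref * np.vdot(x, r)
    return buf

x_vecs = [np.random.rand(3, 7) for _ in range(3)]  # e.g. x, y, z dipole perturbations
r_vecs = [np.random.rand(3, 7) for _ in range(3)]  # corresponding response vectors
alpha = response_tensor(x_vecs, r_vecs)
print(alpha.shape)  # (3, 3), a dipole-polarizability-like tensor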
def df_fdds_dispersion(primary, auxiliary, cache, leg_points=10, leg_lambda=0.3, do_print=True):

    rho_thresh = core.get_option("SAPT", "SAPT_FDDS_V2_RHO_CUTOFF")

    if do_print:
        core.print_out("\n ==> E20 Dispersion (CHF FDDS) <== \n\n")
        core.print_out(" Legendre Points:  % 10d\n" % leg_points)
        core.print_out(" Lambda Shift:     % 10.3f\n" % leg_lambda)
        core.print_out(" Fxc Kernel:       % 10s\n" % "ALDA")
        core.print_out(" (P|Fxc|Q) Thresh: % 8.3e\n" % rho_thresh)

    # Build object
    df_matrix_keys = ["Cocc_A", "Cvir_A", "Cocc_B", "Cvir_B"]
    fdds_matrix_cache = {key: cache[key] for key in df_matrix_keys}

    df_vector_keys = ["eps_occ_A", "eps_vir_A", "eps_occ_B", "eps_vir_B"]
    fdds_vector_cache = {key: cache[key] for key in df_vector_keys}

    fdds_obj = core.FDDS_Dispersion(primary, auxiliary, fdds_matrix_cache, fdds_vector_cache)

    # Aux Densities
    D = fdds_obj.project_densities([cache["D_A"], cache["D_B"]])

    # Temps
    half_Saux = fdds_obj.aux_overlap().clone()
    half_Saux.power(-0.5, 1.e-12)

    halfp_Saux = fdds_obj.aux_overlap().clone()
    halfp_Saux.power(0.5, 1.e-12)

    # Builds potentials
    W_A = fdds_obj.metric().clone()
    W_A.axpy(1.0, _compute_fxc(D[0], half_Saux, halfp_Saux, rho_thresh=rho_thresh))

    W_B = fdds_obj.metric().clone()
    W_B.axpy(1.0, _compute_fxc(D[1], half_Saux, halfp_Saux, rho_thresh=rho_thresh))

    # Nuke the densities
    del D

    metric = fdds_obj.metric()
    metric_inv = fdds_obj.metric_inv()

    # Integrate
    core.print_out("\n => Time Integration <= \n\n")

    val_pack = ("Omega", "Weight", "Disp20,u", "Disp20", "time [s]")
    core.print_out("% 12s % 12s % 14s % 14s % 10s\n" % val_pack)
    # print("% 12s % 12s % 14s % 14s % 10s" % val_pack)

    start_time = time.time()
    total_uc = 0
    total_c = 0
    for point, weight in zip(*np.polynomial.legendre.leggauss(leg_points)):
        omega = leg_lambda * (1.0 - point) / (1.0 + point)
        lambda_scale = ((2.0 * leg_lambda) / (point + 1.0)**2)

        # Monomer A
        X_A = fdds_obj.form_unc_amplitude("A", omega)

        # Coupled A
        X_A_coupled = X_A.clone()
        XSW_A = core.triplet(X_A, metric_inv, W_A, False, False, False)
        amplitude_inv = metric.clone()
        amplitude_inv.axpy(1.0, XSW_A)

        nremoved = 0
        amplitude = amplitude_inv.pseudoinverse(1.e-13, nremoved)
        amplitude.transpose_this()  # Why is this coming out transposed?

        X_A_coupled.axpy(-1.0, core.triplet(XSW_A, amplitude, X_A, False, False, False))
        del XSW_A, amplitude

        # Monomer B
        X_B = fdds_obj.form_unc_amplitude("B", omega)
        # print(np.linalg.norm(X_B))

        # Coupled B
        X_B_coupled = X_B.clone()
        XSW_B = core.triplet(X_B, metric_inv, W_B, False, False, False)
        amplitude_inv = metric.clone()
        amplitude_inv.axpy(1.0, XSW_B)

        amplitude = amplitude_inv.pseudoinverse(1.e-13, nremoved)
        amplitude.transpose_this()  # Why is this coming out transposed?
        X_B_coupled.axpy(-1.0, core.triplet(XSW_B, amplitude, X_B, False, False, False))
        del XSW_B, amplitude

        # Make sure the results are symmetrized
        for tensor in [X_A, X_B, X_A_coupled, X_B_coupled]:
            tensor.add(tensor.transpose())
            tensor.scale(0.5)

        # Combine
        tmp_uc = core.triplet(metric_inv, X_A, metric_inv, False, False, False)
        value_uc = tmp_uc.vector_dot(X_B)
        del tmp_uc

        tmp_c = core.triplet(metric_inv, X_A_coupled, metric_inv, False, False, False)
        value_c = tmp_c.vector_dot(X_B_coupled)
        del tmp_c

        # Tally
        total_uc += value_uc * weight * lambda_scale
        total_c += value_c * weight * lambda_scale

        if do_print:
            tmp_disp_unc = value_uc * weight * lambda_scale
            tmp_disp = value_c * weight * lambda_scale
            fdds_time = time.time() - start_time
            val_pack = (omega, weight, tmp_disp_unc, tmp_disp, fdds_time)
            core.print_out("% 12.3e % 12.3e % 14.3e % 14.3e %10d\n" % val_pack)
            # print("% 12.3e % 12.3e % 14.3e % 14.3e %10d" % val_pack)

    Disp20_uc = -1.0 / (2.0 * np.pi) * total_uc
    Disp20_c = -1.0 / (2.0 * np.pi) * total_c

    core.print_out("\n")
    core.print_out(print_sapt_var("Disp20,u", Disp20_uc, short=True) + "\n")
    core.print_out(print_sapt_var("Disp20", Disp20_c, short=True) + "\n")

    return {"Disp20,FDDS (unc)": Disp20_uc, "Disp20": Disp20_c}
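The quadrature above maps the Casimir-Polder-type frequency integral over [0, inf) onto the Legendre interval through omega = lambda*(1 - t)/(1 + t), whose Jacobian 2*lambda/(1 + t)**2 is the lambda_scale factor. A small NumPy check of that substitution on an integral with a known value (a sketch, independent of the SAPT machinery):

# Sketch: Gauss-Legendre quadrature for integrals over [0, inf) via the same substitution.
import numpy as np

def gauss_legendre_0_inf(f, leg_points=10, leg_lambda=0.3):
    t, w = np.polynomial.legendre.leggauss(leg_points)
    omega = leg_lambda * (1.0 - t) / (1.0 + t)
    jac = 2.0 * leg_lambda / (1.0 + t) ** 2   # the lambda_scale factor
    return np.sum(w * jac * f(omega))

# int_0^inf dw / (1 + w^2) = pi / 2
approx = gauss_legendre_0_inf(lambda w: 1.0 / (1.0 + w ** 2), leg_points=20, leg_lambda=0.5)
print(approx, np.pi / 2)  # agree to several decimal places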
def mcscf_solver(ref_wfn):

    # Build CIWavefunction
    core.prepare_options_for_module("DETCI")
    ciwfn = core.CIWavefunction(ref_wfn)
    ciwfn.set_module("detci")

    # Hush a lot of CI output
    ciwfn.set_print(0)

    # Begin with a normal two-step
    step_type = 'Initial CI'
    total_step = core.Matrix("Total step", ciwfn.get_dimension('OA'), ciwfn.get_dimension('AV'))
    start_orbs = ciwfn.get_orbitals("ROT").clone()
    ciwfn.set_orbitals("ROT", start_orbs)

    # Grab da options
    mcscf_orb_grad_conv = core.get_option("DETCI", "MCSCF_R_CONVERGENCE")
    mcscf_e_conv = core.get_option("DETCI", "MCSCF_E_CONVERGENCE")
    mcscf_max_macroiteration = core.get_option("DETCI", "MCSCF_MAXITER")
    mcscf_type = core.get_option("DETCI", "MCSCF_TYPE")
    mcscf_d_file = core.get_option("DETCI", "CI_FILE_START") + 3
    mcscf_nroots = core.get_option("DETCI", "NUM_ROOTS")
    mcscf_wavefunction_type = core.get_option("DETCI", "WFN")
    mcscf_ndet = ciwfn.ndet()
    mcscf_nuclear_energy = ciwfn.molecule().nuclear_repulsion_energy()
    mcscf_steplimit = core.get_option("DETCI", "MCSCF_MAX_ROT")
    mcscf_rotate = core.get_option("DETCI", "MCSCF_ROTATE")

    # DIIS info
    mcscf_diis_start = core.get_option("DETCI", "MCSCF_DIIS_START")
    mcscf_diis_freq = core.get_option("DETCI", "MCSCF_DIIS_FREQ")
    mcscf_diis_error_type = core.get_option("DETCI", "MCSCF_DIIS_ERROR_TYPE")
    mcscf_diis_max_vecs = core.get_option("DETCI", "MCSCF_DIIS_MAX_VECS")

    # One-step info
    mcscf_target_conv_type = core.get_option("DETCI", "MCSCF_ALGORITHM")
    mcscf_so_start_grad = core.get_option("DETCI", "MCSCF_SO_START_GRAD")
    mcscf_so_start_e = core.get_option("DETCI", "MCSCF_SO_START_E")
    mcscf_current_step_type = 'Initial CI'

    # Start with SCF energy and other params
    scf_energy = ciwfn.variable("HF TOTAL ENERGY")
    eold = scf_energy
    norb_iter = 1
    converged = False
    ah_step = False
    qc_step = False
    approx_integrals_only = True

    # Fake info to start with the initial diagonalization
    ediff = 1.e-4
    orb_grad_rms = 1.e-3

    # Grab needed objects
    diis_obj = solvers.DIIS(mcscf_diis_max_vecs)
    mcscf_obj = ciwfn.mcscf_object()

    # Execute the rotate command
    for rot in mcscf_rotate:
        if len(rot) != 4:
            raise p4util.PsiException(
                "Each element of the MCSCF rotate command requires 4 arguments (irrep, orb1, orb2, theta).")

        irrep, orb1, orb2, theta = rot
        if irrep > ciwfn.Ca().nirrep():
            raise p4util.PsiException(
                "MCSCF_ROTATE: Expression %s irrep number is larger than the number of irreps" % (str(rot)))

        if max(orb1, orb2) > ciwfn.Ca().coldim()[irrep]:
            raise p4util.PsiException(
                "MCSCF_ROTATE: Expression %s orbital number exceeds number of orbitals in irrep" % (str(rot)))

        theta = np.deg2rad(theta)

        x = ciwfn.Ca().nph[irrep][:, orb1].copy()
        y = ciwfn.Ca().nph[irrep][:, orb2].copy()

        xp = np.cos(theta) * x - np.sin(theta) * y
        yp = np.sin(theta) * x + np.cos(theta) * y

        ciwfn.Ca().nph[irrep][:, orb1] = xp
        ciwfn.Ca().nph[irrep][:, orb2] = yp

    # Limited RAS functionality
    if core.get_local_option("DETCI", "WFN") == "RASSCF" and mcscf_target_conv_type != "TS":
        core.print_out("\n Warning! Only the TS algorithm for RASSCF wavefunction is currently supported.\n")
        core.print_out(" Switching to the TS algorithm.\n\n")
        mcscf_target_conv_type = "TS"

    # Print out headers
    if mcscf_type == "CONV":
        mtype = " @MCSCF"
        core.print_out("\n ==> Starting MCSCF iterations <==\n\n")
        core.print_out(" Iter Total Energy Delta E Orb RMS CI RMS NCI NORB\n")
    elif mcscf_type == "DF":
        mtype = " @DF-MCSCF"
        core.print_out("\n ==> Starting DF-MCSCF iterations <==\n\n")
        core.print_out(" Iter Total Energy Delta E Orb RMS CI RMS NCI NORB\n")
    else:
        mtype = " @AO-MCSCF"
        core.print_out("\n ==> Starting AO-MCSCF iterations <==\n\n")
        core.print_out(" Iter Total Energy Delta E Orb RMS CI RMS NCI NORB\n")

    # Iterate !
    for mcscf_iter in range(1, mcscf_max_macroiteration + 1):

        # Transform integrals, diagonalize H
        ciwfn.transform_mcscf_integrals(approx_integrals_only)
        nci_iter = ciwfn.diag_h(abs(ediff) * 1.e-2, orb_grad_rms * 1.e-3)

        # After the first diag we need to switch to READ
        ciwfn.set_ci_guess("DFILE")

        ciwfn.form_opdm()
        ciwfn.form_tpdm()
        ci_grad_rms = ciwfn.variable("DETCI AVG DVEC NORM")

        # Update MCSCF object
        Cocc = ciwfn.get_orbitals("DOCC")
        Cact = ciwfn.get_orbitals("ACT")
        Cvir = ciwfn.get_orbitals("VIR")
        opdm = ciwfn.get_opdm(-1, -1, "SUM", False)
        tpdm = ciwfn.get_tpdm("SUM", True)
        mcscf_obj.update(Cocc, Cact, Cvir, opdm, tpdm)

        current_energy = ciwfn.variable("MCSCF TOTAL ENERGY")

        ciwfn.reset_ci_H0block()

        orb_grad_rms = mcscf_obj.gradient_rms()
        ediff = current_energy - eold

        # Print iterations
        print_iteration(mtype, mcscf_iter, current_energy, ediff, orb_grad_rms, ci_grad_rms,
                        nci_iter, norb_iter, mcscf_current_step_type)
        eold = current_energy

        if mcscf_current_step_type == 'Initial CI':
            mcscf_current_step_type = 'TS'

        # Check convergence
        if (orb_grad_rms < mcscf_orb_grad_conv) and (abs(ediff) < abs(mcscf_e_conv)) and \
                (mcscf_iter > 3) and not qc_step:
            core.print_out("\n %s has converged!\n\n" % mtype)
            converged = True
            break

        # Which orbital convergence are we doing?
        if ah_step:
            converged, norb_iter, step = ah_iteration(mcscf_obj, print_micro=False)
            norb_iter += 1

            if converged:
                mcscf_current_step_type = 'AH'
            else:
                core.print_out(" !Warning. Augmented Hessian did not converge. Taking an approx step.\n")
                step = mcscf_obj.approx_solve()
                mcscf_current_step_type = 'TS, AH failure'
        else:
            step = mcscf_obj.approx_solve()
            step_type = 'TS'

        maxstep = step.absmax()
        if maxstep > mcscf_steplimit:
            core.print_out(' Warning! Maxstep = %4.2f, scaling to %4.2f\n' % (maxstep, mcscf_steplimit))
            step.scale(mcscf_steplimit / maxstep)

        xstep = total_step.clone()
        total_step.add(step)

        # Do or add DIIS
        if (mcscf_iter >= mcscf_diis_start) and ("TS" in mcscf_current_step_type):

            # Figure out DIIS error vector
            if mcscf_diis_error_type == "GRAD":
                error = core.triplet(ciwfn.get_orbitals("OA"), mcscf_obj.gradient(),
                                     ciwfn.get_orbitals("AV"), False, False, True)
            else:
                error = step

            diis_obj.add(total_step, error)

            if not (mcscf_iter % mcscf_diis_freq):
                total_step = diis_obj.extrapolate()
                mcscf_current_step_type = 'TS, DIIS'

        # Build the rotation by continuous updates
        if mcscf_iter == 1:
            totalU = mcscf_obj.form_rotation_matrix(total_step)
        else:
            xstep.axpy(-1.0, total_step)
            xstep.scale(-1.0)
            Ustep = mcscf_obj.form_rotation_matrix(xstep)
            totalU = core.doublet(totalU, Ustep, False, False)

        # Build the rotation directly (not recommended)
        # orbs_mat = mcscf_obj.Ck(start_orbs, total_step)

        # Finally rotate and set orbitals
        orbs_mat = core.doublet(start_orbs, totalU, False, False)
        ciwfn.set_orbitals("ROT", orbs_mat)

        # Figure out what the next step should be
        if (orb_grad_rms < mcscf_so_start_grad) and (abs(ediff) < abs(mcscf_so_start_e)) and \
                (mcscf_iter >= 2):
            if mcscf_target_conv_type == 'AH':
                approx_integrals_only = False
                ah_step = True
            elif mcscf_target_conv_type == 'OS':
                approx_integrals_only = False
                mcscf_current_step_type = 'OS, Prep'
                break
            else:
                continue
        #raise p4util.PsiException("")

    # If we converged do not do onestep
    if converged or (mcscf_target_conv_type != 'OS'):
        one_step_iters = []

    # If we are not converged load in Dvec and build iters array
    else:
        one_step_iters = range(mcscf_iter + 1, mcscf_max_macroiteration + 1)
        dvec = ciwfn.D_vector()
        dvec.init_io_files(True)
        dvec.read(0, 0)
        dvec.symnormalize(1.0, 0)

        ci_grad = ciwfn.new_civector(1, mcscf_d_file + 1, True, True)
        ci_grad.set_nvec(1)
        ci_grad.init_io_files(True)

    # Loop for onestep
    for mcscf_iter in one_step_iters:

        # Transform integrals and update the MCSCF object
        ciwfn.transform_mcscf_integrals(ciwfn.H(), False)
        ciwfn.form_opdm()
        ciwfn.form_tpdm()

        # Update MCSCF object
        Cocc = ciwfn.get_orbitals("DOCC")
        Cact = ciwfn.get_orbitals("ACT")
        Cvir = ciwfn.get_orbitals("VIR")
        opdm = ciwfn.get_opdm(-1, -1, "SUM", False)
        tpdm = ciwfn.get_tpdm("SUM", True)
        mcscf_obj.update(Cocc, Cact, Cvir, opdm, tpdm)

        orb_grad_rms = mcscf_obj.gradient_rms()

        # Warning! Does not work for SA-MCSCF
        current_energy = mcscf_obj.current_total_energy()
        current_energy += mcscf_nuclear_energy

        ciwfn.set_variable("CI ROOT %d TOTAL ENERGY" % 1, current_energy)
        ciwfn.set_variable("CURRENT ENERGY", current_energy)
        ciwfn.set_energy(current_energy)

        docc_energy = mcscf_obj.current_docc_energy()
        ci_energy = mcscf_obj.current_ci_energy()

        # Compute CI gradient
        ciwfn.sigma(dvec, ci_grad, 0, 0)
        ci_grad.scale(2.0, 0)
        ci_grad.axpy(-2.0 * ci_energy, dvec, 0, 0)

        ci_grad_rms = ci_grad.norm(0)
        orb_grad_rms = mcscf_obj.gradient().rms()

        ediff = current_energy - eold

        print_iteration(mtype, mcscf_iter, current_energy, ediff, orb_grad_rms, ci_grad_rms,
                        nci_iter, norb_iter, mcscf_current_step_type)
        mcscf_current_step_type = 'OS'

        eold = current_energy

        if (orb_grad_rms < mcscf_orb_grad_conv) and (abs(ediff) < abs(mcscf_e_conv)):
            core.print_out("\n %s has converged!\n\n" % mtype)
            converged = True
            break

        # Take a step
        converged, norb_iter, nci_iter, step = qc_iteration(dvec, ci_grad, ciwfn, mcscf_obj)

        # Rotate integrals to new frame
        total_step.add(step)
        orbs_mat = mcscf_obj.Ck(ciwfn.get_orbitals("ROT"), step)
        ciwfn.set_orbitals("ROT", orbs_mat)

    core.print_out(mtype + " Final Energy: %20.15f\n" % current_energy)

    # Die if we did not converge
    if (not converged):
        if core.get_global_option("DIE_IF_NOT_CONVERGED"):
            raise p4util.PsiException("MCSCF: Iterations did not converge!")
        else:
            core.print_out("\nWarning! MCSCF iterations did not converge!\n\n")

    # Print out CI vector information
    if mcscf_target_conv_type == 'OS':
        dvec.close_io_files()
        ci_grad.close_io_files()

    # For orbital invariant methods we transform the orbitals to the natural or
    # semicanonical basis. Frozen doubly occupied and virtual orbitals are not
    # modified.
    if core.get_option("DETCI", "WFN") == "CASSCF":
        # Do we diagonalize the opdm?
        if core.get_option("DETCI", "NAT_ORBS"):
            ciwfn.ci_nat_orbs()
        else:
            ciwfn.semicanonical_orbs()

        # Retransform integrals and update CI coeffs., OPDM, and TPDM
        ciwfn.transform_mcscf_integrals(approx_integrals_only)
        ciwfn.set_print(1)
        ciwfn.set_ci_guess("H0_BLOCK")
        nci_iter = ciwfn.diag_h(mcscf_e_conv, mcscf_e_conv**0.5)

        ciwfn.form_opdm()
        ciwfn.form_tpdm()

    proc_util.print_ci_results(ciwfn, "MCSCF", scf_energy, current_energy, print_opdm_no=True)

    # Set final energy
    ciwfn.set_variable("CURRENT ENERGY", ciwfn.variable("MCSCF TOTAL ENERGY"))
    ciwfn.set_energy(ciwfn.variable("MCSCF TOTAL ENERGY"))

    # What do we need to cleanup?
    if core.get_option("DETCI", "MCSCF_CI_CLEANUP"):
        ciwfn.cleanup_ci()
    if core.get_option("DETCI", "MCSCF_DPD_CLEANUP"):
        ciwfn.cleanup_dpd()

    del diis_obj
    del mcscf_obj

    return ciwfn
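Both mcscf_solver variants accelerate the two-step orbital updates with DIIS through solvers.DIIS. A minimal NumPy sketch of standard Pulay extrapolation of that kind, assuming flattened step and error vectors; the actual helper class manages psi4 Matrix objects and a bounded history:

# Sketch only: Pulay/DIIS extrapolation over hypothetical flattened step and error vectors.
import numpy as np

def diis_extrapolate(steps, errors):
    """Return the DIIS linear combination of `steps` whose combined error norm is minimized."""
    n = len(errors)
    B = -np.ones((n + 1, n + 1))
    B[n, n] = 0.0
    for i in range(n):
        for j in range(n):
            B[i, j] = np.dot(errors[i], errors[j])
    rhs = np.zeros(n + 1)
    rhs[n] = -1.0
    coeffs = np.linalg.solve(B, rhs)[:n]  # coefficients sum to 1 by construction
    return sum(c * s for c, s in zip(coeffs, steps))

steps = [np.random.rand(10) for _ in range(4)]
errors = [np.random.rand(10) * 0.1 for _ in range(4)]
new_step = diis_extrapolate(steps, errors)
print(new_step.shape)  # (10,)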