def new_vector(self, name=""): """Build a new object with shape symmetry like a trial vector """ return [ core.Matrix(name + 'a', self.occpi[0], self.virpi[0], self.G_trans), core.Matrix(name + 'b', self.occpi[1], self.virpi[1], self.G_trans) ]
def get_dot_product(self, i, j):
    key = frozenset([i, j])
    try:
        return self.cached_dot_products[key]
    except KeyError:
        if self.storage_policy == StoragePolicy.InCore:
            Ri = self.stored_vectors[i][0]
            Rj = self.stored_vectors[j][0]
            dot_product = sum(Rix.vector_dot(Rjx) for Rix, Rjx in zip(Ri, Rj))
        elif self.storage_policy == StoragePolicy.OnDisk:
            dot_product = 0
            psio = core.IO.shared_object()
            for x, entry_dims in enumerate(self.R_template):
                if len(entry_dims) == 2:
                    Rix = core.Matrix(self.get_name("R", i, x), *entry_dims)
                    Rjx = core.Matrix(self.get_name("R", j, x), *entry_dims)
                    Rix.load(psio, psif.PSIF_LIBDIIS, core.SaveType.SubBlocks)
                    Rjx.load(psio, psif.PSIF_LIBDIIS, core.SaveType.SubBlocks)
                elif len(entry_dims) == 1:
                    Rix = core.Vector(self.get_name("R", i, x), *entry_dims)
                    Rjx = core.Vector(self.get_name("R", j, x), *entry_dims)
                    Rix.load(psio, psif.PSIF_LIBDIIS)
                    Rjx.load(psio, psif.PSIF_LIBDIIS)
                else:
                    raise Exception("R_template may only have 1 or 2 dimensions. This is a bug: contact developers.")
                dot_product += Rix.vector_dot(Rjx)
        else:
            raise Exception(f"StoragePolicy {self.storage_policy} not recognized. This is a bug: contact developers.")
        self.cached_dot_products[key] = dot_product
        return dot_product
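# A note on the cache key above: frozenset([i, j]) is order-insensitive, which
# matches the symmetry <R_i|R_j> == <R_j|R_i>, so each pair is computed once and
# served for both argument orders. A self-contained sketch of the idiom; the
# `vecs`, `cache`, and `dot` names are illustrative, not part of this class.
import numpy as np

cache = {}
vecs = [np.arange(4, dtype=float) + k for k in range(3)]

def dot(i, j):
    key = frozenset([i, j])          # frozenset({i, j}) == frozenset({j, i})
    if key not in cache:
        cache[key] = float(vecs[i] @ vecs[j])
    return cache[key]

assert dot(0, 2) == dot(2, 0)        # both orders hit the same cache entry
assert len(cache) == 1               # and only one entry was ever stored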
def _init_stack_C(self, calc, oldcalc_m1, oldcalc_m2):
    assert oldcalc_m1.V == 'm1'
    assert oldcalc_m2.V == 'm2'
    # print('Stacking monomer wfns', calc, oldcalc_m1, oldcalc_m2)

    m1_C_fn = self._fmt_mo_fn(oldcalc_m1)
    m2_C_fn = self._fmt_mo_fn(oldcalc_m2)

    m1_wfn = core.Wavefunction.from_file(m1_C_fn)
    m2_wfn = core.Wavefunction.from_file(m2_C_fn)

    m1_Ca_occ = m1_wfn.Ca_subset('SO', 'OCC')
    m1_Cb_occ = m1_wfn.Cb_subset('SO', 'OCC')
    m2_Ca_occ = m2_wfn.Ca_subset('SO', 'OCC')
    m2_Cb_occ = m2_wfn.Cb_subset('SO', 'OCC')

    m1_nso, m1_nalpha = m1_Ca_occ.shape
    m2_nso, m2_nalpha = m2_Ca_occ.shape
    m1_nbeta = m1_Cb_occ.shape[1]
    m2_nbeta = m2_Cb_occ.shape[1]
    d_nalpha = m1_nalpha + m2_nalpha
    d_nbeta = m1_nbeta + m2_nbeta

    assert m1_nso == m2_nso

    Ca_d = core.Matrix('Ca', m1_nso, m1_nso)
    Cb_d = core.Matrix('Cb', m1_nso, m1_nso)

    # Pack the two monomers' occupied blocks side by side into the dimer
    # matrices. Note the beta block of monomer 2 starts at column m1_nbeta,
    # not m1_nalpha (the original indexed the beta columns with the alpha
    # offset, which only works when nalpha == nbeta).
    Ca_d.np[:, :m1_nalpha] = m1_Ca_occ.np[:, :]
    Ca_d.np[:, m1_nalpha:d_nalpha] = m2_Ca_occ.np[:, :]
    Cb_d.np[:, :m1_nbeta] = m1_Cb_occ.np[:, :]
    Cb_d.np[:, m1_nbeta:d_nbeta] = m2_Cb_occ.np[:, :]

    assert m1_wfn.molecule().schoenflies_symbol() == m2_wfn.molecule().schoenflies_symbol() == 'c1'
    assert m1_wfn.name() == m2_wfn.name()
    assert m1_wfn.basisset().name() == m2_wfn.basisset().name()
    assert m1_wfn.basisset().has_puream() == m2_wfn.basisset().has_puream()

    wfn_new = m1_wfn.to_file()
    wfn_new['dimension']['nalphapi'] = (m1_nalpha + m2_nalpha, )
    wfn_new['dimension']['nbetapi'] = (m1_nbeta + m2_nbeta, )
    wfn_new['dimension']['doccpi'] = (m1_wfn.doccpi().to_tuple()[0] + m2_wfn.doccpi().to_tuple()[0], )
    wfn_new['dimension']['soccpi'] = (m1_wfn.soccpi().to_tuple()[0] + m2_wfn.soccpi().to_tuple()[0], )
    wfn_new['matrix']['Ca'] = Ca_d
    wfn_new['matrix']['Cb'] = Cb_d
    wfn_new = core.Wavefunction.from_file(wfn_new)

    m1_C_fn = self._fmt_mo_fn(calc)
    wfn_new.to_file(m1_C_fn)
    core.set_local_option('SCF', 'GUESS', 'READ')
def _init_stack_C(self, calc, oldcalc_m1, oldcalc_m2):
    assert oldcalc_m1.V == 'm1'
    assert oldcalc_m2.V == 'm2'
    # print('Stacking monomer wfns', calc, oldcalc_m1, oldcalc_m2)

    m1_C_fn = self._fmt_mo_fn(oldcalc_m1)
    m2_C_fn = self._fmt_mo_fn(oldcalc_m2)

    m1_data = np.load(m1_C_fn)
    m2_data = np.load(m2_C_fn)

    m1_Ca_occ = core.Matrix.np_read(m1_data, "Ca_occ")
    m1_Cb_occ = core.Matrix.np_read(m1_data, "Cb_occ")
    m2_Ca_occ = core.Matrix.np_read(m2_data, "Ca_occ")
    m2_Cb_occ = core.Matrix.np_read(m2_data, "Cb_occ")

    m1_nso, m1_nalpha = m1_Ca_occ.shape
    m2_nso, m2_nalpha = m2_Ca_occ.shape
    m1_nbeta = m1_Cb_occ.shape[1]
    m2_nbeta = m2_Cb_occ.shape[1]

    assert m1_nso == m2_nso

    d_Ca_occ = core.Matrix('Ca_occ', m1_nso, m1_nalpha + m2_nalpha)
    d_Cb_occ = core.Matrix('Cb_occ', m1_nso, m1_nbeta + m2_nbeta)

    d_Ca_occ.np[:, :m1_nalpha] = m1_Ca_occ.np[:, :]
    d_Ca_occ.np[:, -m2_nalpha:] = m2_Ca_occ.np[:, :]
    d_Cb_occ.np[:, :m1_nbeta] = m1_Cb_occ.np[:, :]
    d_Cb_occ.np[:, -m2_nbeta:] = m2_Cb_occ.np[:, :]

    assert m1_data['symmetry'] == m2_data['symmetry'] == 'c1'
    assert m1_data['reference'] == m2_data['reference']
    assert m1_data['BasisSet'] == m2_data['BasisSet']
    assert m1_data['BasisSet PUREAM'] == m2_data['BasisSet PUREAM']

    data = {
        'symmetry': m1_data['symmetry'],
        'reference': m1_data['reference'],
        'ndoccpi': m1_data['ndoccpi'] + m2_data['ndoccpi'],
        'nsoccpi': m1_data['nsoccpi'] + m2_data['nsoccpi'],
        'nalphapi': m1_data['nalphapi'] + m2_data['nalphapi'],
        'nbetapi': m1_data['nbetapi'] + m2_data['nbetapi'],
        'BasisSet': m1_data['BasisSet'],
        'BasisSet PUREAM': m1_data['BasisSet PUREAM'],
    }
    data.update(d_Ca_occ.np_write(prefix='Ca_occ'))
    data.update(d_Cb_occ.np_write(prefix='Cb_occ'))

    m1_C_fn = self._fmt_mo_fn(calc)
    np.savez(m1_C_fn, **data)
    core.set_local_option('SCF', 'GUESS', 'READ')
def load_quantity(self, name, entry_num, item_num, force_new=True):
    """Load a quantity from wherever it's stored, constructing a new object if needed."""
    template_object = self.template[name][item_num]
    if isinstance(template_object, float) or self.storage_policy == StoragePolicy.InCore:
        quantity = self.stored_vectors[entry_num][name][item_num]
        try:
            quantity = quantity.clone()
        except AttributeError:
            # The quantity must have been a float. No need to clone.
            pass
    elif self.storage_policy == StoragePolicy.OnDisk:
        entry_dims = template_object
        full_name = self.get_name(name, entry_num, item_num)
        psio = core.IO.shared_object()
        if len(entry_dims) == 2:
            quantity = core.Matrix(full_name, *entry_dims)
            quantity.load(psio, psif.PSIF_LIBDIIS, core.SaveType.SubBlocks)
        elif len(entry_dims) == 1:
            quantity = core.Vector(full_name, *entry_dims)
            quantity.load(psio, psif.PSIF_LIBDIIS)
    else:
        raise Exception(f"StoragePolicy {self.storage_policy} not recognized. This is a bug: contact developers.")
    return quantity
def _np_read(self, filename, prefix=""):
    if isinstance(filename, np.lib.npyio.NpzFile):
        data = filename
    elif isinstance(filename, (str, unicode)):
        if not filename.endswith('.npz'):
            filename = filename + '.npz'
        data = np.load(filename)
    else:
        raise Exception("Filename not understood: %s" % filename)

    ret_data = []

    if ((prefix + "Irreps") not in data.keys()) or ((prefix + "Name") not in data.keys()):
        raise KeyError("File %s does not appear to be a numpyz save" % filename)

    for h in range(data[prefix + "Irreps"]):
        ret_data.append(data[prefix + "IrrepData" + str(h)])

    arr_type = self.__mro__[0]
    if arr_type == core.Matrix:
        dim1 = core.Dimension.from_list(data[prefix + "Dim1"])
        dim2 = core.Dimension.from_list(data[prefix + "Dim2"])
        ret = core.Matrix(str(data[prefix + "Name"]), dim1, dim2)
    elif arr_type == core.Vector:
        dim1 = core.Dimension.from_list(data[prefix + "Dim"])
        ret = core.Vector(str(data[prefix + "Name"]), dim1)

    for h in range(data[prefix + "Irreps"]):
        ret.nph[h][:] = ret_data[h]

    return ret
def load_quantity(self, name, entry_num, item_num, force_new=True):
    """Load a quantity from wherever it's stored, constructing a new object if needed."""
    template_object = self.template[name][item_num]
    if isinstance(template_object, float) or self.storage_policy == StoragePolicy.InCore:
        quantity = self.stored_vectors[entry_num][name][item_num]
        try:
            quantity = quantity.clone()
        except AttributeError:
            # The quantity must have been a float. No need to clone.
            pass
    elif self.storage_policy == StoragePolicy.OnDisk:
        full_name = self.get_name(name, entry_num, item_num)
        psio = core.IO.shared_object()
        if hasattr(template_object, "__len__"):
            # Looks like we have dimensions.
            if len(template_object) == 2:
                quantity = core.Matrix(full_name, *template_object)
                quantity.load(psio, psif.PSIF_LIBDIIS, core.SaveType.SubBlocks)
            elif len(template_object) == 1:
                quantity = core.Vector(full_name, *template_object)
                quantity.load(psio, psif.PSIF_LIBDIIS)
        elif which_import("ambit", return_bool=True):
            import ambit
            if template_object == ambit.BlockedTensor:
                quantity = ambit.BlockedTensor.load_and_build(f"libdiis.{full_name}")
    else:
        raise Exception(f"StoragePolicy {self.storage_policy} not recognized. This is a bug: contact developers.")
    return quantity
def copier(self, x, new_name: str):
    """Copy the object x and give it a new_name. Save it to disk if needed."""
    if isinstance(x, (core.Matrix, core.Vector)):
        copy = x.clone()
    elif isinstance(x, (core.dpdbuf4, core.dpdfile2)):
        copy = core.Matrix(x)
    elif isinstance(x, float):
        # Never cache a _number_.
        return x
    elif which_import("ambit", return_bool=True):
        import ambit
        if isinstance(x, ambit.BlockedTensor):
            copy = x.clone()
        else:
            raise TypeError("Unrecognized object type for DIIS.")
    else:
        raise TypeError("Unrecognized object type for DIIS.")
    copy.name = new_name

    if self.storage_policy == StoragePolicy.OnDisk:
        psio = core.IO.shared_object()
        if isinstance(copy, core.Vector):
            copy.save(psio, psif.PSIF_LIBDIIS)
        elif isinstance(copy, core.Matrix):
            copy.save(psio, psif.PSIF_LIBDIIS, core.SaveType.SubBlocks)
        elif isinstance(copy, ambit.BlockedTensor):
            filename = f"libdiis.{copy.name}"
            copy.save(filename)
            self.created_files.add(filename)
        else:
            raise TypeError("Unrecognized object type for DIIS. This shouldn't be possible.")
        copy = None

    return copy
def extrapolate(self):
    # Limit size of DIIS vector
    diis_count = len(self.vector)

    if diis_count == 0:
        raise Exception("DIIS: No previous vectors.")
    if diis_count == 1:
        return self.vector[0]

    if diis_count > self.max_vec:
        # Remove oldest vector
        del self.vector[0]
        del self.error[0]
        diis_count -= 1

    # Build error matrix B
    B = np.empty((diis_count + 1, diis_count + 1))
    B[-1, :] = 1
    B[:, -1] = 1
    B[-1, -1] = 0
    for num1, e1 in enumerate(self.error):
        B[num1, num1] = e1.vector_dot(e1)
        for num2, e2 in enumerate(self.error):
            if num2 >= num1:
                continue
            val = e1.vector_dot(e2)
            B[num1, num2] = B[num2, num1] = val

    # Build residual vector
    resid = np.zeros(diis_count + 1)
    resid[-1] = 1

    # Solve Pulay equations
    # Yea, yea this is unstable, make it stable
    iszero = np.any(np.diag(B)[:-1] <= 0.0)
    if iszero:
        S = np.ones((diis_count + 1))
    else:
        S = np.ones((diis_count + 1))
        S[:-1] = np.diag(B)[:-1]
        S = S**-0.5
        S[-1] = 1

    # Then we gotta do a custom inverse
    B *= S[:, None] * S
    invB = core.Matrix.from_array(B)
    invB.power(-1.0, 1.e-12)

    ci = np.dot(invB, resid) * S

    # Combination of previous Fock matrices
    V = core.Matrix("DIIS result", self.vector[0].rowdim(), self.vector[1].coldim())
    for num, c in enumerate(ci[:-1]):
        V.axpy(c, self.vector[num])

    return V
def _init_addghost_C(self, oldcalc, calc):
    # print('Adding ghost %s->%s' % (oldcalc, calc))

    old_filename = self._fmt_mo_fn(oldcalc)
    data = np.load(old_filename)
    Ca_occ = core.Matrix.np_read(data, "Ca_occ")
    Cb_occ = core.Matrix.np_read(data, "Cb_occ")

    m1_nso = self.wfn_cache[('m1', 'm', oldcalc.Z)].nso()
    m2_nso = self.wfn_cache[('m2', 'm', oldcalc.Z)].nso()
    m1_nalpha = self.wfn_cache[('m1', 'm', oldcalc.Z)].nalpha()
    m2_nalpha = self.wfn_cache[('m2', 'm', oldcalc.Z)].nalpha()
    m1_nbeta = self.wfn_cache[('m1', 'm', oldcalc.Z)].nbeta()
    m2_nbeta = self.wfn_cache[('m2', 'm', oldcalc.Z)].nbeta()

    if calc.V == 'm1':
        Ca_occ_d = core.Matrix('Ca_occ', m1_nso + m2_nso, m1_nalpha)
        Ca_occ_d.np[:m1_nso, :] = Ca_occ.np[:, :]
        Cb_occ_d = core.Matrix('Cb_occ', m1_nso + m2_nso, m1_nbeta)
        Cb_occ_d.np[:m1_nso, :] = Cb_occ.np[:, :]
    elif calc.V == 'm2':
        Ca_occ_d = core.Matrix('Ca_occ', m1_nso + m2_nso, m2_nalpha)
        Ca_occ_d.np[-m2_nso:, :] = Ca_occ.np[:, :]
        Cb_occ_d = core.Matrix('Cb_occ', m1_nso + m2_nso, m2_nbeta)
        Cb_occ_d.np[-m2_nso:, :] = Cb_occ.np[:, :]

    data_dict = dict(data)
    data_dict.update(Ca_occ_d.np_write(prefix='Ca_occ'))
    data_dict.update(Cb_occ_d.np_write(prefix='Cb_occ'))

    psi_scratch = core.IOManager.shared_object().get_default_path()
    write_filename = os.path.join(
        psi_scratch,
        os.path.split(os.path.abspath(core.get_writer_file_prefix(self.fmt_ns(calc))))[1] + ".180.npz")
    np.savez(write_filename, **data_dict)
    extras.register_numpy_file(write_filename)
    core.set_local_option('SCF', 'GUESS', 'READ')
def orthogonalize(C, S):
    nbf, nocc = C.shape

    eigenvectors = core.Matrix(nocc, nocc)
    eigvals = core.Vector(nocc)
    sqrt_eigvals = core.Vector(nocc)

    CTSC = core.Matrix.triplet(C, S, C, True, False, False)
    CTSC.diagonalize(eigenvectors, eigvals, core.DiagonalizeOrder.Ascending)

    orthonormal = core.Matrix.doublet(C, eigenvectors, False, False)

    sqrt_eigvals.np[:] = np.sqrt(eigvals.np)
    orthonormal.np[:, :] /= sqrt_eigvals.np[np.newaxis, :]

    return orthonormal
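# The routine above is a canonical orthogonalization of the occupied block:
# diagonalize the metric C^T S C = U diag(lam) U^T and rescale, giving
# C' = C U diag(lam**-0.5) with C'^T S C' = I. A minimal NumPy sketch of the
# same algebra on synthetic data; names here are illustrative, not Psi4 objects.
import numpy as np

def orthogonalize_np(C, S):
    """Orthonormalize the columns of C under the metric S."""
    lam, U = np.linalg.eigh(C.T @ S @ C)   # occupied-space overlap
    return (C @ U) / np.sqrt(lam)          # rescale each column by lam**-0.5

# Quick check with random data: nbf = 6, nocc = 3
rng = np.random.default_rng(0)
A = rng.standard_normal((6, 6))
S = A @ A.T + 6 * np.eye(6)                # a symmetric positive-definite "overlap"
C = rng.standard_normal((6, 3))
Cp = orthogonalize_np(C, S)
assert np.allclose(Cp.T @ S @ Cp, np.eye(3))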
def extrapolate(self, *args):

    dim = len(self.stored_vectors) + 1
    B = np.zeros((dim, dim))
    for i in range(len(self.stored_vectors)):
        for j in range(len(self.stored_vectors)):
            B[i, j] = self.get_dot_product(i, j)
    B[-1, :-1] = B[:-1, -1] = -1

    rhs = np.zeros((dim))
    rhs[-1] = -1

    # Trick to improve numerical conditioning.
    # Instead of solving B c = r, we solve D B D^-1 D c = D r, using D r = r.
    # D is the diagonals^-1/2 matrix. This improves the conditioning of the problem.
    diagonals = B.diagonal().copy()
    diagonals[-1] = 1
    if np.all(diagonals > 0):
        diagonals = diagonals ** (-0.5)
        B = np.einsum("i,ij,j -> ij", diagonals, B, diagonals)
        coeffs = np.linalg.lstsq(B, rhs, rcond=None)[0][:-1] * diagonals[:-1]
    else:
        coeffs = np.linalg.lstsq(B, rhs, rcond=None)[0][:-1]

    for j, Tj in enumerate(args):
        Tj.zero()
        if self.storage_policy == StoragePolicy.InCore:
            for ci, (_, Ti) in zip(coeffs, self.stored_vectors):
                axpy(Tj, ci, Ti[j])
        elif self.storage_policy == StoragePolicy.OnDisk:
            for i, ci in enumerate(coeffs):
                psio = core.IO.shared_object()
                if isinstance(Tj, core.Vector):
                    Tij = core.Vector(self.get_name("T", i, j), *self.T_template[j])
                    Tij.load(psio, psif.PSIF_LIBDIIS)
                elif isinstance(Tj, (core.Matrix, core.dpdfile2, core.dpdbuf4)):
                    Tij = core.Matrix(self.get_name("T", i, j), *self.T_template[j])
                    Tij.load(psio, psif.PSIF_LIBDIIS, core.SaveType.SubBlocks)
                else:
                    raise TypeError("Unrecognized object type for DIIS.")
                axpy(Tj, ci, Tij)
        else:
            raise Exception(f"StoragePolicy {self.storage_policy} not recognized. This is a bug: contact developers.")

    return True
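# The conditioning trick in the comment above can be exercised standalone:
# with D = diag(B)**-0.5 (leaving the constraint row unscaled, so D r = r),
# solve (D B D) y = D r and recover c = D y. A toy sketch with made-up
# numbers; `B`, `r`, and `d` are illustrative, not part of the DIIS class.
import numpy as np

B = np.array([[4.0, 0.2, -1.0],
              [0.2, 9.0, -1.0],
              [-1.0, -1.0, 0.0]])      # Pulay-style matrix with bordered row/col
r = np.array([0.0, 0.0, -1.0])

d = B.diagonal().copy()
d[-1] = 1.0                            # leave the Lagrange row unscaled
d = d ** -0.5
B_scaled = d[:, None] * B * d[None, :] # D B D; unit diagonal in the top block
y = np.linalg.lstsq(B_scaled, d * r, rcond=None)[0]
c = d * y                              # undo the scaling

assert np.allclose(B @ c, r)           # same solution as the unscaled system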
def _compute_fxc(PQrho, half_Saux, halfp_Saux, rho_thresh=1.e-8):
    """
    Computes the gridless (P|fxc|Q) ALDA tensor.
    """

    naux = PQrho.shape[0]

    # Level it out
    PQrho_lvl = core.Matrix.triplet(half_Saux, PQrho, half_Saux, False, False, False)

    # Rotate into a diagonal basis
    rho = core.Vector("rho eigenvalues", naux)
    U = core.Matrix("rho eigenvectors", naux, naux)
    PQrho_lvl.diagonalize(U, rho, core.DiagonalizeOrder.Ascending)

    # "Gridless DFT"
    mask = rho.np < rho_thresh  # Values too small cause singularities
    rho.np[mask] = rho_thresh

    dft_size = rho.shape[0]

    inp = {"RHO_A": rho}
    out = {
        "V": core.Vector(dft_size),
        "V_RHO_A": core.Vector(dft_size),
        "V_RHO_A_RHO_A": core.Vector(dft_size)
    }

    func_x = core.LibXCFunctional('XC_LDA_X', True)
    func_x.compute_functional(inp, out, dft_size, 2)

    func_c = core.LibXCFunctional('XC_LDA_C_VWN', True)
    func_c.compute_functional(inp, out, dft_size, 2)

    out["V_RHO_A_RHO_A"].np[mask] = 0

    # Rotate back
    Ul = U.clone()
    Ul.np[:] *= out["V_RHO_A_RHO_A"].np
    tmp = core.Matrix.doublet(Ul, U, False, True)

    # Undo the leveling
    return core.Matrix.triplet(halfp_Saux, tmp, halfp_Saux, False, False, False)
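# The diagonalize/apply/rotate-back sequence above is the generic recipe for a
# scalar function of a symmetric matrix, f(A) = U f(Lambda) U^T -- here f is
# the LDA kernel evaluated at the eigenvalues of the leveled density tensor.
# A tiny generic sketch of that recipe; `matrix_function` is an illustrative
# helper, not a Psi4 API.
import numpy as np

def matrix_function(A, f):
    """Apply the scalar function f to the symmetric matrix A via its eigenbasis."""
    lam, U = np.linalg.eigh(A)
    return (U * f(lam)) @ U.T            # scale columns of U, rotate back

A = np.array([[2.0, 1.0], [1.0, 2.0]])
half = matrix_function(A, np.sqrt)       # matrix square root
assert np.allclose(half @ half, A)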
def copier(self, x, new_name):
    if isinstance(x, (core.Matrix, core.Vector)):
        copy = x.clone()
    elif isinstance(x, (core.dpdbuf4, core.dpdfile2)):
        copy = core.Matrix(x)
    else:
        raise TypeError("Unrecognized object type for DIIS.")
    copy.name = new_name

    if self.storage_policy == StoragePolicy.OnDisk:
        psio = core.IO.shared_object()
        if isinstance(x, core.Vector):
            copy.save(psio, psif.PSIF_LIBDIIS)
        else:
            copy.save(psio, psif.PSIF_LIBDIIS, core.SaveType.SubBlocks)
        copy = None

    return copy
def __call__(self, mol1_wfn, mol2_wfn):
    nbf = self.p.dimer_basis.nbf()
    nocc = mol1_wfn.nalpha() + mol2_wfn.nalpha()

    # Take the occupied orbitals from the two HF monomer wavefunctions
    # and pack them (block diagonal) into the dimer basis set.
    m1_OCC = mol1_wfn.Ca_subset('SO', 'OCC')
    m2_OCC = mol2_wfn.Ca_subset('SO', 'OCC')

    C = core.Matrix(nbf, nocc)
    C.np[:mol1_wfn.nso(), :mol1_wfn.nalpha()] = m1_OCC.np[:, :]
    C.np[-mol2_wfn.nso():, -mol2_wfn.nalpha():] = m2_OCC.np[:, :]

    C = orthogonalize(C, self.p.dimer_S)

    # At this point, it should be the case that C.T * S * C == I
    np.testing.assert_array_almost_equal(
        core.Matrix.triplet(C, self.p.dimer_S, C, True, False, False),
        np.eye(nocc))

    self.jk.C_clear()
    self.jk.C_left_add(C)
    self.jk.compute()

    J = self.jk.J()[0]
    K = self.jk.K()[0]
    D = self.jk.D()[0]

    # 2T + 2V + 2J - K
    FH = J.clone()
    FH.zero()
    FH.axpy(2, self.p.dimer_T)
    FH.axpy(2, self.p.dimer_V)
    FH.axpy(2, J)
    FH.axpy(-1, K)

    energy = FH.vector_dot(D) + self.p.dimer_basis.molecule().nuclear_repulsion_energy()
    hl = energy - (mol1_wfn.energy() + mol2_wfn.energy())
    return hl
def copier(self, x, new_name: str):
    """Copy the object x and give it a new_name. Save it to disk if needed."""
    if isinstance(x, (core.Matrix, core.Vector)):
        copy = x.clone()
    elif isinstance(x, (core.dpdbuf4, core.dpdfile2)):
        copy = core.Matrix(x)
    elif isinstance(x, float):
        # Never cache a _number_.
        return x
    else:
        raise TypeError("Unrecognized object type for DIIS.")
    copy.name = new_name

    if self.storage_policy == StoragePolicy.OnDisk:
        psio = core.IO.shared_object()
        if isinstance(x, core.Vector):
            copy.save(psio, psif.PSIF_LIBDIIS)
        else:
            copy.save(psio, psif.PSIF_LIBDIIS, core.SaveType.SubBlocks)
        copy = None

    return copy
def fcidump(wfn, fname='INTDUMP', oe_ints=None):
    """Save integrals to file in FCIDUMP format as defined in Comp. Phys. Commun. 54 75 (1989).

    Additional one-electron integrals, including orbital energies, can also be
    saved. This latter format can be used with the HANDE QMC code but is not
    standard.

    :returns: None

    :raises: ValidationError when SCF wavefunction is not RHF

    :type wfn: :py:class:`~psi4.core.Wavefunction`
    :param wfn: set of molecule, basis, orbitals from which to generate cube files
    :param fname: name of the integrals file, defaults to INTDUMP
    :param oe_ints: list of additional one-electron integrals to save to file.
        So far only EIGENVALUES is a valid option.

    :examples:

    >>> # [1] Save one- and two-electron integrals to standard FCIDUMP format
    >>> E, wfn = energy('scf', return_wfn=True)
    >>> fcidump(wfn)

    >>> # [2] Save orbital energies, one- and two-electron integrals.
    >>> E, wfn = energy('scf', return_wfn=True)
    >>> fcidump(wfn, oe_ints=['EIGENVALUES'])

    """
    # Get some options
    reference = core.get_option('SCF', 'REFERENCE')
    ints_tolerance = core.get_global_option('INTS_TOLERANCE')
    # Some sanity checks
    if reference not in ['RHF', 'UHF']:
        raise ValidationError('FCIDUMP not implemented for {} references\n'.format(reference))
    if oe_ints is None:
        oe_ints = []

    molecule = wfn.molecule()
    docc = wfn.doccpi()
    frzcpi = wfn.frzcpi()
    frzvpi = wfn.frzvpi()
    active_docc = docc - frzcpi
    active_socc = wfn.soccpi()
    active_mopi = wfn.nmopi() - frzcpi - frzvpi

    nbf = active_mopi.sum() if wfn.same_a_b_orbs() else 2 * active_mopi.sum()
    nirrep = wfn.nirrep()
    nelectron = 2 * active_docc.sum() + active_socc.sum()
    irrep_map = _irrep_map(wfn)

    wfn_irrep = 0
    for h, n_socc in enumerate(active_socc):
        if n_socc % 2 == 1:
            wfn_irrep ^= h

    core.print_out('Writing integrals in FCIDUMP format to ' + fname + '\n')
    # Generate FCIDUMP header
    header = '&FCI\n'
    header += 'NORB={:d},\n'.format(nbf)
    header += 'NELEC={:d},\n'.format(nelectron)
    header += 'MS2={:d},\n'.format(wfn.nalpha() - wfn.nbeta())
    header += 'UHF=.{}.,\n'.format(not wfn.same_a_b_orbs()).upper()
    orbsym = ''
    for h in range(active_mopi.n()):
        for n in range(frzcpi[h], frzcpi[h] + active_mopi[h]):
            orbsym += '{:d},'.format(irrep_map[h])
            if not wfn.same_a_b_orbs():
                orbsym += '{:d},'.format(irrep_map[h])
    header += 'ORBSYM={}\n'.format(orbsym)
    header += 'ISYM={:d},\n'.format(irrep_map[wfn_irrep])
    header += '&END\n'
    with open(fname, 'w') as intdump:
        intdump.write(header)

    # Get an IntegralTransform object
    check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), wfn)
    spaces = [core.MOSpace.all()]
    trans_type = core.IntegralTransform.TransformationType.Restricted
    if not wfn.same_a_b_orbs():
        trans_type = core.IntegralTransform.TransformationType.Unrestricted
    ints = core.IntegralTransform(wfn, spaces, trans_type)
    ints.transform_tei(core.MOSpace.all(), core.MOSpace.all(), core.MOSpace.all(), core.MOSpace.all())
    core.print_out('Integral transformation complete!\n')

    DPD_info = {'instance_id': ints.get_dpd_id(), 'alpha_MO': ints.DPD_ID('[A>=A]+'), 'beta_MO': 0}
    if not wfn.same_a_b_orbs():
        DPD_info['beta_MO'] = ints.DPD_ID("[a>=a]+")
    # Write TEI to fname in FCIDUMP format
    core.fcidump_tei_helper(nirrep, wfn.same_a_b_orbs(), DPD_info, ints_tolerance, fname)

    # Read-in OEI and write them to fname in FCIDUMP format
    # Indexing functions to translate from zero-based (C and Python) to
    # one-based (Fortran)
    mo_idx = lambda x: x + 1
    alpha_mo_idx = lambda x: 2 * x + 1
    beta_mo_idx = lambda x: 2 * (x + 1)

    with open(fname, 'a') as intdump:
        core.print_out('Writing frozen core operator in FCIDUMP format to ' + fname + '\n')
        if reference == 'RHF':
            PSIF_MO_FZC = 'MO-basis Frozen-Core Operator'
            moH = core.Matrix(PSIF_MO_FZC, wfn.nmopi(), wfn.nmopi())
            moH.load(core.IO.shared_object(), psif.PSIF_OEI)
            mo_slice = core.Slice(frzcpi, active_mopi)
            MO_FZC = moH.get_block(mo_slice, mo_slice)
            offset = 0
            for h, block in enumerate(MO_FZC.nph):
                il = np.tril_indices(block.shape[0])
                for index, x in np.ndenumerate(block[il]):
                    row = mo_idx(il[0][index] + offset)
                    col = mo_idx(il[1][index] + offset)
                    if (abs(x) > ints_tolerance):
                        intdump.write('{:29.20E}{:4d}{:4d}{:4d}{:4d}\n'.format(x, row, col, 0, 0))
                offset += block.shape[0]

            # Additional one-electron integrals as requested in oe_ints
            # Orbital energies
            core.print_out('Writing orbital energies in FCIDUMP format to ' + fname + '\n')
            if 'EIGENVALUES' in oe_ints:
                eigs_dump = write_eigenvalues(wfn.epsilon_a().get_block(mo_slice).to_array(), mo_idx)
                intdump.write(eigs_dump)
        else:
            PSIF_MO_A_FZC = 'MO-basis Alpha Frozen-Core Oper'
            moH_A = core.Matrix(PSIF_MO_A_FZC, wfn.nmopi(), wfn.nmopi())
            moH_A.load(core.IO.shared_object(), psif.PSIF_OEI)
            mo_slice = core.Slice(frzcpi, active_mopi)
            MO_FZC_A = moH_A.get_block(mo_slice, mo_slice)
            offset = 0
            for h, block in enumerate(MO_FZC_A.nph):
                il = np.tril_indices(block.shape[0])
                for index, x in np.ndenumerate(block[il]):
                    row = alpha_mo_idx(il[0][index] + offset)
                    col = alpha_mo_idx(il[1][index] + offset)
                    if (abs(x) > ints_tolerance):
                        intdump.write('{:29.20E}{:4d}{:4d}{:4d}{:4d}\n'.format(x, row, col, 0, 0))
                offset += block.shape[0]

            PSIF_MO_B_FZC = 'MO-basis Beta Frozen-Core Oper'
            moH_B = core.Matrix(PSIF_MO_B_FZC, wfn.nmopi(), wfn.nmopi())
            moH_B.load(core.IO.shared_object(), psif.PSIF_OEI)
            mo_slice = core.Slice(frzcpi, active_mopi)
            MO_FZC_B = moH_B.get_block(mo_slice, mo_slice)
            offset = 0
            for h, block in enumerate(MO_FZC_B.nph):
                il = np.tril_indices(block.shape[0])
                for index, x in np.ndenumerate(block[il]):
                    row = beta_mo_idx(il[0][index] + offset)
                    col = beta_mo_idx(il[1][index] + offset)
                    if (abs(x) > ints_tolerance):
                        intdump.write('{:29.20E}{:4d}{:4d}{:4d}{:4d}\n'.format(x, row, col, 0, 0))
                offset += block.shape[0]

            # Additional one-electron integrals as requested in oe_ints
            # Orbital energies
            core.print_out('Writing orbital energies in FCIDUMP format to ' + fname + '\n')
            if 'EIGENVALUES' in oe_ints:
                alpha_eigs_dump = write_eigenvalues(wfn.epsilon_a().get_block(mo_slice).to_array(), alpha_mo_idx)
                beta_eigs_dump = write_eigenvalues(wfn.epsilon_b().get_block(mo_slice).to_array(), beta_mo_idx)
                intdump.write(alpha_eigs_dump + beta_eigs_dump)

        # Dipole integrals
        #core.print_out('Writing dipole moment OEI in FCIDUMP format to ' + fname + '\n')
        # Traceless quadrupole integrals
        #core.print_out('Writing traceless quadrupole moment OEI in FCIDUMP format to ' + fname + '\n')

        # Frozen core + nuclear repulsion energy
        core.print_out('Writing frozen core + nuclear repulsion energy in FCIDUMP format to ' + fname + '\n')
        e_fzc = ints.get_frozen_core_energy()
        e_nuc = molecule.nuclear_repulsion_energy(wfn.get_dipole_field_strength())
        intdump.write('{:29.20E}{:4d}{:4d}{:4d}{:4d}\n'.format(e_fzc + e_nuc, 0, 0, 0, 0))

    core.print_out('Done generating {} with integrals in FCIDUMP format.\n'.format(fname))
def extrapolate(self, out=None):
    """
    Extrapolates next state vector from the current set of state and error vectors.

    Parameters
    ----------
    out : :py:class:`~psi4.core.Matrix`, optional
        An array in which to place the next state vector.

    Returns
    -------
    ret : :py:class:`~psi4.core.Matrix`
        Returns the next state vector.

    """
    # Limit size of DIIS vector
    diis_count = len(self.state)

    if diis_count == 0:
        raise ValidationError("DIIS: No previous vectors.")
    if diis_count == 1:
        return self.state[0]

    if diis_count > self.max_vec:
        if self.removal_policy == "OLDEST":
            pos = 0
        else:
            pos = np.argmax([x.rms() for x in self.error])

        del self.state[pos]
        del self.error[pos]
        diis_count -= 1

    # Build error matrix B
    B = np.empty((diis_count + 1, diis_count + 1))
    B[-1, :] = 1
    B[:, -1] = 1
    B[-1, -1] = 0
    for num1, e1 in enumerate(self.error):
        B[num1, num1] = e1.vector_dot(e1)
        for num2, e2 in enumerate(self.error):
            if num2 >= num1:
                continue
            val = e1.vector_dot(e2)
            B[num1, num2] = B[num2, num1] = val

    # Build residual vector
    resid = np.zeros(diis_count + 1)
    resid[-1] = 1

    # Solve Pulay equations
    # Yea, yea this is unstable, make it stable
    iszero = np.any(np.diag(B)[:-1] <= 0.0)
    if iszero:
        S = np.ones((diis_count + 1))
    else:
        S = np.diag(B).copy()
        S[:-1] **= -0.5
        S[-1] = 1

    # Then we gotta do a custom inverse
    B *= S[:, None] * S
    invB = core.Matrix.from_array(B)
    invB.power(-1.0, 1.e-12)

    ci = np.dot(invB, resid)
    ci *= S

    # Combination of previous Fock matrices
    if out is None:
        out = core.Matrix("DIIS result", self.state[0].rowdim(), self.state[1].coldim())
    else:
        out.zero()

    for num, c in enumerate(ci[:-1]):
        out.axpy(c, self.state[num])

    return out
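# For a sense of what the Pulay system above is doing, here is a standalone toy
# with plain NumPy vectors: B holds error-vector overlaps, the bordering
# row/column is the Lagrange constraint sum(c) = 1, and the resulting linear
# combination has an error norm no larger than any single stored error.
# `errors`, `B`, and `c` are illustrative names, not part of the class above.
import numpy as np

errors = [np.array([1.0, 0.5]), np.array([-0.8, 0.3]), np.array([0.1, -0.9])]
n = len(errors)
B = np.zeros((n + 1, n + 1))
for i, ei in enumerate(errors):
    for j, ej in enumerate(errors):
        B[i, j] = ei @ ej
B[-1, :-1] = B[:-1, -1] = 1          # Lagrange row/column enforcing sum(c) = 1
rhs = np.zeros(n + 1)
rhs[-1] = 1

c = np.linalg.solve(B, rhs)[:-1]
extrapolated = sum(ci * ei for ci, ei in zip(c, errors))
assert np.linalg.norm(extrapolated) <= min(np.linalg.norm(e) for e in errors)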
def run_gcp(self, func=None, dertype=None, verbose=False):  # dashlvl=None, dashparam=None
    """Function to call Grimme's gcp program (http://toc.uni-muenster.de/DFTD3/)
    to compute the geometrical counterpoise (gCP) correction for the
    method/basis combination *func*. Returns energy if *dertype* is 0,
    gradient if *dertype* is 1, else tuple of energy and gradient if *dertype*
    unspecified. The gcp executable must be independently compiled and found
    in :envvar:`PATH` or :envvar:`PSIPATH`. *self* may be either a
    qcdb.Molecule (sensibly) or a psi4.Molecule (works b/c psi4.Molecule has
    been extended by this method py-side and only public interface fns used)
    or a string that can be instantiated into a qcdb.Molecule.
    """
    # Create (if necessary) and update qcdb.Molecule
    if isinstance(self, Molecule):
        # called on a qcdb.Molecule
        pass
    elif isinstance(self, core.Molecule):
        # called on a python export of a psi4.core.Molecule (py-side through Psi4's driver)
        self.create_psi4_string_from_molecule()
    elif isinstance(self, basestring):
        # called on a string representation of a psi4.Molecule (c-side through psi4.Dispersion)
        self = Molecule(self)
    else:
        raise ValidationError("""Argument mol must be psi4string or qcdb.Molecule""")
    self.update_geometry()

    # # Validate arguments
    # dashlvl = dashlvl.lower()
    # dashlvl = dash_alias['-' + dashlvl][1:] if ('-' + dashlvl) in dash_alias.keys() else dashlvl
    # if dashlvl not in dashcoeff.keys():
    #     raise ValidationError("""-D correction level %s is not available. Choose among %s.""" % (dashlvl, dashcoeff.keys()))

    if dertype is None:
        dertype = -1
    elif der0th.match(str(dertype)):
        dertype = 0
    elif der1st.match(str(dertype)):
        dertype = 1
    # elif der2nd.match(str(dertype)):
    #     raise ValidationError('Requested derivative level \'dertype\' %s not valid for run_gcp.' % (dertype))
    else:
        raise ValidationError('Requested derivative level \'dertype\' %s not valid for run_gcp.' % (dertype))

    # if func is None:
    #     if dashparam is None:
    #         # defunct case
    #         raise ValidationError("""Parameters for -D correction missing. Provide a func or a dashparam kwarg.""")
    #     else:
    #         # case where all param read from dashparam dict (which must have all correct keys)
    #         func = 'custom'
    #         dashcoeff[dashlvl][func] = {}
    #         dashparam = dict((k.lower(), v) for k, v in dashparam.iteritems())
    #         for key in dashcoeff[dashlvl]['b3lyp'].keys():
    #             if key in dashparam.keys():
    #                 dashcoeff[dashlvl][func][key] = dashparam[key]
    #             else:
    #                 raise ValidationError("""Parameter %s is missing from dashparam dict %s.""" % (key, dashparam))
    # else:
    #     func = func.lower()
    #     if func not in dashcoeff[dashlvl].keys():
    #         raise ValidationError("""Functional %s is not available for -D level %s.""" % (func, dashlvl))
    #     if dashparam is None:
    #         # (normal) case where all param taken from dashcoeff above
    #         pass
    #     else:
    #         # case where items in dashparam dict can override param taken from dashcoeff above
    #         dashparam = dict((k.lower(), v) for k, v in dashparam.iteritems())
    #         for key in dashcoeff[dashlvl]['b3lyp'].keys():
    #             if key in dashparam.keys():
    #                 dashcoeff[dashlvl][func][key] = dashparam[key]

    # TODO temp until figure out paramfile
    allowed_funcs = [
        'HF/MINIS', 'DFT/MINIS', 'HF/MINIX', 'DFT/MINIX', 'HF/SV', 'DFT/SV',
        'HF/def2-SV(P)', 'DFT/def2-SV(P)', 'HF/def2-SVP', 'DFT/def2-SVP',
        'HF/DZP', 'DFT/DZP', 'HF/def-TZVP', 'DFT/def-TZVP', 'HF/def2-TZVP',
        'DFT/def2-TZVP', 'HF/631Gd', 'DFT/631Gd', 'HF/def2-TZVP', 'DFT/def2-TZVP',
        'HF/cc-pVDZ', 'DFT/cc-pVDZ', 'HF/aug-cc-pVDZ', 'DFT/aug-cc-pVDZ',
        'DFT/SV(P/h,c)', 'DFT/LANL', 'DFT/pobTZVP', 'TPSS/def2-SVP', 'PW6B95/def2-SVP',
        # specials
        'hf3c', 'pbeh3c'
    ]
    allowed_funcs = [f.lower() for f in allowed_funcs]
    if func.lower() not in allowed_funcs:
        raise Dftd3Error("""bad gCP func: %s. need one of: %r""" % (func, allowed_funcs))

    # Move ~/.dftd3par.<hostname> out of the way so it won't interfere
    defaultfile = os.path.expanduser('~') + '/.dftd3par.' + socket.gethostname()
    defmoved = False
    if os.path.isfile(defaultfile):
        os.rename(defaultfile, defaultfile + '_hide')
        defmoved = True

    # Find environment by merging PSIPATH and PATH environment variables
    lenv = {
        'PATH': ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) + \
                ':' + os.environ.get('PATH'),
        'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH')
    }
    # Filter out None values as subprocess will fault on them
    lenv = {k: v for k, v in lenv.items() if v is not None}

    # Find out if running from Psi4 for scratch details and such
    try:
        import psi4
    except ImportError as err:
        isP4regime = False
    else:
        isP4regime = True

    # Setup unique scratch directory and move in
    current_directory = os.getcwd()
    if isP4regime:
        psioh = core.IOManager.shared_object()
        psio = core.IO.shared_object()
        os.chdir(psioh.get_default_path())
        gcp_tmpdir = 'psi.' + str(os.getpid()) + '.' + psio.get_default_namespace() + \
                     '.gcp.' + str(uuid.uuid4())[:8]
    else:
        gcp_tmpdir = os.environ['HOME'] + os.sep + 'gcp_' + str(uuid.uuid4())[:8]
    if os.path.exists(gcp_tmpdir) is False:
        os.mkdir(gcp_tmpdir)
    os.chdir(gcp_tmpdir)

    # Write gcp_parameters file that governs cp correction
    # paramcontents = gcp_server(func, dashlvl, 'dftd3')
    # paramfile1 = 'dftd3_parameters'  # older patched name
    # with open(paramfile1, 'w') as handle:
    #     handle.write(paramcontents)
    # paramfile2 = '.gcppar'
    # with open(paramfile2, 'w') as handle:
    #     handle.write(paramcontents)
    ### Two kinds of parameter files can be read in: A short and an extended version.
    ### Both are read from $HOME/.gcppar.$HOSTNAME by default. If the option -local
    ### is specified the file is read in from the current working directory: .gcppar
    ### The short version reads in: basis-keywo

    # Write dftd3_geometry file that supplies geometry to dispersion calc
    numAtoms = self.natom()
    geom = self.save_string_xyz()
    reals = []
    for line in geom.splitlines():
        lline = line.split()
        if len(lline) != 4:
            continue
        if lline[0] == 'Gh':
            numAtoms -= 1
        else:
            reals.append(line)

    geomtext = str(numAtoms) + '\n\n'
    for line in reals:
        geomtext += line.strip() + '\n'
    geomfile = './gcp_geometry.xyz'
    with open(geomfile, 'w') as handle:
        handle.write(geomtext)
    # TODO somehow the variations on save_string_xyz and
    #   whether natom and chgmult does or doesn't get written
    #   have gotten all tangled. I fear this doesn't work
    #   the same btwn libmints and qcdb or for ghosts

    # Call gcp program
    command = ['gcp', geomfile]
    command.extend(['-level', func])
    if dertype != 0:
        command.append('-grad')
    try:
        #print('command', command)
        dashout = subprocess.Popen(command, stdout=subprocess.PIPE, env=lenv)
    except OSError as e:
        raise ValidationError('Program gcp not found in path. %s' % e)
    out, err = dashout.communicate()

    # Parse output
    success = False
    for line in out.splitlines():
        line = line.decode('utf-8')
        if re.match(' Egcp:', line):
            sline = line.split()
            dashd = float(sline[1])
        if re.match(' normal termination of gCP', line):
            success = True

    if not success:
        os.chdir(current_directory)
        raise Dftd3Error("""Unsuccessful gCP run.""")

    # Parse grad output
    if dertype != 0:
        derivfile = './gcp_gradient'
        dfile = open(derivfile, 'r')
        dashdderiv = []
        for line in geom.splitlines():
            lline = line.split()
            if len(lline) != 4:
                continue
            if lline[0] == 'Gh':
                dashdderiv.append([0.0, 0.0, 0.0])
            else:
                dashdderiv.append([float(x.replace('D', 'E')) for x in dfile.readline().split()])
        dfile.close()

        if len(dashdderiv) != self.natom():
            raise ValidationError('Program gcp gradient file has %d atoms- %d expected.' % \
                (len(dashdderiv), self.natom()))

    # Prepare results for Psi4
    if isP4regime and dertype != 0:
        core.set_variable('GCP CORRECTION ENERGY', dashd)
        psi_dashdderiv = core.Matrix(self.natom(), 3)
        psi_dashdderiv.set(dashdderiv)

    # Print program output to file if verbose
    if not verbose and isP4regime:
        verbose = True if core.get_option('SCF', 'PRINT') >= 3 else False
    if verbose:
        text = '\n  ==> GCP Output <==\n'
        text += out.decode('utf-8')
        if dertype != 0:
            with open(derivfile, 'r') as handle:
                text += handle.read().replace('D', 'E')
            text += '\n'
        if isP4regime:
            core.print_out(text)
        else:
            print(text)

    # # Clean up files and remove scratch directory
    # os.unlink(paramfile1)
    # os.unlink(paramfile2)
    # os.unlink(geomfile)
    # if dertype != 0:
    #     os.unlink(derivfile)
    # if defmoved is True:
    #     os.rename(defaultfile + '_hide', defaultfile)

    os.chdir('..')
    # try:
    #     shutil.rmtree(dftd3_tmpdir)
    # except OSError as e:
    #     ValidationError('Unable to remove dftd3 temporary directory %s' % e)
    os.chdir(current_directory)

    # return -D & d(-D)/dx
    if dertype == -1:
        return dashd, dashdderiv
    elif dertype == 0:
        return dashd
    elif dertype == 1:
        return psi_dashdderiv
def new_vector(self, name=""): """Obtain a blank matrix object with the correct symmetry""" return core.Matrix(name, self.occpi, self.virpi, self.G_trans)
def mcscf_solver(ref_wfn):

    # Build CIWavefunction
    core.prepare_options_for_module("DETCI")
    ciwfn = core.CIWavefunction(ref_wfn)

    # Hush a lot of CI output
    ciwfn.set_print(0)

    # Begin with a normal two-step
    step_type = 'Initial CI'
    total_step = core.Matrix("Total step", ciwfn.get_dimension('OA'), ciwfn.get_dimension('AV'))
    start_orbs = ciwfn.get_orbitals("ROT").clone()
    ciwfn.set_orbitals("ROT", start_orbs)

    # Grab da options
    mcscf_orb_grad_conv = core.get_option("DETCI", "MCSCF_R_CONVERGENCE")
    mcscf_e_conv = core.get_option("DETCI", "MCSCF_E_CONVERGENCE")
    mcscf_max_macroiteration = core.get_option("DETCI", "MCSCF_MAXITER")
    mcscf_type = core.get_option("DETCI", "MCSCF_TYPE")
    mcscf_d_file = core.get_option("DETCI", "CI_FILE_START") + 3
    mcscf_nroots = core.get_option("DETCI", "NUM_ROOTS")
    mcscf_wavefunction_type = core.get_option("DETCI", "WFN")
    mcscf_ndet = ciwfn.ndet()
    mcscf_nuclear_energy = ciwfn.molecule().nuclear_repulsion_energy()
    mcscf_steplimit = core.get_option("DETCI", "MCSCF_MAX_ROT")
    mcscf_rotate = core.get_option("DETCI", "MCSCF_ROTATE")

    # DIIS info
    mcscf_diis_start = core.get_option("DETCI", "MCSCF_DIIS_START")
    mcscf_diis_freq = core.get_option("DETCI", "MCSCF_DIIS_FREQ")
    mcscf_diis_error_type = core.get_option("DETCI", "MCSCF_DIIS_ERROR_TYPE")
    mcscf_diis_max_vecs = core.get_option("DETCI", "MCSCF_DIIS_MAX_VECS")

    # One-step info
    mcscf_target_conv_type = core.get_option("DETCI", "MCSCF_ALGORITHM")
    mcscf_so_start_grad = core.get_option("DETCI", "MCSCF_SO_START_GRAD")
    mcscf_so_start_e = core.get_option("DETCI", "MCSCF_SO_START_E")
    mcscf_current_step_type = 'Initial CI'

    # Start with SCF energy and other params
    scf_energy = ciwfn.variable("HF TOTAL ENERGY")
    eold = scf_energy
    norb_iter = 1
    converged = False
    ah_step = False
    qc_step = False
    approx_integrals_only = True

    # Fake info to start with the initial diagonalization
    ediff = 1.e-4
    orb_grad_rms = 1.e-3

    # Grab needed objects
    diis_obj = solvers.DIIS(mcscf_diis_max_vecs)
    mcscf_obj = ciwfn.mcscf_object()

    # Execute the rotate command
    for rot in mcscf_rotate:
        if len(rot) != 4:
            raise p4util.PsiException("Each element of the MCSCF rotate command requires 4 arguments (irrep, orb1, orb2, theta).")

        irrep, orb1, orb2, theta = rot
        if irrep > ciwfn.Ca().nirrep():
            raise p4util.PsiException("MCSCF_ROTATE: Expression %s irrep number is larger than the number of irreps" % (str(rot)))

        if max(orb1, orb2) > ciwfn.Ca().coldim()[irrep]:
            raise p4util.PsiException("MCSCF_ROTATE: Expression %s orbital number exceeds number of orbitals in irrep" % (str(rot)))

        theta = np.deg2rad(theta)

        x = ciwfn.Ca().nph[irrep][:, orb1].copy()
        y = ciwfn.Ca().nph[irrep][:, orb2].copy()

        xp = np.cos(theta) * x - np.sin(theta) * y
        yp = np.sin(theta) * x + np.cos(theta) * y

        ciwfn.Ca().nph[irrep][:, orb1] = xp
        ciwfn.Ca().nph[irrep][:, orb2] = yp

    # Limited RAS functionality
    if core.get_local_option("DETCI", "WFN") == "RASSCF" and mcscf_target_conv_type != "TS":
        core.print_out("\n  Warning! Only the TS algorithm for RASSCF wavefunction is currently supported.\n")
        core.print_out("           Switching to the TS algorithm.\n\n")
        mcscf_target_conv_type = "TS"

    # Print out headers
    if mcscf_type == "CONV":
        mtype = "   @MCSCF"
        core.print_out("\n   ==> Starting MCSCF iterations <==\n\n")
        core.print_out("        Iter         Total Energy        Delta E   Orb RMS    CI RMS  NCI NORB\n")
    elif mcscf_type == "DF":
        mtype = "   @DF-MCSCF"
        core.print_out("\n   ==> Starting DF-MCSCF iterations <==\n\n")
        core.print_out("           Iter         Total Energy        Delta E   Orb RMS    CI RMS  NCI NORB\n")
    else:
        mtype = "   @AO-MCSCF"
        core.print_out("\n   ==> Starting AO-MCSCF iterations <==\n\n")
        core.print_out("           Iter         Total Energy        Delta E   Orb RMS    CI RMS  NCI NORB\n")

    # Iterate !
    for mcscf_iter in range(1, mcscf_max_macroiteration + 1):

        # Transform integrals, diagonalize H
        ciwfn.transform_mcscf_integrals(approx_integrals_only)
        nci_iter = ciwfn.diag_h(abs(ediff) * 1.e-2, orb_grad_rms * 1.e-3)

        # After the first diag we need to switch to READ
        ciwfn.set_ci_guess("DFILE")

        ciwfn.form_opdm()
        ciwfn.form_tpdm()
        ci_grad_rms = core.variable("DETCI AVG DVEC NORM")

        # Update MCSCF object
        Cocc = ciwfn.get_orbitals("DOCC")
        Cact = ciwfn.get_orbitals("ACT")
        Cvir = ciwfn.get_orbitals("VIR")
        opdm = ciwfn.get_opdm(-1, -1, "SUM", False)
        tpdm = ciwfn.get_tpdm("SUM", True)
        mcscf_obj.update(Cocc, Cact, Cvir, opdm, tpdm)

        current_energy = core.variable("MCSCF TOTAL ENERGY")

        orb_grad_rms = mcscf_obj.gradient_rms()
        ediff = current_energy - eold

        # Print iterations
        print_iteration(mtype, mcscf_iter, current_energy, ediff, orb_grad_rms, ci_grad_rms,
                        nci_iter, norb_iter, mcscf_current_step_type)
        eold = current_energy

        if mcscf_current_step_type == 'Initial CI':
            mcscf_current_step_type = 'TS'

        # Check convergence
        if (orb_grad_rms < mcscf_orb_grad_conv) and (abs(ediff) < abs(mcscf_e_conv)) and\
                (mcscf_iter > 3) and not qc_step:
            core.print_out("\n       %s has converged!\n\n" % mtype)
            converged = True
            break

        # Which orbital convergence are we doing?
        if ah_step:
            converged, norb_iter, step = ah_iteration(mcscf_obj, print_micro=False)
            norb_iter += 1

            if converged:
                mcscf_current_step_type = 'AH'
            else:
                core.print_out("      !Warning. Augmented Hessian did not converge. Taking an approx step.\n")
                step = mcscf_obj.approx_solve()
                mcscf_current_step_type = 'TS, AH failure'
        else:
            step = mcscf_obj.approx_solve()
            step_type = 'TS'

        maxstep = step.absmax()
        if maxstep > mcscf_steplimit:
            core.print_out('      Warning! Maxstep = %4.2f, scaling to %4.2f\n' % (maxstep, mcscf_steplimit))
            step.scale(mcscf_steplimit / maxstep)

        xstep = total_step.clone()
        total_step.add(step)

        # Do or add DIIS
        if (mcscf_iter >= mcscf_diis_start) and ("TS" in mcscf_current_step_type):

            # Figure out DIIS error vector
            if mcscf_diis_error_type == "GRAD":
                error = core.Matrix.triplet(ciwfn.get_orbitals("OA"), mcscf_obj.gradient(),
                                            ciwfn.get_orbitals("AV"), False, False, True)
            else:
                error = step

            diis_obj.add(total_step, error)

            if not (mcscf_iter % mcscf_diis_freq):
                total_step = diis_obj.extrapolate()
                mcscf_current_step_type = 'TS, DIIS'

        # Build the rotation by continuous updates
        if mcscf_iter == 1:
            totalU = mcscf_obj.form_rotation_matrix(total_step)
        else:
            xstep.axpy(-1.0, total_step)
            xstep.scale(-1.0)
            Ustep = mcscf_obj.form_rotation_matrix(xstep)
            totalU = core.Matrix.doublet(totalU, Ustep, False, False)

        # Build the rotation directly (not recommended)
        # orbs_mat = mcscf_obj.Ck(start_orbs, total_step)

        # Finally rotate and set orbitals
        orbs_mat = core.Matrix.doublet(start_orbs, totalU, False, False)
        ciwfn.set_orbitals("ROT", orbs_mat)

        # Figure out what the next step should be
        if (orb_grad_rms < mcscf_so_start_grad) and (abs(ediff) < abs(mcscf_so_start_e)) and\
                (mcscf_iter >= 2):
            if mcscf_target_conv_type == 'AH':
                approx_integrals_only = False
                ah_step = True
            elif mcscf_target_conv_type == 'OS':
                approx_integrals_only = False
                mcscf_current_step_type = 'OS, Prep'
                break
            else:
                continue
        #raise p4util.PsiException("")

    # If we converged do not do onestep
    if converged or (mcscf_target_conv_type != 'OS'):
        one_step_iters = []

    # If we are not converged load in Dvec and build iters array
    else:
        one_step_iters = range(mcscf_iter + 1, mcscf_max_macroiteration + 1)
        dvec = ciwfn.D_vector()
        dvec.init_io_files(True)
        dvec.read(0, 0)
        dvec.symnormalize(1.0, 0)

        ci_grad = ciwfn.new_civector(1, mcscf_d_file + 1, True, True)
        ci_grad.set_nvec(1)
        ci_grad.init_io_files(True)

    # Loop for onestep
    for mcscf_iter in one_step_iters:

        # Transform integrals and update the MCSCF object
        ciwfn.transform_mcscf_integrals(ciwfn.H(), False)
        ciwfn.form_opdm()
        ciwfn.form_tpdm()

        # Update MCSCF object
        Cocc = ciwfn.get_orbitals("DOCC")
        Cact = ciwfn.get_orbitals("ACT")
        Cvir = ciwfn.get_orbitals("VIR")
        opdm = ciwfn.get_opdm(-1, -1, "SUM", False)
        tpdm = ciwfn.get_tpdm("SUM", True)
        mcscf_obj.update(Cocc, Cact, Cvir, opdm, tpdm)

        orb_grad_rms = mcscf_obj.gradient_rms()

        # Warning! Does not work for SA-MCSCF
        current_energy = mcscf_obj.current_total_energy()
        current_energy += mcscf_nuclear_energy

        core.set_variable("CI ROOT %d TOTAL ENERGY" % 1, current_energy)
        core.set_variable("CURRENT ENERGY", current_energy)

        docc_energy = mcscf_obj.current_docc_energy()
        ci_energy = mcscf_obj.current_ci_energy()

        # Compute CI gradient
        ciwfn.sigma(dvec, ci_grad, 0, 0)
        ci_grad.scale(2.0, 0)
        ci_grad.axpy(-2.0 * ci_energy, dvec, 0, 0)

        ci_grad_rms = ci_grad.norm(0)
        orb_grad_rms = mcscf_obj.gradient().rms()

        ediff = current_energy - eold

        print_iteration(mtype, mcscf_iter, current_energy, ediff, orb_grad_rms, ci_grad_rms,
                        nci_iter, norb_iter, mcscf_current_step_type)

        mcscf_current_step_type = 'OS'

        eold = current_energy

        if (orb_grad_rms < mcscf_orb_grad_conv) and (abs(ediff) < abs(mcscf_e_conv)):
            core.print_out("\n       %s has converged!\n\n" % mtype)
            converged = True
            break

        # Take a step
        converged, norb_iter, nci_iter, step = qc_iteration(dvec, ci_grad, ciwfn, mcscf_obj)

        # Rotate integrals to new frame
        total_step.add(step)
        orbs_mat = mcscf_obj.Ck(ciwfn.get_orbitals("ROT"), step)
        ciwfn.set_orbitals("ROT", orbs_mat)

    core.print_out(mtype + " Final Energy: %20.15f\n" % current_energy)

    # Die if we did not converge
    if (not converged):
        if core.get_global_option("DIE_IF_NOT_CONVERGED"):
            raise p4util.PsiException("MCSCF: Iterations did not converge!")
        else:
            core.print_out("\nWarning! MCSCF iterations did not converge!\n\n")

    # Print out CI vector information
    if mcscf_target_conv_type == 'OS':
        dvec.close_io_files()
        ci_grad.close_io_files()

    # For orbital invariant methods we transform the orbitals to the natural or
    # semicanonical basis. Frozen doubly occupied and virtual orbitals are not
    # modified.
    if core.get_option("DETCI", "WFN") == "CASSCF":
        # Do we diagonalize the opdm?
        if core.get_option("DETCI", "NAT_ORBS"):
            ciwfn.ci_nat_orbs()
        else:
            ciwfn.semicanonical_orbs()

        # Retransform integrals and update CI coeffs., OPDM, and TPDM
        ciwfn.transform_mcscf_integrals(approx_integrals_only)
        nci_iter = ciwfn.diag_h(abs(ediff) * 1.e-2, orb_grad_rms * 1.e-3)

        ciwfn.set_ci_guess("DFILE")

        ciwfn.form_opdm()
        ciwfn.form_tpdm()

    proc_util.print_ci_results(ciwfn, "MCSCF", scf_energy, current_energy, print_opdm_no=True)

    # Set final energy
    core.set_variable("CURRENT ENERGY", core.variable("MCSCF TOTAL ENERGY"))

    # What do we need to cleanup?
    if core.get_option("DETCI", "MCSCF_CI_CLEANUP"):
        ciwfn.cleanup_ci()
    if core.get_option("DETCI", "MCSCF_DPD_CLEANUP"):
        ciwfn.cleanup_dpd()

    del diis_obj
    del mcscf_obj
    return ciwfn
def _write_molden(
    self: core.Wavefunction,
    filename: Optional[str] = None,
    do_virtual: Optional[bool] = None,
    use_natural: bool = False,
):
    """Writes wavefunction information in *wfn* to *filename* in molden format.
    Will write natural orbitals from *density* (MO basis) if supplied.
    Warning! Most post-SCF wavefunctions do not build the density as this is
    often much more costly than the energy. In addition, the wavefunction
    density attributes (Da and Db) return the SO density and must be
    transformed to the MO basis to use with this function.

    .. versionadded:: 0.5
       *wfn* parameter passed explicitly

    :returns: None

    :type filename:
    :param filename:

        Destination file name for MOLDEN file. If unspecified (None), a file
        name will be generated from the molecule name.

    :type do_virtual:
    :param do_virtual:

        Do write all the MOs to the MOLDEN file (True) or discard the
        unoccupied MOs (False). Not valid for NO's. If unspecified (None),
        value taken from :term:`MOLDEN_WITH_VIRTUAL <MOLDEN_WITH_VIRTUAL (GLOBALS)>`.

    :type use_natural:
    :param use_natural:

        Write natural orbitals determined from density on wavefunction.

    :examples:

    1. Molden file with the Kohn-Sham orbitals of a DFT calculation.

       >>> E, wfn = energy('b3lyp', return_wfn=True)
       >>> wfn.molden('mycalc.molden')

    2. Molden file with the natural orbitals of a CCSD computation. For
       correlated methods, an energy call will not compute the density.
       "properties" or "gradient" must be called.

       >>> E, wfn = properties('ccsd', return_wfn=True)
       >>> wfn.molden('ccsd_no.molden', use_natural=True)

    3. To supply a custom density matrix, manually set the Da and Db of the
       wavefunction. This is used, for example, to write natural orbitals
       coming from a root computed by a ``CIWavefunction`` computation, e.g.,
       ``detci``, ``fci``, ``casscf``. The first two arguments of
       :py:meth:`~psi4.core.CIWavefunction.get_opdm` can be set to ``n, n``
       where n => 0 selects the root to write out, provided these roots were
       computed, see :term:`NUM_ROOTS <NUM_ROOTS (DETCI)>`. The third argument
       controls the spin (``"A"``, ``"B"`` or ``"SUM"``) and the final boolean
       option determines whether inactive orbitals are included.

       >>> E, wfn = energy('detci', return_wfn=True)
       >>> wfn.Da() = wfn.get_opdm(0, 0, "A", True)
       >>> wfn.Db() = wfn.get_opdm(0, 0, "B", True)
       >>> molden(wfn, 'no_root1.molden', use_natural=True)

    """

    if filename is None:
        filename = core.get_writer_file_prefix(self.molecule().name()) + ".molden"

    if do_virtual is None:
        do_virtual = bool(core.get_option("SCF", "MOLDEN_WITH_VIRTUAL"))

    basisset = self.basisset()
    mol = self.molecule()

    # Header and geometry (Atom, Atom #, Z, x, y, z)
    mol_string = '[Molden Format]\n[Atoms] (AU)\n'
    for atom in range(mol.natom()):
        mol_string += f"{mol.symbol(atom):2s}  {atom+1:2d}  {int(mol.Z(atom)):3d}   {mol.x(atom):20.10f} {mol.y(atom):20.10f} {mol.z(atom):20.10f}\n"

    # Dump basis set
    mol_string += '[GTO]\n'
    for atom in range(mol.natom()):
        mol_string += f"  {atom+1:d} 0\n"
        for rel_shell_idx in range(basisset.nshell_on_center(atom)):
            abs_shell_idx = basisset.shell_on_center(atom, rel_shell_idx)
            shell = basisset.shell(abs_shell_idx)
            mol_string += f" {shell.amchar:s}{shell.nprimitive:5d}  1.00\n"
            for prim in range(shell.nprimitive):
                mol_string += f"{shell.exp(prim):20.10f} {shell.original_coef(prim):20.10f}\n"
        mol_string += '\n'

    if use_natural:
        # Alphas
        nmopi = self.nmopi()
        #MO_Da = core.Matrix("MO Alpha Density Matrix", nmopi, nmopi)
        #MO_Da.transform(self.Da(), self.Ca().transpose())
        MO_Da = self.Da_subset("MO")
        NO_Ra = core.Matrix("NO Alpha Rotation Matrix", nmopi, nmopi)
        occupation_a = core.Vector(nmopi)
        MO_Da.diagonalize(NO_Ra, occupation_a, core.DiagonalizeOrder.Descending)
        Ca = core.doublet(self.Ca(), NO_Ra, False, False)
        epsilon_a = occupation_a
        # Betas
        #MO_Db = core.Matrix("MO Beta Density Matrix", nmopi, nmopi)
        #MO_Db.transform(self.Db(), self.Cb().transpose())
        MO_Db = self.Db_subset("MO")
        NO_Rb = core.Matrix("NO Beta Rotation Matrix", nmopi, nmopi)
        occupation_b = core.Vector(nmopi)
        MO_Db.diagonalize(NO_Rb, occupation_b, core.DiagonalizeOrder.Descending)
        Cb = core.doublet(self.Cb(), NO_Rb, False, False)
        epsilon_b = occupation_b
    else:
        Ca = self.Ca()
        Cb = self.Cb()
        occupation_a = self.occupation_a()
        occupation_b = self.occupation_b()
        epsilon_a = self.epsilon_a()
        epsilon_b = self.epsilon_b()

    # Convert C matrices to AO MO basis. Ca_subset loses information about
    # which symmetry an orbital originally had, which is why we can't use it.
    aotoso = self.aotoso()
    Ca_ao_mo = core.doublet(aotoso, Ca, False, False).nph
    Cb_ao_mo = core.doublet(aotoso, Cb, False, False).nph
    ao_overlap = self.mintshelper().ao_overlap().np
    # Convert from Psi4 internal normalization to the unit normalization expected by Molden
    ao_normalizer = ao_overlap.diagonal()**(-1 / 2)
    Ca_ao_mo = core.Matrix.from_array([(i.T / ao_normalizer).T for i in Ca_ao_mo])
    Cb_ao_mo = core.Matrix.from_array([(i.T / ao_normalizer).T for i in Cb_ao_mo])

    # Reorder AO x MO matrix to fit Molden conventions
    '''
    Reordering expected by Molden
    P: x, y, z
    5D: D 0, D+1, D-1, D+2, D-2
    6D: xx, yy, zz, xy, xz, yz
    7F: F 0, F+1, F-1, F+2, F-2, F+3, F-3
    10F: xxx, yyy, zzz, xyy, xxy, xxz, xzz, yzz, yyz, xyz
    9G: G 0, G+1, G-1, G+2, G-2, G+3, G-3, G+4, G-4
    15G: xxxx, yyyy, zzzz, xxxy, xxxz, yyyz, zzzx, zzzy, xxyy, xxzz, yyzz, xxyz, yyxz, zzxy
    Molden does not handle angular momenta higher than G
    '''
    molden_cartesian_order = [
        [2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # p
        [0, 3, 4, 1, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # d
        [0, 4, 5, 3, 9, 6, 1, 8, 7, 2, 0, 0, 0, 0, 0],  # f
        [0, 3, 4, 9, 12, 10, 5, 13, 14, 7, 1, 6, 11, 8, 2]  # g
    ]
    nirrep = self.nirrep()
    count = 0  # Keeps track of count for reordering
    temp_a = Ca_ao_mo.clone()  # Placeholders for original AO x MO matrices
    temp_b = Cb_ao_mo.clone()

    for i in range(basisset.nshell()):
        am = basisset.shell(i).am
        if (am == 1 and basisset.has_puream()) or (am > 1 and am < 5 and basisset.shell(i).is_cartesian()):
            for j in range(basisset.shell(i).nfunction):
                for h in range(nirrep):
                    for k in range(Ca_ao_mo.coldim()[h]):
                        Ca_ao_mo.set(h, count + molden_cartesian_order[am - 1][j], k, temp_a.get(h, count + j, k))
                        Cb_ao_mo.set(h, count + molden_cartesian_order[am - 1][j], k, temp_b.get(h, count + j, k))
        count += basisset.shell(i).nfunction

    # Dump MO information
    if basisset.has_puream():
        # For historical reasons, D and F can go on the same line, but setting D without F implicitly sets F. G must be on its own.
        mol_string += '[5D7F]\n[9G]\n\n'
    ct = mol.point_group().char_table()
    mol_string += '[MO]\n'
    mo_dim = self.nmopi() if do_virtual else (self.doccpi() + self.soccpi())

    # Alphas. If Alphas and Betas are the same, then only Alphas with double
    # occupation will be written (see line marked "***")
    mos = []
    for h in range(nirrep):
        for n in range(mo_dim[h]):
            mos.append((epsilon_a.get(h, n), (h, n)))

    # Sort mos based on energy
    def mosSort(element):
        return element[0]

    mos.sort(key=mosSort)

    for i in range(len(mos)):
        h, n = mos[i][1]
        mol_string += f" Sym= {ct.gamma(h).symbol():s}\n Ene= {epsilon_a.get(h, n):24.10e}\n Spin= Alpha\n"
        if self.same_a_b_orbs() and self.epsilon_a() == self.epsilon_b() and self.same_a_b_dens():
            mol_string += f" Occup= {occupation_a.get(h, n) + occupation_b.get(h, n):24.10e}\n"
        else:
            mol_string += f" Occup= {occupation_a.get(h, n):24.10e}\n"
        for so in range(self.nso()):
            mol_string += f"{so+1:3d} {Ca_ao_mo.get(h, so, n):24.10e}\n"

    # Betas
    mos = []
    if not self.same_a_b_orbs() or self.epsilon_a() != self.epsilon_b() or not self.same_a_b_dens():
        for h in range(nirrep):
            for n in range(mo_dim[h]):
                mos.append((self.epsilon_b().get(h, n), (h, n)))
        mos.sort(key=mosSort)
        for i in range(len(mos)):
            h, n = mos[i][1]
            mol_string += f" Sym= {ct.gamma(h).symbol():s}\n Ene= {epsilon_b.get(h, n):24.10e}\n Spin= Beta\n" \
                          f" Occup= {occupation_b.get(h, n):24.10e}\n"
            for so in range(self.nso()):
                mol_string += f"{so+1:3d} {Cb_ao_mo.get(h, so, n):24.10e}\n"

    # Write Molden string to file
    with open(filename, 'w') as fn:
        fn.write(mol_string)
def run_dftd3(self, func=None, dashlvl=None, dashparam=None, dertype=None, verbose=False): """Function to call Grimme's dftd3 program (http://toc.uni-muenster.de/DFTD3/) to compute the -D correction of level *dashlvl* using parameters for the functional *func*. The dictionary *dashparam* can be used to supply a full set of dispersion parameters in the absense of *func* or to supply individual overrides in the presence of *func*. Returns energy if *dertype* is 0, gradient if *dertype* is 1, else tuple of energy and gradient if *dertype* unspecified. The dftd3 executable must be independently compiled and found in :envvar:`PATH` or :envvar:`PSIPATH`. *self* may be either a qcdb.Molecule (sensibly) or a psi4.Molecule (works b/c psi4.Molecule has been extended by this method py-side and only public interface fns used) or a string that can be instantiated into a qcdb.Molecule. func - functional alias or None dashlvl - functional type d2gr/d3zero/d3bj/d3mzero/d3mbj dashparam - dictionary dertype = derivative level """ # Create (if necessary) and update qcdb.Molecule if isinstance(self, Molecule): # called on a qcdb.Molecule pass elif isinstance(self, core.Molecule): # called on a python export of a psi4.Molecule (py-side through Psi4's driver) self.create_psi4_string_from_molecule() elif isinstance(self, basestring): # called on a string representation of a psi4.Molecule (c-side through psi4.Dispersion) self = Molecule(self) else: raise ValidationError( """Argument mol must be psi4string or qcdb.Molecule""") self.update_geometry() # Validate arguments if dertype is None: dertype = -1 elif der0th.match(str(dertype)): dertype = 0 elif der1st.match(str(dertype)): dertype = 1 elif der2nd.match(str(dertype)): raise ValidationError( 'Requested derivative level \'dertype\' %s not valid for run_dftd3.' % (dertype)) else: raise ValidationError( 'Requested derivative level \'dertype\' %s not valid for run_dftd3.' % (dertype)) if dashlvl is not None: dashlvl = dashlvl.lower() dashlvl = dash_alias['-' + dashlvl][1:] if ( '-' + dashlvl) in dash_alias.keys() else dashlvl if dashlvl not in dashcoeff.keys(): raise ValidationError( """-D correction level %s is not available. Choose among %s.""" % (dashlvl, dashcoeff.keys())) else: raise ValidationError("""Must specify a dashlvl""") if func is not None: dftd3_params = dash_server(func, dashlvl) else: dftd3_params = {} if dashparam is not None: dftd3_params.update(dashparam) # Move ~/.dftd3par.<hostname> out of the way so it won't interfere defaultfile = os.path.expanduser( '~') + '/.dftd3par.' + socket.gethostname() defmoved = False if os.path.isfile(defaultfile): os.rename(defaultfile, defaultfile + '_hide') defmoved = True # Find environment by merging PSIPATH and PATH environment variables lenv = { 'PATH': ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) + \ ':' + os.environ.get('PATH'), 'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH') } # Filter out None values as subprocess will fault on them lenv = {k: v for k, v in lenv.items() if v is not None} # Find out if running from Psi4 for scratch details and such # try: # import psi4 # except ImportError as err: # isP4regime = False # else: # isP4regime = True # Setup unique scratch directory and move in current_directory = os.getcwd() if isP4regime: psioh = core.IOManager.shared_object() psio = core.IO.shared_object() os.chdir(psioh.get_default_path()) dftd3_tmpdir = 'psi.' + str(os.getpid()) + '.' + psio.get_default_namespace() + \ '.dftd3.' 
+ str(uuid.uuid4())[:8] else: dftd3_tmpdir = os.environ['HOME'] + os.sep + 'dftd3_' + str( uuid.uuid4())[:8] if os.path.exists(dftd3_tmpdir) is False: os.mkdir(dftd3_tmpdir) os.chdir(dftd3_tmpdir) # Write dftd3_parameters file that governs dispersion calc paramcontents = dftd3_coeff_formatter(dashlvl, dftd3_params) paramfile1 = 'dftd3_parameters' # older patched name with open(paramfile1, 'w') as handle: handle.write(paramcontents) paramfile2 = '.dftd3par.local' # new mainline name with open(paramfile2, 'w') as handle: handle.write(paramcontents) # Write dftd3_geometry file that supplies geometry to dispersion calc numAtoms = self.natom() # We seem to have a problem with one atom, force the correct result if numAtoms == 1: dashd = 0.0 dashdderiv = core.Matrix(1, 3) if dertype == -1: return dashd, dashdderiv elif dertype == 0: return dashd elif dertype == 1: return dashdderiv geom = self.save_string_xyz() reals = [] for line in geom.splitlines(): lline = line.split() if len(lline) != 4: continue if lline[0] == 'Gh': numAtoms -= 1 else: reals.append(line) geomtext = str(numAtoms) + '\n\n' for line in reals: geomtext += line.strip() + '\n' geomfile = './dftd3_geometry.xyz' with open(geomfile, 'w') as handle: handle.write(geomtext) # TODO somehow the variations on save_string_xyz and # whether natom and chgmult does or doesn't get written # have gotten all tangled. I fear this doesn't work # the same btwn libmints and qcdb or for ghosts # Call dftd3 program command = ['dftd3', geomfile] if dertype != 0: command.append('-grad') try: dashout = subprocess.Popen(command, stdout=subprocess.PIPE, env=lenv) except OSError as e: raise ValidationError('Program dftd3 not found in path. %s' % e) out, err = dashout.communicate() # Parse output (could go further and break into E6, E8, E10 and Cn coeff) success = False for line in out.splitlines(): line = line.decode('utf-8') if re.match(' Edisp /kcal,au', line): sline = line.split() dashd = float(sline[3]) if re.match(' normal termination of dftd3', line): success = True if not success: os.chdir(current_directory) raise Dftd3Error( """Unsuccessful run. Possibly -D variant not available in dftd3 version.""" ) # Parse grad output if dertype != 0: derivfile = './dftd3_gradient' dfile = open(derivfile, 'r') dashdderiv = [] for line in geom.splitlines(): lline = line.split() if len(lline) != 4: continue if lline[0] == 'Gh': dashdderiv.append([0.0, 0.0, 0.0]) else: dashdderiv.append([ float(x.replace('D', 'E')) for x in dfile.readline().split() ]) dfile.close() if len(dashdderiv) != self.natom(): raise ValidationError('Program dftd3 gradient file has %d atoms- %d expected.' 
% \ (len(dashdderiv), self.natom())) # Prepare results for Psi4 if isP4regime and dertype != 0: core.set_variable('DISPERSION CORRECTION ENERGY', dashd) psi_dashdderiv = core.Matrix(self.natom(), 3) psi_dashdderiv.set(dashdderiv) # Print program output to file if verbose if not verbose and isP4regime: verbose = True if core.get_option('SCF', 'PRINT') >= 3 else False if verbose: text = '\n ==> DFTD3 Output <==\n' text += out.decode('utf-8') if dertype != 0: with open(derivfile, 'r') as handle: text += handle.read().replace('D', 'E') text += '\n' if isP4regime: core.print_out(text) else: print(text) # Clean up files and remove scratch directory os.unlink(paramfile1) os.unlink(paramfile2) os.unlink(geomfile) if dertype != 0: os.unlink(derivfile) if defmoved is True: os.rename(defaultfile + '_hide', defaultfile) os.chdir('..') try: shutil.rmtree(dftd3_tmpdir) except OSError as e: ValidationError('Unable to remove dftd3 temporary directory %s' % e) os.chdir(current_directory) # return -D & d(-D)/dx if dertype == -1: return dashd, dashdderiv elif dertype == 0: return dashd elif dertype == 1: return psi_dashdderiv
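# A minimal, standalone sketch of the output-parsing step in run_dftd3 above,
# assuming the same two dftd3 stdout markers (' Edisp /kcal,au' and
# ' normal termination of dftd3'); the helper name is hypothetical.
import re

def _parse_dftd3_energy(out_bytes):
    """Extract Edisp [Eh] from raw dftd3 stdout; raise on unsuccessful runs."""
    dashd = None
    success = False
    for line in out_bytes.splitlines():
        line = line.decode('utf-8')
        if re.match(' Edisp /kcal,au', line):
            # field index 3 holds the dispersion energy in atomic units
            dashd = float(line.split()[3])
        if re.match(' normal termination of dftd3', line):
            success = True
    if not success or dashd is None:
        raise RuntimeError('Unsuccessful run. Possibly -D variant not '
                           'available in dftd3 version.')
    return dashd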
def orig_run_dftd3(mol, func=None, dashlvl=None, dashparam=None, dertype=None, verbose=False): """Compute dispersion correction using Grimme's DFTD3 executable. Function to call Grimme's dftd3 program to compute the -D correction of level `dashlvl` using parameters for the functional `func`. `dashparam` can supply a full set of dispersion parameters in the absence of `func` or individual overrides in the presence of `func`. The DFTD3 executable must be independently compiled and found in :envvar:`PATH` or :envvar:`PSIPATH`. Parameters ---------- mol : qcdb.Molecule or psi4.core.Molecule or str Molecule on which to run dispersion calculation. Both qcdb and psi4.core Molecule classes have been extended by this method, so either allowed. Alternately, a string that can be instantiated into a qcdb.Molecule. func : str or None Density functional (Psi4, not Turbomole, names) for which to load parameters from dashcoeff[dashlvl][func]. This is not passed to DFTD3 and thus may be a dummy or `None`. Any or all parameters initialized can be overwritten via `dashparam`. dashlvl : {'d2p4', 'd2gr', 'd3zero', 'd3bj', 'd3mzero', d3mbj', 'd', 'd2', 'd3', 'd3m'} Flavor of a posteriori dispersion correction for which to load parameters and call procedure in DFTD3. Must be a keys in dashcoeff dict (or a key in dashalias that resolves to one). dashparam : dict, optional Dictionary of the same keys as dashcoeff[dashlvl] used to override any or all values initialized by dashcoeff[dashlvl][func]. dertype : {None, 0, 'none', 'energy', 1, 'first', 'gradient'}, optional Maximum derivative level at which to run DFTD3. For large `mol`, energy-only calculations can be significantly more efficient. Also controls return values, see below. verbose : bool, optional When `True`, additionally include DFTD3 output in output. Returns ------- energy : float, optional When `dertype` is 0, energy [Eh]. gradient : list of lists of floats or psi4.core.Matrix, optional When `dertype` is 1, (nat, 3) gradient [Eh/a0]. (energy, gradient) : float and list of lists of floats or psi4.core.Matrix, optional When `dertype` is unspecified, both energy [Eh] and (nat, 3) gradient [Eh/a0]. 
Notes ----- research site: https://www.chemie.uni-bonn.de/pctc/mulliken-center/software/dft-d3 Psi4 mode: When `psi4` the python module is importable at `import qcdb` time, Psi4 mode is activated, with the following alterations: * output goes to output file * gradient returned as psi4.core.Matrix, not list o'lists * scratch is written to randomly named subdirectory of psi scratch * psivar "DISPERSION CORRECTION ENERGY" is set * `verbose` triggered when PRINT keywork of SCF module >=3 """ # Create (if necessary) and update qcdb.Molecule if isinstance(mol, (Molecule, core.Molecule)): # 1st: called on a qcdb.Molecule # 2nd: called on a python export of a psi4.Molecule (py-side through Psi4's driver) pass elif isinstance(mol, basestring): # called on a string representation of a psi4.Molecule (c-side through psi4.Dispersion) mol = Molecule(mol) else: raise ValidationError( """Argument mol must be psi4string or qcdb.Molecule""") mol.update_geometry() # Validate arguments if dertype is None: dertype = -1 elif der0th.match(str(dertype)): dertype = 0 elif der1st.match(str(dertype)): dertype = 1 elif der2nd.match(str(dertype)): raise ValidationError( """Requested derivative level 'dertype' %s not valid for run_dftd3.""" % (dertype)) else: raise ValidationError( """Requested derivative level 'dertype' %s not valid for run_dftd3.""" % (dertype)) if dashlvl is not None: dashlvl = dashlvl.lower() dashlvl = dash_alias['-' + dashlvl][1:] if ( '-' + dashlvl) in dash_alias.keys() else dashlvl if dashlvl not in dashcoeff.keys(): raise ValidationError( """-D correction level %s is not available. Choose among %s.""" % (dashlvl, dashcoeff.keys())) else: raise ValidationError("""Must specify a dashlvl""") if func is not None: dftd3_params = dash_server(func, dashlvl) else: dftd3_params = {} if dashparam is not None: dftd3_params.update(dashparam) # Move ~/.dftd3par.<hostname> out of the way so it won't interfere defaultfile = os.path.expanduser( '~') + '/.dftd3par.' + socket.gethostname() defmoved = False if os.path.isfile(defaultfile): os.rename(defaultfile, defaultfile + '_hide') defmoved = True # Find environment by merging PSIPATH and PATH environment variables lenv = { 'PATH': ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) + \ ':' + os.environ.get('PATH'), 'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH') } # Filter out None values as subprocess will fault on them lenv = {k: v for k, v in lenv.items() if v is not None} # Find out if running from Psi4 for scratch details and such # try: # import psi4 # except ImportError as err: # isP4regime = False # else: # isP4regime = True # Setup unique scratch directory and move in current_directory = os.getcwd() if isP4regime: psioh = core.IOManager.shared_object() psio = core.IO.shared_object() os.chdir(psioh.get_default_path()) dftd3_tmpdir = 'psi.' + str(os.getpid()) + '.' + psio.get_default_namespace() + \ '.dftd3.' 
+ str(uuid.uuid4())[:8] else: dftd3_tmpdir = os.environ['HOME'] + os.sep + 'dftd3_' + str( uuid.uuid4())[:8] if os.path.exists(dftd3_tmpdir) is False: os.mkdir(dftd3_tmpdir) os.chdir(dftd3_tmpdir) # Write dftd3_parameters file that governs dispersion calc paramcontents = dftd3_coeff_formatter(dashlvl, dftd3_params) paramfile1 = 'dftd3_parameters' # older patched name with open(paramfile1, 'w') as handle: handle.write(paramcontents) paramfile2 = '.dftd3par.local' # new mainline name with open(paramfile2, 'w') as handle: handle.write(paramcontents) # Write dftd3_geometry file that supplies geometry to dispersion calc numAtoms = mol.natom() # We seem to have a problem with one atom, force the correct result if numAtoms == 1: dashd = 0.0 dashdderiv = core.Matrix(1, 3) if dertype == -1: return dashd, dashdderiv elif dertype == 0: return dashd elif dertype == 1: return dashdderiv geom = mol.save_string_xyz() reals = [] for line in geom.splitlines(): lline = line.split() if len(lline) != 4: continue if lline[0] == 'Gh': numAtoms -= 1 else: reals.append(line) geomtext = str(numAtoms) + '\n\n' for line in reals: geomtext += line.strip() + '\n' geomfile = './dftd3_geometry.xyz' with open(geomfile, 'w') as handle: handle.write(geomtext) # TODO somehow the variations on save_string_xyz and # whether natom and chgmult does or doesn't get written # have gotten all tangled. I fear this doesn't work # the same btwn libmints and qcdb or for ghosts # Call dftd3 program command = ['dftd3', geomfile] if dertype != 0: command.append('-grad') try: dashout = subprocess.Popen(command, stdout=subprocess.PIPE, env=lenv) except OSError as e: raise ValidationError('Program dftd3 not found in path. %s' % e) out, err = dashout.communicate() # Parse output (could go further and break into E6, E8, E10 and Cn coeff) success = False for line in out.splitlines(): line = line.decode('utf-8') if re.match(' Edisp /kcal,au', line): sline = line.split() dashd = float(sline[3]) if re.match(' normal termination of dftd3', line): success = True if not success: os.chdir(current_directory) raise Dftd3Error( """Unsuccessful run. Possibly -D variant not available in dftd3 version.""" ) # Parse grad output if dertype != 0: derivfile = './dftd3_gradient' dfile = open(derivfile, 'r') dashdderiv = [] for line in geom.splitlines(): lline = line.split() if len(lline) != 4: continue if lline[0] == 'Gh': dashdderiv.append([0.0, 0.0, 0.0]) else: dashdderiv.append([ float(x.replace('D', 'E')) for x in dfile.readline().split() ]) dfile.close() if len(dashdderiv) != mol.natom(): raise ValidationError('Program dftd3 gradient file has %d atoms- %d expected.' 
% \ (len(dashdderiv), mol.natom())) # Prepare results for Psi4 if isP4regime and dertype != 0: core.set_variable('DISPERSION CORRECTION ENERGY', dashd) psi_dashdderiv = core.Matrix.from_list(dashdderiv) # Print program output to file if verbose if not verbose and isP4regime: verbose = True if core.get_option('SCF', 'PRINT') >= 3 else False if verbose: text = '\n ==> DFTD3 Output <==\n' text += out.decode('utf-8') if dertype != 0: with open(derivfile, 'r') as handle: text += handle.read().replace('D', 'E') text += '\n' if isP4regime: core.print_out(text) else: print(text) # Clean up files and remove scratch directory os.unlink(paramfile1) os.unlink(paramfile2) os.unlink(geomfile) if dertype != 0: os.unlink(derivfile) if defmoved is True: os.rename(defaultfile + '_hide', defaultfile) os.chdir('..') try: shutil.rmtree(dftd3_tmpdir) except OSError as e: ValidationError('Unable to remove dftd3 temporary directory %s' % e) os.chdir(current_directory) # return -D & d(-D)/dx if dertype == -1: return dashd, dashdderiv elif dertype == 0: return dashd elif dertype == 1: return psi_dashdderiv
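# A hypothetical helper making the dertype encoding explicit. The functions
# above validate dertype against module-level regexes (der0th, der1st,
# der2nd); this sketch instead accepts the documented spellings directly and
# maps them onto the internal -1/0/1 convention used for the return values.
def _normalize_dertype(dertype):
    """Map dertype spellings onto -1 (both), 0 (energy), or 1 (gradient)."""
    if dertype is None:
        return -1  # unspecified: return both energy and gradient
    key = str(dertype).lower()
    if key in ('0', 'none', 'energy'):
        return 0
    if key in ('1', 'first', 'gradient'):
        return 1
    raise ValueError(
        "Requested derivative level 'dertype' %s not valid for run_dftd3." % dertype)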
def _sapt_cpscf_solve(cache, jk, rhsA, rhsB, maxiter, conv): """ Solve the SAPT CPHF (or CPKS) equations. """ # Make a preconditioner function P_A = core.Matrix(cache["eps_occ_A"].shape[0], cache["eps_vir_A"].shape[0]) P_A.np[:] = (cache["eps_occ_A"].np.reshape(-1, 1) - cache["eps_vir_A"].np) P_B = core.Matrix(cache["eps_occ_B"].shape[0], cache["eps_vir_B"].shape[0]) P_B.np[:] = (cache["eps_occ_B"].np.reshape(-1, 1) - cache["eps_vir_B"].np) # Preconditioner function def apply_precon(x_vec, act_mask): if act_mask[0]: pA = x_vec[0].clone() pA.apply_denominator(P_A) else: pA = False if act_mask[1]: pB = x_vec[1].clone() pB.apply_denominator(P_B) else: pB = False return [pA, pB] # Hx function def hessian_vec(x_vec, act_mask): if act_mask[0]: xA = cache["wfn_A"].cphf_Hx([x_vec[0]])[0] else: xA = False if act_mask[1]: xB = cache["wfn_B"].cphf_Hx([x_vec[1]])[0] else: xB = False return [xA, xB] # Manipulate the printing sep_size = 51 core.print_out(" " + ("-" * sep_size) + "\n") core.print_out(" " + "SAPT Coupled Induction Solver".center(sep_size) + "\n") core.print_out(" " + ("-" * sep_size) + "\n") core.print_out(" Maxiter = %11d\n" % maxiter) core.print_out(" Convergence = %11.3E\n" % conv) core.print_out(" " + ("-" * sep_size) + "\n") tstart = time.time() core.print_out(" %4s %12s %12s %9s\n" % ("Iter", "(A<-B)", "(B->A)", "Time [s]")) core.print_out(" " + ("-" * sep_size) + "\n") start_resid = [rhsA.sum_of_squares(), rhsB.sum_of_squares()] # print function def pfunc(niter, x_vec, r_vec): if niter == 0: niter = "Guess" else: niter = ("%5d" % niter) # Compute IndAB valA = (r_vec[0].sum_of_squares() / start_resid[0]) ** 0.5 if valA < conv: cA = "*" else: cA = " " # Compute IndBA valB = (r_vec[1].sum_of_squares() / start_resid[1]) ** 0.5 if valB < conv: cB = "*" else: cB = " " core.print_out(" %5s %15.6e%1s %15.6e%1s %9d\n" % (niter, valA, cA, valB, cB, time.time() - tstart)) return [valA, valB] # Compute the solver vecs, resid = solvers.cg_solver( [rhsA, rhsB], hessian_vec, apply_precon, maxiter=maxiter, rcond=conv, printlvl=0, printer=pfunc) core.print_out(" " + ("-" * sep_size) + "\n") return vecs
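# The preconditioner above divides a trial vector elementwise by the
# orbital-energy denominator (eps_occ_i - eps_vir_a); applied once to the
# right-hand side it yields the uncoupled first-order guess. A NumPy sketch
# with hypothetical arrays standing in for the psi4 cache entries:
import numpy as np

def uncoupled_guess(w_ov, eps_occ, eps_vir):
    """Uncoupled CPHF amplitudes: x_ia = w_ia / (eps_i - eps_a)."""
    denom = eps_occ.reshape(-1, 1) - eps_vir  # (nocc, nvir), negative definite
    return w_ov / denom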
def __call__(self, mol1_wfn, mol2_wfn):
    nbf = self.p.dimer_basis.nbf()
    nbf1 = mol1_wfn.nso()
    nbf2 = mol2_wfn.nso()

    # Factor monomer 1's density so the JK builder can form its Coulomb
    # matrix in the dimer basis: Da = C_left @ C_right.T
    U, S, VT = np.linalg.svd(mol1_wfn.Da())
    C_left = np.dot(U, np.diag(np.sqrt(S)))
    C_right = np.dot(VT.T, np.diag(np.sqrt(S)))

    C_left_ = core.Matrix(nbf, max(nbf1, nbf2))
    C_right_ = core.Matrix(nbf, max(nbf1, nbf2))
    C_left_.np[:nbf1, :nbf1] = C_left[:, :]
    C_right_.np[:nbf1, :nbf1] = C_right[:, :]

    self.jk.C_clear()
    self.jk.C_left_add(C_left_)
    self.jk.C_right_add(C_right_)
    self.jk.compute()

    J = self.jk.J()[0]
    D1 = self.jk.D()[0]
    assert np.max(np.abs(D1.np[:nbf1, :nbf1] - mol1_wfn.Da().np)) < 1e-10

    # Electron-electron and nuclear-electron interaction of 1 acting on 2
    J_1to2 = J.np[nbf1:, nbf1:]
    elel_1to2 = 2 * np.sum(J_1to2 * mol2_wfn.Da())
    nuel_1to2 = 2 * (self.p.dimer_V.vector_dot(D1) -
                     self.p.monomer1_V.vector_dot(mol1_wfn.Da()))
    ovlp1 = core.Matrix.doublet(self.p.dimer_S, D1, False, False)

    #######################################################################

    # Same procedure for monomer 2, placed in the lower-right block
    U, S, VT = np.linalg.svd(mol2_wfn.Da())
    C_left = np.dot(U, np.diag(np.sqrt(S)))
    C_right = np.dot(VT.T, np.diag(np.sqrt(S)))

    C_left_ = core.Matrix(nbf, max(nbf1, nbf2))
    C_right_ = core.Matrix(nbf, max(nbf1, nbf2))
    C_left_.np[-nbf2:, -nbf2:] = C_left[:, :]
    C_right_.np[-nbf2:, -nbf2:] = C_right[:, :]

    self.jk.C_clear()
    self.jk.C_left_add(C_left_)
    self.jk.C_right_add(C_right_)
    self.jk.compute()

    J = self.jk.J()[0]
    D2 = self.jk.D()[0]
    assert np.max(np.abs(D2.np[nbf1:, nbf1:] - mol2_wfn.Da().np)) < 1e-10

    J_2to1 = J.np[:nbf1, :nbf1]
    elel_2to1 = 2 * np.sum(J_2to1 * mol1_wfn.Da())
    nuel_2to1 = 2 * (self.p.dimer_V.vector_dot(D2) -
                     self.p.monomer2_V.vector_dot(mol2_wfn.Da()))
    ovlp2 = core.Matrix.doublet(self.p.dimer_S, D2, False, False)

    overlap = 4 * np.sum(ovlp1.np * ovlp2.np.T)
    # assert abs(elel_1to2 - elel_2to1) < 1e-10

    electrostatic = (self.p.nuclear_interaction_energy +
                     nuel_1to2 + nuel_2to1 + elel_1to2 + elel_2to1)
    return electrostatic, overlap
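# The SVD factorization used twice above rewrites a (generally
# non-idempotent) density D as C_left @ C_right.T so a JK builder that
# expects orbital-coefficient pairs can digest it. A standalone NumPy check
# of that identity, with a made-up 2x2 symmetric density:
import numpy as np

def split_density(D):
    """Factor D as C_left @ C_right.T via SVD (exact for any matrix)."""
    U, S, VT = np.linalg.svd(D)
    C_left = np.dot(U, np.diag(np.sqrt(S)))
    C_right = np.dot(VT.T, np.diag(np.sqrt(S)))
    return C_left, C_right

D = np.array([[2.0, 0.5],
              [0.5, 1.0]])
C_l, C_r = split_density(D)
assert np.allclose(np.dot(C_l, C_r.T), D)  # factorization reproduces D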
def exchange(cache, jk, do_print=True): """ Computes the E10 exchange (S^2 and S^inf) from a build_sapt_jk_cache datacache. """ if do_print: core.print_out("\n ==> E10 Exchange <== \n\n") # Build potenitals h_A = cache["V_A"].clone() h_A.axpy(2.0, cache["J_A"]) h_A.axpy(-1.0, cache["K_A"]) h_B = cache["V_B"].clone() h_B.axpy(2.0, cache["J_B"]) h_B.axpy(-1.0, cache["K_B"]) w_A = cache["V_A"].clone() w_A.axpy(2.0, cache["J_A"]) w_B = cache["V_B"].clone() w_B.axpy(2.0, cache["J_B"]) # Build inverse exchange metric nocc_A = cache["Cocc_A"].shape[1] nocc_B = cache["Cocc_B"].shape[1] SAB = core.Matrix.triplet( cache["Cocc_A"], cache["S"], cache["Cocc_B"], True, False, False) num_occ = nocc_A + nocc_B Sab = core.Matrix(num_occ, num_occ) Sab.np[:nocc_A, nocc_A:] = SAB.np Sab.np[nocc_A:, :nocc_A] = SAB.np.T Sab.np[np.diag_indices_from(Sab.np)] += 1 Sab.power(-1.0, 1.e-14) Sab.np[np.diag_indices_from(Sab.np)] -= 1.0 Tmo_AA = core.Matrix.from_array(Sab.np[:nocc_A, :nocc_A]) Tmo_BB = core.Matrix.from_array(Sab.np[nocc_A:, nocc_A:]) Tmo_AB = core.Matrix.from_array(Sab.np[:nocc_A, nocc_A:]) T_A = np.dot(cache["Cocc_A"], Tmo_AA).dot(cache["Cocc_A"].np.T) T_B = np.dot(cache["Cocc_B"], Tmo_BB).dot(cache["Cocc_B"].np.T) T_AB = np.dot(cache["Cocc_A"], Tmo_AB).dot(cache["Cocc_B"].np.T) S = cache["S"] D_A = cache["D_A"] P_A = cache["P_A"] D_B = cache["D_B"] P_B = cache["P_B"] # Compute the J and K matrices jk.C_clear() jk.C_left_add(cache["Cocc_A"]) jk.C_right_add(core.Matrix.doublet(cache["Cocc_A"], Tmo_AA, False, False)) jk.C_left_add(cache["Cocc_B"]) jk.C_right_add(core.Matrix.doublet(cache["Cocc_A"], Tmo_AB, False, False)) jk.C_left_add(cache["Cocc_A"]) jk.C_right_add(core.Matrix.chain_dot(P_B, S, cache["Cocc_A"])) jk.compute() JT_A, JT_AB, Jij = jk.J() KT_A, KT_AB, Kij = jk.K() # Start S^2 Exch_s2 = 0.0 tmp = core.Matrix.chain_dot(D_A, S, D_B, S, P_A) Exch_s2 -= 2.0 * w_B.vector_dot(tmp) tmp = core.Matrix.chain_dot(D_B, S, D_A, S, P_B) Exch_s2 -= 2.0 * w_A.vector_dot(tmp) tmp = core.Matrix.chain_dot(P_A, S, D_B) Exch_s2 -= 2.0 * Kij.vector_dot(tmp) if do_print: core.print_out(print_sapt_var("Exch10(S^2) ", Exch_s2, short=True)) core.print_out("\n") # Start Sinf Exch10 = 0.0 Exch10 -= 2.0 * np.vdot(cache["D_A"], cache["K_B"]) Exch10 += 2.0 * np.vdot(T_A, h_B.np) Exch10 += 2.0 * np.vdot(T_B, h_A.np) Exch10 += 2.0 * np.vdot(T_AB, h_A.np + h_B.np) Exch10 += 4.0 * np.vdot(T_B, JT_AB.np - 0.5 * KT_AB.np) Exch10 += 4.0 * np.vdot(T_A, JT_AB.np - 0.5 * KT_AB.np) Exch10 += 4.0 * np.vdot(T_B, JT_A.np - 0.5 * KT_A.np) Exch10 += 4.0 * np.vdot(T_AB, JT_AB.np - 0.5 * KT_AB.np.T) if do_print: core.set_variable("Exch10", Exch10) core.print_out(print_sapt_var("Exch10", Exch10, short=True)) core.print_out("\n") return {"Exch10(S^2)": Exch_s2, "Exch10": Exch10}
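# The T matrices above come from inverting the occupied-occupied overlap
# metric and subtracting the identity. A small NumPy illustration with a
# made-up inter-monomer overlap block SAB; np.linalg.inv stands in for the
# cutoff pseudo-inverse Sab.power(-1.0, 1.e-14) on a well-conditioned metric.
import numpy as np

nocc_A, nocc_B = 2, 2
SAB = 0.1 * np.ones((nocc_A, nocc_B))  # hypothetical Cocc_A.T @ S @ Cocc_B

num_occ = nocc_A + nocc_B
Sab = np.eye(num_occ)                  # unit diagonal, as in exchange()
Sab[:nocc_A, nocc_A:] = SAB
Sab[nocc_A:, :nocc_A] = SAB.T

T = np.linalg.inv(Sab) - np.eye(num_occ)
Tmo_AA = T[:nocc_A, :nocc_A]
Tmo_AB = T[:nocc_A, nocc_A:]
Tmo_BB = T[nocc_A:, nocc_A:]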
def induction(cache, jk, do_print=True, maxiter=12, conv=1.e-8, do_response=True, Sinf=False): """ Compute Ind20 and Exch-Ind20 quantities from a SAPT cache and JK object. """ if do_print: core.print_out("\n ==> E20 Induction <== \n\n") # Build Induction and Exchange-Induction potentials S = cache["S"] D_A = cache["D_A"] V_A = cache["V_A"] J_A = cache["J_A"] K_A = cache["K_A"] D_B = cache["D_B"] V_B = cache["V_B"] J_B = cache["J_B"] K_B = cache["K_B"] K_O = cache["K_O"] J_O = cache["J_O"] jk.C_clear() jk.C_left_add(core.Matrix.chain_dot(D_B, S, cache["Cocc_A"])) jk.C_right_add(cache["Cocc_A"]) jk.C_left_add(core.Matrix.chain_dot(D_B, S, D_A, S, cache["Cocc_B"])) jk.C_right_add(cache["Cocc_B"]) jk.C_left_add(core.Matrix.chain_dot(D_A, S, D_B, S, cache["Cocc_A"])) jk.C_right_add(cache["Cocc_A"]) jk.compute() J_Ot, J_P_B, J_P_A = jk.J() K_Ot, K_P_B, K_P_A = jk.K() # Exch-Ind Potential A EX_A = K_B.clone() EX_A.scale(-1.0) EX_A.axpy(-2.0, core.Matrix.chain_dot(S, D_B, J_A)) EX_A.axpy(1.0, K_O) EX_A.axpy(-2.0, J_O) EX_A.axpy(1.0, core.Matrix.chain_dot(S, D_B, K_A)) EX_A.axpy(-2.0, core.Matrix.chain_dot(J_B, D_B, S)) EX_A.axpy(1.0, core.Matrix.chain_dot(K_B, D_B, S)) EX_A.axpy(2.0, core.Matrix.chain_dot(S, D_B, J_A, D_B, S)) EX_A.axpy(2.0, core.Matrix.chain_dot(J_B, D_A, S, D_B, S)) EX_A.axpy(-1.0, core.Matrix.chain_dot(K_O, D_B, S)) EX_A.axpy(2.0, J_P_B) EX_A.axpy(2.0, core.Matrix.chain_dot(S, D_B, S, D_A, J_B)) EX_A.axpy(-1.0, core.Matrix.chain_dot(S, D_B, K_O, trans=[False, False, True])) EX_A.axpy(-1.0, core.Matrix.chain_dot(S, D_B, V_A)) EX_A.axpy(-1.0, core.Matrix.chain_dot(V_B, D_B, S)) EX_A.axpy(1.0, core.Matrix.chain_dot(S, D_B, V_A, D_B, S)) EX_A.axpy(1.0, core.Matrix.chain_dot(V_B, D_A, S, D_B, S)) EX_A.axpy(1.0, core.Matrix.chain_dot(S, D_B, S, D_A, V_B)) EX_A = core.Matrix.chain_dot( cache["Cocc_A"], EX_A, cache["Cvir_A"], trans=[True, False, False]) # Exch-Ind Potential B EX_B = K_A.clone() EX_B.scale(-1.0) EX_B.axpy(-2.0, core.Matrix.chain_dot(S, D_A, J_B)) EX_B.axpy(1.0, K_O.transpose()) EX_B.axpy(-2.0, J_O) EX_B.axpy(1.0, core.Matrix.chain_dot(S, D_A, K_B)) EX_B.axpy(-2.0, core.Matrix.chain_dot(J_A, D_A, S)) EX_B.axpy(1.0, core.Matrix.chain_dot(K_A, D_A, S)) EX_B.axpy(2.0, core.Matrix.chain_dot(S, D_A, J_B, D_A, S)) EX_B.axpy(2.0, core.Matrix.chain_dot(J_A, D_B, S, D_A, S)) EX_B.axpy(-1.0, core.Matrix.chain_dot(K_O, D_A, S, trans=[True, False, False])) EX_B.axpy(2.0, J_P_A) EX_B.axpy(2.0, core.Matrix.chain_dot(S, D_A, S, D_B, J_A)) EX_B.axpy(-1.0, core.Matrix.chain_dot(S, D_A, K_O)) EX_B.axpy(-1.0, core.Matrix.chain_dot(S, D_A, V_B)) EX_B.axpy(-1.0, core.Matrix.chain_dot(V_A, D_A, S)) EX_B.axpy(1.0, core.Matrix.chain_dot(S, D_A, V_B, D_A, S)) EX_B.axpy(1.0, core.Matrix.chain_dot(V_A, D_B, S, D_A, S)) EX_B.axpy(1.0, core.Matrix.chain_dot(S, D_A, S, D_B, V_A)) EX_B = core.Matrix.chain_dot( cache["Cocc_B"], EX_B, cache["Cvir_B"], trans=[True, False, False]) # Build electrostatic potenital w_A = cache["V_A"].clone() w_A.axpy(2.0, cache["J_A"]) w_B = cache["V_B"].clone() w_B.axpy(2.0, cache["J_B"]) w_B_MOA = core.Matrix.triplet( cache["Cocc_A"], w_B, cache["Cvir_A"], True, False, False) w_A_MOB = core.Matrix.triplet( cache["Cocc_B"], w_A, cache["Cvir_B"], True, False, False) # Do uncoupled core.print_out(" => Uncoupled Induction <= \n\n") unc_x_B_MOA = w_B_MOA.clone() unc_x_B_MOA.np[ :] /= (cache["eps_occ_A"].np.reshape(-1, 1) - cache["eps_vir_A"].np) unc_x_A_MOB = w_A_MOB.clone() unc_x_A_MOB.np[ :] /= (cache["eps_occ_B"].np.reshape(-1, 1) - cache["eps_vir_B"].np) unc_ind_ab = 2.0 * 
unc_x_B_MOA.vector_dot(w_B_MOA) unc_ind_ba = 2.0 * unc_x_A_MOB.vector_dot(w_A_MOB) unc_indexch_ab = 2.0 * unc_x_B_MOA.vector_dot(EX_A) unc_indexch_ba = 2.0 * unc_x_A_MOB.vector_dot(EX_B) ret = {} ret["Ind20,u (A<-B)"] = unc_ind_ab ret["Ind20,u (A->B)"] = unc_ind_ba ret["Ind20,u"] = unc_ind_ab + unc_ind_ba ret["Exch-Ind20,u (A<-B)"] = unc_indexch_ab ret["Exch-Ind20,u (A->B)"] = unc_indexch_ba ret["Exch-Ind20,u"] = unc_indexch_ba + unc_indexch_ab plist = ["Ind20,u (A<-B)", "Ind20,u (A->B)", "Ind20,u", "Exch-Ind20,u (A<-B)", "Exch-Ind20,u (A->B)", "Exch-Ind20,u"] if do_print: for name in plist: # core.set_variable(name, ret[name]) core.print_out(print_sapt_var(name, ret[name], short=True)) core.print_out("\n") # Exch-Ind without S^2 if Sinf: nocc_A = cache["Cocc_A"].shape[1] nocc_B = cache["Cocc_B"].shape[1] SAB = core.Matrix.triplet( cache["Cocc_A"], cache["S"], cache["Cocc_B"], True, False, False) num_occ = nocc_A + nocc_B Sab = core.Matrix(num_occ, num_occ) Sab.np[:nocc_A, nocc_A:] = SAB.np Sab.np[nocc_A:, :nocc_A] = SAB.np.T Sab.np[np.diag_indices_from(Sab.np)] += 1 Sab.power(-1.0, 1.e-14) Tmo_AA = core.Matrix.from_array(Sab.np[:nocc_A, :nocc_A]) Tmo_BB = core.Matrix.from_array(Sab.np[nocc_A:, nocc_A:]) Tmo_AB = core.Matrix.from_array(Sab.np[:nocc_A, nocc_A:]) T_A = core.Matrix.triplet(cache["Cocc_A"], Tmo_AA, cache["Cocc_A"], False, False, True) T_B = core.Matrix.triplet(cache["Cocc_B"], Tmo_BB, cache["Cocc_B"], False, False, True) T_AB = core.Matrix.triplet(cache["Cocc_A"], Tmo_AB, cache["Cocc_B"], False, False, True) sT_A = core.Matrix.chain_dot(cache["Cvir_A"], unc_x_B_MOA, Tmo_AA, cache["Cocc_A"], trans=[False, True, False, True]) sT_B = core.Matrix.chain_dot(cache["Cvir_B"], unc_x_A_MOB, Tmo_BB, cache["Cocc_B"], trans=[False, True, False, True]) sT_AB = core.Matrix.chain_dot(cache["Cvir_A"], unc_x_B_MOA, Tmo_AB, cache["Cocc_B"], trans=[False, True, False, True]) sT_BA = core.Matrix.chain_dot(cache["Cvir_B"], unc_x_A_MOB, Tmo_AB, cache["Cocc_A"], trans=[False, True, True, True]) jk.C_clear() jk.C_left_add(core.Matrix.chain_dot(cache["Cocc_A"], Tmo_AA)) jk.C_right_add(cache["Cocc_A"]) jk.C_left_add(core.Matrix.chain_dot(cache["Cocc_B"], Tmo_BB)) jk.C_right_add(cache["Cocc_B"]) jk.C_left_add(core.Matrix.chain_dot(cache["Cocc_A"], Tmo_AB)) jk.C_right_add(cache["Cocc_B"]) jk.compute() J_AA_inf, J_BB_inf, J_AB_inf = jk.J() K_AA_inf, K_BB_inf, K_AB_inf = jk.K() # A <- B EX_AA_inf = V_B.clone() EX_AA_inf.axpy(-1.00, core.Matrix.chain_dot(S, T_AB, V_B, trans=[False, True, False])) EX_AA_inf.axpy(-1.00, core.Matrix.chain_dot(S, T_B, V_B)) EX_AA_inf.axpy(2.00, J_AB_inf) EX_AA_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_AB, J_AB_inf, trans=[False, True, False])) EX_AA_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_B, J_AB_inf)) EX_AA_inf.axpy(2.00, J_BB_inf) EX_AA_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_AB, J_BB_inf, trans=[False, True, False])) EX_AA_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_B, J_BB_inf)) EX_AA_inf.axpy(-1.00, K_AB_inf.transpose()) EX_AA_inf.axpy(1.00, core.Matrix.chain_dot(S, T_AB, K_AB_inf, trans=[False, True, True])) EX_AA_inf.axpy(1.00, core.Matrix.chain_dot(S, T_B, K_AB_inf, trans=[False, False, True])) EX_AA_inf.axpy(-1.00, K_BB_inf) EX_AA_inf.axpy(1.00, core.Matrix.chain_dot(S, T_AB, K_BB_inf, trans=[False, True, False])) EX_AA_inf.axpy(1.00, core.Matrix.chain_dot(S, T_B, K_BB_inf)) EX_AB_inf = V_A.clone() EX_AB_inf.axpy(-1.00, core.Matrix.chain_dot(S, T_AB, V_A, trans=[False, True, False])) EX_AB_inf.axpy(-1.00, core.Matrix.chain_dot(S, T_B, V_A)) EX_AB_inf.axpy(2.00, 
J_AA_inf) EX_AB_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_AB, J_AA_inf, trans=[False, True, False])) EX_AB_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_B, J_AA_inf)) EX_AB_inf.axpy(2.00, J_AB_inf) EX_AB_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_AB, J_AB_inf, trans=[False, True, False])) EX_AB_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_B, J_AB_inf)) EX_AB_inf.axpy(-1.00, K_AA_inf) EX_AB_inf.axpy(1.00, core.Matrix.chain_dot(S, T_AB, K_AA_inf, trans=[False, True, False])) EX_AB_inf.axpy(1.00, core.Matrix.chain_dot(S, T_B, K_AA_inf)) EX_AB_inf.axpy(-1.00, K_AB_inf) EX_AB_inf.axpy(1.00, core.Matrix.chain_dot(S, T_AB, K_AB_inf, trans=[False, True, False])) EX_AB_inf.axpy(1.00, core.Matrix.chain_dot(S, T_B, K_AB_inf)) # B <- A EX_BB_inf = V_A.clone() EX_BB_inf.axpy(-1.00, core.Matrix.chain_dot(S, T_AB, V_A)) EX_BB_inf.axpy(-1.00, core.Matrix.chain_dot(S, T_A, V_A)) EX_BB_inf.axpy(2.00, J_AB_inf) EX_BB_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_AB, J_AB_inf)) EX_BB_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_A, J_AB_inf)) EX_BB_inf.axpy(2.00, J_AA_inf) EX_BB_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_AB, J_AA_inf)) EX_BB_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_A, J_AA_inf)) EX_BB_inf.axpy(-1.00, K_AB_inf) EX_BB_inf.axpy(1.00, core.Matrix.chain_dot(S, T_AB, K_AB_inf)) EX_BB_inf.axpy(1.00, core.Matrix.chain_dot(S, T_A, K_AB_inf)) EX_BB_inf.axpy(-1.00, K_AA_inf) EX_BB_inf.axpy(1.00, core.Matrix.chain_dot(S, T_AB, K_AA_inf)) EX_BB_inf.axpy(1.00, core.Matrix.chain_dot(S, T_A, K_AA_inf)) EX_BA_inf = V_B.clone() EX_BA_inf.axpy(-1.00, core.Matrix.chain_dot(S, T_AB, V_B)) EX_BA_inf.axpy(-1.00, core.Matrix.chain_dot(S, T_A, V_B)) EX_BA_inf.axpy(2.00, J_BB_inf) EX_BA_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_AB, J_BB_inf)) EX_BA_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_A, J_BB_inf)) EX_BA_inf.axpy(2.00, J_AB_inf) EX_BA_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_AB, J_AB_inf)) EX_BA_inf.axpy(-2.00, core.Matrix.chain_dot(S, T_A, J_AB_inf)) EX_BA_inf.axpy(-1.00, K_BB_inf) EX_BA_inf.axpy(1.00, core.Matrix.chain_dot(S, T_AB, K_BB_inf)) EX_BA_inf.axpy(1.00, core.Matrix.chain_dot(S, T_A, K_BB_inf)) EX_BA_inf.axpy(-1.00, K_AB_inf.transpose()) EX_BA_inf.axpy(1.00, core.Matrix.chain_dot(S, T_AB, K_AB_inf, trans=[False, False, True])) EX_BA_inf.axpy(1.00, core.Matrix.chain_dot(S, T_A, K_AB_inf, trans=[False, False, True])) unc_ind_ab_total = 2.0 * (sT_A.vector_dot(EX_AA_inf) + sT_AB.vector_dot(EX_AB_inf)) unc_ind_ba_total = 2.0 * (sT_B.vector_dot(EX_BB_inf) + sT_BA.vector_dot(EX_BA_inf)) unc_indexch_ab_inf = unc_ind_ab_total - unc_ind_ab unc_indexch_ba_inf = unc_ind_ba_total - unc_ind_ba ret["Exch-Ind20,u (A<-B) (S^inf)"] = unc_indexch_ab_inf ret["Exch-Ind20,u (A->B) (S^inf)"] = unc_indexch_ba_inf ret["Exch-Ind20,u (S^inf)"] = unc_indexch_ba_inf + unc_indexch_ab_inf if do_print: for name in plist[3:]: name = name + ' (S^inf)' core.print_out(print_sapt_var(name, ret[name], short=True)) core.print_out("\n") # Do coupled if do_response: core.print_out("\n => Coupled Induction <= \n\n") x_B_MOA, x_A_MOB = _sapt_cpscf_solve( cache, jk, w_B_MOA, w_A_MOB, 20, 1.e-6) ind_ab = 2.0 * x_B_MOA.vector_dot(w_B_MOA) ind_ba = 2.0 * x_A_MOB.vector_dot(w_A_MOB) indexch_ab = 2.0 * x_B_MOA.vector_dot(EX_A) indexch_ba = 2.0 * x_A_MOB.vector_dot(EX_B) ret["Ind20,r (A<-B)"] = ind_ab ret["Ind20,r (A->B)"] = ind_ba ret["Ind20,r"] = ind_ab + ind_ba ret["Exch-Ind20,r (A<-B)"] = indexch_ab ret["Exch-Ind20,r (A->B)"] = indexch_ba ret["Exch-Ind20,r"] = indexch_ba + indexch_ab if do_print: core.print_out("\n") for name in plist: name = 
name.replace(",u", ",r") # core.set_variable(name, ret[name]) core.print_out(print_sapt_var(name, ret[name], short=True)) core.print_out("\n") # Exch-Ind without S^2 if Sinf: cT_A = core.Matrix.chain_dot(cache["Cvir_A"], x_B_MOA, Tmo_AA, cache["Cocc_A"], trans=[False, True, False, True]) cT_B = core.Matrix.chain_dot(cache["Cvir_B"], x_A_MOB, Tmo_BB, cache["Cocc_B"], trans=[False, True, False, True]) cT_AB = core.Matrix.chain_dot(cache["Cvir_A"], x_B_MOA, Tmo_AB, cache["Cocc_B"], trans=[False, True, False, True]) cT_BA = core.Matrix.chain_dot(cache["Cvir_B"], x_A_MOB, Tmo_AB, cache["Cocc_A"], trans=[False, True, True, True]) ind_ab_total = 2.0 * (cT_A.vector_dot(EX_AA_inf) + cT_AB.vector_dot(EX_AB_inf)) ind_ba_total = 2.0 * (cT_B.vector_dot(EX_BB_inf) + cT_BA.vector_dot(EX_BA_inf)) indexch_ab_inf = ind_ab_total - ind_ab indexch_ba_inf = ind_ba_total - ind_ba ret["Exch-Ind20,r (A<-B) (S^inf)"] = indexch_ab_inf ret["Exch-Ind20,r (A->B) (S^inf)"] = indexch_ba_inf ret["Exch-Ind20,r (S^inf)"] = indexch_ba_inf + indexch_ab_inf if do_print: for name in plist[3:]: name = name.replace(",u", ",r") + ' (S^inf)' core.print_out(print_sapt_var(name, ret[name], short=True)) core.print_out("\n") return ret
def extrapolate(self):

    # Limit size of DIIS vector
    diis_count = len(self.vector)

    if diis_count == 0:
        raise Exception("DIIS: No previous vectors.")
    if diis_count == 1:
        return self.vector[0]

    if diis_count > self.max_vec:
        # Remove oldest vector
        del self.vector[0]
        del self.error[0]
        diis_count -= 1

    # Build error matrix B
    B = np.empty((diis_count + 1, diis_count + 1))
    B[-1, :] = 1
    B[:, -1] = 1
    B[-1, -1] = 0
    for num1, e1 in enumerate(self.error):
        B[num1, num1] = e1.vector_dot(e1)
        for num2, e2 in enumerate(self.error):
            if num2 >= num1:
                continue
            val = e1.vector_dot(e2)
            B[num1, num2] = B[num2, num1] = val

    # Build residual vector
    resid = np.zeros(diis_count + 1)
    resid[-1] = 1

    # Solve Pulay equations. The raw system can be ill-conditioned, so
    # rescale by the diagonal and invert through a filtered eigensolve.
    iszero = np.any(np.diag(B)[:-1] <= 0.0)
    if iszero:
        S = np.ones(diis_count + 1)
    else:
        S = np.ones(diis_count + 1)
        S[:-1] = np.diag(B)[:-1]
        S = S**-0.5
        S[-1] = 1

    B *= S[:, None] * S

    eigvals, eigvecs = np.linalg.eigh(B)
    maxval = np.max(np.abs(eigvals[[0, -1]])) * 1.e-12

    # If the relative eigenvalue is too small, zero it out
    eigvals[np.abs(eigvals) < maxval] = 0

    # Make sure we don't invert actual zeros!
    nonzero = np.abs(eigvals) > 1.e-16
    eigvals[nonzero] = eigvals[nonzero]**-1

    invB = np.dot(eigvecs * eigvals, eigvecs.T)
    ci = np.dot(invB, resid) * S

    # Combination of previous Fock matrices (all stored vectors share dims)
    V = core.Matrix("DIIS result", self.vector[0].rowdim(), self.vector[0].coldim())
    for num, c in enumerate(ci[:-1]):
        V.axpy(c, self.vector[num])

    return V
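# For reference, extrapolate() solves the Pulay equations B c = r with
# r = (0, ..., 0, 1), where B holds error-vector dot products bordered by a
# Lagrange row enforcing sum(c) = 1. A tiny dense example with made-up
# <e_i|e_j> values, skipping the rescaling and pseudo-inverse safeguards:
import numpy as np

e_dots = np.array([[1.0e-2, 2.0e-3],     # hypothetical error overlaps
                   [2.0e-3, 5.0e-3]])
B = np.ones((3, 3))
B[:2, :2] = e_dots
B[-1, -1] = 0.0

resid = np.zeros(3)
resid[-1] = 1.0

ci = np.linalg.solve(B, resid)
weights = ci[:-1]                         # DIIS mixing coefficients
assert abs(weights.sum() - 1.0) < 1e-10   # constraint row enforces this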