Example #1
0
def expand_psivars(pvdefs):
    """Dictionary *pvdefs* has keys with names of PsiVariables to be
    created and values with dictionary of two keys: 'args', the
    PsiVariables that contribute to the key and 'func', a function (or
    lambda) to combine them. This function builds those PsiVariables if
    all the contributors are available. Helpful printing is available when
    PRINT > 2.

    """
    verbose = core.get_global_option('PRINT')

    for pvar, action in pvdefs.items():
        if verbose >= 2:
            print("""building %s %s""" % (pvar, '.' * (50 - len(pvar))), end='')

        psivars = core.scalar_variables()
        data_rich_args = []

        for pv in action['args']:
            if isinstance(pv, str):
                if pv in psivars:
                    data_rich_args.append(psivars[pv])
                else:
                    if verbose >= 2:
                        print("""EMPTY, missing {}""".format(pv))
                    break
            else:
                data_rich_args.append(pv)
        else:
            result = action['func'](data_rich_args)
            core.set_variable(pvar, result)
            if verbose >= 2:
                print("""SUCCESS""")
Example #2
0
def _print_matrix(descriptors, content, title):
    length = len(descriptors)

    matrix_header = '         ' + ' {:^10}' * length + '\n'
    core.print_out(matrix_header.format(*descriptors))
    core.print_out('    -----' + ' ----------' * length + '\n')

    for i, desc in enumerate(descriptors):
        core.print_out('    {:^5}'.format(desc))
        for j in range(length):
            core.print_out(' {:>10.5f}'.format(content[i, j]))

            # Set the name
            var_name = title + " " + descriptors[i] + descriptors[j]
            core.set_variable(var_name, content[i, j])
        core.print_out('\n')
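
# Usage sketch (hypothetical data; assumes psi4's `core` module is available):
# print a 3x3 tensor labeled by `descriptors` and store each element as a
# PsiVariable named "<title> <row><col>", e.g. "MY TENSOR XY".
import numpy as np

_print_matrix(['X', 'Y', 'Z'], np.arange(9.0).reshape(3, 3), 'MY TENSOR')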
    def compute_gradient(self, molecule):
        """Compute dispersion gradient based on engine, dispersion level, and parameters in `self`.

        Parameters
        ----------
        molecule : psi4.core.Molecule
            System for which to compute empirical dispersion correction.

        Returns
        -------
        psi4.core.Matrix
            (nat, 3) dispersion gradient [Eh/a0].

        """
        if self.engine in ['dftd3', 'mp2d']:
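            # A QCSchema-style job dict handed to QCEngine: 'level_hint' names the
            # dispersion level (e.g. 'd3bj') and 'params_tweaks' carries any
            # user-adjusted damping parameters.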
            resinp = {
                'schema_name': 'qcschema_input',
                'schema_version': 1,
                'molecule': molecule.to_schema(dtype=2),
                'driver': 'gradient',
                'model': {
                    'method': self.fctldash,
                    'basis': '(auto)',
                },
                'keywords': {
                    'level_hint': self.dashlevel,
                    'params_tweaks': self.dashparams,
                    'dashcoeff_supplement': self.dashcoeff_supplement,
                    'verbose': 1,
                },
            }
            jobrec = qcng.compute(resinp, self.engine, raise_error=True)
            jobrec = jobrec.dict()

            dashd_part = core.Matrix.from_array(np.array(jobrec['extras']['qcvars']['DISPERSION CORRECTION GRADIENT']).reshape(-1, 3))
            for k, qca in jobrec['extras']['qcvars'].items():
                if not isinstance(qca, (list, np.ndarray)):
                    core.set_variable(k, qca)

            if self.fctldash in ['hf3c', 'pbeh3c']:
                gcp_part = gcp.run_gcp(molecule, self.fctldash, verbose=False, dertype=1)
                dashd_part.add(gcp_part)

            return dashd_part
        else:
            return self.disp.compute_gradient(molecule)
    def compute_energy(self, molecule):
        """Compute dispersion energy based on engine, dispersion level, and parameters in `self`.

        Parameters
        ----------
        molecule : psi4.core.Molecule
            System for which to compute empirical dispersion correction.

        Returns
        -------
        float
            Dispersion energy [Eh].

        Notes
        -----
        DISPERSION CORRECTION ENERGY
            Disp always set. Overridden in SCF finalization, but that only changes for "-3C" methods.
        self.fctldash + DISPERSION CORRECTION ENERGY
            Set if `fctldash` nonempty.

        """
        if self.engine in ['dftd3', 'mp2d']:
            resinp = {
                'schema_name': 'qcschema_input',
                'schema_version': 1,
                'molecule': molecule.to_schema(dtype=2),
                'driver': 'energy',
                'model': {
                    'method': self.fctldash,
                    'basis': '(auto)',
                },
                'keywords': {
                    'level_hint': self.dashlevel,
                    'params_tweaks': self.dashparams,
                    'dashcoeff_supplement': self.dashcoeff_supplement,
                    'verbose': 1,
                },
            }
            jobrec = qcng.compute(resinp, self.engine, raise_error=True)
            jobrec = jobrec.dict()

            dashd_part = float(jobrec['extras']['qcvars']['DISPERSION CORRECTION ENERGY'])
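            # Only scalar qcvars are exported to the global variable store; the
            # isinstance check below skips array-valued entries such as gradients.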
            for k, qca in jobrec['extras']['qcvars'].items():
                if not isinstance(qca, (list, np.ndarray)):
                    core.set_variable(k, qca)

            if self.fctldash in ['hf3c', 'pbeh3c']:
                gcp_part = gcp.run_gcp(molecule, self.fctldash, verbose=False, dertype=0)
                dashd_part += gcp_part

            return dashd_part

        else:
            ene = self.disp.compute_energy(molecule)
            core.set_variable('DISPERSION CORRECTION ENERGY', ene)
            if self.fctldash:
                core.set_variable('{} DISPERSION CORRECTION ENERGY'.format(self.fctldash), ene)
            return ene
Example #5
0
    def compute_gradient(self, molecule):
        """Compute dispersion gradient based on engine, dispersion level, and parameters in `self`.

        Parameters
        ----------
        molecule : psi4.core.Molecule
            System for which to compute empirical dispersion correction.

        Returns
        -------
        psi4.core.Matrix
            (nat, 3) dispersion gradient [Eh/a0].

        """
        if self.engine == 'dftd3':
            jobrec = intf_dftd3.run_dftd3_from_arrays(
                molrec=molecule.to_dict(np_out=False),
                name_hint=self.fctldash,
                level_hint=self.dashlevel,
                param_tweaks=self.dashparams,
                dashcoeff_supplement=self.dashcoeff_supplement,
                ptype='gradient',
                verbose=1)

            dashd_part = core.Matrix.from_array(jobrec['qcvars']['DISPERSION CORRECTION GRADIENT'].data)
            for k, qca in jobrec['qcvars'].items():
                if not isinstance(qca.data, np.ndarray):
                    core.set_variable(k, qca.data)

            if self.fctldash in ['hf3c', 'pbeh3c']:
                gcp_part = gcp.run_gcp(molecule, self.fctldash, verbose=False, dertype=1)
                dashd_part.add(gcp_part)

            return dashd_part
        else:
            return self.disp.compute_gradient(molecule)
Example #6
0
    def compute_energy(self, molecule):
        """Compute dispersion energy based on engine, dispersion level, and parameters in `self`.

        Parameters
        ----------
        molecule : psi4.core.Molecule
            System for which to compute empirical dispersion correction.

        Returns
        -------
        float
            Dispersion energy [Eh].

        Notes
        -----
        DISPERSION CORRECTION ENERGY
            Disp always set. Overridden in SCF finalization, but that only changes for "-3C" methods.
        self.fctldash + DISPERSION CORRECTION ENERGY
            Set if `fctldash` nonempty.

        """
        if self.engine == 'dftd3':
            jobrec = intf_dftd3.run_dftd3_from_arrays(
                molrec=molecule.to_dict(np_out=False),
                name_hint=self.fctldash,
                level_hint=self.dashlevel,
                param_tweaks=self.dashparams,
                dashcoeff_supplement=self.dashcoeff_supplement,
                ptype='energy',
                verbose=1)

            dashd_part = float(jobrec['qcvars']['DISPERSION CORRECTION ENERGY'].data)
            for k, qca in jobrec['qcvars'].items():
                if not isinstance(qca.data, np.ndarray):
                    core.set_variable(k, qca.data)

            if self.fctldash in ['hf3c', 'pbeh3c']:
                gcp_part = gcp.run_gcp(molecule, self.fctldash, verbose=False, dertype=0)
                dashd_part += gcp_part

            return dashd_part
        else:
            ene = self.disp.compute_energy(molecule)
            core.set_variable('DISPERSION CORRECTION ENERGY', ene)
            if self.fctldash:
                core.set_variable('{} DISPERSION CORRECTION ENERGY'.format(self.fctldash), ene)
            return ene
Example #7
0
def print_sapt_hf_summary(data, name, short=False, delta_hf=False):

    ret = "   Partial %s Results, to compute Delta HF (dHF)\n" % name
    ret += "  " + "-" * 105 + "\n"

    # Elst
    ret += print_sapt_var("Electrostatics", data["Elst10,r"]) + "\n"
    ret += print_sapt_var("  Elst10,r", data["Elst10,r"]) + "\n"
    ret += "\n"
    core.set_variable("SAPT ELST ENERGY", data["Elst10,r"])

    # Exchange
    ret += print_sapt_var("Exchange", data["Exch10"]) + "\n"
    ret += print_sapt_var("  Exch10", data["Exch10"]) + "\n"
    ret += print_sapt_var("  Exch10(S^2)", data["Exch10(S^2)"]) + "\n"
    ret += "\n"
    core.set_variable("SAPT EXCH ENERGY", data["Exch10"])

    # Induction (no dHF)
    ind = data["Ind20,r"] + data["Exch-Ind20,r"]
    ind_ab = data["Ind20,r (A<-B)"] + data["Exch-Ind20,r (A<-B)"]
    ind_ba = data["Ind20,r (A->B)"] + data["Exch-Ind20,r (A->B)"]

    ret += print_sapt_var("Induction (no dHF)", ind) + "\n"
    ret += print_sapt_var("  Ind20,r", data["Ind20,r"]) + "\n"
    ret += print_sapt_var("  Exch-Ind20,r", data["Exch-Ind20,r"]) + "\n"
    ret += print_sapt_var("  Induction (A<-B) (no dHF)", ind_ab) + "\n"
    ret += print_sapt_var("  Induction (A->B) (no dHF)", ind_ba) + "\n"
    ret += "\n"
    core.set_variable("SAPT IND ENERGY", ind)

    if delta_hf:
        total_sapt = (data["Elst10,r"] + data["Exch10"] + ind)
        sapt_hf_delta = delta_hf - total_sapt

        core.set_variable("SAPT(DFT) Delta HF", sapt_hf_delta)

        ret += print_sapt_var(
            "Subtotal SAPT(HF)", total_sapt, start_spacer="    ") + "\n"
        ret += print_sapt_var("Total HF", delta_hf, start_spacer="    ") + "\n"
        ret += print_sapt_var("Delta HF", sapt_hf_delta,
                              start_spacer="    ") + "\n"

        ret += "  " + "-" * 105 + "\n"

        # Induction with dHF and scaling
        Sdelta = (ind + sapt_hf_delta) / ind

        ret += "   Partial %s Results, alternate SAPT0-like display\n" % name
        ret += "  " + "-" * 105 + "\n"

        ret += print_sapt_var("Induction", ind + sapt_hf_delta) + "\n"
        ret += print_sapt_var("  Ind20,r", data["Ind20,r"]) + "\n"
        ret += print_sapt_var("  Exch-Ind20,r", data["Exch-Ind20,r"]) + "\n"
        ret += print_sapt_var("  delta HF,r (2)", sapt_hf_delta) + "\n"
        ret += print_sapt_var("  Induction (A<-B)", ind_ab * Sdelta) + "\n"
        ret += print_sapt_var("  Induction (A->B)", ind_ba * Sdelta) + "\n"

        ret += "  " + "-" * 105 + "\n"
        return ret

    else:
        # Dispersion
        disp = data["Disp20"] + data["Exch-Disp20,u"]
        ret += print_sapt_var("Dispersion", disp) + "\n"
        ret += print_sapt_var("  Disp20", data["Disp20,u"]) + "\n"
        ret += print_sapt_var("  Exch-Disp20", data["Exch-Disp20,u"]) + "\n"
        ret += "\n"
        core.set_variable("SAPT DISP ENERGY", disp)

        # Total energy
        total = data["Elst10,r"] + data["Exch10"] + ind + disp
        ret += print_sapt_var("Total %-15s" % name, total,
                              start_spacer="   ") + "\n"
        core.set_variable("SAPT0 TOTAL ENERGY", total)
        core.set_variable("SAPT TOTAL ENERGY", total)
        core.set_variable("CURRENT ENERGY", total)

        ret += "  " + "-" * 105 + "\n"
        return ret
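
# Worked sketch of the delta HF bookkeeping above, with made-up numbers [Eh]:
# the `delta_hf` argument is the total HF interaction energy, and dHF is what is
# left after removing first-order electrostatics/exchange and induction.
elst, exch, ind = -0.010, 0.008, -0.003     # hypothetical Elst10,r, Exch10, Ind (incl. Exch-Ind)
hf_interaction = -0.006                     # hypothetical total HF interaction energy
dhf = hf_interaction - (elst + exch + ind)  # -0.001 Eh, reported as "Delta HF" above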
Example #8
0
def run_gaussian_2(name, **kwargs):

    # throw an exception for open-shells
    if (core.get_option('SCF','REFERENCE') != 'RHF' ):
        raise ValidationError("""g2 computations require "reference rhf".""")

    # stash user options:
    optstash = p4util.OptionsState(
        ['FNOCC','COMPUTE_TRIPLES'],
        ['FNOCC','COMPUTE_MP4_TRIPLES'],
        ['FREEZE_CORE'],
        ['MP2_TYPE'],
        ['SCF','SCF_TYPE'])

    # override default scf_type
    core.set_local_option('SCF','SCF_TYPE','PK')

    # optimize geometry at scf level
    core.clean()
    core.set_global_option('BASIS',"6-31G(D)")
    driver.optimize('scf')
    core.clean()

    # scf frequencies for zpe
    # NOTE This line should not be needed, but without it there's a seg fault
    scf_e, ref = driver.frequency('scf', return_wfn=True)

    # thermodynamic properties
    du = core.get_variable('INTERNAL ENERGY CORRECTION')
    dh = core.get_variable('ENTHALPY CORRECTION')
    dg = core.get_variable('GIBBS FREE ENERGY CORRECTION')

    freqs   = ref.frequencies()
    nfreq   = freqs.dim(0)
    freqsum = 0.0
    for i in range(0, nfreq):
        freqsum += freqs.get(i)
    zpe = freqsum / constants.hartree2wavenumbers * 0.8929 * 0.5
    core.clean()

    # optimize geometry at mp2 (no frozen core) level
    # note: freeze_core isn't an option in MP2
    core.set_global_option('FREEZE_CORE',"FALSE")
    core.set_global_option('MP2_TYPE', 'CONV')
    driver.optimize('mp2')
    core.clean()

    # qcisd(t)
    core.set_local_option('FNOCC','COMPUTE_MP4_TRIPLES',"TRUE")
    core.set_global_option('FREEZE_CORE',"TRUE")
    core.set_global_option('BASIS',"6-311G(D_P)")
    ref = driver.proc.run_fnocc('qcisd(t)', return_wfn=True, **kwargs)

    # HLC: high-level correction based on number of valence electrons
    nirrep = ref.nirrep()
    frzcpi = ref.frzcpi()
    nfzc = 0
    for i in range (0,nirrep):
        nfzc += frzcpi[i]
    nalpha = ref.nalpha() - nfzc
    nbeta  = ref.nbeta() - nfzc
    # hlc of gaussian-2
    hlc = -0.00481 * nalpha -0.00019 * nbeta
    # hlc of gaussian-1
    hlc1 = -0.00614 * nalpha

    eqci_6311gdp = core.get_variable("QCISD(T) TOTAL ENERGY")
    emp4_6311gd  = core.get_variable("MP4 TOTAL ENERGY")
    emp2_6311gd  = core.get_variable("MP2 TOTAL ENERGY")
    core.clean()

    # correction for diffuse functions
    core.set_global_option('BASIS',"6-311+G(D_P)")
    driver.energy('mp4')
    emp4_6311pg_dp = core.get_variable("MP4 TOTAL ENERGY")
    emp2_6311pg_dp = core.get_variable("MP2 TOTAL ENERGY")
    core.clean()

    # correction for polarization functions
    core.set_global_option('BASIS',"6-311G(2DF_P)")
    driver.energy('mp4')
    emp4_6311g2dfp = core.get_variable("MP4 TOTAL ENERGY")
    emp2_6311g2dfp = core.get_variable("MP2 TOTAL ENERGY")
    core.clean()

    # big basis mp2
    core.set_global_option('BASIS',"6-311+G(3DF_2P)")
    #run_fnocc('_mp2',**kwargs)
    driver.energy('mp2')
    emp2_big = core.get_variable("MP2 TOTAL ENERGY")
    core.clean()
    eqci       = eqci_6311gdp
    e_delta_g2 = emp2_big + emp2_6311gd - emp2_6311g2dfp - emp2_6311pg_dp
    e_plus     = emp4_6311pg_dp - emp4_6311gd
    e_2df      = emp4_6311g2dfp - emp4_6311gd

    eg2 = eqci + e_delta_g2 + e_plus + e_2df
    eg2_mp2_0k = eqci + (emp2_big - emp2_6311gd) + hlc + zpe

    core.print_out('\n')
    core.print_out('  ==>  G1/G2 Energy Components  <==\n')
    core.print_out('\n')
    core.print_out('        QCISD(T):            %20.12lf\n' % eqci)
    core.print_out('        E(Delta):            %20.12lf\n' % e_delta_g2)
    core.print_out('        E(2DF):              %20.12lf\n' % e_2df)
    core.print_out('        E(+):                %20.12lf\n' % e_plus)
    core.print_out('        E(G1 HLC):           %20.12lf\n' % hlc1)
    core.print_out('        E(G2 HLC):           %20.12lf\n' % hlc)
    core.print_out('        E(ZPE):              %20.12lf\n' % zpe)
    core.print_out('\n')
    core.print_out('  ==>  0 Kelvin Results  <==\n')
    core.print_out('\n')
    eg2_0k = eg2 + zpe + hlc
    core.print_out('        G1:                  %20.12lf\n' % (eqci + e_plus + e_2df + hlc1 + zpe))
    core.print_out('        G2(MP2):             %20.12lf\n' % eg2_mp2_0k)
    core.print_out('        G2:                  %20.12lf\n' % eg2_0k)

    core.set_variable("G1 TOTAL ENERGY",eqci + e_plus + e_2df + hlc1 + zpe)
    core.set_variable("G2 TOTAL ENERGY",eg2_0k)
    core.set_variable("G2(MP2) TOTAL ENERGY",eg2_mp2_0k)

    core.print_out('\n')
    T = core.get_global_option('T')
    core.print_out('  ==>  %3.0lf Kelvin Results  <==\n'% T)
    core.print_out('\n')

    internal_energy = eg2_mp2_0k + du - zpe / 0.8929
    enthalpy        = eg2_mp2_0k + dh - zpe / 0.8929
    gibbs           = eg2_mp2_0k + dg - zpe / 0.8929

    core.print_out('        G2(MP2) energy:      %20.12lf\n' % internal_energy )
    core.print_out('        G2(MP2) enthalpy:    %20.12lf\n' % enthalpy)
    core.print_out('        G2(MP2) free energy: %20.12lf\n' % gibbs)
    core.print_out('\n')

    core.set_variable("G2(MP2) INTERNAL ENERGY",internal_energy)
    core.set_variable("G2(MP2) ENTHALPY",enthalpy)
    core.set_variable("G2(MP2) FREE ENERGY",gibbs)

    internal_energy = eg2_0k + du - zpe / 0.8929
    enthalpy        = eg2_0k + dh - zpe / 0.8929
    gibbs           = eg2_0k + dg - zpe / 0.8929

    core.print_out('        G2 energy:           %20.12lf\n' % internal_energy )
    core.print_out('        G2 enthalpy:         %20.12lf\n' % enthalpy)
    core.print_out('        G2 free energy:      %20.12lf\n' % gibbs)

    core.set_variable("CURRENT ENERGY",eg2_0k)

    core.set_variable("G2 INTERNAL ENERGY",internal_energy)
    core.set_variable("G2 ENTHALPY",enthalpy)
    core.set_variable("G2 FREE ENERGY",gibbs)

    core.clean()

    optstash.restore()

    # return 0K g2 results
    return eg2_0k
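
# Recap sketch of the composite assembly above, with hypothetical component
# energies [Eh]; it mirrors eg2_0k = eqci + e_delta_g2 + e_plus + e_2df + hlc + zpe.
eqci    = -76.277                      # QCISD(T)/6-311G(d,p)
e_plus  = -0.010                       # MP4/6-311+G(d,p)  - MP4/6-311G(d,p)
e_2df   = -0.041                       # MP4/6-311G(2df,p) - MP4/6-311G(d,p)
e_delta = -0.021                       # MP2/6-311+G(3df,2p) + MP2/6-311G(d,p) - MP2/6-311G(2df,p) - MP2/6-311+G(d,p)
hlc     = -0.00481 * 4 - 0.00019 * 4   # G2 higher-level correction for 4 valence alpha and 4 valence beta electrons
zpe     = 0.021                        # 0.8929-scaled HF zero-point energy
print(eqci + e_delta + e_plus + e_2df + hlc + zpe)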
Example #9
0
def nbody_gufunc(func, method_string, **kwargs):
    """
    Computes the nbody interaction energy, gradient, or Hessian depending on input.
    This is a generalized universal function for computing interaction quantities.

    :returns: *return type of func* |w--w| The interaction data.

    :returns: (*float*, :py:class:`~psi4.core.Wavefunction`) |w--w| interaction data and wavefunction with energy/gradient/hessian set appropriately when **return_wfn** specified.

    :type func: function
    :param func: ``energy`` || etc.

        Python function that accepts method_string and a molecule. Returns a
        energy, gradient, or Hessian as requested.

    :type method_string: string
    :param method_string: ``'scf'`` || ``'mp2'`` || ``'ci5'`` || etc.

        First argument, lowercase and usually unlabeled. Indicates the computational
        method to be passed to func.

    :type molecule: :ref:`molecule <op_py_molecule>`
    :param molecule: ``h2o`` || etc.

        The target molecule, if not the last molecule defined.

    :type return_wfn: :ref:`boolean <op_py_boolean>`
    :param return_wfn: ``'on'`` || |dl| ``'off'`` |dr|

        Indicate to additionally return the :py:class:`~psi4.core.Wavefunction`
        calculation result as the second element of a tuple.

    :type bsse_type: string or list
    :param bsse_type: ``'cp'`` || ``['nocp', 'vmfc']`` || |dl| ``None`` |dr| || etc.

        Type of BSSE correction to compute: CP, NoCP, or VMFC. The first in this
        list is returned by this function. By default, this function is not called.

    :type max_nbody: int
    :param max_nbody: ``3`` || etc.

        Maximum n-body to compute, cannot exceed the number of fragments in the molecule.

    :type ptype: string
    :param ptype: ``'energy'`` || ``'gradient'`` || ``'hessian'``

        Type of the procedure passed in.

    :type return_total_data: :ref:`boolean <op_py_boolean>`
    :param return_total_data: ``'on'`` || |dl| ``'off'`` |dr|

        If True returns the total data (energy/gradient/etc) of the system,
        otherwise returns interaction data.
    """

    ### ==> Parse some kwargs <==
    kwargs = p4util.kwargs_lower(kwargs)
    return_wfn = kwargs.pop('return_wfn', False)
    ptype = kwargs.pop('ptype', None)
    return_total_data = kwargs.pop('return_total_data', False)
    molecule = kwargs.pop('molecule', core.get_active_molecule())
    molecule.update_geometry()
    core.clean_variables()

    if ptype not in ['energy', 'gradient', 'hessian']:
        raise ValidationError("""N-Body driver: The ptype '%s' is not regonized.""" % ptype)

    # Figure out BSSE types
    do_cp = False
    do_nocp = False
    do_vmfc = False
    return_method = False

    # Must be passed bsse_type
    bsse_type_list = kwargs.pop('bsse_type', None)
    if bsse_type_list is None:
        raise ValidationError("N-Body GUFunc: Must pass a bsse_type")
    if not isinstance(bsse_type_list, list):
        bsse_type_list = [bsse_type_list]

    for num, btype in enumerate(bsse_type_list):
        if btype.lower() == 'cp':
            do_cp = True
            if (num == 0): return_method = 'cp'
        elif btype.lower() == 'nocp':
            do_nocp = True
            if (num == 0): return_method = 'nocp'
        elif btype.lower() == 'vmfc':
            do_vmfc = True
            if (num == 0): return_method = 'vmfc'
        else:
            raise ValidationError("N-Body GUFunc: bsse_type '%s' is not recognized" % btype.lower())

    max_nbody = kwargs.get('max_nbody', -1)
    max_frag = molecule.nfragments()
    if max_nbody == -1:
        max_nbody = molecule.nfragments()
    else:
        max_nbody = min(max_nbody, max_frag)

    # What levels do we need?
    nbody_range = range(1, max_nbody + 1)
    fragment_range = range(1, max_frag + 1)

    # Flip this off for now, needs more testing
    # If we are doing CP lets save them integrals
    #if 'cp' in bsse_type_list and (len(bsse_type_list) == 1):
    #    # Set to save RI integrals for repeated full-basis computations
    #    ri_ints_io = core.get_global_option('DF_INTS_IO')

    #    # inquire if above at all applies to dfmp2 or just scf
    #    core.set_global_option('DF_INTS_IO', 'SAVE')
    #    psioh = core.IOManager.shared_object()
    #    psioh.set_specific_retention(97, True)


    bsse_str = bsse_type_list[0]
    if len(bsse_type_list) > 1:
        bsse_str = str(bsse_type_list)
    core.print_out("\n\n")
    core.print_out("   ===> N-Body Interaction Abacus <===\n")
    core.print_out("        BSSE Treatment:                     %s\n" % bsse_str)


    cp_compute_list = {x:set() for x in nbody_range}
    nocp_compute_list = {x:set() for x in nbody_range}
    vmfc_compute_list = {x:set() for x in nbody_range}
    vmfc_level_list = {x:set() for x in nbody_range} # Need to sum something slightly different

    # Build up compute sets
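    # Each compute-list entry is a pair (real_fragments, basis_fragments).  For a
    # CP-corrected trimer, e.g., the 2-body list holds ((1, 2), (1, 2, 3)),
    # ((1, 3), (1, 2, 3)), and ((2, 3), (1, 2, 3)): every cluster is evaluated in
    # the full trimer basis, with the absent fragment ghosted.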
    if do_cp:
        # Everything is in dimer basis
        basis_tuple = tuple(fragment_range)
        for nbody in nbody_range:
            for x in it.combinations(fragment_range, nbody):
                cp_compute_list[nbody].add( (x, basis_tuple) )

    if do_nocp:
        # Everything in monomer basis
        for nbody in nbody_range:
            for x in it.combinations(fragment_range, nbody):
                nocp_compute_list[nbody].add( (x, x) )

    if do_vmfc:
        # Like a CP for all combinations of pairs or greater
        for nbody in nbody_range:
            for cp_combos in it.combinations(fragment_range, nbody):
                basis_tuple = tuple(cp_combos)
                for interior_nbody in nbody_range:
                    for x in it.combinations(cp_combos, interior_nbody):
                        combo_tuple = (x, basis_tuple)
                        vmfc_compute_list[interior_nbody].add( combo_tuple )
                        vmfc_level_list[len(basis_tuple)].add( combo_tuple )

    # Build a comprehensive compute_range
    compute_list = {x:set() for x in nbody_range}
    for n in nbody_range:
        compute_list[n] |= cp_compute_list[n]
        compute_list[n] |= nocp_compute_list[n]
        compute_list[n] |= vmfc_compute_list[n]
        core.print_out("        Number of %d-body computations:     %d\n" % (n, len(compute_list[n])))


    # Build size and slices dictionaries
    fragment_size_dict = {frag: molecule.extract_subsets(frag).natom() for
                                           frag in range(1, max_frag+1)}

    start = 0
    fragment_slice_dict = {}
    for k, v in fragment_size_dict.items():
        fragment_slice_dict[k] = slice(start, start + v)
        start += v

    molecule_total_atoms = sum(fragment_size_dict.values())

    # Now compute the energies
    energies_dict = {}
    ptype_dict = {}
    for n in compute_list.keys():
        core.print_out("\n   ==> N-Body: Now computing %d-body complexes <==\n\n" % n)
        total = len(compute_list[n])
        for num, pair in enumerate(compute_list[n]):
            core.print_out("\n       N-Body: Computing complex (%d/%d) with fragments %s in the basis of fragments %s.\n\n" %
                                                                    (num + 1, total, str(pair[0]), str(pair[1])))
            ghost = list(set(pair[1]) - set(pair[0]))

            current_mol = molecule.extract_subsets(list(pair[0]), ghost)
            ptype_dict[pair] = func(method_string, molecule=current_mol, **kwargs)
            energies_dict[pair] = core.get_variable("CURRENT ENERGY")
            core.print_out("\n       N-Body: Complex Energy (fragments = %s, basis = %s: %20.14f)\n" %
                                                                (str(pair[0]), str(pair[1]), energies_dict[pair]))

            # Flip this off for now, needs more testing
            #if 'cp' in bsse_type_list and (len(bsse_type_list) == 1):
            #    core.set_global_option('DF_INTS_IO', 'LOAD')

            core.clean()

    # Final dictionaries
    cp_energy_by_level   = {n: 0.0 for n in nbody_range}
    nocp_energy_by_level = {n: 0.0 for n in nbody_range}

    cp_energy_body_dict =   {n: 0.0 for n in nbody_range}
    nocp_energy_body_dict = {n: 0.0 for n in nbody_range}
    vmfc_energy_body_dict = {n: 0.0 for n in nbody_range}

    # Build out ptype dictionaries if needed
    if ptype != 'energy':
        if ptype == 'gradient':
            arr_shape = (molecule_total_atoms, 3)
        elif ptype == 'hessian':
            arr_shape = (molecule_total_atoms * 3, molecule_total_atoms * 3)
        else:
            raise KeyError("N-Body: ptype '%s' not recognized" % ptype)

        cp_ptype_by_level   =  {n: np.zeros(arr_shape) for n in nbody_range}
        nocp_ptype_by_level =  {n: np.zeros(arr_shape) for n in nbody_range}
        vmfc_ptype_by_level = {n: np.zeros(arr_shape) for n in nbody_range}

        cp_ptype_body_dict   = {n: np.zeros(arr_shape) for n in nbody_range}
        nocp_ptype_body_dict = {n: np.zeros(arr_shape) for n in nbody_range}
        vmfc_ptype_body_dict = {n: np.zeros(arr_shape) for n in nbody_range}
    else:
        cp_ptype_by_level, cp_ptype_body_dict = None, None
        nocp_ptype_by_level, nocp_ptype_body_dict = None, None
        vmfc_ptype_body_dict = None


    # Sum up all of the levels
    for n in nbody_range:

        # Energy
        cp_energy_by_level[n]   = sum(energies_dict[v] for v in cp_compute_list[n])
        nocp_energy_by_level[n] = sum(energies_dict[v] for v in nocp_compute_list[n])

        # Special vmfc case
        if n > 1:
            vmfc_energy_body_dict[n] = vmfc_energy_body_dict[n - 1]
        for tup in vmfc_level_list[n]:
            vmfc_energy_body_dict[n] += ((-1) ** (n - len(tup[0]))) * energies_dict[tup]


        # Do ptype
        if ptype != 'energy':
            _sum_cluster_ptype_data(ptype, ptype_dict, cp_compute_list[n],
                                      fragment_slice_dict, fragment_size_dict,
                                      cp_ptype_by_level[n])
            _sum_cluster_ptype_data(ptype, ptype_dict, nocp_compute_list[n],
                                      fragment_slice_dict, fragment_size_dict,
                                      nocp_ptype_by_level[n])
            _sum_cluster_ptype_data(ptype, ptype_dict, vmfc_level_list[n],
                                      fragment_slice_dict, fragment_size_dict,
                                      vmfc_ptype_by_level[n], vmfc=True)

    # Compute cp energy and ptype
    if do_cp:
        for n in nbody_range:
            if n == max_frag:
                cp_energy_body_dict[n] = cp_energy_by_level[n]
                if ptype != 'energy':
                    cp_ptype_body_dict[n][:] = cp_ptype_by_level[n]
                continue

            for k in range(1, n + 1):
                take_nk =  nCr(max_frag - k - 1, n - k)
                sign = ((-1) ** (n - k))
                value = cp_energy_by_level[k]
                cp_energy_body_dict[n] += take_nk * sign * value

                if ptype != 'energy':
                    value = cp_ptype_by_level[k]
                    cp_ptype_body_dict[n] += take_nk * sign * value

        _print_nbody_energy(cp_energy_body_dict, "Counterpoise Corrected (CP)")
        cp_interaction_energy = cp_energy_body_dict[max_nbody] - cp_energy_body_dict[1]
        core.set_variable('Counterpoise Corrected Total Energy', cp_energy_body_dict[max_nbody])
        core.set_variable('Counterpoise Corrected Interaction Energy', cp_interaction_energy)

        for n in nbody_range[1:]:
            var_key = 'CP-CORRECTED %d-BODY INTERACTION ENERGY' % n
            core.set_variable(var_key, cp_energy_body_dict[n] - cp_energy_body_dict[1])

    # Compute nocp energy and ptype
    if do_nocp:
        for n in nbody_range:
            if n == max_frag:
                nocp_energy_body_dict[n] = nocp_energy_by_level[n]
                if ptype != 'energy':
                    nocp_ptype_body_dict[n][:] = nocp_ptype_by_level[n]
                continue

            for k in range(1, n + 1):
                take_nk =  nCr(max_frag - k - 1, n - k)
                sign = ((-1) ** (n - k))
                value = nocp_energy_by_level[k]
                nocp_energy_body_dict[n] += take_nk * sign * value

                if ptype != 'energy':
                    value = nocp_ptype_by_level[k]
                    nocp_ptype_body_dict[n] += take_nk * sign * value

        _print_nbody_energy(nocp_energy_body_dict, "Non-Counterpoise Corrected (NoCP)")
        nocp_interaction_energy = nocp_energy_body_dict[max_nbody] - nocp_energy_body_dict[1]
        core.set_variable('Non-Counterpoise Corrected Total Energy', nocp_energy_body_dict[max_nbody])
        core.set_variable('Non-Counterpoise Corrected Interaction Energy', nocp_interaction_energy)

        for n in nbody_range[1:]:
            var_key = 'NOCP-CORRECTED %d-BODY INTERACTION ENERGY' % n
            core.set_variable(var_key, nocp_energy_body_dict[n] - nocp_energy_body_dict[1])


    # Compute vmfc energy and ptype
    if do_vmfc:
        _print_nbody_energy(vmfc_energy_body_dict, "Valiron-Mayer Function Counterpoise (VMFC)")
        vmfc_interaction_energy = vmfc_energy_body_dict[max_nbody] - vmfc_energy_body_dict[1]
        core.set_variable('Valiron-Mayer Function Counterpoise Total Energy', vmfc_energy_body_dict[max_nbody])
        core.set_variable('Valiron-Mayer Function Counterpoise Interaction Energy', vmfc_interaction_energy)

        for n in nbody_range[1:]:
            var_key = 'VMFC-CORRECTED %d-BODY INTERACTION ENERGY' % n
            core.set_variable(var_key, vmfc_energy_body_dict[n] - vmfc_energy_body_dict[1])

    if return_method == 'cp':
        ptype_body_dict = cp_ptype_body_dict
        energy_body_dict = cp_energy_body_dict
    elif return_method == 'nocp':
        ptype_body_dict = nocp_ptype_body_dict
        energy_body_dict = nocp_energy_body_dict
    elif return_method == 'vmfc':
        ptype_body_dict = vmfc_ptype_body_dict
        energy_body_dict = vmfc_energy_body_dict
    else:
        raise ValidationError("N-Body Wrapper: Invalid return type. Should never be here, please post this error on github.")


    # Figure out and build return types
    if return_total_data:
        ret_energy = energy_body_dict[max_nbody]
    else:
        ret_energy = energy_body_dict[max_nbody]
        ret_energy -= energy_body_dict[1]


    if ptype != 'energy':
        if return_total_data:
            np_final_ptype = ptype_body_dict[max_nbody].copy()
        else:
            np_final_ptype = ptype_body_dict[max_nbody].copy()
            np_final_ptype -= ptype_body_dict[1]

        ret_ptype = core.Matrix.from_array(np_final_ptype)
    else:
        ret_ptype = ret_energy

    # Build and set a wavefunction
    wfn = core.Wavefunction.build(molecule, 'sto-3g')
    wfn.nbody_energy = energies_dict
    wfn.nbody_ptype = ptype_dict
    wfn.nbody_body_energy = energy_body_dict
    wfn.nbody_body_ptype = ptype_body_dict

    if ptype == 'gradient':
        wfn.set_gradient(ret_ptype)
    elif ptype == 'hessian':
        wfn.set_hessian(ret_ptype)

    core.set_variable("CURRENT ENERGY", ret_energy)

    if return_wfn:
        return (ret_ptype, wfn)
    else:
        return ret_ptype
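
# Sketch of the inclusion/exclusion weights used in the CP/NoCP assembly above,
# assuming nCr behaves like math.comb: each k-body total enters the n-body sum
# with weight (-1)**(n - k) * C(max_frag - k - 1, n - k).
from math import comb

max_frag = 4
for n in range(1, max_frag):  # n == max_frag is copied over directly in the code above
    weights = [(k, (-1) ** (n - k) * comb(max_frag - k - 1, n - k)) for k in range(1, n + 1)]
    print(n, weights)         # e.g. n=2 -> [(1, -2), (2, 1)] for a tetramer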
Example #10
0
def run_sapt_dft(name, **kwargs):
    optstash = p4util.OptionsState(['SCF_TYPE'], ['SCF', 'REFERENCE'], ['SCF', 'DFT_GRAC_SHIFT'], ['SCF', 'SAVE_JK'])

    core.tstart()
    # Alter default algorithm
    if not core.has_global_option_changed('SCF_TYPE'):
        core.set_global_option('SCF_TYPE', 'DF')

    core.prepare_options_for_module("SAPT")

    # Get the molecule of interest
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        sapt_dimer = kwargs.pop('molecule', core.get_active_molecule())
    else:
        core.print_out('Warning! SAPT argument "ref_wfn" is only able to use molecule information.')
        sapt_dimer = ref_wfn.molecule()

    sapt_dimer, monomerA, monomerB = proc_util.prepare_sapt_molecule(sapt_dimer, "dimer")

    # Grab overall settings
    mon_a_shift = core.get_option("SAPT", "SAPT_DFT_GRAC_SHIFT_A")
    mon_b_shift = core.get_option("SAPT", "SAPT_DFT_GRAC_SHIFT_B")
    do_delta_hf = core.get_option("SAPT", "SAPT_DFT_DO_DHF")
    sapt_dft_functional = core.get_option("SAPT", "SAPT_DFT_FUNCTIONAL")

    # Print out the title and some information
    core.print_out("\n")
    core.print_out("         ---------------------------------------------------------\n")
    core.print_out("         " + "SAPT(DFT) Procedure".center(58) + "\n")
    core.print_out("\n")
    core.print_out("         " + "by Daniel G. A. Smith".center(58) + "\n")
    core.print_out("         ---------------------------------------------------------\n")
    core.print_out("\n")

    core.print_out("  !!!  WARNING:  SAPT(DFT) capability is in beta. Please use with caution. !!!\n\n")

    core.print_out("  ==> Algorithm <==\n\n")
    core.print_out("   SAPT DFT Functional     %12s\n" % str(sapt_dft_functional))
    core.print_out("   Monomer A GRAC Shift    %12.6f\n" % mon_a_shift)
    core.print_out("   Monomer B GRAC Shift    %12.6f\n" % mon_b_shift)
    core.print_out("   Delta HF                %12s\n" % ("True" if do_delta_hf else "False"))
    core.print_out("   JK Algorithm            %12s\n" % core.get_global_option("SCF_TYPE"))
    core.print_out("\n")
    core.print_out("   Required computations:\n")
    if (do_delta_hf):
        core.print_out("     HF  (Dimer)\n")
        core.print_out("     HF  (Monomer A)\n")
        core.print_out("     HF  (Monomer B)\n")
    core.print_out("     DFT (Monomer A)\n")
    core.print_out("     DFT (Monomer B)\n")
    core.print_out("\n")

    if (sapt_dft_functional != "HF") and ((mon_a_shift == 0.0) or (mon_b_shift == 0.0)):
        raise ValidationError('SAPT(DFT): must set both "SAPT_DFT_GRAC_SHIFT_A" and "B".')

    if (core.get_option('SCF', 'REFERENCE') != 'RHF'):
        raise ValidationError('SAPT(DFT) currently only supports restricted references.')

    core.IO.set_default_namespace('dimer')
    data = {}

    if (core.get_global_option('SCF_TYPE') == 'DF'):
        # core.set_global_option('DF_INTS_IO', 'LOAD')
        core.set_global_option('DF_INTS_IO', 'SAVE')

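    # DF_INTS_IO=SAVE keeps the fitted integrals on scratch unit 97 (the DF integral
    # file); the IO.change_file_namespace(97, ...) calls below hand that file between
    # the dimer and monomer jobs so the fit integrals are reused rather than recomputed.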
    # Compute dimer wavefunction
    hf_wfn_dimer = None
    if do_delta_hf:
        if (core.get_global_option('SCF_TYPE') == 'DF'):
            core.set_global_option('DF_INTS_IO', 'SAVE')

        core.timer_on("SAPT(DFT): Dimer SCF")
        hf_data = {}
        hf_wfn_dimer = scf_helper("SCF", molecule=sapt_dimer, banner="SAPT(DFT): delta HF Dimer", **kwargs)
        hf_data["HF DIMER"] = core.variable("CURRENT ENERGY")
        core.timer_off("SAPT(DFT): Dimer SCF")

        core.timer_on("SAPT(DFT): Monomer A SCF")
        if (core.get_global_option('SCF_TYPE') == 'DF'):
            core.IO.change_file_namespace(97, 'dimer', 'monomerA')

        hf_wfn_A = scf_helper("SCF", molecule=monomerA, banner="SAPT(DFT): delta HF Monomer A", **kwargs)
        hf_data["HF MONOMER A"] = core.variable("CURRENT ENERGY")
        core.timer_off("SAPT(DFT): Monomer A SCF")

        core.timer_on("SAPT(DFT): Monomer B SCF")
        core.set_global_option("SAVE_JK", True)
        if (core.get_global_option('SCF_TYPE') == 'DF'):
            core.IO.change_file_namespace(97, 'monomerA', 'monomerB')

        hf_wfn_B = scf_helper("SCF", molecule=monomerB, banner="SAPT(DFT): delta HF Monomer B", **kwargs)
        hf_data["HF MONOMER B"] = core.variable("CURRENT ENERGY")
        core.set_global_option("SAVE_JK", False)
        core.timer_off("SAPT(DFT): Monomer B SCF")

        # Grab JK object and set to A (so we do not save many JK objects)
        sapt_jk = hf_wfn_B.jk()
        hf_wfn_A.set_jk(sapt_jk)
        core.set_global_option("SAVE_JK", False)

        # Hand the DF integral file back to the dimer namespace
        if (core.get_global_option('SCF_TYPE') == 'DF'):
            core.IO.change_file_namespace(97, 'monomerB', 'dimer')

        core.print_out("\n")
        core.print_out("         ---------------------------------------------------------\n")
        core.print_out("         " + "SAPT(DFT): delta HF Segment".center(58) + "\n")
        core.print_out("\n")
        core.print_out("         " + "by Daniel G. A. Smith and Rob Parrish".center(58) + "\n")
        core.print_out("         ---------------------------------------------------------\n")
        core.print_out("\n")

        # Build cache
        hf_cache = sapt_jk_terms.build_sapt_jk_cache(hf_wfn_A, hf_wfn_B, sapt_jk, True)

        # Electrostatics
        core.timer_on("SAPT(DFT):SAPT:elst")
        elst = sapt_jk_terms.electrostatics(hf_cache, True)
        hf_data.update(elst)
        core.timer_off("SAPT(DFT):SAPT:elst")

        # Exchange
        core.timer_on("SAPT(DFT):SAPT:exch")
        exch = sapt_jk_terms.exchange(hf_cache, sapt_jk, True)
        hf_data.update(exch)
        core.timer_off("SAPT(DFT):SAPT:exch")

        # Induction
        core.timer_on("SAPT(DFT):SAPT:ind")
        ind = sapt_jk_terms.induction(hf_cache,
                                      sapt_jk,
                                      True,
                                      maxiter=core.get_option("SAPT", "MAXITER"),
                                      conv=core.get_option("SAPT", "D_CONVERGENCE"),
                                      Sinf=core.get_option("SAPT", "DO_IND_EXCH_SINF"))
        hf_data.update(ind)
        core.timer_off("SAPT(DFT):SAPT:ind")

        dhf_value = hf_data["HF DIMER"] - hf_data["HF MONOMER A"] - hf_data["HF MONOMER B"]

        core.print_out("\n")
        core.print_out(print_sapt_hf_summary(hf_data, "SAPT(HF)", delta_hf=dhf_value))

        data["Delta HF Correction"] = core.variable("SAPT(DFT) Delta HF")
        sapt_jk.finalize()

        del hf_wfn_A, hf_wfn_B, sapt_jk

    if hf_wfn_dimer is None:
        dimer_wfn = core.Wavefunction.build(sapt_dimer, core.get_global_option("BASIS"))
    else:
        dimer_wfn = hf_wfn_dimer

    # Set the primary functional
    core.set_local_option('SCF', 'REFERENCE', 'RKS')

    # Compute Monomer A wavefunction
    core.timer_on("SAPT(DFT): Monomer A DFT")
    if (core.get_global_option('SCF_TYPE') == 'DF'):
        core.IO.change_file_namespace(97, 'dimer', 'monomerA')

    if mon_a_shift:
        core.set_global_option("DFT_GRAC_SHIFT", mon_a_shift)

    core.IO.set_default_namespace('monomerA')
    wfn_A = scf_helper(sapt_dft_functional,
                       post_scf=False,
                       molecule=monomerA,
                       banner="SAPT(DFT): DFT Monomer A",
                       **kwargs)
    data["DFT MONOMERA"] = core.variable("CURRENT ENERGY")

    core.set_global_option("DFT_GRAC_SHIFT", 0.0)
    core.timer_off("SAPT(DFT): Monomer A DFT")

    # Compute Monomer B wavefunction
    core.timer_on("SAPT(DFT): Monomer B DFT")
    if (core.get_global_option('SCF_TYPE') == 'DF'):
        core.IO.change_file_namespace(97, 'monomerA', 'monomerB')

    if mon_b_shift:
        core.set_global_option("DFT_GRAC_SHIFT", mon_b_shift)

    core.set_global_option("SAVE_JK", True)
    core.IO.set_default_namespace('monomerB')
    wfn_B = scf_helper(sapt_dft_functional,
                       post_scf=False,
                       molecule=monomerB,
                       banner="SAPT(DFT): DFT Monomer B",
                       **kwargs)
    data["DFT MONOMERB"] = core.variable("CURRENT ENERGY")

    # Save JK object
    sapt_jk = wfn_B.jk()
    wfn_A.set_jk(sapt_jk)
    core.set_global_option("SAVE_JK", False)

    core.set_global_option("DFT_GRAC_SHIFT", 0.0)
    core.timer_off("SAPT(DFT): Monomer B DFT")

    # Write out header
    scf_alg = core.get_global_option("SCF_TYPE")
    sapt_dft_header(sapt_dft_functional, mon_a_shift, mon_b_shift, bool(do_delta_hf), scf_alg)

    # Call SAPT(DFT)
    sapt_jk = wfn_B.jk()
    sapt_dft(dimer_wfn, wfn_A, wfn_B, sapt_jk=sapt_jk, data=data, print_header=False)

    # Copy data back into globals
    for k, v in data.items():
        core.set_variable(k, v)

    core.tstop()

    optstash.restore()

    return dimer_wfn
Example #11
0
def scf_finalize_energy(self):
    """Performs stability analysis and calls back SCF with new guess
    if needed, Returns the SCF energy. This function should be called
    once orbitals are ready for energy/property computations, usually
    after iterations() is called.

    """

    # post-scf vv10 correlation
    if core.get_option(
            'SCF', "DFT_VV10_POSTSCF") and self.functional().vv10_b() > 0.0:
        self.functional().set_lock(False)
        self.functional().set_do_vv10(True)
        self.functional().set_lock(True)
        core.print_out(
            "  ==> Computing Non-Self-Consistent VV10 Energy Correction <==\n\n"
        )
        SCFE = 0.0
        self.form_V()
        SCFE += self.compute_E()
        self.set_energies("Total Energy", SCFE)

    # Perform wavefunction stability analysis before doing
    # anything on a wavefunction that may not be truly converged.
    if core.get_option('SCF', 'STABILITY_ANALYSIS') != "NONE":

        # Don't bother computing needed integrals if we can't do anything with them.
        if self.functional().needs_xc():
            raise ValidationError(
                "Stability analysis not yet supported for XC functionals.")

        # We need the integral file, make sure it is written and
        # compute it if needed
        if core.get_option('SCF', 'REFERENCE') != "UHF":
            #psio = core.IO.shared_object()
            #psio.open(constants.PSIF_SO_TEI, 1)  # PSIO_OPEN_OLD
            #try:
            #    psio.tocscan(constants.PSIF_SO_TEI, "IWL Buffers")
            #except TypeError:
            #    # "IWL Buffers" actually found but psio_tocentry can't be returned to Py
            #    psio.close(constants.PSIF_SO_TEI, 1)
            #else:
            #    # tocscan returned None
            #    psio.close(constants.PSIF_SO_TEI, 1)

            # logic above foiled by psio_tocentry not returning None<--nullptr in pb11 2.2.1
            #   so forcibly recomputing for now until stability revamp
            core.print_out("    SO Integrals not on disk. Computing...")
            mints = core.MintsHelper(self.basisset())

            mints.integrals()
            core.print_out("done.\n")

            # Q: Not worth exporting all the layers of psio, right?

        follow = self.stability_analysis()
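        # stability_analysis() returns a truthy `follow` when an instability was
        # found and the orbitals were rotated along it; SCF is then re-run until
        # the wavefunction is stable or MAX_ATTEMPTS is exhausted.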

        while follow and self.attempt_number_ <= core.get_option(
                'SCF', 'MAX_ATTEMPTS'):
            self.attempt_number_ += 1
            core.print_out(
                "    Running SCF again with the rotated orbitals.\n")

            if self.initialized_diis_manager_:
                self.diis_manager().reset_subspace()
            # reading the rotated orbitals in before starting iterations
            self.form_D()
            self.set_energies("Total Energy", self.compute_initial_E())
            self.iterations()
            follow = self.stability_analysis()

        if follow and self.attempt_number_ > core.get_option(
                'SCF', 'MAX_ATTEMPTS'):
            core.print_out(
                "    There's still a negative eigenvalue. Try modifying FOLLOW_STEP_SCALE\n"
            )
            core.print_out(
                "    or increasing MAX_ATTEMPTS (not available for PK integrals).\n"
            )

    # At this point, we are not doing any more SCF cycles
    #   and we can compute and print final quantities.

    if hasattr(self.molecule(), 'EFP'):
        efpobj = self.molecule().EFP

        efpobj.compute()  # do_gradient=do_gradient)
        efpene = efpobj.get_energy(label='psi')
        efp_wfn_independent_energy = efpene['total'] - efpene['ind']
        self.set_energies("EFP", efpene['total'])

        SCFE = self.get_energies("Total Energy")
        SCFE += efp_wfn_independent_energy
        self.set_energies("Total Energy", SCFE)
        core.print_out(efpobj.energy_summary(scfefp=SCFE, label='psi'))

        self.set_variable(
            'EFP ELST ENERGY',
            efpene['electrostatic'] + efpene['charge_penetration'] +
            efpene['electrostatic_point_charges'])
        self.set_variable('EFP IND ENERGY', efpene['polarization'])
        self.set_variable('EFP DISP ENERGY', efpene['dispersion'])
        self.set_variable('EFP EXCH ENERGY', efpene['exchange_repulsion'])
        self.set_variable('EFP TOTAL ENERGY', efpene['total'])
        self.set_variable('CURRENT ENERGY', efpene['total'])

    core.print_out("\n  ==> Post-Iterations <==\n\n")

    if self.V_potential():
        quad = self.V_potential().quadrature_values()
        rho_a = quad['RHO_A'] / 2 if self.same_a_b_dens() else quad['RHO_A']
        rho_b = quad['RHO_B'] / 2 if self.same_a_b_dens() else quad['RHO_B']
        rho_ab = (rho_a + rho_b)
        self.set_variable("GRID ELECTRONS TOTAL", rho_ab)
        self.set_variable("GRID ELECTRONS ALPHA", rho_a)
        self.set_variable("GRID ELECTRONS BETA", rho_b)
        dev_a = rho_a - self.nalpha()
        dev_b = rho_b - self.nbeta()
        core.print_out(f"   Electrons on quadrature grid:\n")
        if self.same_a_b_dens():
            core.print_out(
                f"      Ntotal   = {rho_ab:15.10f} ; deviation = {dev_b+dev_a:.3e} \n\n"
            )
        else:
            core.print_out(
                f"      Nalpha   = {rho_a:15.10f} ; deviation = {dev_a:.3e}\n")
            core.print_out(
                f"      Nbeta    = {rho_b:15.10f} ; deviation = {dev_b:.3e}\n")
            core.print_out(
                f"      Ntotal   = {rho_ab:15.10f} ; deviation = {dev_b+dev_a:.3e} \n\n"
            )
        if ((dev_b + dev_a) > 0.1):
            core.print_out(
                "   WARNING: large deviation in the electron count on grid detected. Check grid size!"
            )
    self.check_phases()
    self.compute_spin_contamination()
    self.frac_renormalize()
    reference = core.get_option("SCF", "REFERENCE")

    energy = self.get_energies("Total Energy")

    #    fail_on_maxiter = core.get_option("SCF", "FAIL_ON_MAXITER")
    #    if converged or not fail_on_maxiter:
    #
    #        if print_lvl > 0:
    #            self.print_orbitals()
    #
    #        if converged:
    #            core.print_out("  Energy converged.\n\n")
    #        else:
    #            core.print_out("  Energy did not converge, but proceeding anyway.\n\n")

    if core.get_option('SCF', 'PRINT') > 0:
        self.print_orbitals()

    is_dfjk = core.get_global_option('SCF_TYPE').endswith('DF')
    core.print_out("  @%s%s Final Energy: %20.14f" %
                   ('DF-' if is_dfjk else '', reference, energy))
    # if (perturb_h_) {
    #     core.print_out(" with %f %f %f perturbation" %
    #                    (dipole_field_strength_[0], dipole_field_strength_[1], dipole_field_strength_[2]))
    # }
    core.print_out("\n\n")
    self.print_energies()

    self.clear_external_potentials()
    if core.get_option('SCF', 'PCM'):
        calc_type = core.PCM.CalcType.Total
        if core.get_option("PCM", "PCM_SCF_TYPE") == "SEPARATE":
            calc_type = core.PCM.CalcType.NucAndEle
        Dt = self.Da().clone()
        Dt.add(self.Db())
        _, Vpcm = self.get_PCM().compute_PCM_terms(Dt, calc_type)
        self.push_back_external_potential(Vpcm)
        # Set callback function for CPSCF
        self.set_external_cpscf_perturbation(
            "PCM", lambda pert_dm: self.get_PCM().compute_V(pert_dm))

    if core.get_option('SCF', 'PE'):
        Dt = self.Da().clone()
        Dt.add(self.Db())
        _, Vpe = self.pe_state.get_pe_contribution(Dt, elec_only=False)
        self.push_back_external_potential(Vpe)
        # Set callback function for CPSCF
        self.set_external_cpscf_perturbation(
            "PE", lambda pert_dm: self.pe_state.get_pe_contribution(
                pert_dm, elec_only=True)[1])

    # Properties
    #  Comments so that autodoc utility will find these PSI variables
    #  Process::environment.globals["SCF DIPOLE X"] =
    #  Process::environment.globals["SCF DIPOLE Y"] =
    #  Process::environment.globals["SCF DIPOLE Z"] =
    #  Process::environment.globals["SCF QUADRUPOLE XX"] =
    #  Process::environment.globals["SCF QUADRUPOLE XY"] =
    #  Process::environment.globals["SCF QUADRUPOLE XZ"] =
    #  Process::environment.globals["SCF QUADRUPOLE YY"] =
    #  Process::environment.globals["SCF QUADRUPOLE YZ"] =
    #  Process::environment.globals["SCF QUADRUPOLE ZZ"] =

    # Orbitals are always saved, in case an MO guess is requested later
    # save_orbitals()

    # Shove variables into global space
    for k, v in self.variables().items():
        core.set_variable(k, v)

    # TODO re-enable
    self.finalize()
    if self.V_potential():
        self.V_potential().clear_collocation_cache()

    core.print_out("\nComputation Completed\n")

    return energy
Example #12
0
def multi_level(func, **kwargs):
    """
    Use different levels of theory for different expansion levels
    See kwargs description in driver_nbody.nbody_gufunc

    :returns: *return type of func* |w--w| The data.

    :returns: (*float*, :py:class:`~psi4.core.Wavefunction`) |w--w| data and wavefunction with energy/gradient/hessian set appropriately when **return_wfn** specified.

    """
    from psi4.driver.driver_nbody import nbody_gufunc
    from psi4.driver.driver_nbody import _print_nbody_energy

    ptype = kwargs['ptype']
    return_wfn = kwargs.get('return_wfn', False)
    kwargs['return_wfn'] = True
    levels = kwargs.pop('levels')
    for i in list(levels):  # copy keys; the dict is modified during iteration
        if isinstance(i, str): levels[i.lower()] = levels.pop(i)
    supersystem = levels.pop('supersystem', False)
    molecule = kwargs.get('molecule', core.get_active_molecule())
    kwargs['bsse_type'] = [kwargs['bsse_type']] if isinstance(kwargs['bsse_type'], str) else kwargs['bsse_type']
    natoms = molecule.natom()

    # Initialize with zeros
    energy_result, gradient_result, hessian_result = 0, None, None
    energy_body_contribution = {b: {} for b in kwargs['bsse_type']}
    energy_body_dict = {b: {} for b in kwargs['bsse_type']}
    wfns = {}
    if ptype in ['gradient', 'hessian']:
        gradient_result = np.zeros((natoms, 3))
    if ptype == 'hessian':
        hessian_result = np.zeros((natoms * 3, natoms * 3))

    if kwargs.get('charge_method', False) and not kwargs.get('embedding_charges', False):
        kwargs['embedding_charges'] = compute_charges(kwargs['charge_method'],
                                      kwargs.get('charge_type', 'MULLIKEN_CHARGES').upper(), molecule)

    for n in sorted(levels)[::-1]:
        molecule.set_name('%i' %n)
        kwargs_copy = kwargs.copy()
        kwargs_copy['max_nbody'] = n
        energy_bsse_dict = {b: 0 for b in kwargs['bsse_type']}
        if isinstance(levels[n], str):
            # If a new level of theory is provided, compute contribution
            ret, wfn = nbody_gufunc(func, levels[n], **kwargs_copy)
            wfns[n] = wfn
        else:
            # For the n-body contribution, use available data from the higher order levels[n]-body
            wfn = wfns[levels[n]]

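        # m runs over {n-1, n}: sign is +1 for the n-body term (m == n) and -1 for
        # the (n-1)-body term, so the difference isolates the pure n-body effect at
        # this level of theory (m == 0 is skipped below).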
        for m in range(n - 1, n + 1):
            if m == 0: continue
            # Subtract the (n-1)-body contribution from the n-body contribution to get the n-body effect
            sign = (-1)**(1 - m // n)
            for b in kwargs['bsse_type']:
                energy_bsse_dict[b] += sign * wfn.variable('%i%s' % (m, b.lower()))
            if ptype in ['gradient', 'hessian']:
                gradient_result += sign * np.array(wfn.variable('GRADIENT ' + str(m)))
                # Keep 1-body contribution to compute interaction data
                if n == 1:
                    gradient1 = np.array(wfn.variable('GRADIENT ' + str(m)))
            if ptype == 'hessian':
                hessian_result += sign * np.array(wfn.variable('HESSIAN ' + str(m)))
                if n == 1:
                    hessian1 = np.array(wfn.variable('HESSIAN ' + str(m)))
        energy_result += energy_bsse_dict[kwargs['bsse_type'][0]]
        for b in kwargs['bsse_type']:
            energy_body_contribution[b][n] = energy_bsse_dict[b]

    if supersystem:
        # Super system recovers higher order effects at a lower level
        molecule.set_name('supersystem')
        kwargs_copy = kwargs.copy()
        kwargs_copy.pop('bsse_type')
        kwargs_copy.pop('ptype')
        ret, wfn_super = func(supersystem, **kwargs_copy)
        core.clean()
        kwargs_copy = kwargs.copy()
        kwargs_copy['bsse_type'] = 'nocp'
        kwargs_copy['max_nbody'] = max(levels)
        # Subtract lower order effects to avoid double counting
        ret, wfn = nbody_gufunc(func, supersystem, **kwargs_copy)
        energy_result += wfn_super.energy() - wfn.variable(str(max(levels)))
        for b in kwargs['bsse_type']:
            energy_body_contribution[b][molecule.nfragments()] = wfn_super.energy() - wfn.variable(
                str(max(levels)))

        if ptype in ['gradient', 'hessian']:
            gradient_result += np.array(wfn_super.gradient()) - np.array(wfn.variable('GRADIENT ' + str(max(levels))))
        if ptype == 'hessian':
            hessian_result += np.array(wfn_super.hessian()) - np.array(wfn.variable('HESSIAN ' + str(max(levels))))
        levels['supersystem'] = supersystem

    for b in kwargs['bsse_type']:
        for n in energy_body_contribution[b]:
            energy_body_dict[b][n] = sum(
                [energy_body_contribution[b][i] for i in range(1, n + 1) if i in energy_body_contribution[b]])

    is_embedded = kwargs.get('embedding_charges', False) or kwargs.get('charge_method', False)
    for b in kwargs['bsse_type']:
        _print_nbody_energy(energy_body_dict[b], '%s-corrected multilevel many-body expansion' % b.upper(),
                            is_embedded)

    if not kwargs['return_total_data']:
        # Remove monomer contribution for interaction data
        energy_result -= energy_body_dict[kwargs['bsse_type'][0]][1]
        if ptype in ['gradient', 'hessian']:
            gradient_result -= gradient1
        if ptype == 'hessian':
            hessian_result -= hessian1
    wfn_out = core.Wavefunction.build(molecule, 'def2-svp')
    core.set_variable("CURRENT ENERGY", energy_result)
    wfn_out.set_variable("CURRENT ENERGY", energy_result)
    gradient_result = core.Matrix.from_array(gradient_result) if gradient_result is not None else None
    wfn_out.set_gradient(gradient_result)
    hessian_result = core.Matrix.from_array(hessian_result) if hessian_result is not None else None
    wfn_out.set_hessian(hessian_result)
    ptype_result = {'energy': energy_result,
                    'gradient': gradient_result,
                    'hessian': hessian_result}[ptype]
    for b in kwargs['bsse_type']:
        for i in energy_body_dict[b]:
            wfn_out.set_variable(str(i) + b, energy_body_dict[b][i])

    if kwargs['return_wfn']:
        return (ptype_result, wfn_out)
    else:
        return ptype_result
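
# A minimal standalone sketch (not part of the driver above) of how per-order
# contributions are rolled up into the cumulative n-body totals stored in
# energy_body_dict. The input dictionary is a hypothetical stand-in for
# energy_body_contribution[b], with toy values in Hartree.
def _cumulative_body_totals(contributions):
    """Turn per-order contributions {1: e1, 2: e2, ...} into running totals."""
    totals, running = {}, 0.0
    for n in sorted(contributions):
        running += contributions[n]
        totals[n] = running
    return totals

# _cumulative_body_totals({1: -152.1, 2: -0.008, 3: -0.0004})
# -> roughly {1: -152.1, 2: -152.108, 3: -152.1084}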
Example #13
0
    def compute_energy(self,
                       molecule: core.Molecule,
                       wfn: core.Wavefunction = None) -> float:
        """Compute dispersion energy based on engine, dispersion level, and parameters in `self`.

        Parameters
        ----------
        molecule
            System for which to compute empirical dispersion correction.
        wfn
            Location to set QCVariables

        Returns
        -------
        float
            Dispersion energy [Eh].

        Notes
        -----
        :psivar:`DISPERSION CORRECTION ENERGY`
            Disp always set. Overridden in SCF finalization, but that only changes for "-3C" methods.
        :psivar:`fctl DISPERSION CORRECTION ENERGY`
            Set if :py:attr:`fctldash` nonempty.

        """
        if self.engine in ['dftd3', 'mp2d']:
            resi = AtomicInput(
                **{
                    'driver': 'energy',
                    'model': {
                        'method': self.fctldash,
                        'basis': '(auto)',
                    },
                    'keywords': {
                        'level_hint': self.dashlevel,
                        'params_tweaks': self.dashparams,
                        'dashcoeff_supplement': self.dashcoeff_supplement,
                        'pair_resolved': self.save_pairwise_disp,
                        'verbose': 1,
                    },
                    'molecule': molecule.to_schema(dtype=2),
                    'provenance': p4util.provenance_stamp(__name__),
                })
            jobrec = qcng.compute(
                resi,
                self.engine,
                raise_error=True,
                local_options={
                    "scratch_directory":
                    core.IOManager.shared_object().get_default_path()
                })

            dashd_part = float(
                jobrec.extras['qcvars']['DISPERSION CORRECTION ENERGY'])
            if wfn is not None:
                for k, qca in jobrec.extras['qcvars'].items():
                    wfn.set_variable(
                        k,
                        float(qca) if isinstance(qca, str) else qca)

                # Pass along the pairwise dispersion decomposition if we need it
                if self.save_pairwise_disp is True:
                    wfn.set_variable(
                        "PAIRWISE DISPERSION CORRECTION ANALYSIS",
                        jobrec.extras['qcvars']
                        ["2-BODY PAIRWISE DISPERSION CORRECTION ANALYSIS"])

            if self.fctldash in ['hf3c', 'pbeh3c']:
                jobrec = qcng.compute(
                    resi,
                    "gcp",
                    raise_error=True,
                    local_options={
                        "scratch_directory":
                        core.IOManager.shared_object().get_default_path()
                    })
                gcp_part = jobrec.return_result
                dashd_part += gcp_part

            return dashd_part

        else:
            ene = self.disp.compute_energy(molecule)
            core.set_variable('DISPERSION CORRECTION ENERGY', ene)
            if self.fctldash:
                core.set_variable(
                    f"{self.fctldash} DISPERSION CORRECTION ENERGY", ene)
            return ene
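
# Hedged usage sketch for the 'dftd3'/'mp2d' branch above: build an equivalent
# QCSchema AtomicInput by hand and hand it to QCEngine. Requires qcelemental
# and qcengine; the commented compute() call additionally needs the classic
# dftd3 executable on PATH. The keyword names below simply mirror the call
# above and are not guaranteed for every qcengine version.
import qcengine as qcng
from qcelemental.models import AtomicInput, Molecule

ne2 = Molecule(symbols=["Ne", "Ne"], geometry=[0.0, 0.0, 0.0, 0.0, 0.0, 5.6])
disp_input = AtomicInput(
    molecule=ne2,
    driver="energy",
    model={"method": "b3lyp", "basis": "(auto)"},
    keywords={"level_hint": "d3bj", "verbose": 1},
)
# result = qcng.compute(disp_input, "dftd3", raise_error=True)
# print(result.extras["qcvars"]["DISPERSION CORRECTION ENERGY"])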
Example #14
0
def run_gcp(self,
            func=None,
            dertype=None,
            verbose=False):  # dashlvl=None, dashparam=None
    """Function to call Grimme's dftd3 program (http://toc.uni-muenster.de/DFTD3/)
    to compute the -D correction of level *dashlvl* using parameters for
    the functional *func*. The dictionary *dashparam* can be used to supply
    a full set of dispersion parameters in the absense of *func* or to supply
    individual overrides in the presence of *func*. Returns energy if *dertype* is 0,
    gradient if *dertype* is 1, else tuple of energy and gradient if *dertype*
    unspecified. The dftd3 executable must be independently compiled and found in
    :envvar:`PATH` or :envvar:`PSIPATH`.
    *self* may be either a qcdb.Molecule (sensibly) or a psi4.Molecule
    (works b/c psi4.Molecule has been extended by this method py-side and
    only public interface fns used) or a string that can be instantiated
    into a qcdb.Molecule.

    """
    # Create (if necessary) and update qcdb.Molecule
    if isinstance(self, Molecule):
        # called on a qcdb.Molecule
        pass
    elif isinstance(self, core.Molecule):
        # called on a python export of a psi4.core.Molecule (py-side through Psi4's driver)
        self.create_psi4_string_from_molecule()
    elif isinstance(self, str):
        # called on a string representation of a psi4.Molecule (c-side through psi4.Dispersion)
        self = Molecule(self)
    else:
        raise ValidationError(
            """Argument mol must be psi4string or qcdb.Molecule""")
    self.update_geometry()

    #    # Validate arguments
    #    dashlvl = dashlvl.lower()
    #    dashlvl = dash_alias['-' + dashlvl][1:] if ('-' + dashlvl) in dash_alias.keys() else dashlvl
    #    if dashlvl not in dashcoeff.keys():
    #        raise ValidationError("""-D correction level %s is not available. Choose among %s.""" % (dashlvl, dashcoeff.keys()))

    if dertype is None:
        dertype = -1
    elif der0th.match(str(dertype)):
        dertype = 0
    elif der1st.match(str(dertype)):
        dertype = 1
#    elif der2nd.match(str(dertype)):
#        raise ValidationError('Requested derivative level \'dertype\' %s not valid for run_dftd3.' % (dertype))
    else:
        raise ValidationError(
            'Requested derivative level \'dertype\' %s not valid for run_gcp.'
            % (dertype))

#    if func is None:
#        if dashparam is None:
#            # defunct case
#            raise ValidationError("""Parameters for -D correction missing. Provide a func or a dashparam kwarg.""")
#        else:
#            # case where all param read from dashparam dict (which must have all correct keys)
#            func = 'custom'
#            dashcoeff[dashlvl][func] = {}
#            dashparam = dict((k.lower(), v) for k, v in dashparam.iteritems())
#            for key in dashcoeff[dashlvl]['b3lyp'].keys():
#                if key in dashparam.keys():
#                    dashcoeff[dashlvl][func][key] = dashparam[key]
#                else:
#                    raise ValidationError("""Parameter %s is missing from dashparam dict %s.""" % (key, dashparam))
#    else:
#        func = func.lower()
#        if func not in dashcoeff[dashlvl].keys():
#            raise ValidationError("""Functional %s is not available for -D level %s.""" % (func, dashlvl))
#        if dashparam is None:
#            # (normal) case where all param taken from dashcoeff above
#            pass
#        else:
#            # case where items in dashparam dict can override param taken from dashcoeff above
#            dashparam = dict((k.lower(), v) for k, v in dashparam.iteritems())
#            for key in dashcoeff[dashlvl]['b3lyp'].keys():
#                if key in dashparam.keys():
#                    dashcoeff[dashlvl][func][key] = dashparam[key]

# TODO temp until figure out paramfile
    allowed_funcs = [
        'HF/MINIS',
        'DFT/MINIS',
        'HF/MINIX',
        'DFT/MINIX',
        'HF/SV',
        'DFT/SV',
        'HF/def2-SV(P)',
        'DFT/def2-SV(P)',
        'HF/def2-SVP',
        'DFT/def2-SVP',
        'HF/DZP',
        'DFT/DZP',
        'HF/def-TZVP',
        'DFT/def-TZVP',
        'HF/def2-TZVP',
        'DFT/def2-TZVP',
        'HF/631Gd',
        'DFT/631Gd',
        'HF/cc-pVDZ',
        'DFT/cc-pVDZ',
        'HF/aug-cc-pVDZ',
        'DFT/aug-cc-pVDZ',
        'DFT/SV(P/h,c)',
        'DFT/LANL',
        'DFT/pobTZVP',
        'TPSS/def2-SVP',
        'PW6B95/def2-SVP',
        # specials
        'hf3c',
        'pbeh3c'
    ]
    allowed_funcs = [f.lower() for f in allowed_funcs]
    if func.lower() not in allowed_funcs:
        raise Dftd3Error("""bad gCP func: %s. need one of: %r""" %
                         (func, allowed_funcs))

    # Move ~/.dftd3par.<hostname> out of the way so it won't interfere
    defaultfile = os.path.expanduser(
        '~') + '/.dftd3par.' + socket.gethostname()
    defmoved = False
    if os.path.isfile(defaultfile):
        os.rename(defaultfile, defaultfile + '_hide')
        defmoved = True

    # Find environment by merging PSIPATH and PATH environment variables
    lenv = {
        'PATH': ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) + \
                ':' + os.environ.get('PATH'),
        'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH')
        }
    #   Filter out None values as subprocess will fault on them
    lenv = {k: v for k, v in lenv.items() if v is not None}

    # Find out if running from Psi4 for scratch details and such
    try:
        import psi4
    except ImportError as err:
        isP4regime = False
    else:
        isP4regime = True

    # Setup unique scratch directory and move in
    current_directory = os.getcwd()
    if isP4regime:
        psioh = core.IOManager.shared_object()
        psio = core.IO.shared_object()
        os.chdir(psioh.get_default_path())
        gcp_tmpdir = 'psi.' + str(os.getpid()) + '.' + psio.get_default_namespace() + \
            '.gcp.' + str(uuid.uuid4())[:8]
    else:
        gcp_tmpdir = os.environ['HOME'] + os.sep + 'gcp_' + str(
            uuid.uuid4())[:8]
    if os.path.exists(gcp_tmpdir) is False:
        os.mkdir(gcp_tmpdir)
    os.chdir(gcp_tmpdir)

    # Write gcp_parameters file that governs cp correction
    #    paramcontents = gcp_server(func, dashlvl, 'dftd3')
    #    paramfile1 = 'dftd3_parameters'  # older patched name
    #    with open(paramfile1, 'w') as handle:
    #        handle.write(paramcontents)
    #    paramfile2 = '.gcppar'
    #    with open(paramfile2, 'w') as handle:
    #        handle.write(paramcontents)

    ###Two kinds of parameter files can be read in: A short and an extended version. Both are read from
    ###$HOME/.gcppar.$HOSTNAME by default. If the option -local is specified the file is read in from
    ###the current working directory: .gcppar
    ###The short version reads in: basis-keywo

    # Write dftd3_geometry file that supplies geometry to dispersion calc
    numAtoms = self.natom()
    geom = self.save_string_xyz()
    reals = []
    for line in geom.splitlines():
        lline = line.split()
        if len(lline) != 4:
            continue
        if lline[0] == 'Gh':
            numAtoms -= 1
        else:
            reals.append(line)

    geomtext = str(numAtoms) + '\n\n'
    for line in reals:
        geomtext += line.strip() + '\n'
    geomfile = './gcp_geometry.xyz'
    with open(geomfile, 'w') as handle:
        handle.write(geomtext)
    # TODO somehow the variations on save_string_xyz and
    #   whether natom and chgmult does or doesn't get written
    #   have gotten all tangled. I fear this doesn't work
    #   the same btwn libmints and qcdb or for ghosts

    # Call gcp program
    command = ['gcp', geomfile]
    command.extend(['-level', func])
    if dertype != 0:
        command.append('-grad')
    try:
        #print('command', command)
        dashout = subprocess.Popen(command, stdout=subprocess.PIPE, env=lenv)
    except OSError as e:
        raise ValidationError('Program gcp not found in path. %s' % e)
    out, err = dashout.communicate()

    # Parse output
    success = False
    for line in out.splitlines():
        line = line.decode('utf-8')
        if re.match('  Egcp:', line):
            sline = line.split()
            dashd = float(sline[1])
        if re.match('     normal termination of gCP', line):
            success = True

    if not success:
        os.chdir(current_directory)
        raise Dftd3Error("""Unsuccessful gCP run.""")

    # Parse grad output
    if dertype != 0:
        derivfile = './gcp_gradient'
        dashdderiv = []
        with open(derivfile, 'r') as dfile:
            for line in geom.splitlines():
                lline = line.split()
                if len(lline) != 4:
                    continue
                if lline[0] == 'Gh':
                    dashdderiv.append([0.0, 0.0, 0.0])
                else:
                    dashdderiv.append([
                        float(x.replace('D', 'E'))
                        for x in dfile.readline().split()
                    ])

        if len(dashdderiv) != self.natom():
            raise ValidationError('Program gcp gradient file has %d atoms- %d expected.' % \
                (len(dashdderiv), self.natom()))

    # Prepare results for Psi4
    if isP4regime and dertype != 0:
        core.set_variable('GCP CORRECTION ENERGY', dashd)
        psi_dashdderiv = core.Matrix.from_list(dashdderiv)

    # Print program output to file if verbose
    if not verbose and isP4regime:
        verbose = True if core.get_option('SCF', 'PRINT') >= 3 else False
    if verbose:

        text = '\n  ==> GCP Output <==\n'
        text += out.decode('utf-8')
        if dertype != 0:
            with open(derivfile, 'r') as handle:
                text += handle.read().replace('D', 'E')
            text += '\n'
        if isP4regime:
            core.print_out(text)
        else:
            print(text)


#    # Clean up files and remove scratch directory
#    os.unlink(paramfile1)
#    os.unlink(paramfile2)
#    os.unlink(geomfile)
#    if dertype != 0:
#        os.unlink(derivfile)

    os.chdir('..')
    #    try:
    #        shutil.rmtree(gcp_tmpdir)
    #    except OSError as e:
    #        ValidationError('Unable to remove gcp temporary directory %s' % e)
    os.chdir(current_directory)

    # Restore the ~/.dftd3par.<hostname> file moved aside earlier
    if defmoved is True:
        os.rename(defaultfile + '_hide', defaultfile)

    # return -D & d(-D)/dx
    if dertype == -1:
        return dashd, dashdderiv
    elif dertype == 0:
        return dashd
    elif dertype == 1:
        return psi_dashdderiv
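
# Self-contained sketch of the stdout parsing assumed above: pull the gCP
# energy and the normal-termination flag out of captured program output. The
# sample text is illustrative only, not real gcp output.
import re

def parse_gcp_stdout(text):
    """Return (energy, success) parsed from gcp stdout text."""
    energy, success = None, False
    for line in text.splitlines():
        if re.match('  Egcp:', line):
            energy = float(line.split()[1])
        if re.match('     normal termination of gCP', line):
            success = True
    return energy, success

sample = "  Egcp:      -0.01234567 a.u.\n     normal termination of gCP\n"
print(parse_gcp_stdout(sample))  # (-0.01234567, True)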
Example #15
0
def nbody_gufunc(func, method_string, **kwargs):
    """
    Computes the nbody interaction energy, gradient, or Hessian depending on input.
    This is a generalized universal function for computing interaction quantities.

    :returns: *return type of func* |w--w| The interaction data.

    :returns: (*float*, :py:class:`~psi4.core.Wavefunction`) |w--w| interaction data and wavefunction with energy/gradient/hessian set appropriately when **return_wfn** specified.

    :type func: function
    :param func: ``energy`` || etc.

        Python function that accepts method_string and a molecule. Returns an
        energy, gradient, or Hessian as requested.

    :type method_string: string
    :param method_string: ``'scf'`` || ``'mp2'`` || ``'ci5'`` || etc.

        First argument, lowercase and usually unlabeled. Indicates the computational
        method to be passed to func.

    :type molecule: :ref:`molecule <op_py_molecule>`
    :param molecule: ``h2o`` || etc.

        The target molecule, if not the last molecule defined.

    :type return_wfn: :ref:`boolean <op_py_boolean>`
    :param return_wfn: ``'on'`` || |dl| ``'off'`` |dr|

        Indicate to additionally return the :py:class:`~psi4.core.Wavefunction`
        calculation result as the second element of a tuple.

    :type bsse_type: string or list
    :param bsse_type: ``'cp'`` || ``['nocp', 'vmfc']`` || |dl| ``None`` |dr| || etc.

        Type of BSSE correction to compute: CP, NoCP, or VMFC. The first in this
        list is returned by this function. By default, this function is not called.

    :type max_nbody: int
    :param max_nbody: ``3`` || etc.

        Maximum n-body to compute, cannot exceed the number of fragments in the molecule.

    :type ptype: string
    :param ptype: ``'energy'`` || ``'gradient'`` || ``'hessian'``

        Type of the procedure passed in.

    :type return_total_data: :ref:`boolean <op_py_boolean>`
    :param return_total_data: ``'on'`` || |dl| ``'off'`` |dr|

        If True returns the total data (energy/gradient/etc) of the system,
        otherwise returns interaction data.
    """

    # Initialize dictionaries for easy data passing
    metadata, component_results, nbody_results = {}, {}, {}

    # Parse some kwargs
    kwargs = p4util.kwargs_lower(kwargs)
    metadata['ptype'] = kwargs.pop('ptype', None)
    metadata['return_wfn'] = kwargs.pop('return_wfn', False)
    metadata['return_total_data'] = kwargs.pop('return_total_data', False)
    metadata['molecule'] = kwargs.pop('molecule', core.get_active_molecule())
    metadata['molecule'].update_geometry()
    metadata['kwargs'] = kwargs
    core.clean_variables()

    if metadata['ptype'] not in ['energy', 'gradient', 'hessian']:
        raise ValidationError("""N-Body driver: The ptype '%s' is not regonized.""" % metadata['ptype'])

    # Parse bsse_type, raise exception if not provided or unrecognized
    metadata['bsse_type_list'] = kwargs.pop('bsse_type')
    if metadata['bsse_type_list'] is None:
        raise ValidationError("N-Body GUFunc: Must pass a bsse_type")
    if not isinstance(metadata['bsse_type_list'], list):
        metadata['bsse_type_list'] = [metadata['bsse_type_list']]

    for num, btype in enumerate(metadata['bsse_type_list']):
        metadata['bsse_type_list'][num] = btype.lower()
        if btype.lower() not in ['cp', 'nocp', 'vmfc']:
            raise ValidationError("N-Body GUFunc: bsse_type '%s' is not recognized" % btype.lower())

    metadata['max_nbody'] = kwargs.get('max_nbody', -1)
    metadata['max_frag'] = metadata['molecule'].nfragments()
    if metadata['max_nbody'] == -1:
        metadata['max_nbody'] = metadata['molecule'].nfragments()
    else:
        metadata['max_nbody'] = min(metadata['max_nbody'], metadata['max_frag'])

    # Flip this off for now, needs more testing
    # If we are doing CP lets save them integrals
    #if 'cp' in bsse_type_list and (len(bsse_type_list) == 1):
    #    # Set to save RI integrals for repeated full-basis computations
    #    ri_ints_io = core.get_global_option('DF_INTS_IO')

    #    # inquire if above at all applies to dfmp2 or just scf
    #    core.set_global_option('DF_INTS_IO', 'SAVE')
    #    psioh = core.IOManager.shared_object()
    #    psioh.set_specific_retention(97, True)

    bsse_str = metadata['bsse_type_list'][0]
    if len(metadata['bsse_type_list']) > 1:
        bsse_str = str(metadata['bsse_type_list'])
    core.print_out("\n\n")
    core.print_out("   ===> N-Body Interaction Abacus <===\n")
    core.print_out("        BSSE Treatment:                     %s\n" % bsse_str)

    # Get compute list
    metadata = build_nbody_compute_list(metadata)

    # Compute N-Body components
    component_results = compute_nbody_components(func, method_string, metadata)

    # Assemble N-Body quantities
    nbody_results = assemble_nbody_components(metadata, component_results)

    # Figure out returns
    if metadata['ptype'] != 'energy':
        if metadata['return_total_data']:
            np_final_ptype = nbody_results['ptype_body_dict'][metadata['max_nbody']].copy()
        else:
            np_final_ptype = nbody_results['ptype_body_dict'][metadata['max_nbody']].copy()
            np_final_ptype -= nbody_results['ptype_body_dict'][1]

        nbody_results['ret_ptype'] = core.Matrix.from_array(np_final_ptype)
    else:
        nbody_results['ret_ptype'] = nbody_results['ret_energy']

    # Build wfn and bind variables
    wfn = core.Wavefunction.build(metadata['molecule'], 'def2-svp')
    dicts = ['energies', 'ptype', 'intermediates', 'energy_body_dict', 'ptype_body_dict', 'nbody']
    for r in [component_results, nbody_results]:
        for d in r:
            if d in dicts:
                for var, value in r[d].items():
                    wfn.set_variable(str(var), value)
                    core.set_variable(str(var), value)

    if metadata['ptype'] == 'gradient':
        wfn.set_gradient(nbody_results['ret_ptype'])
    elif metadata['ptype'] == 'hessian':
        wfn.set_hessian(nbody_results['ret_ptype'])

    core.set_variable("CURRENT ENERGY", nbody_results['ret_energy'])
    wfn.set_variable("CURRENT ENERGY", nbody_results['ret_energy'])

    if metadata['return_wfn']:
        return (nbody_results['ret_ptype'], wfn)
    else:
        return nbody_results['ret_ptype']
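
# Standalone sketch of the bsse_type normalization documented above: accept a
# string or a list, lowercase each entry, and reject anything outside
# cp/nocp/vmfc. Mirrors the validation block in nbody_gufunc.
def _normalize_bsse_type(bsse_type):
    if bsse_type is None:
        raise ValueError("N-Body GUFunc: Must pass a bsse_type")
    if not isinstance(bsse_type, list):
        bsse_type = [bsse_type]
    normalized = []
    for btype in bsse_type:
        b = btype.lower()
        if b not in ('cp', 'nocp', 'vmfc'):
            raise ValueError("N-Body GUFunc: bsse_type '%s' is not recognized" % b)
        normalized.append(b)
    return normalized

# _normalize_bsse_type('CP')             -> ['cp']
# _normalize_bsse_type(['nocp', 'VMFC']) -> ['nocp', 'vmfc']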
Example #16
0
def print_sapt_dft_summary(data, name, short=False):

    ret = "   %s Results\n" % name
    ret += "  " + "-" * 97 + "\n"

    # Elst
    ret += print_sapt_var("Electrostatics", data["Elst10,r"]) + "\n"
    ret += print_sapt_var("  Elst1,r", data["Elst10,r"]) + "\n"
    ret += "\n"
    core.set_variable("SAPT ELST ENERGY", data["Elst10,r"])

    # Exchange
    ret += print_sapt_var("Exchange", data["Exch10"]) + "\n"
    ret += print_sapt_var("  Exch1", data["Exch10"]) + "\n"
    ret += print_sapt_var("  Exch1(S^2)", data["Exch10(S^2)"]) + "\n"
    ret += "\n"
    core.set_variable("SAPT EXCH ENERGY", data["Exch10"])

    # Induction
    ind = data["Ind20,r"] + data["Exch-Ind20,r"]
    ind_ab = data["Ind20,r (A<-B)"] + data["Exch-Ind20,r (A<-B)"]
    ind_ba = data["Ind20,r (A->B)"] + data["Exch-Ind20,r (A->B)"]

    if "Delta HF Correction" in list(data):
        ind +=  data["Delta HF Correction"]

    ret += print_sapt_var("Induction", ind) + "\n"

    ret += print_sapt_var("  Ind2,r", data["Ind20,r"]) + "\n"
    ret += print_sapt_var("  Exch-Ind2,r", data["Exch-Ind20,r"]) + "\n"
    ret += print_sapt_var("  Induction (A<-B)", ind_ab) + "\n"
    ret += print_sapt_var("  Induction (A->B)", ind_ba) + "\n"

    if "Delta HF Correction" in list(data):
        ret += print_sapt_var("  delta HF,r (2)", data["Delta HF Correction"]) + "\n"

    ret += "\n"
    core.set_variable("SAPT IND ENERGY", ind)

    # Dispersion
    disp = data["Disp20"] + data["Exch-Disp20,u"]
    ret += print_sapt_var("Dispersion", disp) + "\n"
    ret += print_sapt_var("  Disp2,r", data["Disp20"]) + "\n"
    ret += print_sapt_var("  Disp2,u", data["Disp20,u"]) + "\n"
    ret += print_sapt_var("  Exch-Disp2,u", data["Exch-Disp20,u"]) + "\n"
    ret += "\n"
    core.set_variable("SAPT DISP ENERGY", disp)

    # Total energy
    total = data["Elst10,r"] + data["Exch10"] + ind + disp
    ret += print_sapt_var("Total %-15s" % name, total, start_spacer="   ") + "\n"
    core.set_variable("SAPT(DFT) TOTAL ENERGY", total)
    core.set_variable("SAPT TOTAL ENERGY", total)
    core.set_variable("CURRENT ENERGY", total)

    ret += "  " + "-" * 97 + "\n"
    return ret
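
# Toy illustration (made-up values in Hartree) of how the composites above are
# assembled from the individual SAPT terms before being bound as QCVariables.
data = {"Elst10,r": -0.010, "Exch10": 0.008,
        "Ind20,r": -0.003, "Exch-Ind20,r": 0.001,
        "Disp20": -0.004, "Exch-Disp20,u": 0.0005}
ind = data["Ind20,r"] + data["Exch-Ind20,r"]              # -0.002
disp = data["Disp20"] + data["Exch-Disp20,u"]             # -0.0035
total = data["Elst10,r"] + data["Exch10"] + ind + disp    # -0.0075
print(ind, disp, total)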
Example #17
0
def print_sapt_hf_summary(data, name, short=False, delta_hf=False):

    ret = "   %s Results\n" % name
    ret += "  " + "-" * 97 + "\n"

    # Elst
    ret += print_sapt_var("Electrostatics", data["Elst10,r"]) + "\n"
    ret += print_sapt_var("  Elst10,r", data["Elst10,r"]) + "\n"
    ret += "\n"
    core.set_variable("SAPT ELST ENERGY", data["Elst10,r"])

    # Exchange
    ret += print_sapt_var("Exchange", data["Exch10"]) + "\n"
    ret += print_sapt_var("  Exch10", data["Exch10"]) + "\n"
    ret += print_sapt_var("  Exch10(S^2)", data["Exch10(S^2)"]) + "\n"
    ret += "\n"
    core.set_variable("SAPT EXCH ENERGY", data["Exch10"])


    ind = data["Ind20,r"] + data["Exch-Ind20,r"]
    ind_ab = data["Ind20,r (A<-B)"] + data["Exch-Ind20,r (A<-B)"]
    ind_ba = data["Ind20,r (A->B)"] + data["Exch-Ind20,r (A->B)"]

    ret += print_sapt_var("Induction", ind) + "\n"
    ret += print_sapt_var("  Ind20,r", data["Ind20,r"]) + "\n"
    ret += print_sapt_var("  Exch-Ind20,r", data["Exch-Ind20,r"]) + "\n"
    ret += print_sapt_var("  Induction (A<-B)", ind_ab) + "\n"
    ret += print_sapt_var("  Induction (A->B)", ind_ba) + "\n"
    ret += "\n"
    core.set_variable("SAPT IND ENERGY", ind)

    if delta_hf:
        total_sapt = (data["Elst10,r"] + data["Exch10"] + ind)
        sapt_hf_delta = delta_hf - total_sapt

        core.set_variable("SAPT(DFT) Delta HF", sapt_hf_delta)

        ret += print_sapt_var("%-21s" % "Total SAPT", total_sapt, start_spacer="   ") + "\n"
        ret += print_sapt_var("%-21s" % "Total HF", delta_hf, start_spacer="   ") + "\n"
        ret += print_sapt_var("%-21s" % "Delta HF", sapt_hf_delta, start_spacer="   ") + "\n"

        ret += "  " + "-" * 97 + "\n"
        return ret

    else:
        # Dispersion
        disp = data["Disp20"] + data["Exch-Disp20,u"]
        ret += print_sapt_var("Dispersion", disp) + "\n"
        ret += print_sapt_var("  Disp20", data["Disp20,u"]) + "\n"
        ret += print_sapt_var("  Exch-Disp20", data["Exch-Disp20,u"]) + "\n"
        ret += "\n"
        core.set_variable("SAPT DISP ENERGY", disp)

        # Total energy
        total = data["Elst10,r"] + data["Exch10"] + ind + disp
        ret += print_sapt_var("Total %-15s" % name, total, start_spacer="   ") + "\n"
        core.set_variable("SAPT0 TOTAL ENERGY", total)
        core.set_variable("SAPT TOTAL ENERGY", total)
        core.set_variable("CURRENT ENERGY", total)

        ret += "  " + "-" * 97 + "\n"
        return ret
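
# Illustrative arithmetic (made-up values in Hartree) for the delta_hf branch
# above: the delta-HF term is the supermolecular HF interaction energy passed
# in as `delta_hf` minus the sum of the first-order and induction SAPT terms.
total_sapt = -0.010 + 0.008 + (-0.002)   # Elst10,r + Exch10 + ind
delta_hf = -0.0045                       # HF interaction energy handed to the printer
sapt_hf_delta = delta_hf - total_sapt    # approximately -0.0005
print(sapt_hf_delta)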
Example #18
0
    def compute_energy(self, molecule: 'psi4.core.Molecule', wfn: 'psi4.core.Wavefunction' = None) -> float:
        """Compute dispersion energy based on engine, dispersion level, and parameters in `self`.

        Parameters
        ----------
        molecule : psi4.core.Molecule
            System for which to compute empirical dispersion correction.
        wfn :
            Location to set QCVariables

        Returns
        -------
        float
            Dispersion energy [Eh].

        Notes
        -----
        DISPERSION CORRECTION ENERGY
            Disp always set. Overridden in SCF finalization, but that only changes for "-3C" methods.
        self.fctldash + DISPERSION CORRECTION ENERGY
            Set if `fctldash` nonempty.

        """
        if self.engine in ['dftd3', 'mp2d']:
            resi = AtomicInput(
                **{
                    'driver': 'energy',
                    'model': {
                        'method': self.fctldash,
                        'basis': '(auto)',
                    },
                    'keywords': {
                        'level_hint': self.dashlevel,
                        'params_tweaks': self.dashparams,
                        'dashcoeff_supplement': self.dashcoeff_supplement,
                        'verbose': 1,
                    },
                    'molecule': molecule.to_schema(dtype=2),
                    'provenance': p4util.provenance_stamp(__name__),
                })
            jobrec = qcng.compute(resi, self.engine, raise_error=True,
                                  local_options={"scratch_directory": core.IOManager.shared_object().get_default_path()})

            dashd_part = float(jobrec.extras['qcvars']['DISPERSION CORRECTION ENERGY'])
            if wfn is not None:
                for k, qca in jobrec.extras['qcvars'].items():
                    if 'CURRENT' not in k:
                        wfn.set_variable(k, p4util.plump_qcvar(qca, k))

            if self.fctldash in ['hf3c', 'pbeh3c']:
                gcp_part = gcp.run_gcp(molecule, self.fctldash, verbose=False, dertype=0)
                dashd_part += gcp_part

            return dashd_part

        else:
            ene = self.disp.compute_energy(molecule)
            core.set_variable('DISPERSION CORRECTION ENERGY', ene)
            if self.fctldash:
                core.set_variable('{} DISPERSION CORRECTION ENERGY'.format(self.fctldash), ene)
            return ene
Example #19
0
def run_sns_mp2(name, molecule, **kwargs):
    """Run the SNS-MP2 calculation
    """
    if len(kwargs) > 0:
        core.print_out('Unrecognized options: %s' % str(kwargs))

    if parse_version(psi4.__version__) < parse_version('1.3rc2'):
        raise ImportError(
            'Psi4 {:s} is not compatible (v1.3+ necessary)'.format(
                psi4.__version__))

    # Force to c1
    molecule = molecule.clone()
    molecule.reset_point_group('c1')
    molecule.fix_orientation(True)
    molecule.fix_com(True)
    molecule.update_geometry()

    nfrag = molecule.nfragments()
    if nfrag != 2:
        raise ValueError(
            'NN-MP2 requires active molecule to have 2 fragments, not %s.' %
            (nfrag))

    LOW = 'DESAVTZ'
    HIGH = 'DESAVQZ'
    inject_desres_basis()

    with WavefunctionCache(molecule, low=LOW, high=HIGH) as c:

        m1mlow = c.compute('m1', 'm', 'low', mp2=False)
        m2mlow = c.compute('m2', 'm', 'low', mp2=False)
        m1mhigh = c.compute('m1', 'm', 'high', mp2=True, mp2_dm=True)
        m2mhigh = c.compute('m2', 'm', 'high', mp2=True, mp2_dm=True)
        m1dlow = c.compute('m1', 'd', 'low', mp2=True)
        m2dlow = c.compute('m2', 'd', 'low', mp2=True)
        m1dhigh = c.compute('m1', 'd', 'high', mp2=True)
        m2dhigh = c.compute('m2', 'd', 'high', mp2=True)
        ddlow = c.compute('d', 'd', 'low', mp2=True)
        ddhigh = c.compute('d', 'd', 'high', mp2=True)

        @psiopts(
            'SCF_TYPE DF',
            'BASIS %s' % HIGH,
            'DF_BASIS_SCF %s-jkfit' % HIGH,
            'DF_BASIS_MP2 %s-ri' % HIGH,
            'DF_INTS_IO LOAD',
        )
        def run_espx():
            core.tstart()
            p4util.banner('ESPX')

            dimer_basis = ddhigh.basisset()
            aux_basis = core.BasisSet.build(molecule, "DF_BASIS_SCF",
                                            HIGH + '-JKFIT', "JKFIT", HIGH)

            decomp = ESHLOVLPDecomposition(m1_basis=m1mhigh.basisset(),
                                           m2_basis=m2mhigh.basisset(),
                                           dimer_basis=dimer_basis,
                                           dimer_aux_basis=aux_basis)

            esovlpfunc = decomp.esovlp()
            eshf, ovlhf = esovlpfunc(m1mhigh.reference_wavefunction(),
                                     m2mhigh.reference_wavefunction())
            esmp, ovlmp = esovlpfunc(m1mhigh, m2mhigh)

            del esovlpfunc

            hlfunc = decomp.hl()
            hl = hlfunc(m1mhigh.reference_wavefunction(),
                        m2mhigh.reference_wavefunction())

            core.tstop()
            return {
                'eshf': eshf,
                'ovlhf': ovlhf,
                'esmp': esmp,
                'ovlmp': ovlmp,
                'hl': hl
            }, dimer_basis

        ###################################################################

        @psiopts(
            'SAPT SAPT_LEVEL SAPT0',
            'SAPT E_CONVERGENCE 10e-10',
            'SAPT D_CONVERGENCE 10e-10',
            'SAPT NAT_ORBS_T2 TRUE',
            'SCF_TYPE DF',
            'BASIS %s' % LOW,
            'DF_BASIS_SAPT %s-RI' % LOW,
        )
        def run_sapt():
            if ddlow.name() == 'SCF':
                dimer_wfn = ddlow
            else:
                dimer_wfn = ddlow.reference_wavefunction()

            aux_basis = core.BasisSet.build(
                dimer_wfn.molecule(), "DF_BASIS_SAPT",
                core.get_global_option("DF_BASIS_SAPT"), "RIFIT",
                core.get_global_option("BASIS"))
            dimer_wfn.set_basisset("DF_BASIS_SAPT", aux_basis)
            dimer_wfn.set_basisset("DF_BASIS_ELST", aux_basis)
            core.sapt(dimer_wfn, m1mlow, m2mlow)
            return {
                k: core.variable(k)
                for k in (
                    'SAPT ELST10,R ENERGY',
                    'SAPT EXCH10 ENERGY',
                    'SAPT EXCH10(S^2) ENERGY',
                    'SAPT IND20,R ENERGY',
                    'SAPT EXCH-IND20,R ENERGY',
                    'SAPT EXCH-DISP20 ENERGY',
                    'SAPT DISP20 ENERGY',
                    'SAPT SAME-SPIN EXCH-DISP20 ENERGY',
                    'SAPT SAME-SPIN DISP20 ENERGY',
                    'SAPT HF TOTAL ENERGY',
                )
            }

        ###################################################################

        # Run the three previously defined functions
        espx_data, dimer_high_basis = run_espx()
        sapt_data = run_sapt()

    data = format_espx_human(HIGH, espx_data)
    data.update(sapt_data)
    data.update(format_intene_human(c))

    core.tstart()
    e, lines = sns_mp2_model(data)
    core.set_variable('SNS-MP2 TOTAL ENERGY', e * KCAL2MEH * 0.001)
    core.set_variable('CURRENT ENERGY', e * KCAL2MEH * 0.001)
    if os.environ.get('TEST_SNSMP2', False):
        for k, v in data.items():
            core.set_variable(k, v)
    core.print_out(lines)
    core.tstop()

    return e
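
# Unit bookkeeping assumed in the energy assignment above: the SNS-MP2 model
# returns kcal/mol, KCAL2MEH is taken here to convert kcal/mol to millihartree,
# and the extra 1e-3 factor brings it to hartree. The constant's exact value
# is not shown above, so the number below is an assumption for illustration.
KCAL2MEH = 1000.0 / 627.509  # ~1.5936 mEh per kcal/mol (approximate)
e_kcalmol = -2.5
e_hartree = e_kcalmol * KCAL2MEH * 0.001
print(e_hartree)  # roughly -3.98e-3 Eh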
Example #20
0
def database(name, db_name, **kwargs):
    r"""Function to access the molecule objects and reference energies of
    popular chemical databases.

    :aliases: db()

    :returns: (*float*) Mean absolute deviation of the database in kcal/mol

    :PSI variables:

    .. hlist::
       :columns: 1

       * :psivar:`db_name DATABASE MEAN SIGNED DEVIATION`
       * :psivar:`db_name DATABASE MEAN ABSOLUTE DEVIATION`
       * :psivar:`db_name DATABASE ROOT-MEAN-SQUARE DEVIATION`
       * Python dictionaries of results accessible as ``DB_RGT`` and ``DB_RXN``.

    .. note:: It is very easy to make a database from a collection of xyz files
        using the script :source:`psi4/share/psi4/scripts/ixyz2database.py`.
        See :ref:`sec:createDatabase` for details.

    .. caution:: Some features are not yet implemented. Buy a developer some coffee.

       - In sow/reap mode, use only global options (e.g., the local option set by ``set scf scf_type df`` will not be respected).

    .. note:: To access a database that is not embedded in a |PSIfour|
        distribution, add the path to the directory containing the database
        to the environment variable :envvar:`PYTHONPATH`.

    :type name: str
    :param name: ``'scf'`` || ``'sapt0'`` || ``'ccsd(t)'`` || etc.

        First argument, usually unlabeled. Indicates the computational method
        to be applied to the database. May be any valid argument to
        :py:func:`psi4.driver.energy`.

    :type db_name: str
    :param db_name: ``'BASIC'`` || ``'S22'`` || ``'HTBH'`` || etc.

        Second argument, usually unlabeled. Indicates the requested database
        name, matching (case insensitive) the name of a python file in
        ``psi4/share/databases`` or :envvar:`PYTHONPATH`.  Consult that
        directory for available databases and literature citations.

    :type func: :ref:`function <op_py_function>`
    :param func: |dl| ``energy`` |dr| || ``optimize`` || ``cbs``

        Indicates the type of calculation to be performed on each database
        member. The default performs a single-point ``energy('name')``, while
        ``optimize`` performs a geometry optimization on each reagent, and
        ``cbs`` performs a compound single-point energy. If a nested series
        of python functions is intended (see :ref:`sec:intercalls`), use
        keyword ``db_func`` instead of ``func``.

    :type mode: str
    :param mode: |dl| ``'continuous'`` |dr| || ``'sow'`` || ``'reap'``

        Indicates whether the calculations required to complete the
        database are to be run in one file (``'continuous'``) or are to be
        farmed out in an embarrassingly parallel fashion
        (``'sow'``/``'reap'``).  For the latter, run an initial job with
        ``'sow'`` and follow instructions in its output file.

    :type cp: :ref:`boolean <op_py_boolean>`
    :param cp: ``'on'`` || |dl| ``'off'`` |dr|

        Indicates whether counterpoise correction is employed in computing
        interaction energies. Use this option and NOT the ``bsse_type="cp"``
        function for BSSE correction in database().  Option available
        (See :ref:`sec:availableDatabases`) only for databases of bimolecular complexes.

    :type rlxd: :ref:`boolean <op_py_boolean>`
    :param rlxd: ``'on'`` || |dl| ``'off'`` |dr|

        Indicates whether correction for deformation energy is
        employed in computing interaction energies.  Option available
        (See :ref:`sec:availableDatabases`) only for databases of bimolecular complexes
        with non-frozen monomers, e.g., HBC6.

    :type symm: :ref:`boolean <op_py_boolean>`
    :param symm: |dl| ``'on'`` |dr| || ``'off'``

        Indicates whether the native symmetry of the database reagents is
        employed (``'on'``) or whether it is forced to :math:`C_1` symmetry
        (``'off'``). Some computational methods (e.g., SAPT) require no
        symmetry, and this will be set by database().

    :type zpe: :ref:`boolean <op_py_boolean>`
    :param zpe: ``'on'`` || |dl| ``'off'`` |dr|

        Indicates whether zero-point-energy corrections are appended to
        single-point energy values. Option valid only for certain
        thermochemical databases. Disabled until Hessians ready.

    :type benchmark: str
    :param benchmark: |dl| ``'default'`` |dr| || ``'S22A'`` || etc.

        Indicates whether a non-default set of reference energies, if
        available (See :ref:`sec:availableDatabases`), are employed for the
        calculation of error statistics.

    :type tabulate: List[str]
    :param tabulate: |dl| ``[]`` |dr| || ``['scf total energy', 'natom']`` || etc.

        Indicates whether to form tables of variables other than the
        primary requested energy.  Available for any PSI variable.

    :type subset: Union[str, List[str]]
    :param subset:

        Indicates a subset of the full database to run. This is a very
        flexible option and can be used in three distinct ways, outlined
        below. Note that two take a string and the last takes an array.
        See :ref:`sec:availableDatabases` for available values.

        * ``'small'`` || ``'large'`` || ``'equilibrium'``
            Calls predefined subsets of the requested database, either
            ``'small'``, a few of the smallest database members,
            ``'large'``, the largest of the database members, or
            ``'equilibrium'``, the equilibrium geometries for a database
            composed of dissociation curves.
        * ``'BzBz_S'`` || ``'FaOOFaON'`` || ``'ArNe'`` ||  ``'HB'`` || etc.
            For databases composed of dissociation curves, or otherwise
            divided into subsets, individual curves and subsets can be
            called by name. Consult the database python files for available
            molecular systems (case insensitive).
        * ``[1,2,5]`` || ``['1','2','5']`` || ``['BzMe-3.5', 'MeMe-5.0']`` || etc.
            Specify a list of database members to run. Consult the
            database python files for available molecular systems.  This
            is the only portion of database input that is case sensitive;
            choices for this keyword must match the database python file.

    :examples:

    >>> # [1] Two-stage SCF calculation on short, equilibrium, and long helium dimer
    >>> db('scf','RGC10',cast_up='sto-3g',subset=['HeHe-0.85','HeHe-1.0','HeHe-1.5'], tabulate=['scf total energy','natom'])

    >>> # [2] Counterpoise-corrected interaction energies for three complexes in S22
    >>> #     Error statistics computed wrt an old benchmark, S22A
    >>> database('mp2','S22',cp=1,subset=[16,17,8],benchmark='S22A')

    >>> # [3] SAPT0 on the neon dimer dissociation curve
    >>> db('sapt0',subset='NeNe',cp=0,symm=0,db_name='RGC10')

    >>> # [4] Optimize system 1 in database S22, producing tables of scf and mp2 energy
    >>> db('mp2','S22',db_func=optimize,subset=[1], tabulate=['mp2 total energy','current energy'])

    >>> # [5] CCSD on the smallest systems of HTBH, a hydrogen-transfer database
    >>> database('ccsd','HTBH',subset='small', tabulate=['ccsd total energy', 'mp2 total energy'])

    """
    lowername = name  #TODO
    kwargs = p4util.kwargs_lower(kwargs)

    # Wrap any positional arguments into kwargs (for intercalls among wrappers)
    if not ('name' in kwargs) and name:
        kwargs['name'] = name  #.lower()
    if not ('db_name' in kwargs) and db_name:
        kwargs['db_name'] = db_name

    # Establish function to call
    func = kwargs.pop('db_func', kwargs.pop('func', energy))
    kwargs['db_func'] = func
    # Bounce to CP if bsse kwarg (someday)
    if kwargs.get('bsse_type', None) is not None:
        raise ValidationError(
            """Database: Cannot specify bsse_type for database. Use the cp keyword withing database instead."""
        )

    allowoptexceeded = kwargs.get('allowoptexceeded', False)
    optstash = p4util.OptionsState(['WRITER_FILE_LABEL'], ['SCF', 'REFERENCE'])

    # Wrapper wholly defines molecule. discard any passed-in
    kwargs.pop('molecule', None)

    # Paths to search for database files: here + PSIPATH + library + PYTHONPATH
    db_paths = []
    db_paths.append(os.getcwd())
    db_paths.extend(os.environ.get('PSIPATH', '').split(os.path.pathsep))
    db_paths.append(os.path.join(core.get_datadir(), 'databases'))
    db_paths.append(os.path.dirname(__file__))
    db_paths = list(map(os.path.abspath, db_paths))
    sys.path[1:1] = db_paths
    # TODO this should be modernized a la interface_cfour

    # Define path and load module for requested database
    database = p4util.import_ignorecase(db_name)
    if database is None:
        core.print_out('\nPython module for database %s failed to load\n\n' %
                       (db_name))
        core.print_out('\nSearch path that was tried:\n')
        core.print_out(", ".join(map(str, sys.path)))
        raise ValidationError("Python module loading problem for database " +
                              str(db_name))
    else:
        dbse = database.dbse
        HRXN = database.HRXN
        ACTV = database.ACTV
        RXNM = database.RXNM
        BIND = database.BIND
        TAGL = database.TAGL
        GEOS = database.GEOS
        try:
            DATA = database.DATA
        except AttributeError:
            DATA = {}

    user_writer_file_label = core.get_global_option('WRITER_FILE_LABEL')
    user_reference = core.get_global_option('REFERENCE')

    # Configuration based upon e_name & db_name options
    #   Force non-supramolecular if needed
    if not hasattr(lowername, '__call__') and re.match(r'^.*sapt', lowername):
        try:
            database.ACTV_SA
        except AttributeError:
            raise ValidationError(
                'Database %s not suitable for non-supramolecular calculation.'
                % (db_name))
        else:
            ACTV = database.ACTV_SA
    #   Force open-shell if needed
    openshell_override = 0
    if user_reference in ['RHF', 'RKS']:
        try:
            database.isOS
        except AttributeError:
            pass
        else:
            if p4util.yes.match(str(database.isOS)):
                openshell_override = 1
                core.print_out(
                    '\nSome reagents in database %s require an open-shell reference; will be reset to UHF/UKS as needed.\n'
                    % (db_name))

    # Configuration based upon database keyword options
    #   Option symmetry- whether symmetry treated normally or turned off (currently req'd for dfmp2 & dft)
    db_symm = kwargs.get('symm', True)

    symmetry_override = 0
    if db_symm is False:
        symmetry_override = 1
    elif db_symm is True:
        pass
    else:
        raise ValidationError("""Symmetry mode '%s' not valid.""" % (db_symm))

    #   Option mode of operation- whether db run in one job or files farmed out
    db_mode = kwargs.pop('db_mode', kwargs.pop('mode', 'continuous')).lower()
    kwargs['db_mode'] = db_mode

    if db_mode == 'continuous':
        pass
    elif db_mode == 'sow':
        pass
    elif db_mode == 'reap':
        db_linkage = kwargs.get('linkage', None)
        if db_linkage is None:
            raise ValidationError(
                """Database execution mode 'reap' requires a linkage option."""
            )
    else:
        raise ValidationError("""Database execution mode '%s' not valid.""" %
                              (db_mode))

    #   Option counterpoise- whether for interaction energy databases run in bsse-corrected or not
    db_cp = kwargs.get('cp', False)

    if db_cp is True:
        try:
            database.ACTV_CP
        except AttributeError:
            raise ValidationError(
                """Counterpoise correction mode 'yes' invalid for database %s."""
                % (db_name))
        else:
            ACTV = database.ACTV_CP
    elif db_cp is False:
        pass
    else:
        raise ValidationError(
            """Counterpoise correction mode '%s' not valid.""" % (db_cp))

    #   Option relaxed- whether for non-frozen-monomer interaction energy databases include deformation correction or not?
    db_rlxd = kwargs.get('rlxd', False)

    if db_rlxd is True:
        if db_cp is True:
            try:
                database.ACTV_CPRLX
                database.RXNM_CPRLX
            except AttributeError:
                raise ValidationError(
                    'Deformation and counterpoise correction mode \'yes\' invalid for database %s.'
                    % (db_name))
            else:
                ACTV = database.ACTV_CPRLX
                RXNM = database.RXNM_CPRLX
        elif db_cp is False:
            try:
                database.ACTV_RLX
            except AttributeError:
                raise ValidationError(
                    'Deformation correction mode \'yes\' invalid for database %s.'
                    % (db_name))
            else:
                ACTV = database.ACTV_RLX
    elif db_rlxd is False:
        #elif no.match(str(db_rlxd)):
        pass
    else:
        raise ValidationError('Deformation correction mode \'%s\' not valid.' %
                              (db_rlxd))

    #   Option zero-point-correction- whether for thermochem databases jobs are corrected by zpe
    db_zpe = kwargs.get('zpe', False)

    if db_zpe is True:
        raise ValidationError(
            'Zero-point-correction mode \'yes\' not yet implemented.')
    elif db_zpe is False:
        pass
    else:
        raise ValidationError('Zero-point-correction \'mode\' %s not valid.' %
                              (db_zpe))

    #   Option benchmark- whether error statistics computed wrt alternate reference energies
    db_benchmark = 'default'
    if 'benchmark' in kwargs:
        db_benchmark = kwargs['benchmark']

        if db_benchmark.lower() == 'default':
            pass
        else:
            BIND = p4util.getattr_ignorecase(database, 'BIND_' + db_benchmark)
            if BIND is None:
                raise ValidationError(
                    'Special benchmark \'%s\' not available for database %s.' %
                    (db_benchmark, db_name))

    #   Option tabulate- whether tables of variables other than primary energy method are formed
    # TODO db(func=cbs,tabulate=[non-current-energy])  # broken
    db_tabulate = []
    if 'tabulate' in kwargs:
        db_tabulate = kwargs['tabulate']

    #   Option subset- whether all of the database or just a portion is run
    db_subset = HRXN
    if 'subset' in kwargs:
        db_subset = kwargs['subset']

    if isinstance(db_subset, (str, bytes)):
        if db_subset.lower() == 'small':
            try:
                database.HRXN_SM
            except AttributeError:
                raise ValidationError(
                    """Special subset 'small' not available for database %s."""
                    % (db_name))
            else:
                HRXN = database.HRXN_SM
        elif db_subset.lower() == 'large':
            try:
                database.HRXN_LG
            except AttributeError:
                raise ValidationError(
                    """Special subset 'large' not available for database %s."""
                    % (db_name))
            else:
                HRXN = database.HRXN_LG
        elif db_subset.lower() == 'equilibrium':
            try:
                database.HRXN_EQ
            except AttributeError:
                raise ValidationError(
                    """Special subset 'equilibrium' not available for database %s."""
                    % (db_name))
            else:
                HRXN = database.HRXN_EQ
        else:
            HRXN = p4util.getattr_ignorecase(database, db_subset)
            if HRXN is None:
                HRXN = p4util.getattr_ignorecase(database, 'HRXN_' + db_subset)
                if HRXN is None:
                    raise ValidationError(
                        """Special subset '%s' not available for database %s."""
                        % (db_subset, db_name))
    else:
        temp = []
        for rxn in db_subset:
            if rxn in HRXN:
                temp.append(rxn)
            else:
                raise ValidationError(
                    """Subset element '%s' not a member of database %s.""" %
                    (str(rxn), db_name))
        HRXN = temp

    temp = []
    for rxn in HRXN:
        temp.append(ACTV['%s-%s' % (dbse, rxn)])
    HSYS = p4util.drop_duplicates(sum(temp, []))

    # Sow all the necessary reagent computations
    core.print_out("\n\n")
    p4util.banner(("Database %s Computation" % (db_name)))
    core.print_out("\n")

    #   write index of calcs to output file
    instructions = """\n    The database single-job procedure has been selected through mode='continuous'.\n"""
    instructions += """    Calculations for the reagents will proceed in the order below and will be followed\n"""
    instructions += """    by summary results for the database.\n\n"""
    for rgt in HSYS:
        instructions += """                    %-s\n""" % (rgt)
    core.print_out(instructions)

    #   Loop through chemical systems
    ERGT = {}
    ERXN = {}
    VRGT = {}
    VRXN = {}
    for rgt in HSYS:
        VRGT[rgt] = {}

        core.print_out('\n')
        p4util.banner(' Database {} Computation: Reagent {} \n   {}'.format(
            db_name, rgt, TAGL[rgt]))
        core.print_out('\n')

        molecule = core.Molecule.from_dict(GEOS[rgt].to_dict())
        molecule.set_name(rgt)
        molecule.update_geometry()

        if symmetry_override:
            molecule.reset_point_group('c1')
            molecule.fix_orientation(True)
            molecule.fix_com(True)
            molecule.update_geometry()

        if (openshell_override) and (molecule.multiplicity() != 1):
            if user_reference == 'RHF':
                core.set_global_option('REFERENCE', 'UHF')
            elif user_reference == 'RKS':
                core.set_global_option('REFERENCE', 'UKS')

        core.set_global_option(
            'WRITER_FILE_LABEL', user_writer_file_label +
            ('' if user_writer_file_label == '' else '-') + rgt)

        if allowoptexceeded:
            try:
                ERGT[rgt] = func(molecule=molecule, **kwargs)
            except ConvergenceError:
                core.print_out(f"Optimization exceeded cycles for {rgt}")
                ERGT[rgt] = 0.0
        else:
            ERGT[rgt] = func(molecule=molecule, **kwargs)
        core.print_variables()
        core.print_out("   Database Contributions Map:\n   {}\n".format('-' *
                                                                        75))
        for rxn in HRXN:
            db_rxn = dbse + '-' + str(rxn)
            if rgt in ACTV[db_rxn]:
                core.print_out(
                    '   reagent {} contributes by {:.4f} to reaction {}\n'.
                    format(rgt, RXNM[db_rxn][rgt], db_rxn))
        core.print_out('\n')
        for envv in db_tabulate:
            VRGT[rgt][envv.upper()] = core.variable(envv)
        core.set_global_option("REFERENCE", user_reference)
        core.clean()
        #core.opt_clean()
        core.clean_variables()

    # Reap all the necessary reaction computations
    core.print_out("\n")
    p4util.banner(("Database %s Results" % (db_name)))
    core.print_out("\n")

    maxactv = []
    for rxn in HRXN:
        maxactv.append(len(ACTV[dbse + '-' + str(rxn)]))
    maxrgt = max(maxactv)
    table_delimit = '-' * (62 + 20 * maxrgt)
    tables = ''

    #   find any reactions that are incomplete
    FAIL = collections.defaultdict(int)
    for rxn in HRXN:
        db_rxn = dbse + '-' + str(rxn)
        for i in range(len(ACTV[db_rxn])):
            if abs(ERGT[ACTV[db_rxn][i]]) < 1.0e-12:
                if not allowoptexceeded:
                    FAIL[rxn] = 1

    #   tabulate requested process::environment variables
    tables += """   For each VARIABLE requested by tabulate, a 'Reaction Value' will be formed from\n"""
    tables += """   'Reagent' values according to weightings 'Wt', as for the REQUESTED ENERGY below.\n"""
    tables += """   Depending on the nature of the variable, this may or may not make any physical sense.\n"""
    for rxn in HRXN:
        db_rxn = dbse + '-' + str(rxn)
        VRXN[db_rxn] = {}

    for envv in db_tabulate:
        envv = envv.upper()
        tables += """\n   ==> %s <==\n\n""" % (envv.title())
        tables += _tblhead(maxrgt, table_delimit, 2)

        for rxn in HRXN:
            db_rxn = dbse + '-' + str(rxn)

            if FAIL[rxn]:
                tables += """\n%23s   %8s %8s %8s %8s""" % (db_rxn, '', '****',
                                                            '', '')
                for i in range(len(ACTV[db_rxn])):
                    tables += """ %16.8f %2.0f""" % (VRGT[
                        ACTV[db_rxn][i]][envv], RXNM[db_rxn][ACTV[db_rxn][i]])

            else:
                VRXN[db_rxn][envv] = 0.0
                for i in range(len(ACTV[db_rxn])):
                    VRXN[db_rxn][envv] += VRGT[
                        ACTV[db_rxn][i]][envv] * RXNM[db_rxn][ACTV[db_rxn][i]]

                tables += """\n%23s        %16.8f                  """ % (
                    db_rxn, VRXN[db_rxn][envv])
                for i in range(len(ACTV[db_rxn])):
                    tables += """ %16.8f %2.0f""" % (VRGT[
                        ACTV[db_rxn][i]][envv], RXNM[db_rxn][ACTV[db_rxn][i]])
        tables += """\n   %s\n""" % (table_delimit)

    #   tabulate primary requested energy variable with statistics
    count_rxn = 0
    minDerror = 100000.0
    maxDerror = 0.0
    MSDerror = 0.0
    MADerror = 0.0
    RMSDerror = 0.0

    tables += """\n   ==> %s <==\n\n""" % ('Requested Energy')
    tables += _tblhead(maxrgt, table_delimit, 1)
    for rxn in HRXN:
        db_rxn = dbse + '-' + str(rxn)

        if FAIL[rxn]:
            tables += """\n%23s   %8.4f %8s %10s %10s""" % (
                db_rxn, BIND[db_rxn], '****', '****', '****')
            for i in range(len(ACTV[db_rxn])):
                tables += """ %16.8f %2.0f""" % (ERGT[ACTV[db_rxn][i]],
                                                 RXNM[db_rxn][ACTV[db_rxn][i]])

        else:
            ERXN[db_rxn] = 0.0
            for i in range(len(ACTV[db_rxn])):
                ERXN[db_rxn] += ERGT[ACTV[db_rxn][i]] * RXNM[db_rxn][
                    ACTV[db_rxn][i]]
            error = constants.hartree2kcalmol * ERXN[db_rxn] - BIND[db_rxn]

            tables += """\n%23s   %8.4f %8.4f %10.4f %10.4f""" % (
                db_rxn, BIND[db_rxn], constants.hartree2kcalmol * ERXN[db_rxn],
                error, error * constants.cal2J)
            for i in range(len(ACTV[db_rxn])):
                tables += """ %16.8f %2.0f""" % (ERGT[ACTV[db_rxn][i]],
                                                 RXNM[db_rxn][ACTV[db_rxn][i]])

            if abs(error) < abs(minDerror):
                minDerror = error
            if abs(error) > abs(maxDerror):
                maxDerror = error
            MSDerror += error
            MADerror += abs(error)
            RMSDerror += error * error
            count_rxn += 1
    tables += """\n   %s\n""" % (table_delimit)

    if count_rxn:

        MSDerror /= float(count_rxn)
        MADerror /= float(count_rxn)
        RMSDerror = math.sqrt(RMSDerror / float(count_rxn))

        tables += """%23s %19s %10.4f %10.4f\n""" % (
            'Minimal Dev', '', minDerror, minDerror * constants.cal2J)
        tables += """%23s %19s %10.4f %10.4f\n""" % (
            'Maximal Dev', '', maxDerror, maxDerror * constants.cal2J)
        tables += """%23s %19s %10.4f %10.4f\n""" % (
            'Mean Signed Dev', '', MSDerror, MSDerror * constants.cal2J)
        tables += """%23s %19s %10.4f %10.4f\n""" % (
            'Mean Absolute Dev', '', MADerror, MADerror * constants.cal2J)
        tables += """%23s %19s %10.4f %10.4f\n""" % (
            'RMS Dev', '', RMSDerror, RMSDerror * constants.cal2J)
        tables += """   %s\n""" % (table_delimit)

        core.set_variable('%s DATABASE MEAN SIGNED DEVIATION' % (db_name),
                          MSDerror)
        core.set_variable('%s DATABASE MEAN ABSOLUTE DEVIATION' % (db_name),
                          MADerror)
        core.set_variable('%s DATABASE ROOT-MEAN-SQUARE DEVIATION' % (db_name),
                          RMSDerror)

        core.print_out(tables)
        finalenergy = MADerror

    else:
        finalenergy = 0.0

    optstash.restore()

    DB_RGT.clear()
    DB_RGT.update(VRGT)
    DB_RXN.clear()
    DB_RXN.update(VRXN)
    return finalenergy
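
A small standalone sketch of the deviation statistics gathered above (minimum, maximum, mean signed, mean absolute, and root-mean-square deviation accumulated in a single pass); the function name and sample numbers are illustrative, not part of the driver:

import math

def deviation_stats(errors):
    """Return (min_dev, max_dev, MSD, MAD, RMSD) for a list of signed errors,
    mirroring the single-pass accumulation used for the database table above."""
    min_dev, max_dev = float('inf'), 0.0
    msd = mad = rmsd = 0.0
    for err in errors:
        if abs(err) < abs(min_dev):
            min_dev = err
        if abs(err) > abs(max_dev):
            max_dev = err
        msd += err
        mad += abs(err)
        rmsd += err * err
    n = float(len(errors))
    return min_dev, max_dev, msd / n, mad / n, math.sqrt(rmsd / n)

print(deviation_stats([-0.12, 0.34, -0.05]))  # made-up deviations in kcal/mol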
Example #21
0
def run_sf_sapt(name, **kwargs):
    optstash = p4util.OptionsState(['SCF_TYPE'],
                                   ['SCF', 'REFERENCE'],
                                   ['SCF', 'DFT_GRAC_SHIFT'],
                                   ['SCF', 'SAVE_JK'])

    core.tstart()

    # Alter default algorithm
    if not core.has_global_option_changed('SCF_TYPE'):
        core.set_global_option('SCF_TYPE', 'DF')

    core.prepare_options_for_module("SAPT")

    # Get the molecule of interest
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        sapt_dimer = kwargs.pop('molecule', core.get_active_molecule())
    else:
        core.print_out('Warning! SAPT argument "ref_wfn" is only able to use molecule information.')
        sapt_dimer = ref_wfn.molecule()

    sapt_dimer, monomerA, monomerB = proc_util.prepare_sapt_molecule(sapt_dimer, "dimer")

    # Print out the title and some information
    core.print_out("\n")
    core.print_out("         ---------------------------------------------------------\n")
    core.print_out("         " + "Spin-Flip SAPT Procedure".center(58) + "\n")
    core.print_out("\n")
    core.print_out("         " + "by Daniel G. A. Smith and Konrad Patkowski".center(58) + "\n")
    core.print_out("         ---------------------------------------------------------\n")
    core.print_out("\n")

    core.print_out("  ==> Algorithm <==\n\n")
    core.print_out("   JK Algorithm            %12s\n" % core.get_option("SCF", "SCF_TYPE"))
    core.print_out("\n")
    core.print_out("   Required computations:\n")
    core.print_out("     HF  (Monomer A)\n")
    core.print_out("     HF  (Monomer B)\n")
    core.print_out("\n")

    if (core.get_option('SCF', 'REFERENCE') != 'ROHF'):
        raise ValidationError('Spin-Flip SAPT currently only supports restricted open-shell references.')

    # Run the two monomer computations
    core.IO.set_default_namespace('dimer')
    data = {}

    if (core.get_global_option('SCF_TYPE') == 'DF'):
        core.set_global_option('DF_INTS_IO', 'SAVE')

    # Compute dimer wavefunction
    wfn_A = scf_helper("SCF", molecule=monomerA, banner="SF-SAPT: HF Monomer A", **kwargs)

    core.set_global_option("SAVE_JK", True)
    wfn_B = scf_helper("SCF", molecule=monomerB, banner="SF-SAPT: HF Monomer B", **kwargs)
    sapt_jk = wfn_B.jk()
    core.set_global_option("SAVE_JK", False)
    core.print_out("\n")
    core.print_out("         ---------------------------------------------------------\n")
    core.print_out("         " + "Spin-Flip SAPT Exchange and Electrostatics".center(58) + "\n")
    core.print_out("\n")
    core.print_out("         " + "by Daniel G. A. Smith and Konrad Patkowski".center(58) + "\n")
    core.print_out("         ---------------------------------------------------------\n")
    core.print_out("\n")

    sf_data = sapt_sf_terms.compute_sapt_sf(sapt_dimer, sapt_jk, wfn_A, wfn_B)

    # Print the results
    core.print_out("   Spin-Flip SAPT Results\n")
    core.print_out("  " + "-" * 103 + "\n")

    for key, value in sf_data.items():
        print_vals = (key, value * 1000, value * constants.hartree2kcalmol, value * constants.hartree2kJmol)
        string = "    %-26s % 15.8f [mEh] % 15.8f [kcal/mol] % 15.8f [kJ/mol]\n" % print_vals
        core.print_out(string)
    core.print_out("  " + "-" * 103 + "\n\n")

    dimer_wfn = core.Wavefunction.build(sapt_dimer, wfn_A.basisset())

    # Set variables
    psivar_translator = {
        "Elst10": "SAPT ELST ENERGY",
        "Exch10(S^2) [diagonal]": "SAPT EXCH10(S^2),DIAGONAL ENERGY",
        "Exch10(S^2) [off-diagonal]": "SAPT EXCH10(S^2),OFF-DIAGONAL ENERGY",
        "Exch10(S^2) [highspin]": "SAPT EXCH10(S^2),HIGHSPIN ENERGY",
    }

    for k, v in sf_data.items():
        psi_k = psivar_translator[k]

        dimer_wfn.set_variable(psi_k, v)
        core.set_variable(psi_k, v)

    # Copy over highspin
    core.set_variable("SAPT EXCH ENERGY", sf_data["Exch10(S^2) [highspin]"])

    core.tstop()

    return dimer_wfn
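
The results loop above converts each spin-flip SAPT term from hartree to mEh, kcal/mol, and kJ/mol before printing. A self-contained sketch of that formatting, with the conversion factors written as approximate literals (the driver takes them from `constants`) and made-up energies:

# Approximate conversion factors; the driver uses the qcelemental-backed `constants`.
HARTREE2KCALMOL = 627.509474
HARTREE2KJMOL = 2625.499639

sample_terms = {"Elst10": -0.00123456, "Exch10(S^2) [highspin]": 0.00234567}  # made-up values

for key, value in sample_terms.items():
    print("    %-26s % 15.8f [mEh] % 15.8f [kcal/mol] % 15.8f [kJ/mol]"
          % (key, value * 1000, value * HARTREE2KCALMOL, value * HARTREE2KJMOL))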
Example #22
0
def exchange(cache, jk, do_print=True):
    """
    Computes the E10 exchange (S^2 and S^inf) from a build_sapt_jk_cache datacache.
    """

    if do_print:
        core.print_out("\n  ==> E10 Exchange <== \n\n")

    # Build potentials
    h_A = cache["V_A"].clone()
    h_A.axpy(2.0, cache["J_A"])
    h_A.axpy(-1.0, cache["K_A"])

    h_B = cache["V_B"].clone()
    h_B.axpy(2.0, cache["J_B"])
    h_B.axpy(-1.0, cache["K_B"])

    w_A = cache["V_A"].clone()
    w_A.axpy(2.0, cache["J_A"])

    w_B = cache["V_B"].clone()
    w_B.axpy(2.0, cache["J_B"])

    # Build inverse exchange metric
    nocc_A = cache["Cocc_A"].shape[1]
    nocc_B = cache["Cocc_B"].shape[1]
    SAB = core.triplet(
        cache["Cocc_A"], cache["S"], cache["Cocc_B"], True, False, False)
    num_occ = nocc_A + nocc_B

    Sab = core.Matrix(num_occ, num_occ)
    Sab.np[:nocc_A, nocc_A:] = SAB.np
    Sab.np[nocc_A:, :nocc_A] = SAB.np.T
    Sab.np[np.diag_indices_from(Sab.np)] += 1
    Sab.power(-1.0, 1.e-14)
    Sab.np[np.diag_indices_from(Sab.np)] -= 1.0
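    # Sab now holds (1 + S_occ)^-1 - 1 over the combined occupied space: the unit diagonal
    # is added before Matrix.power(-1.0) and removed afterwards, so only the overlap-induced
    # correction blocks (extracted below as Tmo_AA, Tmo_BB, Tmo_AB) remain.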

    Tmo_AA = core.Matrix.from_array(Sab.np[:nocc_A, :nocc_A])
    Tmo_BB = core.Matrix.from_array(Sab.np[nocc_A:, nocc_A:])
    Tmo_AB = core.Matrix.from_array(Sab.np[:nocc_A, nocc_A:])

    T_A = np.dot(cache["Cocc_A"], Tmo_AA).dot(cache["Cocc_A"].np.T)
    T_B = np.dot(cache["Cocc_B"], Tmo_BB).dot(cache["Cocc_B"].np.T)
    T_AB = np.dot(cache["Cocc_A"], Tmo_AB).dot(cache["Cocc_B"].np.T)

    S = cache["S"]

    D_A = cache["D_A"]
    P_A = cache["P_A"]

    D_B = cache["D_B"]
    P_B = cache["P_B"]

    # Compute the J and K matrices
    jk.C_clear()

    jk.C_left_add(cache["Cocc_A"])
    jk.C_right_add(core.doublet(cache["Cocc_A"], Tmo_AA, False, False))

    jk.C_left_add(cache["Cocc_B"])
    jk.C_right_add(core.doublet(cache["Cocc_A"], Tmo_AB, False, False))

    jk.C_left_add(cache["Cocc_A"])
    jk.C_right_add(core.Matrix.chain_dot(P_B, S, cache["Cocc_A"]))

    jk.compute()

    JT_A, JT_AB, Jij = jk.J()
    KT_A, KT_AB, Kij = jk.K()

    # Start S^2
    Exch_s2 = 0.0

    tmp = core.Matrix.chain_dot(D_A, S, D_B, S, P_A)
    Exch_s2 -= 2.0 * w_B.vector_dot(tmp)

    tmp = core.Matrix.chain_dot(D_B, S, D_A, S, P_B)
    Exch_s2 -= 2.0 * w_A.vector_dot(tmp)

    tmp = core.Matrix.chain_dot(P_A, S, D_B)
    Exch_s2 -= 2.0 * Kij.vector_dot(tmp)

    if do_print:
        core.print_out(print_sapt_var("Exch10(S^2) ", Exch_s2, short=True))
        core.print_out("\n")

    # Start Sinf
    Exch10 = 0.0
    Exch10 -= 2.0 * np.vdot(cache["D_A"], cache["K_B"])
    Exch10 += 2.0 * np.vdot(T_A, h_B.np)
    Exch10 += 2.0 * np.vdot(T_B, h_A.np)
    Exch10 += 2.0 * np.vdot(T_AB, h_A.np + h_B.np)
    Exch10 += 4.0 * np.vdot(T_B, JT_AB.np - 0.5 * KT_AB.np)
    Exch10 += 4.0 * np.vdot(T_A, JT_AB.np - 0.5 * KT_AB.np)
    Exch10 += 4.0 * np.vdot(T_B, JT_A.np - 0.5 * KT_A.np)
    Exch10 += 4.0 * np.vdot(T_AB, JT_AB.np - 0.5 * KT_AB.np.T)

    if do_print:
        core.set_variable("Exch10", Exch10)
        core.print_out(print_sapt_var("Exch10", Exch10, short=True))
        core.print_out("\n")

    return {"Exch10(S^2)": Exch_s2, "Exch10": Exch10}
Example #23
0
def run_dftd3(self, func=None, dashlvl=None, dashparam=None, dertype=None, verbose=False):
    """Function to call Grimme's dftd3 program (http://toc.uni-muenster.de/DFTD3/)
    to compute the -D correction of level *dashlvl* using parameters for
    the functional *func*. The dictionary *dashparam* can be used to supply
    a full set of dispersion parameters in the absence of *func* or to supply
    individual overrides in the presence of *func*. Returns energy if *dertype* is 0,
    gradient if *dertype* is 1, else tuple of energy and gradient if *dertype*
    unspecified. The dftd3 executable must be independently compiled and found in
    :envvar:`PATH` or :envvar:`PSIPATH`.
    *self* may be either a qcdb.Molecule (sensibly) or a psi4.Molecule
    (works b/c psi4.Molecule has been extended by this method py-side and
    only public interface fns used) or a string that can be instantiated
    into a qcdb.Molecule.

    func - functional alias or None
    dashlvl - dispersion correction level: d2gr/d3zero/d3bj/d3mzero/d3mbj
    dashparam - dictionary of dispersion parameters
    dertype - derivative level

    """
    # Create (if necessary) and update qcdb.Molecule
    if isinstance(self, Molecule):
        # called on a qcdb.Molecule
        pass
    elif isinstance(self, core.Molecule):
        # called on a python export of a psi4.Molecule (py-side through Psi4's driver)
        self.create_psi4_string_from_molecule()
    elif isinstance(self, basestring):
        # called on a string representation of a psi4.Molecule (c-side through psi4.Dispersion)
        self = Molecule(self)
    else:
        raise ValidationError("""Argument mol must be psi4string or qcdb.Molecule""")
    self.update_geometry()

    # Validate arguments
    if dertype is None:
        dertype = -1
    elif der0th.match(str(dertype)):
        dertype = 0
    elif der1st.match(str(dertype)):
        dertype = 1
    elif der2nd.match(str(dertype)):
        raise ValidationError('Requested derivative level \'dertype\' %s not valid for run_dftd3.' % (dertype))
    else:
        raise ValidationError('Requested derivative level \'dertype\' %s not valid for run_dftd3.' % (dertype))

    if dashlvl is not None:
        dashlvl = dashlvl.lower()
        dashlvl = dash_alias['-' + dashlvl][1:] if ('-' + dashlvl) in dash_alias.keys() else dashlvl
        if dashlvl not in dashcoeff.keys():
            raise ValidationError("""-D correction level %s is not available. Choose among %s.""" % (dashlvl, dashcoeff.keys()))
    else:
        raise ValidationError("""Must specify a dashlvl""")

    if func is not None:
        dftd3_params = dash_server(func, dashlvl)
    else:
        dftd3_params = {}

    if dashparam is not None:
        dftd3_params.update(dashparam)

    # Move ~/.dftd3par.<hostname> out of the way so it won't interfere
    defaultfile = os.path.expanduser('~') + '/.dftd3par.' + socket.gethostname()
    defmoved = False
    if os.path.isfile(defaultfile):
        os.rename(defaultfile, defaultfile + '_hide')
        defmoved = True

    # Find environment by merging PSIPATH and PATH environment variables
    lenv = {
        'PATH': ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) + \
                ':' + os.environ.get('PATH'),
        'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH')
        }
    #   Filter out None values as subprocess will fault on them
    lenv = {k: v for k, v in lenv.items() if v is not None}

    # Find out if running from Psi4 for scratch details and such
    # try:
    #     import psi4
    # except ImportError as err:
    #     isP4regime = False
    # else:
    #     isP4regime = True

    # Setup unique scratch directory and move in
    current_directory = os.getcwd()
    if isP4regime:
        psioh = core.IOManager.shared_object()
        psio = core.IO.shared_object()
        os.chdir(psioh.get_default_path())
        dftd3_tmpdir = 'psi.' + str(os.getpid()) + '.' + psio.get_default_namespace() + \
            '.dftd3.' + str(uuid.uuid4())[:8]
    else:
        dftd3_tmpdir = os.environ['HOME'] + os.sep + 'dftd3_' + str(uuid.uuid4())[:8]
    if os.path.exists(dftd3_tmpdir) is False:
        os.mkdir(dftd3_tmpdir)
    os.chdir(dftd3_tmpdir)

    # Write dftd3_parameters file that governs dispersion calc
    paramcontents = dftd3_coeff_formatter(dashlvl, dftd3_params)
    paramfile1 = 'dftd3_parameters'  # older patched name
    with open(paramfile1, 'w') as handle:
        handle.write(paramcontents)
    paramfile2 = '.dftd3par.local'  # new mainline name
    with open(paramfile2, 'w') as handle:
        handle.write(paramcontents)

    # Write dftd3_geometry file that supplies geometry to dispersion calc
    numAtoms = self.natom()

    # We seem to have a problem with one atom, force the correct result
    if numAtoms == 1:
        dashd = 0.0
        dashdderiv = core.Matrix(1, 3)

        if dertype == -1:
            return dashd, dashdderiv
        elif dertype == 0:
            return dashd
        elif dertype == 1:
            return dashdderiv


    geom = self.save_string_xyz()
    reals = []
    for line in geom.splitlines():
        lline = line.split()
        if len(lline) != 4:
            continue
        if lline[0] == 'Gh':
            numAtoms -= 1
        else:
            reals.append(line)

    geomtext = str(numAtoms) + '\n\n'
    for line in reals:
        geomtext += line.strip() + '\n'
    geomfile = './dftd3_geometry.xyz'
    with open(geomfile, 'w') as handle:
        handle.write(geomtext)
    # TODO somehow the variations on save_string_xyz and
    #   whether natom and chgmult does or doesn't get written
    #   have gotten all tangled. I fear this doesn't work
    #   the same btwn libmints and qcdb or for ghosts

    # Call dftd3 program
    command = ['dftd3', geomfile]
    if dertype != 0:
        command.append('-grad')
    try:
        dashout = subprocess.Popen(command, stdout=subprocess.PIPE, env=lenv)
    except OSError as e:
        raise ValidationError('Program dftd3 not found in path. %s' % e)
    out, err = dashout.communicate()

    # Parse output (could go further and break into E6, E8, E10 and Cn coeff)
    success = False
    for line in out.splitlines():
        line = line.decode('utf-8')
        if re.match(' Edisp /kcal,au', line):
            sline = line.split()
            dashd = float(sline[3])
        if re.match(' normal termination of dftd3', line):
            success = True

    if not success:
        os.chdir(current_directory)
        raise Dftd3Error("""Unsuccessful run. Possibly -D variant not available in dftd3 version.""")

    # Parse grad output
    if dertype != 0:
        derivfile = './dftd3_gradient'
        dfile = open(derivfile, 'r')
        dashdderiv = []
        for line in geom.splitlines():
            lline = line.split()
            if len(lline) != 4:
                continue
            if lline[0] == 'Gh':
                dashdderiv.append([0.0, 0.0, 0.0])
            else:
                dashdderiv.append([float(x.replace('D', 'E')) for x in dfile.readline().split()])
        dfile.close()

        if len(dashdderiv) != self.natom():
            raise ValidationError('Program dftd3 gradient file has %d atoms; %d expected.' % \
                (len(dashdderiv), self.natom()))

    # Prepare results for Psi4
    if isP4regime and dertype != 0:
        core.set_variable('DISPERSION CORRECTION ENERGY', dashd)
        psi_dashdderiv = core.Matrix(self.natom(), 3)
        psi_dashdderiv.set(dashdderiv)

    # Print program output to file if verbose
    if not verbose and isP4regime:
        verbose = True if core.get_option('SCF', 'PRINT') >= 3 else False
    if verbose:

        text = '\n  ==> DFTD3 Output <==\n'
        text += out.decode('utf-8')
        if dertype != 0:
            with open(derivfile, 'r') as handle:
                text += handle.read().replace('D', 'E')
            text += '\n'
        if isP4regime:
            core.print_out(text)
        else:
            print(text)

    # Clean up files and remove scratch directory
    os.unlink(paramfile1)
    os.unlink(paramfile2)
    os.unlink(geomfile)
    if dertype != 0:
        os.unlink(derivfile)
    if defmoved is True:
        os.rename(defaultfile + '_hide', defaultfile)

    os.chdir('..')
    try:
        shutil.rmtree(dftd3_tmpdir)
    except OSError as e:
        os.chdir(current_directory)
        raise ValidationError('Unable to remove dftd3 temporary directory %s' % e)
    os.chdir(current_directory)

    # return -D & d(-D)/dx
    if dertype == -1:
        return dashd, dashdderiv
    elif dertype == 0:
        return dashd
    elif dertype == 1:
        return psi_dashdderiv
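
Both external-program wrappers in these examples build the subprocess environment by prepending any PSIPATH entries to PATH and dropping unset variables before launching the executable. A stripped-down sketch of that pattern:

import os

# Merge PSIPATH ahead of PATH and drop unset variables, as the dftd3 and cfour
# wrappers do before calling subprocess.Popen(..., env=lenv).
lenv = {
    'PATH': ':'.join(os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x)
            + ':' + os.environ.get('PATH', ''),
    'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH'),
}
lenv = {k: v for k, v in lenv.items() if v is not None}
print(lenv['PATH'].replace(':', '\n'))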
Example #24
0
def run_cfour(name, **kwargs):
    """Function that prepares environment and input files
    for a calculation calling Stanton and Gauss's CFOUR code.
    Also processes results back into Psi4 format.

    This function is not called directly but is instead called by
    :py:func:`~psi4.driver.energy` or :py:func:`~psi4.driver.optimize` when a Cfour
    method is requested (through *name* argument). In order to function
    correctly, the Cfour executable ``xcfour`` must be present in
    :envvar:`PATH` or :envvar:`PSIPATH`.

    .. hlist::
       :columns: 1

       * Many :ref:`PSI Variables <apdx:cfour_psivar>` extracted from the Cfour output
       * Python dictionary of associated file constants accessible as ``P4C4_INFO['zmat']``, ``P4C4_INFO['output']``, ``P4C4_INFO['grd']``, *etc.*


    :type name: str
    :param name: ``'c4-scf'`` || ``'c4-ccsd(t)'`` || ``'cfour'`` || etc.

        First argument, usually unlabeled. Indicates the computational
        method to be applied to the system.

    :type keep: :ref:`boolean <op_py_boolean>`
    :param keep: ``'on'`` || |dl| ``'off'`` |dr|

        Indicates whether to delete the Cfour scratch directory upon
        completion of the Cfour job.

    :type path: str
    :param path:

        Indicates path to Cfour scratch directory (with respect to Psi4
        scratch directory). Otherwise, the default is a subdirectory
        within the Psi4 scratch directory.

        If specified, GENBAS and/or ZMAT within will be used.

    :type genbas: str
    :param genbas:

        Indicates that contents should be used for GENBAS file.

    GENBAS is a complicated topic. It is quite unnecessary if the
    molecule is from a molecule {...} block and basis is set through
    |Psifours| BASIS keyword. In that case, a GENBAS is written from
    LibMints and all is well. Otherwise, a GENBAS is looked for in
    the usual places: PSIPATH, PATH, PSIDATADIR/basis. If path kwarg is
    specified, also looks there preferentially for a GENBAS. Can
    also specify GENBAS within an input file through a string and
    setting the genbas kwarg. Note that due to the input parser's
    aggression, blank lines need to be replaced by the text blankline.

    """
    lowername = name.lower()
    internal_p4c4_info = {}
    return_wfn = kwargs.pop('return_wfn', False)

    # Make sure the molecule the user provided is the active one
    molecule = kwargs.pop('molecule', core.get_active_molecule())
    molecule.update_geometry()

    optstash = p4util.OptionsState(['CFOUR', 'TRANSLATE_PSI4'])

    # Determine calling function and hence dertype
    calledby = inspect.stack()[1][3]
    dertype = ['energy', 'gradient', 'hessian'].index(calledby)
    #print('I am %s called by %s called by %s.\n' %
    #    (inspect.stack()[0][3], inspect.stack()[1][3], inspect.stack()[2][3]))

    # Save submission directory
    current_directory = os.getcwd()

    # Move into job scratch directory
    psioh = core.IOManager.shared_object()
    psio = core.IO.shared_object()
    os.chdir(psioh.get_default_path())

    # Construct and move into cfour subdirectory of job scratch directory
    cfour_tmpdir = kwargs['path'] if 'path' in kwargs else \
        'psi.' + str(os.getpid()) + '.' + psio.get_default_namespace() + \
        '.cfour.' + str(uuid.uuid4())[:8]
    if not os.path.exists(cfour_tmpdir):
        os.mkdir(cfour_tmpdir)
    os.chdir(cfour_tmpdir)

    # Find environment by merging PSIPATH and PATH environment variables
    lenv = {
        'PATH':
        ':'.join([
            os.path.abspath(x)
            for x in os.environ.get('PSIPATH', '').split(':') if x != ''
        ]) + ':' + os.environ.get('PATH') + ':' + core.get_datadir() +
        '/basis',
        'GENBAS_PATH':
        core.get_datadir() + '/basis',
        'CFOUR_NUM_CORES':
        os.environ.get('CFOUR_NUM_CORES'),
        'MKL_NUM_THREADS':
        os.environ.get('MKL_NUM_THREADS'),
        'OMP_NUM_THREADS':
        os.environ.get('OMP_NUM_THREADS'),
        'LD_LIBRARY_PATH':
        os.environ.get('LD_LIBRARY_PATH')
    }

    if 'path' in kwargs:
        lenv['PATH'] = kwargs['path'] + ':' + lenv['PATH']
    #   Filter out None values as subprocess will fault on them
    lenv = {k: v for k, v in lenv.items() if v is not None}

    # Load the GENBAS file
    genbas_path = qcdb.search_file('GENBAS', lenv['GENBAS_PATH'])
    if genbas_path:
        try:
            shutil.copy2(genbas_path, psioh.get_default_path() + cfour_tmpdir)
        except shutil.Error:  # should only fail if src and dest equivalent
            pass
        core.print_out("\n  GENBAS loaded from %s\n" % (genbas_path))
        core.print_out("  CFOUR to be run from %s\n" %
                       (psioh.get_default_path() + cfour_tmpdir))
    else:
        message = """
  GENBAS file for CFOUR interface not found. Either:
  [1] Supply a GENBAS by placing it in PATH or PSIPATH
      [1a] Use cfour {} block with molecule and basis directives.
      [1b] Use molecule {} block and CFOUR_BASIS keyword.
  [2] Allow Psi4's internal basis sets to convert to GENBAS
      [2a] Use molecule {} block and BASIS keyword.

"""
        core.print_out(message)
        core.print_out('  Search path that was tried:\n')
        core.print_out(lenv['PATH'].replace(':', ', '))

    # Generate the ZMAT input file in scratch
    if 'path' in kwargs and os.path.isfile('ZMAT'):
        core.print_out("  ZMAT loaded from %s\n" %
                       (psioh.get_default_path() + kwargs['path'] + '/ZMAT'))
    else:
        with open('ZMAT', 'w') as cfour_infile:
            cfour_infile.write(write_zmat(lowername, dertype, molecule))

    internal_p4c4_info['zmat'] = open('ZMAT', 'r').read()
    #core.print_out('\n====== Begin ZMAT input for CFOUR ======\n')
    #core.print_out(open('ZMAT', 'r').read())
    #core.print_out('======= End ZMAT input for CFOUR =======\n\n')
    #print('\n====== Begin ZMAT input for CFOUR ======')
    #print(open('ZMAT', 'r').read())
    #print('======= End ZMAT input for CFOUR =======\n')

    if 'genbas' in kwargs:
        with open('GENBAS', 'w') as cfour_basfile:
            cfour_basfile.write(kwargs['genbas'].replace(
                '\nblankline\n', '\n\n'))
        core.print_out('  GENBAS loaded from kwargs string\n')

    # Close psi4 output file and reopen with filehandle
    print('output in', current_directory + '/' + core.outfile_name())
    pathfill = '' if os.path.isabs(
        core.outfile_name()) else current_directory + os.path.sep

    # Handle threading
    #   OMP_NUM_THREADS from env is in lenv from above
    #   threads from psi4 -n (core.get_num_threads()) is ignored
    #   CFOUR_OMP_NUM_THREADS psi4 option takes precedence, handled below
    if core.has_option_changed('CFOUR', 'CFOUR_OMP_NUM_THREADS'):
        lenv['OMP_NUM_THREADS'] = str(
            core.get_option('CFOUR', 'CFOUR_OMP_NUM_THREADS'))

    #print("""\n\n<<<<<  RUNNING CFOUR ...  >>>>>\n\n""")
    # Call executable xcfour, directing cfour output to the psi4 output file
    cfour_executable = kwargs['c4exec'] if 'c4exec' in kwargs else 'xcfour'
    try:
        retcode = subprocess.Popen([cfour_executable],
                                   bufsize=0,
                                   stdout=subprocess.PIPE,
                                   env=lenv)
    except OSError as e:
        sys.stderr.write(
            'Program %s not found in path or execution failed: %s\n' %
            (cfour_executable, e.strerror))
        message = ('Program %s not found in path or execution failed: %s\n' %
                   (cfour_executable, e.strerror))
        raise ValidationError(message)

    c4out = ''
    while True:
        data = retcode.stdout.readline()
        data = data.decode('utf-8')
        if not data:
            break
        core.print_out(data)
        c4out += data
    internal_p4c4_info['output'] = c4out

    c4files = {}
    core.print_out('\n')
    for item in ['GRD', 'FCMFINAL', 'DIPOL']:
        try:
            with open(psioh.get_default_path() + cfour_tmpdir + '/' + item,
                      'r') as handle:
                c4files[item] = handle.read()
                core.print_out('  CFOUR scratch file %s has been read\n' %
                               (item))
                core.print_out('%s\n' % c4files[item])
                internal_p4c4_info[item.lower()] = c4files[item]
        except IOError:
            pass
    core.print_out('\n')

    if molecule.name() == 'blank_molecule_psi4_yo':
        qcdbmolecule = None
    else:
        molecule.update_geometry()
        qcdbmolecule = qcdb.Molecule(
            molecule.create_psi4_string_from_molecule())
        qcdbmolecule.update_geometry()

    # c4mol, if it exists, is dinky, just a clue to geometry of cfour results
    psivar, c4grad, c4mol = qcdb.cfour.harvest(qcdbmolecule, c4out, **c4files)

    # Absorb results into psi4 data structures
    for key in psivar.keys():
        core.set_variable(key.upper(), float(psivar[key]))

    if qcdbmolecule is None and c4mol is not None:

        molrec = qcel.molparse.from_string(
            c4mol.create_psi4_string_from_molecule(),
            enable_qm=True,
            missing_enabled_return_qm='minimal',
            enable_efp=False,
            missing_enabled_return_efp='none',
        )
        molecule = core.Molecule.from_dict(molrec['qm'])
        molecule.set_name('blank_molecule_psi4_yo')
        core.set_active_molecule(molecule)
        molecule.update_geometry()
        # This case arises when no Molecule going into calc (cfour {} block) but want
        #   to know the orientation at which grad, properties, etc. are returned (c4mol).
        #   c4mol is dinky, w/o chg, mult, dummies and retains name
        #   blank_molecule_psi4_yo so as to not interfere with future cfour {} blocks

    if c4grad is not None:
        mat = core.Matrix.from_list(c4grad)
        core.set_gradient(mat)

        #print '    <<<   [3] C4-GRD-GRAD   >>>'
        #mat.print()


#    exit(1)

# # Things needed core.so module to do
# collect c4out string
# read GRD
# read FCMFINAL
# see if theres an active molecule

# # Things delegatable to qcdb
# parsing c4out
# reading GRD and FCMFINAL strings
# reconciling p4 and c4 molecules (orient)
# reconciling c4out and GRD and FCMFINAL results
# transforming frame of results back to p4

# # Things run_cfour needs to have back
# psivar
# qcdb.Molecule of c4?
# coordinates?
# gradient in p4 frame

#    # Process the cfour output
#    psivar, c4coord, c4grad = qcdb.cfour.cfour_harvest(c4out)
#    for key in psivar.keys():
#        core.set_variable(key.upper(), float(psivar[key]))
#
#    # Awful Hack - Go Away TODO
#    if c4grad:
#        molecule = core.get_active_molecule()
#        molecule.update_geometry()
#
#        if molecule.name() == 'blank_molecule_psi4_yo':
#            p4grad = c4grad
#            p4coord = c4coord
#        else:
#            qcdbmolecule = qcdb.Molecule(molecule.create_psi4_string_from_molecule())
#            #p4grad = qcdbmolecule.deorient_array_from_cfour(c4coord, c4grad)
#            #p4coord = qcdbmolecule.deorient_array_from_cfour(c4coord, c4coord)
#
#            with open(psioh.get_default_path() + cfour_tmpdir + '/GRD', 'r') as cfour_grdfile:
#                c4outgrd = cfour_grdfile.read()
#            print('GRD\n',c4outgrd)
#            c4coordGRD, c4gradGRD = qcdb.cfour.cfour_harvest_files(qcdbmolecule, grd=c4outgrd)
#
#        p4mat = core.Matrix.from_list(p4grad)
#        core.set_gradient(p4mat)

#    print('    <<<  P4 PSIVAR  >>>')
#    for item in psivar:
#        print('       %30s %16.8f' % (item, psivar[item]))
#print('    <<<  P4 COORD   >>>')
#for item in p4coord:
#    print('       %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))
#    print('    <<<   P4 GRAD   >>>')
#    for item in c4grad:
#        print('       %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))

# Clean up cfour scratch directory unless user instructs otherwise
    keep = yes.match(str(kwargs['keep'])) if 'keep' in kwargs else False
    os.chdir('..')
    try:
        if keep or ('path' in kwargs):
            core.print_out('\n  CFOUR scratch files have been kept in %s\n' %
                           (psioh.get_default_path() + cfour_tmpdir))
        else:
            shutil.rmtree(cfour_tmpdir)
    except OSError as e:
        print('Unable to remove CFOUR temporary directory %s' % e,
              file=sys.stderr)
        exit(1)

    # Return to submission directory and reopen output file
    os.chdir(current_directory)

    core.print_out('\n')
    p4util.banner(' Cfour %s %s Results ' %
                  (name.lower(), calledby.capitalize()))
    core.print_variables()
    if c4grad is not None:
        core.get_gradient().print_out()

    # Quit if Cfour threw error
    if 'CFOUR ERROR CODE' in core.variables():
        raise ValidationError("""Cfour exited abnormally.""")

    P4C4_INFO.clear()
    P4C4_INFO.update(internal_p4c4_info)

    # new skeleton wavefunction w/mol, highest-SCF basis (just to choose one), & not energy
    #   Feb 2017 hack. Could get proper basis in skel wfn even if not through p4 basis kw
    if core.get_global_option('BASIS') in ["", "(AUTO)"]:
        gobas = "sto-3g"
    else:
        gobas = core.get_global_option('BASIS')
    basis = core.BasisSet.build(molecule, "ORBITAL", gobas)
    if basis.has_ECP():
        raise ValidationError("""ECPs not hooked up for Cfour""")
    wfn = core.Wavefunction(molecule, basis)
    for k, v in psivar.items():
        wfn.set_variable(k.upper(), float(v))

    optstash.restore()

    if dertype == 0:
        finalquantity = psivar['CURRENT ENERGY']
    elif dertype == 1:
        finalquantity = core.get_gradient()
        wfn.set_gradient(finalquantity)
        if finalquantity.rows(0) < 20:
            core.print_out('CURRENT GRADIENT')
            finalquantity.print_out()
    elif dertype == 2:
        pass
        #finalquantity = finalhessian
        #wfn.set_hessian(finalquantity)
        #if finalquantity.rows(0) < 20:
        #    core.print_out('CURRENT HESSIAN')
        #    finalquantity.print_out()

    return wfn
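
run_cfour streams the child process's stdout into the Psi4 output file line by line while also collecting it for later harvesting. A generic sketch of that loop, with `echo` standing in for `xcfour`:

import subprocess

# Stream a child process's stdout line by line while keeping a copy for parsing;
# `echo` is only a stand-in for the real xcfour executable.
proc = subprocess.Popen(['echo', 'hello from the child process'], bufsize=0, stdout=subprocess.PIPE)

captured = ''
while True:
    data = proc.stdout.readline()
    if not data:
        break
    line = data.decode('utf-8')
    captured += line
    print(line, end='')  # the driver calls core.print_out(line) here instead

proc.wait()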
Example #25
0
def run_sapt_dft(name, **kwargs):
    optstash = p4util.OptionsState(['SCF', 'SCF_TYPE'],
                                   ['SCF', 'REFERENCE'],
                                   ['SCF', 'DFT_FUNCTIONAL'],
                                   ['SCF', 'DFT_GRAC_SHIFT'],
                                   ['SCF', 'SAVE_JK'])

    core.tstart()
    # Alter default algorithm
    if not core.has_option_changed('SCF', 'SCF_TYPE'):
        core.set_local_option('SCF', 'SCF_TYPE', 'DF')

    core.prepare_options_for_module("SAPT")

    # Get the molecule of interest
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        sapt_dimer = kwargs.pop('molecule', core.get_active_molecule())
    else:
        core.print_out('Warning! SAPT argument "ref_wfn" is only able to use molecule information.')
        sapt_dimer = ref_wfn.molecule()

    sapt_dimer, monomerA, monomerB = proc_util.prepare_sapt_molecule(sapt_dimer, "dimer")

    # Grab overall settings
    mon_a_shift = core.get_option("SAPT", "SAPT_DFT_GRAC_SHIFT_A")
    mon_b_shift = core.get_option("SAPT", "SAPT_DFT_GRAC_SHIFT_B")
    do_delta_hf = core.get_option("SAPT", "SAPT_DFT_DO_DHF")
    sapt_dft_functional = core.get_option("SAPT", "SAPT_DFT_FUNCTIONAL")

    # Print out the title and some information
    core.print_out("\n")
    core.print_out("         ---------------------------------------------------------\n")
    core.print_out("         " + "SAPT(DFT) Procedure".center(58) + "\n")
    core.print_out("\n")
    core.print_out("         " + "by Daniel G. A. Smith".center(58) + "\n")
    core.print_out("         ---------------------------------------------------------\n")
    core.print_out("\n")

    core.print_out("  ==> Algorithm <==\n\n")
    core.print_out("   SAPT DFT Functional     %12s\n" % str(sapt_dft_functional))
    core.print_out("   Monomer A GRAC Shift    %12.6f\n" % mon_a_shift)
    core.print_out("   Monomer B GRAC Shift    %12.6f\n" % mon_b_shift)
    core.print_out("   Delta HF                %12s\n" % ("True" if do_delta_hf else "False"))
    core.print_out("   JK Algorithm            %12s\n" % core.get_option("SCF", "SCF_TYPE"))
    core.print_out("\n")
    core.print_out("   Required computations:\n")
    if (do_delta_hf):
        core.print_out("     HF  (Dimer)\n")
        core.print_out("     HF  (Monomer A)\n")
        core.print_out("     HF  (Monomer B)\n")
    core.print_out("     DFT (Monomer A)\n")
    core.print_out("     DFT (Monomer B)\n")
    core.print_out("\n")

    if (sapt_dft_functional != "HF") and ((mon_a_shift == 0.0) or (mon_b_shift == 0.0)):
        raise ValidationError('SAPT(DFT): must set both "SAPT_DFT_GRAC_SHIFT_A" and "B".')

    if (core.get_option('SCF', 'REFERENCE') != 'RHF'):
        raise ValidationError('SAPT(DFT) currently only supports restricted references.')


    core.IO.set_default_namespace('dimer')
    data = {}

    core.set_global_option("SAVE_JK", True)
    if (core.get_option('SCF', 'SCF_TYPE') == 'DF'):
        # core.set_global_option('DF_INTS_IO', 'LOAD')
        core.set_global_option('DF_INTS_IO', 'SAVE')

    # # Compute dimer wavefunction
    hf_cache = {}
    hf_wfn_dimer = None
    if do_delta_hf:
        if (core.get_option('SCF', 'SCF_TYPE') == 'DF'):
            core.set_global_option('DF_INTS_IO', 'SAVE')

        hf_data = {}
        hf_wfn_dimer = scf_helper(
            "SCF", molecule=sapt_dimer, banner="SAPT(DFT): delta HF Dimer", **kwargs)
        hf_data["HF DIMER"] = core.get_variable("CURRENT ENERGY")

        if (core.get_option('SCF', 'SCF_TYPE') == 'DF'):
            core.IO.change_file_namespace(97, 'dimer', 'monomerA')
        hf_wfn_A = scf_helper(
            "SCF", molecule=monomerA, banner="SAPT(DFT): delta HF Monomer A", **kwargs)
        hf_data["HF MONOMER A"] = core.get_variable("CURRENT ENERGY")

        if (core.get_option('SCF', 'SCF_TYPE') == 'DF'):
            core.IO.change_file_namespace(97, 'monomerA', 'monomerB')
        hf_wfn_B = scf_helper(
            "SCF", molecule=monomerB, banner="SAPT(DFT): delta HF Monomer B", **kwargs)
        hf_data["HF MONOMER B"] = core.get_variable("CURRENT ENERGY")

        # Move it back to monomer A
        if (core.get_option('SCF', 'SCF_TYPE') == 'DF'):
            core.IO.change_file_namespace(97, 'monomerB', 'dimer')

        core.print_out("\n")
        core.print_out("         ---------------------------------------------------------\n")
        core.print_out("         " + "SAPT(DFT): delta HF Segement".center(58) + "\n")
        core.print_out("\n")
        core.print_out("         " + "by Daniel G. A. Smith and Rob Parrish".center(58) + "\n")
        core.print_out("         ---------------------------------------------------------\n")
        core.print_out("\n")

        # Build cache and JK
        sapt_jk = hf_wfn_B.jk()

        hf_cache = sapt_jk_terms.build_sapt_jk_cache(hf_wfn_A, hf_wfn_B, sapt_jk, True)

        # Electrostatics
        elst = sapt_jk_terms.electrostatics(hf_cache, True)
        hf_data.update(elst)

        # Exchange
        exch = sapt_jk_terms.exchange(hf_cache, sapt_jk, True)
        hf_data.update(exch)

        # Induction
        ind = sapt_jk_terms.induction(
            hf_cache,
            sapt_jk,
            True,
            maxiter=core.get_option("SAPT", "MAXITER"),
            conv=core.get_option("SAPT", "D_CONVERGENCE"))
        hf_data.update(ind)

        dhf_value = hf_data["HF DIMER"] - hf_data["HF MONOMER A"] - hf_data["HF MONOMER B"]

        core.print_out("\n")
        core.print_out(print_sapt_hf_summary(hf_data, "SAPT(HF)", delta_hf=dhf_value))

        data["Delta HF Correction"] = core.get_variable("SAPT(DFT) Delta HF")

    if hf_wfn_dimer is None:
        dimer_wfn = core.Wavefunction.build(sapt_dimer, core.get_global_option("BASIS"))
    else:
        dimer_wfn = hf_wfn_dimer

    # Set the primary functional
    core.set_global_option("DFT_FUNCTIONAL", core.get_option("SAPT", "SAPT_DFT_FUNCTIONAL"))
    core.set_local_option('SCF', 'REFERENCE', 'RKS')

    # Compute Monomer A wavefunction
    if (core.get_option('SCF', 'SCF_TYPE') == 'DF'):
        core.IO.change_file_namespace(97, 'dimer', 'monomerA')

    if mon_a_shift:
        core.set_global_option("DFT_GRAC_SHIFT", mon_a_shift)

    # Save the JK object
    core.IO.set_default_namespace('monomerA')
    wfn_A = scf_helper("SCF", molecule=monomerA, banner="SAPT(DFT): DFT Monomer A", **kwargs)
    data["DFT MONOMERA"] = core.get_variable("CURRENT ENERGY")

    core.set_global_option("DFT_GRAC_SHIFT", 0.0)

    # Compute Monomer B wavefunction
    if (core.get_option('SCF', 'SCF_TYPE') == 'DF'):
        core.IO.change_file_namespace(97, 'monomerA', 'monomerB')

    if mon_b_shift:
        core.set_global_option("DFT_GRAC_SHIFT", mon_b_shift)

    core.IO.set_default_namespace('monomerB')
    wfn_B = scf_helper("SCF", molecule=monomerB, banner="SAPT(DFT): DFT Monomer B", **kwargs)
    data["DFT MONOMERB"] = core.get_variable("CURRENT ENERGY")

    core.set_global_option("DFT_GRAC_SHIFT", 0.0)

    # Print out the title and some information
    core.print_out("\n")
    core.print_out("         ---------------------------------------------------------\n")
    core.print_out("         " + "SAPT(DFT): Intermolecular Interaction Segment".center(58) + "\n")
    core.print_out("\n")
    core.print_out("         " + "by Daniel G. A. Smith and Rob Parrish".center(58) + "\n")
    core.print_out("         ---------------------------------------------------------\n")
    core.print_out("\n")

    core.print_out("  ==> Algorithm <==\n\n")
    core.print_out("   SAPT DFT Functional     %12s\n" % str(sapt_dft_functional))
    core.print_out("   Monomer A GRAC Shift    %12.6f\n" % mon_a_shift)
    core.print_out("   Monomer B GRAC Shift    %12.6f\n" % mon_b_shift)
    core.print_out("   Delta HF                %12s\n" % ("True" if do_delta_hf else "False"))
    core.print_out("   JK Algorithm            %12s\n" % core.get_option("SCF", "SCF_TYPE"))

    # Build cache and JK
    sapt_jk = wfn_B.jk()

    cache = sapt_jk_terms.build_sapt_jk_cache(wfn_A, wfn_B, sapt_jk, True)

    # Electrostatics
    elst = sapt_jk_terms.electrostatics(cache, True)
    data.update(elst)

    # Exchange
    exch = sapt_jk_terms.exchange(cache, sapt_jk, True)
    data.update(exch)

    # Induction
    ind = sapt_jk_terms.induction(
        cache,
        sapt_jk,
        True,
        maxiter=core.get_option("SAPT", "MAXITER"),
        conv=core.get_option("SAPT", "D_CONVERGENCE"))
    data.update(ind)

    # Dispersion
    primary_basis = wfn_A.basisset()
    core.print_out("\n")
    aux_basis = core.BasisSet.build(sapt_dimer, "DF_BASIS_MP2",
                                    core.get_option("DFMP2", "DF_BASIS_MP2"), "RIFIT",
                                    core.get_global_option('BASIS'))
    fdds_disp = sapt_mp2_terms.df_fdds_dispersion(primary_basis, aux_basis, cache)
    data.update(fdds_disp)

    if core.get_option("SAPT", "SAPT_DFT_MP2_DISP_ALG") == "FISAPT":
        mp2_disp = sapt_mp2_terms.df_mp2_fisapt_dispersion(wfn_A, primary_basis, aux_basis, cache, do_print=True)
    else:
        mp2_disp = sapt_mp2_terms.df_mp2_sapt_dispersion(
            dimer_wfn, wfn_A, wfn_B, primary_basis, aux_basis, cache, do_print=True)
    data.update(mp2_disp)

    # Print out final data
    core.print_out("\n")
    core.print_out(print_sapt_dft_summary(data, "SAPT(DFT)"))

    # Copy data back into globals
    for k, v in data.items():
        core.set_variable(k, v)

    core.tstop()

    return dimer_wfn
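
The delta-HF correction gathered above is simply the HF interaction energy, dimer minus monomers, which is then compared against the sum of the perturbative HF terms. A tiny sketch of that arithmetic with made-up total energies:

# Made-up HF total energies in hartree, only to illustrate the delta-HF arithmetic.
hf_data = {
    "HF DIMER": -152.061,
    "HF MONOMER A": -76.025,
    "HF MONOMER B": -76.030,
}
dhf_value = hf_data["HF DIMER"] - hf_data["HF MONOMER A"] - hf_data["HF MONOMER B"]
print("HF interaction energy: %.6f Eh" % dhf_value)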
Example #26
0
def sapt_empirical_dispersion(name, dimer_wfn, **kwargs):
    sapt_dimer = dimer_wfn.molecule()
    sapt_dimer, monomerA, monomerB = prepare_sapt_molecule(sapt_dimer, "dimer")
    disp_name = name.split("-")[1]

    # Get the names right between SAPT0 and FISAPT0
    saptd_name = name.split('-')[0].upper()
    if saptd_name == "SAPT0":
        sapt0_name = "SAPT0"
    else:
        sapt0_name = "SAPT"

    save_pair = (saptd_name == "FISAPT0")

    from .proc import build_disp_functor
    _, _disp_functor = build_disp_functor('hf-' + disp_name, restricted=True, save_pairwise_disp=save_pair, **kwargs)

    ## Dimer dispersion
    dimer_disp_energy = _disp_functor.compute_energy(dimer_wfn.molecule(), dimer_wfn)
    ## Monomer dispersion
    mon_disp_energy = _disp_functor.compute_energy(monomerA)
    mon_disp_energy += _disp_functor.compute_energy(monomerB)

    disp_interaction_energy = dimer_disp_energy - mon_disp_energy
    core.set_variable(saptd_name + "-D DISP ENERGY", disp_interaction_energy)
    core.set_variable("SAPT DISP ENERGY", disp_interaction_energy)
    core.set_variable("DISPERSION CORRECTION ENERGY", disp_interaction_energy)
    core.set_variable(saptd_name + "DISPERSION CORRECTION ENERGY", disp_interaction_energy)

    ## Set SAPT0-D3 variables
    total = disp_interaction_energy
    saptd_en = {}
    saptd_en['DISP'] = disp_interaction_energy
    for term in ['ELST', 'EXCH', 'IND']:
        en = core.variable(' '.join([sapt0_name, term, 'ENERGY']))
        saptd_en[term] = en
        core.set_variable(' '.join([saptd_name + '-D', term, 'ENERGY']), en)
        core.set_variable(' '.join(['SAPT', term, 'ENERGY']), en)
        total += en

    core.set_variable(saptd_name + '-D TOTAL ENERGY', total)
    core.set_variable('SAPT TOTAL ENERGY', total)
    core.set_variable('CURRENT ENERGY', total)

    ## Print Energy Summary
    units = (1000.0, constants.hartree2kcalmol, constants.hartree2kJmol)
    core.print_out(f"    => {saptd_name +'-D'} Energy Summary <=\n")

    core.print_out("  " + "-" * 104 + "\n")
    core.print_out(
        "    %-25s % 16.8f [mEh] % 16.8f [kcal/mol] % 16.8f [kJ/mol]\n" %
        ("Electrostatics", saptd_en['ELST'] * units[0], saptd_en['ELST'] * units[1], saptd_en['ELST'] * units[2]))
    core.print_out("    %-25s % 16.8f [mEh] % 16.8f [kcal/mol] % 16.8f [kJ/mol]\n" %
                   ("Exchange", saptd_en['EXCH'] * units[0], saptd_en['EXCH'] * units[1], saptd_en['EXCH'] * units[2]))
    core.print_out("    %-25s % 16.8f [mEh] % 16.8f [kcal/mol] % 16.8f [kJ/mol]\n" %
                   ("Induction", saptd_en['IND'] * units[0], saptd_en['IND'] * units[1], saptd_en['IND'] * units[2]))
    core.print_out(
        "    %-25s % 16.8f [mEh] % 16.8f [kcal/mol] % 16.8f [kJ/mol]\n" %
        ("Dispersion", saptd_en['DISP'] * units[0], saptd_en['DISP'] * units[1], saptd_en['DISP'] * units[2]))
    core.print_out("  %-27s % 16.8f [mEh] % 16.8f [kcal/mol] % 16.8f [kJ/mol]\n" %
                   ("Total " + saptd_name + "-D", total * units[0], total * units[1], total * units[2]))
    core.print_out("  " + "-" * 104 + "\n")

    if saptd_name == "FISAPT0":
        pw_disp = dimer_wfn.variable("PAIRWISE DISPERSION CORRECTION ANALYSIS")
        pw_disp.name = 'Empirical_Disp'
        filepath = core.get_option("FISAPT", "FISAPT_FSAPT_FILEPATH")
        fisapt_proc._drop(pw_disp, filepath)

    return dimer_wfn
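
sapt_empirical_dispersion swaps the SAPT dispersion term for an empirical -D interaction energy (dimer dispersion minus the two monomer dispersions) and re-sums the total. A compact sketch of that assembly with placeholder component energies:

# Placeholder component energies in hartree; the driver pulls ELST/EXCH/IND from the
# preceding SAPT0 run and the -D pieces from the dispersion functor.
dimer_disp, monA_disp, monB_disp = -0.00310, -0.00105, -0.00090
saptd_en = {
    'ELST': -0.00620,
    'EXCH': 0.00540,
    'IND': -0.00180,
    'DISP': dimer_disp - (monA_disp + monB_disp),
}
total = sum(saptd_en.values())
print("SAPT0-D total interaction energy: %.6f Eh" % total)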
Example #27
0
def run_sf_sapt(name, **kwargs):
    optstash = p4util.OptionsState(['SCF_TYPE'], ['SCF', 'REFERENCE'], ['SCF', 'DFT_GRAC_SHIFT'], ['SCF', 'SAVE_JK'])

    core.tstart()

    # Alter default algorithm
    if not core.has_global_option_changed('SCF_TYPE'):
        core.set_global_option('SCF_TYPE', 'DF')

    core.prepare_options_for_module("SAPT")

    # Get the molecule of interest
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        sapt_dimer = kwargs.pop('molecule', core.get_active_molecule())
    else:
        core.print_out('Warning! SAPT argument "ref_wfn" is only able to use molecule information.')
        sapt_dimer = ref_wfn.molecule()

    sapt_dimer, monomerA, monomerB = proc_util.prepare_sapt_molecule(sapt_dimer, "dimer")

    # Print out the title and some information
    core.print_out("\n")
    core.print_out("         ---------------------------------------------------------\n")
    core.print_out("         " + "Spin-Flip SAPT Procedure".center(58) + "\n")
    core.print_out("\n")
    core.print_out("         " + "by Daniel G. A. Smith and Konrad Patkowski".center(58) + "\n")
    core.print_out("         ---------------------------------------------------------\n")
    core.print_out("\n")

    core.print_out("  ==> Algorithm <==\n\n")
    core.print_out("   JK Algorithm            %12s\n" % core.get_option("SCF", "SCF_TYPE"))
    core.print_out("\n")
    core.print_out("   Required computations:\n")
    core.print_out("     HF  (Monomer A)\n")
    core.print_out("     HF  (Monomer B)\n")
    core.print_out("\n")

    if (core.get_option('SCF', 'REFERENCE') != 'ROHF'):
        raise ValidationError('Spin-Flip SAPT currently only supports restricted open-shell references.')

    # Run the two monomer computations
    core.IO.set_default_namespace('dimer')
    data = {}

    if (core.get_global_option('SCF_TYPE') == 'DF'):
        core.set_global_option('DF_INTS_IO', 'SAVE')

    # Compute dimer wavefunction
    wfn_A = scf_helper("SCF", molecule=monomerA, banner="SF-SAPT: HF Monomer A", **kwargs)

    core.set_global_option("SAVE_JK", True)
    wfn_B = scf_helper("SCF", molecule=monomerB, banner="SF-SAPT: HF Monomer B", **kwargs)
    sapt_jk = wfn_B.jk()
    core.set_global_option("SAVE_JK", False)
    core.print_out("\n")
    core.print_out("         ---------------------------------------------------------\n")
    core.print_out("         " + "Spin-Flip SAPT Exchange and Electrostatics".center(58) + "\n")
    core.print_out("\n")
    core.print_out("         " + "by Daniel G. A. Smith and Konrad Patkowski".center(58) + "\n")
    core.print_out("         ---------------------------------------------------------\n")
    core.print_out("\n")

    sf_data = sapt_sf_terms.compute_sapt_sf(sapt_dimer, sapt_jk, wfn_A, wfn_B)

    # Print the results
    core.print_out("   Spin-Flip SAPT Results\n")
    core.print_out("  " + "-" * 103 + "\n")

    for key, value in sf_data.items():
        print_vals = (key, value * 1000, value * constants.hartree2kcalmol, value * constants.hartree2kJmol)
        string = "    %-26s % 15.8f [mEh] % 15.8f [kcal/mol] % 15.8f [kJ/mol]\n" % print_vals
        core.print_out(string)
    core.print_out("  " + "-" * 103 + "\n\n")

    dimer_wfn = core.Wavefunction.build(sapt_dimer, wfn_A.basisset())

    # Set variables
    psivar_translator = {
        "Elst10": "SAPT ELST ENERGY",
        "Exch10(S^2) [diagonal]": "SAPT EXCH10(S^2),DIAGONAL ENERGY",
        "Exch10(S^2) [off-diagonal]": "SAPT EXCH10(S^2),OFF-DIAGONAL ENERGY",
        "Exch10(S^2) [highspin]": "SAPT EXCH10(S^2),HIGHSPIN ENERGY",
    }

    for k, v in sf_data.items():
        psi_k = psivar_translator[k]

        dimer_wfn.set_variable(psi_k, v)
        core.set_variable(psi_k, v)

    # Copy over highspin
    core.set_variable("SAPT EXCH ENERGY", sf_data["Exch10(S^2) [highspin]"])

    core.tstop()

    return dimer_wfn
Example #28
0
def mcscf_solver(ref_wfn):

    # Build CIWavefunction
    core.prepare_options_for_module("DETCI")
    ciwfn = core.CIWavefunction(ref_wfn)

    # Hush a lot of CI output
    ciwfn.set_print(0)

    # Begin with a normal two-step
    step_type = 'Initial CI'
    total_step = core.Matrix("Total step", ciwfn.get_dimension('OA'),
                             ciwfn.get_dimension('AV'))
    start_orbs = ciwfn.get_orbitals("ROT").clone()
    ciwfn.set_orbitals("ROT", start_orbs)

    # Grab the options
    mcscf_orb_grad_conv = core.get_option("DETCI", "MCSCF_R_CONVERGENCE")
    mcscf_e_conv = core.get_option("DETCI", "MCSCF_E_CONVERGENCE")
    mcscf_max_macroiteration = core.get_option("DETCI", "MCSCF_MAXITER")
    mcscf_type = core.get_option("DETCI", "MCSCF_TYPE")
    mcscf_d_file = core.get_option("DETCI", "CI_FILE_START") + 3
    mcscf_nroots = core.get_option("DETCI", "NUM_ROOTS")
    mcscf_wavefunction_type = core.get_option("DETCI", "WFN")
    mcscf_ndet = ciwfn.ndet()
    mcscf_nuclear_energy = ciwfn.molecule().nuclear_repulsion_energy()
    mcscf_steplimit = core.get_option("DETCI", "MCSCF_MAX_ROT")
    mcscf_rotate = core.get_option("DETCI", "MCSCF_ROTATE")

    # DIIS info
    mcscf_diis_start = core.get_option("DETCI", "MCSCF_DIIS_START")
    mcscf_diis_freq = core.get_option("DETCI", "MCSCF_DIIS_FREQ")
    mcscf_diis_error_type = core.get_option("DETCI", "MCSCF_DIIS_ERROR_TYPE")
    mcscf_diis_max_vecs = core.get_option("DETCI", "MCSCF_DIIS_MAX_VECS")

    # One-step info
    mcscf_target_conv_type = core.get_option("DETCI", "MCSCF_ALGORITHM")
    mcscf_so_start_grad = core.get_option("DETCI", "MCSCF_SO_START_GRAD")
    mcscf_so_start_e = core.get_option("DETCI", "MCSCF_SO_START_E")
    mcscf_current_step_type = 'Initial CI'

    # Start with SCF energy and other params
    scf_energy = core.get_variable("HF TOTAL ENERGY")
    eold = scf_energy
    norb_iter = 1
    converged = False
    ah_step = False
    qc_step = False

    # Fake info to start with the initial diagonalization
    ediff = 1.e-4
    orb_grad_rms = 1.e-3

    # Grab needed objects
    diis_obj = diis_helper.DIIS_helper(mcscf_diis_max_vecs)
    mcscf_obj = ciwfn.mcscf_object()

    # Execute the rotate command
    for rot in mcscf_rotate:
        if len(rot) != 4:
            raise p4util.PsiException(
                "Each element of the MCSCF rotate command requires 4 arguements (irrep, orb1, orb2, theta)."
            )

        irrep, orb1, orb2, theta = rot
        if irrep > ciwfn.Ca().nirrep():
            raise p4util.PsiException(
                "MCSCF_ROTATE: Expression %s irrep number is larger than the number of irreps"
                % (str(rot)))

        if max(orb1, orb2) > ciwfn.Ca().coldim()[irrep]:
            raise p4util.PsiException(
                "MCSCF_ROTATE: Expression %s orbital number exceeds number of orbitals in irrep"
                % (str(rot)))

        theta = np.deg2rad(theta)
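        # Apply a 2x2 (Givens) rotation by theta that mixes columns orb1 and orb2 of Ca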

        x = ciwfn.Ca().nph[irrep][:, orb1].copy()
        y = ciwfn.Ca().nph[irrep][:, orb2].copy()

        xp = np.cos(theta) * x - np.sin(theta) * y
        yp = np.sin(theta) * x + np.cos(theta) * y

        ciwfn.Ca().nph[irrep][:, orb1] = xp
        ciwfn.Ca().nph[irrep][:, orb2] = yp

    # Limited RAS functionality
    if core.get_local_option(
            "DETCI", "WFN") == "RASSCF" and mcscf_target_conv_type != "TS":
        core.print_out(
            "\n  Warning! Only the TS algorithm for RASSCF wavefunction is currently supported.\n"
        )
        core.print_out("             Switching to the TS algorithm.\n\n")
        mcscf_target_conv_type = "TS"

    # Print out headers
    if mcscf_type == "CONV":
        mtype = "   @MCSCF"
        core.print_out("\n   ==> Starting MCSCF iterations <==\n\n")
        core.print_out(
            "        Iter         Total Energy       Delta E   Orb RMS    CI RMS  NCI NORB\n"
        )
    else:
        mtype = "   @DF-MCSCF"
        core.print_out("\n   ==> Starting DF-MCSCF iterations <==\n\n")
        core.print_out(
            "           Iter         Total Energy       Delta E   Orb RMS    CI RMS  NCI NORB\n"
        )

    # Iterate !
    for mcscf_iter in range(1, mcscf_max_macroiteration + 1):

        # Transform integrals, diagonalize H
        ciwfn.transform_mcscf_integrals(mcscf_current_step_type == 'TS')
        nci_iter = ciwfn.diag_h(abs(ediff) * 1.e-2, orb_grad_rms * 1.e-3)

        ciwfn.form_opdm()
        ciwfn.form_tpdm()
        ci_grad_rms = core.get_variable("DETCI AVG DVEC NORM")

        # Update MCSCF object
        Cocc = ciwfn.get_orbitals("DOCC")
        Cact = ciwfn.get_orbitals("ACT")
        Cvir = ciwfn.get_orbitals("VIR")
        opdm = ciwfn.get_opdm(-1, -1, "SUM", False)
        tpdm = ciwfn.get_tpdm("SUM", True)
        mcscf_obj.update(Cocc, Cact, Cvir, opdm, tpdm)

        current_energy = core.get_variable('CURRENT ENERGY')

        orb_grad_rms = mcscf_obj.gradient_rms()
        ediff = current_energy - eold

        # Print iterations
        print_iteration(mtype, mcscf_iter, current_energy, ediff, orb_grad_rms,
                        ci_grad_rms, nci_iter, norb_iter,
                        mcscf_current_step_type)
        eold = current_energy

        if mcscf_current_step_type == 'Initial CI':
            mcscf_current_step_type = 'TS'

        # Check convergence
        if (orb_grad_rms < mcscf_orb_grad_conv) and (abs(ediff) < abs(mcscf_e_conv)) and\
            (mcscf_iter > 3) and not qc_step:

            core.print_out("\n       %s has converged!\n\n" % mtype)
            converged = True
            break

        # Which orbital convergence are we doing?
        if ah_step:
            converged, norb_iter, step = ah_iteration(mcscf_obj,
                                                      print_micro=False)
            norb_iter += 1

            if converged:
                mcscf_current_step_type = 'AH'
            else:
                core.print_out(
                    "      !Warning. Augmented Hessian did not converge. Taking an approx step.\n"
                )
                step = mcscf_obj.approx_solve()
                mcscf_current_step_type = 'TS, AH failure'

        else:
            step = mcscf_obj.approx_solve()
            step_type = 'TS'

        maxstep = step.absmax()
        if maxstep > mcscf_steplimit:
            core.print_out(
                '      Warning! Maxstep = %4.2f, scaling to %4.2f\n' %
                (maxstep, mcscf_steplimit))
            step.scale(mcscf_steplimit / maxstep)

        total_step.add(step)

        # Do or add DIIS
        if (mcscf_iter >= mcscf_diis_start) and ("TS"
                                                 in mcscf_current_step_type):

            # Figure out DIIS error vector
            if mcscf_diis_error_type == "GRAD":
                error = core.Matrix.triplet(ciwfn.get_orbitals("OA"),
                                            mcscf_obj.gradient(),
                                            ciwfn.get_orbitals("AV"), False,
                                            False, True)
            else:
                error = step

            diis_obj.add(total_step, error)

            if not (mcscf_iter % mcscf_diis_freq):
                total_step = diis_obj.extrapolate()
                mcscf_current_step_type = 'TS, DIIS'

        # Finally rotate and set orbitals
        orbs_mat = mcscf_obj.Ck(start_orbs, total_step)
        ciwfn.set_orbitals("ROT", orbs_mat)

        # Figure out what the next step should be
        if (orb_grad_rms < mcscf_so_start_grad) and (abs(ediff) < abs(mcscf_so_start_e)) and\
                (mcscf_iter >= 2):

            if mcscf_target_conv_type == 'AH':
                ah_step = True
            elif mcscf_target_conv_type == 'OS':
                mcscf_current_step_type = 'OS, Prep'
                break
            else:
                continue
        #raise p4util.PsiException("")

    # If we converged do not do onestep
    if converged or (mcscf_target_conv_type != 'OS'):
        one_step_iters = []

    # If we are not converged load in Dvec and build iters array
    else:
        one_step_iters = range(mcscf_iter + 1, mcscf_max_macroiteration + 1)
        dvec = ciwfn.new_civector(1, mcscf_d_file, True, True)
        dvec.set_nvec(1)
        dvec.init_io_files(True)
        dvec.read(0, 0)
        dvec.symnormalize(1.0, 0)

        ci_grad = ciwfn.new_civector(1, mcscf_d_file + 1, True, True)
        ci_grad.set_nvec(1)
        ci_grad.init_io_files(True)

    # Loop for onestep
    for mcscf_iter in one_step_iters:

        # Transform integrals and update the MCSCF object
        ciwfn.transform_mcscf_integrals(False)
        ciwfn.form_opdm()
        ciwfn.form_tpdm()

        # Update MCSCF object
        Cocc = ciwfn.get_orbitals("DOCC")
        Cact = ciwfn.get_orbitals("ACT")
        Cvir = ciwfn.get_orbitals("VIR")
        opdm = ciwfn.get_opdm(-1, -1, "SUM", False)
        tpdm = ciwfn.get_tpdm("SUM", True)
        mcscf_obj.update(Cocc, Cact, Cvir, opdm, tpdm)

        orb_grad_rms = mcscf_obj.gradient_rms()

        # Warning! Does not work for SA-MCSCF
        current_energy = mcscf_obj.current_total_energy()
        current_energy += mcscf_nuclear_energy

        core.set_variable("CI ROOT %d TOTAL ENERGY" % 1, current_energy)
        core.set_variable("CURRENT ENERGY", current_energy)

        docc_energy = mcscf_obj.current_docc_energy()
        ci_energy = mcscf_obj.current_ci_energy()

        # Compute CI gradient
        ciwfn.sigma(dvec, ci_grad, 0, 0)
        ci_grad.scale(2.0, 0)
        ci_grad.axpy(-2.0 * ci_energy, dvec, 0, 0)

        ci_grad_rms = ci_grad.norm(0)
        orb_grad_rms = mcscf_obj.gradient().rms()

        ediff = current_energy - eold

        print_iteration(mtype, mcscf_iter, current_energy, ediff, orb_grad_rms,
                        ci_grad_rms, nci_iter, norb_iter,
                        mcscf_current_step_type)
        mcscf_current_step_type = 'OS'

        eold = current_energy

        if (orb_grad_rms < mcscf_orb_grad_conv) and (abs(ediff) <
                                                     abs(mcscf_e_conv)):

            core.print_out("\n       %s has converged!\n\n" % mtype)
            converged = True
            break

        # Take a step
        converged, norb_iter, nci_iter, step = qc_iteration(
            dvec, ci_grad, ciwfn, mcscf_obj)

        # Rotate integrals to new frame
        total_step.add(step)
        orbs_mat = mcscf_obj.Ck(ciwfn.get_orbitals("ROT"), step)
        ciwfn.set_orbitals("ROT", orbs_mat)

    core.print_out(mtype + " Final Energy: %20.15f\n" % current_energy)

    # Die if we did not converge
    if (not converged):
        if core.get_global_option("DIE_IF_NOT_CONVERGED"):
            raise p4util.PsiException("MCSCF: Iterations did not converge!")
        else:
            core.print_out("\nWarning! MCSCF iterations did not converge!\n\n")

    # Print out energetics
    core.print_out("\n   ==> Energetics <==\n\n")
    core.print_out("    SCF energy =         %20.15f\n" % scf_energy)
    core.print_out("    Total CI energy =    %20.15f\n\n" % current_energy)

    # Print out CI vector information
    if mcscf_target_conv_type != 'SO':
        dvec = ciwfn.new_civector(mcscf_nroots, mcscf_d_file, True, True)
        dvec.init_io_files(True)

    irrep_labels = ciwfn.molecule().irrep_labels()
    for root in range(mcscf_nroots):
        core.print_out("\n   ==> CI root %d information <==\n\n" % (root + 1))

        # Print total energy
        root_e = core.get_variable("CI ROOT %d TOTAL ENERGY" % (root + 1))
        core.print_out("    CI Root %2d energy =  %20.15f\n" %
                       (root + 1, root_e))

        # Print natural occupations
        core.print_out("\n   Natural occupation numbers:\n\n")

        occs_list = []
        r_opdm = ciwfn.get_opdm(root, root, "SUM", False)
        for h in range(len(r_opdm.nph)):
            if 0 in r_opdm.nph[h].shape:
                continue
            nocc, rot = np.linalg.eigh(r_opdm.nph[h])
            for e in nocc:
                occs_list.append((e, irrep_labels[h]))

        occs_list.sort(key=lambda x: -x[0])

        for cnt, (value, label) in enumerate(occs_list, start=1):
            core.print_out("      %4s  % 8.6f" % (label, value))
            if (cnt % 3) == 0:
                core.print_out("\n")

        # Print CIVector information
        ciwfn.print_vector(dvec, root)

    # What do we need to clean up?
    if core.get_option("DETCI", "MCSCF_CI_CLEANUP"):
        ciwfn.cleanup_ci()
    if core.get_option("DETCI", "MCSCF_DPD_CLEANUP"):
        # print('Cleaning up DPD data!')
        ciwfn.cleanup_dpd()

    del diis_obj
    del mcscf_obj
    return ciwfn
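
# A minimal, self-contained sketch of the Pulay-DIIS extrapolation that the
# `diis_obj.add(...)` / `diis_obj.extrapolate()` calls above rely on. The class name
# and the flattened-numpy storage are illustrative assumptions, not Psi4's DIIS manager.
import numpy as np

class SimpleDIIS:
    def __init__(self, max_vecs=6):
        self.max_vecs = max_vecs
        self.trials = []   # accumulated steps (flattened for simplicity)
        self.errors = []   # matching error vectors

    def add(self, trial, error):
        self.trials.append(np.asarray(trial, dtype=float).ravel())
        self.errors.append(np.asarray(error, dtype=float).ravel())
        if len(self.trials) > self.max_vecs:
            self.trials.pop(0)
            self.errors.pop(0)

    def extrapolate(self):
        # Minimize |sum_i c_i e_i| subject to sum_i c_i = 1 via the bordered B-matrix system.
        n = len(self.trials)
        B = -np.ones((n + 1, n + 1))
        B[-1, -1] = 0.0
        for i in range(n):
            for j in range(n):
                B[i, j] = np.dot(self.errors[i], self.errors[j])
        rhs = np.zeros(n + 1)
        rhs[-1] = -1.0
        coeffs = np.linalg.solve(B, rhs)[:n]
        return sum(c * t for c, t in zip(coeffs, self.trials))

# Two trials whose error vectors cancel extrapolate to their average:
_d = SimpleDIIS()
_d.add([1.0, 0.0], [0.1, -0.1])
_d.add([0.0, 1.0], [-0.1, 0.1])
print(_d.extrapolate())  # approximately [0.5, 0.5]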
Example #29
0
def scf_iterate(self, e_conv=None, d_conv=None):

    is_dfjk = core.get_global_option('SCF_TYPE').endswith('DF')
    verbose = core.get_option('SCF', "PRINT")
    reference = core.get_option('SCF', "REFERENCE")

    # self.member_data_ signals are non-local, used internally by c-side fns
    self.diis_enabled_ = _validate_diis()
    self.MOM_excited_ = _validate_MOM()
    self.diis_start_ = core.get_option('SCF', 'DIIS_START')
    damping_enabled = _validate_damping()
    soscf_enabled = _validate_soscf()
    frac_enabled = _validate_frac()
    efp_enabled = hasattr(self.molecule(), 'EFP')
    diis_rms = core.get_option('SCF', 'DIIS_RMS_ERROR')

    if self.iteration_ < 2:
        core.print_out("  ==> Iterations <==\n\n")
        core.print_out(
            "%s                        Total Energy        Delta E     %s |[F,P]|\n\n"
            % ("   " if is_dfjk else "", "RMS" if diis_rms else "MAX"))

    # SCF iterations!
    SCFE_old = 0.0
    Dnorm = 0.0
    while True:
        self.iteration_ += 1

        diis_performed = False
        soscf_performed = False
        self.frac_performed_ = False
        #self.MOM_performed_ = False  # redundant from common_init()

        self.save_density_and_energy()

        if efp_enabled:
            # EFP: Add efp contribution to Fock matrix
            self.H().copy(self.Horig)
            global mints_psi4_yo
            mints_psi4_yo = core.MintsHelper(self.basisset())
            Vefp = modify_Fock_induced(self.molecule().EFP,
                                       mints_psi4_yo,
                                       verbose=verbose - 1)
            Vefp = core.Matrix.from_array(Vefp)
            self.H().add(Vefp)

        SCFE = 0.0
        self.clear_external_potentials()

        core.timer_on("HF: Form G")
        self.form_G()
        core.timer_off("HF: Form G")

        upcm = 0.0
        if core.get_option('SCF', 'PCM'):
            calc_type = core.PCM.CalcType.Total
            if core.get_option("PCM", "PCM_SCF_TYPE") == "SEPARATE":
                calc_type = core.PCM.CalcType.NucAndEle
            Dt = self.Da().clone()
            Dt.add(self.Db())
            upcm, Vpcm = self.get_PCM().compute_PCM_terms(Dt, calc_type)
            SCFE += upcm
            self.push_back_external_potential(Vpcm)
        self.set_variable("PCM POLARIZATION ENERGY", upcm)
        self.set_energies("PCM Polarization", upcm)

        upe = 0.0
        if core.get_option('SCF', 'PE'):
            Dt = self.Da().clone()
            Dt.add(self.Db())
            upe, Vpe = self.pe_state.get_pe_contribution(Dt, elec_only=False)
            SCFE += upe
            self.push_back_external_potential(Vpe)
        self.set_variable("PE ENERGY", upe)
        self.set_energies("PE Energy", upe)

        core.timer_on("HF: Form F")
        # SAD: since we don't have orbitals yet, we might not be able
        # to form the real Fock matrix. Instead, build an initial one
        if (self.iteration_ == 0) and self.sad_:
            self.form_initial_F()
        else:
            self.form_F()
        core.timer_off("HF: Form F")

        if verbose > 3:
            self.Fa().print_out()
            self.Fb().print_out()

        SCFE += self.compute_E()
        if efp_enabled:
            global efp_Dt_psi4_yo

            # EFP: Add efp contribution to energy
            efp_Dt_psi4_yo = self.Da().clone()
            efp_Dt_psi4_yo.add(self.Db())
            SCFE += self.molecule().EFP.get_wavefunction_dependent_energy()

        self.set_energies("Total Energy", SCFE)
        core.set_variable("SCF ITERATION ENERGY", SCFE)
        Ediff = SCFE - SCFE_old
        SCFE_old = SCFE

        status = []

        # Check if we are doing SOSCF
        if (soscf_enabled and (self.iteration_ >= 3) and
            (Dnorm < core.get_option('SCF', 'SOSCF_START_CONVERGENCE'))):
            Dnorm = self.compute_orbital_gradient(
                False, core.get_option('SCF', 'DIIS_MAX_VECS'))
            diis_performed = False
            if self.functional().needs_xc():
                base_name = "SOKS, nmicro="
            else:
                base_name = "SOSCF, nmicro="

            if not _converged(Ediff, Dnorm, e_conv=e_conv, d_conv=d_conv):
                nmicro = self.soscf_update(
                    core.get_option('SCF', 'SOSCF_CONV'),
                    core.get_option('SCF', 'SOSCF_MIN_ITER'),
                    core.get_option('SCF', 'SOSCF_MAX_ITER'),
                    core.get_option('SCF', 'SOSCF_PRINT'))
                # if zero, the soscf call bounced for some reason
                soscf_performed = (nmicro > 0)

                if soscf_performed:
                    self.find_occupation()
                    status.append(base_name + str(nmicro))
                else:
                    if verbose > 0:
                        core.print_out(
                            "Did not take a SOSCF step, using normal convergence methods\n"
                        )

            else:
                # need to ensure orthogonal orbitals and set epsilon
                status.append(base_name + "conv")
                core.timer_on("HF: Form C")
                self.form_C()
                core.timer_off("HF: Form C")
                soscf_performed = True  # Stops DIIS

        if not soscf_performed:
            # Normal convergence procedures if we do not do SOSCF

            # SAD: form initial orbitals from the initial Fock matrix, and
            # reset the occupations. From here on, the density matrices
            # are correct.
            if (self.iteration_ == 0) and self.sad_:
                self.form_initial_C()
                self.reset_occupation()
                self.find_occupation()

            else:
                # Run DIIS
                core.timer_on("HF: DIIS")
                diis_performed = False
                add_to_diis_subspace = self.diis_enabled_ and self.iteration_ >= self.diis_start_

                Dnorm = self.compute_orbital_gradient(
                    add_to_diis_subspace,
                    core.get_option('SCF', 'DIIS_MAX_VECS'))

                if (add_to_diis_subspace and self.iteration_ >= self.diis_start_
                        + core.get_option('SCF', 'DIIS_MIN_VECS') - 1):
                    diis_performed = self.diis()

                if diis_performed:
                    status.append("DIIS")

                core.timer_off("HF: DIIS")

                if verbose > 4 and diis_performed:
                    core.print_out("  After DIIS:\n")
                    self.Fa().print_out()
                    self.Fb().print_out()

                # frac, MOM invoked here from Wfn::HF::find_occupation
                core.timer_on("HF: Form C")
                self.form_C()
                core.timer_off("HF: Form C")

                if self.MOM_performed_:
                    status.append("MOM")

                if self.frac_performed_:
                    status.append("FRAC")

                # Reset occupations if necessary
                if (self.iteration_ == 0) and self.reset_occ_:
                    self.reset_occupation()
                    self.find_occupation()

        # Form new density matrix
        core.timer_on("HF: Form D")
        self.form_D()
        core.timer_off("HF: Form D")

        self.set_variable("SCF ITERATION ENERGY", SCFE)

        # After we've built the new D, damp the update
        if (damping_enabled and self.iteration_ > 1
                and Dnorm > core.get_option('SCF', 'DAMPING_CONVERGENCE')):
            damping_percentage = core.get_option('SCF', "DAMPING_PERCENTAGE")
            self.damping_update(damping_percentage * 0.01)
            status.append("DAMP={}%".format(round(damping_percentage)))

        if verbose > 3:
            self.Ca().print_out()
            self.Cb().print_out()
            self.Da().print_out()
            self.Db().print_out()

        # Print out the iteration
        core.print_out(
            "   @%s%s iter %3s: %20.14f   %12.5e   %-11.5e %s\n" %
            ("DF-" if is_dfjk else "", reference, "SAD" if
             ((self.iteration_ == 0) and self.sad_) else self.iteration_, SCFE,
             Ediff, Dnorm, '/'.join(status)))

        # if an excited MOM is requested but not started, don't stop yet
        if self.MOM_excited_ and not self.MOM_performed_:
            continue

        # if a fractional occupation is requested but not started, don't stop yet
        if frac_enabled and not self.frac_performed_:
            continue

        # Call any postiteration callbacks
        if not ((self.iteration_ == 0) and self.sad_) and _converged(
                Ediff, Dnorm, e_conv=e_conv, d_conv=d_conv):
            break
        if self.iteration_ >= core.get_option('SCF', 'MAXITER'):
            raise SCFConvergenceError("""SCF iterations""", self.iteration_,
                                      self, Ediff, Dnorm)
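
# A small standalone illustration of the two-criterion test that `_converged(Ediff,
# Dnorm, ...)` above encapsulates: the energy change and the orbital-gradient norm
# must both fall below their thresholds. The helper name and the default thresholds
# here are assumptions for illustration, not the Psi4 implementation.
def converged_sketch(e_delta, d_norm, e_conv=1.0e-6, d_conv=1.0e-6):
    return (abs(e_delta) < e_conv) and (d_norm < d_conv)

print(converged_sketch(3.2e-7, 8.9e-7))  # True: both criteria satisfied
print(converged_sketch(3.2e-7, 1.1e-5))  # False: density/gradient still too large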
Example #30
0
def orig_run_dftd3(mol,
                   func=None,
                   dashlvl=None,
                   dashparam=None,
                   dertype=None,
                   verbose=False):
    """Compute dispersion correction using Grimme's DFTD3 executable.

    Function to call Grimme's dftd3 program to compute the -D correction
    of level `dashlvl` using parameters for the functional `func`.
    `dashparam` can supply a full set of dispersion parameters in the
    absence of `func` or individual overrides in the presence of `func`.

    The DFTD3 executable must be independently compiled and found in
    :envvar:`PATH` or :envvar:`PSIPATH`.

    Parameters
    ----------
    mol : qcdb.Molecule or psi4.core.Molecule or str
        Molecule on which to run dispersion calculation. Both qcdb and
        psi4.core Molecule classes have been extended by this method, so
        either is allowed. Alternately, a string that can be instantiated
        into a qcdb.Molecule.
    func : str or None
        Density functional (Psi4, not Turbomole, names) for which to
        load parameters from dashcoeff[dashlvl][func]. This is not
        passed to DFTD3 and thus may be a dummy or `None`. Any or all
        parameters initialized can be overwritten via `dashparam`.
    dashlvl : {'d2p4', 'd2gr', 'd3zero', 'd3bj', 'd3mzero', 'd3mbj', 'd', 'd2', 'd3', 'd3m'}
        Flavor of a posteriori dispersion correction for which to load
        parameters and call procedure in DFTD3. Must be a key in the
        dashcoeff dict (or a key in dashalias that resolves to one).
    dashparam : dict, optional
        Dictionary of the same keys as dashcoeff[dashlvl] used to
        override any or all values initialized by
        dashcoeff[dashlvl][func].
    dertype : {None, 0, 'none', 'energy', 1, 'first', 'gradient'}, optional
        Maximum derivative level at which to run DFTD3. For large
        `mol`, energy-only calculations can be significantly more
        efficient. Also controls return values, see below.
    verbose : bool, optional
        When `True`, additionally include DFTD3 output in output.

    Returns
    -------
    energy : float, optional
        When `dertype` is 0, energy [Eh].
    gradient : list of lists of floats or psi4.core.Matrix, optional
        When `dertype` is 1, (nat, 3) gradient [Eh/a0].
    (energy, gradient) : float and list of lists of floats or psi4.core.Matrix, optional
        When `dertype` is unspecified, both energy [Eh] and (nat, 3) gradient [Eh/a0].

    Notes
    -----
    research site: https://www.chemie.uni-bonn.de/pctc/mulliken-center/software/dft-d3
    Psi4 mode: When the `psi4` Python module is importable at `import qcdb`
               time, Psi4 mode is activated, with the following alterations:
               * output goes to output file
               * gradient returned as psi4.core.Matrix, not list of lists
               * scratch is written to randomly named subdirectory of psi scratch
               * psivar "DISPERSION CORRECTION ENERGY" is set
               * `verbose` triggered when PRINT keyword of SCF module >= 3

    """
    # Create (if necessary) and update qcdb.Molecule
    if isinstance(mol, (Molecule, core.Molecule)):
        # 1st: called on a qcdb.Molecule
        # 2nd: called on a python export of a psi4.Molecule (py-side through Psi4's driver)
        pass
    elif isinstance(mol, basestring):
        # called on a string representation of a psi4.Molecule (c-side through psi4.Dispersion)
        mol = Molecule(mol)
    else:
        raise ValidationError(
            """Argument mol must be psi4string or qcdb.Molecule""")
    mol.update_geometry()

    # Validate arguments
    if dertype is None:
        dertype = -1
    elif der0th.match(str(dertype)):
        dertype = 0
    elif der1st.match(str(dertype)):
        dertype = 1
    elif der2nd.match(str(dertype)):
        raise ValidationError(
            """Requested derivative level 'dertype' %s not valid for run_dftd3."""
            % (dertype))
    else:
        raise ValidationError(
            """Requested derivative level 'dertype' %s not valid for run_dftd3."""
            % (dertype))

    if dashlvl is not None:
        dashlvl = dashlvl.lower()
        dashlvl = dash_alias['-' + dashlvl][1:] if (
            '-' + dashlvl) in dash_alias.keys() else dashlvl
        if dashlvl not in dashcoeff.keys():
            raise ValidationError(
                """-D correction level %s is not available. Choose among %s."""
                % (dashlvl, dashcoeff.keys()))
    else:
        raise ValidationError("""Must specify a dashlvl""")

    if func is not None:
        dftd3_params = dash_server(func, dashlvl)
    else:
        dftd3_params = {}

    if dashparam is not None:
        dftd3_params.update(dashparam)

    # Move ~/.dftd3par.<hostname> out of the way so it won't interfere
    defaultfile = os.path.expanduser(
        '~') + '/.dftd3par.' + socket.gethostname()
    defmoved = False
    if os.path.isfile(defaultfile):
        os.rename(defaultfile, defaultfile + '_hide')
        defmoved = True

    # Find environment by merging PSIPATH and PATH environment variables
    lenv = {
        'PATH': ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) + \
                ':' + os.environ.get('PATH'),
        'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH')
        }
    #   Filter out None values as subprocess will fault on them
    lenv = {k: v for k, v in lenv.items() if v is not None}

    # Find out if running from Psi4 for scratch details and such
    # try:
    #     import psi4
    # except ImportError as err:
    #     isP4regime = False
    # else:
    #     isP4regime = True

    # Setup unique scratch directory and move in
    current_directory = os.getcwd()
    if isP4regime:
        psioh = core.IOManager.shared_object()
        psio = core.IO.shared_object()
        os.chdir(psioh.get_default_path())
        dftd3_tmpdir = 'psi.' + str(os.getpid()) + '.' + psio.get_default_namespace() + \
            '.dftd3.' + str(uuid.uuid4())[:8]
    else:
        dftd3_tmpdir = os.environ['HOME'] + os.sep + 'dftd3_' + str(
            uuid.uuid4())[:8]
    if not os.path.exists(dftd3_tmpdir):
        os.mkdir(dftd3_tmpdir)
    os.chdir(dftd3_tmpdir)

    # Write dftd3_parameters file that governs dispersion calc
    paramcontents = dftd3_coeff_formatter(dashlvl, dftd3_params)
    paramfile1 = 'dftd3_parameters'  # older patched name
    with open(paramfile1, 'w') as handle:
        handle.write(paramcontents)
    paramfile2 = '.dftd3par.local'  # new mainline name
    with open(paramfile2, 'w') as handle:
        handle.write(paramcontents)

    # Write dftd3_geometry file that supplies geometry to dispersion calc
    numAtoms = mol.natom()

    # We seem to have a problem with one atom, force the correct result
    if numAtoms == 1:
        dashd = 0.0
        dashdderiv = core.Matrix(1, 3)

        if dertype == -1:
            return dashd, dashdderiv
        elif dertype == 0:
            return dashd
        elif dertype == 1:
            return dashdderiv

    geom = mol.save_string_xyz()
    reals = []
    for line in geom.splitlines():
        lline = line.split()
        if len(lline) != 4:
            continue
        if lline[0] == 'Gh':
            numAtoms -= 1
        else:
            reals.append(line)

    geomtext = str(numAtoms) + '\n\n'
    for line in reals:
        geomtext += line.strip() + '\n'
    geomfile = './dftd3_geometry.xyz'
    with open(geomfile, 'w') as handle:
        handle.write(geomtext)
    # TODO somehow the variations on save_string_xyz and
    #   whether natom and chgmult does or doesn't get written
    #   have gotten all tangled. I fear this doesn't work
    #   the same btwn libmints and qcdb or for ghosts

    # Call dftd3 program
    command = ['dftd3', geomfile]
    if dertype != 0:
        command.append('-grad')
    try:
        dashout = subprocess.Popen(command, stdout=subprocess.PIPE, env=lenv)
    except OSError as e:
        raise ValidationError('Program dftd3 not found in path. %s' % e)
    out, err = dashout.communicate()

    # Parse output (could go further and break into E6, E8, E10 and Cn coeff)
    success = False
    for line in out.splitlines():
        line = line.decode('utf-8')
        if re.match(' Edisp /kcal,au', line):
            sline = line.split()
            dashd = float(sline[3])
        if re.match(' normal termination of dftd3', line):
            success = True

    if not success:
        os.chdir(current_directory)
        raise Dftd3Error(
            """Unsuccessful run. Possibly -D variant not available in dftd3 version."""
        )

    # Parse grad output
    if dertype != 0:
        derivfile = './dftd3_gradient'
        dfile = open(derivfile, 'r')
        dashdderiv = []
        for line in geom.splitlines():
            lline = line.split()
            if len(lline) != 4:
                continue
            if lline[0] == 'Gh':
                dashdderiv.append([0.0, 0.0, 0.0])
            else:
                dashdderiv.append([
                    float(x.replace('D', 'E'))
                    for x in dfile.readline().split()
                ])
        dfile.close()

        if len(dashdderiv) != mol.natom():
            raise ValidationError('Program dftd3 gradient file has %d atoms- %d expected.' % \
                (len(dashdderiv), mol.natom()))

    # Prepare results for Psi4
    if isP4regime and dertype != 0:
        core.set_variable('DISPERSION CORRECTION ENERGY', dashd)
        psi_dashdderiv = core.Matrix.from_list(dashdderiv)

    # Print program output to file if verbose
    if not verbose and isP4regime:
        verbose = True if core.get_option('SCF', 'PRINT') >= 3 else False
    if verbose:

        text = '\n  ==> DFTD3 Output <==\n'
        text += out.decode('utf-8')
        if dertype != 0:
            with open(derivfile, 'r') as handle:
                text += handle.read().replace('D', 'E')
            text += '\n'
        if isP4regime:
            core.print_out(text)
        else:
            print(text)

    # Clean up files and remove scratch directory
    os.unlink(paramfile1)
    os.unlink(paramfile2)
    os.unlink(geomfile)
    if dertype != 0:
        os.unlink(derivfile)
    if defmoved:
        os.rename(defaultfile + '_hide', defaultfile)

    os.chdir('..')
    try:
        shutil.rmtree(dftd3_tmpdir)
    except OSError as e:
        raise ValidationError('Unable to remove dftd3 temporary directory %s' % e)
    os.chdir(current_directory)

    # return -D & d(-D)/dx
    if dertype == -1:
        return dashd, dashdderiv
    elif dertype == 0:
        return dashd
    elif dertype == 1:
        return psi_dashdderiv
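
# A compact sketch of the stdout parsing done above: pull the dispersion energy (in au,
# the fourth token) from the ' Edisp /kcal,au' line and confirm normal termination.
# The sample text below is fabricated purely to exercise the logic; real dftd3 output
# differs in detail.
import re

def parse_dftd3_stdout(text):
    energy = None
    finished = False
    for line in text.splitlines():
        if re.match(' Edisp /kcal,au', line):
            energy = float(line.split()[3])
        if re.match(' normal termination of dftd3', line):
            finished = True
    return finished, energy

_sample = " Edisp /kcal,au: -1.2345 -0.00196738\n normal termination of dftd3"
print(parse_dftd3_stdout(_sample))  # (True, -0.00196738)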
Example #31
0
def nbody_gufunc(func, method_string, **kwargs):
    """
    Computes the nbody interaction energy, gradient, or Hessian depending on input.
    This is a generalized universal function for computing interaction quantities.

    :returns: *return type of func* |w--w| The interaction data.

    :returns: (*float*, :py:class:`~psi4.core.Wavefunction`) |w--w| interaction data and wavefunction with energy/gradient/hessian set appropriately when **return_wfn** specified.

    :type func: function
    :param func: ``energy`` || etc.

        Python function that accepts method_string and a molecule. Returns an
        energy, gradient, or Hessian as requested.

    :type method_string: string
    :param method_string: ``'scf'`` || ``'mp2'`` || ``'ci5'`` || etc.

        First argument, lowercase and usually unlabeled. Indicates the computational
        method to be passed to func.

    :type molecule: :ref:`molecule <op_py_molecule>`
    :param molecule: ``h2o`` || etc.

        The target molecule, if not the last molecule defined.

    :type return_wfn: :ref:`boolean <op_py_boolean>`
    :param return_wfn: ``'on'`` || |dl| ``'off'`` |dr|

        Indicate to additionally return the :py:class:`~psi4.core.Wavefunction`
        calculation result as the second element of a tuple.

    :type bsse_type: string or list
    :param bsse_type: ``'cp'`` || ``['nocp', 'vmfc']`` || |dl| ``None`` |dr| || etc.

        Type of BSSE correction to compute: CP, NoCP, or VMFC. The first in this
        list is returned by this function. By default (``None``), the N-body
        wrapper is not invoked.

    :type max_nbody: int
    :param max_nbody: ``3`` || etc.

        Maximum n-body to compute; cannot exceed the number of fragments in the molecule.

    :type ptype: string
    :param ptype: ``'energy'`` || ``'gradient'`` || ``'hessian'``

        Type of the procedure passed in.

    :type return_total_data: :ref:`boolean <op_py_boolean>`
    :param return_total_data: ``'on'`` || |dl| ``'off'`` |dr|

        If True returns the total data (energy/gradient/etc) of the system,
        otherwise returns interaction data.
    """

    ### ==> Parse some kwargs <==
    kwargs = p4util.kwargs_lower(kwargs)
    return_wfn = kwargs.pop('return_wfn', False)
    ptype = kwargs.pop('ptype', None)
    return_total_data = kwargs.pop('return_total_data', False)
    molecule = kwargs.pop('molecule', core.get_active_molecule())
    molecule.update_geometry()
    core.clean_variables()

    if ptype not in ['energy', 'gradient', 'hessian']:
        raise ValidationError(
            """N-Body driver: The ptype '%s' is not regonized.""" % ptype)

    # Figure out BSSE types
    do_cp = False
    do_nocp = False
    do_vmfc = False
    return_method = False

    # Must be passed bsse_type
    bsse_type_list = kwargs.pop('bsse_type')
    if bsse_type_list is None:
        raise ValidationError("N-Body GUFunc: Must pass a bsse_type")
    if not isinstance(bsse_type_list, list):
        bsse_type_list = [bsse_type_list]

    for num, btype in enumerate(bsse_type_list):
        if btype.lower() == 'cp':
            do_cp = True
            if (num == 0): return_method = 'cp'
        elif btype.lower() == 'nocp':
            do_nocp = True
            if (num == 0): return_method = 'nocp'
        elif btype.lower() == 'vmfc':
            do_vmfc = True
            if (num == 0): return_method = 'vmfc'
        else:
            raise ValidationError(
                "N-Body GUFunc: bsse_type '%s' is not recognized" %
                btype.lower())

    max_nbody = kwargs.get('max_nbody', -1)
    max_frag = molecule.nfragments()
    if max_nbody == -1:
        max_nbody = molecule.nfragments()
    else:
        max_nbody = min(max_nbody, max_frag)

    # What levels do we need?
    nbody_range = range(1, max_nbody + 1)
    fragment_range = range(1, max_frag + 1)

    # Flip this off for now, needs more testing
    # If we are doing CP, let's save the integrals
    #if 'cp' in bsse_type_list and (len(bsse_type_list) == 1):
    #    # Set to save RI integrals for repeated full-basis computations
    #    ri_ints_io = core.get_global_option('DF_INTS_IO')

    #    # inquire if above at all applies to dfmp2 or just scf
    #    core.set_global_option('DF_INTS_IO', 'SAVE')
    #    psioh = core.IOManager.shared_object()
    #    psioh.set_specific_retention(97, True)

    bsse_str = bsse_type_list[0]
    if len(bsse_type_list) > 1:
        bsse_str = str(bsse_type_list)
    core.print_out("\n\n")
    core.print_out("   ===> N-Body Interaction Abacus <===\n")
    core.print_out("        BSSE Treatment:                     %s\n" %
                   bsse_str)

    cp_compute_list = {x: set() for x in nbody_range}
    nocp_compute_list = {x: set() for x in nbody_range}
    vmfc_compute_list = {x: set() for x in nbody_range}
    vmfc_level_list = {x: set() for x in nbody_range}  # Need to sum something slightly different

    # Build up compute sets
    if do_cp:
        # Everything is in dimer basis
        basis_tuple = tuple(fragment_range)
        for nbody in nbody_range:
            for x in it.combinations(fragment_range, nbody):
                cp_compute_list[nbody].add((x, basis_tuple))

    if do_nocp:
        # Everything in monomer basis
        for nbody in nbody_range:
            for x in it.combinations(fragment_range, nbody):
                nocp_compute_list[nbody].add((x, x))

    if do_vmfc:
        # Like a CP for all combinations of pairs or greater
        for nbody in nbody_range:
            for cp_combos in it.combinations(fragment_range, nbody):
                basis_tuple = tuple(cp_combos)
                for interior_nbody in nbody_range:
                    for x in it.combinations(cp_combos, interior_nbody):
                        combo_tuple = (x, basis_tuple)
                        vmfc_compute_list[interior_nbody].add(combo_tuple)
                        vmfc_level_list[len(basis_tuple)].add(combo_tuple)

    # Build a comprehensive compute_range
    compute_list = {x: set() for x in nbody_range}
    for n in nbody_range:
        compute_list[n] |= cp_compute_list[n]
        compute_list[n] |= nocp_compute_list[n]
        compute_list[n] |= vmfc_compute_list[n]
        core.print_out("        Number of %d-body computations:     %d\n" %
                       (n, len(compute_list[n])))

    # Build size and slices dictionaries
    fragment_size_dict = {
        frag: molecule.extract_subsets(frag).natom()
        for frag in range(1, max_frag + 1)
    }

    start = 0
    fragment_slice_dict = {}
    for k, v in fragment_size_dict.items():
        fragment_slice_dict[k] = slice(start, start + v)
        start += v

    molecule_total_atoms = sum(fragment_size_dict.values())

    # Now compute the energies
    energies_dict = {}
    ptype_dict = {}
    for n in compute_list.keys():
        core.print_out(
            "\n   ==> N-Body: Now computing %d-body complexes <==\n\n" % n)
        total = len(compute_list[n])
        for num, pair in enumerate(compute_list[n]):
            core.print_out(
                "\n       N-Body: Computing complex (%d/%d) with fragments %s in the basis of fragments %s.\n\n"
                % (num + 1, total, str(pair[0]), str(pair[1])))
            ghost = list(set(pair[1]) - set(pair[0]))

            current_mol = molecule.extract_subsets(list(pair[0]), ghost)
            ptype_dict[pair] = func(method_string,
                                    molecule=current_mol,
                                    **kwargs)
            energies_dict[pair] = core.get_variable("CURRENT ENERGY")
            core.print_out(
                "\n       N-Body: Complex Energy (fragments = %s, basis = %s: %20.14f)\n"
                % (str(pair[0]), str(pair[1]), energies_dict[pair]))

            # Flip this off for now, needs more testing
            #if 'cp' in bsse_type_list and (len(bsse_type_list) == 1):
            #    core.set_global_option('DF_INTS_IO', 'LOAD')

            core.clean()

    # Final dictionaries
    cp_energy_by_level = {n: 0.0 for n in nbody_range}
    nocp_energy_by_level = {n: 0.0 for n in nbody_range}

    cp_energy_body_dict = {n: 0.0 for n in nbody_range}
    nocp_energy_body_dict = {n: 0.0 for n in nbody_range}
    vmfc_energy_body_dict = {n: 0.0 for n in nbody_range}

    # Build out ptype dictionaries if needed
    if ptype != 'energy':
        if ptype == 'gradient':
            arr_shape = (molecule_total_atoms, 3)
        elif ptype == 'hessian':
            arr_shape = (molecule_total_atoms * 3, molecule_total_atoms * 3)
        else:
            raise KeyError("N-Body: ptype '%s' not recognized" % ptype)

        cp_ptype_by_level = {n: np.zeros(arr_shape) for n in nbody_range}
        nocp_ptype_by_level = {n: np.zeros(arr_shape) for n in nbody_range}
        vmfc_ptype_by_level = {n: np.zeros(arr_shape) for n in nbody_range}

        cp_ptype_body_dict = {n: np.zeros(arr_shape) for n in nbody_range}
        nocp_ptype_body_dict = {n: np.zeros(arr_shape) for n in nbody_range}
        vmfc_ptype_body_dict = {n: np.zeros(arr_shape) for n in nbody_range}
    else:
        cp_ptype_by_level, cp_ptype_body_dict = None, None
        nocp_ptype_by_level, nocp_ptype_body_dict = None, None
        vmfc_ptype_by_level, vmfc_ptype_body_dict = None, None

    # Sum up all of the levels
    for n in nbody_range:

        # Energy
        cp_energy_by_level[n] = sum(energies_dict[v]
                                    for v in cp_compute_list[n])
        nocp_energy_by_level[n] = sum(energies_dict[v]
                                      for v in nocp_compute_list[n])

        # Special vmfc case
        if n > 1:
            vmfc_energy_body_dict[n] = vmfc_energy_body_dict[n - 1]
        for tup in vmfc_level_list[n]:
            vmfc_energy_body_dict[n] += (
                (-1)**(n - len(tup[0]))) * energies_dict[tup]

        # Do ptype
        if ptype != 'energy':
            _sum_cluster_ptype_data(ptype, ptype_dict, cp_compute_list[n],
                                    fragment_slice_dict, fragment_size_dict,
                                    cp_ptype_by_level[n])
            _sum_cluster_ptype_data(ptype, ptype_dict, nocp_compute_list[n],
                                    fragment_slice_dict, fragment_size_dict,
                                    nocp_ptype_by_level[n])
            _sum_cluster_ptype_data(ptype,
                                    ptype_dict,
                                    vmfc_level_list[n],
                                    fragment_slice_dict,
                                    fragment_size_dict,
                                    vmfc_ptype_by_level[n],
                                    vmfc=True)

    # Compute cp energy and ptype
    if do_cp:
        for n in nbody_range:
            if n == max_frag:
                cp_energy_body_dict[n] = cp_energy_by_level[n]
                if ptype != 'energy':
                    cp_ptype_body_dict[n][:] = cp_ptype_by_level[n]
                continue

            for k in range(1, n + 1):
                take_nk = nCr(max_frag - k - 1, n - k)
                sign = ((-1)**(n - k))
                value = cp_energy_by_level[k]
                cp_energy_body_dict[n] += take_nk * sign * value

                if ptype != 'energy':
                    value = cp_ptype_by_level[k]
                    cp_ptype_body_dict[n] += take_nk * sign * value

        _print_nbody_energy(cp_energy_body_dict, "Counterpoise Corrected (CP)")
        cp_interaction_energy = cp_energy_body_dict[
            max_nbody] - cp_energy_body_dict[1]
        core.set_variable('Counterpoise Corrected Total Energy',
                          cp_energy_body_dict[max_nbody])
        core.set_variable('Counterpoise Corrected Interaction Energy',
                          cp_interaction_energy)

        for n in nbody_range[1:]:
            var_key = 'CP-CORRECTED %d-BODY INTERACTION ENERGY' % n
            core.set_variable(var_key,
                              cp_energy_body_dict[n] - cp_energy_body_dict[1])

    # Compute nocp energy and ptype
    if do_nocp:
        for n in nbody_range:
            if n == max_frag:
                nocp_energy_body_dict[n] = nocp_energy_by_level[n]
                if ptype != 'energy':
                    nocp_ptype_body_dict[n][:] = nocp_ptype_by_level[n]
                continue

            for k in range(1, n + 1):
                take_nk = nCr(max_frag - k - 1, n - k)
                sign = ((-1)**(n - k))
                value = nocp_energy_by_level[k]
                nocp_energy_body_dict[n] += take_nk * sign * value

                if ptype != 'energy':
                    value = nocp_ptype_by_level[k]
                    nocp_ptype_body_dict[n] += take_nk * sign * value

        _print_nbody_energy(nocp_energy_body_dict,
                            "Non-Counterpoise Corrected (NoCP)")
        nocp_interaction_energy = nocp_energy_body_dict[
            max_nbody] - nocp_energy_body_dict[1]
        core.set_variable('Non-Counterpoise Corrected Total Energy',
                          nocp_energy_body_dict[max_nbody])
        core.set_variable('Non-Counterpoise Corrected Interaction Energy',
                          nocp_interaction_energy)

        for n in nbody_range[1:]:
            var_key = 'NOCP-CORRECTED %d-BODY INTERACTION ENERGY' % n
            core.set_variable(
                var_key, nocp_energy_body_dict[n] - nocp_energy_body_dict[1])

    # Compute vmfc energy and ptype
    if do_vmfc:
        _print_nbody_energy(vmfc_energy_body_dict,
                            "Valiron-Mayer Function Couterpoise (VMFC)")
        vmfc_interaction_energy = vmfc_energy_body_dict[
            max_nbody] - vmfc_energy_body_dict[1]
        core.set_variable('Valiron-Mayer Function Couterpoise Total Energy',
                          vmfc_energy_body_dict[max_nbody])
        core.set_variable(
            'Valiron-Mayer Function Couterpoise Interaction Energy',
            vmfc_interaction_energy)

        for n in nbody_range[1:]:
            var_key = 'VMFC-CORRECTED %d-BODY INTERACTION ENERGY' % n
            core.set_variable(
                var_key, vmfc_energy_body_dict[n] - vmfc_energy_body_dict[1])

    if return_method == 'cp':
        ptype_body_dict = cp_ptype_body_dict
        energy_body_dict = cp_energy_body_dict
    elif return_method == 'nocp':
        ptype_body_dict = nocp_ptype_body_dict
        energy_body_dict = nocp_energy_body_dict
    elif return_method == 'vmfc':
        ptype_body_dict = vmfc_ptype_body_dict
        energy_body_dict = vmfc_energy_body_dict
    else:
        raise ValidationError(
            "N-Body Wrapper: Invalid return type. Should never be here, please post this error on github."
        )

    # Figure out and build return types
    if return_total_data:
        ret_energy = energy_body_dict[max_nbody]
    else:
        ret_energy = energy_body_dict[max_nbody]
        ret_energy -= energy_body_dict[1]

    if ptype != 'energy':
        if return_total_data:
            np_final_ptype = ptype_body_dict[max_nbody].copy()
        else:
            np_final_ptype = ptype_body_dict[max_nbody].copy()
            np_final_ptype -= ptype_body_dict[1]

        ret_ptype = core.Matrix.from_array(np_final_ptype)
    else:
        ret_ptype = ret_energy

    # Build and set a wavefunction
    wfn = core.Wavefunction.build(molecule, 'sto-3g')
    wfn.nbody_energy = energies_dict
    wfn.nbody_ptype = ptype_dict
    wfn.nbody_body_energy = energy_body_dict
    wfn.nbody_body_ptype = ptype_body_dict

    if ptype == 'gradient':
        wfn.set_gradient(ret_ptype)
    elif ptype == 'hessian':
        wfn.set_hessian(ret_ptype)

    core.set_variable("CURRENT ENERGY", ret_energy)

    if return_wfn:
        return (ret_ptype, wfn)
    else:
        return ret_ptype
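
# A standalone sketch of the counterpoise (CP) compute-list construction above: every
# n-body subcluster is generated with itertools.combinations and paired with the full
# cluster basis. The fragment counts in the usage line are arbitrary example values.
import itertools as it

def cp_compute_list_sketch(nfrag, max_nbody):
    fragment_range = range(1, nfrag + 1)
    basis_tuple = tuple(fragment_range)  # every CP job runs in the full-cluster basis
    compute_list = {n: set() for n in range(1, max_nbody + 1)}
    for nbody in range(1, max_nbody + 1):
        for frags in it.combinations(fragment_range, nbody):
            compute_list[nbody].add((frags, basis_tuple))
    return compute_list

# A trimer truncated at 2-body needs 3 monomer jobs and 3 dimer jobs, all in the trimer basis:
print({n: len(jobs) for n, jobs in cp_compute_list_sketch(3, 2).items()})  # {1: 3, 2: 3}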
Example #32
0
def scf_iterate(self, e_conv=None, d_conv=None):

    is_dfjk = core.get_global_option('SCF_TYPE').endswith('DF')
    verbose = core.get_option('SCF', "PRINT")
    reference = core.get_option('SCF', "REFERENCE")

    # self.member_data_ signals are non-local, used internally by c-side fns
    self.diis_enabled_ = _validate_diis()
    self.MOM_excited_ = _validate_MOM()
    self.diis_start_ = core.get_option('SCF', 'DIIS_START')
    damping_enabled = _validate_damping()
    soscf_enabled = _validate_soscf()
    frac_enabled = _validate_frac()
    efp_enabled = hasattr(self.molecule(), 'EFP')

    if self.iteration_ < 2:
        core.print_out("  ==> Iterations <==\n\n")
        core.print_out("%s                        Total Energy        Delta E     RMS |[F,P]|\n\n" % ("   "
                                                                                                      if is_dfjk else ""))

    # SCF iterations!
    SCFE_old = 0.0
    SCFE = 0.0
    Drms = 0.0
    while True:
        self.iteration_ += 1

        diis_performed = False
        soscf_performed = False
        self.frac_performed_ = False
        #self.MOM_performed_ = False  # redundant from common_init()

        self.save_density_and_energy()

        if efp_enabled:
            # EFP: Add efp contribution to Fock matrix
            self.H().copy(self.Horig)
            global mints
            mints = core.MintsHelper(self.basisset())
            Vefp = modify_Fock_induced(self.molecule().EFP, mints, verbose=verbose-1)
            Vefp = core.Matrix.from_array(Vefp)
            self.H().add(Vefp)

        SCFE = 0.0
        self.clear_external_potentials()

        core.timer_on("HF: Form G")
        self.form_G()
        core.timer_off("HF: Form G")

        # reset fractional SAD occupation
        if (self.iteration_ == 0) and self.reset_occ_:
            self.reset_occupation()

        upcm = 0.0
        if core.get_option('SCF', 'PCM'):
            calc_type = core.PCM.CalcType.Total
            if core.get_option("PCM", "PCM_SCF_TYPE") == "SEPARATE":
                calc_type = core.PCM.CalcType.NucAndEle
            Dt = self.Da().clone()
            Dt.add(self.Db())
            upcm, Vpcm = self.get_PCM().compute_PCM_terms(Dt, calc_type)
            SCFE += upcm
            self.push_back_external_potential(Vpcm)
        self.set_variable("PCM POLARIZATION ENERGY", upcm)
        self.set_energies("PCM Polarization", upcm)

        core.timer_on("HF: Form F")
        self.form_F()
        core.timer_off("HF: Form F")

        if verbose > 3:
            self.Fa().print_out()
            self.Fb().print_out()

        SCFE += self.compute_E()
        if efp_enabled:
            global efp_Dt

            # EFP: Add efp contribution to energy
            efp_Dt = self.Da().clone()
            efp_Dt.add(self.Db())
            SCFE += self.molecule().EFP.get_wavefunction_dependent_energy()

        self.set_energies("Total Energy", SCFE)
        Ediff = SCFE - SCFE_old
        SCFE_old = SCFE

        status = []

        # We either do SOSCF or DIIS
        if (soscf_enabled and (self.iteration_ > 3) and (Drms < core.get_option('SCF', 'SOSCF_START_CONVERGENCE'))):

            Drms = self.compute_orbital_gradient(False, core.get_option('SCF', 'DIIS_MAX_VECS'))
            diis_performed = False
            if self.functional().needs_xc():
                base_name = "SOKS, nmicro="
            else:
                base_name = "SOSCF, nmicro="

            if not _converged(Ediff, Drms, e_conv=e_conv, d_conv=d_conv):
                nmicro = self.soscf_update(
                    core.get_option('SCF', 'SOSCF_CONV'),
                    core.get_option('SCF', 'SOSCF_MIN_ITER'),
                    core.get_option('SCF', 'SOSCF_MAX_ITER'), core.get_option('SCF', 'SOSCF_PRINT'))
                if nmicro > 0:
                    # if zero, the soscf call bounced for some reason
                    self.find_occupation()
                    status.append(base_name + str(nmicro))
                    soscf_performed = True  # Stops DIIS
                else:
                    if verbose > 0:
                        core.print_out("Did not take a SOSCF step, using normal convergence methods\n")
                    soscf_performed = False  # Back to DIIS

            else:
                # need to ensure orthogonal orbitals and set epsilon
                status.append(base_name + "conv")
                core.timer_on("HF: Form C")
                self.form_C()
                core.timer_off("HF: Form C")
                soscf_performed = True  # Stops DIIS

        if not soscf_performed:
            # Normal convergence procedures if we do not do SOSCF

            core.timer_on("HF: DIIS")
            diis_performed = False
            add_to_diis_subspace = False

            if self.diis_enabled_ and self.iteration_ >= self.diis_start_:
                add_to_diis_subspace = True

            Drms = self.compute_orbital_gradient(add_to_diis_subspace, core.get_option('SCF', 'DIIS_MAX_VECS'))

            if (self.diis_enabled_
                    and self.iteration_ >= self.diis_start_ + core.get_option('SCF', 'DIIS_MIN_VECS') - 1):
                diis_performed = self.diis()

            if diis_performed:
                status.append("DIIS")

            core.timer_off("HF: DIIS")

            if verbose > 4 and diis_performed:
                core.print_out("  After DIIS:\n")
                self.Fa().print_out()
                self.Fb().print_out()

            # frac, MOM invoked here from Wfn::HF::find_occupation
            core.timer_on("HF: Form C")
            self.form_C()
            core.timer_off("HF: Form C")

        if self.MOM_performed_:
            status.append("MOM")

        if self.frac_performed_:
            status.append("FRAC")

        core.timer_on("HF: Form D")
        self.form_D()
        core.timer_off("HF: Form D")

        core.set_variable("SCF ITERATION ENERGY", SCFE)

        # After we've built the new D, damp the update
        if (damping_enabled and self.iteration_ > 1 and Drms > core.get_option('SCF', 'DAMPING_CONVERGENCE')):
            damping_percentage = core.get_option('SCF', "DAMPING_PERCENTAGE")
            self.damping_update(damping_percentage * 0.01)
            status.append("DAMP={}%".format(round(damping_percentage)))

        if verbose > 3:
            self.Ca().print_out()
            self.Cb().print_out()
            self.Da().print_out()
            self.Db().print_out()

        # Print out the iteration
        core.print_out("   @%s%s iter %3d: %20.14f   %12.5e   %-11.5e %s\n" %
                       ("DF-" if is_dfjk else "", reference, self.iteration_, SCFE, Ediff, Drms, '/'.join(status)))

        # if an excited MOM is requested but not started, don't stop yet
        if self.MOM_excited_ and not self.MOM_performed_:
            continue

        # if a fractional occupation is requested but not started, don't stop yet
        if frac_enabled and not self.frac_performed_:
            continue

        # Call any postiteration callbacks

        if _converged(Ediff, Drms, e_conv=e_conv, d_conv=d_conv):
            break
        if self.iteration_ >= core.get_option('SCF', 'MAXITER'):
            raise ConvergenceError("""SCF iterations""", self.iteration_)
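
# A minimal sketch of what the density-damping step above does conceptually: mix a
# fraction of the previous density back into the new one to tame early-iteration
# oscillations. Plain numpy, and the 20% value is just an example, not a recommendation.
import numpy as np

def damp_density(D_new, D_old, damping_percentage=20.0):
    alpha = damping_percentage * 0.01
    return (1.0 - alpha) * D_new + alpha * D_old

D_old_example = np.eye(2)
D_new_example = np.array([[0.8, 0.1], [0.1, 0.9]])
print(damp_density(D_new_example, D_old_example))  # 80% new density, 20% old density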
Example #33
0
def multi_level(func, **kwargs):
    """
    Use different levels of theory for different expansion levels.
    See the kwargs description in driver_nbody.nbody_gufunc.

    :returns: *return type of func* |w--w| The data.

    :returns: (*float*, :py:class:`~psi4.core.Wavefunction`) |w--w| data and wavefunction with energy/gradient/hessian set appropriately when **return_wfn** specified.

    """
    from psi4.driver.driver_nbody import _print_nbody_energy, nbody_gufunc

    ptype = kwargs['ptype']
    return_wfn = kwargs.get('return_wfn', False)
    kwargs['return_wfn'] = True
    levels = {}
    for k, v in kwargs.pop('levels').items():
        if isinstance(k, str):
            levels[k.lower()] = v
        else:
            levels[k] = v
    supersystem = levels.pop('supersystem', False)
    molecule = kwargs.get('molecule', core.get_active_molecule())
    kwargs['bsse_type'] = [kwargs['bsse_type']] if isinstance(
        kwargs['bsse_type'], str) else kwargs['bsse_type']
    natoms = molecule.natom()

    # Initialize with zeros
    energy_result, gradient_result, hessian_result = 0, None, None
    energy_body_contribution = {b: {} for b in kwargs['bsse_type']}
    energy_body_dict = {b: {} for b in kwargs['bsse_type']}
    wfns = {}
    if ptype in ['gradient', 'hessian']:
        gradient_result = np.zeros((natoms, 3))
    if ptype == 'hessian':
        hessian_result = np.zeros((natoms * 3, natoms * 3))

    if kwargs.get('charge_method',
                  False) and not kwargs.get('embedding_charges', False):
        kwargs['embedding_charges'] = compute_charges(
            kwargs['charge_method'],
            kwargs.get('charge_type', 'MULLIKEN_CHARGES').upper(), molecule)

    for n in sorted(levels)[::-1]:
        molecule.set_name('%i' % n)
        kwargs_copy = kwargs.copy()
        kwargs_copy['max_nbody'] = n
        energy_bsse_dict = {b: 0 for b in kwargs['bsse_type']}
        if isinstance(levels[n], str):
            # If a new level of theory is provided, compute contribution
            ret, wfn = nbody_gufunc(func, levels[n], **kwargs_copy)
            wfns[n] = wfn
        else:
            # For the n-body contribution, use available data from the higher order levels[n]-body
            wfn = wfns[levels[n]]

        for m in range(n - 1, n + 1):
            if m == 0: continue
            # Subtract the (n-1)-body contribution from the n-body contribution to get the n-body effect
            sign = (-1)**(1 - m // n)
            for b in kwargs['bsse_type']:
                energy_bsse_dict[b] += sign * wfn.variable('%i%s' %
                                                           (m, b.lower()))
            if ptype in ['gradient', 'hessian']:
                gradient_result += sign * np.array(
                    wfn.variable('GRADIENT ' + str(m)))
                # Keep 1-body contribution to compute interaction data
                if n == 1:
                    gradient1 = np.array(wfn.variable('GRADIENT ' + str(m)))
            if ptype == 'hessian':
                hessian_result += sign * np.array(
                    wfn.variable('HESSIAN ' + str(m)))
                if n == 1:
                    hessian1 = np.array(wfn.variable('HESSIAN ' + str(m)))
        energy_result += energy_bsse_dict[kwargs['bsse_type'][0]]
        for b in kwargs['bsse_type']:
            energy_body_contribution[b][n] = energy_bsse_dict[b]

    if supersystem:
        # Super system recovers higher order effects at a lower level
        molecule.set_name('supersystem')
        kwargs_copy = kwargs.copy()
        kwargs_copy.pop('bsse_type')
        kwargs_copy.pop('ptype')
        ret, wfn_super = func(supersystem, **kwargs_copy)
        core.clean()
        kwargs_copy = kwargs.copy()
        kwargs_copy['bsse_type'] = 'nocp'
        kwargs_copy['max_nbody'] = max(levels)
        # Subtract lower order effects to avoid double counting
        ret, wfn = nbody_gufunc(func, supersystem, **kwargs_copy)
        energy_result += wfn_super.energy() - wfn.variable(str(max(levels)))
        for b in kwargs['bsse_type']:
            energy_body_contribution[b][molecule.nfragments(
            )] = wfn_super.energy() - wfn.variable(str(max(levels)))

        if ptype in ['gradient', 'hessian']:
            gradient_result += np.array(wfn_super.gradient()) - np.array(
                wfn.variable('GRADIENT ' + str(max(levels))))
        if ptype == 'hessian':
            hessian_result += np.array(wfn_super.hessian()) - np.array(
                wfn.variable('HESSIAN ' + str(max(levels))))
        levels['supersystem'] = supersystem

    for b in kwargs['bsse_type']:
        for n in energy_body_contribution[b]:
            energy_body_dict[b][n] = sum([
                energy_body_contribution[b][i] for i in range(1, n + 1)
                if i in energy_body_contribution[b]
            ])

    is_embedded = kwargs.get('embedding_charges', False) or kwargs.get(
        'charge_method', False)
    for b in kwargs['bsse_type']:
        _print_nbody_energy(
            energy_body_dict[b],
            '%s-corrected multilevel many-body expansion' % b.upper(),
            is_embedded)

    if not kwargs['return_total_data']:
        # Remove monomer contribution for interaction data
        energy_result -= energy_body_dict[kwargs['bsse_type'][0]][1]
        if ptype in ['gradient', 'hessian']:
            gradient_result -= gradient1
        if ptype == 'hessian':
            hessian_result -= hessian1
    wfn_out = core.Wavefunction.build(molecule, 'def2-svp')
    core.set_variable("CURRENT ENERGY", energy_result)
    wfn_out.set_variable("CURRENT ENERGY", energy_result)
    if gradient_result is not None:
        wfn_out.set_gradient(core.Matrix.from_array(gradient_result))
    if hessian_result is not None:
        wfn_out.set_hessian(core.Matrix.from_array(hessian_result))
    ptype_result = {'energy': energy_result, 'gradient': gradient_result,
                    'hessian': hessian_result}[ptype]
    for b in kwargs['bsse_type']:
        for i in energy_body_dict[b]:
            wfn_out.set_variable(str(i) + b, energy_body_dict[b][i])

    if kwargs['return_wfn']:
        return (ptype_result, wfn_out)
    else:
        return ptype_result
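
# A tiny illustration of the sign bookkeeping in the m-loop above: for each level n the
# n-body *effect* is the n-body result minus the (n-1)-body result, which is exactly what
# sign = (-1)**(1 - m // n) encodes for m in {n-1, n}. The energies below are made up.
def nbody_effect_sketch(results_by_max_nbody, n):
    effect = 0.0
    for m in range(n - 1, n + 1):
        if m == 0:
            continue
        sign = (-1)**(1 - m // n)  # +1 when m == n, -1 when m == n - 1
        effect += sign * results_by_max_nbody[m]
    return effect

_fake = {1: -1.000, 2: -1.010, 3: -1.012}  # hypothetical cumulative n-body energies
print(nbody_effect_sketch(_fake, 2))  # approximately -0.010, the pure 2-body contribution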
Example #34
0
def scf_finalize_energy(self):
    """Performs stability analysis and calls back SCF with new guess
    if needed, Returns the SCF energy. This function should be called
    once orbitals are ready for energy/property computations, usually
    after iterations() is called.

    """

    # post-scf vv10 correlation
    if core.get_option('SCF', "DFT_VV10_POSTSCF"):
        self.functional().set_lock(False)
        self.functional().set_do_vv10(True)
        self.functional().set_lock(True)
        core.print_out("  ==> Computing Non-Self-Consistent VV10 Energy Correction <==\n\n")
        SCFE = 0.0
        self.form_V()
        SCFE += self.compute_E()
        self.set_energies("Total Energy", SCFE)

    # Perform wavefunction stability analysis before doing
    # anything on a wavefunction that may not be truly converged.
    if core.get_option('SCF', 'STABILITY_ANALYSIS') != "NONE":
        # We need the integral file, make sure it is written and
        # compute it if needed
        if core.get_option('SCF', 'REFERENCE') != "UHF":
            psio = core.IO.shared_object()
            #psio.open(constants.PSIF_SO_TEI, 1)  # PSIO_OPEN_OLD
            #try:
            #    psio.tocscan(constants.PSIF_SO_TEI, "IWL Buffers")
            #except TypeError:
            #    # "IWL Buffers" actually found but psio_tocentry can't be returned to Py
            #    psio.close(constants.PSIF_SO_TEI, 1)
            #else:
            #    # tocscan returned None
            #    psio.close(constants.PSIF_SO_TEI, 1)

            # logic above foiled by psio_tocentry not returning None<--nullptr in pb11 2.2.1
            #   so forcibly recomputing for now until stability revamp
            core.print_out("    SO Integrals not on disk. Computing...")
            mints = core.MintsHelper(self.basisset())
            mints.integrals()
            core.print_out("done.\n")

            # Q: Not worth exporting all the layers of psio, right?

        follow = self.stability_analysis()

        while follow and self.attempt_number_ <= core.get_option('SCF', 'MAX_ATTEMPTS'):
            self.attempt_number_ += 1
            core.print_out("    Running SCF again with the rotated orbitals.\n")

            if self.initialized_diis_manager_:
                self.diis_manager().reset_subspace()
            # reading the rotated orbitals in before starting iterations
            self.form_D()
            self.set_energies("Total Energy", self.compute_initial_E())
            self.iterations()
            follow = self.stability_analysis()

        if follow and self.attempt_number_ > core.get_option('SCF', 'MAX_ATTEMPTS'):
            core.print_out("    There's still a negative eigenvalue. Try modifying FOLLOW_STEP_SCALE\n")
            core.print_out("    or increasing MAX_ATTEMPTS (not available for PK integrals).\n")

    # At this point, we are not doing any more SCF cycles
    #   and we can compute and print final quantities.

    if hasattr(self.molecule(), 'EFP'):
        efpobj = self.molecule().EFP

        efpobj.compute()  # do_gradient=do_gradient)
        efpene = efpobj.get_energy(label='psi')
        efp_wfn_independent_energy = efpene['total'] - efpene['ind']
        self.set_energies("EFP", efpene['total'])

        SCFE = self.get_energies("Total Energy")
        SCFE += efp_wfn_independent_energy
        self.set_energies("Total Energy", SCFE)
        core.print_out(efpobj.energy_summary(scfefp=SCFE, label='psi'))

        core.set_variable('EFP ELST ENERGY', efpene['electrostatic'] + efpene['charge_penetration'] + efpene['electrostatic_point_charges'])
        core.set_variable('EFP IND ENERGY', efpene['polarization'])
        core.set_variable('EFP DISP ENERGY', efpene['dispersion'])
        core.set_variable('EFP EXCH ENERGY', efpene['exchange_repulsion'])
        core.set_variable('EFP TOTAL ENERGY', efpene['total'])
        core.set_variable('CURRENT ENERGY', efpene['total'])

    core.print_out("\n  ==> Post-Iterations <==\n\n")

    self.check_phases()
    self.compute_spin_contamination()
    self.frac_renormalize()
    reference = core.get_option("SCF", "REFERENCE")

    energy = self.get_energies("Total Energy")

#    fail_on_maxiter = core.get_option("SCF", "FAIL_ON_MAXITER")
#    if converged or not fail_on_maxiter:
#
#        if print_lvl > 0:
#            self.print_orbitals()
#
#        if converged:
#            core.print_out("  Energy converged.\n\n")
#        else:
#            core.print_out("  Energy did not converge, but proceeding anyway.\n\n")

    if core.get_option('SCF', 'PRINT') > 0:
        self.print_orbitals()

    is_dfjk = core.get_global_option('SCF_TYPE').endswith('DF')
    core.print_out("  @%s%s Final Energy: %20.14f" % ('DF-' if is_dfjk else '', reference, energy))
    # if (perturb_h_) {
    #     core.print_out(" with %f %f %f perturbation" %
    #                    (dipole_field_strength_[0], dipole_field_strength_[1], dipole_field_strength_[2]))
    # }
    core.print_out("\n\n")
    self.print_energies()

    self.clear_external_potentials()
    if core.get_option('SCF', 'PCM'):
        calc_type = core.PCM.CalcType.Total
        if core.get_option("PCM", "PCM_SCF_TYPE") == "SEPARATE":
            calc_type = core.PCM.CalcType.NucAndEle
        Dt = self.Da().clone()
        Dt.add(self.Db())
        _, Vpcm = self.get_PCM().compute_PCM_terms(Dt, calc_type)
        self.push_back_external_potential(Vpcm)

    # Properties
    #  Comments so that autodoc utility will find these PSI variables
    #  Process::environment.globals["SCF DIPOLE X"] =
    #  Process::environment.globals["SCF DIPOLE Y"] =
    #  Process::environment.globals["SCF DIPOLE Z"] =
    #  Process::environment.globals["SCF QUADRUPOLE XX"] =
    #  Process::environment.globals["SCF QUADRUPOLE XY"] =
    #  Process::environment.globals["SCF QUADRUPOLE XZ"] =
    #  Process::environment.globals["SCF QUADRUPOLE YY"] =
    #  Process::environment.globals["SCF QUADRUPOLE YZ"] =
    #  Process::environment.globals["SCF QUADRUPOLE ZZ"] =

    # Orbitals are always saved, in case an MO guess is requested later
    # save_orbitals()

    # Shove variables into global space
    for k, v in self.variables().items():
        core.set_variable(k, v)

    self.finalize()

    core.print_out("\nComputation Completed\n")

    return energy
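
# Hedged usage sketch for the post-iterations routine above, driven through
# the public psi4 Python API. The triplet-O2 geometry is illustrative only;
# the option names (REFERENCE, STABILITY_ANALYSIS, MAX_ATTEMPTS) are the ones
# read by the code above.
import psi4

psi4.geometry("""
0 3
O 0.0 0.0 0.0
O 0.0 0.0 1.21
""")
psi4.set_options({'reference': 'uhf',
                  'stability_analysis': 'follow'})  # rotate orbitals and re-run SCF if unstable
psi4.energy('scf')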
Example #35
0
def run_gcp(self, func=None, dertype=None, verbose=False):  # dashlvl=None, dashparam=None
    """Function to call Grimme's dftd3 program (http://toc.uni-muenster.de/DFTD3/)
    to compute the -D correction of level *dashlvl* using parameters for
    the functional *func*. The dictionary *dashparam* can be used to supply
    a full set of dispersion parameters in the absense of *func* or to supply
    individual overrides in the presence of *func*. Returns energy if *dertype* is 0,
    gradient if *dertype* is 1, else tuple of energy and gradient if *dertype*
    unspecified. The dftd3 executable must be independently compiled and found in
    :envvar:`PATH` or :envvar:`PSIPATH`.
    *self* may be either a qcdb.Molecule (sensibly) or a psi4.Molecule
    (works b/c psi4.Molecule has been extended by this method py-side and
    only public interface fns used) or a string that can be instantiated
    into a qcdb.Molecule.

    """
    # Create (if necessary) and update qcdb.Molecule
    if isinstance(self, Molecule):
        # called on a qcdb.Molecule
        pass
    elif isinstance(self, core.Molecule):
        # called on a python export of a psi4.core.Molecule (py-side through Psi4's driver)
        self.create_psi4_string_from_molecule()
    elif isinstance(self, basestring):
        # called on a string representation of a psi4.Molecule (c-side through psi4.Dispersion)
        self = Molecule(self)
    else:
        raise ValidationError("""Argument mol must be psi4string or qcdb.Molecule""")
    self.update_geometry()

#    # Validate arguments
#    dashlvl = dashlvl.lower()
#    dashlvl = dash_alias['-' + dashlvl][1:] if ('-' + dashlvl) in dash_alias.keys() else dashlvl
#    if dashlvl not in dashcoeff.keys():
#        raise ValidationError("""-D correction level %s is not available. Choose among %s.""" % (dashlvl, dashcoeff.keys()))

    if dertype is None:
        dertype = -1
    elif der0th.match(str(dertype)):
        dertype = 0
    elif der1st.match(str(dertype)):
        dertype = 1
#    elif der2nd.match(str(dertype)):
#        raise ValidationError('Requested derivative level \'dertype\' %s not valid for run_dftd3.' % (dertype))
    else:
        raise ValidationError('Requested derivative level \'dertype\' %s not valid for run_dftd3.' % (dertype))

#    if func is None:
#        if dashparam is None:
#            # defunct case
#            raise ValidationError("""Parameters for -D correction missing. Provide a func or a dashparam kwarg.""")
#        else:
#            # case where all param read from dashparam dict (which must have all correct keys)
#            func = 'custom'
#            dashcoeff[dashlvl][func] = {}
#            dashparam = dict((k.lower(), v) for k, v in dashparam.iteritems())
#            for key in dashcoeff[dashlvl]['b3lyp'].keys():
#                if key in dashparam.keys():
#                    dashcoeff[dashlvl][func][key] = dashparam[key]
#                else:
#                    raise ValidationError("""Parameter %s is missing from dashparam dict %s.""" % (key, dashparam))
#    else:
#        func = func.lower()
#        if func not in dashcoeff[dashlvl].keys():
#            raise ValidationError("""Functional %s is not available for -D level %s.""" % (func, dashlvl))
#        if dashparam is None:
#            # (normal) case where all param taken from dashcoeff above
#            pass
#        else:
#            # case where items in dashparam dict can override param taken from dashcoeff above
#            dashparam = dict((k.lower(), v) for k, v in dashparam.iteritems())
#            for key in dashcoeff[dashlvl]['b3lyp'].keys():
#                if key in dashparam.keys():
#                    dashcoeff[dashlvl][func][key] = dashparam[key]

    # TODO temp until figure out paramfile
    allowed_funcs = ['HF/MINIS', 'DFT/MINIS', 'HF/MINIX', 'DFT/MINIX',
        'HF/SV', 'DFT/SV', 'HF/def2-SV(P)', 'DFT/def2-SV(P)', 'HF/def2-SVP',
        'DFT/def2-SVP', 'HF/DZP', 'DFT/DZP', 'HF/def-TZVP', 'DFT/def-TZVP',
        'HF/def2-TZVP', 'DFT/def2-TZVP', 'HF/631Gd', 'DFT/631Gd',
        'HF/def2-TZVP', 'DFT/def2-TZVP', 'HF/cc-pVDZ', 'DFT/cc-pVDZ',
        'HF/aug-cc-pVDZ', 'DFT/aug-cc-pVDZ', 'DFT/SV(P/h,c)', 'DFT/LANL',
        'DFT/pobTZVP', 'TPSS/def2-SVP', 'PW6B95/def2-SVP',
        # specials
        'hf3c', 'pbeh3c']
    allowed_funcs = [f.lower() for f in allowed_funcs]
    if func is None or func.lower() not in allowed_funcs:
        raise Dftd3Error("""bad gCP func: %s. need one of: %r""" % (func, allowed_funcs))

    # Move ~/.dftd3par.<hostname> out of the way so it won't interfere
    defaultfile = os.path.expanduser('~') + '/.dftd3par.' + socket.gethostname()
    defmoved = False
    if os.path.isfile(defaultfile):
        os.rename(defaultfile, defaultfile + '_hide')
        defmoved = True

    # Find environment by merging PSIPATH and PATH environment variables
    lenv = {
        'PATH': ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) + \
                ':' + os.environ.get('PATH'),
        'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH')
        }
    #   Filter out None values as subprocess will fault on them
    lenv = {k: v for k, v in lenv.items() if v is not None}

    # Find out if running from Psi4 for scratch details and such
    try:
        import psi4
    except ImportError as err:
        isP4regime = False
    else:
        isP4regime = True

    # Setup unique scratch directory and move in
    current_directory = os.getcwd()
    if isP4regime:
        psioh = core.IOManager.shared_object()
        psio = core.IO.shared_object()
        os.chdir(psioh.get_default_path())
        gcp_tmpdir = 'psi.' + str(os.getpid()) + '.' + psio.get_default_namespace() + \
            '.gcp.' + str(uuid.uuid4())[:8]
    else:
        gcp_tmpdir = os.environ['HOME'] + os.sep + 'gcp_' + str(uuid.uuid4())[:8]
    if os.path.exists(gcp_tmpdir) is False:
        os.mkdir(gcp_tmpdir)
    os.chdir(gcp_tmpdir)

    # Write gcp_parameters file that governs cp correction
#    paramcontents = gcp_server(func, dashlvl, 'dftd3')
#    paramfile1 = 'dftd3_parameters'  # older patched name
#    with open(paramfile1, 'w') as handle:
#        handle.write(paramcontents)
#    paramfile2 = '.gcppar'
#    with open(paramfile2, 'w') as handle:
#        handle.write(paramcontents)

# Two kinds of parameter files can be read in: a short and an extended version. Both are read from
# $HOME/.gcppar.$HOSTNAME by default. If the option -local is specified the file is read in from
# the current working directory: .gcppar
# The short version reads in: basis-keywo

    # Write dftd3_geometry file that supplies geometry to dispersion calc
    numAtoms = self.natom()
    geom = self.save_string_xyz()
    reals = []
    for line in geom.splitlines():
        lline = line.split()
        if len(lline) != 4:
            continue
        if lline[0] == 'Gh':
            numAtoms -= 1
        else:
            reals.append(line)

    geomtext = str(numAtoms) + '\n\n'
    for line in reals:
        geomtext += line.strip() + '\n'
    geomfile = './gcp_geometry.xyz'
    with open(geomfile, 'w') as handle:
        handle.write(geomtext)
    # TODO somehow the variations on save_string_xyz and
    #   whether natom and chgmult does or doesn't get written
    #   have gotten all tangled. I fear this doesn't work
    #   the same btwn libmints and qcdb or for ghosts

    # Call gcp program
    command = ['gcp', geomfile]
    command.extend(['-level', func])
    if dertype != 0:
        command.append('-grad')
    try:
        #print('command', command)
        dashout = subprocess.Popen(command, stdout=subprocess.PIPE, env=lenv)
    except OSError as e:
        raise ValidationError('Program gcp not found in path. %s' % e)
    out, err = dashout.communicate()

    # Parse output
    success = False
    for line in out.splitlines():
        line = line.decode('utf-8')
        if re.match('  Egcp:', line):
            sline = line.split()
            dashd = float(sline[1])
        if re.match('     normal termination of gCP', line):
            success = True

    if not success:
        os.chdir(current_directory)
        raise Dftd3Error("""Unsuccessful gCP run.""")

    # Parse grad output
    if dertype != 0:
        derivfile = './gcp_gradient'
        dfile = open(derivfile, 'r')
        dashdderiv = []
        for line in geom.splitlines():
            lline = line.split()
            if len(lline) != 4:
                continue
            if lline[0] == 'Gh':
                dashdderiv.append([0.0, 0.0, 0.0])
            else:
                dashdderiv.append([float(x.replace('D', 'E')) for x in dfile.readline().split()])
        dfile.close()

        if len(dashdderiv) != self.natom():
            raise ValidationError('Program gcp gradient file has %d atoms; %d expected.' %
                (len(dashdderiv), self.natom()))

    # Prepare results for Psi4
    if isP4regime and dertype != 0:
        core.set_variable('GCP CORRECTION ENERGY', dashd)
        psi_dashdderiv = core.Matrix.from_list(dashdderiv)

    # Print program output to file if verbose
    if not verbose and isP4regime:
        verbose = True if core.get_option('SCF', 'PRINT') >= 3 else False
    if verbose:
        text = '\n  ==> GCP Output <==\n'
        text += out.decode('utf-8')
        if dertype != 0:
            with open(derivfile, 'r') as handle:
                text += handle.read().replace('D', 'E')
            text += '\n'
        if isP4regime:
            core.print_out(text)
        else:
            print(text)

#    # Clean up files and remove scratch directory
#    os.unlink(paramfile1)
#    os.unlink(paramfile2)
#    os.unlink(geomfile)
#    if dertype != 0:
#        os.unlink(derivfile)
#    if defmoved is True:
#        os.rename(defaultfile + '_hide', defaultfile)

    os.chdir('..')
#    try:
#        shutil.rmtree(dftd3_tmpdir)
#    except OSError as e:
#        ValidationError('Unable to remove dftd3 temporary directory %s' % e)
    os.chdir(current_directory)

    # return -D & d(-D)/dx
    if dertype == -1:
        return dashd, dashdderiv
    elif dertype == 0:
        return dashd
    elif dertype == 1:
        return psi_dashdderiv
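
# Minimal usage sketch for run_gcp above (illustrative, not a public Psi4 API):
# `mol` is assumed to be an already-built qcdb.Molecule or psi4.core.Molecule
# and the gcp executable is assumed to be in PATH or PSIPATH.
#
#     e_gcp = run_gcp(mol, func='hf3c', dertype=0)   # gCP energy only [Eh]
#     g_gcp = run_gcp(mol, func='hf3c', dertype=1)   # (nat, 3) gradient [Eh/a0]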
Example #36
0
def nbody_gufunc(func, method_string, **kwargs):
    """
    Computes the nbody interaction energy, gradient, or Hessian depending on input.
    This is a generalized universal function for computing interaction and total quantities.

    :returns: *return type of func* |w--w| The data.

    :returns: (*float*, :py:class:`~psi4.core.Wavefunction`) |w--w| data and wavefunction with energy/gradient/hessian set appropriately when **return_wfn** specified.

    :type func: function
    :param func: ``energy`` || etc.

        Python function that accepts method_string and a molecule. Returns an
        energy, gradient, or Hessian as requested.

    :type method_string: string
    :param method_string: ``'scf'`` || ``'mp2'`` || ``'ci5'`` || etc.

        First argument, lowercase and usually unlabeled. Indicates the computational
        method to be passed to func.

    :type molecule: :ref:`molecule <op_py_molecule>`
    :param molecule: ``h2o`` || etc.

        The target molecule, if not the last molecule defined.

    :type return_wfn: :ref:`boolean <op_py_boolean>`
    :param return_wfn: ``'on'`` || |dl| ``'off'`` |dr|

        Indicate to additionally return the :py:class:`~psi4.core.Wavefunction`
        calculation result as the second element of a tuple.

    :type bsse_type: string or list
    :param bsse_type: ``'cp'`` || ``['nocp', 'vmfc']`` || |dl| ``None`` |dr| || etc.

        Type of BSSE correction to compute: CP, NoCP, or VMFC. The first in this
        list is returned by this function. By default, this function is not called.

    :type max_nbody: int
    :param max_nbody: ``3`` || etc.

        Maximum n-body to compute, cannot exceed the number of fragments in the molecule.

    :type ptype: string
    :param ptype: ``'energy'`` || ``'gradient'`` || ``'hessian'``

        Type of the procedure passed in.

    :type return_total_data: :ref:`boolean <op_py_boolean>`
    :param return_total_data: ``'on'`` || |dl| ``'off'`` |dr|

        If True returns the total data (energy/gradient/etc) of the system,
        otherwise returns interaction data.

    :type levels: dict
    :param levels: ``{1: 'ccsd(t)', 2: 'mp2', 'supersystem': 'scf'}`` || ``{1: 2, 2: 'ccsd(t)', 3: 'mp2'}`` || etc

        Dictionary of different levels of theory for different levels of expansion.
        Note that method_string is not used in this case.
        supersystem computes all higher order n-body effects up to nfragments.

    :type embedding_charges: dict
    :param embedding_charges: ``{1: [-0.834, 0.417, 0.417], ..}``

        Dictionary of atom-centered point charges. keys: 1-based index of fragment, values: list of charges for each fragment.

    :type charge_method: string
    :param charge_method: ``scf/6-31g`` || ``b3lyp/6-31g*`` || etc

        Method to compute point charges for monomers. Overridden by embedding_charges if both are provided.

    :type charge_type: string
    :param charge_type: ``MULLIKEN_CHARGES`` || ``LOWDIN_CHARGES`` 

        Default is ``MULLIKEN_CHARGES``
    """

    # Initialize dictionaries for easy data passing
    metadata, component_results, nbody_results = {}, {}, {}

    # Parse some kwargs
    kwargs = p4util.kwargs_lower(kwargs)
    if kwargs.get('levels', False):
        return driver_nbody_helper.multi_level(func, **kwargs)
    metadata['ptype'] = kwargs.pop('ptype', None)
    metadata['return_wfn'] = kwargs.pop('return_wfn', False)
    metadata['return_total_data'] = kwargs.pop('return_total_data', False)
    metadata['molecule'] = kwargs.pop('molecule', core.get_active_molecule())
    metadata['molecule'].update_geometry()
    metadata['molecule'].fix_com(True)
    metadata['molecule'].fix_orientation(True)
    metadata['embedding_charges'] = kwargs.get('embedding_charges', False)
    metadata['kwargs'] = kwargs
    core.clean_variables()

    if metadata['ptype'] not in ['energy', 'gradient', 'hessian']:
        raise ValidationError("""N-Body driver: The ptype '%s' is not recognized.""" % metadata['ptype'])

    # Parse bsse_type, raise exception if not provided or unrecognized
    metadata['bsse_type_list'] = kwargs.pop('bsse_type')
    if metadata['bsse_type_list'] is None:
        raise ValidationError("N-Body GUFunc: Must pass a bsse_type")
    if not isinstance(metadata['bsse_type_list'], list):
        metadata['bsse_type_list'] = [metadata['bsse_type_list']]

    for num, btype in enumerate(metadata['bsse_type_list']):
        metadata['bsse_type_list'][num] = btype.lower()
        if btype.lower() not in ['cp', 'nocp', 'vmfc']:
            raise ValidationError("N-Body GUFunc: bsse_type '%s' is not recognized" % btype.lower())

    metadata['max_nbody'] = kwargs.get('max_nbody', -1)
    metadata['max_frag'] = metadata['molecule'].nfragments()
    if metadata['max_nbody'] == -1:
        metadata['max_nbody'] = metadata['molecule'].nfragments()
    else:
        metadata['max_nbody'] = min(metadata['max_nbody'], metadata['max_frag'])

    # Flip this off for now, needs more testing
    # If we are doing CP lets save them integrals
    #if 'cp' in bsse_type_list and (len(bsse_type_list) == 1):
    #    # Set to save RI integrals for repeated full-basis computations
    #    ri_ints_io = core.get_global_option('DF_INTS_IO')

    #    # inquire if above at all applies to dfmp2 or just scf
    #    core.set_global_option('DF_INTS_IO', 'SAVE')
    #    psioh = core.IOManager.shared_object()
    #    psioh.set_specific_retention(97, True)

    bsse_str = metadata['bsse_type_list'][0]
    if len(metadata['bsse_type_list']) > 1:
        bsse_str = str(metadata['bsse_type_list'])
    core.print_out("\n\n")
    core.print_out("   ===> N-Body Interaction Abacus <===\n")
    core.print_out("        BSSE Treatment:                     %s\n" % bsse_str)

    # Get compute list
    metadata = build_nbody_compute_list(metadata)

    # Compute N-Body components
    component_results = compute_nbody_components(func, method_string, metadata)

    # Assemble N-Body quantities
    nbody_results = assemble_nbody_components(metadata, component_results)

    # Build wfn and bind variables
    wfn = core.Wavefunction.build(metadata['molecule'], 'def2-svp')
    dicts = [
        'energies', 'ptype', 'intermediates', 'energy_body_dict', 'gradient_body_dict', 'hessian_body_dict', 'nbody',
        'cp_energy_body_dict', 'nocp_energy_body_dict', 'vmfc_energy_body_dict'
    ]
    if metadata['ptype'] == 'gradient':
        wfn.set_gradient(nbody_results['ret_ptype'])
        nbody_results['gradient_body_dict'] = nbody_results['ptype_body_dict']
    elif metadata['ptype'] == 'hessian':
        nbody_results['hessian_body_dict'] = nbody_results['ptype_body_dict']
        wfn.set_hessian(nbody_results['ret_ptype'])
        component_results_gradient = component_results.copy()
        component_results_gradient['ptype'] = component_results_gradient['gradients']
        metadata['ptype'] = 'gradient'
        nbody_results_gradient = assemble_nbody_components(metadata, component_results_gradient)
        wfn.set_gradient(nbody_results_gradient['ret_ptype'])
        nbody_results['gradient_body_dict'] = nbody_results_gradient['ptype_body_dict']

    for r in [component_results, nbody_results]:
        for d in r:
            if d in dicts:
                for var, value in r[d].items():
                    try:
                        wfn.set_scalar_variable(str(var), value)
                        core.set_scalar_variable(str(var), value)
                    except:
                        wfn.set_array_variable(d.split('_')[0].upper() + ' ' + str(var), core.Matrix.from_array(value))

    core.set_variable("CURRENT ENERGY", nbody_results['ret_energy'])
    wfn.set_variable("CURRENT ENERGY", nbody_results['ret_energy'])

    if metadata['return_wfn']:
        return (nbody_results['ret_ptype'], wfn)
    else:
        return nbody_results['ret_ptype']
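
# Hedged usage sketch: nbody_gufunc is normally reached through the psi4
# driver by passing bsse_type to psi4.energy() on a molecule whose fragments
# are separated by '--'. The neon-dimer geometry below is illustrative only.
import psi4

ne2 = psi4.geometry("""
0 1
Ne 0.0 0.0 0.0
--
0 1
Ne 0.0 0.0 3.1
""")
e_int_cp = psi4.energy('scf/cc-pvdz', bsse_type='cp', molecule=ne2)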
Example #37
0
def print_sapt_dft_summary(data, name, short=False):

    ret = "   %s Results\n" % name
    ret += "  " + "-" * 105 + "\n"

    # Elst
    ret += print_sapt_var("Electrostatics", data["Elst10,r"]) + "\n"
    ret += print_sapt_var("  Elst1,r", data["Elst10,r"]) + "\n"
    ret += "\n"
    core.set_variable("SAPT ELST ENERGY", data["Elst10,r"])

    # Exchange
    ret += print_sapt_var("Exchange", data["Exch10"]) + "\n"
    ret += print_sapt_var("  Exch1", data["Exch10"]) + "\n"
    ret += print_sapt_var("  Exch1(S^2)", data["Exch10(S^2)"]) + "\n"
    ret += "\n"
    core.set_variable("SAPT EXCH ENERGY", data["Exch10"])

    # Induction
    ind = data["Ind20,r"] + data["Exch-Ind20,r"]
    ind_ab = data["Ind20,r (A<-B)"] + data["Exch-Ind20,r (A<-B)"]
    ind_ba = data["Ind20,r (A->B)"] + data["Exch-Ind20,r (A->B)"]

    if "Delta HF Correction" in list(data):
        ind += data["Delta HF Correction"]

    ret += print_sapt_var("Induction", ind) + "\n"

    ret += print_sapt_var("  Ind2,r", data["Ind20,r"]) + "\n"
    ret += print_sapt_var("  Exch-Ind2,r", data["Exch-Ind20,r"]) + "\n"
    ret += print_sapt_var("  Induction (A<-B)", ind_ab) + "\n"
    ret += print_sapt_var("  Induction (A->B)", ind_ba) + "\n"

    if "Delta HF Correction" in list(data):
        ret += print_sapt_var("  delta HF,r (2)",
                              data["Delta HF Correction"]) + "\n"

    ret += "\n"
    core.set_variable("SAPT IND ENERGY", ind)

    # Exchange-dispersion scaling
    exch_disp_scheme = core.get_option("SAPT",
                                       "SAPT_DFT_EXCH_DISP_SCALE_SCHEME")
    if exch_disp_scheme == "NONE":
        data["Exch-Disp20,r"] = data["Exch-Disp20,u"]
    elif exch_disp_scheme == "FIXED":
        exch_disp_scale = core.get_option("SAPT",
                                          "SAPT_DFT_EXCH_DISP_FIXED_SCALE")
        data["Exch-Disp20,r"] = exch_disp_scale * data["Exch-Disp20,u"]
    elif exch_disp_scheme == "DISP":
        exch_disp_scale = data["Disp20"] / data["Disp20,u"]
        data["Exch-Disp20,r"] = exch_disp_scale * data["Exch-Disp20,u"]

    # Dispersion
    disp = data["Disp20"] + data["Exch-Disp20,r"]
    ret += print_sapt_var("Dispersion", disp) + "\n"
    ret += print_sapt_var("  Disp2,r", data["Disp20"]) + "\n"
    ret += print_sapt_var("  Disp2,u", data["Disp20,u"]) + "\n"
    if exch_disp_scheme != "NONE":
        ret += print_sapt_var("  Est. Exch-Disp2,r",
                              data["Exch-Disp20,r"]) + "\n"
    ret += print_sapt_var("  Exch-Disp2,u", data["Exch-Disp20,u"]) + "\n"
    ret += "\n"
    core.set_variable("SAPT DISP ENERGY", disp)

    # Total energy
    total = data["Elst10,r"] + data["Exch10"] + ind + disp
    ret += print_sapt_var("Total %-17s" % name, total,
                          start_spacer="    ") + "\n"
    core.set_variable("SAPT(DFT) TOTAL ENERGY", total)
    core.set_variable("SAPT TOTAL ENERGY", total)
    core.set_variable("CURRENT ENERGY", total)

    ret += "  " + "-" * 105 + "\n"
    return ret
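
# Sketch of the exchange-dispersion scaling choices applied above; the helper
# name and signature are illustrative, not part of the Psi4 API.
def _scaled_exch_disp(scheme, exch_disp20_u, disp20_r=None, disp20_u=None, fixed_scale=1.0):
    """Return Exch-Disp20,r for a given SAPT_DFT_EXCH_DISP_SCALE_SCHEME value."""
    if scheme == "NONE":
        return exch_disp20_u                            # keep the uncoupled value
    if scheme == "FIXED":
        return fixed_scale * exch_disp20_u              # user-supplied constant scale
    if scheme == "DISP":
        return (disp20_r / disp20_u) * exch_disp20_u    # scale by Disp20 / Disp20,u
    raise ValueError("unknown exchange-dispersion scaling scheme: %s" % scheme)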
Example #38
0
def print_sapt_hf_summary(data, name, short=False, delta_hf=False):

    ret = "   %s Results\n" % name
    ret += "  " + "-" * 97 + "\n"

    # Elst
    ret += print_sapt_var("Electrostatics", data["Elst10,r"]) + "\n"
    ret += print_sapt_var("  Elst10,r", data["Elst10,r"]) + "\n"
    ret += "\n"
    core.set_variable("SAPT ELST ENERGY", data["Elst10,r"])

    # Exchange
    ret += print_sapt_var("Exchange", data["Exch10"]) + "\n"
    ret += print_sapt_var("  Exch10", data["Exch10"]) + "\n"
    ret += print_sapt_var("  Exch10(S^2)", data["Exch10(S^2)"]) + "\n"
    ret += "\n"
    core.set_variable("SAPT EXCH ENERGY", data["Exch10"])

    ind = data["Ind20,r"] + data["Exch-Ind20,r"]
    ind_ab = data["Ind20,r (A<-B)"] + data["Exch-Ind20,r (A<-B)"]
    ind_ba = data["Ind20,r (A->B)"] + data["Exch-Ind20,r (A->B)"]

    ret += print_sapt_var("Induction", ind) + "\n"
    ret += print_sapt_var("  Ind20,r", data["Ind20,r"]) + "\n"
    ret += print_sapt_var("  Exch-Ind20,r", data["Exch-Ind20,r"]) + "\n"
    ret += print_sapt_var("  Induction (A<-B)", ind_ab) + "\n"
    ret += print_sapt_var("  Induction (A->B)", ind_ba) + "\n"
    ret += "\n"
    core.set_variable("SAPT IND ENERGY", ind)

    if delta_hf:
        total_sapt = (data["Elst10,r"] + data["Exch10"] + ind)
        sapt_hf_delta = delta_hf - total_sapt

        core.set_variable("SAPT(DFT) Delta HF", sapt_hf_delta)

        ret += print_sapt_var(
            "%-21s" % "Total SAPT", total_sapt, start_spacer="   ") + "\n"
        ret += print_sapt_var(
            "%-21s" % "Total HF", delta_hf, start_spacer="   ") + "\n"
        ret += print_sapt_var(
            "%-21s" % "Delta HF", sapt_hf_delta, start_spacer="   ") + "\n"

        ret += "  " + "-" * 97 + "\n"
        return ret

    else:
        # Dispersion
        disp = data["Disp20"] + data["Exch-Disp20,u"]
        ret += print_sapt_var("Dispersion", disp) + "\n"
        ret += print_sapt_var("  Disp20", data["Disp20,u"]) + "\n"
        ret += print_sapt_var("  Exch-Disp20", data["Exch-Disp20,u"]) + "\n"
        ret += "\n"
        core.set_variable("SAPT DISP ENERGY", disp)

        # Total energy
        total = data["Elst10,r"] + data["Exch10"] + ind + disp
        ret += print_sapt_var("Total %-15s" % name, total,
                              start_spacer="   ") + "\n"
        core.set_variable("SAPT0 TOTAL ENERGY", total)
        core.set_variable("SAPT TOTAL ENERGY", total)
        core.set_variable("CURRENT ENERGY", total)

        ret += "  " + "-" * 97 + "\n"
        return ret
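
# Bookkeeping sketch of the delta-HF branch above (names illustrative, all
# values in Eh): the quantity stored as "SAPT(DFT) Delta HF" is the part of
# the HF interaction energy not captured by first-order electrostatics and
# exchange plus second-order (exchange-)induction:
#
#     dHF_int  = E_HF(dimer) - E_HF(monomer A) - E_HF(monomer B)
#     delta_HF = dHF_int - (Elst10,r + Exch10 + Ind20,r + Exch-Ind20,r)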
Example #39
0
def run_sapt_dft(name, **kwargs):
    optstash = p4util.OptionsState(['SCF_TYPE'], ['SCF', 'REFERENCE'], ['SCF', 'DFT_GRAC_SHIFT'],
                                   ['SCF', 'SAVE_JK'])

    core.tstart()
    # Alter default algorithm
    if not core.has_global_option_changed('SCF_TYPE'):
        core.set_global_option('SCF_TYPE', 'DF')

    core.prepare_options_for_module("SAPT")

    # Get the molecule of interest
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        sapt_dimer = kwargs.pop('molecule', core.get_active_molecule())
    else:
        core.print_out('Warning! SAPT argument "ref_wfn" is only able to use molecule information.')
        sapt_dimer = ref_wfn.molecule()

    sapt_dimer, monomerA, monomerB = proc_util.prepare_sapt_molecule(sapt_dimer, "dimer")

    # Grab overall settings
    mon_a_shift = core.get_option("SAPT", "SAPT_DFT_GRAC_SHIFT_A")
    mon_b_shift = core.get_option("SAPT", "SAPT_DFT_GRAC_SHIFT_B")
    do_delta_hf = core.get_option("SAPT", "SAPT_DFT_DO_DHF")
    sapt_dft_functional = core.get_option("SAPT", "SAPT_DFT_FUNCTIONAL")

    # Print out the title and some information
    core.print_out("\n")
    core.print_out("         ---------------------------------------------------------\n")
    core.print_out("         " + "SAPT(DFT) Procedure".center(58) + "\n")
    core.print_out("\n")
    core.print_out("         " + "by Daniel G. A. Smith".center(58) + "\n")
    core.print_out("         ---------------------------------------------------------\n")
    core.print_out("\n")

    core.print_out("  !!!  WARNING:  SAPT(DFT) capability is in beta. Please use with caution. !!!\n\n")

    core.print_out("  ==> Algorithm <==\n\n")
    core.print_out("   SAPT DFT Functional     %12s\n" % str(sapt_dft_functional))
    core.print_out("   Monomer A GRAC Shift    %12.6f\n" % mon_a_shift)
    core.print_out("   Monomer B GRAC Shift    %12.6f\n" % mon_b_shift)
    core.print_out("   Delta HF                %12s\n" % ("True" if do_delta_hf else "False"))
    core.print_out("   JK Algorithm            %12s\n" % core.get_global_option("SCF_TYPE"))
    core.print_out("\n")
    core.print_out("   Required computations:\n")
    if (do_delta_hf):
        core.print_out("     HF  (Dimer)\n")
        core.print_out("     HF  (Monomer A)\n")
        core.print_out("     HF  (Monomer B)\n")
    core.print_out("     DFT (Monomer A)\n")
    core.print_out("     DFT (Monomer B)\n")
    core.print_out("\n")

    if (sapt_dft_functional != "HF") and ((mon_a_shift == 0.0) or (mon_b_shift == 0.0)):
        raise ValidationError('SAPT(DFT): must set both "SAPT_DFT_GRAC_SHIFT_A" and "B".')

    if (core.get_option('SCF', 'REFERENCE') != 'RHF'):
        raise ValidationError('SAPT(DFT) currently only supports restricted references.')

    core.IO.set_default_namespace('dimer')
    data = {}

    if (core.get_global_option('SCF_TYPE') == 'DF'):
        # core.set_global_option('DF_INTS_IO', 'LOAD')
        core.set_global_option('DF_INTS_IO', 'SAVE')

    # # Compute dimer wavefunction
    hf_wfn_dimer = None
    if do_delta_hf:
        if (core.get_global_option('SCF_TYPE') == 'DF'):
            core.set_global_option('DF_INTS_IO', 'SAVE')

        hf_data = {}
        hf_wfn_dimer = scf_helper("SCF", molecule=sapt_dimer, banner="SAPT(DFT): delta HF Dimer", **kwargs)
        hf_data["HF DIMER"] = core.get_variable("CURRENT ENERGY")

        if (core.get_global_option('SCF_TYPE') == 'DF'):
            core.IO.change_file_namespace(97, 'dimer', 'monomerA')

        hf_wfn_A = scf_helper("SCF", molecule=monomerA, banner="SAPT(DFT): delta HF Monomer A", **kwargs)
        hf_data["HF MONOMER A"] = core.get_variable("CURRENT ENERGY")

        core.set_global_option("SAVE_JK", True)
        if (core.get_global_option('SCF_TYPE') == 'DF'):
            core.IO.change_file_namespace(97, 'monomerA', 'monomerB')

        hf_wfn_B = scf_helper("SCF", molecule=monomerB, banner="SAPT(DFT): delta HF Monomer B", **kwargs)
        hf_data["HF MONOMER B"] = core.get_variable("CURRENT ENERGY")
        core.set_global_option("SAVE_JK", False)

        # Grab JK object and set to A (so we do not save many JK objects)
        sapt_jk = hf_wfn_B.jk()
        hf_wfn_A.set_jk(sapt_jk)
        core.set_global_option("SAVE_JK", False)

        # Move it back to monomer A
        if (core.get_global_option('SCF_TYPE') == 'DF'):
            core.IO.change_file_namespace(97, 'monomerB', 'dimer')

        core.print_out("\n")
        core.print_out("         ---------------------------------------------------------\n")
        core.print_out("         " + "SAPT(DFT): delta HF Segement".center(58) + "\n")
        core.print_out("\n")
        core.print_out("         " + "by Daniel G. A. Smith and Rob Parrish".center(58) + "\n")
        core.print_out("         ---------------------------------------------------------\n")
        core.print_out("\n")

        # Build cache
        hf_cache = sapt_jk_terms.build_sapt_jk_cache(hf_wfn_A, hf_wfn_B, sapt_jk, True)

        # Electrostatics
        elst = sapt_jk_terms.electrostatics(hf_cache, True)
        hf_data.update(elst)

        # Exchange
        exch = sapt_jk_terms.exchange(hf_cache, sapt_jk, True)
        hf_data.update(exch)

        # Induction
        ind = sapt_jk_terms.induction(
            hf_cache,
            sapt_jk,
            True,
            maxiter=core.get_option("SAPT", "MAXITER"),
            conv=core.get_option("SAPT", "D_CONVERGENCE"),
            Sinf=core.get_option("SAPT", "DO_IND_EXCH_SINF"))
        hf_data.update(ind)

        dhf_value = hf_data["HF DIMER"] - hf_data["HF MONOMER A"] - hf_data["HF MONOMER B"]

        core.print_out("\n")
        core.print_out(print_sapt_hf_summary(hf_data, "SAPT(HF)", delta_hf=dhf_value))

        data["Delta HF Correction"] = core.get_variable("SAPT(DFT) Delta HF")
        sapt_jk.finalize()

        del hf_wfn_A, hf_wfn_B, sapt_jk

    if hf_wfn_dimer is None:
        dimer_wfn = core.Wavefunction.build(sapt_dimer, core.get_global_option("BASIS"))
    else:
        dimer_wfn = hf_wfn_dimer

    # Set the primary functional
    core.set_local_option('SCF', 'REFERENCE', 'RKS')

    # Compute Monomer A wavefunction
    if (core.get_global_option('SCF_TYPE') == 'DF'):
        core.IO.change_file_namespace(97, 'dimer', 'monomerA')

    if mon_a_shift:
        core.set_global_option("DFT_GRAC_SHIFT", mon_a_shift)

    # Save the JK object
    core.IO.set_default_namespace('monomerA')
    wfn_A = scf_helper(
        sapt_dft_functional, post_scf=False, molecule=monomerA, banner="SAPT(DFT): DFT Monomer A", **kwargs)
    data["DFT MONOMERA"] = core.get_variable("CURRENT ENERGY")

    core.set_global_option("DFT_GRAC_SHIFT", 0.0)

    # Compute Monomer B wavefunction
    if (core.get_global_option('SCF_TYPE') == 'DF'):
        core.IO.change_file_namespace(97, 'monomerA', 'monomerB')

    if mon_b_shift:
        core.set_global_option("DFT_GRAC_SHIFT", mon_b_shift)

    core.set_global_option("SAVE_JK", True)
    core.IO.set_default_namespace('monomerB')
    wfn_B = scf_helper(
        sapt_dft_functional, post_scf=False, molecule=monomerB, banner="SAPT(DFT): DFT Monomer B", **kwargs)
    data["DFT MONOMERB"] = core.get_variable("CURRENT ENERGY")

    # Save JK object
    sapt_jk = wfn_B.jk()
    wfn_A.set_jk(sapt_jk)
    core.set_global_option("SAVE_JK", False)

    core.set_global_option("DFT_GRAC_SHIFT", 0.0)

    # Write out header
    scf_alg = core.get_global_option("SCF_TYPE")
    sapt_dft_header(sapt_dft_functional, mon_a_shift, mon_b_shift, bool(do_delta_hf), scf_alg)

    # Call SAPT(DFT)
    sapt_jk = wfn_B.jk()
    sapt_dft(dimer_wfn, wfn_A, wfn_B, sapt_jk=sapt_jk, data=data, print_header=False)

    # Copy data back into globals
    for k, v in data.items():
        core.set_variable(k, v)

    core.tstop()

    return dimer_wfn
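
# Hedged usage sketch for run_sapt_dft above, reached as psi4.energy('sapt(dft)').
# The helium-dimer geometry and the GRAC shift values are placeholders; real
# shifts must be determined per monomer (typically ionization potential plus
# HOMO energy), as the check above enforces nonzero values.
import psi4

dimer = psi4.geometry("""
0 1
He 0.0 0.0 0.0
--
0 1
He 0.0 0.0 3.0
""")
psi4.set_options({'sapt_dft_functional': 'pbe0',
                  'sapt_dft_grac_shift_a': 0.08,   # placeholder value
                  'sapt_dft_grac_shift_b': 0.08})  # placeholder value
psi4.energy('sapt(dft)', molecule=dimer)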
Example #40
0
def nbody_gufunc(func, method_string, **kwargs):
    """
    Computes the nbody interaction energy, gradient, or Hessian depending on input.
    This is a generalized universal function for computing interaction and total quantities.

    :returns: *return type of func* |w--w| The data.

    :returns: (*float*, :py:class:`~psi4.core.Wavefunction`) |w--w| data and wavefunction with energy/gradient/hessian set appropriately when **return_wfn** specified.

    :type func: function
    :param func: ``energy`` || etc.

        Python function that accepts method_string and a molecule. Returns an
        energy, gradient, or Hessian as requested.

    :type method_string: string
    :param method_string: ``'scf'`` || ``'mp2'`` || ``'ci5'`` || etc.

        First argument, lowercase and usually unlabeled. Indicates the computational
        method to be passed to func.

    :type molecule: :ref:`molecule <op_py_molecule>`
    :param molecule: ``h2o`` || etc.

        The target molecule, if not the last molecule defined.

    :type return_wfn: :ref:`boolean <op_py_boolean>`
    :param return_wfn: ``'on'`` || |dl| ``'off'`` |dr|

        Indicate to additionally return the :py:class:`~psi4.core.Wavefunction`
        calculation result as the second element of a tuple.

    :type bsse_type: string or list
    :param bsse_type: ``'cp'`` || ``['nocp', 'vmfc']`` || |dl| ``None`` |dr| || etc.

        Type of BSSE correction to compute: CP, NoCP, or VMFC. The first in this
        list is returned by this function. By default, this function is not called.

    :type max_nbody: int
    :param max_nbody: ``3`` || etc.

        Maximum n-body to compute, cannot exceed the number of fragments in the molecule.

    :type ptype: string
    :param ptype: ``'energy'`` || ``'gradient'`` || ``'hessian'``

        Type of the procedure passed in.

    :type return_total_data: :ref:`boolean <op_py_boolean>`
    :param return_total_data: ``'on'`` || |dl| ``'off'`` |dr|

        If True returns the total data (energy/gradient/etc) of the system,
        otherwise returns interaction data.

    :type levels: dict
    :param levels: ``{1: 'ccsd(t)', 2: 'mp2', 'supersystem': 'scf'}`` || ``{1: 2, 2: 'ccsd(t)', 3: 'mp2'}`` || etc

        Dictionary of different levels of theory for different levels of expansion.
        Note that method_string is not used in this case.
        supersystem computes all higher order n-body effects up to nfragments.

    :type embedding_charges: dict
    :param embedding_charges: ``{1: [-0.834, 0.417, 0.417], ..}``

        Dictionary of atom-centered point charges. keys: 1-based index of fragment, values: list of charges for each fragment.

    :type charge_method: string
    :param charge_method: ``scf/6-31g`` || ``b3lyp/6-31g*`` || etc

        Method to compute point charges for monomers. Overridden by embedding_charges if both are provided.

    :type charge_type: string
    :param charge_type: ``MULLIKEN_CHARGES`` || ``LOWDIN_CHARGES`` 

        Default is ``MULLIKEN_CHARGES``
    """

    # Initialize dictionaries for easy data passing
    metadata, component_results, nbody_results = {}, {}, {}

    # Parse some kwargs
    kwargs = p4util.kwargs_lower(kwargs)
    if kwargs.get('levels', False):
        return driver_nbody_helper.multi_level(func, **kwargs)
    metadata['ptype'] = kwargs.pop('ptype', None)
    metadata['return_wfn'] = kwargs.pop('return_wfn', False)
    metadata['return_total_data'] = kwargs.pop('return_total_data', False)
    metadata['molecule'] = kwargs.pop('molecule', core.get_active_molecule())
    metadata['molecule'].update_geometry()
    metadata['molecule'].fix_com(True)
    metadata['molecule'].fix_orientation(True)
    metadata['embedding_charges'] = kwargs.get('embedding_charges', False)
    metadata['kwargs'] = kwargs
    core.clean_variables()

    if metadata['ptype'] not in ['energy', 'gradient', 'hessian']:
        raise ValidationError(
            """N-Body driver: The ptype '%s' is not regonized.""" %
            metadata['ptype'])

    # Parse bsse_type, raise exception if not provided or unrecognized
    metadata['bsse_type_list'] = kwargs.pop('bsse_type')
    if metadata['bsse_type_list'] is None:
        raise ValidationError("N-Body GUFunc: Must pass a bsse_type")
    if not isinstance(metadata['bsse_type_list'], list):
        metadata['bsse_type_list'] = [metadata['bsse_type_list']]

    for num, btype in enumerate(metadata['bsse_type_list']):
        metadata['bsse_type_list'][num] = btype.lower()
        if btype.lower() not in ['cp', 'nocp', 'vmfc']:
            raise ValidationError(
                "N-Body GUFunc: bsse_type '%s' is not recognized" %
                btype.lower())

    metadata['max_nbody'] = kwargs.get('max_nbody', -1)
    if metadata['molecule'].nfragments() == 1:
        raise ValidationError(
            "N-Body requires active molecule to have more than 1 fragment.")
    metadata['max_frag'] = metadata['molecule'].nfragments()
    if metadata['max_nbody'] == -1:
        metadata['max_nbody'] = metadata['molecule'].nfragments()
    else:
        metadata['max_nbody'] = min(metadata['max_nbody'],
                                    metadata['max_frag'])

    # Flip this off for now, needs more testing
    # If we are doing CP lets save them integrals
    #if 'cp' in bsse_type_list and (len(bsse_type_list) == 1):
    #    # Set to save RI integrals for repeated full-basis computations
    #    ri_ints_io = core.get_global_option('DF_INTS_IO')

    #    # inquire if above at all applies to dfmp2 or just scf
    #    core.set_global_option('DF_INTS_IO', 'SAVE')
    #    psioh = core.IOManager.shared_object()
    #    psioh.set_specific_retention(97, True)

    bsse_str = metadata['bsse_type_list'][0]
    if len(metadata['bsse_type_list']) > 1:
        bsse_str = str(metadata['bsse_type_list'])
    core.print_out("\n\n")
    core.print_out("   ===> N-Body Interaction Abacus <===\n")
    core.print_out("        BSSE Treatment:                     %s\n" %
                   bsse_str)

    # Get compute list
    metadata = build_nbody_compute_list(metadata)

    # Compute N-Body components
    component_results = compute_nbody_components(func, method_string, metadata)

    # Assemble N-Body quantities
    nbody_results = assemble_nbody_components(metadata, component_results)

    # Build wfn and bind variables
    wfn = core.Wavefunction.build(metadata['molecule'], 'def2-svp')
    dicts = [
        'energies', 'ptype', 'intermediates', 'energy_body_dict',
        'gradient_body_dict', 'hessian_body_dict', 'nbody',
        'cp_energy_body_dict', 'nocp_energy_body_dict', 'vmfc_energy_body_dict'
    ]
    if metadata['ptype'] == 'gradient':
        wfn.set_gradient(nbody_results['ret_ptype'])
        nbody_results['gradient_body_dict'] = nbody_results['ptype_body_dict']
    elif metadata['ptype'] == 'hessian':
        nbody_results['hessian_body_dict'] = nbody_results['ptype_body_dict']
        wfn.set_hessian(nbody_results['ret_ptype'])
        component_results_gradient = component_results.copy()
        component_results_gradient['ptype'] = component_results_gradient[
            'gradients']
        metadata['ptype'] = 'gradient'
        nbody_results_gradient = assemble_nbody_components(
            metadata, component_results_gradient)
        wfn.set_gradient(nbody_results_gradient['ret_ptype'])
        nbody_results['gradient_body_dict'] = nbody_results_gradient[
            'ptype_body_dict']

    for r in [component_results, nbody_results]:
        for d in r:
            if d in dicts:
                for var, value in r[d].items():
                    try:
                        wfn.set_scalar_variable(str(var), value)
                        core.set_scalar_variable(str(var), value)
                    except:
                        wfn.set_array_variable(
                            d.split('_')[0].upper() + ' ' + str(var),
                            core.Matrix.from_array(value))

    core.set_variable("CURRENT ENERGY", nbody_results['ret_energy'])
    wfn.set_variable("CURRENT ENERGY", nbody_results['ret_energy'])

    if metadata['return_wfn']:
        return (nbody_results['ret_ptype'], wfn)
    else:
        return nbody_results['ret_ptype']
Example #41
0
def run_gaussian_2(name, **kwargs):

    # throw an exception for open-shells
    if (core.get_option('SCF','REFERENCE') != 'RHF' ):
        raise ValidationError("""g2 computations require "reference rhf".""")

    # stash user options:
    optstash = p4util.OptionsState(
        ['FNOCC','COMPUTE_TRIPLES'],
        ['FNOCC','COMPUTE_MP4_TRIPLES'],
        ['FREEZE_CORE'],
        ['MP2_TYPE'],
        ['SCF','SCF_TYPE'])

    # override default scf_type
    core.set_local_option('SCF','SCF_TYPE','PK')

    # optimize geometry at scf level
    core.clean()
    core.set_global_option('BASIS',"6-31G(D)")
    driver.optimize('scf')
    core.clean()

    # scf frequencies for zpe
    # NOTE This line should not be needed, but without it there's a seg fault
    scf_e, ref = driver.frequency('scf', return_wfn=True)

    # thermodynamic properties
    du = core.get_variable('INTERNAL ENERGY CORRECTION')
    dh = core.get_variable('ENTHALPY CORRECTION')
    dg = core.get_variable('GIBBS FREE ENERGY CORRECTION')

    freqs   = ref.frequencies()
    nfreq   = freqs.dim(0)
    freqsum = 0.0
    for i in range(0, nfreq):
        freqsum += freqs.get(i)
    zpe = freqsum / p4const.psi_hartree2wavenumbers * 0.8929 * 0.5
    core.clean()

    # optimize geometry at mp2 (no frozen core) level
    # note: freeze_core isn't an option in MP2
    core.set_global_option('FREEZE_CORE',"FALSE")
    core.set_global_option('MP2_TYPE', 'CONV')
    driver.optimize('mp2')
    core.clean()

    # qcisd(t)
    core.set_local_option('FNOCC','COMPUTE_MP4_TRIPLES',"TRUE")
    core.set_global_option('FREEZE_CORE',"TRUE")
    core.set_global_option('BASIS',"6-311G(D_P)")
    ref = driver.proc.run_fnocc('qcisd(t)', return_wfn=True, **kwargs)

    # HLC: high-level correction based on number of valence electrons
    nirrep = ref.nirrep()
    frzcpi = ref.frzcpi()
    nfzc = 0
    for i in range (0,nirrep):
        nfzc += frzcpi[i]
    nalpha = ref.nalpha() - nfzc
    nbeta  = ref.nbeta() - nfzc
    # hlc of gaussian-2
    hlc = -0.00481 * nalpha -0.00019 * nbeta
    # hlc of gaussian-1
    hlc1 = -0.00614 * nalpha

    eqci_6311gdp = core.get_variable("QCISD(T) TOTAL ENERGY")
    emp4_6311gd  = core.get_variable("MP4 TOTAL ENERGY")
    emp2_6311gd  = core.get_variable("MP2 TOTAL ENERGY")
    core.clean()

    # correction for diffuse functions
    core.set_global_option('BASIS',"6-311+G(D_P)")
    driver.energy('mp4')
    emp4_6311pg_dp = core.get_variable("MP4 TOTAL ENERGY")
    emp2_6311pg_dp = core.get_variable("MP2 TOTAL ENERGY")
    core.clean()

    # correction for polarization functions
    core.set_global_option('BASIS',"6-311G(2DF_P)")
    driver.energy('mp4')
    emp4_6311g2dfp = core.get_variable("MP4 TOTAL ENERGY")
    emp2_6311g2dfp = core.get_variable("MP2 TOTAL ENERGY")
    core.clean()

    # big basis mp2
    core.set_global_option('BASIS',"6-311+G(3DF_2P)")
    #run_fnocc('_mp2',**kwargs)
    driver.energy('mp2')
    emp2_big = core.get_variable("MP2 TOTAL ENERGY")
    core.clean()
    eqci       = eqci_6311gdp
    e_delta_g2 = emp2_big + emp2_6311gd - emp2_6311g2dfp - emp2_6311pg_dp
    e_plus     = emp4_6311pg_dp - emp4_6311gd
    e_2df      = emp4_6311g2dfp - emp4_6311gd

    eg2 = eqci + e_delta_g2 + e_plus + e_2df
    eg2_mp2_0k = eqci + (emp2_big - emp2_6311gd) + hlc + zpe

    core.print_out('\n')
    core.print_out('  ==>  G1/G2 Energy Components  <==\n')
    core.print_out('\n')
    core.print_out('        QCISD(T):            %20.12lf\n' % eqci)
    core.print_out('        E(Delta):            %20.12lf\n' % e_delta_g2)
    core.print_out('        E(2DF):              %20.12lf\n' % e_2df)
    core.print_out('        E(+):                %20.12lf\n' % e_plus)
    core.print_out('        E(G1 HLC):           %20.12lf\n' % hlc1)
    core.print_out('        E(G2 HLC):           %20.12lf\n' % hlc)
    core.print_out('        E(ZPE):              %20.12lf\n' % zpe)
    core.print_out('\n')
    core.print_out('  ==>  0 Kelvin Results  <==\n')
    core.print_out('\n')
    eg2_0k = eg2 + zpe + hlc
    core.print_out('        G1:                  %20.12lf\n' % (eqci + e_plus + e_2df + hlc1 + zpe))
    core.print_out('        G2(MP2):             %20.12lf\n' % eg2_mp2_0k)
    core.print_out('        G2:                  %20.12lf\n' % eg2_0k)

    core.set_variable("G1 TOTAL ENERGY",eqci + e_plus + e_2df + hlc1 + zpe)
    core.set_variable("G2 TOTAL ENERGY",eg2_0k)
    core.set_variable("G2(MP2) TOTAL ENERGY",eg2_mp2_0k)

    core.print_out('\n')
    T = core.get_global_option('T')
    core.print_out('  ==>  %3.0lf Kelvin Results  <==\n'% T)
    core.print_out('\n')

    internal_energy = eg2_mp2_0k + du - zpe / 0.8929
    enthalpy        = eg2_mp2_0k + dh - zpe / 0.8929
    gibbs           = eg2_mp2_0k + dg - zpe / 0.8929

    core.print_out('        G2(MP2) energy:      %20.12lf\n' % internal_energy )
    core.print_out('        G2(MP2) enthalpy:    %20.12lf\n' % enthalpy)
    core.print_out('        G2(MP2) free energy: %20.12lf\n' % gibbs)
    core.print_out('\n')

    core.set_variable("G2(MP2) INTERNAL ENERGY",internal_energy)
    core.set_variable("G2(MP2) ENTHALPY",enthalpy)
    core.set_variable("G2(MP2) FREE ENERGY",gibbs)

    internal_energy = eg2_0k + du - zpe / 0.8929
    enthalpy        = eg2_0k + dh - zpe / 0.8929
    gibbs           = eg2_0k + dg - zpe / 0.8929

    core.print_out('        G2 energy:           %20.12lf\n' % internal_energy )
    core.print_out('        G2 enthalpy:         %20.12lf\n' % enthalpy)
    core.print_out('        G2 free energy:      %20.12lf\n' % gibbs)

    core.set_variable("CURRENT ENERGY",eg2_0k)

    core.set_variable("G2 INTERNAL ENERGY",internal_energy)
    core.set_variable("G2 ENTHALPY",enthalpy)
    core.set_variable("G2 FREE ENERGY",gibbs)

    core.clean()

    optstash.restore()

    # return 0K g2 results
    return eg2_0k
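
# Additivity sketch of the composite energies assembled above (all terms in Eh,
# mirroring the variables eqci, e_plus, e_2df, e_delta_g2, hlc, and zpe):
#
#     E(G2, 0 K)      = E[QCISD(T)/6-311G(d,p)]
#                     + [E(MP4/6-311+G(d,p))  - E(MP4/6-311G(d,p))]      # e_plus
#                     + [E(MP4/6-311G(2df,p)) - E(MP4/6-311G(d,p))]      # e_2df
#                     + [E(MP2/6-311+G(3df,2p)) + E(MP2/6-311G(d,p))
#                        - E(MP2/6-311G(2df,p)) - E(MP2/6-311+G(d,p))]   # e_delta_g2
#                     + HLC + ZPE(scaled)
#
#     E(G2(MP2), 0 K) = E[QCISD(T)/6-311G(d,p)]
#                     + [E(MP2/6-311+G(3df,2p)) - E(MP2/6-311G(d,p))]
#                     + HLC + ZPE(scaled)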
Example #42
0
def exchange(cache, jk, do_print=True):
    """
    Computes the E10 exchange (S^2 and S^inf) from a build_sapt_jk_cache datacache.
    """

    if do_print:
        core.print_out("\n  ==> E10 Exchange <== \n\n")

    # Build potentials
    h_A = cache["V_A"].clone()
    h_A.axpy(2.0, cache["J_A"])
    h_A.axpy(-1.0, cache["K_A"])

    h_B = cache["V_B"].clone()
    h_B.axpy(2.0, cache["J_B"])
    h_B.axpy(-1.0, cache["K_B"])

    w_A = cache["V_A"].clone()
    w_A.axpy(2.0, cache["J_A"])

    w_B = cache["V_B"].clone()
    w_B.axpy(2.0, cache["J_B"])

    # Build inverse exchange metric
    nocc_A = cache["Cocc_A"].shape[1]
    nocc_B = cache["Cocc_B"].shape[1]
    SAB = core.triplet(cache["Cocc_A"], cache["S"], cache["Cocc_B"], True,
                       False, False)
    num_occ = nocc_A + nocc_B

    Sab = core.Matrix(num_occ, num_occ)
    Sab.np[:nocc_A, nocc_A:] = SAB.np
    Sab.np[nocc_A:, :nocc_A] = SAB.np.T
    Sab.np[np.diag_indices_from(Sab.np)] += 1
    Sab.power(-1.0, 1.e-14)
    Sab.np[np.diag_indices_from(Sab.np)] -= 1.0

    Tmo_AA = core.Matrix.from_array(Sab.np[:nocc_A, :nocc_A])
    Tmo_BB = core.Matrix.from_array(Sab.np[nocc_A:, nocc_A:])
    Tmo_AB = core.Matrix.from_array(Sab.np[:nocc_A, nocc_A:])

    T_A = np.dot(cache["Cocc_A"], Tmo_AA).dot(cache["Cocc_A"].np.T)
    T_B = np.dot(cache["Cocc_B"], Tmo_BB).dot(cache["Cocc_B"].np.T)
    T_AB = np.dot(cache["Cocc_A"], Tmo_AB).dot(cache["Cocc_B"].np.T)

    S = cache["S"]

    D_A = cache["D_A"]
    P_A = cache["P_A"]

    D_B = cache["D_B"]
    P_B = cache["P_B"]

    # Compute the J and K matrices
    jk.C_clear()

    jk.C_left_add(cache["Cocc_A"])
    jk.C_right_add(core.doublet(cache["Cocc_A"], Tmo_AA, False, False))

    jk.C_left_add(cache["Cocc_B"])
    jk.C_right_add(core.doublet(cache["Cocc_A"], Tmo_AB, False, False))

    jk.C_left_add(cache["Cocc_A"])
    jk.C_right_add(core.Matrix.chain_dot(P_B, S, cache["Cocc_A"]))

    jk.compute()

    JT_A, JT_AB, Jij = jk.J()
    KT_A, KT_AB, Kij = jk.K()

    # Start S^2
    Exch_s2 = 0.0

    tmp = core.Matrix.chain_dot(D_A, S, D_B, S, P_A)
    Exch_s2 -= 2.0 * w_B.vector_dot(tmp)

    tmp = core.Matrix.chain_dot(D_B, S, D_A, S, P_B)
    Exch_s2 -= 2.0 * w_A.vector_dot(tmp)

    tmp = core.Matrix.chain_dot(P_A, S, D_B)
    Exch_s2 -= 2.0 * Kij.vector_dot(tmp)

    if do_print:
        core.print_out(print_sapt_var("Exch10(S^2) ", Exch_s2, short=True))
        core.print_out("\n")

    # Start Sinf
    Exch10 = 0.0
    Exch10 -= 2.0 * np.vdot(cache["D_A"], cache["K_B"])
    Exch10 += 2.0 * np.vdot(T_A, h_B.np)
    Exch10 += 2.0 * np.vdot(T_B, h_A.np)
    Exch10 += 2.0 * np.vdot(T_AB, h_A.np + h_B.np)
    Exch10 += 4.0 * np.vdot(T_B, JT_AB.np - 0.5 * KT_AB.np)
    Exch10 += 4.0 * np.vdot(T_A, JT_AB.np - 0.5 * KT_AB.np)
    Exch10 += 4.0 * np.vdot(T_B, JT_A.np - 0.5 * KT_A.np)
    Exch10 += 4.0 * np.vdot(T_AB, JT_AB.np - 0.5 * KT_AB.np.T)

    if do_print:
        core.set_variable("Exch10", Exch10)
        core.print_out(print_sapt_var("Exch10", Exch10, short=True))
        core.print_out("\n")

    return {"Exch10(S^2)": Exch_s2, "Exch10": Exch10}
Example #43
0
def run_dftd3(mol, func=None, dashlvl=None, dashparam=None, dertype=None, verbose=False):
    """Compute dispersion correction using Grimme's DFTD3 executable.

    Function to call Grimme's dftd3 program to compute the -D correction
    of level `dashlvl` using parameters for the functional `func`.
    `dashparam` can supply a full set of dispersion parameters in the
    absence of `func` or individual overrides in the presence of `func`.

    The DFTD3 executable must be independently compiled and found in
    :envvar:`PATH` or :envvar:`PSIPATH`.

    Parameters
    ----------
    mol : qcdb.Molecule or psi4.core.Molecule or str
        Molecule on which to run dispersion calculation. Both qcdb and
        psi4.core Molecule classes have been extended by this method, so
        either allowed. Alternately, a string that can be instantiated
        into a qcdb.Molecule.
    func : str or None
        Density functional (Psi4, not Turbomole, names) for which to
        load parameters from dashcoeff[dashlvl][func]. This is not
        passed to DFTD3 and thus may be a dummy or `None`. Any or all
        parameters initialized can be overwritten via `dashparam`.
    dashlvl : {'d2p4', 'd2gr', 'd3zero', 'd3bj', 'd3mzero', 'd3mbj', 'd', 'd2', 'd3', 'd3m'}
        Flavor of a posteriori dispersion correction for which to load
        parameters and call procedure in DFTD3. Must be a key in the
        dashcoeff dict (or a key in dashalias that resolves to one).
    dashparam : dict, optional
        Dictionary of the same keys as dashcoeff[dashlvl] used to
        override any or all values initialized by
        dashcoeff[dashlvl][func].
    dertype : {None, 0, 'none', 'energy', 1, 'first', 'gradient'}, optional
        Maximum derivative level at which to run DFTD3. For large
        `mol`, energy-only calculations can be significantly more
        efficient. Also controls return values, see below.
    verbose : bool, optional
        When `True`, additionally include DFTD3 output in output.

    Returns
    -------
    energy : float, optional
        When `dertype` is 0, energy [Eh].
    gradient : list of lists of floats or psi4.core.Matrix, optional
        When `dertype` is 1, (nat, 3) gradient [Eh/a0].
    (energy, gradient) : float and list of lists of floats or psi4.core.Matrix, optional
        When `dertype` is unspecified, both energy [Eh] and (nat, 3) gradient [Eh/a0].

    Notes
    -----
    research site: https://www.chemie.uni-bonn.de/pctc/mulliken-center/software/dft-d3
    Psi4 mode: When the `psi4` Python module is importable at `import qcdb`
               time, Psi4 mode is activated, with the following alterations:
               * output goes to output file
               * gradient returned as psi4.core.Matrix, not list of lists
               * scratch is written to randomly named subdirectory of psi scratch
               * psivar "DISPERSION CORRECTION ENERGY" is set
               * `verbose` triggered when PRINT keyword of SCF module >= 3

    """
    # Create (if necessary) and update qcdb.Molecule
    if isinstance(mol, (Molecule, core.Molecule)):
        # 1st: called on a qcdb.Molecule
        # 2nd: called on a python export of a psi4.Molecule (py-side through Psi4's driver)
        pass
    elif isinstance(mol, basestring):
        # called on a string representation of a psi4.Molecule (c-side through psi4.Dispersion)
        mol = Molecule(mol)
    else:
        raise ValidationError("""Argument mol must be psi4string or qcdb.Molecule""")
    mol.update_geometry()

    # Validate arguments
    if dertype is None:
        dertype = -1
    elif der0th.match(str(dertype)):
        dertype = 0
    elif der1st.match(str(dertype)):
        dertype = 1
    elif der2nd.match(str(dertype)):
        raise ValidationError("""Requested derivative level 'dertype' %s not valid for run_dftd3.""" % (dertype))
    else:
        raise ValidationError("""Requested derivative level 'dertype' %s not valid for run_dftd3.""" % (dertype))

    if dashlvl is not None:
        dashlvl = dashlvl.lower()
        dashlvl = get_dispersion_aliases()[dashlvl] if dashlvl in get_dispersion_aliases() else dashlvl
        if dashlvl not in dashcoeff.keys():
            raise ValidationError("""-D correction level %s is not available. Choose among %s.""" % (dashlvl, dashcoeff.keys()))
    else:
        raise ValidationError("""Must specify a dashlvl""")

    if func is not None:
        dftd3_params = dash_server(func, dashlvl)
    else:
        dftd3_params = {}

    if dashparam is not None:
        dftd3_params.update(dashparam)

    # Move ~/.dftd3par.<hostname> out of the way so it won't interfere
    defaultfile = os.path.expanduser('~') + '/.dftd3par.' + socket.gethostname()
    defmoved = False
    if os.path.isfile(defaultfile):
        os.rename(defaultfile, defaultfile + '_hide')
        defmoved = True

    # Find environment by merging PSIPATH and PATH environment variables
    lenv = {
        'PATH': ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) + \
                ':' + os.environ.get('PATH'),
        'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH')
        }
    #   Filter out None values as subprocess will fault on them
    lenv = {k: v for k, v in lenv.items() if v is not None}

    # Find out if running from Psi4 for scratch details and such
    # try:
    #     import psi4
    # except ImportError as err:
    #     isP4regime = False
    # else:
    #     isP4regime = True

    # Setup unique scratch directory and move in
    current_directory = os.getcwd()
    if isP4regime:
        psioh = core.IOManager.shared_object()
        psio = core.IO.shared_object()
        os.chdir(psioh.get_default_path())
        dftd3_tmpdir = 'psi.' + str(os.getpid()) + '.' + psio.get_default_namespace() + \
            '.dftd3.' + str(uuid.uuid4())[:8]
    else:
        dftd3_tmpdir = os.environ['HOME'] + os.sep + 'dftd3_' + str(uuid.uuid4())[:8]
    if not os.path.exists(dftd3_tmpdir):
        os.mkdir(dftd3_tmpdir)
    os.chdir(dftd3_tmpdir)

    # Write dftd3_parameters file that governs dispersion calc
    paramcontents = dftd3_coeff_formatter(dashlvl, dftd3_params)
    paramfile1 = 'dftd3_parameters'  # older patched name
    with open(paramfile1, 'w') as handle:
        handle.write(paramcontents)
    paramfile2 = '.dftd3par.local'  # new mainline name
    with open(paramfile2, 'w') as handle:
        handle.write(paramcontents)

    # Write dftd3_geometry file that supplies geometry to dispersion calc
    numAtoms = mol.natom()

    # dftd3 seems to have a problem with a single atom, so force the correct (zero) result
    if numAtoms == 1:
        os.chdir(current_directory)
        dashd = 0.0
        dashdderiv = core.Matrix(1, 3)

        if dertype == -1:
            return dashd, dashdderiv
        elif dertype == 0:
            return dashd
        elif dertype == 1:
            return dashdderiv


    geom = mol.save_string_xyz()
    reals = []
    for line in geom.splitlines():
        lline = line.split()
        if len(lline) != 4:
            continue
        if lline[0] == 'Gh':
            numAtoms -= 1
        else:
            reals.append(line)

    geomtext = str(numAtoms) + '\n\n'
    for line in reals:
        geomtext += line.strip() + '\n'
    geomfile = './dftd3_geometry.xyz'
    with open(geomfile, 'w') as handle:
        handle.write(geomtext)
    # TODO somehow the variations on save_string_xyz and
    #   whether natom and chgmult does or doesn't get written
    #   have gotten all tangled. I fear this doesn't work
    #   the same btwn libmints and qcdb or for ghosts

    # Call dftd3 program
    command = ['dftd3', geomfile]
    if dertype != 0:
        command.append('-grad')
    try:
        dashout = subprocess.Popen(command, stdout=subprocess.PIPE, env=lenv)
    except OSError as e:
        raise ValidationError('Program dftd3 not found in path. %s' % e)
    out, err = dashout.communicate()

    # Parse output (could go further and break into E6, E8, E10 and Cn coeff)
    success = False
    for line in out.splitlines():
        line = line.decode('utf-8')
        if re.match(' Edisp /kcal,au', line):
            sline = line.split()
            dashd = float(sline[3])
        if re.match(' normal termination of dftd3', line):
            success = True

    if not success:
        os.chdir(current_directory)
        raise Dftd3Error("""Unsuccessful run. Possibly -D variant not available in dftd3 version.""")

    # Parse grad output
    if dertype != 0:
        derivfile = './dftd3_gradient'
        dfile = open(derivfile, 'r')
        dashdderiv = []
        for line in geom.splitlines():
            lline = line.split()
            if len(lline) != 4:
                continue
            if lline[0] == 'Gh':
                dashdderiv.append([0.0, 0.0, 0.0])
            else:
                dashdderiv.append([float(x.replace('D', 'E')) for x in dfile.readline().split()])
        dfile.close()

        if len(dashdderiv) != mol.natom():
            raise ValidationError('Program dftd3 gradient file has %d atoms; %d expected.' % \
                (len(dashdderiv), mol.natom()))

    # Prepare results for Psi4
    if isP4regime and dertype != 0:
        core.set_variable('DISPERSION CORRECTION ENERGY', dashd)
        psi_dashdderiv = core.Matrix.from_list(dashdderiv)

    # Print program output to file if verbose
    if not verbose and isP4regime:
        verbose = True if core.get_option('SCF', 'PRINT') >= 3 else False
    if verbose:

        text = '\n  ==> DFTD3 Output <==\n'
        text += out.decode('utf-8')
        if dertype != 0:
            with open(derivfile, 'r') as handle:
                text += handle.read().replace('D', 'E')
            text += '\n'
        if isP4regime:
            core.print_out(text)
        else:
            print(text)

    # Clean up files and remove scratch directory
    os.unlink(paramfile1)
    os.unlink(paramfile2)
    os.unlink(geomfile)
    if dertype != 0:
        os.unlink(derivfile)
    if defmoved:
        os.rename(defaultfile + '_hide', defaultfile)

    os.chdir('..')
    try:
        shutil.rmtree(dftd3_tmpdir)
    except OSError as e:
        raise ValidationError('Unable to remove dftd3 temporary directory: %s' % e)
    os.chdir(current_directory)

    # return -D & d(-D)/dx
    if dertype == -1:
        return dashd, dashdderiv
    elif dertype == 0:
        return dashd
    elif dertype == 1:
        return psi_dashdderiv
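
# A self-contained sketch (not part of the function above) of the two fixups its
# gradient parser relies on: dftd3 writes Fortran-style 'D' exponents, which are
# mapped to Python 'E' exponents, and ghost atoms ('Gh') get an explicit zero row
# because dftd3 never sees them. The sample line below is illustrative, not real
# dftd3 output.

def _parse_dftd3_gradient_line(line):
    """Convert one dftd3_gradient line into a list of three floats."""
    return [float(x.replace('D', 'E')) for x in line.split()]

sample_real_atom = ' -0.12345678901234D-03   0.00000000000000D+00   0.98765432109876D-04'
ghost_atom_row = [0.0, 0.0, 0.0]  # appended verbatim for each 'Gh' atom

print(_parse_dftd3_gradient_line(sample_real_atom))  # [-0.00012345678901234, 0.0, 9.8765432109876e-05]
print(ghost_atom_row)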
Example #44
0
def run_dftd3(self,
              func=None,
              dashlvl=None,
              dashparam=None,
              dertype=None,
              verbose=False):
    """Function to call Grimme's dftd3 program (http://toc.uni-muenster.de/DFTD3/)
    to compute the -D correction of level *dashlvl* using parameters for
    the functional *func*. The dictionary *dashparam* can be used to supply
    a full set of dispersion parameters in the absence of *func* or to supply
    individual overrides in the presence of *func*. Returns energy if *dertype* is 0,
    gradient if *dertype* is 1, else tuple of energy and gradient if *dertype*
    unspecified. The dftd3 executable must be independently compiled and found in
    :envvar:`PATH` or :envvar:`PSIPATH`.
    *self* may be either a qcdb.Molecule (sensibly) or a psi4.Molecule
    (works b/c psi4.Molecule has been extended by this method py-side and
    only public interface fns used) or a string that can be instantiated
    into a qcdb.Molecule.

    func - functional alias or None
    dashlvl - dispersion correction level: d2gr/d3zero/d3bj/d3mzero/d3mbj
    dashparam - dictionary of dispersion parameters (overrides)
    dertype - derivative level

    """
    # Create (if necessary) and update qcdb.Molecule
    if isinstance(self, Molecule):
        # called on a qcdb.Molecule
        pass
    elif isinstance(self, core.Molecule):
        # called on a python export of a psi4.Molecule (py-side through Psi4's driver)
        self.create_psi4_string_from_molecule()
    elif isinstance(self, basestring):
        # called on a string representation of a psi4.Molecule (c-side through psi4.Dispersion)
        self = Molecule(self)
    else:
        raise ValidationError(
            """Argument mol must be psi4string or qcdb.Molecule""")
    self.update_geometry()

    # Validate arguments
    if dertype is None:
        dertype = -1
    elif der0th.match(str(dertype)):
        dertype = 0
    elif der1st.match(str(dertype)):
        dertype = 1
    elif der2nd.match(str(dertype)):
        raise ValidationError(
            'Requested derivative level \'dertype\' %s not valid for run_dftd3.'
            % (dertype))
    else:
        raise ValidationError(
            'Requested derivative level \'dertype\' %s not valid for run_dftd3.'
            % (dertype))

    if dashlvl is not None:
        dashlvl = dashlvl.lower()
        dashlvl = dash_alias['-' + dashlvl][1:] if (
            '-' + dashlvl) in dash_alias.keys() else dashlvl
        if dashlvl not in dashcoeff.keys():
            raise ValidationError(
                """-D correction level %s is not available. Choose among %s."""
                % (dashlvl, dashcoeff.keys()))
    else:
        raise ValidationError("""Must specify a dashlvl""")

    if func is not None:
        dftd3_params = dash_server(func, dashlvl)
    else:
        dftd3_params = {}

    if dashparam is not None:
        dftd3_params.update(dashparam)

    # Move ~/.dftd3par.<hostname> out of the way so it won't interfere
    defaultfile = os.path.expanduser(
        '~') + '/.dftd3par.' + socket.gethostname()
    defmoved = False
    if os.path.isfile(defaultfile):
        os.rename(defaultfile, defaultfile + '_hide')
        defmoved = True

    # Find environment by merging PSIPATH and PATH environment variables
    lenv = {
        'PATH': ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) + \
                ':' + os.environ.get('PATH'),
        'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH')
        }
    #   Filter out None values as subprocess will fault on them
    lenv = {k: v for k, v in lenv.items() if v is not None}

    # Find out if running from Psi4 for scratch details and such
    # try:
    #     import psi4
    # except ImportError as err:
    #     isP4regime = False
    # else:
    #     isP4regime = True

    # Setup unique scratch directory and move in
    current_directory = os.getcwd()
    if isP4regime:
        psioh = core.IOManager.shared_object()
        psio = core.IO.shared_object()
        os.chdir(psioh.get_default_path())
        dftd3_tmpdir = 'psi.' + str(os.getpid()) + '.' + psio.get_default_namespace() + \
            '.dftd3.' + str(random.randint(0, 99999))
    else:
        dftd3_tmpdir = os.environ['HOME'] + os.sep + 'dftd3_' + str(
            random.randint(0, 99999))
    if not os.path.exists(dftd3_tmpdir):
        os.mkdir(dftd3_tmpdir)
    os.chdir(dftd3_tmpdir)

    # Write dftd3_parameters file that governs dispersion calc
    paramcontents = dftd3_coeff_formatter(dashlvl, dftd3_params)
    paramfile1 = 'dftd3_parameters'  # older patched name
    with open(paramfile1, 'w') as handle:
        handle.write(paramcontents)
    paramfile2 = '.dftd3par.local'  # new mainline name
    with open(paramfile2, 'w') as handle:
        handle.write(paramcontents)

    # Write dftd3_geometry file that supplies geometry to dispersion calc
    numAtoms = self.natom()

    # dftd3 seems to have a problem with a single atom, so force the correct (zero) result
    if numAtoms == 1:
        os.chdir(current_directory)
        dashd = 0.0
        dashdderiv = core.Matrix(1, 3)

        if dertype == -1:
            return dashd, dashdderiv
        elif dertype == 0:
            return dashd
        elif dertype == 1:
            return dashdderiv

    geom = self.save_string_xyz()
    reals = []
    for line in geom.splitlines():
        lline = line.split()
        if len(lline) != 4:
            continue
        if lline[0] == 'Gh':
            numAtoms -= 1
        else:
            reals.append(line)

    geomtext = str(numAtoms) + '\n\n'
    for line in reals:
        geomtext += line.strip() + '\n'
    geomfile = './dftd3_geometry.xyz'
    with open(geomfile, 'w') as handle:
        handle.write(geomtext)
    # TODO somehow the variations on save_string_xyz and
    #   whether natom and chgmult does or doesn't get written
    #   have gotten all tangled. I fear this doesn't work
    #   the same btwn libmints and qcdb or for ghosts

    # Call dftd3 program
    command = ['dftd3', geomfile]
    if dertype != 0:
        command.append('-grad')
    try:
        dashout = subprocess.Popen(command, stdout=subprocess.PIPE, env=lenv)
    except OSError as e:
        raise ValidationError('Program dftd3 not found in path. %s' % e)
    out, err = dashout.communicate()

    # Parse output (could go further and break into E6, E8, E10 and Cn coeff)
    success = False
    for line in out.splitlines():
        line = line.decode('utf-8')
        if re.match(' Edisp /kcal,au', line):
            sline = line.split()
            dashd = float(sline[3])
        if re.match(' normal termination of dftd3', line):
            success = True

    if not success:
        os.chdir(current_directory)
        raise Dftd3Error(
            """Unsuccessful run. Possibly -D variant not available in dftd3 version."""
        )

    # Parse grad output
    if dertype != 0:
        derivfile = './dftd3_gradient'
        dfile = open(derivfile, 'r')
        dashdderiv = []
        for line in geom.splitlines():
            lline = line.split()
            if len(lline) != 4:
                continue
            if lline[0] == 'Gh':
                dashdderiv.append([0.0, 0.0, 0.0])
            else:
                dashdderiv.append([
                    float(x.replace('D', 'E'))
                    for x in dfile.readline().split()
                ])
        dfile.close()

        if len(dashdderiv) != self.natom():
            raise ValidationError('Program dftd3 gradient file has %d atoms; %d expected.' % \
                (len(dashdderiv), self.natom()))

    # Prepare results for Psi4
    if isP4regime and dertype != 0:
        core.set_variable('DISPERSION CORRECTION ENERGY', dashd)
        psi_dashdderiv = core.Matrix(self.natom(), 3)
        psi_dashdderiv.set(dashdderiv)

    # Print program output to file if verbose
    if not verbose and isP4regime:
        verbose = True if core.get_option('SCF', 'PRINT') >= 3 else False
    if verbose:

        text = '\n  ==> DFTD3 Output <==\n'
        text += out.decode('utf-8')
        if dertype != 0:
            with open(derivfile, 'r') as handle:
                text += handle.read().replace('D', 'E')
            text += '\n'
        if isP4regime:
            core.print_out(text)
        else:
            print(text)

    # Clean up files and remove scratch directory
    os.unlink(paramfile1)
    os.unlink(paramfile2)
    os.unlink(geomfile)
    if dertype != 0:
        os.unlink(derivfile)
    if defmoved:
        os.rename(defaultfile + '_hide', defaultfile)

    os.chdir('..')
    try:
        shutil.rmtree(dftd3_tmpdir)
    except OSError as e:
        raise ValidationError('Unable to remove dftd3 temporary directory: %s' % e)
    os.chdir(current_directory)

    # return -D & d(-D)/dx
    if dertype == -1:
        return dashd, dashdderiv
    elif dertype == 0:
        return dashd
    elif dertype == 1:
        return psi_dashdderiv
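
# A minimal, self-contained sketch of the environment merge both run_dftd3 variants
# perform before launching the dftd3 executable: PSIPATH entries (if any) are made
# absolute and prepended to PATH, and None-valued entries are dropped because
# subprocess rejects environments containing None.
import os


def _merged_subprocess_env():
    """Return a PATH/LD_LIBRARY_PATH environment suitable for subprocess."""
    psipath = [os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']
    lenv = {
        'PATH': ':'.join(psipath + [os.environ.get('PATH', '')]),
        'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH'),
    }
    return {k: v for k, v in lenv.items() if v is not None}


print(list(_merged_subprocess_env().keys()))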
Example #45
0
def mcscf_solver(ref_wfn):

    # Build CIWavefunction
    core.prepare_options_for_module("DETCI")
    ciwfn = core.CIWavefunction(ref_wfn)

    # Hush a lot of CI output
    ciwfn.set_print(0)

    # Begin with a normal two-step
    step_type = 'Initial CI'
    total_step = core.Matrix("Total step", ciwfn.get_dimension('OA'), ciwfn.get_dimension('AV'))
    start_orbs = ciwfn.get_orbitals("ROT").clone()
    ciwfn.set_orbitals("ROT", start_orbs)

    # Grab the options
    mcscf_orb_grad_conv = core.get_option("DETCI", "MCSCF_R_CONVERGENCE")
    mcscf_e_conv = core.get_option("DETCI", "MCSCF_E_CONVERGENCE")
    mcscf_max_macroiteration = core.get_option("DETCI", "MCSCF_MAXITER")
    mcscf_type = core.get_option("DETCI", "MCSCF_TYPE")
    mcscf_d_file = core.get_option("DETCI", "CI_FILE_START") + 3
    mcscf_nroots = core.get_option("DETCI", "NUM_ROOTS")
    mcscf_wavefunction_type = core.get_option("DETCI", "WFN")
    mcscf_ndet = ciwfn.ndet()
    mcscf_nuclear_energy = ciwfn.molecule().nuclear_repulsion_energy()
    mcscf_steplimit = core.get_option("DETCI", "MCSCF_MAX_ROT")
    mcscf_rotate = core.get_option("DETCI", "MCSCF_ROTATE")

    # DIIS info
    mcscf_diis_start = core.get_option("DETCI", "MCSCF_DIIS_START")
    mcscf_diis_freq = core.get_option("DETCI", "MCSCF_DIIS_FREQ")
    mcscf_diis_error_type = core.get_option("DETCI", "MCSCF_DIIS_ERROR_TYPE")
    mcscf_diis_max_vecs = core.get_option("DETCI", "MCSCF_DIIS_MAX_VECS")

    # One-step info
    mcscf_target_conv_type = core.get_option("DETCI", "MCSCF_ALGORITHM")
    mcscf_so_start_grad = core.get_option("DETCI", "MCSCF_SO_START_GRAD")
    mcscf_so_start_e = core.get_option("DETCI", "MCSCF_SO_START_E")
    mcscf_current_step_type = 'Initial CI'

    # Start with SCF energy and other params
    scf_energy = core.get_variable("HF TOTAL ENERGY")
    eold = scf_energy
    norb_iter = 1
    converged = False
    ah_step = False
    qc_step = False
    approx_integrals_only = True

    # Fake info to start with the initial diagonalization
    ediff = 1.e-4
    orb_grad_rms = 1.e-3

    # Grab needed objects
    diis_obj = solvers.DIIS(mcscf_diis_max_vecs)
    mcscf_obj = ciwfn.mcscf_object()

    # Execute the rotate command
    for rot in mcscf_rotate:
        if len(rot) != 4:
            raise p4util.PsiException("Each element of the MCSCF rotate command requires 4 arguements (irrep, orb1, orb2, theta).")

        irrep, orb1, orb2, theta = rot
        if irrep > ciwfn.Ca().nirrep():
            raise p4util.PsiException("MCSCF_ROTATE: Expression %s irrep number is larger than the number of irreps" %
                                    (str(rot)))

        if max(orb1, orb2) > ciwfn.Ca().coldim()[irrep]:
            raise p4util.PsiException("MCSCF_ROTATE: Expression %s orbital number exceeds number of orbitals in irrep" %
                                    (str(rot)))

        theta = np.deg2rad(theta)

        x = ciwfn.Ca().nph[irrep][:, orb1].copy()
        y = ciwfn.Ca().nph[irrep][:, orb2].copy()

        xp = np.cos(theta) * x - np.sin(theta) * y
        yp = np.sin(theta) * x + np.cos(theta) * y

        ciwfn.Ca().nph[irrep][:, orb1] = xp
        ciwfn.Ca().nph[irrep][:, orb2] = yp


    # Limited RAS functionality
    if core.get_local_option("DETCI", "WFN") == "RASSCF" and mcscf_target_conv_type != "TS":
        core.print_out("\n  Warning! Only the TS algorithm for RASSCF wavefunction is currently supported.\n")
        core.print_out("             Switching to the TS algorithm.\n\n")
        mcscf_target_conv_type = "TS"

    # Print out headers
    if mcscf_type == "CONV":
        mtype = "   @MCSCF"
        core.print_out("\n   ==> Starting MCSCF iterations <==\n\n")
        core.print_out("        Iter         Total Energy       Delta E   Orb RMS    CI RMS  NCI NORB\n")
    elif mcscf_type == "DF":
        mtype = "   @DF-MCSCF"
        core.print_out("\n   ==> Starting DF-MCSCF iterations <==\n\n")
        core.print_out("           Iter         Total Energy       Delta E   Orb RMS    CI RMS  NCI NORB\n")
    else:
        mtype = "   @AO-MCSCF"
        core.print_out("\n   ==> Starting AO-MCSCF iterations <==\n\n")
        core.print_out("           Iter         Total Energy       Delta E   Orb RMS    CI RMS  NCI NORB\n")

    # Iterate !
    for mcscf_iter in range(1, mcscf_max_macroiteration + 1):

        # Transform integrals, diagonalize H
        ciwfn.transform_mcscf_integrals(approx_integrals_only)
        nci_iter = ciwfn.diag_h(abs(ediff) * 1.e-2, orb_grad_rms * 1.e-3)

        # After the first diag we need to switch to READ
        ciwfn.set_ci_guess("DFILE")

        ciwfn.form_opdm()
        ciwfn.form_tpdm()
        ci_grad_rms = core.get_variable("DETCI AVG DVEC NORM")

        # Update MCSCF object
        Cocc = ciwfn.get_orbitals("DOCC")
        Cact = ciwfn.get_orbitals("ACT")
        Cvir = ciwfn.get_orbitals("VIR")
        opdm = ciwfn.get_opdm(-1, -1, "SUM", False)
        tpdm = ciwfn.get_tpdm("SUM", True)
        mcscf_obj.update(Cocc, Cact, Cvir, opdm, tpdm)

        current_energy = core.get_variable("MCSCF TOTAL ENERGY")

        orb_grad_rms = mcscf_obj.gradient_rms()
        ediff = current_energy - eold

        # Print iterations
        print_iteration(mtype, mcscf_iter, current_energy, ediff, orb_grad_rms, ci_grad_rms,
                        nci_iter, norb_iter, mcscf_current_step_type)
        eold = current_energy

        if mcscf_current_step_type == 'Initial CI':
            mcscf_current_step_type = 'TS'

        # Check convergence
        if (orb_grad_rms < mcscf_orb_grad_conv) and (abs(ediff) < abs(mcscf_e_conv)) and\
            (mcscf_iter > 3) and not qc_step:

            core.print_out("\n       %s has converged!\n\n" % mtype);
            converged = True
            break


        # Which orbital convergence are we doing?
        if ah_step:
            converged, norb_iter, step = ah_iteration(mcscf_obj, print_micro=False)
            norb_iter += 1

            if converged:
                mcscf_current_step_type = 'AH'
            else:
                core.print_out("      !Warning. Augmented Hessian did not converge. Taking an approx step.\n")
                step = mcscf_obj.approx_solve()
                mcscf_current_step_type = 'TS, AH failure'

        else:
            step = mcscf_obj.approx_solve()
            step_type = 'TS'

        maxstep = step.absmax()
        if maxstep > mcscf_steplimit:
            core.print_out('      Warning! Maxstep = %4.2f, scaling to %4.2f\n' % (maxstep, mcscf_steplimit))
            step.scale(mcscf_steplimit / maxstep)

        xstep = total_step.clone()
        total_step.add(step)

        # Do or add DIIS
        if (mcscf_iter >= mcscf_diis_start) and ("TS" in mcscf_current_step_type):

            # Figure out DIIS error vector
            if mcscf_diis_error_type == "GRAD":
                error = core.Matrix.triplet(ciwfn.get_orbitals("OA"),
                                            mcscf_obj.gradient(),
                                            ciwfn.get_orbitals("AV"),
                                            False, False, True)
            else:
                error = step

            diis_obj.add(total_step, error)

            if not (mcscf_iter % mcscf_diis_freq):
                total_step = diis_obj.extrapolate()
                mcscf_current_step_type = 'TS, DIIS'

        # Build the rotation by continuous updates
        if mcscf_iter == 1:
            totalU = mcscf_obj.form_rotation_matrix(total_step)
        else:
            xstep.axpy(-1.0, total_step)
            xstep.scale(-1.0)
            Ustep = mcscf_obj.form_rotation_matrix(xstep)
            totalU = core.Matrix.doublet(totalU, Ustep, False, False)

        # Build the rotation directly (not recommended)
        # orbs_mat = mcscf_obj.Ck(start_orbs, total_step)

        # Finally rotate and set orbitals
        orbs_mat = core.Matrix.doublet(start_orbs, totalU, False, False)
        ciwfn.set_orbitals("ROT", orbs_mat)

        # Figure out what the next step should be
        if (orb_grad_rms < mcscf_so_start_grad) and (abs(ediff) < abs(mcscf_so_start_e)) and\
                (mcscf_iter >= 2):

            if mcscf_target_conv_type == 'AH':
                approx_integrals_only = False
                ah_step = True
            elif mcscf_target_conv_type == 'OS':
                approx_integrals_only = False
                mcscf_current_step_type = 'OS, Prep'
                break
            else:
                continue
        #raise p4util.PsiException("")

    # If we converged do not do onestep
    if converged or (mcscf_target_conv_type != 'OS'):
        one_step_iters = []

    # If we are not converged load in Dvec and build iters array
    else:
        one_step_iters = range(mcscf_iter + 1, mcscf_max_macroiteration + 1)
        dvec = ciwfn.D_vector()
        dvec.init_io_files(True)
        dvec.read(0, 0)
        dvec.symnormalize(1.0, 0)

        ci_grad = ciwfn.new_civector(1, mcscf_d_file + 1, True, True)
        ci_grad.set_nvec(1)
        ci_grad.init_io_files(True)

    # Loop for onestep
    for mcscf_iter in one_step_iters:

        # Transform integrals and update the MCSCF object
        ciwfn.transform_mcscf_integrals(ciwfn.H(), False)
        ciwfn.form_opdm()
        ciwfn.form_tpdm()

        # Update MCSCF object
        Cocc = ciwfn.get_orbitals("DOCC")
        Cact = ciwfn.get_orbitals("ACT")
        Cvir = ciwfn.get_orbitals("VIR")
        opdm = ciwfn.get_opdm(-1, -1, "SUM", False)
        tpdm = ciwfn.get_tpdm("SUM", True)
        mcscf_obj.update(Cocc, Cact, Cvir, opdm, tpdm)

        orb_grad_rms = mcscf_obj.gradient_rms()

        # Warning! Does not work for SA-MCSCF
        current_energy = mcscf_obj.current_total_energy()
        current_energy += mcscf_nuclear_energy

        core.set_variable("CI ROOT %d TOTAL ENERGY" % 1, current_energy)
        core.set_variable("CURRENT ENERGY", current_energy)

        docc_energy = mcscf_obj.current_docc_energy()
        ci_energy = mcscf_obj.current_ci_energy()

        # Compute CI gradient
        ciwfn.sigma(dvec, ci_grad, 0, 0)
        ci_grad.scale(2.0, 0)
        ci_grad.axpy(-2.0 * ci_energy, dvec, 0, 0)

        ci_grad_rms = ci_grad.norm(0)
        orb_grad_rms = mcscf_obj.gradient().rms()

        ediff = current_energy - eold

        print_iteration(mtype, mcscf_iter, current_energy, ediff, orb_grad_rms, ci_grad_rms,
                        nci_iter, norb_iter, mcscf_current_step_type)
        mcscf_current_step_type = 'OS'

        eold = current_energy

        if (orb_grad_rms < mcscf_orb_grad_conv) and (abs(ediff) < abs(mcscf_e_conv)):

            core.print_out("\n       %s has converged!\n\n" % mtype);
            converged = True
            break

        # Take a step
        converged, norb_iter, nci_iter, step = qc_iteration(dvec, ci_grad, ciwfn, mcscf_obj)

        # Rotate integrals to new frame
        total_step.add(step)
        orbs_mat = mcscf_obj.Ck(ciwfn.get_orbitals("ROT"), step)
        ciwfn.set_orbitals("ROT", orbs_mat)


    core.print_out(mtype + " Final Energy: %20.15f\n" % current_energy)

    # Die if we did not converge
    if (not converged):
        if core.get_global_option("DIE_IF_NOT_CONVERGED"):
            raise p4util.PsiException("MCSCF: Iterations did not converge!")
        else:
            core.print_out("\nWarning! MCSCF iterations did not converge!\n\n")

    # Print out CI vector information
    if mcscf_target_conv_type == 'OS':
        dvec.close_io_files()
        ci_grad.close_io_files()

    # For orbital invariant methods we transform the orbitals to the natural or
    # semicanonical basis. Frozen doubly occupied and virtual orbitals are not
    # modified.
    if core.get_option("DETCI", "WFN") == "CASSCF":
        # Do we diagonalize the opdm?
        if core.get_option("DETCI", "NAT_ORBS"):
            ciwfn.ci_nat_orbs()
        else:
            ciwfn.semicanonical_orbs()

        # Retransform integrals and update CI coefficients, OPDM, and TPDM
        ciwfn.transform_mcscf_integrals(approx_integrals_only)
        nci_iter = ciwfn.diag_h(abs(ediff) * 1.e-2, orb_grad_rms * 1.e-3)

        ciwfn.set_ci_guess("DFILE")

        ciwfn.form_opdm()
        ciwfn.form_tpdm()

    proc_util.print_ci_results(ciwfn, "MCSCF", scf_energy, current_energy, print_opdm_no=True)

    # Set final energy
    core.set_variable("CURRENT ENERGY", core.get_variable("MCSCF TOTAL ENERGY"))

    # What do we need to cleanup?
    if core.get_option("DETCI", "MCSCF_CI_CLEANUP"):
        ciwfn.cleanup_ci()
    if core.get_option("DETCI", "MCSCF_DPD_CLEANUP"):
        ciwfn.cleanup_dpd()

    del diis_obj
    del mcscf_obj
    return ciwfn
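
# A small numpy sketch of the MCSCF_ROTATE mixing applied inside mcscf_solver: a pair
# of orbital columns is rotated into each other by an angle given in degrees, which
# preserves orthonormality. The 4x4 identity "coefficient matrix" below is a toy
# stand-in for ciwfn.Ca().
import numpy as np


def rotate_orbital_pair(C, orb1, orb2, theta_deg):
    """Return a copy of C with columns orb1 and orb2 mixed by theta_deg degrees."""
    theta = np.deg2rad(theta_deg)
    Cnew = C.copy()
    x = C[:, orb1].copy()
    y = C[:, orb2].copy()
    Cnew[:, orb1] = np.cos(theta) * x - np.sin(theta) * y
    Cnew[:, orb2] = np.sin(theta) * x + np.cos(theta) * y
    return Cnew


C_rot = rotate_orbital_pair(np.eye(4), 0, 1, 30.0)
print(np.allclose(C_rot.T @ C_rot, np.eye(4)))  # True: the rotation is unitary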
Example #46
0
def print_sapt_dft_summary(data, name, short=False):

    ret = "   %s Results\n" % name
    ret += "  " + "-" * 97 + "\n"

    # Elst
    ret += print_sapt_var("Electrostatics", data["Elst10,r"]) + "\n"
    ret += print_sapt_var("  Elst1,r", data["Elst10,r"]) + "\n"
    ret += "\n"
    core.set_variable("SAPT ELST ENERGY", data["Elst10,r"])

    # Exchange
    ret += print_sapt_var("Exchange", data["Exch10"]) + "\n"
    ret += print_sapt_var("  Exch1", data["Exch10"]) + "\n"
    ret += print_sapt_var("  Exch1(S^2)", data["Exch10(S^2)"]) + "\n"
    ret += "\n"
    core.set_variable("SAPT EXCH ENERGY", data["Exch10"])

    # Induction
    ind = data["Ind20,r"] + data["Exch-Ind20,r"]
    ind_ab = data["Ind20,r (A<-B)"] + data["Exch-Ind20,r (A<-B)"]
    ind_ba = data["Ind20,r (A->B)"] + data["Exch-Ind20,r (A->B)"]

    if "Delta HF Correction" in list(data):
        ind += data["Delta HF Correction"]

    ret += print_sapt_var("Induction", ind) + "\n"

    ret += print_sapt_var("  Ind2,r", data["Ind20,r"]) + "\n"
    ret += print_sapt_var("  Exch-Ind2,r", data["Exch-Ind20,r"]) + "\n"
    ret += print_sapt_var("  Induction (A<-B)", ind_ab) + "\n"
    ret += print_sapt_var("  Induction (A->B)", ind_ba) + "\n"

    if "Delta HF Correction" in list(data):
        ret += print_sapt_var("  delta HF,r (2)",
                              data["Delta HF Correction"]) + "\n"

    ret += "\n"
    core.set_variable("SAPT IND ENERGY", ind)

    # Dispersion
    disp = data["Disp20"] + data["Exch-Disp20,u"]
    ret += print_sapt_var("Dispersion", disp) + "\n"
    ret += print_sapt_var("  Disp2,r", data["Disp20"]) + "\n"
    ret += print_sapt_var("  Disp2,u", data["Disp20,u"]) + "\n"
    ret += print_sapt_var("  Exch-Disp2,u", data["Exch-Disp20,u"]) + "\n"
    ret += "\n"
    core.set_variable("SAPT DISP ENERGY", disp)

    # Total energy
    total = data["Elst10,r"] + data["Exch10"] + ind + disp
    ret += print_sapt_var("Total %-15s" % name, total,
                          start_spacer="   ") + "\n"
    core.set_variable("SAPT(DFT) TOTAL ENERGY", total)
    core.set_variable("SAPT TOTAL ENERGY", total)
    core.set_variable("CURRENT ENERGY", total)

    ret += "  " + "-" * 97 + "\n"
    return ret
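
# A minimal sketch of how print_sapt_dft_summary assembles the SAPT(DFT) total from its
# pieces: induction bundles Ind20,r + Exch-Ind20,r (plus the delta HF correction when it
# is present) and dispersion bundles Disp20 + Exch-Disp20,u. The numbers below are
# placeholders in Hartree, not results from any calculation.

def sapt_dft_total(data):
    """Sum electrostatics, exchange, induction, and dispersion components."""
    ind = data["Ind20,r"] + data["Exch-Ind20,r"] + data.get("Delta HF Correction", 0.0)
    disp = data["Disp20"] + data["Exch-Disp20,u"]
    return data["Elst10,r"] + data["Exch10"] + ind + disp


example = {
    "Elst10,r": -0.010, "Exch10": 0.008,
    "Ind20,r": -0.003, "Exch-Ind20,r": 0.001,
    "Disp20": -0.004, "Exch-Disp20,u": 0.0005,
}
print(sapt_dft_total(example))  # approximately -0.0075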