def test_wavefunction(settings, atomic_input):
    atomic_input_dict = atomic_input.dict()
    atomic_input_dict.pop("protocols", None)
    with_wf = AtomicInput(**atomic_input_dict,
                          protocols={"wavefunction": "all"})
    with TCPBClient(settings["tcpb_host"], settings["tcpb_port"]) as TC:
        result = TC.compute(with_wf)

    # Restricted: only alpha eigenvalues/occupations should be present
    assert result.wavefunction is not None
    assert isinstance(result.wavefunction.scf_eigenvalues_a, ndarray)
    assert isinstance(result.wavefunction.scf_occupations_a, ndarray)
    assert result.wavefunction.scf_eigenvalues_b is None
    assert result.wavefunction.scf_occupations_b is None

    atomic_input_dict["keywords"]["restricted"] = False
    with_wf = AtomicInput(**atomic_input_dict,
                          protocols={"wavefunction": "all"})
    with TCPBClient(settings["tcpb_host"], settings["tcpb_port"]) as TC:
        result = TC.compute(with_wf)

    # B occupations since restricted=False
    assert result.wavefunction is not None
    assert isinstance(result.wavefunction.scf_eigenvalues_a, ndarray)
    assert isinstance(result.wavefunction.scf_occupations_a, ndarray)
    assert isinstance(result.wavefunction.scf_eigenvalues_b, ndarray)
    assert isinstance(result.wavefunction.scf_occupations_b, ndarray)
Example #2
    def compute(self, input_data: AtomicInput,
                config: "TaskConfig") -> AtomicResult:
        # Get the error correction configuration
        error_policy = input_data.protocols.error_correction

        # Create a local copy of the input data
        local_input_data = input_data

        # Run the method and, if it fails, assess if the failure is restartable
        observed_errors = {}  # Errors that have been observed previously
        while True:
            try:
                result = self._compute(local_input_data, config)
                break
            except KnownErrorException as e:
                logger.info(f"Caught a {type(e)} error.")

                # Determine whether this specific type of error is allowed
                correction_allowed = error_policy.allows(e.error_name)
                if not correction_allowed:
                    logger.info(
                        f'Error correction for "{e.error_name}" is not allowed'
                    )
                    raise e
                logger.info(
                    f'Error correction for "{e.error_name}" is allowed')

                # Check if it has run before
                # TODO (wardlt): Should we allow errors to be run >1 time?
                previously_run = e.error_name in observed_errors
                if previously_run:
                    logger.info(
                        "Error has been observed before and mitigation did not fix the issue. Raising exception"
                    )
                    raise e

                # Generate and apply the updated keywords
                keyword_updates = e.create_keyword_update(local_input_data)
                new_keywords = local_input_data.keywords.copy()
                new_keywords.update(keyword_updates)
                local_input_data = AtomicInput(**local_input_data.dict(
                    exclude={"keywords"}),
                                               keywords=new_keywords)

                # Store the error details and mitigations employed
                observed_errors[e.error_name] = {
                    "details": e.details,
                    "keyword_updates": keyword_updates
                }

        # Add the errors observed and corrected for, if any
        if len(observed_errors) > 0:
            result.extras["observed_errors"] = observed_errors
        return result
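
The retry loop above relies on a small contract from KnownErrorException: an error_name, a details payload, and a create_keyword_update() hook that proposes replacement keywords. A minimal sketch of such a subclass, assuming only that contract; the class name and the specific keywords it adjusts are hypothetical:

class SCFConvergenceError(KnownErrorException):
    """Hypothetical known error: SCF failed to converge."""

    error_name = "scf_convergence"
    details = {"reason": "SCF did not converge within the iteration limit"}

    def create_keyword_update(self, input_data: AtomicInput) -> dict:
        # Propose a mitigation by doubling the iteration limit; the real
        # keyword names depend on the QC program being wrapped.
        current_maxiter = input_data.keywords.get("maxiter", 100)
        return {"maxiter": 2 * current_maxiter}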
Example #3
def test_openmm_gaff_keywords(gaff_settings):
    """
    Test the different running settings with gaff.
    """
    program = "openmm"
    water = qcng.get_molecule("water")

    water_dict = water.dict()
    # add water cmiles to the molecule
    water_dict["extras"] = {
        "cmiles": {
            "canonical_isomeric_explicit_hydrogen_mapped_smiles":
            "[H:2][O:1][H:3]"
        }
    }

    molecule = Molecule.from_data(water_dict)
    keywords, error, expected_result = gaff_settings
    model = {"method": "gaff-2.1", "basis": "antechamber"}
    inp = AtomicInput(molecule=molecule,
                      driver="energy",
                      model=model,
                      keywords=keywords)
    if error is not None:
        with pytest.raises(error):
            _ = qcng.compute(inp, program, raise_error=True)
    else:
        ret = qcng.compute(inp, program, raise_error=False)
        assert ret.success is True
        assert ret.return_result == pytest.approx(expected_result, rel=1e-6)
Example #4
def run_psi4(name: str, molecule: "Molecule", options: "Keywords",
             **kwargs) -> Dict:

    local_options = kwargs.get("local_options", None)
    mode_options = get_mode_config(mode_options=kwargs.get("mode_options"))

    resi = AtomicInput(
        **{
            "driver": inspect.stack()[1][3],
            "extras": {
                "qcdb:options": copy.deepcopy(options),
                "qcdb:mode_config": mode_options,
            },
            "model": {
                "method": name,
                "basis": "(auto)",
            },
            "molecule": molecule.to_schema(dtype=2),
            "provenance": provenance_stamp(__name__),
        })

    jobrec = qcng.compute(resi,
                          "qcdb-psi4",
                          local_options=local_options,
                          raise_error=True).dict()
    hold_qcvars = jobrec["extras"].pop("qcdb:qcvars")
    jobrec["qcvars"] = {
        key: qcel.Datum(**dval)
        for key, dval in hold_qcvars.items()
    }

    return jobrec
Example #5
def test_compute_gradient(program, model, keywords):
    if not has_program(program):
        pytest.skip("Program '{}' not found.".format(program))

    molecule = _get_molecule(program)

    inp = AtomicInput(molecule=molecule,
                      driver="gradient",
                      model=model,
                      extras={"mytag": "something"},
                      keywords=keywords)
    if program in ["adcc", "mrchem"]:
        with pytest.raises(qcng.exceptions.InputError) as e:
            qcng.compute(inp, program, raise_error=True)

        assert "Driver gradient not implemented" in str(e.value)

    else:
        ret = qcng.compute(inp, program, raise_error=True)

        assert ret.success is True
        assert isinstance(ret.return_result, np.ndarray)
        assert len(ret.return_result.shape) == 2
        assert ret.return_result.shape[1] == 3
        assert "mytag" in ret.extras, ret.extras
def run_single_point(xyz: str,
                     driver: DriverEnum,
                     qc_config: QCInputSpecification,
                     charge: int = 0,
                     compute_config: Optional[Union[TaskConfig, Dict]] = None,
                     code: str = _code) -> AtomicResult:
    """Run a single point calculation

    Args:
        xyz: Structure in XYZ format
        driver: What type of property to compute: energy, gradient, hessian
        qc_config (QCInputSpecification): Quantum Chemistry configuration used for evaluating the energy
        charge (int): Charge of the molecule
        compute_config (TaskConfig): Configuration for the quantum chemistry code, such as parallelization settings
        code (str): Which QC code to use for the evaluation
    Returns:
        Result of the computation in QCElemental AtomicResult format
    """

    # Parse the molecule
    mol = Molecule.from_data(xyz, dtype="xyz", molecular_charge=charge)

    # Run the computation
    input_spec = AtomicInput(molecule=mol,
                             driver=driver,
                             **qc_config.dict(exclude={'driver'}))
    return compute(input_spec,
                   code,
                   local_options=compute_config,
                   raise_error=True)
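
A brief usage sketch for run_single_point; the water geometry, method, and basis are illustrative, and "psi4" stands in for any QC code installed alongside QCEngine:

water_xyz = """3
water
O  0.000  0.000  0.117
H  0.000  0.757 -0.470
H  0.000 -0.757 -0.470"""

spec = QCInputSpecification(driver="energy",
                            model={"method": "b3lyp", "basis": "6-31g"})
# the driver argument below overrides the one carried by the specification
result = run_single_point(water_xyz, DriverEnum.gradient, spec, code="psi4")
print(result.return_result)  # (natom, 3) gradient in atomic units
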
def compute_reference_energy(element: str,
                             qc_config: QCInputSpecification,
                             n_open: int,
                             code: str = _code) -> float:
    """Compute the energy of an isolated atom in vacuum

    Args:
        element (str): Symbol of the element
        qc_config (QCInputSpecification): Quantum Chemistry configuration used for evaluating the energy
        n_open (int): Number of open atomic orbitals
        code (str): Which QC code to use for the evaluation
    Returns:
        (float): Energy of the isolated atom
    """

    # Make the molecule
    xyz = f'1\n{element}\n{element} 0 0 0'
    mol = Molecule.from_data(xyz,
                             dtype='xyz',
                             molecular_multiplicity=n_open,
                             molecular_charge=0)

    # Run the atomization energy calculation
    input_spec = AtomicInput(molecule=mol,
                             driver='energy',
                             **qc_config.dict(exclude={'driver'}))
    result = compute(input_spec, code, raise_error=True)

    return result.return_result
Example #8
def test_gaussian_solvent_template(tmpdir, water):
    """
    Make sure that the template rendered with solvent settings matches what we expect.
    """
    with tmpdir.as_cwd():
        # get the charge method and implicit solvent engine
        charge_engine = DDECCharges()
        solvent_settings = charge_engine._get_calculation_settings()
        # now make an atomic input for the harness
        task = AtomicInput(
            molecule=water.to_qcschema(),
            driver="energy",
            model={
                "method": "b3lyp-d3bj",
                "basis": "6-311G"
            },
            keywords=solvent_settings,
        )
        # we need the harness as this will render the template
        gaussian_harness = GaussianHarness()
        config = get_config(task_config={"ncores": 1, "memory": 1})
        job_inputs = gaussian_harness.build_input(task, config)
        # make sure the job file matches our expected reference
        with open(get_data("gaussian_solvent_example.com")) as g_out:
            assert g_out.read() == job_inputs["infiles"]["gaussian.com"]
Example #9
def test_openmm_cmiles_gradient_nomatch():
    program = "openmm"

    water = qcng.get_molecule("water")

    water_dict = water.dict()
    # add ethane cmiles to the molecule
    water_dict["extras"] = {
        "cmiles": {
            "canonical_isomeric_explicit_hydrogen_mapped_smiles":
            "[H:3][C:1]([H:4])([H:5])[C:2]([H:6])([H:7])[H:8]"
        }
    }

    molecule = Molecule.from_data(water_dict)

    model = {"method": "openff-1.0.0", "basis": "smirnoff"}

    inp = AtomicInput(molecule=molecule, driver="gradient", model=model)
    ret = qcng.compute(inp, program, raise_error=False)

    # if we correctly find the cmiles this should fail as the molecule and cmiles are different
    assert ret.success is False
    assert (
        "molecule.add_conformer given input of the wrong shape: Given (3, 3), expected (8, 3)"
        in ret.error.error_message)
    def compute_gradient(self,
                         molecule: core.Molecule,
                         wfn: core.Wavefunction = None) -> core.Matrix:
        """Compute dispersion gradient based on engine, dispersion level, and parameters in `self`.

        Parameters
        ----------
        molecule
            System for which to compute empirical dispersion correction.
        wfn
            Location to set QCVariables

        Returns
        -------
        Matrix
            (nat, 3) dispersion gradient [Eh/a0].

        """
        if self.engine in ['dftd3', 'mp2d']:
            resi = AtomicInput(
                **{
                    'driver': 'gradient',
                    'model': {
                        'method': self.fctldash,
                        'basis': '(auto)',
                    },
                    'keywords': {
                        'level_hint': self.dashlevel,
                        'params_tweaks': self.dashparams,
                        'dashcoeff_supplement': self.dashcoeff_supplement,
                        'verbose': 1,
                    },
                    'molecule': molecule.to_schema(dtype=2),
                    'provenance': p4util.provenance_stamp(__name__),
                })
            jobrec = qcng.compute(
                resi,
                self.engine,
                raise_error=True,
                local_options={"scratch_directory": core.IOManager.shared_object().get_default_path()})

            dashd_part = core.Matrix.from_array(jobrec.extras['qcvars']['DISPERSION CORRECTION GRADIENT'])
            if wfn is not None:
                for k, qca in jobrec.extras['qcvars'].items():
                    if "CURRENT" not in k:
                        wfn.set_variable(k, float(qca) if isinstance(qca, str) else qca)

            if self.fctldash in ['hf3c', 'pbeh3c']:
                jobrec = qcng.compute(
                    resi,
                    "gcp",
                    raise_error=True,
                    local_options={"scratch_directory": core.IOManager.shared_object().get_default_path()})
                gcp_part = core.Matrix.from_array(jobrec.return_result)
                dashd_part.add(gcp_part)

            return dashd_part
        else:
            return self.disp.compute_gradient(molecule)
def test_local_options_memory_gib(program, model, keywords, memory_trickery, request):
    """Ensure memory handling implemented in harness (if applicable).

    For available harnesses, run minimal calc at specific total node memory, both through runtime
      config alone and with clashing (and non-QCEngine-like) keyword spec. Check memory quantity
      shows up in ``TaskConfig``.
    For ``managed-memory``-active harnesses, check that memory registers in output.

    New Harness Instructions
    ------------------------
    * Make sure minimal calc is in _canonical_methods above.
    * If ``managed_memory=True`` in harness, add regex to ``stdout_ref`` below to check that memory
      is specifiable.
    * If this test doesn't work, implement or adjust ``config.memory`` in your harness.

    """
    if not has_program(program):
        pytest.skip(f"Program '{program}' not found.")

    harness = qcng.get_program(program)
    molecule = _get_molecule(program, model["method"])

    addl_keywords = memory_trickery.get(program, memory_trickery)
    use_keywords = {**keywords, **addl_keywords}

    #  <<  Config

    config = qcng.config.get_config(
        hostname="something",
        task_config={
            "ncores": 1,
            "nnodes": 1,
            "memory": 1.555,
        },
    )

    #  <<  Run

    inp = AtomicInput(molecule=molecule, driver="energy", model=model, keywords=use_keywords)
    ret = qcng.compute(inp, program, raise_error=True, local_options=config.dict())
    pprint.pprint(ret.dict(), width=200)
    assert ret.success is True

    #  <<  Reference

    stdout_ref = {  # 1.555 GiB = 208708567 quad-words
        "cfour": "Allocated    1592 MB of main memory",
        "gamess": "208000000 WORDS OF MEMORY AVAILABLE",
        "nwchem": r"total    =  2087085\d\d doubles =   1592.3 Mbytes",  # doubles is quad-words. Mbytes is MiB
        "psi4": "1592 MiB Core",
    }

    #  <<  Test

    assert config.ncores == 1
    assert pytest.approx(config.memory, 0.1) == 1.555

    if harness._defaults["managed_memory"] is True:
        assert re.search(stdout_ref[program], ret.stdout), f"Memory pattern not found: {stdout_ref[program]}"
Example #12
    def generate_schema_input(self, driver):

        molecule = Molecule(**self.molecule)
        inp = AtomicInput(molecule=molecule,
                          model=self.model,
                          keywords=self.keywords,
                          driver=driver)

        return inp
def test_local_options_ncores(program, model, keywords, ncores):
    """Ensure multithreading implemented in harness (if applicable) or multithreaded runs don't
    break harness (if inapplicable).

    For available harnesses, run minimal calc with single and multiple cores; check ncores count
      shows up in ``TaskConfig``.
    For ``thread_parallel``-active harnesses, check ncores count registers in output.

    New Harness Instructions
    ------------------------
    * Make sure minimal calc is in _canonical_methods above.
    * If ``thread_parallel=True`` in harness, add regex to ``stdout_ref`` below to check ncores the
      program sees.
    * If this test doesn't work, implement or adjust ``config.ncores`` in your harness.

    """
    if not has_program(program):
        pytest.skip(f"Program '{program}' not found.")

    harness = qcng.get_program(program)
    molecule = _get_molecule(program, model["method"])

    #  <<  Config

    config = qcng.config.get_config(
        hostname="something",
        task_config={
            "ncores": ncores,
            "nnodes": 1,
        },
    )

    #  <<  Run

    inp = AtomicInput(molecule=molecule, driver="energy", model=model, keywords=keywords)
    ret = qcng.compute(inp, program, raise_error=True, local_options=config.dict())
    pprint.pprint(ret.dict(), width=200)
    assert ret.success is True

    #  <<  Reference

    stdout_ref = {
        "cfour": rf"Running with {ncores} threads/proc",
        "gamess": rf"MEMDDI DISTRIBUTED OVER\s+{ncores} PROCESSORS",
        # "gamess": rf"PARALLEL VERSION RUNNING ON\s+{ncores} PROCESSORS IN\s+1 NODES",  # no line for serial
        # nwchem is node_parallel only
        "psi4": rf"Threads:\s+{ncores}",
    }

    #  <<  Test

    assert config.ncores == ncores
    assert config.nnodes == 1

    if harness._defaults["thread_parallel"] is True:
        assert re.search(stdout_ref[program], ret.stdout), f"Thread pattern not found: {stdout_ref[program]}"
Example #14
    def qcdb_build_input(self,
                         input_model: AtomicInput,
                         config: "JobConfig",
                         template: Optional[str] = None) -> Dict[str, Any]:
        input_data = input_model.dict()

        ropts = input_model.extras["qcdb:options"]
        mode_config = input_model.extras["qcdb:mode_config"]

        ropts.require("QCDB",
                      "MEMORY",
                      f"{config.memory} gib",
                      accession="00000000",
                      verbose=False)

        muster_inherited_keywords(ropts, mode_config)
        mtd = input_data["model"]["method"]
        mtd = mtd[3:] if mtd.startswith("p4-") else mtd
        input_data["model"]["method"] = mtd

        # should we put this memory in the JobConfig object? I don't think the units agree
        ropts.scroll["QCDB"].pop("MEMORY")
        # print(config.memory, '!!')
        # config.memory = omem.value #???
        # print(config.memory, '!!')

        input_data["extras"] = {"wfn_qcvars_only": True}
        # input_data['kwargs'] = jobrec['kwargs']
        # input_data['return_output'] = True

        # print("Touched Keywords")  # debug
        # print(ropts.print_changed(history=True))  # debug

        popts = {}
        function_kwargs = {}
        # was recently active
        # for k, v in ropts.scroll["QCDB"].items():
        #     if v.disputed():
        #         popts[k] = v.value

        for k, v in ropts.scroll["PSI4"].items():
            if v.disputed2():
                if k.startswith("FUNCTION_KWARGS_"):
                    function_kwargs[k[16:]] = v.value
                else:
                    popts[k] = v.value
        input_data["keywords"] = popts
        input_data["keywords"]["function_kwargs"] = function_kwargs

        # print("Collected Keywords")  # debug
        # pp.pprint(popts)  # debug

        if "BASIS" in input_data["keywords"]:
            input_data["model"]["basis"] = input_data["keywords"]["BASIS"]

        return input_data
Example #15
def test_run_psi4(tmp_path):
    """Tests qcengine run with psi4 and JSON input"""

    def check_result(stdout):
        output = json.loads(stdout)
        assert output["provenance"]["creator"].lower() == "psi4"
        assert output["success"] is True

    inp = AtomicInput(molecule=get_molecule("hydrogen"), driver="energy", model={"method": "hf", "basis": "6-31G"})

    args = ["run", "psi4", inp.json()]
    check_result(run_qcengine_cli(args))

    args = ["run", "psi4", os.path.join(tmp_path, "input.json")]
    with util.disk_files({"input.json": inp.json()}, {}, cwd=tmp_path):
        check_result(run_qcengine_cli(args))

    args = ["run", "psi4", "-"]
    check_result(run_qcengine_cli(args, stdin=inp.json()))
def test_compute_energy(program, model, keywords):
    if not has_program(program):
        pytest.skip(f"Program '{program}' not found.")

    molecule = _get_molecule(program)

    inp = AtomicInput(molecule=molecule, driver="energy", model=model, keywords=keywords)
    ret = qcng.compute(inp, program, raise_error=True)
    assert ret.success is True
    assert isinstance(ret.return_result, float)
def test_compute_bad_models(program, model):
    if not has_program(program):
        pytest.skip("Program '{}' not found.".format(program))

    adriver = model.pop("driver", "energy")
    amodel = model
    inp = AtomicInput(molecule=qcng.get_molecule("hydrogen"), driver=adriver, model=amodel)

    with pytest.raises(qcng.exceptions.InputError) as exc:
        ret = qcng.compute(inp, program, raise_error=True)
def test_compute_energy(program, model):
    if not has_program(program):
        pytest.skip("Program '{}' not found.".format(program))

    inp = AtomicInput(molecule=qcng.get_molecule("hydrogen"),
                      driver="energy",
                      model=model)
    ret = qcng.compute(inp, program, raise_error=True)

    assert ret.success is True
    assert isinstance(ret.return_result, float)
def test_compute_energy_qcsk_basis(program, model, keywords):
    if not has_program(program):
        pytest.skip("Program '{}' not found.".format(program))

    molecule = _get_molecule(program)
    inp = AtomicInput(molecule=molecule, driver="energy", model=model, keywords=keywords)

    with pytest.raises(qcng.exceptions.InputError) as e:
        qcng.compute(inp, program, raise_error=True)

    assert "QCSchema BasisSet for model.basis not implemented" in str(e.value)
Example #20
    def parse_output(self, outfiles: Dict[str, str],
                     input_model: AtomicInput) -> AtomicResult:
        """
        For the set of output files parse them to extract as much info as possible and return the atomic result.
        From the fchk file we get the energy and hessian, the gradient is taken from the log file.
        """
        properties = {}
        qcvars = {}
        # make sure we got valid exit status
        self.check_convergence(logfile=outfiles["gaussian.log"])
        version = self.parse_version(logfile=outfiles["gaussian.log"])
        # build the main data dict
        output_data = input_model.dict()
        provenance = {
            "version": version,
            "creator": "gaussian",
            "routine": "CLI"
        }
        # collect the total energy from the fchk file
        fchk = outfiles["lig.fchk"]
        for line in fchk.split("\n"):
            if "Total Energy" in line:
                energy = float(line.split()[3])
                properties["return_energy"] = energy
                properties["scf_total_energy"] = energy
                if input_model.driver == "energy":
                    output_data["return_result"] = energy
        if input_model.driver == "gradient":
            # now we need to parse out the forces
            gradient = self.parse_gradient(fchfile=outfiles["lig.fchk"])
            output_data["return_result"] = gradient
        elif input_model.driver == "hessian":
            hessian = self.parse_hessian(fchkfile=outfiles["lig.fchk"])
            output_data["return_result"] = hessian

        # parse scf_properties
        if "scf_properties" in input_model.keywords:
            qcvars["WIBERG_LOWDIN_INDICES"] = self.parse_wbo(
                logfile=outfiles["gaussian.log"],
                natoms=len(input_model.molecule.symbols),
            )

        # if there is an extra output file grab it
        if "gaussian.wfx" in outfiles:
            output_data["extras"]["gaussian.wfx"] = outfiles["gaussian.wfx"]
        if qcvars:
            output_data["extras"]["qcvars"] = qcvars
        output_data["properties"] = properties
        output_data["schema_name"] = "qcschema_output"
        output_data["stdout"] = outfiles["gaussian.log"]
        output_data["success"] = True
        output_data["provenance"] = provenance
        return AtomicResult(**output_data)
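
A sketch of how this parser might be driven once a Gaussian job has finished; the outfiles keys mirror the ones read above ("gaussian.log", "lig.fchk"), while gaussian_harness and task are assumed to be the harness and AtomicInput built as in the solvent-template example:

with open("gaussian.log") as log, open("lig.fchk") as fchk:
    outfiles = {"gaussian.log": log.read(), "lig.fchk": fchk.read()}
result = gaussian_harness.parse_output(outfiles, input_model=task)
print(result.return_result)  # energy, gradient, or hessian depending on the driver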
Example #21
    def build_input(
        self,
        input_model: AtomicInput,
        config: "TaskConfig",
        template: Optional[str] = None,
    ) -> Dict[str, Any]:
        """
        Use the template files stored in QUBEKit to build a gaussian input file for the given driver.
        """
        import os

        from jinja2 import Template

        template_file = get_data(os.path.join("templates", "gaussian.com"))
        with open(template_file) as file:
            template = Template(file.read())

        template_data = {
            "memory": int(config.memory),
            "threads": config.ncores,
            "driver": self.driver_conversion(driver=input_model.driver),
            "title": "gaussian job",
        }
        molecule = input_model.molecule
        spec = input_model.model
        theory = self.functional_converter(method=spec.method)
        template_data["theory"] = theory
        template_data["basis"] = spec.basis
        template_data["charge"] = int(molecule.molecular_charge)
        template_data["multiplicity"] = molecule.molecular_multiplicity
        template_data["scf_maxiter"] = input_model.extras.get("maxiter", 300)
        # work around for extra cmdline args
        template_data["cmdline_extra"] = input_model.keywords.get(
            "cmdline_extra", [])
        # work around for extra trailing input
        template_data["add_input"] = input_model.keywords.get("add_input", [])
        template_data.update(input_model.keywords)
        # now we need to build the coords data
        data = []
        for i, symbol in enumerate(molecule.symbols):
            # we must convert the atomic input back to angstroms
            data.append(
                (symbol, molecule.geometry[i] * constants.BOHR_TO_ANGS))
        template_data["data"] = data

        rendered_template = template.render(**template_data)
        return {
            "infiles": {
                "gaussian.com": rendered_template
            },
            "scratch_directory": config.scratch_directory,
            "input_result": input_model.copy(deep=True),
        }
Example #22
    def parse_output(
        self, outfiles: Dict[str, str], input_model: AtomicInput
    ) -> AtomicResult:  # lgtm: [py/similar-function]

        stdout = outfiles.pop("stdout")
        stderr = outfiles.pop("stderr")

        # c4mol, if it exists, is dinky, just a clue to geometry of cfour results
        try:
            qcvars, c4hess, c4grad, c4mol, version, errorTMP = harvest(input_model.molecule, stdout, **outfiles)
        except Exception as e:
            raise UnknownError(stdout)

        if c4grad is not None:
            qcvars["CURRENT GRADIENT"] = c4grad
            qcvars[f"{input_model.model.method.upper()[3:]} TOTAL GRADIENT"] = c4grad

        if c4hess is not None:
            qcvars[f"{input_model.model.method.upper()[3:]} TOTAL HESSIAN"] = c4hess
            qcvars["CURRENT HESSIAN"] = c4hess

        if input_model.driver.upper() == "PROPERTIES":
            retres = qcvars["CURRENT ENERGY"]
        else:
            retres = qcvars[f"CURRENT {input_model.driver.upper()}"]

        if isinstance(retres, Decimal):
            retres = float(retres)
        elif isinstance(retres, np.ndarray):
            retres = retres.ravel().tolist()

        build_out(qcvars)
        atprop = build_atomicproperties(qcvars)

        output_data = {
            "schema_version": 1,
            "extras": {"outfiles": outfiles, **input_model.extras},
            "properties": atprop,
            "provenance": Provenance(creator="CFOUR", version=self.get_version(), routine="xcfour"),
            "return_result": retres,
            "stderr": stderr,
            "stdout": stdout,
            "success": True,
        }

        # got to even out who needs plump/flat/Decimal/float/ndarray/list
        # Decimal --> str preserves precision
        # * formerly unnp(qcvars, flat=True).items()
        output_data["extras"]["qcvars"] = {
            k.upper(): str(v) if isinstance(v, Decimal) else v for k, v in qcvars.items()
        }

        return AtomicResult(**{**input_model.dict(), **output_data})
Example #23
    def compute(self, input_model: AtomicInput,
                config: "JobConfig") -> "AtomicResult":
        self.found(raise_error=True)

        verbose = 1
        print_jobrec(f"[1] {self.name} RESULTINPUT PRE-PLANT",
                     input_model.dict(), verbose >= 3)

        input_data = self.qcdb_build_input(input_model, config)
        input_model = AtomicInput(**input_data)

        print_jobrec(f"[2] {self.name} RESULTINPUT PRE-ENGINE",
                     input_model.dict(), verbose >= 4)

        # 'PATH': (':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) +
        #          ':' + os.environ.get('PATH')),# +
        # 'PSI_SCRATCH': tmpdir,
        # 'PYTHONPATH': os.environ.get('PYTHONPATH'),
        # 'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH')

        output_model = Psi4Harness.compute(self,
                                           input_model=input_model,
                                           config=config)

        print_jobrec(f"[3] {self.name} RESULT POST-ENGINE",
                     output_model.dict(), verbose >= 4)

        # ???
        if not output_model.success:
            return output_model

        print_jobrec(f"[4a] {self.name} RESULT POST-HARVEST",
                     output_model.dict(), verbose >= 5)

        output_model = self.qcdb_post_parse_output(input_model, output_model)

        print_jobrec(f"[4] {self.name} RESULT POST-POST-HARVEST",
                     output_model.dict(), verbose >= 2)

        return output_model
def test_compute_gradient(program, model):
    if not has_program(program):
        pytest.skip("Program '{}' not found.".format(program))

    molecule = _get_molecule(program)

    inp = AtomicInput(molecule=molecule, driver="gradient", model=model, extras={"mytag": "something"})
    ret = qcng.compute(inp, program, raise_error=True)

    assert ret.success is True
    assert isinstance(ret.return_result, np.ndarray)
    assert len(ret.return_result.shape) == 2
    assert ret.return_result.shape[1] == 3
    assert "mytag" in ret.extras, ret.extras
def test_cisno_casci_atomic_input(settings, ethylene, job_output):
    # Construct Geometry in bohr
    geom_angstrom = qcel.Datum("geometry", "angstrom", np.array(ethylene["geometry"]))
    geom_bohr = geom_angstrom.to_units("bohr")

    # Construct Molecule object
    m_ethylene = Molecule.from_data(
        {
            "symbols": ethylene["atoms"],
            "geometry": geom_bohr,
            "molecular_multiplicity": cisno_options["spinmult"],
            "molecular_charge": cisno_options["charge"],
        }
    )

    # Construct AtomicInput
    atomic_input = AtomicInput(
        molecule=m_ethylene,
        driver="energy",
        model=base_options,
        keywords=cisno_options,
    )

    with TCPBClient(host=settings["tcpb_host"], port=settings["tcpb_port"]) as TC:
        # Add in Ethylene atoms
        results = TC.compute(atomic_input)

    # compare only relevant attributes (computed values)
    attrs_to_compare = []
    for attr in dir(results):
        if (
            not (
                attr.startswith("__")
                or attr.startswith("_")
                or callable(attr)
                or attr[0].isupper()
            )
            and attr in fields_to_check
        ):
            attrs_to_compare.append(attr)

    for attr in attrs_to_compare:
        if isinstance(getattr(results, attr), RepeatedScalarFieldContainer):
            assert _round([a for a in getattr(results, attr)]) == _round(
                [a for a in getattr(job_output, attr)]
            )
        else:
            assert getattr(results, attr) == getattr(job_output, attr)
Example #26
    def compute(self, input_data: OptimizationInput,
                config: TaskConfig) -> "BaseModel":
        nwc_harness = NWChemHarness()
        self.found(raise_error=True)

        # Unify the keywords from the OptimizationInput and QCInputSpecification
        #  Optimization input will override, but don't tell users this as it seems unnecessary
        keywords = input_data.keywords.copy()
        keywords.update(input_data.input_specification.keywords)
        if keywords.get("program", "nwchem").lower() != "nwchem":
            raise InputError("NWChemDriver procedure only works with NWChem")

        # Make an atomic input
        atomic_input = AtomicInput(
            molecule=input_data.initial_molecule,
            driver="energy",
            keywords=keywords,
            **input_data.input_specification.dict(
                exclude={"driver", "keywords"}),
        )

        # Build the inputs for the job
        job_inputs = nwc_harness.build_input(atomic_input, config)

        # Replace the last line with a "task {} optimize"
        input_file: str = job_inputs["infiles"]["nwchem.nw"].strip()
        beginning, last_line = input_file.rsplit("\n", 1)
        assert last_line.startswith("task")
        last_line = f"task {last_line.split(' ')[1]} optimize"
        job_inputs["infiles"]["nwchem.nw"] = f"{beginning}\n{last_line}"

        # Run it!
        success, dexe = nwc_harness.execute(job_inputs)

        # Check for common errors
        if "There is an error in the input file" in dexe["stdout"]:
            raise InputError(dexe["stdout"])
        if "not compiled" in dexe["stdout"]:
            # recoverable with a different compilation with optional modules
            raise InputError(dexe["stdout"])

        # Parse it
        if success:
            dexe["outfiles"]["stdout"] = dexe["stdout"]
            dexe["outfiles"]["stderr"] = dexe["stderr"]
            return self.parse_output(dexe["outfiles"], input_data)
        else:
            raise UnknownError(dexe["stdout"])
Example #27
def test_repr_result():

    result = AtomicInput(
        **{
            "driver": "gradient",
            "model": {
                "method": "UFF"
            },
            "molecule": {
                "symbols": ["He"],
                "geometry": [0, 0, 0]
            }
        })
    assert "molecule_hash" in str(result)
    assert "molecule_hash" in repr(result)
    assert "'gradient'" in str(result)
Example #28
    def compute(self, input_model: AtomicInput,
                config: "TaskConfig") -> "AtomicResult":
        self.found(raise_error=True)

        verbose = 1

        print_jobrec(f"[1] {self.name} RESULTINPUT PRE-PLANT",
                     input_model.dict(), verbose >= 3)

        job_inputs = self.qcdb_build_input(input_model, config)

        print_jobrec(f"[2] {self.name}REC PRE-ENGINE", job_inputs,
                     verbose >= 4)

        success, dexe = self.execute(job_inputs)

        print_jobrec(f"[3] {self.name}REC POST-ENGINE", dexe, verbose >= 4)

        if "INPUT HAS AT LEAST ONE SPELLING OR LOGIC MISTAKE" in dexe[
                "stdout"]:
            raise InputError(
                error_stamp(job_inputs["infiles"]["gamess.inp"],
                            dexe["stdout"], dexe["stderr"]))

        if not success:
            output_model = input_model
            output_model["error"] = {
                "error_type": "execution_error",
                "error_message": dexe["stderr"]
            }

        dexe["outfiles"]["stdout"] = dexe["stdout"]
        dexe["outfiles"]["stderr"] = dexe["stderr"]
        dexe["outfiles"]["input"] = job_inputs["infiles"]["gamess.inp"]
        output_model = self.parse_output(dexe["outfiles"], input_model)

        print_jobrec(f"[4a] {self.name} RESULT POST-HARVEST",
                     output_model.dict(), verbose >= 5)

        output_model = self.qcdb_post_parse_output(input_model, output_model)

        print_jobrec(f"[4] {self.name} RESULT POST-POST-HARVEST",
                     output_model.dict(), verbose >= 2)

        return output_model
Example #29
def atomic_input_to_job_input(atomic_input: AtomicInput) -> pb.JobInput:
    """Convert AtomicInput to JobInput"""
    # Don't mutate original atomic_input object
    ai_copy = atomic_input.copy(deep=True)

    # Create Mol instance
    mol_msg = pb.Mol()
    mol_msg.atoms.extend(ai_copy.molecule.symbols)
    mol_msg.xyz.extend(ai_copy.molecule.geometry.flatten())
    mol_msg.units = pb.Mol.UnitType.BOHR  # Molecule always in bohr
    mol_msg.charge = int(ai_copy.molecule.molecular_charge)
    mol_msg.multiplicity = ai_copy.molecule.molecular_multiplicity
    mol_msg.closed = ai_copy.keywords.pop("closed_shell", True)
    mol_msg.restricted = ai_copy.keywords.pop("restricted", True)
    # Drop keyword terms already applied from Molecule object
    ai_copy.keywords.pop("charge", None)  # mol_msg.charge
    ai_copy.keywords.pop("spinmult", None)  # mol_msg.multiplicity

    # Create JobInput message
    ji = pb.JobInput(mol=mol_msg)
    # Set driver
    driver = ai_copy.driver.upper()
    if driver not in SUPPORTED_DRIVERS:
        # Only support QCEngine supported drivers; energy, gradient, hessian, properties
        raise ValueError(
            f"Driver '{driver}' not supported, please select from {SUPPORTED_DRIVERS}"
        )
    ji.run = pb.JobInput.RunType.Value(driver)
    # Set Method
    ji.method = pb.JobInput.MethodType.Value(ai_copy.model.method.upper())
    # Set Basis
    ji.basis = ai_copy.model.basis

    # Get keywords that have specific protobuf fields
    ji.return_bond_order = ai_copy.keywords.pop("bond_order", False)
    # Request AO and MO information
    if ai_copy.keywords.pop("mo_output", False):
        ji.imd_orbital_type = pb.JobInput.ImdOrbitalType.WHOLE_C

    # Set all other keywords under the "user_options" catch all
    for key, value in ai_copy.keywords.items():
        ji.user_options.extend([key, str(value)])

    return ji
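
A short sketch exercising the conversion; the molecule, model, and keywords are illustrative, and the method/basis strings are assumed to map onto values the TeraChem protobuf enums recognize:

water = Molecule(symbols=["O", "H", "H"],
                 geometry=[0.0, 0.0, 0.0,
                           0.0, 1.43, 1.10,
                           0.0, -1.43, 1.10])  # bohr
ai = AtomicInput(
    molecule=water,
    driver="gradient",
    model={"method": "b3lyp", "basis": "6-31g"},
    keywords={"restricted": True, "mo_output": True},
)
job_input = atomic_input_to_job_input(ai)  # pb.JobInput ready to send to TeraChem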
Example #30
def run_nwchem(name: str, molecule: "Molecule", options: "Keywords",
               **kwargs) -> Dict:
    """QCDB API to QCEngine connection for NWChem."""

    local_options = kwargs.get("local_options", None)
    mode_options = get_mode_config(mode_options=kwargs.get("mode_options"))

    resi = AtomicInput(
        **{
            "driver": inspect.stack()[1][3],
            "extras": {
                "qcdb:options": copy.deepcopy(options),
                "qcdb:mode_config": mode_options,
            },
            "model": {
                "method": name,
                "basis": "(auto)",
            },
            "molecule": molecule.to_schema(dtype=2) | {
                "fix_com": True,
                "fix_orientation": True
            },
            "protocols": {
                "native_files": "input"
            },
            "provenance": provenance_stamp(__name__),
        })

    jobrec = qcng.compute(resi,
                          "qcdb-nwchem",
                          local_options=local_options,
                          raise_error=True).dict()

    hold_qcvars = jobrec["extras"].pop("qcdb:qcvars")
    jobrec["qcvars"] = {
        key: qcel.Datum(**dval)
        for key, dval in hold_qcvars.items()
    }
    jobrec["molecule"]["fix_com"] = molecule.com_fixed()
    jobrec["molecule"]["fix_orientation"] = molecule.orientation_fixed()

    return jobrec