Example #1
    def qcdb_build_input(self,
                         input_model: AtomicInput,
                         config: "JobConfig",
                         template: Optional[str] = None) -> Dict[str, Any]:
        input_data = input_model.dict()

        ropts = input_model.extras["qcdb:options"]
        mode_config = input_model.extras["qcdb:mode_config"]

        ropts.require("QCDB",
                      "MEMORY",
                      f"{config.memory} gib",
                      accession="00000000",
                      verbose=False)

        muster_inherited_keywords(ropts, mode_config)
        mtd = input_data["model"]["method"]
        mtd = mtd[3:] if mtd.startswith("p4-") else mtd
        input_data["model"]["method"] = mtd

        # should we put this memory in the JobConfig object? I don't think the units agree
        ropts.scroll["QCDB"].pop("MEMORY")
        # print(config.memory, '!!')
        # config.memory = omem.value #???
        # print(config.memory, '!!')

        input_data["extras"] = {"wfn_qcvars_only": True}
        # input_data['kwargs'] = jobrec['kwargs']
        # input_data['return_output'] = True

        # print("Touched Keywords")  # debug
        # print(ropts.print_changed(history=True))  # debug

        popts = {}
        function_kwargs = {}
        # was recently active
        # for k, v in ropts.scroll["QCDB"].items():
        #     if v.disputed():
        #         popts[k] = v.value

        for k, v in ropts.scroll["PSI4"].items():
            if v.disputed2():
                if k.startswith("FUNCTION_KWARGS_"):
                    function_kwargs[k[16:]] = v.value
                else:
                    popts[k] = v.value
        input_data["keywords"] = popts
        input_data["keywords"]["function_kwargs"] = function_kwargs

        # print("Collected Keywords")  # debug
        # pp.pprint(popts)  # debug

        if "BASIS" in input_data["keywords"]:
            input_data["model"]["basis"] = input_data["keywords"]["BASIS"]

        return input_data
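
A usage note on this builder: qcdb_build_input returns a plain dict rather than a model, and Example #5 below rebuilds it into a fresh AtomicInput via AtomicInput(**input_data) before handing it to the engine. A minimal, self-contained sketch of that round trip, with the molecule, method, and keyword values invented for illustration:

from qcelemental.models import AtomicInput

# Hypothetical stand-in for the dict produced by qcdb_build_input above
input_data = {
    "molecule": {"symbols": ["He"], "geometry": [0.0, 0.0, 0.0]},
    "driver": "energy",
    "model": {"method": "hf", "basis": "cc-pvdz"},          # "p4-" prefix already stripped
    "keywords": {"SCF_TYPE": "DF", "function_kwargs": {}},  # collected PSI4 options
    "extras": {"wfn_qcvars_only": True},
}

atin = AtomicInput(**input_data)  # same rehydration step as in Example #5
print(atin.model.method, atin.keywords)
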
Example #2
    def compute(self, input_data: AtomicInput,
                config: "TaskConfig") -> AtomicResult:
        # Get the error correction configuration
        error_policy = input_data.protocols.error_correction

        # Work from a local reference to the input; AtomicInput is immutable,
        # so corrections below replace it with a rebuilt copy rather than mutating it
        local_input_data = input_data

        # Run the method and, if it fails, assess if the failure is restartable
        observed_errors = {}  # Errors that have been observed previously
        while True:
            try:
                result = self._compute(local_input_data, config)
                break
            except KnownErrorException as e:
                logger.info(f"Caught a {type(e)} error.")

                # Determine whether this specific type of error is allowed
                correction_allowed = error_policy.allows(e.error_name)
                if not correction_allowed:
                    logger.info(
                        f'Error correction for "{e.error_name}" is not allowed'
                    )
                    raise e
                logger.info(
                    f'Error correction for "{e.error_name}" is allowed')

                # Check if it has run before
                # TODO (wardlt): Should we allow errors to be run >1 time?
                previously_run = e.error_name in observed_errors
                if previously_run:
                    logger.info(
                        "Error has been observed before and mitigation did not fix the issue. Raising exception"
                    )
                    raise e

                # Generate and apply the updated keywords
                keyword_updates = e.create_keyword_update(local_input_data)
                new_keywords = local_input_data.keywords.copy()
                new_keywords.update(keyword_updates)
                local_input_data = AtomicInput(**local_input_data.dict(
                    exclude={"keywords"}),
                                               keywords=new_keywords)

                # Store the error details and mitigations employed
                observed_errors[e.error_name] = {
                    "details": e.details,
                    "keyword_updates": keyword_updates
                }

        # Add the errors observed and corrected for, if any
        if len(observed_errors) > 0:
            result.extras["observed_errors"] = observed_errors
        return result
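
The retry loop above touches only a narrow surface of the exception: e.error_name, e.details, and e.create_keyword_update(input_data). A hedged sketch of an error type shaped to that contract; the class, its name, and the keyword it adjusts are invented for illustration and are not qcengine's real KnownErrorException subclasses:

class FakeSCFConvergenceError(Exception):
    """Illustrative stand-in exposing the attributes the loop above relies on."""

    error_name = "fake_scf_convergence"
    details = {"reason": "illustration only"}

    def create_keyword_update(self, input_data):
        # Double a (hypothetical) iteration cap; the loop merges this dict into
        # input_data.keywords, rebuilds the AtomicInput, and retries once.
        return {"maxiter": 2 * input_data.keywords.get("maxiter", 50)}
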
Example #3
    def parse_output(self, outfiles: Dict[str, str],
                     input_model: AtomicInput) -> AtomicResult:
        """
        For the set of output files parse them to extract as much info as possible and return the atomic result.
        From the fchk file we get the energy and hessian, the gradient is taken from the log file.
        """
        properties = {}
        qcvars = {}
        # make sure we got valid exit status
        self.check_convergence(logfile=outfiles["gaussian.log"])
        version = self.parse_version(logfile=outfiles["gaussian.log"])
        # build the main data dict
        output_data = input_model.dict()
        provenance = {
            "version": version,
            "creator": "gaussian",
            "routine": "CLI"
        }
        # collect the total energy from the fchk file
        fchk_contents = outfiles["lig.fchk"]
        for line in fchk_contents.split("\n"):
            if "Total Energy" in line:
                energy = float(line.split()[3])
                properties["return_energy"] = energy
                properties["scf_total_energy"] = energy
                if input_model.driver == "energy":
                    output_data["return_result"] = energy
        if input_model.driver == "gradient":
            # now we need to parse out the forces
            gradient = self.parse_gradient(fchfile=outfiles["lig.fchk"])
            output_data["return_result"] = gradient
        elif input_model.driver == "hessian":
            hessian = self.parse_hessian(fchkfile=outfiles["lig.fchk"])
            output_data["return_result"] = hessian

        # parse scf_properties
        if "scf_properties" in input_model.keywords:
            qcvars["WIBERG_LOWDIN_INDICES"] = self.parse_wbo(
                logfile=outfiles["gaussian.log"],
                natoms=len(input_model.molecule.symbols),
            )

        # if there is an extra output file grab it
        if "gaussian.wfx" in outfiles:
            output_data["extras"]["gaussian.wfx"] = outfiles["gaussian.wfx"]
        if qcvars:
            output_data["extras"]["qcvars"] = qcvars
        output_data["properties"] = properties
        output_data["schema_name"] = "qcschema_output"
        output_data["stdout"] = outfiles["gaussian.log"]
        output_data["success"] = True
        output_data["provenance"] = provenance
        return AtomicResult(**output_data)
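
parse_output expects the raw file contents keyed by filename ("gaussian.log", "lig.fchk", and optionally "gaussian.wfx"). A hedged sketch of assembling that dict from disk; the scratch path, the harness instance, and atomic_input are assumptions, not part of the snippet above:

from pathlib import Path

scratch = Path("/tmp/gaussian_job")  # hypothetical scratch directory
outfiles = {
    "gaussian.log": (scratch / "gaussian.log").read_text(),
    "lig.fchk": (scratch / "lig.fchk").read_text(),
}
wfx = scratch / "gaussian.wfx"
if wfx.exists():  # optional extra output picked up into extras
    outfiles["gaussian.wfx"] = wfx.read_text()

result = harness.parse_output(outfiles, atomic_input)  # `harness` and `atomic_input` assumed to exist
print(result.properties.return_energy)
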
Example #4
    def parse_output(
        self, outfiles: Dict[str, str], input_model: AtomicInput
    ) -> AtomicResult:  # lgtm: [py/similar-function]

        stdout = outfiles.pop("stdout")
        stderr = outfiles.pop("stderr")

        # c4mol, if it exists, is dinky, just a clue to geometry of cfour results
        try:
            qcvars, c4hess, c4grad, c4mol, version, errorTMP = harvest(
                input_model.molecule, stdout, **outfiles)
        except Exception as e:
            raise UnknownError(stdout) from e

        if c4grad is not None:
            qcvars["CURRENT GRADIENT"] = c4grad
            qcvars[f"{input_model.model.method.upper()[3:]} TOTAL GRADIENT"] = c4grad

        if c4hess is not None:
            qcvars[f"{input_model.model.method.upper()[3:]} TOTAL HESSIAN"] = c4hess
            qcvars["CURRENT HESSIAN"] = c4hess

        if input_model.driver.upper() == "PROPERTIES":
            retres = qcvars[f"CURRENT ENERGY"]
        else:
            retres = qcvars[f"CURRENT {input_model.driver.upper()}"]

        if isinstance(retres, Decimal):
            retres = float(retres)
        elif isinstance(retres, np.ndarray):
            retres = retres.ravel().tolist()

        build_out(qcvars)
        atprop = build_atomicproperties(qcvars)

        output_data = {
            "schema_version": 1,
            "extras": {"outfiles": outfiles, **input_model.extras},
            "properties": atprop,
            "provenance": Provenance(creator="CFOUR", version=self.get_version(), routine="xcfour"),
            "return_result": retres,
            "stderr": stderr,
            "stdout": stdout,
            "success": True,
        }

        # got to even out who needs plump/flat/Decimal/float/ndarray/list
        # Decimal --> str preserves precision
        # * formerly unnp(qcvars, flat=True).items()
        output_data["extras"]["qcvars"] = {
            k.upper(): str(v) if isinstance(v, Decimal) else v for k, v in qcvars.items()
        }

        return AtomicResult(**{**input_model.dict(), **output_data})
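
The extras["qcvars"] comprehension at the end upper-cases the keys and stringifies Decimal values so precision survives JSON serialization, while arrays pass through untouched. A small self-contained sketch of just that transformation:

from decimal import Decimal

import numpy as np

qcvars = {
    "current energy": Decimal("-76.026760737428"),
    "current gradient": np.zeros((2, 3)),
}
serialized = {k.upper(): str(v) if isinstance(v, Decimal) else v for k, v in qcvars.items()}
# {'CURRENT ENERGY': '-76.026760737428', 'CURRENT GRADIENT': array([[0., 0., 0.], [0., 0., 0.]])}
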
Example #5
    def compute(self, input_model: AtomicInput,
                config: "JobConfig") -> "AtomicResult":
        self.found(raise_error=True)

        verbose = 1
        print_jobrec(f"[1] {self.name} RESULTINPUT PRE-PLANT",
                     input_model.dict(), verbose >= 3)

        input_data = self.qcdb_build_input(input_model, config)
        input_model = AtomicInput(**input_data)

        print_jobrec(f"[2] {self.name} RESULTINPUT PRE-ENGINE",
                     input_model.dict(), verbose >= 4)

        # 'PATH': (':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) +
        #          ':' + os.environ.get('PATH')),# +
        # 'PSI_SCRATCH': tmpdir,
        # 'PYTHONPATH': os.environ.get('PYTHONPATH'),
        # 'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH')

        output_model = Psi4Harness.compute(self,
                                           input_model=input_model,
                                           config=config)

        print_jobrec(f"[3] {self.name} RESULT POST-ENGINE",
                     output_model.dict(), verbose >= 4)

        # ???
        if not output_model.success:
            return output_model

        print_jobrec(f"[4a] {self.name} RESULT POST-HARVEST",
                     output_model.dict(), verbose >= 5)

        output_model = self.qcdb_post_parse_output(input_model, output_model)

        print_jobrec(f"[4] {self.name} RESULT POST-POST-HARVEST",
                     output_model.dict(), verbose >= 2)

        return output_model
Example #6
    def compute(self, input_model: AtomicInput,
                config: "TaskConfig") -> "AtomicResult":
        self.found(raise_error=True)

        verbose = 1

        print_jobrec(f"[1] {self.name} RESULTINPUT PRE-PLANT",
                     input_model.dict(), verbose >= 3)

        job_inputs = self.qcdb_build_input(input_model, config)

        print_jobrec(f"[2] {self.name}REC PRE-ENGINE", job_inputs,
                     verbose >= 4)

        success, dexe = self.execute(job_inputs)

        print_jobrec(f"[3] {self.name}REC POST-ENGINE", dexe, verbose >= 4)

        if "INPUT HAS AT LEAST ONE SPELLING OR LOGIC MISTAKE" in dexe[
                "stdout"]:
            raise InputError(
                error_stamp(job_inputs["infiles"]["gamess.inp"],
                            dexe["stdout"], dexe["stderr"]))

        if not success:
            # Execution failed; return a FailedOperation rather than mutating the
            # immutable AtomicInput and falling through to parse_output below
            return FailedOperation(
                success=False,
                error={
                    "error_type": "execution_error",
                    "error_message": error_stamp(job_inputs["infiles"]["gamess.inp"],
                                                 dexe["stdout"], dexe["stderr"]),
                },
                input_data=input_model.dict(),
            )

        dexe["outfiles"]["stdout"] = dexe["stdout"]
        dexe["outfiles"]["stderr"] = dexe["stderr"]
        dexe["outfiles"]["input"] = job_inputs["infiles"]["gamess.inp"]
        output_model = self.parse_output(dexe["outfiles"], input_model)

        print_jobrec(f"[4a] {self.name} RESULT POST-HARVEST",
                     output_model.dict(), verbose >= 5)

        output_model = self.qcdb_post_parse_output(input_model, output_model)

        print_jobrec(f"[4] {self.name} RESULT POST-POST-HARVEST",
                     output_model.dict(), verbose >= 2)

        return output_model
Example #7
File: runner.py  Project: sjrl/QCEngine
    def parse_output(self, outfiles: Dict[str, str], input_model: AtomicInput) -> AtomicResult:

        # Get the stdout from the calculation (required)
        stdout = outfiles.pop("stdout")
        stderr = outfiles.pop("stderr")

        # gamessmol, if it exists, is dinky, just a clue to geometry of gamess results
        qcvars, gamessgrad, gamessmol = harvest(input_model.molecule, stdout, **outfiles)

        if gamessgrad is not None:
            qcvars["CURRENT GRADIENT"] = gamessgrad

        if input_model.driver.upper() == "PROPERTIES":
            retres = qcvars[f"CURRENT ENERGY"]
        else:
            retres = qcvars[f"CURRENT {input_model.driver.upper()}"]

        build_out(qcvars)
        atprop = build_atomicproperties(qcvars)

        output_data = {
            "schema_version": 1,
            "molecule": gamessmol,
            "extras": {"outfiles": outfiles, **input_model.extras},
            "properties": atprop,
            "provenance": Provenance(creator="GAMESS", version=self.get_version(), routine="rungms"),
            "return_result": retres,
            "stderr": stderr,
            "stdout": stdout,
            "success": True,
        }

        # got to even out who needs plump/flat/Decimal/float/ndarray/list
        output_data["extras"]["qcvars"] = {
            k.upper(): str(v) if isinstance(v, Decimal) else v for k, v in unnp(qcvars, flat=True).items()
        }

        return AtomicResult(**{**input_model.dict(), **output_data})
Example #8
def job_output_to_atomic_result(*, atomic_input: AtomicInput,
                                job_output: pb.JobOutput) -> AtomicResult:
    """Convert JobOutput to AtomicResult"""
    # Convert job_output to python types
    # NOTE: Required so that AtomicResult is JSON serializable. Protobuf types are not.
    jo_dict = MessageToDict(job_output, preserving_proto_field_name=True)

    if atomic_input.driver.upper() == "ENERGY":
        # Select first element in list (ground state); may need to modify for excited
        # states
        return_result: Union[float, List[float]] = jo_dict["energy"][0]

    elif atomic_input.driver.upper() == "GRADIENT":
        return_result = jo_dict["gradient"]

    else:
        raise ValueError(
            f"Unsupported driver: {atomic_input.driver.upper()}, supported drivers "
            f"include: {SUPPORTED_DRIVERS}")

    if atomic_input.keywords.get("molden"):
        # Molden file was requested
        try:
            molden_string = tcpb_imd_fields2molden_string(job_output)
        except Exception:
            # Don't know how this code will blow up, so except everything for now :/
            # NOTE: mo_output will set imd_orbital_type to "WHOLE_C"
            molden_string = "Unable to create molden output. Did you include the 'mo_output' keyword??"
    else:
        molden_string = None

    # Prepare AtomicInput to be base input for AtomicResult
    atomic_input_dict = atomic_input.dict()
    atomic_input_dict.pop("provenance", None)

    # Create AtomicResult as superset of AtomicInput values
    atomic_result = AtomicResult(
        **atomic_input_dict,
        # Create new provenance object
        provenance=Provenance(
            creator="terachem_pbs",
            version="1.9-2021.01-dev",
            routine="tcpb.TCProtobufClient.compute",
        ),
        return_result=return_result,
        properties=to_atomic_result_properties(job_output),
        # NOTE: Wavefunction will only be added if atomic_input.protocols.wavefunction != 'none'
        wavefunction=to_wavefunction_properties(job_output, atomic_input),
        success=True,
    )
    # And extend extras to include values additional to input extras
    atomic_result.extras.update({
        "qcvars": {
            "charges": jo_dict.get("charges"),
            "spins": jo_dict.get("spins"),
            "meyer_bond_order": jo_dict.get("bond_order"),
            "orb_size": jo_dict.get("orb_size"),
            "excited_state_energies": jo_dict.get("energy"),
            "cis_transition_dipoles": jo_dict.get("cis_transition_dipoles"),
            "compressed_bond_order": jo_dict.get("compressed_bond_order"),
            "compressed_hessian": jo_dict.get("compressed_hessian"),
            "compressed_ao_data": jo_dict.get("compressed_ao_data"),
            "compressed_primitive_data":
            jo_dict.get("compressed_primitive_data"),
            "compressed_mo_vector": jo_dict.get("compressed_mo_vector"),
            "imd_mmatom_gradient": jo_dict.get("imd_mmatom_gradient"),
        },
        "job_extras": {
            "job_dir": jo_dict.get("job_dir"),
            "job_scr_dir": jo_dict.get("job_scr_dir"),
            "server_job_id": jo_dict.get("server_job_id"),
            "orb1afile": jo_dict.get("orb1afile"),
            "orb1bfile": jo_dict.get("orb1bfile"),
        },
        "molden": molden_string,
    })
    return atomic_result
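
The converter takes keyword-only arguments. A hedged sketch of calling it once a pb.JobOutput is in hand; how that JobOutput is obtained from the TeraChem protobuf server is outside this snippet and simply assumed:

# `atomic_input` is a qcelemental AtomicInput; `job_output` is a pb.JobOutput
# already returned by the server (both assumed to exist here).
result = job_output_to_atomic_result(atomic_input=atomic_input, job_output=job_output)

print(result.return_result)                 # float for "energy", list for "gradient"
print(result.extras["qcvars"]["charges"])   # populated from the protobuf-derived dict
print(result.extras["molden"] is not None)  # only when keywords={"molden": True}
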
Example #9
    def compute(self, input_model: AtomicInput,
                config: "TaskConfig") -> "AtomicResult":
        self.found(raise_error=True)

        verbose = 1

        print_jobrec(f"[1] {self.name} RESULTINPUT PRE-PLANT",
                     input_model.dict(), verbose >= 3)

        job_inputs = self.qcdb_build_input(input_model, config)

        print_jobrec(f"[2] {self.name}REC PRE-ENGINE", job_inputs,
                     verbose >= 4)

        # 'NWCHEM_OMP_NUM_CORES': os.environ.get('NWCHEM_OMP_NUM_CORES'),

        success, dexe = self.execute(job_inputs)

        stdin = job_inputs["infiles"]["nwchem.nw"]

        print_jobrec(f"[3] {self.name}REC POST-ENGINE", dexe, verbose >= 4)

        if "There is an error in the input file" in dexe["stdout"]:
            raise InputError(error_stamp(stdin, dexe["stdout"],
                                         dexe["stderr"]))
        if "not compiled" in dexe["stdout"]:
            # recoverable with a different compilation with optional modules
            raise InputError(error_stamp(stdin, dexe["stdout"],
                                         dexe["stderr"]))

        if success:
            dexe["outfiles"]["stdout"] = dexe["stdout"]
            dexe["outfiles"]["stderr"] = dexe["stderr"]
            dexe["outfiles"]["input"] = stdin
            output_model = self.parse_output(dexe["outfiles"], input_model)

            print_jobrec(f"[4a] {self.name} RESULT POST-HARVEST",
                         output_model.dict(), verbose >= 5)

            output_model = self.qcdb_post_parse_output(input_model,
                                                       output_model)

            print_jobrec(f"[4] {self.name} RESULT POST-POST-HARVEST",
                         output_model.dict(), verbose >= 2)

        else:
            ## Check if any of the errors are known
            #for error in all_errors:
            #    error.detect_error(dexe)
            output_model = FailedOperation(
                success=False,
                error={
                    "error_type": "execution_error",
                    "error_message": error_stamp(stdin, dexe["stdout"],
                                                 dexe["stderr"]),
                },
                input_data=input_model.dict(),
            )

        return output_model
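
Because this compute returns either an AtomicResult or a FailedOperation, downstream code typically branches on success before reading result fields. A minimal sketch of that check; the harness, input, and config objects are assumptions:

output = harness.compute(atomic_input, task_config)  # hypothetical harness and inputs

if output.success:
    print("return_result:", output.return_result)
else:
    # FailedOperation carries the stamped input/stdout/stderr in its error message
    print(output.error.error_type)
    print(output.error.error_message)
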
Example #10
File: dftd4.py  Project: mtzgroup/QCEngine
    def compute(self, input_model: AtomicInput,
                config: TaskConfig) -> AtomicResult:
        """
        Actual interface to the dftd4 package. The compute function is just a thin
        wrapper around the native QCSchema interface of the dftd4 Python-API.
        """

        self.found(raise_error=True)

        import dftd4
        from dftd4.qcschema import run_qcschema

        # strip engine hint
        input_data = input_model.dict()
        method = input_model.model.method
        if method.startswith("d4-"):
            method = method[3:]
            input_data["model"]["method"] = method
        qcvkey = method.upper() if method is not None else None

        # pass `from_arrays` the dftd4 behavior: a functional specification overrides an explicit parameter specification
        # * differs from the dftd3 harness behavior, where parameters extend or override the functional
        # * stash the resolved plan in extras or, if it errored, leave it for the proper dftd4 API to reject
        param_tweaks = None if method else input_model.keywords.get("params_tweaks", None)
        try:
            planinfo = from_arrays(
                verbose=1,
                name_hint=method,
                level_hint=input_model.keywords.get("level_hint", None),
                param_tweaks=param_tweaks,
                dashcoeff_supplement=input_model.keywords.get(
                    "dashcoeff_supplement", None),
            )
        except InputError:
            pass
        else:
            input_data["extras"]["info"] = planinfo

        # strip dispersion level from method
        for alias, d4 in get_dispersion_aliases().items():
            if d4 == "d4bjeeqatm" and method.lower().endswith(alias):
                method = method[:-(len(alias) + 1)]
                input_data["model"]["method"] = method

        # consolidate dispersion level aliases
        level_hint = input_model.keywords.get("level_hint", None)
        if level_hint and get_dispersion_aliases()[level_hint.lower()] == "d4bjeeqatm":
            level_hint = "d4"
            input_data["keywords"]["level_hint"] = level_hint

        input_model = AtomicInput(**input_data)

        # Run the Harness
        output = run_qcschema(input_model)

        if "info" in output.extras:
            qcvkey = output.extras["info"]["fctldash"].upper()

        calcinfo = {}
        energy = output.properties.return_energy
        calcinfo["CURRENT ENERGY"] = energy
        calcinfo["DISPERSION CORRECTION ENERGY"] = energy
        if qcvkey:
            calcinfo[f"{qcvkey} DISPERSION CORRECTION ENERGY"] = energy

        if output.driver == "gradient":
            gradient = output.return_result
            calcinfo["CURRENT GRADIENT"] = gradient
            calcinfo["DISPERSION CORRECTION GRADIENT"] = gradient
            if qcvkey:
                calcinfo[f"{qcvkey} DISPERSION CORRECTION GRADIENT"] = gradient

        if output.keywords.get("pair_resolved", False):
            pw2 = output.extras["dftd4"]["additive pairwise energy"]
            pw3 = output.extras["dftd4"]["non-additive pairwise energy"]
            assert abs(pw2.sum() + pw3.sum() - energy) < 1.0e-8, f"{pw2.sum()} + {pw3.sum()} != {energy}"
            calcinfo["2-BODY DISPERSION CORRECTION ENERGY"] = pw2.sum()
            calcinfo["3-BODY DISPERSION CORRECTION ENERGY"] = pw3.sum()
            calcinfo["2-BODY PAIRWISE DISPERSION CORRECTION ANALYSIS"] = pw2
            calcinfo["3-BODY PAIRWISE DISPERSION CORRECTION ANALYSIS"] = pw3

        output.extras["qcvars"] = calcinfo

        return output
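
A hedged sketch of an input this harness would accept: the "d4-" engine hint on the method is stripped before calling into dftd4, and level_hint and pair_resolved are read from keywords. The neon-dimer geometry and the method choice are illustrative only, and the harness and config names are assumptions:

from qcelemental.models import AtomicInput

atin = AtomicInput(
    molecule={
        "symbols": ["Ne", "Ne"],
        "geometry": [0.0, 0.0, 0.0, 0.0, 0.0, 6.0],  # bohr
    },
    driver="energy",
    model={"method": "d4-b3lyp"},  # "d4-" prefix is stripped by the harness
    keywords={"level_hint": "d4", "pair_resolved": False},
)
# result = harness.compute(atin, task_config)  # hypothetical harness/config objects
# result.extras["qcvars"]["DISPERSION CORRECTION ENERGY"]
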
Example #11
    def parse_output(
        self, outfiles: Dict[str, str], input_model: AtomicInput
    ) -> AtomicResult:  # lgtm: [py/similar-function]

        stdout = outfiles.pop("stdout")
        stderr = outfiles.pop("stderr")

        method = input_model.model.method.lower()
        method = method[3:] if method.startswith("c4-") else method

        # c4mol, if it exists, is dinky, just a clue to geometry of cfour results
        try:
            # July 2021: c4mol & vector returns now atin/outfile orientation depending on fix_com,orientation=T/F. previously always atin orientation
            qcvars, c4hess, c4grad, c4mol, version, module, errorTMP = harvest(
                input_model.molecule, method, stdout, **outfiles
            )
        except Exception:
            raise UnknownError(error_stamp(outfiles["input"], stdout, stderr))

        if errorTMP != "":
            raise UnknownError(error_stamp(outfiles["input"], stdout, stderr))

        try:
            if c4grad is not None:
                qcvars["CURRENT GRADIENT"] = c4grad
                qcvars[f"{method.upper()} TOTAL GRADIENT"] = c4grad

            if c4hess is not None:
                qcvars[f"{method.upper()} TOTAL HESSIAN"] = c4hess
                qcvars["CURRENT HESSIAN"] = c4hess

            if input_model.driver.upper() == "PROPERTIES":
                retres = qcvars[f"CURRENT ENERGY"]
            else:
                retres = qcvars[f"CURRENT {input_model.driver.upper()}"]
        except KeyError:
            raise UnknownError(error_stamp(outfiles["input"], stdout, stderr))

        # TODO: "xalloc(): memory allocation failed!"

        if isinstance(retres, Decimal):
            retres = float(retres)
        elif isinstance(retres, np.ndarray):
            retres = retres.ravel().tolist()

        build_out(qcvars)
        atprop = build_atomicproperties(qcvars)

        provenance = Provenance(creator="CFOUR", version=self.get_version(), routine="xcfour").dict()
        if module is not None:
            provenance["module"] = module

        output_data = {
            "schema_version": 1,
            "molecule": c4mol,  # overwrites with outfile Cartesians in case fix_*=F
            "extras": {**input_model.extras},
            "native_files": {k: v for k, v in outfiles.items() if v is not None},
            "properties": atprop,
            "provenance": provenance,
            "return_result": retres,
            "stderr": stderr,
            "stdout": stdout,
            "success": True,
        }

        # got to even out who needs plump/flat/Decimal/float/ndarray/list
        # Decimal --> str preserves precision
        # * formerly unnp(qcvars, flat=True).items()
        output_data["extras"]["qcvars"] = {
            k.upper(): str(v) if isinstance(v, Decimal) else v for k, v in qcvars.items()
        }

        return AtomicResult(**{**input_model.dict(), **output_data})
Example #12
    def parse_output(self, outfiles: Dict[str, str],
                     input_model: AtomicInput) -> AtomicResult:

        # Get the stdout from the calculation (required)
        stdout = outfiles.pop("stdout")
        stderr = outfiles.pop("stderr")

        method = input_model.model.method.lower()
        method = method[4:] if method.startswith("gms-") else method

        # gamessmol, if it exists, is dinky, just a clue to geometry of gamess results
        try:
            qcvars, gamesshess, gamessgrad, gamessmol, module = harvest(
                input_model.molecule, method, stdout, **outfiles)

        except Exception as e:
            raise UnknownError(
                "STDOUT:\n" + stdout + "\nSTDERR:\n" + stderr +
                "\nTRACEBACK:\n" +
                "".join(traceback.format_exception(*sys.exc_info())))

        try:
            if gamessgrad is not None:
                qcvars[f"{method.upper()} TOTAL GRADIENT"] = gamessgrad
                qcvars["CURRENT GRADIENT"] = gamessgrad

            if gamesshess is not None:
                qcvars[f"{method.upper()} TOTAL HESSIAN"] = gamesshess
                qcvars["CURRENT HESSIAN"] = gamesshess

            if input_model.driver.upper() == "PROPERTIES":
                retres = qcvars[f"CURRENT ENERGY"]
            else:
                retres = qcvars[f"CURRENT {input_model.driver.upper()}"]
        except KeyError as e:
            raise UnknownError(
                "STDOUT:\n" + stdout + "\nSTDERR:\n" + stderr +
                "\nTRACEBACK:\n" +
                "".join(traceback.format_exception(*sys.exc_info())))

        build_out(qcvars)
        atprop = build_atomicproperties(qcvars)

        provenance = Provenance(creator="GAMESS",
                                version=self.get_version(),
                                routine="rungms").dict()
        if module is not None:
            provenance["module"] = module

        output_data = {
            "schema_version": 1,
            "molecule": gamessmol,
            "extras": {
                "outfiles": outfiles,
                **input_model.extras
            },
            "properties": atprop,
            "provenance": provenance,
            "return_result": retres,
            "stderr": stderr,
            "stdout": stdout,
            "success": True,
        }

        # got to even out who needs plump/flat/Decimal/float/ndarray/list
        # * formerly unnp(qcvars, flat=True).items()
        output_data["extras"]["qcvars"] = {
            k.upper(): str(v) if isinstance(v, Decimal) else v
            for k, v in qcvars.items()
        }

        return AtomicResult(**{**input_model.dict(), **output_data})
Example #13
File: runner.py  Project: eljost/QCEngine
    def parse_output(self, outfiles: Dict[str, str],
                     input_model: AtomicInput) -> AtomicResult:

        # Get the stdout from the calculation (required)
        stdout = outfiles.pop("stdout")
        stderr = outfiles.pop("stderr")

        method = input_model.model.method.lower()
        method = method[4:] if method.startswith("gms-") else method

        # gamessmol, if it exists, is dinky, just a clue to geometry of gamess results
        try:
            # July 2021: gamessmol & vector returns now atin/outfile orientation depending on fix_com,orientation=T/F. previously always outfile orientation
            qcvars, gamesshess, gamessgrad, gamessmol, module = harvest(
                input_model.molecule, method, stdout, **outfiles)
            # TODO:  "EXECUTION OF GAMESS TERMINATED -ABNORMALLY-" in dexe["stdout"]:

        except Exception:
            raise UnknownError(error_stamp(outfiles["input"], stdout, stderr))

        try:
            if gamessgrad is not None:
                qcvars[f"{method.upper()} TOTAL GRADIENT"] = gamessgrad
                qcvars["CURRENT GRADIENT"] = gamessgrad

            if gamesshess is not None:
                qcvars[f"{method.upper()} TOTAL HESSIAN"] = gamesshess
                qcvars["CURRENT HESSIAN"] = gamesshess

            if input_model.driver.upper() == "PROPERTIES":
                retres = qcvars[f"CURRENT ENERGY"]
            else:
                retres = qcvars[f"CURRENT {input_model.driver.upper()}"]
        except KeyError:
            if "EXETYP=CHECK" in stdout and "EXECUTION OF GAMESS TERMINATED NORMALLY" in stdout:
                # check run that completed normally
                # * on one hand, it's still an error return_result-wise
                # * but on the other hand, often the reason for the job is to get gamessmol, so let it return success=T below
                retres = 0.0
            else:
                raise UnknownError(
                    error_stamp(outfiles["input"], stdout, stderr))

        build_out(qcvars)
        atprop = build_atomicproperties(qcvars)

        provenance = Provenance(creator="GAMESS",
                                version=self.get_version(),
                                routine="rungms").dict()
        if module is not None:
            provenance["module"] = module

        output_data = {
            "schema_version": 1,
            "molecule": gamessmol,  # overwrites with outfile Cartesians in case fix_*=F
            "extras": {**input_model.extras},
            "native_files": {k: v for k, v in outfiles.items() if v is not None},
            "properties": atprop,
            "provenance": provenance,
            "return_result": retres,
            "stderr": stderr,
            "stdout": stdout,
            "success": True,
        }

        # got to even out who needs plump/flat/Decimal/float/ndarray/list
        # * formerly unnp(qcvars, flat=True).items()
        output_data["extras"]["qcvars"] = {
            k.upper(): str(v) if isinstance(v, Decimal) else v
            for k, v in qcvars.items()
        }

        return AtomicResult(**{**input_model.dict(), **output_data})