コード例 #1
0
 def run_define_runner(self, define_opt, mol_path, run_type="full"):
     """Copy an optional coord file into place and execute DefineRunner.

     Args:
         define_opt (dict): options forwarded to DefineRunner.
         mol_path (str): path to a coord file copied into the working
             directory as ``coord``; skipped when falsy.
         run_type (str): one of "full", "internal" or "mo", selecting which
             DefineRunner entry point is executed.

     Raises:
         ValueError: if ``run_type`` is not a recognized mode.
     """
     if mol_path:
         shutil.copyfile(mol_path, 'coord')
     runner = DefineRunner(define_opt, timeout=ItestConfig.define_timeout)
     # Map each supported run mode to the matching DefineRunner entry point.
     dispatch = {
         "full": runner.run_full,
         "internal": runner.run_update_internal_coords,
         "mo": runner.run_generate_mo_files,
     }
     if run_type not in dispatch:
         raise ValueError("wrong run_type {}".format(run_type))
     # every entry point returns True when define completed normally
     assert dispatch[run_type]()
コード例 #2
0
ファイル: utils.py プロジェクト: davidwaroquiers/turbomoleio
def run_itest(executables,
              define_options,
              coord_filename,
              control_reference_filename,
              file_classes,
              arguments=None,
              datagroups_options=None):
    """
    Runs the integration tests. First define is executed and the produced control file is compared
    with the reference. If successful the required turbomole executables are run and the numerical values
    of the outputs will be checked with the reference.

    Args:
        executables: string or list of strings identifying the list of programs that should be executed.
        define_options (dict): the options passed to DefineRunner.
        coord_filename (str): the filename of the coord used. Will be taken from the testfiles/structures folder.
        control_reference_filename (str): the name of the reference control file used to check the correctness of
            the execution of DefineRunner. Will be taken from the testfiles/integration/control folder.
        file_classes: a list of classes subclassing BaseData (or even a single one if only one program required)
            they will be used to generate outputs from stdout and compared with the reference.
        arguments: string or list of strings with the arguments to be passed to the each executable.
        datagroups_options (dict): a dict of the form {"datagroup_name": "datagroup_value"}. Can contain
            additional datagroups that will be set with the cdg utility before running the calculation.

    Returns:
        bool: True if the test passed successfully

    Raises:
        ItestError: if define does not complete normally or an executable fails.
    """

    # Normalize scalar inputs to lists so executables, arguments and
    # file_classes can be iterated in lockstep further down.
    if not isinstance(executables, (list, tuple)):
        executables = [executables]

    if not isinstance(file_classes, (list, tuple)):
        file_classes = [file_classes]

    if arguments is None:
        arguments = [None] * len(executables)
    elif not isinstance(arguments, (list, tuple)):
        arguments = [arguments]

    # Snapshot the global test configuration once up front.
    opt_define_timeout = ItestConfig.define_timeout
    opt_generate_ref = ItestConfig.generate_ref
    opt_tol = ItestConfig.tol

    # All files are produced in a scratch directory; temp_dir decides whether
    # it is deleted afterwards based on ItestConfig.delete_tmp_dir.
    with temp_dir(ItestConfig.delete_tmp_dir) as tmp_dir:
        # get the coord file (for the structure defined in the string)
        shutil.copyfile(get_sp(coord_filename), 'coord')

        dr = DefineRunner(define_options, timeout=opt_define_timeout)
        define_out = dr.run_full()

        # define should complete normally
        if not define_out:
            raise ItestError("Define did not complete normally.")

        # Optionally inject extra datagroups into the control file before the
        # reference comparison and the actual calculation.
        if datagroups_options:
            c = Control.from_file()
            for k, v in datagroups_options.items():
                c.cdg(k, v)
            c.to_file()

        # In reference-generation mode the control file just produced by
        # define becomes the new stored reference.
        if opt_generate_ref:  # pragma: no cover
            shutil.copy2("control",
                         get_control_integration(control_reference_filename))

        ref_control = Control.from_file(
            get_control_integration(control_reference_filename))

        current_control = Control.from_file("control")
        compare_control = current_control.compare(ref_control, tol=opt_tol)
        # print the output of Control.compare if the compare fails
        assert compare_control is None, compare_control

        # Run each turbomole executable in turn and validate its outputs.
        for executable, exec_args, out_parser in zip(executables, arguments,
                                                     file_classes):
            cmd = [executable]
            if exec_args:
                cmd += shlex.split(exec_args)
            process = subprocess.Popen(cmd,
                                       stdin=subprocess.PIPE,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE,
                                       encoding='utf-8')
            program_std_out, program_std_err = process.communicate()

            try:
                ret_code = process.wait()

                # presumably turbomole programs print "ended normally" on
                # stderr on success, so both return code and message are
                # checked — TODO confirm against turbomole docs
                if ret_code or "ended normally" not in program_std_err:
                    raise ItestError(
                        "Executable {} has failed with return code {}".format(
                            executable, ret_code))

                if out_parser:
                    # for jobex the outputs are not in the stdout but in the job.last file
                    if executable == "jobex":
                        out = out_parser.from_file("job.last").as_dict()
                    else:
                        out = out_parser.from_string(program_std_out).as_dict()
                    out_ref_path = os.path.join(
                        get_tfp(), "integration", "logs_json",
                        "{}_{}.json".format(control_reference_filename,
                                            executable))
                    if opt_generate_ref:
                        dumpfn(out, out_ref_path)
                    # cls=None loads the reference as plain dicts/lists for
                    # the element-wise numerical comparison below.
                    out_ref = loadfn(out_ref_path, cls=None)
                    assert_almost_equal(
                        out,
                        out_ref,
                        atol=opt_tol,
                        ignored_values=ignored_itest_parsed_keys)

                # Compare final energies against the stored reference, when
                # the control file provides them.
                c = Control.from_file("control")
                e = c.energy
                if e is not None:
                    e_ref_path = os.path.join(
                        get_tfp(), "integration", "energy",
                        "{}_{}.json".format(control_reference_filename,
                                            executable))
                    if opt_generate_ref:
                        dumpfn(e, e_ref_path)
                    e_ref = loadfn(e_ref_path)
                    np.testing.assert_allclose(e.scf, e_ref.scf, atol=opt_tol)
                    np.testing.assert_allclose(e.total,
                                               e_ref.total,
                                               atol=opt_tol)

                # Same check for gradients, if present.
                g = c.gradient
                if g is not None:
                    g_ref_path = os.path.join(
                        get_tfp(), "integration", "gradient",
                        "{}_{}.json".format(control_reference_filename,
                                            executable))
                    if opt_generate_ref:
                        dumpfn(g, g_ref_path)
                    g_ref = loadfn(g_ref_path)
                    np.testing.assert_allclose(g.gradients,
                                               g_ref.gradients,
                                               atol=opt_tol)

                # check that the output from eiger and our parser give the same results
                states = States.from_file()
                eiger_runner = EigerRunner()
                eiger_runner.run()
                eiger_out = eiger_runner.get_eiger_output()
                assert eiger_out.compare_states(states) is None

                # Keep the stdout of a successful run for later inspection.
                with open("{}_stdout".format(executable), "w") as f:
                    f.write(program_std_out)
            except:
                # if an exception is raised write down the output file for debugging then reraise
                # (bare except is deliberate here: even KeyboardInterrupt
                # should leave the stdout/stderr files behind before re-raising)
                with open("{}_stdout".format(executable), "w") as f:
                    f.write(program_std_out)
                with open("{}_stderr".format(executable), "w") as f:
                    f.write(program_std_err)

                raise

        return True
コード例 #3
0
def run_define_runner(define_opt, mol_path):
    """Execute a full DefineRunner run after staging the coord file.

    Args:
        define_opt (dict): options forwarded to DefineRunner.
        mol_path (str): path of a coord file copied into the working
            directory as ``coord``; no copy is performed when falsy.
    """
    if mol_path:
        shutil.copyfile(mol_path, 'coord')
    runner = DefineRunner(define_opt, timeout=ItestConfig.define_timeout)
    completed = runner.run_full()
    # run_full returns True when define terminated normally
    assert completed