Example no. 1
    def test_error_parse(self):
        with TestAreaContext("ecl_run") as ta:
            self.init_config()
            ta.copy_file(os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/SPE1.ERROR.DATA"))
            ta.copy_file(os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/SPE1.DATA"))

            argv = ["run_ecl100", "2014.2", "SPE1.DATA"]
            ecl_run = EclRun(argv)
            ecl_run.runEclipse()

            prt_file = os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/parse/ERROR.PRT")
            shutil.copy(prt_file, "SPE1.PRT")

            error_list = ecl_run.parseErrors()
            self.assertEqual(len(error_list), 2)


            # NB: The ugly white space in the error0 literal is actually part of
            #     the string we are matching; i.e. it must be retained.
            error0 = """ @--  ERROR  AT TIME        0.0   DAYS    ( 1-JAN-0):
 @           UNABLE TO OPEN INCLUDED FILE                                    
 @           /private/joaho/ERT/git/Gurbat/XXexample_grid_sim.GRDECL         
 @           SYSTEM ERROR CODE IS       29                                   """

            error1 = """ @--  ERROR  AT TIME        0.0   DAYS    ( 1-JAN-0):
 @           INCLUDE FILES MISSING.                                          """
            
            self.assertEqual(error_list[0], error0)
            self.assertEqual(error_list[1], error1)
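
The two expected blocks above show the layout of ERROR messages in an ECLIPSE .PRT file: a header line starting with " @--  ERROR", followed by continuation lines starting with " @". The hypothetical parse_prt_errors helper below is a minimal sketch of scanning for that layout; it is not the actual EclRun.parseErrors implementation, only an illustration of the block format the assertions match.

# Hypothetical helper, not EclRun.parseErrors(): collect ERROR blocks from an
# ECLIPSE .PRT file, where a block is a " @--  ERROR" header followed by
# " @" continuation lines (trailing whitespace is kept, as in the literals above).
def parse_prt_errors(prt_path):
    errors = []
    current = None
    with open(prt_path) as prt:
        for raw_line in prt:
            line = raw_line.rstrip("\n")
            if line.startswith(" @--  ERROR"):
                if current is not None:
                    errors.append("\n".join(current))
                current = [line]  # start a new error block
            elif current is not None and line.startswith(" @"):
                current.append(line)  # continuation line of the current block
            elif current is not None:
                errors.append("\n".join(current))  # first non-"@" line ends the block
                current = None
    if current is not None:
        errors.append("\n".join(current))
    return errors
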
Example no. 2
    def test_env(self):
        self.init_eclrun_config()
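        # Create a fake "eclrun" executable that dumps its environment to
        # env.json, together with an empty DUMMY.DATA deck for the run.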
        with open("eclrun", "w") as f, open("DUMMY.DATA", "w"):
            f.write("""#!/usr/bin/env python
import os
import json
with open("env.json", "w") as f:
    json.dump(dict(os.environ), f)
""")
        os.chmod("eclrun", os.stat("eclrun").st_mode | stat.S_IEXEC)
        ecl_config = Ecl100Config()
        eclrun_config = EclrunConfig(ecl_config, "2019.3")
        ecl_run = EclRun("DUMMY", None, check_status=False)
        with mock.patch.object(ecl_run, "_get_run_command",
                               mock.MagicMock(return_value="./eclrun")):
            ecl_run.runEclipse(eclrun_config=eclrun_config)
        with open("env.json") as f:
            run_env = json.load(f)

        eclrun_env = self._eclrun_conf()["eclrun_env"]
        for k, v in eclrun_env.items():
            if v is None:
                assert k not in run_env
                continue

            if k == "PATH":
                assert run_env[k].startswith(v)
            else:
                assert v == run_env[k]
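
The assertions above imply a merging policy for the eclrun_env section of the configuration: a key whose value is None must be absent from the child environment, a PATH value is prepended to the inherited PATH, and any other value simply overrides the inherited one. The merge_eclrun_env function below is a hypothetical sketch of that policy, not the merging code in res itself.

import os

# Hypothetical sketch of the eclrun_env merging policy that the assertions
# above rely on; the real merging happens inside res, not here.
def merge_eclrun_env(eclrun_env, base_env=None):
    env = dict(os.environ if base_env is None else base_env)
    for key, value in eclrun_env.items():
        if value is None:
            env.pop(key, None)  # None means: make sure the variable is unset
        elif key == "PATH" and key in env:
            env[key] = value + os.pathsep + env[key]  # prepend to inherited PATH
        else:
            env[key] = value  # plain override
    return env
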
Example no. 3
    def test_flow(self):
        self.init_flow_config()
        shutil.copy(
            os.path.join(self.TESTDATA_ROOT, "local/eclipse/SPE1.DATA"),
            "SPE1.DATA",
        )
        shutil.copy(
            os.path.join(self.TESTDATA_ROOT, "local/eclipse/SPE1_ERROR.DATA"),
            "SPE1_ERROR.DATA",
        )
        flow_config = FlowConfig()
        sim = flow_config.sim()
        flow_run = EclRun("SPE1.DATA", sim)
        flow_run.runEclipse()

        run(flow_config, ["SPE1.DATA"])

        flow_run = EclRun("SPE1_ERROR.DATA", sim)
        with self.assertRaises(Exception):
            flow_run.runEclipse()

        run(flow_config, ["SPE1_ERROR.DATA", "--ignore-errors"])

        # Invalid version
        with self.assertRaises(Exception):
            run(flow_config, ["SPE1.DATA", "--version=no/such/version"])
Example no. 4
    def test_mpi_run(self):
        with TestAreaContext("ecl_run") as ta:
            self.init_config()
            ta.copy_file(os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/SPE1_PARALLELL.DATA"))
            argv = ["run_ecl100", "2014.2", "SPE1_PARALLELL.DATA", "2"]
            ecl_run = EclRun(argv)
            ecl_run.runEclipse()
            self.assertTrue(os.path.isfile(os.path.join(ecl_run.runPath(), "%s.stderr" % ecl_run.baseName())))
            self.assertTrue(os.path.isfile(os.path.join(ecl_run.runPath(), "%s.LOG" % ecl_run.baseName())))
Example no. 5
    def test_running_flow_given_env_variables_with_same_name_as_parent_env_variables_will_overwrite(  # noqa
        self,
    ):
        version = "1111.11"

        # create a script that prints env vars ENV1 and ENV2 to a file
        with open("flow", "w") as f:
            f.write("#!/bin/bash\n")
            f.write("echo $ENV1 > out.txt\n")
            f.write("echo $ENV2 >> out.txt\n")
        executable = os.path.join(os.getcwd(), "flow")
        os.chmod(executable, 0o777)

        # create a flow_config.yml with environment extension ENV2
        conf = {
            "default_version": version,
            "versions": {
                version: {
                    "scalar": {
                        "executable": executable,
                        "env": {"ENV1": "OVERWRITTEN1", "ENV2": "OVERWRITTEN2"},
                    },
                }
            },
        }

        with open("flow_config.yml", "w") as f:
            f.write(yaml.dump(conf))

        # set the environment variable ENV1
        self.monkeypatch.setenv("ENV1", "VAL1")
        self.monkeypatch.setenv("ENV2", "VAL2")
        self.monkeypatch.setenv("FLOW_SITE_CONFIG", "flow_config.yml")

        with open("DUMMY.DATA", "w") as f:
            f.write("dummy")

        with open("DUMMY.PRT", "w") as f:
            f.write("Errors 0\n")
            f.write("Bugs 0\n")

        # run the script
        flow_config = FlowConfig()
        sim = flow_config.sim()
        flow_run = EclRun("DUMMY.DATA", sim)
        flow_run.runEclipse()

        # assert that the script was able to read both the variables correctly
        with open("out.txt") as f:
            lines = f.readlines()

        self.assertEqual(len(lines), 2)
        self.assertEqual(lines[0].strip(), "OVERWRITTEN1")
        self.assertEqual(lines[1].strip(), "OVERWRITTEN2")
Example no. 6
    def test_failed_run_OK(self):
        with TestAreaContext("ecl_run") as ta:
            self.init_config()
            ta.copy_file(os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/SPE1.ERROR.DATA"))
            argv = ["run_ecl100_nocheck", "2014.2", "SPE1.ERROR"]
            ecl_run = EclRun(argv)
            ecl_run.runEclipse()

            # Monkey patch the ecl_run to use an executable which will fail
            # with exit(1); in nocheck mode that should also be OK.
            ecl_run.sim.executable = os.path.join(self.SOURCE_ROOT, "python/tests/res/fm/ecl_run_fail")
            ecl_run.runEclipse()
Example no. 7
    def test_failed_run(self):
        self.init_eclrun_config()
        shutil.copy(
            os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/SPE1_ERROR.DATA"),
            "SPE1_ERROR.DATA",
        )
        ecl_config = Ecl100Config()
        eclrun_config = EclrunConfig(ecl_config, "2019.3")
        ecl_run = EclRun("SPE1_ERROR", None)
        with self.assertRaises(Exception) as error_context:
            ecl_run.runEclipse(eclrun_config=eclrun_config)
        self.assertIn("ERROR", str(error_context.exception))
Example no. 8
    def test_summary_block(self):
        with TestAreaContext("ecl_run") as ta:
            self.init_config()
            ta.copy_file(os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/SPE1.ERROR.DATA"))
            ta.copy_file(os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/SPE1.DATA"))
            argv = ["run_ecl100", "2014.2", "SPE1"]
            ecl_run = EclRun(argv)
            ret_value = ecl_run.summary_block()
            self.assertTrue(ret_value is None)

            ecl_run.runEclipse()
            ecl_sum = ecl_run.summary_block()
            self.assertTrue(isinstance(ecl_sum, EclSum))
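
The test expects summary_block() to return None before the simulation has produced summary files and an EclSum instance afterwards. The helper below is a hypothetical sketch of that behaviour; it assumes an unformatted, unified summary (<case>.SMSPEC plus <case>.UNSMRY) and that the ecl package's EclSum can be constructed from the case path. It is not the EclRun implementation.

import os

from ecl.summary import EclSum

# Hypothetical summary_block() sketch: return None until the summary files
# exist, then load them with EclSum. Assumes an unformatted, unified summary;
# not the actual EclRun implementation.
def summary_block(run_path, base_name):
    case = os.path.join(run_path, base_name)
    if not (os.path.isfile(case + ".SMSPEC") and os.path.isfile(case + ".UNSMRY")):
        return None
    return EclSum(case)
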
Example no. 9
    def test_failed_run(self):
        with TestAreaContext("ecl_run") as ta:
            self.init_config()
            ta.copy_file(os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/SPE1.ERROR.DATA"))
            argv = ["run_ecl100", "2014.2", "SPE1.ERROR"]
            ecl_run = EclRun(argv)
            with self.assertRaises(Exception) as error_context:
                ecl_run.runEclipse()
            self.assertIn("ERROR", str(error_context.exception))
Example no. 10
    def test_flow(self):
        with TestAreaContext("ecl_run") as ta:
            self.init_config()
            ta.copy_file(os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/SPE1.DATA"))
            ta.copy_file(os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/SPE1.ERROR.DATA"))
            os.makedirs("ecl_run")
            shutil.move("SPE1.DATA", "ecl_run")
            argv = ["run_flow", "daily", "ecl_run/SPE1.DATA"]
            flow_run = EclRun(argv)
            flow_run.runEclipse()

            flow_run = EclRun(["run_flow", "daily", "SPE1.ERROR.DATA"])
            with self.assertRaises(Exception):
                flow_run.runEclipse()
Example no. 11
    def test_summary_block(self):
        self.init_eclrun_config()
        shutil.copy(
            os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/SPE1.DATA"),
            "SPE1.DATA",
        )
        ecl_config = Ecl100Config()
        ecl_run = EclRun("SPE1.DATA", None)
        ret_value = ecl_run.summary_block()
        self.assertTrue(ret_value is None)

        ecl_run.runEclipse(eclrun_config=EclrunConfig(ecl_config, "2019.3"))
        ecl_sum = ecl_run.summary_block()
        self.assertTrue(isinstance(ecl_sum, EclSum))
Example no. 12
    def test_run(self):
        self.init_eclrun_config()
        shutil.copy(
            os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/SPE1.DATA"),
            "SPE1.DATA",
        )
        ecl_config = Ecl100Config()

        ecl_run = EclRun("SPE1.DATA", None)
        ecl_run.runEclipse(eclrun_config=EclrunConfig(ecl_config, "2019.3"))

        ok_path = os.path.join(ecl_run.runPath(), "{}.OK".format(ecl_run.baseName()))
        log_path = os.path.join(ecl_run.runPath(), "{}.LOG".format(ecl_run.baseName()))

        self.assertTrue(os.path.isfile(ok_path))
        self.assertTrue(os.path.isfile(log_path))
        self.assertTrue(os.path.getsize(log_path) > 0)

        errors = ecl_run.parseErrors()
        self.assertEqual(0, len(errors))
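
The .OK file checked above is a marker for a successful run: it only exists when the simulator exited cleanly and no ERROR blocks were found in the .PRT file. The mark_run_ok function below is a hypothetical sketch of that convention, not the actual EclRun logic.

import os

# Hypothetical sketch of the ".OK" marker convention relied on above:
# write an empty <base>.OK file in the run path only when the simulator
# exited with status 0 and parseErrors() found no ERROR blocks.
# This is an illustration, not the actual EclRun code.
def mark_run_ok(ecl_run, exit_status):
    if exit_status == 0 and len(ecl_run.parseErrors()) == 0:
        ok_path = os.path.join(ecl_run.runPath(), "%s.OK" % ecl_run.baseName())
        open(ok_path, "w").close()
        return True
    return False
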
Example no. 13
    def test_run(self):
        with TestAreaContext("ecl_run") as ta:
            self.init_config()
            ta.copy_file(os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/SPE1.DATA"))
            os.makedirs("ecl_run")
            shutil.move("SPE1.DATA", "ecl_run")
            argv = ["run_ecl100", "2014.2", "ecl_run/SPE1.DATA"]
            ecl_run = EclRun(argv)
            ecl_run.runEclipse()

            self.assertTrue(os.path.isfile(os.path.join(ecl_run.runPath(), "%s.stderr" % ecl_run.baseName())))
            self.assertTrue(os.path.isfile(os.path.join(ecl_run.runPath(), "%s.LOG" % ecl_run.baseName())))
            self.assertTrue(os.path.isfile(os.path.join(ecl_run.runPath(), "%s.OK" % ecl_run.baseName())))

            errors = ecl_run.parseErrors()
            self.assertEqual(0, len(errors))

            # Monkey patch the ecl_run to use an executable which will fail
            # with exit(1); Eclipse probably does not actually fail with
            # exit(1), but let us at least be prepared when/if it does.
            ecl_run.sim.executable = os.path.join(self.SOURCE_ROOT, "tests/classes/ecl_run_fail")
            with self.assertRaises(Exception):
                ecl_run.runEclipse()