def test_error_parse(self):
    """Run SPE1, then overwrite its PRT file with a reference ERROR.PRT and
    verify that parseErrors() extracts exactly the two expected error blocks.
    """
    with TestAreaContext("ecl_run") as ta:
        self.init_config()
        ta.copy_file(
            os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/SPE1.ERROR.DATA")
        )
        ta.copy_file(
            os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/SPE1.DATA")
        )
        argv = ["run_ecl100", "2014.2", "SPE1.DATA"]
        ecl_run = EclRun(argv)
        ecl_run.runEclipse()

        # Replace the PRT produced by the run with a fixture that contains
        # known error messages, so parseErrors() has deterministic input.
        prt_file = os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/parse/ERROR.PRT")
        shutil.copy(prt_file, "SPE1.PRT")

        error_list = ecl_run.parseErrors()
        self.assertEqual(len(error_list), 2)

        # NB: The ugly white space in the error0 literal is actually part of
        # the string we are matching; i.e. it must be retained.
        # NOTE(review): the exact internal spacing/newlines of these literals
        # were mangled in this copy of the file -- confirm against the
        # ERROR.PRT fixture before trusting these expected values.
        error0 = """ @-- ERROR AT TIME 0.0 DAYS ( 1-JAN-0):
 @ UNABLE TO OPEN INCLUDED FILE
 @ /private/joaho/ERT/git/Gurbat/XXexample_grid_sim.GRDECL
 @ SYSTEM ERROR CODE IS 29 """

        error1 = """ @-- ERROR AT TIME 0.0 DAYS ( 1-JAN-0):
 @ INCLUDE FILES MISSING. """

        self.assertEqual(error_list[0], error0)
        self.assertEqual(error_list[1], error1)
def test_env(self):
    """Check that runEclipse exports the eclrun_env from the config.

    A fake ``eclrun`` script dumps its environment to ``env.json``; the test
    then verifies each configured variable: ``None`` entries must be absent,
    ``PATH`` must be prepended (startswith), and all others must match exactly.
    """
    self.init_eclrun_config()
    # The second open() creates an empty DUMMY.DATA deck as a side effect.
    # NOTE(review): the fake script's exact line layout was mangled in this
    # copy -- any reconstruction that dumps os.environ to env.json is fine.
    with open("eclrun", "w") as f, open("DUMMY.DATA", "w"):
        f.write(
            """#!/usr/bin/env python
import os
import json
with open("env.json", "w") as f:
    json.dump(dict(os.environ), f)
"""
        )
    # Make the fake eclrun executable.
    os.chmod("eclrun", os.stat("eclrun").st_mode | stat.S_IEXEC)
    ecl_config = Ecl100Config()
    eclrun_config = EclrunConfig(ecl_config, "2019.3")
    ecl_run = EclRun("DUMMY", None, check_status=False)
    # Force EclRun to invoke our fake script instead of the real eclrun.
    with mock.patch.object(
        ecl_run, "_get_run_command", mock.MagicMock(return_value="./eclrun")
    ):
        ecl_run.runEclipse(eclrun_config=eclrun_config)
    with open("env.json") as f:
        run_env = json.load(f)

    eclrun_env = self._eclrun_conf()["eclrun_env"]
    for k, v in eclrun_env.items():
        if v is None:
            # A None value means the variable must be removed for eclrun.
            assert k not in run_env
            continue
        if k == "PATH":
            # PATH entries are prepended to the inherited PATH, not replaced.
            assert run_env[k].startswith(v)
        else:
            assert v == run_env[k]
def test_mpi_run(self):
    """Run a 2-CPU parallel ECLIPSE case and verify the log files appear."""
    with TestAreaContext("ecl_run") as work_area:
        self.init_config()
        work_area.copy_file(
            os.path.join(
                self.SOURCE_ROOT, "test-data/local/eclipse/SPE1_PARALLELL.DATA"
            )
        )
        ecl_run = EclRun(["run_ecl100", "2014.2", "SPE1_PARALLELL.DATA", "2"])
        ecl_run.runEclipse()

        run_path = ecl_run.runPath()
        base = ecl_run.baseName()
        self.assertTrue(os.path.isfile(os.path.join(run_path, "%s.stderr" % base)))
        self.assertTrue(os.path.isfile(os.path.join(run_path, "%s.LOG" % base)))
def test_running_flow_given_env_variables_with_same_name_as_parent_env_variables_will_overwrite(  # noqa
    self,
):
    """Config-provided ENV1/ENV2 must shadow identically named variables
    already present in the parent environment when flow is executed."""
    version = "1111.11"

    # A stand-in "flow" executable that just dumps ENV1 and ENV2 to out.txt.
    with open("flow", "w") as script:
        script.write("#!/bin/bash\n")
        script.write("echo $ENV1 > out.txt\n")
        script.write("echo $ENV2 >> out.txt\n")
    executable = os.path.join(os.getcwd(), "flow")
    os.chmod(executable, 0o777)

    # Site config whose env section redefines both variables.
    conf = {
        "default_version": version,
        "versions": {
            version: {
                "scalar": {
                    "executable": executable,
                    "env": {"ENV1": "OVERWRITTEN1", "ENV2": "OVERWRITTEN2"},
                },
            }
        },
    }
    with open("flow_config.yml", "w") as config_file:
        config_file.write(yaml.dump(conf))

    # Pre-populate the parent environment with conflicting values.
    self.monkeypatch.setenv("ENV1", "VAL1")
    self.monkeypatch.setenv("ENV2", "VAL2")
    self.monkeypatch.setenv("FLOW_SITE_CONFIG", "flow_config.yml")

    with open("DUMMY.DATA", "w") as data_file:
        data_file.write("dummy")
    with open("DUMMY.PRT", "w") as prt_file:
        prt_file.write("Errors 0\n")
        prt_file.write("Bugs 0\n")

    # Run the fake simulator through the normal EclRun path.
    flow_config = FlowConfig()
    sim = flow_config.sim()
    flow_run = EclRun("DUMMY.DATA", sim)
    flow_run.runEclipse()

    # The script must have seen the config values, not the parent's.
    with open("out.txt") as output:
        lines = output.readlines()
    self.assertEqual(len(lines), 2)
    self.assertEqual(lines[0].strip(), "OVERWRITTEN1")
    self.assertEqual(lines[1].strip(), "OVERWRITTEN2")
def test_failed_run(self):
    """A data deck with errors must make runEclipse raise, with 'ERROR'
    somewhere in the exception message."""
    self.init_eclrun_config()
    data_src = os.path.join(
        self.SOURCE_ROOT, "test-data/local/eclipse/SPE1_ERROR.DATA"
    )
    shutil.copy(data_src, "SPE1_ERROR.DATA")

    eclrun_config = EclrunConfig(Ecl100Config(), "2019.3")
    ecl_run = EclRun("SPE1_ERROR", None)
    with self.assertRaises(Exception) as error_context:
        ecl_run.runEclipse(eclrun_config=eclrun_config)
    self.assertIn("ERROR", str(error_context.exception))
def test_create(self):
    """EclRun construction: run path, base name, CPU count and error cases.

    A mock simulator - just something executable - is sufficient here.
    """
    # Minimal site config exposing one version with scalar and MPI binaries.
    conf = {
        "versions": {
            "2014.2": {
                "scalar": {"executable": "bin/scalar_exe"},
                "mpi": {"executable": "bin/mpi_exe", "mpirun": "bin/mpirun"},
            }
        }
    }
    with open("ecl100_config.yml", "w") as config_file:
        config_file.write(yaml.dump(conf))

    os.mkdir("bin")
    self.monkeypatch.setenv("ECL100_SITE_CONFIG", "ecl100_config.yml")
    for exe in ["scalar_exe", "mpi_exe", "mpirun"]:
        exe_path = os.path.join("bin", exe)
        with open(exe_path, "w") as handle:
            handle.write("This is an exectable ...")
        os.chmod(exe_path, stat.S_IEXEC)

    with open("ECLIPSE.DATA", "w") as data_file:
        data_file.write("Mock eclipse data file")

    ecl_config = Ecl100Config()
    sim = ecl_config.sim("2014.2")
    mpi_sim = ecl_config.mpi_sim("2014.2")

    # Data file in the current directory -> run path is cwd.
    ecl_run = EclRun("ECLIPSE.DATA", sim)
    self.assertEqual(ecl_run.runPath(), os.getcwd())

    os.mkdir("path")
    with open("path/ECLIPSE.DATA", "w") as data_file:
        data_file.write("Mock eclipse data file")

    # Data file in a subdirectory -> run path follows the file.
    ecl_run = EclRun("path/ECLIPSE.DATA", sim)
    self.assertEqual(ecl_run.runPath(), os.path.join(os.getcwd(), "path"))
    self.assertEqual(ecl_run.baseName(), "ECLIPSE")
    self.assertEqual(1, ecl_run.numCpu())

    # invalid number of CPU
    with self.assertRaises(ValueError):
        ecl_run = EclRun("path/ECLIPSE.DATA", sim, num_cpu="xxx")

    ecl_run = EclRun("path/ECLIPSE.DATA", mpi_sim, num_cpu="10")
    self.assertEqual(10, ecl_run.numCpu())

    # Missing datafile
    with self.assertRaises(IOError):
        ecl_run = EclRun("DOES/NOT/EXIST", mpi_sim, num_cpu="10")
def test_flow(self):
    """Flow runs: OK deck succeeds (via EclRun and run()), ERROR deck raises
    unless --ignore-errors is given, and an invalid version raises."""
    self.init_flow_config()
    for deck in ("SPE1.DATA", "SPE1_ERROR.DATA"):
        shutil.copy(
            os.path.join(self.TESTDATA_ROOT, "local/eclipse/" + deck), deck
        )

    flow_config = FlowConfig()
    sim = flow_config.sim()

    flow_run = EclRun("SPE1.DATA", sim)
    flow_run.runEclipse()

    run(flow_config, ["SPE1.DATA"])

    flow_run = EclRun("SPE1_ERROR.DATA", sim)
    with self.assertRaises(Exception):
        flow_run.runEclipse()

    run(flow_config, ["SPE1_ERROR.DATA", "--ignore-errors"])

    # Invalid version
    with self.assertRaises(Exception):
        run(flow_config, ["SPE1.DATA", "--version=no/such/version"])
def test_run_new_log_file(self):
    """A successful 2019.3 run leaves an .OK file and a non-empty .OUT log;
    a failing executable must make runEclipse raise."""
    self.init_ecl100_config()
    shutil.copy(
        os.path.join(self.TESTDATA_ROOT, "local/eclipse/SPE1.DATA"),
        "SPE1.DATA",
    )
    sim = Ecl100Config().sim("2019.3")
    ecl_run = EclRun("SPE1.DATA", sim)
    ecl_run.runEclipse()

    run_path = ecl_run.runPath()
    base = ecl_run.baseName()
    ok_path = os.path.join(run_path, "{}.OK".format(base))
    log_path = os.path.join(run_path, "{}.OUT".format(base))

    self.assertTrue(os.path.isfile(ok_path))
    self.assertTrue(os.path.isfile(log_path))
    self.assertTrue(os.path.getsize(log_path) > 0)
    self.assertEqual(0, len(ecl_run.parseErrors()))

    # Monkey patching the ecl_run to use an executable which
    # will fail with exit(1); don't think Eclipse actually
    # fails with exit(1) - but let us at least be prepared
    # when/if it does.
    ecl_run.sim.executable = os.path.join(
        self.SOURCE_ROOT, "tests/libres_tests/res/fm/ecl_run_fail"
    )
    with self.assertRaises(Exception):
        ecl_run.runEclipse()
def test_error_parse(self):
    """Place a reference ERROR.PRT next to SPE1.DATA and check that
    EclRun.parseErrors() extracts exactly the two expected error blocks.
    """
    self.init_ecl100_config()
    shutil.copy(
        os.path.join(self.TESTDATA_ROOT, "local/eclipse/SPE1.DATA"),
        "SPE1.DATA",
    )
    # The PRT fixture with known error messages; no simulation is run here.
    prt_file = os.path.join(self.TESTDATA_ROOT, "local/eclipse/parse/ERROR.PRT")
    shutil.copy(prt_file, "SPE1.PRT")

    ecl_config = Ecl100Config()
    sim = ecl_config.sim("2014.2")
    ecl_run = EclRun("SPE1.DATA", sim)
    error_list = ecl_run.parseErrors()
    self.assertEqual(len(error_list), 2)

    # NB: The ugly white space in the error0 literal is actually part of
    # the string we are matching; i.e. it must be retained.
    # NOTE(review): the exact internal spacing/newlines of these literals
    # were mangled in this copy of the file -- confirm against the
    # ERROR.PRT fixture before trusting these expected values.
    error0 = """ @-- ERROR AT TIME 0.0 DAYS ( 1-JAN-0):
 @ UNABLE TO OPEN INCLUDED FILE
 @ /private/joaho/ERT/git/Gurbat/XXexample_grid_sim.GRDECL
 @ SYSTEM ERROR CODE IS 29 """  # noqa

    error1 = """ @-- ERROR AT TIME 0.0 DAYS ( 1-JAN-0):
 @ INCLUDE FILES MISSING. """  # noqa

    self.assertEqual(error_list[0], error0)
    self.assertEqual(error_list[1], error1)
def test_check(self):
    """Verify EclRun.checkCase(): missing/failed cases raise IOError, a
    too-short simulation raises ValueError, and matching cases pass and
    leave a CHECK_ECLIPSE_RUN.OK marker file behind."""
    full_case = os.path.join(
        self.SOURCE_ROOT, "test-data/Equinor/ECLIPSE/Gurbat/ECLIPSE"
    )
    short_case = os.path.join(
        self.SOURCE_ROOT, "test-data/Equinor/ECLIPSE/ShortSummary/ECLIPSE"
    )
    failed_case = os.path.join(
        self.SOURCE_ROOT,
        "test-data/Equinor/ECLIPSE/SummaryFail/NOR-2013A_R002_1208-0",
    )

    # These calls raise before returning, so the original's assertTrue
    # wrappers around them could never run; assert only that they raise.
    with self.assertRaises(IOError):
        EclRun.checkCase(full_case, failed_case)

    with self.assertRaises(IOError):
        EclRun.checkCase(full_case, "DOES-NOT-EXIST")

    with self.assertRaises(IOError):
        EclRun.checkCase("DOES-NOT-EXIST", full_case)

    with self.assertRaises(ValueError):
        EclRun.checkCase(full_case, short_case)

    # No OK marker may be left behind by the failing checks above.
    self.assertFalse(os.path.isfile("CHECK_ECLIPSE_RUN.OK"))

    self.assertTrue(EclRun.checkCase(full_case, full_case))
    self.assertTrue(os.path.isfile("CHECK_ECLIPSE_RUN.OK"))

    os.remove("CHECK_ECLIPSE_RUN.OK")
    # Simulation is longer than refcase - OK
    self.assertTrue(EclRun.checkCase(short_case, full_case))
    self.assertTrue(os.path.isfile("CHECK_ECLIPSE_RUN.OK"))
def test_flow(self):
    """Legacy argv-style flow runs: the OK deck succeeds, the ERROR deck
    makes runEclipse raise."""
    with TestAreaContext("ecl_run") as work_area:
        self.init_config()
        for deck in ("SPE1.DATA", "SPE1.ERROR.DATA"):
            work_area.copy_file(
                os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/" + deck)
            )
        os.makedirs("ecl_run")
        shutil.move("SPE1.DATA", "ecl_run")

        flow_run = EclRun(["run_flow", "daily", "ecl_run/SPE1.DATA"])
        flow_run.runEclipse()

        flow_run = EclRun(["run_flow", "daily", "SPE1.ERROR.DATA"])
        with self.assertRaises(Exception):
            flow_run.runEclipse()
def test_run(self):
    """An eclrun-based run leaves an .OK file, a non-empty .LOG file and no
    parse errors."""
    self.init_eclrun_config()
    shutil.copy(
        os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/SPE1.DATA"),
        "SPE1.DATA",
    )
    ecl_run = EclRun("SPE1.DATA", None)
    ecl_run.runEclipse(eclrun_config=EclrunConfig(Ecl100Config(), "2019.3"))

    base = ecl_run.baseName()
    ok_path = os.path.join(ecl_run.runPath(), "{}.OK".format(base))
    log_path = os.path.join(ecl_run.runPath(), "{}.LOG".format(base))
    self.assertTrue(os.path.isfile(ok_path))
    self.assertTrue(os.path.isfile(log_path))
    self.assertTrue(os.path.getsize(log_path) > 0)
    self.assertEqual(0, len(ecl_run.parseErrors()))
def test_failed_run(self):
    """The SPE1_ERROR deck must make runEclipse raise, with 'ERROR' in the
    exception message."""
    self.init_ecl100_config()
    shutil.copy(
        os.path.join(self.TESTDATA_ROOT, "local/eclipse/SPE1_ERROR.DATA"),
        "SPE1_ERROR.DATA",
    )
    ecl_config = Ecl100Config()
    sim = ecl_config.sim("2014.2")
    ecl_run = EclRun("SPE1_ERROR", sim)
    # Run once and inspect the exception via the context manager, instead of
    # running the (slow) simulator a second time in a try/except just to
    # look at the message, as the original did.
    with self.assertRaises(Exception) as error_context:
        ecl_run.runEclipse()
    self.assertIn("ERROR", str(error_context.exception))
def test_summary_block(self):
    """summary_block() returns None before the run and an EclSum afterwards."""
    self.init_ecl100_config()
    shutil.copy(
        os.path.join(self.TESTDATA_ROOT, "local/eclipse/SPE1.DATA"),
        "SPE1.DATA",
    )
    ecl_config = Ecl100Config()
    sim = ecl_config.sim("2014.2")
    ecl_run = EclRun("SPE1.DATA", sim)
    # assertIsNone / assertIsInstance give clearer failure messages than
    # assertTrue(... is None) / assertTrue(isinstance(...)).
    self.assertIsNone(ecl_run.summary_block())
    ecl_run.runEclipse()
    self.assertIsInstance(ecl_run.summary_block(), EclSum)
def test_failed_run_OK(self):
    """With --ignore-errors / check_status=False, simulator failures are
    swallowed rather than raised."""
    self.init_ecl100_config()
    shutil.copy(
        os.path.join(self.TESTDATA_ROOT, "local/eclipse/SPE1_ERROR.DATA"),
        "SPE1_ERROR.DATA",
    )
    ecl_config = Ecl100Config()
    run(ecl_config, ["SPE1_ERROR", "--version=2014.2", "--ignore-errors"])

    # Monkey patching the ecl_run to use an executable which will fail with
    # exit(1); in the nocheck mode that should also be OK.
    ecl_run = EclRun("SPE1_ERROR", ecl_config.sim("2014.2"), check_status=False)
    ecl_run.sim.executable = os.path.join(
        self.SOURCE_ROOT, "tests/libres_tests/res/fm/ecl_run_fail"
    )
    ecl_run.runEclipse()
def test_failed_run_OK(self):
    """run_ecl100_nocheck ignores deck errors, and even a plain exit(1)
    from the executable."""
    with TestAreaContext("ecl_run") as work_area:
        self.init_config()
        work_area.copy_file(
            os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/SPE1.ERROR.DATA")
        )
        ecl_run = EclRun(["run_ecl100_nocheck", "2014.2", "SPE1.ERROR"])
        ecl_run.runEclipse()

        # Monkey patching the ecl_run to use an executable which will fail
        # with exit(1); in the nocheck mode that should also be OK.
        ecl_run.sim.executable = os.path.join(
            self.SOURCE_ROOT, "python/tests/res/fm/ecl_run_fail"
        )
        ecl_run.runEclipse()
def test_summary_block(self):
    """summary_block() returns None before the run and an EclSum afterwards
    (legacy argv-style API)."""
    with TestAreaContext("ecl_run") as ta:
        self.init_config()
        ta.copy_file(
            os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/SPE1.ERROR.DATA")
        )
        ta.copy_file(
            os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/SPE1.DATA")
        )
        ecl_run = EclRun(["run_ecl100", "2014.2", "SPE1"])
        # assertIsNone / assertIsInstance give clearer failure messages than
        # assertTrue(... is None) / assertTrue(isinstance(...)).
        self.assertIsNone(ecl_run.summary_block())
        ecl_run.runEclipse()
        self.assertIsInstance(ecl_run.summary_block(), EclSum)
def test_failed_run(self):
    """The SPE1.ERROR deck must make runEclipse raise, with 'ERROR' in the
    exception message (legacy argv-style API)."""
    with TestAreaContext("ecl_run") as ta:
        self.init_config()
        ta.copy_file(
            os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/SPE1.ERROR.DATA")
        )
        ecl_run = EclRun(["run_ecl100", "2014.2", "SPE1.ERROR"])
        # Run once and inspect the exception via the context manager, instead
        # of running the (slow) simulator a second time in a try/except just
        # to look at the message, as the original did.
        with self.assertRaises(Exception) as error_context:
            ecl_run.runEclipse()
        self.assertIn("ERROR", str(error_context.exception))
def test_run(self):
    """Legacy argv run: stderr/LOG/OK files appear, no parse errors, and a
    failing executable makes runEclipse raise."""
    with TestAreaContext("ecl_run") as work_area:
        self.init_config()
        work_area.copy_file(
            os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/SPE1.DATA")
        )
        os.makedirs("ecl_run")
        shutil.move("SPE1.DATA", "ecl_run")

        ecl_run = EclRun(["run_ecl100", "2014.2", "ecl_run/SPE1.DATA"])
        ecl_run.runEclipse()

        run_path = ecl_run.runPath()
        base = ecl_run.baseName()
        for suffix in ("stderr", "LOG", "OK"):
            self.assertTrue(
                os.path.isfile(os.path.join(run_path, "%s.%s" % (base, suffix)))
            )
        self.assertEqual(0, len(ecl_run.parseErrors()))

        # Monkey patching the ecl_run to use an executable which
        # will fail with exit(1); don't think Eclipse actually
        # fails with exit(1) - but let us at least be prepared
        # when/if it does.
        ecl_run.sim.executable = os.path.join(
            self.SOURCE_ROOT, "tests/classes/ecl_run_fail"
        )
        with self.assertRaises(Exception):
            ecl_run.runEclipse()
def test_summary_block(self):
    """summary_block() returns None before the run and an EclSum afterwards
    (eclrun-based API)."""
    self.init_eclrun_config()
    shutil.copy(
        os.path.join(self.SOURCE_ROOT, "test-data/local/eclipse/SPE1.DATA"),
        "SPE1.DATA",
    )
    ecl_config = Ecl100Config()
    ecl_run = EclRun("SPE1.DATA", None)
    # assertIsNone / assertIsInstance give clearer failure messages than
    # assertTrue(... is None) / assertTrue(isinstance(...)).
    self.assertIsNone(ecl_run.summary_block())
    ecl_run.runEclipse(eclrun_config=EclrunConfig(ecl_config, "2019.3"))
    self.assertIsInstance(ecl_run.summary_block(), EclSum)
def test_create(self):
    """Exercise legacy argv-based EclRun construction and its validation:
    arg counts, simulator name, version lookup, CPU count, missing datafile.
    """
    # This test can make do with a mock simulator; - just something executable
    with TestAreaContext("ecl_run"):
        self.init_config()
        # Minimal site config exposing one ecl100 version with scalar + MPI.
        conf = {
            "simulators": {
                "ecl100": {
                    "2014.2": {
                        "scalar": {"executable": "bin/scalar_exe"},
                        "mpi": {
                            "executable": "bin/mpi_exe",
                            "mpirun": "bin/mpirun",
                        },
                    }
                }
            }
        }
        with open("ecl_config.yml", "w") as f:
            f.write(yaml.dump(conf))

        os.mkdir("bin")
        os.environ["ECL_SITE_CONFIG"] = "ecl_config.yml"
        # Create fake executables with the exec bit set.
        for f in ["scalar_exe", "mpi_exe", "mpirun"]:
            fname = os.path.join("bin", f)
            with open(fname, "w") as fh:
                fh.write("This is an exectable ...")
            os.chmod(fname, stat.S_IEXEC)

        # Wrong arg count
        with self.assertRaises(ValueError):
            argv = []
            ecl_run = EclRun(argv)

        # Wrong arg count
        with self.assertRaises(ValueError):
            argv = [1, 2, 3, 4, 5]
            ecl_run = EclRun(argv)

        with open("ECLIPSE.DATA", "w") as f:
            f.write("Mock eclipse data file")

        # Unknown simulator in argv[0]
        with self.assertRaises(ValueError):
            argv = ["Simulator", "2014.2", "ECLIPSE.DATA"]
            ecl_run = EclRun(argv)

        # Data file in cwd -> run path is cwd.
        ecl_run = EclRun(["run_ecl100", "2014.2", "ECLIPSE.DATA"])
        self.assertEqual(ecl_run.runPath(), os.getcwd())

        os.mkdir("path")
        with open("path/ECLIPSE.DATA", "w") as f:
            f.write("Mock eclipse data file")

        # Data file in a subdirectory -> run path follows the file.
        ecl_run = EclRun(["run_ecl100", "2014.2", "path/ECLIPSE.DATA"])
        self.assertEqual(ecl_run.runPath(), os.path.join(os.getcwd(), "path"))
        self.assertEqual(ecl_run.baseName(), "ECLIPSE")

        # Default CPU count is 1 when no count argument is given.
        argv = ["run_ecl100", "2014.2", "ECLIPSE.DATA"]
        ecl_run = EclRun(argv)
        self.assertEqual(1, ecl_run.numCpu())

        # invalid number of CPU
        with self.assertRaises(ValueError):
            argv = ["run_ecl100", "2014.2", "ECLIPSE.DATA", "xx"]
            ecl_run = EclRun(argv)

        argv = ["run_ecl100", "2014.2", "ECLIPSE.DATA", "10"]
        ecl_run = EclRun(argv)
        self.assertEqual(10, ecl_run.numCpu())

        # Unknown version in the site config raises KeyError.
        argv = ["run_ecl100", "MISSING_VERSION", "ECLIPSE.DATA", "10"]
        with self.assertRaises(KeyError):
            ecl_run = EclRun(argv)

        # Missing datafile
        with self.assertRaises(IOError):
            argv = ["run_ecl100", "2014.2", "ECLIPSE_DOES_NOT_EXIST.DATA"]
            ecl_run = EclRun(argv)