def test_formatting():
    """Verify that compressed output is at most 79 characters wide."""
    random_values = " ".join(str(value) for value in np.random.rand(1, 100)[0])
    filelines = ["PORO", random_values, "/"]
    formatted = compress_multiple_keywordsets(find_keyword_sets(filelines), filelines)
    assert max(len(line) for line in formatted) <= 79

    # Some keywords will not tolerate random newlines in their
    # data-section, at least the multi-record keywords, so a line
    # containing a slash should never be wrapped:
    filelines = ["VFPPROD", " FOO" * 30 + " /"]
    # Fed through eclcompress, this line gets wrapped due to its length:
    formatted = compress_multiple_keywordsets(find_keyword_sets(filelines), filelines)
    assert len(formatted) > 2
def test_eclcompress_module_parsecontext():
    """Compress the module-level FILELINES and ensure that the output can
    be parsed by opm.io using the shared OPMIO_PARSECONTEXT.

    NOTE(review): this test was previously also named
    ``test_eclcompress`` and was silently shadowed (never collected by
    pytest) by the later definition of the same name in this file; it has
    been renamed so that both tests execute.
    """
    kwsets = find_keyword_sets(FILELINES)
    compressed = compress_multiple_keywordsets(kwsets, FILELINES)
    compressedstr = "\n".join(compressed)
    assert opm.io.Parser().parse_string(compressedstr, OPMIO_PARSECONTEXT)
def test_grid_grdecl():
    """A typical grid.grdecl file must be able to do compression on the
    COORDS/ZCORN keywords, while conserving the other two"""
    # NOTE(review): the line breaks inside the two grdecl strings below
    # were reconstructed from a whitespace-mangled source -- verify
    # against the original file before trusting exact formatting.
    filelines = """
SPECGRID
214 669 49 1 F
/
GDORIENT
INC INC INC DOWN RIGHT
/
ZCORN
1 1 1 1 1 1
/
""".split(
        "\n"
    )
    kwsets = find_keyword_sets(filelines)
    # SPECGRID and GDORIENT must pass through untouched; the six ones in
    # the ZCORN data section compress to a single run-length token.
    assert (
        compress_multiple_keywordsets(kwsets, filelines)
        == """
SPECGRID
214 669 49 1 F
/
GDORIENT
INC INC INC DOWN RIGHT
/
ZCORN
6*1
/
""".split(
            "\n"
        )
    )
def test_multiplerecords():
    """Compression of keywords with multiple records, for which
    eclcompress only supports compressing the first record.

    Conservation of the remainder of the keyword is critical to test.
    """
    filelines = [
        "EQUALS",
        " MULTZ 0.017101 1 40 1 64 5 5 / nasty comment without comment characters",
        "/",
    ]
    result = compress_multiple_keywordsets(find_keyword_sets(filelines), filelines)
    assert result == [
        "EQUALS",
        " MULTZ 0.017101 1 40 1 64 2*5 / nasty comment without comment characters",
        "/",
    ]

    filelines = [
        "EQUALS",
        "1 1 / nasty comment/",
        "2 2 / foo",
        "3 3 /",
        "/",
        "PERMX",
        "1 1 /",
    ]
    result = compress_multiple_keywordsets(find_keyword_sets(filelines), filelines)
    # Only the first record of EQUALS is compressed:
    assert result == [
        "EQUALS",
        " 2*1 / nasty comment/",
        "2 2 / foo",
        "3 3 /",
        "/",
        "PERMX",
        " 2*1 /",
    ]

    filelines = ["EQUALS", "1 1//", "2 2 / foo", "/"]
    result = compress_multiple_keywordsets(find_keyword_sets(filelines), filelines)
    assert result == [
        "EQUALS",
        " 2*1 //",
        "2 2 / foo",
        "/",
    ]
def test_eclcompress():
    """Compress a given set of lines and ensure that the output can be
    parsed by opm.io."""
    compressed_lines = compress_multiple_keywordsets(
        find_keyword_sets(FILELINES), FILELINES
    )
    # Feed the compressed string into opm.io. OPM hopefully chokes on
    # whatever Eclipse would choke on (and hopefully not on more..)
    parsecontext = opm.io.ParseContext(
        [("PARSE_MISSING_DIMS_KEYWORD", opm.io.action.ignore)]
    )
    assert opm.io.Parser().parse_string("\n".join(compressed_lines), parsecontext)
def test_find_keyword_sets():
    """Check the indexing of list of strings into Eclipse keywords"""
    assert find_keyword_sets(["PORO", "0 1 2 3", "4 5 6", "/"]) == [(0, 3)]

    # Without a terminating slash, nothing is found:
    assert find_keyword_sets(["PORO", "0 1 2 3", "4 5 6"]) == []

    # A keyword with an empty data section is found, but compression
    # leaves it untouched:
    kw_nodata = ["PORO", "/"]
    kw_sets = find_keyword_sets(kw_nodata)
    assert kw_sets == [(0, 1)]
    assert compress_multiple_keywordsets(kw_sets, kw_nodata) == kw_nodata
def test_whitespace(tmpdir):
    """Ensure excessive whitespace is not added"""
    # NOTE(review): the line breaks inside kw_string were reconstructed
    # from a whitespace-mangled source -- verify against the original.
    kw_string = """
MULTIPLY
'PORO' 2 /
/"""
    filelines = kw_string.splitlines()
    # A MULTIPLY keyword is conserved, so the lines come back unchanged:
    assert (
        compress_multiple_keywordsets(find_keyword_sets(filelines), filelines)
        == filelines
    )

    # Test the same when the string is read from a file:
    tmpdir.chdir()
    Path("test.inc").write_text(kw_string)
    eclcompress("test.inc")
    compressed_lines = Path("test.inc").read_text().splitlines()
    # The compressed output should have only two header lines added and one
    # empty lines after the header added:
    assert len(compressed_lines) == len(filelines) + 3
def test_eclkw_regexp(tmpdir):
    """Test that custom regular expressions can be supplied to compress
    otherwise unknown (which implies no compression) keywords

    NOTE(review): this legacy (tmpdir-based) version is shadowed by the
    later definition of the same name further down in this file, so
    pytest never collects it -- it should be deleted.
    """
    tmpdir.chdir()
    uncompressed_str = "G1\n0 0 0 0 0 0 0 0 0 0 0 0 0\n/"
    # Nothing is found by default here.
    assert not find_keyword_sets(uncompressed_str.split())
    # Only if we specify a regexp catching this odd keyword name:
    kw_sets = find_keyword_sets(uncompressed_str.split(), eclkw_regexp="G1")
    kwend_idx = len(uncompressed_str.split()) - 1
    assert kw_sets == [(0, kwend_idx)]
    assert compress_multiple_keywordsets(kw_sets, uncompressed_str.split()) == [
        "G1",
        " 13*0",
        "/",
    ]
    with open("g1.grdecl", "w") as f_handle:
        f_handle.write(uncompressed_str)
    # Alternative regexpes that should also work with this G1:
    # NOTE(review): the two statements below assign the boolean result
    # of a comparison instead of asserting it, so they verify nothing;
    # "{1-8}"/"{2-8}" are also invalid regex quantifier syntax (should
    # be "{1,8}"/"{2,8}"). Fixed in the live duplicate of this test.
    kw_sets = find_keyword_sets(
        uncompressed_str.split(), eclkw_regexp="[A-Z]{1-8}$"
    ) == [(0, kwend_idx)]
    kw_sets = find_keyword_sets(
        uncompressed_str.split(), eclkw_regexp="[A-Z0-9]{2-8}$"
    ) == [(0, kwend_idx)]
    sys.argv = ["eclcompress", "g1.grdecl", "--eclkw_regexp", "G1"]
    main()
    compressed = open("g1.grdecl").read()
    assert "File compressed with eclcompress" in compressed
    assert "13*0" in compressed
def test_eclkw_regexp(tmp_path, mocker):
    """Test that custom regular expressions can be supplied to compress
    otherwise unknown (which implies no compression) keywords"""
    os.chdir(tmp_path)
    uncompressed_str = "G1\n0 0 0 0 0 0 0 0 0 0 0 0 0\n/"

    # Nothing is found by default here.
    assert not find_keyword_sets(uncompressed_str.split())

    # Only if we specify a regexp catching this odd keyword name:
    kw_sets = find_keyword_sets(uncompressed_str.split(), eclkw_regexp="G1")
    kwend_idx = len(uncompressed_str.split()) - 1
    assert kw_sets == [(0, kwend_idx)]
    assert compress_multiple_keywordsets(kw_sets, uncompressed_str.split()) == [
        "G1",
        " 13*0",
        "/",
    ]
    Path("g1.grdecl").write_text(uncompressed_str, encoding="utf8")

    # Alternative regexps that should also match the G1 keyword.
    # BUGFIX(review): these statements previously assigned the boolean
    # result of the "==" comparison to kw_sets instead of asserting it,
    # so they verified nothing. They also used the invalid quantifier
    # syntax "{1-8}"/"{2-8}" (literal text, not a repetition count), and
    # "[A-Z]" alone can never match "G1" since it contains a digit.
    assert find_keyword_sets(
        uncompressed_str.split(), eclkw_regexp="[A-Z0-9]{1,8}$"
    ) == [(0, kwend_idx)]
    assert find_keyword_sets(
        uncompressed_str.split(), eclkw_regexp="[A-Z0-9]{2,8}$"
    ) == [(0, kwend_idx)]

    mocker.patch("sys.argv", ["eclcompress", "g1.grdecl", "--eclkw_regexp", "G1"])
    main()
    compressed = Path("g1.grdecl").read_text(encoding="utf8")
    assert "File compressed with eclcompress" in compressed
    assert "13*0" in compressed
def test_include_statement():
    """An INCLUDE statement must survive compression untouched; such
    files have historically been tricky not to destroy."""
    filelines = ["INCLUDE", " '../include/grid/grid.grdecl' /"]
    assert (
        compress_multiple_keywordsets(find_keyword_sets(filelines), filelines)
        == filelines
    )
def test_compress_multiple_keywordsets():
    """Test compression of sample lines"""
    # Pairs of (input lines, expected compressed lines), checked in order:
    cases = [
        (
            ["PORO", "0 0 0 3", "4 5 6", "/ postslashcomment"],
            ["PORO", " 3*0 3 4 5 6", "/ postslashcomment"],
        ),
        (
            ["PORO", "0 0 0 3", "4 5 6", "/"],
            ["PORO", " 3*0 3 4 5 6", "/"],
        ),
        (
            ["PORO", "0 0 0 3", "4 5 6 /"],
            ["PORO", " 3*0 3 4 5 6 /"],
        ),
        (
            ["PORO", "0 0 0 3", "4 5 6 / postslashcomment"],
            ["PORO", " 3*0 3 4 5 6 / postslashcomment"],
        ),
        (
            ["PORO", "0 0 0 3 4 5 6 / postslashcomment"],
            ["PORO", " 3*0 3 4 5 6 / postslashcomment"],
        ),
        (
            ["PORO", "0 0 /", "PERMX", "1 1 /"],
            ["PORO", " 2*0 /", "PERMX", " 2*1 /"],
        ),
        # An empty line between keywords must be preserved:
        (
            ["PORO", "0 0 /", "", "PERMX", "1 1 /"],
            ["PORO", " 2*0 /", "", "PERMX", " 2*1 /"],
        ),
        (
            ["-- comment", "PORO", "0 0", "/"],
            ["-- comment", "PORO", " 2*0", "/"],
        ),
        # Comments containing slashes must not confuse the compressor:
        (
            ["-- nastycomment with / slashes", "PORO", "0 0", "/"],
            ["-- nastycomment with / slashes", "PORO", " 2*0", "/"],
        ),
    ]
    for filelines, expected in cases:
        kwsets = find_keyword_sets(filelines)
        assert compress_multiple_keywordsets(kwsets, filelines) == expected