def test_LLINEAR(self):
    """The Heidrun LGR case must expose the LLINEARS summary key."""
    summary = EclSum(
        self.createTestPath(
            "Equinor/ECLIPSE/Heidrun/LGRISSUE/EM-LTAA-ISEG_CARFIN_NWPROPS"
        )
    )
    self.assertTrue(summary.has_key("LLINEARS"))
def test_invalid(self):
    """Loading mismatched or non-summary files must raise IOError."""
    case = create_case()
    with TestAreaContext("sum_invalid"):
        case.fwrite()

        # A plain text file is neither an SMSPEC nor a UNSMRY file.
        with open("CASE.txt", "w") as stream:
            stream.write("No - this is not EclKW file ....")

        with self.assertRaises(IOError):
            case2 = EclSum.load("CSV.SMSPEC", "CASE.txt")
        with self.assertRaises(IOError):
            case2 = EclSum.load("CASE.txt", "CSV.UNSMRY")

        # A well-formed keyword file is still not valid summary data.
        kw1 = EclKW("TEST1", 30, EclDataType.ECL_INT)
        kw2 = EclKW("TEST2", 30, EclDataType.ECL_INT)
        with openFortIO("CASE.KW", FortIO.WRITE_MODE) as fortio:
            kw1.fwrite(fortio)
            kw2.fwrite(fortio)

        with self.assertRaises(IOError):
            case2 = EclSum.load("CSV.SMSPEC", "CASE.KW")
        with self.assertRaises(IOError):
            case2 = EclSum.load("CASE.KW", "CSV.UNSMRY")
def test_identify_var_type(self):
    """Summary keys must be classified into the correct variable types."""
    self.assertEnumIsFullyDefined(
        EclSumVarType,
        "ecl_smspec_var_type",
        "lib/include/ert/ecl/smspec_node.h",
    )

    # Classification straight from the key string.
    self.assertEqual(EclSum.varType("WWCT:OP_X"), EclSumVarType.ECL_SMSPEC_WELL_VAR)
    self.assertEqual(EclSum.varType("RPR"), EclSumVarType.ECL_SMSPEC_REGION_VAR)
    self.assertEqual(EclSum.varType("WNEWTON"), EclSumVarType.ECL_SMSPEC_MISC_VAR)
    self.assertEqual(EclSum.varType("AARQ:4"), EclSumVarType.ECL_SMSPEC_AQUIFER_VAR)

    case = createEclSum(
        "CSV",
        [
            ("FOPT", None, 0, "SM3"),
            ("FOPR", None, 0, "SM3/DAY"),
            ("AARQ", None, 10, "???"),
            ("RGPT", None, 1, "SM3"),
        ],
    )

    # Classification via smspec nodes of a freshly written case.
    field_node = case.smspec_node("FOPT")
    self.assertEqual(field_node.varType(), EclSumVarType.ECL_SMSPEC_FIELD_VAR)

    aquifer_node = case.smspec_node("AARQ:10")
    self.assertEqual(aquifer_node.varType(), EclSumVarType.ECL_SMSPEC_AQUIFER_VAR)
    self.assertEqual(aquifer_node.getNum(), 10)

    region_node = case.smspec_node("RGPT:1")
    self.assertEqual(region_node.varType(), EclSumVarType.ECL_SMSPEC_REGION_VAR)
    self.assertEqual(region_node.getNum(), 1)
    self.assertTrue(region_node.isTotal())

    # Node ordering and equality semantics.
    self.assertLess(field_node, region_node)
    self.assertGreater(aquifer_node, region_node)
    self.assertEqual(field_node, field_node)
    self.assertNotEqual(field_node, aquifer_node)

    with self.assertRaises(TypeError):
        a = field_node < 1
def test_identify_var_type(self):
    """Summary keys must be classified into the correct variable types."""
    self.assertEnumIsFullyDefined(
        EclSumVarType,
        "ecl_smspec_var_type",
        "lib/include/ert/ecl/smspec_node.h",
    )

    # Classification straight from the key string.
    self.assertEqual(EclSum.varType("WWCT:OP_X"), EclSumVarType.ECL_SMSPEC_WELL_VAR)
    self.assertEqual(EclSum.varType("RPR"), EclSumVarType.ECL_SMSPEC_REGION_VAR)
    self.assertEqual(EclSum.varType("WNEWTON"), EclSumVarType.ECL_SMSPEC_MISC_VAR)
    self.assertEqual(EclSum.varType("AARQ:4"), EclSumVarType.ECL_SMSPEC_AQUIFER_VAR)

    case = createEclSum(
        "CSV",
        [
            ("FOPT", None, 0),
            ("FOPR", None, 0),
            ("AARQ", None, 10),
            ("RGPT", None, 1),
        ],
    )

    # Classification via smspec nodes of a freshly written case.
    field_node = case.smspec_node("FOPT")
    self.assertEqual(field_node.varType(), EclSumVarType.ECL_SMSPEC_FIELD_VAR)

    aquifer_node = case.smspec_node("AARQ:10")
    self.assertEqual(aquifer_node.varType(), EclSumVarType.ECL_SMSPEC_AQUIFER_VAR)
    self.assertEqual(aquifer_node.getNum(), 10)

    region_node = case.smspec_node("RGPT:1")
    self.assertEqual(region_node.varType(), EclSumVarType.ECL_SMSPEC_REGION_VAR)
    self.assertEqual(region_node.getNum(), 1)
    self.assertTrue(region_node.isTotal())

    # Node ordering and equality semantics.
    self.assertLess(field_node, region_node)
    self.assertGreater(aquifer_node, region_node)
    self.assertEqual(field_node, field_node)
    self.assertNotEqual(field_node, aquifer_node)

    with self.assertRaises(TypeError):
        a = field_node < 1
def test_file_io(self):
    """Round-trip: nex2ecl output written to disk must reload identically."""
    with TestAreaContext('test_file_io'):
        ecl_sum = nex2ecl(self.plt, 'ECL_CASE', format=False, field_name='FIELD')
        ecl_sum.fwrite()

        smspec_path = os.path.join(os.getcwd(), 'ECL_CASE.SMSPEC')
        self.assertTrue(os.path.exists(smspec_path))

        ecl_sum_loaded = EclSum('ECL_CASE')
        self.assertEqual(len(ecl_sum), len(ecl_sum_loaded))
        self.assertIn('WGPT:2', ecl_sum_loaded)

        # The reloaded WGPT:2 vector must match the CGP values of well "2"
        # in the source plot file.
        WGPT2 = list(ecl_sum_loaded.get_values('WGPT:2'))
        well_mask = (
            (self.plt['classname'] == 'WELL')
            & (self.plt['instancename'] == '2')
            & (self.plt['varname'] == 'CGP')
        )
        CGP = self.plt.loc[well_mask]['value'].tolist()
        self.assertEqual(WGPT2, CGP)

        # Dates must survive the round-trip as start_date + elapsed days.
        dates_loaded = ecl_sum_loaded.dates
        dates_plt = [
            self.plt.start_date + datetime.timedelta(days=days)
            for days in self.plt.time.unique()
        ]
        self.assertEqual(dates_loaded, dates_plt)
def create_result(name, keys, indicator):
    """Load summary vectors for case *name* and dump them to a CSV file.

    Args:
        name: Case name; the summary is read from ``model_folder/<name>.DATA``.
        keys: Iterable of summary key patterns, or None for ``["WOPR:*"]``.
        indicator: Optional sub-folder of ``csv_folder`` to write into.
    """
    summary = EclSum('model_folder/%s.DATA' % name)
    dates = summary.dates

    if keys is None:
        keys = ["WOPR:*"]

    # Expand each pattern into the concrete matching summary keys.
    all_keys = []
    for key in keys:
        all_keys.extend(summary.keys(key))

    results = [list(summary.numpy_vector(key)) for key in all_keys]
    if not results:
        # Nothing matched: report and skip writing a file.
        return print(
            'Результаты из модели не загрузились. Файл с результатами не был создан'
        )

    result_df = pd.DataFrame(data=np.array(results).T, index=dates, columns=all_keys)
    result_df.index.name = 'Time'

    if indicator is not None:
        # makedirs(exist_ok=True) avoids the check-then-create race of the
        # original os.path.exists(...) is False / os.mkdir combination.
        os.makedirs(f'csv_folder/{indicator}', exist_ok=True)
        result_df.to_csv(f'csv_folder/{indicator}/%s.csv' % name)
    else:
        result_df.to_csv('csv_folder/%s.csv' % name)
    print('%s_RESULT.csv is created' % name)
def test_segment(self):
    """Segment variables SOFR:F-8:* must exist with non-negative segment numbers."""
    summary = EclSum(self.createTestPath("Statoil/ECLIPSE/Oseberg/F8MLT/F8MLT-F4"))
    segment_vars = summary.keys("SOFR:F-8:*")
    self.assertIn("SOFR:F-8:1", segment_vars)
    for var in segment_vars:
        segment_nr = int(var.split(":")[2])
        self.assertTrue(segment_nr >= 0)
def test_segment(self):
    """Segment variables SOFR:F-8:* must exist with non-negative segment numbers."""
    summary = EclSum(self.createTestPath("Equinor/ECLIPSE/Oseberg/F8MLT/F8MLT-F4"))
    segment_vars = summary.keys("SOFR:F-8:*")
    self.assertIn("SOFR:F-8:1", segment_vars)
    for var in segment_vars:
        segment_nr = int(var.split(":")[2])
        self.assertTrue(segment_nr >= 0)
def _sync_eclsum_to_record(location: Path, smry_keys: List[str]) -> Dict[str, NumericalRecord]:
    """Build one NumericalRecord per summary key, mapping date string to value."""
    eclsum = EclSum(str(location))
    return {
        key: NumericalRecord(
            data={
                str(date): float(value)
                for date, value in zip(eclsum.dates, eclsum.numpy_vector(key))
            }
        )
        for key in smry_keys
    }
def test_load_case(self):
    """Load a simple coarse-grid case and sanity-check length and FOPR."""
    path = os.path.join(self.TESTDATA_ROOT,
                        "local/ECLIPSE/cp_simple3/SIMPLE_SUMMARY3")
    case = EclSum(path)
    self.assertFloatEqual(case.sim_length, 545.0)

    # Indexed access into the FOPR vector must agree with iteration.
    fopr = case.numpy_vector("FOPR")
    for time_index, value in enumerate(fopr):
        self.assertEqual(fopr[time_index], value)
def test_restart(self):
    """History, restarted base case, and the no-restart variant must all load."""
    hist = EclSum(
        self.createTestPath("Equinor/ECLIPSE/sum-restart/history/T07-4A-W2011-18-P1"))
    base = EclSum(
        self.createTestPath("Equinor/ECLIPSE/sum-restart/prediction/BASECASE"))
    # Same case loaded without following the restart chain.
    pred = EclSum(
        self.createTestPath("Equinor/ECLIPSE/sum-restart/prediction/BASECASE"),
        include_restart=False)

    for summary in (hist, base, pred):
        self.assertIsNotNone(summary)
def test_ix_caseII(self):
    """An Intersect-generated SMSPEC must load and expose well control keys."""
    troll_summary = EclSum(
        self.createTestPath(
            "Equinor/ECLIPSE/ix/troll/IX_NOPH3_R04_75X75X1_GRID2.SMSPEC"
        )
    )
    self.assertIsNotNone(troll_summary)
    self.assertTrue("WMCTL:Q21BH1" in list(troll_summary.keys()))
def test_summary_collector():
    """Collected FOPR observations must equal the refcase FOPRH report values."""
    res_config = ResConfig("snake_oil.ert")
    ert = EnKFMain(res_config)
    summary = EclSum("refcase/SNAKE_OIL_FIELD.UNSMRY")

    data = SummaryObservationCollector.loadObservationData(ert, "default_0")

    expected = summary.numpy_vector("FOPRH", report_only=True).tolist()
    assert data["FOPR"].values.tolist() == expected
def _load_smry_into_table(smry_filename: str) -> pa.Table:
    """
    Reads data from SMRY file into PyArrow Table.
    DATE column is stored as an Arrow timetamp with ms resolution, timestamp[ms]
    All numeric columns will be stored as 32 bit float
    Summary meta data will be attached per field/column of the table's schema
    under the 'smry_meta' key
    """
    # Eager load: the whole UNSMRY is read up front, restart chain excluded.
    eclsum = EclSum(smry_filename, include_restart=False, lazy_load=False)

    # For now, we go via a set to prune out duplicate entries being returned by EclSumKeyWordVector,
    # see: https://github.com/equinor/ecl/issues/816#issuecomment-865881283
    column_names: List[str] = list(
        set(EclSumKeyWordVector(eclsum, add_keywords=True)))

    # Exclude CPI columns from export
    org_col_count = len(column_names)
    column_names = [
        colname for colname in column_names if not _is_cpi_column(colname)
    ]
    if len(column_names) != org_col_count:
        logger.info(
            f"Excluding {org_col_count - len(column_names)} CPI columns from export"
        )

    # Fetch the dates as a numpy array with ms resolution
    np_dates_ms = eclsum.numpy_dates

    smry_meta_dict = _create_smry_meta_dict(eclsum, column_names)

    # Datatypes to use for DATE column and all the numeric columns
    dt_timestamp_ms = pa.timestamp("ms")
    dt_float32 = pa.float32()

    # Build schema for the table; the per-column summary metadata is attached
    # as Arrow field metadata under the b"smry_meta" key.
    field_list: List[pa.Field] = []
    field_list.append(pa.field("DATE", dt_timestamp_ms))
    for colname in column_names:
        field_metadata = {b"smry_meta": json.dumps(smry_meta_dict[colname])}
        field_list.append(
            pa.field(colname, dt_float32, metadata=field_metadata))
    schema = pa.schema(field_list)

    # Now extract all the summary vectors one by one
    # We do this through EclSum.numpy_vector() instead of EclSum.pandas_frame() since
    # the latter throws an exception if the SMRY data has timestamps beyond 2262,
    # see: https://github.com/equinor/ecl/issues/802
    column_arrays = [np_dates_ms]

    for colname in column_names:
        colvector = eclsum.numpy_vector(colname)
        column_arrays.append(colvector)

    table = pa.table(column_arrays, schema=schema)

    return table
def test_load_case_lazy_and_eager(self):
    """Lazy and eager loading must yield identical date vectors."""
    path = os.path.join(self.TESTDATA_ROOT,
                        "local/ECLIPSE/cp_simple3/SHORT.UNSMRY")
    lazy_dates = EclSum(path, lazy_load=True).numpy_dates
    eager_dates = EclSum(path, lazy_load=False).numpy_dates

    self.assertEqual(len(lazy_dates), 107)
    self.assertEqual(len(eager_dates), 107)
    for lazy_date, eager_date in zip(lazy_dates, eager_dates):
        self.assertEqual(lazy_date, eager_date)
def test_Heidrun(self):
    """Load the Heidrun case and check length, FOPT total and time range."""
    summary = EclSum(
        self.createTestPath("Statoil/ECLIPSE/Heidrun/Summary/FF12_2013B3_CLEAN_RS"))
    self.assertEqual(452, len(summary))
    self.assertFloatEqual(1.8533144e+8, summary.last_value("FOPT"))

    # Monthly range starting before the data window; the first returned
    # point is clamped to the data.
    trange = summary.timeRange(start=datetime.date(2015, 1, 1), interval="1M")
    self.assertTrue(trange[0] == datetime.date(2016, 2, 1))

    # Interpolation must succeed at every point of the range.
    for t in trange:
        summary.get_interp("FOPT", date=t)
class EclCase(object):
    """Wrapper around one ECLIPSE case giving access to its summary data."""

    def __init__(self, case):
        self.case = case
        self.grid = None
        self.restart = None
        self.init = None
        self.summary = None
        # Bug fix: the constructor called the non-existent self.loadSummary();
        # the method is named load_summary(), so construction always raised
        # AttributeError.
        self.load_summary()

    def __contains__(self, key):
        return key in self.summary

    def keys(self):
        return self.summary.keys()

    def wells(self):
        return self.summary.wells()

    def load_summary(self):
        # Opens <case>.SMSPEC / <case>.UNSMRY via libecl.
        self.summary = EclSum(self.case)

    def start_time_equal(self, other):
        """True when both cases start their summary data at the same time."""
        return self.summary.getDataStartTime() == other.summary.getDataStartTime()

    def end_time_equal(self, other):
        """True when both cases end at the same time."""
        return self.summary.getEndTime() == other.summary.getEndTime()

    def cmp_summary_vector(self, other, key, sample=100):
        """Compare vector *key* between this case and *other*.

        The vectors are interpolated at *sample* evenly spaced points over
        the shared simulation length.

        Returns:
            (diff_sum, ref_sum): accumulated absolute difference and the
            accumulated reference values.

        Raises:
            KeyError: if *key* is missing from either case.
        """
        if key in self and key in other:
            days_total = min(self.summary.getSimulationLength(),
                             other.summary.getSimulationLength())
            dt = days_total / (sample - 1)
            days = [x * dt for x in range(sample)]

            ref_data = self.summary.get_interp_vector(key, days_list=days)
            test_data = other.summary.get_interp_vector(key, days_list=days)
            diff_data = ref_data - test_data

            ref_sum = sum(ref_data)
            diff_sum = sum(abs(diff_data))
            return (diff_sum, ref_sum)
        else:
            raise KeyError("Key:%s was not present in both cases" % key)
def directSimulation(self,controls,outputDir='DIRECT'):
    """Executes a Direct Simulation (no additional algorithm) and saves info to a .mat file.

    Args:
        controls (dict): Schedule controls.
        outputDir: Folder to save data (default: 'DIRECT')
    """
    fIn = 'BASE/{}UNI.DATA'.format(self.caseName)     #
    fOut = 'UNI{}.DATA'.format(self.caseName)         # File settings
    _x = np.shape(controls['prod'])[0]                #
    # NOTE(review): 90-day incremental BHP/BHP schedule — confirm against StormEclSupport docs.
    StormEclSupport.schedule('UNISCHEDULE.INC','incremental',self.wells,controls,90,_x,('BHP','BHP'))  # Generate Files
    StormEclSupport.configureEclFiles(fIn,fOut,schName='UNISCHEDULE.INC')  #
    tmp = self.dumpName
    self.dumpName = outputDir  # Change Flow's dump directory...
    self.execFlow(fOut)        # ... execute Flow...
    self.dumpName = tmp        # ... and retrieve the original info
    dump = '{}/UNI{}.UNSMRY'.format(outputDir,self.caseName)  #
    self.curData = EclSum(dump)  # Load resultant summary file
    t = self.curData.get_days()  # Time array
    _n = len(self.wells['prod'] + self.wells['inje'])  # _n: number of wells
    self.rates['wopr'] = [[] for i in range(_n)]  # Rate initializations
    self.rates['woir'] = [[] for i in range(_n)]
    self.rates['wwpr'] = [[] for i in range(_n)]
    self.rates['wwir'] = [[] for i in range(_n)]
    self.rates['wgpr'] = [[] for i in range(_n)]
    self.rates['wgir'] = [[] for i in range(_n)]
    # Collect the six per-well rate vectors from the freshly loaded summary.
    for well in self.curData.wells():
        _j = self.wellIndex(well)
        wopr,woir,wwpr,wwir,wgpr,wgir = self.genVectors(well)  # Get rates for each well
        self.rates['wopr'][_j].extend(list(wopr))
        self.rates['woir'][_j].extend(list(woir))
        self.rates['wwpr'][_j].extend(list(wwpr))
        self.rates['wwir'][_j].extend(list(wwir))
        self.rates['wgpr'][_j].extend(list(wgpr))
        self.rates['wgir'][_j].extend(list(wgir))
    s0 = self.s0  # (TODO: make initial saturation flexible)
    t0 = self.t0
    eps = self.eps
    info, infoMod = self.calculateNPV(t0=t0,modified=False), self.calculateNPV(t0=t0,s0=s0,eps=eps,mnpv=True)  # Calculate NPV and MNPV
    dataMat = {  # Bundle data into a dictionary
        'wellNPV' : info['wellNPVs'],
        'wellMNPV' : infoMod['wellNPVs'],
        'wellSat' : info['s'],
        't' : np.array(t),
        'NPV' : info['NPV'],
        'MNPV' : infoMod['NPV'],
        'wopr' : self.rates['wopr'],
        'wwpr' : self.rates['wwpr'],
        'wwir' : self.rates['wwir']
    }
    sio.savemat('dumpUniData.mat',dataMat)  # Save data into a .mat file
def test_stringlist_setitem(self):
    """Assigning into the wells StringList must not alias earlier reads."""
    summary = EclSum(self.case)
    wells = summary.wells()

    wells[0] = "Bjarne"
    well0 = wells[0]
    self.assertTrue(well0 == "Bjarne")
    self.assertTrue(wells[0] == "Bjarne")

    # Re-assignment changes the list element but not the string already read.
    wells[0] = "XXX"
    self.assertTrue(well0 == "Bjarne")
    self.assertTrue(wells[0] == "XXX")
def test_ix_case(self):
    """Intersect and ECLIPSE-100 summaries of the same model must agree on keys."""
    intersect_summary = EclSum(
        self.createTestPath("Equinor/ECLIPSE/ix/summary/CREATE_REGION_AROUND_WELL")
    )
    self.assertIsNotNone(intersect_summary)

    wgnames = [
        intersect_summary.smspec_node(key).wgname
        for key in intersect_summary.keys()
    ]
    self.assertTrue("HWELL_PROD" in wgnames)

    eclipse_summary = EclSum(
        self.createTestPath(
            "Equinor/ECLIPSE/ix/summary/ECL100/E100_CREATE_REGION_AROUND_WELL"
        )
    )
    self.assertIsNotNone(eclipse_summary)

    # ECLIPSE truncates the well name to 8 characters; pad it back before
    # comparing the key lists.
    def hwell_padder(key):
        if key.split(":")[-1] != "HWELL_PR":
            return key
        return key + "OD"

    self.assertEqual(
        intersect_summary.keys("WWCT*"),
        list(map(hwell_padder, eclipse_summary.keys("WWCT*"))),
    )
def _libecl_eclsum_pandas_frame(
    eclsum: EclSum,
    time_index: Optional[Union[List[dt.date], List[dt.datetime]]] = None,
    column_keys: Optional[List[str]] = None,
) -> pd.DataFrame:
    """Build a Pandas dataframe from an EclSum object.

    Temporarily copied from libecl to circumvent bug
    https://github.com/equinor/ecl/issues/802
    """
    if column_keys is None:
        # No filter: take every keyword in the summary.
        keywords = EclSumKeyWordVector(eclsum, add_keywords=True)
    else:
        keywords = EclSumKeyWordVector(eclsum)
        for key in column_keys:
            keywords.add_keywords(key)

    if len(keywords) == 0:
        raise ValueError("No valid key")

    # pylint: disable=protected-access
    if time_index is None:
        time_index = eclsum.dates  # Changed from libecl
        data = np.zeros([len(time_index), len(keywords)])
        # Fill `data` in place through the private libecl C binding.
        EclSum._init_pandas_frame(
            eclsum, keywords, data.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
        )
    else:
        # Interpolate onto the caller-supplied time points.
        time_points = eclsum._make_time_vector(time_index)
        data = np.zeros([len(time_points), len(keywords)])
        EclSum._init_pandas_frame_interp(
            eclsum,
            keywords,
            time_points,
            data.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
        )

    # Do not give datetime64[ms] to Pandas, it will try to convert it
    # to datetime64[ns] and error hard if it is out of bounds (year 2262)
    assert isinstance(time_index[0], (dt.date, dt.datetime))
    frame = pd.DataFrame(
        index=time_index,
        columns=list(keywords),
        data=data,
    )

    # frame.index.type is now either datetime64[ns] or datetime.datetime (object)
    # depending on whether the date range ended before 2262.
    return frame
def loadSummary(self,fname):
    """Loads a summary file to the @curData attribute.

    Args:
        fname (str): Summary File Name

    Raises:
        StormBackendException: File not found.

    Returns:
        Nones.
    """
    try:
        # Creates an Eclipse Summary class for data handling
        self.curData = EclSum(fname)
    except Exception as err:
        # Narrowed from a bare ``except`` (which also swallowed
        # KeyboardInterrupt/SystemExit) and chained so the original
        # failure is preserved for debugging.
        raise StormBackendException('Eclipse Summary File "{}" not found.'.format(fname)) from err
def test_different_names(self):
    """EclSum.load must pair SMSPEC/UNSMRY files with different basenames."""
    case = create_case()
    with TestAreaContext("sum_different"):
        case.fwrite()
        shutil.move("CSV.SMSPEC", "CSVX.SMSPEC")

        # Missing SMSPEC or UNSMRY must raise.
        with self.assertRaises(IOError):
            case2 = EclSum.load("Does/not/exist", "CSV.UNSMRY")
        with self.assertRaises(IOError):
            case2 = EclSum.load("CSVX.SMSPEC", "CSVX.UNSMRY")

        # Mixed basenames are fine as long as both files exist.
        case2 = EclSum.load("CSVX.SMSPEC", "CSV.UNSMRY")
        self.assert_solve(case2)
    self.assertEqual(case.unit("FOPR"), "SM3/DAY")
def test_run_default(self):
    """Exercise the command line script: argument validation, a normal run,
    and a run resampled against a reference case."""
    with TestAreaContext(""):
        self.case.fwrite()

        # Too few arguments
        with self.assertRaises(CallError):
            subprocess.check_call([self.script])

        # Too few arguments
        with self.assertRaises(CallError):
            subprocess.check_call([self.script, "CSV"])

        # Invalid first arguments
        with self.assertRaises(CallError):
            subprocess.check_call([self.script, "DOES_NOT_EXIST", "OUTPUT"])

        # Should run OK:
        subprocess.check_call([self.script, "CSV", "OUTPUT"])
        output_case = EclSum("OUTPUT")
        # Without a refcase the output keeps the input's time window.
        self.assertEqual(
            output_case.get_data_start_time(),
            self.case.get_data_start_time())
        self.assertEqual(
            output_case.get_end_time(),
            self.case.get_end_time())

        # A non-existing refcase must make the script fail.
        with self.assertRaises(CallError):
            subprocess.check_call([self.script, "CSV", "OUTPUT", "--refcase=does/not/exist"])

        refcase = create_case( num_mini_step = 7, case = "REFCASE")
        refcase.fwrite()
        subprocess.check_call([self.script, "CSV", "OUTPUT", "--refcase=REFCASE"])
        output_case = EclSum("OUTPUT")
        # With a refcase the output is resampled onto the refcase's window.
        self.assertEqual(
            output_case.get_data_start_time(),
            refcase.get_data_start_time())
        self.assertEqual(
            output_case.get_end_time(),
            refcase.get_end_time())

        time_points = output_case.alloc_time_vector(False)
        t1 = output_case.alloc_time_vector(False)
        t2 = refcase.alloc_time_vector(False)
        self.assertEqual(t1,t2)
def getReportStepTimeFromRefcase(self, refcase, report_step):
    """Return the ctime of *report_step* in *refcase*, memoized.

    The looked-up time is cached in ``self.report_times`` so repeated
    queries for the same report step avoid another C-library call.
    """
    # PEP 8 idiom fix: ``x not in y`` instead of ``not x in y``.
    if report_step not in self.report_times:
        self.report_times[report_step] = (
            EclSum.cNamespace().get_report_time(refcase, report_step).ctime()
        )
    return self.report_times[report_step]
def __init__(
    self,
    input_case: Union[Path, str],
    perforation_handling_strategy: str = "bottom_point",
):
    """Open all ECLIPSE output files belonging to *input_case*.

    Loads the summary (SMSPEC/UNSMRY), the .EGRID grid, the .UNRST
    restart file, and builds WellInfo (with segment data) from the grid
    and restart.

    Args:
        input_case: Path to the case, with or without extension; sibling
            files are found by swapping the suffix.
        perforation_handling_strategy: Strategy name passed through for
            later perforation processing.
    """
    super().__init__()

    self._input_case: Path = Path(input_case)
    self._eclsum = EclSum(str(self._input_case))
    self._grid = EclGrid(str(self._input_case.with_suffix(".EGRID")))
    self._restart = EclFile(str(self._input_case.with_suffix(".UNRST")))
    self._wells = WellInfo(
        self._grid, rst_file=self._restart, load_segment_information=True
    )

    self._perforation_handling_strategy: str = perforation_handling_strategy
def __init__(self, argv):
    """Parse *argv*, load and validate the YAML STEA configuration, and
    optionally open the referenced ECLIPSE summary case.

    Raises:
        IOError: if the config file does not exist.
        ValueError: if the config cannot be read or fails schema validation.
    """
    args = parse_args(argv)
    if not os.path.isfile(args.config_file):
        raise IOError("No such file:{}".format(args.config_file))

    try:
        schema = _build_schema()
        defaults = {"stea_server": "https://ws2291.statoil.net"}
        with open(args.config_file, "r") as config_file:
            config_dict = yaml.safe_load(config_file)
        # Command-line ecl_case overrides the one from the config file.
        if args.ecl_case:
            config_dict["ecl_case"] = args.ecl_case
        config = configsuite.ConfigSuite(
            config_dict,
            schema,
            layers=(defaults, ),
            deduce_required=True,
        )
        if not config.valid:
            raise ValueError(
                "Config file is not a valid config file: {}".format(
                    config.errors))
        self.config = config
    except Exception as ex:
        # NOTE(review): this also re-wraps the ValueError raised just above —
        # presumably intentional so every failure surfaces as ValueError.
        raise ValueError(
            "Could not load config file: {file}\nFull message: {ex}".
            format(file=args.config_file, ex=ex))

    if self.ecl_case is not None:
        # Replace the case path with a loaded EclSum instance.
        self.ecl_case = EclSum(self.ecl_case)
def __init__(self, argv):
    """Parse *argv* and load the YAML STEA configuration.

    Raises:
        IOError: if the config file does not exist.
        ValueError: if the config file cannot be parsed.
    """
    args = parse_args(argv)
    if not os.path.isfile(args.config_file):
        raise IOError("No such file:{}".format(args.config_file))

    try:
        # yaml.safe_load via a context manager: the original used the
        # deprecated/unsafe yaml.load() on a file handle it never closed.
        with open(args.config_file) as config_stream:
            config = yaml.safe_load(config_stream)
    except Exception as ex:
        # Narrowed from a bare ``except`` and chained to keep the cause.
        raise ValueError("Could not load config file: {}".format(
            args.config_file)) from ex

    # Command-line ecl_case overrides the one from the config file.
    if args.ecl_case:
        config[SteaInputKeys.ECL_CASE] = args.ecl_case

    self.config_date = parse_date(config[SteaInputKeys.CONFIG_DATE])
    self.project_id = config[SteaInputKeys.PROJECT_ID]
    self.project_version = config[SteaInputKeys.PROJECT_VERSION]
    self.results = config[SteaInputKeys.RESULTS]
    self.server = config.get(SteaInputKeys.SERVER, stea_server)

    self.profiles = {}
    for profile_id, profile_data in config.get(SteaInputKeys.PROFILES,
                                               {}).items():
        self.profiles[profile_id] = profile_data

    self.ecl_profiles = {}
    for profile_id, profile_data in config.get(SteaInputKeys.ECL_PROFILES,
                                               {}).items():
        self.ecl_profiles[profile_id] = profile_data

    self.ecl_case = None
    if SteaInputKeys.ECL_CASE in config:
        # Replace the case path with a loaded EclSum instance.
        self.ecl_case = EclSum(config[SteaInputKeys.ECL_CASE])
def test_convert_historical(self):
    """Historical QOP data must come back as the WOPRH vector after conversion."""
    plt = nex.load('test-data/SPE1.plt')

    # Hand-built historical plot table for well "1": five QOP samples.
    hplt = pandas.DataFrame({
        'timestep': [0] * 5,
        'time': [400, 800, 1200, 1600, 2000],
        'classname': ['WELL'] * 5,
        'instancename': ['1'] * 5,
        'varname': ['QOP'] * 5,
        'value': [100, 200, 300, 400, 500],
    })
    # Copy the grid/date metadata from the real plot file.
    hplt.start_date = plt.start_date
    hplt.nx = plt.nx
    hplt.ny = plt.ny
    hplt.nz = plt.nz
    hplt._unit_system = plt._unit_system

    with TestAreaContext('nex_test_convert_historical'):
        ecl_sum = nex._nex2ecl.nex2ecl(plt, 'SPE1',
                                       field_name='FIELD', hist_data=hplt)
        ecl_sum.fwrite()
        loaded = EclSum('SPE1')

        woprh = [node.value for node in list(loaded['WOPRH:1'])]
        qop = hplt.value.tolist()
        self.assertEqual(woprh[1:5], qop[1:])
def test_get_weighted_avg_press_time_derivative_lag2():
    """Test that weighted_avg_press_time_derivative_lag2 is calcuated correctly"""
    summary = EclSum(DATAFILEPATH)
    wbhp = welltest_dpds.summary_vec(summary, "WBHP:55_33-1")
    rate = welltest_dpds.summary_vec(summary, "WOPR:55_33-1")
    time = np.array(summary.days) * 24.0

    # Locate the first build-up period and transform to superposition time.
    bu_start, bu_end = welltest_dpds.get_buildup_indices(rate)
    first_start, first_end = bu_start[0], bu_end[0]
    supertime = welltest_dpds.supertime(time, rate, first_start, first_end)

    d_press = np.diff(wbhp[first_start + 1:first_end + 1])
    dspt = np.diff(supertime)

    dpdspt_w_lag2 = welltest_dpds.weighted_avg_press_time_derivative_lag2(
        d_press,
        dspt,
        supertime,
        wbhp,
        first_start,
        first_end,
    )

    print(len(dpdspt_w_lag2))
    print(dpdspt_w_lag2)

    assert len(dpdspt_w_lag2) == 247
    assert dpdspt_w_lag2[0] == pytest.approx(0.43083638)
    assert dpdspt_w_lag2[-1] == pytest.approx(0.12729989)
class EclSumVectorTest(EclTest):
    """EclSumVector behaviour on the Gurbat reference case."""

    def setUp(self):
        self.test_file = self.createTestPath("Statoil/ECLIPSE/Gurbat/ECLIPSE.SMSPEC")
        self.ecl_sum = EclSum(self.test_file)

    def test_reportOnly_warns(self):
        # The report_only flag is deprecated and must emit exactly one warning.
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            vector = EclSumVector(self.ecl_sum, "FOPT", True)
            assert len(caught) == 1
            assert issubclass(caught[-1].category, DeprecationWarning)

    def test_basic(self):
        self.assertEqual(512, len(self.ecl_sum.keys()))

        prefix = 'EclSum(name'
        self.assertEqual(prefix, repr(self.ecl_sum)[:len(prefix)])

        # Grab the first vector via iteration + indexing.
        iterator = iter(self.ecl_sum)
        vector = self.ecl_sum[next(iterator)]  # EclSumVector
        self.assertEqual(63, len(vector))
        self.assertEqual('BARSA', vector.unit)

        prefix = 'EclSumVector(key = '
        self.assertEqual(prefix, repr(vector)[:len(prefix)])
def test_create(self):
    """Build EnkfObs with and without refcase history; load observation configs."""
    ensemble_config = EnsembleConfig()

    # Without history the observation container is empty and invalid.
    obs = EnkfObs(ensemble_config)
    self.assertEqual(len(obs), 0)
    self.assertFalse(obs.valid)
    with self.assertRaises(ValueError):
        obs.load(self.obs_config)
    self.assertEqual(len(obs), 0)

    time_map = TimeMap()
    obs = EnkfObs(ensemble_config, external_time_map=time_map)
    self.assertEqual(len(obs), 0)

    # With grid + refcase history the configs can actually be loaded.
    grid = EclGrid(self.grid)
    refcase = EclSum(self.refcase)
    history = History(refcase, False)
    obs = EnkfObs(ensemble_config, grid=grid, history=history)
    self.assertTrue(obs.valid)

    with self.assertRaises(IOError):
        obs.load("/does/not/exist")

    obs.load(self.obs_config)
    self.assertTrue(obs.valid)
    self.assertEqual(len(obs), 33)

    obs.clear()
    self.assertEqual(len(obs), 0)

    obs.load(self.obs_config)
    self.assertEqual(len(obs), 33)
    self.assertNotIn("RFT2", obs)

    # Loading a second config appends to the existing observations.
    obs.load(self.obs_config2)
    self.assertEqual(len(obs), 35)
    self.assertIn("RFT2", obs)
class EclSumVectorTest(EclTest):
    """EclSumVector behaviour on the Gurbat reference case."""

    def setUp(self):
        self.test_file = self.createTestPath(
            "Equinor/ECLIPSE/Gurbat/ECLIPSE.SMSPEC")
        self.ecl_sum = EclSum(self.test_file)

    def test_reportOnly_warns(self):
        # The report_only flag is deprecated and must emit exactly one warning.
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            vector = EclSumVector(self.ecl_sum, "FOPT", True)
            self.assertEqual(len(caught), 1)
            assert issubclass(caught[-1].category, DeprecationWarning)

    def test_basic(self):
        self.assertEqual(512, len(self.ecl_sum.keys()))

        prefix = "EclSum(name"
        self.assertEqual(prefix, repr(self.ecl_sum)[:len(prefix)])

        # Grab the first vector via iteration + indexing.
        iterator = iter(self.ecl_sum)
        vector = self.ecl_sum[next(iterator)]  # EclSumVector
        self.assertEqual(63, len(vector))
        self.assertEqual("BARSA", vector.unit)

        prefix = "EclSumVector(key = "
        self.assertEqual(prefix, repr(vector)[:len(prefix)])
def __init__(self, argv):
    """Parse *argv* and validate the YAML STEA configuration via configsuite.

    Raises:
        IOError: if the config file does not exist.
        ValueError: if the config cannot be read or fails validation.
    """
    args = parse_args(argv)
    if not os.path.isfile(args.config_file):
        raise IOError("No such file:{}".format(args.config_file))
    try:
        schema = _build_schema()
        defaults = {
            'stea_server': "https://ws2291.statoil.net",
            'profiles': {}
        }
        # yaml.safe_load via a context manager: the original used the
        # deprecated/unsafe yaml.load() on a file handle it never closed.
        with open(args.config_file) as config_stream:
            raw_config = yaml.safe_load(config_stream)
        config = configsuite.ConfigSuite(raw_config,
                                         schema,
                                         layers=(defaults, ))
        if not config.valid:
            # Typo fix: the message previously read "vadid".
            raise ValueError(
                'Config file is not a valid config file: {}'.format(
                    config.errors))
        self.config = config
    except Exception as ex:
        raise ValueError(
            "Could not load config file: {file}\nFull message: {ex}".
            format(file=args.config_file, ex=ex))

    # Command-line ecl_case is pushed as an extra configsuite layer.
    if args.ecl_case:
        ecl_case_data = {'ecl_case': args.ecl_case}
        self.config = self.config.push(ecl_case_data)

    if self.ecl_case is not None:
        # Replace the case path with a loaded EclSum instance.
        self.ecl_case = EclSum(self.ecl_case)
def test_write_not_implemented(self):
    """A lazily loaded case cannot be written back; fwrite must raise."""
    path = os.path.join(self.TESTDATA_ROOT,
                        "local/ECLIPSE/cp_simple3/SIMPLE_SUMMARY3")
    case = EclSum(path, lazy_load=True)
    self.assertFalse(case.can_write())
    with self.assertRaises(NotImplementedError):
        case.fwrite()
def test_rates(self):
    """SI conversion must be the identity for this (metric unit) case."""
    grid_path = self.createTestPath("Statoil/ECLIPSE/Gurbat/ECLIPSE.EGRID")
    rst_path = self.createTestPath("Statoil/ECLIPSE/Gurbat/ECLIPSE.UNRST")
    sum_path = self.createTestPath("Statoil/ECLIPSE/Gurbat/ECLIPSE.SMSPEC")

    grid = EclGrid(grid_path)
    well_info = WellInfo(grid, rst_path)
    summary = EclSum(sum_path)

    for well_time_line in well_info:
        for well_state in well_time_line:
            # Well-level rates.
            self.assertEqual(well_state.oilRate(), well_state.oilRateSI())
            self.assertEqual(well_state.waterRate(), well_state.waterRateSI())
            self.assertEqual(well_state.gasRate(), well_state.gasRateSI())
            self.assertEqual(well_state.volumeRate(), well_state.volumeRateSI())

            # Connection-level rates.
            for conn in well_state.globalConnections():
                self.assertEqual(conn.gasRate(), conn.gasRateSI())
                self.assertEqual(conn.waterRate(), conn.waterRateSI())
                self.assertEqual(conn.oilRate(), conn.oilRateSI())
                self.assertEqual(conn.volumeRate(), conn.volumeRateSI())
def __init__(
    self,
    input_case: Union[Path, str],
    layers: Tuple = (),
):
    """Open all ECLIPSE output files belonging to *input_case*.

    Loads the summary (SMSPEC/UNSMRY), the .EGRID grid, the .UNRST
    restart file, the .INIT file, and the COMPDAT well table.

    Args:
        input_case: Path to the case; sibling files are found by
            swapping the suffix.
        layers: Optional layer specification kept for later processing.
    """
    super().__init__()

    self._input_case: Path = Path(input_case)
    self._eclsum = EclSum(str(self._input_case))
    self._grid = EclGrid(str(self._input_case.with_suffix(".EGRID")))
    self._restart = EclFile(str(self._input_case.with_suffix(".UNRST")))
    # Bug fix: the .INIT file used to be opened twice — first as a plain
    # EclFile immediately overwritten by the EclInitFile below, leaking
    # the first handle. Open it once.
    self._init = EclInitFile(self._grid, str(self._input_case.with_suffix(".INIT")))
    self._wells = compdat.df(EclFiles(str(self._input_case)))
    self._layers = layers
def test_writer(self):
    """A fresh writer must accept variables and time steps."""
    writer = EclSum.writer("CASE", datetime.date(2000, 1, 1), 10, 10, 5)
    self.assertIsInstance(self.ecl_sum, EclSum)

    writer.addVariable("FOPT")
    self.assertTrue(writer.has_key("FOPT"))
    writer.addTStep(1, 100)
def __init__(
    self,
    input_case: Union[Path, str],
    perforation_handling_strategy: str = "bottom_point",
):
    """Open all ECLIPSE output files belonging to *input_case*.

    Loads the summary (SMSPEC/UNSMRY), the .EGRID grid, the .UNRST
    restart file, the .INIT file, and the COMPDAT well table.

    Args:
        input_case: Path to the case; sibling files are found by
            swapping the suffix.
        perforation_handling_strategy: Strategy name passed through for
            later perforation processing.
    """
    super().__init__()

    self._input_case: Path = Path(input_case)
    self._eclsum = EclSum(str(self._input_case))
    self._grid = EclGrid(str(self._input_case.with_suffix(".EGRID")))
    self._restart = EclFile(str(self._input_case.with_suffix(".UNRST")))
    # Bug fix: the .INIT file used to be opened twice — first as a plain
    # EclFile immediately overwritten by the EclInitFile below, leaking
    # the first handle. Open it once.
    self._init = EclInitFile(self._grid, str(self._input_case.with_suffix(".INIT")))
    self._wells = compdat.df(EclFiles(str(self._input_case)))

    self._perforation_handling_strategy: str = perforation_handling_strategy
def test_writer(self):
    """A fresh writer must accept variables and time steps."""
    writer = EclSum.writer("CASE", datetime.date(2000, 1, 1), 10, 10, 5)
    self.assertIsInstance(self.ecl_sum, EclSum)

    writer.addVariable("FOPT")
    self.assertTrue(writer.has_key("FOPT"))
    writer.addTStep(1, 100)
def test_labscale(self):
    """Lab-unit case: start/end times and length agree for lazy and eager loads."""
    case = self.createTestPath("Statoil/ECLIPSE/LabScale/HDMODEL")
    for lazy in (True, False):
        summary = EclSum(case, lazy_load=lazy)
        self.assertEqual(summary.getStartTime(),
                         datetime.datetime(2013, 1, 1, 0, 0, 0))
        self.assertEqual(summary.getEndTime(),
                         datetime.datetime(2013, 1, 1, 19, 30, 0))
        self.assertFloatEqual(summary.getSimulationLength(), 19.50)
def test_eval(self):
    """NPV expressions must evaluate consistently with blocked production."""
    npv = EclNPV(self.case)

    # [FOPT] alone must equal the sum of blocked FOPT production.
    npv.compile("[FOPT]")
    npv1 = npv.evalNPV()

    summary = EclSum(self.case)
    trange = summary.timeRange()
    npv2 = sum(summary.blockedProduction("FOPT", trange))
    self.assertAlmostEqual(npv1, npv2)

    # Expressions that cancel algebraically must evaluate to ~zero.
    npv.compile("[FOPT] - 0.5*[FOPT] - 0.5*[FOPT]")
    npv1 = npv.evalNPV()
    self.assertTrue(abs(npv1) < 1e-2)

    npv.compile("[WOPT:OP_1] - 0.5*[WOPT:OP_1] - 0.5*[WOPT:OP_1]")
    npv1 = npv.evalNPV()
    self.assertTrue(abs(npv1) < 1e-2)
def test_total_and_rate(self):
    """Classify summary keys as totals vs. rates."""
    for total_key in ("FOPT", "WWPT:OP_3"):
        self.assertTrue(EclSum.is_total(total_key))
    self.assertFalse(EclSum.is_total("RPR:2"))

    for rate_key in ("WOPR:OP_4", "FWIR"):
        self.assertTrue(EclSum.is_rate(rate_key))
    self.assertFalse(EclSum.is_rate("BPR:123"))
def test_restart_abs_path(self):
    """A prediction case must resolve its restart case to an absolute path."""
    with TestAreaContext("restart_test"):
        history = create_case(case="HISTORY")
        history.fwrite()

        pred_path = "prediction"
        create_prediction(history, pred_path)

        pred = EclSum(os.path.join(pred_path, "PREDICTION"))

        # The restart case has a maximum length of 72 characters, depending
        # on the path used for $TMP and so on we do not really know here if
        # the restart_case has been set or not.
        if pred.restart_case:
            self.assertTrue(isinstance(pred.restart_case, EclSum))
            self.assertEqual(pred.restart_case.case,
                             os.path.join(os.getcwd(), history.case))
            self.assertEqual(pred.restart_step, history.last_report)

            length = pred.sim_length
            pred_times = pred.alloc_time_vector(False)
            hist_times = history.alloc_time_vector(False)

            # The prediction timeline must start with the history timeline.
            for index in range(len(hist_times)):
                self.assertEqual(hist_times[index], pred_times[index])
def test_restart_mapping(self):
    """A restarted total case must reproduce the history it restarts from."""
    history = EclSum(
        self.createTestPath(
            "Statoil/ECLIPSE/SummaryRestart/iter-1/NOR-2013A_R007-0"))
    total = EclSum(
        self.createTestPath(
            "Statoil/ECLIPSE/SummaryRestart/Prediction/NOR-2013A_R007_PRED-0"),
        include_restart=True)

    # Dates of the history must prefix the dates of the combined case.
    history_dates = history.get_dates()
    total_dates = total.get_dates()
    for hist_date, total_date in zip(history_dates, total_dates):
        self.assertEqual(hist_date, total_date)

    # Well vectors shared by both cases must agree in the history window.
    for key in history.keys(pattern="W*"):
        if key in total:
            self.assertEqual(history.iget(key, 5), total.iget(key, 5))

    # A key only present in the prediction falls back to the node default
    # inside the history window.
    self.assertFalse("WGPR:NOT_21_D" in history)
    self.assertTrue("WGPR:NOT_21_D" in total)
    node = total.smspec_node("WGPR:NOT_21_D")
    self.assertEqual(total.iget("WGPR:NOT_21_D", 5), node.default)
def test_timeRange(self):
    """Exercise EclSum.timeRange(): argument validation and rounding of
    start/end to whole interval boundaries."""
    summary = EclSum(self.case)

    # Bug fix: all four invalid-interval calls used to sit inside a single
    # assertRaises block, so only the first one ever executed. Each
    # malformed interval string now gets its own assertion.
    with self.assertRaises(TypeError):
        summary.timeRange(interval="1")
    with self.assertRaises(TypeError):
        summary.timeRange(interval="1X")
    with self.assertRaises(TypeError):
        summary.timeRange(interval="YY")
    with self.assertRaises(TypeError):
        summary.timeRange(interval="MY")

    # start after end is invalid.
    with self.assertRaises(ValueError):
        summary.timeRange(start=datetime.datetime(2000, 1, 1),
                          end=datetime.datetime(1999, 1, 1))

    # Yearly range: rounded outwards to whole years.
    # (Unused sim_start/sim_end locals from the original were removed.)
    trange = summary.timeRange(interval="1Y")
    self.assertEqual(trange[0], datetime.date(2000, 1, 1))
    self.assertEqual(trange[1], datetime.date(2001, 1, 1))
    self.assertEqual(trange[2], datetime.date(2002, 1, 1))
    self.assertEqual(trange[3], datetime.date(2003, 1, 1))
    self.assertEqual(trange[4], datetime.date(2004, 1, 1))
    self.assertEqual(trange[5], datetime.date(2005, 1, 1))

    # Monthly range over the whole simulation.
    trange = summary.timeRange(interval="1M")
    self.assertEqual(trange[0], datetime.date(2000, 1, 1))
    self.assertEqual(trange[-1], datetime.date(2005, 1, 1))

    # Explicit start is rounded down to the month boundary.
    trange = summary.timeRange(start=datetime.date(2002, 1, 15), interval="1M")
    self.assertEqual(trange[0], datetime.date(2002, 1, 1))
    self.assertEqual(trange[-1], datetime.date(2005, 1, 1))

    # Explicit end is rounded up to the next month boundary.
    trange = summary.timeRange(start=datetime.date(2002, 1, 15),
                               end=datetime.date(2003, 1, 15),
                               interval="1M")
    self.assertEqual(trange[0], datetime.date(2002, 1, 1))
    self.assertEqual(trange[-1], datetime.date(2003, 2, 1))

    # A datetime end behaves like the corresponding date.
    trange = summary.timeRange(start=datetime.date(2002, 1, 15),
                               end=datetime.datetime(2003, 1, 15, 0, 0, 0),
                               interval="1M")
    self.assertEqual(trange[0], datetime.date(2002, 1, 1))
    self.assertEqual(trange[-1], datetime.date(2003, 2, 1))
def test_write(self):
    """Round trip: fwrite() a loaded case and reload it; key sets must match."""
    with TestAreaContext("my_space") as area:
        source_case = EclSum(
            self.createTestPath("Statoil/ECLIPSE/SummaryRestart/iter-1/NOR-2013A_R007-0"),
            lazy_load=False,
        )
        self.assertIsNotNone(source_case)

        target = os.path.join(os.getcwd(), "CASE")
        source_case.fwrite(ecl_case=target)

        round_tripped = EclSum(target)
        self.assertEqual(source_case.keys(), round_tripped.keys())
def createEclSum(case, keys,
                 sim_start=datetime.date(2010, 1, 1),
                 data_start=None,
                 sim_length_days=5 * 365,
                 num_report_step=5,
                 num_mini_step=10,
                 dims=(20, 10, 5),
                 func_table=None,
                 restart_case=None,
                 restart_step=-1):
    """Create a synthetic EclSum case for use in tests.

    @param case: case name passed to EclSum.restart_writer().
    @param keys: list of (keyword, wgname, num, unit) tuples; one summary
        variable is registered per tuple.
    @param sim_start: simulation start date.
    @param data_start: optional first data date; when given every time step
        is offset by (data_start - sim_start) days.
    @param sim_length_days: total simulated length in days.
    @param num_report_step: number of report steps.
    @param num_mini_step: number of ministeps per report step.
    @param dims: (nx, ny, nz) grid dimensions.
    @param func_table: optional {key: f(days)} mapping used to generate
        values; keys not present fall back to mock_func().
    @param restart_case: forwarded to EclSum.restart_writer().
    @param restart_step: forwarded to EclSum.restart_writer().
    @return: the populated EclSum instance.
    """
    # None sentinel instead of a mutable {} default: a dict literal default
    # would be a single shared object reused across every call.
    if func_table is None:
        func_table = {}

    ecl_sum = EclSum.restart_writer(case, restart_case, restart_step, sim_start,
                                    dims[0], dims[1], dims[2])

    var_list = [ecl_sum.addVariable(kw, wgname=wgname, num=num, unit=unit)
                for (kw, wgname, num, unit) in keys]

    # This is a bug! This should not be integer division, but tests are written
    # around that assumption.
    report_step_length = float(sim_length_days // num_report_step)
    mini_step_length = float(report_step_length // num_mini_step)

    if data_start is None:
        time_offset = 0
    else:
        dt = data_start - sim_start
        time_offset = dt.total_seconds() / 86400.0

    for report_step in range(num_report_step):
        for mini_step in range(num_mini_step):
            days = time_offset + report_step * report_step_length + mini_step * mini_step_length
            t_step = ecl_sum.addTStep(report_step + 1, sim_days=days)
            for var in var_list:
                key = var.getKey1()
                if key in func_table:
                    func = func_table[key]
                    t_step[key] = func(days)
                else:
                    t_step[key] = mock_func(ecl_sum, key, days)

    return ecl_sum
def test_ix_case(self):
    """An IX summary and its E100 twin expose the same WWCT keys once the
    truncated E100 well name HWELL_PR is padded back to HWELL_PROD."""
    ix_sum = EclSum(self.createTestPath("Statoil/ECLIPSE/ix/summary/CREATE_REGION_AROUND_WELL"))
    self.assertIsNotNone(ix_sum)

    well_names = [ix_sum.smspec_node(key).wgname for key in ix_sum.keys()]
    self.assertTrue("HWELL_PROD" in well_names)

    e100_sum = EclSum(self.createTestPath("Statoil/ECLIPSE/ix/summary/ECL100/E100_CREATE_REGION_AROUND_WELL"))
    self.assertIsNotNone(e100_sum)

    def pad_hwell(key):
        # Restore the full IX well name from the shorter E100 spelling.
        if key.split(":")[-1] == "HWELL_PR":
            return key + "OD"
        return key

    self.assertEqual(
        ix_sum.keys("WWCT*"),
        [pad_hwell(key) for key in e100_sum.keys("WWCT*")],
    )
def test_ix_write(self):
    """fwrite()/reload round trip preserves the key set for IX cases."""
    datasets = [
        "Statoil/ECLIPSE/ix/summary/CREATE_REGION_AROUND_WELL",
        "Statoil/ECLIPSE/ix/troll/IX_NOPH3_R04_75X75X1_GRID2.SMSPEC",
    ]
    for data_set in datasets:
        with TestAreaContext("my_space" + data_set.split("/")[-1]) as area:
            original = EclSum(self.createTestPath(data_set), lazy_load=False)
            self.assertIsNotNone(original)

            case_path = os.path.join(os.getcwd(), "CASE")
            original.fwrite(ecl_case=case_path)

            reloaded = EclSum(case_path)
            self.assertEqual(list(original.keys()), list(reloaded.keys()))
"2014-09-01": 93.21, "2014-10-01": 84.40, "2014-11-01": 75.79, "2014-12-01": 59.29, "2015-01-01": 47.22, "2015-02-01": 50.58, "2015-03-01": 47.82, "2015-04-01": 54.45, "2015-05-01": 59.27, "2015-06-01": 59.82, "2015-07-01": 50.90, "2015-08-01": 42.87, "2015-09-01": 45.48} if __name__ == '__main__': ecl_sum = EclSum("SNAKE_OIL_FIELD") start_time = ecl_sum.getStartTime() date_ranges = ecl_sum.timeRange(start_time, interval="1M") production_sums = ecl_sum.blockedProduction("FOPT", date_ranges) npv = 0.0 for index in range(0, len(date_ranges) - 1): date = date_ranges[index + 1] # end of period production_sum = production_sums[index] oil_price = OIL_PRICES[date.date().strftime("%Y-%m-%d")] production_value = oil_price * production_sum npv += production_value with open("snake_oil_npv.txt", "w") as output_file:
def setUp(self):
    # Load the reference Gurbat summary case once per test.
    self.test_file = self.createTestPath("Statoil/ECLIPSE/Gurbat/ECLIPSE.SMSPEC")
    self.ecl_sum = EclSum(self.test_file)
class EclSumTest(EclTest):
    """Tests of time ranges, CSV dumping and robustness against damaged
    SMSPEC/UNSMRY files, using the Gurbat reference case."""

    def setUp(self):
        self.test_file = self.createTestPath("Statoil/ECLIPSE/Gurbat/ECLIPSE.SMSPEC")
        self.ecl_sum = EclSum(self.test_file)

    def test_time_range_year(self):
        # extend_end=True must push the last point past the real end date.
        real_range = self.ecl_sum.timeRange(interval="1y", extend_end=False)
        extended_range = self.ecl_sum.timeRange(interval="1y", extend_end=True)
        assert real_range[-1] < extended_range[-1]

    def test_time_range_day(self):
        # At daily resolution the range already reaches the end; no extension.
        real_range = self.ecl_sum.timeRange(interval="1d", extend_end=False)
        extended_range = self.ecl_sum.timeRange(interval="1d", extend_end=True)
        assert real_range[-1] == extended_range[-1]

    def test_time_range_month(self):
        real_range = self.ecl_sum.timeRange(interval="1m", extend_end=False)
        extended_range = self.ecl_sum.timeRange(interval="1m", extend_end=True)
        assert real_range[-1] < extended_range[-1]

    def test_dump_csv_line(self):
        # Dump one CSV line of all F* vectors at a fixed time.
        ecl_sum_vector = EclSumKeyWordVector(self.ecl_sum)
        ecl_sum_vector.addKeywords("F*")

        with self.assertRaises(KeyError):
            ecl_sum_vector.addKeyword("MISSING")

        dtime = datetime.datetime(2002, 1, 1, 0, 0, 0)
        with TestAreaContext("EclSum/csv_dump"):
            test_file_name = self.createTestPath("dump.csv")
            outputH = copen(test_file_name, "w")
            self.ecl_sum.dumpCSVLine(dtime, ecl_sum_vector, outputH)
            assert os.path.isfile(test_file_name)

    def test_truncated_smspec(self):
        # A half-truncated SMSPEC must make loading fail with IOError.
        with TestAreaContext("EclSum/truncated_smspec") as ta:
            ta.copy_file(self.test_file)
            ta.copy_file(self.createTestPath("Statoil/ECLIPSE/Gurbat/ECLIPSE.UNSMRY"))
            file_size = os.path.getsize("ECLIPSE.SMSPEC")
            with open("ECLIPSE.SMSPEC", "r+") as f:
                # Integer division: file.truncate() requires an int size;
                # file_size / 2 is a float on Python 3 and would raise
                # TypeError instead of exercising the truncated-file path.
                f.truncate(file_size // 2)
            with self.assertRaises(IOError):
                EclSum("ECLIPSE")

    def test_truncated_data(self):
        # A half-truncated UNSMRY must make loading fail with IOError.
        with TestAreaContext("EclSum/truncated_data") as ta:
            ta.copy_file(self.test_file)
            ta.copy_file(self.createTestPath("Statoil/ECLIPSE/Gurbat/ECLIPSE.UNSMRY"))
            file_size = os.path.getsize("ECLIPSE.UNSMRY")
            with open("ECLIPSE.UNSMRY", "r+") as f:
                # Integer division for the same reason as in test_truncated_smspec.
                f.truncate(file_size // 2)
            with self.assertRaises(IOError):
                EclSum("ECLIPSE")

    def test_missing_smspec_keyword(self):
        # Rewrite the SMSPEC without the KEYWORDS kw; loading must fail.
        with TestAreaContext("EclSum/truncated_data") as ta:
            ta.copy_file(self.test_file)
            ta.copy_file(self.createTestPath("Statoil/ECLIPSE/Gurbat/ECLIPSE.UNSMRY"))
            with openEclFile("ECLIPSE.SMSPEC") as f:
                kw_list = []
                for kw in f:
                    kw_list.append(EclKW.copy(kw))

            with openFortIO("ECLIPSE.SMSPEC", mode=FortIO.WRITE_MODE) as f:
                for kw in kw_list:
                    if kw.getName() == "KEYWORDS":
                        continue
                    kw.fwrite(f)

            with self.assertRaises(IOError):
                EclSum("ECLIPSE")

    def test_missing_unsmry_keyword(self):
        # Drop some PARAMS keywords from the UNSMRY; loading must fail.
        with TestAreaContext("EclSum/truncated_data") as ta:
            ta.copy_file(self.test_file)
            ta.copy_file(self.createTestPath("Statoil/ECLIPSE/Gurbat/ECLIPSE.UNSMRY"))
            with openEclFile("ECLIPSE.UNSMRY") as f:
                kw_list = []
                for kw in f:
                    kw_list.append(EclKW.copy(kw))

            with openFortIO("ECLIPSE.UNSMRY", mode=FortIO.WRITE_MODE) as f:
                c = 0
                for kw in kw_list:
                    if kw.getName() == "PARAMS":
                        if c % 5 == 0:
                            continue
                    c += 1
                    kw.fwrite(f)

            with self.assertRaises(IOError):
                EclSum("ECLIPSE")

    def test_labscale(self):
        # Lab unit system: simulation length is reported in hours.
        case = self.createTestPath("Statoil/ECLIPSE/LabScale/HDMODEL")
        sum = EclSum(case, lazy_load=True)
        self.assertEqual(sum.getStartTime(), datetime.datetime(2013, 1, 1, 0, 0, 0))
        self.assertEqual(sum.getEndTime(), datetime.datetime(2013, 1, 1, 19, 30, 0))
        self.assertFloatEqual(sum.getSimulationLength(), 19.50)

        sum = EclSum(case, lazy_load=False)
        self.assertEqual(sum.getStartTime(), datetime.datetime(2013, 1, 1, 0, 0, 0))
        self.assertEqual(sum.getEndTime(), datetime.datetime(2013, 1, 1, 19, 30, 0))
        self.assertFloatEqual(sum.getSimulationLength(), 19.50)
def test_ix_caseII(self):
    """The Troll IX case loads and exposes the WMCTL:Q21BH1 key."""
    troll_case = EclSum(self.createTestPath("Statoil/ECLIPSE/ix/troll/IX_NOPH3_R04_75X75X1_GRID2.SMSPEC"))
    self.assertIsNotNone(troll_case)
    all_keys = list(troll_case.keys())
    self.assertTrue("WMCTL:Q21BH1" in all_keys)
def setUp(self):
    # Load the shared test case (module-level `case` path) and sanity-check
    # that construction produced an EclSum instance.
    self.case = self.createTestPath(case)
    self.ecl_sum = EclSum(self.case)
    self.assertIsInstance(self.ecl_sum, EclSum)
class SumTest(EclTest):
    """Main EclSum test suite: loading, key lookup, interpolation, date
    handling, writing, restart mapping and resampling, run against the
    module-level `case` dataset."""

    def setUp(self):
        # `case` is a module-level path to the reference dataset.
        self.case = self.createTestPath(case)
        self.ecl_sum = EclSum(self.case)
        self.assertIsInstance(self.ecl_sum, EclSum)

    def test_load(self):
        self.assertIsNotNone(self.ecl_sum, "Load failed")

    def test_invalid(self):
        # Nonexistent case -> IOError.
        with self.assertRaises(IOError):
            sum = EclSum("Does/not/exist")

    def test_KeyError(self):
        # Missing keys raise KeyError from all lookup entry points.
        sum = self.ecl_sum
        with self.assertRaises(KeyError):
            v = sum.numpy_vector("KeyMissing")

        with self.assertRaises(KeyError):
            v = sum.get_interp("Missing", days=750)

        with self.assertRaises(KeyError):
            v = sum.get_interp_vector("Missing", days_list=[750])

    def test_contains(self):
        self.assertTrue("FOPT" in self.ecl_sum)
        self.assertFalse("MISSING" in self.ecl_sum)

    def test_interp(self):
        # Interpolation by days and by date, scalar and vector variants.
        sum = self.ecl_sum
        self.assertAlmostEqual(sum.get_interp("WWCT:OP_3", days=750), 0.11719122)
        self.assertAlmostEqual(sum.get_interp("WWCT:OP_3", date=datetime.date(2004, 1, 1)), 0.603358387947)

        v = sum.get_interp_vector("WOPT:OP_1", days_list=[100, 200, 400])
        self.assertAlmostEqualList([805817.11875, 1614955.34677419, 3289267.67857143], v)

        v = sum.get_interp_vector("WGPT:OP_2", date_list=[datetime.date(2002, 1, 1), datetime.date(2003, 1, 1), datetime.date(2004, 1, 1)])
        self.assertAlmostEqualList(v, [8.20773632e+08, 9.68444032e+08, 1.02515213e+09])

        # Values interpolate to 0 at simulation start.
        self.assertEqual(sum.get_interp("FOPT", days=0), 0)
        self.assertEqual(sum.get_interp("WOPR:OP_1", days=0), 0)
        self.assertEqual(sum.get_interp("WOPR:OP_1", date=datetime.date(2000, 1, 1)), 0)

        self.assertEqual(sum.get_interp("WOPR:OP_1", days=31), 7996)
        self.assertEqual(sum.get_interp("WOPR:OP_1", date=datetime.date(2000, 2, 1)), 7996)

        FPR = sum.numpy_vector("FPR")
        self.assertFloatEqual(sum.get_interp("FPR", days=0), FPR[0])
        self.assertFloatEqual(sum.get_interp("FPR", days=31), FPR[1])

        # Exactly one of days/date must be supplied.
        with self.assertRaises(ValueError):
            sum.get_interp("WOPR:OP_1")

        with self.assertRaises(ValueError):
            sum.get_interp("WOPR:OP_1", days=10, date=datetime.date(2000, 1, 1))

    def test_LLINEAR(self):
        # LGR case: the LLINEARS key must be present.
        sum = EclSum(self.createTestPath("Statoil/ECLIPSE/Heidrun/LGRISSUE/EM-LTAA-ISEG_CARFIN_NWPROPS"))
        self.assertTrue(sum.has_key("LLINEARS"))

    def test_wells(self):
        wells = self.ecl_sum.wells()
        wells.sort()
        self.assertListEqual([well for well in wells], ["OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "WI_1", "WI_2", "WI_3"])

        wells = self.ecl_sum.wells(pattern="*_3")
        wells.sort()
        self.assertListEqual([well for well in wells], ["OP_3", "WI_3"])

        groups = self.ecl_sum.groups()
        groups.sort()
        self.assertListEqual([group for group in groups], ['GMWIN', 'OP', 'WI'])

    def test_last(self):
        last = self.ecl_sum.get_last("FOPT")
        self.assertFloatEqual(last.value, 38006336.0)
        self.assertFloatEqual(last.days, 1826.0)
        self.assertEqual(last.date, datetime.datetime(2004, 12, 31, 0, 0, 0))

        self.assertFloatEqual(self.ecl_sum.last_value("FGPT"), 6605249024.0)
        self.assertEqual(len(self.ecl_sum), 63)

    def test_dates(self):
        sum = self.ecl_sum
        d = sum.dates

        self.assertEqual(d[0], datetime.datetime(2000, 1, 1, 0, 0, 0))
        self.assertEqual(d[62], datetime.datetime(2004, 12, 31, 0, 0, 0))
        self.assertEqual(len(d), 63)
        self.assertEqual(d[25], datetime.datetime(2001, 12, 1, 0, 0, 0))
        self.assertEqual(sum.iget_date(25), datetime.datetime(2001, 12, 1, 0, 0, 0))

        mpl_dates = sum.mpl_dates
        self.assertAlmostEqual(mpl_dates[25], 730820)

        days = sum.days
        self.assertAlmostEqual(days[50], 1461)

        self.assertEqual(sum.start_time, datetime.datetime(2000, 1, 1, 0, 0, 0))
        self.assertEqual(sum.end_time, datetime.datetime(2004, 12, 31, 0, 0, 0))
        self.assertTrue(sum.check_sim_time(datetime.datetime(2004, 12, 31, 0, 0, 0)))
        self.assertEqual(sum.end_date, datetime.date(2004, 12, 31))

    def test_dates2(self):
        sum = EclSum(self.createTestPath("Statoil/ECLIPSE/FF12/FF12_2013B3_AMAP2"))
        self.assertEqual(sum.end_date, datetime.date(2045, 1, 1))

    def test_keys(self):
        sum = self.ecl_sum
        self.assertRaises(KeyError, sum.__getitem__, "BJARNE")

        v = sum["FOPT"]
        self.assertEqual(len(v), 63)

    def test_index(self):
        sum = self.ecl_sum
        index = sum.get_key_index("TCPUDAY")
        self.assertEqual(index, 10239)

    def test_report(self):
        sum = self.ecl_sum
        self.assertEqual(sum.get_report(date=datetime.date(2000, 10, 1)), 10)
        # Dates not coinciding with a report step (or outside the run) -> -1.
        self.assertEqual(sum.get_report(date=datetime.date(2000, 10, 3)), -1)
        self.assertEqual(sum.get_report(date=datetime.date(1980, 10, 3)), -1)
        self.assertEqual(sum.get_report(date=datetime.date(2012, 10, 3)), -1)

        self.assertEqual(sum.get_report(days=91), 3)
        self.assertEqual(sum.get_report(days=92), -1)

        self.assertAlmostEqual(sum.get_interp("FOPT", days=91), sum.get_from_report("FOPT", 3))

        self.assertEqual(sum.first_report, 1)
        self.assertEqual(sum.last_report, 62)

        self.assertEqual(sum.get_report_time(10), datetime.date(2000, 10, 1))
        self.assertFloatEqual(sum.get_from_report("FOPT", 10), 6.67447e+06)

    def test_fwrite(self):
        ecl_sum = EclSum(self.case, lazy_load=False)
        with TestAreaContext("python/sum-test/fwrite") as work_area:
            ecl_sum.fwrite(ecl_case="CASE")
            # Only checks that fwrite() completed without raising.
            self.assertTrue(True)

    def test_block(self):
        sum = self.ecl_sum
        # The same block can be addressed by i,j,k or by global index.
        index_ijk = sum.get_key_index("BPR:15,28,1")
        index_num = sum.get_key_index("BPR:1095")
        self.assertEqual(index_ijk, index_num)

    def test_restart(self):
        hist = EclSum(self.createTestPath("Statoil/ECLIPSE/sum-restart/history/T07-4A-W2011-18-P1"))
        base = EclSum(self.createTestPath("Statoil/ECLIPSE/sum-restart/prediction/BASECASE"))
        pred = EclSum(self.createTestPath("Statoil/ECLIPSE/sum-restart/prediction/BASECASE"), include_restart=False)

        self.assertIsNotNone(hist)
        self.assertIsNotNone(base)
        self.assertIsNotNone(pred)

    def test_case1(self):
        # `path`, `base` and `case` are module-level fixtures.
        self.assertTrue(self.ecl_sum.path == self.createTestPath(path))
        self.assertTrue(self.ecl_sum.base == base)
        self.assertTrue(self.ecl_sum.case == self.createTestPath(case))
        self.assertTrue(self.ecl_sum.abs_path == self.createTestPath(path))

    def test_case2(self):
        cwd = os.getcwd()
        os.chdir(self.createTestPath(path))
        sum = EclSum(base)
        # Loaded from a relative name: no path, but abs_path still resolves.
        self.assertIsNone(sum.path)
        self.assertTrue(sum.base == base)
        self.assertTrue(sum.case == base)
        self.assertTrue(sum.abs_path == self.createTestPath(path))
        os.chdir(cwd)

    def test_var_properties(self):
        sum = self.ecl_sum
        self.assertRaises(KeyError, sum.smspec_node, "BJARNE")

        node = sum.smspec_node("FOPT")
        self.assertTrue(node.isTotal())
        self.assertFalse(node.isHistorical())

        node = sum.smspec_node("FOPR")
        self.assertFalse(node.isTotal())
        self.assertFalse(node.isHistorical())
        self.assertTrue(node.keyword == "FOPR")

        node = sum.smspec_node("FOPRH")
        self.assertFalse(node.isTotal())
        self.assertTrue(node.isHistorical())
        self.assertTrue(node.isRate())
        self.assertTrue(node.keyword == "FOPRH")

        node = sum.smspec_node("WOPR:OP_1")
        self.assertFalse(node.isTotal())
        self.assertTrue(node.isRate())
        self.assertTrue(node.keyword == "WOPR")

        node = sum.smspec_node("WOPT:OP_1")
        self.assertTrue(node.isTotal())
        self.assertFalse(node.isRate())
        self.assertTrue(node.unit == "SM3")
        self.assertTrue(node.wgname == "OP_1")
        self.assertTrue(node.keyword == "WOPT")
        self.assertTrue(sum.unit("FOPR") == "SM3/DAY")

        # Field variables carry no well/group name.
        node = sum.smspec_node("FOPTH")
        self.assertTrue(node.isTotal())
        self.assertFalse(node.isRate())
        self.assertIsNone(node.wgname)

        node = sum.smspec_node("BPR:1095")
        self.assertEqual(node.num, 1095)

    def test_stringlist_gc(self):
        # A fetched element must stay valid after the StringList is deleted.
        sum = EclSum(self.case)
        wells = sum.wells()
        well1 = wells[0]
        del wells
        self.assertTrue(well1 == "OP_1")

    def test_stringlist_reference(self):
        sum = EclSum(self.case)
        wells = sum.wells()
        self.assertListEqual([well for well in wells], ['OP_1', 'OP_2', 'OP_3', 'OP_4', 'OP_5', 'WI_1', 'WI_2', 'WI_3'])
        self.assertIsInstance(wells, StringList)

    def test_stringlist_setitem(self):
        sum = EclSum(self.case)
        wells = sum.wells()
        wells[0] = "Bjarne"
        well0 = wells[0]
        self.assertTrue(well0 == "Bjarne")
        self.assertTrue(wells[0] == "Bjarne")
        wells[0] = "XXX"
        # A previously fetched element is an independent string.
        self.assertTrue(well0 == "Bjarne")
        self.assertTrue(wells[0] == "XXX")

    def test_segment(self):
        # Segment variables are named KEY:WELL:SEGMENT_NR.
        sum = EclSum(self.createTestPath("Statoil/ECLIPSE/Oseberg/F8MLT/F8MLT-F4"))
        segment_vars = sum.keys("SOFR:F-8:*")
        self.assertIn("SOFR:F-8:1", segment_vars)
        for var in segment_vars:
            tmp = var.split(":")
            nr = int(tmp[2])
            self.assertTrue(nr >= 0)

    def test_return_types(self):
        self.assertIsInstance(self.ecl_sum.alloc_time_vector(True), TimeVector)
        key_index = self.ecl_sum.get_general_var_index("FOPT")
        self.assertIsInstance(self.ecl_sum.alloc_data_vector(key_index, True), DoubleVector)

    def test_timeRange(self):
        sum = EclSum(self.case)
        # Malformed interval strings are rejected.
        with self.assertRaises(TypeError):
            trange = sum.timeRange(interval="1")
            trange = sum.timeRange(interval="1X")
            trange = sum.timeRange(interval="YY")
            trange = sum.timeRange(interval="MY")

        # start after end is invalid.
        with self.assertRaises(ValueError):
            trange = sum.timeRange(start=datetime.datetime(2000, 1, 1), end=datetime.datetime(1999, 1, 1))

        sim_start = datetime.datetime(2000, 1, 1, 0, 0, 0)
        sim_end = datetime.datetime(2004, 12, 31, 0, 0, 0)

        # Yearly range: rounded down to Jan 1 and extended past sim end.
        trange = sum.timeRange(interval="1Y")
        self.assertTrue(trange[0] == datetime.date(2000, 1, 1))
        self.assertTrue(trange[1] == datetime.date(2001, 1, 1))
        self.assertTrue(trange[2] == datetime.date(2002, 1, 1))
        self.assertTrue(trange[3] == datetime.date(2003, 1, 1))
        self.assertTrue(trange[4] == datetime.date(2004, 1, 1))
        self.assertTrue(trange[5] == datetime.date(2005, 1, 1))

        trange = sum.timeRange(interval="1M")
        self.assertTrue(trange[0] == datetime.date(2000, 1, 1))
        self.assertTrue(trange[-1] == datetime.date(2005, 1, 1))

        # Explicit start is rounded down to the first of the month.
        trange = sum.timeRange(start=datetime.date(2002, 1, 15), interval="1M")
        self.assertTrue(trange[0] == datetime.date(2002, 1, 1))
        self.assertTrue(trange[-1] == datetime.date(2005, 1, 1))

        # Explicit end is rounded up to the first of the following month.
        trange = sum.timeRange(start=datetime.date(2002, 1, 15), end=datetime.date(2003, 1, 15), interval="1M")
        self.assertTrue(trange[0] == datetime.date(2002, 1, 1))
        self.assertTrue(trange[-1] == datetime.date(2003, 2, 1))

        trange = sum.timeRange(start=datetime.date(2002, 1, 15), end=datetime.datetime(2003, 1, 15, 0, 0, 0), interval="1M")
        self.assertTrue(trange[0] == datetime.date(2002, 1, 1))
        self.assertTrue(trange[-1] == datetime.date(2003, 2, 1))

    # Loading this dataset is a test of loading a case where one report step is missing.
    def test_Heidrun(self):
        sum = EclSum(self.createTestPath("Statoil/ECLIPSE/Heidrun/Summary/FF12_2013B3_CLEAN_RS"))
        self.assertEqual(452, len(sum))
        self.assertFloatEqual(1.8533144e+8, sum.last_value("FOPT"))

        trange = sum.timeRange(start=datetime.date(2015, 1, 1), interval="1M")
        self.assertTrue(trange[0] == datetime.date(2016, 2, 1))
        for t in trange:
            sum.get_interp("FOPT", date=t)

    def test_regularProduction(self):
        sum = EclSum(self.case)
        # A hand-built TimeVector is not accepted: TypeError expected from
        # this block (NOTE(review): confirm which call actually raises).
        with self.assertRaises(TypeError):
            trange = TimeVector.createRegular(sum.start_time, sum.end_time, "1M")
            prod = sum.blockedProduction("FOPR", trange)

        with self.assertRaises(KeyError):
            trange = TimeVector.createRegular(sum.start_time, sum.end_time, "1M")
            prod = sum.blockedProduction("NoNotThis", trange)

        trange = sum.timeRange(interval="2Y")
        self.assertTrue(trange[0] == datetime.date(2000, 1, 1))
        self.assertTrue(trange[-1] == datetime.date(2006, 1, 1))

        trange = sum.timeRange(interval="5Y")
        self.assertTrue(trange[0] == datetime.date(2000, 1, 1))
        self.assertTrue(trange[-1] == datetime.date(2005, 1, 1))

        trange = sum.timeRange(interval="6M")
        wprod1 = sum.blockedProduction("WOPT:OP_1", trange)
        wprod2 = sum.blockedProduction("WOPT:OP_2", trange)
        wprod3 = sum.blockedProduction("WOPT:OP_3", trange)
        wprod4 = sum.blockedProduction("WOPT:OP_4", trange)
        wprod5 = sum.blockedProduction("WOPT:OP_5", trange)
        fprod = sum.blockedProduction("FOPT", trange)
        gprod = sum.blockedProduction("GOPT:OP", trange)

        # Field == group == sum of the wells, block by block.
        wprod = wprod1 + wprod2 + wprod3 + wprod4 + wprod5
        for (w, f, g) in zip(wprod, fprod, gprod):
            self.assertFloatEqual(w, f)
            self.assertFloatEqual(w, g)

    def test_writer(self):
        writer = EclSum.writer("CASE", datetime.date(2000, 1, 1), 10, 10, 5)
        self.assertIsInstance(self.ecl_sum, EclSum)
        writer.addVariable("FOPT")
        self.assertTrue(writer.has_key("FOPT"))
        writer.addTStep(1, 100)

    def test_aquifer(self):
        case = EclSum(self.createTestPath("Statoil/ECLIPSE/Aquifer/06_PRESSURE_R009-0"))
        self.assertTrue("AAQR:2" in case)

    def test_restart_mapping(self):
        history = EclSum(self.createTestPath("Statoil/ECLIPSE/SummaryRestart/iter-1/NOR-2013A_R007-0"))
        total = EclSum(self.createTestPath("Statoil/ECLIPSE/SummaryRestart/Prediction/NOR-2013A_R007_PRED-0"), include_restart=True)

        history_dates = history.get_dates()
        total_dates = total.get_dates()
        for i in range(len(history_dates)):
            self.assertEqual(history_dates[i], total_dates[i])

        keys = history.keys(pattern="W*")
        for key in keys:
            if key in total:
                self.assertEqual(history.iget(key, 5), total.iget(key, 5))

        # Wells only present in the prediction case fall back to defaults.
        self.assertFalse("WGPR:NOT_21_D" in history)
        self.assertTrue("WGPR:NOT_21_D" in total)
        node = total.smspec_node("WGPR:NOT_21_D")
        self.assertEqual(total.iget("WGPR:NOT_21_D", 5), node.default)

    def test_write(self):
        with TestAreaContext("my_space") as area:
            intersect_summary = EclSum(self.createTestPath("Statoil/ECLIPSE/SummaryRestart/iter-1/NOR-2013A_R007-0"), lazy_load=False)
            self.assertIsNotNone(intersect_summary)

            write_location = os.path.join(os.getcwd(), "CASE")
            intersect_summary.fwrite(ecl_case=write_location)

            reloaded_summary = EclSum(write_location)
            self.assertEqual(intersect_summary.keys(), reloaded_summary.keys())

    def test_ix_case(self):
        intersect_summary = EclSum(self.createTestPath("Statoil/ECLIPSE/ix/summary/CREATE_REGION_AROUND_WELL"))
        self.assertIsNotNone(intersect_summary)
        self.assertTrue("HWELL_PROD" in [intersect_summary.smspec_node(key).wgname for key in intersect_summary.keys()])

        eclipse_summary = EclSum(self.createTestPath("Statoil/ECLIPSE/ix/summary/ECL100/E100_CREATE_REGION_AROUND_WELL"))
        self.assertIsNotNone(eclipse_summary)

        # Pad the shorter E100 well name back to the IX spelling.
        hwell_padder = lambda key: key if key.split(":")[-1] != "HWELL_PR" else key + "OD"
        self.assertEqual(intersect_summary.keys("WWCT*"), list(map(hwell_padder, eclipse_summary.keys("WWCT*"))))

    def test_ix_write(self):
        for data_set in ["Statoil/ECLIPSE/ix/summary/CREATE_REGION_AROUND_WELL", "Statoil/ECLIPSE/ix/troll/IX_NOPH3_R04_75X75X1_GRID2.SMSPEC"]:
            with TestAreaContext("my_space" + data_set.split("/")[-1]) as area:
                intersect_summary = EclSum(self.createTestPath(data_set), lazy_load=False)
                self.assertIsNotNone(intersect_summary)

                write_location = os.path.join(os.getcwd(), "CASE")
                intersect_summary.fwrite(ecl_case=write_location)

                reloaded_summary = EclSum(write_location)
                self.assertEqual(list(intersect_summary.keys()), list(reloaded_summary.keys()))

    def test_ix_caseII(self):
        troll_summary = EclSum(self.createTestPath("Statoil/ECLIPSE/ix/troll/IX_NOPH3_R04_75X75X1_GRID2.SMSPEC"))
        self.assertIsNotNone(troll_summary)
        self.assertTrue("WMCTL:Q21BH1" in list(troll_summary.keys()))

    def test_resample(self):
        # Resample onto N evenly spaced points and compare against direct
        # interpolation of the original case.
        time_points = TimeVector()
        start_time = self.ecl_sum.get_data_start_time()
        end_time = self.ecl_sum.get_end_time()
        delta = end_time - start_time
        N = 25
        time_points.initRange(CTime(start_time), CTime(end_time), CTime(int(delta.total_seconds() / (N - 1))))
        time_points.append(CTime(end_time))
        resampled = self.ecl_sum.resample("OUTPUT_CASE", time_points)
        for key in self.ecl_sum.keys():
            self.assertIn(key, resampled)

        self.assertEqual(self.ecl_sum.get_data_start_time(), resampled.get_data_start_time())
        delta = self.ecl_sum.get_end_time() - resampled.get_end_time()
        self.assertTrue(delta.total_seconds() <= 1)

        keys = ["FOPT", "FOPR", "BPR:15,28,1", "WGOR:OP_1"]
        for key in keys:
            for time_index, t in enumerate(time_points):
                self.assertFloatEqual(resampled.iget(key, time_index), self.ecl_sum.get_interp_direct(key, t))

    def test_summary_units(self):
        self.assertEqual(self.ecl_sum.unit_system, EclUnitTypeEnum.ECL_METRIC_UNITS)

    # The case loaded in this test originates in a simulation
    # which was shut down brutally. This test verifies that we
    # can create a valid ecl_sum instance from what we find.
    def test_broken_case(self):
        ecl_sum = EclSum(self.createTestPath("Statoil/ECLIPSE/SummaryFail3/COMBINED-AUTUMN2018_CARBSENS-0"))
def test_stringlist_reference(self):
    """wells() returns a StringList holding the expected well names."""
    summary = EclSum(self.case)
    well_list = summary.wells()
    expected = ['OP_1', 'OP_2', 'OP_3', 'OP_4', 'OP_5', 'WI_1', 'WI_2', 'WI_3']
    self.assertListEqual(list(well_list), expected)
    self.assertIsInstance(well_list, StringList)
def test_regularProduction(self):
    # Purpose: blockedProduction() argument validation, plus the invariant
    # that the field total equals the group total and the summed per-well
    # totals within every time block.
    sum = EclSum(self.case)

    # A hand-built TimeVector is not accepted: TypeError expected from this
    # block (NOTE(review): confirm which call actually raises).
    with self.assertRaises(TypeError):
        trange = TimeVector.createRegular( sum.start_time , sum.end_time , "1M" )
        prod = sum.blockedProduction("FOPR" , trange)

    # Unknown keys raise KeyError.
    with self.assertRaises(KeyError):
        trange = TimeVector.createRegular( sum.start_time , sum.end_time , "1M" )
        prod = sum.blockedProduction("NoNotThis" , trange)

    trange = sum.timeRange(interval = "2Y")
    self.assertTrue( trange[0] == datetime.date( 2000 , 1 , 1 ))
    self.assertTrue( trange[-1] == datetime.date( 2006 , 1 , 1 ))

    trange = sum.timeRange(interval = "5Y")
    self.assertTrue( trange[0] == datetime.date( 2000 , 1 , 1 ))
    self.assertTrue( trange[-1] == datetime.date( 2005 , 1 , 1 ))

    trange = sum.timeRange(interval = "6M")
    wprod1 = sum.blockedProduction("WOPT:OP_1" , trange)
    wprod2 = sum.blockedProduction("WOPT:OP_2" , trange)
    wprod3 = sum.blockedProduction("WOPT:OP_3" , trange)
    wprod4 = sum.blockedProduction("WOPT:OP_4" , trange)
    wprod5 = sum.blockedProduction("WOPT:OP_5" , trange)
    fprod = sum.blockedProduction("FOPT" , trange)
    gprod = sum.blockedProduction("GOPT:OP" , trange)

    # Field == group == sum of the wells, block by block.
    wprod = wprod1 + wprod2 + wprod3 + wprod4 + wprod5
    for (w,f,g) in zip(wprod, fprod,gprod):
        self.assertFloatEqual( w , f )
        self.assertFloatEqual( w , g )
def runSimulator(simulator, history_simulator, time_step_count):
    """Run the snake-oil simulators and record their output as summary data.

    @param simulator: production simulator sampled for the regular vectors.
    @param history_simulator: simulator sampled for the *H history vectors.
    @param time_step_count: number of report steps; each report step is
        subdivided into 10 ministeps.
    @rtype: (EclSum, list of str)
    @return: the populated EclSum case and a time map listing each
        ministep's simulation time formatted as dd/mm/YYYY.
    """
    ecl_sum = EclSum.writer("SNAKE_OIL_FIELD", datetime(2010, 1, 1), 10, 10, 10)

    # Field vectors followed by their *H history twins.
    ecl_sum.addVariable("FOPT")
    ecl_sum.addVariable("FOPR")
    ecl_sum.addVariable("FGPT")
    ecl_sum.addVariable("FGPR")
    ecl_sum.addVariable("FWPT")
    ecl_sum.addVariable("FWPR")
    ecl_sum.addVariable("FGOR")
    ecl_sum.addVariable("FWCT")

    ecl_sum.addVariable("FOPTH")
    ecl_sum.addVariable("FOPRH")
    ecl_sum.addVariable("FGPTH")
    ecl_sum.addVariable("FGPRH")
    ecl_sum.addVariable("FWPTH")
    ecl_sum.addVariable("FWPRH")
    ecl_sum.addVariable("FGORH")
    ecl_sum.addVariable("FWCTH")

    # Well vectors for OP1/OP2, plus their history twins.
    ecl_sum.addVariable("WOPR", wgname="OP1")
    ecl_sum.addVariable("WOPR", wgname="OP2")
    ecl_sum.addVariable("WWPR", wgname="OP1")
    ecl_sum.addVariable("WWPR", wgname="OP2")
    ecl_sum.addVariable("WGPR", wgname="OP1")
    ecl_sum.addVariable("WGPR", wgname="OP2")
    ecl_sum.addVariable("WGOR", wgname="OP1")
    ecl_sum.addVariable("WGOR", wgname="OP2")
    ecl_sum.addVariable("WWCT", wgname="OP1")
    ecl_sum.addVariable("WWCT", wgname="OP2")

    ecl_sum.addVariable("WOPRH", wgname="OP1")
    ecl_sum.addVariable("WOPRH", wgname="OP2")
    ecl_sum.addVariable("WWPRH", wgname="OP1")
    ecl_sum.addVariable("WWPRH", wgname="OP2")
    ecl_sum.addVariable("WGPRH", wgname="OP1")
    ecl_sum.addVariable("WGPRH", wgname="OP2")
    ecl_sum.addVariable("WGORH", wgname="OP1")
    ecl_sum.addVariable("WGORH", wgname="OP2")
    ecl_sum.addVariable("WWCTH", wgname="OP1")
    ecl_sum.addVariable("WWCTH", wgname="OP2")

    # Two block-pressure probes addressed by global grid index.
    ecl_sum.addVariable("BPR", num=globalIndex(5, 5, 5))
    ecl_sum.addVariable("BPR", num=globalIndex(1, 3, 8))

    time_map = []
    mini_step_count = 10
    total_step_count = time_step_count * mini_step_count

    for report_step in range(time_step_count):
        for mini_step in range(mini_step_count):
            # sim_days advances by one unit per ministep.
            t_step = ecl_sum.addTStep(report_step + 1, sim_days=report_step * mini_step_count + mini_step)
            time_map.append(t_step.getSimTime().datetime().strftime("%d/%m/%Y"))

            # Advance both simulators by one ministep's share of the run.
            simulator.step(scale=1.0 / total_step_count)
            history_simulator.step(scale=1.0 / total_step_count)

            t_step["FOPR"] = simulator.fopr()
            t_step["FOPT"] = simulator.fopt()
            t_step["FGPR"] = simulator.fgpr()
            t_step["FGPT"] = simulator.fgpt()
            t_step["FWPR"] = simulator.fwpr()
            t_step["FWPT"] = simulator.fwpt()
            t_step["FGOR"] = simulator.fgor()
            t_step["FWCT"] = simulator.fwct()

            t_step["WOPR:OP1"] = simulator.opr("OP1")
            t_step["WOPR:OP2"] = simulator.opr("OP2")
            t_step["WGPR:OP1"] = simulator.gpr("OP1")
            t_step["WGPR:OP2"] = simulator.gpr("OP2")
            t_step["WWPR:OP1"] = simulator.wpr("OP1")
            t_step["WWPR:OP2"] = simulator.wpr("OP2")
            t_step["WGOR:OP1"] = simulator.gor("OP1")
            t_step["WGOR:OP2"] = simulator.gor("OP2")
            t_step["WWCT:OP1"] = simulator.wct("OP1")
            t_step["WWCT:OP2"] = simulator.wct("OP2")

            t_step["BPR:5,5,5"] = simulator.bpr("5,5,5")
            t_step["BPR:1,3,8"] = simulator.bpr("1,3,8")

            t_step["FOPRH"] = history_simulator.fopr()
            t_step["FOPTH"] = history_simulator.fopt()
            t_step["FGPRH"] = history_simulator.fgpr()
            t_step["FGPTH"] = history_simulator.fgpt()
            t_step["FWPRH"] = history_simulator.fwpr()
            t_step["FWPTH"] = history_simulator.fwpt()
            t_step["FGORH"] = history_simulator.fgor()
            t_step["FWCTH"] = history_simulator.fwct()

            t_step["WOPRH:OP1"] = history_simulator.opr("OP1")
            t_step["WOPRH:OP2"] = history_simulator.opr("OP2")
            t_step["WGPRH:OP1"] = history_simulator.gpr("OP1")
            t_step["WGPRH:OP2"] = history_simulator.gpr("OP2")
            t_step["WWPRH:OP1"] = history_simulator.wpr("OP1")
            t_step["WWPRH:OP2"] = history_simulator.wpr("OP2")
            t_step["WGORH:OP1"] = history_simulator.gor("OP1")
            t_step["WGORH:OP2"] = history_simulator.gor("OP2")
            t_step["WWCTH:OP1"] = history_simulator.wct("OP1")
            t_step["WWCTH:OP2"] = history_simulator.wct("OP2")

    return ecl_sum, time_map