def loadVector(self, plot_block_data, fs, report_step, realization_number):
    """
    @type plot_block_data: PlotBlockData
    @type fs: EnkfFs
    @type report_step: int
    @type realization_number: int
    @rtype: PlotBlockVector
    """
    config_node = self.__obs_vector.getConfigNode()
    is_private_container = (
        config_node.getImplementationType() == ErtImplType.CONTAINER
    )
    data_node = EnkfNode(config_node, private=is_private_container)

    node_id = NodeId(report_step, realization_number)
    if data_node.tryLoad(fs, node_id):
        block_obs = self.getBlockObservation(report_step)

        data = DoubleVector()
        for index in range(len(block_obs)):
            value = block_obs.getData(data_node.valuePointer(), index, node_id)
            data.append(value)
        data.permute(self.__permutation_vector)

        plot_block_vector = PlotBlockVector(realization_number, data)
        plot_block_data.addPlotBlockVector(plot_block_vector)
def exportField(self, keyword, path, iactive, file_type, report_step, selected_case):
    """
    @type keyword: str
    @type path: str
    @type iactive: BoolVector
    @type file_type: EnkfFieldFileFormatEnum
    @type report_step: int
    @type selected_case: str
    """
    fs = ERT.ert.getEnkfFsManager().getFileSystem(selected_case)

    if file_type == EnkfFieldFileFormatEnum.ECL_GRDECL_FILE:
        extension = ".grdecl"
    elif file_type == EnkfFieldFileFormatEnum.RMS_ROFF_FILE:
        extension = ".roff"
    else:
        # Guard against an undefined extension for unsupported file types
        raise ValueError("Cannot export field files of type: %s" % file_type)

    iens_list = iactive.createActiveList()
    path_fmt = os.path.join(path, keyword + "_%d" + extension)
    config_node = ERT.ert.ensembleConfig()[keyword]
    mc = ERT.ert.getModelConfig()
    init_file = config_node.getInitFile(mc.getRunpathFormat())
    if init_file:
        print("Using init file: %s" % init_file)

    EnkfNode.exportMany(
        config_node, path_fmt, fs, iens_list, file_type=file_type, arg=init_file
    )
    return True
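# Hypothetical usage sketch for exportField, not from the source: it assumes
# the exporter object (`exporter`) that this method belongs to, an active ERT
# session behind the ERT singleton, and a "PORO" field node; report_step 0 and
# the "export" directory are illustrative only.
from ecl.util.util import BoolVector  # assumed import path for BoolVector

iactive = BoolVector(default_value=True, initial_size=ERT.ert.getEnsembleSize())
exporter.exportField(
    "PORO", "export", iactive,
    EnkfFieldFileFormatEnum.ECL_GRDECL_FILE, 0, "default_0",
)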
def exportGenData(self, keyword, path, iactive, file_type, report_step, selected_case):
    """
    @type keyword: str
    @type path: str
    @type iactive: BoolVector
    @type file_type: EnkfFieldFileFormatEnum
    @type report_step: int
    @type selected_case: str
    """
    fs = ERT.ert.getEnkfFsManager().getFileSystem(selected_case)

    config_node = ERT.ert.ensembleConfig().getNode(keyword)
    gen_data_config_node = config_node.getDataModelConfig()

    export_type = gen_data_config_node.getOutputFormat()
    if export_type == GenDataFileType.GEN_DATA_UNDEFINED:
        export_type = gen_data_config_node.getInputFormat()

    node = EnkfNode(config_node)

    for index, active in enumerate(iactive):
        if active:
            node_id = NodeId(int(report_step), index)

            if node.tryLoad(fs, node_id):
                gen_data = node.asGenData()
                filename = os.path.join(path, "%s_%d.txt" % (keyword, index))
                gen_data.export(filename, export_type, None)
def test_update(self):
    config = self.createTestPath("local/snake_oil/snake_oil.ert")
    with ErtTestContext("update_test", config) as context:
        ert = context.getErt()
        es_update = ESUpdate(ert)
        fsm = ert.getEnkfFsManager()
        sim_fs = fsm.getFileSystem("default_0")
        target_fs = fsm.getFileSystem("target")
        mask = BoolVector(initial_size=ert.getEnsembleSize(), default_value=True)
        run_context = ErtRunContext.ensemble_smoother_update(sim_fs, target_fs)
        es_update.smootherUpdate(run_context)

        conf = ert.ensembleConfig()["SNAKE_OIL_PARAM"]
        sim_node = EnkfNode(conf)
        target_node = EnkfNode(conf)

        node_id = NodeId(0, 0)
        sim_node.load(sim_fs, node_id)
        target_node.load(target_fs, node_id)

        sim_gen_kw = sim_node.asGenKw()
        target_gen_kw = target_node.asGenKw()

        # Test that an update has actually taken place
        for index in range(len(sim_gen_kw)):
            self.assertNotEqual(sim_gen_kw[index], target_gen_kw[index])
def exportGenKw(self, keyword, path, iactive, file_type, report_step, selected_case):
    """
    @type keyword: str
    @type path: str
    @type iactive: BoolVector
    @type file_type: EnkfFieldFileFormatEnum
    @type report_step: int
    @type selected_case: str
    """
    enkf_config_node = ERT.ert.ensembleConfig().getNode(keyword)
    assert isinstance(enkf_config_node, EnkfConfigNode)
    node = EnkfNode(enkf_config_node)
    fs = ERT.ert.getEnkfFsManager().getFileSystem(selected_case)

    for index, value in enumerate(iactive):
        if value:
            if node.tryLoad(fs, NodeId(report_step, index)):
                gen_kw = GenKw.createCReference(node.valuePointer())
                filename = os.path.join(path, "%s_%d" % (keyword, index))

                if file_type == "Parameter list":
                    filename += ".txt"
                    gen_kw.exportParameters(filename)
                else:
                    filename += ".inc"
                    gen_kw.exportTemplate(filename)
def _setup_sim(self, sim_id, controls, file_system):
    def _set_ext_param(ext_param, key, assignment):
        if isinstance(assignment, dict):  # handle suffixes
            suffixes = ext_param.config[key]
            if len(assignment) != len(suffixes):
                raise KeyError(
                    "Key {} is missing values for these suffixes: {}".format(
                        key, set(suffixes).difference(set(assignment.keys()))
                    )
                )
            for suffix, value in assignment.items():
                ext_param[key, suffix] = value
        else:
            # Assume assignment is a single numerical value
            ext_param[key] = assignment

    node_id = NodeId(0, sim_id)
    if set(controls.keys()) != set(self.control_keys):
        err_msg = "Mismatch between initialized and provided control names."
        raise KeyError(err_msg)

    for control_name, control in controls.items():
        ens_config = self.res_config.ensemble_config
        node = EnkfNode(ens_config[control_name])
        ext_node = node.as_ext_param()
        if len(ext_node) != len(control.keys()):
            raise KeyError(
                "Expected {} variables for control {}, received {}.".format(
                    len(ext_node), control_name, len(control.keys())
                )
            )
        for var_name, var_setting in control.items():
            _set_ext_param(ext_node, var_name, var_setting)
        node.save(file_system, node_id)
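# Hypothetical data-shape sketch for _setup_sim, not part of the source: a
# control variable can be assigned either a single numerical value or, when
# the ext_param config defines suffixes for it, a dict mapping each suffix to
# a value. The names "WELL_ORDER", "W1"/"W2" and the suffixes "a"/"b" are
# made up for illustration.
controls = {
    "WELL_ORDER": {
        "W1": 2.0,                   # plain scalar assignment
        "W2": {"a": 0.5, "b": 1.5},  # one value per configured suffix
    }
}
# simulator._setup_sim(sim_id=0, controls=controls, file_system=sim_fs)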
def _export_field_param(config_node, file_system, ensemble_size, output_path):
    # Get/export the updated Field parameters
    EnkfNode.exportMany(
        config_node,
        output_path,
        file_system,
        np.arange(0, ensemble_size),
    )
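# Hypothetical usage sketch, not from the source. It assumes an EnKFMain
# handle built from a ResConfig and a "PORO" field node, and that, by analogy
# with path_fmt in exportField above, "%d" in output_path is replaced by the
# realization number for each exported file.
from res.enkf import EnKFMain, ResConfig  # assumed import paths

ert = EnKFMain(ResConfig("config.ert"))
poro_config = ert.ensembleConfig()["PORO"]
target_fs = ert.getEnkfFsManager().getFileSystem("target")
_export_field_param(poro_config, target_fs, ert.getEnsembleSize(), "fields/poro_%d.grdecl")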
def init_data(main):
    fsm = main.getEnkfFsManager()
    init_fs = fsm.getFileSystem("init")
    grid = main.eclConfig().getGrid()

    # Model: bhp = poro * 1000
    poro_mean = 0.15
    poro_std = 0.10
    bhp_std = 125

    # Model: wct = poro * 4
    wct_std = 0.30

    bhp = []
    wct = []
    num_realisations = main.getEnsembleSize()

    # The path fields/poro{}.grdecl must be consistent with the INIT_FILES:
    # argument in the PORO configuration in the configuration file used for
    # the testcase.
    os.mkdir("fields")
    random.seed(12345)
    for iens in range(num_realisations):
        with open("fields/poro{}.grdecl".format(iens), "w") as f:
            poro = random.gauss(poro_mean, poro_std)
            f.write("PORO")
            for cell in range(grid.get_num_active()):
                if cell % 10 == 0:
                    f.write("\n")
                f.write("{:<7.5} ".format(poro))
            f.write("\n/\n")
        bhp.append(poro * 1000 + random.gauss(0, bhp_std))
        wct.append(poro * 4 + random.gauss(0, wct_std))

    mask = [True] * main.getEnsembleSize()
    init_context = ErtRunContext.case_init(init_fs, mask)
    main.initRun(init_context)

    ens_config = main.ensembleConfig()
    bhp_config = ens_config["WBHP"]
    wct_config = ens_config["WWCT"]
    state_map = init_fs.getStateMap()
    for iens in range(main.getEnsembleSize()):
        bhp_node = EnkfNode(bhp_config)
        bhp_summary = bhp_node.as_summary()
        bhp_summary[1] = bhp[iens]

        wct_node = EnkfNode(wct_config)
        wct_summary = wct_node.as_summary()
        wct_summary[1] = wct[iens]

        node_id = NodeId(1, iens)
        bhp_node.save(init_fs, node_id)
        wct_node.save(init_fs, node_id)
        state_map[iens] = RealizationStateEnum.STATE_HAS_DATA

    return init_fs
def test_localization(self):
    config = self.createTestPath("local/snake_oil/snake_oil.ert")
    with ErtTestContext("localization_test", config) as context:
        ert = context.getErt()
        es_update = ESUpdate(ert)
        fsm = ert.getEnkfFsManager()
        sim_fs = fsm.getFileSystem("default_0")
        target_fs = fsm.getFileSystem("target")

        # perform localization
        localized_idxs = (1, 2)
        local_config = ert.getLocalConfig()
        local_config.clear()
        dataset = local_config.createDataset("DATASET_SCALAR_LOCA")
        dataset.addNode("SNAKE_OIL_PARAM")
        active_list = dataset.getActiveList("SNAKE_OIL_PARAM")
        for i in localized_idxs:
            active_list.addActiveIndex(i)
        obs = local_config.createObsdata("OBSSET_LOCA")
        obs.addNode("WOPR_OP1_72")
        ministep = local_config.createMinistep("MINISTEP_LOCA")
        ministep.attachDataset(dataset)
        ministep.attachObsset(obs)
        updatestep = local_config.getUpdatestep()
        updatestep.attachMinistep(ministep)

        # Run ensemble smoother
        mask = BoolVector(initial_size=ert.getEnsembleSize(), default_value=True)
        model_config = ert.getModelConfig()
        path_fmt = model_config.getRunpathFormat()
        jobname_fmt = model_config.getJobnameFormat()
        subst_list = None
        run_context = ErtRunContext.ensemble_smoother(
            sim_fs, target_fs, mask, path_fmt, jobname_fmt, subst_list, 0
        )
        es_update.smootherUpdate(run_context)

        conf = ert.ensembleConfig()["SNAKE_OIL_PARAM"]
        sim_node = EnkfNode(conf)
        target_node = EnkfNode(conf)

        node_id = NodeId(0, 0)
        sim_node.load(sim_fs, node_id)
        target_node.load(target_fs, node_id)

        sim_gen_kw = sim_node.asGenKw()
        target_gen_kw = target_node.asGenKw()

        # Test that the localized values have been updated
        for i in localized_idxs:
            self.assertNotEqual(sim_gen_kw[i], target_gen_kw[i])

        # test that all the other values are left unchanged
        non_localized_idxs = (
            x for x in range(len(sim_gen_kw)) if x not in localized_idxs
        )
        for i in non_localized_idxs:
            self.assertEqual(sim_gen_kw[i], target_gen_kw[i])
def test_localization(setup_case, expected_target_gen_kw):
    """
    Note that this is now a snapshot test, so there is no guarantee that the
    snapshots are correct, they are just documenting the current behavior.
    """
    res_config = setup_case("local/snake_oil", "snake_oil.ert")
    ert = EnKFMain(res_config)
    es_update = ESUpdate(ert)
    fsm = ert.getEnkfFsManager()
    sim_fs = fsm.getFileSystem("default_0")
    target_fs = fsm.getFileSystem("target")

    # perform localization
    localized_idxs = (1, 2)
    local_config = ert.getLocalConfig()
    local_config.clear()
    obs = local_config.createObsdata("OBSSET_LOCA")
    obs.addNode("WOPR_OP1_72")
    ministep = local_config.createMinistep("MINISTEP_LOCA")
    ministep.addActiveData("SNAKE_OIL_PARAM")  # replaces dataset.addNode()
    active_list = ministep.getActiveList("SNAKE_OIL_PARAM")
    for i in localized_idxs:
        active_list.addActiveIndex(i)
    ministep.attachObsset(obs)
    updatestep = local_config.getUpdatestep()
    updatestep.attachMinistep(ministep)

    # Run ensemble smoother
    mask = [True] * ert.getEnsembleSize()
    model_config = ert.getModelConfig()
    path_fmt = model_config.getRunpathFormat()
    jobname_fmt = model_config.getJobnameFormat()
    subst_list = None
    run_context = ErtRunContext.ensemble_smoother(
        sim_fs, target_fs, mask, path_fmt, jobname_fmt, subst_list, 0
    )
    es_update.smootherUpdate(run_context)

    conf = ert.ensembleConfig()["SNAKE_OIL_PARAM"]
    sim_node = EnkfNode(conf)
    target_node = EnkfNode(conf)

    node_id = NodeId(0, 0)
    sim_node.load(sim_fs, node_id)
    target_node.load(target_fs, node_id)

    sim_gen_kw = list(sim_node.asGenKw())
    target_gen_kw = list(target_node.asGenKw())

    # Test that the localized values have been updated
    assert sim_gen_kw[1:3] != target_gen_kw[1:3]

    # test that all the other values are left unchanged
    assert sim_gen_kw[3:] == target_gen_kw[3:]
    assert sim_gen_kw[0] == target_gen_kw[0]

    assert target_gen_kw == pytest.approx(expected_target_gen_kw)
def test_update(setup_case, module, expected_gen_kw):
    """
    Note that this is now a snapshot test, so there is no guarantee that the
    snapshots are correct, they are just documenting the current behavior.
    """
    res_config = setup_case("local/snake_oil", "snake_oil.ert")
    ert = EnKFMain(res_config)
    es_update = ESUpdate(ert)
    ert.analysisConfig().selectModule(module)
    fsm = ert.getEnkfFsManager()
    sim_fs = fsm.getFileSystem("default_0")
    target_fs = fsm.getFileSystem("target")

    run_context = ErtRunContext.ensemble_smoother_update(sim_fs, target_fs)
    es_update.smootherUpdate(run_context)

    conf = ert.ensembleConfig()["SNAKE_OIL_PARAM"]
    sim_node = EnkfNode(conf)
    target_node = EnkfNode(conf)

    node_id = NodeId(0, 0)
    sim_node.load(sim_fs, node_id)
    target_node.load(target_fs, node_id)

    sim_gen_kw = list(sim_node.asGenKw())
    target_gen_kw = list(target_node.asGenKw())

    assert sim_gen_kw != target_gen_kw
    assert sim_gen_kw == pytest.approx(
        [
            -1.3035319087841115,
            0.8222709205428339,
            -1.1400029486153482,
            0.7477534046493867,
            -0.10400064074767973,
            -1.7223242794585338,
            0.0761604027734105,
            0.4039137216428462,
            0.10001691562080614,
            0.09549338450036506,
        ]
    )
    assert target_gen_kw == pytest.approx(expected_gen_kw)
def loadAllCustomKWData(ert, case_name, keys=None):
    """
    @type ert: EnKFMain
    @type case_name: str
    @type keys: list of str
    @rtype: DataFrame
    """
    fs = ert.getEnkfFsManager().getFileSystem(case_name)

    realizations = fs.realizationList(
        RealizationStateEnum.STATE_HAS_DATA | RealizationStateEnum.STATE_INITIALIZED
    )

    custom_kw_keys = CustomKWCollector.getAllCustomKWKeys(ert)

    if keys is not None:
        # Ignore keys that don't exist
        custom_kw_keys = [key for key in keys if key in custom_kw_keys]

    realizations = numpy.array(realizations)
    custom_kw_data = DataFrame(index=realizations, columns=custom_kw_keys)
    custom_kw_data.index.name = "Realization"

    custom_kw_keys = CustomKWCollector.groupKeys(custom_kw_keys)

    for name in custom_kw_keys:
        ensemble_config_node = ert.ensembleConfig().getNode(name)
        enkf_node = EnkfNode(ensemble_config_node)

        keys = custom_kw_keys[name]

        for realization_number in realizations:
            node_id = NodeId(0, realization_number)

            if enkf_node.tryLoad(fs, node_id):
                custom_kw = enkf_node.asCustomKW()

                for key in keys:
                    value = custom_kw[key]
                    custom_kw_data["%s:%s" % (name, key)][realization_number] = value

    return custom_kw_data
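# Hypothetical usage sketch, not from the source. It assumes an EnKFMain
# handle `ert` for a case that defines CUSTOM_KW results; the key name
# "AGGREGATED:PERLIN" is illustrative only. The returned DataFrame is indexed
# by realization and has one "<name>:<key>" column per CUSTOM_KW key.
all_data = loadAllCustomKWData(ert, "default_0")
print(all_data.columns)

# Restrict the result to a subset of keys; unknown keys are silently ignored.
subset = loadAllCustomKWData(ert, "default_0", keys=["AGGREGATED:PERLIN"])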
def results(self):
    """Will return the results of the simulations.

    Observe that this function will raise RuntimeError if the simulations
    have not been completed. To be certain that the simulations have
    completed you can call the join() method, which will block until all
    simulations have completed.

    The function will return all the results which were configured with the
    @results argument when the simulator was created. The results will come
    as a list of dictionaries of arrays of double values, i.e. if the
    @results argument was:

         results = ["CMODE", "order"]

    when the simulator was created, the results will be returned as:

         [
             {"CMODE": [1, 2, 3], "order": [1, 1, 3]},
             {"CMODE": [1, 4, 1], "order": [0, 7, 8]},
             None,
             {"CMODE": [6, 1, 0], "order": [0, 0, 8]},
         ]

    for a simulation batch which consists of a total of four simulations,
    where the None value indicates that the simulator was unable to compute
    a request. The order of the list corresponds to the case_data provided
    in the start call.
    """
    if self.running():
        raise RuntimeError(
            "Simulations are still running - need to wait before getting results"
        )

    res = []
    nodes = [
        EnkfNode(self.res_config.ensemble_config[key]) for key in self.result_keys
    ]
    for sim_id in range(len(self)):
        node_id = NodeId(0, sim_id)
        if not self.didRealizationSucceed(sim_id):
            logging.error("Simulation %d (node %s) failed." % (sim_id, str(node_id)))
            res.append(None)
            continue
        d = {}
        for node in nodes:
            node.load(self.get_sim_fs(), node_id)
            data = node.asGenData().getData()
            d[node.name()] = np.array(data)
        res.append(d)

    return res
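# Hypothetical caller sketch, not from the source: since results() returns
# None for failed simulations, callers typically separate failures from
# successes. It assumes a context object `ctx` whose batch has completed.
batch_results = ctx.results()
failed_ids = [i for i, r in enumerate(batch_results) if r is None]
successful = [r for r in batch_results if r is not None]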
def results(self):
    """Will return the results of the simulations.

    Observe that this function will raise RuntimeError if the simulations
    have not been completed. To be certain that the simulations have
    completed you can call the join() method, which will block until all
    simulations have completed.

    The function will return all the results which were configured with the
    @results argument when the simulator was created. The results will come
    as a list of dictionaries of arrays of double values, i.e. if the
    @results argument was:

         results = ["CMODE", "order"]

    when the simulator was created, the results will be returned as:

         [
             {"CMODE": [1, 2, 3], "order": [1, 1, 3]},
             {"CMODE": [1, 4, 1], "order": [0, 7, 8]},
             {"CMODE": [6, 1, 0], "order": [0, 0, 8]},
         ]

    for a simulation batch which consists of a total of three
    "sub-simulations".
    """
    if self.running():
        raise RuntimeError(
            "Simulations are still running - need to wait before getting results"
        )

    res = []
    nodes = [
        EnkfNode(self.res_config.ensemble_config[key]) for key in self.result_keys
    ]
    for sim_id in range(len(self)):
        node_id = NodeId(0, sim_id)
        d = {}
        for node in nodes:
            node.load(self.get_sim_fs(), node_id)
            gen_data = node.asGenData()
            d[node.name()] = gen_data.getData()
        res.append(d)

    return res
def test_row_scaling_using_assign_vector(self):
    random_seed = "ABCDEFGHIJK0123456"
    with ErtTestContext("row_scaling", self.config_file) as tc:
        main = tc.getErt()
        init_fs = init_data(main)
        update_fs1 = main.getEnkfFsManager().getFileSystem("target1")

        # The first smoother update without row scaling
        es_update = ESUpdate(main)
        run_context = ErtRunContext.ensemble_smoother_update(init_fs, update_fs1)
        rng = main.rng()
        rng.setState(random_seed)
        es_update.smootherUpdate(run_context)

        # Configure the local updates
        local_config = main.getLocalConfig()
        local_config.clear()
        local_data = local_config.createDataset("LOCAL")
        local_data.addNode("PORO")
        obs = local_config.createObsdata("OBSSET_LOCAL")
        obs.addNode("WWCT0")
        obs.addNode("WBHP0")
        ministep = local_config.createMinistep("MINISTEP_LOCAL")
        ministep.attachDataset(local_data)
        ministep.attachObsset(obs)
        updatestep = local_config.getUpdatestep()
        updatestep.attachMinistep(ministep)

        # Apply the row scaling
        row_scaling = local_data.row_scaling("PORO")
        ens_config = main.ensembleConfig()
        poro_config = ens_config["PORO"]
        field_config = poro_config.getFieldModelConfig()
        grid = main.eclConfig().getGrid()

        scaling = ScalingTest(grid)
        scaling_vector = np.ndarray([field_config.get_data_size()], dtype=np.float32)
        for i in range(field_config.get_data_size()):
            scaling_vector[i] = scaling(i)
        row_scaling.assign_vector(scaling_vector)

        # Second update with row scaling
        update_fs2 = main.getEnkfFsManager().getFileSystem("target2")
        es_update = ESUpdate(main)
        run_context = ErtRunContext.ensemble_smoother_update(init_fs, update_fs2)
        rng.setState(random_seed)
        es_update.smootherUpdate(run_context)

        # Fetch the three values: initial, update without row scaling and
        # update with row scaling, and verify that the row scaling has been
        # correctly applied.
        init_node = EnkfNode(poro_config)
        update_node1 = EnkfNode(poro_config)
        update_node2 = EnkfNode(poro_config)
        for iens in range(main.getEnsembleSize()):
            node_id = NodeId(0, iens)
            init_node.load(init_fs, node_id)
            update_node1.load(update_fs1, node_id)
            update_node2.load(update_fs2, node_id)

            assert_field_update(
                grid,
                init_node.asField(),
                update_node1.asField(),
                update_node2.asField(),
            )
def test_run(self):
    ens_size = 2
    config_file = self.createTestPath("local/config/simulation_batch/config.ert")
    with ErtTestContext("simulation_batch", model_config=config_file) as ctx:
        ert = ctx.getErt()
        ens_config = ert.ensembleConfig()

        # Observe that a significant amount of hardcoding regarding the
        # GEN_DATA and EXT_PARAM nodes is assumed between this test, the
        # config file and the forward model.

        # Add control nodes
        order_control = EnkfConfigNode.create_ext_param(
            "WELL_ORDER", ["W1", "W2", "W3"]
        )
        injection_control = EnkfConfigNode.create_ext_param(
            "WELL_INJECTION", ["W1", "W4"]
        )
        ens_config.addNode(order_control)
        ens_config.addNode(injection_control)

        # Add result nodes
        order_result = EnkfConfigNode.create_gen_data("ORDER", "order_%d")
        injection_result = EnkfConfigNode.create_gen_data("INJECTION", "injection_%d")
        ens_config.addNode(order_result)
        ens_config.addNode(injection_result)

        order_node = EnkfNode(order_control)
        order_node_ext = order_node.as_ext_param()
        injection_node = EnkfNode(injection_control)
        injection_node_ext = injection_node.as_ext_param()

        fs_manager = ert.getEnkfFsManager()
        sim_fs = fs_manager.getFileSystem("sim_fs")
        state_map = sim_fs.getStateMap()
        batch_size = ens_size + 2
        for iens in range(batch_size):
            node_id = NodeId(0, iens)

            order_node_ext["W1"] = iens
            order_node_ext["W2"] = iens * 10
            order_node_ext["W3"] = iens * 100
            order_node.save(sim_fs, node_id)

            injection_node_ext["W1"] = iens + 1
            injection_node_ext["W4"] = 3 * (iens + 1)
            injection_node.save(sim_fs, node_id)
            state_map[iens] = RealizationStateEnum.STATE_INITIALIZED

        mask = BoolVector(default_value=True, initial_size=batch_size)
        model_config = ert.getModelConfig()
        runpath_fmt = model_config.getRunpathFormat()
        jobname_fmt = model_config.getJobnameFormat()
        subst_list = ert.getDataKW()
        itr = 0
        run_context = ErtRunContext.ensemble_experiment(
            sim_fs, mask, runpath_fmt, jobname_fmt, subst_list, itr
        )
        ert.getEnkfSimulationRunner().createRunPath(run_context)
        job_queue = ert.get_queue_config().create_job_queue()
        ert.createRunpath(run_context)
        num = ert.getEnkfSimulationRunner().runEnsembleExperiment(
            job_queue, run_context
        )
        self.assertEqual(num, batch_size)

        order_result = EnkfNode(ens_config["ORDER"])
        injection_result = EnkfNode(ens_config["INJECTION"])
        for iens in range(batch_size):
            node_id = NodeId(0, iens)
            order_result.load(sim_fs, node_id)
            data = order_result.asGenData()

            order_node.load(sim_fs, node_id)
            self.assertEqual(order_node_ext["W1"], data[0])
            self.assertEqual(order_node_ext["W2"], data[1])
            self.assertEqual(order_node_ext["W3"], data[2])
class BatchSimulator(object):
    def __init__(self, res_config, controls, results):
        """Will create a simulator which can be used to run multiple simulations.

        The @res_config argument should be a ResConfig object, representing
        the fully configured state of libres.

        The @controls argument configures which parameters the simulator
        should get when actually simulating. The @controls argument should be
        a dictionary like this:

            controls = {"cmode": ["Well", "Group"], "order": ["W1", "W2", "W3"]}

        In this example the simulator will expect two arrays, 'cmode' and
        'order', consisting of two and three elements respectively. When
        actually simulating, these values will be written to json files
        looking like:

            cmode.json = {"Well": 1.0, "Group": 2.0}
            order.json = {"W1": 1, "W2": 1.0, "W3": 1.0}

        When later invoking the start() method the simulator expects to get
        values for all parameters configured with the @controls argument,
        otherwise an exception will be raised. Internally in libres code the
        controls will be implemented as 'ext_param' instances.

        The @results argument is a list of keys of results which the
        simulator expects to be generated by the forward model. If the
        @results argument looks like:

            results = ["CMODE", "order"]

        the simulator will look for the files 'CMODE_0' and 'order_0' in the
        simulation folder. If those files are not produced by the simulator
        an exception will be raised.
        """
        if not isinstance(res_config, ResConfig):
            raise ValueError("The first argument must be a valid ResConfig instance")

        self.res_config = res_config
        self.ert = EnKFMain(self.res_config)
        self.control_keys = tuple(controls.keys())
        self.result_keys = tuple(results)

        ens_config = self.res_config.ensemble_config
        for control_name, variable_names in controls.items():
            ens_config.addNode(
                EnkfConfigNode.create_ext_param(control_name, variable_names)
            )

        for key in results:
            ens_config.addNode(
                EnkfConfigNode.create_gen_data(key, "{}_%d".format(key))
            )

    def _setup_case(self, case, file_system):
        for sim_id, (geo_id, controls) in enumerate(case):
            assert isinstance(geo_id, int)
            node_id = NodeId(0, sim_id)
            if set(controls.keys()) != set(self.control_keys):
                err_msg = "Mismatch between initialized and provided control names."
                raise KeyError(err_msg)

            for control_name, control in controls.items():
                ens_config = self.res_config.ensemble_config
                node = EnkfNode(ens_config[control_name])
                ext_node = node.as_ext_param()
                if len(ext_node) != len(control.keys()):
                    err_msg = "Expected %d variables for control: %s, received %d."
                    err_in = (len(ext_node), control_name, len(control.keys()))
                    raise KeyError(err_msg % err_in)

                for var_name, value in control.items():
                    ext_node[var_name] = value
                node.save(file_system, node_id)
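# Hypothetical construction sketch, not from the source: it assumes a valid
# "config.ert" file and the usual res import path; the control and result
# names are taken from the docstring above.
from res.enkf import ResConfig  # assumed import path

res_config = ResConfig("config.ert")
simulator = BatchSimulator(
    res_config,
    controls={"cmode": ["Well", "Group"], "order": ["W1", "W2", "W3"]},
    results=["CMODE", "order"],
)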
def start(self, case_name, controls):
    """Will start a batch simulation, returning a handle to query status and results.

    The start method will submit simulations to the queue system and then
    return a BatchContext handle which can be used to query for simulation
    status and results. The @case_name argument should just be a string which
    will be used as the name for the storage of these simulations in the
    system.

    The @controls argument is the set of control values, along with the
    corresponding ID of the external realisation used for each simulation.
    The @controls argument must match the controls argument used when the
    simulator was instantiated. Assuming the following controls argument was
    passed to simulator construction:

        controls = {"cmode": ["Well", "Group"], "order": ["W1", "W2", "W3"]}

    then the following @controls argument can be used in the start method to
    run four simulations:

        [
            (1, {"cmode": [1, 2], "order": [2, 2, 5]}),
            (1, {"cmode": [1, 3], "order": [2, 2, 7]}),
            (1, {"cmode": [1, 7], "order": [2, 0, 5]}),
            (2, {"cmode": [1, -1], "order": [2, 2, 1]}),
        ]

    The first integer in each tuple is the realisation id, so this simulation
    batch will consist of a total of four simulations, where the first three
    are based on realisation 1, and the last is based on realisation 2.

    Observe that only one BatchSimulator should actually be running at a
    time, so when you have called the 'start' method you need to let that
    batch complete before you start a new batch.
    """
    ens_config = self.res_config.ensemble_config
    fsm = self.ert.getEnkfFsManager()
    fs = fsm.getFileSystem(case_name)
    for sim_id, (geo_id, control_dict) in enumerate(controls):
        assert isinstance(geo_id, int)
        node_id = NodeId(0, sim_id)
        if len(control_dict) != len(self.control_keys):
            raise ValueError("Not all keys supplied in controls")

        for key in control_dict.keys():
            config_node = ens_config[key]
            ext_config = config_node.getModelConfig()
            values = control_dict[key]
            if len(values) != len(ext_config):
                raise ValueError("Wrong number of values for: %s" % key)

            node = EnkfNode(config_node)
            ext_node = node.as_ext_param()
            ext_node.set_vector(values)
            node.save(fs, node_id)

    # The input should be validated before we instantiate the BatchContext
    # object; at that stage a job_queue object with multiple threads is
    # started, and things will typically be in a quite sorry state if an
    # exception occurs.
    itr = 0
    mask = BoolVector(default_value=True, initial_size=len(controls))
    sim_context = BatchContext(self.result_keys, self.ert, fs, mask, itr)

    for sim_id, (geo_id, control_dict) in enumerate(controls):
        sim_context.addSimulation(sim_id, geo_id)

    return sim_context
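# Hypothetical end-to-end sketch, not from the source: it assumes `simulator`
# was built as in the construction sketch above, so "cmode" carries two values
# and "order" three, per the docstring. start() submits the batch, join()
# blocks until completion, and results() then returns one dict (or None) per
# simulation.
batch_ctx = simulator.start(
    "batch_case",
    [
        (1, {"cmode": [1.0, 2.0], "order": [2.0, 2.0, 5.0]}),
        (2, {"cmode": [1.0, -1.0], "order": [2.0, 2.0, 1.0]}),
    ],
)
batch_ctx.join()
for sim_result in batch_ctx.results():
    print(sim_result)  # a {"CMODE": ..., "order": ...} dict, or None on failure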
def test_2ministep(self):
    with ErtTestContext("row_scaling", self.config_file) as tc:
        main = tc.getErt()
        init_fs = init_data(main)
        update_fs1 = main.getEnkfFsManager().getFileSystem("target1")

        # The first smoother update without row scaling
        es_update = ESUpdate(main)
        run_context = ErtRunContext.ensemble_smoother_update(init_fs, update_fs1)
        rng = main.rng()
        es_update.smootherUpdate(run_context)

        # Configure the local updates
        local_config = main.getLocalConfig()
        local_config.clear()
        obs = local_config.createObsdata("OBSSET_LOCAL")
        obs.addNode("WBHP0")

        ministep1 = local_config.createMinistep("MINISTEP1")
        local_data1 = local_config.createDataset("LOCAL1")
        local_data1.addNode("PORO")
        row_scaling1 = local_data1.row_scaling("PORO")
        ministep1.attachDataset(local_data1)
        ministep1.attachObsset(obs)

        ministep2 = local_config.createMinistep("MINISTEP2")
        local_data2 = local_config.createDataset("LOCAL2")
        local_data2.addNode("PORO")
        row_scaling2 = local_data2.row_scaling("PORO")
        ministep2.attachDataset(local_data2)
        ministep2.attachObsset(obs)

        updatestep = local_config.getUpdatestep()
        updatestep.attachMinistep(ministep1)
        updatestep.attachMinistep(ministep2)

        # Apply the row scaling
        ens_config = main.ensembleConfig()
        poro_config = ens_config["PORO"]
        field_config = poro_config.getFieldModelConfig()
        grid = main.eclConfig().getGrid()

        row_scaling1.assign(field_config.get_data_size(), SelectLayer(0, grid))
        row_scaling2.assign(field_config.get_data_size(), SelectLayer(1, grid))

        update_fs2 = main.getEnkfFsManager().getFileSystem("target2")
        es_update = ESUpdate(main)
        run_context = ErtRunContext.ensemble_smoother_update(init_fs, update_fs2)
        es_update.smootherUpdate(run_context)

        init_node = EnkfNode(poro_config)
        node1 = EnkfNode(poro_config)
        node2 = EnkfNode(poro_config)
        for iens in range(main.getEnsembleSize()):
            node_id = NodeId(0, iens)
            init_node.load(init_fs, node_id)
            node1.load(update_fs1, node_id)
            node2.load(update_fs2, node_id)

            init_field = init_node.asField()
            field1 = node1.asField()
            field2 = node2.asField()
            for iv, v1, v2 in zip(init_field, field1, field2):
                assert iv != v1
def test_reuse_ALL_ACTIVE(self):
    random_seed = "ABCDEFGHIJK0123456"
    with ErtTestContext("row_scaling", self.config_file) as tc:
        main = tc.getErt()
        grid = main.eclConfig().getGrid()
        init_fs = init_data(main)

        es_update = ESUpdate(main)
        update_fs1 = main.getEnkfFsManager().getFileSystem("target1")
        run_context = ErtRunContext.ensemble_smoother_update(init_fs, update_fs1)
        rng = main.rng()
        rng.setState(random_seed)
        # Normal update without any local configuration
        es_update.smootherUpdate(run_context)

        local_config = main.getLocalConfig()
        local_config.clear_active()
        with self.assertRaises(KeyError):
            obs_data = local_config.copyObsdata("NO_SUCH_OBS", "my_obs")

        local_data = local_config.createDataset("LOCAL")
        local_data.addNode("PORO")
        obs_data = local_config.copyObsdata("ALL_OBS", "my_obs")
        ministep = local_config.createMinistep("MINISTEP_LOCAL")
        ministep.attachDataset(local_data)
        ministep.attachObsset(obs_data)
        updatestep = local_config.getUpdatestep()
        updatestep.attachMinistep(ministep)

        update_fs2 = main.getEnkfFsManager().getFileSystem("target2")
        run_context = ErtRunContext.ensemble_smoother_update(init_fs, update_fs2)
        rng.setState(random_seed)
        # Local update with reused ALL_OBS observation configuration
        es_update.smootherUpdate(run_context)

        del obs_data["WBHP0"]
        ministep2 = local_config.createMinistep("MINISTEP_LOCAL2")
        obs_data2 = local_config.createObsdata("OBSDATA2")
        obs_data2.addNode("WBHP0")
        ministep2.attachDataset(local_data)
        ministep2.attachObsset(obs_data2)
        updatestep.attachMinistep(ministep2)

        update_fs3 = main.getEnkfFsManager().getFileSystem("target3")
        run_context = ErtRunContext.ensemble_smoother_update(init_fs, update_fs3)
        # Local update with two ministeps, where one observation has been
        # removed from the first
        es_update.smootherUpdate(run_context)

        ens_config = main.ensembleConfig()
        poro_config = ens_config["PORO"]
        update_node1 = EnkfNode(poro_config)
        update_node2 = EnkfNode(poro_config)
        update_node3 = EnkfNode(poro_config)
        for iens in range(main.getEnsembleSize()):
            node_id = NodeId(0, iens)
            update_node1.load(update_fs1, node_id)
            update_node2.load(update_fs2, node_id)
            update_node3.load(update_fs3, node_id)

            field1 = update_node1.asField()
            field2 = update_node2.asField()
            field3 = update_node3.asField()

            for k in range(grid.nz):
                for j in range(grid.ny):
                    for i in range(grid.nx):
                        assert field1.ijk_get_double(
                            i, j, k
                        ) == field2.ijk_get_double(i, j, k)

                        f1 = field1.ijk_get_double(i, j, k)
                        f3 = field3.ijk_get_double(i, j, k)

                        # Due to the randomness in the sampling process, which
                        # becomes different when the update step is split in
                        # two ministeps, we cannot enforce equality here.
                        diff = abs(f1 - f3)
                        assert diff < 0.01