def test_build_ensemble():
    """A one-realization ensemble built via the builder API reports its
    single realization as active."""
    job_builder = (
        ee.create_legacy_job_builder()
        .set_id(0)
        .set_name("echo_command")
        .set_ext_job(Mock())
    )
    step_builder = (
        ee.create_step_builder()
        .add_job(job_builder)
        .set_id("0")
        .set_name("some_step")
        .set_dummy_io()
    )
    real_builder = (
        ee.create_realization_builder().set_iens(0).add_step(step_builder).active(True)
    )
    built_ensemble = ee.create_ensemble_builder().add_realization(real_builder).build()
    first_real = built_ensemble.get_reals()[0]
    assert first_real.is_active()
def evaluator(ee_config):
    """Yield an :class:`EnsembleEvaluator` over a two-realization dummy
    ensemble (one stage, one step, two mocked legacy jobs), stopping the
    evaluator on teardown."""
    first_job = (
        create_legacy_job_builder().set_id(0).set_name("cat").set_ext_job(Mock())
    )
    second_job = (
        create_legacy_job_builder().set_id(1).set_name("cat2").set_ext_job(Mock())
    )
    step = (
        create_step_builder()
        .set_id(0)
        .add_job(job=first_job)
        .add_job(job=second_job)
        .set_dummy_io()
    )
    stage = create_stage_builder().add_step(step=step).set_id(0).set_status("Unknown")
    real = create_realization_builder().active(True).set_iens(0).add_stage(stage=stage)
    ensemble = (
        create_ensemble_builder()
        .add_realization(real=real)
        .set_ensemble_size(2)
        .build()
    )
    ee = EnsembleEvaluator(ensemble=ensemble, config=ee_config)
    yield ee
    ee.stop()
def _get_reals(self):
    """Build one realization per configured iens.

    Each realization carries every configured step, and each step gets its
    inputs, outputs and jobs from the corresponding config entries. The
    step source embeds a literal ``{ee_id}`` placeholder to be formatted
    later by the caller.
    """
    builders = []
    for iens in range(self.config[ids.REALIZATIONS]):
        real_builder = create_realization_builder().active(True).set_iens(iens)
        for step_cfg in self.config[ids.STEPS]:
            step_id = uuid.uuid4()
            source = f"/ert/ee/{{ee_id}}/real/{iens}/step/{step_id}"
            step_builder = (
                create_step_builder()
                .set_id(step_id)
                .set_name(step_cfg[ids.NAME])
                .set_source(source)
                .set_type(step_cfg[ids.TYPE])
            )
            for input_cfg in step_cfg.get(ids.INPUTS, []):
                io_builder = (
                    create_file_io_builder()
                    .set_name(input_cfg[ids.RECORD])
                    .set_path(input_cfg[ids.LOCATION])
                    .set_mime(input_cfg[ids.MIME])
                )
                if input_cfg.get(ids.IS_EXECUTABLE):
                    io_builder.set_executable()
                step_builder.add_input(io_builder)
            for output_cfg in step_cfg.get(ids.OUTPUTS, []):
                step_builder.add_output(
                    create_file_io_builder()
                    .set_name(output_cfg[ids.RECORD])
                    .set_path(output_cfg[ids.LOCATION])
                    .set_mime(output_cfg[ids.MIME])
                )
            for job_cfg in step_cfg[ids.JOBS]:
                step_builder.add_job(
                    create_job_builder()
                    .set_id(str(uuid.uuid4()))
                    .set_name(job_cfg[ids.NAME])
                    .set_executable(job_cfg[ids.EXECUTABLE])
                    .set_args(job_cfg.get(ids.ARGS))
                    .set_step_source(source)
                )
            real_builder.add_step(step_builder)
        builders.append(real_builder)
    # Build only after all realization builders are fully assembled.
    return [b.build() for b in builders]
def evaluator(ee_config):
    """Yield an :class:`EnsembleEvaluator` over a two-realization dummy
    ensemble (single step "cats" with two mocked legacy jobs), stopping
    the evaluator on teardown."""
    first_job = (
        create_legacy_job_builder().set_id(0).set_name("cat").set_ext_job(Mock())
    )
    second_job = (
        create_legacy_job_builder().set_id(1).set_name("cat2").set_ext_job(Mock())
    )
    step = (
        create_step_builder()
        .set_id("0")
        .set_name("cats")
        .add_job(job=first_job)
        .add_job(job=second_job)
        .set_dummy_io()
    )
    real = create_realization_builder().active(True).set_iens(0).add_step(step=step)
    ensemble = (
        create_ensemble_builder()
        .add_realization(real=real)
        .set_ensemble_size(2)
        .build()
    )
    ee = EnsembleEvaluator(
        ensemble,
        ee_config,
        0,
        ee_id="ee-0",
    )
    yield ee
    ee.stop()
def test_topological_sort(steps, expected, ambiguous):
    """Verify topological sorting of a realization's steps.

    An ambiguous step is an isolated vertex in the dependency graph: no
    other step consumes its output and it consumes no other step's output,
    so its position in a valid topological order is arbitrary. Such steps
    are only required to be present in the result. The non-ambiguous
    portion of the sorted order must equal ``expected`` exactly.
    """
    real_builder = ee.create_realization_builder().set_iens(0).active(True)
    for definition in steps:
        step_builder = ee.create_step_builder().set_id("0").set_name(definition["name"])
        for input_name in definition["inputs"]:
            step_builder.add_input(ee.create_input_builder().set_name(input_name))
        for output_name in definition["outputs"]:
            step_builder.add_output(ee.create_output_builder().set_name(output_name))
        real_builder.add_step(step_builder)
    ensemble = ee.create_ensemble_builder().add_realization(real_builder).build()
    real = ensemble.get_reals()[0]
    if ambiguous:
        sorted_names = [
            s.get_name() for s in list(real.get_steps_sorted_topologically())
        ]
        for name in ambiguous:
            assert name in sorted_names
    if expected:
        non_ambiguous = [
            s.get_name()
            for s in real.get_steps_sorted_topologically()
            if s.get_name() not in ambiguous
        ]
        assert expected == non_ambiguous
def _make_ensemble_builder(tmpdir, num_reals, num_jobs):
    """Assemble an (un-built) ensemble builder with ``num_reals``
    realizations, each running the same ``num_jobs`` generated dummy jobs.

    For every job a one-line ext-job config and a small python script
    (printing a line to stdout) are written under ``tmpdir``; every
    realization gets its own run path containing a ``jobs.json`` that
    describes the full job list.

    NOTE(review): ``queue_config`` is a free variable here — presumably a
    module-level fixture/global; confirm it is defined in this module.
    """
    builder = create_ensemble_builder()
    with tmpdir.as_cwd():
        # Generate num_jobs dummy ExtJobs: each gets a config file naming
        # a generated executable python script.
        ext_job_list = []
        for job_index in range(0, num_jobs):
            ext_job_config = Path(tmpdir) / f"EXT_JOB_{job_index}"
            with open(ext_job_config, "w") as f:
                f.write(f"EXECUTABLE ext_{job_index}.py\n")
            ext_job_exec = Path(tmpdir) / f"ext_{job_index}.py"
            with open(ext_job_exec, "w") as f:
                f.write(
                    "#!/usr/bin/env python\n"
                    'if __name__ == "__main__":\n'
                    f' print("stdout from {job_index}")\n'
                )
            ext_job_list.append(
                ExtJob(str(ext_job_config), False, name=f"ext_job_{job_index}")
            )
        # One run path + jobs.json per realization; all realizations share
        # the same job list, wrapped in a single "dummy step".
        for iens in range(0, num_reals):
            run_path = Path(tmpdir / f"real_{iens}")
            os.mkdir(run_path)
            with open(run_path / "jobs.json", "w") as f:
                json.dump(
                    {
                        "jobList": [
                            _dump_ext_job(ext_job, index)
                            for index, ext_job in enumerate(ext_job_list)
                        ],
                        "umask": "0022",
                    },
                    f,
                )
            step = (
                create_step_builder()
                .set_id("0")
                .set_job_name("job dispatch")
                .set_job_script("job_dispatch.py")
                .set_max_runtime(10000)
                .set_run_arg(Mock(iens=iens))
                .set_done_callback(lambda _: True)
                .set_exit_callback(lambda _: True)
                # the first callback_argument is expected to be a run_arg
                # from the run_arg, the queue wants to access the iens prop
                # NOTE(review): despite the note above, an empty list is
                # passed — confirm the queue tolerates missing run_arg here.
                .set_callback_arguments([])
                .set_run_path(str(run_path))
                .set_num_cpu(1)
                .set_name("dummy step")
                .set_dummy_io()
            )
            for index, job in enumerate(ext_job_list):
                step.add_job(
                    create_legacy_job_builder()
                    .set_id(index)
                    .set_name(f"dummy job {index}")
                    .set_ext_job(job)
                )
            builder.add_realization(
                create_realization_builder()
                .active(True)
                .set_iens(iens)
                .add_step(step)
            )
    # Stubbed legacy dependencies: never stop long-running realizations.
    analysis_config = Mock()
    analysis_config.get_stop_long_running = Mock(return_value=False)
    # NOTE(review): ecl_config is constructed but never passed to
    # set_legacy_dependencies below — possibly a leftover from an older
    # signature; verify against the current builder API.
    ecl_config = Mock()
    ecl_config.assert_restart = Mock()
    builder.set_legacy_dependencies(
        queue_config,
        analysis_config,
    )
    return builder