Example #1
    def test_color_always(self):
        out = StringIO()  # not a tty, so coloring is automatically disabled
        monitor = Monitor(out=out, color_always=True)

        self.assertEqual(
            "\x1b[38;2;255;0;0mFoo\x1b[0m", monitor._colorize("Foo", fg=(255, 0, 0))
        )
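
The expected string in this test is a standard ANSI 24-bit ("truecolor") escape sequence: `\x1b[38;2;R;G;Bm` sets the foreground color and `\x1b[0m` resets it. Below is a minimal, self-contained sketch of how a colorize helper could produce that output; the function name and the tty check are illustrative, not ert's actual implementation.

import sys


def colorize(text, fg, out=sys.stdout, color_always=False):
    # `fg` is an (r, g, b) tuple. Coloring is skipped unless `out` is a
    # tty or `color_always` is set, mirroring the behavior the test
    # comment describes.
    if not color_always and not out.isatty():
        return text
    r, g, b = fg
    return f"\x1b[38;2;{r};{g};{b}m{text}\x1b[0m"


print(colorize("Foo", fg=(255, 0, 0), color_always=True))
# -> '\x1b[38;2;255;0;0mFoo\x1b[0m', matching the asserted string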
Example #2
    def test_result_success(self):
        out = StringIO()
        monitor = Monitor(out=out)

        monitor._print_result(False, None)

        self.assertEqual("Simulations completed.\n", out.getvalue())
Example #3
    def test_result_failure(self):
        out = StringIO()
        monitor = Monitor(out=out)

        monitor._print_result(True, "fail")

        self.assertEqual("Simulations failed with the following error: fail\n",
                         out.getvalue())
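
Together, these two tests pin down the whole contract of `_print_result`: one fixed success line and one failure template. A hypothetical stand-alone version that satisfies both assertions could look like this (the message formats are taken verbatim from the assertions above):

from io import StringIO


def print_result(out, failed, failed_msg):
    # Two messages, copied from the asserted strings in the tests.
    if failed:
        out.write(f"Simulations failed with the following error: {failed_msg}\n")
    else:
        out.write("Simulations completed.\n")


out = StringIO()
print_result(out, True, "fail")
assert out.getvalue() == "Simulations failed with the following error: fail\n"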
Example #4
def run_cli(args):
    logging.basicConfig(level=logging.INFO, format="%(message)s")
    res_config = ResConfig(args.config)
    os.chdir(res_config.config_path)
    ert = EnKFMain(res_config, strict=True, verbose=args.verbose)
    notifier = ErtCliNotifier(ert, args.config)
    ERT.adapt(notifier)

    if args.mode == WORKFLOW_MODE:
        execute_workflow(args.name)
        return

    model, argument = create_model(args)
    # Test run does not have a current_case
    if "current_case" in args and args.current_case:
        ERT.enkf_facade.select_or_create_new_case(args.current_case)

    if (
        args.mode
        in [ENSEMBLE_SMOOTHER_MODE, ITERATIVE_ENSEMBLE_SMOOTHER_MODE, ES_MDA_MODE]
        and args.target_case == ERT.enkf_facade.get_current_case_name()
    ):
        msg = (
            "ERROR: Target file system and source file system can not be the same. "
            "They were both: {}.".format(args.target_case)
        )
        _clear_and_exit(msg)

    ee_config = None
    if FeatureToggling.is_enabled("ensemble-evaluator"):
        ee_config = EvaluatorServerConfig()
        argument.update({"ee_config": ee_config})

    thread = threading.Thread(
        name="ert_cli_simulation_thread",
        target=model.startSimulations,
        args=(argument,),
    )
    thread.start()

    tracker = create_tracker(model, detailed_interval=0, ee_config=ee_config)

    out = open(os.devnull, "w") if args.disable_monitoring else sys.stdout
    monitor = Monitor(out=out, color_always=args.color_always)

    try:
        monitor.monitor(tracker)
    except (SystemExit, KeyboardInterrupt):
        print("\nKilling simulations...")
        tracker.request_termination()

    if args.disable_monitoring:
        out.close()

    thread.join()

    if model.hasRunFailed():
        _clear_and_exit(1)  # the monitor has already reported the error message
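
The core pattern here, independent of ert's classes, is: run the blocking simulation on a named worker thread, keep the main thread free to drive the monitor, translate Ctrl-C into a termination request, and only then join. A stripped-down sketch of that shape, with placeholder `work`/`stop_requested` names standing in for the ert calls:

import threading
import time

stop_requested = threading.Event()


def work():
    # Stands in for model.startSimulations: a long-running blocking call
    # that periodically checks for a termination request.
    for _ in range(5):
        if stop_requested.is_set():
            return
        time.sleep(0.1)


thread = threading.Thread(name="ert_cli_simulation_thread", target=work)
thread.start()

try:
    # Stands in for monitor.monitor(tracker): the main thread blocks here,
    # so KeyboardInterrupt/SystemExit surface in this frame, not the worker.
    while thread.is_alive():
        time.sleep(0.05)
except (SystemExit, KeyboardInterrupt):
    print("\nKilling simulations...")
    stop_requested.set()  # stands in for tracker.request_termination()

thread.join()  # always join, whether we finished or were interrupted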
Example #5
    def test_legends(self):
        done_state = SimulationStateStatus("Finished", None, None)
        done_state.count = 10
        done_state.total_count = 100
        monitor = Monitor(out=StringIO())

        legends = monitor._get_legends([done_state])

        self.assertEqual("Finished       10/100", legends[done_state])
Example #6
File: main.py  Project: kvashchuka/ert
def run_cli(args):
    logging.basicConfig(level=logging.INFO, format='%(message)s')
    res_config = ResConfig(args.config)
    os.chdir(res_config.config_path)
    ert = EnKFMain(res_config, strict=True, verbose=args.verbose)
    notifier = ErtCliNotifier(ert, args.config)
    ERT.adapt(notifier)

    if args.mode == WORKFLOW_MODE:
        execute_workflow(args.name)
        return

    model, argument = create_model(args)
    if args.disable_monitoring:
        model.startSimulations(argument)
        if model.hasRunFailed():
            _clear_and_exit(model.getFailMessage())
    else:
        # Test run does not have a current_case
        if "current_case" in args and args.current_case:
            ERT.enkf_facade.select_or_create_new_case(args.current_case)

        if (
            args.mode
            in [ENSEMBLE_SMOOTHER_MODE, ITERATIVE_ENSEMBLE_SMOOTHER_MODE, ES_MDA_MODE]
            and args.target_case == ERT.enkf_facade.get_current_case_name()
        ):
            msg = (
                "ERROR: Target file system and source file system can not be the same. "
                "They were both: {}.".format(args.target_case)
            )
            _clear_and_exit(msg)

        thread = threading.Thread(
            name="ert_cli_simulation_thread",
            target=model.startSimulations,
            args=(argument,)
        )
        thread.start()

        tracker = create_tracker(model, tick_interval=0, detailed_interval=0)
        monitor = Monitor(color_always=args.color_always)

        try:
            monitor.monitor(tracker)
        except (SystemExit, KeyboardInterrupt):
            print("\nKilling simulations...")
            model.killAllSimulations()

        thread.join()

        if model.hasRunFailed():
            _clear_and_exit(1)  # the monitor has already reported the error message
Example #7
    def test_legends(self):
        monitor = Monitor(out=StringIO())
        sd = SnapshotDict(status="")
        for i in range(0, 100):
            status = REALIZATION_STATE_FINISHED if i < 10 else REALIZATION_STATE_RUNNING
            sd.reals[i] = Realization(status=status, active=True)
        monitor._snapshots[0] = Snapshot(sd.dict())
        legends = monitor._get_legends()

        self.assertEqual(
            """    Waiting         0/100
    Pending         0/100
    Running        90/100
    Failed          0/100
    Finished       10/100
    Unknown         0/100
""",
            legends,
        )
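
This newer variant derives the legend counts from a snapshot of per-realization statuses rather than from pre-aggregated state objects. The aggregation step itself is just a status histogram; with plain data structures (and hypothetical stand-ins for the REALIZATION_STATE_* constants) it can be sketched as:

from collections import Counter

FINISHED, RUNNING = "Finished", "Running"  # stand-ins for the constants

# 100 realizations: the first 10 finished, the rest running, as in the test.
statuses = [FINISHED if i < 10 else RUNNING for i in range(100)]

counts = Counter(statuses)
total = len(statuses)
for state in ("Waiting", "Pending", "Running", "Failed", "Finished", "Unknown"):
    print(f"    {state:<13}{f'{counts[state]}/{total}':>8}")
# Prints Running 90/100 and Finished 10/100, zeros elsewhere,
# matching the asserted legend block.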
Example #8
def evaluate(
    ensemble: ert.ensemble_evaluator.Ensemble,
    custom_port_range: Optional[range] = None,
) -> Dict[int, Dict[str, ert.data.RecordTransmitter]]:
    config = EvaluatorServerConfig(custom_port_range=custom_port_range)

    run_model = ERT3RunModel()
    tracker = ert.ensemble_evaluator.EvaluatorTracker(
        run_model,
        config.get_connection_info(),
    )
    monitor = Monitor(out=sys.stderr, color_always=False)  # type: ignore

    ee = EnsembleEvaluator(ensemble=ensemble, config=config, iter_=0)

    executor = futures.ThreadPoolExecutor()
    future = executor.submit(_run, ee, run_model)
    monitor.monitor(tracker)  # type: ignore
    result: Dict[int, Dict[str, ert.data.RecordTransmitter]] = future.result()
    return result
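
Unlike the threading examples above, this variant uses `concurrent.futures` so the evaluator's return value can be handed back to the caller: the blocking run goes to a pool thread, the main thread drives the monitor, and `future.result()` both waits for the worker and re-raises any exception it threw. A generic sketch of that pattern, with placeholder functions standing in for the ert calls:

from concurrent import futures


def run_evaluation():
    # Stands in for _run(ee, run_model): a blocking call with a result.
    return {0: {"output": "transmitter"}}


def monitor_progress():
    # Stands in for monitor.monitor(tracker); blocks until evaluation ends.
    pass


executor = futures.ThreadPoolExecutor()
future = executor.submit(run_evaluation)
monitor_progress()
result = future.result()  # waits for the worker and re-raises its exceptions
executor.shutdown()
print(result)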
Example #9
    def test_print_progress(self):
        out = StringIO()
        monitor = Monitor(out=out)
        sd = SnapshotDict(status="")
        for i in range(0, 100):
            status = REALIZATION_STATE_FINISHED if i < 50 else REALIZATION_STATE_WAITING
            sd.reals[i] = Realization(status=status, active=True)
        monitor._snapshots[0] = Snapshot(sd.dict())
        monitor._start_time = datetime.now()
        general_event = _UpdateEvent(
            phase_name="Test Phase",
            current_phase=0,
            total_phases=2,
            progress=0.5,
            indeterminate=False,
            iteration=0,
        )

        monitor._print_progress(general_event)

        self.assertEqual(
            """\r
    --> Test Phase

    1/2 |███████████████               | 50% Running time: 0 seconds

    Waiting        50/100
    Pending         0/100
    Running         0/100
    Failed          0/100
    Finished       50/100
    Unknown         0/100
""",
            out.getvalue(),
        )
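
The bar in the expected output is easy to reverse-engineer: 15 filled cells out of 30 at 50% progress, a `current+1/total` phase prefix, and a percentage. The sketch below reproduces the asserted progress line; the 30-cell width and field layout are inferred from the expected string, not from ert's renderer.

def progress_line(current_phase, total_phases, progress, seconds, width=30):
    # 30-cell bar inferred from the expected output: 15 filled at 50%.
    filled = int(progress * width)
    bar = "█" * filled + " " * (width - filled)
    return (
        f"    {current_phase + 1}/{total_phases} |{bar}| "
        f"{int(progress * 100)}% Running time: {seconds} seconds"
    )


expected = "    1/2 |███████████████               | 50% Running time: 0 seconds"
assert progress_line(0, 2, 0.5, 0) == expected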
Example #10
    def test_print_progress(self):
        out = StringIO()
        monitor = Monitor(out=out)
        states = [
            SimulationStateStatus("Finished", None, None),
            SimulationStateStatus("Waiting", None, None),
        ]
        states[0].count = 10
        states[0].total_count = 100
        general_event = GeneralEvent("Test Phase", 0, 2, 0.5, False, states, 10)

        monitor._print_progress(general_event)

        self.assertEqual(
            """\r
    --> Test Phase

    1/2 |███████████████               | 50% Running time: 10 seconds

    Finished       10/100
    Waiting           0/1
""", out.getvalue())
Example #11
File: main.py  Project: JotaBeDM/ert
def run_cli(args):
    logging.basicConfig(level=logging.INFO, format='%(message)s')
    res_config = ResConfig(args.config)
    os.chdir(res_config.config_path)
    ert = EnKFMain(res_config, strict=True, verbose=args.verbose)
    notifier = ErtCliNotifier(ert, args.config)
    ERT.adapt(notifier)

    if args.mode == 'workflow':
        execute_workflow(args.name)
        return

    model, argument = create_model(args)

    if args.disable_monitoring:
        model.startSimulations(argument)
        if model.hasRunFailed():
            sys.exit(model.getFailMessage())
    else:
        thread = threading.Thread(
            name="ert_cli_simulation_thread",
            target=model.startSimulations,
            args=(argument,),
        )
        thread.start()

        tracker = create_tracker(model, tick_interval=0, detailed_interval=0)
        monitor = Monitor(color_always=args.color_always)

        try:
            monitor.monitor(tracker)
        except (SystemExit, KeyboardInterrupt):
            print("\nKilling simulations...")
            model.killAllSimulations()

        thread.join()

        if model.hasRunFailed():
            sys.exit(1)  # the monitor has already reported the error message
Example #12
    def test_print_progress(self):
        out = StringIO()
        monitor = Monitor(out=out)
        sd = SnapshotDict(status="")
        for i in range(0, 100):
            status = REALIZATION_STATE_FINISHED if i < 50 else REALIZATION_STATE_WAITING
            sd.reals[i] = Realization(status=status, active=True)
        monitor._snapshots[0] = Snapshot(sd.dict())
        monitor._start_time = datetime.now()
        general_event = _UpdateEvent(
            phase_name="Test Phase",
            current_phase=0,
            total_phases=2,
            progress=0.5,
            indeterminate=False,
            iteration=0,
        )

        monitor._print_progress(general_event)

        # For some reason, `tqdm` adds an extra line containing a progress bar,
        # even though this test only calls it once.
        # I suspect this has something to do with the way `tqdm` refreshes,
        # but I do not know how to fix it.
        # It does not seem to be an issue when used normally.
        expected = """    --> Test Phase


    |                                                                                      |   0% it
    1/2 |##############################5                              |  50% Running time: 0 seconds

    Waiting        50/100
    Pending         0/100
    Running         0/100
    Failed          0/100
    Finished       50/100
    Unknown         0/100

"""

        assert out.getvalue().replace("\r", "\n") == expected
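
The `#`-style bar with a trailing `5` in the expected output is characteristic of tqdm's ASCII mode, where full cells are drawn with `#` and partial cells with the digits 1-9. The test's comment about the stray extra bar line is an observed quirk of refreshing; the rendering style itself can be reproduced directly (assuming tqdm is installed):

import sys

from tqdm import tqdm

# ascii=True makes tqdm draw '#' for full cells and a digit for the
# partial cell, producing bars like |#####5     | as in the expected output.
bar = tqdm(total=100, ascii=True, file=sys.stderr, ncols=60)
bar.update(50)
bar.refresh()
bar.close()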
Example #13
def run_cli(args):
    res_config = ResConfig(args.config)

    # Create logger inside function to make sure all handlers have been added to
    # the root-logger.
    logger = logging.getLogger(__name__)
    logger.info(
        "Logging forward model jobs",
        extra={
            "workflow_jobs":
            str(res_config.model_config.getForwardModel().joblist())
        },
    )

    os.chdir(res_config.config_path)
    ert = EnKFMain(res_config, strict=True, verbose=args.verbose)
    facade = LibresFacade(ert)

    if args.mode == WORKFLOW_MODE:
        execute_workflow(ert, args.name)
        return
    model = create_model(
        ert,
        facade.get_ensemble_size(),
        facade.get_current_case_name(),
        args,
    )
    # Test run does not have a current_case
    if "current_case" in args and args.current_case:
        facade.select_or_create_new_case(args.current_case)

    if (args.mode in [
            ENSEMBLE_SMOOTHER_MODE, ITERATIVE_ENSEMBLE_SMOOTHER_MODE,
            ES_MDA_MODE
    ] and args.target_case == facade.get_current_case_name()):
        msg = (
            "ERROR: Target file system and source file system can not be the same. "
            "They were both: {}.".format(args.target_case))
        raise ErtCliError(msg)

    evaluator_server_config = EvaluatorServerConfig(
        custom_port_range=args.port_range)

    thread = threading.Thread(
        name="ert_cli_simulation_thread",
        target=model.start_simulations_thread,
        args=(evaluator_server_config, ),
    )
    thread.start()

    tracker = EvaluatorTracker(
        model, ee_con_info=evaluator_server_config.get_connection_info())

    out = open(os.devnull, "w") if args.disable_monitoring else sys.stderr
    monitor = Monitor(out=out, color_always=args.color_always)

    try:
        monitor.monitor(tracker)
    except (SystemExit, KeyboardInterrupt):
        print("\nKilling simulations...")
        tracker.request_termination()

    if args.disable_monitoring:
        out.close()

    thread.join()

    if model.hasRunFailed():
        raise ErtCliError(model.getFailMessage())
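
One detail worth noting in this and the earlier `run_cli` variants: when monitoring is disabled, a real file handle to `os.devnull` is opened and closed manually, so an exception raised between `open` and `close` would leak it. A sketch of the same redirect using `contextlib.ExitStack`, which closes the handle on any exit path; this is a possible hardening, not what ert does.

import contextlib
import os
import sys

disable_monitoring = True  # stands in for args.disable_monitoring

with contextlib.ExitStack() as stack:
    if disable_monitoring:
        out = stack.enter_context(open(os.devnull, "w"))
    else:
        out = sys.stderr  # not entered into the stack, so never closed
    out.write("monitor output goes here\n")
# The devnull handle (if any) is closed here, even on exceptions.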