Example #1
def test_launch_pbs_orc(fileutils, wlmutils):
    """test single node orchestrator"""
    launcher = wlmutils.get_test_launcher()
    if launcher != "pbs":
        pytest.skip("Test only runs on systems with PBSPro as WLM")

    exp_name = "test-launch-pbs-orc"
    exp = Experiment(exp_name, launcher=launcher)
    test_dir = fileutils.make_test_dir(exp_name)

    # batch = False to launch on existing allocation
    orc = PBSOrchestrator(6780, batch=False)
    orc.set_path(test_dir)

    exp.start(orc, block=True)
    status = exp.get_status(orc)

    # don't use assert here so that we don't leave an orphan orchestrator process
    if constants.STATUS_FAILED in status:
        exp.stop(orc)
        assert False

    exp.stop(orc)
    status = exp.get_status(orc)
    assert all([stat == constants.STATUS_CANCELLED for stat in status])
Example #2
def test_launch_pbs_cluster_orc(fileutils, wlmutils):
    """test clustered 3-node orchestrator

    This test will fail if the PBS allocation is not
    obtained with `-l place=scatter`.

    It will also fail if there are not enough nodes in the
    allocation to support a 3-node deployment.
    """
    launcher = wlmutils.get_test_launcher()
    if launcher != "pbs":
        pytest.skip("Test only runs on systems with PBSPro as WLM")

    exp_name = "test-launch-pbs-cluster-orc"
    exp = Experiment(exp_name, launcher=launcher)
    test_dir = fileutils.make_test_dir(exp_name)

    # batch = False to launch on existing allocation
    orc = PBSOrchestrator(6780, db_nodes=3, batch=False, inter_op_threads=4)
    orc.set_path(test_dir)

    exp.start(orc, block=True)
    status = exp.get_status(orc)

    # don't use assert here so that we don't leave an orphan orchestrator process
    if constants.STATUS_FAILED in status:
        exp.stop(orc)
        assert False

    exp.stop(orc)
    status = exp.get_status(orc)
    assert all([stat == constants.STATUS_CANCELLED for stat in status])
Example #3
def test_launch_slurm_cluster_orc(fileutils, wlmutils):
    """test clustered 3-node orchestrator"""

    # TODO detect number of nodes in allocation and skip if not sufficient
    launcher = wlmutils.get_test_launcher()
    if launcher != "slurm":
        pytest.skip("Test only runs on systems with Slurm as WLM")

    exp_name = "test-launch-slurm-cluster-orc"
    exp = Experiment(exp_name, launcher=launcher)
    test_dir = fileutils.make_test_dir(exp_name)

    # batch = False to launch on existing allocation
    orc = SlurmOrchestrator(6780, db_nodes=3, batch=False)
    orc.set_path(test_dir)

    exp.start(orc, block=True)
    status = exp.get_status(orc)

    # don't use assert here so that we don't leave an orphan orchestrator process
    if constants.STATUS_FAILED in status:
        exp.stop(orc)
        assert False

    exp.stop(orc)
    status = exp.get_status(orc)
    assert all([stat == constants.STATUS_CANCELLED for stat in status])
Example #4
def test_consumer(fileutils):
    """Run three processes, each one of the first two processes
    puts a tensor on the DB; the third process accesses the
    tensors put by the two producers.
    Finally, the tensor is used to run a model by each producer
    and the consumer accesses the two results.
    """
    test_dir = fileutils.make_test_dir("smartredis_ensemble_consumer_test")
    exp = Experiment("smartredis_ensemble_consumer",
                     exp_path=test_dir,
                     launcher="local")

    # create and start a database
    orc = Orchestrator(port=REDIS_PORT)
    exp.generate(orc)
    exp.start(orc, block=False)

    rs_prod = RunSettings("python", "producer.py")
    rs_consumer = RunSettings("python", "consumer.py")
    params = {"mult": [1, -10]}
    ensemble = Ensemble(name="producer",
                        params=params,
                        run_settings=rs_prod,
                        perm_strat="step")

    consumer = Model("consumer",
                     params={},
                     path=ensemble.path,
                     run_settings=rs_consumer)
    ensemble.add_model(consumer)

    ensemble.register_incoming_entity(ensemble[0])
    ensemble.register_incoming_entity(ensemble[1])

    config = fileutils.get_test_conf_path("smartredis")
    ensemble.attach_generator_files(to_copy=[config])

    exp.generate(ensemble)

    # start the models
    exp.start(ensemble, summary=False)

    # get and confirm statuses
    statuses = exp.get_status(ensemble)
    assert all([stat == constants.STATUS_COMPLETED for stat in statuses])

    # stop the orchestrator
    exp.stop(orc)

    print(exp.summary())
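The producer.py and consumer.py scripts launched by this ensemble are not part of the listing. Below is a rough sketch of what they plausibly do with the SmartRedis client; the tensor key, replica names, and use of a non-clustered client are assumptions.

# producer.py (hypothetical sketch): each producer puts one tensor on the database
import numpy as np
from smartredis import Client

client = Client(cluster=False)  # database address is read from the SSDB environment variable
client.put_tensor("tensor", np.ones((1, 10), dtype=np.float32))

# consumer.py (hypothetical sketch): read the tensors written by both producers
from smartredis import Client

client = Client(cluster=False)
for producer_name in ("producer_0", "producer_1"):  # assumed replica names
    # point the client at the registered incoming entity's key prefix, then fetch
    client.set_data_source(producer_name)
    tensor = client.get_tensor("tensor")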
Example #5
def test_stop_entity(fileutils, wlmutils):
    exp_name = "test-launch-stop-model"
    exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher())
    test_dir = fileutils.make_test_dir(exp_name)

    script = fileutils.get_test_conf_path("sleep.py")
    settings = wlmutils.get_run_settings("python", f"{script} --time=10")
    M1 = exp.create_model("m1", path=test_dir, run_settings=settings)

    exp.start(M1, block=False)
    time.sleep(5)
    exp.stop(M1)
    assert M1.name in exp._control._jobs.completed
    assert exp.get_status(M1)[0] == constants.STATUS_CANCELLED
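The sleep.py helper used here is not shown in the listing; given the --time flag passed to it, a minimal hypothetical sketch would be:

# sleep.py (hypothetical sketch): block for the requested number of seconds
import argparse
import time

parser = argparse.ArgumentParser()
parser.add_argument("--time", type=int, default=10)
args = parser.parse_args()
time.sleep(args.time)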
Example #6
def test_orchestrator_relaunch(fileutils):
    """Test error when users try to launch second orchestrator"""
    exp_name = "test-orc-error-on-relaunch"
    exp = Experiment(exp_name, launcher="local")
    test_dir = fileutils.make_test_dir(exp_name)

    orc = Orchestrator(port=6780)
    orc.set_path(test_dir)
    orc_1 = Orchestrator(port=6790)
    orc_1.set_path(test_dir)

    exp.start(orc)
    with pytest.raises(SmartSimError):
        exp.start(orc_1)

    exp.stop(orc)
Example #7
def test_stop_entity_list(fileutils, wlmutils):

    exp_name = "test-launch-stop-ensemble"
    exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher())
    test_dir = fileutils.make_test_dir(exp_name)

    script = fileutils.get_test_conf_path("sleep.py")
    settings = wlmutils.get_run_settings("python", f"{script} --time=10")
    ensemble = exp.create_ensemble("e1", run_settings=settings, replicas=2)
    ensemble.set_path(test_dir)

    exp.start(ensemble, block=False)
    time.sleep(5)
    exp.stop(ensemble)
    statuses = exp.get_status(ensemble)
    assert all([stat == constants.STATUS_CANCELLED for stat in statuses])
    assert all([m.name in exp._control._jobs.completed for m in ensemble])
Example #8
def test_exchange(fileutils):
    """Run two processes, each process puts a tensor on
    the DB, then accesses the other process's tensor.
    Finally, the tensor is used to run a model.
    """

    test_dir = fileutils.make_test_dir("smartredis_ensemble_exchange_test")
    exp = Experiment("smartredis_ensemble_exchange",
                     exp_path=test_dir,
                     launcher="local")

    # create and start a database
    orc = Orchestrator(port=REDIS_PORT)
    exp.generate(orc)
    exp.start(orc, block=False)

    rs = RunSettings("python", "producer.py --exchange")
    params = {"mult": [1, -10]}
    ensemble = Ensemble(
        name="producer",
        params=params,
        run_settings=rs,
        perm_strat="step",
    )

    ensemble.register_incoming_entity(ensemble[0])
    ensemble.register_incoming_entity(ensemble[1])

    config = fileutils.get_test_conf_path("smartredis")
    ensemble.attach_generator_files(to_copy=[config])

    exp.generate(ensemble)

    # start the models
    exp.start(ensemble, summary=False)

    # get and confirm statuses
    statuses = exp.get_status(ensemble)
    assert all([stat == constants.STATUS_COMPLETED for stat in statuses])

    # stop the orchestrator
    exp.stop(orc)

    print(exp.summary())
Example #9
def test_reconnect_local_orc():
    """Test reconnecting to orchestrator from first experiment"""
    global first_dir
    # start new experiment
    exp_name = "test-orc-local-reconnect-2nd"
    exp_2 = Experiment(exp_name, launcher="local")

    checkpoint = osp.join(first_dir, "smartsim_db.dat")
    reloaded_orc = exp_2.reconnect_orchestrator(checkpoint)

    # let statuses update once
    time.sleep(5)

    statuses = exp_2.get_status(reloaded_orc)
    for stat in statuses:
        if stat == constants.STATUS_FAILED:
            exp_2.stop(reloaded_orc)
            assert False
    exp_2.stop(reloaded_orc)
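This test reloads a checkpoint created by an earlier test in the same module, which sets the module-level first_dir and leaves a local orchestrator running. That companion test is not shown; a minimal sketch of what it plausibly looks like follows (the test name and port are assumptions).

first_dir = ""


def test_local_orchestrator_first(fileutils):
    """Hypothetical companion test: start a local orchestrator and record its
    path so that test_reconnect_local_orc can reload it from smartsim_db.dat."""
    global first_dir
    exp_name = "test-orc-local-reconnect-1st"
    exp = Experiment(exp_name, launcher="local")
    test_dir = fileutils.make_test_dir(exp_name)
    first_dir = test_dir

    orc = Orchestrator(port=6780)
    orc.set_path(test_dir)

    exp.start(orc)
    statuses = exp.get_status(orc)
    assert constants.STATUS_FAILED not in statuses
    # the orchestrator is intentionally left running for the reconnect test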
Example #10
def test_launch_slurm_cluster_orc(fileutils):
    """test clustered 3-node orchestrator"""
    exp_name = "test-launch-slurm-cluster-orc-batch"
    exp = Experiment(exp_name, launcher="slurm")
    test_dir = fileutils.make_test_dir(exp_name)

    # batch = True to submit the orchestrator as a batch job
    orc = SlurmOrchestrator(6780, db_nodes=3, batch=True)
    orc.set_path(test_dir)

    exp.start(orc, block=True)
    status = exp.get_status(orc)

    # don't use assert here so that we don't leave an orphan orchestrator process
    if constants.STATUS_FAILED in status:
        exp.stop(orc)
        assert False

    exp.stop(orc)
    status = exp.get_status(orc)
    assert all([stat == constants.STATUS_CANCELLED for stat in status])
Example #11
def test_launch_slurm_cluster_orc_reconnect(fileutils):
    """test reconnecting to clustered 3-node orchestrator"""

    exp_name = "test-launch-slurm-cluster-orc-batch-reconect"
    exp = Experiment(exp_name, launcher="slurm")
    test_dir = fileutils.make_test_dir(exp_name)

    # batch = True to submit the orchestrator as a batch job
    orc = SlurmOrchestrator(6780, db_nodes=3, batch=True)
    orc.set_path(test_dir)

    exp.start(orc, block=True)

    status = exp.get_status(orc)
    # don't use assert here so that we don't leave an orphan orchestrator process
    if constants.STATUS_FAILED in status:
        exp.stop(orc)
        assert False

    exp_name = "test-orc-slurm-cluster-orc-batch-reconnect-2nd"
    exp_2 = Experiment(exp_name, launcher="slurm")

    checkpoint = osp.join(test_dir, "smartsim_db.dat")
    reloaded_orc = exp_2.reconnect_orchestrator(checkpoint)

    # let statuses update once
    time.sleep(5)

    statuses = exp_2.get_status(reloaded_orc)
    for stat in statuses:
        if stat == constants.STATUS_FAILED:
            exp_2.stop(reloaded_orc)
            assert False
    exp_2.stop(reloaded_orc)
Example #12
def mom6_clustered_driver(
        walltime="02:00:00",
        ensemble_size=1,
        nodes_per_member=25,
        tasks_per_node=45,
        mom6_exe_path="/lus/cls01029/shao/dev/gfdl/MOM6-examples/build/gnu/" +
    "ice_ocean_SIS2/repro/MOM6",
        ensemble_node_features='[CL48|SK48|SK56]',
        mask_table="mask_table.315.32x45",
        domain_layout="32,45",
        eke_model_name="ncar_ml_eke.gpu.pt",
        eke_backend="GPU",
        orchestrator_port=6780,
        orchestrator_interface="ipogif0",
        orchestrator_nodes=3,
        orchestrator_node_features='P100',
        configure_only=False):
    """Run a MOM6 OM4_025 simulation with a cluster of databases used for
    machine-learning inference

    :param walltime: how long to allocate for the run, "hh:mm:ss"
    :type walltime: str, optional
    :param ensemble_size: number of members in the ensemble
    :type ensemble_size: int, optional
    :param nodes_per_member: number of nodes allocated to each ensemble member
    :type nodes_per_member: int, optional
    :param tasks_per_node: how many MPI ranks to be run per node
    :type tasks_per_node: int, optional
    :param mom6_exe_path: full path to the compiled MOM6 executable
    :type mom6_exe_path: str, optional
    :param ensemble_node_features: (Slurm-only) Constraints/features for the
                                    node
    :type ensemble_node_features: str, optional
    :param mask_table: the file to use for the specified layout eliminating
                       land domains
    :type mask_table: str, optional
    :param domain_layout: the particular domain decomposition
    :type domain_layout: str, optional
    :param eke_model_name: file containing the saved machine-learning model
    :type eke_model_name: str, optional
    :param eke_backend: (CPU or GPU), sets whether the ML-EKE model will be
                        run on CPU or GPU
    :type eke_backend: str, optional
    :param orchestrator_port: port that the database will listen on
    :type orchestrator_port: int, optional
    :param orchestrator_interface: network interface bound to the database
    :type orchestrator_interface: str, optional
    :param orchestrator_nodes: number of orchestrator nodes to use
    :type orchestrator_nodes: int, optional
    :param orchestrator_node_features: (Slurm-only) node features requested for
                                       the orchestrator nodes
    :type orchestrator_node_features: str, optional
    :param configure_only: If True, only configure the experiment and return
                           the orchestrator and experiment objects
    :type configure_only: bool, optional
    """

    experiment = Experiment("AI-EKE-MOM6", launcher="auto")
    mom_ensemble = create_mom_ensemble(experiment, walltime, ensemble_size,
                                       nodes_per_member, tasks_per_node,
                                       mom6_exe_path, ensemble_node_features)
    configure_mom_ensemble(mom_ensemble, False, orchestrator_nodes >= 3,
                           mask_table, domain_layout, eke_model_name,
                           eke_backend)
    orchestrator = create_distributed_orchestrator(
        experiment, orchestrator_port, orchestrator_interface,
        orchestrator_nodes, orchestrator_node_features, walltime)

    experiment.generate(mom_ensemble, orchestrator, overwrite=True)
    if configure_only:
        return experiment, mom_ensemble, orchestrator
    else:
        experiment.start(mom_ensemble, orchestrator, summary=True)
        experiment.stop(orchestrator)
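A possible way to use the configure_only branch above: build the experiment, inspect or adjust the generated run directories, and only then launch, mirroring the non-configure_only path. Argument values here are purely illustrative.

# configure the experiment without launching anything
experiment, mom_ensemble, orchestrator = mom6_clustered_driver(
    ensemble_size=2,
    orchestrator_nodes=3,
    configure_only=True,
)

# ...inspect or edit the generated input files here...

# launch and tear down manually, as the driver would otherwise do itself
experiment.start(mom_ensemble, orchestrator, summary=True)
experiment.stop(orchestrator)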
Example #13
def mom6_colocated_driver(
        walltime="02:00:00",
        ensemble_size=1,
        nodes_per_member=15,
        tasks_per_node=17,
        mom6_exe_path="/lus/cls01029/shao/dev/gfdl/MOM6-examples/build/gnu/" +
    "ice_ocean_SIS2/repro/MOM6",
        ensemble_node_features='P100',
        mask_table="mask_table.33.16x18",
        domain_layout="16,18",
        eke_model_name="ncar_ml_eke.gpu.pt",
        eke_backend="GPU",
        orchestrator_port=6780,
        orchestrator_interface="ipogif0",
        colocated_stride=18,
        orchestrator_cpus=4,
        limit_orchestrator_cpus=False):
    """Run a MOM6 OM4_025 simulation using a colocated deployment for online
    machine-learning inference

    :param walltime: how long to allocate for the run, "hh:mm:ss"
    :type walltime: str, optional
    :param ensemble_size: number of members in the ensemble
    :type ensemble_size: int, optional
    :param nodes_per_member: number of nodes allocated to each ensemble member
    :type nodes_per_member: int, optional
    :param tasks_per_node: how many MPI ranks to be run per node
    :type tasks_per_node: int, optional
    :param mom6_exe_path: full path to the compiled MOM6 executable
    :type mom6_exe_path: str, optional
    :param ensemble_node_features: (Slurm-only) Constraints/features for the
                                    node
    :type ensemble_node_features: str, optional
    :param mask_table: the file to use for the specified layout eliminating
                       land domains
    :type mask_table: str, optional
    :param domain_layout: the particular domain decomposition
    :type domain_layout: str, optional
    :param eke_model_name: file containing the saved machine-learning model
    :type eke_model_name: str, optional
    :param eke_backend: (CPU or GPU), sets whether the ML-EKE model will be
                        run on CPU or GPU
    :type eke_backend: str, optional
    :param orchestrator_port: port that the database will listen on
    :type orchestrator_port: int, optional
    :param orchestrator_interface: network interface bound to the orchestrator
    :type orchestrator_interface: str, optional
    :param orchestrator_cpus: Specify the number of cores that the
                                    orchestrator can use to handle requests
    :type orchestrator_cpus: int, optional
    :param limit_orchestrator_cpus: Limit the number of CPUs that the
                                    orchestrator can use to handle requests
    :type limit_orchestrator_cpus: bool, optional
    """
    experiment = Experiment("AI-EKE-MOM6", launcher="auto")
    mom_ensemble = create_mom_ensemble(experiment, walltime, ensemble_size,
                                       nodes_per_member, tasks_per_node,
                                       mom6_exe_path, ensemble_node_features)
    configure_mom_ensemble(mom_ensemble,
                           True,
                           False,
                           mask_table,
                           domain_layout,
                           eke_model_name,
                           eke_backend,
                           colocated_stride=colocated_stride)

    add_colocated_orchestrator(
        mom_ensemble,
        orchestrator_port,
        orchestrator_interface,
        orchestrator_cpus,
        limit_orchestrator_cpus,
    )

    experiment.generate(mom_ensemble, overwrite=True)
    experiment.start(mom_ensemble, summary=True)
    experiment.stop()
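The add_colocated_orchestrator helper is not included in this listing. A sketch of what it might do, assuming SmartSim's Model.colocate_db API, is shown below; the exact parameter names are an assumption.

def add_colocated_orchestrator(ensemble, port, interface, db_cpus, limit_app_cpus):
    """Hypothetical sketch: attach a colocated database to every ensemble member."""
    for model in ensemble:
        model.colocate_db(
            port=port,
            ifname=interface,            # network interface the database binds to
            db_cpus=db_cpus,             # cores reserved for the database on each node
            limit_app_cpus=limit_app_cpus,
        )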
Example #14
def test_stop_type():
    """Wrong argument type given to stop"""
    exp = Experiment("name")
    with pytest.raises(TypeError):
        exp.stop("model")