Example #1
0
def test_bad_run_command_args(fileutils, wlmutils):
    """Should fail because of incorrect arguments given to the
    run command

    This test ensures that we catch immediate failures
    """
    test_launcher = wlmutils.get_test_launcher()
    if test_launcher != "slurm":
        pytest.skip(f"Only fails with slurm. Launcher is {test_launcher}")

    exp_name = "test-bad-run-command-args"
    exp = Experiment(exp_name, launcher=test_launcher)
    test_dir = fileutils.make_test_dir(exp_name)

    bad_script = fileutils.get_test_conf_path("bad.py")

    # the bogus keyword argument becomes an (invalid) argument to the
    # run command of the specific WLM of the system, so the launch
    # should fail right away
    run_settings = wlmutils.get_run_settings(
        "python", f"{bad_script} --time=5", badarg="bad-arg"
    )

    bad_model = exp.create_model(
        "bad-model", path=test_dir, run_settings=run_settings
    )

    with pytest.raises(SmartSimError):
        exp.start(bad_model)
Example #2
0
def test_full_exp(fileutils):
    """Generate a full experiment on disk: a model with a copied file,
    an orchestrator, and a parameterized ensemble."""
    test_dir = fileutils.make_test_dir("gen_full_test")
    exp = Experiment("gen-test", test_dir, launcher="local")

    model = exp.create_model("model", run_settings=rs)
    sleep_script = fileutils.get_test_conf_path("sleep.py")
    model.attach_generator_files(to_copy=sleep_script)

    orc = Orchestrator(6780)
    ensemble = exp.create_ensemble(
        "test_ens",
        params={"THERMO": [10, 20, 30], "STEPS": [10, 20, 30]},
        run_settings=rs,
    )

    config_file = fileutils.get_test_conf_path("in.atm")
    ensemble.attach_generator_files(to_configure=config_file)
    exp.generate(orc, ensemble, model)

    # 3 THERMO values x 3 STEPS values -> 9 ensemble member directories
    assert osp.isdir(osp.join(test_dir, "test_ens/"))
    for member in range(9):
        assert osp.isdir(osp.join(test_dir, f"test_ens/test_ens_{member}"))

    # orchestrator directory was generated
    assert osp.isdir(osp.join(test_dir, "database"))

    # model directory exists and the attached file was copied in
    assert osp.isdir(osp.join(test_dir, "model"))
    assert osp.isfile(osp.join(test_dir, "model/sleep.py"))
Example #3
0
def test_models(fileutils, wlmutils):
    """Launch two models concurrently and verify both complete.

    :param fileutils: fixture providing test-dir/config-path helpers
    :param wlmutils: fixture providing WLM-specific launcher/settings
    """
    exp_name = "test-models-launch"
    exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher())
    test_dir = fileutils.make_test_dir(exp_name)

    script = fileutils.get_test_conf_path("sleep.py")
    settings = wlmutils.get_run_settings("python", f"{script} --time=5")
    M1 = exp.create_model("m1", path=test_dir, run_settings=settings)
    M2 = exp.create_model("m2", path=test_dir, run_settings=settings)

    exp.start(M1, M2, block=True)
    statuses = exp.get_status(M1, M2)
    # generator expression: no need to materialize a list for all() (C419)
    assert all(stat == constants.STATUS_COMPLETED for stat in statuses)
Example #4
0
def test_model_failure(fileutils):
    """Launch a model that exits nonzero and verify it reports FAILED.

    :param fileutils: fixture providing test-dir/config-path helpers
    """
    exp_name = "test-model-failure"
    exp = Experiment(exp_name, launcher="local")
    test_dir = fileutils.make_test_dir(exp_name)

    script = fileutils.get_test_conf_path("bad.py")
    settings = RunSettings("python", f"{script} --time=3")

    M1 = exp.create_model("m1", path=test_dir, run_settings=settings)

    exp.start(M1, block=True)
    statuses = exp.get_status(M1)
    # generator expression: no need to materialize a list for all() (C419)
    assert all(stat == constants.STATUS_FAILED for stat in statuses)
Example #5
0
def test_stop_entity(fileutils, wlmutils):
    """Start a long-running model without blocking, stop it mid-run,
    and verify it lands in the completed jobs with CANCELLED status."""
    exp_name = "test-launch-stop-model"
    exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher())
    test_dir = fileutils.make_test_dir(exp_name)

    sleep_script = fileutils.get_test_conf_path("sleep.py")
    run_settings = wlmutils.get_run_settings(
        "python", f"{sleep_script} --time=10"
    )
    M1 = exp.create_model("m1", path=test_dir, run_settings=run_settings)

    exp.start(M1, block=False)
    # give the job time to actually start before cancelling it
    time.sleep(5)
    exp.stop(M1)

    assert M1.name in exp._control._jobs.completed
    assert exp.get_status(M1)[0] == constants.STATUS_CANCELLED
Example #6
0
def test_summary(fileutils, wlmutils):
    """Fairly rudimentary test of the summary dataframe"""

    exp_name = "test-launch-summary"
    exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher())
    test_dir = fileutils.make_test_dir(exp_name)

    # distinct names for the config paths so the model handles below
    # don't shadow them
    sleep_script = fileutils.get_test_conf_path("sleep.py")
    bad_script = fileutils.get_test_conf_path("bad.py")
    sleep_settings = wlmutils.get_run_settings(
        "python", f"{sleep_script} --time=3"
    )
    bad_settings = wlmutils.get_run_settings(
        "python", f"{bad_script} --time=6"
    )

    sleep = exp.create_model(
        "sleep", path=test_dir, run_settings=sleep_settings
    )
    bad = exp.create_model("bad", path=test_dir, run_settings=bad_settings)

    # start and poll
    exp.start(sleep, bad)
    assert exp.get_status(bad)[0] == constants.STATUS_FAILED
    assert exp.get_status(sleep)[0] == constants.STATUS_COMPLETED

    summary_df = exp.summary()
    print(summary_df)

    # first row: the successful sleep model, return code 0
    sleep_row = summary_df.loc[0]
    assert sleep_row["Name"] == sleep.name
    assert sleep_row["Entity-Type"] == sleep.type
    assert int(sleep_row["RunID"]) == 0
    assert int(sleep_row["Returncode"]) == 0

    # second row: the failing model with a nonzero return code
    bad_row = summary_df.loc[1]
    assert bad_row["Name"] == bad.name
    assert bad_row["Entity-Type"] == bad.type
    assert int(bad_row["RunID"]) == 0
    assert int(bad_row["Returncode"]) != 0
Example #7
0
def test_failed_status(fileutils, wlmutils):
    """Test when a failure occurs deep into model execution"""

    exp_name = "test-report-failure"
    exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher())
    test_dir = fileutils.make_test_dir(exp_name)

    bad_script = fileutils.get_test_conf_path("bad.py")
    run_settings = wlmutils.get_run_settings(
        "python", f"{bad_script} --time=7"
    )

    bad_model = exp.create_model(
        "bad-model", path=test_dir, run_settings=run_settings
    )

    # launch without blocking, then poll until the run finishes
    exp.start(bad_model, block=False)
    while not exp.finished(bad_model):
        time.sleep(2)

    assert exp.get_status(bad_model)[0] == constants.STATUS_FAILED
def test_batch_ensemble(fileutils, wlmutils):
    """Test the launch of a manually constructed batch ensemble

    :param fileutils: fixture providing test-dir/config-path helpers
    :param wlmutils: fixture providing WLM-specific launcher/settings
    """
    exp_name = "test-slurm-batch-ensemble"
    exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher())
    test_dir = fileutils.make_test_dir(exp_name)

    script = fileutils.get_test_conf_path("sleep.py")
    settings = wlmutils.get_run_settings("python", f"{script} --time=5")
    M1 = exp.create_model("m1", path=test_dir, run_settings=settings)
    M2 = exp.create_model("m2", path=test_dir, run_settings=settings)

    # build the ensemble by hand: batch settings, then add models
    batch = SbatchSettings(nodes=2, time="00:01:00")
    ensemble = exp.create_ensemble("batch-ens", batch_settings=batch)
    ensemble.add_model(M1)
    ensemble.add_model(M2)
    ensemble.set_path(test_dir)

    exp.start(ensemble, block=True)
    statuses = exp.get_status(ensemble)
    # generator expression: no need to materialize a list for all() (C419)
    assert all(stat == constants.STATUS_COMPLETED for stat in statuses)
def test_launch_pbs_mpmd():
    """test the launch of a aprun MPMD workload

    this test will obtain an allocation as a batch workload.
    Aprun MPMD workloads share an output file for all processes
    and they share MPI_COMM_WORLDs.

    Prior to running this test, hw_mpi.c in test_configs needs to
    be compiled. #TODO write a script for this.
    """
    exp = Experiment("pbs-test", launcher="pbs")

    # two identical aprun settings fused into one MPMD launch
    run_args = {"pes": 1, "pes-per-node": 1}
    primary = AprunSettings("./hellow", run_args=run_args)
    secondary = AprunSettings("./hellow", run_args=run_args)
    primary.make_mpmd(secondary)

    hello_world = exp.create_model("hello_world", run_settings=primary)

    # submit the model as a single-member batch ensemble via qsub
    qsub = QsubBatchSettings(nodes=2, ppn=1, time="1:00:00")
    ensemble = exp.create_ensemble("ensemble", batch_settings=qsub)
    ensemble.add_model(hello_world)

    exp.start(ensemble)
Example #10
0
from copy import deepcopy

import pytest

from smartsim import Experiment
from smartsim.database import Orchestrator
from smartsim.error import SmartSimError
from smartsim.settings import RunSettings
from smartsim.utils.entityutils import separate_entities

# ---- create entities for testing --------

# shared run settings reused by every entity below
rs = RunSettings("python", "sleep.py")

exp = Experiment("util-test", launcher="local")
model = exp.create_model("model_1", run_settings=rs)
# NOTE(review): this second model is also registered under the name
# "model_1" — presumably it should be "model_2"; confirm intentional
model_2 = exp.create_model("model_1", run_settings=rs)
ensemble = exp.create_ensemble("ensemble", run_settings=rs, replicas=1)
orc = Orchestrator()
# deep copy yields a second, independent Orchestrator instance
orc_1 = deepcopy(orc)


def test_separate():
    """separate_entities splits a mixed list into entities,
    entity lists, and the orchestrator."""
    entities, entity_lists, database = separate_entities(
        [model, ensemble, orc]
    )
    assert entities[0] == model
    assert entity_lists[0] == ensemble
    assert database == orc


def test_two_orc():
    with pytest.raises(SmartSimError):
Example #11
0
def test_model_prefix():
    """Verify that enable_key_prefixing turns on key prefixing
    for a newly created model."""
    exp = Experiment("test")
    model = exp.create_model("model",
                             RunSettings("python"),
                             enable_key_prefixing=True)
    # assert truthiness directly rather than comparing to True (E712)
    assert model._key_prefixing_enabled