def test_launch_slurm_cluster_orc_reconnect(fileutils):
    """test reconnecting to a clustered 3-node orchestrator"""
    exp_name = "test-launch-slurm-cluster-orc-batch-reconnect"
    exp = Experiment(exp_name, launcher="slurm")
    test_dir = fileutils.make_test_dir(exp_name)

    # batch = True to launch the orchestrator through the batch system
    orc = SlurmOrchestrator(6780, db_nodes=3, batch=True)
    orc.set_path(test_dir)

    exp.start(orc, block=True)

    status = exp.get_status(orc)

    # don't use a bare assert so that we don't leave an orphaned orchestrator process
    if constants.STATUS_FAILED in status:
        exp.stop(orc)
        assert False

    exp_name = "test-orc-slurm-cluster-orc-batch-reconnect-2nd"
    exp_2 = Experiment(exp_name, launcher="slurm")

    checkpoint = osp.join(test_dir, "smartsim_db.dat")
    reloaded_orc = exp_2.reconnect_orchestrator(checkpoint)

    # let statuses update once
    time.sleep(5)

    statuses = exp_2.get_status(reloaded_orc)
    for stat in statuses:
        if stat == constants.STATUS_FAILED:
            exp_2.stop(reloaded_orc)
            assert False

    exp_2.stop(reloaded_orc)
def test_launch_pbs_orc(fileutils, wlmutils):
    """test single node orchestrator"""
    launcher = wlmutils.get_test_launcher()
    if launcher != "pbs":
        pytest.skip("Test only runs on systems with PBSPro as WLM")

    exp_name = "test-launch-pbs-orc"
    exp = Experiment(exp_name, launcher=launcher)
    test_dir = fileutils.make_test_dir(exp_name)

    # batch = False to launch on the existing allocation
    orc = PBSOrchestrator(6780, batch=False)
    orc.set_path(test_dir)

    exp.start(orc, block=True)
    status = exp.get_status(orc)

    # don't use a bare assert so that we don't leave an orphaned orchestrator process
    if constants.STATUS_FAILED in status:
        exp.stop(orc)
        assert False

    exp.stop(orc)
    status = exp.get_status(orc)
    assert all([stat == constants.STATUS_CANCELLED for stat in status])
def test_launch_slurm_cluster_orc(fileutils, wlmutils):
    """test clustered 3-node orchestrator"""
    # TODO detect number of nodes in allocation and skip if not sufficient
    launcher = wlmutils.get_test_launcher()
    if launcher != "slurm":
        pytest.skip("Test only runs on systems with Slurm as WLM")

    exp_name = "test-launch-slurm-cluster-orc"
    exp = Experiment(exp_name, launcher=launcher)
    test_dir = fileutils.make_test_dir(exp_name)

    # batch = False to launch on the existing allocation
    orc = SlurmOrchestrator(6780, db_nodes=3, batch=False)
    orc.set_path(test_dir)

    exp.start(orc, block=True)
    status = exp.get_status(orc)

    # don't use a bare assert so that we don't leave an orphaned orchestrator process
    if constants.STATUS_FAILED in status:
        exp.stop(orc)
        assert False

    exp.stop(orc)
    status = exp.get_status(orc)
    assert all([stat == constants.STATUS_CANCELLED for stat in status])
def test_launch_pbs_cluster_orc(fileutils, wlmutils):
    """test clustered 3-node orchestrator

    This test will fail if the PBS allocation is not obtained with
    `-l place=scatter`. It will also fail if there are not enough nodes
    in the allocation to support a 3-node deployment.
    """
    launcher = wlmutils.get_test_launcher()
    if launcher != "pbs":
        pytest.skip("Test only runs on systems with PBSPro as WLM")

    exp_name = "test-launch-pbs-cluster-orc"
    exp = Experiment(exp_name, launcher=launcher)
    test_dir = fileutils.make_test_dir(exp_name)

    # batch = False to launch on the existing allocation
    orc = PBSOrchestrator(6780, db_nodes=3, batch=False, inter_op_threads=4)
    orc.set_path(test_dir)

    exp.start(orc, block=True)
    status = exp.get_status(orc)

    # don't use a bare assert so that we don't leave an orphaned orchestrator process
    if constants.STATUS_FAILED in status:
        exp.stop(orc)
        assert False

    exp.stop(orc)
    status = exp.get_status(orc)
    assert all([stat == constants.STATUS_CANCELLED for stat in status])
def test_restart(fileutils, wlmutils):
    exp_name = "test-restart"
    exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher())
    test_dir = fileutils.make_test_dir(exp_name)

    script = fileutils.get_test_conf_path("sleep.py")
    settings = wlmutils.get_run_settings("python", f"{script} --time=5")
    M1 = exp.create_model("m1", path=test_dir, run_settings=settings)
    M2 = exp.create_model("m2", path=test_dir, run_settings=settings)

    exp.start(M1, M2, block=True)
    statuses = exp.get_status(M1, M2)
    assert all([stat == constants.STATUS_COMPLETED for stat in statuses])

    exp.start(M1, M2, block=True)
    statuses = exp.get_status(M1, M2)
    assert all([stat == constants.STATUS_COMPLETED for stat in statuses])
def test_launch_slurm_cluster_orc_batch(fileutils):
    """test clustered 3-node orchestrator launched as a batch workload"""
    exp_name = "test-launch-slurm-cluster-orc-batch"
    exp = Experiment(exp_name, launcher="slurm")
    test_dir = fileutils.make_test_dir(exp_name)

    # batch = True to launch the orchestrator through the batch system
    orc = SlurmOrchestrator(6780, db_nodes=3, batch=True)
    orc.set_path(test_dir)

    exp.start(orc, block=True)
    status = exp.get_status(orc)

    # don't use a bare assert so that we don't leave an orphaned orchestrator process
    if constants.STATUS_FAILED in status:
        exp.stop(orc)
        assert False

    exp.stop(orc)
    status = exp.get_status(orc)
    assert all([stat == constants.STATUS_CANCELLED for stat in status])
def test_model_failure(fileutils):
    exp_name = "test-model-failure"
    exp = Experiment(exp_name, launcher="local")
    test_dir = fileutils.make_test_dir(exp_name)

    script = fileutils.get_test_conf_path("bad.py")
    settings = RunSettings("python", f"{script} --time=3")

    M1 = exp.create_model("m1", path=test_dir, run_settings=settings)

    exp.start(M1, block=True)
    statuses = exp.get_status(M1)
    assert all([stat == constants.STATUS_FAILED for stat in statuses])
def test_ensemble(fileutils, wlmutils):
    exp_name = "test-ensemble-launch"
    exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher())
    test_dir = fileutils.make_test_dir(exp_name)

    script = fileutils.get_test_conf_path("sleep.py")
    settings = wlmutils.get_run_settings("python", f"{script} --time=5")

    ensemble = exp.create_ensemble("e1", run_settings=settings, replicas=2)
    ensemble.set_path(test_dir)

    exp.start(ensemble, block=True)
    statuses = exp.get_status(ensemble)
    assert all([stat == constants.STATUS_COMPLETED for stat in statuses])
def test_stop_entity(fileutils, wlmutils):
    exp_name = "test-launch-stop-model"
    exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher())
    test_dir = fileutils.make_test_dir(exp_name)

    script = fileutils.get_test_conf_path("sleep.py")
    settings = wlmutils.get_run_settings("python", f"{script} --time=10")

    M1 = exp.create_model("m1", path=test_dir, run_settings=settings)

    exp.start(M1, block=False)
    time.sleep(5)
    exp.stop(M1)
    assert M1.name in exp._control._jobs.completed
    assert exp.get_status(M1)[0] == constants.STATUS_CANCELLED
def test_consumer(fileutils):
    """Run three processes: each of the first two puts a tensor on
    the DB, and the third process accesses the tensors put by the two
    producers. Finally, each producer uses its tensor to run a model,
    and the consumer accesses the two results.
    """
    test_dir = fileutils.make_test_dir("smartredis_ensemble_consumer_test")
    exp = Experiment(
        "smartredis_ensemble_consumer", exp_path=test_dir, launcher="local"
    )

    # create and start a database
    orc = Orchestrator(port=REDIS_PORT)
    exp.generate(orc)
    exp.start(orc, block=False)

    rs_prod = RunSettings("python", "producer.py")
    rs_consumer = RunSettings("python", "consumer.py")
    params = {"mult": [1, -10]}
    ensemble = Ensemble(
        name="producer", params=params, run_settings=rs_prod, perm_strat="step"
    )

    consumer = Model(
        "consumer", params={}, path=ensemble.path, run_settings=rs_consumer
    )
    ensemble.add_model(consumer)

    ensemble.register_incoming_entity(ensemble[0])
    ensemble.register_incoming_entity(ensemble[1])

    config = fileutils.get_test_conf_path("smartredis")
    ensemble.attach_generator_files(to_copy=[config])

    exp.generate(ensemble)

    # start the models
    exp.start(ensemble, summary=False)

    # get and confirm statuses
    statuses = exp.get_status(ensemble)
    assert all([stat == constants.STATUS_COMPLETED for stat in statuses])

    # stop the orchestrator
    exp.stop(orc)

    print(exp.summary())
def test_models(fileutils):
    exp_name = "test-models-local-launch"
    exp = Experiment(exp_name, launcher="local")
    test_dir = fileutils.make_test_dir(exp_name)

    script = fileutils.get_test_conf_path("sleep.py")
    settings = RunSettings("python", f"{script} --time=3")

    M1 = exp.create_model("m1", path=test_dir, run_settings=settings)
    M2 = exp.create_model("m2", path=test_dir, run_settings=settings)

    exp.start(M1, M2, block=True, summary=True)
    statuses = exp.get_status(M1, M2)
    assert all([stat == constants.STATUS_COMPLETED for stat in statuses])
def test_summary(fileutils, wlmutils):
    """Fairly rudimentary test of the summary dataframe"""
    exp_name = "test-launch-summary"
    exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher())
    test_dir = fileutils.make_test_dir(exp_name)

    sleep = fileutils.get_test_conf_path("sleep.py")
    bad = fileutils.get_test_conf_path("bad.py")

    sleep_settings = wlmutils.get_run_settings("python", f"{sleep} --time=3")
    bad_settings = wlmutils.get_run_settings("python", f"{bad} --time=6")

    sleep = exp.create_model("sleep", path=test_dir, run_settings=sleep_settings)
    bad = exp.create_model("bad", path=test_dir, run_settings=bad_settings)

    # start and poll
    exp.start(sleep, bad)
    assert exp.get_status(bad)[0] == constants.STATUS_FAILED
    assert exp.get_status(sleep)[0] == constants.STATUS_COMPLETED

    summary_df = exp.summary()
    print(summary_df)
    row = summary_df.loc[0]

    assert sleep.name == row["Name"]
    assert sleep.type == row["Entity-Type"]
    assert 0 == int(row["RunID"])
    assert 0 == int(row["Returncode"])

    row_1 = summary_df.loc[1]

    assert bad.name == row_1["Name"]
    assert bad.type == row_1["Entity-Type"]
    assert 0 == int(row_1["RunID"])
    assert 0 != int(row_1["Returncode"])
def test_stop_entity_list(fileutils, wlmutils):
    exp_name = "test-launch-stop-ensemble"
    exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher())
    test_dir = fileutils.make_test_dir(exp_name)

    script = fileutils.get_test_conf_path("sleep.py")
    settings = wlmutils.get_run_settings("python", f"{script} --time=10")

    ensemble = exp.create_ensemble("e1", run_settings=settings, replicas=2)
    ensemble.set_path(test_dir)

    exp.start(ensemble, block=False)
    time.sleep(5)
    exp.stop(ensemble)
    statuses = exp.get_status(ensemble)
    assert all([stat == constants.STATUS_CANCELLED for stat in statuses])
    assert all([m.name in exp._control._jobs.completed for m in ensemble])
def test_batch_ensemble_replicas(fileutils, wlmutils):
    exp_name = "test-slurm-batch-ensemble-replicas"
    exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher())
    test_dir = fileutils.make_test_dir(exp_name)

    script = fileutils.get_test_conf_path("sleep.py")
    settings = wlmutils.get_run_settings("python", f"{script} --time=5")

    batch = SbatchSettings(nodes=2, time="00:01:00")
    ensemble = exp.create_ensemble(
        "batch-ens-replicas", batch_settings=batch, run_settings=settings, replicas=2
    )
    ensemble.set_path(test_dir)

    exp.start(ensemble, block=True)
    statuses = exp.get_status(ensemble)
    assert all([stat == constants.STATUS_COMPLETED for stat in statuses])
def test_failed_status(fileutils, wlmutils):
    """Test when a failure occurs deep into model execution"""
    exp_name = "test-report-failure"
    exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher())
    test_dir = fileutils.make_test_dir(exp_name)

    script = fileutils.get_test_conf_path("bad.py")
    settings = wlmutils.get_run_settings("python", f"{script} --time=7")

    model = exp.create_model("bad-model", path=test_dir, run_settings=settings)

    exp.start(model, block=False)
    while not exp.finished(model):
        time.sleep(2)
    status = exp.get_status(model)
    assert status[0] == constants.STATUS_FAILED
def test_local_orchestrator(fileutils):
    """Test launching orchestrator locally"""
    global first_dir
    exp_name = "test-orc-launch-local"
    exp = Experiment(exp_name, launcher="local")
    test_dir = fileutils.make_test_dir(exp_name)
    first_dir = test_dir

    orc = Orchestrator(port=6780)
    orc.set_path(test_dir)

    exp.start(orc)
    statuses = exp.get_status(orc)
    assert all([stat != constants.STATUS_FAILED for stat in statuses])

    # simulate user shutting down main thread
    exp._control._jobs.actively_monitoring = False
    exp._control._launcher.task_manager.actively_monitoring = False
def test_exchange(fileutils):
    """Run two processes, each process puts a tensor on
    the DB, then accesses the other process's tensor.
    Finally, the tensor is used to run a model.
    """
    test_dir = fileutils.make_test_dir("smartredis_ensemble_exchange_test")
    exp = Experiment(
        "smartredis_ensemble_exchange", exp_path=test_dir, launcher="local"
    )

    # create and start a database
    orc = Orchestrator(port=REDIS_PORT)
    exp.generate(orc)
    exp.start(orc, block=False)

    rs = RunSettings("python", "producer.py --exchange")
    params = {"mult": [1, -10]}
    ensemble = Ensemble(
        name="producer",
        params=params,
        run_settings=rs,
        perm_strat="step",
    )

    ensemble.register_incoming_entity(ensemble[0])
    ensemble.register_incoming_entity(ensemble[1])

    config = fileutils.get_test_conf_path("smartredis")
    ensemble.attach_generator_files(to_copy=[config])

    exp.generate(ensemble)

    # start the models
    exp.start(ensemble, summary=False)

    # get and confirm statuses
    statuses = exp.get_status(ensemble)
    assert all([stat == constants.STATUS_COMPLETED for stat in statuses])

    # stop the orchestrator
    exp.stop(orc)

    print(exp.summary())
def test_reconnect_local_orc():
    """Test reconnecting to orchestrator from first experiment"""
    global first_dir

    # start new experiment
    exp_name = "test-orc-local-reconnect-2nd"
    exp_2 = Experiment(exp_name, launcher="local")

    checkpoint = osp.join(first_dir, "smartsim_db.dat")
    reloaded_orc = exp_2.reconnect_orchestrator(checkpoint)

    # let statuses update once
    time.sleep(5)

    statuses = exp_2.get_status(reloaded_orc)
    for stat in statuses:
        if stat == constants.STATUS_FAILED:
            exp_2.stop(reloaded_orc)
            assert False

    exp_2.stop(reloaded_orc)
def test_batch_ensemble(fileutils, wlmutils):
    """Test the launch of a manually constructed batch ensemble"""
    exp_name = "test-slurm-batch-ensemble"
    exp = Experiment(exp_name, launcher=wlmutils.get_test_launcher())
    test_dir = fileutils.make_test_dir(exp_name)

    script = fileutils.get_test_conf_path("sleep.py")
    settings = wlmutils.get_run_settings("python", f"{script} --time=5")
    M1 = exp.create_model("m1", path=test_dir, run_settings=settings)
    M2 = exp.create_model("m2", path=test_dir, run_settings=settings)

    batch = SbatchSettings(nodes=2, time="00:01:00")
    ensemble = exp.create_ensemble("batch-ens", batch_settings=batch)
    ensemble.add_model(M1)
    ensemble.add_model(M2)
    ensemble.set_path(test_dir)

    exp.start(ensemble, block=True)
    statuses = exp.get_status(ensemble)
    assert all([stat == constants.STATUS_COMPLETED for stat in statuses])
def test_status_type():
    exp = Experiment("test")
    with pytest.raises(TypeError):
        exp.get_status([])
def test_status_pre_launch():
    model = Model("name", {}, "./", RunSettings("python"))
    exp = Experiment("test")
    with pytest.raises(SmartSimError):
        exp.get_status(model)