Example #1
def test_full_exp(fileutils):
    """Test generation of a full experiment: an ensemble,
    an Orchestrator, and a model with an attached script.
    """
    test_dir = fileutils.make_test_dir("gen_full_test")
    exp = Experiment("gen-test", test_dir, launcher="local")

    model = exp.create_model("model", run_settings=rs)
    script = fileutils.get_test_conf_path("sleep.py")
    model.attach_generator_files(to_copy=script)

    orc = Orchestrator(6780)
    params = {"THERMO": [10, 20, 30], "STEPS": [10, 20, 30]}
    ensemble = exp.create_ensemble("test_ens", params=params, run_settings=rs)

    config = fileutils.get_test_conf_path("in.atm")
    ensemble.attach_generator_files(to_configure=config)
    exp.generate(orc, ensemble, model)

    # test for ensemble
    assert osp.isdir(osp.join(test_dir, "test_ens/"))
    for i in range(9):
        assert osp.isdir(osp.join(test_dir, "test_ens/test_ens_" + str(i)))

    # test for orc dir
    assert osp.isdir(osp.join(test_dir, "database"))

    # test for model file
    assert osp.isdir(osp.join(test_dir, "model"))
    assert osp.isfile(osp.join(test_dir, "model/sleep.py"))
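
These test snippets rely on names defined at module level in the original test files (rs, osp, REDIS_PORT, the fileutils pytest fixture). A minimal sketch of the assumed setup for this example; the import paths reflect the SmartSim release these tests were written against, and the RunSettings arguments are an assumption, not the original fixture:

import os.path as osp

from smartsim import Experiment
from smartsim.database import Orchestrator
from smartsim.settings import RunSettings

# run settings shared by the generation tests; the executable and arguments
# here are placeholders, the real fixture is defined elsewhere in the suite
rs = RunSettings("python", "sleep.py")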
Example #2
def test_consumer(fileutils):
    """Run three processes, each one of the first two processes
    puts a tensor on the DB; the third process accesses the
    tensors put by the two producers.
    Finally, the tensor is used to run a model by each producer
    and the consumer accesses the two results.
    """
    test_dir = fileutils.make_test_dir("smartredis_ensemble_consumer_test")
    exp = Experiment("smartredis_ensemble_consumer",
                     exp_path=test_dir,
                     launcher="local")

    # create and start a database
    orc = Orchestrator(port=REDIS_PORT)
    exp.generate(orc)
    exp.start(orc, block=False)

    rs_prod = RunSettings("python", "producer.py")
    rs_consumer = RunSettings("python", "consumer.py")
    params = {"mult": [1, -10]}
    ensemble = Ensemble(name="producer",
                        params=params,
                        run_settings=rs_prod,
                        perm_strat="step")

    consumer = Model("consumer",
                     params={},
                     path=ensemble.path,
                     run_settings=rs_consumer)
    ensemble.add_model(consumer)

    ensemble.register_incoming_entity(ensemble[0])
    ensemble.register_incoming_entity(ensemble[1])

    config = fileutils.get_test_conf_path("smartredis")
    ensemble.attach_generator_files(to_copy=[config])

    exp.generate(ensemble)

    # start the models
    exp.start(ensemble, summary=False)

    # get and confirm statuses
    statuses = exp.get_status(ensemble)
    assert all([stat == constants.STATUS_COMPLETED for stat in statuses])

    # stop the orchestrator
    exp.stop(orc)

    print(exp.summary())
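
The producer.py and consumer.py scripts themselves are not shown; they sit in the test configuration directory copied via attach_generator_files. A rough, hypothetical sketch of the consumer side using the SmartRedis Python client (the tensor key "product" and the polling parameters are made up):

# consumer.py -- hypothetical sketch, not the original test script
from smartredis import Client

client = Client(cluster=False)  # single-shard local Orchestrator

# read the tensors written by the two producers; the member names follow
# SmartSim's <ensemble>_<index> convention and were registered through
# register_incoming_entity()
for producer in ("producer_0", "producer_1"):
    client.set_data_source(producer)
    if client.poll_tensor("product", 100, 100):  # poll every 100 ms, 100 tries
        print(producer, client.get_tensor("product"))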
Example #3
def test_exchange(fileutils):
    """Run two processes, each process puts a tensor on
    the DB, then accesses the other process's tensor.
    Finally, the tensor is used to run a model.
    """

    test_dir = fileutils.make_test_dir("smartredis_ensemble_exchange_test")
    exp = Experiment("smartredis_ensemble_exchange",
                     exp_path=test_dir,
                     launcher="local")

    # create and start a database
    orc = Orchestrator(port=REDIS_PORT)
    exp.generate(orc)
    exp.start(orc, block=False)

    rs = RunSettings("python", "producer.py --exchange")
    params = {"mult": [1, -10]}
    ensemble = Ensemble(
        name="producer",
        params=params,
        run_settings=rs,
        perm_strat="step",
    )

    ensemble.register_incoming_entity(ensemble[0])
    ensemble.register_incoming_entity(ensemble[1])

    config = fileutils.get_test_conf_path("smartredis")
    ensemble.attach_generator_files(to_copy=[config])

    exp.generate(ensemble)

    # start the models
    exp.start(ensemble, summary=False)

    # get and confirm statuses
    statuses = exp.get_status(ensemble)
    assert all([stat == constants.STATUS_COMPLETED for stat in statuses])

    # stop the orchestrator
    exp.stop(orc)

    print(exp.summary())
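
register_incoming_entity() is what makes the exchange possible: each model's keys are prefixed with its name, and the registered producers are exposed to the launched processes through the SSKEYIN environment variable, which the SmartRedis client consumes via set_data_source(). A hypothetical sketch of how a script might discover its peer (the key name "tensor" is made up):

# hypothetical sketch of peer discovery inside producer.py
import os

from smartredis import Client

client = Client(cluster=False)

# SmartSim exposes the registered incoming entities as a comma-separated
# list of key prefixes; SSKEYOUT holds this process's own prefix
incoming = os.environ.get("SSKEYIN", "").split(",")
own_prefix = os.environ.get("SSKEYOUT", "")

for peer in incoming:
    if peer and peer != own_prefix:
        client.set_data_source(peer)
        if client.poll_tensor("tensor", 100, 100):
            peer_tensor = client.get_tensor("tensor")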
Example #4
def test_dir_files(fileutils):
    """Test the generation of models with attached files that
    are directories containing subdirectories and files.
    """

    test_dir = fileutils.make_test_dir("gen_dir_test")
    exp = Experiment("gen-test", test_dir, launcher="local")

    params = {"THERMO": [10, 20, 30], "STEPS": [10, 20, 30]}
    ensemble = exp.create_ensemble("dir_test", params=params, run_settings=rs)
    conf_dir = fileutils.get_test_dir_path("test_dir")
    ensemble.attach_generator_files(to_copy=conf_dir)

    exp.generate(ensemble)

    assert osp.isdir(osp.join(test_dir, "dir_test/"))
    for i in range(9):
        model_path = osp.join(test_dir, "dir_test/dir_test_" + str(i))
        assert osp.isdir(model_path)
        assert osp.isdir(osp.join(model_path, "test_dir_1"))
        assert osp.isfile(osp.join(model_path, "test.py"))
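
The loops over range(9) in these generation tests follow from the default permutation strategy: with no perm_strat given, SmartSim uses "all_perm", creating one model per combination of parameter values, so two three-value parameters yield nine members (dir_test_0 through dir_test_8). A quick illustration:

# why the tests expect nine member directories under the default
# "all_perm" strategy: one model per THERMO x STEPS combination
from itertools import product

params = {"THERMO": [10, 20, 30], "STEPS": [10, 20, 30]}
assert len(list(product(*params.values()))) == 9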
Example #5
def mom6_clustered_driver(
        walltime="02:00:00",
        ensemble_size=1,
        nodes_per_member=25,
        tasks_per_node=45,
        mom6_exe_path="/lus/cls01029/shao/dev/gfdl/MOM6-examples/build/gnu/" +
    "ice_ocean_SIS2/repro/MOM6",
        ensemble_node_features='[CL48|SK48|SK56]',
        mask_table="mask_table.315.32x45",
        domain_layout="32,45",
        eke_model_name="ncar_ml_eke.gpu.pt",
        eke_backend="GPU",
        orchestrator_port=6780,
        orchestrator_interface="ipogif0",
        orchestrator_nodes=3,
        orchestrator_node_features='P100',
        configure_only=False):
    """Run a MOM6 OM4_025 simulation with a cluster of databases used for
    machine-learning inference

    :param walltime: how long to allocate for the run, "hh:mm:ss"
    :type walltime: str, optional
    :param ensemble_size: number of members in the ensemble
    :type ensemble_size: int, optional
    :param nodes_per_member: number of nodes allocated to each ensemble member
    :type nodes_per_member: int, optional
    :param tasks_per_node: how many MPI ranks to be run per node
    :type tasks_per_node: int, optional
    :param mom6_exe_path: full path to the compiled MOM6 executable
    :type mom6_exe_path: str, optional
    :param ensemble_node_features: (Slurm-only) Constraints/features for the
                                    node
    :type ensemble_node_features: str, optional
    :param mask_table: the file to use for the specified layout eliminating
                       land domains
    :type mask_table: str, optional
    :param domain_layout: the particular domain decomposition
    :type domain_layout: str, optional
    :param eke_model_name: file containing the saved machine-learning model
    :type eke_model_name: str, optional
    :param eke_backend: (CPU or GPU), sets whether the ML-EKE model will be
                        run on CPU or GPU
    :type eke_backend: str, optional
    :param orchestrator_port: port that the database will listen on
    :type orchestrator_port: int, optional
    :param orchestrator_interface: network interface bound to the database
    :type orchestrator_interface: str, optional
    :param orchestrator_nodes: number of orchestrator nodes to use
    :type orchestrator_nodes: int, optional
    :param orchestrator_node_features: (Slurm-only) node features requested for
                                       the orchestrator nodes
    :type orchestrator_node_features: str, optional
    :param configure_only: If True, only configure the experiment and return
                           the orchestrator and experiment objects
    :type configure_only: bool, optional
    """

    experiment = Experiment("AI-EKE-MOM6", launcher="auto")
    mom_ensemble = create_mom_ensemble(experiment, walltime, ensemble_size,
                                       nodes_per_member, tasks_per_node,
                                       mom6_exe_path, ensemble_node_features)
    configure_mom_ensemble(mom_ensemble, False, orchestrator_nodes >= 3,
                           mask_table, domain_layout, eke_model_name,
                           eke_backend)
    orchestrator = create_distributed_orchestrator(
        experiment, orchestrator_port, orchestrator_interface,
        orchestrator_nodes, orchestrator_node_features, walltime)

    experiment.generate(mom_ensemble, orchestrator, overwrite=True)
    if configure_only:
        return experiment, mom_ensemble, orchestrator
    else:
        experiment.start(mom_ensemble, orchestrator, summary=True)
        experiment.stop(orchestrator)
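
A hedged usage sketch of the configure_only path, which hands the experiment objects back to the caller so the generated configuration can be inspected or adjusted before launch (the argument values here are illustrative):

# configure the experiment without launching it, then start manually
experiment, mom_ensemble, orchestrator = mom6_clustered_driver(
    ensemble_size=2,
    configure_only=True,
)
# ... inspect or tweak the generated run directories here ...
experiment.start(mom_ensemble, orchestrator, summary=True)
experiment.stop(orchestrator)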
Example #6
def mom6_colocated_driver(
        walltime="02:00:00",
        ensemble_size=1,
        nodes_per_member=15,
        tasks_per_node=17,
        mom6_exe_path="/lus/cls01029/shao/dev/gfdl/MOM6-examples/build/gnu/" +
    "ice_ocean_SIS2/repro/MOM6",
        ensemble_node_features='P100',
        mask_table="mask_table.33.16x18",
        domain_layout="16,18",
        eke_model_name="ncar_ml_eke.gpu.pt",
        eke_backend="GPU",
        orchestrator_port=6780,
        orchestrator_interface="ipogif0",
        colocated_stride=18,
        orchestrator_cpus=4,
        limit_orchestrator_cpus=False):
    """Run a MOM6 OM4_025 simulation using a colocated deployment for online
    machine-learning inference

    :param walltime: how long to allocate for the run, "hh:mm:ss"
    :type walltime: str, optional
    :param ensemble_size: number of members in the ensemble
    :type ensemble_size: int, optional
    :param nodes_per_member: number of nodes allocated to each ensemble member
    :type nodes_per_member: int, optional
    :param tasks_per_node: how many MPI ranks to be run per node
    :type tasks_per_node: int, optional
    :param mom6_exe_path: full path to the compiled MOM6 executable
    :type mom6_exe_path: str, optional
    :param ensemble_node_features: (Slurm-only) Constraints/features for the
                                    node
    :type ensemble_node_features: str, optional
    :param mask_table: the file to use for the specified layout eliminating
                       land domains
    :type mask_table: str, optional
    :param domain_layout: the particular domain decomposition
    :type domain_layout: str, optional
    :param eke_model_name: file containing the saved machine-learning model
    :type eke_model_name: str, optional
    :param eke_backend: (CPU or GPU), sets whether the ML-EKE model will be
                        run on CPU or GPU
    :type eke_backend: str, optional
    :param orchestrator_port: port that the database will listen on
    :type orchestrator_port: int, optional
    :param orchestrator_interface: network interface bound to the orchestrator
    :type orchestrator_interface: str, optional
    :param orchestrator_cpus: number of CPU cores that the colocated
                              orchestrator can use to handle requests
    :type orchestrator_cpus: int, optional
    :param limit_orchestrator_cpus: if True, restrict the orchestrator to
                                    orchestrator_cpus cores when handling
                                    requests
    :type limit_orchestrator_cpus: bool, optional
    """
    experiment = Experiment("AI-EKE-MOM6", launcher="auto")
    mom_ensemble = create_mom_ensemble(experiment, walltime, ensemble_size,
                                       nodes_per_member, tasks_per_node,
                                       mom6_exe_path, ensemble_node_features)
    configure_mom_ensemble(mom_ensemble,
                           True,
                           False,
                           mask_table,
                           domain_layout,
                           eke_model_name,
                           eke_backend,
                           colocated_stride=colocated_stride)

    add_colocated_orchestrator(
        mom_ensemble,
        orchestrator_port,
        orchestrator_interface,
        orchestrator_cpus,
        limit_orchestrator_cpus,
    )

    experiment.generate(mom_ensemble, overwrite=True)
    experiment.start(mom_ensemble, summary=True)
    experiment.stop()
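
The helpers create_mom_ensemble, configure_mom_ensemble, and add_colocated_orchestrator are defined elsewhere in the source project. As a rough sketch, the colocated helper plausibly attaches a database to every ensemble member, assuming a SmartSim release that provides Model.colocate_db with these keyword arguments:

# hypothetical sketch of what a helper like add_colocated_orchestrator
# might do; the real implementation lives elsewhere in the source project
def add_colocated_orchestrator(ensemble, port, interface, db_cpus, limit_cpus):
    for model in ensemble:
        model.colocate_db(
            port=port,                  # port each node-local database listens on
            ifname=interface,           # network interface bound by the database
            db_cpus=db_cpus,            # cores reserved for the database
            limit_app_cpus=limit_cpus,  # keep the application off those cores
        )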