Example #1
def test_simple_init(mv_kwargs):
    """Tests whether initialisation works for all basic cases."""
    # With the full set of arguments
    mv = Multiverse(**mv_kwargs)

    # Assert some basic types
    assert isinstance(mv.wm, WorkerManager)
    assert isinstance(mv.dm, DataManager)
    assert isinstance(mv.pm, PlotManager)

    # Without the run configuration
    mv_kwargs.pop('run_cfg_path')
    mv_kwargs['paths']['model_note'] += "_wo_run_cfg"
    Multiverse(**mv_kwargs)

    # Suppressing the user config
    mv_kwargs['user_cfg_path'] = False
    mv_kwargs['paths']['model_note'] += "_wo_user_cfg"
    Multiverse(**mv_kwargs)
    # NOTE Without specifying a path, the search path will be used, which makes
    # the results untestable and creates spurious folders for the user.
    # Therefore, we cannot test for the case where no user config is given ...

    # No user config path given -> search at default location
    mv_kwargs['user_cfg_path'] = None
    mv_kwargs['paths']['model_note'] = "_user_cfg_path_none"
    Multiverse(**mv_kwargs)

    # No user config at default search location
    Multiverse.USER_CFG_SEARCH_PATH = "this_is_not_a_path"
    mv_kwargs['paths']['model_note'] = "_user_cfg_path_none_and_no_class_var"
    Multiverse(**mv_kwargs)
Example #2
def test_stop_conditions(mv_kwargs):
    """An integration test for stop conditions"""
    mv_kwargs['run_cfg_path'] = STOP_COND_CFG_PATH
    mv = Multiverse(**mv_kwargs)
    mv.run_sweep()
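    # Brief grace period, presumably so that all stopped workers have been
    # registered before the task counts are inspected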
    time.sleep(2)
    assert len(mv.wm.tasks) == 13
    assert len(mv.wm.stopped_tasks) == 13  # all stopped
Example #3
def test_prepare_executable(mv_kwargs):
    """Tests handling of the executable, i.e. copying to a temporary location
    and emitting helpful error messages
    """
    mv_kwargs['executable_control'] = dict(run_from_tmpdir=False)
    mv = Multiverse(**mv_kwargs)

    # The dummy model should be available at this point, so _prepare_executable
    # should have correctly set a binary path, but not in a temporary directory
    assert mv._model_binpath is not None
    assert mv._tmpdir is None
    original_binpath = mv._model_binpath

    # Now, let the executable be copied to a temporary location
    mv._prepare_executable(run_from_tmpdir=True)
    assert mv._model_binpath != original_binpath
    assert mv._tmpdir is not None

    # Adjust the info bundle for this Multiverse to use the temporary location
    tmp_binpath = mv._model_binpath
    mv._info_bundle = copy.deepcopy(mv.info_bundle)
    mv._info_bundle.paths['binary'] = tmp_binpath

    # With the executable in a temporary location, we can change its access
    # rights to test the PermissionError
    os.chmod(tmp_binpath, 0o600)
    with pytest.raises(PermissionError, match="is not executable"):
        mv._prepare_executable()

    # Finally, remove that (temporary) file, to test the FileNotFound error
    os.remove(tmp_binpath)
    with pytest.raises(FileNotFoundError, match="Did you build it?"):
        mv._prepare_executable()
Example #4
def test_FrozenMultiverse(mv_kwargs, cluster_env):
    """Test the FrozenMultiverse class"""
    # Need a regular Multiverse and corresponding output for that
    mv = Multiverse(**mv_kwargs)
    mv.run_single()

    # NOTE Need to adjust the data directory in order to not create collisions
    # in the eval directory due to same timestamps ...

    # Now create a frozen Multiverse from that one
    # Without run directory, the latest one should be loaded
    print("\nInitializing FrozenMultiverse without further kwargs")
    FrozenMultiverse(**mv_kwargs,
                     data_manager=dict(out_dir="eval/{timestamp:}_1"))


    # Giving only the directory name (the timestamp), the corresponding
    # directory should be found
    print("\nInitializing FrozenMultiverse with timestamp as run_dir")
    FrozenMultiverse(**mv_kwargs, run_dir=os.path.basename(mv.dirs['run']),
                     data_manager=dict(out_dir="eval/{timestamp:}_2"))

    # With an absolute path, that path should be used directly
    print("\nInitializing FrozenMultiverse with absolute path to run_dir")
    FrozenMultiverse(**mv_kwargs, run_dir=mv.dirs['run'],
                     data_manager=dict(out_dir="eval/{timestamp:}_3"))

    # With a relative path, the path relative to the CWD should be used
    print("\nInitializing FrozenMultiverse with relative path to run_dir")
    FrozenMultiverse(**mv_kwargs, run_dir=os.path.relpath(mv.dirs['run'],
                                                          start=os.getcwd()),
                     data_manager=dict(out_dir="eval/{timestamp:}_4"))

    # Bad type of run directory should fail
    with pytest.raises(TypeError, match="Argument run_dir needs"):
        FrozenMultiverse(**mv_kwargs, run_dir=123,
                         data_manager=dict(out_dir="eval/{timestamp:}_5"))

    # Non-existing directory should fail
    with pytest.raises(IOError, match="No run directory found at"):
        FrozenMultiverse(**mv_kwargs, run_dir="my_non-existing_directory",
                         data_manager=dict(out_dir="eval/{timestamp:}_6"))

    # Cluster mode
    print("\nInitializing FrozenMultiverse in cluster mode")
    mv_kwargs['run_cfg_path'] = CLUSTER_MODE_CFG_PATH
    mv_kwargs['cluster_params'] = dict(env=cluster_env)
    FrozenMultiverse(**mv_kwargs, run_dir=os.path.relpath(mv.dirs['run'],
                                                          start=os.getcwd()),
                     data_manager=dict(out_dir="eval/{timestamp:}_7"))


    with pytest.raises(NotImplementedError, match="use_meta_cfg_from_run_dir"):
        FrozenMultiverse(**mv_kwargs, run_dir="/some/path/to/a/run_dir",
                         use_meta_cfg_from_run_dir=True,
                         data_manager=dict(out_dir="eval/{timestamp:}_7"))
Example #5
def test_config_handling(mv_kwargs):
    """Tests the config handling of the Multiverse"""
    # Multiverse that does not load the default user config
    mv_kwargs['user_cfg_path'] = False
    Multiverse(**mv_kwargs)

    # Testing whether errors are raised
    # Multiverse with wrong run config
    mv_kwargs['run_cfg_path'] = 'an/invalid/run_cfg_path'
    with pytest.raises(FileNotFoundError):
        Multiverse(**mv_kwargs)
Example #6
def test_shared_worker_manager(mv_kwargs):
    """Tests using a shared WorkerManager between multiple Multiverses, an
    experimental feature that allows to use the WorkerManager for running
    multiple Multiverses.
    """
    mvs = list()
    shared_wm = None

    # Create a number of Multiverses, some with sweeps configured
    for i in range(5):
        _kws = copy.deepcopy(mv_kwargs)
        _kws['paths']['model_note'] += f"_no{i}"
        if i%2 == 0:
            _kws['run_cfg_path'] = SWEEP_CFG_PATH

        # Create Multiverse and manually add tasks
        mv = Multiverse(**_kws, _shared_worker_manager=shared_wm)
        mv._add_sim_tasks()

        # Keep track of it
        mvs.append(mv)

        # Define the shared WorkerManager instance (for the next iteration)
        shared_wm = mvs[0].wm

    # There should now be a total of 14 tasks, 4 each from Multiverses 0, 2,
    # and 4, and one each from Multiverses 1 and 3
    assert len(mvs) == 5
    assert len(shared_wm.tasks) == (4 + 1 + 4 + 1 + 4)

    # Let the shared WorkerManager start working
    shared_wm.start_working()

    # Check that the output directories of each Multiverse were created (as a
    # proxy for the run having succeeded)
    for i, mv in enumerate(mvs):
        if i%2 == 0:
            uni_names = ('uni1', 'uni2', 'uni3', 'uni4')
        else:
            uni_names = ('uni0',)

        for uni_name in uni_names:
            assert os.path.isdir(os.path.join(mv.dirs['data'], uni_name))

        # Report files will only be created for the first Multiverse, because
        # there is (and can be) only one Reporter instance.
        _report_file = os.path.join(mv.dirs['run'], '_report.txt')
        _sweep_info_file = os.path.join(mv.dirs['run'], '_sweep_info.txt')
        if i == 0:
            assert os.path.isfile(_report_file)
            assert os.path.isfile(_sweep_info_file)
        else:
            assert not os.path.isfile(_report_file)
            assert not os.path.isfile(_sweep_info_file)
Example #7
def test_detect_doubled_folders(mv_kwargs):
    """Tests whether an existing folder will raise an exception."""
    # Init Multiverse
    Multiverse(**mv_kwargs)

    # create output folders again
    # expect error due to existing folders
    with pytest.raises(RuntimeError, match="Simulation directory already"):
        # And another one, that will also create a directory
        Multiverse(**mv_kwargs)
        Multiverse(**mv_kwargs)
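        # NOTE Presumably, two instantiations are used here so that, even if
        #      the first one happens to land on a fresh timestamp, the second
        #      one collides with an already existing simulation directory.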
Example #8
@pytest.fixture
def dm_after_large_sweep(mv_kwargs) -> DataManager:
    """Initialises a Multiverse with a DataManager, runs a large sweep with
    output going into a temporary directory, then returns the DataManager."""
    # Initialise the Multiverse
    mv_kwargs['run_cfg_path'] = LARGE_SWEEP_CFG_PATH
    mv = Multiverse(**mv_kwargs)

    # Run a sweep
    mv.run_sweep()

    # Return the data manager
    return mv.dm
Example #9
def test_prolog_and_epilog_is_run(mv_kwargs):
    """Test that the prolog and epilog are always run"""
    # NOTE Ensure that the prolog and epilog messages appear in the model log
    mv_kwargs['parameter_space'] = dict(log_levels=dict(model='debug'))

    # Run with default settings and check log message
    mv = Multiverse(**mv_kwargs)
    mv.run()
    log = mv.wm.tasks[0].streams['out']['log']
    assert any(("Prolog finished." in line for line in log))
    assert any(("Epilog finished." in line for line in log))

    # The "Invoking epilog ..." message should _not_ be there in *this* case,
    # because it denotes that the simulation stopped after receiving a signal
    assert not any(("Invoking epilog ..." in line for line in log))

    # Now perform a longer simulation with a timeout
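    # (the timeout sends a stop signal to the worker, which is what should
    # trigger the "Invoking epilog ..." message this time)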
    mv_kwargs['parameter_space']['num_steps'] = int(1e9)
    mv_kwargs['parameter_space']['write_every'] = int(1e6)
    mv_kwargs['run_kwargs'] = dict(timeout=1.)
    mv_kwargs['paths']['model_note'] = "with_timeout"
    mv = Multiverse(**mv_kwargs)
    mv.run()
    log = mv.wm.tasks[0].streams['out']['log']
    assert any(("Prolog finished." in line for line in log))
    assert any(("Invoking epilog ..." in line for line in log))
    assert any(("Epilog finished." in line for line in log))
Example #10
@pytest.fixture
def default_mv(mv_kwargs) -> Multiverse:
    """Initialises a unique default configuration of the Multiverse to test
    everything beyond initialisation.

    Using the mv_kwargs fixture ensures that the output directory is unique.
    """
    return Multiverse(**mv_kwargs)
Example #11
def test_backup(mv_kwargs):
    """Tests whether the backup of all config parts and the executable works"""
    mv = Multiverse(**mv_kwargs)
    cfg_path = mv.dirs['config']

    assert os.path.isfile(os.path.join(cfg_path, 'base_cfg.yml'))
    assert os.path.isfile(os.path.join(cfg_path, 'user_cfg.yml'))
    assert os.path.isfile(os.path.join(cfg_path, 'model_cfg.yml'))
    assert os.path.isfile(os.path.join(cfg_path, 'run_cfg.yml'))
    assert os.path.isfile(os.path.join(cfg_path, 'update_cfg.yml'))

    # And once more, now including the executable
    mv_kwargs['backups'] = dict(backup_executable=True)
    mv_kwargs['paths']['model_note'] = "with-exec-backup"
    mv = Multiverse(**mv_kwargs)

    assert os.path.isfile(os.path.join(mv.dirs['run'], 'backup', 'dummy'))
Example #12
def test_bifurcation_diagram_2d(tmpdir):
    """Test plotting of the bifurcation diagram"""
    # Create and run simulation
    raise_exc = {'plot_manager': {'raise_exc': True}}
    mv = Multiverse(model_name='SavannaHomogeneous',
                    run_cfg_path=BIFURCATION_DIAGRAM_2D_RUN,
                    paths=dict(out_dir=str(tmpdir)),
                    **raise_exc)
    mv.run_sweep()

    # Load
    mv.dm.load_from_cfg(print_tree=False)

    # Plot the bifurcation using the last datapoint
    mv.pm.plot_from_cfg(plots_cfg=BIFURCATION_DIAGRAM_2D_PLOTS,
                        plot_only=["bifurcation_diagram_2d"])
    # Plot the bifurcation using the fixpoint
    mv.pm.plot_from_cfg(plots_cfg=BIFURCATION_DIAGRAM_2D_PLOTS,
                        plot_only=["bifurcation_diagram_2d_fixpoint_to_plot"])
Example #13
def test_multiple_runs_not_allowed(mv_kwargs):
    """Assert that multiple runs are prohibited"""
    # Create Multiverse and run
    mv = Multiverse(**mv_kwargs)
    mv.run_single()

    # Another run should not be possible
    with pytest.raises(RuntimeError, match="Could not add simulation task"):
        mv.run_single()
Example #14
def test_parallel_init(mv_kwargs):
    """Test enabling parallel execution through the config"""
    # NOTE Ensure that information on parallel execution is logged
    mv_kwargs['parameter_space'] = dict(log_levels=dict(core='debug'))

    # Run with default settings and check log message
    mv_kwargs['paths']['model_note'] = "pexec_disabled"
    mv = Multiverse(**mv_kwargs)
    mv.run()
    log = mv.wm.tasks[0].streams['out']['log']
    assert any(("Parallel execution disabled" in line for line in log))

    # Now override default setting
    mv_kwargs['parameter_space']['parallel_execution'] = dict(enabled=True)
    mv_kwargs['paths']['model_note'] = "pexec_enabled"
    mv = Multiverse(**mv_kwargs)
    mv.run()
    log = mv.wm.tasks[0].streams['out']['log']
    assert any(("Parallel execution enabled" in line for line in log))
Example #15
def test_run_sweep(mv_kwargs):
    """Tests a run with a single simulation"""
    # Adjust the defaults to use the sweep configuration for run configuration
    mv_kwargs['run_cfg_path'] = SWEEP_CFG_PATH
    mv = Multiverse(**mv_kwargs)

    # Run the sweep
    mv.run()

    # There should now be four directories in the data directory, one per
    # universe of the sweep
    assert len(os.listdir(mv.dirs['data'])) == 4

    # With a parameter space without volume, i.e. without any sweeps added,
    # the sweep should not be possible
    mv_kwargs['run_cfg_path'] = RUN_CFG_PATH
    mv_kwargs['paths']['model_note'] = "_invalid_cfg"
    mv = Multiverse(**mv_kwargs)

    with pytest.raises(ValueError, match="The parameter space has no sweeps"):
        mv.run_sweep()
Example #16
def test_bifurcation_diagram(tmpdir):
    """Test plotting of the bifurcation diagram"""
    # Create and run simulation
    raise_exc = {'plot_manager': {'raise_exc': True}}
    mv = Multiverse(model_name='SavannaHomogeneous',
                    run_cfg_path=BIFURCATION_DIAGRAM_RUN,
                    paths=dict(out_dir=str(tmpdir)),
                    **raise_exc)
    mv.run_sweep()

    # Load
    mv.dm.load_from_cfg(print_tree=False)

    # Plot the bifurcation using the last datapoint
    mv.pm.plot_from_cfg(plots_cfg=BIFURCATION_DIAGRAM_PLOTS,
                        plot_only=["bifurcation_endpoint"])
    # Plot the bifurcation using the fixpoint
    mv.pm.plot_from_cfg(plots_cfg=BIFURCATION_DIAGRAM_PLOTS,
                        plot_only=["bifurcation_fixpoint"])
    mv.pm.plot_from_cfg(plots_cfg=BIFURCATION_DIAGRAM_PLOTS,
                        plot_only=["bifurcation_fixpoint_to_plot"])
    # Plot the bifurcation using scatter
    mv.pm.plot_from_cfg(plots_cfg=BIFURCATION_DIAGRAM_PLOTS,
                        plot_only=["bifurcation_scatter"])
    # Plot the bifurcation using oscillation
    mv.pm.plot_from_cfg(plots_cfg=BIFURCATION_DIAGRAM_PLOTS,
                        plot_only=["bifurcation_oscillation"])

    # Redo simulation, but using several initial conditions
    mv = Multiverse(
        model_name='SavannaHomogeneous',
        run_cfg_path=BIFURCATION_DIAGRAM_RUN,
        paths=dict(out_dir=str(tmpdir)),
        **raise_exc,
        parameter_space=dict(seed=psp.ParamDim(default=0, range=[4])))
    mv.run_sweep()
    mv.dm.load_from_cfg(print_tree=False)

    # Plot the bifurcation using multistability
    mv.pm.plot_from_cfg(plots_cfg=BIFURCATION_DIAGRAM_PLOTS,
                        plot_only=["bifurcation_fixpoint"])
Example #17
def test_renew_plot_manager(mv_kwargs):
    """Tests the renewal of PlotManager instances in the Multiverse"""
    mv = Multiverse(**mv_kwargs)
    initial_pm = mv.pm

    # Try to renew it (with a bad config). The old one should remain
    with pytest.raises(ValueError, match="Failed setting up"):
        mv.renew_plot_manager(foo="bar")

    assert mv.pm is initial_pm

    # Again, this time it should work
    mv.renew_plot_manager()
    assert mv.pm is not initial_pm
Example #18
def test_parameter_validation(mv_kwargs):
    """Tests integration of the parameter validation feature"""
    # Works
    mv_kwargs['run_cfg_path'] = RUN_CFG_PATH_VALID
    mv_kwargs['model_name'] = "ForestFire"
    mv_kwargs['paths']['model_note'] = "valid"
    mv = Multiverse(**mv_kwargs)
    mv.run_single()

    # Fails
    mv_kwargs['run_cfg_path'] = RUN_CFG_PATH_INVALID
    mv_kwargs['model_name'] = "ForestFire"
    mv_kwargs['paths']['model_note'] = "invalid"
    with pytest.raises(ValidationError, match="Validation failed for 3 para"):
        mv = Multiverse(**mv_kwargs)
Example #19
def test_graph_plots(tmpdir):
    """Tests the plot_funcs.dag.graph module"""
    # Create and run simulation
    raise_exc = {'plot_manager': {'raise_exc': True}}
    mv = Multiverse(model_name='CopyMeGraph',
                    run_cfg_path=GRAPH_RUN,
                    paths=dict(out_dir=str(tmpdir)),
                    **raise_exc)

    mv.run_single()

    # Load
    mv.dm.load_from_cfg(print_tree=False)

    # Single graph plots
    mv.pm.plot_from_cfg(
        plots_cfg=GRAPH_PLOTS,
        plot_only=(
            "Graph",
            "DiGraph",
            "MultiGraph",
            "MultiDiGraph",
            "ExternalProperties",
            "Example_graph_plot",
            "custom_node_positioning_model",
            "explicit_node_positions",
            "custom_graph_creation",
            "custom_graph_arr_creation",
        ),
    )

    # Animation plots
    mv.pm.plot_from_cfg(
        plots_cfg=GRAPH_PLOTS,
        plot_only=[
            "graph_anim1",
            "graph_anim2",
            "graph_anim3",
            "graph_anim_external",
            "graph_anim4",
            "graph_anim_custom_graph_creation",
        ],
    )

    # Test failing cases – where possible, these tests are done in the (faster)
    # GraphPlot-class test.
    # Providing an invalid DAG tag for an external property
    with pytest.raises(
            PlotCreatorError,
            match=(
                "No tag 'some_state_transformed' found in the data selected by "
                "the DAG!"),
    ):
        mv.pm.plot_from_cfg(plots_cfg=GRAPH_PLOTS,
                            plot_only=["invalid_ext_prop"])

    # Ambiguous time specifications for animation
    with pytest.raises(PlotCreatorError,
                       match="ambiguous animation time specifications"):
        mv.pm.plot_from_cfg(plots_cfg=GRAPH_PLOTS,
                            plot_only=["anim_amgiguous_time_spec"])

    # Trying to animate from single nx.Graph
    with pytest.raises(PlotCreatorError,
                       match="due to invalid type of the 'graph'"):
        mv.pm.plot_from_cfg(plots_cfg=GRAPH_PLOTS,
                            plot_only=["anim_not_dataarray"])
Example #20
def test_cluster_mode_run(mv_kwargs, cluster_env_specific):
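    """Integration test for a sweep in cluster mode, checking that the
    universes are distributed over the available nodes."""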
    cluster_env = cluster_env_specific

    # Define a custom test environment
    mv_kwargs['run_cfg_path'] = CLUSTER_MODE_CFG_PATH
    mv_kwargs['cluster_params'] = dict(env=cluster_env)

    # Parameter space has 12 points
    # Five nodes are being used: node002, node003, node004, node006, node011
    # Test for first node, should perform 3 simulations
    cluster_env['TEST_NODENAME'] = "node002"
    mv_kwargs['paths']['model_note'] = "node002"

    mv = Multiverse(**mv_kwargs)
    mv.run_sweep()
    assert mv.wm.num_finished_tasks == 3
    assert [t.name for t in mv.wm.tasks] == ['uni01', 'uni06', 'uni11']
    # NOTE: simulated universes are uni01 ... uni12

    # Test for second node, should also perform 3 simulations
    cluster_env['TEST_NODENAME'] = "node003"
    mv_kwargs['paths']['model_note'] = "node003"

    mv = Multiverse(**mv_kwargs)
    mv.run_sweep()
    assert mv.wm.num_finished_tasks == 3
    assert [t.name for t in mv.wm.tasks] == ['uni02', 'uni07', 'uni12']

    # The third node should only perform 2 simulations
    cluster_env['TEST_NODENAME'] = "node004"
    mv_kwargs['paths']['model_note'] = "node004"

    mv = Multiverse(**mv_kwargs)
    mv.run_sweep()
    assert mv.wm.num_finished_tasks == 2
    assert [t.name for t in mv.wm.tasks] == ['uni03', 'uni08']

    # The fourth and fifth node should also perform only 2 simulations
    cluster_env['TEST_NODENAME'] = "node006"
    mv_kwargs['paths']['model_note'] = "node006"

    mv = Multiverse(**mv_kwargs)
    mv.run_sweep()
    assert mv.wm.num_finished_tasks == 2
    assert [t.name for t in mv.wm.tasks] == ['uni04', 'uni09']

    cluster_env['TEST_NODENAME'] = "node011"
    mv_kwargs['paths']['model_note'] = "node011"

    mv = Multiverse(**mv_kwargs)
    mv.run_sweep()
    assert mv.wm.num_finished_tasks == 2
    assert [t.name for t in mv.wm.tasks] == ['uni05', 'uni10']
Example #21
def test_cluster_mode(mv_kwargs, cluster_env):
    """Tests cluster mode basics like: resolution of parameters, creation of
    the run directory, ...
    """
    # Define a custom test environment
    mv_kwargs['run_cfg_path'] = CLUSTER_MODE_CFG_PATH
    mv_kwargs['cluster_params'] = dict(env=cluster_env)

    # Create the Multiverse
    mv = Multiverse(**mv_kwargs)

    rcps = mv.resolved_cluster_params
    assert len(rcps) == 10 + 1

    # Check the custom output directory
    assert 'my_custom_dir' in mv.dirs['run']

    # Check the job ID is part of the run directory path
    assert 'job123' in mv.dirs['run']

    # Make sure the required keys are available
    assert all([k in rcps for k in ('job_id', 'num_nodes', 'node_list',
                                    'node_name', 'timestamp')])

    # Check some types
    assert isinstance(rcps['job_id'], int)
    assert isinstance(rcps['num_nodes'], int)
    assert isinstance(rcps['num_procs'], int)
    assert isinstance(rcps['node_list'], list)
    assert isinstance(rcps['timestamp'], int)

    # Check some values
    assert rcps['node_index'] == 3  # for node006
    assert rcps['timestamp'] > 0
    assert "node006" in rcps['node_list']
    assert len(rcps['node_list']) == 5
    # NOTE Actual parsing of node list is checked in test__cluster.py

    # Can add additional info to the run directory
    mv_kwargs['cluster_params']['additional_run_dir_fstrs'] = ["xyz{job_id:}",
                                                               "N{num_nodes:}"]
    mv = Multiverse(**mv_kwargs)
    assert 'xyz123_N5' in mv.dirs['run']

    # Single-node case
    cluster_env['TEST_JOB_NUM_NODES'] = '1'
    cluster_env['TEST_JOB_NODELIST'] = "node006"
    mv = Multiverse(**mv_kwargs)
    assert mv.resolved_cluster_params['node_list'] == ["node006"]

    # Test error messages; also see test__cluster.py for more dedicated tests
    # Node name not in node list
    cluster_env['TEST_NODENAME'] = 'node042'
    with pytest.raises(ValueError, match="Failed parsing node list"):
        Multiverse(**mv_kwargs)

    # Wrong number of nodes
    cluster_env['TEST_NODENAME'] = 'node003'
    cluster_env['TEST_JOB_NUM_NODES'] = '3'
    with pytest.raises(ValueError, match="Failed parsing node list"):
        Multiverse(**mv_kwargs)

    # Missing environment variables
    cluster_env.pop('TEST_NODENAME')
    with pytest.raises(ValueError,
                       match="Missing required environment variable"):
        Multiverse(**mv_kwargs)
Example #22
def test_invalid_model_name_and_operation(default_mv, mv_kwargs):
    """Tests for correct behaviour upon invalid model names"""
    # Try to instantiate with invalid model name
    mv_kwargs['model_name'] = "invalid_model_RandomShit_bgsbjkbkfvwuRfopiwehGP"
    with pytest.raises(KeyError, match="No model with name 'invalid_model_"):
        Multiverse(**mv_kwargs)