def test_resolve_header(set_up):
    func_name = "resolve_header"
    func = shared_template.resolve_header
    params = inspect.getfullargspec(func).args
    variable_lines = [11, 12]  # 0-based indices of output lines excluded from the comparison
    for root_path, realisation in set_up:
        input_params = get_input_params(root_path, func_name, params)
        input_params[0] = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "..", "..", "templates"
        )
        input_params[8] = "nesi_header.cfg"
        input_params.append({"n_tasks": 40})
        test_output = func(*input_params)
        bench_output = get_bench_output(root_path, func_name)
        for test_line, bench_line in zip(
            [
                x
                for i, x in enumerate(test_output.split("\n"))
                if i not in variable_lines
            ],
            [
                x
                for i, x in enumerate(bench_output.split("\n"))
                if i not in variable_lines
            ],
        ):
            assert test_line == bench_line
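
# A minimal sketch (an assumption, not the project's actual helpers) of how the
# get_input_params / get_bench_output fixtures used throughout these tests could
# be implemented, assuming benchmark data is stored as pickled files keyed by
# function and parameter name under root_path. The directory layout, the ".P"
# suffix, and the _example_ names are illustrative only.
import pickle  # os is assumed to be imported by the surrounding test module


def _example_get_input_params(root_path, func_name, params):
    """Load one pickled input value per parameter name of the function under test."""
    values = []
    for param in params:
        input_file = os.path.join(
            root_path, "input", "{}_{}.P".format(func_name, param)
        )
        with open(input_file, "rb") as file_handle:
            values.append(pickle.load(file_handle))
    return values


def _example_get_bench_output(root_path, func_name):
    """Load the pickled benchmark (expected) output of the function under test."""
    output_file = os.path.join(root_path, "output", "{}_ret_val.P".format(func_name))
    with open(output_file, "rb") as file_handle:
        return pickle.load(file_handle)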


def test_main(set_up, mocker):
    """No return value. Just check that it runs without crashing"""
    function = "main"
    params = inspect.getfullargspec(scripts.submit_hf.main).args

    mocker.patch("scripts.submit_hf.set_wct",
                 lambda x, y, z: mocked_set_wct(x, y, True))
    mocker.patch("scripts.submit_hf.confirm", lambda x: False)
    mocker.patch(
        "scripts.submit_hf.est.est_HF_chours_single",
        lambda *args, **kwargs: (2, 0.05, 40),
    )

    for root_path, realisation in set_up:
        args = get_input_params(
            root_path, "{}_{}".format("submit_hf.py", function), params
        )

        # The fault is likely to differ between data sets, so re-patch load_sim_params every time
        mocker.patch(
            "scripts.submit_hf.utils.load_sim_params",
            lambda x: mocked_load_sim_params(
                os.path.join(
                    root_path,
                    "CSRoot",
                    "Runs",
                    get_fault_from_rel(realisation),
                    realisation,
                    x,
                )),
        )

        scripts.submit_hf.main(*args)
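
# The patches above rely on fixture stubs such as mocked_set_wct and
# mocked_load_sim_params. A hypothetical stand-in for mocked_set_wct is sketched
# below: it sidesteps run-time estimation and user prompts by returning a fixed
# wall-clock time. The signature and the "00:30:00" value are assumptions for
# illustration, not the project's actual stub.
def _example_mocked_set_wct(est_run_time, ncores, auto=True):
    """Return a fixed wall-clock time so the test never blocks on user input."""
    return "00:30:00"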


def test_get_stations(set_up):
    func_name = "get_stations"
    params = inspect.getfullargspec(shared.get_stations).args
    for root_path, _ in set_up:
        input_params = get_input_params(root_path, func_name, params)
        test_output = shared.get_stations(*input_params)
        bench_output = get_bench_output(root_path, func_name)
        assert test_output == bench_output


def test_user_select(set_up, mocker):
    func_name = "user_select"
    params = inspect.getfullargspec(shared.user_select).args
    for root_path, realisation in set_up:
        input_params = get_input_params(root_path, func_name, params)
        mocker.patch("shared_workflow.shared.input", lambda x: "2")
        test_output = shared.user_select(*input_params)
        bench_output = get_bench_output(root_path, func_name)
        assert test_output == bench_output


def test_generate_command(set_up):
    func_name = "generate_command"
    func = shared_template.generate_command
    params = inspect.getfullargspec(func).args
    for root_path, realisation in set_up:
        input_params = get_input_params(root_path, func_name, params)
        test_output = func(*input_params)
        bench_output = get_bench_output(root_path, func_name)
        assert test_output == bench_output


def test_generate_context(set_up):
    func_name = "generate_context"
    func = shared_template.generate_context
    params = inspect.getfullargspec(func).args
    for root_path, realisation in set_up:
        input_params = get_input_params(root_path, func_name, params)
        input_params[0] = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "..", "..", "templates"
        )
        test_output = func(*input_params)
        # Only check that output was produced; a full line-by-line comparison was
        # judged too expensive to maintain whenever changes required the test data to be updated
        assert test_output


def test_install_simulation(set_up):
    func_name = "install_simulation"
    params = inspect.getfullargspec(install_shared.install_simulation).args
    for root_path, _ in set_up:
        input_params = get_input_params(root_path, func_name, params)

        for i in range(len(input_params)):
            if isinstance(input_params[i], str) and input_params[i].startswith(
                ("CSRoot", "AdditionalData", "PangopangoF29/")
            ):
                input_params[i] = os.path.join(root_path, input_params[i])
        test_output = install_shared.install_simulation(*input_params)
        root_params_dict = test_output[0]

        # Accounting for removed parameters
        # Simpler solution than downloading, editing and re-uploading the test data
        root_params_dict["global_root"] = "/nesi/project/nesi00213"
        root_params_dict["v_1d_mod"] = (
            "/nesi/project/nesi00213/VelocityModel/Mod-1D/Cant1D_v3-midQ_OneRay.1d"
        )
        root_params_dict["bb"]["version"] = "3.0.4"
        root_params_dict["bb"]["site_specific"] = False
        del root_params_dict["hf"]["hf_vel_mod_1d"]
        root_params_dict["v_mod_1d_name"] = (
            "/nesi/project/nesi00213/VelocityModel/Mod-1D/Cant1D_v2-midQ_leer.1d"
        )

        bench_output = get_bench_output(root_path, func_name)[0]
        bench_output["ims"] = {
            "component": ["geom"],
            "extended_period":
            False,
            "pSA_periods": [
                0.02,
                0.05,
                0.1,
                0.2,
                0.3,
                0.4,
                0.5,
                0.75,
                1.0,
                2.0,
                3.0,
                4.0,
                5.0,
                7.5,
                10.0,
            ],
        }
        assert root_params_dict == bench_output
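
# The in-place edits above account for parameters removed or renamed after the
# benchmark data was captured. A hypothetical alternative (not used by this test)
# is to compare the nested dicts while skipping a known set of keys, avoiding
# hand-edits on either side:
def _example_assert_dicts_match(test_dict, bench_dict, ignored_keys=frozenset()):
    """Recursively compare two dicts, skipping any key listed in ignored_keys."""
    for key in set(test_dict) | set(bench_dict):
        if key in ignored_keys:
            continue
        test_value = test_dict.get(key)
        bench_value = bench_dict.get(key)
        if isinstance(test_value, dict) and isinstance(bench_value, dict):
            _example_assert_dicts_match(test_value, bench_value, ignored_keys)
        else:
            assert test_value == bench_value, key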


def test_main(set_up, mocker):
    """No return value. Just check that it runs without crashing"""
    function = "main"
    params = inspect.getfullargspec(scripts.submit_emod3d.main).args

    mocker.patch("scripts.submit_emod3d.set_wct",
                 lambda x, y, z: mocked_set_wct(x, y, True))
    mocker.patch("scripts.submit_emod3d.confirm", lambda x: False)
    mocker.patch(
        "scripts.submit_emod3d.est.est_LF_chours_single",
        lambda a, b, c, d, e, f, g: (2, 0.05, 40),
    )
    mocker.patch(
        "scripts.set_runparams.utils.load_yaml",
        lambda x: mocked_load_yaml(
            os.path.join(
                os.path.dirname(os.path.realpath(__file__)),
                "..",
                "..",
                "templates",
                "gmsim",
                "16.1",
                "emod3d_defaults.yaml",
            )) if "emod3d_defaults.yaml" in x else mocked_load_yaml(x),
    )

    for root_path, realisation in set_up:
        args = get_input_params(
            root_path, "{}_{}".format("submit_emod3d.py", function), params
        )

        # The fault is likely to differ between data sets, so re-patch load_sim_params every time
        mocker.patch(
            "scripts.submit_emod3d.utils.load_sim_params",
            lambda x: mocked_load_sim_params(
                os.path.join(
                    root_path,
                    "CSRoot",
                    "Runs",
                    get_fault_from_rel(realisation),
                    realisation,
                    x,
                )),
        )

        scripts.submit_emod3d.main(*args)
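
# The load_yaml patch above routes reads of emod3d_defaults.yaml to the bundled
# templates directory via mocked_load_yaml. A hypothetical stand-in for that
# helper is sketched below; it simply parses the YAML file at the resolved path
# (yaml.safe_load is an assumption about how the project loads its YAML).
def _example_mocked_load_yaml(yaml_path):
    """Parse and return the YAML document at yaml_path."""
    import yaml

    with open(yaml_path, "r", encoding="utf-8") as file_handle:
        return yaml.safe_load(file_handle)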


def test_write_sl_script(set_up, mocker):
    func_name = "write_sl_script"
    func = shared_template.write_sl_script
    params = inspect.getfullargspec(func).args
    for root_path, realisation in set_up:
        input_params = get_input_params(root_path, func_name, params)

        input_params[4]["platform_specific_args"] = {"n_tasks": input_params[4]["n_tasks"]}
        del input_params[4]["n_tasks"]
        input_params[6]["run_command"] = "srun"

        slurm_script = io.StringIO("")
        mocker.patch(
            "shared_workflow.shared_template.write_file",
            lambda _, parts: slurm_script.write("\n".join(parts)),
        )

        func(*input_params)
        test_output = [
            "{}\n".format(line) for line in slurm_script.getvalue().split("\n")
        ]

        # Only check that some output was produced; whether the generated .sl script
        # makes sense is not verified here (that is covered by the end-to-end tests)
        assert len(test_output) > 0
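
# A possible tightening of the final sanity check (not part of the original test):
# if the rendered header is known to emit SLURM directives, the captured script
# could also be checked for them. The "#SBATCH" marker is an assumption about the
# template contents.
def _example_looks_like_slurm_script(script_text):
    """Return True if the text is non-empty and contains at least one SBATCH directive."""
    return bool(script_text.strip()) and "#SBATCH" in script_text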