Example #1
def fixture_disable_cmk_update_config(monkeypatch: pytest.MonkeyPatch) -> None:
    # During CME config computation the EC rule packs are loaded, which currently also loads the
    # rule usage information from the running EC. Since we do not have an EC running, this fails
    # and causes timeouts. Disable this for these tests.
    monkeypatch.setattr(cmk.gui.watolib.activate_changes,
                        "_execute_cmk_update_config", lambda: None)
Example #2
def test_openapi_bulk_hosts(
    monkeypatch: pytest.MonkeyPatch,
    wsgi_app,
    with_automation_user,
):
    monkeypatch.setattr(
        "cmk.gui.watolib.hosts_and_folders.delete_hosts",
        lambda *args, **kwargs: DeleteHostsResult(),
    )

    username, secret = with_automation_user
    wsgi_app.set_authorization(("Bearer", username + " " + secret))

    base = "/NO_SITE/check_mk/api/1.0"

    resp = wsgi_app.call_method(
        "post",
        base + "/domain-types/host_config/actions/bulk-create/invoke",
        params=json.dumps({
            "entries": [
                {
                    "host_name": "foobar",
                    "folder": "/",
                    "attributes": {
                        "ipaddress": "127.0.0.2"
                    },
                },
                {
                    "host_name": "sample",
                    "folder": "/",
                    "attributes": {
                        "ipaddress": "127.0.0.2",
                        "site": "NO_SITE",
                    },
                },
            ]
        }),
        status=200,
        content_type="application/json",
    )
    assert len(resp.json["value"]) == 2

    _resp = wsgi_app.call_method(
        "put",
        base + "/domain-types/host_config/actions/bulk-update/invoke",
        params=json.dumps({
            "entries": [{
                "host_name": "foobar",
                "attributes": {
                    "ipaddress": "192.168.1.1",
                    "tag_address_family": "ip-v4-only",
                },
            }],
        }),
        status=200,
        content_type="application/json",
    )

    # verify attribute ipaddress is set correctly
    resp = wsgi_app.call_method(
        "get",
        base + "/objects/host_config/foobar",
        status=200,
    )
    assert resp.json["extensions"]["attributes"]["ipaddress"] == "192.168.1.1"

    # remove attribute ipaddress via bulk request
    wsgi_app.call_method(
        "put",
        base + "/domain-types/host_config/actions/bulk-update/invoke",
        params=json.dumps({
            "entries": [{
                "host_name": "foobar",
                "remove_attributes": ["ipaddress"]
            }],
        }),
        status=200,
        content_type="application/json",
    )

    # verify attribute ipaddress was removed correctly
    resp = wsgi_app.call_method(
        "get",
        base + "/objects/host_config/foobar",
        status=200,
    )
    assert "ipaddress" not in resp.json["extensions"]["attributes"]

    # adding invalid attribute should fail
    _resp = wsgi_app.call_method(
        "put",
        base + "/domain-types/host_config/actions/bulk-update/invoke",
        params=json.dumps({
            "entries": [{
                "host_name": "foobar",
                "attributes": {
                    "foobaz": "bar"
                }
            }],
        }),
        status=400,
        content_type="application/json",
    )

    _resp = wsgi_app.call_method(
        "post",
        base + "/domain-types/host_config/actions/bulk-delete/invoke",
        params=json.dumps({"entries": ["foobar", "sample"]}),
        status=204,
        content_type="application/json",
    )
Example #3
class EclRunTest(ResTest):
    def setUp(self):
        self.ecl_config_path = os.path.dirname(
            inspect.getsourcefile(Ecl100Config))
        self.monkeypatch = MonkeyPatch()

    def tearDown(self):
        self.monkeypatch.undo()

    @staticmethod
    def _eclrun_conf():
        return {
            "eclrun_env": {
                "SLBSLS_LICENSE_FILE": "*****@*****.**",
                "ECLPATH": "/prog/res/ecl/grid",
                "PATH": "/prog/res/ecl/grid/macros",
                "F_UFMTENDIAN": "big",
                "LSB_JOB_ID": None,
            }
        }

    def init_eclrun_config(self):
        conf = EclRunTest._eclrun_conf()
        with open("ecl100_config.yml", "w") as f:
            f.write(yaml.dump(conf))
        self.monkeypatch.setenv("ECL100_SITE_CONFIG", "ecl100_config.yml")

    @tmpdir()
    @mock.patch.dict(os.environ, {"LSB_JOBID": "some-id"})
    def test_env(self):
        self.init_eclrun_config()
        with open("eclrun", "w") as f, open("DUMMY.DATA", "w"):
            f.write("""#!/usr/bin/env python
import os
import json
with open("env.json", "w") as f:
    json.dump(dict(os.environ), f)
""")
        os.chmod("eclrun", os.stat("eclrun").st_mode | stat.S_IEXEC)
        ecl_config = Ecl100Config()
        eclrun_config = EclrunConfig(ecl_config, "2019.3")
        ecl_run = EclRun("DUMMY", None, check_status=False)
        with mock.patch.object(ecl_run, "_get_run_command",
                               mock.MagicMock(return_value="./eclrun")):
            ecl_run.runEclipse(eclrun_config=eclrun_config)
        with open("env.json") as f:
            run_env = json.load(f)

        eclrun_env = self._eclrun_conf()["eclrun_env"]
        for k, v in eclrun_env.items():
            if v is None:
                assert k not in run_env
                continue

            if k == "PATH":
                assert run_env[k].startswith(v)
            else:
                assert v == run_env[k]

    @tmpdir()
    @pytest.mark.equinor_test
    def test_run(self):
        self.init_eclrun_config()
        shutil.copy(
            os.path.join(self.SOURCE_ROOT,
                         "../test-data/local/eclipse/SPE1.DATA"),
            "SPE1.DATA",
        )
        ecl_config = Ecl100Config()

        ecl_run = EclRun("SPE1.DATA", None)
        ecl_run.runEclipse(eclrun_config=EclrunConfig(ecl_config, "2019.3"))

        ok_path = os.path.join(ecl_run.runPath(),
                               "{}.OK".format(ecl_run.baseName()))
        log_path = os.path.join(ecl_run.runPath(),
                                "{}.LOG".format(ecl_run.baseName()))

        self.assertTrue(os.path.isfile(ok_path))
        self.assertTrue(os.path.isfile(log_path))
        self.assertTrue(os.path.getsize(log_path) > 0)

        errors = ecl_run.parseErrors()
        self.assertEqual(0, len(errors))

    @pytest.mark.equinor_test
    @tmpdir()
    def test_run_api(self):
        self.init_eclrun_config()
        shutil.copy(
            os.path.join(self.SOURCE_ROOT,
                         "../test-data/local/eclipse/SPE1.DATA"),
            "SPE1.DATA",
        )
        ecl_config = Ecl100Config()
        run(ecl_config, ["SPE1.DATA", "--version=2019.3"])

        self.assertTrue(os.path.isfile("SPE1.DATA"))

    @pytest.mark.equinor_test
    @tmpdir()
    def test_failed_run(self):
        self.init_eclrun_config()
        shutil.copy(
            os.path.join(self.SOURCE_ROOT,
                         "../test-data/local/eclipse/SPE1_ERROR.DATA"),
            "SPE1_ERROR.DATA",
        )
        ecl_config = Ecl100Config()
        eclrun_config = EclrunConfig(ecl_config, "2019.3")
        ecl_run = EclRun("SPE1_ERROR", None)
        with self.assertRaises(Exception) as error_context:
            ecl_run.runEclipse(eclrun_config=eclrun_config)
        self.assertIn("ERROR", str(error_context.exception))

    @pytest.mark.equinor_test
    @tmpdir()
    def test_failed_run_OK(self):
        self.init_eclrun_config()
        shutil.copy(
            os.path.join(self.SOURCE_ROOT,
                         "../test-data/local/eclipse/SPE1_ERROR.DATA"),
            "SPE1_ERROR.DATA",
        )
        ecl_config = Ecl100Config()
        run(ecl_config, ["SPE1_ERROR", "--version=2019.3", "--ignore-errors"])

    @pytest.mark.equinor_test
    @tmpdir()
    def test_no_hdf5_output_by_default_with_ecl100(self):
        self.init_eclrun_config()
        shutil.copy(
            os.path.join(self.SOURCE_ROOT,
                         "../test-data/local/eclipse/SPE1.DATA"),
            "SPE1.DATA",
        )
        ecl_config = Ecl100Config()
        # check that by default the .h5 file IS NOT produced
        run(ecl_config, ["SPE1.DATA", "--version=2019.3"])
        self.assertFalse(os.path.exists("SPE1.h5"))

    @pytest.mark.equinor_test
    @tmpdir()
    def test_flag_to_produce_hdf5_output_with_ecl100(self):
        self.init_eclrun_config()
        shutil.copy(
            os.path.join(self.SOURCE_ROOT,
                         "../test-data/local/eclipse/SPE1.DATA"),
            "SPE1.DATA",
        )
        ecl_config = Ecl100Config()
        # check that with the flag the .h5 file IS produced
        run(ecl_config,
            ["SPE1.DATA", "--version=2019.3", "--summary-conversion"])
        self.assertTrue(os.path.exists("SPE1.h5"))

    @pytest.mark.equinor_test
    @tmpdir()
    def test_mpi_run(self):
        self.init_eclrun_config()
        shutil.copy(
            os.path.join(self.SOURCE_ROOT,
                         "../test-data/local/eclipse/SPE1_PARALLELL.DATA"),
            "SPE1_PARALLELL.DATA",
        )
        ecl_config = Ecl100Config()
        run(ecl_config,
            ["SPE1_PARALLELL.DATA", "--version=2019.3", "--num-cpu=2"])
        self.assertTrue(os.path.isfile("SPE1_PARALLELL.LOG"))
        self.assertTrue(os.path.getsize("SPE1_PARALLELL.LOG") > 0)

    @pytest.mark.equinor_test
    @tmpdir()
    def test_summary_block(self):
        self.init_eclrun_config()
        shutil.copy(
            os.path.join(self.SOURCE_ROOT,
                         "../test-data/local/eclipse/SPE1.DATA"),
            "SPE1.DATA",
        )
        ecl_config = Ecl100Config()
        ecl_run = EclRun("SPE1.DATA", None)
        ret_value = ecl_run.summary_block()
        self.assertTrue(ret_value is None)

        ecl_run.runEclipse(eclrun_config=EclrunConfig(ecl_config, "2019.3"))
        ecl_sum = ecl_run.summary_block()
        self.assertTrue(isinstance(ecl_sum, EclSum))

    @pytest.mark.equinor_test
    @tmpdir()
    def test_check(self):
        full_case = os.path.join(
            self.SOURCE_ROOT, "../test-data/Equinor/ECLIPSE/Gurbat/ECLIPSE")
        short_case = os.path.join(
            self.SOURCE_ROOT,
            "../test-data/Equinor/ECLIPSE/ShortSummary/ECLIPSE")
        failed_case = os.path.join(
            self.SOURCE_ROOT,
            "../test-data/Equinor/ECLIPSE/SummaryFail/NOR-2013A_R002_1208-0",
        )

        with self.assertRaises(IOError):
            self.assertTrue(EclRun.checkCase(full_case, failed_case))

        with self.assertRaises(IOError):
            self.assertTrue(EclRun.checkCase(full_case, "DOES-NOT-EXIST"))

        with self.assertRaises(IOError):
            self.assertTrue(EclRun.checkCase("DOES-NOT-EXIST", full_case))

        with self.assertRaises(ValueError):
            EclRun.checkCase(full_case, short_case)

        self.assertTrue(not os.path.isfile("CHECK_ECLIPSE_RUN.OK"))
        self.assertTrue(EclRun.checkCase(full_case, full_case))
        self.assertTrue(os.path.isfile("CHECK_ECLIPSE_RUN.OK"))

        os.remove("CHECK_ECLIPSE_RUN.OK")
        self.assertTrue(EclRun.checkCase(
            short_case, full_case))  # Simulation is longer than refcase - OK
        self.assertTrue(os.path.isfile("CHECK_ECLIPSE_RUN.OK"))
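
Example #3 drives MonkeyPatch by hand from unittest-style setUp/tearDown instead of the monkeypatch fixture. A minimal sketch of that manual pattern, assuming pytest >= 6.2 where pytest.MonkeyPatch is public API (EXAMPLE_VAR is a made-up variable name):

import os
import unittest

import pytest


class ManualMonkeyPatchExample(unittest.TestCase):
    def setUp(self):
        # Created by hand, so changes are NOT undone automatically.
        self.monkeypatch = pytest.MonkeyPatch()
        self.monkeypatch.setenv("EXAMPLE_VAR", "value")  # hypothetical variable

    def tearDown(self):
        # undo() reverts every change made through this MonkeyPatch instance.
        self.monkeypatch.undo()

    def test_env_is_set(self):
        self.assertEqual(os.environ["EXAMPLE_VAR"], "value")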
Example #4
 def test_glibc_version_string_confstr_missing(
         self, monkeypatch: pytest.MonkeyPatch) -> None:
     monkeypatch.delattr(os, "confstr", raising=False)
     assert glibc_version_string_confstr() is None
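
For context, a minimal sketch of monkeypatch.delattr(..., raising=False) in isolation: the attribute is removed when present, and the call is a no-op when it is already missing (os.confstr is reused here purely as a convenient target):

import os

import pytest


def test_delattr_tolerates_missing_attribute(monkeypatch: pytest.MonkeyPatch) -> None:
    # raising=False keeps this from failing on platforms where os.confstr
    # does not exist (e.g. Windows); otherwise the attribute is removed
    # for the duration of the test.
    monkeypatch.delattr(os, "confstr", raising=False)
    assert not hasattr(os, "confstr")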
Example #5
 def test_get_prog(self, monkeypatch: pytest.MonkeyPatch, argv: str,
                   executable: str, expected: str) -> None:
     monkeypatch.setattr("pip._internal.utils.misc.sys.argv", [argv])
     monkeypatch.setattr("pip._internal.utils.misc.sys.executable",
                         executable)
     assert get_prog() == expected
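
The test above passes setattr a dotted string target instead of an object; pytest imports the module part and patches the final attribute. A tiny sketch of that form, using os.path.sep only as a convenient existing attribute:

import os.path

import pytest


def test_setattr_with_string_target(monkeypatch: pytest.MonkeyPatch) -> None:
    # "os.path.sep" is resolved by pytest: the module is imported and the
    # attribute replaced until the test finishes.
    monkeypatch.setattr("os.path.sep", "/")
    assert os.path.sep == "/"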
Example #6
def mock_home_dir(monkeypatch: MonkeyPatch, tmp_path: Path) -> Path:
    monkeypatch.setattr(Path, "home", lambda: tmp_path)
    return tmp_path
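
A hedged usage sketch for the fixture above, assuming mock_home_dir is registered with @pytest.fixture (the decorator is not shown in the snippet):

from pathlib import Path


def test_home_is_redirected(mock_home_dir: Path) -> None:
    # Path.home() now resolves to the pytest-provided temporary directory
    # because the fixture patched it before the test body runs.
    assert Path.home() == mock_home_dir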
Example #7
def test_rmtree_retries(monkeypatch: pytest.MonkeyPatch) -> None:
    """
    Test pip._internal.utils.rmtree will retry failures
    """
    monkeypatch.setattr(shutil, "rmtree", Failer(duration=1).call)
    rmtree("foo")
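
Failer comes from the surrounding test suite and is not shown here. A hypothetical stand-in consistent with how the test uses it (assumption: it raises OSError until `duration` seconds have elapsed, so rmtree's retry loop is exercised):

import time


class Failer:
    # Hypothetical helper: fails for `duration` seconds, then succeeds.
    def __init__(self, duration: float = 1) -> None:
        self.deadline = time.time() + duration

    def call(self, *args, **kwargs) -> None:
        """Stand-in for shutil.rmtree that raises until the deadline passes."""
        if time.time() < self.deadline:
            raise OSError("temporary failure")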
Example #8
def test_openapi_activate_changes(
    monkeypatch: pytest.MonkeyPatch,
    wsgi_app,
    with_automation_user,
    mock_livestatus: MockLiveStatusConnection,
):
    username, secret = with_automation_user
    wsgi_app.set_authorization(("Bearer", username + " " + secret))

    base = "/NO_SITE/check_mk/api/1.0"

    # We create a host
    live = mock_livestatus

    host_created = wsgi_app.call_method(
        "post",
        base + "/domain-types/host_config/collections/all",
        params='{"host_name": "foobar", "folder": "/"}',
        status=200,
        content_type="application/json",
    )

    with live(expect_status_query=True):
        resp = wsgi_app.call_method(
            "post",
            base +
            "/domain-types/activation_run/actions/activate-changes/invoke",
            status=400,
            params='{"sites": ["asdf"]}',
            content_type="application/json",
        )
        assert "Unknown site" in repr(resp.json), resp.json

        resp = wsgi_app.call_method(
            "post",
            base +
            "/domain-types/activation_run/actions/activate-changes/invoke",
            status=200,
            content_type="application/json",
        )

    with live(expect_status_query=True):
        resp = wsgi_app.call_method(
            "post",
            base +
            "/domain-types/activation_run/actions/activate-changes/invoke",
            status=302,
            params='{"redirect": true}',
            content_type="application/json",
        )

    for _ in range(10):
        resp = wsgi_app.follow_link(
            resp,
            CMK_WAIT_FOR_COMPLETION,
        )
        if resp.status_code == 204:
            break

    # We delete the host again
    monkeypatch.setattr(
        "cmk.gui.watolib.hosts_and_folders.delete_hosts",
        lambda *args, **kwargs: None,
    )
    wsgi_app.follow_link(
        host_created,
        ".../delete",
        status=204,
        headers={"If-Match": host_created.headers["ETag"]},
        content_type="application/json",
    )

    # And activate the changes

    with live(expect_status_query=True):
        resp = wsgi_app.call_method(
            "post",
            base +
            "/domain-types/activation_run/actions/activate-changes/invoke",
            content_type="application/json",
        )

    for _ in range(10):
        resp = wsgi_app.follow_link(
            resp,
            CMK_WAIT_FOR_COMPLETION,
        )
        if resp.status_code == 204:
            break
Example #9
def test_user_agent_user_data(monkeypatch: pytest.MonkeyPatch) -> None:
    monkeypatch.setenv("PIP_USER_AGENT_USER_DATA", "some_string")
    assert "some_string" in PipSession().headers["User-Agent"]
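
A minimal sketch of monkeypatch.setenv on its own: the variable is visible through os.environ inside the test and removed again at teardown (EXAMPLE_UA_DATA is a made-up name):

import os

import pytest


def test_setenv_is_visible_and_temporary(monkeypatch: pytest.MonkeyPatch) -> None:
    monkeypatch.setenv("EXAMPLE_UA_DATA", "some_string")
    # Visible for this test; pytest restores the previous environment afterwards.
    assert os.environ["EXAMPLE_UA_DATA"] == "some_string"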
Example #10
    def test_get_noverify(self, monkeypatch: MonkeyPatch):
        scget = SplitCopyGet()
        scget.scs = MockSplitCopyShared()
        scget.progress = MockProgress()

        def validate_remote_path_get():
            pass

        def delete_target_local():
            pass

        def remote_filesize():
            return 1000000

        def split_file_remote(scp_lib, file_size, split_size, remote_tmpdir):
            pass

        def get_chunk_info(remote_tmpdir):
            return [["a", 1234], ["b", 1234], ["c", 1234]]

        def get_files(ftp_lib, ssh_lib, scp_lib, chunk, remote_tmpdir,
                      ssh_kwargs):
            return None

        def join_files_local():
            pass

        def inc_percentage():
            for n in range(90, 101):
                time.sleep(0.1)
                scget.progress.totals["percent_done"] = n

        scget.noverify = True
        monkeypatch.setattr(scget, "validate_remote_path_get",
                            validate_remote_path_get)
        monkeypatch.setattr(scget, "delete_target_local", delete_target_local)
        monkeypatch.setattr(scget, "remote_filesize", remote_filesize)
        monkeypatch.setattr(scget, "split_file_remote", split_file_remote)
        monkeypatch.setattr(scget, "get_chunk_info", get_chunk_info)
        monkeypatch.setattr(scget, "get_files", get_files)
        monkeypatch.setattr(scget, "join_files_local", join_files_local)
        thread = Thread(
            name="inc_percentage_done",
            target=inc_percentage,
        )
        thread.start()
        result = scget.get()
        thread.join()
        assert isinstance(result[0], datetime.datetime), isinstance(
            result[1], datetime.datetime)
Example #11
    def test_get_fail(self, monkeypatch: MonkeyPatch):
        def validate_remote_path_get():
            pass

        def delete_target_local():
            pass

        def remote_filesize():
            return 1000000

        def remote_sha_get():
            return "abcdef012345"

        def split_file_remote(scp_lib, file_size, split_size, remote_tmpdir):
            pass

        def get_chunk_info(remote_tmpdir):
            return [["a", 1234], ["b", 1234], ["c", 1234]]

        def get_files(ftp_lib, ssh_lib, scp_lib, chunk, remote_tmpdir,
                      ssh_kwargs):
            raise TransferError

        scget = SplitCopyGet()
        scget.scs = MockSplitCopyShared()
        scget.progress = MockProgress()
        monkeypatch.setattr(scget, "validate_remote_path_get",
                            validate_remote_path_get)
        monkeypatch.setattr(scget, "delete_target_local", delete_target_local)
        monkeypatch.setattr(scget, "remote_filesize", remote_filesize)
        monkeypatch.setattr(scget, "remote_sha_get", remote_sha_get)
        monkeypatch.setattr(scget, "split_file_remote", split_file_remote)
        monkeypatch.setattr(scget, "get_chunk_info", get_chunk_info)
        monkeypatch.setattr(scget, "get_files", get_files)

        with raises(SystemExit):
            scget.get()
Example #12
 def test_js_resources_hashes_mock_non_full(self, v: str, monkeypatch: pytest.MonkeyPatch) -> None:
     monkeypatch.setattr(buv, "__version__", v)
     monkeypatch.setattr(resources, "__version__", v)
     r = resources.JSResources()
     assert r.mode == "cdn"
     assert r.hashes == {}
Example #13
def test_refresh_from_environment_variable(mocked_method, monkeypatch: pytest.MonkeyPatch):
    monkeypatch.setenv(Credentials.AUTH_MODE.legacy.get_env_name(), AuthType.EXTERNAL_PROCESS.name, prepend=False)
    cc = RawSynchronousFlyteClient(PlatformConfig(auth_mode=None).auto(None))
    cc.refresh_credentials()
    assert mocked_method.called
Example #14
def test_synchronize_site(
    mocked_responses: responses.RequestsMock,
    monkeypatch: pytest.MonkeyPatch,
    edition: cmk_version.Edition,
    tmp_path: Path,
    mocker: MockerFixture,
):
    if edition is cmk_version.Edition.CME:
        pytest.skip("Seems faked site environment is not 100% correct")

    mocked_responses.add(
        method=responses.POST,
        url=
        "http://localhost/unit_remote_1/check_mk/automation.py?command=get-config-sync-state",
        body=repr((
            {
                "etc/check_mk/conf.d/wato/hosts.mk": (
                    33204,
                    15,
                    None,
                    "0fc4df48a03c3e972a86c9d573bc04f6e2a5d91aa368d7f4ce4ec5cd93ee5725",
                ),
                "etc/check_mk/multisite.d/wato/global.mk": (
                    33204,
                    6,
                    None,
                    "0e10d5fc5aedd798b68706c0189aeccadccae1fa6cc72324524293769336571c",
                ),
                "etc/htpasswd": (
                    33204,
                    0,
                    None,
                    "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
                ),
            },
            0,
        )),
    )

    mocked_responses.add(
        method=responses.POST,
        url=
        "http://localhost/unit_remote_1/check_mk/automation.py?command=receive-config-sync",
        body="True",
    )

    snapshot_data_collector_class = ("CMESnapshotDataCollector"
                                     if edition is cmk_version.Edition.CME else
                                     "CRESnapshotDataCollector")

    is_pre_17_site = False
    monkeypatch.setattr(cmk_version, "is_raw_edition",
                        lambda: edition is cmk_version.Edition.CRE)
    monkeypatch.setattr(cmk_version, "is_managed_edition",
                        lambda: edition is cmk_version.Edition.CME)
    monkeypatch.setattr(utils, "is_pre_17_remote_site",
                        lambda s: is_pre_17_site)

    activation_manager = _get_activation_manager(monkeypatch)
    assert activation_manager._activation_id is not None
    snapshot_settings = _create_sync_snapshot(
        activation_manager,
        snapshot_data_collector_class,
        monkeypatch,
        tmp_path,
        is_pre_17_site=is_pre_17_site,
        remote_site=SiteId("unit_remote_1"),
        edition=edition,
    )

    site_activation = activate_changes.ActivateChangesSite(
        SiteId("unit_remote_1"),
        snapshot_settings,
        activation_manager._activation_id,
        prevent_activate=True,
    )

    site_activation._time_started = time.time()
    site_activation._synchronize_site()
Example #15
 def suppress_get_configuration_automation_call(
         monkeypatch: MonkeyPatch) -> None:
     monkeypatch.setattr(
         "cmk.gui.watolib.check_mk_automations.get_configuration",
         lambda *args, **kwargs: GetConfigurationResult({}),
     )
Example #16
class RMSConfigTest(ResTest):
    def setUp(self):
        self.monkeypatch = MonkeyPatch()

    def tearDown(self):
        self.monkeypatch.undo()

    def test_load(self):
        self.monkeypatch.setenv("RMS_SITE_CONFIG", "file/does/not/exist")
        with self.assertRaises(IOError):
            conf = RMSConfig()

        self.monkeypatch.setenv("RMS_SITE_CONFIG", RMSConfig.DEFAULT_CONFIG_FILE)
        conf = RMSConfig()

        with self.assertRaises(OSError):
            exe = conf.executable

        with TestAreaContext("yaml"):
            with open("file.yml", "w") as f:
                f.write("this:\n -should\n-be\ninvalid:yaml?")

            self.monkeypatch.setenv("RMS_SITE_CONFIG", "file.yml")
            with self.assertRaises(ValueError):
                conf = RMSConfig()

            os.mkdir("bin")
            with open("bin/rms", "w") as f:
                f.write("This is an RMS executable ...")
            os.chmod("bin/rms", stat.S_IEXEC)

            with open("file.yml", "w") as f:
                f.write("executable: bin/rms")

            conf = RMSConfig()
            self.assertEqual(conf.executable, "bin/rms")
            self.assertIsNone(conf.threads)

            with open("file.yml", "w") as f:
                f.write("executable: bin/rms\n")
                f.write("threads: 17")

            conf = RMSConfig()
            self.assertEqual(conf.threads, 17)

            with open("file.yml", "w") as f:
                f.write("executable: bin/rms\n")
                f.write("wrapper: not-exisiting-exec")

            conf = RMSConfig()

            with self.assertRaises(OSError):
                conf.wrapper

            with open("file.yml", "w") as f:
                f.write("executable: bin/rms\n")
                f.write("wrapper: bash")

            conf = RMSConfig()
            self.assertEqual(conf.wrapper, "bash")

    def test_load_env(self):
        with TestAreaContext("yaml"):
            self.monkeypatch.setenv("RMS_SITE_CONFIG", "file.yml")
            with open("file.yml", "w") as f:
                f.write(
                    """\
executable: bin/rms\n
wrapper: bash
env:
  10.1.3:
    PATH_PREFIX: /some/path
    PYTHONPATH: /some/pythonpath
"""
                )
            conf = RMSConfig()
            self.assertEqual(conf.env("10.1.3")["PATH_PREFIX"], "/some/path")
            self.assertEqual(conf.env("10.1.3")["PYTHONPATH"], "/some/pythonpath")
            self.assertEqual(conf.env("non_existing"), {})
Example #17
def clean_env(monkeypatch: MonkeyPatch) -> None:
    monkeypatch.delenv("VAULT_TOKEN", raising=False)
    monkeypatch.delenv("VAULT_ADDR", raising=False)
    monkeypatch.delenv("VAULT_ROLE_ID", raising=False)
    monkeypatch.delenv("VAULT_SECRET_ID", raising=False)
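
A small sketch of the raising=False behaviour relied on above: delenv silently skips variables that are not set instead of raising KeyError (SOME_UNSET_VARIABLE is a made-up name):

import os

import pytest


def test_delenv_missing_is_tolerated(monkeypatch: pytest.MonkeyPatch) -> None:
    # No-op if the variable is absent; removed for this test if it is set.
    monkeypatch.delenv("SOME_UNSET_VARIABLE", raising=False)
    assert "SOME_UNSET_VARIABLE" not in os.environ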
Example #18
 def setUp(self):
     self.monkeypatch = MonkeyPatch()
Example #19
 def test_doesnt_validate_doc_due_to_env_var(self, check_integrity, monkeypatch: pytest.MonkeyPatch, test_plot) -> None:
     monkeypatch.setenv("BOKEH_VALIDATE_DOC", "false")
     with beu.OutputDocumentFor([test_plot]):
         pass
     assert not check_integrity.called
Example #20
def test_envdel_unset(monkeypatch: pytest.MonkeyPatch) -> None:
    monkeypatch.delenv(ENVVAR, raising=False)
    with envdel(ENVVAR):
        assert ENVVAR not in os.environ
    assert ENVVAR not in os.environ
Example #21
 def test_glibc_version_string_confstr_fail(
         self, monkeypatch: pytest.MonkeyPatch,
         failure: Callable[[Any], Any]) -> None:
     monkeypatch.setattr(os, "confstr", failure, raising=False)
     assert glibc_version_string_confstr() is None
Example #22
def test_envdel_unset_modified(monkeypatch: pytest.MonkeyPatch) -> None:
    monkeypatch.delenv(ENVVAR, raising=False)
    with envdel(ENVVAR):
        assert ENVVAR not in os.environ
        os.environ[ENVVAR] = "quux"
    assert ENVVAR not in os.environ
Example #23
 def test_glibc_version_string_ctypes_missing(
         self, monkeypatch: pytest.MonkeyPatch) -> None:
     monkeypatch.setitem(sys.modules, "ctypes", None)
     assert glibc_version_string_ctypes() is None
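
The test above uses monkeypatch.setitem on sys.modules to simulate a missing module. A minimal sketch of setitem on a plain mapping (the dictionary and key are made up for illustration):

import pytest


def test_setitem_is_scoped_to_the_test(monkeypatch: pytest.MonkeyPatch) -> None:
    config = {"feature_enabled": False}  # hypothetical settings dict
    monkeypatch.setitem(config, "feature_enabled", True)
    # The original value is restored automatically after the test.
    assert config["feature_enabled"] is True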
Example #24
def test_envdel(monkeypatch: pytest.MonkeyPatch) -> None:
    monkeypatch.setenv(ENVVAR, "foo")
    with envdel(ENVVAR):
        assert ENVVAR not in os.environ
    assert os.environ[ENVVAR] == "foo"
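
The three envdel tests above (Examples #20, #22 and #24) pin down the contract of an envdel context manager that is not shown here. A hypothetical implementation consistent with those assertions (the real one may differ):

import os
from contextlib import contextmanager
from typing import Iterator

_SENTINEL = object()


@contextmanager
def envdel(name: str) -> Iterator[None]:
    """Temporarily remove `name` from os.environ, restoring it afterwards."""
    saved = os.environ.pop(name, _SENTINEL)
    try:
        yield
    finally:
        # Drop anything set inside the block, then restore the original value.
        os.environ.pop(name, None)
        if saved is not _SENTINEL:
            os.environ[name] = saved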
Example #25
async def test_parse_ids_current_user(monkeypatch: pytest.MonkeyPatch):
    """It should work if the user in the ID is the current user."""
    monkeypatch.setattr('cylc.flow.id_cli.get_user', lambda: 'rincewind')
    await parse_ids_async('~rincewind/luggage', constraint='workflows')
Example #26
class EclRunTest(ResTest):
    def setUp(self):
        self.ecl_config_path = os.path.dirname(inspect.getsourcefile(Ecl100Config))
        self.monkeypatch = MonkeyPatch()

    def tearDown(self):
        self.monkeypatch.undo()

    def init_ecl100_config(self):
        ecl14_prefix = "/prog/ecl/grid/2014.2/bin/linux_x86_64/"
        ecl19_prefix = "/prog/res/ecl/grid/2019.3/bin/linux_x86_64/"
        mpi_prefix = "/prog/ecl/grid/tools/linux_x86_64/intel/mpi/5.0.2.044/"
        conf = {
            "env": {
                "F_UFMTENDIAN": "big",
                "LM_LICENSE_FILE": "*****@*****.**",
                "ARCH": "x86_64",
            },
            "versions": {
                "2014.2": {
                    "scalar": {"executable": ecl14_prefix + "eclipse.exe"},
                    "mpi": {
                        "executable": ecl14_prefix + "eclipse_ilmpi.exe",
                        "mpirun": mpi_prefix + "bin64/mpirun",
                        "env": {
                            "I_MPI_ROOT": mpi_prefix,
                            "P4_RSHCOMMAND": "ssh",
                            "LD_LIBRARY_PATH": mpi_prefix + "lib64:$LD_LIBRARY_PATH",
                            "PATH": mpi_prefix + "bin64:$PATH",
                        },
                    },
                },
                "2019.3": {
                    "scalar": {"executable": ecl19_prefix + "eclipse.exe"},
                    "mpi": {
                        "executable": ecl19_prefix + "eclipse_ilmpi.exe",
                        "mpirun": mpi_prefix + "bin64/mpirun",
                        "env": {
                            "I_MPI_ROOT": mpi_prefix,
                            "P4_RSHCOMMAND": "ssh",
                            "LD_LIBRARY_PATH": mpi_prefix + "lib64:$LD_LIBRARY_PATH",
                            "PATH": mpi_prefix + "bin64:$PATH",
                        },
                    },
                },
            },
        }
        with open("ecl100_config.yml", "w") as f:
            f.write(yaml.dump(conf))
        self.monkeypatch.setenv("ECL100_SITE_CONFIG", "ecl100_config.yml")

    def init_flow_config(self):
        version = "2018.10"

        p = Popen(["flow", "--version"], stdin=PIPE, stdout=PIPE, stderr=PIPE)
        output, err = p.communicate()
        rc = p.returncode
        if rc == 0:
            version = find_version(output)
        path_to_exe = find_executable("flow")

        conf = {
            "default_version": version,
            "versions": {
                version: {
                    "scalar": {"executable": path_to_exe},
                }
            },
        }

        with open("flow_config.yml", "w") as f:
            f.write(yaml.dump(conf))
        self.monkeypatch.setenv("FLOW_SITE_CONFIG", "flow_config.yml")

    def test_make_LSB_MCPU_machine_list(self):
        self.assertListEqual(
            ["host1", "host1", "host1", "host1", "host2", "host2", "host2", "host2"],
            ecl_run.make_LSB_MCPU_machine_list("host1 4 host2 4"),
        )

    @tmpdir()
    def test_create(self):
        # This test can make do with a mock simulator - just something executable

        conf = {
            "versions": {
                "2014.2": {
                    "scalar": {"executable": "bin/scalar_exe"},
                    "mpi": {"executable": "bin/mpi_exe", "mpirun": "bin/mpirun"},
                }
            }
        }
        with open("ecl100_config.yml", "w") as f:
            f.write(yaml.dump(conf))

        os.mkdir("bin")
        self.monkeypatch.setenv("ECL100_SITE_CONFIG", "ecl100_config.yml")
        for f in ["scalar_exe", "mpi_exe", "mpirun"]:
            fname = os.path.join("bin", f)
            with open(fname, "w") as fh:
                fh.write("This is an executable ...")

            os.chmod(fname, stat.S_IEXEC)

        with open("ECLIPSE.DATA", "w") as f:
            f.write("Mock eclipse data file")

        ecl_config = Ecl100Config()
        sim = ecl_config.sim("2014.2")
        mpi_sim = ecl_config.mpi_sim("2014.2")
        ecl_run = EclRun("ECLIPSE.DATA", sim)
        self.assertEqual(ecl_run.runPath(), os.getcwd())

        os.mkdir("path")
        with open("path/ECLIPSE.DATA", "w") as f:
            f.write("Mock eclipse data file")

        ecl_run = EclRun("path/ECLIPSE.DATA", sim)
        self.assertEqual(ecl_run.runPath(), os.path.join(os.getcwd(), "path"))
        self.assertEqual(ecl_run.baseName(), "ECLIPSE")
        self.assertEqual(1, ecl_run.numCpu())

        # invalid number of CPU
        with self.assertRaises(ValueError):
            ecl_run = EclRun("path/ECLIPSE.DATA", sim, num_cpu="xxx")

        ecl_run = EclRun("path/ECLIPSE.DATA", mpi_sim, num_cpu="10")
        self.assertEqual(10, ecl_run.numCpu())

        # Missing datafile
        with self.assertRaises(IOError):
            ecl_run = EclRun("DOES/NOT/EXIST", mpi_sim, num_cpu="10")

    @pytest.mark.xfail(
        reason="Finding a version on Komodo of flow that is not OPM-flow"
    )
    @flow_installed
    @tmpdir()
    def test_flow(self):
        self.init_flow_config()
        shutil.copy(
            os.path.join(self.TESTDATA_ROOT, "local/eclipse/SPE1.DATA"),
            "SPE1.DATA",
        )
        shutil.copy(
            os.path.join(self.TESTDATA_ROOT, "local/eclipse/SPE1_ERROR.DATA"),
            "SPE1_ERROR.DATA",
        )
        flow_config = FlowConfig()
        sim = flow_config.sim()
        flow_run = EclRun("SPE1.DATA", sim)
        flow_run.runEclipse()

        run(flow_config, ["SPE1.DATA"])

        flow_run = EclRun("SPE1_ERROR.DATA", sim)
        with self.assertRaises(Exception):
            flow_run.runEclipse()

        run(flow_config, ["SPE1_ERROR.DATA", "--ignore-errors"])

        # Invalid version
        with self.assertRaises(Exception):
            run(flow_config, ["SPE1.DATA", "--version=no/such/version"])

    @tmpdir()
    def test_running_flow_given_env_config_can_still_read_parent_env(self):
        version = "1111.11"

        # create a script that prints env vars ENV1 and ENV2 to a file
        with open("flow", "w") as f:
            f.write("#!/bin/bash\n")
            f.write("echo $ENV1 > out.txt\n")
            f.write("echo $ENV2 >> out.txt\n")
        executable = os.path.join(os.getcwd(), "flow")
        os.chmod(executable, 0o777)

        # create a flow_config.yml with environment extension ENV2
        conf = {
            "default_version": version,
            "versions": {
                version: {
                    "scalar": {"executable": executable, "env": {"ENV2": "VAL2"}},
                }
            },
        }

        with open("flow_config.yml", "w") as f:
            f.write(yaml.dump(conf))

        # set the environment variable ENV1
        self.monkeypatch.setenv("ENV1", "VAL1")
        self.monkeypatch.setenv("FLOW_SITE_CONFIG", "flow_config.yml")

        with open("DUMMY.DATA", "w") as f:
            f.write("dummy")

        with open("DUMMY.PRT", "w") as f:
            f.write("Errors 0\n")
            f.write("Bugs 0\n")

        # run the script
        flow_config = FlowConfig()
        sim = flow_config.sim()
        flow_run = EclRun("DUMMY.DATA", sim)
        flow_run.runEclipse()

        # assert that the script was able to read both variables correctly
        with open("out.txt") as f:
            lines = f.readlines()

        self.assertEqual(len(lines), 2)
        self.assertEqual(lines[0].strip(), "VAL1")
        self.assertEqual(lines[1].strip(), "VAL2")

    @tmpdir()
    def test_running_flow_given_no_env_config_can_still_read_parent_env(self):
        version = "1111.11"

        # create a script that prints env vars ENV1 and ENV2 to a file
        with open("flow", "w") as f:
            f.write("#!/bin/bash\n")
            f.write("echo $ENV1 > out.txt\n")
            f.write("echo $ENV2 >> out.txt\n")
        executable = os.path.join(os.getcwd(), "flow")
        os.chmod(executable, 0o777)

        # create a flow_config.yml without any environment extension
        conf = {
            "default_version": version,
            "versions": {
                version: {
                    "scalar": {"executable": executable},
                }
            },
        }

        with open("flow_config.yml", "w") as f:
            f.write(yaml.dump(conf))

        # set the environment variables ENV1 and ENV2
        self.monkeypatch.setenv("ENV1", "VAL1")
        self.monkeypatch.setenv("ENV2", "VAL2")
        self.monkeypatch.setenv("FLOW_SITE_CONFIG", "flow_config.yml")

        with open("DUMMY.DATA", "w") as f:
            f.write("dummy")

        with open("DUMMY.PRT", "w") as f:
            f.write("Errors 0\n")
            f.write("Bugs 0\n")

        # run the script
        flow_config = FlowConfig()
        sim = flow_config.sim()
        flow_run = EclRun("DUMMY.DATA", sim)
        flow_run.runEclipse()

        # assert that the script was able to read both variables correctly
        with open("out.txt") as f:
            lines = f.readlines()

        self.assertEqual(len(lines), 2)
        self.assertEqual(lines[0].strip(), "VAL1")
        self.assertEqual(lines[1].strip(), "VAL2")

    @tmpdir()
    def test_running_flow_given_env_variables_with_same_name_as_parent_env_variables_will_overwrite(  # noqa
        self,
    ):
        version = "1111.11"

        # create a script that prints env vars ENV1 and ENV2 to a file
        with open("flow", "w") as f:
            f.write("#!/bin/bash\n")
            f.write("echo $ENV1 > out.txt\n")
            f.write("echo $ENV2 >> out.txt\n")
        executable = os.path.join(os.getcwd(), "flow")
        os.chmod(executable, 0o777)

        # create a flow_config.yml that overrides both ENV1 and ENV2
        conf = {
            "default_version": version,
            "versions": {
                version: {
                    "scalar": {
                        "executable": executable,
                        "env": {"ENV1": "OVERWRITTEN1", "ENV2": "OVERWRITTEN2"},
                    },
                }
            },
        }

        with open("flow_config.yml", "w") as f:
            f.write(yaml.dump(conf))

        # set the environment variables ENV1 and ENV2
        self.monkeypatch.setenv("ENV1", "VAL1")
        self.monkeypatch.setenv("ENV2", "VAL2")
        self.monkeypatch.setenv("FLOW_SITE_CONFIG", "flow_config.yml")

        with open("DUMMY.DATA", "w") as f:
            f.write("dummy")

        with open("DUMMY.PRT", "w") as f:
            f.write("Errors 0\n")
            f.write("Bugs 0\n")

        # run the script
        flow_config = FlowConfig()
        sim = flow_config.sim()
        flow_run = EclRun("DUMMY.DATA", sim)
        flow_run.runEclipse()

        # assert that the config values overwrote the parent environment variables
        with open("out.txt") as f:
            lines = f.readlines()

        self.assertEqual(len(lines), 2)
        self.assertEqual(lines[0].strip(), "OVERWRITTEN1")
        self.assertEqual(lines[1].strip(), "OVERWRITTEN2")

    @tmpdir()
    @pytest.mark.equinor_test
    def test_run(self):
        self.init_ecl100_config()
        shutil.copy(
            os.path.join(self.TESTDATA_ROOT, "local/eclipse/SPE1.DATA"),
            "SPE1.DATA",
        )
        ecl_config = Ecl100Config()
        sim = ecl_config.sim("2014.2")
        ecl_run = EclRun("SPE1.DATA", sim)
        ecl_run.runEclipse()

        ok_path = os.path.join(ecl_run.runPath(), "{}.OK".format(ecl_run.baseName()))
        log_path = os.path.join(ecl_run.runPath(), "{}.LOG".format(ecl_run.baseName()))

        self.assertTrue(os.path.isfile(ok_path))
        self.assertTrue(os.path.isfile(log_path))
        self.assertTrue(os.path.getsize(log_path) > 0)

        errors = ecl_run.parseErrors()
        self.assertEqual(0, len(errors))

        # Monkey patching the ecl_run to use an executable which
        # will fail with exit(1); we don't think Eclipse actually
        # fails with exit(1), but let us at least be prepared
        # when/if it does.
        ecl_run.sim.executable = os.path.join(
            self.SOURCE_ROOT, "tests/libres_tests/res/fm/ecl_run_fail"
        )
        with self.assertRaises(Exception):
            ecl_run.runEclipse()

    @tmpdir()
    @pytest.mark.equinor_test
    def test_run_new_log_file(self):
        self.init_ecl100_config()
        shutil.copy(
            os.path.join(self.TESTDATA_ROOT, "local/eclipse/SPE1.DATA"),
            "SPE1.DATA",
        )
        ecl_config = Ecl100Config()
        sim = ecl_config.sim("2019.3")
        ecl_run = EclRun("SPE1.DATA", sim)
        ecl_run.runEclipse()

        ok_path = os.path.join(ecl_run.runPath(), "{}.OK".format(ecl_run.baseName()))
        log_path = os.path.join(ecl_run.runPath(), "{}.OUT".format(ecl_run.baseName()))

        self.assertTrue(os.path.isfile(ok_path))
        self.assertTrue(os.path.isfile(log_path))
        self.assertTrue(os.path.getsize(log_path) > 0)

        errors = ecl_run.parseErrors()
        self.assertEqual(0, len(errors))

        # Monkey patching the ecl_run to use an executable which
        # will fail with exit(1); we don't think Eclipse actually
        # fails with exit(1), but let us at least be prepared
        # when/if it does.
        ecl_run.sim.executable = os.path.join(
            self.SOURCE_ROOT, "tests/libres_tests/res/fm/ecl_run_fail"
        )
        with self.assertRaises(Exception):
            ecl_run.runEclipse()

    @pytest.mark.equinor_test
    @tmpdir()
    def test_run_api(self):
        self.init_ecl100_config()
        shutil.copy(
            os.path.join(self.TESTDATA_ROOT, "local/eclipse/SPE1.DATA"),
            "SPE1.DATA",
        )
        ecl_config = Ecl100Config()
        run(ecl_config, ["SPE1.DATA", "--version=2014.2"])

        self.assertTrue(os.path.isfile("SPE1.DATA"))

    @pytest.mark.equinor_test
    @tmpdir()
    def test_failed_run(self):
        self.init_ecl100_config()
        shutil.copy(
            os.path.join(self.TESTDATA_ROOT, "local/eclipse/SPE1_ERROR.DATA"),
            "SPE1_ERROR.DATA",
        )
        ecl_config = Ecl100Config()
        sim = ecl_config.sim("2014.2")
        ecl_run = EclRun("SPE1_ERROR", sim)
        with self.assertRaises(Exception):
            ecl_run.runEclipse()
        try:
            ecl_run.runEclipse()
        except Exception as e:
            self.assertTrue("ERROR" in str(e))

    @pytest.mark.equinor_test
    @tmpdir()
    def test_failed_run_OK(self):
        self.init_ecl100_config()
        shutil.copy(
            os.path.join(self.TESTDATA_ROOT, "local/eclipse/SPE1_ERROR.DATA"),
            "SPE1_ERROR.DATA",
        )
        ecl_config = Ecl100Config()
        run(ecl_config, ["SPE1_ERROR", "--version=2014.2", "--ignore-errors"])

        # Monkey patching the ecl_run to use an executable which will fail with exit(1);
        # in nocheck mode that should also be OK.
        sim = ecl_config.sim("2014.2")
        ecl_run = EclRun("SPE1_ERROR", sim, check_status=False)
        ecl_run.sim.executable = os.path.join(
            self.SOURCE_ROOT, "tests/libres_tests/res/fm/ecl_run_fail"
        )
        ecl_run.runEclipse()

    @pytest.mark.equinor_test
    @tmpdir()
    def test_mpi_run(self):
        self.init_ecl100_config()
        shutil.copy(
            os.path.join(self.TESTDATA_ROOT, "local/eclipse/SPE1_PARALLELL.DATA"),
            "SPE1_PARALLELL.DATA",
        )
        ecl_config = Ecl100Config()
        run(ecl_config, ["SPE1_PARALLELL.DATA", "--version=2014.2", "--num-cpu=2"])
        self.assertTrue(os.path.isfile("SPE1_PARALLELL.LOG"))
        self.assertTrue(os.path.getsize("SPE1_PARALLELL.LOG") > 0)

    @pytest.mark.equinor_test
    @tmpdir()
    def test_summary_block(self):
        self.init_ecl100_config()
        shutil.copy(
            os.path.join(self.TESTDATA_ROOT, "local/eclipse/SPE1.DATA"),
            "SPE1.DATA",
        )
        ecl_config = Ecl100Config()
        sim = ecl_config.sim("2014.2")
        ecl_run = EclRun("SPE1.DATA", sim)
        ret_value = ecl_run.summary_block()
        self.assertTrue(ret_value is None)

        ecl_run.runEclipse()
        ecl_sum = ecl_run.summary_block()
        self.assertTrue(isinstance(ecl_sum, EclSum))

    @pytest.mark.equinor_test
    @tmpdir()
    def test_check(self):
        full_case = os.path.join(self.TESTDATA_ROOT, "Equinor/ECLIPSE/Gurbat/ECLIPSE")
        short_case = os.path.join(
            self.TESTDATA_ROOT, "Equinor/ECLIPSE/ShortSummary/ECLIPSE"
        )
        failed_case = os.path.join(
            self.SOURCE_ROOT,
            "test-data/Equinor/ECLIPSE/SummaryFail/NOR-2013A_R002_1208-0",
        )

        with self.assertRaises(IOError):
            self.assertTrue(EclRun.checkCase(full_case, failed_case))

        with self.assertRaises(IOError):
            self.assertTrue(EclRun.checkCase(full_case, "DOES-NOT-EXIST"))

        with self.assertRaises(IOError):
            self.assertTrue(EclRun.checkCase("DOES-NOT-EXIST", full_case))

        with self.assertRaises(ValueError):
            EclRun.checkCase(full_case, short_case)

        self.assertTrue(not os.path.isfile("CHECK_ECLIPSE_RUN.OK"))
        self.assertTrue(EclRun.checkCase(full_case, full_case))
        self.assertTrue(os.path.isfile("CHECK_ECLIPSE_RUN.OK"))

        os.remove("CHECK_ECLIPSE_RUN.OK")
        self.assertTrue(
            EclRun.checkCase(short_case, full_case)
        )  # Simulation is longer than refcase - OK
        self.assertTrue(os.path.isfile("CHECK_ECLIPSE_RUN.OK"))

    @pytest.mark.equinor_test
    @tmpdir()
    def test_error_parse(self):
        self.init_ecl100_config()
        shutil.copy(
            os.path.join(self.TESTDATA_ROOT, "local/eclipse/SPE1.DATA"),
            "SPE1.DATA",
        )
        prt_file = os.path.join(self.TESTDATA_ROOT, "local/eclipse/parse/ERROR.PRT")
        shutil.copy(prt_file, "SPE1.PRT")

        ecl_config = Ecl100Config()
        sim = ecl_config.sim("2014.2")
        ecl_run = EclRun("SPE1.DATA", sim)

        error_list = ecl_run.parseErrors()
        self.assertEqual(len(error_list), 2)

        # NB: The ugly white space in the error0 literal is actually part of
        #     the string we are matching; i.e. it must be retained.
        error0 = """ @--  ERROR  AT TIME        0.0   DAYS    ( 1-JAN-0):
 @           UNABLE TO OPEN INCLUDED FILE                                    
 @           /private/joaho/ERT/git/Gurbat/XXexample_grid_sim.GRDECL         
 @           SYSTEM ERROR CODE IS       29                                   """  # noqa

        error1 = """ @--  ERROR  AT TIME        0.0   DAYS    ( 1-JAN-0):
 @           INCLUDE FILES MISSING.                                          """  # noqa

        self.assertEqual(error_list[0], error0)
        self.assertEqual(error_list[1], error1)

    def test_slurm_env_parsing(self):
        host_list = make_SLURM_machine_list("ws", "2")
        self.assertEqual(host_list, ["ws", "ws"])

        host_list = make_SLURM_machine_list("ws1,ws2", "2,3")
        self.assertEqual(host_list, ["ws1", "ws1", "ws2", "ws2", "ws2"])

        host_list = make_SLURM_machine_list("ws[1-3]", "1,2,3")
        self.assertEqual(host_list, ["ws1", "ws2", "ws2", "ws3", "ws3", "ws3"])

        host_list = make_SLURM_machine_list("ws[1,3]", "1,3")
        self.assertEqual(host_list, ["ws1", "ws3", "ws3", "ws3"])

        host_list = make_SLURM_machine_list("ws[1-3,6-8]", "1,2,3,1,2,3")
        self.assertEqual(
            host_list,
            [
                "ws1",
                "ws2",
                "ws2",
                "ws3",
                "ws3",
                "ws3",
                "ws6",
                "ws7",
                "ws7",
                "ws8",
                "ws8",
                "ws8",
            ],
        )

        host_list = make_SLURM_machine_list("ws[1-3,6-8]", "2(x2),3,1,2(x2)")
        self.assertEqual(
            host_list,
            [
                "ws1",
                "ws1",
                "ws2",
                "ws2",
                "ws3",
                "ws3",
                "ws3",
                "ws6",
                "ws7",
                "ws7",
                "ws8",
                "ws8",
            ],
        )

        host_list = make_SLURM_machine_list("ws[1-3,6],ws[7-8]", "2(x2),3,1,2(x2)")
        self.assertEqual(
            host_list,
            [
                "ws1",
                "ws1",
                "ws2",
                "ws2",
                "ws3",
                "ws3",
                "ws3",
                "ws6",
                "ws7",
                "ws7",
                "ws8",
                "ws8",
            ],
        )
Example #27
def test_openapi_hosts(
    monkeypatch: pytest.MonkeyPatch,
    wsgi_app,
    with_automation_user,
):

    username, secret = with_automation_user
    wsgi_app.set_authorization(("Bearer", username + " " + secret))

    base = "/NO_SITE/check_mk/api/1.0"

    resp = wsgi_app.call_method(
        "post",
        base + "/domain-types/host_config/collections/all",
        params='{"host_name": "foobar", "folder": "/"}',
        status=200,
        content_type="application/json",
    )

    resp = wsgi_app.follow_link(
        resp,
        "self",
        status=200,
    )

    attributes = {
        "ipaddress": "127.0.0.1",
        "snmp_community": {
            "type": "v1_v2_community",
            "community": "blah",
        },
    }
    resp = wsgi_app.follow_link(
        resp,
        ".../update",
        status=200,
        params=json.dumps({"attributes": attributes}),
        headers={"If-Match": resp.headers["ETag"]},
        content_type="application/json",
    )
    got_attributes = resp.json["extensions"]["attributes"]
    assert attributes.items() <= got_attributes.items()  # pylint: disable=dict-items-not-iterating

    resp = wsgi_app.follow_link(
        resp,
        ".../update",
        status=200,
        params='{"update_attributes": {"alias": "bar"}}',
        headers={"If-Match": resp.headers["ETag"]},
        content_type="application/json",
    )
    assert resp.json["extensions"]["attributes"]["alias"] == "bar"

    resp = wsgi_app.follow_link(
        resp,
        ".../update",
        status=200,
        params='{"remove_attributes": ["alias"]}',
        headers={"If-Match": resp.headers["ETag"]},
        content_type="application/json",
    )
    assert (resp.json["extensions"]["attributes"].items() >= {
        "ipaddress": "127.0.0.1"
    }.items())  # pylint: disable=dict-items-not-iterating

    # make sure changes are written to disk:
    resp = wsgi_app.follow_link(resp, "self", status=200)
    assert (resp.json["extensions"]["attributes"].items() >= {
        "ipaddress": "127.0.0.1"
    }.items())  # pylint: disable=dict-items-not-iterating

    # also try to update with wrong attribute
    wsgi_app.follow_link(
        resp,
        ".../update",
        status=400,
        params='{"attributes": {"foobaz": "bar"}}',
        headers={"If-Match": resp.headers["ETag"]},
        content_type="application/json",
    )

    monkeypatch.setattr(
        "cmk.gui.watolib.hosts_and_folders.delete_hosts",
        lambda *args, **kwargs: DeleteHostsResult(),
    )
    wsgi_app.follow_link(
        resp,
        ".../delete",
        status=204,
        content_type="application/json",
    )
Example #28
def fixture_get_languages(monkeypatch: MonkeyPatch) -> None:
    monkeypatch.setattr(
        search,
        "get_languages",
        lambda: [[""], ["de"]],
    )
Example #29
 def setUp(self):
     self.ecl_config_path = os.path.dirname(
         inspect.getsourcefile(Ecl100Config))
     self.monkeypatch = MonkeyPatch()
Example #30
def test_generate_pre_17_site_snapshot(
    edition: cmk_version.Edition,
    monkeypatch: pytest.MonkeyPatch,
    tmp_path: Path,
    with_user_login: UserId,
    remote_site: SiteId,
) -> None:
    snapshot_data_collector_class = ("CMESnapshotDataCollector"
                                     if edition is cmk_version.Edition.CME else
                                     "CRESnapshotDataCollector")

    is_pre_17_site = True
    monkeypatch.setattr(cmk_version, "is_raw_edition",
                        lambda: edition is cmk_version.Edition.CRE)
    monkeypatch.setattr(cmk_version, "is_managed_edition",
                        lambda: edition is cmk_version.Edition.CME)
    monkeypatch.setattr(utils, "is_pre_17_remote_site",
                        lambda s: is_pre_17_site)

    activation_manager = _get_activation_manager(monkeypatch, remote_site)
    snapshot_settings = _create_sync_snapshot(
        activation_manager,
        snapshot_data_collector_class,
        monkeypatch,
        tmp_path,
        is_pre_17_site,
        remote_site,
        edition=edition,
    )

    # And now check the resulting snapshot contents
    unpack_dir = tmp_path / "snapshot_unpack"
    if unpack_dir.exists():
        shutil.rmtree(str(unpack_dir))

    with tarfile.open(snapshot_settings.snapshot_path, "r") as t:
        t.extractall(str(unpack_dir))

    expected_subtars = [
        "auth.secret.tar",
        "password_store.secret.tar",
        "auth.serials.tar",
        "check_mk.tar",
        "diskspace.tar",
        "htpasswd.tar",
        "mkeventd_mkp.tar",
        "mkeventd.tar",
        "multisite.tar",
        "sitespecific.tar",
        "stored_passwords.tar",
        "usersettings.tar",
    ]

    if is_enterprise_repo():
        expected_subtars += [
            "dcd.tar",
            "mknotify.tar",
        ]

    if active_config.sites[remote_site].get("replicate_mkps", False):
        expected_subtars += [
            "local.tar",
            "mkps.tar",
        ]

    if not cmk_version.is_raw_edition():
        expected_subtars.append("liveproxyd.tar")

    if cmk_version.is_managed_edition():
        expected_subtars += [
            "customer_check_mk.tar",
            "customer_gui_design.tar",
            "customer_multisite.tar",
            "gui_logo.tar",
            "gui_logo_dark.tar",
            "gui_logo_facelift.tar",
        ]

    if not is_pre_17_site:
        expected_subtars += [
            "omd.tar",
        ]

    assert sorted(f.name
                  for f in unpack_dir.iterdir()) == sorted(expected_subtars)

    expected_files: Dict[str, List[str]] = {
        "mkeventd_mkp.tar": [],
        "multisite.tar": ["global.mk", "users.mk"],
        "usersettings.tar": [with_user_login],
        "mkeventd.tar": ["rules.mk"],
        "check_mk.tar": ["hosts.mk", "contacts.mk"],
        "htpasswd.tar": ["htpasswd"],
        "liveproxyd.tar": [],
        "sitespecific.tar": ["sitespecific.mk"],
        "stored_passwords.tar": ["stored_passwords"],
        "auth.secret.tar": [],
        "password_store.secret.tar": [],
        "dcd.tar": [],
        "auth.serials.tar": ["auth.serials"],
        "mknotify.tar": [],
        "diskspace.tar": [],
        "omd.tar": [] if is_pre_17_site else ["sitespecific.mk", "global.mk"],
    }

    if active_config.sites[remote_site].get("replicate_mkps", False):
        expected_files.update({"local.tar": [], "mkps.tar": []})

    if cmk_version.is_managed_edition():
        expected_files.update({
            "customer_check_mk.tar": ["customer.mk"],
            "customer_gui_design.tar": [],
            "customer_multisite.tar": ["customer.mk"],
            "gui_logo.tar": [],
            "gui_logo_dark.tar": [],
            "gui_logo_facelift.tar": [],
            # TODO: Shouldn't we clean up these subtle differences?
            "mkeventd.tar": ["rules.mk"],
            "check_mk.tar": ["groups.mk", "contacts.mk", "passwords.mk"],
            "multisite.tar": [
                "bi_config.bi",
                "customers.mk",
                "global.mk",
                "groups.mk",
                "user_connections.mk",
                "users.mk",
            ],
        })

    if not cmk_version.is_raw_edition():
        expected_files["liveproxyd.tar"] = []

    # And now check the subtar contents
    for subtar in unpack_dir.iterdir():
        subtar_unpack_dir = unpack_dir / subtar.stem
        subtar_unpack_dir.mkdir(parents=True, exist_ok=True)

        with tarfile.open(str(subtar), "r") as s:
            s.extractall(str(subtar_unpack_dir))

        files = sorted(
            str(f.relative_to(subtar_unpack_dir))
            for f in subtar_unpack_dir.iterdir())

        assert sorted(expected_files[subtar.name]) == files, (
            "Subtar %s has wrong files" % subtar.name)