Example #1
def step(context, name):
    import os
    import json
    from chroma_core.models import Nid
    from tests.unit.chroma_core.helpers import MockAgentRpc

    # Skip setup if it was already done in a previous scenario.
    if len(MockAgentRpc.mock_servers) > 0:
        return

    path = os.path.join(os.path.dirname(__file__),
                        "../../../../sample_data/%s.json" % name)
    with open(path) as fh:
        data = json.load(fh)

    for host in data["hosts"]:
        host["nids"] = [Nid.split_nid_string(n) for n in host["nids"]]
        MockAgentRpc.mock_servers[host["address"]] = host
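
The step above loads a JSON file from sample_data and registers each host with MockAgentRpc before any scenario runs. A minimal sketch of the file shape it expects, inferred from the keys the loop touches (addresses and NID strings are invented, and real files may carry additional per-host fields that MockAgentRpc consumes):

# Hypothetical sample_data/<name>.json content, written as the dict that
# json.load() would return; values are purely illustrative.
data = {
    "hosts": [
        {"address": "lustre-server-1", "nids": ["10.0.0.1@tcp0"]},
        {"address": "lustre-server-2", "nids": ["10.0.0.2@tcp0"]},
    ]
}
# After the loop, each entry's "nids" list has been replaced by the Nid
# objects returned from Nid.split_nid_string.
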
Example #2
def load_filesystem_from_json(data):
    # Since this is only ever used for the behave tests, and the behave tests
    # are slated to be decommissioned at some point, we're just going to
    # abandon all pretense that we might be loading a non-synthetic cluster.
    from chroma_core.services.job_scheduler.job_scheduler_client import JobSchedulerClient
    from tests.unit.chroma_core.helpers import synthetic_volume
    from chroma_core.models import ManagedMgs, VolumeNode

    from chroma_core.lib.cache import ObjectCache
    from chroma_core.models import ManagedHost, ManagedTarget, ManagedTargetMount
    from chroma_core.models import Nid
    from tests.unit.chroma_core.helpers import synthetic_host
    from tests.unit.chroma_core.helpers import MockAgentRpc

    from collections import defaultdict
    import uuid

    lookup = defaultdict(dict)

    for host_info in data["hosts"]:
        mock_host_info = MockAgentRpc.mock_servers[host_info["address"]]
        # host, command = JobSchedulerClient.create_host(mock_host_info['fqdn'], mock_host_info['nodename'], ['manage_targets'], address = host_info['address'])
        nids = [Nid.split_nid_string(n) for n in mock_host_info["nids"]]

        # update mock_servers with list of Nid objects
        MockAgentRpc.mock_servers[host_info["address"]]["nids"] = nids

        host = synthetic_host(mock_host_info["address"],
                              nids=nids,
                              fqdn=mock_host_info["fqdn"],
                              nodename=mock_host_info["nodename"])
        ObjectCache.add(ManagedHost, host)
        host.state = "managed"
        host.save()
        lookup["hosts"][host_info["address"]] = host

    def _create_volume_for_mounts(mounts):
        # The test data doesn't provide a serial number, so derive one from the
        # device path, assuming iSCSI-style naming (take the last path component).
        serial = mounts[0]["device_node"].split("/")[-1]
        volume = synthetic_volume(serial=serial)
        for mount in mounts:
            VolumeNode.objects.create(host=lookup["hosts"][mount["host"]],
                                      path=mount["device_node"],
                                      primary=mount["primary"],
                                      volume=volume)
        return volume

    for mgs_info in data["mgss"]:
        volume = _create_volume_for_mounts(mgs_info["mounts"])
        target, target_mounts = ManagedMgs.create_for_volume(volume.id)
        target.uuid = str(uuid.uuid4())
        target.ha_label = "%s_%s" % (target.name, str(uuid.uuid4())[0:6])
        ObjectCache.add(ManagedTarget, target.managedtarget_ptr)
        for tm in target_mounts:
            ObjectCache.add(ManagedTargetMount, tm)
        lookup["mgt"][mgs_info["mounts"][0]["host"]] = target

    for fs_info in data["filesystems"]:
        fs_bundle = {
            "name": fs_info["name"],
            "mgt": {
                "id": lookup["mgt"][fs_info["mgs"]].id
            },
            "mdts": [],
            "osts": [],
            "conf_params": {},
        }

        for mdt_info in fs_info["mdts"]:
            volume = _create_volume_for_mounts(mdt_info["mounts"])

            # Although we create a volume for every MDT, only the first one is added
            # for now. Once MDT deletion is supported we can add them all and then
            # delete the extras; this code is structured with that case in mind.
            if not fs_bundle["mdts"]:
                fs_bundle["mdts"].append({
                    "volume_id": volume.id,
                    "conf_params": {}
                })

        for ost_info in fs_info["osts"]:
            volume = _create_volume_for_mounts(ost_info["mounts"])
            fs_bundle["osts"].append({
                "volume_id": volume.id,
                "conf_params": {}
            })

        fs, command = JobSchedulerClient.create_filesystem(fs_bundle)
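
Pieced together from the lookups in this function, load_filesystem_from_json expects data shaped roughly as sketched below. Every host address must already be registered in MockAgentRpc.mock_servers (for example by the step in Example #1); names, addresses, and device paths here are invented.

# A minimal sketch of the expected input; the keys come from the function body,
# the values are illustrative only.
data = {
    "hosts": [{"address": "lustre-server-1"}],
    "mgss": [{
        "mounts": [{"host": "lustre-server-1",
                    "device_node": "/dev/disk/by-id/scsi-mgt0",
                    "primary": True}],
    }],
    "filesystems": [{
        "name": "testfs",
        "mgs": "lustre-server-1",
        "mdts": [{"mounts": [{"host": "lustre-server-1",
                              "device_node": "/dev/disk/by-id/scsi-mdt0",
                              "primary": True}]}],
        "osts": [{"mounts": [{"host": "lustre-server-1",
                              "device_node": "/dev/disk/by-id/scsi-ost0",
                              "primary": True}]}],
    }],
}
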
Example #3
    def test_combined_mgs_mdt(self, invoke):
        invoke.return_value = '{"Ok": []}'

        def fixture_glob(g):
            return glob.glob(
                os.path.join(os.path.dirname(__file__),
                             "fixtures/test_combined_mgs_mdt", g))

        for path in fixture_glob("*_nid.txt"):
            address = os.path.basename(path).split("_")[0]
            with open(path) as f:
                nids = [Nid.split_nid_string(line.strip()) for line in f]
            synthetic_host(address, nids)

        host_data = {}
        for path in fixture_glob("*detect_scan_output.txt"):
            address = os.path.basename(path).split("_")[0]
            data = json.load(open(path))["result"]
            host_data[ManagedHost.objects.get(address=address)] = data

        # Simplified volume construction:
        #  * Assume all device paths referenced in detection exist
        #  * Assume all devices visible on all hosts
        #  * Assume device node paths are identical on all hosts
        devpaths = set()
        for host, data in host_data.items():
            for lt in data["local_targets"]:
                for d in lt["device_paths"]:
                    if d not in devpaths:
                        devpaths.add(d)
                        volume = Volume.objects.create()
                        # Create a VolumeNode for this device path on every host.
                        for volume_host in host_data.keys():
                            VolumeNode.objects.create(volume=volume,
                                                      path=d,
                                                      host=volume_host)

        def _detect_scan_device_plugin(host, command, args=None):
            self.assertIn(command, ["detect_scan", "device_plugin"])

            if command == "detect_scan":
                return host_data[host]

            raise AgentException(
                host, command, args,
                "No device plugin data available in unit tests")

        job = DetectTargetsJob.objects.create()

        with mock.patch("chroma_core.lib.job.Step.invoke_agent",
                        new=mock.Mock(side_effect=_detect_scan_device_plugin)):
            with mock.patch("chroma_core.models.Volume.storage_resource"):
                synchronous_run_job(job)

        self.assertEqual(ManagedFilesystem.objects.count(), 1)
        self.assertEqual(ManagedFilesystem.objects.get().name, "test18fs")

        self.assertEqual(ManagedOst.objects.count(), 8)

        for t in ManagedTarget.objects.all():
            self.assertEqual(t.immutable_state, True)

        def assertMount(target_name, primary_host, failover_hosts=()):
            target = ManagedTarget.objects.get(name=target_name)
            self.assertEqual(
                ManagedTargetMount.objects.filter(target=target).count(),
                1 + len(failover_hosts))
            self.assertEqual(
                ManagedTargetMount.objects.filter(
                    target=target,
                    primary=True,
                    host=ManagedHost.objects.get(
                        address=primary_host)).count(),
                1,
            )
            for h in failover_hosts:
                self.assertEqual(
                    ManagedTargetMount.objects.filter(
                        target=target,
                        primary=False,
                        host=ManagedHost.objects.get(address=h)).count(),
                    1,
                )

        assertMount("MGS", "kp-lustre-1-8-mgs-1")
        assertMount("test18fs-MDT0000", "kp-lustre-1-8-mgs-1")
        assertMount("test18fs-OST0000", "kp-lustre-1-8-oss-1")
        assertMount("test18fs-OST0001", "kp-lustre-1-8-oss-1")
        assertMount("test18fs-OST0002", "kp-lustre-1-8-oss-2")
        assertMount("test18fs-OST0003", "kp-lustre-1-8-oss-2")
        assertMount("test18fs-OST0004", "kp-lustre-1-8-oss-3")
        assertMount("test18fs-OST0005", "kp-lustre-1-8-oss-3")
        assertMount("test18fs-OST0006", "kp-lustre-1-8-oss-4")
        assertMount("test18fs-OST0007", "kp-lustre-1-8-oss-4")

        self.assertEqual(ManagedFilesystem.objects.get().state, "available")
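
For reference, the fixture directory this test globs over pairs each host address with two files. The layout below is inferred from the glob patterns and the parsing code above; the contents shown are invented examples, not the real fixtures.

# fixtures/test_combined_mgs_mdt/  (hypothetical listing)
#
#   kp-lustre-1-8-mgs-1_nid.txt
#       one LNet NID string per line, e.g. "10.0.0.1@tcp0"
#
#   kp-lustre-1-8-mgs-1_detect_scan_output.txt
#       JSON of the form {"result": {"local_targets": [{"device_paths": [...], ...}], ...}}
#
#   ...plus the same pair of files for kp-lustre-1-8-oss-1 through kp-lustre-1-8-oss-4.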