def create_fake_filesystem_client(self, active=False):
        """Build a synthetic filesystem (MGT, MDT, OST on self.server) plus a
        Lustre client mount on self.worker, and prime the ObjectCache.

        The client mount starts in state 'mounted' when *active* is True,
        otherwise 'unmounted'.  The mount is stored on self.mount.
        """
        from chroma_core.models import ManagedMgs, ManagedMdt, ManagedOst, ManagedFilesystem, LustreClientMount
        from tests.unit.chroma_core.helpers import synthetic_volume_full

        def _new_volume_id():
            # Every target gets its own synthetic volume on the test server.
            return synthetic_volume_full(self.server).id

        mgt, _ = ManagedMgs.create_for_volume(_new_volume_id(), name="MGS")
        fs = ManagedFilesystem.objects.create(mgs=mgt, name='testfs')
        ObjectCache.add(ManagedFilesystem, fs)

        ManagedMdt.create_for_volume(_new_volume_id(), filesystem=fs)
        ManagedOst.create_for_volume(_new_volume_id(), filesystem=fs)

        if active:
            state = 'mounted'
        else:
            state = 'unmounted'
        self.mount = LustreClientMount.objects.create(
            host=self.worker, filesystem=fs, state=state)

        ObjectCache.add(LustreClientMount, self.mount)
# Esempio n. 2 (Example 2) — scraped-page separator, score: 0
    def _create_filesystem_n_osts(self, n_targets):
        """Reset target/filesystem tables, then create one MGT, one MDT and
        (n_targets - 2) OSTs — one target per synthetic volume."""
        assert n_targets >= 3
        # Soft-delete leftovers from earlier tests so the counts start clean.
        for model in (ManagedFilesystem, ManagedTarget, ManagedTargetMount):
            model.objects.update(not_deleted=None)
        self._create_n_volumes_host_pairs(n_targets)
        assert ManagedTarget.objects.count() == 0

        fs = None
        for index, vol in enumerate(Volume.objects.all()):
            if index == 0:
                # The first volume hosts the MGS; the filesystem hangs off it.
                mgt, _mounts = ManagedMgs.create_for_volume(vol.id)
                fs = ManagedFilesystem.objects.create(name="foo", mgs=mgt)
                ObjectCache.add(ManagedFilesystem, fs)
            else:
                # The second volume becomes the MDT; the rest become OSTs.
                target_klass = ManagedMdt if index == 1 else ManagedOst
                created, _mounts = target_klass.create_for_volume(vol.id, filesystem=fs)
                ObjectCache.add(ManagedTarget, created.managedtarget_ptr)
# Esempio n. 3 (Example 3) — scraped-page separator, score: 0
    def create_simple_filesystem(self, host, start=True):
        """Create a minimal 'testfs' (MGT + MDT + OST) on *host*, prime the
        ObjectCache, and optionally drive the filesystem to 'available'."""
        from chroma_core.models import ManagedMgs, ManagedMdt, ManagedOst, ManagedFilesystem

        self.mgt, mgt_mounts = ManagedMgs.create_for_volume(
            self._test_lun(host).id, name="MGS")
        self.fs = ManagedFilesystem.objects.create(mgs=self.mgt, name="testfs")
        ObjectCache.add(ManagedFilesystem, self.fs)

        self.mdt, mdt_mounts = ManagedMdt.create_for_volume(
            self._test_lun(host).id, filesystem=self.fs)
        self.ost, ost_mounts = ManagedOst.create_for_volume(
            self._test_lun(host).id, filesystem=self.fs)

        # Cache every target and every target mount so later lookups don't
        # need to hit the database.
        for tgt in (self.mgt, self.ost, self.mdt):
            ObjectCache.add(ManagedTarget, tgt.managedtarget_ptr)
        for mount in chain(mgt_mounts, mdt_mounts, ost_mounts):
            ObjectCache.add(ManagedTargetMount, mount)

        if start:
            # Starting re-saves the objects, so re-read fresh copies.
            self.fs = self.set_and_assert_state(self.fs, 'available')
            self.mgt = freshen(self.mgt)
            self.mdt = freshen(self.mdt)
            self.ost = freshen(self.ost)
# Esempio n. 4 (Example 4) — scraped-page separator, score: 0
    def test_mgs_nid_change(self):
        """Changing the MGS NID and running UpdateNidsJob must issue a
        writeconf on every server's target and leave the filesystem stopped."""
        mgs = synthetic_host('mgs')
        mds = synthetic_host('mds')
        oss = synthetic_host('oss')

        from chroma_core.models import ManagedMgs, ManagedMdt, ManagedOst, ManagedFilesystem, ManagedTarget, ManagedTargetMount

        # One target per host: MGT on mgs, MDT on mds, OST on oss.
        self.mgt, mgt_mounts = ManagedMgs.create_for_volume(
            synthetic_volume_full(mgs).id, name="MGS")
        self.fs = ManagedFilesystem.objects.create(mgs=self.mgt, name="testfs")
        self.mdt, mdt_mounts = ManagedMdt.create_for_volume(
            synthetic_volume_full(mds).id, filesystem=self.fs)
        self.ost, ost_mounts = ManagedOst.create_for_volume(
            synthetic_volume_full(oss).id, filesystem=self.fs)

        ObjectCache.add(ManagedFilesystem, self.fs)
        for tgt in (self.mgt, self.ost, self.mdt):
            ObjectCache.add(ManagedTarget, tgt.managedtarget_ptr)
        for mount in chain(mgt_mounts, mdt_mounts, ost_mounts):
            ObjectCache.add(ManagedTargetMount, mount)

        self.fs = self.set_and_assert_state(self.fs, 'available')

        # Simulate the MGS changing its NID, then push the update through.
        self.mock_servers['mgs']['nids'] = [Nid.Nid('192.168.0.99', 'tcp', 0)]
        self.assertNidsCorrect(mgs)

        JobSchedulerClient.command_run_jobs(
            [{'class_name': 'UpdateNidsJob',
              'args': {'hosts': [api.get_resource_uri(mgs)]}}],
            "Test update nids")
        self.drain_progress()

        # The -3 looks past the start/stop that happens after writeconf
        for server in (mgs, mds, oss):
            self.assertEqual(MockAgentRpc.host_calls[server][-3][0],
                             "writeconf_target")
        self.assertState(self.fs, 'stopped')
# Esempio n. 5 (Example 5) — scraped-page separator, score: 0
    def create_simple_filesystem(self, host):
        """Create a minimal 'testfs' (MGT + MDT + OST) on *host* and add the
        filesystem, its targets and their mounts to the ObjectCache."""
        from chroma_core.models import ManagedMgs, ManagedMdt, ManagedOst, ManagedFilesystem, ManagedTargetMount

        def _cache_target(target):
            # Cache the base ManagedTarget row for a freshly created target.
            ObjectCache.add(ManagedTarget,
                            ManagedTarget.objects.get(id=target.id))

        def _cache_mount(target):
            # Cache the single target-mount belonging to the target.
            ObjectCache.add(ManagedTargetMount,
                            ManagedTargetMount.objects.get(target_id=target.id))

        self.mgt, _ = ManagedMgs.create_for_volume(
            synthetic_volume_full(host).id, name="MGS")
        self.fs = ManagedFilesystem.objects.create(mgs=self.mgt, name="testfs")
        ObjectCache.add(ManagedFilesystem, self.fs)
        _cache_target(self.mgt)

        self.mdt, _ = ManagedMdt.create_for_volume(
            synthetic_volume_full(host).id, filesystem=self.fs)
        self.ost, _ = ManagedOst.create_for_volume(
            synthetic_volume_full(host).id, filesystem=self.fs)
        _cache_target(self.mdt)
        _cache_target(self.ost)
        for target in (self.mgt, self.mdt, self.ost):
            _cache_mount(target)
# Esempio n. 6 (Example 6) — scraped-page separator, score: 0
def load_filesystem_from_json(data):
    """Populate a synthetic cluster from a JSON-style fixture dict.

    *data* must carry "hosts", "mgss" and "filesystems" lists (the keys read
    below).  Hosts are created with synthetic_host rather than the real
    provisioning path; MGTs are created directly, while MDTs/OSTs are handed
    to JobSchedulerClient.create_filesystem as a request bundle.
    """
    # Since this is only ever used for the behave tests, and the behave tests
    # are slated to be decommissioned at some point, we're just going to
    # abandon all pretense that we might be loading a non-synthetic cluster.
    from chroma_core.services.job_scheduler.job_scheduler_client import JobSchedulerClient
    from tests.unit.chroma_core.helpers import synthetic_volume
    from chroma_core.models import ManagedMgs, VolumeNode

    from chroma_core.lib.cache import ObjectCache
    from chroma_core.models import ManagedHost, ManagedTarget, ManagedTargetMount
    from chroma_core.models import Nid
    from tests.unit.chroma_core.helpers import synthetic_host

    # lookup["hosts"][address] -> ManagedHost, lookup["mgt"][address] -> ManagedMgs
    lookup = defaultdict(dict)

    for host_info in data["hosts"]:
        from tests.unit.chroma_core.helpers import MockAgentRpc

        mock_host_info = MockAgentRpc.mock_servers[host_info["address"]]
        # host, command = JobSchedulerClient.create_host(mock_host_info['fqdn'], mock_host_info['nodename'], ['manage_targets'], address = host_info['address'])
        nids = [Nid.split_nid_string(n) for n in mock_host_info["nids"]]

        # update mock_servers with list of Nid objects
        MockAgentRpc.mock_servers[host_info["address"]]["nids"] = nids

        host = synthetic_host(mock_host_info["address"],
                              nids=nids,
                              fqdn=mock_host_info["fqdn"],
                              nodename=mock_host_info["nodename"])
        ObjectCache.add(ManagedHost, host)
        # Skip the provisioning state machine: mark the host managed directly.
        host.state = "managed"
        host.save()
        lookup["hosts"][host_info["address"]] = host

    def _create_volume_for_mounts(mounts):
        # The test data doesn't give us a serial, so mung one out of the device paths
        # on the basis that they're iSCSI-style
        serial = mounts[0]["device_node"].split("/")[-1]
        volume = synthetic_volume(serial=serial)
        for mount in mounts:
            VolumeNode.objects.create(host=lookup["hosts"][mount["host"]],
                                      path=mount["device_node"],
                                      primary=mount["primary"],
                                      volume=volume)
        return volume

    for mgs_info in data["mgss"]:
        volume = _create_volume_for_mounts(mgs_info["mounts"])
        target, target_mounts = ManagedMgs.create_for_volume(volume.id)
        # Fake the identifiers a real format step would assign.
        target.uuid = uuid.uuid4().__str__()
        target.ha_label = "%s_%s" % (target.name, uuid.uuid4().__str__()[0:6])
        ObjectCache.add(ManagedTarget, target.managedtarget_ptr)
        for tm in target_mounts:
            ObjectCache.add(ManagedTargetMount, tm)
        # Keyed by the primary (first) mount's host address.
        lookup["mgt"][mgs_info["mounts"][0]["host"]] = target

    for fs_info in data["filesystems"]:
        # Request payload for JobSchedulerClient.create_filesystem below.
        fs_bundle = {
            "name": fs_info["name"],
            "mgt": {
                "id": lookup["mgt"][fs_info["mgs"]].id
            },
            "mdts": [],
            "osts": [],
            "conf_params": {},
        }

        for mdt_info in fs_info["mdts"]:
            volume = _create_volume_for_mounts(mdt_info["mounts"])

            # Although we create multiple volumes for the mdt, initially only add a single volume, once delete of MDT
            # is possible we can add them all and then delete before adding. This code is structured for that fixed case.
            if not fs_bundle["mdts"]:
                fs_bundle["mdts"].append({
                    "volume_id": volume.id,
                    "conf_params": {}
                })

        for ost_info in fs_info["osts"]:
            volume = _create_volume_for_mounts(ost_info["mounts"])
            fs_bundle["osts"].append({
                "volume_id": volume.id,
                "conf_params": {}
            })

        fs, command = JobSchedulerClient.create_filesystem(fs_bundle)