Example #1
    def prepare(self):
        from south.management.commands import patch_for_test_db_setup

        self.test_runner.setup_test_environment()
        # This is necessary to ensure that we use django.core.syncdb()
        # instead of south's hacked syncdb()
        patch_for_test_db_setup()
        self.old_db_config = self.test_runner.setup_databases()

        mgs_host = ManagedHost.objects.create(
                address="mgs",
                fqdn="mgs",
                nodename="mgs")
        mgs_vol = Volume.objects.create(label="mgs")
        VolumeNode.objects.create(host=mgs_host,
                                  path=uuid.uuid4(),
                                  primary=True,
                                  use=True,
                                  volume=mgs_vol)
        self.mgs, mounts = ManagedMgs.create_for_volume(mgs_vol.pk, name="MGS")
        self.fs_entity = ManagedFilesystem.objects.create(name=options.fsname,
                                                          mgs=self.mgs)
        self.oss_list = self.prepare_oss_list()
        self.mds_list = self.prepare_mds_list()

        if not options.no_precreate:
            self.precreate_stats()
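Every example on this page follows the same calling convention: ManagedMgs.create_for_volume() returns a (target, target_mounts) tuple, and tests typically register both halves with ObjectCache before driving state transitions. A minimal sketch of that pattern, assuming volume comes from a test helper such as synthetic_volume_full(host):

    # Sketch only: `volume` is an assumed test fixture, not taken from this example.
    mgt, target_mounts = ManagedMgs.create_for_volume(volume.id, name="MGS")
    ObjectCache.add(ManagedTarget, mgt.managedtarget_ptr)   # register the target itself
    for tm in target_mounts:                                # ...and each of its mounts
        ObjectCache.add(ManagedTargetMount, tm)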
Example #2
    def test_two_nids_no_failover(self):
        mgs0 = self._host_with_nids("primary-mgs-twonid")
        other = self._host_with_nids("othernode")
        mgt, _ = ManagedMgs.create_for_volume(synthetic_volume_full(mgs0).id, name="MGS")
        fs = self._create_file_system(mgt, other)

        self.assertEqual(mgt.nids(), ((u"1.2.3.4@tcp0", u"4.3.2.1@tcp1"),))
        self.assertEqual(fs.mgs_spec(), u"1.2.3.4@tcp0,4.3.2.1@tcp1")
Example #3
    def setUp(self):
        super(TestTargetTransitions, self).setUp()

        self.mgt, mgt_tms = ManagedMgs.create_for_volume(self._test_lun(self.host).id, name="MGS")
        ObjectCache.add(ManagedTarget, self.mgt.managedtarget_ptr)
        for tm in mgt_tms:
            ObjectCache.add(ManagedTargetMount, tm)
        self.assertEqual(ManagedMgs.objects.get(pk=self.mgt.pk).state, "unformatted")
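Note the assertion at the end: a freshly created ManagedMgs begins life in the "unformatted" state, a detail that Examples #7 and #9 below rely on as well.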
Example #4
    def test_one_nid_no_failover(self):
        mgs0 = self._host_with_nids('primary-mgs')
        other = self._host_with_nids('othernode')
        mgt, _ = ManagedMgs.create_for_volume(synthetic_volume_full(mgs0).id, name="MGS")
        fs = self._create_file_system(mgt, other)

        self.assertEqual(mgt.nids(), ((u'1.2.3.4@tcp0',),))
        self.assertEqual(fs.mgs_spec(), u'1.2.3.4@tcp0')
Example #5
    def test_one_nid_with_failover(self):
        mgs0 = self._host_with_nids("primary-mgs")
        mgs1 = self._host_with_nids("failover-mgs")
        other = self._host_with_nids("othernode")
        mgt, _ = ManagedMgs.create_for_volume(synthetic_volume_full(mgs0, secondary_hosts=[mgs1]).id, name="MGS")
        fs = self._create_file_system(mgt, other)

        self.assertEqual(mgt.nids(), ((u"1.2.3.4@tcp0",), (u"1.2.3.5@tcp5",)))
        self.assertEqual(fs.mgs_spec(), u"1.2.3.4@tcp0:1.2.3.5@tcp5")
Example #6
    def test_two_nids_with_failover(self):
        mgs0 = self._host_with_nids('primary-mgs-twonid')
        mgs1 = self._host_with_nids('failover-mgs-twonid')
        other = self._host_with_nids('othernode')
        mgt, _ = ManagedMgs.create_for_volume(synthetic_volume_full(mgs0, secondary_hosts=[mgs1]).id, name="MGS")
        fs = self._create_file_system(mgt, other)

        self.assertEqual(mgt.nids(), ((u'1.2.3.4@tcp0', u'4.3.2.1@tcp1'), (u'1.2.3.5@tcp5', u'4.3.2.2@tcp1')))
        self.assertEqual(fs.mgs_spec(), u'1.2.3.4@tcp0,4.3.2.1@tcp1:1.2.3.5@tcp5,4.3.2.2@tcp1')
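Examples #2 and #4 through #6 together pin down the formatting rule that connects nids() to mgs_spec(): the NIDs of a single host are joined with commas, and failover hosts are joined with colons. A standalone sketch of that observable rule (illustrative only, not the actual ManagedFilesystem implementation):

    def format_mgs_spec(nids):
        # `nids` is a tuple of per-host NID tuples, as returned by mgt.nids():
        # comma-join the NIDs of each host, colon-join across failover hosts.
        return ":".join(",".join(host_nids) for host_nids in nids)

    assert format_mgs_spec(((u'1.2.3.4@tcp0', u'4.3.2.1@tcp1'),
                            (u'1.2.3.5@tcp5', u'4.3.2.2@tcp1'))) == \
        u'1.2.3.4@tcp0,4.3.2.1@tcp1:1.2.3.5@tcp5,4.3.2.2@tcp1'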
Example #7
    def setUp(self):
        super(TestSharedTarget, self).setUp()

        self.mgt, tms = ManagedMgs.create_for_volume(
            self._test_lun(ManagedHost.objects.get(address='pair1'),
                           secondary_hosts=[ManagedHost.objects.get(address='pair2')]).id,
            name="MGS")

        ObjectCache.add(ManagedTarget, self.mgt.managedtarget_ptr)
        for tm in tms:
            ObjectCache.add(ManagedTargetMount, tm)
        self.assertEqual(
            ManagedMgs.objects.get(pk=self.mgt.pk).state, 'unformatted')
Example #8
    def create_fake_filesystem_client(self, active=False):
        from chroma_core.models import ManagedMgs, ManagedMdt, ManagedOst, ManagedFilesystem, LustreClientMount
        from tests.unit.chroma_core.helpers import synthetic_volume_full

        mgt, _ = ManagedMgs.create_for_volume(synthetic_volume_full(self.server).id, name="MGS")
        fs = ManagedFilesystem.objects.create(mgs=mgt, name='testfs')
        ObjectCache.add(ManagedFilesystem, fs)
        ManagedMdt.create_for_volume(synthetic_volume_full(self.server).id, filesystem=fs)
        ManagedOst.create_for_volume(synthetic_volume_full(self.server).id, filesystem=fs)
        state = 'mounted' if active else 'unmounted'
        self.mount = LustreClientMount.objects.create(host=self.worker, filesystem=fs, state=state)

        ObjectCache.add(LustreClientMount, self.mount)
Example #9
    def test_failing_job(self):
        mgt, tms = ManagedMgs.create_for_volume(self._test_lun(self.host).id, name="MGS")
        ObjectCache.add(ManagedTarget, mgt.managedtarget_ptr)
        for tm in tms:
            ObjectCache.add(ManagedTargetMount, tm)

        try:
            MockAgentRpc.succeed = False
            # This is to check that the scheduler doesn't run past the failed job (like in HYD-1572)
            self.set_and_assert_state(mgt.managedtarget_ptr, 'mounted', check=False)
            mgt = self.assertState(mgt, 'unformatted')
        finally:
            MockAgentRpc.succeed = True
            mgt.managedtarget_ptr = self.set_and_assert_state(mgt.managedtarget_ptr, 'mounted')
Example #10
    def create_simple_filesystem(self, host):
        from chroma_core.models import ManagedMgs, ManagedMdt, ManagedOst, ManagedFilesystem, ManagedTarget, ManagedTargetMount
        self.mgt, _ = ManagedMgs.create_for_volume(synthetic_volume_full(host).id, name="MGS")
        self.fs = ManagedFilesystem.objects.create(mgs=self.mgt, name="testfs")
        ObjectCache.add(ManagedFilesystem, self.fs)
        ObjectCache.add(ManagedTarget, ManagedTarget.objects.get(id=self.mgt.id))

        self.mdt, _ = ManagedMdt.create_for_volume(synthetic_volume_full(host).id, filesystem=self.fs)
        self.ost, _ = ManagedOst.create_for_volume(synthetic_volume_full(host).id, filesystem=self.fs)
        ObjectCache.add(ManagedTarget, ManagedTarget.objects.get(id=self.mdt.id))
        ObjectCache.add(ManagedTarget, ManagedTarget.objects.get(id=self.ost.id))
        ObjectCache.add(ManagedTargetMount, ManagedTargetMount.objects.get(target_id=self.mgt.id))
        ObjectCache.add(ManagedTargetMount, ManagedTargetMount.objects.get(target_id=self.mdt.id))
        ObjectCache.add(ManagedTargetMount, ManagedTargetMount.objects.get(target_id=self.ost.id))
Example #11
    def test_mgs_nid_change(self):
        mgs = synthetic_host("mgs")
        mds = synthetic_host("mds")
        oss = synthetic_host("oss")

        from chroma_core.models import (
            ManagedMgs,
            ManagedMdt,
            ManagedOst,
            ManagedFilesystem,
            ManagedTarget,
            ManagedTargetMount,
        )

        self.mgt, mgt_tms = ManagedMgs.create_for_volume(
            synthetic_volume_full(mgs).id, name="MGS")
        self.fs = ManagedFilesystem.objects.create(mgs=self.mgt, name="testfs")
        self.mdt, mdt_tms = ManagedMdt.create_for_volume(
            synthetic_volume_full(mds).id, filesystem=self.fs)
        self.ost, ost_tms = ManagedOst.create_for_volume(
            synthetic_volume_full(oss).id, filesystem=self.fs)
        ObjectCache.add(ManagedFilesystem, self.fs)
        for target in [self.mgt, self.ost, self.mdt]:
            ObjectCache.add(ManagedTarget, target.managedtarget_ptr)
        for tm in chain(mgt_tms, mdt_tms, ost_tms):
            ObjectCache.add(ManagedTargetMount, tm)

        self.fs = self.set_and_assert_state(self.fs, "available")

        self.mock_servers["mgs"]["nids"] = [Nid.Nid("192.168.0.99", "tcp", 0)]
        self.assertNidsCorrect(mgs)

        JobSchedulerClient.command_run_jobs([{
            "class_name": "UpdateNidsJob",
            "args": {
                "hosts": [api.get_resource_uri(mgs)]
            }
        }], "Test update nids")
        self.drain_progress()
        # The -3 looks past the start/stop that happens after writeconf
        self.assertEqual(MockAgentRpc.host_calls[mgs][-3][0],
                         "writeconf_target")
        self.assertEqual(MockAgentRpc.host_calls[mds][-3][0],
                         "writeconf_target")
        self.assertEqual(MockAgentRpc.host_calls[oss][-3][0],
                         "writeconf_target")
        self.assertState(self.fs, "stopped")
Example #12
    def setUp(self):
        ChromaApiTestCase.setUp(self)
        fixture = collections.defaultdict(list)
        for line in open(
                os.path.join(os.path.dirname(__file__),
                             "fixtures/stats.sjson")):
            data = json.loads(line.strip())
            fixture[data["type"], data["id"]].append(
                (data["time"], data["data"]))
        # create gaps in data to test alignment
        for key in (min(fixture), max(fixture)):
            del fixture[key][-1]
        self.hosts = [
            synthetic_host("myserver{0:d}".format(n)) for n in range(2)
        ]
        self.mgt, mounts = ManagedMgs.create_for_volume(
            synthetic_volume_full(self.hosts[0]).id, name="MGS")
        self.fs = ManagedFilesystem.objects.create(mgs=self.mgt, name="testfs")
        ObjectCache.add(ManagedFilesystem, self.fs)
        self.mdt, mounts = ManagedMdt.create_for_volume(
            synthetic_volume_full(self.hosts[0]).id, filesystem=self.fs)
        ObjectCache.add(ManagedTarget, self.mdt.managedtarget_ptr)
        for tm in mounts:
            ObjectCache.add(ManagedTargetMount, tm)

        self.osts = [
            ManagedOst.create_for_volume(synthetic_volume_full(self.hosts[1]).id,
                                         filesystem=self.fs)[0]
            for n in range(2)
        ]
        # store fixture data with corresponding targets
        for target, key in zip(self.hosts + [self.mdt] + self.osts,
                               sorted(fixture)):
            store = metrics.MetricStore.new(target)
            kwargs = {"jobid_var": "procname_uid"} if isinstance(target, ManagedOst) else {}
            for timestamp, value in fixture[key]:
                Stats.insert(store.serialize(value, timestamp, **kwargs))
        for model in Stats:
            model.cache.clear()
Example #13
    def _create_filesystem_n_osts(self, n_targets):
        assert n_targets >= 3
        ManagedFilesystem.objects.update(not_deleted=None)
        ManagedTarget.objects.update(not_deleted=None)
        ManagedTargetMount.objects.update(not_deleted=None)
        self._create_n_volumes_host_pairs(n_targets)
        assert ManagedTarget.objects.count() == 0

        fs = None
        for i, volume in enumerate(Volume.objects.all()):
            if i == 0:
                mgt, mounts = ManagedMgs.create_for_volume(volume.id)
                fs = ManagedFilesystem.objects.create(name="foo", mgs=mgt)
                ObjectCache.add(ManagedFilesystem, fs)
            elif i == 1:
                ObjectCache.add(
                    ManagedTarget, ManagedMdt.create_for_volume(volume.id, filesystem=fs)[0].managedtarget_ptr
                )
            else:
                ObjectCache.add(
                    ManagedTarget, ManagedOst.create_for_volume(volume.id, filesystem=fs)[0].managedtarget_ptr
                )
Example #14
    def create_simple_filesystem(self, host, start=True):
        from chroma_core.models import ManagedMgs, ManagedMdt, ManagedOst, ManagedFilesystem
        self.mgt, mgt_tms = ManagedMgs.create_for_volume(
            self._test_lun(host).id, name="MGS")
        self.fs = ManagedFilesystem.objects.create(mgs=self.mgt, name="testfs")
        ObjectCache.add(ManagedFilesystem, self.fs)

        self.mdt, mdt_tms = ManagedMdt.create_for_volume(
            self._test_lun(host).id, filesystem=self.fs)
        self.ost, ost_tms = ManagedOst.create_for_volume(
            self._test_lun(host).id, filesystem=self.fs)

        for target in [self.mgt, self.ost, self.mdt]:
            ObjectCache.add(ManagedTarget, target.managedtarget_ptr)
        for tm in chain(mgt_tms, mdt_tms, ost_tms):
            ObjectCache.add(ManagedTargetMount, tm)

        if start:
            self.fs = self.set_and_assert_state(self.fs, 'available')
            self.mgt = freshen(self.mgt)
            self.mdt = freshen(self.mdt)
            self.ost = freshen(self.ost)
Example #15
def load_filesystem_from_json(data):
    # Since this is only ever used for the behave tests, and the behave tests
    # are slated to be decommissioned at some point, we're just going to
    # abandon all pretense that we might be loading a non-synthetic cluster.
    from chroma_core.services.job_scheduler.job_scheduler_client import JobSchedulerClient
    from tests.unit.chroma_core.helpers import synthetic_volume
    from chroma_core.models import ManagedMgs, VolumeNode

    from chroma_core.lib.cache import ObjectCache
    from chroma_core.models import ManagedHost, ManagedTarget, ManagedTargetMount
    from chroma_core.models import Nid
    from tests.unit.chroma_core.helpers import synthetic_host

    lookup = defaultdict(dict)

    for host_info in data["hosts"]:
        from tests.unit.chroma_core.helpers import MockAgentRpc

        mock_host_info = MockAgentRpc.mock_servers[host_info["address"]]
        # host, command = JobSchedulerClient.create_host(mock_host_info['fqdn'], mock_host_info['nodename'], ['manage_targets'], address = host_info['address'])
        nids = [Nid.split_nid_string(n) for n in mock_host_info["nids"]]

        # update mock_servers with list of Nid objects
        MockAgentRpc.mock_servers[host_info["address"]]["nids"] = nids

        host = synthetic_host(mock_host_info["address"],
                              nids=nids,
                              fqdn=mock_host_info["fqdn"],
                              nodename=mock_host_info["nodename"])
        ObjectCache.add(ManagedHost, host)
        host.state = "managed"
        host.save()
        lookup["hosts"][host_info["address"]] = host

    def _create_volume_for_mounts(mounts):
        # The test data doesn't give us a serial, so mung one out of the device paths
        # on the basis that they're iSCSI-style
        serial = mounts[0]["device_node"].split("/")[-1]
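        # Illustrative values only: a device_node of "/fake/path/serial-abc123"
        # would yield the serial "serial-abc123".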
        volume = synthetic_volume(serial=serial)
        for mount in mounts:
            VolumeNode.objects.create(host=lookup["hosts"][mount["host"]],
                                      path=mount["device_node"],
                                      primary=mount["primary"],
                                      volume=volume)
        return volume

    for mgs_info in data["mgss"]:
        volume = _create_volume_for_mounts(mgs_info["mounts"])
        target, target_mounts = ManagedMgs.create_for_volume(volume.id)
        target.uuid = str(uuid.uuid4())
        target.ha_label = "%s_%s" % (target.name, str(uuid.uuid4())[0:6])
        ObjectCache.add(ManagedTarget, target.managedtarget_ptr)
        for tm in target_mounts:
            ObjectCache.add(ManagedTargetMount, tm)
        lookup["mgt"][mgs_info["mounts"][0]["host"]] = target

    for fs_info in data["filesystems"]:
        fs_bundle = {
            "name": fs_info["name"],
            "mgt": {
                "id": lookup["mgt"][fs_info["mgs"]].id
            },
            "mdts": [],
            "osts": [],
            "conf_params": {},
        }

        for mdt_info in fs_info["mdts"]:
            volume = _create_volume_for_mounts(mdt_info["mounts"])

            # Although we create a volume for every MDT, initially we only add a single
            # one. Once deletion of MDTs is possible we can add them all, then delete
            # before adding; this code is structured with that fixed case in mind.
            if not fs_bundle["mdts"]:
                fs_bundle["mdts"].append({
                    "volume_id": volume.id,
                    "conf_params": {}
                })

        for ost_info in fs_info["osts"]:
            volume = _create_volume_for_mounts(ost_info["mounts"])
            fs_bundle["osts"].append({
                "volume_id": volume.id,
                "conf_params": {}
            })

        fs, command = JobSchedulerClient.create_filesystem(fs_bundle)