def _create_file_system(self, mgt, other):
    """Create a 'testfs' filesystem on *mgt*, with one MDT and one OST on *other*.

    Returns the new ManagedFilesystem after registering it in the ObjectCache.
    """
    filesystem = ManagedFilesystem.objects.create(mgs=mgt, name="testfs")
    ObjectCache.add(ManagedFilesystem, filesystem)
    # MDT first, then OST — each on its own synthetic volume.
    for target_klass in (ManagedMdt, ManagedOst):
        target_klass.create_for_volume(synthetic_volume_full(other).id, filesystem=filesystem)
    return filesystem
def create_fake_filesystem_client(self, active=False):
    """Build a synthetic filesystem on self.server and a client mount on self.worker.

    The mount is created 'mounted' when *active* is truthy, otherwise 'unmounted';
    the mount is stored on self.mount and added to the ObjectCache.
    """
    from chroma_core.models import ManagedMgs, ManagedMdt, ManagedOst, ManagedFilesystem, LustreClientMount
    from tests.unit.chroma_core.helpers import synthetic_volume_full

    mgt, _ = ManagedMgs.create_for_volume(synthetic_volume_full(self.server).id, name="MGS")
    fs = ManagedFilesystem.objects.create(mgs=mgt, name="testfs")
    ObjectCache.add(ManagedFilesystem, fs)
    ManagedMdt.create_for_volume(synthetic_volume_full(self.server).id, filesystem=fs)
    ManagedOst.create_for_volume(synthetic_volume_full(self.server).id, filesystem=fs)

    self.mount = LustreClientMount.objects.create(
        host=self.worker,
        filesystem=fs,
        state="mounted" if active else "unmounted",
    )
    ObjectCache.add(LustreClientMount, self.mount)
def test_two_concurrent_removes(self):
    """
    Test that we can concurrently remove two filesystems which depend on the same mgt
    """
    second_fs = ManagedFilesystem.objects.create(mgs=self.mgt, name="testfs2")
    ObjectCache.add(ManagedFilesystem, second_fs)
    mdt2, mdt_tms = ManagedMdt.create_for_volume(self._test_lun(self.host).id, filesystem=second_fs)
    ost2, ost_tms = ManagedOst.create_for_volume(self._test_lun(self.host).id, filesystem=second_fs)
    for target in (mdt2, ost2):
        ObjectCache.add(ManagedTarget, target.managedtarget_ptr)
    for tm in chain(mdt_tms, ost_tms):
        ObjectCache.add(ManagedTargetMount, tm)

    # Bring both filesystems up, then queue both removals before completing.
    self.fs = self.set_and_assert_state(self.fs, "available")
    second_fs = self.set_and_assert_state(second_fs, "available")
    self.set_state_delayed([(self.fs, "removed")])
    self.set_state_delayed([(second_fs, "removed")])
    self.set_state_complete()

    # Both filesystems must be gone from the database.
    for doomed in (self.fs, second_fs):
        with self.assertRaises(ManagedFilesystem.DoesNotExist):
            ManagedFilesystem.objects.get(pk=doomed.pk)
def create_simple_filesystem(self, host):
    """Create MGS, 'testfs', MDT and OST on *host*, priming the ObjectCache.

    Targets are stored on self.mgt / self.fs / self.mdt / self.ost.
    """
    from chroma_core.models import ManagedMgs, ManagedMdt, ManagedOst, ManagedFilesystem, ManagedTargetMount

    self.mgt, _ = ManagedMgs.create_for_volume(synthetic_volume_full(host).id, name="MGS")
    self.fs = ManagedFilesystem.objects.create(mgs=self.mgt, name="testfs")
    ObjectCache.add(ManagedFilesystem, self.fs)
    ObjectCache.add(ManagedTarget, ManagedTarget.objects.get(id=self.mgt.id))

    self.mdt, _ = ManagedMdt.create_for_volume(synthetic_volume_full(host).id, filesystem=self.fs)
    self.ost, _ = ManagedOst.create_for_volume(synthetic_volume_full(host).id, filesystem=self.fs)

    # Cache the freshly-created targets and all three target mounts.
    for target in (self.mdt, self.ost):
        ObjectCache.add(ManagedTarget, ManagedTarget.objects.get(id=target.id))
    for target in (self.mgt, self.mdt, self.ost):
        ObjectCache.add(ManagedTargetMount, ManagedTargetMount.objects.get(target_id=target.id))
def test_mgs_nid_change(self):
    """Changing the MGS NID runs writeconf on every server and leaves the fs stopped."""
    # Three synthetic hosts, one per target role.
    mgs = synthetic_host("mgs")
    mds = synthetic_host("mds")
    oss = synthetic_host("oss")
    from chroma_core.models import (
        ManagedMgs,
        ManagedMdt,
        ManagedOst,
        ManagedFilesystem,
        ManagedTarget,
        ManagedTargetMount,
    )

    # Build a full MGS/MDT/OST filesystem, one target per host.
    self.mgt, mgt_tms = ManagedMgs.create_for_volume(synthetic_volume_full(mgs).id, name="MGS")
    self.fs = ManagedFilesystem.objects.create(mgs=self.mgt, name="testfs")
    self.mdt, mdt_tms = ManagedMdt.create_for_volume(synthetic_volume_full(mds).id, filesystem=self.fs)
    self.ost, ost_tms = ManagedOst.create_for_volume(synthetic_volume_full(oss).id, filesystem=self.fs)
    ObjectCache.add(ManagedFilesystem, self.fs)
    for target in [self.mgt, self.ost, self.mdt]:
        ObjectCache.add(ManagedTarget, target.managedtarget_ptr)
    for tm in chain(mgt_tms, mdt_tms, ost_tms):
        ObjectCache.add(ManagedTargetMount, tm)
    self.fs = self.set_and_assert_state(self.fs, "available")

    # Simulate the MGS server reporting a new NID, then run the update job.
    self.mock_servers["mgs"]["nids"] = [Nid.Nid("192.168.0.99", "tcp", 0)]
    self.assertNidsCorrect(mgs)

    JobSchedulerClient.command_run_jobs(
        [{"class_name": "UpdateNidsJob", "args": {"hosts": [api.get_resource_uri(mgs)]}}], "Test update nids"
    )
    self.drain_progress()
    # The -3 looks past the start/stop that happens after writeconf
    self.assertEqual(MockAgentRpc.host_calls[mgs][-3][0], "writeconf_target")
    self.assertEqual(MockAgentRpc.host_calls[mds][-3][0], "writeconf_target")
    self.assertEqual(MockAgentRpc.host_calls[oss][-3][0], "writeconf_target")
    # Writeconf leaves the filesystem down; it is not restarted automatically.
    self.assertState(self.fs, "stopped")
def setUp(self):
    """Load stats fixtures, build a synthetic filesystem, and seed the Stats store.

    Fixture lines are sjson records keyed by (type, id); the first and last
    series each lose their final sample so alignment across gaps is exercised.
    """
    ChromaApiTestCase.setUp(self)
    fixture = collections.defaultdict(list)
    # FIX: the original called open() without ever closing the handle; use a
    # context manager so the file is closed deterministically.
    fixture_path = os.path.join(os.path.dirname(__file__), "fixtures/stats.sjson")
    with open(fixture_path) as fixture_file:
        for line in fixture_file:
            data = json.loads(line.strip())
            fixture[data["type"], data["id"]].append((data["time"], data["data"]))
    # create gaps in data to test alignment
    for key in (min(fixture), max(fixture)):
        del fixture[key][-1]

    self.hosts = [synthetic_host("myserver{0:d}".format(n)) for n in range(2)]
    self.mgt, mounts = ManagedMgs.create_for_volume(synthetic_volume_full(self.hosts[0]).id, name="MGS")
    self.fs = ManagedFilesystem.objects.create(mgs=self.mgt, name="testfs")
    ObjectCache.add(ManagedFilesystem, self.fs)
    self.mdt, mounts = ManagedMdt.create_for_volume(synthetic_volume_full(self.hosts[0]).id, filesystem=self.fs)
    ObjectCache.add(ManagedTarget, self.mdt.managedtarget_ptr)
    for tm in mounts:
        ObjectCache.add(ManagedTargetMount, tm)
    self.osts = [
        ManagedOst.create_for_volume(synthetic_volume_full(self.hosts[1]).id, filesystem=self.fs)[0]
        for n in range(2)
    ]

    # store fixture data with corresponding targets
    for target, key in zip(self.hosts + [self.mdt] + self.osts, sorted(fixture)):
        store = metrics.MetricStore.new(target)
        # OSTs additionally carry the jobstats variable name.
        kwargs = {"jobid_var": "procname_uid"} if isinstance(target, ManagedOst) else {}
        for timestamp, value in fixture[key]:
            Stats.insert(store.serialize(value, timestamp, **kwargs))
    # Clear per-model caches so tests read back through the store.
    for model in Stats:
        model.cache.clear()
def _create_filesystem_n_osts(self, n_targets):
    """Create a filesystem 'foo' with one MGT, one MDT and the rest OSTs.

    Requires at least 3 targets; existing filesystem/target rows are first
    soft-deleted (not_deleted=None) so the new objects start from a clean slate.
    """
    assert n_targets >= 3
    # Soft-delete any pre-existing rows.
    for model in (ManagedFilesystem, ManagedTarget, ManagedTargetMount):
        model.objects.update(not_deleted=None)

    self._create_n_volumes_host_pairs(n_targets)
    assert ManagedTarget.objects.count() == 0

    fs = None
    for index, volume in enumerate(Volume.objects.all()):
        if index == 0:
            # First volume hosts the MGT and anchors the filesystem.
            mgt, mounts = ManagedMgs.create_for_volume(volume.id)
            fs = ManagedFilesystem.objects.create(name="foo", mgs=mgt)
            ObjectCache.add(ManagedFilesystem, fs)
        else:
            # Second volume becomes the MDT; every later one an OST.
            target_klass = ManagedMdt if index == 1 else ManagedOst
            new_target = target_klass.create_for_volume(volume.id, filesystem=fs)[0]
            ObjectCache.add(ManagedTarget, new_target.managedtarget_ptr)
def create_simple_filesystem(self, host, start=True):
    """Create MGS/MDT/OST plus 'testfs' on *host*; optionally start it.

    When *start* is true the filesystem is driven to 'available' and the
    target attributes are refreshed from the database.
    """
    from chroma_core.models import ManagedMgs, ManagedMdt, ManagedOst, ManagedFilesystem

    self.mgt, mgt_tms = ManagedMgs.create_for_volume(self._test_lun(host).id, name="MGS")
    self.fs = ManagedFilesystem.objects.create(mgs=self.mgt, name="testfs")
    ObjectCache.add(ManagedFilesystem, self.fs)
    self.mdt, mdt_tms = ManagedMdt.create_for_volume(self._test_lun(host).id, filesystem=self.fs)
    self.ost, ost_tms = ManagedOst.create_for_volume(self._test_lun(host).id, filesystem=self.fs)
    for target in (self.mgt, self.ost, self.mdt):
        ObjectCache.add(ManagedTarget, target.managedtarget_ptr)
    for tm in chain(mgt_tms, mdt_tms, ost_tms):
        ObjectCache.add(ManagedTargetMount, tm)

    if start:
        self.fs = self.set_and_assert_state(self.fs, "available")
        # Re-read the targets now that starting the fs has mutated them.
        self.mgt = freshen(self.mgt)
        self.mdt = freshen(self.mdt)
        self.ost = freshen(self.ost)
def create_entity(self, fs):
    """Create an MDT for *fs* on a freshly-created volume, stored on self.entity."""
    self.create_volume()
    self.entity, _mounts = ManagedMdt.create_for_volume(self.volume.pk, name=self.name, filesystem=fs)
    # Bare attribute access kept for its side effect — presumably initialises
    # the target's metrics store; TODO confirm against the metrics property.
    self.entity.metrics