def setUp(self):
    super(TestOrderedTargets, self).setUp()

    # If the test that just ran imported storage_plugin_manager, it will
    # have instantiated its singleton, and created some DB records.
    # Django TestCase rolls back the database, so make sure that we
    # also roll back (reset) this singleton.
    import chroma_core.lib.storage_plugin.manager
    chroma_core.lib.storage_plugin.manager.storage_plugin_manager = (
        chroma_core.lib.storage_plugin.manager.StoragePluginManager())

    load_default_profile()

    self.job_scheduler = JobScheduler()

    self.no_of_nodes = 10
    self.nodes = []
    for node in range(0, self.no_of_nodes):
        self.nodes.append(synthetic_host("node%s" % node))

    for node in self.nodes:
        synthetic_volume_full(
            node, secondary_hosts=list(set(self.nodes) - set([node])))

    self.volume_ids = [volume.id for volume in Volume.objects.all()]

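# A minimal sketch of the `synthetic_volume_full` helper used throughout this
# section, inferred from its call sites: the signature matches the keyword
# arguments used above, but the body is an assumption, not the repo's actual
# implementation.
def synthetic_volume_full(primary_host, secondary_hosts=[], usable_for_lustre=True):
    # Create a bare Volume (the `usable_for_lustre` field name is assumed),
    # then attach a primary VolumeNode on primary_host and a non-primary
    # VolumeNode per secondary host. The "/fake/path/" prefix matches the
    # filter used in the `step` fixture later in this section.
    volume = Volume.objects.create(usable_for_lustre=usable_for_lustre)
    path = "/fake/path/%s" % volume.id
    VolumeNode.objects.create(volume=volume, host=primary_host, path=path, primary=True)
    for host in secondary_hosts:
        VolumeNode.objects.create(volume=volume, host=host, path=path, primary=False)
    return volume
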
def test_HYD1483(self):
    """Test that adding a second MGS to a host emits a useful error."""
    mgt, _ = ManagedMgs.create_for_volume(
        synthetic_volume_full(self.host).id, name="MGS")
    mgt.save()

    new_mgt_volume = synthetic_volume_full(self.host)
    mdt_volume = synthetic_volume_full(self.host)
    ost_volume = synthetic_volume_full(self.host)

    response = self.api_client.post("/api/filesystem/", data={
        'name': 'testfs',
        'mgt': {'volume_id': new_mgt_volume.id},
        'mdts': [{
            'volume_id': mdt_volume.id,
            'conf_params': {}
        }],
        'osts': [{
            'volume_id': ost_volume.id,
            'conf_params': {}
        }],
        'conf_params': {}
    })
    self.assertHttpBadRequest(response)

    errors = self.deserialize(response)
    self.assertIn('only one MGS is allowed per server',
                  errors['mgt']['volume_id'][0])

def test_patch_creation(self):
    """Test that creating multiple Targets using PATCH returns a target and a command"""
    host = synthetic_host("myserver")
    self.create_simple_filesystem(host)

    spare_volume_1 = synthetic_volume_full(host)
    spare_volume_2 = synthetic_volume_full(host)

    response = self.api_client.patch(
        "/api/target/",
        data={
            "objects": [
                {"kind": "OST", "filesystem_id": self.fs.id, "volume_id": spare_volume_1.id},
                {"kind": "MDT", "filesystem_id": self.fs.id, "volume_id": spare_volume_2.id},
            ],
            "deletions": [],
        },
    )
    self.assertHttpAccepted(response)

def test_striping_post(self):
    """Test OSTs are assigned to alternating hosts."""
    self.host = synthetic_host("myserver")
    hosts = [synthetic_host("myserver{0:d}".format(n)) for n in range(4)] * 2

    # keep hosts in alternating order, but supply them grouped
    data = {
        "name": "testfs",
        "mgt": {
            "volume_id": synthetic_volume_full(self.host).id,
            "conf_params": {}
        },
        "mdts": [{
            "volume_id": synthetic_volume_full(self.host).id,
            "conf_params": {}
        }],
        "osts": [{
            "volume_id": synthetic_volume_full(host).id,
            "conf_params": {}
        } for host in sorted(hosts, key=str)],
        "conf_params": {},
    }

    response = self.api_client.post("/api/filesystem/", data=data)
    self.assertHttpAccepted(response)
    content = json.loads(response.content)
    self.assertEqual(
        map(str, hosts),
        list(self._target_hosts(content["filesystem"]["osts"])))

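# A minimal sketch of the `_target_hosts` helper asserted against above and in
# test_striping_patch below; the "primary_server_name" field is a hypothetical
# name for wherever the serialized target records its host — the repo's actual
# helper may differ.
def _target_hosts(self, targets):
    for target in targets:
        # Yield a string identifying the host each target landed on, so the
        # caller can compare it against str(host) for the synthetic hosts.
        yield target["primary_server_name"]
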
def test_force_removal(self):
    """Test the mode of removal which should not rely on the host being accessible"""
    host = synthetic_host("myaddress")
    synthetic_volume_full(host)
    self.assertEqual(Volume.objects.count(), 1)
    self.assertEqual(VolumeNode.objects.count(), 1)

    # The host disappears, never to be seen again
    MockAgentRpc.succeed = False
    try:
        JobSchedulerClient.command_run_jobs([{
            "class_name": "ForceRemoveHostJob",
            "args": {
                "host_id": host.id
            }
        }], "Test host force remove")
        self.drain_progress()
    finally:
        MockAgentRpc.succeed = True

    with self.assertRaises(ManagedHost.DoesNotExist):
        ManagedHost.objects.get(address="myaddress")

    self.assertEqual(Volume.objects.count(), 0)
    self.assertEqual(VolumeNode.objects.count(), 0)

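# The try/finally toggle above could be factored into a reusable context
# manager; a minimal sketch, assuming only the class-level MockAgentRpc.succeed
# flag shown above (the helper name `agent_unreachable` is hypothetical, not
# part of the repo).
from contextlib import contextmanager

@contextmanager
def agent_unreachable():
    # Simulate an unreachable agent for the duration of the block, restoring
    # the flag even if the body raises.
    MockAgentRpc.succeed = False
    try:
        yield
    finally:
        MockAgentRpc.succeed = True
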
def _create_file_system(self, mgt, other):
    fs = ManagedFilesystem.objects.create(mgs=mgt, name="testfs")
    ObjectCache.add(ManagedFilesystem, fs)
    ManagedMdt.create_for_volume(synthetic_volume_full(other).id, filesystem=fs)
    ManagedOst.create_for_volume(synthetic_volume_full(other).id, filesystem=fs)
    return fs

def create_fake_filesystem_client(self, active=False):
    from chroma_core.models import ManagedMgs, ManagedMdt, ManagedOst, ManagedFilesystem, LustreClientMount
    from tests.unit.chroma_core.helpers import synthetic_volume_full

    mgt, _ = ManagedMgs.create_for_volume(synthetic_volume_full(self.server).id, name="MGS")
    fs = ManagedFilesystem.objects.create(mgs=mgt, name='testfs')
    ObjectCache.add(ManagedFilesystem, fs)
    ManagedMdt.create_for_volume(synthetic_volume_full(self.server).id, filesystem=fs)
    ManagedOst.create_for_volume(synthetic_volume_full(self.server).id, filesystem=fs)

    state = 'mounted' if active else 'unmounted'
    self.mount = LustreClientMount.objects.create(host=self.worker, filesystem=fs, state=state)
    ObjectCache.add(LustreClientMount, self.mount)

def test_removal(self):
    host = synthetic_host("myaddress")
    synthetic_volume_full(host)
    self.assertEqual(Volume.objects.count(), 1)
    self.assertEqual(VolumeNode.objects.count(), 1)

    host = self.set_and_assert_state(host, "removed")
    with self.assertRaises(ManagedHost.DoesNotExist):
        ManagedHost.objects.get(address="myaddress")
    self.assertEqual(ManagedHost.objects.count(), 0)
    self.assertEqual(Volume.objects.count(), 0)
    self.assertEqual(VolumeNode.objects.count(), 0)

def create_simple_filesystem(self, host):
    from chroma_core.models import ManagedMgs, ManagedMdt, ManagedOst, ManagedFilesystem, ManagedTargetMount

    self.mgt, _ = ManagedMgs.create_for_volume(synthetic_volume_full(host).id, name="MGS")
    self.fs = ManagedFilesystem.objects.create(mgs=self.mgt, name="testfs")
    ObjectCache.add(ManagedFilesystem, self.fs)
    ObjectCache.add(ManagedTarget, ManagedTarget.objects.get(id=self.mgt.id))

    self.mdt, _ = ManagedMdt.create_for_volume(synthetic_volume_full(host).id, filesystem=self.fs)
    self.ost, _ = ManagedOst.create_for_volume(synthetic_volume_full(host).id, filesystem=self.fs)
    ObjectCache.add(ManagedTarget, ManagedTarget.objects.get(id=self.mdt.id))
    ObjectCache.add(ManagedTarget, ManagedTarget.objects.get(id=self.ost.id))
    ObjectCache.add(ManagedTargetMount, ManagedTargetMount.objects.get(target_id=self.mgt.id))
    ObjectCache.add(ManagedTargetMount, ManagedTargetMount.objects.get(target_id=self.mdt.id))
    ObjectCache.add(ManagedTargetMount, ManagedTargetMount.objects.get(target_id=self.ost.id))

def test_multiple_volumenodes(self):
    """
    Test that if a Volume has multiple VolumeNodes on the same host, a fetch
    with host_id produces a single Volume with multiple VolumeNodes.

    Prior to HYD-6331, multiple copies of the same Volume would be returned.
    """
    host0 = synthetic_host("host0")
    host1 = synthetic_host("host1")
    volume = synthetic_volume_full(host0, secondary_hosts=[host1])

    # Check we get 1 Volume with 2 VolumeNodes (check with and without primary)
    for data in [{"host_id": host0.id}, {"host_id": host0.id, "primary": True}]:
        response = self.api_client.get("/api/volume/", data=data)
        self.assertHttpOK(response)
        content = json.loads(response.content)
        self.assertEqual(len(content["objects"]), 1)
        # Check the Volume has 2 VolumeNodes
        self.assertEqual(len(content["objects"][0]["volume_nodes"]), 2)

    # Now add another VolumeNode on host0
    VolumeNode.objects.create(volume=volume, host=host0, path="/secondvolumenode", primary=False)

    # Check we get 1 Volume again, but now with 3 VolumeNodes, with and without primary
    for data in [{"host_id": host0.id}, {"host_id": host0.id, "primary": True}]:
        response = self.api_client.get("/api/volume/", data=data)
        self.assertHttpOK(response)
        content = json.loads(response.content)
        self.assertEqual(len(content["objects"]), 1)
        # Check the Volume has 3 VolumeNodes
        self.assertEqual(len(content["objects"][0]["volume_nodes"]), 3)

def test_mgs_nid_change(self):
    mgs = synthetic_host("mgs")
    mds = synthetic_host("mds")
    oss = synthetic_host("oss")

    from chroma_core.models import (
        ManagedMgs,
        ManagedMdt,
        ManagedOst,
        ManagedFilesystem,
        ManagedTarget,
        ManagedTargetMount,
    )

    self.mgt, mgt_tms = ManagedMgs.create_for_volume(
        synthetic_volume_full(mgs).id, name="MGS")
    self.fs = ManagedFilesystem.objects.create(mgs=self.mgt, name="testfs")
    self.mdt, mdt_tms = ManagedMdt.create_for_volume(
        synthetic_volume_full(mds).id, filesystem=self.fs)
    self.ost, ost_tms = ManagedOst.create_for_volume(
        synthetic_volume_full(oss).id, filesystem=self.fs)

    ObjectCache.add(ManagedFilesystem, self.fs)
    for target in [self.mgt, self.ost, self.mdt]:
        ObjectCache.add(ManagedTarget, target.managedtarget_ptr)
    for tm in chain(mgt_tms, mdt_tms, ost_tms):
        ObjectCache.add(ManagedTargetMount, tm)

    self.fs = self.set_and_assert_state(self.fs, "available")

    self.mock_servers["mgs"]["nids"] = [Nid.Nid("192.168.0.99", "tcp", 0)]
    self.assertNidsCorrect(mgs)

    JobSchedulerClient.command_run_jobs([{
        "class_name": "UpdateNidsJob",
        "args": {
            "hosts": [api.get_resource_uri(mgs)]
        }
    }], "Test update nids")
    self.drain_progress()

    # The -3 looks past the start/stop that happens after writeconf
    self.assertEqual(MockAgentRpc.host_calls[mgs][-3][0], "writeconf_target")
    self.assertEqual(MockAgentRpc.host_calls[mds][-3][0], "writeconf_target")
    self.assertEqual(MockAgentRpc.host_calls[oss][-3][0], "writeconf_target")
    self.assertState(self.fs, "stopped")

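# Note: the indexing above assumes MockAgentRpc.host_calls maps each host to an
# ordered list of recorded agent invocations whose first element is the command
# name; this structure is inferred from the assertions, not verified against
# the mock's source.
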
def test_two_nids_no_failover(self):
    mgs0 = self._host_with_nids("primary-mgs-twonid")
    other = self._host_with_nids("othernode")
    mgt, _ = ManagedMgs.create_for_volume(synthetic_volume_full(mgs0).id, name="MGS")
    fs = self._create_file_system(mgt, other)

    self.assertEqual(mgt.nids(), ((u"1.2.3.4@tcp0", u"4.3.2.1@tcp1"),))
    self.assertEqual(fs.mgs_spec(), u"1.2.3.4@tcp0,4.3.2.1@tcp1")

def test_one_nid_no_failover(self):
    mgs0 = self._host_with_nids('primary-mgs')
    other = self._host_with_nids('othernode')
    mgt, _ = ManagedMgs.create_for_volume(synthetic_volume_full(mgs0).id, name="MGS")
    fs = self._create_file_system(mgt, other)

    self.assertEqual(mgt.nids(), ((u'1.2.3.4@tcp0',),))
    self.assertEqual(fs.mgs_spec(), u'1.2.3.4@tcp0')

def test_two_nids_with_failover(self):
    mgs0 = self._host_with_nids('primary-mgs-twonid')
    mgs1 = self._host_with_nids('failover-mgs-twonid')
    other = self._host_with_nids('othernode')
    mgt, _ = ManagedMgs.create_for_volume(
        synthetic_volume_full(mgs0, secondary_hosts=[mgs1]).id, name="MGS")
    fs = self._create_file_system(mgt, other)

    self.assertEqual(mgt.nids(),
                     ((u'1.2.3.4@tcp0', u'4.3.2.1@tcp1'),
                      (u'1.2.3.5@tcp5', u'4.3.2.2@tcp1')))
    self.assertEqual(fs.mgs_spec(),
                     u'1.2.3.4@tcp0,4.3.2.1@tcp1:1.2.3.5@tcp5,4.3.2.2@tcp1')

def test_one_nid_with_failover(self):
    mgs0 = self._host_with_nids("primary-mgs")
    mgs1 = self._host_with_nids("failover-mgs")
    other = self._host_with_nids("othernode")
    mgt, _ = ManagedMgs.create_for_volume(
        synthetic_volume_full(mgs0, secondary_hosts=[mgs1]).id, name="MGS")
    fs = self._create_file_system(mgt, other)

    self.assertEqual(mgt.nids(), ((u"1.2.3.4@tcp0",), (u"1.2.3.5@tcp5",)))
    self.assertEqual(fs.mgs_spec(), u"1.2.3.4@tcp0:1.2.3.5@tcp5")

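# A minimal sketch of the `_host_with_nids` helper the four NID tests above
# assume. The NID values are inferred from the assertions; the "othernode"
# NIDs are invented for illustration, and the real fixture may differ.
def _host_with_nids(self, address):
    nids = {
        "primary-mgs": [Nid.Nid("1.2.3.4", "tcp", 0)],
        "failover-mgs": [Nid.Nid("1.2.3.5", "tcp", 5)],
        "primary-mgs-twonid": [Nid.Nid("1.2.3.4", "tcp", 0), Nid.Nid("4.3.2.1", "tcp", 1)],
        "failover-mgs-twonid": [Nid.Nid("1.2.3.5", "tcp", 5), Nid.Nid("4.3.2.2", "tcp", 1)],
        "othernode": [Nid.Nid("10.0.0.1", "tcp", 0)],  # hypothetical value
    }
    return synthetic_host(address, nids=nids[address])
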
def setUp(self):
    ChromaApiTestCase.setUp(self)
    fixture = collections.defaultdict(list)
    for line in open(os.path.join(os.path.dirname(__file__), "fixtures/stats.sjson")):
        data = json.loads(line.strip())
        fixture[data["type"], data["id"]].append((data["time"], data["data"]))
    # create gaps in data to test alignment
    for key in (min(fixture), max(fixture)):
        del fixture[key][-1]

    self.hosts = [synthetic_host("myserver{0:d}".format(n)) for n in range(2)]
    self.mgt, mounts = ManagedMgs.create_for_volume(
        synthetic_volume_full(self.hosts[0]).id, name="MGS")
    self.fs = ManagedFilesystem.objects.create(mgs=self.mgt, name="testfs")
    ObjectCache.add(ManagedFilesystem, self.fs)
    self.mdt, mounts = ManagedMdt.create_for_volume(
        synthetic_volume_full(self.hosts[0]).id, filesystem=self.fs)
    ObjectCache.add(ManagedTarget, self.mdt.managedtarget_ptr)
    for tm in mounts:
        ObjectCache.add(ManagedTargetMount, tm)
    self.osts = [
        ManagedOst.create_for_volume(
            synthetic_volume_full(self.hosts[1]).id, filesystem=self.fs)[0]
        for n in range(2)
    ]

    # store fixture data with corresponding targets
    for target, key in zip(self.hosts + [self.mdt] + self.osts, sorted(fixture)):
        store = metrics.MetricStore.new(target)
        kwargs = {"jobid_var": "procname_uid"} if isinstance(target, ManagedOst) else {}
        for timestamp, value in fixture[key]:
            Stats.insert(store.serialize(value, timestamp, **kwargs))
    for model in Stats:
        model.cache.clear()

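# The parsing above implies stats.sjson is newline-delimited JSON, one record
# per line, with "type", "id", "time", and "data" fields. A hypothetical
# example line (field values invented for illustration):
#
#   {"type": "host", "id": 1, "time": 1234567890, "data": {"cpu_total": 4}}
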
def step(context):
    from chroma_core.models.host import ManagedHost, VolumeNode
    from tests.unit.chroma_core.helpers import MockAgentRpc, synthetic_host, synthetic_volume_full

    for address, host_info in sorted(MockAgentRpc.mock_servers.items()):
        if not ManagedHost.objects.filter(fqdn=host_info["fqdn"]).exists():
            host = synthetic_host(address,
                                  nids=host_info["nids"],
                                  fqdn=host_info["fqdn"],
                                  nodename=host_info["nodename"])

    for address, host_info in sorted(MockAgentRpc.mock_servers.items()):
        if not VolumeNode.objects.filter(host__fqdn=host_info["fqdn"],
                                         path__startswith="/fake/path/").exists():
            synthetic_volume_full(ManagedHost.objects.get(fqdn=host_info["fqdn"]))

    eq_(ManagedHost.objects.count(), len(MockAgentRpc.mock_servers))

def _new_ost_with_params(self, params):
    spare_volume = synthetic_volume_full(self.host)
    return self.api_client.post("/api/target/", data={
        'kind': 'OST',
        'filesystem_id': self.fs.id,
        'volume_id': spare_volume.id,
        'conf_params': params
    })

def test_deleted_volumenode(self):
    """
    Test deleting a VolumeNode means the volume API does not return it.
    """
    host0 = synthetic_host("host0")
    host1 = synthetic_host("host1")
    synthetic_volume_full(host0, secondary_hosts=[host1])

    self.assertEqual(1, len(self._get_volumes()))
    self.assertEqual(1, len(self._get_volumes(host0.id)))
    self.assertEqual(1, len(self._get_volumes(host1.id)))

    VolumeNode.objects.get(host_id=host0.id).mark_deleted()
    self.assertEqual(0, len(self._get_volumes(host0.id)))
    self.assertEqual(1, len(self._get_volumes(host1.id)))

    VolumeNode.objects.filter(host_id=host1.id).delete()
    self.assertEqual(0, len(self._get_volumes(host1.id)))

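# A minimal sketch of the `_get_volumes` helper assumed above (illustrative;
# the real helper may differ). It fetches /api/volume/, optionally filtered by
# host, and returns the deserialized object list; the "limit": 0 pagination
# override follows the pattern used elsewhere in this section.
def _get_volumes(self, host_id=None):
    data = {"limit": 0}
    if host_id is not None:
        data["host_id"] = host_id
    response = self.api_client.get("/api/volume/", data=data)
    self.assertHttpOK(response)
    return json.loads(response.content)["objects"]
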
def test_unusable_by_lustre(self):
    """Test filtering volumes by category when one volume is not usable for Lustre."""
    host = synthetic_host("myserver")
    synthetic_volume_full(host)
    synthetic_volume_full(host, usable_for_lustre=False)

    response = self.api_client.get("/api/volume/")
    self.assertHttpOK(response)
    content = json.loads(response.content)
    self.assertEqual(2, len(content["objects"]))

    response = self.api_client.get("/api/volume/", data={"category": "usable"})
    self.assertHttpOK(response)
    content = json.loads(response.content)
    self.assertEqual(1, len(content["objects"]))

    response = self.api_client.get("/api/volume/", data={"category": "unused"})
    self.assertHttpOK(response)
    content = json.loads(response.content)
    self.assertEqual(1, len(content["objects"]))

def test_missing(self):
    """Test that POSTs without conf_params are OK. This is for backwards
    compatibility with Chroma 1.0.0.0, which didn't have conf_params on
    POSTs at all."""
    spare_volume = synthetic_volume_full(self.host)

    response = self.api_client.post("/api/target/", data={
        "kind": "OST",
        "filesystem_id": self.fs.id,
        "volume_id": spare_volume.id
    })
    self.assertHttpAccepted(response)

def test_HYD424(self):
    """Test that filesystems can't be created using unmanaged MGSs"""
    mgt, _ = ManagedMgs.create_for_volume(synthetic_volume_full(self.host).id, name="MGS")
    mgt.immutable_state = True
    mgt.save()

    # Shouldn't offer the MGS for FS creation
    response = self.api_client.get("/api/target/",
                                   data={'kind': 'MGT', 'limit': 0, 'immutable_state': False})
    self.assertHttpOK(response)
    mgts = self.deserialize(response)['objects']
    self.assertEqual(len(mgts), 0)

    mdt_volume = synthetic_volume_full(self.host)
    ost_volume = synthetic_volume_full(self.host)

    # Shouldn't accept the MGS for FS creation
    response = self.api_client.post("/api/filesystem/", data={
        'name': 'testfs',
        'mgt': {'id': mgt.id},
        'mdts': [{
            'volume_id': mdt_volume.id,
            'conf_params': {}
        }],
        'osts': [{
            'volume_id': ost_volume.id,
            'conf_params': {}
        }],
        'conf_params': {}
    })
    self.assertHttpBadRequest(response)

    errors = self.deserialize(response)
    self.assertDictEqual(errors, {
        'mgt': {'id': ['MGT is unmanaged']},
        'mdts': {},
        'osts': {},
    })

def test_post_creation(self):
    """Test that creating an OST using POST returns a target and a command"""
    host = synthetic_host("myserver")
    self.create_simple_filesystem(host)

    spare_volume = synthetic_volume_full(host)
    response = self.api_client.post("/api/target/", data={
        "kind": "OST",
        "filesystem_id": self.fs.id,
        "volume_id": spare_volume.id
    })
    self.assertHttpAccepted(response)

def _post_filesystem(self, fs_params, mgt_params, mdt_params, ost_params):
    mgt_volume = synthetic_volume_full(self.host)
    mdt_volume = synthetic_volume_full(self.host)
    ost_volume = synthetic_volume_full(self.host)
    return self.api_client.post("/api/filesystem/", data={
        'name': 'testfs',
        'mgt': {
            'volume_id': mgt_volume.id,
            'conf_params': mgt_params
        },
        'mdts': [{
            'volume_id': mdt_volume.id,
            'conf_params': mdt_params
        }],
        'osts': [{
            'volume_id': ost_volume.id,
            'conf_params': ost_params
        }],
        'conf_params': fs_params
    })

def test_multiple_volume_nodes(self):
    """
    Test that when a volume has multiple volume nodes on one host, the
    volume is not duplicated in the arguments to the resource manager
    (HYD-2119).
    """
    host = synthetic_host()
    volume = synthetic_volume_full(host)

    # An extra volume node, so that there are now two on one host
    VolumeNode.objects.create(volume=volume, host=host, path="/dev/sdaxxx")
    self.assertEqual(VolumeNode.objects.filter(host=host).count(), 2)

    resource_manager = mock.Mock()
    AgentPluginHandlerCollection(resource_manager).rebalance_host_volumes(host.id)

    called_with_volumes = list(resource_manager.balance_unweighted_volume_nodes.call_args[0][0])
    self.assertListEqual(called_with_volumes, [volume])

def test_striping_patch(self):
    """Test OSTs are assigned to alternating hosts."""
    self.create_simple_filesystem(synthetic_host("myserver"))
    hosts = [synthetic_host("myserver{0:d}".format(n)) for n in range(4)] * 2

    # keep hosts in alternating order, but supply them grouped
    objects = [{
        "kind": "OST",
        "filesystem_id": self.fs.id,
        "volume_id": synthetic_volume_full(host).id
    } for host in sorted(hosts, key=str)]

    response = self.api_client.patch("/api/target/", data={
        "deletions": [],
        "objects": objects
    })
    self.assertHttpAccepted(response)
    content = json.loads(response.content)
    self.assertEqual(map(str, hosts), list(self._target_hosts(content["targets"])))

def test_HYD965(self):
    """Test that targets cannot be added using volumes which are already in use"""
    host = synthetic_host("myserver")
    self.create_simple_filesystem(host)

    spare_volume = synthetic_volume_full(host)
    response = self.api_client.post("/api/target/", data={
        "kind": "OST",
        "filesystem_id": self.fs.id,
        "volume_id": spare_volume.id
    })
    self.assertHttpAccepted(response)

    # A second target on the same volume should be rejected
    response = self.api_client.post("/api/target/", data={
        "kind": "OST",
        "filesystem_id": self.fs.id,
        "volume_id": spare_volume.id
    })
    self.assertHttpBadRequest(response)