def test_striping_post(self):
    """Test OSTs are assigned to alternating hosts."""
    self.host = synthetic_host("myserver")
    # keep hosts in alternating order, but supply them grouped
    hosts = [synthetic_host("myserver{0:d}".format(n)) for n in range(4)] * 2
    data = {
        "name": "testfs",
        "mgt": {"volume_id": synthetic_volume_full(self.host).id, "conf_params": {}},
        "mdts": [{"volume_id": synthetic_volume_full(self.host).id, "conf_params": {}}],
        "osts": [
            {"volume_id": synthetic_volume_full(host).id, "conf_params": {}}
            for host in sorted(hosts, key=str)
        ],
        "conf_params": {},
    }
    response = self.api_client.post("/api/filesystem/", data=data)
    self.assertHttpAccepted(response)
    content = json.loads(response.content)
    self.assertEqual(map(str, hosts), list(self._target_hosts(content["filesystem"]["osts"])))
def test_multiple_volumenodes(self):
    """
    Test that if a Volume has multiple VolumeNodes on the same host, a
    fetch with host_id produces a single Volume with multiple VolumeNodes.
    Prior to HYD-6331, multiple copies of the same Volume would be returned.
    """
    host0 = synthetic_host("host0")
    host1 = synthetic_host("host1")
    volume = synthetic_volume_full(host0, secondary_hosts=[host1])

    # Check we get 1 Volume with 2 VolumeNodes (check with and without primary)
    for data in [{"host_id": host0.id}, {"host_id": host0.id, "primary": True}]:
        response = self.api_client.get("/api/volume/", data=data)
        self.assertHttpOK(response)
        content = json.loads(response.content)
        self.assertEqual(len(content["objects"]), 1)
        # Check the Volume has 2 VolumeNodes
        self.assertEqual(len(content["objects"][0]["volume_nodes"]), 2)

    # Now add another VolumeNode on host0
    VolumeNode.objects.create(volume=volume, host=host0, path="/secondvolumenode", primary=False)

    # Check we get 1 Volume again, but now with 3 VolumeNodes, with and without primary
    for data in [{"host_id": host0.id}, {"host_id": host0.id, "primary": True}]:
        response = self.api_client.get("/api/volume/", data=data)
        self.assertHttpOK(response)
        content = json.loads(response.content)
        self.assertEqual(len(content["objects"]), 1)
        # Check the Volume has 3 VolumeNodes
        self.assertEqual(len(content["objects"][0]["volume_nodes"]), 3)
def test_HYD648(self):
    """Test that datetimes in the API have a timezone"""
    synthetic_host('myserver')
    response = self.api_client.get("/api/host/")
    self.assertHttpOK(response)
    host = self.deserialize(response)['objects'][0]
    t = IMLDateTime.parse(host['state_modified_at'])
    self.assertNotEqual(t.tzinfo, None)
def setUp(self):
    super(TestAdvertisedTargetJobs, self).setUp()

    load_default_profile()

    self.target = mock.Mock()
    self.target.immutable_state = False
    self.target.failover_hosts = [synthetic_host()]
    self.target.primary_host = synthetic_host()
    self.target.active_host = self.target.primary_host
def test_associating_outlets_with_hosts(self, notify):
    # `notify` is the mock injected by the @mock.patch decorator applied
    # to this test (the decorator is not shown here).
    synthetic_host(address="foo")
    host = self.api_get_list("/api/host/")[0]
    outlet = self.api_get_list("/api/power_control_device_outlet/")[0]

    self.api_patch_attributes(outlet["resource_uri"], {"host": host["resource_uri"]})
    outlet = self.api_get(outlet["resource_uri"])
    self.assertEqual(outlet["host"], host["resource_uri"])
    self.assertTrue(notify.called)
def make_alertstate(
    self, alert_obj=HostOfflineAlert, alert_item=None, dismissed=False, severity=INFO, created_at=None, active=False
):
    if alert_item is None:
        alert_item = synthetic_host()

    alert_type = alert_item.__class__.__name__

    # The following fields must be unique together for each AlertState:
    # alert_item_type, alert_item_id, alert_type, active.
    # item_type and item_id are the content_type and pk of alert_item.
    return alert_obj.objects.create(
        severity=severity,
        active=active,
        alert_item=alert_item,
        begin=created_at,
        end=created_at,
        dismissed=dismissed,
        alert_type=alert_type,
    )
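# Usage sketch for the helper above (a hypothetical test, not part of the
# original suite): two AlertStates sharing alert_item and alert_type can
# coexist because the unique-together fields differ on `active`. Uses only
# names defined elsewhere in this suite (synthetic_host, IMLDateTime).
def test_make_alertstate_sketch(self):
    host = synthetic_host()
    now = IMLDateTime.utcnow()
    active_alert = self.make_alertstate(alert_item=host, created_at=now, active=True)
    inactive_alert = self.make_alertstate(alert_item=host, created_at=now, active=False)
    # Distinct rows were created despite identical item and type
    self.assertNotEqual(active_alert.pk, inactive_alert.pk)
    self.assertEqual(active_alert.alert_type, inactive_alert.alert_type)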
def test_update_properties(self):
    update_scan = UpdateScan()
    update_scan.host = synthetic_host('test1')
    update_scan.started_at = IMLDateTime.utcnow()

    self.assertEqual(update_scan.host.properties, '{}')
    update_scan.update_properties(None)
    update_scan.update_properties({'key': 'value'})
def setUp(self):
    super(TestOrderedTargets, self).setUp()

    # If the test that just ran imported storage_plugin_manager, it will
    # have instantiated its singleton and created some DB records.
    # Django TestCase rolls back the database, so make sure that we
    # also roll back (reset) this singleton.
    import chroma_core.lib.storage_plugin.manager

    chroma_core.lib.storage_plugin.manager.storage_plugin_manager = (
        chroma_core.lib.storage_plugin.manager.StoragePluginManager()
    )

    load_default_profile()

    self.job_scheduler = JobScheduler()

    self.no_of_nodes = 10
    self.nodes = []
    for node in range(0, self.no_of_nodes):
        self.nodes.append(synthetic_host("node%s" % node))

    for node in self.nodes:
        synthetic_volume_full(node, secondary_hosts=list(set(self.nodes) - set([node])))

    self.volume_ids = [volume.id for volume in Volume.objects.all()]
def test_force_removal_with_filesystem(self):
    """Test that when a filesystem depends on a host, the filesystem
    is deleted along with the host when doing a force remove"""
    host = synthetic_host("myaddress")
    self.create_simple_filesystem()
    from chroma_core.models import ManagedMgs, ManagedMdt, ManagedOst, ManagedFilesystem

    self.fs = self.set_and_assert_state(self.fs, "available")
    self.assertState(self.mgt.managedtarget_ptr, "mounted")
    self.assertState(self.mdt.managedtarget_ptr, "mounted")
    self.assertState(self.ost.managedtarget_ptr, "mounted")
    self.assertEqual(ManagedFilesystem.objects.get(pk=self.fs.pk).state, "available")

    # The host disappears, never to be seen again
    MockAgentRpc.succeed = False
    try:
        JobSchedulerClient.command_run_jobs(
            [{"class_name": "ForceRemoveHostJob", "args": {"host_id": host.id}}], "Test host force remove"
        )
        self.drain_progress()
    finally:
        MockAgentRpc.succeed = True

    with self.assertRaises(ManagedHost.DoesNotExist):
        ManagedHost.objects.get(address="myaddress")

    self.assertEqual(ManagedMgs.objects.count(), 0)
    self.assertEqual(ManagedOst.objects.count(), 0)
    self.assertEqual(ManagedMdt.objects.count(), 0)
    self.assertEqual(Volume.objects.count(), 0)
    self.assertEqual(VolumeNode.objects.count(), 0)
    self.assertEqual(ManagedFilesystem.objects.count(), 0)
def test_force_removal(self):
    """Test the mode of removal which should not rely on the host being accessible"""
    host = synthetic_host("myaddress")

    synthetic_volume_full(host)
    self.assertEqual(Volume.objects.count(), 1)
    self.assertEqual(VolumeNode.objects.count(), 1)

    # The host disappears, never to be seen again
    MockAgentRpc.succeed = False
    try:
        JobSchedulerClient.command_run_jobs(
            [{"class_name": "ForceRemoveHostJob", "args": {"host_id": host.id}}], "Test host force remove"
        )
        self.drain_progress()
    finally:
        MockAgentRpc.succeed = True

    with self.assertRaises(ManagedHost.DoesNotExist):
        ManagedHost.objects.get(address="myaddress")

    self.assertEqual(Volume.objects.count(), 0)
    self.assertEqual(VolumeNode.objects.count(), 0)
def setUp(self):
    super(TestAvailableJobs, self).setUp()

    from chroma_core.services.job_scheduler.job_scheduler import JobScheduler
    from tests.unit.chroma_core.helpers import load_default_profile

    load_default_profile()

    self.JobScheduler = JobScheduler
    self.js = JobScheduler()
    volume = synthetic_volume(with_storage=False)

    # Create objects before ObjectCache init, so they are in the cache.
    self.host = synthetic_host()
    self.mgs = ManagedMgs.objects.create(volume=volume)
    self.fs = ManagedFilesystem.objects.create(name="mgsfs", mgs=self.mgs)
    self.mdt = ManagedMdt.objects.create(volume=volume, filesystem=self.fs, index=1)
    self.ost = ManagedOst.objects.create(volume=volume, filesystem=self.fs, index=1)

    # Objects created after this init will not be in the cache.
    ObjectCache.getInstance()
def setUp(self):
    super(TestAvailableJobs, self).setUp()

    from chroma_core.services.job_scheduler.job_scheduler import JobScheduler
    from tests.unit.chroma_core.helpers import load_default_profile

    load_default_profile()

    self.JobScheduler = JobScheduler
    self.js = JobScheduler()

    # Create objects before ObjectCache init, so they are in the cache.
    self.host = synthetic_host()
    (mgt, fs, mdt, ost) = create_simple_fs()
    self.mgs = mgt
    self.fs = fs
    self.mdt = mdt
    self.ost = ost

    # Objects created after this init will not be in the cache.
    ObjectCache.getInstance()
def test_patch_creation(self):
    """Test that creating multiple Targets using PATCH returns a target and a command"""
    host = synthetic_host("myserver")
    self.create_simple_filesystem(host)

    spare_volume_1 = synthetic_volume_full(host)
    spare_volume_2 = synthetic_volume_full(host)

    response = self.api_client.patch(
        "/api/target/",
        data={
            "objects": [
                {"kind": "OST", "filesystem_id": self.fs.id, "volume_id": spare_volume_1.id},
                {"kind": "MDT", "filesystem_id": self.fs.id, "volume_id": spare_volume_2.id},
            ],
            "deletions": [],
        },
    )
    self.assertHttpAccepted(response)
def test_mgs_nid_change(self):
    mgs = synthetic_host("mgs")
    mds = synthetic_host("mds")
    oss = synthetic_host("oss")

    from chroma_core.models import (
        ManagedMgs,
        ManagedMdt,
        ManagedOst,
        ManagedFilesystem,
        ManagedTarget,
        ManagedTargetMount,
    )

    self.mgt, mgt_tms = ManagedMgs.create_for_volume(synthetic_volume_full(mgs).id, name="MGS")
    self.fs = ManagedFilesystem.objects.create(mgs=self.mgt, name="testfs")
    self.mdt, mdt_tms = ManagedMdt.create_for_volume(synthetic_volume_full(mds).id, filesystem=self.fs)
    self.ost, ost_tms = ManagedOst.create_for_volume(synthetic_volume_full(oss).id, filesystem=self.fs)

    ObjectCache.add(ManagedFilesystem, self.fs)
    for target in [self.mgt, self.ost, self.mdt]:
        ObjectCache.add(ManagedTarget, target.managedtarget_ptr)
    for tm in chain(mgt_tms, mdt_tms, ost_tms):
        ObjectCache.add(ManagedTargetMount, tm)

    self.fs = self.set_and_assert_state(self.fs, "available")

    self.mock_servers["mgs"]["nids"] = [Nid.Nid("192.168.0.99", "tcp", 0)]
    self.assertNidsCorrect(mgs)

    JobSchedulerClient.command_run_jobs(
        [{"class_name": "UpdateNidsJob", "args": {"hosts": [api.get_resource_uri(mgs)]}}], "Test update nids"
    )
    self.drain_progress()

    # The -3 index looks past the start/stop that happens after writeconf
    self.assertEqual(MockAgentRpc.host_calls[mgs][-3][0], "writeconf_target")
    self.assertEqual(MockAgentRpc.host_calls[mds][-3][0], "writeconf_target")
    self.assertEqual(MockAgentRpc.host_calls[oss][-3][0], "writeconf_target")
    self.assertState(self.fs, "stopped")
def setUp(self):
    super(TestFilesystemConfParamValidation, self).setUp()
    self.host = synthetic_host("myserver")

    # For PUTs
    self.old_command_run_jobs = JobSchedulerClient.command_run_jobs
    JobSchedulerClient.command_run_jobs = mock.Mock(side_effect=lambda jobs, msg: Command.objects.create().id)
def setUp(self):
    super(TestAdvertisedHostJobs, self).setUp()

    load_default_profile()

    self.host = synthetic_host()
    self.set_managed(True)
    self.host.state = self.normal_host_state
def setUp(self):
    super(TestAvailableTransitions, self).setUp()

    self.js = JobScheduler()

    load_default_profile()

    self.host = synthetic_host()
    self.assertEqual(self.host.state, "managed")
def test_set_state_partial(self):
    """Test operations using partial PUT containing only the state attribute,
    as used in Chroma 1.0.0.0 GUI"""
    host = synthetic_host("myserver")
    self.create_simple_filesystem(host)
    mgt_uri = "/api/target/%s/" % self.mgt.id
    with mock.patch("chroma_core.models.Command.set_state", mock.Mock(return_value=None)):
        self.api_set_state_partial(mgt_uri, "unmounted")
        Command.set_state.assert_called_once()
def test_set_state_full(self):
    """Test operations using a fully populated PUT"""
    host = synthetic_host("myserver")
    self.create_simple_filesystem(host)
    mgt_uri = "/api/target/%s/" % self.mgt.id
    with mock.patch("chroma_core.models.Command.set_state", mock.Mock(return_value=None)):
        self.api_set_state_full(mgt_uri, "unmounted")
        Command.set_state.assert_called_once()
def _host_with_nids(self, address):
    host_nids = {
        "primary-mgs": [Nid.Nid("1.2.3.4", "tcp", 0)],
        "failover-mgs": [Nid.Nid("1.2.3.5", "tcp", 5)],
        "primary-mgs-twonid": [Nid.Nid("1.2.3.4", "tcp", 0), Nid.Nid("4.3.2.1", "tcp", 1)],
        "failover-mgs-twonid": [Nid.Nid("1.2.3.5", "tcp", 5), Nid.Nid("4.3.2.2", "tcp", 1)],
        "othernode": [Nid.Nid("1.2.3.6", "tcp", 0), Nid.Nid("4.3.2.3", "tcp", 1)],
    }

    return synthetic_host(address, host_nids[address])
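# Usage sketch for the helper above (a hypothetical test, not part of the
# original suite): resolves one of the known addresses to a synthetic host.
# It assumes synthetic_host() records its first argument as the host address.
def test_host_with_nids_sketch(self):
    host = self._host_with_nids("primary-mgs-twonid")
    self.assertEqual(host.address, "primary-mgs-twonid")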
def setUp(self):
    super(TestClientManagementJobs, self).setUp()

    load_default_profile()

    worker_profile = self.load_worker_profile()
    self.worker = synthetic_host(server_profile=worker_profile.name)
    self.worker.immutable_state = False
    self.worker.state = self.normal_host_state

    self.server = synthetic_host()
    self.server.immutable_state = False
    self.server.state = self.normal_host_state

    # If the test that just ran imported storage_plugin_manager, it will
    # have instantiated its singleton and created some DB records.
    # Django TestCase rolls back the database, so make sure that we
    # also roll back (reset) this singleton.
    import chroma_core.lib.storage_plugin.manager

    chroma_core.lib.storage_plugin.manager.storage_plugin_manager = (
        chroma_core.lib.storage_plugin.manager.StoragePluginManager()
    )
@classmethod
def setUpTestData(cls):
    super(JobTestCaseWithHost, cls).setUpTestData()

    cls.hosts = []
    for address, info in cls.mock_servers.items():
        host = synthetic_host(address=address, fqdn=info["fqdn"], nids=info["nids"], nodename=info["nodename"])
        cls.hosts.append(host)
def setUp(self):
    super(TestAvailableTransitions, self).setUp()

    self.js = JobScheduler()
    self.volume = synthetic_volume(with_storage=False)

    load_default_profile()

    self.host = synthetic_host()
    self.assertEqual(self.host.state, 'managed')
def test_deleted_volumenode(self):
    """
    Test deleting a VolumeNode means the volume API does not return it.
    """
    host0 = synthetic_host("host0")
    host1 = synthetic_host("host1")

    synthetic_volume_full(host0, secondary_hosts=[host1])

    self.assertEqual(1, len(self._get_volumes()))
    self.assertEqual(1, len(self._get_volumes(host0.id)))
    self.assertEqual(1, len(self._get_volumes(host1.id)))

    VolumeNode.objects.get(host_id=host0.id).mark_deleted()

    self.assertEqual(0, len(self._get_volumes(host0.id)))
    self.assertEqual(1, len(self._get_volumes(host1.id)))

    VolumeNode.objects.filter(host_id=host1.id).delete()

    self.assertEqual(0, len(self._get_volumes(host1.id)))
def create_host_ssh(address, server_profile, root_pw, pkey, pkey_pw):
    # Mock replacement for create_host_ssh: the profile and credential
    # arguments are accepted but ignored.
    host_data = AgentRpc.mock_servers[address]
    host = synthetic_host(address, nids=host_data['nids'], fqdn=host_data['fqdn'], nodename=host_data['nodename'])
    ObjectCache.add(ManagedHost, host)
    command = Command.objects.create(complete=True, message="Mock create_host_ssh")
    return host, command
def setUp(self):
    super(TestRegistration, self).setUp()
    load_default_profile()

    self.old_create_host = JobSchedulerClient.create_host
    JobSchedulerClient.create_host = mock.Mock(
        side_effect=lambda *args, **kwargs: (
            synthetic_host('mynewhost', **self.mock_servers['mynewhost']),
            mock.Mock(id='bar'),
        )
    )
    ValidatedClientView.valid_certs = {}
def test_striping_patch(self):
    """Test OSTs are assigned to alternating hosts."""
    self.create_simple_filesystem(synthetic_host("myserver"))
    # keep hosts in alternating order, but supply them grouped
    hosts = [synthetic_host("myserver{0:d}".format(n)) for n in range(4)] * 2
    objects = [
        {"kind": "OST", "filesystem_id": self.fs.id, "volume_id": synthetic_volume_full(host).id}
        for host in sorted(hosts, key=str)
    ]
    response = self.api_client.patch("/api/target/", data={"deletions": [], "objects": objects})
    self.assertHttpAccepted(response)
    content = json.loads(response.content)
    self.assertEqual(map(str, hosts), list(self._target_hosts(content["targets"])))
def setUp(self):
    super(TestTargetPutValidation, self).setUp()
    self.host = synthetic_host("myserver")
    self.create_simple_filesystem(self.host)

    self.filesystem = self.deserialize(self.api_client.get("/api/filesystem/"))["objects"][0]
    self.mgt = self.filesystem["mgt"]
    self.mdt = self.filesystem["mdts"][0]
    self.ost = self.deserialize(self.api_client.get(self.filesystem["osts"][0]))
def test_one_host(self):
    try:
        dbperf.enabled = True

        with dbperf("create_from_string"):
            host = synthetic_host("myaddress")
        with dbperf("set_state"):
            self.set_state_delayed([(host, "managed")])
        with dbperf("run_next"):
            self.set_state_complete()
        self.assertState(host, "managed")
    finally:
        dbperf.enabled = False