def test13_update_primary_storage_capacityIops_to_zero(self):
    """Update the primary storage capacity IOPS to 0 and verify it.

    Checks the updated value in CloudStack (listStoragePools) and in
    the backing Datera app-instance, then deletes the primary storage.
    """
    updatedIops = 0
    StoragePool.update(self.apiClient,
                       id=self.primary_storage_id,
                       capacityiops=updatedIops,
                       tags=self.primary_tag)

    # Verify in CloudStack
    storage_pools_response = list_storage_pools(
        self.apiClient, clusterid=self.cluster.id)

    # Fail with a clear message (instead of a NameError) if the pool
    # is missing from the listStoragePools response.
    storage_pool = None
    for data in storage_pools_response:
        if data.id == self.primary_storage_id:
            storage_pool = data
            break
    self.assertIsNotNone(
        storage_pool, "Primary storage not found in CloudStack")

    self.assertEqual(
        storage_pool.capacityiops, updatedIops,
        "Primary storage capacityiops not updated")

    # Verify in Datera
    datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
    datera_instance = None
    for instance in self.datera_api.app_instances.list():
        if instance['name'] == datera_primary_storage_name:
            datera_instance = instance
            break
    self.assertIsNotNone(
        datera_instance, "app-instance not found in Datera")

    app_instance_response_iops = (
        datera_instance['storage_instances']
        ['storage-1']['volumes']['volume-1']['performance_policy']
        ['total_iops_max'])

    self.assertEqual(
        app_instance_response_iops, updatedIops,
        "app-instance capacityiops not updated")

    StoragePool.delete(self.primary_storage, self.apiClient)
    self.cleanup = []
def test06_primary_storage_cancel_maintenance_mode(self):
    """Enable then cancel maintenance mode on the primary storage.

    Verifies the pool is back "Up" in CloudStack, "online" in Datera,
    and that the XenServer SR is attached again, then deletes the pool.
    """
    StoragePool.enableMaintenance(self.apiClient,
                                  id=self.primary_storage_id)
    StoragePool.cancelMaintenance(self.apiClient,
                                  id=self.primary_storage_id)

    # Verify in CloudStack
    storage_pools_response = list_storage_pools(
        self.apiClient, clusterid=self.cluster.id)

    # Fail with a clear message (instead of a NameError) if the pool
    # is missing from the listStoragePools response.
    storage_pool = None
    for storage in storage_pools_response:
        if storage.id == self.primary_storage_id:
            storage_pool = storage
            break
    self.assertIsNotNone(
        storage_pool, "Primary storage not found in CloudStack")
    self.assertEqual(
        storage_pool.state, "Up",
        "Primary storage not in up mode")

    # Verify in Datera
    datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
    datera_instance = None
    for instance in self.datera_api.app_instances.list():
        if instance['name'] == datera_primary_storage_name:
            datera_instance = instance
            break
    self.assertIsNotNone(
        datera_instance, "app-instance not found in Datera")
    self.assertEqual(
        datera_instance["admin_state"], "online",
        "app-instance not in online mode")

    # Verify in XenServer: an attached SR must not allow forget/destroy.
    xen_sr = None
    for key, value in self.xen_session.xenapi.SR.get_all_records().items():
        if value['name_description'] == self.primary_storage_id:
            xen_sr = value
            break
    self.assertIsNotNone(xen_sr, "SR not found in XenServer")
    self.assertEqual(
        set(["forget", "destroy"]).issubset(xen_sr["allowed_operations"]),
        False, "Xenserver SR in offline mode")

    StoragePool.delete(self.primary_storage, self.apiClient)
    self.cleanup = []
def test13_update_primary_storage_capacityIops_to_zero(self):
    """Set the primary storage capacity IOPS to zero and check that the
    new value shows up both in CloudStack and in the Datera
    app-instance, then remove the primary storage."""
    new_iops = 0
    StoragePool.update(self.apiClient,
                       id=self.primary_storage_id,
                       capacityiops=new_iops,
                       tags=self.primary_tag)

    # CloudStack side: locate our pool in the cluster listing.
    pools = list_storage_pools(self.apiClient, clusterid=self.cluster.id)
    for pool in pools:
        if pool.id == self.primary_storage_id:
            storage_pool = pool
    self.assertEqual(storage_pool.capacityiops, new_iops,
                     "Primary storage capacityiops not updated")

    # Datera side: locate the backing app-instance by its derived name.
    backend_name = "cloudstack-" + self.primary_storage_id
    for app_inst in self.datera_api.app_instances.list():
        if app_inst['name'] == backend_name:
            datera_instance = app_inst
    volume_policy = (datera_instance['storage_instances']['storage-1']
                     ['volumes']['volume-1']['performance_policy'])
    self.assertEqual(volume_policy['total_iops_max'], new_iops,
                     "app-instance capacityiops not updated")

    StoragePool.delete(self.primary_storage, self.apiClient)
    self.cleanup = []
def test06_primary_storage_cancel_maintenance_mode(self):
    """Put the primary storage into maintenance mode and cancel it
    again, then confirm the pool is usable in CloudStack, Datera and
    XenServer before deleting it."""
    StoragePool.enableMaintenance(self.apiClient,
                                  id=self.primary_storage_id)
    StoragePool.cancelMaintenance(self.apiClient,
                                  id=self.primary_storage_id)

    # CloudStack: the pool must be back in the "Up" state.
    pools = list_storage_pools(self.apiClient, clusterid=self.cluster.id)
    for pool in pools:
        if pool.id == self.primary_storage_id:
            storage_pool = pool
    self.assertEqual(storage_pool.state, "Up",
                     "Primary storage not in up mode")

    # Datera: the backing app-instance must be online again.
    backend_name = "cloudstack-" + self.primary_storage_id
    for app_inst in self.datera_api.app_instances.list():
        if app_inst['name'] == backend_name:
            datera_instance = app_inst
    self.assertEqual(datera_instance["admin_state"], "online",
                     "app-instance not in online mode")

    # XenServer: an attached SR does not expose forget/destroy.
    for sr_ref, sr_rec in \
            self.xen_session.xenapi.SR.get_all_records().items():
        if sr_rec['name_description'] == self.primary_storage_id:
            xen_sr = sr_rec
    self.assertEqual(
        set(["forget", "destroy"]).issubset(xen_sr["allowed_operations"]),
        False, "Xenserver SR in offline mode")

    StoragePool.delete(self.primary_storage, self.apiClient)
    self.cleanup = []
def test04_delete_primary_storage(self):
    """Delete the primary storage and verify no trace of it remains.

    Checks CloudStack (listStoragePools), the Datera backend, the
    XenServer SR records and the `storage_pool` database table.
    """
    #cleanup_resources(self.apiClient, self._primary_storage)
    StoragePool.delete(self.primary_storage, self.apiClient)
    self.cleanup = []

    # Verify in CloudStack. listStoragePools returns an empty/None
    # response once the cluster has no pools, so guard before iterating
    # (the unguarded iteration crashed when the pool list was empty).
    storage_pools_response = list_storage_pools(
        self.apiClient, clusterid=self.cluster.id)
    if storage_pools_response:
        for storage in storage_pools_response:
            self.assertNotEqual(
                storage.id, self.primary_storage_id,
                "Primary storage not deleted")

    # Verify in Datera
    flag = 0
    datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
    for item in self.datera_api.app_instances.list():
        if item['name'] == datera_primary_storage_name:
            flag = 1
    self.assertEqual(flag, 0, "app instance not deleted.")

    # Verify in XenServer
    for key, value in self.xen_session.xenapi.SR.get_all_records().items():
        self.assertNotEqual(
            value['name_description'], self.primary_storage_id,
            "SR not deleted in xenserver")

    # Verify in the SQL database. `row` avoids shadowing the stdlib
    # `uuid` module (the original loop variable was named `uuid`).
    command = "select uuid from storage_pool"
    sql_result = self.dbConnection.execute(command)
    key = 0
    for row in sql_result:
        if row[0] == self.primary_storage_id:
            key = 1
    self.assertEqual(key, 0, "Primary storage not deleted in database")
def test_03_try_delete_primary_with_snapshots(self):
    """Primary storage holding a snapshot must not be deletable.

    Creates a VM, snapshots its ROOT volume, deletes the VM and then
    attempts to delete the StorPool primary storage; the delete is
    expected to fail while the snapshot still lives on the pool.
    """
    virtual_machine = VirtualMachine.create(
        self.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=self.zone.id,
        templateid=self.template.id,
        serviceofferingid=self.serviceOfferings.id,
        hypervisor=self.hypervisor,
        rootdisksize=10)
    volume = list_volumes(
        self.apiclient,
        virtualmachineid=virtual_machine.id,
        type="ROOT")
    volume = volume[0]

    # The StorPool volume name is the last component of the volume path.
    name = volume.path.split("/")[3]
    try:
        spvolume = self.spapi.volumeList(volumeName="~" + name)
        if spvolume[0].templateName != self.template_name:
            raise Exception(
                "Storpool volume's template %s is not with the same "
                "template %s" %
                (spvolume[0].templateName, self.template_name))
    except spapi.ApiError as err:
        raise Exception(err)

    snapshot = Snapshot.create(
        self.apiclient,
        volume_id=volume.id,
    )
    # `pool_id` avoids shadowing the `id` builtin (was `id`).
    pool_id = self.helper.get_snapshot_template_id(
        self.apiclient, snapshot, self.storage_pool_id)
    if pool_id is None:
        raise Exception("There isn't primary storage id")
    virtual_machine.delete(self.apiclient, expunge=True)

    pool = list_storage_pools(self.apiclient, id=pool_id)
    if pool[0].name == self.template_name:
        try:
            StoragePool.delete(self.sp_primary_storage, self.apiclient)
        except Exception as err:
            # Expected path: the pool still holds the snapshot, so the
            # delete fails; take the pool out of maintenance again.
            StoragePool.cancelMaintenance(
                self.apiclient, id=self.sp_primary_storage.id)
            self.debug(
                "Storage pool could not be deleted due to %s" % err)
    else:
        self.cleanup.append(snapshot)
        raise Exception("Snapshot is not on the same pool")
    Snapshot.delete(snapshot, self.apiclient)
def cleanUpCloudStack(cls):
    """Tear down everything the test class created: the accumulated
    CloudStack resources, the StorPool primary storage, the service
    offering, and the StorPool volume template on both the remote
    cluster and the local one."""
    remote_api = spapi.Api.fromConfig()
    remote_cluster = cls.helper.get_remote_storpool_cluster()
    try:
        # Release the accumulated CloudStack resources first.
        cleanup_resources(cls.apiclient, cls.cleanup)
        StoragePool.delete(cls.sp_primary_storage, cls.apiclient)
        ServiceOffering.delete(cls.serviceOfferings, cls.apiclient)
        # Drop the volume template remotely, then locally.
        remote_api.volumeTemplateDelete(
            templateName=cls.template_name, clusterName=remote_cluster)
        remote_api.volumeTemplateDelete(templateName=cls.template_name)
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
def test07_update_primary_storage_capacityBytes(self):
    """Update the primary storage capacity (bytes) and verify it.

    Checks the new size in CloudStack and in the Datera app-instance
    (Datera reports the volume size in GiB), then deletes the pool.
    """
    updatedDiskSize = self.testdata[TestData.newCapacityBytes]
    StoragePool.update(self.apiClient,
                       id=self.primary_storage_id,
                       capacitybytes=updatedDiskSize,
                       tags=self.primary_tag)

    # Verify in CloudStack
    storage_pools_response = list_storage_pools(
        self.apiClient, clusterid=self.cluster.id)

    # Fail with a clear message (instead of a NameError) if the pool
    # is missing from the listStoragePools response.
    storage_pool = None
    for data in storage_pools_response:
        if data.id == self.primary_storage_id:
            storage_pool = data
            break
    self.assertIsNotNone(
        storage_pool, "Primary storage not found in CloudStack")
    self.assertEqual(
        storage_pool.disksizetotal, updatedDiskSize,
        "Primary storage not updated")

    # Verify in Datera
    datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
    datera_instance = None
    for instance in self.datera_api.app_instances.list():
        if instance['name'] == datera_primary_storage_name:
            datera_instance = instance
            break
    self.assertIsNotNone(
        datera_instance, "app-instance not found in Datera")

    # Datera reports the size in GiB; convert to bytes for comparison.
    app_instance_response_disk_size = (
        datera_instance['storage_instances']
        ['storage-1']['volumes']['volume-1']['size'] * 1073741824)

    self.assertEqual(
        app_instance_response_disk_size, updatedDiskSize,
        "app-instance not updated")

    # Verify in xenserver
    #for key, value in self.xen_session.xenapi.SR.get_all_records().items():
    #    if value['name_description'] == self.primary_storage_id:
    #        xen_sr = value
    #Uncomment after xen fix
    #print xen_sr
    #print xen_sr['physical_size'], updatedDiskSize
    #self.assertEqual(
    #    int(xen_sr['physical_size']) + 12582912, updatedDiskSize,
    #    "Xen server physical storage not updated")

    StoragePool.delete(self.primary_storage, self.apiClient)
    self.cleanup = []
def test07_update_primary_storage_capacityBytes(self):
    """Resize the primary storage to the configured new capacity and
    confirm both CloudStack and the Datera backend report the change,
    then remove the primary storage."""
    new_size = self.testdata[TestData.newCapacityBytes]
    StoragePool.update(self.apiClient,
                       id=self.primary_storage_id,
                       capacitybytes=new_size,
                       tags=self.primary_tag)

    # CloudStack side: find our pool and check its total disk size.
    pools = list_storage_pools(self.apiClient, clusterid=self.cluster.id)
    for pool in pools:
        if pool.id == self.primary_storage_id:
            storage_pool = pool
    self.assertEqual(storage_pool.disksizetotal, new_size,
                     "Primary storage not updated")

    # Datera side: the app-instance volume size is reported in GiB.
    backend_name = "cloudstack-" + self.primary_storage_id
    for app_inst in self.datera_api.app_instances.list():
        if app_inst['name'] == backend_name:
            datera_instance = app_inst
    backend_size_bytes = (
        datera_instance['storage_instances']['storage-1']['volumes']
        ['volume-1']['size'] * 1073741824)
    self.assertEqual(backend_size_bytes, new_size,
                     "app-instance not updated")

    # Verify in xenserver
    #for key, value in self.xen_session.xenapi.SR.get_all_records().items():
    #    if value['name_description'] == self.primary_storage_id:
    #        xen_sr = value
    #Uncomment after xen fix
    #print xen_sr
    #print xen_sr['physical_size'], updatedDiskSize
    #self.assertEqual(
    #    int(xen_sr['physical_size']) + 12582912, updatedDiskSize,
    #    "Xen server physical storage not updated")

    StoragePool.delete(self.primary_storage, self.apiClient)
    self.cleanup = []
def test04_delete_primary_storage(self):
    """Remove the primary storage and make sure no trace of it remains
    in CloudStack, Datera, XenServer or the cloud database."""
    #cleanup_resources(self.apiClient, self._primary_storage)
    StoragePool.delete(self.primary_storage, self.apiClient)
    self.cleanup = []

    # CloudStack must no longer list the pool in the cluster.
    pools = list_storage_pools(
        self.apiClient, clusterid=self.cluster.id)
    if len(pools) > 0:
        for pool in pools:
            self.assertNotEqual(pool.id, self.primary_storage_id,
                                "Primary storage not deleted")

    # Datera must no longer have the backing app-instance.
    found = 0
    backend_name = "cloudstack-" + self.primary_storage_id
    for app_inst in self.datera_api.app_instances.list():
        if app_inst['name'] == backend_name:
            found = 1
    self.assertEqual(found, 0, "app instance not deleted.")

    # XenServer must no longer have the SR.
    for sr_ref, sr_rec in \
            self.xen_session.xenapi.SR.get_all_records().items():
        self.assertNotEqual(sr_rec['name_description'],
                            self.primary_storage_id,
                            "SR not deleted in xenserver")

    # The storage_pool table must not contain the pool's uuid.
    command = "select uuid from storage_pool"
    sql_result = self.dbConnection.execute(command)
    present = 0
    for row in sql_result:
        if row[0] == self.primary_storage_id:
            present = 1
    self.assertEqual(present, 0,
                     "Primary storage not deleted in database")
storages = StoragePool.list(apiClient) if storages: for storage in storages: print "storage name={}, id={}".format(storage.name, storage.id) if storage.state == 'Maintenance': print "delete StoragePool" cmd = deleteStoragePool.deleteStoragePoolCmd() cmd.id = storage.id cmd.forced = 'True' apiClient.deleteStoragePool(cmd) else: print "Delete StoragePool" s = StoragePool(tmp_dict) s.id = storage.id s.forced = 'True' s.delete(apiClient) # hosts = Host.list(apiClient) # if hosts: # for host in hosts: # print "host name={}, id={}".format(host.name, host.id) # if host.type == 'Routing': # h = Host(tmp_dict) # if host.resourcestate != 'PrepareForMaintenance' \ # and host.resourcestate != 'Maintenance': # print "Maintenance for host" # h.enableMaintenance(apiClient, host.id) hosts = Host.list(apiClient) if hosts: for host in hosts:
def test_04_try_delete_primary_with_template(self):
    """Primary storage holding a template must not be deletable.

    Creates a VM, snapshots its ROOT volume (bypassing secondary
    storage), builds a template from the snapshot, and then attempts to
    delete the StorPool primary storage; the delete is expected to fail
    while the template still lives on the pool.
    """
    virtual_machine = VirtualMachine.create(
        self.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=self.zone.id,
        templateid=self.template.id,
        serviceofferingid=self.serviceOfferings.id,
        hypervisor=self.hypervisor,
        rootdisksize=10)
    volume = list_volumes(
        self.apiclient,
        virtualmachineid=virtual_machine.id,
        type="ROOT",
        listall=True)
    volume = volume[0]

    # The StorPool volume name is the last component of the volume path.
    name = volume.path.split("/")[3]
    try:
        spvolume = self.spapi.volumeList(volumeName="~" + name)
        if spvolume[0].templateName != self.template_name:
            raise Exception(
                "Storpool volume's template %s is not with the same "
                "template %s" %
                (spvolume[0].templateName, self.template_name))
    except spapi.ApiError as err:
        raise Exception(err)

    # Make sure snapshots go straight to primary storage.
    backup_config = list_configurations(
        self.apiclient, name="sp.bypass.secondary.storage")
    if backup_config[0].value == "false":
        backup_config = Configurations.update(
            self.apiclient,
            name="sp.bypass.secondary.storage",
            value="true")

    snapshot = Snapshot.create(
        self.apiclient,
        volume_id=volume.id,
    )
    self.debug("###################### %s" % snapshot)
    # `pool_id` avoids shadowing the `id` builtin (was `id`).
    pool_id = self.helper.get_snapshot_template_id(
        self.apiclient, snapshot, self.storage_pool_id)
    if pool_id is None:
        raise Exception("There isn't primary storage id")
    virtual_machine.delete(self.apiclient, expunge=True)
    pool = list_storage_pools(self.apiclient, id=pool_id)

    services = {
        "displaytext": "Template-1",
        "name": "Template-1-name",
        "ostypeid": self.template.ostypeid,
        "ispublic": "true"
    }
    template = Template.create_from_snapshot(
        self.apiclient, snapshot=snapshot, services=services)
    Snapshot.delete(snapshot, self.apiclient)
    try:
        StoragePool.delete(self.sp_primary_storage, self.apiclient)
    except Exception as err:
        # Expected path: the pool still holds the template, so the
        # delete fails; take the pool out of maintenance again.
        StoragePool.cancelMaintenance(
            self.apiclient, id=self.sp_primary_storage.id)
        self.debug("Storage pool could not be deleted due to %s" % err)
    Template.delete(template, self.apiclient)