def test_deployVmWithCustomDisk(self):
    """Test custom disk sizes beyond range

    Steps for validation:
      1. listConfigurations - custom.diskoffering.size.min
         and custom.diskoffering.size.max
      2. deployVm with custom disk offering size < min
      3. deployVm with custom disk offering min < size < max
      4. deployVm with custom disk offering size > max

    Validate the following:
      - Cases 2. and 4. (out-of-range sizes) must fail.
      - Only case 3. (in-range size) must succeed.
      - Cleanup of created data disks is handled by the account teardown.
    """
    # Read the lower bound for custom disk sizes from global config.
    config = Configurations.list(
        self.apiclient,
        name="custom.diskoffering.size.min"
    )
    self.assertEqual(
        isinstance(config, list),
        True,
        "custom.diskoffering.size.min should be present in global config"
    )
    # Minimum size of custom disk (in GBs)
    min_size = int(config[0].value)
    self.debug("custom.diskoffering.size.min: %s" % min_size)

    # Read the upper bound for custom disk sizes from global config.
    config = Configurations.list(
        self.apiclient,
        name="custom.diskoffering.size.max"
    )
    self.assertEqual(
        isinstance(config, list),
        True,
        # Fixed message: it previously referenced ...size.min, which made
        # a failure here misleading — this check is for ...size.max.
        "custom.diskoffering.size.max should be present in global config"
    )
    # Maximum size of custom disk (in GBs)
    max_size = int(config[0].value)
    self.debug("custom.diskoffering.size.max: %s" % max_size)

    # Case 2: size just below the minimum must be rejected.
    self.debug("Creating a volume with size less than min cust disk size")
    self.services["custom_volume"]["customdisksize"] = (min_size - 1)
    self.services["custom_volume"]["zoneid"] = self.zone.id
    with self.assertRaises(Exception):
        Volume.create_custom_disk(
            self.apiclient,
            self.services["custom_volume"],
            account=self.account.name,
            domainid=self.account.domainid,
            diskofferingid=self.disk_offering.id
        )
    self.debug("Create volume failed!")

    # Case 4: size just above the maximum must be rejected.
    self.debug("Creating a volume with size more than max cust disk size")
    self.services["custom_volume"]["customdisksize"] = (max_size + 1)
    with self.assertRaises(Exception):
        Volume.create_custom_disk(
            self.apiclient,
            self.services["custom_volume"],
            account=self.account.name,
            domainid=self.account.domainid,
            diskofferingid=self.disk_offering.id
        )
    self.debug("Create volume failed!")

    # Case 3: an in-range size must succeed.
    self.debug("Creating a volume with size more than min cust disk " +
               "but less than max cust disk size")
    self.services["custom_volume"]["customdisksize"] = (min_size + 1)
    try:
        Volume.create_custom_disk(
            self.apiclient,
            self.services["custom_volume"],
            account=self.account.name,
            domainid=self.account.domainid,
            diskofferingid=self.disk_offering.id
        )
        self.debug("Create volume of cust disk size succeeded")
    except Exception as e:
        self.fail("Create volume failed with exception: %s" % e)
    return
def test_01_create_volume(self):
    """Test Volume creation for all Disk Offerings (incl. custom)

    # Validate the following
    # 1. Create volumes from the different sizes
    # 2. Verify the size of volume with actual size allocated
    """
    self.volumes = []

    # One volume per configured offering size.
    for _, offering_services in self.services["volume_offerings"].items():
        created = Volume.create(
            self.apiClient,
            offering_services,
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
            diskofferingid=self.disk_offering.id
        )
        self.debug("Created a volume with ID: %s" % created.id)
        self.volumes.append(created)

    # KVM additionally gets a sparse-provisioned volume.
    if self.virtual_machine.hypervisor == "KVM":
        sparse_volume = Volume.create(
            self.apiClient,
            self.services,
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
            diskofferingid=self.sparse_disk_offering.id
        )
        self.debug("Created a sparse volume: %s" % sparse_volume.id)
        self.volumes.append(sparse_volume)

    # Plus one volume backed by a custom (user-sized) disk offering.
    custom_volume = Volume.create_custom_disk(
        self.apiClient,
        self.services,
        account=self.account.name,
        domainid=self.account.domainid,
    )
    self.debug("Created a volume with custom offering: %s" % custom_volume.id)
    self.volumes.append(custom_volume)

    # Attach a volume with different disk offerings
    # and check the memory allocated to each of them
    sleep_interval = self.services["sleep"]
    for volume in self.volumes:
        vol_listing = Volume.list(self.apiClient, id=volume.id)
        self.assertEqual(
            isinstance(vol_listing, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            vol_listing,
            None,
            "Check if volume exists in ListVolumes"
        )
        self.debug(
            "Attaching volume (ID: %s) to VM (ID: %s)" % (
                volume.id,
                self.virtual_machine.id
            ))
        self.virtual_machine.attach_volume(self.apiClient, volume)

        # Reboot so the guest re-scans its block devices.
        try:
            shell = self.virtual_machine.get_ssh_client()
            self.debug("Rebooting VM %s" % self.virtual_machine.id)
            shell.execute("reboot")
        except Exception as e:
            self.fail("SSH access failed for VM %s - %s" %
                      (self.virtual_machine.ipaddress, e))

        # Poll listVM to ensure VM is started properly
        attempts_left = self.services["timeout"]
        while True:
            time.sleep(sleep_interval)

            # Ensure that VM is in running state
            vm_listing = VirtualMachine.list(
                self.apiClient,
                id=self.virtual_machine.id
            )
            if isinstance(vm_listing, list):
                vm = vm_listing[0]
                if vm.state == 'Running':
                    self.debug("VM state: %s" % vm.state)
                    break
            if attempts_left == 0:
                raise Exception(
                    "Failed to start VM (ID: %s) " % vm.id)
            attempts_left = attempts_left - 1

        expected_size = str(vol_listing[0].size)
        shell = self.virtual_machine.get_ssh_client(reconnect=True)

        # Get the updated volume information
        vol_listing = Volume.list(self.apiClient, id=volume.id)
        if vol_listing[0].hypervisor.lower() == XEN_SERVER.lower():
            device_path = "/dev/xvd" + chr(
                ord('a') + int(vol_listing[0].deviceid))
            self.debug(" Using XenServer volume_name: %s" % (device_path))
            size_check = checkVolumeSize(ssh_handle=shell,
                                         volume_name=device_path,
                                         size_to_verify=expected_size)
        else:
            size_check = checkVolumeSize(ssh_handle=shell,
                                         size_to_verify=expected_size)
        self.debug(" Volume Size Expected %s Actual :%s" %
                   (expected_size, size_check[1]))
        self.virtual_machine.detach_volume(self.apiClient, volume)
        self.assertEqual(size_check[0], SUCCESS,
                         "Check if promised disk size actually available")
        time.sleep(sleep_interval)
def test_01_create_volume(self):
    """Test Volume creation for all Disk Offerings (incl. custom)

    # Validate the following
    # 1. Create volumes from the different sizes
    # 2. Verify the size of volume with actual size allocated
    """
    self.volumes = []

    # Fixed-size volumes: one per entry in the offerings map.
    for _key, offering in self.services["volume_offerings"].items():
        new_volume = Volume.create(self.apiClient,
                                   offering,
                                   zoneid=self.zone.id,
                                   account=self.account.name,
                                   domainid=self.account.domainid,
                                   diskofferingid=self.disk_offering.id)
        self.debug("Created a volume with ID: %s" % new_volume.id)
        self.volumes.append(new_volume)

    # KVM-only: also exercise a sparse disk offering.
    if self.virtual_machine.hypervisor == "KVM":
        sparse_volume = Volume.create(
            self.apiClient,
            self.services,
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
            diskofferingid=self.sparse_disk_offering.id)
        self.debug("Created a sparse volume: %s" % sparse_volume.id)
        self.volumes.append(sparse_volume)

    # Custom (user-sized) disk offering.
    custom = Volume.create_custom_disk(
        self.apiClient,
        self.services,
        account=self.account.name,
        domainid=self.account.domainid,
    )
    self.debug("Created a volume with custom offering: %s" % custom.id)
    self.volumes.append(custom)

    # Attach a volume with different disk offerings
    # and check the memory allocated to each of them
    for volume in self.volumes:
        listed = Volume.list(self.apiClient, id=volume.id)
        self.assertEqual(isinstance(listed, list),
                         True,
                         "Check list response returns a valid list")
        self.assertNotEqual(listed,
                            None,
                            "Check if volume exists in ListVolumes")
        self.debug("Attaching volume (ID: %s) to VM (ID: %s)" %
                   (volume.id, self.virtual_machine.id))
        self.virtual_machine.attach_volume(self.apiClient, volume)

        # Reboot the guest so the newly attached disk is visible.
        try:
            ssh_session = self.virtual_machine.get_ssh_client()
            self.debug("Rebooting VM %s" % self.virtual_machine.id)
            ssh_session.execute("reboot")
        except Exception as e:
            self.fail("SSH access failed for VM %s - %s" %
                      (self.virtual_machine.ipaddress, e))

        # Poll listVM to ensure VM is started properly
        remaining = self.services["timeout"]
        while True:
            time.sleep(self.services["sleep"])

            # Ensure that VM is in running state
            vm_list = VirtualMachine.list(
                self.apiClient,
                id=self.virtual_machine.id)
            if isinstance(vm_list, list):
                vm = vm_list[0]
                if vm.state == 'Running':
                    self.debug("VM state: %s" % vm.state)
                    break
            if remaining == 0:
                raise Exception("Failed to start VM (ID: %s) " % vm.id)
            remaining = remaining - 1

        wanted_size = str(listed[0].size)
        ssh_session = self.virtual_machine.get_ssh_client(reconnect=True)

        # Get the updated volume information
        listed = Volume.list(self.apiClient, id=volume.id)
        hypervisor_name = listed[0].hypervisor.lower()
        if hypervisor_name == XEN_SERVER.lower():
            device = "/dev/xvd" + chr(
                ord('a') + int(listed[0].deviceid))
            self.debug(" Using XenServer volume_name: %s" % (device))
            outcome = checkVolumeSize(ssh_handle=ssh_session,
                                      volume_name=device,
                                      size_to_verify=wanted_size)
        elif hypervisor_name == "kvm":
            device = "/dev/vd" + chr(
                ord('a') + int(listed[0].deviceid))
            self.debug(" Using KVM volume_name: %s" % (device))
            outcome = checkVolumeSize(ssh_handle=ssh_session,
                                      volume_name=device,
                                      size_to_verify=wanted_size)
        else:
            outcome = checkVolumeSize(ssh_handle=ssh_session,
                                      size_to_verify=wanted_size)
        self.debug(" Volume Size Expected %s Actual :%s" %
                   (wanted_size, outcome[1]))
        self.virtual_machine.detach_volume(self.apiClient, volume)
        self.assertEqual(outcome[0], SUCCESS,
                         "Check if promised disk size actually available")
        time.sleep(self.services["sleep"])
def test_01_attach_datadisk_to_vm_on_zwps(self):
    """ Attach Data Disk on ZWPS To VM
        1. Check if zwps storage pool exists.
        2. Adding tag to zone wide primary storage
        3. Launch a VM
        4. Attach data disk to vm.
        5. Verify disk is attached and in correct storage pool.
    """

    # Step 1: there must be a zone-wide pool to target.
    if len(list(self.pools)) < 1:
        self.skipTest("There must be at least one zone wide "
                      "storage pools available in the setup")

    # Step 2: tag the first zone-wide pool so the disk offering lands there.
    StoragePool.update(
        self.apiclient,
        id=self.pools[0].id,
        tags=[CLUSTERTAG1])

    # Step 3: launch the VM the disk will be attached to.
    self.vm = VirtualMachine.create(
        self.apiclient,
        self.testdata["small"],
        templateid=self.template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_zone1.id,
        zoneid=self.zone.id)

    # Create a 1 GB custom data disk as the end user.
    self.testdata["volume"]["zoneid"] = self.zone.id
    self.testdata["volume"]["customdisksize"] = 1
    self.data_volume_created = Volume.create_custom_disk(
        self.userapiclient,
        self.testdata["volume"],
        account=self.account.name,
        domainid=self.account.domainid,
        diskofferingid=self.disk_offering.id,
    )
    self.cleanup.append(self.data_volume_created)

    # Step 4: attach the data disk.
    self.vm.attach_volume(
        self.userapiclient,
        self.data_volume_created)

    data_volumes_list = Volume.list(
        self.userapiclient,
        virtualmachineid=self.vm.id,
        type="DATA",
        listall=True)
    self.debug("list volumes using vm id %s" % dir(data_volumes_list[0]))

    # Re-fetch by id with the admin client to inspect pool placement.
    data_volumes_list = Volume.list(
        self.apiclient,
        id=self.data_volume_created.id,
        listall=True)
    data_volume = data_volumes_list[0]
    status = validateList(data_volume)

    # Step 5: volume must be attached (Ready) and on the tagged pool.
    self.assertEqual(
        status[0],
        PASS,
        "Check: volume list is valid")
    self.assertEqual(
        data_volume.state,
        "Ready",
        "Check: Data volume is attached to VM")
    if data_volume.storage != self.pools[0].name:
        self.fail("check if volume is created in correct storage pool")
    return
def test_01_attach_datadisk_to_vm_on_zwps(self): """ Attach Data Disk on CWPS To VM 1. Check if zwps storage pool exists. 2. Adding tag to zone wide primary storage 3. Launch a VM 4. Attach data disk to vm. 5. Verify disk is attached and in correct storage pool. """ # Step 1 if len(list(self.pools)) < 1: self.skipTest("There must be at least one zone wide \ storage pools available in the setup") # Step 2 # Adding tags to Storage Pools StoragePool.update(self.apiclient, id=self.pools[0].id, tags=[CLUSTERTAG1]) # Launch VM self.vm = VirtualMachine.create( self.apiclient, self.testdata["small"], templateid=self.template.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering_zone1.id, zoneid=self.zone.id) self.testdata["volume"]["zoneid"] = self.zone.id self.testdata["volume"]["customdisksize"] = 1 self.data_volume_created = Volume.create_custom_disk( self.userapiclient, self.testdata["volume"], account=self.account.name, domainid=self.account.domainid, diskofferingid=self.disk_offering.id, ) self.cleanup.append(self.data_volume_created) # Step 4 self.vm.attach_volume(self.userapiclient, self.data_volume_created) data_volumes_list = Volume.list(self.userapiclient, virtualmachineid=self.vm.id, type="DATA", listall=True) self.debug("list volumes using vm id %s" % dir(data_volumes_list[0])) data_volumes_list = Volume.list(self.apiclient, id=self.data_volume_created.id, listall=True) data_volume = data_volumes_list[0] status = validateList(data_volume) # Step 5 self.assertEqual(status[0], PASS, "Check: volume list is valid") self.assertEqual(data_volume.state, "Ready", "Check: Data volume is attached to VM") if data_volume.storage != self.pools[0].name: self.fail("check if volume is created in correct storage pool") return