def test_create_template_snapshot(self, value):
    """Test create snapshot and templates from volume

    # Validate the following
    # 1. Deploy VM with custom disk offering and check the
    #    primary storage resource count
    # 2. Stop the VM and create Snapshot from VM's volume
    # 3. Create volume again from this snapshot and attach to VM
    # 4. Verify that primary storage count increases by the volume size
    # 5. Detach and delete volume, verify primary storage count
    #    decreases by volume size"""

    if self.hypervisor.lower() in ['hyperv']:
        self.skipTest("Snapshots feature is not supported on Hyper-V")
    response = self.setupAccount(value)
    self.debug(response[0])
    self.debug(response[1])
    self.assertEqual(response[0], PASS, response[1])

    apiclient = self.apiclient
    if value == CHILD_DOMAIN_ADMIN:
        apiclient = self.testClient.getUserApiClient(
            UserName=self.account.name,
            DomainName=self.account.domain)
        self.assertNotEqual(
            apiclient, FAIL,
            "Failure while getting api client of account: %s" %
            self.account.name)

    try:
        self.virtualMachine.stop(apiclient)
    except Exception as e:
        self.fail("Failed to stop instance: %s" % e)

    expectedCount = self.initialResourceCount
    response = matchResourceCount(self.apiclient, expectedCount,
                                  RESOURCE_PRIMARY_STORAGE,
                                  accountid=self.account.id)
    self.assertEqual(response[0], PASS, response[1])

    self.debug("Creating snapshot from ROOT volume: %s" %
               self.virtualMachine.name)
    snapshot = None
    response = createSnapshotFromVirtualMachineVolume(
        apiclient, self.account, self.virtualMachine.id)
    self.assertEqual(response[0], PASS, response[1])
    snapshot = response[1]

    response = matchResourceCount(self.apiclient, expectedCount,
                                  RESOURCE_PRIMARY_STORAGE,
                                  accountid=self.account.id)
    self.assertEqual(response[0], PASS, response[1])

    try:
        self.services["volume"]["size"] = \
            self.services["disk_offering"]["disksize"]
        volume = Volume.create_from_snapshot(
            apiclient,
            snapshot_id=snapshot.id,
            services=self.services["volume"],
            account=self.account.name,
            domainid=self.account.domainid)
        self.debug("Attaching the volume to vm: %s" %
                   self.virtualMachine.name)
        self.virtualMachine.attach_volume(apiclient, volume)
    except Exception as e:
        self.fail("Failure in volume operation: %s" % e)

    expectedCount += int(self.services["volume"]["size"])
    response = matchResourceCount(self.apiclient, expectedCount,
                                  RESOURCE_PRIMARY_STORAGE,
                                  accountid=self.account.id)
    self.assertEqual(response[0], PASS, response[1])

    try:
        self.virtualMachine.detach_volume(apiclient, volume)
    except Exception as e:
        self.fail("Failure in detach volume operation: %s" % e)

    try:
        self.debug("deleting the volume: %s" % volume.name)
        volume.delete(apiclient)
    except Exception as e:
        self.fail("Failure while deleting volume: %s" % e)

    expectedCount -= int(self.services["volume"]["size"])
    response = matchResourceCount(self.apiclient, expectedCount,
                                  RESOURCE_PRIMARY_STORAGE,
                                  accountid=self.account.id)
    self.assertEqual(response[0], PASS, response[1])
    return
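# The assertions above lean on Marvin's matchResourceCount helper, which
# re-reads the account's primary storage count until it settles. For
# readers unfamiliar with it, a minimal polling sketch of the same idea
# follows; fetch_count, POLL_INTERVAL and MAX_ATTEMPTS are hypothetical
# names used only for illustration, not part of the Marvin API.
import time

PASS, FAIL = 1, 2          # mirrors the PASS/FAIL convention in marvin.codes
POLL_INTERVAL = 5          # seconds between retries (assumed)
MAX_ATTEMPTS = 10          # give the async count time to settle (assumed)

def match_resource_count(fetch_count, expected, tolerance=0):
    """Poll a zero-argument callable returning the current primary
    storage count (in GB) until it matches `expected`. Returns a
    (status, message) pair, loosely like matchResourceCount."""
    for _ in range(MAX_ATTEMPTS):
        actual = fetch_count()
        if abs(actual - expected) <= tolerance:
            return (PASS, "resource count %s matches expected" % actual)
        time.sleep(POLL_INTERVAL)
    return (FAIL, "count %s never reached expected %s" % (actual, expected))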
def test_06_create_volume_from_non_native_snapshot(self):
    old_supports_resign = self.supports_resign
    self._set_supports_resign(False)

    primary_storage_db_id = self._get_cs_storage_pool_db_id(
        self.primary_storage)

    virtual_machine = VirtualMachine.create(
        self.apiClient,
        self.testdata[TestData.virtualMachine],
        accountid=self.account.name,
        zoneid=self.zone.id,
        serviceofferingid=self.compute_offering.id,
        templateid=self.template.id,
        domainid=self.domain.id,
        startvm=True)
    self.assertEqual(virtual_machine.state.lower(), "running",
                     TestSnapshots._vm_not_in_running_state_err_msg)

    list_volumes_response = list_volumes(
        self.apiClient,
        virtualmachineid=virtual_machine.id,
        listall=True)
    self._check_list(
        list_volumes_response, 1,
        TestSnapshots._should_only_be_one_volume_in_list_err_msg)

    vm_1_root_volume = list_volumes_response[0]
    dt_volume_1 = self._get_dt_volume_for_cs_volume(vm_1_root_volume)
    self.assertNotEqual(dt_volume_1, None,
                        TestSnapshots._should_be_a_valid_volume_err)

    vol_snap_a = self._create_and_test_non_native_snapshot(
        vm_1_root_volume.id, primary_storage_db_id, 1,
        TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)

    services = {
        "diskname": "Vol-1",
        "zoneid": self.testdata[TestData.zoneId],
        "size": 100,
        "ispublic": True
    }

    volume_created_from_snapshot = Volume.create_from_snapshot(
        self.apiClient,
        vol_snap_a.id,
        services,
        account=self.account.name,
        domainid=self.domain.id)

    dt_snapshot_volume = self._get_dt_volume_for_cs_volume(
        volume_created_from_snapshot)
    self.assertNotEqual(dt_snapshot_volume, None,
                        TestSnapshots._should_be_a_valid_volume_err)

    volume_created_from_snapshot = virtual_machine.attach_volume(
        self.apiClient, volume_created_from_snapshot)

    self._delete_and_test_non_native_snapshot(vol_snap_a)

    virtual_machine.delete(self.apiClient, True)

    list_volumes_response = list_volumes(self.apiClient, listall=True)
    self._check_list(
        list_volumes_response, 1,
        TestSnapshots._should_only_be_one_volume_in_list_err_msg)

    data_volume = list_volumes_response[0]
    data_volume_2 = Volume(data_volume.__dict__)
    data_volume_2.delete(self.apiClient)

    self._get_dt_volume_for_cs_volume(data_volume, should_exist=False)

    self._set_supports_resign(old_supports_resign)
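# The save/restore of supports_resign above is not exception-safe: an
# assertion failure mid-test would leave resigning disabled for later
# tests. A sketch of a try/finally variant as a context manager; `suite`
# is any object exposing the supports_resign attribute and the
# _set_supports_resign() helper used in this class (both assumed from
# the code above, not a public Marvin API).
from contextlib import contextmanager

@contextmanager
def resign_disabled(suite):
    old_supports_resign = suite.supports_resign
    suite._set_supports_resign(False)
    try:
        yield
    finally:
        # Restore the original value even if the test body raised
        suite._set_supports_resign(old_supports_resign)

# usage: with resign_disabled(self): ... test body ...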
def test_02_multiple_domains_primary_storage_limits(self):
    """Test primary storage counts in multiple child domains

    # Steps
    1. Create a parent domain and two sub-domains in it (also admin
       accounts of each domain)
    Repeat the following steps for both child domains:
    2. Deploy VM in child domain
    3. Check if the resource count for the domain is updated correctly
    4. Create a volume and attach it to the VM
    5. Check if the primary storage resource count is updated correctly
    """

    # Setting up account and domain hierarchy
    result = self.setupAccounts()
    self.assertEqual(
        result[0], PASS,
        "Failure while setting up accounts and domains: %s" % result[1])
    users = result[2]

    templatesize = (self.template.size / (1024 ** 3))

    for domain, admin in list(users.items()):
        self.account = admin
        self.domain = domain
        apiclient = self.testClient.getUserApiClient(
            UserName=self.account.name,
            DomainName=self.account.domain)
        self.assertNotEqual(
            apiclient, FAILED,
            "Failed to create api client for account: %s" %
            self.account.name)
        try:
            vm = VirtualMachine.create(
                apiclient,
                self.services["virtual_machine"],
                accountid=self.account.name,
                domainid=self.account.domainid,
                diskofferingid=self.disk_offering.id,
                serviceofferingid=self.service_offering.id)
            self.cleanup.append(vm)

            expectedCount = templatesize + self.disk_offering.disksize
            result = isDomainResourceCountEqualToExpectedCount(
                self.apiclient, self.domain.id,
                expectedCount, RESOURCE_PRIMARY_STORAGE)
            self.assertFalse(result[0], result[1])
            self.assertTrue(result[2], "Resource count does not match")

            # Creating disk offering with a 10 GB volume
            self.services["disk_offering"]["disksize"] = 10
            disk_offering_10_GB = DiskOffering.create(
                self.apiclient,
                services=self.services["disk_offering"])
            self.cleanup.append(disk_offering_10_GB)

            volume = Volume.create(
                apiclient,
                self.services["volume"],
                zoneid=self.zone.id,
                account=self.account.name,
                domainid=self.account.domainid,
                diskofferingid=disk_offering_10_GB.id)
            # keep on the cleanup list in case we get an exception
            # in the next few lines
            self.cleanup.append(volume)

            volumeSize = (volume.size / (1024 ** 3))
            expectedCount += volumeSize
            vm.attach_volume(apiclient, volume=volume)
            # we can't clean up an attached volume
            self.cleanup.remove(volume)

            result = isDomainResourceCountEqualToExpectedCount(
                self.apiclient, self.domain.id,
                expectedCount, RESOURCE_PRIMARY_STORAGE)
            self.assertFalse(result[0], result[1])
            self.assertTrue(result[2], "Resource count does not match")
        except Exception as e:
            self.fail("Failure: %s" % e)
    return
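# The expected count above is simple arithmetic: the template size
# converted from bytes to GB (it sizes the ROOT disk) plus the disk
# offering sizes, which are already in GB. A minimal sketch of that
# bookkeeping, with a made-up 2 GB template as the example:
BYTES_PER_GB = 1024 ** 3

def expected_primary_storage_gb(template_size_bytes, *disk_sizes_gb):
    """Expected primary storage usage in GB: ROOT disk sized by the
    template, DATA disks by their disk offerings (already in GB)."""
    return template_size_bytes // BYTES_PER_GB + sum(disk_sizes_gb)

# e.g. a 2 GB template plus a 10 GB data disk:
assert expected_primary_storage_gb(2 * BYTES_PER_GB, 10) == 12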
def test_02_increase_volume_size_above_account_limit(self):
    """Test increasing volume size above the account limit

    # Validate the following
    # 1. Create a domain and its admin account
    # 2. Set account primary storage limit more than (5 GB volume +
    #    template size of VM) and less than (20 GB volume + template
    #    size of VM)
    # 3. Deploy a VM without any disk offering (only root disk)
    # 4. Create a volume of 5 GB in the account and attach it to the VM
    # 5. Try to resize the volume to 20 GB
    # 6. Resize operation should fail"""

    # Setting up account and domain hierarchy
    result = self.setupAccounts()
    self.assertEqual(result[0], PASS, result[1])

    templateSize = (self.template.size / (1024 ** 3))
    accountLimit = ((templateSize + self.disk_offering_20_GB.disksize) - 1)
    response = self.updateResourceLimits(accountLimit=accountLimit)
    self.assertEqual(response[0], PASS, response[1])

    apiclient = self.testClient.getUserApiClient(
        UserName=self.parentd_admin.name,
        DomainName=self.parentd_admin.domain)
    self.assertNotEqual(
        apiclient, FAILED,
        "Failed to get api client of account: %s" %
        self.parentd_admin.name)

    try:
        virtualMachine = VirtualMachine.create(
            apiclient,
            self.services["virtual_machine"],
            accountid=self.parentd_admin.name,
            domainid=self.parent_domain.id,
            serviceofferingid=self.service_offering.id)

        volume = Volume.create(
            apiclient,
            self.services["volume"],
            zoneid=self.zone.id,
            account=self.parentd_admin.name,
            domainid=self.parent_domain.id,
            diskofferingid=self.disk_offering_5_GB.id)

        virtualMachine.attach_volume(apiclient, volume=volume)

        expectedCount = (templateSize + self.disk_offering_5_GB.disksize)
        response = matchResourceCount(self.apiclient, expectedCount,
                                      RESOURCE_PRIMARY_STORAGE,
                                      accountid=self.parentd_admin.id)
        if response[0] == FAIL:
            raise Exception(response[1])
    except Exception as e:
        self.fail("Failed with exception: %s" % e)

    if self.hypervisor == str(XEN_SERVER).lower():
        virtualMachine.stop(self.apiclient)

    with self.assertRaises(Exception):
        volume.resize(apiclient,
                      diskofferingid=self.disk_offering_20_GB.id)
    return
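# The limit arithmetic is what makes the negative test deterministic:
# the account limit is set to template + 20 GB - 1, so the 5 GB disk
# fits but a resize to 20 GB must be rejected. A sketch of the check,
# assuming all sizes in GB and a hypothetical 2 GB template:
def resize_allowed(limit_gb, template_gb, new_disk_gb):
    """True if resizing the data disk to new_disk_gb stays within the
    account's primary storage limit (template ROOT disk included)."""
    return template_gb + new_disk_gb <= limit_gb

template_gb = 2                      # hypothetical template size
limit_gb = template_gb + 20 - 1      # as set in the test above
assert resize_allowed(limit_gb, template_gb, 5)        # 5 GB fits
assert not resize_allowed(limit_gb, template_gb, 20)   # 20 GB must fail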
def setUpClass(cls):
    testClient = super(TestCreateTemplate, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls._cleanup = []
    cls.services = testClient.getParsedTestDataConfig()
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    if cls.hypervisor.lower() in ['lxc']:
        # Template creation from root volume is not supported in LXC
        cls.unsupportedHypervisor = True
        return

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    cls.services['mode'] = cls.zone.networktype

    try:
        cls.disk_offering = DiskOffering.create(
            cls.apiclient, cls.services["disk_offering"])
        cls._cleanup.append(cls.disk_offering)

        template = get_template(cls.apiclient, cls.zone.id,
                                cls.services["ostype"])
        if template == FAILED:
            assert False, ("get_template() failed to return template "
                           "with description %s" % cls.services["ostype"])

        cls.services["template"]["ostypeid"] = template.ostypeid
        cls.services["template_2"]["ostypeid"] = template.ostypeid
        cls.services["ostypeid"] = template.ostypeid
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["volume"]["diskoffering"] = cls.disk_offering.id
        cls.services["volume"]["zoneid"] = cls.zone.id
        cls.services["sourcezoneid"] = cls.zone.id

        cls.account = Account.create(cls.apiclient,
                                     cls.services["account"],
                                     domainid=cls.domain.id)
        cls._cleanup.append(cls.account)

        cls.service_offering = ServiceOffering.create(
            cls.apiclient, cls.services["service_offerings"])
        cls._cleanup.append(cls.service_offering)

        # create virtual machine
        cls.virtual_machine = VirtualMachine.create(
            cls.apiclient,
            cls.services["virtual_machine"],
            templateid=template.id,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering.id,
            mode=cls.services["mode"])

        # Stop virtual machine
        cls.virtual_machine.stop(cls.apiclient)

        list_volume = Volume.list(cls.apiclient,
                                  virtualmachineid=cls.virtual_machine.id,
                                  type='ROOT',
                                  listall=True)
        cls.volume = list_volume[0]
    except Exception as e:
        cls.tearDownClass()
        raise unittest.SkipTest("Exception in setUpClass: %s" % e)
    return
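# The `_cleanup` list above pairs with a tearDownClass that removes the
# resources in reverse creation order, so dependents (VMs, volumes) go
# before the account and offerings they belong to. Marvin ships a
# cleanup_resources() helper in marvin.lib.utils for this; a hand-rolled
# sketch of the same idea, where `resources` holds Marvin objects whose
# delete() takes the api client:
def cleanup_in_reverse(apiclient, resources):
    for resource in reversed(resources):
        try:
            resource.delete(apiclient)
        except Exception as e:
            # Log and continue so one failed delete does not leak the rest
            print("Warning: failed to clean up %s: %s" % (resource, e))
    del resources[:]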
def test_01_volume_from_snapshot(self):
    """Test creating snapshot from volume having spaces in name (KVM)"""
    # Validate the following
    # 1. Create a virtual machine and data volume
    # 2. Attach data volume to VM
    # 3. Login to machine; create temp/test directories on data volume
    #    and write some random data
    # 4. Snapshot the Volume
    # 5. Create another Volume from snapshot
    # 6. Mount/Attach volume to another virtual machine
    # 7. Compare data, data should match

    random_data_0 = random_gen(size=100)
    random_data_1 = random_gen(size=100)
    self.debug("random_data_0 : %s" % random_data_0)
    self.debug("random_data_1: %s" % random_data_1)

    try:
        ssh_client = self.virtual_machine.get_ssh_client()
    except Exception as e:
        self.fail("SSH failed for VM: %s" %
                  self.virtual_machine.ipaddress)

    volume = Volume.create(self.apiclient,
                           self.services["volume"],
                           zoneid=self.zone.id,
                           account=self.account.name,
                           domainid=self.account.domainid,
                           diskofferingid=self.disk_offering.id)
    self.debug("Created volume with ID: %s" % volume.id)
    self.virtual_machine.attach_volume(self.apiclient, volume)
    self.debug("Attach volume: %s to VM: %s" %
               (volume.id, self.virtual_machine.id))

    self.debug("Formatting volume: %s to ext3" % volume.id)
    # Format partition using ext3. Note that this is the second data
    # disk partition of the virtual machine, as it already had a data
    # disk before attaching the new volume; hence datadiskdevice_2.
    format_volume_to_ext3(
        ssh_client,
        self.services["volume"][self.hypervisor]["datadiskdevice_2"])
    cmds = [
        "fdisk -l",
        "mkdir -p %s" % self.services["paths"]["mount_dir"],
        "mount -t ext3 %s1 %s" % (
            self.services["volume"][self.hypervisor]["datadiskdevice_2"],
            self.services["paths"]["mount_dir"]),
        "mkdir -p %s/%s/{%s,%s} " % (
            self.services["paths"]["mount_dir"],
            self.services["paths"]["sub_dir"],
            self.services["paths"]["sub_lvl_dir1"],
            self.services["paths"]["sub_lvl_dir2"]),
        "echo %s > %s/%s/%s/%s" % (
            random_data_0,
            self.services["paths"]["mount_dir"],
            self.services["paths"]["sub_dir"],
            self.services["paths"]["sub_lvl_dir1"],
            self.services["paths"]["random_data"]),
        "echo %s > %s/%s/%s/%s" % (
            random_data_1,
            self.services["paths"]["mount_dir"],
            self.services["paths"]["sub_dir"],
            self.services["paths"]["sub_lvl_dir2"],
            self.services["paths"]["random_data"]),
        "cat %s/%s/%s/%s" % (
            self.services["paths"]["mount_dir"],
            self.services["paths"]["sub_dir"],
            self.services["paths"]["sub_lvl_dir1"],
            self.services["paths"]["random_data"])
    ]
    for c in cmds:
        self.debug("Command: %s" % c)
        result = ssh_client.execute(c)
        self.debug(result)

    # Unmount the data disk
    cmds = [
        "umount %s" % (self.services["paths"]["mount_dir"]),
    ]
    for c in cmds:
        self.debug("Command: %s" % c)
        ssh_client.execute(c)

    list_volume_response = Volume.list(
        self.apiclient,
        virtualmachineid=self.virtual_machine.id,
        type='DATADISK',
        id=volume.id)
    self.assertEqual(isinstance(list_volume_response, list), True,
                     "Check list volume response for valid data")
    volume_response = list_volume_response[0]

    # Create snapshot from attached volume
    snapshot = Snapshot.create(self.apiclient,
                               volume_response.id,
                               account=self.account.name,
                               domainid=self.account.domainid)
    self.debug("Created snapshot: %s" % snapshot.id)

    # Create volume from snapshot
    volume_from_snapshot = Volume.create_from_snapshot(
        self.apiclient,
        snapshot.id,
        self.services["volume"],
        account=self.account.name,
        domainid=self.account.domainid)

    # Detach the volume from virtual machine
    self.virtual_machine.detach_volume(self.apiclient, volume)
    self.debug("Detached volume: %s from VM: %s" %
               (volume.id, self.virtual_machine.id))

    self.debug("Created Volume: %s from Snapshot: %s" %
               (volume_from_snapshot.id, snapshot.id))
    volumes = Volume.list(self.apiclient, id=volume_from_snapshot.id)
    self.assertEqual(isinstance(volumes, list), True,
                     "Check list response returns a valid list")
    self.assertNotEqual(len(volumes), None,
                        "Check Volume list Length")
    self.assertEqual(volumes[0].id, volume_from_snapshot.id,
                     "Check Volume in the List Volumes")

    # Attaching volume to new VM
    new_virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["server_without_disk"],
        templateid=self.template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        mode=self.services["mode"])
    self.debug("Deployed new VM for account: %s" % self.account.name)
    # self.cleanup.append(new_virtual_machine)

    self.debug("Attaching volume: %s to VM: %s" %
               (volume_from_snapshot.id, new_virtual_machine.id))
    new_virtual_machine.attach_volume(self.apiclient,
                                      volume_from_snapshot)

    # Rebooting is required so that newly attached disks are detected
    self.debug("Rebooting : %s" % new_virtual_machine.id)
    new_virtual_machine.reboot(self.apiclient)

    try:
        # Login to VM to verify test directories and files
        ssh = new_virtual_machine.get_ssh_client()

        # Mount datadiskdevice_1 because this is the first data disk
        # of the new virtual machine
        cmds = [
            "fdisk -l",
            "mkdir -p %s" % self.services["paths"]["mount_dir"],
            "mount -t ext3 %s1 %s" % (
                self.services["volume"][self.hypervisor]["datadiskdevice_1"],
                self.services["paths"]["mount_dir"]),
        ]
        for c in cmds:
            self.debug("Command: %s" % c)
            result = ssh.execute(c)
            self.debug(result)

        returned_data_0 = ssh.execute(
            "cat %s/%s/%s/%s" % (
                self.services["paths"]["mount_dir"],
                self.services["paths"]["sub_dir"],
                self.services["paths"]["sub_lvl_dir1"],
                self.services["paths"]["random_data"]))
        returned_data_1 = ssh.execute(
            "cat %s/%s/%s/%s" % (
                self.services["paths"]["mount_dir"],
                self.services["paths"]["sub_dir"],
                self.services["paths"]["sub_lvl_dir2"],
                self.services["paths"]["random_data"]))
    except Exception as e:
        self.fail("SSH access failed for VM: %s, Exception: %s" %
                  (new_virtual_machine.ipaddress, e))

    self.debug("returned_data_0: %s" % returned_data_0[0])
    self.debug("returned_data_1: %s" % returned_data_1[0])

    # Verify returned data
    self.assertEqual(
        random_data_0, returned_data_0[0],
        "Verify newly attached volume contents with existing one")
    self.assertEqual(
        random_data_1, returned_data_1[0],
        "Verify newly attached volume contents with existing one")

    # Unmount the data disk on the new VM (hence `ssh`, not the first
    # VM's `ssh_client`)
    cmds = [
        "umount %s" % (self.services["paths"]["mount_dir"]),
    ]
    for c in cmds:
        self.debug("Command: %s" % c)
        ssh.execute(c)
    return
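# The comparison above is a plain string equality of file contents read
# back over SSH, which is fine for 100-byte payloads. For larger
# payloads, checksumming both sides is the usual approach; a local
# sketch of just the hashing step (no SSH involved, data is made up):
import hashlib

def sha256_of(data):
    """Hex digest of a bytes payload."""
    return hashlib.sha256(data).hexdigest()

written = b"some random payload"
read_back = b"some random payload"   # what `cat` would return from the new VM
assert sha256_of(written) == sha256_of(read_back)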
def test_04_template_from_snapshot(self):
    """Create Template from snapshot"""
    # Validate the following
    # 2. Snapshot the Root disk
    # 3. Create Template from snapshot
    # 4. Deploy Virtual machine using this template
    # 5. VM should be in running state

    if self.hypervisor.lower() in ['hyperv', 'lxc']:
        self.skipTest("Snapshots feature is not supported on %s" %
                      self.hypervisor.lower())

    userapiclient = self.testClient.getUserApiClient(
        UserName=self.account.name,
        DomainName=self.account.domain)

    volumes = Volume.list(userapiclient,
                          virtualmachineid=self.virtual_machine.id,
                          type='ROOT',
                          listall=True)
    volume = volumes[0]

    self.debug("Creating a snapshot from volume: %s" % volume.id)
    # Create a snapshot of volume
    snapshot = Snapshot.create(userapiclient,
                               volume.id,
                               account=self.account.name,
                               domainid=self.account.domainid)

    self.debug("Creating a template from snapshot: %s" % snapshot.id)
    # Generate template from the snapshot
    template = Template.create_from_snapshot(userapiclient,
                                             snapshot,
                                             self.services["template"])
    self.cleanup.append(template)

    # Verify created template
    templates = Template.list(
        userapiclient,
        templatefilter=self.services["template"]["templatefilter"],
        id=template.id)
    self.assertNotEqual(templates, None,
                        "Check if result exists in list item call")
    self.assertEqual(templates[0].id, template.id,
                     "Check new template id in list resources call")

    self.debug("Deploying a VM from template: %s" % template.id)
    # Deploy new virtual machine using template
    virtual_machine = VirtualMachine.create(
        userapiclient,
        self.services["virtual_machine"],
        templateid=template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
    )
    self.cleanup.append(virtual_machine)

    vm_response = VirtualMachine.list(userapiclient,
                                      id=virtual_machine.id,
                                      account=self.account.name,
                                      domainid=self.account.domainid)
    self.assertEqual(isinstance(vm_response, list), True,
                     "Check for list VM response return valid list")

    # Verify VM response to check whether VM deployment was successful
    self.assertNotEqual(len(vm_response), 0,
                        "Check VMs available in List VMs response")
    vm = vm_response[0]
    self.assertEqual(vm.state, 'Running',
                     "Check the state of VM created from Template")
    return
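# The final assertion expects the VM to already be 'Running'; deployment
# APIs that return asynchronously usually need a poll before such a
# check. A minimal sketch, with `get_vm_state` as a hypothetical
# zero-argument callable returning the current state string:
import time

def wait_for_state(get_vm_state, wanted="Running", interval=10, attempts=30):
    """Poll a state getter until it reports `wanted` or attempts run out."""
    for _ in range(attempts):
        state = get_vm_state()
        if state == wanted:
            return state
        time.sleep(interval)
    raise TimeoutError("VM never reached state %s (last: %s)" %
                       (wanted, state))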
def test_05_resize_detached_vol_uuid(self):
    # The volume was created with a UUID name, but after the DB update
    # it has to be addressed by its globalId
    volume = Volume.list(self.apiclient, id=self.volume6.id)
    self.helper.resizing_volume(volume[0], globalid=True)
def setUpCloudStack(cls):
    super(TestMigrationFromUuidToGlobalIdVolumes, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)

    with open(cls.ARGS.cfg) as json_text:
        cfg.logger.info(cls.ARGS.cfg)
        cfg.logger.info(json_text)
        conf = json.load(json_text)
        cfg.logger.info(conf)
        zone = conf['mgtSvr'][0].get('zone')

    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
    cfg.logger.info("Starting CloudStack")
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d",
                    cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")

    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False,
                            bufsize=0,
                            stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)

    cfg.logger.info("Proceeding with the setup")
    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()
    v = dbclient.execute(
        "select * from configuration where name='sp.migration.to.global.ids.completed'"
    )
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute(
            "update configuration set value='false' where name='sp.migration.to.global.ids.completed'"
        )
        cfg.logger.info("DB configuration table was updated %s", update)

    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, zone_name=zone)
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)
    cls.host = list_hosts(cls.apiclient, zoneid=cls.zone.id)

    # The version of CentOS has to be supported
    cls.template = get_template(cls.apiclient, cls.zone.id,
                                account="system")
    if cls.template == FAILED:
        assert False, ("get_template() failed to return template "
                       "with description %s" % cls.services["ostype"])

    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id

    cls.sp_template_1 = "-".join(["test-ssd-b", random_gen()])
    cfg.logger.info(
        pprint.pformat("############################ %s" % cls.zone))
    storpool_primary_storage = {
        "name": cls.sp_template_1,
        "zoneid": cls.zone.id,
        "url": "SP_API_HTTP=10.2.87.30:81;SP_AUTH_TOKEN=1234567890;SP_TEMPLATE=%s" % cls.sp_template_1,
        "scope": "zone",
        "capacitybytes": 564325555333,
        "capacityiops": 155466,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": cls.sp_template_1
    }
    cls.storpool_primary_storage = storpool_primary_storage
    host, port, auth = cls.getCfgFromUrl(
        url=storpool_primary_storage["url"])
    cls.spapi = spapi.Api(host=host, port=port, auth=auth)

    storage_pool = list_storage_pools(
        cls.apiclient, name=storpool_primary_storage["name"])
    if storage_pool is None:
        newTemplate = sptypes.VolumeTemplateCreateDesc(
            name=storpool_primary_storage["name"],
            placeAll="ssd",
            placeTail="ssd",
            placeHead="ssd",
            replication=1)
        template_on_local = cls.spapi.volumeTemplateCreate(newTemplate)
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.primary_storage = storage_pool

    storpool_service_offerings_ssd = {
        "name": cls.sp_template_1,
        "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": cls.sp_template_1
    }
    service_offerings_ssd = list_service_offering(
        cls.apiclient, name=storpool_service_offerings_ssd["name"])
    if service_offerings_ssd is None:
        service_offerings_ssd = ServiceOffering.create(
            cls.apiclient, storpool_service_offerings_ssd)
    else:
        service_offerings_ssd = service_offerings_ssd[0]
    cls.service_offering = service_offerings_ssd
    cls._cleanup.append(cls.service_offering)
    cfg.logger.info(pprint.pformat(cls.service_offering))

    cls.sp_template_2 = "-".join(["test-ssd2-b", random_gen()])
    storpool_primary_storage2 = {
        "name": cls.sp_template_2,
        "zoneid": cls.zone.id,
        "url": "SP_API_HTTP=10.2.87.30:81;SP_AUTH_TOKEN=1234567890;SP_TEMPLATE=%s" % cls.sp_template_2,
        "scope": "zone",
        "capacitybytes": 564325555333,
        "capacityiops": 1554,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": cls.sp_template_2
    }
    cls.storpool_primary_storage2 = storpool_primary_storage2
    storage_pool = list_storage_pools(
        cls.apiclient, name=storpool_primary_storage2["name"])
    if storage_pool is None:
        newTemplate = sptypes.VolumeTemplateCreateDesc(
            name=storpool_primary_storage2["name"],
            placeAll="ssd",
            placeTail="ssd",
            placeHead="ssd",
            replication=1)
        template_on_local = cls.spapi.volumeTemplateCreate(newTemplate)
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage2)
    else:
        storage_pool = storage_pool[0]
    cls.primary_storage2 = storage_pool

    storpool_service_offerings_ssd2 = {
        "name": cls.sp_template_2,
        "displaytext": "SP_CO_2",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "tags": cls.sp_template_2
    }
    service_offerings_ssd2 = list_service_offering(
        cls.apiclient, name=storpool_service_offerings_ssd2["name"])
    if service_offerings_ssd2 is None:
        service_offerings_ssd2 = ServiceOffering.create(
            cls.apiclient, storpool_service_offerings_ssd2)
    else:
        service_offerings_ssd2 = service_offerings_ssd2[0]
    cls.service_offering2 = service_offerings_ssd2
    cls._cleanup.append(cls.service_offering2)

    os.killpg(cls.mvn_proc_grp, signal.SIGTERM)
    time.sleep(30)

    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d",
                    cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False,
                            bufsize=0,
                            stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)

    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    disk_offering_20 = list_disk_offering(cls.apiclient, name="Medium")
    disk_offering_100 = list_disk_offering(cls.apiclient, name="Large")
    assert disk_offering is not None
    assert disk_offering_20 is not None
    assert disk_offering_100 is not None
    cls.disk_offering = disk_offering[0]
    cls.disk_offering_20 = disk_offering_20[0]
    cls.disk_offering_100 = disk_offering_100[0]

    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]

    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine)

    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine2)

    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine3)

    cls.virtual_machine4 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine4)

    cls.virtual_machine5 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine5)

    cls.virtual_machine6 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine6)

    cls.volume1 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_1],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume1)

    cls.volume2 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_2],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume2)

    cls.volume3 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_3],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume3)

    cls.volume4 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_4],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume4)

    cls.volume5 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_5],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume5)

    cls.volume6 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_6],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume6)

    cls.volume7 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_7],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)

    cls.volume8 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_7],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)

    cls.virtual_machine.stop(cls.apiclient, forced=True)

    cls.volume_on_sp_1 = cls.virtual_machine.attach_volume(
        cls.apiclient, cls.volume1)

    vol = list_volumes(cls.apiclient, id=cls.volume3.id)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume3)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume3)
    vol = list_volumes(cls.apiclient, id=cls.volume3.id)
    cls.volume_on_sp_3 = vol[0]

    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume2)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume2)

    cls.virtual_machine3.attach_volume(cls.apiclient, cls.volume4)
    cls.virtual_machine3.detach_volume(cls.apiclient, cls.volume4)

    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume5)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume5)

    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume6)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume6)

    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume7)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume7)

    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume8)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume8)

    cls.virtual_machine.start(cls.apiclient)

    list_root = list_volumes(cls.apiclient,
                             virtualmachineid=cls.virtual_machine5.id,
                             type="ROOT")

    cls.snapshot_uuid1 = Snapshot.create(cls.apiclient,
                                         volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid1)
    cls.snapshot_uuid2 = Snapshot.create(cls.apiclient,
                                         volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid2)

    # Snapshot on secondary
    cls.helper.bypass_secondary(False)
    cls.snapshot_uuid_on_secondary = Snapshot.create(
        cls.apiclient, volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_on_secondary)

    cls.snapshot_uuid3 = Snapshot.create(cls.apiclient,
                                         volume_id=cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid3)
    cls.snapshot_uuid4 = Snapshot.create(cls.apiclient,
                                         volume_id=cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid4)

    cls.snapshot_uuid_bypassed = Snapshot.create(cls.apiclient,
                                                 volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_bypassed)

    Volume.delete(cls.volume7, cls.apiclient)

    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
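# The tail -f loop in the setup above blocks forever if Jetty never
# prints its start line. A sketch of the same wait with a timeout,
# reading the log file directly instead of spawning tail; the file name
# and marker string come from the code above, the timeout and poll
# interval are assumptions:
import time

def wait_for_log_line(path, marker, timeout=600, poll=2):
    """Scan a growing log file until `marker` appears or `timeout` expires."""
    deadline = time.time() + timeout
    offset = 0
    while time.time() < deadline:
        with open(path, "r", errors="replace") as f:
            f.seek(offset)
            chunk = f.read()
            offset = f.tell()
        if marker in chunk:
            return True
        time.sleep(poll)
    return False

# e.g. wait_for_log_line(cfg.misc_name, "[INFO] Started Jetty Server")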
def setUpClass(cls):
    cls.testClient = super(TestTemplates, cls).getClsTestClient()
    cls.api_client = cls.testClient.getApiClient()
    cls.hypervisor = cls.testClient.getHypervisorInfo()
    cls.services = Services().services

    # Get Zone, Domain and templates
    cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
    cls.domain = get_domain(cls.api_client)
    cls.services['mode'] = cls.zone.networktype

    template = get_template(
        cls.api_client,
        cls.zone.id,
        cls.services["ostype"]
    )
    cls.templateSupported = True
    cls._cleanup = []

    if cls.hypervisor.lower() in ['lxc']:
        cls.templateSupported = False
        return

    cls.services["virtual_machine"]["zoneid"] = cls.zone.id
    try:
        cls.account = Account.create(
            cls.api_client,
            cls.services["account"],
            domainid=cls.domain.id
        )
        cls._cleanup.append(cls.account)
        cls.services["account"] = cls.account.name

        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"],
        )
        cls._cleanup.append(cls.service_offering)

        # create virtual machine
        cls.virtual_machine = VirtualMachine.create(
            cls.api_client,
            cls.services["virtual_machine"],
            templateid=template.id,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering.id,
        )

        # Stop virtual machine
        cls.virtual_machine.stop(cls.api_client)

        listvolumes = Volume.list(
            cls.api_client,
            virtualmachineid=cls.virtual_machine.id,
            type='ROOT',
            listall=True
        )
        assert validateList(listvolumes)[0] == PASS, \
            "volumes list is empty"
        cls.volume = listvolumes[0]
    except Exception as e:
        cls.tearDownClass()
        raise unittest.SkipTest("Exception in setUpClass: %s" % e)
def test_01_positive_test_1(self):
    """Positive test for volume life cycle

    # 1. Deploy a vm [vm1] with shared storage and data disk
    # 2. Deploy a vm [vm2] with shared storage without data disk
    # 3.
    # 4. Create a new volume and attach to vm2
    # 5. Detach data disk from vm1 and download it
    #    Variance(1-9)
    # 6. Upload volume by providing url of downloaded volume in step 5
    # 7. Attach the volume to a different vm - vm2
    # 8. Try to delete an attached volume
    # 9. Create template from root volume of VM1
    # 10. Create new VM using the template created in step 9
    # 11. Delete the template
    # 12. Detach the disk from VM2 and re-attach the disk to VM1
    # 13.
    # 14.
    # 15. Migrate volume (detached) and then attach to a vm and
    #     live-migrate
    # 16. Upload volume of size smaller than
    #     storage.max.volume.upload.size (leaving the negative case)
    # 17. NA
    # 18.
    # 19. NA
    # 20. Detach data disks from VM2 and delete volume
    """
    # 1. Deploy a vm [vm1] with shared storage and data disk
    self.virtual_machine_1 = VirtualMachine.create(
        self.userapiclient,
        self.testdata["small"],
        templateid=self.template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_1.id,
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering_1.id,
        mode=self.testdata["mode"])
    verify_vm(self, self.virtual_machine_1.id)

    # List data volume for vm1
    list_volume = Volume.list(self.userapiclient,
                              virtualmachineid=self.virtual_machine_1.id,
                              type='DATADISK')
    self.assertEqual(validateList(list_volume)[0], PASS,
                     "Check List volume response for vm id %s" %
                     self.virtual_machine_1.id)
    list_data_volume_for_vm1 = list_volume[0]
    self.assertEqual(len(list_volume), 1,
                     "There is no data disk attached to vm id:%s" %
                     self.virtual_machine_1.id)
    self.assertEqual(list_data_volume_for_vm1.virtualmachineid,
                     str(self.virtual_machine_1.id),
                     "Check if volume state (attached) is reflected")

    # 2. Deploy a vm [vm2] with shared storage without data disk
    self.virtual_machine_2 = VirtualMachine.create(
        self.userapiclient,
        self.testdata["small"],
        templateid=self.template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_1.id,
        zoneid=self.zone.id,
        mode=self.testdata["mode"])
    verify_vm(self, self.virtual_machine_2.id)

    # 4. Create a new volume and attach to vm2
    self.volume = Volume.create(self.userapiclient,
                                services=self.testdata["volume"],
                                diskofferingid=self.disk_offering_1.id,
                                zoneid=self.zone.id)
    list_data_volume = Volume.list(self.userapiclient, id=self.volume.id)
    self.assertEqual(validateList(list_data_volume)[0], PASS,
                     "Check List volume response for volume %s" %
                     self.volume.id)
    self.assertEqual(list_data_volume[0].id, self.volume.id,
                     "check list volume response for volume id: %s" %
                     self.volume.id)
    self.debug("volume id %s got created successfully" %
               list_data_volume[0].id)

    # Attach volume to vm2
    self.virtual_machine_2.attach_volume(self.userapiclient, self.volume)
    verify_attach_volume(self, self.virtual_machine_2.id, self.volume.id)

    # Variance
    if self.zone.localstorageenabled:
        # V1. Create vm3 with local storage offering
        self.virtual_machine_local_3 = VirtualMachine.create(
            self.userapiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering_2.id,
            zoneid=self.zone.id,
            mode=self.testdata["mode"])
        verify_vm(self, self.virtual_machine_local_3.id)

        # V2. Create two data disks on local storage
        self.local_volumes = []
        for i in range(2):
            local_volume = Volume.create(
                self.userapiclient,
                services=self.testdata["volume"],
                diskofferingid=self.disk_offering_local.id,
                zoneid=self.zone.id)
            list_local_data_volume = Volume.list(self.userapiclient,
                                                 id=local_volume.id)
            self.assertEqual(validateList(list_local_data_volume)[0],
                             PASS,
                             "Check List volume response for volume %s" %
                             local_volume.id)
            self.assertEqual(list_local_data_volume[0].id,
                             local_volume.id,
                             "check list volume response for volume id: %s" %
                             local_volume.id)
            self.debug("volume id %s got created successfully" %
                       list_local_data_volume[0].id)
            self.local_volumes.append(local_volume)

        # V3. Attach local disk to vm1
        self.virtual_machine_1.attach_volume(self.userapiclient,
                                             self.local_volumes[0])
        verify_attach_volume(self, self.virtual_machine_1.id,
                             self.local_volumes[0].id)

    if self.list_storage:
        # V4. Create vm4 with zone wide storage
        self.virtual_machine_zone_4 = VirtualMachine.create(
            self.userapiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.tagged_so.id,
            zoneid=self.zone.id,
            mode=self.testdata["mode"])
        verify_vm(self, self.virtual_machine_zone_4.id)

        # V5. Create two data disks on zone wide storage
        self.zone_volumes = []
        for i in range(2):
            zone_volume = Volume.create(
                self.userapiclient,
                services=self.testdata["volume"],
                diskofferingid=self.disk_offering_tagged.id,
                zoneid=self.zone.id)
            list_zone_data_volume = Volume.list(self.userapiclient,
                                                id=zone_volume.id)
            self.assertEqual(validateList(list_zone_data_volume)[0],
                             PASS,
                             "Check List volume response for volume %s" %
                             zone_volume.id)
            self.assertEqual(list_zone_data_volume[0].id,
                             zone_volume.id,
                             "check list volume response for volume id: %s" %
                             zone_volume.id)
            self.debug("volume id:%s got created successfully" %
                       list_zone_data_volume[0].id)
            self.zone_volumes.append(zone_volume)

        # V6. Attach data disk running on ZWPS to VM1 (root disk on shared)
        self.virtual_machine_1.attach_volume(self.userapiclient,
                                             self.zone_volumes[0])
        verify_attach_volume(self, self.virtual_machine_1.id,
                             self.zone_volumes[0].id)

        # V7. Create a cluster wide volume and attach to vm running on
        #     zone wide storage
        self.cluster_volume = Volume.create(
            self.userapiclient,
            services=self.testdata["volume"],
            diskofferingid=self.disk_offering_1.id,
            zoneid=self.zone.id)
        list_cluster_volume = Volume.list(self.userapiclient,
                                          id=self.cluster_volume.id)
        self.assertEqual(validateList(list_cluster_volume)[0], PASS,
                         "Check List volume response for volume %s" %
                         self.cluster_volume.id)
        self.assertEqual(list_cluster_volume[0].id,
                         str(self.cluster_volume.id),
                         "volume does not exist %s" %
                         self.cluster_volume.id)
        self.debug("volume id %s got created successfully" %
                   list_cluster_volume[0].id)
        self.virtual_machine_zone_4.attach_volume(self.userapiclient,
                                                  self.cluster_volume)
        verify_attach_volume(self, self.virtual_machine_zone_4.id,
                             self.cluster_volume.id)

    if self.list_storage and self.zone.localstorageenabled:
        # V8. Attach zone wide volume to vm running on local storage
        self.virtual_machine_local_3.attach_volume(self.userapiclient,
                                                   self.zone_volumes[1])
        verify_attach_volume(self, self.virtual_machine_local_3.id,
                             self.zone_volumes[1].id)

        # V9. Attach local volume to a vm running on zone wide storage
        self.virtual_machine_zone_4.attach_volume(self.userapiclient,
                                                  self.local_volumes[1])
        verify_attach_volume(self, self.virtual_machine_zone_4.id,
                             self.local_volumes[1].id)

    # 5. Detach data disk from vm1 and download it
    self.virtual_machine_1.detach_volume(self.userapiclient,
                                         volume=list_data_volume_for_vm1)
    verify_detach_volume(self, self.virtual_machine_1.id,
                         list_data_volume_for_vm1.id)

    # download detached volume
    self.extract_volume = Volume.extract(
        self.userapiclient,
        volume_id=list_data_volume_for_vm1.id,
        zoneid=self.zone.id,
        mode='HTTP_DOWNLOAD')
    self.debug("extracted url is %s" % self.extract_volume.url)
    try:
        formatted_url = urllib.unquote_plus(self.extract_volume.url)
        self.debug("Attempting to download volume at url %s" %
                   formatted_url)
        response = urllib.urlopen(formatted_url)
        self.debug("response from volume url %s" % response.getcode())
        fd, path = tempfile.mkstemp()
        self.debug("Saving volume %s to path %s" %
                   (list_data_volume_for_vm1.id, path))
        os.close(fd)
        with open(path, 'wb') as fd:
            fd.write(response.read())
        self.debug("Saved volume successfully")
    except Exception:
        self.fail("Extract Volume Failed with invalid URL %s (vol id: %s)" %
                  (self.extract_volume, list_data_volume_for_vm1.id))

    # Need to get the format of the downloaded volume; use the default
    # format VHD unless the URL says otherwise
    if "OVA" in self.extract_volume.url.upper():
        self.testdata["upload_volume"]["format"] = "OVA"
    if "QCOW2" in self.extract_volume.url.upper():
        self.testdata["upload_volume"]["format"] = "QCOW2"

    # 6. Upload volume by providing url of downloaded volume in step 5
    self.upload_response = Volume.upload(
        self.userapiclient,
        zoneid=self.zone.id,
        url=self.extract_volume.url,
        services=self.testdata["upload_volume"])
    self.upload_response.wait_for_upload(self.userapiclient)
    self.debug("uploaded volume id is %s" % self.upload_response.id)

    # 7. Attach the volume to a different vm - vm2
    self.virtual_machine_2.attach_volume(self.userapiclient,
                                         volume=self.upload_response)
    verify_attach_volume(self, self.virtual_machine_2.id,
                         self.upload_response.id)

    # 8. Try to delete an attached volume
    try:
        self.volume.delete(self.userapiclient)
        self.fail("Volume got deleted in attached state %s " %
                  self.volume.id)
    except Exception as e:
        self.debug("Attached volume deletion failed because %s" % e)

    # 9. Create template from root volume of VM1
    #    (stop VM -> create template -> start vm)
    self.virtual_machine_1.stop(self.userapiclient)
    self.list_root_disk_for_vm1 = Volume.list(
        self.userapiclient,
        virtualmachineid=self.virtual_machine_1.id,
        type='ROOT')
    self.assertEqual(validateList(self.list_root_disk_for_vm1)[0], PASS,
                     "Check List volume response for vm %s" %
                     self.virtual_machine_1.id)
    self.assertEqual(len(self.list_root_disk_for_vm1), 1,
                     "list root disk for vm1 is empty : %s" %
                     self.virtual_machine_1.id)
    self.template_from_vm1_root_disk = Template.create(
        self.userapiclient,
        self.testdata["template"],
        self.list_root_disk_for_vm1[0].id,
        account=self.account.name,
        domainid=self.account.domainid)
    list_template = Template.list(
        self.userapiclient,
        templatefilter=self.testdata["templatefilter"],
        id=self.template_from_vm1_root_disk.id)
    self.assertEqual(validateList(list_template)[0], PASS,
                     "Check List template response for template id %s" %
                     self.template_from_vm1_root_disk.id)
    self.assertEqual(len(list_template), 1,
                     "list template response is empty for template id : %s" %
                     list_template[0].id)
    self.assertEqual(list_template[0].id,
                     self.template_from_vm1_root_disk.id,
                     "list template id is not same as created template")
    self.debug("Template id:%s got created successfully" %
               self.template_from_vm1_root_disk.id)
    self.virtual_machine_1.start(self.userapiclient)

    # 10. Deploy a vm using template created from vm1's root disk
    self.virtual_machine_3 = VirtualMachine.create(
        self.userapiclient,
        self.testdata["small"],
        templateid=self.template_from_vm1_root_disk.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_1.id,
        zoneid=self.zone.id,
        mode=self.testdata["mode"])
    verify_vm(self, self.virtual_machine_3.id)

    # 11. Delete the template created from root disk of vm1
    try:
        self.template_from_vm1_root_disk.delete(self.userapiclient)
        self.debug("Template id: %s got deleted successfully" %
                   self.template_from_vm1_root_disk.id)
    except Exception as e:
        raise Exception("Template deletion failed with error %s" % e)
    list_template = Template.list(
        self.userapiclient,
        templatefilter=self.testdata["templatefilter"],
        id=self.template_from_vm1_root_disk.id)
    self.assertEqual(list_template, None,
                     "Template is not deleted, id %s:" %
                     self.template_from_vm1_root_disk.id)
    self.debug("Template id %s got deleted successfully" %
               self.template_from_vm1_root_disk.id)

    # List vm and check the state of vm
    verify_vm(self, self.virtual_machine_3.id)

    # 12. Detach the disk from VM2 and re-attach the disk to VM1
    self.virtual_machine_2.detach_volume(self.userapiclient,
                                         volume=self.upload_response)
    verify_detach_volume(self, self.virtual_machine_2.id,
                         self.upload_response.id)
    self.virtual_machine_1.attach_volume(self.userapiclient,
                                         volume=self.upload_response)
    verify_attach_volume(self, self.virtual_machine_1.id,
                         self.upload_response.id)

    # 15. Migrate volume (detached) and then attach to a vm and
    #     live-migrate
    self.migrate_volume = Volume.create(
        self.userapiclient,
        services=self.testdata["volume"],
        diskofferingid=self.disk_offering_1.id,
        zoneid=self.zone.id)
    list_volume = Volume.list(self.apiclient, id=self.migrate_volume.id)
    self.assertEqual(validateList(list_volume)[0], PASS,
                     "Check List volume response for volume %s" %
                     self.migrate_volume.id)
    self.assertEqual(list_volume[0].id, str(self.migrate_volume.id),
                     "volume does not exist %s" %
                     self.migrate_volume.id)
    self.debug("volume id %s got created successfully" %
               list_volume[0].id)

    self.virtual_machine_1.attach_volume(self.userapiclient,
                                         self.migrate_volume)
    verify_attach_volume(self, self.virtual_machine_1.id,
                         self.migrate_volume.id)

    self.virtual_machine_1.detach_volume(self.userapiclient,
                                         volume=self.migrate_volume)
    verify_detach_volume(self, self.virtual_machine_1.id,
                         self.migrate_volume.id)

    list_volume = Volume.list(self.apiclient, id=self.migrate_volume.id)
    self.assertEqual(validateList(list_volume)[0], PASS,
                     "Check List volume response for volume %s" %
                     self.migrate_volume.id)
    self.assertEqual(list_volume[0].id, str(self.migrate_volume.id),
                     "volume does not exist %s" %
                     self.migrate_volume.id)
    self.debug("volume id %s got created successfully" %
               list_volume[0].id)

    list_pool = StoragePool.list(self.apiclient,
                                 id=list_volume[0].storageid)
    self.assertEqual(validateList(list_pool)[0], PASS,
                     "Check List pool response for storage id %s" %
                     list_volume[0].storageid)
    self.assertGreater(len(list_pool), 0,
                       "Check the list storagepool response for id: %s" %
                       list_volume[0].storageid)
    list_pools = StoragePool.list(self.apiclient,
                                  scope=list_pool[0].scope)
    self.assertEqual(validateList(list_pools)[0], PASS,
                     "Check List pool response for scope %s" %
                     list_pool[0].scope)
    self.assertGreater(len(list_pools), 0,
                       "Check the list pool response for scope :%s" %
                       list_volume[0].scope)
    storagepoolid = None
    for i in range(len(list_pools)):
        if list_volume[0].storageid != list_pools[i].id:
            storagepoolid = list_pools[i].id
            break
        else:
            self.debug("No pool available for volume migration ")

    if storagepoolid is not None:
        try:
            volume_migrate = Volume.migrate(
                self.apiclient,
                storageid=storagepoolid,
                volumeid=self.migrate_volume.id)
        except Exception as e:
            raise Exception("Volume migration failed with error %s" % e)

        self.virtual_machine_2.attach_volume(self.userapiclient,
                                             self.migrate_volume)
        verify_attach_volume(self, self.virtual_machine_2.id,
                             self.migrate_volume.id)

        pool_for_migration = StoragePool.listForMigration(
            self.apiclient, id=self.migrate_volume.id)
        self.assertEqual(validateList(pool_for_migration)[0], PASS,
                         "Check list pool for migration response for volume %s" %
                         self.migrate_volume.id)
        self.assertGreater(len(pool_for_migration), 0,
                           "Check the listForMigration response for volume :%s" %
                           self.migrate_volume.id)
        try:
            volume_migrate = Volume.migrate(
                self.apiclient,
                storageid=pool_for_migration[0].id,
                volumeid=self.migrate_volume.id,
                livemigrate=True)
        except Exception as e:
            raise Exception("Volume migration failed with error %s" % e)
    else:
        try:
            self.migrate_volume.delete(self.userapiclient)
            self.debug("volume id:%s got deleted successfully " %
                       self.migrate_volume.id)
        except Exception as e:
            raise Exception("Volume deletion failed with error %s" % e)

    # 16. Upload volume of size smaller than
    #     storage.max.volume.upload.size (leaving the negative case)
    self.testdata["upload_volume"]["format"] = "VHD"
    volume_upload = Volume.upload(self.userapiclient,
                                  self.testdata["upload_volume"],
                                  zoneid=self.zone.id)
    volume_upload.wait_for_upload(self.userapiclient)
    self.debug("volume id :%s got uploaded successfully" %
               volume_upload.id)

    # 20. Detach data disk from vm2 and delete the volume
    self.virtual_machine_2.detach_volume(self.userapiclient,
                                         volume=self.volume)
    verify_detach_volume(self, self.virtual_machine_2.id, self.volume.id)
    try:
        self.volume.delete(self.userapiclient)
        self.debug("volume id:%s got deleted successfully " %
                   self.volume.id)
    except Exception as e:
        raise Exception("Volume deletion failed with error %s" % e)
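# The target-pool selection in step 15 walks the pool list and takes the
# first pool that differs from the volume's current one. Factored out as
# a standalone helper it is easier to read and test; a sketch using
# plain dicts in place of the Marvin response objects:
def pick_migration_target(current_pool_id, pools):
    """Return the id of the first pool that is not the current one,
    or None when no alternative pool exists."""
    for pool in pools:
        if pool["id"] != current_pool_id:
            return pool["id"]
    return None

pools = [{"id": "pool-a"}, {"id": "pool-b"}]
assert pick_migration_target("pool-a", pools) == "pool-b"
assert pick_migration_target("pool-a", [{"id": "pool-a"}]) is None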
def test_01_data_persistency_root_disk(self):
    """Test the timing issue of root disk data sync

    # 1. Write data to root disk of a VM
    # 2. Create a template from the root disk of VM
    # 3. Create a new VM from this template
    # 4. Check that the data is present in the new VM

    This tests whether data is persisted on the root disk of a VM when
    a template is created from it immediately after writing.
    """
    ssh = self.virtual_machine.get_ssh_client()

    sampleText = "This is sample data"

    cmds = [
        "cd /root/",
        "touch testFile.txt",
        "chmod 600 testFile.txt",
        "echo %s >> testFile.txt" % sampleText
    ]
    for c in cmds:
        ssh.execute(c)

    # Stop virtual machine
    self.virtual_machine.stop(self.api_client)

    list_volume = Volume.list(
        self.api_client,
        virtualmachineid=self.virtual_machine.id,
        type='ROOT',
        listall=True)
    if isinstance(list_volume, list):
        self.volume = list_volume[0]
    else:
        raise Exception(
            "Exception: Unable to find root volume for VM: %s" %
            self.virtual_machine.id)

    self.services["template"]["ostype"] = self.services["ostype"]
    # Create templates for Edit, Delete & update permissions testcases
    customTemplate = Template.create(
        self.userapiclient,
        self.services["template"],
        self.volume.id,
        account=self.account.name,
        domainid=self.account.domainid
    )
    self.cleanup.append(customTemplate)

    # Delete the VM - No longer needed
    self.virtual_machine.delete(self.apiclient)

    virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        templateid=customTemplate.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        mode=self.services["mode"]
    )

    ssh = virtual_machine.get_ssh_client()

    response = ssh.execute("cat /root/testFile.txt")
    res = str(response[0])

    self.assertEqual(res, sampleText,
                     "The data %s does not match with sample text %s" %
                     (res, sampleText))
    return
def test_01_volume_snapshot(self): """ Test Volume (root) Snapshot # 1. Deploy a VM on primary storage and . # 2. Take snapshot on root disk # 3. Verify the snapshot's entry in the "snapshots" table and presence of the corresponding snapshot on the Secondary Storage # 4. Create Template from the Snapshot and Deploy a VM using the Template # 5. Log in to the VM from template and make verify the contents of the ROOT disk matches with the snapshot. # 6. Delete Snapshot and Deploy a Linux VM from the Template and verify the successful deployment of the VM. # 7. Create multiple snapshots on the same volume and Check the integrity of all the snapshots by creating a template from the snapshot and deploying a Vm from it and delete one of the snapshots # 8. Verify that the original checksum matches with the checksum of VM's created from remaning snapshots # 9. Make verify the contents of the ROOT disk matches with the snapshot # 10.Verify that Snapshot of both DATA and ROOT volume should succeed when snapshot of Data disk of a VM is taken when snapshot of ROOT volume of VM is in progress # 11.Create snapshot of data disk and verify the original checksum matches with the volume created from snapshot # 12.Verify that volume's state should not change when snapshot of a DATA volume is taken that is attached to a VM # 13.Verify that volume's state should not change when snapshot of a DATA volume is taken that is not attached to a VM # 14.Verify that create Snapshot with quiescevm=True should succeed # 15.revertSnapshot() to revert VM to a specified Volume snapshot for root volume """ # Step 1 # Get ROOT Volume Id root_volumes_cluster_list = list_volumes(self.apiclient, virtualmachineid=self.vm_1.id, type='ROOT', listall=True) root_volume_cluster = root_volumes_cluster_list[0] disk_volumes_cluster_list = list_volumes(self.apiclient, virtualmachineid=self.vm_1.id, type='DATADISK', listall=True) data_disk = disk_volumes_cluster_list[0] root_vol_state = root_volume_cluster.state ckecksum_random_root_cluster = createChecksum( service=self.testdata, virtual_machine=self.vm_1, disk=root_volume_cluster, disk_type="rootdiskdevice") self.vm_1.stop(self.apiclient) root_vol_snap = Snapshot.create(self.apiclient, root_volume_cluster.id) self.assertEqual(root_vol_snap.state, "BackedUp", "Check if the snapshot state is correct ") self.assertEqual(root_vol_state, root_volume_cluster.state, "Check if volume state has changed") self.vm_1.start(self.apiclient) # Step 2 snapshot_list = list_snapshots(self.apiclient, id=root_vol_snap.id) self.assertNotEqual(snapshot_list, None, "Check if result exists in list item call") self.assertEqual(snapshot_list[0].id, root_vol_snap.id, "Check resource id in list resources call") self.assertTrue( is_snapshot_on_nfs(self.apiclient, self.dbclient, self.config, self.zone.id, root_vol_snap.id)) events = list_events(self.apiclient, account=self.account.name, domainid=self.account.domainid, type='SNAPSHOT.CREATE') event_list_validation_result = validateList(events) self.assertEqual( event_list_validation_result[0], PASS, "event list validation failed due to %s" % event_list_validation_result[2]) self.debug("Events list contains event SNAPSHOT.CREATE") qresultset = self.dbclient.execute( "select * from event where type='SNAPSHOT.CREATE' AND \ description like '%%%s%%' AND state='Completed';" % root_volume_cluster.id) event_validation_result = validateList(qresultset) self.assertEqual( event_validation_result[0], PASS, "event list validation failed due to %s" % event_validation_result[2]) 
self.assertNotEqual(len(qresultset), 0, "Check DB Query result set") qresult = str(qresultset) self.assertEqual( qresult.count('SNAPSHOT.CREATE') > 0, True, "Check SNAPSHOT.CREATE event in events table") #Usage_Event qresultset = self.dbclient.execute( "select * from usage_event where type='SNAPSHOT.CREATE' AND \ resource_name='%s'" % root_vol_snap.name) usage_event_validation_result = validateList(qresultset) self.assertEqual( usage_event_validation_result[0], PASS, "event list validation failed due to %s" % usage_event_validation_result[2]) self.assertNotEqual(len(qresultset), 0, "Check DB Query result set") self.assertEqual( self.dbclient.execute( "select size from usage_event where type='SNAPSHOT.CREATE' AND \ resource_name='%s'" % root_vol_snap.name)[0][0], root_vol_snap.physicalsize) # Step 3 # create template from snapshot root_vol_snap templateFromSnapshot = Template.create_from_snapshot( self.apiclient, root_vol_snap, self.testdata["template_2"]) self.assertNotEqual(templateFromSnapshot, None, "Check if result exists in list item call") vm_from_temp = VirtualMachine.create( self.apiclient, self.testdata["small"], templateid=templateFromSnapshot.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, zoneid=self.zone.id, mode=self.zone.networktype) self.assertNotEqual(vm_from_temp, None, "Check if result exists in list item call") compareChecksum(self.apiclient, service=self.testdata, original_checksum=ckecksum_random_root_cluster, disk_type="rootdiskdevice", virt_machine=vm_from_temp) vm_from_temp.delete(self.apiclient) # Step 4 root_vol_snap.delete(self.userapiclient) self.assertEqual( list_snapshots( self.apiclient, volumeid=root_volume_cluster.id, ), None, "Snapshot list should be empty") events = list_events(self.apiclient, account=self.account.name, domainid=self.account.domainid, type='SNAPSHOT.DELETE') event_list_validation_result = validateList(events) self.assertEqual( event_list_validation_result[0], PASS, "event list validation failed due to %s" % event_list_validation_result[2]) self.debug("Events list contains event SNAPSHOT.DELETE") self.debug("select id from account where uuid = '%s';" % self.account.id) qresultset = self.dbclient.execute( "select id from account where uuid = '%s';" % self.account.id) account_validation_result = validateList(qresultset) self.assertEqual( account_validation_result[0], PASS, "event list validation failed due to %s" % account_validation_result[2]) self.assertNotEqual(len(qresultset), 0, "Check DB Query result set") qresult = qresultset[0] account_id = qresult[0] qresultset = self.dbclient.execute( "select * from event where type='SNAPSHOT.DELETE' AND \ account_id='%s' AND state='Completed';" % account_id) delete_snap_validation_result = validateList(qresultset) self.assertEqual( delete_snap_validation_result[0], PASS, "event list validation failed due to %s" % delete_snap_validation_result[2]) self.assertNotEqual(len(qresultset), 0, "Check DB Query result set") qresult = str(qresultset) self.assertEqual( qresult.count('SNAPSHOT.DELETE') > 0, True, "Check SNAPSHOT.DELETE event in events table") # Step 5 # delete snapshot and deploy vm from snapshot vm_from_temp_2 = VirtualMachine.create( self.userapiclient, self.testdata["small"], templateid=templateFromSnapshot.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, zoneid=self.zone.id, mode=self.zone.networktype) self.assertNotEqual(vm_from_temp_2, None, "Check if result exists in 
list item call") # Step 6: compareChecksum(self.apiclient, service=self.testdata, original_checksum=ckecksum_random_root_cluster, disk_type="rootdiskdevice", virt_machine=vm_from_temp_2) vm_from_temp_2.delete(self.apiclient) # Step 7 # Multiple Snapshots self.vm_1.stop(self.apiclient) snaps = [] for i in range(2): root_vol_snap = Snapshot.create(self.apiclient, root_volume_cluster.id) self.assertEqual( root_vol_snap.state, "BackedUp", "Check if the data vol snapshot state is correct ") snaps.append(root_vol_snap) templateFromSnapshot = Template.create_from_snapshot( self.apiclient, root_vol_snap, self.testdata["template_2"]) self.assertNotEqual(templateFromSnapshot, None, "Check if result exists in list item call") vm_from_temp = VirtualMachine.create( self.userapiclient, self.testdata["small"], templateid=templateFromSnapshot.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, zoneid=self.zone.id, mode=self.zone.networktype) self.assertNotEqual(vm_from_temp, None, "Check if result exists in list item call") compareChecksum(self.apiclient, service=self.testdata, original_checksum=ckecksum_random_root_cluster, disk_type="rootdiskdevice", virt_machine=vm_from_temp) vm_from_temp.delete(self.apiclient) templateFromSnapshot.delete(self.apiclient) self.vm_1.start(self.apiclient) delete_snap = snaps.pop(1) delete_snap.delete(self.apiclient) self.assertEqual(Snapshot.list(self.apiclient, id=delete_snap.id), None, "Snapshot list should be empty") # Step 8 for snap in snaps: templateFromSnapshot = Template.create_from_snapshot( self.apiclient, snap, self.testdata["template_2"]) self.assertNotEqual(templateFromSnapshot, None, "Check if result exists in list item call") vm_from_temp = VirtualMachine.create( self.userapiclient, self.testdata["small"], templateid=templateFromSnapshot.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, zoneid=self.zone.id, mode=self.zone.networktype) self.assertNotEqual(vm_from_temp, None, "Check if result exists in list item call") compareChecksum(self.apiclient, service=self.testdata, original_checksum=ckecksum_random_root_cluster, disk_type="rootdiskdevice", virt_machine=vm_from_temp) templateFromSnapshot.delete(self.apiclient) vm_from_temp.delete(self.apiclient) for snap in snaps: snap.delete(self.apiclient) # Step 9 ckecksum_root_cluster = createChecksum(service=self.testdata, virtual_machine=self.vm_1, disk=root_volume_cluster, disk_type="rootdiskdevice") self.vm_1.stop(self.apiclient) root_vol_snap_2 = Snapshot.create(self.apiclient, root_volume_cluster.id) self.assertEqual(root_vol_snap_2.state, "BackedUp", "Check if the data vol snapshot state is correct ") snap_list_validation_result = validateList(events) self.assertEqual( snap_list_validation_result[0], PASS, "snapshot list validation failed due to %s" % snap_list_validation_result[2]) self.assertNotEqual(snapshot_list, None, "Check if result exists in list item call") templateFromSnapshot = Template.create_from_snapshot( self.apiclient, root_vol_snap_2, self.testdata["template_2"]) self.debug("create template event comlites with template %s name" % templateFromSnapshot.name) self.assertNotEqual(templateFromSnapshot, None, "Check if result exists in list item call") vm_from_temp_2 = VirtualMachine.create( self.userapiclient, self.testdata["small"], templateid=templateFromSnapshot.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, 
zoneid=self.zone.id, mode=self.zone.networktype) self.assertNotEqual(vm_from_temp_2, None, "Check if result exists in list item call") compareChecksum(self.apiclient, service=self.testdata, original_checksum=ckecksum_root_cluster, disk_type="rootdiskdevice", virt_machine=vm_from_temp_2) vm_from_temp_2.delete(self.apiclient) # Step 10 # Take a snapshot of the data disk of a VM while a snapshot of the # VM's ROOT volume is in progress try: self.vm_1.stop(self.apiclient) t1 = Thread(target=Snapshot.create, args=(self.apiclient, root_volume_cluster.id)) t2 = Thread(target=Snapshot.create, args=(self.apiclient, data_disk.id)) t1.start() t2.start() t1.join() t2.join() except Exception as e: self.debug("Error: unable to run the snapshot threads: %s" % e) # Step 11 # Data Disk self.vm_1.start(self.apiclient) ckecksum_data_disk = createChecksum(service=self.testdata, virtual_machine=self.vm_1, disk=data_disk, disk_type="datadiskdevice_1") data_vol_state = data_disk.state self.vm_1.stop(self.apiclient) data_vol_snap = Snapshot.create(self.apiclient, data_disk.id) self.assertEqual(data_vol_snap.state, "BackedUp", "Check if the data vol snapshot state is correct") self.assertEqual(data_vol_state, data_disk.state, "Check that the volume state has not changed") data_snapshot_list = list_snapshots(self.apiclient, id=data_vol_snap.id) self.assertNotEqual(data_snapshot_list, None, "Check if result exists in list item call") self.assertEqual(data_snapshot_list[0].id, data_vol_snap.id, "Check resource id in list resources call") self.assertTrue( is_snapshot_on_nfs(self.apiclient, self.dbclient, self.config, self.zone.id, data_vol_snap.id)) events = list_events(self.apiclient, account=self.account.name, domainid=self.account.domainid, type='SNAPSHOT.CREATE') event_list_validation_result = validateList(events) self.assertEqual( event_list_validation_result[0], PASS, "event list validation failed due to %s" % event_list_validation_result[2]) self.debug("Events list contains event SNAPSHOT.CREATE") self.testdata["volume"]["zoneid"] = self.zone.id volumeFromSnap = Volume.create_from_snapshot( self.apiclient, data_vol_snap.id, self.testdata["volume"], account=self.account.name, domainid=self.account.domainid, ) self.assertTrue( is_snapshot_on_nfs(self.apiclient, self.dbclient, self.config, self.zone.id, data_vol_snap.id)) new_vm = VirtualMachine.create( self.userapiclient, self.testdata["small"], templateid=self.template.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, zoneid=self.zone.id, mode=self.zone.networktype) new_vm.attach_volume(self.apiclient, volumeFromSnap) new_vm.reboot(self.apiclient) compareChecksum(self.apiclient, service=self.testdata, original_checksum=ckecksum_data_disk, disk_type="datadiskdevice_1", virt_machine=new_vm) # Step 12 data_volume_2 = Volume.create(self.apiclient, self.testdata["volume"], zoneid=self.zone.id, account=self.account.name, domainid=self.account.domainid, diskofferingid=self.disk_offering.id) self.vm_1.start(self.apiclient) self.vm_1.attach_volume(self.userapiclient, data_volume_2) self.vm_1.reboot(self.apiclient) self.vm_1.stop(self.apiclient) data_vol_snap_1 = Snapshot.create(self.apiclient, data_volume_2.id) self.assertEqual(data_vol_snap_1.state, "BackedUp", "Check if the snapshot state is correct") data_disk_2_list = Volume.list(self.userapiclient, listall=self.testdata["listall"], id=data_volume_2.id) self.vm_1.start(self.apiclient) checksum_data_2 = createChecksum(service=self.testdata, virtual_machine=self.vm_1, disk=data_disk_2_list[0],
disk_type="datadiskdevice_2") # Step 13 self.vm_1.detach_volume(self.apiclient, data_volume_2) self.vm_1.reboot(self.apiclient) prev_state = data_volume_2.state data_vol_snap_2 = Snapshot.create(self.apiclient, data_volume_2.id) self.assertEqual(data_vol_snap_2.state, prev_state, "Check if the volume state is correct ") data_snapshot_list_2 = list_snapshots(self.apiclient, id=data_vol_snap_2.id) self.assertNotEqual(data_snapshot_list_2, None, "Check if result exists in list item call") self.assertEqual(data_snapshot_list_2[0].id, data_vol_snap_2.id, "Check resource id in list resources call") self.testdata["volume"]["zoneid"] = self.zone.id volumeFromSnap_2 = Volume.create_from_snapshot( self.apiclient, data_vol_snap_2.id, self.testdata["volume"], account=self.account.name, domainid=self.account.domainid, ) self.vm_2.attach_volume(self.userapiclient, volumeFromSnap_2) self.vm_2.reboot(self.apiclient) data_disk_2_list = Volume.list(self.userapiclient, listall=self.testdata["listall"], id=volumeFromSnap_2.id) compareChecksum(self.apiclient, service=self.testdata, original_checksum=checksum_data_2, disk_type="datadiskdevice_2", virt_machine=self.vm_2) # Step 14 self.vm_1.stop(self.apiclient) with self.assertRaises(Exception): root_vol_snap.revertVolToSnapshot(self.apiclient) # Step 15 root_snap = Snapshot.create(self.apiclient, root_volume_cluster.id) with self.assertRaises(Exception): root_snap.revertVolToSnapshot(self.apiclient) return
def setUpCloudStack(cls): testClient = super(TestVmSnapshot, cls).getClsTestClient() cls.apiclient = testClient.getApiClient() cls._cleanup = [] cls.unsupportedHypervisor = False # Setup test data td = TestData() cls.testdata = td.testdata cls.helper = StorPoolHelper() cls.services = testClient.getParsedTestDataConfig() # Get Zone, Domain and templates cls.domain = get_domain(cls.apiclient) cls.zone = None zones = list_zones(cls.apiclient) for z in zones: if z.name == cls.getClsConfig().mgtSvr[0].zone: cls.zone = z assert cls.zone is not None cls.cluster = list_clusters(cls.apiclient)[0] cls.hypervisor = get_hypervisor_type(cls.apiclient) #The version of CentOS has to be supported template = get_template( cls.apiclient, cls.zone.id, account = "system" ) if template == FAILED: assert False, "get_template() failed to return template\ with description %s" % cls.services["ostype"] cls.template = template cls.account = cls.helper.create_account( cls.apiclient, cls.services["account"], accounttype = 1, domainid=cls.domain.id, roleid = 1 ) cls._cleanup.append(cls.account) securitygroup = SecurityGroup.list(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid)[0] cls.helper.set_securityGroups(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid, id = securitygroup.id) primarystorage = cls.testdata[TestData.primaryStorage] serviceOffering = cls.testdata[TestData.serviceOffering] storage_pool = list_storage_pools( cls.apiclient, name = primarystorage.get("name") ) cls.primary_storage = storage_pool[0] disk_offering = list_disk_offering( cls.apiclient, name="ssd" ) assert disk_offering is not None service_offering_only = list_service_offering( cls.apiclient, name="ssd" ) if service_offering_only is not None: cls.service_offering_only = service_offering_only[0] else: cls.service_offering_only = ServiceOffering.create( cls.apiclient, serviceOffering) assert cls.service_offering_only is not None cls.disk_offering = disk_offering[0] # Create 1 data volume_1 cls.volume = Volume.create( cls.apiclient, cls.testdata[TestData.volume_1], account=cls.account.name, domainid=cls.domain.id, zoneid=cls.zone.id, diskofferingid=cls.disk_offering.id, size=10 ) cls.virtual_machine = VirtualMachine.create( cls.apiclient, {"name":"StorPool-%s" % uuid.uuid4() }, zoneid=cls.zone.id, templateid=cls.template.id, accountid=cls.account.name, domainid=cls.account.domainid, serviceofferingid=cls.service_offering_only.id, hypervisor=cls.hypervisor, rootdisksize=10 ) cls.random_data_0 = random_gen(size=100) cls.test_dir = "/tmp" cls.random_data = "random.data" return
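setUpCloudStack looks the "ssd" service offering up by name and creates it from test data only when the lookup comes back empty. A hedged sketch of that find-or-create pattern as a reusable helper; list_service_offering and ServiceOffering.create are the calls used above, the helper name is invented.

def find_or_create_service_offering(apiclient, name, offering_data):
    # list_* helpers return None when nothing matches, else a list.
    existing = list_service_offering(apiclient, name=name)
    if existing:
        return existing[0]
    return ServiceOffering.create(apiclient, offering_data)

# e.g. cls.service_offering_only = find_or_create_service_offering(
#     cls.apiclient, "ssd", cls.testdata[TestData.serviceOffering])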
def test_02_list_volume_snapshots_byid(self): """ @Desc: Test to List Volume Snapshots by Id @Steps: Step1: Listing all the volume snapshots for a user Step2: Verifying that list size is 0 Step3: Creating a volume snapshot Step4: Listing all the volume snapshots again for a user Step5: Verifying that list size is 1 Step6: Listing all the volume snapshots by specifying snapshot id Step7: Verifying that list size is 1 Step8: Verifying details of the listed volume snapshot """ # Listing all the volume snapshots for a User list_vol_snaps_before = Snapshot.list(self.userapiclient, listall=self.services["listall"]) # Verifying list size is 0 self.assertIsNone(list_vol_snaps_before, "Volume snapshots exists for newly created user") # Listing the root volumes available for the user volumes_list = Volume.list(self.userapiclient, listall=self.services["listall"]) status = validateList(volumes_list) self.assertEquals( PASS, status[0], "Root volume did not get created while deploying a VM") # Verifying list size to be 1 self.assertEquals(1, len(volumes_list), "More than 1 root volume created for deployed VM") root_volume = volumes_list[0] # Creating a volume snapshot snapshot_created = Snapshot.create( self.userapiclient, root_volume.id, ) self.assertIsNotNone(snapshot_created, "Snapshot creation failed") self.cleanup.append(snapshot_created) # Listing all the volume snapshots for user again list_vol_snaps_after = Snapshot.list(self.userapiclient, listall=self.services["listall"]) status = validateList(list_vol_snaps_after) self.assertEquals(PASS, status[0], "Volume snapshot creation failed") # Verifying that list size is 1 self.assertEquals(1, len(list_vol_snaps_after), "Failed to create Volume snapshot") # Listing volume snapshot by id list_vol_snapshot = Snapshot.list(self.userapiclient, listall=self.services["listall"], id=snapshot_created.id) status = validateList(list_vol_snapshot) self.assertEquals(PASS, status[0], "Failed to list Volume snapshot by Id") # Verifying that list size is 1 self.assertEquals( 1, len(list_vol_snapshot), "Size of the list volume snapshot by Id is not matching") # Verifying details of the listed snapshot to be same as snapshot created above #Creating expected and actual values dictionaries expected_dict = { "id": snapshot_created.id, "name": snapshot_created.name, "state": snapshot_created.state, "intervaltype": snapshot_created.intervaltype, "account": snapshot_created.account, "domain": snapshot_created.domainid, "volume": snapshot_created.volumeid } actual_dict = { "id": list_vol_snapshot[0].id, "name": list_vol_snapshot[0].name, "state": list_vol_snapshot[0].state, "intervaltype": list_vol_snapshot[0].intervaltype, "account": list_vol_snapshot[0].account, "domain": list_vol_snapshot[0].domainid, "volume": list_vol_snapshot[0].volumeid } vol_snapshot_status = self.__verify_values(expected_dict, actual_dict) self.assertEqual(True, vol_snapshot_status, "Listed Volume Snapshot details are not as expected") return
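The test delegates the field-by-field comparison to self.__verify_values, which is defined elsewhere in the class. A minimal sketch of what such a comparator usually does follows; this is an assumption about its behaviour, not the suite's actual code.

def __verify_values(self, expected_vals, actual_vals):
    # Compare the two dictionaries key by key, logging every mismatch
    # so a failing assertion points at the offending field.
    if set(expected_vals) != set(actual_vals):
        return False
    matched = True
    for key in expected_vals:
        if expected_vals[key] != actual_vals[key]:
            self.debug("%s mismatch: expected %s, actual %s"
                       % (key, expected_vals[key], actual_vals[key]))
            matched = False
    return matched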
def test_create_volume_under_domain(self): """Create a volume under a non-root domain as non-root-domain user 1. Create a domain under ROOT 2. Create a user within this domain 3. As user in step 2. create a volume with standard disk offering 4. Ensure the volume is created in the domain and available to the user in his listVolumes call """ dom = Domain.create( self.apiclient, services={}, name="NROOT", parentdomainid=self.domain.id ) self.cleanup.append(dom) self.assertTrue(dom is not None, msg="Domain creation failed") domuser = Account.create( apiclient=self.apiclient, services=self.services["account"], admin=False, domainid=dom.id ) self.cleanup.insert(-2, domuser) self.assertTrue(domuser is not None) domapiclient = self.testClient.getUserApiClient( UserName=domuser.name, DomainName=dom.name) diskoffering = DiskOffering.list(self.apiclient) self.assertTrue( isinstance( diskoffering, list), msg="DiskOffering list is not a list?") self.assertTrue( len(diskoffering) > 0, "no disk offerings in the deployment") vol = Volume.create( domapiclient, services=self.services["volume"], zoneid=self.zone.id, account=domuser.name, domainid=dom.id, diskofferingid=diskoffering[0].id ) self.assertTrue( vol is not None, "volume creation fails in domain %s as user %s" % (dom.name, domuser.name)) listed_vol = Volume.list(domapiclient, id=vol.id) self.assertTrue( listed_vol is not None and isinstance( listed_vol, list), "invalid response from listVolumes for volume %s" % vol.id) self.assertTrue( listed_vol[0].id == vol.id, "Volume returned by list volumes %s not matching with queried\ volume %s in domain %s" % (listed_vol[0].id, vol.id, dom.name))
def test01_template_download_URL_expire(self): """ @Desc:Template files are deleted from secondary storage after download URL expires Step1:Deploy vm with default cent os template Step2:Stop the vm Step3:Create template from the vm's root volume Step4:Extract Template and wait for the download url to expire Step5:Deploy another vm with the template created at Step3 Step6:Verify that vm deployment succeeds """ params = [ 'extract.url.expiration.interval', 'extract.url.cleanup.interval' ] wait_time = 0 for param in params: config = Configurations.list( self.apiClient, name=param, ) self.assertEqual( validateList(config)[0], PASS, "Config list returned invalid response") wait_time = wait_time + int(config[0].value) self.debug("Total wait time for url expiry: %s" % wait_time) # Creating Virtual Machine self.virtual_machine = VirtualMachine.create( self.userapiclient, self.services["virtual_machine"], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, ) self.assertIsNotNone(self.virtual_machine, "Virtual Machine creation failed") self.cleanup.append(self.virtual_machine) #Stop virtual machine self.virtual_machine.stop(self.userapiclient) list_volume = Volume.list(self.userapiclient, virtualmachineid=self.virtual_machine.id, type='ROOT', listall=True) self.assertEqual( validateList(list_volume)[0], PASS, "list volumes with type ROOT returned invalid list") self.volume = list_volume[0] self.create_template = Template.create(self.userapiclient, self.services["template"], volumeid=self.volume.id, account=self.account.name, domainid=self.account.domainid) self.assertIsNotNone(self.create_template, "Failed to create template from root volume") self.cleanup.append(self.create_template) """ Extract template """ try: Template.extract(self.userapiclient, self.create_template.id, 'HTTP_DOWNLOAD', self.zone.id) except Exception as e: self.fail("Extract template failed with error %s" % e) self.debug("Waiting for %s seconds for url to expire" % repr(wait_time + 20)) time.sleep(wait_time + 20) self.debug("Waited for %s seconds for url to expire" % repr(wait_time + 20)) """ Deploy vm with the template created from the volume. After url expiration interval only url should be deleted not the template. To validate this deploy vm with the template """ try: self.vm = VirtualMachine.create( self.userapiclient, self.services["virtual_machine"], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, templateid=self.create_template.id) self.cleanup.append(self.vm) except Exception as e: self.fail("Template is automatically deleted after URL expired.\ So vm deployment failed with error: %s" % e) return
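Sleeping for wait_time + 20 seconds is the simple approach; if the extract response exposes the download URL (an assumption, the response is ignored above), the test could instead poll until the URL stops being served. A sketch under that assumption:

import time
import urllib.request

def wait_for_url_expiry(url, timeout, interval=30):
    # Poll the extract URL until it stops answering or the budget runs out.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            urllib.request.urlopen(url, timeout=10)
        except Exception:
            return True   # no longer reachable: the URL has expired
        time.sleep(interval)
    return False

# Hypothetical usage, assuming Template.extract returns a response
# carrying a `url` field:
# response = Template.extract(self.userapiclient, self.create_template.id,
#                             'HTTP_DOWNLOAD', self.zone.id)
# self.assertTrue(wait_for_url_expiry(response.url, wait_time + 20))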
def test_deployVmWithCustomDisk(self): """Test custom disk sizes beyond range """ # Steps for validation # 1. listConfigurations - custom.diskoffering.size.min # and custom.diskoffering.size.max # 2. deployVm with custom disk offering size < min # 3. deployVm with custom disk offering min < size < max # 4. deployVm with custom disk offering size > max # Validate the following # 2. and 4. of deploy VM should fail. # Only case 3. should succeed. # cleanup all created data disks from the account config = Configurations.list( self.apiclient, name="custom.diskoffering.size.min" ) self.assertEqual( isinstance(config, list), True, "custom.diskoffering.size.min should be present in global config" ) # minimum size of custom disk (in GBs) min_size = int(config[0].value) self.debug("custom.diskoffering.size.min: %s" % min_size) config = Configurations.list( self.apiclient, name="custom.diskoffering.size.max" ) self.assertEqual( isinstance(config, list), True, "custom.diskoffering.size.max should be present in global config" ) # maximum size of custom disk (in GBs) max_size = int(config[0].value) self.debug("custom.diskoffering.size.max: %s" % max_size) self.debug("Creating a volume with size less than min cust disk size") self.services["custom_volume"]["customdisksize"] = (min_size - 1) self.services["custom_volume"]["zoneid"] = self.zone.id with self.assertRaises(Exception): Volume.create_custom_disk( self.apiclient, self.services["custom_volume"], account=self.account.name, domainid=self.account.domainid, diskofferingid=self.disk_offering.id ) self.debug("Create volume failed!") self.debug("Creating a volume with size more than max cust disk size") self.services["custom_volume"]["customdisksize"] = (max_size + 1) with self.assertRaises(Exception): Volume.create_custom_disk( self.apiclient, self.services["custom_volume"], account=self.account.name, domainid=self.account.domainid, diskofferingid=self.disk_offering.id ) self.debug("Create volume failed!") self.debug("Creating a volume with size more than min cust disk " + "but less than max cust disk size" ) self.services["custom_volume"]["customdisksize"] = (min_size + 1) try: Volume.create_custom_disk( self.apiclient, self.services["custom_volume"], account=self.account.name, domainid=self.account.domainid, diskofferingid=self.disk_offering.id ) self.debug("Create volume of cust disk size succeeded") except Exception as e: self.fail("Create volume failed with exception: %s" % e) return
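The three boundary cases above follow the same shape, so they can be table-driven; the sketch below reuses the exact Volume.create_custom_disk call from the test, and only the loop is new.

# Illustrative table-driven form of the min - 1, max + 1, min + 1 cases.
boundary_cases = [
    (min_size - 1, False),  # below the minimum: creation must fail
    (max_size + 1, False),  # above the maximum: creation must fail
    (min_size + 1, True),   # inside the range: creation must succeed
]
for disksize, should_succeed in boundary_cases:
    self.services["custom_volume"]["customdisksize"] = disksize
    if should_succeed:
        Volume.create_custom_disk(
            self.apiclient,
            self.services["custom_volume"],
            account=self.account.name,
            domainid=self.account.domainid,
            diskofferingid=self.disk_offering.id)
    else:
        with self.assertRaises(Exception):
            Volume.create_custom_disk(
                self.apiclient,
                self.services["custom_volume"],
                account=self.account.name,
                domainid=self.account.domainid,
                diskofferingid=self.disk_offering.id)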
def setUpClass(cls): cls.testClient = super(TestTemplates, cls).getClsTestClient() cls.api_client = cls.testClient.getApiClient() cls.services = Services().services # Get Zone, Domain and templates cls.domain = get_domain(cls.api_client) cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests()) cls.services['mode'] = cls.zone.networktype cls._cleanup = [] cls.unsupportedHypervisor = False cls.hypervisor = cls.testClient.getHypervisorInfo() if cls.hypervisor.lower() in ['lxc']: cls.unsupportedHypervisor = True return # populate second zone id for iso copy cmd = listZones.listZonesCmd() zones = cls.api_client.listZones(cmd) if not isinstance(zones, list): raise Exception("Failed to find zones.") if len(zones) >= 2: cls.services["destzoneid"] = zones[1].id template = get_template(cls.api_client, cls.zone.id, cls.services["ostype"]) cls.services["virtual_machine"]["zoneid"] = cls.zone.id try: cls.account = Account.create(cls.api_client, cls.services["account"], domainid=cls.domain.id) cls._cleanup.append(cls.account) cls.services["account"] = cls.account.name cls.service_offering = ServiceOffering.create( cls.api_client, cls.services["service_offering"]) cls._cleanup.append(cls.service_offering) # create virtual machine cls.virtual_machine = VirtualMachine.create( cls.api_client, cls.services["virtual_machine"], templateid=template.id, accountid=cls.account.name, domainid=cls.account.domainid, serviceofferingid=cls.service_offering.id, ) # Stop virtual machine cls.virtual_machine.stop(cls.api_client) timeout = cls.services["timeout"] while True: list_volume = Volume.list( cls.api_client, virtualmachineid=cls.virtual_machine.id, type='ROOT', listall=True) if isinstance(list_volume, list): break elif timeout == 0: raise Exception("List volumes failed.") time.sleep(5) timeout = timeout - 1 cls.volume = list_volume[0] # Create template from volume cls.template = Template.create(cls.api_client, cls.services["template"], cls.volume.id) except Exception as e: cls.tearDownClass() raise unittest.SkipTest("Failure in setUpClass: %s" % e)
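The manual timeout loop above recurs in several of these setups; a small generic poller could replace it. A sketch using the same 5-second tick the loop uses; the helper name is invented.

import time

def poll_until_list(fetch, ticks, interval=5):
    # Call fetch() until it returns a list, or fail after `ticks` tries.
    while ticks > 0:
        result = fetch()
        if isinstance(result, list):
            return result
        time.sleep(interval)
        ticks -= 1
    raise Exception("List volumes failed.")

# Usage mirroring the loop above:
# list_volume = poll_until_list(
#     lambda: Volume.list(cls.api_client,
#                         virtualmachineid=cls.virtual_machine.id,
#                         type='ROOT', listall=True),
#     cls.services["timeout"])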
def test_01_migrateVolume(self): """ @Desc:Volume is not retaining same uuid when migrating from one storage to another. Step1:Create a volume/data disk Step2:Verify UUID of the volume Step3:Migrate the volume to another primary storage within the cluster Step4:Migrating volume to new primary storage should succeed Step5:volume UUID should not change even after migration """ vol = Volume.create( self.apiclient, self.services["volume"], diskofferingid=self.disk_offering.id, zoneid=self.zone.id, account=self.account.name, domainid=self.account.domainid, ) self.assertIsNotNone(vol, "Failed to create volume") vol_res = Volume.list( self.apiclient, id=vol.id ) self.assertEqual( validateList(vol_res)[0], PASS, "Invalid response returned for list volumes") vol_uuid = vol_res[0].id try: self.virtual_machine.attach_volume( self.apiclient, vol ) except Exception as e: self.fail("Attaching data disk to vm failed with error %s" % e) pools = StoragePool.listForMigration( self.apiclient, id=vol.id ) if not pools: self.skipTest( "No suitable storage pools found for volume migration.\ Skipping") self.assertEqual( validateList(pools)[0], PASS, "invalid pool response from findStoragePoolsForMigration") pool = pools[0] self.debug("Migrating Volume-ID: %s to Pool: %s" % (vol.id, pool.id)) try: Volume.migrate( self.apiclient, volumeid=vol.id, storageid=pool.id, livemigrate='true' ) except Exception as e: self.fail("Volume migration failed with error %s" % e) migrated_vols = Volume.list( self.apiclient, virtualmachineid=self.virtual_machine.id, listall='true', type='DATADISK' ) self.assertEqual( validateList(migrated_vols)[0], PASS, "invalid volumes response after migration") migrated_vol_uuid = migrated_vols[0].id self.assertEqual( vol_uuid, migrated_vol_uuid, "Volume is not retaining same uuid when migrating from one\ storage to another" ) self.virtual_machine.detach_volume( self.apiclient, vol ) self.cleanup.append(vol) return
def get_root_device_uuid_for_vm(cls, vm_id, root_device_id): """Return the uuid of the VM volume at the given device index. Note: this indexes the listVolumes response positionally.""" volumes = Volume.list(cls.api_client, virtualmachineid=vm_id, listall=True) return volumes[root_device_id].id
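Indexing the listVolumes response positionally silently depends on the API returning volumes ordered by device id. A hedged alternative that filters on the response's deviceid field instead (helper name is illustrative):

def get_device_uuid_for_vm(cls, vm_id, device_id):
    # Match on the deviceid field rather than the list position.
    volumes = Volume.list(cls.api_client,
                          virtualmachineid=vm_id,
                          listall=True)
    matches = [vol for vol in volumes if vol.deviceid == device_id]
    return matches[0].id if matches else None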
def test_01_volume_attach_detach(self): """Test Volume attach/detach to VM (5 data volumes) """ # Validate the following # 1. Deploy a vm and create 5 data disks # 2. Attach all the created Volumes to the vm. # 3. Detach all the volumes attached. # 4. Reboot the VM. VM should be successfully rebooted # 5. Stop the VM. Stop VM should be successful # 6. Start the VM. Start VM should be successful try: volumes = [] # Create 5 volumes and attach to VM for i in range(self.max_data_volumes): volume = Volume.create( self.apiclient, self.services["volume"], zoneid=self.zone.id, account=self.account.name, domainid=self.account.domainid, diskofferingid=self.disk_offering.id ) self.cleanup.append(volume) volumes.append(volume) # Check List Volume response for newly created volume list_volume_response = Volume.list( self.apiclient, id=volume.id ) self.assertNotEqual( list_volume_response, None, "Check if volume exists in ListVolumes") self.assertEqual( isinstance(list_volume_response, list), True, "Check list volumes response for valid list") # Attach volume to VM self.virtual_machine.attach_volume( self.apiclient, volume ) # Check all volumes attached to same VM list_volume_response = Volume.list( self.apiclient, virtualmachineid=self.virtual_machine.id, type='DATADISK', listall=True ) self.assertNotEqual( list_volume_response, None, "Check if volume exists in ListVolumes" ) self.assertEqual( isinstance(list_volume_response, list), True, "Check list volumes response for valid list" ) self.assertEqual( len(list_volume_response), self.max_data_volumes, "Number of volumes attached to the VM is %s, expected %s" % (len(list_volume_response), self.max_data_volumes)) # Detach all volumes from VM for volume in volumes: self.virtual_machine.detach_volume( self.apiclient, volume ) # Reboot VM self.debug("Rebooting the VM: %s" % self.virtual_machine.id) self.virtual_machine.reboot(self.apiclient) # Sleep to ensure that VM is in ready state time.sleep(self.services["sleep"]) vm_response = VirtualMachine.list( self.apiclient, id=self.virtual_machine.id, ) # Verify VM response to check whether VM deployment was successful self.assertEqual( isinstance(vm_response, list), True, "Check list VM response for valid list" ) self.assertNotEqual( len(vm_response), 0, "Check VMs available in List VMs response" ) vm = vm_response[0] self.assertEqual( vm.state, 'Running', "Check the state of VM" ) # Stop VM self.virtual_machine.stop(self.apiclient) # Start VM self.virtual_machine.start(self.apiclient) # Sleep to ensure that VM is in ready state time.sleep(self.services["sleep"]) vm_response = VirtualMachine.list( self.apiclient, id=self.virtual_machine.id, ) # Verify VM response to check whether VM deployment was successful self.assertEqual( isinstance(vm_response, list), True, "Check list VM response for valid list" ) self.assertNotEqual( len(vm_response), 0, "Check VMs available in List VMs response" ) vm = vm_response[0] self.assertEqual( vm.state, 'Running', "Check the state of VM" ) except Exception as e: self.fail("Exception occurred: %s" % e) return
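Both fixed time.sleep(self.services["sleep"]) pauses above assume the VM settles within that window. A sketch of a state poll built on the same VirtualMachine.list call the test already makes; the helper name and retry counts are illustrative.

import time

def wait_until_running(apiclient, vm_id, retries=12, interval=10):
    # Re-list the VM until it reports Running, instead of a fixed sleep.
    for _ in range(retries):
        vms = VirtualMachine.list(apiclient, id=vm_id)
        if isinstance(vms, list) and vms and vms[0].state == 'Running':
            return vms[0]
        time.sleep(interval)
    raise AssertionError("VM %s did not reach Running state" % vm_id)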
def setUpClass(cls): testClient = super(TestTemplates, cls).getClsTestClient() cls.apiclient = testClient.getApiClient() cls._cleanup = [] cls.services = testClient.getParsedTestDataConfig() cls.unsupportedHypervisor = False cls.hypervisor = testClient.getHypervisorInfo() if cls.hypervisor.lower() in ['lxc']: # Template creation from root volume is not supported in LXC cls.unsupportedHypervisor = True return # Get Zone, Domain and templates cls.domain = get_domain(cls.apiclient) cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests()) cls.services['mode'] = cls.zone.networktype #populate second zone id for iso copy cls.zones = Zone.list(cls.apiclient) if not isinstance(cls.zones, list): raise Exception("Failed to find zones.") cls.disk_offering = DiskOffering.create(cls.apiclient, cls.services["disk_offering"]) template = get_template(cls.apiclient, cls.zone.id, cls.services["ostype"]) if template == FAILED: assert False, "get_template() failed to return template with description %s" % cls.services[ "ostype"] cls.services["virtual_machine"]["zoneid"] = cls.zone.id cls.services["volume"]["diskoffering"] = cls.disk_offering.id cls.services["volume"]["zoneid"] = cls.zone.id cls.services["template_2"]["zoneid"] = cls.zone.id cls.services["sourcezoneid"] = cls.zone.id cls.services["template"]["ostypeid"] = template.ostypeid cls.services["template_2"]["ostypeid"] = template.ostypeid cls.services["ostypeid"] = template.ostypeid cls.account = Account.create(cls.apiclient, cls.services["account"], admin=True, domainid=cls.domain.id) cls.user = Account.create(cls.apiclient, cls.services["account"], domainid=cls.domain.id) cls.service_offering = ServiceOffering.create( cls.apiclient, cls.services["service_offerings"]) #create virtual machine cls.virtual_machine = VirtualMachine.create( cls.apiclient, cls.services["virtual_machine"], templateid=template.id, accountid=cls.account.name, domainid=cls.account.domainid, serviceofferingid=cls.service_offering.id, mode=cls.services["mode"]) #Stop virtual machine cls.virtual_machine.stop(cls.apiclient) list_volume = Volume.list(cls.apiclient, virtualmachineid=cls.virtual_machine.id, type='ROOT', listall=True) try: cls.volume = list_volume[0] except Exception as e: raise Exception( "Exception: Unable to find root volume for VM: %s - %s" % (cls.virtual_machine.id, e)) #Create templates for Edit, Delete & update permissions testcases cls.template_1 = Template.create(cls.apiclient, cls.services["template"], cls.volume.id, account=cls.account.name, domainid=cls.account.domainid) cls.template_2 = Template.create(cls.apiclient, cls.services["template_2"], cls.volume.id, account=cls.account.name, domainid=cls.account.domainid) cls._cleanup = [ cls.service_offering, cls.disk_offering, cls.account, cls.user ]
def test_01_volume_iso_attach(self): """Test Volumes and ISO attach """ # Validate the following # 1. Create and attach 5 data volumes to VM # 2. Create an ISO. Attach it to VM instance # 3. Verify that attach ISO is successful # Create 5 volumes and attach to VM if self.hypervisor.lower() in ["lxc"]: self.skipTest("attach ISO is not supported on LXC") for i in range(self.max_data_volumes): volume = Volume.create( self.apiclient, self.services["volume"], zoneid=self.zone.id, account=self.account.name, domainid=self.account.domainid, diskofferingid=self.disk_offering.id ) self.debug("Created volume: %s for account: %s" % ( volume.id, self.account.name )) # Check List Volume response for newly created volume list_volume_response = Volume.list( self.apiclient, id=volume.id ) self.assertNotEqual( list_volume_response, None, "Check if volume exists in ListVolumes" ) self.assertEqual( isinstance(list_volume_response, list), True, "Check list volumes response for valid list" ) # Attach volume to VM self.virtual_machine.attach_volume( self.apiclient, volume ) # Check all volumes attached to same VM list_volume_response = Volume.list( self.apiclient, virtualmachineid=self.virtual_machine.id, type='DATADISK', listall=True ) self.assertNotEqual( list_volume_response, None, "Check if volume exists in ListVolumes" ) self.assertEqual( isinstance(list_volume_response, list), True, "Check list volumes response for valid list" ) self.assertEqual( len(list_volume_response), self.max_data_volumes, "Volumes attached to the VM %s. Expected %s" % (len(list_volume_response), self.max_data_volumes)) # Create an ISO and attach it to VM iso = Iso.create( self.apiclient, self.services["iso"], account=self.account.name, domainid=self.account.domainid, ) self.debug("Created ISO with ID: %s for account: %s" % ( iso.id, self.account.name )) try: self.debug("Downloading ISO with ID: %s" % iso.id) iso.download(self.apiclient) except Exception as e: self.fail("Exception while downloading ISO %s: %s" % (iso.id, e)) # Attach ISO to virtual machine self.debug("Attach ISO ID: %s to VM: %s" % ( iso.id, self.virtual_machine.id )) cmd = attachIso.attachIsoCmd() cmd.id = iso.id cmd.virtualmachineid = self.virtual_machine.id self.apiclient.attachIso(cmd) # Verify ISO is attached to VM vm_response = VirtualMachine.list( self.apiclient, id=self.virtual_machine.id, ) # Verify VM response to check whether VM deployment was successful self.assertEqual( isinstance(vm_response, list), True, "Check list VM response for valid list" ) self.assertNotEqual( len(vm_response), 0, "Check VMs available in List VMs response" ) vm = vm_response[0] self.assertEqual( vm.isoid, iso.id, "Check ISO is attached to VM or not" ) return
def test_01_import_storage_policies(self): """Test VMWare storage policies """ # Validate the following: # 1. Import VMWare storage policies - the command should return a non-empty result # 2. List current VMWare storage policies - the command should return a non-empty result # 3. Create service offering with the first compatible imported policy # 4. Create disk offering with the same policy # 5. Create VM using the already created service offering # 6. Create volume using the already created disk offering # 7. Attach this volume to our VM # 8. Detach the volume from our VM self.hypervisor = self.testClient.getHypervisorInfo() if self.hypervisor.lower() != "vmware": self.skipTest( "VMWare storage policies feature is not supported on %s" % self.hypervisor.lower()) self.debug("Importing VMWare storage policies") imported_policies = self.import_vmware_storage_policies(self.apiclient) if len(imported_policies) == 0: self.skipTest("There are no VMWare storage policies") self.debug("Listing VMWare storage policies") listed_policies = self.list_storage_policies(self.apiclient) self.assertNotEqual(len(listed_policies), 0, "Check that the list of storage policies is not empty") self.assertEqual( len(imported_policies), len(listed_policies), "Check if the number of imported policies is identical to the number of listed policies" ) selected_policy = None for imported_policy in imported_policies: compatible_pools = self.list_storage_policy_compatible_pools( self.apiclient, imported_policy.id) if compatible_pools and len(compatible_pools) > 0: selected_policy = imported_policy break if not selected_policy: self.skipTest( "There are no compatible storage pools with the imported policies" ) # Create service offering with the selected storage policy service_offering = ServiceOffering.create( self.apiclient, self.testdata["service_offering"], storagepolicy=selected_policy.id) self.cleanup.append(service_offering) # Create disk offering with the selected storage policy disk_offering = DiskOffering.create(self.apiclient, self.testdata["disk_offering"], storagepolicy=selected_policy.id) self.cleanup.append(disk_offering) l2_network = Network.create(self.apiclient, self.testdata["l2-network"], zoneid=self.zone.id, networkofferingid=self.network_offering.id) self.cleanup.append(l2_network) virtual_machine = VirtualMachine.create( self.apiclient, self.testdata["small"], templateid=self.template.id, accountid=self.account.name, domainid=self.account.domainid, zoneid=self.zone.id, networkids=l2_network.id, serviceofferingid=service_offering.id, ) self.cleanup.append(virtual_machine) vms = VirtualMachine.list(self.apiclient, id=virtual_machine.id, listall=True) self.assertEqual( isinstance(vms, list), True, "listVirtualMachines returned invalid object in response.") self.assertNotEqual(len(vms), 0, "listVirtualMachines returned empty list.") self.debug("Deployed VM on host: %s" % vms[0].hostid) volume = Volume.create(self.apiclient, self.testdata["volume"], account=self.account.name, domainid=self.account.domainid, zoneid=self.zone.id, diskofferingid=disk_offering.id) self.cleanup.append(volume) list_volume_response = Volume.list(self.apiclient, id=volume.id) self.assertEqual(isinstance(list_volume_response, list), True, "Check list response returns a valid list") self.assertNotEqual(list_volume_response, None, "Check if volume exists in ListVolumes") return
def test_01_attach_volume(self): """Attach a created Volume to a Running VM """ # Validate the following # 1. Create a data volume. # 2. Before the volume is attached, the listVolumes response should not # associate it with any VM # 3. Attach volume to VM. Attach volume should be successful. # 4. List Volumes should have vmname and virtualmachineid fields in # response after volume attach (to VM) # Check the list volumes response for vmname and virtualmachineid list_volume_response = Volume.list( self.apiclient, id=self.volume.id ) self.assertNotEqual( list_volume_response, None, "Check if volume exists in ListVolumes" ) self.assertEqual( isinstance(list_volume_response, list), True, "Check list volumes response for valid list" ) volume = list_volume_response[0] self.assertEqual( volume.type, 'DATADISK', "Check volume type from list volume response" ) self.assertEqual( hasattr(volume, 'vmname'), True, "Check whether volume has vmname field" ) self.assertEqual( hasattr(volume, 'virtualmachineid'), True, "Check whether volume has virtualmachineid field" ) # Attach volume to VM self.debug("Attach volume: %s to VM: %s" % ( self.volume.id, self.virtual_machine.id )) self.virtual_machine.attach_volume(self.apiclient, self.volume) # Check all volumes attached to same VM list_volume_response = Volume.list( self.apiclient, virtualmachineid=self.virtual_machine.id, type='DATADISK', listall=True ) self.assertNotEqual( list_volume_response, None, "Check if volume exists in ListVolumes" ) self.assertEqual( isinstance(list_volume_response, list), True, "Check list volumes response for valid list" ) volume = list_volume_response[0] self.assertEqual( volume.vmname, self.virtual_machine.name, "Check virtual machine name in list volumes response" ) self.assertEqual( volume.virtualmachineid, self.virtual_machine.id, "Check VM ID in list Volume response" ) return
def setUpClass(cls): testClient = super(TestMultipleVolumeAttach, cls).getClsTestClient() cls.apiclient = testClient.getApiClient() cls.services = testClient.getParsedTestDataConfig() cls._cleanup = [] # Get Zone, Domain and templates cls.domain = get_domain(cls.apiclient) cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests()) cls.services['mode'] = cls.zone.networktype cls.hypervisor = testClient.getHypervisorInfo() cls.invalidStoragePoolType = False # For LXC, skip the test if no storage pool of type 'rbd' (e.g. Ceph) is available if cls.hypervisor.lower() == 'lxc': if not find_storage_pool_type(cls.apiclient, storagetype='rbd'): # RBD storage type is required for data volumes for LXC cls.invalidStoragePoolType = True return cls.disk_offering = DiskOffering.create(cls.apiclient, cls.services["disk_offering"]) template = get_template(cls.apiclient, cls.zone.id, cls.services["ostype"]) if template == FAILED: assert False, "get_template() failed to return template with description %s" % cls.services[ "ostype"] cls.services["domainid"] = cls.domain.id cls.services["zoneid"] = cls.zone.id cls.services["template"] = template.id cls.services["diskofferingid"] = cls.disk_offering.id # Create account, service offering and VM cls.account = Account.create(cls.apiclient, cls.services["account"], domainid=cls.domain.id) cls.service_offering = ServiceOffering.create( cls.apiclient, cls.services["service_offering"]) cls.virtual_machine = VirtualMachine.create( cls.apiclient, cls.services, accountid=cls.account.name, domainid=cls.account.domainid, serviceofferingid=cls.service_offering.id, mode=cls.services["mode"]) #Create volumes (data disks) cls.volume1 = Volume.create(cls.apiclient, cls.services, account=cls.account.name, domainid=cls.account.domainid) cls.volume2 = Volume.create(cls.apiclient, cls.services, account=cls.account.name, domainid=cls.account.domainid) cls.volume3 = Volume.create(cls.apiclient, cls.services, account=cls.account.name, domainid=cls.account.domainid) cls.volume4 = Volume.create(cls.apiclient, cls.services, account=cls.account.name, domainid=cls.account.domainid) cls._cleanup = [cls.service_offering, cls.disk_offering, cls.account]
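The four near-identical Volume.create calls could equally be produced with a loop; the arguments are the same as above, only the list comprehension is new.

# Illustrative: create the four data disks in one pass and keep a list.
cls.volumes = [
    Volume.create(cls.apiclient,
                  cls.services,
                  account=cls.account.name,
                  domainid=cls.account.domainid)
    for _ in range(4)
]
# cls.volume1 .. cls.volume4 would then be cls.volumes[0] .. cls.volumes[3].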
def test_01_list_volume_snapshots_pagination(self): """ @Desc: Test to List Volume Snapshots pagination @steps: Step1: Listing all the volume snapshots for a user Step2: Verifying that list size is 0 Step3: Creating (page size + 1) number of volume snapshots Step4: Listing all the volume snapshots again for a user Step5: Verifying that list size is (page size + 1) Step6: Listing all the volume snapshots in page1 Step7: Verifying that list size is (page size) Step8: Listing all the volume snapshots in page2 Step9: Verifying that list size is 1 Step10: Deleting the volume snapshot present in page 2 Step11: Listing all the volume snapshots in page2 Step12: Verifying that list size is 0 """ # Listing all the volume snapshots for a User list_vol_snaps_before = Snapshot.list(self.userapiclient, listall=self.services["listall"]) # Verifying list size is 0 self.assertIsNone(list_vol_snaps_before, "Volume snapshots exists for newly created user") # Listing the root volumes available for the user volumes_list = Volume.list(self.userapiclient, listall=self.services["listall"]) status = validateList(volumes_list) self.assertEquals( PASS, status[0], "Root volume did not get created while deploying a VM") # Verifying list size to be 1 self.assertEquals(1, len(volumes_list), "More than 1 root volume created for deployed VM") root_volume = volumes_list[0] # Creating pagesize + 1 number of volume snapshots for i in range(0, (self.services["pagesize"] + 1)): snapshot_created = Snapshot.create( self.userapiclient, root_volume.id, ) self.assertIsNotNone(snapshot_created, "Snapshot creation failed") self.cleanup.append(snapshot_created) # Listing all the volume snapshots for user again list_vol_snaps_after = Snapshot.list(self.userapiclient, listall=self.services["listall"]) status = validateList(list_vol_snaps_after) self.assertEquals(PASS, status[0], "Volume snapshot creation failed") # Verifying that list size is pagesize + 1 self.assertEquals( self.services["pagesize"] + 1, len(list_vol_snaps_after), "Failed to create pagesize + 1 number of Volume snapshots") # Listing all the volume snapshots in page 1 list_vol_snaps_page1 = Snapshot.list( self.userapiclient, listall=self.services["listall"], page=1, pagesize=self.services["pagesize"]) status = validateList(list_vol_snaps_page1) self.assertEquals(PASS, status[0], "Failed to list volume snapshots in page 1") # Verifying the list size to be equal to pagesize self.assertEquals( self.services["pagesize"], len(list_vol_snaps_page1), "Size of volume snapshots in page 1 is not matching") # Listing all the volume snapshots in page 2 list_vol_snaps_page2 = Snapshot.list( self.userapiclient, listall=self.services["listall"], page=2, pagesize=self.services["pagesize"]) status = validateList(list_vol_snaps_page2) self.assertEquals(PASS, status[0], "Failed to list volume snapshots in page 2") # Verifying the list size to be 1 self.assertEquals( 1, len(list_vol_snaps_page2), "Size of volume snapshots in page 2 is not matching") # Deleting the volume snapshot present in page 2 Snapshot.delete(snapshot_created, self.userapiclient) # Listing all the snapshots in page 2 again list_vol_snaps_page2 = Snapshot.list( self.userapiclient, listall=self.services["listall"], page=2, pagesize=self.services["pagesize"]) # Verifying that list size is 0 self.assertIsNone(list_vol_snaps_page2, "Volume snapshot not deleted from page 2") return
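The test leans on simple pagination arithmetic: creating pagesize + 1 snapshots guarantees exactly two pages, with a single item on the second. A worked sketch of that arithmetic, purely illustrative:

import math

def expected_page_count(total_items, pagesize):
    # Number of pages needed to list total_items at pagesize per page.
    return math.ceil(total_items / pagesize)

# With total = pagesize + 1:
#   expected_page_count(pagesize + 1, pagesize) == 2
#   items on the last page = (pagesize + 1) - pagesize == 1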
def test_03_multiple_domains_multiple_volumes(self): """Test primary storage counts in multiple child domains # Steps 1. Create a parent domain and two sub-domains in it (also admin accounts of each domain) Repeat following steps for both the child domains 2. Deploy VM in child domain 3. Check if the resource count for domain is updated correctly 4. Create multiple volumes and attach it to the VM 5. Check if the primary storage resource count is updated correctly 6. Delete one of the volumes and check if the primary storage resource count reduced by equivalent number 7. Detach other volume and check primary storage resource count remains the same """ # Setting up account and domain hierarchy if self.hypervisor.lower() == 'lxc': if not find_storage_pool_type(self.apiclient, storagetype='rbd'): self.skipTest( "RBD storage type is required for data volumes for LXC") result = self.setupAccounts() if result[0] == FAIL: self.fail("Failure while setting up accounts and domains: %s" % result[1]) else: users = result[2] templatesize = (self.template.size / (1024**3)) for domain, admin in list(users.items()): self.account = admin self.domain = domain apiclient = self.testClient.getUserApiClient( UserName=self.account.name, DomainName=self.account.domain) self.assertNotEqual( apiclient, FAILED, "Failed to create api client for account: %s" % self.account.name) try: vm = VirtualMachine.create( apiclient, self.services["virtual_machine"], accountid=self.account.name, domainid=self.account.domainid, diskofferingid=self.disk_offering.id, serviceofferingid=self.service_offering.id, startvm=False) self.cleanup.append(vm) expectedCount = templatesize + self.disk_offering.disksize result = isDomainResourceCountEqualToExpectedCount( self.apiclient, self.domain.id, expectedCount, RESOURCE_PRIMARY_STORAGE) self.assertFalse(result[0], result[1]) self.assertTrue(result[2], "Resource count does not match") volume1size = self.services["disk_offering"]["disksize"] = 15 disk_offering_15_GB = DiskOffering.create( self.apiclient, services=self.services["disk_offering"]) self.cleanup.append(disk_offering_15_GB) volume2size = self.services["disk_offering"]["disksize"] = 20 disk_offering_20_GB = DiskOffering.create( self.apiclient, services=self.services["disk_offering"]) self.cleanup.append(disk_offering_20_GB) volume_1 = Volume.create(apiclient, self.services["volume"], zoneid=self.zone.id, account=self.account.name, domainid=self.account.domainid, diskofferingid=disk_offering_15_GB.id) self.cleanup.append(volume_1) volume_2 = Volume.create(apiclient, self.services["volume"], zoneid=self.zone.id, account=self.account.name, domainid=self.account.domainid, diskofferingid=disk_offering_20_GB.id) self.cleanup.append(volume_2) vm.attach_volume(apiclient, volume=volume_1) self.cleanup.remove(volume_1) vm.attach_volume(apiclient, volume=volume_2) self.cleanup.remove(volume_2) expectedCount += volume1size + volume2size result = isDomainResourceCountEqualToExpectedCount( self.apiclient, self.domain.id, expectedCount, RESOURCE_PRIMARY_STORAGE) self.assertFalse(result[0], result[1]) self.assertTrue(result[2], "Resource count does not match") vm.detach_volume(apiclient, volume=volume_1) volume_1.delete(apiclient) expectedCount -= volume1size result = isDomainResourceCountEqualToExpectedCount( self.apiclient, self.domain.id, expectedCount, RESOURCE_PRIMARY_STORAGE) self.assertFalse(result[0], result[1]) self.assertTrue(result[2], "Resource count does not match") except Exception as e: self.fail("Failure: %s" % e) return
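The expected primary storage count above is plain accounting: the root disk contributes the template size (converted to GB) and each data volume contributes its disk offering size. A sketch of that arithmetic; the function name is illustrative.

def expected_primary_storage_gb(template_size_bytes, data_volume_gbs):
    # Root volume (template size in GB) plus all data volumes, in GB.
    root_gb = template_size_bytes // (1024 ** 3)
    return root_gb + sum(data_volume_gbs)

# Mirroring the test: after attaching the 15 GB and 20 GB volumes,
# expected_primary_storage_gb(
#     self.template.size,
#     [self.disk_offering.disksize, volume1size, volume2size])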
def test_create_multiple_volumes(self, value): """Test create multiple volumes # Validate the following # 1. Create a VM with custom disk offering and check the primary storage count # of account # 2. Create multiple volumes in account # 3. Verify that primary storage count increases by same amount # 4. Attach volumes to VM and verify resource count remains the same # 5. Detach and delete both volumes one by one and verify resource count decreases # proportionately""" # Set up the account and a VM with a custom disk offering response = self.setupAccount(value) self.assertEqual(response[0], PASS, response[1]) apiclient = self.apiclient if value == CHILD_DOMAIN_ADMIN: apiclient = self.testClient.getUserApiClient( UserName=self.account.name, DomainName=self.account.domain) self.assertNotEqual( apiclient, FAIL, "Failure while getting\ api client of account %s" % self.account.name) try: self.services["disk_offering"]["disksize"] = 5 disk_offering_5_GB = DiskOffering.create( self.apiclient, services=self.services["disk_offering"]) self.cleanup.append(disk_offering_5_GB) self.services["disk_offering"]["disksize"] = 10 disk_offering_10_GB = DiskOffering.create( self.apiclient, services=self.services["disk_offering"]) self.cleanup.append(disk_offering_10_GB) volume_1 = Volume.create(apiclient, self.services["volume"], zoneid=self.zone.id, account=self.account.name, domainid=self.account.domainid, diskofferingid=disk_offering_5_GB.id) volume_2 = Volume.create(apiclient, self.services["volume"], zoneid=self.zone.id, account=self.account.name, domainid=self.account.domainid, diskofferingid=disk_offering_10_GB.id) self.debug("Attaching volume %s to vm %s" % (volume_1.name, self.virtualMachine.name)) self.virtualMachine.attach_volume(apiclient, volume=volume_1) self.debug("Attaching volume %s to vm %s" % (volume_2.name, self.virtualMachine.name)) self.virtualMachine.attach_volume(apiclient, volume=volume_2) except Exception as e: self.fail("Failure: %s" % e) expectedCount = self.initialResourceCount + 15 # (5 + 10) response = matchResourceCount(self.apiclient, expectedCount, RESOURCE_PRIMARY_STORAGE, accountid=self.account.id) self.assertEqual(response[0], PASS, response[1]) try: # Detaching and deleting volume 1 self.virtualMachine.detach_volume(apiclient, volume=volume_1) volume_1.delete(apiclient) except Exception as e: self.fail("Failure while volume operation: %s" % e) expectedCount -= 5 # After deleting first volume response = matchResourceCount(self.apiclient, expectedCount, RESOURCE_PRIMARY_STORAGE, accountid=self.account.id) self.assertEqual(response[0], PASS, response[1]) try: # Detaching and deleting volume 2 self.virtualMachine.detach_volume(apiclient, volume=volume_2) volume_2.delete(apiclient) except Exception as e: self.fail("Failure while volume operation: %s" % e) expectedCount -= 10 response = matchResourceCount(self.apiclient, expectedCount, RESOURCE_PRIMARY_STORAGE, accountid=self.account.id) self.assertEqual(response[0], PASS, response[1]) return