def test_04_vm_glid_to_another_storage(self):
    vm = VirtualMachine.create(self.apiclient,
                               {"name": "StorPool-%s" % uuid.uuid4()},
                               zoneid=self.zone.id,
                               templateid=self.template.id,
                               serviceofferingid=self.service_offering.id,
                               hypervisor=self.hypervisor,
                               rootdisksize=10)
    volumes = list_volumes(self.apiclient, virtualmachineid=vm.id)
    for v in volumes:
        name = v.path.split("/")[3]
        try:
            sp_volume = self.spapi.volumeList(volumeName="~" + name)
        except spapi.ApiError as err:
            raise Exception(err)

    vm.stop(self.apiclient, forced=True)
    migrated_vm = self.migrate_vm(vm, self.primary_storage2)
    volumes = list_volumes(self.apiclient, virtualmachineid=migrated_vm.id)
    for v in volumes:
        name = v.path.split("/")[3]
        try:
            sp_volume = self.spapi.volumeList(volumeName="~" + name)
        except spapi.ApiError as err:
            raise Exception(err)
        # The VM was migrated to primary_storage2, so its volumes should now reside there
        self.assertEqual(
            v.storageid, self.primary_storage2.id,
            "Did not migrate virtual machine from NFS to StorPool")
    self._cleanup.append(vm)
def test_25_vc_policy_attach_vol_global_id_vm_uuid(self): tag = Tag.create(self.apiclient, resourceIds=self.virtual_machine4.id, resourceType='UserVm', tags={'vc-policy': 'testing_vc-policy'}) vm = list_virtual_machines(self.apiclient, id=self.virtual_machine4.id) vm_tags = vm[0].tags volumes = list_volumes( self.apiclient, virtualmachineid=self.virtual_machine4.id, ) self.assertTrue(len(volumes) == 1, "Volume length should be == 1") for v in volumes: self.helper.vc_policy_tags_global_id(v, vm_tags, False) volume = Volume.create( self.apiclient, {"diskname": "StorPoolDisk-GlId-%d" % random.randint(0, 100)}, zoneid=self.zone.id, diskofferingid=self.disk_offering.id, ) self.virtual_machine4.attach_volume(self.apiclient, volume) vm = list_virtual_machines(self.apiclient, id=self.virtual_machine4.id) vm_tags = vm[0].tags volumes = list_volumes(self.apiclient, virtualmachineid=self.virtual_machine4.id, id=volume.id) self.assertTrue(len(volumes) == 1, "Volume length should be == 1") self.helper.vc_policy_tags_global_id(volumes[0], vm_tags, False) self._cleanup.append(volume)
def test_10_attach_detach_instances_with_glId(self): volume = Volume.create( self.apiclient, {"diskname":"StorPoolDisk-GlId-%d" % random.randint(0, 100) }, zoneid=self.zone.id, diskofferingid=self.disk_offering.id, ) vm = VirtualMachine.create( self.apiclient, {"name":"StorPool-%s" % uuid.uuid4() }, zoneid=self.zone.id, templateid=self.template.id, serviceofferingid=self.service_offering.id, hypervisor=self.hypervisor, rootdisksize=10 ) vm.attach_volume(self.apiclient, volume) list = list_volumes(self.apiclient,virtualmachineid = vm.id, id = volume.id) list_root = list_volumes(self.apiclient,virtualmachineid = vm.id, type = "ROOT") self.assertIsNotNone(list, "Volume was not attached") self.assertIsNotNone(list_root, "ROOT volume is missing") self.helper.storpool_volume_globalid(list[0]) self.helper.storpool_volume_globalid(list_root[0]) vm.stop(self.apiclient, forced=True) detached = vm.detach_volume(self.apiclient, list[0]) self.assertIsNone(detached.virtualmachineid, "Volume was not detached from vm") Volume.delete(volume, self.apiclient) vm.delete(self.apiclient, expunge=True)
def test_CLOUDSTACK_6181_stoppedvm_root_resize(self):
    """
    @Desc: Test root volume resize of stopped VM
    @Reference: https://issues.apache.org/jira/browse/CLOUDSTACK-6181
    @Steps:
    Step1: Deploy VM in stopped state (startvm=false), resize via 'resizeVolume', start VM. Root is new size.
    """
    # This test is supported only on the KVM hypervisor
    if self.hypervisor.lower() != 'kvm':
        self.skipTest("Test can be run only on KVM hypervisor")

    # Deploy the virtual machine in stopped state
    self.services["virtual_machine"]["zoneid"] = self.zone.id
    self.services["virtual_machine"]["template"] = self.template.id
    virtual_machine = VirtualMachine.create(
        self.apiClient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        startvm=False)
    self.cleanup.append(virtual_machine)

    # Verify the VM was created and is in the Stopped state
    self.assertEqual(virtual_machine.state, 'Stopped', "Check VM state is Stopped or not")

    volumes = list_volumes(self.apiClient, virtualmachineid=virtual_machine.id, type='ROOT', listall=True)
    self.assertIsNotNone(volumes, "root volume is not returned properly")

    # Resize the root volume to the template size plus 2 GB
    newrootsize = (self.template.size >> 30) + 2
    cmd = resizeVolume.resizeVolumeCmd()
    cmd.id = volumes[0].id
    cmd.size = newrootsize
    self.apiClient.resizeVolume(cmd)

    virtual_machine.start(self.apiClient)

    volumes_after_resize = list_volumes(
        self.apiClient,
        virtualmachineid=virtual_machine.id,
        type='ROOT',
        listall=True)
    rootvolume = volumes_after_resize[0]
    success = False
    if rootvolume is not None and rootvolume.size == (newrootsize << 30):
        success = True
    self.assertEqual(success, True, "Check if the root volume resized appropriately")
    return
def test_01_verify_events_table(self):
    """ Test events table

    # 1. Deploy a VM.
    # 2. Take VM snapshot.
    # 3. Verify that the events table records the UUID of the volume in the
    #    description instead of the volume ID
    """
    # Step 1
    # Create VM
    vm = VirtualMachine.create(
        self.userapiclient,
        self.testdata["small"],
        templateid=self.template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        zoneid=self.zone.id,
    )
    volumes_list = list_volumes(self.apiclient, virtualmachineid=vm.id, type='ROOT', listall=True)
    volume_list_validation = validateList(volumes_list)
    self.assertEqual(
        volume_list_validation[0], PASS,
        "volume list validation failed due to %s" % volume_list_validation[2])
    root_volume = volumes_list[0]

    # Step 2
    # Create snapshot of root volume
    snapshot = Snapshot.create(self.apiclient, root_volume.id)
    snapshots_list = Snapshot.list(self.userapiclient, id=snapshot.id)
    status = validateList(snapshots_list)
    self.assertEqual(status[0], PASS, "Snapshots List Validation Failed")
    self.assertEqual(snapshot.state, "BackedUp", "Check if snapshot gets created properly")

    # Step 3
    qresultset = self.dbclient.execute(
        "select description from event where type='SNAPSHOT.CREATE' AND \
        description like '%%%s%%'" % root_volume.id)
    event_validation_result = validateList(qresultset)
    self.assertEqual(
        event_validation_result[0], PASS,
        "event list validation failed due to %s" % event_validation_result[2])
    self.assertNotEqual(
        len(qresultset), 0,
        "Check if events table records UUID of the volume")
    return
def test_05_snapshot_events(self): """Test snapshot events """ # Validate the following # 1. Perform snapshot on the root disk of this VM and check the events/alerts. # 2. delete the snapshots and check the events/alerts # 3. listEvents() shows created/deleted snapshot events # Get the Root disk of VM volumes = list_volumes(self.apiclient, virtualmachineid=self.virtual_machine.id, type="ROOT", listall=True) self.assertEqual(isinstance(volumes, list), True, "Check list response returns a valid list") volume = volumes[0] # Create a snapshot from the ROOTDISK snapshot = Snapshot.create(self.apiclient, volume.id) self.debug("Snapshot created with ID: %s" % snapshot.id) snapshots = list_snapshots(self.apiclient, id=snapshot.id) self.assertEqual(isinstance(snapshots, list), True, "Check list response returns a valid list") self.assertNotEqual(snapshots, None, "Check if result exists in list snapshots call") self.assertEqual(snapshots[0].id, snapshot.id, "Check snapshot id in list resources call") snapshot.delete(self.apiclient) # Sleep to ensure that snapshot is deleted properly time.sleep(self.services["sleep"]) events = list_events( self.apiclient, account=self.account.name, domainid=self.account.domainid, type="SNAPSHOT.DELETE" ) self.assertEqual(isinstance(events, list), True, "Check list response returns a valid list") self.assertNotEqual(events, None, "Check if event exists in list events call") self.assertIn(events[0].state, ["Completed", "Scheduled"], "Check events state in list events call") return
def test_04_delete_snapshot(self): """Test Delete Snapshot """ # 1. Snapshot the Volume # 2. Delete the snapshot # 3. Verify snapshot is removed by calling List Snapshots API # 4. Verify snapshot was removed from image store self.debug("Creating volume under account: %s" % self.account.name) volume = Volume.create( self.apiclient, self.services["volume"], zoneid=self.zone.id, account=self.account.name, domainid=self.account.domainid, diskofferingid=self.disk_offering.id, ) self.debug("Created volume: %s" % volume.id) self.debug("Attaching volume to vm: %s" % self.virtual_machine.id) self.virtual_machine.attach_volume(self.apiclient, volume) self.debug("Volume attached to vm") volumes = list_volumes(self.apiclient, virtualmachineid=self.virtual_machine.id, type="DATADISK", id=volume.id) self.assertEqual(isinstance(volumes, list), True, "Check list response returns a valid list") snapshot = Snapshot.create( self.apiclient, volumes[0].id, account=self.account.name, domainid=self.account.domainid ) snapshot.delete(self.apiclient) snapshots = list_snapshots(self.apiclient, id=snapshot.id) self.assertEqual(snapshots, None, "Check if result exists in list item call") self.assertFalse(is_snapshot_on_nfs(self.apiclient, self.dbclient, self.config, self.zone.id, snapshot.id)) return
def _create_and_test_snapshot(self, cs_vol_id, primary_storage_db_id, expected_num_snapshots, err_mesg): vol_snap = Snapshot.create(self.apiClient, volume_id=cs_vol_id) list_volumes_response = list_volumes(self.apiClient, id=cs_vol_id) cs_volume = list_volumes_response[0] dt_volume = self._get_dt_volume_for_cs_volume(cs_volume) dt_snapshots = self._get_native_snapshots_for_dt_volume(dt_volume) self._check_list(dt_snapshots, expected_num_snapshots, err_mesg) dt_snapshot = self._most_recent_dt_snapshot(dt_snapshots) vol_snap_db_id = self._get_cs_volume_snapshot_db_id(vol_snap) snapshot_details = self._get_snapshot_details(vol_snap_db_id) dt_volume_id = self._get_app_instance_name_from_cs_volume(cs_volume) dt_snapshot_id = dt_volume_id + ':' + dt_snapshot['timestamp'] self._check_snapshot_details(snapshot_details, vol_snap_db_id, dt_volume_id, dt_snapshot_id, primary_storage_db_id) return vol_snap
def test_05_set_vcpolicy_tag_with_admin_and_try_delete_with_user(self): ''' Test set vc-policy tag to VM with one attached disk ''' tag = Tag.create(self.apiclient, resourceIds=self.virtual_machine.id, resourceType='UserVm', tags={'vc-policy': 'testing_vc-policy'}) self.debug( '######################### test_05_set_vcpolicy_tag_with_admin_and_try_delete_with_user tags ######################### ' ) vm = list_virtual_machines(self.userapiclient, id=self.virtual_machine.id) vm_tags = vm[0].tags volumes = list_volumes(self.apiclient, virtualmachineid=self.virtual_machine.id, listall=True) self.debug( '######################### test_01_set_vcpolicy_tag_to_vm_with_attached_disks tags ######################### ' ) self.vc_policy_tags(volumes, vm_tags, vm) try: Tag.delete(self.userapiclient, resourceIds=self.virtual_machine.id, resourceType='UserVm', tags={'vc-policy': 'testing_vc-policy'}) except Exception as e: self.debug( "##################### test_05_set_vcpolicy_tag_with_admin_and_try_delete_with_user %s " % e)
def test_02_set_vcpolicy_tag_to_attached_disk(self): """ Test set vc-policy tag to new disk attached to VM""" volume_attached = self.virtual_machine.attach_volume( self.apiclient, self.volume_2) volume = list_volumes(self.apiclient, id=volume_attached.id, listall=True) name = volume[0].path.split("/")[3] sp_volume = self.spapi.volumeList(volumeName="~" + name) vm = list_virtual_machines(self.apiclient, id=self.virtual_machine.id, listall=True) vm_tags = vm[0].tags for vm_tag in vm_tags: for sp_tag in sp_volume[0].tags: if sp_tag == vm_tag.key: self.assertEqual( sp_tag, vm_tag.key, "StorPool tag is not the same as the Virtual Machine tag" ) self.assertEqual( sp_volume[0].tags[sp_tag], vm_tag.value, "StorPool tag value is not the same as the Virtual Machine tag value" ) if sp_tag == 'cvm': self.assertEqual(sp_volume[0].tags[sp_tag], vm[0].id, "cvm tag is not the expected value")
def test_08_vcpolicy_tag_to_reverted_disk(self): tag = Tag.create(self.apiclient, resourceIds=self.virtual_machine2.id, resourceType='UserVm', tags={'vc-policy': 'testing_vc-policy'}) vm = list_virtual_machines(self.apiclient, id=self.virtual_machine2.id, listall=True) vm_tags = vm[0].tags volume = Volume.list(self.apiclient, virtualmachineid=self.virtual_machine2.id, listall=True, type="ROOT") self.vc_policy_tags(volume, vm_tags, vm) snapshot = Snapshot.create(self.apiclient, volume[0].id, account=self.account.name, domainid=self.account.domainid) virtual_machine = self.virtual_machine2.stop(self.apiclient, forced=True) cmd = revertSnapshot.revertSnapshotCmd() cmd.id = snapshot.id revertedn = self.apiclient.revertSnapshot(cmd) vm = list_virtual_machines(self.apiclient, id=self.virtual_machine2.id) vm_tags = vm[0].tags vol = list_volumes(self.apiclient, id=snapshot.volumeid, listall=True) self.vc_policy_tags(vol, vm_tags, vm)
def _create_vm_using_template_and_destroy_vm(self, template): vm_name = "VM-%d" % random.randint(0, 100) virtual_machine_dict = {"name": vm_name, "displayname": vm_name} virtual_machine = VirtualMachine.create( self.apiClient, virtual_machine_dict, accountid=self.account.name, zoneid=self.zone.id, serviceofferingid=self.compute_offering.id, templateid=template.id, domainid=self.domain.id, startvm=True ) list_volumes_response = list_volumes( self.apiClient, virtualmachineid=virtual_machine.id, listall=True ) vm_root_volume = list_volumes_response[0] virtual_machine.delete(self.apiClient, True)
def test_08_migrate_vm_live_with_snapshots_on_remote(self): """ Create snapshots on all the volumes, Migrate all the volumes and VM. """ global vm2 # Get ROOT Volume vol_for_snap = list_volumes( self.apiclient, virtualmachineid=vm2.id, listall=True) for vol in vol_for_snap: snapshot = Snapshot.create( self.apiclient, volume_id=vol.id ) globalId = self.storpool_snapshot_globalid(snapshot) sn = sptypes.SnapshotUpdateDesc(rename = snapshot.id) rename = self.spapi.snapshotUpdate(snapshotName = globalId, json = sn) snapshot.validateState( self.apiclient, snapshotstate="backedup", ) self._cleanup.append(snapshot) # Migrate all volumes and VMs destinationHost, vol_list = self.get_destination_pools_hosts(vm2) vm2 = self.migrateVm(self.virtual_machine_on_remote, destinationHost)
def test_06_migrate_live_remote(self): """ Migrate VMs/Volumes live """ global vm2 volumes = list_volumes(self.apiclient, virtualmachineid = self.virtual_machine_on_remote.id) for v in volumes: vol = sptypes.VolumeUpdateDesc(rename = v.id) name = v.path.split("/")[3] rename = self.spapi.volumeUpdate(volumeName = "~" + name, json = vol) destinationHost = self.getDestinationHost(self.virtual_machine_on_remote.hostid, self.host_remote) # Migrate the VM vm2 = self.migrateVm(self.virtual_machine_on_remote, destinationHost) # self.check_files(vm,destinationHost) """ Migrate the VM and ROOT volume """ # Get all volumes to be migrated destinationHost, vol_list = self.get_destination_pools_hosts(vm2) vm2 = self.migrateVm(self.virtual_machine_on_remote, destinationHost)
def test_02_migrate_vm_from_ceph_to_storpool_live(self): """ Migrate VMs/Volumes live """ self.storage_pool = StoragePool.update(self.apiclient, id=self.storage_pool.id, tags=["ssd, ceph"]) random_data = self.writeToFile(self.vm2) cmd = listHosts.listHostsCmd() cmd.type = "Routing" cmd.state = "Up" cmd.zoneid = self.zone.id hosts = self.apiclient.listHosts(cmd) destinationHost = self.helper.getDestinationHost( self.vm2.hostid, hosts) vol_pool_map = {} volumes = list_volumes(self.apiclient, virtualmachineid=self.vm2.id, listall=True) for v in volumes: vol_pool_map[v.id] = self.storage_pool.id # Migrate the vm2 print(vol_pool_map) vm2 = self.vm2.migrate_vm_with_volume(self.apiclient, hostid=destinationHost.id, migrateto=vol_pool_map) self.checkFileAndContentExists(self.vm2, random_data) self.storage_pool = StoragePool.update(self.apiclient, id=self.storage_pool.id, tags=["ssd"])
def test_12_migrate_vm_live_with_snapshots_on_remote(self): """ Create snapshots on all the volumes, Migrate all the volumes and VM. """ global vm2 # Get ROOT Volume vol_for_snap = list_volumes( self.apiclient, virtualmachineid=vm2.id, listall=True) for vol in vol_for_snap: snapshot = Snapshot.create( self.apiclient, volume_id=vol.id ) snapshot.validateState( self.apiclient, snapshotstate="backedup", ) # Migrate all volumes and VMs destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote) for v in vol_list: self.helper.check_storpool_volume_iops(self.spapi, v) vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_2, destinationHost) destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm2, self.host_remote) for v in vol_list: self.helper.check_storpool_volume_iops(self.spapi, v)
def test_07_snapshot_to_template_bypass_secondary(self): ''' Create template from snapshot bypassing secondary storage ''' ##cls.virtual_machine volume = list_volumes(self.apiclient, virtualmachineid=self.virtual_machine.id) snapshot = Snapshot.create(self.apiclient, volume_id=volume[0].id) backup_config = list_configurations(self.apiclient, name="sp.bypass.secondary.storage") if (backup_config[0].value == "false"): backup_config = Configurations.update( self.apiclient, name="sp.bypass.secondary.storage", value="true") self.assertIsNotNone(snapshot, "Could not create snapshot") self.assertIsInstance(snapshot, Snapshot, "Snapshot is not an instance of Snapshot") template = self.create_template_from_snapshot(self.apiclient, self.services, snapshotid=snapshot.id) virtual_machine = VirtualMachine.create( self.apiclient, {"name": "StorPool-%d" % random.randint(0, 100)}, zoneid=self.zone.id, templateid=template.id, serviceofferingid=self.service_offering.id, hypervisor=self.hypervisor, rootdisksize=10) ssh_client = virtual_machine.get_ssh_client() self.assertIsNotNone(template, "Template is None") self.assertIsInstance(template, Template, "Template is instance of template") self._cleanup.append(snapshot) self._cleanup.append(template)
def test_15_snapshot_root_vol_glid(self): vm = VirtualMachine.create(self.apiclient, {"name": "StorPool-%s" % uuid.uuid4()}, zoneid=self.zone.id, templateid=self.template.id, serviceofferingid=self.service_offering.id, hypervisor=self.hypervisor, rootdisksize=10) list = list_volumes(self.apiclient, virtualmachineid=vm.id, type="ROOT") self.assertIsNotNone(list, "Could not find ROOT volume") self.helper.storpool_volume_globalid(list[0]) snapshot = Snapshot.create( self.apiclient, volume_id=list[0].id, ) self.assertIsNotNone(snapshot, "Could not create snapshot") self.assertIsInstance(snapshot, Snapshot, "Created snapshot is not instance of Snapshot") self.helper.storpool_snapshot_globalid(snapshot) self._cleanup.append(vm) self._cleanup.append(snapshot)
def test_02_snapshot_data_disk(self): """Test Snapshot Data Disk """ volume = list_volumes( self.apiclient, virtualmachineid=self.virtual_machine_with_disk.id, type='DATADISK', listall=True) self.assertEqual(isinstance(volume, list), True, "Check list response returns a valid list") self.debug("Creating a Snapshot from data volume: %s" % volume[0].id) snapshot = Snapshot.create(self.apiclient, volume[0].id, account=self.account.name, domainid=self.account.domainid) snapshots = list_snapshots(self.apiclient, id=snapshot.id) self.assertEqual(isinstance(snapshots, list), True, "Check list response returns a valid list") self.assertNotEqual(snapshots, None, "Check if result exists in list item call") self.assertEqual(snapshots[0].id, snapshot.id, "Check resource id in list resources call") self.assertTrue( is_snapshot_on_nfs(self.apiclient, self.dbclient, self.config, self.zone.id, snapshot.id)) return
def test_01_create_vm_on_new_primary_storage(self): ''' Test create Virtual machine on new StorPool's primary storage ''' virtual_machine = VirtualMachine.create( self.apiclient, {"name": "StorPool-%s" % uuid.uuid4()}, zoneid=self.zone.id, templateid=self.template.id, serviceofferingid=self.serviceOfferings.id, hypervisor=self.hypervisor, rootdisksize=10) volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine.id, type="ROOT") volume = volume[0] name = volume.path.split("/")[3] try: spvolume = self.spapi.volumeList(volumeName="~" + name) if spvolume[0].templateName != self.template_name: raise Exception( "Storpool volume's template %s is not with the same template %s" % (spvolume[0].templateName, self.template_name)) except spapi.ApiError as err: raise Exception(err) virtual_machine.delete(self.apiclient, expunge=True)
def test_13_snapshot_detached_vol_with_glid(self): volume = Volume.create( self.apiclient, {"diskname": "StorPoolDisk-GlId-%d" % random.randint(0, 100)}, zoneid=self.zone.id, diskofferingid=self.disk_offering.id, ) self.virtual_machine3.start(self.apiclient) self.virtual_machine3.attach_volume(self.apiclient, volume) list = list_volumes(self.apiclient, virtualmachineid=self.virtual_machine3.id, id=volume.id) self.assertIsNotNone(list, "Volume was not attached") self.helper.storpool_volume_globalid(list[0]) self.virtual_machine3.stop(self.apiclient, forced=True) snapshot = Snapshot.create( self.apiclient, volume_id=volume.id, ) self.assertIsNotNone(snapshot, "Could not create snapshot") self.helper.storpool_snapshot_globalid(snapshot) self._cleanup.append(volume) self._cleanup.append(snapshot)
def test_04_snapshot_volume_bypass_secondary(self): ''' Test snapshot bypassing secondary ''' Configurations.update(self.apiclient, name = "sp.bypass.secondary.storage", value = "true") volume = list_volumes( self.apiclient, virtualmachineid = self.virtual_machine.id, type = "ROOT", listall = True, ) snapshot = Snapshot.create( self.apiclient, volume_id = volume[0].id, account=self.account.name, domainid=self.account.domainid, ) try: cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd() cmd.snapshotid = snapshot.id snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd) flag = False for s in snapshot_details: if s["snapshotDetailsName"] == snapshot.id: name = s["snapshotDetailsValue"].split("/")[3] sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name) flag = True self.debug('################ %s' % sp_snapshot) if flag == False: raise Exception("Could not find snapshot in snapshot details") except spapi.ApiError as err: raise Exception(err) self.assertIsNotNone(snapshot, "Could not create snapshot")
def test_08_migrate_vm_live_with_snapshots_on_remote(self): """ Create snapshots on all the volumes, Migrate all the volumes and VM. """ global vm2 # Get ROOT Volume vol_for_snap = list_volumes(self.apiclient, virtualmachineid=vm2.id, listall=True) for vol in vol_for_snap: snapshot = Snapshot.create( self.apiclient, volume_id=vol.id, account=self.account.name, domainid=self.account.domainid, ) snapshot.validateState( self.apiclient, snapshotstate="backedup", ) # Migrate all volumes and VMs destinationHost, vol_list = self.helper.get_destination_pools_hosts( self.apiclient, vm2, self.host_remote) vm2 = self.helper.migrateVm(self.apiclient, self.virtual_machine_on_remote, destinationHost)
def test_12_move_across_subdomain_vm_volumes(self): """Test as domain admin, stop a VM from subdomain1 and attempt to move it to subdomain2 """ # Validate the following: # 1. deploy VM in sub subdomain1 with volumes. # 3. assignVirtualMachine to subdomain2 userapiclient = self.testClient.getUserApiClient( UserName=self.sdomain_account_user1['account'].name, DomainName=self.sdomain_account_user1['domain'].name, type=2) self.create_vm(self.sdomain_account_user1['account'], self.sdomain_account_user1['domain'], volume=self.sdomain_account_user1['volume']) self.assertRaises(Exception, self.virtual_machine.assign_virtual_machine, userapiclient, self.sdomain_account_user2['account'].name, self.sdomain_account_user2['domain'].id) # Check all volumes attached to same VM list_volume_response = list_volumes( self.apiclient, virtualmachineid=self.virtual_machine.id, type='DATADISK', listall=True) self.assertEqual(isinstance(list_volume_response, list), True, "Check list volumes response for valid list") self.assertNotEqual(list_volume_response[0].domainid, self.sdomain_account_user2['domain'].id, "Volume ownership not changed.") self.virtual_machine.detach_volume( self.apiclient, self.sdomain_account_user1['volume'])
def _resize_volume(self, volume, new_disk_offering): cmd = resizeVolume.resizeVolumeCmd() cmd.id = self.volume.id cmd.diskofferingid = new_disk_offering.id self.apiClient.resizeVolume(cmd) do_size_bytes = int(new_disk_offering.disksize * (1024**3)) retries = 3 success = False while retries > 0: retries -= 1 list_volumes_response = list_volumes(self.apiClient, id=volume.id) for vol in list_volumes_response: if vol.id == volume.id and \ int(vol.size) == do_size_bytes and \ vol.state == 'Ready': success = True if success: break else: time.sleep(10) self.assertEqual(success, True, self._volume_resize_err)
def get_destination_pools_hosts(self, apiclient, vm, hosts):
    vol_list = list_volumes(apiclient, virtualmachineid=vm.id, listall=True)
    # Get destination host
    destinationHost = self.getDestinationHost(vm.hostid, hosts)
    return destinationHost, vol_list
def test_08_attach_detach_vol_glId(self): vm = VirtualMachine.create(self.apiclient, {"name": "StorPool-%s" % uuid.uuid4()}, zoneid=self.zone.id, templateid=self.template.id, serviceofferingid=self.service_offering.id, hypervisor=self.hypervisor, rootdisksize=10) vm.attach_volume(self.apiclient, self.volume5) #volume is created with UUID, but after DB update, has to be with it's globalId volume = Volume.list(self.apiclient, id=self.volume5.id) self.helper.storpool_volume_globalid(volume[0]) list = list_volumes(self.apiclient, virtualmachineid=vm.id, id=self.volume5.id) self.assertIsNotNone(list, "Volume was not attached") vm.stop(self.apiclient, forced=True) detached = vm.detach_volume(self.apiclient, self.volume5) self.assertIsNone(detached.virtualmachineid, "Volume was not detached from vm") vm.delete(self.apiclient, expunge=True)
def _check_volume_state(api_client, volume_id, volume_state):
    volume = list_volumes(api_client, id=volume_id, listall=True)[0]
    if str(volume.state).lower() == volume_state.lower():
        return True, ""
    return False, "The volume is not in the '" + volume_state + "' state. State = " + str(volume.state)
def test_01_snapshot_root_disk(self): """Test Snapshot Root Disk """ # Validate the following # 1. listSnapshots should list the snapshot that was created. # 2. verify that secondary storage NFS share contains # the reqd volume under # /secondary/snapshots//$account_id/$volumeid/$snapshot_uuid # 3. verify backup_snap_id was non null in the `snapshots` table # 4. Verify that zoneid is returned in listSnapshots API response volumes = list_volumes( self.apiclient, virtualmachineid=self.virtual_machine_with_disk.id, type='ROOT', listall=True) snapshot = Snapshot.create(self.apiclient, volumes[0].id, account=self.account.name, domainid=self.account.domainid) self.cleanup.append(snapshot) self.debug("Snapshot created: ID - %s" % snapshot.id) snapshots = list_snapshots(self.apiclient, id=snapshot.id) self.assertEqual(isinstance(snapshots, list), True, "Check list response returns a valid list") self.assertNotEqual(snapshots, None, "Check if result exists in list item call") self.assertEqual(snapshots[0].id, snapshot.id, "Check resource id in list resources call") self.assertIsNotNone(snapshots[0].zoneid, "Zone id is not none in listSnapshots") self.assertEqual(snapshots[0].zoneid, self.zone.id, "Check zone id in the list snapshots") self.debug( "select backup_snap_id, account_id, volume_id from snapshots where uuid = '%s';" % str(snapshot.id)) qresultset = self.dbclient.execute( "select backup_snap_id, account_id, volume_id from snapshots where uuid = '%s';" % str(snapshot.id)) self.assertNotEqual(len(qresultset), 0, "Check DB Query result set") qresult = qresultset[0] snapshot_uuid = qresult[0] # backup_snap_id = snapshot UUID self.assertNotEqual(str(snapshot_uuid), 'NULL', "Check if backup_snap_id is not null") self.assertTrue( is_snapshot_on_nfs(self.apiclient, self.dbclient, self.config, self.zone.id, snapshot.id)) return
def test_01_storage_migrate_root_and_data_disks(self): src_host, dest_host = self._get_source_and_dest_hosts() virtual_machine = VirtualMachine.create( self.apiClient, self.testdata[TestData.virtualMachine], accountid=self.account.name, zoneid=self.zone.id, serviceofferingid=self.compute_offering_1.id, templateid=self.template.id, domainid=self.domain.id, hostid=src_host.id, startvm=True) self.cleanup.append(virtual_machine) cs_root_volume = list_volumes(self.apiClient, listall=True, virtualmachineid=virtual_machine.id)[0] sf_account_id = sf_util.get_sf_account_id( self.cs_api, self.account.id, self.primary_storage.id, self, TestVMMigrationWithStorage. _sf_account_id_should_be_non_zero_int_err_msg) sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_root_volume = sf_util.check_and_get_sf_volume( sf_volumes, cs_root_volume.name, self) cs_data_volume = Volume.create(self.apiClient, self.testdata[TestData.volume_1], account=self.account.name, domainid=self.domain.id, zoneid=self.zone.id, diskofferingid=self.disk_offering_1.id) self.cleanup.append(cs_data_volume) cs_data_volume = virtual_machine.attach_volume(self.apiClient, cs_data_volume) sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_data_volume = sf_util.check_and_get_sf_volume( sf_volumes, cs_data_volume.name, self) sf_root_volume, sf_data_volume = self._migrate_and_verify( virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id, sf_root_volume, sf_data_volume, self.xen_session_1, self.xen_session_2) src_host, dest_host = dest_host, src_host self._migrate_and_verify(virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id, sf_root_volume, sf_data_volume, self.xen_session_2, self.xen_session_1)
def setUpClass(cls): cls.testClient = super(TestAccountSnapshotClean, cls).getClsTestClient() cls.api_client = cls.testClient.getApiClient() cls.services = Services().services # Get Zone, Domain and templates cls.domain = get_domain(cls.api_client) cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests()) cls.services['mode'] = cls.zone.networktype cls.hypervisor = cls.testClient.getHypervisorInfo() if cls.hypervisor.lower() in ['lxc']: raise unittest.SkipTest("snapshots are not supported on %s" % cls.hypervisor.lower()) template = get_template(cls.api_client, cls.zone.id, cls.services["ostype"]) cls.services["server"]["zoneid"] = cls.zone.id cls.services["template"] = template.id cls._cleanup = [] try: # Create VMs, NAT Rules etc cls.account = Account.create(cls.api_client, cls.services["account"], domainid=cls.domain.id) cls.services["account"] = cls.account.name if cls.zone.localstorageenabled: cls.services["service_offering"]["storagetype"] = "local" cls.service_offering = ServiceOffering.create( cls.api_client, cls.services["service_offering"]) cls.virtual_machine = VirtualMachine.create( cls.api_client, cls.services["server"], templateid=template.id, accountid=cls.account.name, domainid=cls.account.domainid, serviceofferingid=cls.service_offering.id) # Get the Root disk of VM volumes = list_volumes(cls.api_client, virtualmachineid=cls.virtual_machine.id, type='ROOT', listall=True) volume = volumes[0] # Create a snapshot from the ROOTDISK cls.snapshot = Snapshot.create(cls.api_client, volume.id) except Exception as e: cls.tearDownClass() unittest.SkipTest("setupClass fails for %s" % cls.__name__) raise e return
def _get_only_volume(self, virtual_machine_id): list_volumes_response = list_volumes( self.apiClient, virtualmachineid=virtual_machine_id, listall=True) sf_util.check_list( list_volumes_response, 1, self, TestOnlineStorageMigration. _should_only_be_one_volume_in_list_err_msg) return list_volumes_response[0]
def test_04_snapshot_limit(self): """Test snapshot limit in snapshot policies """ # Validate the following # 1. Perform hourly recurring snapshot on the root disk of VM and keep # the maxsnapshots as 1 # 2. listSnapshots should list the snapshot that was created # snapshot folder in secondary storage should contain only one # snapshot image(/secondary/snapshots/$accountid/$volumeid/) # Get the Root disk of VM volumes = list_volumes(self.apiclient, virtualmachineid=self.virtual_machine.id, type="ROOT", listall=True) self.assertEqual(isinstance(volumes, list), True, "Check list response returns a valid list") volume = volumes[0] # Create a snapshot policy recurring_snapshot = SnapshotPolicy.create(self.apiclient, volume.id, self.services["recurring_snapshot"]) self.cleanup.append(recurring_snapshot) snapshot_policy = list_snapshot_policy(self.apiclient, id=recurring_snapshot.id, volumeid=volume.id) self.assertEqual(isinstance(snapshot_policy, list), True, "Check list response returns a valid list") self.assertNotEqual(snapshot_policy, None, "Check if result exists in list item call") self.assertEqual( snapshot_policy[0].id, recurring_snapshot.id, "Check recurring snapshot id in list resources call" ) self.assertEqual( snapshot_policy[0].maxsnaps, self.services["recurring_snapshot"]["maxsnaps"], "Check interval type in list resources call", ) # Sleep for (maxsnaps+1) hours to verify # only maxsnaps snapshots are retained time.sleep((int(self.services["recurring_snapshot"]["maxsnaps"]) + 1) * 3600) # Verify the snapshot was created or not snapshots = list_snapshots( self.apiclient, volumeid=volume.id, intervaltype=self.services["recurring_snapshot"]["intervaltype"], snapshottype="RECURRING", listall=True, ) self.assertEqual(isinstance(snapshots, list), True, "Check list response returns a valid list") self.assertEqual( len(snapshots), self.services["recurring_snapshot"]["maxsnaps"], "Check maximum number of recurring snapshots retained", ) snapshot = snapshots[0] # Sleep to ensure that snapshot is reflected in sec storage time.sleep(self.services["sleep"]) self.assertTrue(is_snapshot_on_nfs(self.apiclient, self.dbclient, self.config, self.zone.id, snapshot.id)) return
def _check_and_get_cs_volume(self, volume_id, volume_name):
    list_volumes_response = list_volumes(self.apiClient, id=volume_id)
    sf_util.check_list(list_volumes_response, 1, self,
                       TestVolumes._should_only_be_one_volume_in_list_err_msg)
    cs_volume = list_volumes_response[0]
    self._check_volume(cs_volume, volume_name)
    return cs_volume
def test_05_use_private_template_in_project(self):
    """Test use of private template in a project
    """
    # 1. Create a project
    # 2. Verify that in order to use somebody's Private template for vm
    #    creation in the project, permission to use the template has to
    #    be granted to the Project (use API 'updateTemplatePermissions'
    #    with project id to achieve that).
    try:
        self.debug("Deploying VM with public template: %s" % self.template.id)
        virtual_machine_1 = VirtualMachine.create(
            self.apiclient,
            self.services["server"],
            templateid=self.template.id,
            serviceofferingid=self.service_offering.id,
            projectid=self.project.id,
        )
        self.cleanup.append(virtual_machine_1)
        # Verify VM state
        self.assertEqual(virtual_machine_1.state, "Running", "Check VM state is Running or not")
        virtual_machine_1.stop(self.apiclient)

        # Get the Root disk of VM
        volumes = list_volumes(self.apiclient, projectid=self.project.id, type="ROOT", listall=True)
        self.assertEqual(isinstance(volumes, list), True, "Check for list volume response return valid data")
        volume = volumes[0]

        self.debug("Creating template from volume: %s" % volume.id)
        # Create a template from the ROOTDISK
        template_1 = Template.create(self.userapiclient, self.services["template"], volumeid=volume.id)
        self.cleanup.append(template_1)
        # Verify Template state
        self.assertEqual(template_1.isready, True, "Check Template is in ready state or not")

        # Update template permissions to grant permission to project
        self.debug(
            "Updating template permissions:%s to grant access to project: %s" % (template_1.id, self.project.id)
        )
        template_1.updatePermissions(self.apiclient, op="add", projectids=self.project.id)

        self.debug("Deploying VM with private template: %s" % template_1.id)
        virtual_machine_2 = VirtualMachine.create(
            self.apiclient,
            self.services["server"],
            templateid=template_1.id,
            serviceofferingid=self.service_offering.id,
            projectid=self.project.id,
        )
        self.cleanup.append(virtual_machine_2)
        # Verify VM state
        self.assertEqual(virtual_machine_2.state, "Running", "Check VM state is Running or not")
    except Exception as e:
        self.fail("Exception occurred: %s" % e)
    return
def test_01_check_revert_snapshot(self):
    """ Test revert snapshot on XenServer

    # 1. Deploy a VM.
    # 2. Take VM snapshot.
    # 3. Verify that volume snapshot fails with error
    #    "can not create volume snapshot for VM with VM-snapshot"
    """
    # Step 1
    vm = VirtualMachine.create(
        self.userapiclient,
        self.testdata["small"],
        templateid=self.template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        zoneid=self.zone.id,
    )
    volumes_cluster_list = list_volumes(
        self.apiclient,
        virtualmachineid=vm.id,
        type='ROOT',
        listall=True
    )
    volume_list_validation = validateList(volumes_cluster_list)
    self.assertEqual(
        volume_list_validation[0],
        PASS,
        "Volume list validation failed due to %s" %
        volume_list_validation[2]
    )
    root_volume = volumes_cluster_list[0]

    # Step 2
    vm_snap = VmSnapshot.create(self.apiclient, vm.id)
    self.assertEqual(
        vm_snap.state,
        "Ready",
        "Check the snapshot of vm is ready!"
    )

    # Step 3
    with self.assertRaises(Exception):
        Snapshot.create(
            self.apiclient,
            root_volume.id)
    return
def _check_volume_state(api_client, volume_id, volume_state):
    volume = list_volumes(
        api_client,
        id=volume_id,
        listall=True
    )[0]

    if str(volume.state).lower() == volume_state.lower():
        return True, ""

    return False, "The volume is not in the '" + volume_state + "' state. State = " + str(volume.state)
def test_01_storage_migrate_root_and_data_disks(self): src_host, dest_host = self._get_source_and_dest_hosts() virtual_machine = VirtualMachine.create( self.apiClient, self.testdata[TestData.virtualMachine], accountid=self.account.name, zoneid=self.zone.id, serviceofferingid=self.compute_offering_1.id, templateid=self.template.id, domainid=self.domain.id, hostid=src_host.id, startvm=True ) self.cleanup.append(virtual_machine) cs_root_volume = list_volumes(self.apiClient, listall=True, virtualmachineid=virtual_machine.id)[0] sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVMMigrationWithStorage._sf_account_id_should_be_non_zero_int_err_msg) sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self) cs_data_volume = Volume.create( self.apiClient, self.testdata[TestData.volume_1], account=self.account.name, domainid=self.domain.id, zoneid=self.zone.id, diskofferingid=self.disk_offering_1.id ) self.cleanup.append(cs_data_volume) cs_data_volume = virtual_machine.attach_volume( self.apiClient, cs_data_volume ) sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self) sf_root_volume, sf_data_volume = self._migrate_and_verify(virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id, sf_root_volume, sf_data_volume, self.xen_session_1, self.xen_session_2) src_host, dest_host = dest_host, src_host self._migrate_and_verify(virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id, sf_root_volume, sf_data_volume, self.xen_session_2, self.xen_session_1)
def test_02_snapshot_data_disk(self): """Test Snapshot Data Disk """ if self.hypervisor.lower() in ['hyperv']: self.skipTest("Snapshots feature is not supported on Hyper-V") volume = list_volumes( self.apiclient, virtualmachineid=self.virtual_machine_with_disk.id, type='DATADISK', listall=True ) self.assertEqual( isinstance(volume, list), True, "Check list response returns a valid list" ) self.debug("Creating a Snapshot from data volume: %s" % volume[0].id) snapshot = Snapshot.create( self.apiclient, volume[0].id, account=self.account.name, domainid=self.account.domainid ) snapshots = list_snapshots( self.apiclient, id=snapshot.id ) self.assertEqual( isinstance(snapshots, list), True, "Check list response returns a valid list" ) self.assertNotEqual( snapshots, None, "Check if result exists in list item call" ) self.assertEqual( snapshots[0].id, snapshot.id, "Check resource id in list resources call" ) self.assertTrue( is_snapshot_on_nfs( self.apiclient, self.dbclient, self.config, self.zone.id, snapshot.id)) return
def check_and_get_cs_volume(obj_test, volume_id, volume_name, obj_assert):
    list_volumes_response = list_volumes(
        obj_test.apiClient,
        id=volume_id
    )
    check_list(list_volumes_response, 1, obj_assert,
               "There should only be one volume in this list.")
    cs_volume = list_volumes_response[0]
    check_volume(obj_test, cs_volume, volume_name, obj_assert)
    return cs_volume
def test_05_snapshots_per_project(self):
    """Test Snapshot limit per project
    """
    # Validate the following
    # 1. set max no of snapshots per project to 1.
    # 2. Create one snapshot in the project. Snapshot should be
    #    successfully created
    # 3. Try to create another snapshot in this project. It should give
    #    user an appropriate error and an alert should be generated.

    if self.hypervisor.lower() in ["hyperv"]:
        raise self.skipTest("Snapshots feature is not supported on Hyper-V")

    self.debug("Updating snapshot resource limits for project: %s" % self.project.id)
    # Set the snapshot limit to 1 for the project
    update_resource_limit(
        self.apiclient,
        3,  # resource type 3 = Snapshot
        max=1,
        projectid=self.project.id)

    self.debug("Deploying VM for account: %s" % self.account.name)
    virtual_machine_1 = VirtualMachine.create(
        self.apiclient,
        self.services["server"],
        templateid=self.template.id,
        serviceofferingid=self.service_offering.id,
        projectid=self.project.id,
    )
    self.cleanup.append(virtual_machine_1)
    # Verify VM state
    self.assertEqual(virtual_machine_1.state, "Running", "Check VM state is Running or not")

    # Get the Root disk of VM
    volumes = list_volumes(
        self.apiclient, virtualmachineid=virtual_machine_1.id, projectid=self.project.id, type="ROOT"
    )
    self.assertEqual(isinstance(volumes, list), True, "Check for list volume response return valid data")

    self.debug("Creating snapshot from volume: %s" % volumes[0].id)
    # Create a snapshot from the ROOTDISK
    snapshot_1 = Snapshot.create(self.apiclient, volumes[0].id, projectid=self.project.id)
    self.cleanup.append(snapshot_1)

    # list snapshots
    snapshots = list_snapshots(self.apiclient, projectid=self.project.id)

    self.debug("snapshots list: %s" % snapshots)

    self.assertEqual(validateList(snapshots)[0], PASS, "Snapshots list validation failed")
    self.assertEqual(len(snapshots), 1, "Snapshots list should have exactly one entity")

    # Exception should be raised for second snapshot
    with self.assertRaises(Exception):
        Snapshot.create(self.apiclient, volumes[0].id, projectid=self.project.id)
    return
def test_01_snapshot_data_disk(self): """Test Snapshot Data Disk """ volume = list_volumes( self.apiclient, virtualmachineid=self.virtual_machine_with_disk.id, type='DATADISK', listall=True ) self.assertEqual( isinstance(volume, list), True, "Check list response returns a valid list" ) self.debug("Creating a Snapshot from data volume: %s" % volume[0].id) snapshot = Snapshot.create( self.apiclient, volume[0].id, account=self.account.name, domainid=self.account.domainid, asyncbackup=True ) snapshots = list_snapshots( self.apiclient, id=snapshot.id ) self.assertEqual( isinstance(snapshots, list), True, "Check list response returns a valid list" ) self.assertNotEqual( snapshots, None, "Check if result exists in list item call" ) self.assertEqual( snapshots[0].id, snapshot.id, "Check resource id in list resources call" ) self.assertEqual( snapshot.state, "BackingUp", "Check resource state in list resources call" ) return
def _validate_storage(self, storage, vm):
    list_volumes_response = list_volumes(
        self.apiClient, virtualmachineid=vm.id, listall=True)

    self.assertNotEqual(
        list_volumes_response, None,
        "'list_volumes_response' should not be equal to 'None'.")

    for volume in list_volumes_response:
        if volume.type.upper() == "ROOT":
            volumeData = volume
            self.assertEqual(volume.storage, storage.name,
                             "Volume not created for VM " + str(vm.id))

    # Verify in CloudStack
    storage_pools_response = list_storage_pools(
        self.apiClient, id=storage.id)
    self.assertEqual(
        int(volumeData.size),
        int(storage_pools_response[0].disksizeused),
        "Allocated disk sizes are not the same in volumes and primary storage")

    # Verify in Datera
    datera_primarystorage_name = "cloudstack-" + storage.id
    for instance in self.datera_api.app_instances.list():
        if instance['name'] == datera_primarystorage_name:
            app_instance_response = instance

    app_instance_response_disk = (
        app_instance_response['storage_instances']
        ['storage-1']['volumes']['volume-1']
        ['capacity_in_use'] * 1073741824)
    self.assertEqual(
        int(volumeData.size),
        int(app_instance_response_disk),
        "App instance usage size is incorrect")

    # Verify in XenServer
    for key, value in self.xen_session.xenapi.SR.get_all_records().items():
        if value['name_description'] == storage.id:
            xen_server_response = value

    self.assertEqual(
        int(xen_server_response['physical_utilisation']),
        int(volumeData.size),
        "Allocated disk size is incorrect in XenServer")
def test_06_create_snapshots_in_project(self):
    """Test create snapshots in project
    """
    # Validate the following
    # 1. Create a project
    # 2. Add some snapshots to the project
    # 3. Verify snapshots created inside the project can only be used inside
    #    the project
    self.debug("Deploying VM for Project: %s" % self.project.id)
    virtual_machine_1 = VirtualMachine.create(
        self.apiclient,
        self.services["server"],
        templateid=self.template.id,
        serviceofferingid=self.service_offering.id,
        projectid=self.project.id,
    )
    self.cleanup.append(virtual_machine_1)
    # Verify VM state
    self.assertEqual(virtual_machine_1.state, "Running", "Check VM state is Running or not")

    # Get the Root disk of VM
    volumes = list_volumes(self.apiclient, projectid=self.project.id, type="ROOT", listall=True)
    self.assertEqual(isinstance(volumes, list), True, "Check for list volume response return valid data")

    self.debug("Creating snapshot from volume: %s" % volumes[0].id)
    # Create a snapshot from the ROOTDISK
    snapshot = Snapshot.create(self.apiclient, volumes[0].id, projectid=self.project.id)
    self.cleanup.append(snapshot)

    # Verify Snapshot state
    self.assertEqual(
        snapshot.state in ["BackedUp", "CreatedOnPrimary", "Allocated"],
        True,
        "Check Snapshot state is in one of the mentioned possible states, \
        It is currently: %s" % snapshot.state,
    )

    snapshots = Snapshot.list(self.apiclient, account=self.account.name, domainid=self.account.domainid)
    self.assertEqual(snapshots, None, "Snapshots should not be available outside the project")
    return
def test_04_public_template_use_in_project(self):
    """Test Templates creation in projects
    """
    # 1. Create a project
    # 2. Verify Public templates can be used without any restriction
    # 3. Verify that a template created in the project can be used in the
    #    project without any restrictions
    try:
        self.debug("Deploying VM with public template: %s" % self.template.id)
        virtual_machine_1 = VirtualMachine.create(
            self.apiclient,
            self.services["server"],
            templateid=self.template.id,
            serviceofferingid=self.service_offering.id,
            projectid=self.project.id,
        )
        self.cleanup.append(virtual_machine_1)
        # Verify VM state
        self.assertEqual(virtual_machine_1.state, "Running", "Check VM state is Running or not")
        virtual_machine_1.stop(self.apiclient)

        # Get the Root disk of VM
        volumes = list_volumes(self.apiclient, projectid=self.project.id, type="ROOT", listall=True)
        self.assertEqual(isinstance(volumes, list), True, "Check for list volume response return valid data")
        volume = volumes[0]

        self.debug("Creating template from volume: %s" % volume.id)
        # Create a template from the ROOTDISK
        template_1 = Template.create(
            self.apiclient, self.services["template"], volumeid=volume.id, projectid=self.project.id
        )
        self.cleanup.append(template_1)
        # Verify Template state
        self.assertEqual(template_1.isready, True, "Check Template is in ready state or not")
    except Exception as e:
        self.fail("Exception occurred: %s" % e)
    return
def create_vm(self, account, domain, isRunning=False, project=None, limit=None,
              pfrule=False, lbrule=None, natrule=None, volume=None, snapshot=False):
    # TODO: Implement pfrule/lbrule/natrule
    self.debug("Deploying instance in the account: %s" % account.name)
    self.virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=account.name,
        domainid=domain.id,
        serviceofferingid=self.service_offering.id,
        mode=self.zone.networktype if pfrule else 'basic',
        projectid=project.id if project else None)
    self.debug("Deployed instance in account: %s" % account.name)
    list_virtual_machines(self.apiclient, id=self.virtual_machine.id)

    if snapshot:
        volumes = list_volumes(self.apiclient, virtualmachineid=self.virtual_machine.id,
                               type='ROOT', listall=True)
        self.snapshot = Snapshot.create(self.apiclient, volumes[0].id,
                                        account=account.name, domainid=account.domainid)
    if volume:
        self.virtual_machine.attach_volume(self.apiclient, volume)
    if not isRunning:
        self.virtual_machine.stop(self.apiclient)
    self.cleanup.append(self.virtual_machine)
def test_12_move_across_subdomain_vm_volumes(self): """Test as domain admin, stop a VM from subdomain1 and attempt to move it to subdomain2 """ # Validate the following: # 1. deploy VM in sub subdomain1 with volumes. # 3. assignVirtualMachine to subdomain2 userapiclient = self.testClient.getUserApiClient(UserName=self.sdomain_account_user1['account'].name, DomainName=self.sdomain_account_user1['domain'].name, type=2) self.create_vm(self.sdomain_account_user1['account'], self.sdomain_account_user1['domain'],volume=self.sdomain_account_user1['volume']) self.assertRaises(Exception, self.virtual_machine.assign_virtual_machine, userapiclient, self.sdomain_account_user2['account'].name ,self.sdomain_account_user2['domain'].id) # Check all volumes attached to same VM list_volume_response = list_volumes(self.apiclient, virtualmachineid=self.virtual_machine.id, type='DATADISK', listall=True) self.assertEqual(isinstance(list_volume_response, list), True, "Check list volumes response for valid list") self.assertNotEqual(list_volume_response[0].domainid, self.sdomain_account_user2['domain'].id, "Volume ownership not changed.") self.virtual_machine.detach_volume(self.apiclient, self.sdomain_account_user1['volume'])
def _get_root_volume(self, vm):
    list_volumes_response = list_volumes(
        self.apiClient,
        virtualmachineid=vm.id,
        listall=True
    )

    self.assertNotEqual(
        list_volumes_response,
        None,
        "'list_volumes_response' should not be equal to 'None'."
    )
    self.assertEqual(
        len(list_volumes_response) > 0,
        True,
        "'len(list_volumes_response)' should be greater than 0."
    )

    for volume in list_volumes_response:
        if volume.type.upper() == "ROOT":
            return volume

    self.assert_(False, "Unable to locate the ROOT volume of the VM with the following ID: " + str(vm.id))
def test_02_take_VM_snapshot_with_data_disk(self): self.virtual_machine.start(self.apiClient) data_volume = Volume.create( self.apiClient, self.testdata[TestData.volume_1], account=self.account.name, domainid=self.domain.id, zoneid=self.zone.id, diskofferingid=self.disk_offering.id ) self.cleanup = [data_volume] self.virtual_machine.attach_volume(self.apiClient, data_volume) root_volumes = list_volumes(self.apiClient, type="ROOT", listAll="true") self._check_list(root_volumes, 1, TestVMSnapshots._should_only_be_one_root_volume_err_msg) root_volume = root_volumes[0] root_volume_id = {'volumeid': root_volume.id} sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(root_volume_id) sf_iscsi_root_volume_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName'] self._check_iscsi_name(sf_iscsi_root_volume_name) root_volume_path_1 = self._get_path(root_volume_id) data_volumes = list_volumes(self.apiClient, type="DATADISK", listAll="true") self._check_list(data_volumes, 1, "There should only be one data volume.") data_volume = data_volumes[0] data_volume_id = {'volumeid': data_volume.id} sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(data_volume_id) sf_iscsi_data_volume_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName'] self._check_iscsi_name(sf_iscsi_data_volume_name) data_volume_path_1 = self._get_path(data_volume_id) ####################################### ####################################### # STEP 1: Take snapshot of running VM # ####################################### ####################################### vm_snapshot = VmSnapshot.create( self.apiClient, vmid=self.virtual_machine.id, snapshotmemory="false", name="Test Snapshot", description="Test Snapshot Desc" ) list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true") self._verify_vm_snapshot(list_vm_snapshots, vm_snapshot) root_volume_path_2 = self._get_path(root_volume_id) self.assertEqual( root_volume_path_1, root_volume_path_2, TestVMSnapshots._path_should_not_have_changed_err_msg ) data_volume_path_2 = self._get_path(data_volume_id) self.assertEqual( data_volume_path_1, data_volume_path_2, TestVMSnapshots._path_should_not_have_changed_err_msg ) root_volume_xen_sr = self.xen_session.xenapi.SR.get_by_name_label(sf_iscsi_root_volume_name)[0] root_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(root_volume_xen_sr) self._check_list(root_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg) root_volume_vdis_after_create = self._get_vdis(root_volume_xen_vdis) vdiSnapshotOf = self.xen_session.xenapi.VDI.get_record(root_volume_vdis_after_create.snapshot_vdi["snapshot_of"]) self.assertEqual( vdiSnapshotOf["uuid"], root_volume_vdis_after_create.active_vdi["uuid"], TestVMSnapshots._snapshot_parent_not_correct_err_msg ) data_volume_xen_sr = self.xen_session.xenapi.SR.get_by_name_label(sf_iscsi_data_volume_name)[0] data_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(data_volume_xen_sr) self._check_list(data_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg) data_volume_vdis_after_create = self._get_vdis(data_volume_xen_vdis) vdiSnapshotOf = self.xen_session.xenapi.VDI.get_record(data_volume_vdis_after_create.snapshot_vdi["snapshot_of"]) self.assertEqual( vdiSnapshotOf["uuid"], data_volume_vdis_after_create.active_vdi["uuid"], TestVMSnapshots._snapshot_parent_not_correct_err_msg ) ####################################### ####################################### ### STEP 2: Revert VM to Snapshot ### ####################################### 
####################################### self.virtual_machine.stop(self.apiClient) VmSnapshot.revertToSnapshot(self.apiClient, vmsnapshotid=vm_snapshot.id) list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true") self._check_list(list_vm_snapshots, 1, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg) root_volume_path_3 = self._get_path(root_volume_id) self.assertNotEqual( root_volume_path_1, root_volume_path_3, TestVMSnapshots._path_should_have_changed_err_msg ) root_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(root_volume_xen_sr) self._check_list(root_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg) root_volume_vdis_after_revert = self._get_vdis(root_volume_xen_vdis) self.assertNotEqual( root_volume_vdis_after_create.active_vdi["uuid"], root_volume_vdis_after_revert.active_vdi["uuid"], TestVMSnapshots._active_vdis_should_not_be_the_same_err_msg ) self.assertEqual( root_volume_vdis_after_create.snapshot_vdi["uuid"], root_volume_vdis_after_revert.snapshot_vdi["uuid"], TestVMSnapshots._snapshot_vdis_should_be_the_same_err_msg ) self.assertEqual( root_volume_vdis_after_create.base_vdi["uuid"], root_volume_vdis_after_revert.base_vdi["uuid"], TestVMSnapshots._base_vdis_should_be_the_same_err_msg ) data_volume_path_3 = self._get_path(data_volume_id) self.assertNotEqual( data_volume_path_1, data_volume_path_3, TestVMSnapshots._path_should_have_changed_err_msg ) data_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(data_volume_xen_sr) self._check_list(data_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg) data_volume_vdis_after_revert = self._get_vdis(data_volume_xen_vdis) self.assertNotEqual( data_volume_vdis_after_create.active_vdi["uuid"], data_volume_vdis_after_revert.active_vdi["uuid"], TestVMSnapshots._active_vdis_should_not_be_the_same_err_msg ) self.assertEqual( data_volume_vdis_after_create.snapshot_vdi["uuid"], data_volume_vdis_after_revert.snapshot_vdi["uuid"], TestVMSnapshots._snapshot_vdis_should_be_the_same_err_msg ) self.assertEqual( data_volume_vdis_after_create.base_vdi["uuid"], data_volume_vdis_after_revert.base_vdi["uuid"], TestVMSnapshots._base_vdis_should_be_the_same_err_msg ) ####################################### ####################################### ##### STEP 3: Delete VM snapshot ##### ####################################### ####################################### VmSnapshot.deleteVMSnapshot(self.apiClient, vmsnapshotid=vm_snapshot.id) list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true") self.assertEqual( list_vm_snapshots, None, TestVMSnapshots._should_be_no_vm_snapshots_err_msg ) root_volume_path_4 = self._get_path(root_volume_id) self.assertEqual( root_volume_path_3, root_volume_path_4, TestVMSnapshots._path_should_not_have_changed_err_msg ) root_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(root_volume_xen_sr) self._check_list(root_volume_xen_vdis, 1, TestVMSnapshots._should_only_be_one_vdi_err_msg) root_volume_vdis_after_delete = self._get_vdis(root_volume_xen_vdis, True) self.assertEqual( root_volume_vdis_after_revert.active_vdi["uuid"], root_volume_vdis_after_delete.active_vdi["uuid"], TestVMSnapshots._active_vdis_should_be_the_same_err_msg ) data_volume_path_4 = self._get_path(data_volume_id) self.assertEqual( data_volume_path_3, data_volume_path_4, TestVMSnapshots._path_should_not_have_changed_err_msg ) data_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(data_volume_xen_sr) self._check_list(data_volume_xen_vdis, 1, TestVMSnapshots._should_only_be_one_vdi_err_msg) 
data_volume_vdis_after_delete = self._get_vdis(data_volume_xen_vdis, True) self.assertEqual( data_volume_vdis_after_revert.active_vdi["uuid"], data_volume_vdis_after_delete.active_vdi["uuid"], TestVMSnapshots._active_vdis_should_be_the_same_err_msg ) ####################################### ####################################### ##### STEP 4: Start VM ##### ####################################### ####################################### self.virtual_machine.detach_volume(self.apiClient, data_volume) self.virtual_machine.start(self.apiClient)
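# These snapshot tests lean on a small _check_list(in_list, expected_size, err_msg) helper whose
# body is not shown in this excerpt. A minimal sketch of what such a helper could look like,
# inferred purely from how it is called above (an assumption, not the suite's actual implementation):
def _check_list(self, in_list, expected_size_of_list, err_msg):
    # Call sites pass the raw result of list_volumes()/VmSnapshot.list()/SR.get_VDIs(),
    # so reject None or a non-list before checking the length.
    self.assertEqual(isinstance(in_list, list), True, err_msg)
    self.assertEqual(len(in_list), expected_size_of_list, err_msg)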
def test_01_take_VM_snapshot(self): self.virtual_machine.start(self.apiClient) root_volumes = list_volumes(self.apiClient, type="ROOT", listAll="true") self._check_list(root_volumes, 1, TestVMSnapshots._should_only_be_one_root_volume_err_msg) root_volume = root_volumes[0] volume_id = {'volumeid': root_volume.id} sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(volume_id) sf_iscsi_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName'] self._check_iscsi_name(sf_iscsi_name) root_volume_path_1 = self._get_path(volume_id) ####################################### ####################################### # STEP 1: Take snapshot of running VM # ####################################### ####################################### vm_snapshot = VmSnapshot.create( self.apiClient, vmid=self.virtual_machine.id, snapshotmemory="false", name="Test Snapshot", description="Test Snapshot Desc" ) list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true") self._verify_vm_snapshot(list_vm_snapshots, vm_snapshot) root_volume_path_2 = self._get_path(volume_id) self.assertEqual( root_volume_path_1, root_volume_path_2, TestVMSnapshots._path_should_not_have_changed_err_msg ) xen_sr = self.xen_session.xenapi.SR.get_by_name_label(sf_iscsi_name)[0] xen_vdis = self.xen_session.xenapi.SR.get_VDIs(xen_sr) self._check_list(xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg) vdis_after_create = self._get_vdis(xen_vdis) vdiSnapshotOf = self.xen_session.xenapi.VDI.get_record(vdis_after_create.snapshot_vdi["snapshot_of"]) self.assertEqual( vdiSnapshotOf["uuid"], vdis_after_create.active_vdi["uuid"], TestVMSnapshots._snapshot_parent_not_correct_err_msg ) ####################################### ####################################### ### STEP 2: Revert VM to Snapshot ### ####################################### ####################################### self.virtual_machine.stop(self.apiClient) VmSnapshot.revertToSnapshot(self.apiClient, vmsnapshotid=vm_snapshot.id) list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true") self._check_list(list_vm_snapshots, 1, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg) root_volume_path_3 = self._get_path(volume_id) self.assertNotEqual( root_volume_path_1, root_volume_path_3, TestVMSnapshots._path_should_have_changed_err_msg ) xen_vdis = self.xen_session.xenapi.SR.get_VDIs(xen_sr) self._check_list(xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg) vdis_after_revert = self._get_vdis(xen_vdis) self.assertNotEqual( vdis_after_create.active_vdi["uuid"], vdis_after_revert.active_vdi["uuid"], TestVMSnapshots._active_vdis_should_not_be_the_same_err_msg ) self.assertEqual( vdis_after_create.snapshot_vdi["uuid"], vdis_after_revert.snapshot_vdi["uuid"], TestVMSnapshots._snapshot_vdis_should_be_the_same_err_msg ) self.assertEqual( vdis_after_create.base_vdi["uuid"], vdis_after_revert.base_vdi["uuid"], TestVMSnapshots._base_vdis_should_be_the_same_err_msg ) ####################################### ####################################### ##### STEP 3: Delete VM snapshot ##### ####################################### ####################################### VmSnapshot.deleteVMSnapshot(self.apiClient, vmsnapshotid=vm_snapshot.id) list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true") self.assertEqual( list_vm_snapshots, None, TestVMSnapshots._should_be_no_vm_snapshots_err_msg ) root_volume_path_4 = self._get_path(volume_id) self.assertEqual( root_volume_path_3, root_volume_path_4, TestVMSnapshots._path_should_not_have_changed_err_msg ) 
xen_vdis = self.xen_session.xenapi.SR.get_VDIs(xen_sr) self._check_list(xen_vdis, 1, TestVMSnapshots._should_only_be_one_vdi_err_msg) vdis_after_delete = self._get_vdis(xen_vdis, True) self.assertEqual( vdis_after_revert.active_vdi["uuid"], vdis_after_delete.active_vdi["uuid"], TestVMSnapshots._active_vdis_should_be_the_same_err_msg ) ####################################### ####################################### ##### STEP 4: Start VM ##### ####################################### ####################################### self.virtual_machine.start(self.apiClient)
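# Both VM-snapshot tests classify the VDIs behind the SolidFire-backed SR through a
# _get_vdis(xen_vdis, get_only_active_vdi=False) helper that is not reproduced here. The sketch
# below is a hypothetical reconstruction based only on how the returned object is used above
# (active_vdi / snapshot_vdi / base_vdi records exposing "uuid" and "snapshot_of"):
from collections import namedtuple

XenVdis = namedtuple("XenVdis", ["active_vdi", "snapshot_vdi", "base_vdi"])

def _get_vdis(self, xen_vdis, get_only_active_vdi=False):
    active_vdi = snapshot_vdi = base_vdi = None
    for vdi_ref in xen_vdis:
        vdi = self.xen_session.xenapi.VDI.get_record(vdi_ref)
        if vdi["is_a_snapshot"]:
            snapshot_vdi = vdi      # XenAPI flags snapshot VDIs explicitly
        elif vdi["name_label"] == "base copy":
            base_vdi = vdi          # XenServer's conventional label for the base copy
        else:
            active_vdi = vdi        # the VDI the VM is currently writing to
    self.assertIsNotNone(active_vdi, "Could not find the active VDI")
    if not get_only_active_vdi:
        self.assertIsNotNone(snapshot_vdi, "Could not find the snapshot VDI")
        self.assertIsNotNone(base_vdi, "Could not find the base VDI")
    return XenVdis(active_vdi, snapshot_vdi, base_vdi)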
def tearDown(self): try: root_volume = list_volumes( self.apiclient, virtualmachineid=self.vm_1.id, type='ROOT', listall=True ) self.vm_1.stop(self.apiclient) snaps = [] for i in range(2): root_vol_snap = Snapshot.create( self.apiclient, root_volume[0].id) self.assertEqual( root_vol_snap.state, "BackedUp", "Check if the root volume snapshot state is correct" ) snaps.append(root_vol_snap) for snap in snaps: self.assertNotEqual( self.dbclient.execute( "select status from snapshots where name='%s'" % snap.name)[0][0], "Destroyed" ) for snap in snaps: self.assertTrue( is_snapshot_on_nfs( self.apiclient, self.dbclient, self.config, self.zone.id, snap.id)) self.account.delete(self.apiclient) for snap in snaps: self.assertEqual( self.dbclient.execute( "select status from snapshots where name='%s'" % snap.name)[0][0], "Destroyed" ) for snap in snaps: self.assertFalse( is_snapshot_on_nfs( self.apiclient, self.dbclient, self.config, self.zone.id, snap.id)) cleanup_resources(self.apiclient, self.cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup: %s" % e) return
def setUpClass(cls): cls.testClient = super(TestVMPasswordEnabled, cls).getClsTestClient() cls.api_client = cls.testClient.getApiClient() cls.services = Services().services # Get Zone, Domain and templates domain = get_domain(cls.api_client) zone = get_zone(cls.api_client, cls.testClient.getZoneForTests()) cls.services['mode'] = zone.networktype template = get_template( cls.api_client, zone.id, cls.services["ostype"] ) # Set Zones and disk offerings cls.services["small"]["zoneid"] = zone.id cls.services["small"]["template"] = template.id # Create VMs, NAT Rules etc cls.account = Account.create( cls.api_client, cls.services["account"], domainid=domain.id ) cls.small_offering = ServiceOffering.create( cls.api_client, cls.services["service_offerings"]["small"] ) cls.virtual_machine = VirtualMachine.create( cls.api_client, cls.services["small"], accountid=cls.account.name, domainid=cls.account.domainid, serviceofferingid=cls.small_offering.id, mode=cls.services["mode"] ) networkid = cls.virtual_machine.nic[0].networkid cls.hypervisor = cls.testClient.getHypervisorInfo() # create egress rule to allow wget of my cloud-set-guest-password # script if zone.networktype.lower() == 'advanced': EgressFireWallRule.create( cls.api_client, networkid=networkid, protocol=cls.services["egress"]["protocol"], startport=cls.services["egress"]["startport"], endport=cls.services["egress"]["endport"], cidrlist=cls.services["egress"]["cidrlist"]) cls.virtual_machine.password = cls.services["small"]["password"] ssh = cls.virtual_machine.get_ssh_client() # below steps are required to get the new password from VR # (reset password) # http://cloudstack.org/dl/cloud-set-guest-password # Copy this file to /etc/init.d # chmod +x /etc/init.d/cloud-set-guest-password # chkconfig --add cloud-set-guest-password cmds = [ "cd /etc/init.d;wget http://people.apache.org/~tsp/cloud-set-guest-password", "chmod +x /etc/init.d/cloud-set-guest-password", "chkconfig --add cloud-set-guest-password", ] for c in cmds: ssh.execute(c) # Adding delay of 120 sec to avoid data loss due to timing issue time.sleep(120) # Stop virtual machine cls.virtual_machine.stop(cls.api_client) # Poll listVM to ensure VM is stopped properly timeout = cls.services["timeout"] while True: time.sleep(cls.services["sleep"]) # Ensure that VM is in stopped state list_vm_response = list_virtual_machines( cls.api_client, id=cls.virtual_machine.id ) if isinstance(list_vm_response, list): vm = list_vm_response[0] if vm.state == 'Stopped': break if timeout == 0: raise Exception( "Failed to stop VM (ID: %s) " % vm.id) timeout = timeout - 1 list_volume = list_volumes( cls.api_client, virtualmachineid=cls.virtual_machine.id, type='ROOT', listall=True ) if isinstance(list_volume, list): cls.volume = list_volume[0] else: raise Exception( "Exception: Unable to find root volume for VM: %s" % cls.virtual_machine.id) cls.services["template"]["ostype"] = cls.services["ostype"] cls.services["template"]["ispublic"] = True # Create templates for Edit, Delete & update permissions testcases cls.pw_enabled_template = Template.create( cls.api_client, cls.services["template"], cls.volume.id, ) # Delete the VM - No longer needed cls.virtual_machine.delete(cls.api_client, expunge=True) cls.services["small"]["template"] = cls.pw_enabled_template.id cls.vm = VirtualMachine.create( cls.api_client, cls.services["small"], accountid=cls.account.name, domainid=cls.account.domainid, serviceofferingid=cls.small_offering.id, mode=cls.services["mode"] ) cls._cleanup = [ cls.small_offering, 
cls.pw_enabled_template, cls.account ]
def _get_updated_cs_volume(self, cs_volume_id): return list_volumes(self.apiClient, listall=True, id=cs_volume_id)[0]
def test_02_host_maintenance_mode_with_activities(self): """Test host maintenance mode with activities """ # Validate the following # 1. Create Vms. Acquire IP. Create port forwarding & load balancing # rules for Vms. # 2. While activities are ongoing: Create snapshots, recurring # snapshots, create templates, download volumes, Host 1: put to # maintenance mode. All Vms should fail over to Host 2 in cluster # Vms should be in running state. All port forwarding rules and # load balancing Rules should work. # 3. After failover to Host 2 succeeds, deploy Vms. Deploy Vms on host # 2 should succeed. All ongoing activities in step 3 should succeed # 4. Host 1: cancel maintenance mode. # 5. While activities are ongoing: Create snapshots, recurring # snapshots, create templates, download volumes, Host 2: put to # maintenance mode. All Vms should fail over to Host 1 in cluster. # 6. After failover to Host 1 succeeds, deploy VMs. Deploy Vms on # host 1 should succeed. All ongoing activities in step 6 should # succeed. hosts = Host.list( self.apiclient, zoneid=self.zone.id, resourcestate='Enabled', type='Routing' ) self.assertEqual( isinstance(hosts, list), True, "List hosts should return valid host response" ) if len(hosts) < 2: self.skipTest("There must be at least 2 hosts present in cluster") self.debug("Checking HA with hosts: %s, %s" % ( hosts[0].name, hosts[1].name )) self.debug("Deploying VM in account: %s" % self.account.name) # Spawn an instance in that network virtual_machine = VirtualMachine.create( self.apiclient, self.services["virtual_machine"], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id ) vms = VirtualMachine.list( self.apiclient, id=virtual_machine.id, listall=True ) self.assertEqual( isinstance(vms, list), True, "List VMs should return valid response for deployed VM" ) self.assertNotEqual( len(vms), 0, "List VMs should return valid response for deployed VM" ) vm = vms[0] self.debug("Deployed VM on host: %s" % vm.hostid) self.assertEqual( vm.state, "Running", "Deployed VM should be in Running state" ) networks = Network.list( self.apiclient, account=self.account.name, domainid=self.account.domainid, listall=True ) self.assertEqual( isinstance(networks, list), True, "List networks should return valid list for the account" ) network = networks[0] self.debug("Associating public IP for account: %s" % self.account.name) public_ip = PublicIPAddress.create( self.apiclient, accountid=self.account.name, zoneid=self.zone.id, domainid=self.account.domainid, networkid=network.id ) self.debug("Associated %s with network %s" % ( public_ip.ipaddress.ipaddress, network.id )) self.debug("Creating PF rule for IP address: %s" % public_ip.ipaddress.ipaddress) NATRule.create( self.apiclient, virtual_machine, self.services["natrule"], ipaddressid=public_ip.ipaddress.id ) self.debug("Creating LB rule on IP with NAT: %s" % public_ip.ipaddress.ipaddress) # Create Load Balancer rule on IP already having NAT rule lb_rule = LoadBalancerRule.create( self.apiclient, self.services["lbrule"], ipaddressid=public_ip.ipaddress.id, accountid=self.account.name ) self.debug("Created LB rule with ID: %s" % lb_rule.id) # Should be able to SSH VM try: self.debug("SSH into VM: %s" % virtual_machine.id) virtual_machine.get_ssh_client( ipaddress=public_ip.ipaddress.ipaddress) except Exception as e: self.fail("SSH Access failed for %s: %s" % (virtual_machine.ipaddress, e) ) # Get the Root disk of VM volumes = list_volumes( self.apiclient,
virtualmachineid=virtual_machine.id, type='ROOT', listall=True ) volume = volumes[0] self.debug( "Root volume of VM(%s): %s" % ( virtual_machine.name, volume.name )) # Create a snapshot from the ROOTDISK self.debug("Creating snapshot on ROOT volume: %s" % volume.name) snapshot = Snapshot.create(self.apiclient, volumes[0].id) self.debug("Snapshot created: ID - %s" % snapshot.id) snapshots = list_snapshots( self.apiclient, id=snapshot.id, listall=True ) self.assertEqual( isinstance(snapshots, list), True, "Check list response returns a valid list" ) self.assertNotEqual( snapshots, None, "Check if result exists in list snapshots call" ) self.assertEqual( snapshots[0].id, snapshot.id, "Check snapshot id in list resources call" ) # Generate template from the snapshot self.debug("Generating template from snapshot: %s" % snapshot.name) template = Template.create_from_snapshot( self.apiclient, snapshot, self.services["templates"] ) self.debug("Created template from snapshot: %s" % template.id) templates = list_templates( self.apiclient, templatefilter=self.services["templates"]["templatefilter"], id=template.id ) self.assertEqual( isinstance(templates, list), True, "List template call should return the newly created template" ) self.assertEqual( templates[0].isready, True, "The newly created template should be in ready state" ) first_host = vm.hostid self.debug("Enabling maintenance mode for host %s" % vm.hostid) cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd() cmd.id = first_host self.apiclient.prepareHostForMaintenance(cmd) self.debug("Waiting for SSVMs to come up") wait_for_ssvms( self.apiclient, zoneid=self.zone.id, podid=self.pod.id, ) timeout = self.services["timeout"] # Poll and check state of VM while it migrates from one host to another while True: vms = VirtualMachine.list( self.apiclient, id=virtual_machine.id, listall=True ) self.assertEqual( isinstance(vms, list), True, "List VMs should return valid response for deployed VM" ) self.assertNotEqual( len(vms), 0, "List VMs should return valid response for deployed VM" ) vm = vms[0] self.debug("VM 1 state: %s" % vm.state) if vm.state in ["Stopping", "Stopped", "Running", "Starting", "Migrating"]: if vm.state == "Running": break else: time.sleep(self.services["sleep"]) timeout = timeout - 1 else: self.fail( "VM migration from one-host-to-other failed\ while enabling maintenance" ) second_host = vm.hostid self.assertEqual( vm.state, "Running", "VM should be in Running state after enabling host maintenance" ) # Should be able to SSH VM try: self.debug("SSH into VM: %s" % virtual_machine.id) virtual_machine.get_ssh_client( ipaddress=public_ip.ipaddress.ipaddress) except Exception as e: self.fail("SSH Access failed for %s: %s" % (virtual_machine.ipaddress, e) ) self.debug("Deploying VM in account: %s" % self.account.name) # Spawn an instance on other host virtual_machine_2 = VirtualMachine.create( self.apiclient, self.services["virtual_machine"], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id ) vms = VirtualMachine.list( self.apiclient, id=virtual_machine_2.id, listall=True ) self.assertEqual( isinstance(vms, list), True, "List VMs should return valid response for deployed VM" ) self.assertNotEqual( len(vms), 0, "List VMs should return valid response for deployed VM" ) vm = vms[0] self.debug("Deployed VM on host: %s" % vm.hostid) self.debug("VM 2 state: %s" % vm.state) self.assertEqual( vm.state, "Running", "Deployed VM should be in Running state" ) self.debug("Canceling 
host maintenance for ID: %s" % first_host) cmd = cancelHostMaintenance.cancelHostMaintenanceCmd() cmd.id = first_host self.apiclient.cancelHostMaintenance(cmd) self.debug("Maintenance mode canceled for host: %s" % first_host) # Get the Root disk of VM volumes = list_volumes( self.apiclient, virtualmachineid=virtual_machine_2.id, type='ROOT', listall=True ) volume = volumes[0] self.debug( "Root volume of VM(%s): %s" % ( virtual_machine_2.name, volume.name )) # Create a snapshot from the ROOTDISK self.debug("Creating snapshot on ROOT volume: %s" % volume.name) snapshot = Snapshot.create(self.apiclient, volumes[0].id) self.debug("Snapshot created: ID - %s" % snapshot.id) snapshots = list_snapshots( self.apiclient, id=snapshot.id, listall=True ) self.assertEqual( isinstance(snapshots, list), True, "Check list response returns a valid list" ) self.assertNotEqual( snapshots, None, "Check if result exists in list snapshots call" ) self.assertEqual( snapshots[0].id, snapshot.id, "Check snapshot id in list resources call" ) # Generate template from the snapshot self.debug("Generating template from snapshot: %s" % snapshot.name) template = Template.create_from_snapshot( self.apiclient, snapshot, self.services["templates"] ) self.debug("Created template from snapshot: %s" % template.id) templates = list_templates( self.apiclient, templatefilter=self.services["templates"]["templatefilter"], id=template.id ) self.assertEqual( isinstance(templates, list), True, "List template call should return the newly created template" ) self.assertEqual( templates[0].isready, True, "The newly created template should be in ready state" ) self.debug("Enabling maintenance mode for host %s" % second_host) cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd() cmd.id = second_host self.apiclient.prepareHostForMaintenance(cmd) self.debug("Maintenance mode enabled for host: %s" % second_host) self.debug("Waiting for SSVMs to come up") wait_for_ssvms( self.apiclient, zoneid=self.zone.id, podid=self.pod.id, ) # Poll and check the status of VMs timeout = self.services["timeout"] while True: vms = VirtualMachine.list( self.apiclient, account=self.account.name, domainid=self.account.domainid, listall=True ) self.assertEqual( isinstance(vms, list), True, "List VMs should return valid response for deployed VM" ) self.assertNotEqual( len(vms), 0, "List VMs should return valid response for deployed VM" ) vm = vms[0] self.debug( "VM state after enabling maintenance on first host: %s" % vm.state) if vm.state in ["Stopping", "Stopped", "Running", "Starting", "Migrating"]: if vm.state == "Running": break else: time.sleep(self.services["sleep"]) timeout = timeout - 1 else: self.fail( "VM migration from one-host-to-other failed\ while enabling maintenance" ) # Poll and check the status of VMs timeout = self.services["timeout"] while True: vms = VirtualMachine.list( self.apiclient, account=self.account.name, domainid=self.account.domainid, listall=True ) self.assertEqual( isinstance(vms, list), True, "List VMs should return valid response for deployed VM" ) self.assertNotEqual( len(vms), 0, "List VMs should return valid response for deployed VM" ) vm = vms[1] self.debug( "VM state after enabling maintenance on first host: %s" % vm.state) if vm.state in ["Stopping", "Stopped", "Running", "Starting", "Migrating"]: if vm.state == "Running": break else: time.sleep(self.services["sleep"]) timeout = timeout - 1 else: self.fail( "VM migration from one-host-to-other failed\ while enabling maintenance" ) for vm in vms: self.debug( "VM 
states after enabling maintenance mode on host: %s - %s" % (second_host, vm.state)) self.assertEqual( vm.state, "Running", "Deployed VM should be in Running state" ) # Spawn an instance on other host virtual_machine_3 = VirtualMachine.create( self.apiclient, self.services["virtual_machine"], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id ) vms = VirtualMachine.list( self.apiclient, id=virtual_machine_3.id, listall=True ) self.assertEqual( isinstance(vms, list), True, "List VMs should return valid response for deployed VM" ) self.assertNotEqual( len(vms), 0, "List VMs should return valid response for deployed VM" ) vm = vms[0] self.debug("Deployed VM on host: %s" % vm.hostid) self.debug("VM 3 state: %s" % vm.state) self.assertEqual( vm.state, "Running", "Deployed VM should be in Running state" ) self.debug("Canceling host maintenance for ID: %s" % second_host) cmd = cancelHostMaintenance.cancelHostMaintenanceCmd() cmd.id = second_host self.apiclient.cancelHostMaintenance(cmd) self.debug("Maintenance mode canceled for host: %s" % second_host) self.debug("Waiting for SSVMs to come up") wait_for_ssvms( self.apiclient, zoneid=self.zone.id, podid=self.pod.id, ) return
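# The maintenance-mode test above repeats the same poll loop three times: list the VM, break once
# it reports Running, otherwise sleep and decrement a timeout. A small helper along these lines
# (a hypothetical refactoring that reuses the VirtualMachine.list call the test already makes)
# would remove the duplication:
def _wait_for_vm_running(self, vm_id):
    timeout = self.services["timeout"]
    while timeout > 0:
        vms = VirtualMachine.list(self.apiclient, id=vm_id, listall=True)
        if isinstance(vms, list) and len(vms) > 0 and vms[0].state == "Running":
            return vms[0]
        time.sleep(self.services["sleep"])
        timeout = timeout - 1
    self.fail("VM %s did not reach the Running state before the timeout expired" % vm_id)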
def test_08_delete_volume_was_attached(self): """Delete volume that was attached to a VM and is detached now""" self.virtual_machine.start(self.apiClient) ####################################### ####################################### # STEP 1: Create vol and attach to VM # ####################################### ####################################### new_volume = Volume.create( self.apiClient, self.testdata[TestData.volume_2], account=self.account.name, domainid=self.domain.id, zoneid=self.zone.id, diskofferingid=self.disk_offering.id, ) volume_to_delete_later = new_volume self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName]) new_volume = self.virtual_machine.attach_volume(self.apiClient, new_volume) vol = self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName]) vm = self._get_vm(self.virtual_machine.id) self.assertEqual(vol.virtualmachineid, vm.id, "Check if attached to virtual machine") self.assertEqual(vm.state.lower(), "running", str(vm.state)) sf_account_id = sf_util.get_sf_account_id( self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg, ) sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, new_volume, self) self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size) sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self) sf_iscsi_name = sf_util.get_iqn(self.cs_api, new_volume, self) sf_volumes = self._get_active_sf_volumes(sf_account_id) sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self) sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self) sf_util.check_vag(sf_volume, sf_vag_id, self) self._check_xen_sr(sf_iscsi_name) ####################################### ####################################### # STEP 2: Detach and delete volume # ####################################### ####################################### new_volume = self.virtual_machine.detach_volume(self.apiClient, new_volume) vol = self._check_and_get_cs_volume(new_volume.id, self.testdata[TestData.volume_2][TestData.diskName]) vm = self._get_vm(self.virtual_machine.id) self.assertEqual(vol.virtualmachineid, None, "Check if attached to virtual machine") self.assertEqual(vm.state.lower(), "running", str(vm.state)) sf_volumes = self._get_active_sf_volumes(sf_account_id) sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self) sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self) self.assertEqual(len(sf_volume["volumeAccessGroups"]), 0, TestVolumes._volume_should_not_be_in_a_vag) self._check_xen_sr(sf_iscsi_name, False) volume_to_delete_later.delete(self.apiClient) list_volumes_response = list_volumes(self.apiClient, id=new_volume.id) self.assertEqual(list_volumes_response, None, "Check volume was deleted") sf_volumes = self._get_active_sf_volumes(sf_account_id) sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self, False)
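# test_08 verifies SR attachment with self._check_xen_sr(sf_iscsi_name) and, after the detach,
# self._check_xen_sr(sf_iscsi_name, False). Its body is not shown here; assuming the class holds a
# XenAPI session like the VM-snapshot tests above (self.xen_session), a minimal sketch could be:
def _check_xen_sr(self, sf_iscsi_name, should_exist=True):
    # SR.get_by_name_label returns a (possibly empty) list of SR references.
    srs = self.xen_session.xenapi.SR.get_by_name_label(sf_iscsi_name)
    if should_exist:
        self.assertEqual(len(srs), 1, "Expected exactly one SR named %s" % sf_iscsi_name)
    else:
        self.assertEqual(len(srs), 0, "SR %s should no longer be attached" % sf_iscsi_name)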
def setUpClass(cls): cls.testClient = super(TestAccountSnapshotClean, cls).getClsTestClient() cls.api_client = cls.testClient.getApiClient() cls.services = Services().services # Get Zone, Domain and templates cls.domain = get_domain(cls.api_client) cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests()) cls.services['mode'] = cls.zone.networktype cls.hypervisor = cls.testClient.getHypervisorInfo() if cls.hypervisor.lower() in ['lxc']: raise unittest.SkipTest("snapshots are not supported on %s" % cls.hypervisor.lower()) template = get_template( cls.api_client, cls.zone.id, cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id cls.services["template"] = template.id cls._cleanup = [] try: # Create VMs, NAT Rules etc cls.account = Account.create( cls.api_client, cls.services["account"], domainid=cls.domain.id ) cls.services["account"] = cls.account.name if cls.zone.localstorageenabled: cls.services["service_offering"]["storagetype"] = "local" cls.service_offering = ServiceOffering.create( cls.api_client, cls.services["service_offering"] ) cls.virtual_machine = VirtualMachine.create( cls.api_client, cls.services["server"], templateid=template.id, accountid=cls.account.name, domainid=cls.account.domainid, serviceofferingid=cls.service_offering.id ) # Get the Root disk of VM volumes = list_volumes( cls.api_client, virtualmachineid=cls.virtual_machine.id, type='ROOT', listall=True ) volume = volumes[0] # Create a snapshot from the ROOTDISK cls.snapshot = Snapshot.create(cls.api_client, volume.id) except Exception as e: cls.tearDownClass() unittest.SkipTest("setUpClass fails for %s" % cls.__name__) raise e
def test_00_deploy_vm_root_resize(self): """Test deploy virtual machine with root resize # Validate the following: # 1. listVirtualMachines returns accurate information # 2. root disk has new size per listVolumes # 3. Rejects non-supported hypervisor types """ if(self.hypervisor.lower() == 'kvm'): newrootsize = (self.template.size >> 30) + 2 self.virtual_machine = VirtualMachine.create( self.apiclient, self.testdata["virtual_machine"], accountid=self.account.name, zoneid=self.zone.id, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, templateid=self.template.id, rootdisksize=newrootsize ) list_vms = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id) self.debug( "Verify listVirtualMachines response for virtual machine: %s"\ % self.virtual_machine.id ) self.assertEqual( isinstance(list_vms, list), True, "List VM response was not a valid list" ) self.assertNotEqual( len(list_vms), 0, "List VM response was empty" ) vm = list_vms[0] self.assertEqual( vm.id, self.virtual_machine.id, "Virtual Machine ids do not match" ) self.assertEqual( vm.name, self.virtual_machine.name, "Virtual Machine names do not match" ) self.assertEqual( vm.state, "Running", msg="VM is not in Running state" ) # get root vol from created vm, verify it is correct size list_volume_response = list_volumes( self.apiclient, virtualmachineid=self.virtual_machine.id, type='ROOT', listall=True ) rootvolume = list_volume_response[0] success = False if rootvolume is not None and rootvolume.size == (newrootsize << 30): success = True self.assertEqual( success, True, "Check if the root volume resized appropriately" ) else: self.debug("hypervisor %s unsupported for test 00, verifying it errors properly" % self.hypervisor) newrootsize = (self.template.size >> 30) + 2 success = False try: self.virtual_machine = VirtualMachine.create( self.apiclient, self.testdata["virtual_machine"], accountid=self.account.name, zoneid=self.zone.id, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, templateid=self.template.id, rootdisksize=newrootsize ) except Exception as ex: if re.search("Hypervisor \S+ does not support rootdisksize override", str(ex)): success = True else: self.debug("virtual machine create did not fail appropriately. Error was actually : " + str(ex)); self.assertEqual(success, True, "Check if unsupported hypervisor %s fails appropriately" % self.hypervisor)
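# The size arithmetic in these root-resize tests shifts by 30 bits: template.size and the volume
# size returned by listVolumes are in bytes, so ">> 30" converts bytes to GiB and "<< 30" converts
# the requested GiB figure back to bytes for the comparison. A quick worked example with
# illustrative numbers:
template_size_bytes = 8 * 1024 ** 3                 # e.g. an 8 GiB template
newrootsize = (template_size_bytes >> 30) + 2       # request a root disk 2 GiB larger, i.e. 10 GiB
assert newrootsize == 10
assert (newrootsize << 30) == 10 * 1024 ** 3        # byte value listVolumes is expected to report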
def setUpClass(cls): testClient = super(TestConcurrentSnapshots, cls).getClsTestClient() cls.apiclient = testClient.getApiClient() cls.testdata = testClient.getParsedTestDataConfig() cls.hypervisor = cls.testClient.getHypervisorInfo() # Get Zone, Domain and templates cls.domain = get_domain(cls.apiclient) cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests()) cls.template = get_template( cls.apiclient, cls.zone.id, cls.testdata["ostype"]) cls._cleanup = [] cls.vm_pool = [] cls.snapshotSupported = True if cls.hypervisor.lower() in ["hyperv", "lxc"]: cls.snapshotSupported = False return # Set sleep time as per Snapshot Recurring Policy - HOURLY cls.sleep_time_for_hourly_policy = 60 * 60 * 1 cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__ try: # Create an account cls.account = Account.create( cls.apiclient, cls.testdata["account"], domainid=cls.domain.id ) cls._cleanup.append(cls.account) # Create user api client of the account cls.userapiclient = testClient.getUserApiClient( UserName=cls.account.name, DomainName=cls.account.domain ) # Create Service offering cls.service_offering = ServiceOffering.create( cls.apiclient, cls.testdata["service_offering"], ) cls._cleanup.append(cls.service_offering) for i in range(4): cls.vm = VirtualMachine.create( cls.apiclient, cls.testdata["small"], templateid=cls.template.id, accountid=cls.account.name, domainid=cls.account.domainid, serviceofferingid=cls.service_offering.id, zoneid=cls.zone.id, mode=cls.zone.networktype ) cls.vm_pool.append(cls.vm) cls._cleanup.append(cls.vm) cls.checksum_pool = [] cls.root_pool = [] cls.snapshot_pool = [] cls.rec_policy_pool = [] for vm in cls.vm_pool: root_volumes = list_volumes( cls.apiclient, virtualmachineid=vm.id, type='ROOT', listall=True ) checksum_root = createChecksum( cls.testdata, vm, root_volumes[0], "rootdiskdevice") cls.checksum_pool.append(checksum_root) cls.root_pool.append(root_volumes[0]) try: cls.pools = StoragePool.list(cls.apiclient, zoneid=cls.zone.id) except Exception as e: raise unittest.SkipTest(e) except Exception as e: cls.tearDownClass() raise e return
def test_01_recover_VM(self): """ Test Restore VM on VMware 1. Deploy a VM without a data disk 2. Restore the VM 3. Verify that VM comes up in Running state """ try: self.pools = StoragePool.list( self.apiclient, zoneid=self.zone.id, scope="CLUSTER") status = validateList(self.pools) # Step 3 self.assertEqual( status[0], PASS, "Check: Failed to list cluster-wide storage pools") if len(self.pools) < 2: self.skipTest("There must be at least two cluster-wide\ storage pools available in the setup") except Exception as e: self.skipTest(e) # Adding tags to Storage Pools cluster_no = 1 StoragePool.update( self.apiclient, id=self.pools[0].id, tags=[CLUSTERTAG1[:-1] + repr(cluster_no)]) self.vm = VirtualMachine.create( self.apiclient, self.testdata["small"], accountid=self.account.name, templateid=self.template.id, domainid=self.account.domainid, serviceofferingid=self.service_offering_cwps.id, zoneid=self.zone.id, ) # Step 2 volumes_root_list = list_volumes( self.apiclient, virtualmachineid=self.vm.id, type=ROOT, listall=True ) root_volume = volumes_root_list[0] # Restore VM till its ROOT disk is recreated on another Primary Storage while True: self.vm.restore(self.apiclient) volumes_root_list = list_volumes( self.apiclient, virtualmachineid=self.vm.id, type=ROOT, listall=True ) root_volume = volumes_root_list[0] if root_volume.storage != self.pools[0].name: break # Step 3 vm_list = list_virtual_machines( self.apiclient, id=self.vm.id) state = vm_list[0].state i = 0 while(state != "Running"): vm_list = list_virtual_machines( self.apiclient, id=self.vm.id) time.sleep(10) i = i + 1 state = vm_list[0].state if i >= 10: self.fail("Restore VM Failed") break return
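# The restore loop in test_01_recover_VM has no retry cap, so it would spin indefinitely if the
# recreated ROOT disk kept landing on the same primary storage. A bounded variant (a hypothetical
# rework using the same calls as the test itself) fails fast instead:
def _restore_until_on_other_pool(self, max_attempts=10):
    for _ in range(max_attempts):
        self.vm.restore(self.apiclient)
        volumes_root_list = list_volumes(self.apiclient, virtualmachineid=self.vm.id, type=ROOT, listall=True)
        root_volume = volumes_root_list[0]
        if root_volume.storage != self.pools[0].name:
            return root_volume
    self.fail("ROOT volume was not recreated on another primary storage after %d restores" % max_attempts)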
def test_01_volume_snapshot(self): """ Test Volume (root) Snapshot # 1. Deploy a VM on primary storage. # 2. Take a snapshot of the root disk # 3. Verify the snapshot's entry in the "snapshots" table and presence of the corresponding snapshot on the Secondary Storage # 4. Create Template from the Snapshot and Deploy a VM using the Template # 5. Log in to the VM deployed from the template and verify that the contents of the ROOT disk match the snapshot. # 6. Delete the Snapshot, Deploy a Linux VM from the Template and verify the successful deployment of the VM. # 7. Create multiple snapshots on the same volume, check the integrity of each snapshot by creating a template from it and deploying a VM from that template, then delete one of the snapshots # 8. Verify that the original checksum matches the checksum of the VMs created from the remaining snapshots # 9. Verify that the contents of the ROOT disk match the snapshot # 10. Verify that snapshots of both the DATA and ROOT volumes succeed when the snapshot of the Data disk is taken while a snapshot of the ROOT volume is in progress # 11. Create a snapshot of the data disk and verify that the original checksum matches the volume created from the snapshot # 12. Verify that the volume's state does not change when a snapshot is taken of a DATA volume that is attached to a VM # 13. Verify that the volume's state does not change when a snapshot is taken of a DATA volume that is not attached to a VM # 14. Verify that creating a snapshot with quiescevm=True succeeds # 15. Use revertSnapshot() to revert the VM's root volume to a specified volume snapshot """ # Step 1 # Get ROOT Volume Id root_volumes_cluster_list = list_volumes( self.apiclient, virtualmachineid=self.vm_1.id, type='ROOT', listall=True ) root_volume_cluster = root_volumes_cluster_list[0] disk_volumes_cluster_list = list_volumes( self.apiclient, virtualmachineid=self.vm_1.id, type='DATADISK', listall=True ) data_disk = disk_volumes_cluster_list[0] root_vol_state = root_volume_cluster.state ckecksum_random_root_cluster = createChecksum( service=self.testdata, virtual_machine=self.vm_1, disk=root_volume_cluster, disk_type="rootdiskdevice") self.vm_1.stop(self.apiclient) root_vol_snap = Snapshot.create( self.apiclient, root_volume_cluster.id) self.assertEqual( root_vol_snap.state, "BackedUp", "Check if the snapshot state is correct" ) self.assertEqual( root_vol_state, root_volume_cluster.state, "Check if volume state has changed" ) self.vm_1.start(self.apiclient) # Step 2 snapshot_list = list_snapshots( self.apiclient, id=root_vol_snap.id ) self.assertNotEqual( snapshot_list, None, "Check if result exists in list item call" ) self.assertEqual( snapshot_list[0].id, root_vol_snap.id, "Check resource id in list resources call" ) self.assertTrue( is_snapshot_on_nfs( self.apiclient, self.dbclient, self.config, self.zone.id, root_vol_snap.id)) events = list_events( self.apiclient, account=self.account.name, domainid=self.account.domainid, type='SNAPSHOT.CREATE') event_list_validation_result = validateList(events) self.assertEqual( event_list_validation_result[0], PASS, "event list validation failed due to %s" % event_list_validation_result[2]) self.debug("Events list contains event SNAPSHOT.CREATE") qresultset = self.dbclient.execute( "select * from event where type='SNAPSHOT.CREATE' AND \ description like '%%%s%%' AND state='Completed';" % root_volume_cluster.id) event_validation_result = validateList(qresultset) self.assertEqual( event_validation_result[0], PASS, "event list validation failed due to %s" %
event_validation_result[2]) self.assertNotEqual( len(qresultset), 0, "Check DB Query result set" ) qresult = str(qresultset) self.assertEqual( qresult.count('SNAPSHOT.CREATE') > 0, True, "Check SNAPSHOT.CREATE event in events table" ) #Usage_Event qresultset = self.dbclient.execute( "select * from usage_event where type='SNAPSHOT.CREATE' AND \ resource_name='%s'" % root_vol_snap.name) usage_event_validation_result = validateList(qresultset) self.assertEqual( usage_event_validation_result[0], PASS, "event list validation failed due to %s" % usage_event_validation_result[2]) self.assertNotEqual( len(qresultset), 0, "Check DB Query result set" ) self.assertEqual( self.dbclient.execute("select size from usage_event where type='SNAPSHOT.CREATE' AND \ resource_name='%s'" % root_vol_snap.name)[0][0], root_vol_snap.physicalsize) # Step 3 # create template from snapshot root_vol_snap templateFromSnapshot = Template.create_from_snapshot( self.apiclient, root_vol_snap, self.testdata["template_2"]) self.assertNotEqual( templateFromSnapshot, None, "Check if result exists in list item call" ) vm_from_temp = VirtualMachine.create( self.apiclient, self.testdata["small"], templateid=templateFromSnapshot.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, zoneid=self.zone.id, mode=self.zone.networktype ) self.assertNotEqual( vm_from_temp, None, "Check if result exists in list item call" ) compareChecksum( self.apiclient, service=self.testdata, original_checksum=ckecksum_random_root_cluster, disk_type="rootdiskdevice", virt_machine=vm_from_temp ) vm_from_temp.delete(self.apiclient) # Step 4 root_vol_snap.delete(self.userapiclient) self.assertEqual( list_snapshots( self.apiclient, volumeid=root_volume_cluster.id, ), None, "Snapshot list should be empty") events = list_events( self.apiclient, account=self.account.name, domainid=self.account.domainid, type='SNAPSHOT.DELETE') event_list_validation_result = validateList(events) self.assertEqual( event_list_validation_result[0], PASS, "event list validation failed due to %s" % event_list_validation_result[2]) self.debug("Events list contains event SNAPSHOT.DELETE") self.debug("select id from account where uuid = '%s';" % self.account.id) qresultset = self.dbclient.execute( "select id from account where uuid = '%s';" % self.account.id ) account_validation_result = validateList(qresultset) self.assertEqual( account_validation_result[0], PASS, "event list validation failed due to %s" % account_validation_result[2]) self.assertNotEqual( len(qresultset), 0, "Check DB Query result set" ) qresult = qresultset[0] account_id = qresult[0] qresultset = self.dbclient.execute( "select * from event where type='SNAPSHOT.DELETE' AND \ account_id='%s' AND state='Completed';" % account_id) delete_snap_validation_result = validateList(qresultset) self.assertEqual( delete_snap_validation_result[0], PASS, "event list validation failed due to %s" % delete_snap_validation_result[2]) self.assertNotEqual( len(qresultset), 0, "Check DB Query result set" ) qresult = str(qresultset) self.assertEqual( qresult.count('SNAPSHOT.DELETE') > 0, True, "Check SNAPSHOT.DELETE event in events table" ) # Step 5 # delete snapshot and deploy vm from snapshot vm_from_temp_2 = VirtualMachine.create( self.apiclient, self.testdata["small"], templateid=templateFromSnapshot.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, zoneid=self.zone.id, mode=self.zone.networktype ) self.assertNotEqual( 
vm_from_temp_2, None, "Check if result exists in list item call" ) # Step 6: compareChecksum( self.apiclient, service=self.testdata, original_checksum=ckecksum_random_root_cluster, disk_type="rootdiskdevice", virt_machine=vm_from_temp_2 ) vm_from_temp_2.delete(self.apiclient) # Step 7 # Multiple Snapshots self.vm_1.stop(self.apiclient) snaps = [] for i in range(2): root_vol_snap = Snapshot.create( self.apiclient, root_volume_cluster.id) self.assertEqual( root_vol_snap.state, "BackedUp", "Check if the data vol snapshot state is correct " ) snaps.append(root_vol_snap) templateFromSnapshot = Template.create_from_snapshot( self.apiclient, root_vol_snap, self.testdata["template_2"]) self.assertNotEqual( templateFromSnapshot, None, "Check if result exists in list item call" ) vm_from_temp = VirtualMachine.create( self.apiclient, self.testdata["small"], templateid=templateFromSnapshot.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, zoneid=self.zone.id, mode=self.zone.networktype ) self.assertNotEqual( vm_from_temp, None, "Check if result exists in list item call" ) compareChecksum( self.apiclient, service=self.testdata, original_checksum=ckecksum_random_root_cluster, disk_type="rootdiskdevice", virt_machine=vm_from_temp ) vm_from_temp.delete(self.apiclient) templateFromSnapshot.delete(self.apiclient) self.vm_1.start(self.apiclient) delete_snap = snaps.pop(1) delete_snap.delete(self.apiclient) self.assertEqual( Snapshot.list( self.apiclient, id=delete_snap.id ), None, "Snapshot list should be empty") # Step 8 for snap in snaps: templateFromSnapshot = Template.create_from_snapshot( self.apiclient, snap, self.testdata["template_2"]) self.assertNotEqual( templateFromSnapshot, None, "Check if result exists in list item call" ) vm_from_temp = VirtualMachine.create( self.apiclient, self.testdata["small"], templateid=templateFromSnapshot.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, zoneid=self.zone.id, mode=self.zone.networktype ) self.assertNotEqual( vm_from_temp, None, "Check if result exists in list item call" ) compareChecksum( self.apiclient, service=self.testdata, original_checksum=ckecksum_random_root_cluster, disk_type="rootdiskdevice", virt_machine=vm_from_temp ) templateFromSnapshot.delete(self.apiclient) vm_from_temp.delete(self.apiclient) for snap in snaps: snap.delete(self.apiclient) # Step 9 ckecksum_root_cluster = createChecksum( service=self.testdata, virtual_machine=self.vm_1, disk=root_volume_cluster, disk_type="rootdiskdevice") self.vm_1.stop(self.apiclient) root_vol_snap_2 = Snapshot.create( self.apiclient, root_volume_cluster.id) self.assertEqual( root_vol_snap_2.state, "BackedUp", "Check if the data vol snapshot state is correct " ) snap_list_validation_result = validateList(events) self.assertEqual( snap_list_validation_result[0], PASS, "snapshot list validation failed due to %s" % snap_list_validation_result[2]) self.assertNotEqual( snapshot_list, None, "Check if result exists in list item call" ) templateFromSnapshot = Template.create_from_snapshot( self.apiclient, root_vol_snap_2, self.testdata["template_2"]) self.debug( "create template event comlites with template %s name" % templateFromSnapshot.name) self.assertNotEqual( templateFromSnapshot, None, "Check if result exists in list item call" ) vm_from_temp_2 = VirtualMachine.create( self.apiclient, self.testdata["small"], templateid=templateFromSnapshot.id, accountid=self.account.name, 
domainid=self.account.domainid, serviceofferingid=self.service_offering.id, zoneid=self.zone.id, mode=self.zone.networktype ) self.assertNotEqual( vm_from_temp_2, None, "Check if result exists in list item call" ) compareChecksum( self.apiclient, service=self.testdata, original_checksum=ckecksum_root_cluster, disk_type="rootdiskdevice", virt_machine=vm_from_temp_2 ) vm_from_temp_2.delete(self.apiclient) # Step 10 # Take snapshot of Data disk of a VM , when snapshot of ROOT volume of # VM is in progress try: self.vm_1.stop(self.apiclient) t1 = Thread( target=Snapshot.create, args=( self.apiclient, root_volume_cluster.id )) t2 = Thread( target=Snapshot.create, args=( self.apiclient, data_disk.id )) t1.start() t2.start() t1.join() t2.join() except: self.debug("Error: unable to start thread") # Step 11 # Data Disk self.vm_1.start(self.apiclient) ckecksum_data_disk = createChecksum( service=self.testdata, virtual_machine=self.vm_1, disk=data_disk, disk_type="datadiskdevice_1") data_vol_state = data_disk.state self.vm_1.stop(self.apiclient) data_vol_snap = Snapshot.create( self.apiclient, data_disk.id) self.assertEqual( data_vol_snap.state, "BackedUp", "Check if the data vol snapshot state is correct " ) self.assertEqual( data_vol_state, data_disk.state, "Check if volume state has changed" ) data_snapshot_list = list_snapshots( self.apiclient, id=data_vol_snap.id ) self.assertNotEqual( data_snapshot_list, None, "Check if result exists in list item call" ) self.assertEqual( data_snapshot_list[0].id, data_vol_snap.id, "Check resource id in list resources call" ) self.assertTrue( is_snapshot_on_nfs( self.apiclient, self.dbclient, self.config, self.zone.id, data_vol_snap.id)) events = list_events( self.apiclient, account=self.account.name, domainid=self.account.domainid, type='SNAPSHOT.CREATE') event_list_validation_result = validateList(events) self.assertEqual( event_list_validation_result[0], PASS, "event list validation failed due to %s" % event_list_validation_result[2]) self.debug("Events list contains event SNAPSHOT.CREATE") volumeFromSnap = Volume.create_from_snapshot( self.apiclient, data_vol_snap.id, self.testdata["volume"], account=self.account.name, domainid=self.account.domainid, zoneid=self.zone.id ) self.assertTrue( is_snapshot_on_nfs( self.apiclient, self.dbclient, self.config, self.zone.id, data_vol_snap.id)) new_vm = VirtualMachine.create( self.userapiclient, self.testdata["small"], templateid=self.template.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, zoneid=self.zone.id, mode=self.zone.networktype ) new_vm.attach_volume( self.apiclient, volumeFromSnap ) new_vm.reboot(self.apiclient) compareChecksum( self.apiclient, service=self.testdata, original_checksum=ckecksum_data_disk, disk_type="datadiskdevice_1", virt_machine=new_vm ) # Step 12 data_volume_2 = Volume.create( self.apiclient, self.testdata["volume"], zoneid=self.zone.id, account=self.account.name, domainid=self.account.domainid, diskofferingid=self.disk_offering.id ) self.vm_1.start(self.apiclient) self.vm_1.attach_volume( self.userapiclient, data_volume_2 ) self.vm_1.reboot(self.apiclient) self.vm_1.stop(self.apiclient) data_vol_snap_1 = Snapshot.create( self.apiclient, data_volume_2.id) self.assertEqual( data_vol_snap_1.state, "BackedUp", "Check if the snapshot state is correct " ) data_disk_2_list = Volume.list( self.userapiclient, listall=self.testdata["listall"], id=data_volume_2.id ) self.vm_1.start(self.apiclient) checksum_data_2 = createChecksum( 
service=self.testdata, virtual_machine=self.vm_1, disk=data_disk_2_list[0], disk_type="datadiskdevice_2") # Step 13 self.vm_1.detach_volume(self.apiclient, data_volume_2) self.vm_1.reboot(self.apiclient) prev_state = data_volume_2.state data_vol_snap_2 = Snapshot.create( self.apiclient, data_volume_2.id) self.assertEqual( data_vol_snap_2.state, prev_state, "Check if the volume state is correct " ) data_snapshot_list_2 = list_snapshots( self.apiclient, id=data_vol_snap_2.id ) self.assertNotEqual( data_snapshot_list_2, None, "Check if result exists in list item call" ) self.assertEqual( data_snapshot_list_2[0].id, data_vol_snap_2.id, "Check resource id in list resources call" ) volumeFromSnap_2 = Volume.create_from_snapshot( self.apiclient, data_vol_snap_2.id, self.testdata["volume"], account=self.account.name, domainid=self.account.domainid, zoneid=self.zone.id ) self.vm_2.attach_volume( self.userapiclient, volumeFromSnap_2 ) self.vm_2.reboot(self.apiclient) data_disk_2_list = Volume.list( self.userapiclient, listall=self.testdata["listall"], id=volumeFromSnap_2.id ) compareChecksum( self.apiclient, service=self.testdata, original_checksum=checksum_data_2, disk_type="datadiskdevice_2", virt_machine=self.vm_2 ) # Step 14 self.vm_1.stop(self.apiclient) with self.assertRaises(Exception): root_vol_snap.revertVolToSnapshot(self.apiclient) # Step 15 root_snap = Snapshot.create( self.apiclient, root_volume_cluster.id) with self.assertRaises(Exception): root_snap.revertVolToSnapshot(self.apiclient) return
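# Step 10 above fires the two Snapshot.create() calls on separate threads but wraps start/join in a
# bare except, so a failed snapshot would go unnoticed. A variant that records each thread's outcome
# (hypothetical, using the same Snapshot.create call as the test) makes the concurrent step verifiable:
def _concurrent_snapshots(self, root_volume_id, data_volume_id):
    results = {}

    def _snap(key, volume_id):
        try:
            results[key] = Snapshot.create(self.apiclient, volume_id)
        except Exception as err:
            results[key] = err

    threads = [Thread(target=_snap, args=("root", root_volume_id)),
               Thread(target=_snap, args=("data", data_volume_id))]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    for key, outcome in results.items():
        self.assertFalse(isinstance(outcome, Exception),
                         "Concurrent %s volume snapshot failed: %s" % (key, outcome))
    return results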