def test():
    """Cold-migrate a data volume that was re-created from a template.

    Flow: create a data volume, build a template from it on a backup
    storage, re-create a volume from that template, take two snapshots,
    then cold-migrate the volume to another local-storage host.
    Skips unless a Connected backup storage exists and the primary
    storage is local storage.
    """
    global test_obj_dict
    # Need at least one usable backup storage to host the template.
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond,
                                        None, fields=['uuid'])
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")

    volume_creation_option = test_util.VolumeOption()
    test_util.test_dsc('Create volume and check')
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume1 = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume1)
    volume1.check()
    volume_uuid = volume1.volume.uuid

    test_util.test_dsc('Create vm and check')
    vm = test_stub.create_vr_vm('migrate_volume_vm', 'imageName_net', 'l3VlanNetwork2')
    test_obj_dict.add_vm(vm)
    vm.check()
    vm_uuid = vm.vm.uuid

    # Attach once so the volume is initialized, then detach before
    # templating; the VM must be stopped to create the template.
    volume1.attach(vm)
    volume1.detach(vm_uuid)
    vm.stop()
    image_obj = volume1.create_template([bss[0].uuid])
    vm.start()

    host_uuid = vm.vm.hostUuid
    ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    volume2 = image_obj.create_data_volume(ps.uuid, 'volumeName', host_uuid)
    test_obj_dict.add_volume(volume2)
    volume2.check()
    volume_uuid = volume2.volume.uuid

    # Cold volume migration only applies to local storage; `ps` is the
    # same primary storage looked up above, no need to re-query it.
    if ps.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')

    snapshots = test_obj_dict.get_volume_snapshot(volume_uuid)
    snapshots.set_utility_vm(vm)
    snapshots.create_snapshot('create_snapshot1')
    snapshots.check()
    snapshots.create_snapshot('create_snapshot2')
    snapshots.check()

    target_host = test_lib.lib_find_random_host_by_volume_uuid(volume_uuid)
    target_host_uuid = target_host.uuid
    vol_ops.migrate_volume(volume_uuid, target_host_uuid)
    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Cold migrate Data Volume from Template with Snapshot Test Success')
def test():
    """Migrate two stopped VMs' root volumes concurrently and verify that
    the reported task progress stays within [0, 100] while both
    migrations are in flight.

    Skips unless both VMs land on local storage.  Uses the module-level
    ``migrate_volume`` helper as the thread target.
    """
    global vm
    vm = test_stub.create_vr_vm('migrate_stopped_vm', 'imageName_s', 'l3VlanNetwork2')
    ps = test_lib.lib_get_primary_storage_by_uuid(
        vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')
    vm2 = test_stub.create_vr_vm('migrate_stopped_vm2', 'imageName_s', 'l3VlanNetwork2')
    ps2 = test_lib.lib_get_primary_storage_by_uuid(
        vm2.get_vm().allVolumes[0].primaryStorageUuid)
    if ps2.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')
    # Both volumes are migrated to the same target host on purpose.
    target_host = test_lib.lib_find_random_host(vm.vm)
    vm.stop()
    vm2.stop()
    thread = threading.Thread(target=migrate_volume, args=(
        vm.get_vm().allVolumes[0].uuid, target_host.uuid, ))
    thread.start()
    #target_host = test_lib.lib_find_random_host(vm2.vm)
    thread2 = threading.Thread(target=migrate_volume, args=(
        vm2.get_vm().allVolumes[0].uuid, target_host.uuid, ))
    thread2.start()
    # Give the migrations a moment to start before sampling progress;
    # assumes 2s is enough for the tasks to register — TODO confirm.
    time.sleep(2)
    progress = res_ops.get_task_progress(vm.get_vm().allVolumes[0].uuid)
    if int(progress.progress) < 0 or int(progress.progress) > 100:
        test_util.test_fail(
            "Progress of task should be between 0 and 100, while it actually is %s" % (progress.progress))
    progress = res_ops.get_task_progress(vm2.get_vm().allVolumes[0].uuid)
    if int(progress.progress) < 0 or int(progress.progress) > 100:
        test_util.test_fail(
            "Progress of task should be between 0 and 100, while it actually is %s" % (progress.progress))
    thread.join()
    thread2.join()
    # NOTE(review): vm2 is never destroyed here — presumably cleaned up
    # by the suite's teardown/error path; verify against the harness.
    vm.destroy()
    test_util.test_pass('Migrate Stopped VM Test Success')
def test():
    """Full-clone a stopped VM across primary-storage types.

    Depending on the CASE_FLAVOR, the VM is created on SharedBlock or
    Ceph, two data volumes are attached (one per PS type), and the
    stopped VM is full-cloned with its root/data volumes redirected to
    the opposite PS type.  Asserts the cloned volumes land on the
    expected primary storages.
    """
    ps_inv = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
    ceph_ps = [ps for ps in ps_inv if ps.type == 'Ceph']
    if not ceph_ps:
        test_util.test_skip('Skip test as there is not Ceph primary storage')
    flavor = case_flavor[os.getenv('CASE_FLAVOR')]
    if flavor['shared_vm']:
        multi_ps.create_vm(ps_type="SharedBlock")
    else:
        multi_ps.create_vm(ps_type="Ceph")
    # One data volume on each PS type, attached to the same VM.
    multi_ps.create_data_volume(vms=multi_ps.vm, ps_type='SharedBlock')
    multi_ps.create_data_volume(vms=multi_ps.vm, ps_type='Ceph')
    vm = multi_ps.vm[0]
    vm.stop()
    shared_ps = multi_ps.get_ps(ps_type='SharedBlock')
    ceph_ps = multi_ps.get_ps(ps_type='Ceph')
    # NOTE(review): only two flavor combinations assign the target PS
    # uuids below; if 'to_shared_vm' and 'to_shared_volume' are both
    # true (or both false) these names stay unbound and clone() would
    # raise NameError — presumably those combos are never scheduled;
    # verify against the case_flavor table.
    if flavor['to_shared_vm']:
        if not flavor['to_shared_volume']:
            ps_uuid_for_root_volume = shared_ps.uuid
            ps_uuid_for_data_volume = ceph_ps.uuid
    else:
        if flavor['to_shared_volume']:
            ps_uuid_for_root_volume = ceph_ps.uuid
            ps_uuid_for_data_volume = shared_ps.uuid
    root_volume_systag = []
    data_volume_systag = ["volumeProvisioningStrategy::ThinProvisioning"]
    cloned_vm = vm.clone(['test_stop_vm_full_clone'], full=True,
                         ps_uuid_for_root_volume=ps_uuid_for_root_volume,
                         ps_uuid_for_data_volume=ps_uuid_for_data_volume,
                         root_volume_systag=root_volume_systag,
                         data_volume_systag=data_volume_systag)[0]
    multi_ps.vm.append(cloned_vm.vm)
    # Expect root volume + the two cloned data volumes = 3.
    volumes_list = test_lib.lib_get_all_volumes(cloned_vm.vm)
    volumes_number = len(volumes_list)
    if volumes_number != 3:
        test_util.test_fail('Did not just find 3 volumes for [vm:] %s. But we assigned 2 data volume to the vm. We only catch %s volumes' % (cloned_vm.vm.uuid, volumes_number))
    else:
        test_util.test_logger('Find 3 volumes for [vm:] %s.'
                              % cloned_vm.vm.uuid)
    ps = test_lib.lib_get_primary_storage_by_uuid(test_lib.lib_get_root_volume(cloned_vm.vm).primaryStorageUuid)
    data_volume_ps1 = test_lib.lib_get_primary_storage_by_uuid(test_lib.lib_get_data_volumes(cloned_vm.vm)[0].primaryStorageUuid)
    data_volume_ps2 = test_lib.lib_get_primary_storage_by_uuid(test_lib.lib_get_data_volumes(cloned_vm.vm)[1].primaryStorageUuid)
    if flavor['to_shared_vm']:
        if not flavor['to_shared_volume']:
            test_util.test_logger(ps.type + data_volume_ps1.type + data_volume_ps2.type)
            assert ps.type == 'SharedBlock' and data_volume_ps1.type == 'Ceph' and data_volume_ps2.type == 'Ceph'
    else:
        if flavor['to_shared_volume']:
            test_util.test_logger(ps.type + data_volume_ps1.type + data_volume_ps2.type)
            assert ps.type == 'Ceph' and data_volume_ps1.type == 'SharedBlock' and data_volume_ps2.type == 'SharedBlock'
    test_util.test_pass('Full Clone Stopped VM Test Success')
def check(self):
    """Verify the volume's backing file exists on its primary storage.

    Dispatches to the storage-specific checker based on the primary
    storage type; an iSCSI backend is detected first and takes
    precedence over the type-based dispatch.  Judges False early when
    the volume has no installPath to inspect.
    """
    super(zstack_kvm_volume_file_checker, self).check()
    vol = self.test_obj.volume
    install_path = vol.installPath
    if not install_path:
        test_util.test_logger('Check result: [installPath] is Null for [volume uuid: ] %s. Can not check volume file existence' % vol.uuid)
        return self.judge(False)

    ps_uuid = vol.primaryStorageUuid
    ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid)

    # iSCSI backend wins over the plain type dispatch below.
    if test_lib.lib_is_ps_iscsi_backend(ps_uuid):
        self.check_iscsi(vol, install_path, ps)
        return

    ps_type = ps.type
    if ps_type == inventory.NFS_PRIMARY_STORAGE_TYPE:
        self.check_nfs(vol, install_path)
    elif ps_type == inventory.LOCAL_STORAGE_TYPE:
        # Local storage: the file lives on exactly one host.
        owner_host = test_lib.lib_get_local_storage_volume_host(vol.uuid)
        if not owner_host:
            return self.judge(False)
        self.check_file_exist(vol, install_path, owner_host)
    elif ps_type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
        self.check_ceph(vol, install_path, ps)
    elif ps_type == 'SharedBlock':
        self.check_sharedblock(vol, install_path, ps)
    elif ps_type == 'AliyunEBS':
        self.check_ebs(ps, install_path)
    elif ps_type == 'MiniStorage':
        self.check_mini(vol, install_path, ps)
def check(self):
    """Verify a shared volume's backing file exists on primary storage.

    Shared volumes are only supported on Ceph; any other primary
    storage type is logged as unsupported and left unchecked.  Judges
    False early when the volume has no installPath.
    """
    super(zstack_kvm_share_volume_file_checker, self).check()
    volume = self.test_obj.volume
    volume_installPath = volume.installPath
    if not volume_installPath:
        test_util.test_logger(
            'Check result: [installPath] is Null for [volume uuid: ] %s. Can not check volume file existence' % volume.uuid)
        return self.judge(False)
    ps_uuid = volume.primaryStorageUuid
    ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid)
    # Only Ceph backs shared volumes; other PS types get a log entry.
    if ps.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
        self.check_ceph(volume, volume_installPath, ps)
    else:
        test_util.test_logger(
            'Check result: [share volume] primary storage is only support ceph, other storage type is not supported.'
        )
def test():
    """Migrate several stopped VMs' volumes in parallel while a checker
    thread per migration validates the reported task progress.

    Uses the module-level ``vms``/``threads``/``checker_threads`` arrays
    and their ``*_result`` status lists; each worker writes "Done" on
    success.  Skips unless every VM is on local storage.
    """
    global vms
    for i in range(0, threads_num):
        vms[i] = test_stub.create_vr_vm('migrate_stopped_vm', 'imageName_net', 'l3VlanNetwork2')
        test_obj_dict.add_vm(vms[i])
        ps = test_lib.lib_get_primary_storage_by_uuid(vms[i].get_vm().allVolumes[0].primaryStorageUuid)
        if ps.type != inventory.LOCAL_STORAGE_TYPE:
            test_util.test_skip('Skip test on non-localstorage')
    # Kick off all migrations first, then all progress checkers.
    for i in range(0, threads_num):
        threads[i] = threading.Thread(target=migrate_volume, args=(i, ))
        threads[i].start()
    for i in range(0, threads_num):
        checker_threads[i] = threading.Thread(target=check_migrate_volume_progress, args=(i, ))
        checker_threads[i].start()
    for i in range(0, threads_num):
        checker_threads[i].join()
        threads[i].join()
    # Worker threads report via shared result lists; "Done" == success.
    for i in range(0, threads_num):
        if threads_result[i] != "Done":
            test_util.test_fail("Exception happened during migrate Volume")
        if checker_threads_result[i] != "Done":
            test_util.test_fail("Exception happened during check migrate Volume progress")
    for i in range(0, threads_num):
        vms[i].destroy()
        vms[i].expunge()
    test_util.test_pass('Migrate Stopped VM progress Test Success')
def test():
    """Cold-migrate a detached data volume between local-storage hosts.

    Creates a data volume and a VM, attaches then detaches the volume so
    it is initialized, and migrates it to a randomly chosen eligible
    host.  Skips unless the VM sits on local storage.
    """
    global test_obj_dict

    test_util.test_dsc('Create volume and check')
    vol_option = test_util.VolumeOption()
    offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    vol_option.set_disk_offering_uuid(offering.uuid)
    data_volume = test_stub.create_volume(vol_option)
    test_obj_dict.add_volume(data_volume)
    data_volume.check()
    data_volume_uuid = data_volume.volume.uuid

    test_util.test_dsc('Create vm and check')
    vm = test_stub.create_vr_vm('migrate_volume_vm', 'imageName_s', 'l3VlanNetwork2')
    test_obj_dict.add_vm(vm)
    vm.check()
    vm_uuid = vm.vm.uuid

    root_ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    if root_ps.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')

    test_util.test_dsc('Attach data volume to vm and check')
    data_volume.attach(vm)
    dst_host = test_lib.lib_find_random_host_by_volume_uuid(data_volume_uuid)

    test_util.test_dsc('Detach data volume from vm and check')
    data_volume.detach(vm_uuid)
    vol_ops.migrate_volume(data_volume_uuid, dst_host.uuid)

    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Cold migrate Data Volume Test Success')
def test():
    """Create a VM and check the time spent in each provisioning stage
    against fixed millisecond budgets.

    Currently disabled via test_skip at the top ("need further polish");
    the remainder documents the intended flow.  Skips unless the VM is
    on local storage.
    """
    # Hard skip: the stage-time thresholds below are not yet trusted.
    test_util.test_skip('Time cases need further polish, skip test right now')
    vm_name = 'vm_' + key_gen(7)
    # Timestamp in milliseconds, used to scope the stage-time query.
    begin_time = int(time.time() * 1000)
    vm = test_stub.create_vm(vm_name)
    test_obj_dict.add_vm(vm)
    ps = test_lib.lib_get_primary_storage_by_uuid(
        vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')
    vm.check()
    # All stage durations are in milliseconds.
    [select_bs_time, allocate_host_time, allocate_ps_time, local_storage_allocate_capacity_time,\
     allocate_volume_time, allocate_nic_time, instantiate_res_pre_time, create_on_hypervisor_time,\
     instantiate_res_post_time] = test_stub.get_stage_time(vm_name, begin_time)
    test_util.test_dsc("select_bs_time is " + str(select_bs_time))
    test_util.test_dsc("allocate_host_time is " + str(allocate_host_time))
    test_util.test_dsc("allocate_ps_time is " + str(allocate_ps_time))
    test_util.test_dsc("local_storage_allocate_capacity_time is " + str(local_storage_allocate_capacity_time))
    test_util.test_dsc("allocate_volume_time is " + str(allocate_volume_time))
    test_util.test_dsc("allocate_nic_time is " + str(allocate_nic_time))
    test_util.test_dsc("instantiate_res_pre_time is " + str(instantiate_res_pre_time))
    test_util.test_dsc("create_on_hypervisor_time is " + str(create_on_hypervisor_time))
    test_util.test_dsc("instantiate_res_post_time is " + str(instantiate_res_post_time))
    # Per-stage budgets; each failure names the stage and its limit.
    if select_bs_time > 10:
        test_util.test_fail('select_bs_time is bigger than 10 milliseconds')
    if allocate_host_time > 190:
        test_util.test_fail(
            'allocate_host_time is bigger than 190 milliseconds')
    if allocate_ps_time > 70:
        test_util.test_fail('allocate_ps_time is bigger than 70 milliseconds')
    if local_storage_allocate_capacity_time > 70:
        test_util.test_fail(
            'local_storage_allocate_capacity_time is bigger than 70 milliseconds'
        )
    if allocate_volume_time > 90:
        test_util.test_fail(
            'allocate_volume_time is bigger than 90 milliseconds')
    if allocate_nic_time > 70:
        test_util.test_fail('allocate_nic_time is bigger than 70 milliseconds')
    if instantiate_res_pre_time > 1300:
        test_util.test_fail(
            'instantiate_res_pre_time is bigger than 1300 milliseconds')
    if create_on_hypervisor_time > 2500:
        test_util.test_fail(
            'create_on_hypervisor_time is bigger than 2500 milliseconds')
    if instantiate_res_post_time > 30:
        test_util.test_fail(
            'instantiate_res_post_time is bigger than 30 milliseconds')
    vm.destroy()
    test_util.test_pass('Create VM and Check time for Each Stage Test Success')
def test():
    """Create a shareable data volume, template it, re-create a volume
    from the template, and verify attach/detach of the new volume.

    Only runs on Ceph or SharedBlock primary storage; the running-VM
    flavor further narrows the allowed PS to Ceph before templating.
    """
    allow_ps_list = [inventory.CEPH_PRIMARY_STORAGE_TYPE, "SharedBlock"]
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)
    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]

    test_util.test_dsc('Create test vm and check')
    vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm)

    test_util.test_dsc('Create volume and check')
    disk_offering = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('smallDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    # Shareable + virtio-scsi tags mark the volume as a shared disk.
    volume_creation_option.set_system_tags(
        ['ephemeral::shareable', 'capability::virtio-scsi'])
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)

    test_util.test_dsc('Attach volume and check')
    volume.attach(vm)
    # Truthiness instead of explicit "== False"/"== True" comparisons.
    if not flavor['vm_running']:
        vm.stop()
    if flavor['vm_running']:
        allow_ps_list = [inventory.CEPH_PRIMARY_STORAGE_TYPE]
        test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)
    ps_uuid = volume.get_volume().primaryStorageUuid
    ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid)

    test_util.test_dsc('Create volume template and check')
    bs_list = test_lib.lib_get_backup_storage_list_by_vm(vm.get_vm())
    bs_uuid_list = [bs.uuid for bs in bs_list]
    vol_tmpt = volume.create_template(bs_uuid_list, 'new_data_template')
    test_obj_dict.add_image(vol_tmpt)
    vol_tmpt.check()
    volume.check()
    volume.delete()
    test_obj_dict.rm_volume(volume)

    test_util.test_dsc('Create volume from template and check')
    volume2 = vol_tmpt.create_data_volume(ps_uuid, 'new_volume_from_template')
    test_obj_dict.add_volume(volume2)
    vol_tmpt.delete()
    test_obj_dict.rm_image(vol_tmpt)
    volume2.check()
    volume2.attach(vm)
    vm.check()
    volume2.check()
    volume2.detach()
    volume2.delete()
    test_obj_dict.rm_volume(volume2)
    vm.destroy()
    test_util.test_pass(
        'Create Sharable Data Volume Template from Data Volume Success.')
def test():
    """Migrate a stopped local-storage VM while an ISO is attached.

    Fabricates a fake ISO under the management node's zstack-repo web
    root (locally or via ssh, depending on whether the MN IP is
    directly routable), adds it as an image, attaches it to the VM,
    then cold-migrates the stopped VM's root volume and restarts it.
    """
    global vm
    vm = test_stub.create_vr_vm('migrate_stopped_vm', 'imageName_s', 'l3VlanNetwork2')
    test_obj_dict.add_vm(vm)
    ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')
    vm_inv = vm.get_vm()
    vm_uuid = vm_inv.uuid
    test_util.test_dsc('Add ISO Image')
    cond = res_ops.gen_query_conditions('name', '=', 'sftp')
    bs_uuid = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0].uuid
    img_option = test_util.ImageOption()
    img_option.set_name('iso')
    img_option.set_backup_storage_uuid_list([bs_uuid])
    mn = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0]
    # Drop a fake ISO into the MN's served zstack-repo directory so it
    # can be added over HTTP below.
    cmd = "echo fake iso for test only > %s/apache-tomcat/webapps/zstack/static/zstack-repo/7/x86_64/os/test.iso" % (os.environ.get('zstackInstallPath'))
    if os.system("ip r | grep %s" % (mn.hostName)) == 0:
        # The MN address is locally routable: run the command here.
        os.system(cmd)
    else:
        # Otherwise find the host that IS the management node (by its
        # management IP or its secondary IP) and run the command there.
        for host in test_lib.lib_get_all_hosts_from_plan():
            test_util.test_logger("host.managementIp_: %s" % (host.managementIp_))
            test_util.test_logger("mn.hostName: %s" % (mn.hostName))
            test_util.test_logger("anotherIp: %s" % (test_stub.get_another_ip_of_host(host.managementIp_, host.username_, host.password_)))
            if host.managementIp_ == mn.hostName or test_stub.get_another_ip_of_host(host.managementIp_, host.username_, host.password_) == mn.hostName:
                out = test_lib.lib_execute_ssh_cmd(host.managementIp_, host.username_, host.password_, cmd, timeout=30)
    img_option.set_url('http://%s:8080/zstack/static/zstack-repo/7/x86_64/os/test.iso' % (mn.hostName))
    image_inv = img_ops.add_iso_template(img_option)
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)
    test_obj_dict.add_image(image)
    test_util.test_dsc('Attach ISO to VM')
    cond = res_ops.gen_query_conditions('name', '=', 'iso')
    iso_uuid = res_ops.query_resource(res_ops.IMAGE, cond)[0].uuid
    img_ops.attach_iso(iso_uuid, vm_uuid)
    test_util.test_dsc('Migrate VM')
    vm.check()
    target_host = test_lib.lib_find_random_host(vm.vm)
    vm.stop()
    vol_ops.migrate_volume(vm.get_vm().allVolumes[0].uuid, target_host.uuid)
    vm.check()
    vm.start()
    vm.check()
    img_ops.detach_iso(vm_uuid)
    image.delete()
    image.expunge()
    test_obj_dict.rm_image(image)
    vm.destroy()
    test_util.test_pass('Migrate Stopped VM Test Success When Attach ISO')
def test():
    """Create a shareable data volume, build a template from it, then
    re-create a volume from the template and verify attach/detach.

    Only runs on Ceph or SharedBlock primary storage; the running-VM
    flavor further narrows the allowed PS to Ceph before templating.
    """
    allow_ps_list = [inventory.CEPH_PRIMARY_STORAGE_TYPE, "SharedBlock"]
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)
    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    test_util.test_dsc('Create test vm and check')
    vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm)
    test_util.test_dsc('Create volume and check')
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    # Shareable + virtio-scsi tags mark the volume as a shared disk.
    volume_creation_option.set_system_tags(['ephemeral::shareable', 'capability::virtio-scsi'])
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    test_util.test_dsc('Attach volume and check')
    volume.attach(vm)
    # Truthiness instead of explicit "== False"/"== True" comparisons.
    if not flavor['vm_running']:
        vm.stop()
    if flavor['vm_running']:
        allow_ps_list = [inventory.CEPH_PRIMARY_STORAGE_TYPE]
        test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)
    ps_uuid = volume.get_volume().primaryStorageUuid
    ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid)
    test_util.test_dsc('Create volume template and check')
    bs_list = test_lib.lib_get_backup_storage_list_by_vm(vm.get_vm())
    bs_uuid_list = [bs.uuid for bs in bs_list]
    vol_tmpt = volume.create_template(bs_uuid_list, 'new_data_template')
    test_obj_dict.add_image(vol_tmpt)
    vol_tmpt.check()
    volume.check()
    volume.delete()
    test_obj_dict.rm_volume(volume)
    test_util.test_dsc('Create volume from template and check')
    volume2 = vol_tmpt.create_data_volume(ps_uuid, 'new_volume_from_template')
    test_obj_dict.add_volume(volume2)
    vol_tmpt.delete()
    test_obj_dict.rm_image(vol_tmpt)
    volume2.check()
    volume2.attach(vm)
    vm.check()
    volume2.check()
    volume2.detach()
    volume2.delete()
    test_obj_dict.rm_volume(volume2)
    vm.destroy()
    test_util.test_pass('Create Sharable Data Volume Template from Data Volume Success.')
def check(self):
    """Verify the volume is actually attached to its target VM.

    Asks the test agent on the VM's host for the VM's block-device
    status, normalizes the volume's installPath into the form that
    appears in that output for each storage scheme, and judges True
    iff the normalized path is found in the output.
    """
    super(zstack_kvm_volume_attach_checker, self).check()
    volume = self.test_obj.volume
    if not volume.vmInstanceUuid:
        test_util.test_logger('Check result: [volume:] %s does NOT have vmInstanceUuid. It is not attached to any vm.' % volume.uuid)
        return self.judge(False)
    if not self.test_obj.target_vm:
        test_util.test_logger('Check result: test [volume:] %s does NOT have vmInstance record in test structure. Can not do furture checking.' % volume.uuid)
        return self.judge(False)
    vm = self.test_obj.target_vm.vm
    volume_installPath = volume.installPath
    if not volume_installPath:
        test_util.test_logger('Check result: [installPath] is Null for [volume uuid: ] %s. Can not check if volume is attached to vm.' % volume.uuid)
        return self.judge(False)
    # Query the test agent on the VM's host for block-device status.
    host = test_lib.lib_get_vm_host(vm)
    cmd = vm_plugin.VmStatusCmd()
    cmd.vm_uuids = [vm.uuid]
    rspstr = http.json_dump_post(testagent.build_http_path(host.managementIp, vm_plugin.VM_BLK_STATUS), cmd)
    rsp = jsonobject.loads(rspstr)
    output = jsonobject.dumps(rsp.vm_status[vm.uuid])
    # Normalize the installPath per storage scheme so it matches what
    # the agent reports.
    if volume_installPath.startswith('iscsi'):
        # Strip to the bare IQN and drop the trailing LUN suffix.
        volume_installPath = volume_installPath.split(';')[0].split('/iqn')[1]
        volume_installPath = 'iqn%s' % volume_installPath
        volume_installPath = volume_installPath[:-2]
    elif volume_installPath.startswith('ceph'):
        volume_installPath = volume_installPath.split('ceph://')[1]
    elif volume_installPath.startswith('fusionstor'):
        volume_installPath = volume_installPath.split('fusionstor://')[1]
    elif volume_installPath.startswith('sharedblock'):
        volume_installPath = "/dev/" + volume_installPath.split('sharedblock://')[1]
    elif volume_installPath.startswith('mini'):
        # MiniStorage: resolve the DRBD minor number over ssh on the host.
        _cmd = "drbdsetup show %s | grep device | awk -F';' '{print $1}' | awk '{print $3}'" % volume.uuid
        result = test_lib.lib_execute_ssh_cmd(host.managementIp, host.username, host.password, _cmd, 180)
        volume_installPath = '/dev/drbd' + result.strip()
    elif volume_installPath.startswith('ebs'):
        # AliyunEBS: ask the EBS endpoint for the device name by volume id.
        ps_uuid = volume.primaryStorageUuid
        ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid)
        url = ps.url.replace('ocean/api', 'dev/name')
        vol_id = volume_installPath.split(';')[1].split('volumeId=')[-1]
        req = urllib2.Request(url, headers={'Volumeid': vol_id})
        volume_installPath = '/dev/' + urllib2.urlopen(req).read().split('"')[-2]
    if volume_installPath in output:
        test_util.test_logger('Check result: [volume:] %s [file:] %s is found in [vm:] %s on [host:] %s .' % (volume.uuid, volume_installPath, vm.uuid, host.managementIp))
        return self.judge(True)
    else:
        test_util.test_logger('Check result: [volume:] %s [file:] %s is not found in [vm:] %s on [host:] %s .' % (volume.uuid, volume_installPath, vm.uuid, host.managementIp))
        return self.judge(False)
def test():
    """Migrate a stopped local-storage VM while an ISO is attached.

    Fabricates a fake ISO under node1's zstack web root, adds it as an
    image, attaches it to the VM, cold-migrates the stopped VM's root
    volume to a random host, restarts it, then cleans up the image.
    """
    global vm
    vm = test_stub.create_vr_vm('migrate_stopped_vm', 'imageName_s', 'l3VlanNetwork2')
    test_obj_dict.add_vm(vm)
    ps = test_lib.lib_get_primary_storage_by_uuid(
        vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')
    vm_inv = vm.get_vm()
    vm_uuid = vm_inv.uuid
    test_util.test_dsc('Add ISO Image')
    cond = res_ops.gen_query_conditions('name', '=', 'sftp')
    bs_uuid = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0].uuid
    img_option = test_util.ImageOption()
    img_option.set_name('iso')
    img_option.set_backup_storage_uuid_list([bs_uuid])
    # Write a fake ISO into the served static directory; assumes this
    # process runs on node1 so the file is reachable over HTTP below.
    os.system(
        "echo fake iso for test only > %s/apache-tomcat/webapps/zstack/static/test.iso" % (os.environ.get('zstackInstallPath')))
    img_option.set_url('http://%s:8080/zstack/static/test.iso' % (os.environ.get('node1Ip')))
    image_inv = img_ops.add_iso_template(img_option)
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)
    test_obj_dict.add_image(image)
    test_util.test_dsc('Attach ISO to VM')
    cond = res_ops.gen_query_conditions('name', '=', 'iso')
    iso_uuid = res_ops.query_resource(res_ops.IMAGE, cond)[0].uuid
    img_ops.attach_iso(iso_uuid, vm_uuid)
    test_util.test_dsc('Migrate VM')
    vm.check()
    target_host = test_lib.lib_find_random_host(vm.vm)
    vm.stop()
    vol_ops.migrate_volume(vm.get_vm().allVolumes[0].uuid, target_host.uuid)
    vm.check()
    vm.start()
    vm.check()
    img_ops.detach_iso(vm_uuid)
    image.delete()
    image.expunge()
    test_obj_dict.rm_image(image)
    vm.destroy()
    test_util.test_pass('Migrate Stopped VM Test Success When Attach ISO')
def test():
    """Live-migrate a VM with an ISO attached (shared-storage only).

    Skips on Ceph + CentOS 7.4 (known libvirt hot-plug bug) and on
    local storage (live migration path here targets shared storage).
    """
    #skip ceph in c74
    cmd = "cat /etc/redhat-release | grep '7.4'"
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    # NOTE(review): ssh credential 'root'/'password' is hard-coded —
    # presumably the standard lab credential; verify against the env.
    rsp = test_lib.lib_execute_ssh_cmd(mn_ip, 'root', 'password', cmd, 180)
    if rsp != False:
        # MN runs CentOS 7.4: any Ceph PS triggers the libvirt bug.
        ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
        for i in ps:
            if i.type == 'Ceph':
                test_util.test_skip('cannot hotplug iso to the vm in ceph,it is a libvirt bug:https://bugzilla.redhat.com/show_bug.cgi?id=1541702.')
    global vm
    vm = test_stub.create_vr_vm('migrate_vm', 'imageName_s', 'l3VlanNetwork2')
    vm.check()
    ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on localstorage PS')
    vm_inv = vm.get_vm()
    vm_uuid = vm_inv.uuid
    test_util.test_dsc('Add ISO Image')
    #cond = res_ops.gen_query_conditions('name', '=', 'sftp')
    bs_uuid = res_ops.query_resource(res_ops.BACKUP_STORAGE)[0].uuid
    img_option = test_util.ImageOption()
    img_option.set_name('iso')
    img_option.set_backup_storage_uuid_list([bs_uuid])
    # ISO source comes from the environment rather than a fabricated file.
    testIsoUrl = os.environ.get('testIsoUrl')
    img_option.set_url(testIsoUrl)
    image_inv = img_ops.add_iso_template(img_option)
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)
    test_obj_dict.add_image(image)
    test_util.test_dsc('Attach ISO to VM')
    cond = res_ops.gen_query_conditions('name', '=', 'iso')
    iso_uuid = res_ops.query_resource(res_ops.IMAGE, cond)[0].uuid
    img_ops.attach_iso(iso_uuid, vm_uuid)
    test_util.test_dsc('Migrate VM')
    test_stub.migrate_vm_to_random_host(vm)
    vm.check()
    img_ops.detach_iso(vm_uuid)
    image.delete()
    image.expunge()
    test_obj_dict.rm_image(image)
    vm.destroy()
    test_util.test_pass('Migrate VM Test Success When Attach ISO')
def test():
    """Create a VM behind a vyos router and check the time spent in each
    provisioning stage against fixed millisecond budgets.

    Currently disabled via test_skip near the top ("need further
    polish").  Skips unless the VM is on local storage and its virtual
    router is of type "vrouter" (vyos).
    """
    test_util.test_dsc('Create test vm and check the time spend on each stage')
    # Hard skip: the stage-time thresholds below are not yet trusted.
    test_util.test_skip('Time cases need further polish, skip test right now')
    vm_name = 'vm_' + key_gen(7)
    # Timestamp in milliseconds, used to scope the stage-time query.
    begin_time = int(time.time() * 1000)
    vm = test_stub.create_named_vm(vm_name)
    test_obj_dict.add_vm(vm)
    ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')
    vr = test_lib.lib_find_vr_by_vm(vm.vm)[0]
    if vr.applianceVmType != "vrouter":
        test_util.test_skip("This test only for vyos network")
    vm.check()
    # All stage durations are in milliseconds.
    [select_bs_time, allocate_host_time, allocate_ps_time, local_storage_allocate_capacity_time,\
     allocate_volume_time, allocate_nic_time, instantiate_res_pre_time, create_on_hypervisor_time,\
     instantiate_res_post_time] = test_stub.get_stage_time(vm_name, begin_time)
    test_util.test_dsc("select_bs_time is " + str(select_bs_time))
    test_util.test_dsc("allocate_host_time is " + str(allocate_host_time))
    test_util.test_dsc("allocate_ps_time is " + str(allocate_ps_time))
    test_util.test_dsc("local_storage_allocate_capacity_time is " + str(local_storage_allocate_capacity_time))
    test_util.test_dsc("allocate_volume_time is " + str(allocate_volume_time))
    test_util.test_dsc("allocate_nic_time is " + str(allocate_nic_time))
    test_util.test_dsc("instantiate_res_pre_time is " + str(instantiate_res_pre_time))
    test_util.test_dsc("create_on_hypervisor_time is " + str(create_on_hypervisor_time))
    test_util.test_dsc("instantiate_res_post_time is " + str(instantiate_res_post_time))
    if select_bs_time > 10:
        test_util.test_fail('select_bs_time is bigger than 10 milliseconds')
    if allocate_host_time > 190:
        test_util.test_fail('allocate_host_time is bigger than 190 milliseconds')
    if allocate_ps_time > 70:
        test_util.test_fail('allocate_ps_time is bigger than 70 milliseconds')
    if local_storage_allocate_capacity_time > 70:
        test_util.test_fail('local_storage_allocate_capacity_time is bigger than 70 milliseconds')
    if allocate_volume_time > 90:
        test_util.test_fail('allocate_volume_time is bigger than 90 milliseconds')
    if allocate_nic_time > 70:
        test_util.test_fail('allocate_nic_time is bigger than 70 milliseconds')
    if instantiate_res_pre_time > 1300:
        test_util.test_fail('instantiate_res_pre_time is bigger than 1300 milliseconds')
    if create_on_hypervisor_time > 8000:
        # Fixed: message previously said 2500 but the limit checked is 8000.
        test_util.test_fail('create_on_hypervisor_time is bigger than 8000 milliseconds')
    if instantiate_res_post_time > 30:
        test_util.test_fail('instantiate_res_post_time is bigger than 30 milliseconds')
    vm.destroy()
    test_util.test_pass('Create VM and Check time for Each Stage Test Success')
def test():
    """Migrate two stopped VMs' root volumes concurrently to one target
    host and verify the reported task progress stays within [0, 100].

    Skips unless both VMs are on local storage.  Uses the module-level
    ``migrate_volume`` helper as the thread target.
    """
    global vm
    vm = test_stub.create_vr_vm('migrate_stopped_vm', 'imageName_s', 'l3VlanNetwork2')
    ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')
    vm2 = test_stub.create_vr_vm('migrate_stopped_vm2', 'imageName_s', 'l3VlanNetwork2')
    ps2 = test_lib.lib_get_primary_storage_by_uuid(vm2.get_vm().allVolumes[0].primaryStorageUuid)
    if ps2.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')
    # Both volumes are migrated to the same target host on purpose.
    target_host = test_lib.lib_find_random_host(vm.vm)
    vm.stop()
    vm2.stop()
    thread = threading.Thread(target=migrate_volume, args=(vm.get_vm().allVolumes[0].uuid, target_host.uuid, ))
    thread.start()
    #target_host = test_lib.lib_find_random_host(vm2.vm)
    thread2 = threading.Thread(target=migrate_volume, args=(vm2.get_vm().allVolumes[0].uuid, target_host.uuid, ))
    thread2.start()
    # Give the migrations a moment to start before sampling progress;
    # assumes 2s is enough for the tasks to register — TODO confirm.
    time.sleep(2)
    progress = res_ops.get_task_progress(vm.get_vm().allVolumes[0].uuid)
    if int(progress.progress) < 0 or int(progress.progress) > 100:
        test_util.test_fail("Progress of task should be between 0 and 100, while it actually is %s" % (progress.progress))
    progress = res_ops.get_task_progress(vm2.get_vm().allVolumes[0].uuid)
    if int(progress.progress) < 0 or int(progress.progress) > 100:
        test_util.test_fail("Progress of task should be between 0 and 100, while it actually is %s" % (progress.progress))
    thread.join()
    thread2.join()
    # NOTE(review): vm2 is never destroyed here — presumably cleaned up
    # by the suite's teardown/error path; verify against the harness.
    vm.destroy()
    test_util.test_pass('Migrate Stopped VM Test Success')
def test():
    """Cold-migrate a stopped VM's root volume and verify the VM still
    starts and checks cleanly afterwards.

    Skips unless the VM's root volume is on local storage.
    """
    global vm
    vm = test_stub.create_vr_vm('migrate_stopped_vm', 'imageName_s', 'l3VlanNetwork2')

    root_ps = test_lib.lib_get_primary_storage_by_uuid(
        vm.get_vm().allVolumes[0].primaryStorageUuid)
    if root_ps.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')

    vm.check()
    dst_host = test_lib.lib_find_random_host(vm.vm)
    vm.stop()

    root_volume_uuid = vm.get_vm().allVolumes[0].uuid
    vol_ops.migrate_volume(root_volume_uuid, dst_host.uuid)

    # VM must check cleanly both stopped and after restarting.
    vm.check()
    vm.start()
    vm.check()

    vm.destroy()
    test_util.test_pass('Migrate Stopped VM Test Success')
def test():
    """Migrate a stopped local-storage VM while an ISO is attached.

    Fabricates a fake ISO under node1's zstack web root, adds it as an
    image, attaches it to the VM, cold-migrates the stopped VM's root
    volume to a random host, restarts it, then cleans up the image.
    """
    global vm
    vm = test_stub.create_vr_vm('migrate_stopped_vm', 'imageName_s', 'l3VlanNetwork2')
    test_obj_dict.add_vm(vm)
    ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')
    vm_inv = vm.get_vm()
    vm_uuid = vm_inv.uuid
    test_util.test_dsc('Add ISO Image')
    cond = res_ops.gen_query_conditions('name', '=', 'sftp')
    bs_uuid = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0].uuid
    img_option = test_util.ImageOption()
    img_option.set_name('iso')
    img_option.set_backup_storage_uuid_list([bs_uuid])
    # Write a fake ISO into the served static directory; assumes this
    # process runs on node1 so the file is reachable over HTTP below.
    os.system("echo fake iso for test only > %s/apache-tomcat/webapps/zstack/static/test.iso" % (os.environ.get('zstackInstallPath')))
    img_option.set_url('http://%s:8080/zstack/static/test.iso' % (os.environ.get('node1Ip')))
    image_inv = img_ops.add_iso_template(img_option)
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)
    test_obj_dict.add_image(image)
    test_util.test_dsc('Attach ISO to VM')
    cond = res_ops.gen_query_conditions('name', '=', 'iso')
    iso_uuid = res_ops.query_resource(res_ops.IMAGE, cond)[0].uuid
    img_ops.attach_iso(iso_uuid, vm_uuid)
    test_util.test_dsc('Migrate VM')
    vm.check()
    target_host = test_lib.lib_find_random_host(vm.vm)
    vm.stop()
    vol_ops.migrate_volume(vm.get_vm().allVolumes[0].uuid, target_host.uuid)
    vm.check()
    vm.start()
    vm.check()
    img_ops.detach_iso(vm_uuid)
    image.delete()
    image.expunge()
    test_obj_dict.rm_image(image)
    vm.destroy()
    test_util.test_pass('Migrate Stopped VM Test Success When Attach ISO')
def test():
    """Cold-migrate the root volume of a stopped VM, then confirm the VM
    checks, starts, and checks again on the new host.

    Local-storage only; other primary-storage types skip.
    """
    global vm
    vm = test_stub.create_vr_vm('migrate_stopped_vm', 'imageName_s', 'l3VlanNetwork2')

    primary_storage = test_lib.lib_get_primary_storage_by_uuid(
        vm.get_vm().allVolumes[0].primaryStorageUuid)
    if primary_storage.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')

    vm.check()
    migration_target = test_lib.lib_find_random_host(vm.vm)
    vm.stop()

    # Migrate the (now offline) root volume to the chosen host.
    vol_ops.migrate_volume(vm.get_vm().allVolumes[0].uuid,
                           migration_target.uuid)

    vm.check()
    vm.start()
    vm.check()
    vm.destroy()
    test_util.test_pass('Migrate Stopped VM Test Success')
def test():
    """Build a data volume out of a root-volume snapshot, take two snapshots
    on that volume, then cold-migrate it to another local-storage host."""
    global test_obj_dict

    test_util.test_dsc('Create vm and check')
    src_vm = test_stub.create_vr_vm('migrate_volume_vm', 'imageName_net', 'l3VlanNetwork2')
    test_obj_dict.add_vm(src_vm)
    src_vm.check()
    src_vm_uuid = src_vm.vm.uuid
    root_volume_uuid = src_vm.vm.rootVolumeUuid

    # Volume migration is a local-storage-only operation.
    storage = test_lib.lib_get_primary_storage_by_uuid(
        src_vm.get_vm().allVolumes[0].primaryStorageUuid)
    if storage.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')

    # Snapshot the root volume, then derive a brand new data volume from it.
    root_sp_group = test_obj_dict.get_volume_snapshot(root_volume_uuid)
    root_sp_group.set_utility_vm(src_vm)
    root_sp_group.create_snapshot('snapshot_for_volume')
    root_sp_group.check()
    current_sp = root_sp_group.get_current_snapshot()
    current_sp_uuid = current_sp.snapshot.uuid
    data_volume = current_sp.create_data_volume()
    test_obj_dict.add_volume(data_volume)
    data_volume.check()
    data_volume_uuid = data_volume.volume.uuid

    # Put two snapshots on the derived volume before migrating it.
    data_sp_group = test_obj_dict.get_volume_snapshot(data_volume_uuid)
    data_sp_group.set_utility_vm(src_vm)
    data_sp_group.create_snapshot('create_snapshot1')
    data_sp_group.check()
    data_sp_group.create_snapshot('create_snapshot2')
    data_sp_group.check()

    # Cold-migrate the volume (it is not attached) to a random other host.
    dst_host = test_lib.lib_find_random_host_by_volume_uuid(data_volume_uuid)
    dst_host_uuid = dst_host.uuid
    vol_ops.migrate_volume(data_volume_uuid, dst_host_uuid)

    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass(
        'Cold migrate Data Volume from Snapshot with Snapshot Test Success')
def check(self):
    """Verify the backing file of self.test_obj.volume exists on its primary
    storage, dispatching to a storage-type-specific checker.

    Fails the judgement immediately when the volume has no installPath (the
    file location is unknown, so existence cannot be verified).
    """
    super(zstack_kvm_volume_file_checker, self).check()
    volume = self.test_obj.volume
    volume_installPath = volume.installPath
    if not volume_installPath:
        test_util.test_logger('Check result: [installPath] is Null for [volume uuid: ] %s. Can not check volume file existence' % volume.uuid)
        return self.judge(False)
    ps_uuid = volume.primaryStorageUuid
    ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid)
    # Dispatch on the primary-storage backend. iSCSI is tested first because
    # it is detected by backend probing rather than by ps.type.
    if test_lib.lib_is_ps_iscsi_backend(ps_uuid):
        self.check_iscsi(volume, volume_installPath, ps)
    elif ps.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
        self.check_nfs(volume, volume_installPath)
    elif ps.type == inventory.LOCAL_STORAGE_TYPE:
        # Local storage reuses the NFS file check — presumably both are plain
        # files on a mounted path; confirm against check_nfs's implementation.
        self.check_nfs(volume, volume_installPath)
    elif ps.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
        self.check_ceph(volume, volume_installPath, ps)
    # NOTE(review): unknown storage types fall through silently with no
    # judgement — intentional best-effort, or a gap to be confirmed.
def test():
    """Cold-migrate a data volume that was created from a root-volume
    snapshot, after stacking two more snapshots on it (local storage only).
    """
    global test_obj_dict
    test_util.test_dsc('Create vm and check')
    vm = test_stub.create_vr_vm('migrate_volume_vm', 'imageName_net', 'l3VlanNetwork2')
    test_obj_dict.add_vm(vm)
    vm.check()
    vm_uuid = vm.vm.uuid
    root_vol_uuid = vm.vm.rootVolumeUuid
    # Volume migration only applies to local storage.
    ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')
    # Snapshot the root volume and create a data volume from that snapshot.
    snapshots = test_obj_dict.get_volume_snapshot(root_vol_uuid)
    snapshots.set_utility_vm(vm)
    snapshots.create_snapshot('snapshot_for_volume')
    snapshots.check()
    snapshot = snapshots.get_current_snapshot()
    snapshot_uuid = snapshot.snapshot.uuid
    volume = snapshot.create_data_volume()
    test_obj_dict.add_volume(volume)
    volume.check()
    volume_uuid = volume.volume.uuid
    # Add two snapshots to the derived data volume before the migration.
    snapshots = test_obj_dict.get_volume_snapshot(volume_uuid)
    snapshots.set_utility_vm(vm)
    snapshots.create_snapshot('create_snapshot1')
    snapshots.check()
    snapshots.create_snapshot('create_snapshot2')
    snapshots.check()
    # Cold-migrate the (detached) data volume to a random eligible host.
    target_host = test_lib.lib_find_random_host_by_volume_uuid(volume_uuid)
    target_host_uuid = target_host.uuid
    vol_ops.migrate_volume(volume_uuid, target_host_uuid)
    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Cold migrate Data Volume from Snapshot with Snapshot Test Success')
def test():
    """Full-clone a VM without its data volumes, targeting either SharedBlock
    or Ceph primary storage depending on the CASE_FLAVOR environment flags,
    and verify the clone's root volume landed on the expected storage type."""
    all_ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
    if not [candidate for candidate in all_ps if candidate.type == 'Ceph']:
        test_util.test_skip('Skip test as there is not Ceph primary storage')

    flavor = case_flavor[os.getenv('CASE_FLAVOR')]

    # Source VM lives on SharedBlock or Ceph according to the flavor.
    source_ps_type = "SharedBlock" if flavor['shared_vm'] else "Ceph"
    multi_ps.create_vm(ps_type=source_ps_type)
    vm = multi_ps.vm[0]
    if not flavor['running']:
        vm.stop()

    shared_ps = multi_ps.get_ps(ps_type='SharedBlock')
    ceph_ps = multi_ps.get_ps(ps_type='Ceph')
    if flavor['to_shared_vm']:
        ps_uuid_for_root_volume = shared_ps.uuid
    else:
        ps_uuid_for_root_volume = ceph_ps.uuid

    # Full clone without data volumes, pinning the clone's root volume
    # to the chosen primary storage.
    root_volume_systag = []
    cloned_vm = vm.clone(['test_vm_clone_without_volume'],
                         full=True,
                         ps_uuid_for_root_volume=ps_uuid_for_root_volume,
                         root_volume_systag=root_volume_systag)[0]
    multi_ps.vm.append(cloned_vm.vm)

    # The clone must carry exactly one volume (its root volume).
    attached_volumes = test_lib.lib_get_all_volumes(cloned_vm.vm)
    if len(attached_volumes) != 1:
        test_util.test_fail('Did not just find 1 volumes for [vm:] %s.' % cloned_vm.vm.uuid)
    else:
        test_util.test_logger('Find 1 volumes for [vm:] %s.' % cloned_vm.vm.uuid)

    clone_root_ps = test_lib.lib_get_primary_storage_by_uuid(
        test_lib.lib_get_root_volume(cloned_vm.vm).primaryStorageUuid)
    if flavor['to_shared_vm']:
        assert clone_root_ps.type == 'SharedBlock'
    else:
        assert clone_root_ps.type == 'Ceph'
    test_util.test_pass('VM Clone Without Volume Test Success')
def _wait_image_cache_deleted(host, image_cache_path):
    """Poll `host` until `image_cache_path` disappears (5s interval, up to 7
    checks); fail the test if the file is still there afterwards.

    Extracted because the original inlined this identical polling loop twice,
    recomputing the loop-invariant path on every iteration.
    """
    count = 0
    while True:
        if not test_lib.lib_check_file_exist(host, image_cache_path):
            break
        elif count > 5:
            test_util.test_fail('image cache is expected to be deleted')
        test_util.test_logger('check %s times: image cache still exist' % (count))
        time.sleep(5)
        count += 1


def test():
    """Add an image, boot a VM from it on local storage, cold-migrate the VM's
    root volume, then delete/expunge the image and verify that the primary
    storage image cache (both imagecache and zstore-cache locations) is
    actually removed by cleanup_imagecache_on_primary_storage.
    """
    global vm
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond,
                                        None, fields=['uuid'])
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")

    # Register a fresh root-volume template so its cache entry is unique.
    image_option = test_util.ImageOption()
    image_option.set_name('test_image_cache_cleanup')
    image_option.set_format('qcow2')
    image_option.set_mediaType('RootVolumeTemplate')
    image_option.set_url(os.environ.get('imageUrl_s'))
    image_option.set_backup_storage_uuid_list([bss[0].uuid])
    new_image = zstack_image_header.ZstackTestImage()
    new_image.set_creation_option(image_option)
    new_image.add_root_volume_template()

    # Build the VM creation options: L3 network, offering, and a connected,
    # enabled host to pin the VM to.
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option = test_util.VmOption()
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(new_image.image.uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('test_image_cache_cleanup_vm1')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()

    # Volume migration and the cache paths below are local-storage specific.
    ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')
    test_obj_dict.add_vm(vm)
    vm.check()

    host = test_lib.lib_find_host_by_vm(vm.get_vm())
    target_host = test_lib.lib_find_random_host(vm.vm)
    vm.stop()
    vol_ops.migrate_volume(vm.get_vm().allVolumes[0].uuid, target_host.uuid)
    vm.check()
    vm.start()
    vm.check()

    # Remove the image, then ask the PS to clean its cache and verify both
    # cache locations disappear from the original host.
    new_image.delete()
    new_image.expunge()
    ps_ops.cleanup_imagecache_on_primary_storage(ps.uuid)
    _wait_image_cache_deleted(
        host, "%s/imagecache/template/%s" % (ps.mountPath, new_image.image.uuid))
    _wait_image_cache_deleted(
        host, "%s/zstore-cache/%s" % (ps.mountPath, new_image.image.uuid))

    vm.destroy()
    test_util.test_pass('Migrate VM Test Success')
def test():
    """Change a VM's image twice (small -> large -> small) and verify that
    data volumes, L3 network, and primary storage stay unchanged, and that
    the primary storage's available capacity moves by the expected amount.

    overProvisioning.primaryStorage is set to 10, which is why the expected
    capacity delta is (image.size - image_tiny.size) / 10.
    """
    test_util.test_dsc('Test Change VM Image Function')
    #set overProvisioning.primaryStorage's value as 10
    con_ops.change_global_config('mevoco','overProvisioning.primaryStorage',10)
    global vm
    # 1T == 1099511627776 bytes; three offerings give the VM multiple data
    # volumes whose survival across the image change is checked below.
    test_lib.lib_create_disk_offering(diskSize=1099511627776,name="1T")
    disk_offering_uuids = [test_lib.lib_get_disk_offering_by_name("smallDiskOffering").uuid,test_lib.lib_get_disk_offering_by_name("root-disk").uuid,test_lib.lib_get_disk_offering_by_name("1T").uuid]
    vm = test_stub.create_vm(image_name = "ttylinux",vm_name="test-vm",disk_offering_uuids = disk_offering_uuids)
    test_obj_dict.add_vm(vm)
    vm.check()
    vm_uuid = vm.get_vm().uuid
    # Snapshot the "before" state: data volume uuids, L3 network, primary
    # storage uuid and its capacity figures.
    last_data_volumes_uuids = []
    last_data_volumes = test_lib.lib_get_data_volumes(vm.get_vm())
    for data_volume in last_data_volumes:
        last_data_volumes_uuids.append(data_volume.uuid)
    last_l3network_uuid = test_lib.lib_get_l3s_uuid_by_vm(vm.get_vm())
    last_primarystorage_uuid = test_lib.lib_get_root_volume(vm.get_vm()).primaryStorageUuid
    ps = test_lib.lib_get_primary_storage_by_uuid(last_primarystorage_uuid)
    avail_cap = ps.availableCapacity
    total_cap = ps.totalCapacity
    # The VM must be stopped before ChangeVmImage.
    vm_ops.stop_vm(vm_uuid)
    image = test_lib.lib_get_image_by_name("image_for_sg_test")
    image_uuid = image.uuid
    image_tiny = test_lib.lib_get_image_by_name("ttylinux")
    image_tiny_uuid = image_tiny.uuid
    # Expected capacity delta, scaled down by the over-provisioning factor 10.
    change_size = (image.size-image_tiny.size)/10
    vm_ops.change_vm_image(vm_uuid,image_uuid)
    vm_ops.start_vm(vm_uuid)
    vm.update()
    #check whether the vm is running successfully
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip,'22',180):
        test_util.test_fail('vm:%s is not startup in 180 seconds.Fail to reboot it.' % vm_uuid)
    #check whether data volumes attached to the vm has changed
    data_volumes_after_uuids = []
    data_volumes_after = test_lib.lib_get_data_volumes(vm.get_vm())
    for data_volume in data_volumes_after:
        data_volumes_after_uuids.append(data_volume.uuid)
    if set(last_data_volumes_uuids) != set(data_volumes_after_uuids):
        test_util.test_fail('Change Vm Image Failed.Data volumes changed.')
    #check whether the network config has changed
    l3network_uuid_after = test_lib.lib_get_l3s_uuid_by_vm(vm.get_vm())
    if l3network_uuid_after != last_l3network_uuid:
        test_util.test_fail('Change VM Image Failed.The Network config has changed.')
    #check whether primarystorage has changed
    primarystorage_uuid_after = test_lib.lib_get_root_volume(vm.get_vm()).primaryStorageUuid
    if primarystorage_uuid_after != last_primarystorage_uuid:
        test_util.test_fail('Change VM Image Failed.Primarystorage has changed.')
    ps = test_lib.lib_get_primary_storage_by_uuid(primarystorage_uuid_after)
    avail_cap1 = ps.availableCapacity
    total_cap1 = ps.totalCapacity
    if total_cap != total_cap1:
        test_util.test_fail('Primary Storage total capacity is not same,after changing vm image:%s.The previous value:%s, the current value:%s' % (image_uuid,total_cap,total_cap1))
    # Allow +/-1 byte of rounding slack from the integer division above.
    if not (avail_cap1-1) <= (avail_cap - change_size) <= (avail_cap1+1):
        test_util.test_fail('Primary Storage available capacity is not correct,after changing larger image:%s.The previous value:%s, the current value:%s' % (image_uuid,avail_cap,avail_cap1))
    # Second change: back to the tiny image; capacity should come back.
    vm_ops.stop_vm(vm_uuid)
    image_tiny_uuid = image_tiny.uuid
    vm_ops.change_vm_image(vm_uuid,image_tiny_uuid)
    vm_ops.start_vm(vm_uuid)
    vm.update()
    #check whether the vm is running successfully
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip,'22',180):
        test_util.test_fail('vm:%s is not startup in 180 seconds.Fail to reboot it.' % vm_uuid)
    #check whether data volumes attached to the vm has changed
    data_volumes_after_uuids_tiny = []
    data_volumes_after_tiny = test_lib.lib_get_data_volumes(vm.get_vm())
    for data_volume in data_volumes_after_tiny:
        data_volumes_after_uuids_tiny.append(data_volume.uuid)
    if set(data_volumes_after_uuids_tiny) != set(data_volumes_after_uuids):
        test_util.test_fail('Change Vm Image Failed.Data volumes changed.')
    #check whether the network config has changed
    l3network_uuid_after_tiny = test_lib.lib_get_l3s_uuid_by_vm(vm.get_vm())
    if l3network_uuid_after_tiny != l3network_uuid_after:
        test_util.test_fail('Change VM Image Failed.The Network config has changed.')
    #check whether primarystorage has changed
    primarystorage_uuid_after_tiny = test_lib.lib_get_root_volume(vm.get_vm()).primaryStorageUuid
    if primarystorage_uuid_after_tiny != primarystorage_uuid_after:
        test_util.test_fail('Change VM Image Failed.Primarystorage has changed.')
    ps = test_lib.lib_get_primary_storage_by_uuid(primarystorage_uuid_after_tiny)
    avail_cap2 = ps.availableCapacity
    total_cap2 = ps.totalCapacity
    if total_cap2 != total_cap1:
        test_util.test_fail('Primary Storage total capacity is not same,after changing vm image:%s.The previous value:%s, the current value:%s' % (image_tiny_uuid,total_cap1,total_cap2))
    if not (change_size-1)<=(avail_cap2 - avail_cap1)<= (change_size+1):
        test_util.test_fail('Primary Storage available capacity is not correct,after changing smaller image:%s.The previous value:%s, the current value:%s' % (image_tiny_uuid,avail_cap1,avail_cap2))
    test_lib.lib_destroy_vm_and_data_volumes(vm.get_vm())
    vm.expunge()
    test_util.test_pass('Change Vm Image Test Success')
def test():
    """Add an image, boot a local-storage VM from it, cold-migrate the VM's
    root volume, expunge the image, and verify the local-storage image cache
    file is gone after cleanup_imagecache_on_primary_storage.

    Single-shot variant: unlike its polling sibling test, this one checks the
    cache file exactly once with no retries.
    """
    global vm
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, \
            None, fields=['uuid'])
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")
    # Register a dedicated template so its cache entry is unique to this test.
    image_option = test_util.ImageOption()
    image_option.set_name('test_image_cache_cleanup')
    image_option.set_format('qcow2')
    image_option.set_mediaType('RootVolumeTemplate')
    image_option.set_url(os.environ.get('imageUrl_s'))
    image_option.set_backup_storage_uuid_list([bss[0].uuid])
    new_image = zstack_image_header.ZstackTestImage()
    new_image.set_creation_option(image_option)
    new_image.add_root_volume_template()
    # Assemble VM creation options: network, offering, and a connected host.
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option = test_util.VmOption()
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(new_image.image.uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('test_image_cache_cleanup_vm1')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    # Volume migration (and the cache path below) is local-storage specific.
    ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')
    test_obj_dict.add_vm(vm)
    vm.check()
    host = test_lib.lib_find_host_by_vm(vm.get_vm())
    target_host = test_lib.lib_find_random_host(vm.vm)
    vm.stop()
    vol_ops.migrate_volume(vm.get_vm().allVolumes[0].uuid, target_host.uuid)
    vm.check()
    vm.start()
    vm.check()
    new_image.delete()
    new_image.expunge()
    ps_ops.cleanup_imagecache_on_primary_storage(ps.uuid)
    # The type was already asserted above, so this branch always runs here;
    # the guard mirrors the (commented-out) per-storage-type checks below.
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        image_cache_path = "%s/imagecache/template/%s" % (ps.mountPath, new_image.image.uuid)
        if test_lib.lib_check_file_exist(host, image_cache_path):
            test_util.test_fail('image cache is expected to be deleted')
    # elif ps.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
    # elif ps.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
    # elif ps.type == 'SharedMountPoint':
    vm.destroy()
    test_util.test_pass('Migrate VM Test Success')
def test():
    """vCenter volume-sync test: create/attach/detach/delete data volumes on
    a vCenter-backed VM, sync the vCenter, and verify ZStack's DB reflects
    volume state; then delete a volume file directly on the ESXi datastore
    and verify a second sync removes the orphaned DB record.
    """
    global test_obj_dict
    ova_image_name = os.environ['vcenterDefaultmplate']
    network_pattern1 = os.environ['l3vCenterNoVlanNetworkName']
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('largeDiskOfferingName'))
    test_util.test_dsc('Create vm and check')
    vm = test_stub.create_vm_in_vcenter(vm_name='test_volume_after_sync_vm', image_name=ova_image_name, l3_name=network_pattern1)
    test_obj_dict.add_vm(vm)
    vm.check()
    ps_uuid = vm.vm.allVolumes[0].primaryStorageUuid
    vc_ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid)
    vc_host = test_lib.lib_find_host_by_vm(vm.vm).managementIp
    test_util.test_dsc('Create volumes and check')
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume_creation_option.set_name('vcenter_volume')
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    volume.attach(vm)
    volume.check()
    # Second volume pinned to the same vCenter primary storage; it will be
    # deleted so the post-sync check can see a 'Deleted' status.
    volume_creation_option.set_name('vcenter_volume1')
    volume_creation_option.set_primary_storage_uuid(ps_uuid)
    volume1 = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume1)
    volume1.check()
    volume.detach()
    volume.check()
    volume1.attach(vm)
    volume1.check()
    volume1.delete()
    volume1.check()
    test_util.test_dsc('Sync vcenter')
    vc_name = os.environ['vcenter']
    vcenter_uuid = vct_ops.lib_get_vcenter_by_name(vc_name).uuid
    test_util.test_logger(vcenter_uuid)
    vct_ops.sync_vcenter(vcenter_uuid)
    # Give the sync a moment to settle before reading the DB back.
    time.sleep(5)
    test_util.test_dsc('check volumes after synchronizing vcenter')
    db_volume = test_lib.lib_get_volume_by_uuid(volume.get_volume().uuid)
    db_volume1 = test_lib.lib_get_volume_by_uuid(volume1.get_volume().uuid)
    if db_volume.status != 'Ready' or db_volume1.status != 'Deleted':
        test_util.test_fail("check data volumes fail after synchronizing vcenter")
    #connect vcenter
    # Local imports: pyVmomi/pyVim are only needed by this vCenter test.
    import ssl
    from pyVmomi import vim
    import atexit
    from pyVim import connect
    import zstackwoodpecker.zstack_test.vcenter_checker.zstack_vcenter_vm_checker as vm_checker
    vcenter_password = os.environ['vcenterpwd']
    vcenter_server = os.environ['vcenter']
    vcenter_username = os.environ['vcenteruser']
    # Disable certificate verification — test-lab vCenters use self-signed certs.
    sslContext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    sslContext.verify_mode = ssl.CERT_NONE
    SI = connect.SmartConnect(host=vcenter_server, user=vcenter_username, pwd=vcenter_password, port=443, sslContext=sslContext)
    if not SI:
        test_util.test_fail("Unable to connect to the vCenter")
    content = SI.RetrieveContent()
    # Translate the ZStack installPath "[datastore] rel/path" into an absolute
    # path on the ESXi host by joining the datastore URL with the relative part.
    volume_installPath = vc_ps.url.split('//')[1] + db_volume.installPath.split('[' + vc_ps.name + ']')[1].lstrip()
    test_util.test_logger(volume_installPath)
    # Remove the volume file behind ZStack's back, directly on the host.
    cmd = 'rm -f %s' % volume_installPath
    vchost_user = os.environ['vchostUser']
    vchost_password = os.environ['vchostpwd']
    result = test_lib.lib_execute_ssh_cmd(vc_host, vchost_user, vchost_password, cmd, 180)
    atexit.register(connect.Disconnect, SI)
    test_util.test_dsc('Sync vcenter')
    vct_ops.sync_vcenter(vcenter_uuid)
    time.sleep(5)
    # After the second sync the out-of-band-deleted volume must vanish from DB.
    db_volume = test_lib.lib_get_volume_by_uuid(volume.get_volume().uuid)
    if db_volume:
        test_util.test_fail("check data volumes fail after synchronizing vcenter")
    #cleanup
    vm.destroy()
    vm.expunge()
    volume1.expunge()
    test_util.test_pass("Test sync volume in vcenter passed.")
def test():
    """Host-maintenance test: colocate two VMs (one with a data volume) on one
    host, put that host into maintenance mode (which live-migrates the VMs
    away), re-enable it, migrate the VMs back, and verify everything checks
    out at each step. Requires shared storage and live-migration support.
    """
    vm1 = test_stub.create_vr_vm('maintain_host_vm1', 'imageName_s', 'l3VlanNetwork2')
    test_obj_dict.add_vm(vm1)
    vm2 = test_stub.create_vr_vm('maintain_host_vm2', 'imageName_s', 'l3VlanNetwork2')
    test_obj_dict.add_vm(vm2)
    vm1.check()
    vm2.check()
    if not test_lib.lib_check_vm_live_migration_cap(vm1.vm) or not test_lib.lib_check_vm_live_migration_cap(vm2.vm):
        test_util.test_skip('skip migrate if live migrate not supported')
    # Local storage cannot live-migrate a VM carrying a data volume; skip.
    ps = test_lib.lib_get_primary_storage_by_uuid(vm1.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('skip migrate vm with data volume if localstorate is used')
    ps = test_lib.lib_get_primary_storage_by_uuid(vm2.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('skip migrate vm with data volume if localstorate is used')
    test_util.test_dsc('Create volume and check')
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    test_util.test_dsc('Attach volume and check')
    volume.attach(vm1)
    volume.check()
    # Pick a random enabled+connected host in vm1's cluster as the
    # maintenance target and gather both VMs onto it first.
    current_host1 = test_lib.lib_get_vm_host(vm1.vm)
    conditions = res_ops.gen_query_conditions('clusterUuid', '=', vm1.vm.clusterUuid)
    conditions = res_ops.gen_query_conditions('state', '=', host_header.ENABLED, conditions)
    conditions = res_ops.gen_query_conditions('status', '=', host_header.CONNECTED, conditions)
    all_hosts = res_ops.query_resource(res_ops.HOST, conditions)
    if len(all_hosts) <= 1:
        test_util.test_fail('Not available host to do maintenance, since there is only %s host' % len(all_hosts))
    target_host = random.choice(all_hosts)
    if current_host1.uuid != target_host.uuid:
        vm1.migrate(target_host.uuid)
    current_host2 = test_lib.lib_get_vm_host(vm2.vm)
    if current_host2.uuid != target_host.uuid:
        vm2.migrate(target_host.uuid)
    new_host = test_lib.lib_get_vm_host(vm1.vm)
    if new_host.uuid != target_host.uuid:
        test_util.test_fail('VM did not migrate to target [host:] %s, but to [host:] %s' % (target_host.uuid, new_host.uuid))
    volume.check()
    # Maintenance mode should evacuate both VMs off the target host.
    host = test_kvm_host.ZstackTestKvmHost()
    host.set_host(target_host)
    host.maintain()
    #need to update vm's inventory, since they will be changed by maintenace mode
    vm1.update()
    vm2.update()
    vm1.check()
    vm2.check()
    volume.check()
    # Re-enable the host and wait for it to reconnect before migrating back.
    host.change_state(test_kvm_host.ENABLE_EVENT)
    if not linux.wait_callback_success(is_host_connected, host.get_host().uuid, 120):
        test_util.test_fail('host status is not changed to connected, after changing its state to Enable')
    vm1.migrate(target_host.uuid)
    vm2.migrate(target_host.uuid)
    vm1.check()
    vm2.check()
    volume.check()
    vm1.destroy()
    test_obj_dict.rm_vm(vm1)
    vm2.destroy()
    test_obj_dict.rm_vm(vm2)
    volume.delete()
    test_obj_dict.rm_volume(volume)
    test_util.test_pass('Maintain Host Test Success')
def test():
    """Cold-migrate a data volume that was created from a volume template
    (itself made from another data volume), after stacking two snapshots on
    it. Local storage only.
    """
    global test_obj_dict
    #volume_creation_option = test_util.VolumeOption()
    #test_util.test_dsc('Create volume and check')
    #disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    #volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    #volume = test_stub.create_volume(volume_creation_option)
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, \
            None, fields=['uuid'])
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")
    # Source data volume that will be turned into a template.
    volume_creation_option = test_util.VolumeOption()
    test_util.test_dsc('Create volume and check')
    disk_offering = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('smallDiskOfferingName'))
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume1 = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume1)
    volume1.check()
    volume_uuid = volume1.volume.uuid
    test_util.test_dsc('Create vm and check')
    vm = test_stub.create_vr_vm('migrate_volume_vm', 'imageName_net', 'l3VlanNetwork2')
    test_obj_dict.add_vm(vm)
    vm.check()
    vm_uuid = vm.vm.uuid
    # Attach/detach so the volume is initialized, then template it while the
    # VM is stopped.
    volume1.attach(vm)
    volume1.detach(vm_uuid)
    vm.stop()
    image_obj = volume1.create_template([bss[0].uuid])
    vm.start()
    host_uuid = vm.vm.hostUuid
    ps = test_lib.lib_get_primary_storage_by_uuid(
        vm.get_vm().allVolumes[0].primaryStorageUuid)
    # Second data volume instantiated from the template on the VM's host.
    volume2 = image_obj.create_data_volume(ps.uuid, 'volumeName', host_uuid)
    test_obj_dict.add_volume(volume2)
    volume2.check()
    # volume_uuid is rebound here: everything below works on volume2.
    volume_uuid = volume2.volume.uuid
    ps = test_lib.lib_get_primary_storage_by_uuid(
        vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')
    # Two snapshots on the template-derived volume before migrating it.
    snapshots = test_obj_dict.get_volume_snapshot(volume_uuid)
    snapshots.set_utility_vm(vm)
    snapshots.create_snapshot('create_snapshot1')
    snapshots.check()
    snapshots.create_snapshot('create_snapshot2')
    snapshots.check()
    target_host = test_lib.lib_find_random_host_by_volume_uuid(volume_uuid)
    target_host_uuid = target_host.uuid
    vol_ops.migrate_volume(volume_uuid, target_host_uuid)
    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass(
        'Cold migrate Data Volume from Template with Snapshot Test Success')
def test():
    """Exercise snapshot trees on both a root volume and a data volume while
    creating image templates from the root volume and from a root snapshot,
    booting VMs from each template to validate them.

    On local storage the second half is skipped (templates/volumes cannot be
    created from backed-up snapshots there).
    """
    primary_storage_list = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
    for ps in primary_storage_list:
        if ps.type == "AliyunNAS":
            test_util.test_skip('The test is not supported by AliyunNAS primary storage.')
    test_util.test_dsc('Create original vm')
    vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm)
    # vm1 serves as the "utility vm" that mounts snapshots for checking.
    vm1 = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm1)
    test_util.test_dsc('Create Data Volume obj.')
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_name('volume for create both root and data volume snapshot')
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.attach(vm)
    test_util.test_dsc('Construct root volume obj.')
    vm_root_volume_inv = test_lib.lib_get_root_volume(vm.get_vm())
    #root_volume = zstack_vol_header.ZstackTestVolume()
    #root_volume.set_volume(vm_root_volume_inv)
    #root_volume.set_target_vm(vm)
    #root_volume.set_state(vol_header.ATTACHED)
    root_volume_uuid = vm_root_volume_inv.uuid
    root_image_uuid = vm_root_volume_inv.rootImageUuid
    vm_img_inv = test_lib.lib_get_image_by_uuid(root_image_uuid)
    test_util.test_dsc('Stop vm before create snapshot.')
    vm.stop()
    test_util.test_dsc('create snapshot')
    # Three snapshots on the data volume; snapshot1 is revisited later.
    snapshots_data = test_obj_dict.get_volume_snapshot(volume.get_volume().uuid)
    snapshots_data.set_utility_vm(vm1)
    snapshots_data.create_snapshot('create_data_snapshot1')
    snapshot1 = snapshots_data.get_current_snapshot()
    snapshots_data.create_snapshot('create_data_snapshot2')
    snapshots_data.create_snapshot('create_data_snapshot3')
    #snapshots_root = zstack_sp_header.ZstackVolumeSnapshot()
    #snapshots_root.set_target_volume(root_volume)
    #test_obj_dict.add_volume_snapshot(snapshots_root)
    # Three snapshots on the root volume; the newest one is deleted at once.
    snapshots_root = test_obj_dict.get_volume_snapshot(vm_root_volume_inv.uuid)
    snapshots_root.set_utility_vm(vm1)
    snapshots_root.create_snapshot('create_root_snapshot1')
    snapshots_root.create_snapshot('create_root_snapshot2')
    snapshot2 = snapshots_root.get_current_snapshot()
    snapshots_root.create_snapshot('create_root_snapshot3')
    snapshot3 = snapshots_root.get_current_snapshot()
    test_util.test_dsc('delete snapshot3 and create image tempalte from root')
    snapshots_root.delete_snapshot(snapshot3)
    # Template built directly from the (stopped) VM's root volume.
    image_option = test_util.ImageOption()
    image_option.set_name('creating_image_from_root_volume_after_creating_sp')
    image_option.set_guest_os_type(vm_img_inv.guestOsType)
    image_option.set_bits(vm_img_inv.bits)
    image_option.set_root_volume_uuid(root_volume_uuid)
    backup_storage_list = test_lib.lib_get_backup_storage_list_by_vm(vm.get_vm())
    bs_uuid_list = []
    for bs in backup_storage_list:
        bs_uuid_list.append(bs.uuid)
    image_option.set_backup_storage_uuid_list(bs_uuid_list)
    test_util.test_dsc('create image template from root volume')
    image2 = zstack_img_header.ZstackTestImage()
    image2.set_creation_option(image_option)
    image2.create()
    test_obj_dict.add_image(image2)
    image2.check()
    image2_uuid = image2.get_image().uuid
    test_util.test_dsc('create vm2 with new created template and check')
    vm_creation_option = vm.get_creation_option()
    vm_creation_option.set_image_uuid(image2_uuid)
    vm2 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm2)
    vm2.check()
    vm2.destroy()
    test_obj_dict.rm_vm(vm2)
    ps_uuid = vm_root_volume_inv.primaryStorageUuid
    ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid)
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        # LOCAL Storage do not support create volume and template from backuped snapshot
        test_lib.lib_robot_cleanup(test_obj_dict)
        test_util.test_pass('Create image from root volume with creating/destroying Snapshot Success')
    #check data snapshots
    # Revert to snapshot1 and branch the data-volume snapshot tree.
    snapshots_data.use_snapshot(snapshot1)
    snapshots_data.create_snapshot('create_snapshot1.1.1')
    snapshots_data.create_snapshot('create_snapshot1.1.2')
    test_util.test_dsc('create snapshot4 and finally delete all snapshots_root')
    snapshots_root.create_snapshot('create_snapshot4')
    snapshot4 = snapshots_root.get_current_snapshot()
    #snapshots_root.backup_snapshot(snapshot4)
    snapshots_root.check()
    #vm.destroy()
    #test_obj_dict.rm_vm(vm)
    test_util.test_dsc('create image template2 from root snapshot')
    # Second template, this time built from a root-volume snapshot.
    image_option.set_root_volume_uuid(snapshot4.get_snapshot().uuid)
    snapshot4.set_image_creation_option(image_option)
    image3 = snapshot4.create_image_template()
    test_obj_dict.add_image(image3)
    image3.check()
    image3_uuid = image3.get_image().uuid
    test_util.test_dsc('create vm3 with new created template and check')
    vm_creation_option.set_image_uuid(image3_uuid)
    vm3 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm3)
    vm3.check()
    vm3.destroy()
    test_obj_dict.rm_vm(vm3)
    #check data snapshots
    snapshots_data.use_snapshot(snapshot1)
    snapshots_data.create_snapshot('create_snapshot1.2.1')
    snapshots_data.check()
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Create image from root volume with creating/destroying Snapshot Success')
def test():
    """VM HA on host failure: create a NeverStop VM on a non-management host,
    take that host's management NIC down for ~3 minutes, and verify the VM is
    restarted on a different host by the HA self-fencer.

    Requires HA to be enabled, shared storage, and live-migration capability.
    Globals hold host/timeout state for the suite's error cleanup hooks.
    """
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")
    # Tighten the self-fencer so the failure is detected quickly; original
    # values are saved and restored at the end of the happy path.
    # NOTE(review): a mid-test failure skips the restore — confirm the suite's
    # env-recover hook covers it.
    max_attempts = test_lib.lib_get_ha_selffencer_maxattempts()
    test_lib.lib_set_ha_selffencer_maxattempts('3')
    storagechecker_timeout = test_lib.lib_get_ha_selffencer_storagechecker_timeout()
    test_lib.lib_set_ha_selffencer_storagechecker_timeout('5')
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_s')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    #l3_name = os.environ.get('l3NoVlanNetworkName1')
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    # Make sure the virtual routers of the L3 are up before creating the VM.
    vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    for vr in vrs:
        if test_lib.lib_is_vm_running(vr) != True:
            vm_ops.start_vm(vr.uuid)
    time.sleep(60)
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    # Pick a connected, enabled host that is NOT the management node, so the
    # network outage below does not take down the management plane.
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', os.environ.get('hostIp'), conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    if not test_lib.lib_check_vm_live_migration_cap(vm.vm):
        test_util.test_skip('skip ha if live migrate not supported')
    ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on localstorage')
    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    host_uuid = test_lib.lib_find_host_by_vm(vm.get_vm()).uuid
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
    # Simulate host failure: drop the management NIC for 180s via ssh.
    l2_network_interface = os.environ.get('l2ManagementNetworkInterface')
    cmd = "ifdown %s && sleep 180 && ifup %s" % (l2_network_interface, l2_network_interface)
    host_username = os.environ.get('hostUsername')
    host_password = os.environ.get('hostPassword')
    rsp = test_lib.lib_execute_ssh_cmd(host_ip, host_username, host_password, cmd, 180)
    if not rsp:
        test_util.test_logger("host is expected to shutdown after its network down for a while")
    test_util.test_logger("wait for 600 seconds")
    time.sleep(600)
    vm.update()
    # HA must have restarted the VM on a different host.
    if test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp == host_ip:
        test_util.test_fail("VM is expected to start running on another host")
    vm.set_state(vm_header.RUNNING)
    vm.check()
    vm.destroy()
    # Restore the self-fencer settings and recover the fenced host.
    test_lib.lib_set_ha_selffencer_maxattempts(max_attempts)
    test_lib.lib_set_ha_selffencer_storagechecker_timeout(storagechecker_timeout)
    os.system('bash -ex %s %s' % (os.environ.get('hostRecoverScript'), host_ip))
    host_ops.reconnect_host(host_uuid)
    test_util.test_pass('Test VM ha on host failure Success')
def test():
    """Snapshot-on-derived-volume scenario.

    Create a data volume from a template that was itself created from a
    VM's root volume, then take/delete snapshots on that volume and verify
    the snapshot state.
    """
    test_util.test_dsc('Create original vm')
    src_vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(src_vm)
    src_vm.stop()

    # A second running VM is used as the snapshot utility VM.
    util_vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(util_vm)
    util_vm.check()

    test_util.test_dsc('create snapshot for root volume')
    root_vol_inv = test_lib.lib_get_root_volume(src_vm.get_vm())
    root_sp_group = test_obj_dict.get_volume_snapshot(root_vol_inv.uuid)
    root_sp_group.set_utility_vm(util_vm)
    test_obj_dict.add_volume_snapshot(root_sp_group)
    root_sp_group.create_snapshot('create_root_snapshot1')

    test_util.test_dsc('create image template from root volume')
    root_vol_uuid = root_vol_inv.uuid
    base_image_uuid = root_vol_inv.rootImageUuid
    base_img_inv = test_lib.lib_get_image_by_uuid(base_image_uuid)
    img_opt = test_util.ImageOption()
    img_opt.set_name('creating_image_from_root_volume')
    img_opt.set_guest_os_type(base_img_inv.guestOsType)
    img_opt.set_bits(base_img_inv.bits)
    img_opt.set_root_volume_uuid(root_vol_uuid)
    bs_invs = test_lib.lib_get_backup_storage_list_by_vm(src_vm.get_vm())
    img_opt.set_backup_storage_uuid_list([bs_inv.uuid for bs_inv in bs_invs])

    tmp_image = zstack_img_header.ZstackTestImage()
    tmp_image.set_creation_option(img_opt)
    tmp_image.create()
    # Only track the image for cleanup when deletion is not immediate.
    if test_lib.lib_get_delete_policy('image') != zstack_header.DELETE_DIRECT:
        test_obj_dict.add_image(tmp_image)
    tmp_image.delete()

    test_util.test_dsc('Construct volume obj.')
    root_vol_obj = zstack_volume_header.ZstackTestVolume()
    root_vol_obj.set_volume(test_lib.lib_get_root_volume(src_vm.get_vm()))
    root_vol_obj.set_state(vol_header.ATTACHED)

    test_util.test_dsc('Create volume template')
    bs_candidates = test_lib.lib_get_backup_storage_list_by_vm(src_vm.get_vm())
    data_tmpl = root_vol_obj.create_template([bs_candidates[0].uuid], 'new_data_template_by_root_volume')
    if test_lib.lib_get_delete_policy('image') != zstack_header.DELETE_DIRECT:
        test_obj_dict.add_image(data_tmpl)

    # Remember the VM's host before destroying it; local storage needs an
    # explicit host for the volume that will be created from the template.
    src_host_uuid = test_lib.lib_get_vm_host(src_vm.get_vm()).uuid
    src_vm.destroy()

    test_util.test_dsc('Create volume from template')
    target_ps_uuid = src_vm.get_vm().allVolumes[0].primaryStorageUuid
    target_ps = test_lib.lib_get_primary_storage_by_uuid(target_ps_uuid)
    if target_ps.type == inventory.LOCAL_STORAGE_TYPE:
        data_volume = data_tmpl.create_data_volume(target_ps_uuid, 'new_data_volume_from_template1', src_host_uuid)
    else:
        data_volume = data_tmpl.create_data_volume(target_ps_uuid, 'new_data_volume_from_template1')
    test_obj_dict.add_volume(data_volume)
    data_tmpl.delete()

    test_util.test_dsc('create snapshot')
    sp_group = zstack_sp_header.ZstackVolumeSnapshot()
    sp_group.set_target_volume(data_volume)
    sp_group.set_utility_vm(util_vm)
    test_obj_dict.add_volume_snapshot(sp_group)
    sp_group.create_snapshot('create_snapshot1')
    sp1 = sp_group.get_current_snapshot()
    sp_group.create_snapshot('create_snapshot2')
    sp2 = sp_group.get_current_snapshot()
    sp_group.create_snapshot('create_snapshot3')
    sp3 = sp_group.get_current_snapshot()

    test_util.test_dsc('delete snapshot3')
    sp_group.delete_snapshot(sp3)
    sp_group.check()
    test_obj_dict.rm_volume_snapshot(sp_group)

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Create snapshot from a volume, which is created from data volume template, which is create from a root volume Success')
def test():
    """Cold-stop the management-node host and verify MN/VM recovery.

    Creates a NeverStop-capable VM on the MN host, force powers the host
    off and back on (scenario environment), restarts zstack-ctl on the MN,
    and verifies the VM is reported Stopped and that the MN can still
    create/check/destroy VMs afterwards.
    """
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout

    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    #l3_name = os.environ.get('l3NoVlanNetworkName1')
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    # Remove all virtual routers so none lands on the MN host we will kill.
    test_lib.clean_up_all_vr()
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName

    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    # The test VM must run on the MN host itself.
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '=', mn_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    vm.check()
    test_stub.ensure_host_has_no_vr(host_uuid)

    if not test_lib.lib_check_vm_live_migration_cap(vm.vm):
        vm.destroy()
        test_util.test_skip('skip ha if live migrate not supported')

    ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        vm.destroy()
        test_util.test_skip('Skip test on localstorage')

    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    host_port = test_lib.lib_get_host_port(host_ip)
    test_util.test_logger("host %s is disconnecting" % (host_ip))

    # FIX: test_host must be initialised; if no scenario host matched
    # host_ip the 'if not test_host' check below raised NameError instead
    # of the intended test_fail.
    test_host = None
    host_list = test_stub.get_sce_hosts(test_lib.all_scenario_config, test_lib.scenario_file)
    for host in host_list:
        if host.ip_ == host_ip:
            test_host = host
            break
    if not test_host:
        test_util.test_fail('there is no host with ip %s in scenario file.' % (host_ip))

    # Cold power-off the host, then power it back on and restore its vlans.
    test_stub.stop_host(test_host, test_lib.all_scenario_config, 'cold')
    time.sleep(30)
    test_stub.start_host(test_host, test_lib.all_scenario_config)
    test_stub.recover_host_vlan(test_host, test_lib.all_scenario_config, test_lib.deploy_config)
    time.sleep(120)

    # Restart the management node services on the recovered host.
    cmd = "nohup zstack-ctl start &"
    host_username = os.environ.get('hostUsername')
    host_password = os.environ.get('hostPassword')
    if not test_lib.lib_execute_ssh_cmd(mn_ip, host_username, host_password, cmd, timeout=300):
        test_util.test_fail("CMD:%s execute failed on %s" % (cmd, mn_ip))
    time.sleep(120)

    # After a cold host stop the VM should be reported as Stopped.
    cond = res_ops.gen_query_conditions('uuid', '=', vm.vm.uuid)
    if not res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == "Stopped":
        test_util.test_fail("vm is not stopped as expected.")
    vm.destroy()

    # Recreate the VM to confirm the MN works normally after the restart.
    time.sleep(20)
    vm.create()
    vm.check()
    vm.destroy()

    test_util.test_pass('Test checking vm status after force stop and start success')
def test():
    """Exercise a data volume created from a volume template through
    template deletion/expunge and volume delete/recover, verifying after
    each step that the volume can still be migrated (on local storage)
    and attached/detached.
    """
    global vm
    vm = test_stub.create_vm()
    test_util.test_logger('Create VM Success')
    data_volume = test_stub.create_volume()
    test_util.test_logger('Create volume Success')
    data_volume.attach(vm)
    test_util.test_logger('attach volume Success')

    zone_uuid = vm.get_vm().zoneUuid
    bs_uuid_list = test_lib.lib_get_backup_storage_uuid_list_by_zone(zone_uuid)
    vol_template = test_stub.create_data_volume_template_from_volume(
        data_volume.get_volume().uuid, bs_uuid_list, "vol_temp_for_volume")
    imageUrl = test_stub.export_image_from_backup_storage(vol_template.uuid, bs_uuid_list[0])
    vol = test_stub.create_data_volume_from_template(
        vol_template.uuid, data_volume.get_volume().primaryStorageUuid,
        "vol_from_template", vm.get_vm().hostUuid)
    vol_uuid = vol.uuid
    test_util.test_logger('create volume from volume template Success')

    test_stub.delete_volume_image(vol_template.uuid)
    ps = test_lib.lib_get_primary_storage_by_uuid(data_volume.get_volume().primaryStorageUuid)

    def _migrate_and_reattach(target_uuid):
        # On local storage, bounce the volume across every migratable host
        # and bring it back to the VM's host; then verify attach/detach
        # still works.
        if ps.type == 'LocalStorage':
            for candidate in test_stub.get_local_storage_volume_migratable_hosts(target_uuid):
                test_stub.migrate_local_storage_volume(target_uuid, candidate.uuid)
            test_stub.migrate_local_storage_volume(target_uuid, vm.get_vm().hostUuid)
        test_lib.lib_attach_volume(target_uuid, vm.get_vm().uuid)
        test_lib.lib_detach_volume(target_uuid)

    # 1) after the template image is deleted
    _migrate_and_reattach(vol_uuid)
    # 2) after the template image is expunged
    test_stub.expunge_image(vol_template.uuid)
    _migrate_and_reattach(vol_uuid)
    # 3) after the volume itself is deleted and recovered
    test_stub.delete_volume(vol_uuid)
    test_stub.recover_volume(vol_uuid)
    _migrate_and_reattach(vol_uuid)

    # Rebuild the template, expunge the volume, and create it again.
    vol_template = test_stub.create_data_volume_template_from_volume(
        data_volume.get_volume().uuid, bs_uuid_list, "vol_temp_for_volume")
    test_stub.delete_volume(vol_uuid)
    test_stub.expunge_volume(vol_uuid)
    vol = test_stub.create_data_volume_from_template(
        vol_template.uuid, data_volume.get_volume().primaryStorageUuid,
        "vol_from_template", vm.get_vm().hostUuid)
    vol_uuid = vol.uuid
    test_util.test_logger('create volume from volume template Success')
def check(self):
    '''
    Check snapshot tree correctness: walk the backing chains actually
    present on the storage side (Ceph / SharedBlock / LocalStorage) and
    compare them with the snapshot tree ZStack reports.

    To be noticed. The tree depth changing will impact the snapshots who
    have been created. So if the snapshots are created before
    incrementalSnapshot.maxNum is changed. The checker results will be
    untrustable.

    Fixes over the previous revision (LocalStorage branch only):
      * two comparisons read `sp_covered == '1'` — a string compared with
        an int counter, which can never be true; they now use `== 1`,
        consistent with the Ceph/SharedBlock branches.
      * the per-iteration flag was initialised as `image_cache` but read
        as `image_cached`, raising NameError for non-Root volumes; the
        initialiser is now spelled `image_cached`.
    '''
    import json
    import zstacklib.utils.jsonobject as jsonobject
    sp_tree_actual = []   # backing chains discovered on storage hosts
    sp_tree_zs = []       # backing chains from ZStack's snapshot tree
    super(zstack_kvm_snapshot_tree_checker, self).check()
    snapshots = self.test_obj.get_snapshot_list()
    if not self.test_obj.get_snapshot_head():
        test_util.test_logger('Snapshot is not created, skipped checking')
        return self.judge(self.exp_result)

    volume_obj = self.test_obj.get_target_volume()
    volume_uuid = volume_obj.get_volume().uuid
    if volume_obj.get_state() == vl_header.DELETED:
        test_util.test_logger('Checker result: target volume is deleted, can not get get and check snapshot tree status')
        return self.judge(self.exp_result)

    if volume_obj.get_target_vm():
        # A Root volume disappears with its VM.
        if volume_obj.get_volume().type == 'Root' and volume_obj.get_target_vm().get_state() == vm_header.DESTROYED:
            test_util.test_logger('Checker result: target volume is deleted, can not get get and check snapshot tree status')
            return self.judge(self.exp_result)

    ps_uuid = volume_obj.get_volume().primaryStorageUuid
    ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid)
    # Only Ceph has raw image format for non-Root volume
    if ps.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
        for snapshot in snapshots:
            backing_list = []
            backing_file = ''
            sp_covered = 0
            activate_host = ''
            devPath = snapshot.get_snapshot().primaryStorageInstallPath.encode('utf-8').split("ceph://")[1]
            volumePath = snapshot.get_snapshot().primaryStorageInstallPath.encode('utf-8').split("ceph://")[1].split("@")[0]
            for i in sp_tree_actual:
                if devPath in i:
                    test_util.test_logger('%s already in sp list %s' % (devPath, backing_list))
                    sp_covered = 1
            if sp_covered == 1:
                continue
            else:
                test_util.test_logger('%s not in current sp list, start checking its backing chain' % (devPath))
                backing_list.append(devPath)
            # Find a host that can see the rbd image.
            cmd_info = "rbd info %s" % devPath
            for host in test_lib.lib_find_hosts_by_ps_uuid(ps_uuid):
                result = test_lib.lib_execute_ssh_cmd(host.managementIp, 'root', 'password', cmd_info)
                if result:
                    activate_host = host.managementIp
                    break
            if not activate_host:
                test_util.test_logger('No activate host found for %s' % (snapshot))
                return self.judge(self.exp_result)
            while True:
                cmd_info = "rbd info %s" % devPath
                result = test_lib.lib_execute_ssh_cmd(activate_host, 'root', 'password', cmd_info)
                if result:
                    tmp_list = get_snaps_for_raw_by_ip(volumePath, activate_host)
                else:
                    test_util.test_logger('No activate host found for %s' % (snapshot))
                    return self.judge(self.exp_result)
                if tmp_list:
                    # Look for this snapshot's name among the rbd snaps.
                    for i in tmp_list:
                        i = i.replace("\n", "")
                        if i == snapshot.get_snapshot().primaryStorageInstallPath.split("ceph://")[1].split("@")[1]:
                            test_util.test_logger('%s is found for volume %s' % (devPath, volumePath))
                            sp_covered = 1
                elif not tmp_list:
                    test_util.test_logger('No snapshots found for volume %s' % (volumePath))
                    return self.judge(False)
                if sp_covered == 1:
                    break
                else:
                    test_util.test_logger('%s is not found for volume %s' % (devPath, volumePath))
                    return self.judge(False)
            sp_covered = 0
            # Merge the discovered chain into sp_tree_actual, de-duplicating
            # prefixes and keeping only the longest chain per branch.
            if not sp_tree_actual:
                test_util.test_logger('current sp list is empty, add %s into it' % (backing_list))
                sp_tree_actual.append(backing_list)
                continue
            for i in sp_tree_actual:
                if backing_list == i:
                    sp_covered = 1
            if sp_covered == 1:
                test_util.test_logger('%s already in current sp list %s, no need to add it anymore' % (backing_list, sp_tree_actual))
                continue
            else:
                test_util.test_logger('%s not in current sp list %s, start comparing detailed list items' % (backing_list, sp_tree_actual))
                for i in sp_tree_actual:
                    count = min(len(backing_list), len(i)) - 1
                    tmp_count = 0
                    while tmp_count <= count:
                        if backing_list[tmp_count] == i[tmp_count]:
                            tmp_count += 1
                            sp_covered = 1
                            continue
                        elif backing_list[tmp_count] != i[tmp_count]:
                            sp_covered = 0
                            break
                    if sp_covered == 0:
                        if i == sp_tree_actual[-1]:
                            test_util.test_logger('%s not in current sp list %s, add it into sp list' % (backing_list, sp_tree_actual))
                            sp_tree_actual.append(backing_list)
                            break
                    elif sp_covered == 1 and len(backing_list) > len(i):
                        test_util.test_logger('%s is the superset of the list %s in current sp list %s, update current sp list' % (backing_list, i, sp_tree_actual))
                        sp_tree_actual.remove(i)
                        sp_tree_actual.append(backing_list)
                        break
                    elif sp_covered == 1 and len(backing_list) <= len(i):
                        test_util.test_logger('%s already in current sp list %s, no need to add it anymore' % (backing_list, sp_tree_actual))
                        break
        test_util.test_logger('sp_tree_actual is %s' % (sp_tree_actual))
        # Build the expected chains from ZStack's snapshot tree.
        vol_trees = test_lib.lib_get_volume_snapshot_tree(volume_uuid)
        tree_allowed_depth = cfg_ops.get_global_config_value('volumeSnapshot', 'incrementalSnapshot.maxNum')
        for vol_tree in vol_trees:
            tree = json.loads(jsonobject.dumps(vol_tree))['tree']
            for leaf_node in get_leaf_nodes(tree):
                backing_list = []
                backing_file = ''
                current_node = ''
                backing_file = leaf_node['inventory']['primaryStorageInstallPath'].split("ceph://")[1]
                backing_list.append(backing_file.encode('utf-8'))
                current_node = leaf_node
                while True:
                    parent_node = get_parent_node(tree, current_node)
                    if not parent_node:
                        break
                    backing_file = parent_node['inventory']['primaryStorageInstallPath'].split("ceph://")[1]
                    backing_list.append(backing_file.encode('utf-8'))
                    if parent_node.has_key('parentUuid'):
                        current_node = parent_node
                        continue
                    else:
                        break
                backing_list = list(reversed(backing_list))
                sp_tree_zs.append(backing_list)
        test_util.test_logger('sp_tree_zs is %s' % (sp_tree_zs))
        test_util.test_logger('compare the 2 sp lists - %s and %s' % (sp_tree_actual, sp_tree_zs))
        sp_covered = 0
        if len(sp_tree_actual) != len(sp_tree_zs):
            test_util.test_logger('%s is not same length as %s' % (sp_tree_actual, sp_tree_zs))
            return self.judge(False)
        for i in sp_tree_actual:
            if i in sp_tree_zs:
                sp_covered = 1
                test_util.test_logger('%s is in zs sp list %s' % (i, sp_tree_zs))
                if i == sp_tree_actual[-1]:
                    test_util.test_logger('all the items in %s are in zs sp list %s' % (sp_tree_actual, sp_tree_zs))
                continue
            elif i not in sp_tree_zs:
                sp_covered = 0
                test_util.test_logger('%s is not in zs sp list %s' % (i, sp_tree_zs))
                return self.judge(False)
    elif ps.type == 'SharedBlock':
        for snapshot in snapshots:
            backing_list = []
            backing_file = ''
            sp_covered = 0
            devPath = "/dev/" + snapshot.get_snapshot().primaryStorageInstallPath.encode('utf-8').split("sharedblock://")[1]
            for i in sp_tree_actual:
                if devPath in i:
                    test_util.test_logger('%s already in sp list %s' % (devPath, backing_list))
                    sp_covered = 1
            if sp_covered == 1:
                continue
            else:
                test_util.test_logger('%s not in current sp list, start checking its backing chain' % (devPath))
                backing_list.append(devPath)
            while True:
                activate_host = ''
                image_cached = 0
                cmd_info = "lvs --nolocking --noheadings %s | awk '{print $3}'" % devPath
                cmd_activate = "lvchange -a y %s" % devPath
                cmd_unactivate = "lvchange -a n %s" % devPath
                # Prefer a host where the LV is already active; otherwise
                # activate it temporarily on the first host.
                for host in test_lib.lib_find_hosts_by_ps_uuid(ps_uuid):
                    result = test_lib.lib_execute_ssh_cmd(host.managementIp, 'root', 'password', cmd_info)
                    if "-a-" in result or "-ao-" in result:
                        activate_host = host.managementIp
                        backing_file = get_qcow_backing_file_by_ip(devPath, activate_host)
                        break
                if not activate_host:
                    activate_host = test_lib.lib_find_hosts_by_ps_uuid(ps_uuid)[0].managementIp
                    test_lib.lib_execute_ssh_cmd(activate_host, 'root', 'password', cmd_activate)
                    backing_file = get_qcow_backing_file_by_ip(devPath, activate_host)
                    test_lib.lib_execute_ssh_cmd(activate_host, 'root', 'password', cmd_unactivate)
                backing_file = backing_file.replace("\n", "")
                if volume_obj.get_volume().type == 'Root':
                    for image in test_lib.lib_get_not_vr_images():
                        if image.uuid == backing_file.split("/")[3]:
                            test_util.test_logger('%s is against the Root volume and %s is the last snapshot and its backing file %s is image cache' % (snapshot, devPath, backing_file))
                            image_cached = 1
                if image_cached == 1:
                    break
                if backing_file:
                    # A 40-char path component is an image-cache file from bs.
                    if len(backing_file.split("/")[3]) == 40:
                        test_util.test_logger('%s is against the Data volume and %s is the last snapshot and its backing file %s is image cache from bs' % (snapshot, devPath, backing_file))
                        break
                    else:
                        backing_list.append(backing_file)
                        devPath = backing_file
                else:
                    break
            backing_list = list(reversed(backing_list))
            # Merge the discovered chain into sp_tree_actual (same policy
            # as the Ceph branch).
            if not sp_tree_actual:
                test_util.test_logger('current sp list is empty, add %s into it' % (backing_list))
                sp_tree_actual.append(backing_list)
                continue
            for i in sp_tree_actual:
                if backing_list == i:
                    sp_covered = 1
            if sp_covered == 1:
                test_util.test_logger('%s already in current sp list %s, no need to add it anymore' % (backing_list, sp_tree_actual))
                continue
            else:
                test_util.test_logger('%s not in current sp list %s, start comparing detailed list items' % (backing_list, sp_tree_actual))
                for i in sp_tree_actual:
                    count = min(len(backing_list), len(i)) - 1
                    tmp_count = 0
                    while tmp_count <= count:
                        if backing_list[tmp_count] == i[tmp_count]:
                            tmp_count += 1
                            sp_covered = 1
                            continue
                        elif backing_list[tmp_count] != i[tmp_count]:
                            sp_covered = 0
                            break
                    if sp_covered == 0:
                        if i == sp_tree_actual[-1]:
                            test_util.test_logger('%s not in current sp list %s, add it into sp list' % (backing_list, sp_tree_actual))
                            sp_tree_actual.append(backing_list)
                            break
                    elif sp_covered == 1 and len(backing_list) > len(i):
                        test_util.test_logger('%s is the superset of the list %s in current sp list %s, update current sp list' % (backing_list, i, sp_tree_actual))
                        sp_tree_actual.remove(i)
                        sp_tree_actual.append(backing_list)
                        break
                    elif sp_covered == 1 and len(backing_list) <= len(i):
                        test_util.test_logger('%s already in current sp list %s, no need to add it anymore' % (backing_list, sp_tree_actual))
                        break
        test_util.test_logger('sp_tree_actual is %s' % (sp_tree_actual))
        vol_trees = test_lib.lib_get_volume_snapshot_tree(volume_uuid)
        tree_allowed_depth = cfg_ops.get_global_config_value('volumeSnapshot', 'incrementalSnapshot.maxNum')
        for vol_tree in vol_trees:
            tree = json.loads(jsonobject.dumps(vol_tree))['tree']
            for leaf_node in get_leaf_nodes(tree):
                backing_list = []
                backing_file = ''
                current_node = ''
                backing_file = "/dev/" + leaf_node['inventory']['primaryStorageInstallPath'].split("sharedblock://")[1]
                backing_list.append(backing_file.encode('utf-8'))
                current_node = leaf_node
                while True:
                    parent_node = get_parent_node(tree, current_node)
                    if not parent_node:
                        break
                    backing_file = "/dev/" + parent_node['inventory']['primaryStorageInstallPath'].split("sharedblock://")[1]
                    backing_list.append(backing_file.encode('utf-8'))
                    if parent_node.has_key('parentUuid'):
                        current_node = parent_node
                        continue
                    else:
                        break
                backing_list = list(reversed(backing_list))
                sp_tree_zs.append(backing_list)
        test_util.test_logger('sp_tree_zs is %s' % (sp_tree_zs))
        test_util.test_logger('compare the 2 sp lists - %s and %s' % (sp_tree_actual, sp_tree_zs))
        sp_covered = 0
        if len(sp_tree_actual) != len(sp_tree_zs):
            test_util.test_logger('%s is not same length as %s' % (sp_tree_actual, sp_tree_zs))
            return self.judge(False)
        for i in sp_tree_actual:
            if i in sp_tree_zs:
                sp_covered = 1
                test_util.test_logger('%s is in zs sp list %s' % (i, sp_tree_zs))
                if i == sp_tree_actual[-1]:
                    test_util.test_logger('all the items in %s are in zs sp list %s' % (sp_tree_actual, sp_tree_zs))
                continue
            elif i not in sp_tree_zs:
                sp_covered = 0
                test_util.test_logger('%s is not in zs sp list %s' % (i, sp_tree_zs))
                return self.judge(False)
        # SharedBlock additionally verifies the tree depth limit.
        tree_max_depth = find_tree_max_depth(tree)
        if tree_max_depth > (int(tree_allowed_depth) + 1):
            test_util.test_logger('Checker result: volume: %s snapshot tree: %s depth checking failure. The max allowed depth is : %s. But we get: %s' % (volume_uuid, tree['inventory'].uuid, tree_allowed_depth, str(tree_max_depth - 1)))
            return self.judge(False)
        test_util.test_logger('Checker result: volume: %s snapshot tree depth checking pass. The max allowed depth is : %s. The real snapshot max depth is: %s' % (volume_uuid, tree_allowed_depth, str(tree_max_depth - 1)))
    elif ps.type == "LocalStorage":
        for snapshot in snapshots:
            backing_list = []
            backing_file = ''
            sp_covered = 0
            activate_host = ''
            devPath = snapshot.get_snapshot().primaryStorageInstallPath.encode('utf-8')
            for i in sp_tree_actual:
                if devPath in i:
                    test_util.test_logger('%s already in sp list %s' % (devPath, backing_list))
                    sp_covered = 1
            if sp_covered == 1:
                continue
            else:
                test_util.test_logger('%s not in current sp list, start checking its backing chain' % (devPath))
                backing_list.append(devPath)
            # Find the host that actually holds the local file.
            cmd_info = "ls %s" % devPath
            for host in test_lib.lib_find_hosts_by_ps_uuid(ps_uuid):
                result = test_lib.lib_execute_ssh_cmd(host.managementIp, 'root', 'password', cmd_info)
                if result:
                    activate_host = host.managementIp
                    break
            if not activate_host:
                test_util.test_logger('No activate host found for %s' % (snapshot))
                return self.judge(self.exp_result)
            while True:
                cmd_info = "ls %s" % devPath
                image_cached = 0
                result = test_lib.lib_execute_ssh_cmd(activate_host, 'root', 'password', cmd_info)
                if result:
                    backing_file = get_qcow_backing_file_by_ip(devPath, activate_host)
                else:
                    test_util.test_logger('No activate host found for %s' % (snapshot))
                    return self.judge(self.exp_result)
                backing_file = backing_file.replace("\n", "")
                if volume_obj.get_volume().type == 'Root':
                    for image in test_lib.lib_get_not_vr_images():
                        if image.uuid in backing_file.split("/")[-1]:
                            test_util.test_logger('%s is against the Root volume and %s is the last snapshot and its backing file %s is image cache' % (snapshot, devPath, backing_file))
                            image_cached = 1
                if image_cached == 1:
                    break
                if backing_file:
                    # A 40-char basename is an image-cache file from bs.
                    if len(backing_file.split("/")[-1].split(".")[0]) == 40:
                        test_util.test_logger('%s is against the Data volume and %s is the last snapshot and its backing file %s is image cache from bs' % (snapshot, devPath, backing_file))
                        break
                    else:
                        backing_list.append(backing_file)
                        devPath = backing_file
                else:
                    break
            backing_list = list(reversed(backing_list))
            if not sp_tree_actual:
                test_util.test_logger('current sp list is empty, add %s into it' % (backing_list))
                sp_tree_actual.append(backing_list)
                continue
            for i in sp_tree_actual:
                if backing_list == i:
                    sp_covered = 1
            if sp_covered == 1:
                test_util.test_logger('%s already in current sp list %s, no need to add it anymore' % (backing_list, sp_tree_actual))
                continue
            else:
                test_util.test_logger('%s not in current sp list %s, start comparing detailed list items' % (backing_list, sp_tree_actual))
                for i in sp_tree_actual:
                    count = min(len(backing_list), len(i)) - 1
                    tmp_count = 0
                    while tmp_count <= count:
                        if backing_list[tmp_count] == i[tmp_count]:
                            tmp_count += 1
                            sp_covered = 1
                            continue
                        elif backing_list[tmp_count] != i[tmp_count]:
                            sp_covered = 0
                            break
                    if sp_covered == 0:
                        if i == sp_tree_actual[-1]:
                            test_util.test_logger('%s not in current sp list %s, add it into sp list' % (backing_list, sp_tree_actual))
                            sp_tree_actual.append(backing_list)
                            break
                    elif sp_covered == 1 and len(backing_list) > len(i):
                        test_util.test_logger('%s is the superset of the list %s in current sp list %s, update current sp list' % (backing_list, i, sp_tree_actual))
                        sp_tree_actual.remove(i)
                        sp_tree_actual.append(backing_list)
                        break
                    elif sp_covered == 1 and len(backing_list) <= len(i):
                        test_util.test_logger('%s already in current sp list %s, no need to add it anymore' % (backing_list, sp_tree_actual))
                        break
        test_util.test_logger('sp_tree_actual is %s' % (sp_tree_actual))
        vol_trees = test_lib.lib_get_volume_snapshot_tree(volume_uuid)
        tree_allowed_depth = cfg_ops.get_global_config_value('volumeSnapshot', 'incrementalSnapshot.maxNum')
        for vol_tree in vol_trees:
            tree = json.loads(jsonobject.dumps(vol_tree))['tree']
            for leaf_node in get_leaf_nodes(tree):
                backing_list = []
                backing_file = ''
                current_node = ''
                backing_file = leaf_node['inventory']['primaryStorageInstallPath']
                backing_list.append(backing_file.encode('utf-8'))
                current_node = leaf_node
                while True:
                    parent_node = get_parent_node(tree, current_node)
                    if not parent_node:
                        break
                    backing_file = parent_node['inventory']['primaryStorageInstallPath']
                    backing_list.append(backing_file.encode('utf-8'))
                    if parent_node.has_key('parentUuid'):
                        current_node = parent_node
                        continue
                    else:
                        break
                backing_list = list(reversed(backing_list))
                sp_tree_zs.append(backing_list)
        test_util.test_logger('sp_tree_zs is %s' % (sp_tree_zs))
        test_util.test_logger('compare the 2 sp lists - %s and %s' % (sp_tree_actual, sp_tree_zs))
        sp_covered = 0
        if len(sp_tree_actual) != len(sp_tree_zs):
            test_util.test_logger('%s is not same length as %s' % (sp_tree_actual, sp_tree_zs))
            return self.judge(False)
        for i in sp_tree_actual:
            if i in sp_tree_zs:
                sp_covered = 1
                test_util.test_logger('%s is in zs sp list %s' % (i, sp_tree_zs))
                if i == sp_tree_actual[-1]:
                    test_util.test_logger('all the items in %s are in zs sp list %s' % (sp_tree_actual, sp_tree_zs))
                continue
            elif i not in sp_tree_zs:
                sp_covered = 0
                test_util.test_logger('%s is not in zs sp list %s' % (i, sp_tree_zs))
                return self.judge(False)
    return self.judge(True)
def check(self):
    """Verify that the volume's backing file exists on the vCenter datastore.

    For iSCSI-backed primary storage delegates to check_iscsi(); for
    vCenter primary storage it resolves the datastore's ESX host via
    pyVmomi, then tests file existence over ssh on that host.
    """
    super(zstack_vcenter_volume_file_checker, self).check()
    volume = self.test_obj.volume
    volume_installPath = volume.installPath
    if not volume_installPath:
        test_util.test_logger('Check result: [installPath] is Null for [volume uuid: ] %s. Can not check volume file existence' % volume.uuid)
        return self.judge(False)

    ps_uuid = volume.primaryStorageUuid
    ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid)
    if test_lib.lib_is_ps_iscsi_backend(ps_uuid):
        # NOTE(review): the result of check_iscsi() is discarded here —
        # confirm whether it judges internally or its return should be used.
        self.check_iscsi(volume, volume_installPath, ps)
    elif ps.type == inventory.VCENTER_PRIMARY_STORAGE_TYPE:
        cond = res_ops.gen_query_conditions('volume.uuid', '=', volume.uuid)
        vc_ps = res_ops.query_resource(res_ops.VCENTER_PRIMARY_STORAGE, cond)
        # vc_ps_volume_expunged caches the last-seen PS so an expunged
        # volume (no PS association left in DB) can still be checked.
        global vc_ps_volume_expunged
        if vc_ps:
            vc_ps = vc_ps[0]
            sign = 1
            vc_ps_volume_expunged = vc_ps
        else:
            sign = 0
            vc_ps = vc_ps_volume_expunged
        #connect vcenter, get datastore.host
        import ssl
        from pyVmomi import vim
        import atexit
        from pyVim import connect
        import zstackwoodpecker.zstack_test.vcenter_checker.zstack_vcenter_vm_checker as vm_checker
        vcenter_password = os.environ['vcenterpwd']
        vcenter_server = os.environ['vcenter']
        vcenter_username = os.environ['vcenteruser']
        # NOTE(review): PROTOCOL_SSLv23 with CERT_NONE disables certificate
        # verification — acceptable for a lab checker, but flagged.
        sslContext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        sslContext.verify_mode = ssl.CERT_NONE
        SI = connect.SmartConnect(host=vcenter_server, user=vcenter_username, pwd=vcenter_password, port=443, sslContext=sslContext)
        if not SI:
            test_util.test_fail("Unable to connect to the vCenter")
        content = SI.RetrieveContent()
        datastore = vm_checker.get_obj(content, [vim.Datastore], name=vc_ps.name)
        test_util.test_logger(datastore)
        # datastore.host[0].key stringifies like "vim.HostSystem:host-123";
        # extract the managed-object ref value ("host-123" without quote).
        host = str(datastore.host[0].key)
        host_morval = host.split(':')[1][:-1]
        test_util.test_logger(host_morval)
        atexit.register(connect.Disconnect, SI)

        cond = res_ops.gen_query_conditions('hypervisorType', '=', 'ESX')
        vc_hosts = res_ops.query_resource(res_ops.HOST, cond)
        # NOTE(review): the loop variable is rebound to the management IP on
        # match; if no host matches, vc_host stays the last (truthy) host
        # object and the 'if not vc_host' guard never fires — TODO confirm
        # intended behavior.
        for vc_host in vc_hosts:
            if vc_host.morval == host_morval:
                vc_host = vc_host.managementIp
                break
        if not vc_host:
            return self.judge(False)
        if volume_installPath.startswith('[' + vc_ps.name + ']'):
            test_util.test_logger(vc_ps.url)
            # Only strip the scheme once; when reusing the cached PS
            # (sign == 0) its url was already stripped earlier.
            if sign:
                vc_ps.url = vc_ps.url.split('//')[1]
            test_util.test_logger(vc_ps.url)
            # Convert "[datastore] path/to/file" to an absolute host path.
            volume_installPath = volume_installPath.split('[' + vc_ps.name + ']')[1].lstrip()
            volume_installPath = vc_ps.url + volume_installPath
        file_exist = "file_exist"
        cmd = '[ -f %s ] && echo %s' % (volume_installPath, file_exist)
        vchost_user = os.environ['vchostUser']
        vchost_password = os.environ['vchostpwd']
        #test_lib.lib_execute_ssh_cmd(vc_host, vchost_user, vchost_password, cmd, 180)
        result = test_lib.lib_execute_ssh_cmd(vc_host, vchost_user, vchost_password, cmd, 180)
        test_util.test_logger(result)
        result = str(result)
        test_util.test_logger(result)
        if result.rstrip('\n') == "file_exist":
            test_util.test_logger(result.rstrip('\n'))
            return self.judge(True)
        else:
            return self.judge(False)
def test():
    """Verify vCenter sync reflects data-volume state changes.

    Creates a VM plus two data volumes, deletes one, syncs vCenter and
    checks DB status (Ready vs Deleted); then removes the remaining
    volume's file directly on the ESX host, re-syncs, and expects the
    volume record to disappear.
    """
    global test_obj_dict
    ova_image_name = os.environ['vcenterDefaultmplate']
    network_pattern1 = os.environ['l3vCenterNoVlanNetworkName']
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('largeDiskOfferingName'))
    test_util.test_dsc('Create vm and check')
    vm = test_stub.create_vm_in_vcenter(vm_name = 'test_volume_after_sync_vm', image_name = ova_image_name, l3_name = network_pattern1)
    test_obj_dict.add_vm(vm)
    vm.check()
    ps_uuid = vm.vm.allVolumes[0].primaryStorageUuid
    vc_ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid)
    vc_host = test_lib.lib_find_host_by_vm(vm.vm).managementIp
    test_util.test_dsc('Create volumes and check')
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume_creation_option.set_name('vcenter_volume')
    # First volume: exercise attach/detach then keep it (expected 'Ready').
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    volume.attach(vm)
    volume.check()
    volume.detach()
    volume.check()
    # Second volume: pinned to the VM's PS, then deleted (expected 'Deleted').
    volume_creation_option.set_name('vcenter_volume1')
    volume_creation_option.set_primary_storage_uuid(ps_uuid)
    volume1 = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume1)
    volume1.check()
    volume1.attach(vm)
    volume1.check()
    volume1.delete()
    volume1.check()
    test_util.test_dsc('Sync vcenter')
    vcenter_uuid = vct_ops.lib_get_vcenter_by_name(os.environ['vcenter']).uuid
    vct_ops.sync_vcenter(vcenter_uuid)
    # Give the sync a moment to settle before querying the DB.
    time.sleep(5)
    test_util.test_dsc('check volumes after synchronizing vcenter')
    db_volume = test_lib.lib_get_volume_by_uuid(volume.get_volume().uuid)
    db_volume1 = test_lib.lib_get_volume_by_uuid(volume1.get_volume().uuid)
    if db_volume.status != 'Ready' or db_volume1.status != 'Deleted':
        test_util.test_fail("check data volumes fail after synchronizing vcenter")
    #delete volume file
    # Build the on-datastore path: PS url minus scheme + installPath minus
    # the '[datastore]' prefix.
    volume_installPath = vc_ps.url.split('//')[1] + db_volume.installPath.split('[' + vc_ps.name + ']')[1].lstrip()
    test_util.test_logger(volume_installPath)
    cmd = 'rm -f %s' %volume_installPath
    vchost_user = os.environ['vchostUser']
    vchost_password = os.environ['vchostpwd']
    result = test_lib.lib_execute_ssh_cmd(vc_host, vchost_user, vchost_password, cmd, 180)
    test_util.test_dsc('Sync vcenter')
    vct_ops.sync_vcenter(vcenter_uuid)
    time.sleep(5)
    # After the backing file is gone, sync should drop the volume record.
    db_volume = test_lib.lib_get_volume_by_uuid(volume.get_volume().uuid)
    if db_volume:
        test_util.test_fail("check data volumes fail after synchronizing vcenter")
    #cleanup
    vm.destroy()
    vm.expunge()
    volume1.expunge()
    test_util.test_pass("Test sync volume in vcenter passed.")
def test():
    """Verify vCenter sync reflects data-volume state changes.

    Same scenario as the sibling test above (this appears to be a
    re-formatted duplicate): create VM + two data volumes, delete one,
    sync and check statuses; then delete the surviving volume's file on
    the ESX host and verify a second sync removes its DB record.
    """
    global test_obj_dict
    ova_image_name = os.environ['vcenterDefaultmplate']
    network_pattern1 = os.environ['l3vCenterNoVlanNetworkName']
    disk_offering = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('largeDiskOfferingName'))
    test_util.test_dsc('Create vm and check')
    vm = test_stub.create_vm_in_vcenter(vm_name='test_volume_after_sync_vm',
                                        image_name=ova_image_name,
                                        l3_name=network_pattern1)
    test_obj_dict.add_vm(vm)
    vm.check()
    ps_uuid = vm.vm.allVolumes[0].primaryStorageUuid
    vc_ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid)
    vc_host = test_lib.lib_find_host_by_vm(vm.vm).managementIp
    test_util.test_dsc('Create volumes and check')
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume_creation_option.set_name('vcenter_volume')
    # Volume kept alive -> expected status 'Ready' after sync.
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    volume.attach(vm)
    volume.check()
    volume.detach()
    volume.check()
    # Volume deleted before sync -> expected status 'Deleted'.
    volume_creation_option.set_name('vcenter_volume1')
    volume_creation_option.set_primary_storage_uuid(ps_uuid)
    volume1 = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume1)
    volume1.check()
    volume1.attach(vm)
    volume1.check()
    volume1.delete()
    volume1.check()
    test_util.test_dsc('Sync vcenter')
    vcenter_uuid = vct_ops.lib_get_vcenter_by_name(os.environ['vcenter']).uuid
    vct_ops.sync_vcenter(vcenter_uuid)
    # Brief settle time before reading synced state from the DB.
    time.sleep(5)
    test_util.test_dsc('check volumes after synchronizing vcenter')
    db_volume = test_lib.lib_get_volume_by_uuid(volume.get_volume().uuid)
    db_volume1 = test_lib.lib_get_volume_by_uuid(volume1.get_volume().uuid)
    if db_volume.status != 'Ready' or db_volume1.status != 'Deleted':
        test_util.test_fail(
            "check data volumes fail after synchronizing vcenter")
    #delete volume file
    # Datastore-relative installPath -> absolute path on the ESX host.
    volume_installPath = vc_ps.url.split(
        '//')[1] + db_volume.installPath.split('[' + vc_ps.name + ']')[1].lstrip()
    test_util.test_logger(volume_installPath)
    cmd = 'rm -f %s' % volume_installPath
    vchost_user = os.environ['vchostUser']
    vchost_password = os.environ['vchostpwd']
    result = test_lib.lib_execute_ssh_cmd(vc_host, vchost_user, vchost_password, cmd, 180)
    test_util.test_dsc('Sync vcenter')
    vct_ops.sync_vcenter(vcenter_uuid)
    time.sleep(5)
    # The record should be gone once its backing file was removed.
    db_volume = test_lib.lib_get_volume_by_uuid(volume.get_volume().uuid)
    if db_volume:
        test_util.test_fail(
            "check data volumes fail after synchronizing vcenter")
    #cleanup
    vm.destroy()
    vm.expunge()
    volume1.expunge()
    test_util.test_pass("Test sync volume in vcenter passed.")
def test():
    """Test changing a VM's image twice (larger, then smaller).

    After each ChangeVmImage the test asserts that data volumes, L3
    networks and the primary storage are unchanged, and that primary
    storage capacity accounting moves in the expected direction.

    Fix: the network-config check after the second image change compared
    `l3network_uuid_after_tiny` with itself (always equal, check was
    dead); it now compares against the networks recorded after the first
    change, mirroring the first check.
    """
    test_util.test_dsc('Test Change VM Image Function')
    #set overProvisioning.primaryStorage's value as 10
    primary_storage_list = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
    for ps in primary_storage_list:
        if ps.type == "SharedBlock":
            test_util.test_skip('SharedBlock primary storage does not support overProvision')
    con_ops.change_global_config('mevoco', 'overProvisioning.primaryStorage', 10)
    global vm
    # 1T disk offering so the VM carries a mix of data-volume sizes.
    test_lib.lib_create_disk_offering(diskSize=1099511627776, name="1T")
    l3_uuid = test_lib.lib_get_l3_by_name("l3VlanNetwork3").uuid
    image_uuid = test_lib.lib_get_image_by_name("ttylinux").uuid
    disk_offering_uuids = [test_lib.lib_get_disk_offering_by_name("smallDiskOffering").uuid,
                           test_lib.lib_get_disk_offering_by_name("root-disk").uuid,
                           test_lib.lib_get_disk_offering_by_name("1T").uuid]
    vm = test_stub.create_vm(l3_uuid_list=[l3_uuid], image_uuid=image_uuid,
                             vm_name="test-nxs", disk_offering_uuids=disk_offering_uuids)
    test_obj_dict.add_vm(vm)
    vm.check()
    vm_uuid = vm.get_vm().uuid
    # Record pre-change state: data volumes, networks, PS and its capacity.
    last_data_volumes_uuids = []
    last_data_volumes = test_lib.lib_get_data_volumes(vm.get_vm())
    for data_volume in last_data_volumes:
        last_data_volumes_uuids.append(data_volume.uuid)
    last_l3network_uuid = test_lib.lib_get_l3s_uuid_by_vm(vm.get_vm())
    last_primarystorage_uuid = test_lib.lib_get_root_volume(vm.get_vm()).primaryStorageUuid
    ps = test_lib.lib_get_primary_storage_by_uuid(last_primarystorage_uuid)
    avail_cap = ps.availableCapacity
    total_cap = ps.totalCapacity
    vm_ops.stop_vm(vm_uuid)
    vr = test_lib.lib_find_vr_by_vm(vm.get_vm())[0]
    vr_mgmt_ip = test_lib.lib_find_vr_mgmt_ip(vr)
    #stop vm's vr
    vm_ops.stop_vm(vr.uuid)
    # First change: to a larger image.
    image_uuid = test_lib.lib_get_image_by_name("image_for_sg_test").uuid
    vm_ops.change_vm_image(vm_uuid, image_uuid)
    #check whether vr's status is running
    if vr.applianceVmType == 'vrouter':
        if not test_lib.lib_wait_target_up(vr_mgmt_ip, '7272', 240):
            test_util.test_fail('vm:%s is not startup in 240 seconds.Fail to reboot it.' % vr.uuid)
        time.sleep(20)
    #if vr.state == 'Stopped':
    else:
        vm_ops.start_vm(vr.uuid)
        vm_ops.reconnect_vr(vr.uuid)
    vm_ops.start_vm(vm_uuid)
    vm.update()
    #check whether the vm is running successfully
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 120):
        test_util.test_fail('vm:%s is not startup in 120 seconds.Fail to reboot it.' % vm_uuid)
    #check whether data volumes attached to the vm has changed
    data_volumes_after_uuids = []
    data_volumes_after = test_lib.lib_get_data_volumes(vm.get_vm())
    for data_volume in data_volumes_after:
        data_volumes_after_uuids.append(data_volume.uuid)
    if set(last_data_volumes_uuids) != set(data_volumes_after_uuids):
        test_util.test_fail('Change Vm Image Failed.Data volumes changed.')
    #check whether the network config has changed
    l3network_uuid_after = test_lib.lib_get_l3s_uuid_by_vm(vm.get_vm())
    if set(l3network_uuid_after) != set(last_l3network_uuid):
        test_util.test_fail('Change VM Image Failed.The Network config has changed.')
    #check whether primarystorage has changed
    primarystorage_uuid_after = test_lib.lib_get_root_volume(vm.get_vm()).primaryStorageUuid
    if primarystorage_uuid_after != last_primarystorage_uuid:
        test_util.test_fail('Change VM Image Failed.Primarystorage has changed.')
    # Capacity: total must be stable; available must drop for a larger image.
    ps = test_lib.lib_get_primary_storage_by_uuid(primarystorage_uuid_after)
    avail_cap1 = ps.availableCapacity
    total_cap1 = ps.totalCapacity
    if total_cap != total_cap1:
        test_util.test_fail('Primary Storage total capacity is not same,after changing vm image:%s.The previous value:%s, the current value:%s' % (image_uuid, total_cap, total_cap1))
    if avail_cap <= avail_cap1:
        test_util.test_fail('Primary Storage available capacity is not correct,after changing larger image:%s.The previous value:%s, the current value:%s' % (image_uuid, avail_cap, avail_cap1))
    vm_ops.stop_vm(vm_uuid)
    #stop vm's vr
    vm_ops.stop_vm(vr.uuid)
    # Second change: back to the smaller ttylinux image.
    image_tiny_uuid = test_lib.lib_get_image_by_name("ttylinux").uuid
    vm_ops.change_vm_image(vm_uuid, image_tiny_uuid)
    #check whether vr's status is running
    if vr.applianceVmType == 'vrouter':
        if not test_lib.lib_wait_target_up(vr_mgmt_ip, '7272', 240):
            test_util.test_fail('vm:%s is not startup in 240 seconds.Fail to reboot it.' % vr.uuid)
        time.sleep(20)
    #if vr.state == 'Stopped':
    else:
        vm_ops.start_vm(vr.uuid)
        vm_ops.reconnect_vr(vr.uuid)
    vm_ops.start_vm(vm_uuid)
    vm.update()
    #check whether the vm is running successfully
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 120):
        test_util.test_fail('vm:%s is not startup in 120 seconds.Fail to reboot it.' % vm_uuid)
    #check whether data volumes attached to the vm has changed
    data_volumes_after_uuids_tiny = []
    data_volumes_after_tiny = test_lib.lib_get_data_volumes(vm.get_vm())
    for data_volume in data_volumes_after_tiny:
        data_volumes_after_uuids_tiny.append(data_volume.uuid)
    if set(data_volumes_after_uuids_tiny) != set(data_volumes_after_uuids):
        test_util.test_fail('Change Vm Image Failed.Data volumes changed.')
    #check whether the network config has changed
    l3network_uuid_after_tiny = test_lib.lib_get_l3s_uuid_by_vm(vm.get_vm())
    # BUGFIX: compare against the networks after the FIRST change; the
    # original compared the new set with itself, so this check never fired.
    if set(l3network_uuid_after_tiny) != set(l3network_uuid_after):
        test_util.test_fail('Change VM Image Failed.The Network config has changed.')
    #check whether primarystorage has changed
    primarystorage_uuid_after_tiny = test_lib.lib_get_root_volume(vm.get_vm()).primaryStorageUuid
    if primarystorage_uuid_after != primarystorage_uuid_after_tiny:
        test_util.test_fail('Change VM Image Failed.Primarystorage has changed.')
    ps = test_lib.lib_get_primary_storage_by_uuid(primarystorage_uuid_after_tiny)
    avail_cap2 = ps.availableCapacity
    total_cap2 = ps.totalCapacity
    if total_cap2 != total_cap1:
        test_util.test_fail('Primary Storage total capacity is not same,after changing vm image:%s.The previous value:%s, the current value:%s' % (image_uuid, total_cap1, total_cap2))
    if avail_cap2 <= avail_cap1:
        test_util.test_fail('Primary Storage available capacity is not correct,after changing smaller image:%s.The previous value:%s, the current value:%s' % (image_uuid, avail_cap1, avail_cap2))
    test_lib.lib_destroy_vm_and_data_volumes(vm.get_vm())
    test_util.test_pass('Change Vm Image Test Success')
def test():
    """Test creating data volumes from backed-up snapshots of a deleted volume.

    Creates a volume, takes snapshots 1-3 (backing up 1 and 3), deletes the
    volume, then restores new volumes from snapshot1 and snapshot3 and keeps
    snapshotting/checking them. Skipped on Ceph and local storage, which do
    not support this path.
    """
    test_util.test_dsc('Create test vm as utility vm')
    # Ceph stores every snapshot inside the same pool, so backup-snapshot
    # semantics do not apply there.
    if res_ops.query_resource(res_ops.PRIMARY_STORAGE, [])[0].type \
            == inventory.CEPH_PRIMARY_STORAGE_TYPE:
        test_util.test_skip('Ceph primary storage does not support backup volume snapshot, since all volume snapshots are save in same ceph')
        return
    vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm)
    test_util.test_dsc('Create volume for snapshot testing')
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_name('volume for snapshot testing')
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    # Attach/detach once so the volume is initialized on a host.
    volume.attach(vm)
    import time
    #time.sleep(10)
    volume.detach()
    ps_uuid = volume.get_volume().primaryStorageUuid
    ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid)
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        # LOCAL Storage do not support create volume and template from backuped snapshot
        test_lib.lib_robot_cleanup(test_obj_dict)
        test_util.test_skip('Skip test create volume from backuped storage, when volume is deleted.')
    #make sure utility vm is starting and running
    vm.check()
    test_util.test_dsc('create snapshot and check')
    snapshots = test_obj_dict.get_volume_snapshot(volume.get_volume().uuid)
    snapshots.set_utility_vm(vm)
    # Snapshot chain: 1 (backed up), 2, 3 (backed up).
    snapshots.create_snapshot('create_snapshot1')
    snapshot1 = snapshots.get_current_snapshot()
    snapshot1.backup()
    original_checking_points1 = snapshots.get_checking_points(snapshot1)
    snapshots.create_snapshot('create_snapshot2')
    snapshots.create_snapshot('create_snapshot3')
    snapshot3 = snapshots.get_current_snapshot()
    snapshot3.backup()
    original_checking_points3 = snapshots.get_checking_points(snapshot3)
    # Delete the source volume: restores below must come from the backups.
    volume.delete()
    test_util.test_dsc('create new data volume based on backuped snapshot1')
    volume1 = snapshot1.create_data_volume(name = 'snapshot1_volume')
    test_obj_dict.add_volume(volume1)
    volume1.attach(vm)
    volume1.check()
    volume1.detach()
    snapshots1 = test_obj_dict.get_volume_snapshot(volume1.get_volume().uuid)
    snapshots1.set_utility_vm(vm)
    test_util.test_dsc('create new data volume based on backuped snapshot3')
    volume3 = snapshot3.create_data_volume(name = 'snapshot3_volume')
    test_obj_dict.add_volume(volume3)
    #create data volume from sp doesn't need a pre-attach/detach
    volume3.check()
    snapshots3 = test_obj_dict.get_volume_snapshot(volume3.get_volume().uuid)
    snapshots3.set_utility_vm(vm)
    snapshots1.create_snapshot('create_snapshot1-1')
    snapshots1.check()
    snapshots3.create_snapshot('create_snapshot3-1')
    snapshots3.create_snapshot('create_snapshot3-2')
    # Deleting snapshot3 must not break the restored volume3 chain.
    snapshots.delete_snapshot(snapshot3)
    snapshots3.check()
    test_util.test_dsc('Delete snapshot, volume and check')
    snapshots.delete()
    test_obj_dict.rm_volume_snapshot(snapshots)
    snapshots1.check()
    snapshots1.delete()
    test_obj_dict.rm_volume_snapshot(snapshots1)
    snapshots3.check()
    snapshots3.delete()
    test_obj_dict.rm_volume_snapshot(snapshots3)
    volume.delete()
    test_obj_dict.rm_volume(volume)
    vm.destroy()
    test_util.test_pass('Backup Snapshot test Success')
def test():
    """Test image-cache cleanup on primary storage after VM migration.

    Adds an image, boots a VM from it, live-migrates the VM twice, deletes
    image and VM, then calls CleanUpImageCacheOnPrimaryStorage and (NFS
    only) verifies the cached qcow2 file is gone on the host.
    """
    global vm
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, \
            None, fields=['uuid'])
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")
    # Register a fresh root-volume template so its cache entry is ours alone.
    image_option = test_util.ImageOption()
    image_option.set_name('test_image_cache_cleanup')
    image_option.set_format('qcow2')
    image_option.set_mediaType('RootVolumeTemplate')
    image_option.set_url(os.environ.get('imageUrl_s'))
    image_option.set_backup_storage_uuid_list([bss[0].uuid])
    new_image = zstack_image_header.ZstackTestImage()
    new_image.set_creation_option(image_option)
    new_image.add_root_volume_template()
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option = test_util.VmOption()
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(new_image.image.uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('test_image_cache_cleanup_migration_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    if not test_lib.lib_check_vm_live_migration_cap(vm.vm):
        test_util.test_skip('skip migrate if live migrate not supported')
    ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    test_obj_dict.add_vm(vm)
    vm.check()
    # Cache-cleanup semantics differ per PS type; only NFS is verifiable here.
    if ps.type == 'SharedMountPoint':
        test_util.test_skip('CleanUpImageCacheOnPrimaryStorage not supported on SMP storage, skip test.')
    elif ps.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
        test_util.test_skip('ceph is not directly using image cache, skip test.')
    host = test_lib.lib_find_host_by_vm(vm.get_vm())
    # Two random migrations spread cache entries across hosts.
    test_stub.migrate_vm_to_random_host(vm)
    vm.check()
    test_stub.migrate_vm_to_random_host(vm)
    vm.check()
    # Image and VM must be fully expunged before the cache may be reclaimed.
    new_image.delete()
    new_image.expunge()
    vm.destroy()
    vm.expunge()
    ps_ops.cleanup_imagecache_on_primary_storage(ps.uuid)
    if ps.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
        image_cache_path = "%s/imagecache/template/%s/%s.qcow2" % (ps.mountPath, new_image.image.uuid, new_image.image.uuid)
        if test_lib.lib_check_file_exist(host, image_cache_path):
            test_util.test_fail('image cache is expected to be deleted')
#    elif ps.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
#    elif ps.type == 'SharedMountPoint':
    test_util.test_pass('Migrate VM Test Success')
def test():
    """Test NeverStop HA: VM restarts on another host after host network loss.

    Tightens the self-fencer settings, creates a VM on a non-MN host, sets
    HA level NeverStop, takes the host's management NIC down for 180s, and
    expects the VM to come back Running on a different host. Restores
    fencer settings and recovers the host afterwards.
    """
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")
    # Save originals so they can be restored at the end of the test.
    max_attempts = test_lib.lib_get_ha_selffencer_maxattempts()
    test_lib.lib_set_ha_selffencer_maxattempts('3')
    storagechecker_timeout = test_lib.lib_get_ha_selffencer_storagechecker_timeout()
    test_lib.lib_set_ha_selffencer_storagechecker_timeout('5')
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_s')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    #l3_name = os.environ.get('l3NoVlanNetworkName1')
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    # Make sure all virtual routers of the L3 are running before HA testing.
    vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    for vr in vrs:
        if test_lib.lib_is_vm_running(vr) != True:
            vm_ops.start_vm(vr.uuid)
    time.sleep(60)
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    # Place the VM away from the MN host so the network cut below does not
    # take the management node down with it.
    conditions = res_ops.gen_query_conditions('managementIp', '!=', os.environ.get('hostIp'), conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    if not test_lib.lib_check_vm_live_migration_cap(vm.vm):
        test_util.test_skip('skip ha if live migrate not supported')
    ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on localstorage')
    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    host_uuid = test_lib.lib_find_host_by_vm(vm.get_vm()).uuid
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
    l2_network_interface = os.environ.get('l2ManagementNetworkInterface')
    # Drop the host's management network for 180s; the 180s SSH timeout is
    # expected to expire (rsp falsy) because the link goes down mid-command.
    cmd = "ifdown %s && sleep 180 && ifup %s" % (l2_network_interface, l2_network_interface)
    host_username = os.environ.get('hostUsername')
    host_password = os.environ.get('hostPassword')
    rsp = test_lib.lib_execute_ssh_cmd(host_ip, host_username, host_password, cmd, 180)
    if not rsp:
        test_util.test_logger("host is expected to shutdown after its network down for a while")
    test_util.test_logger("wait for 600 seconds")
    time.sleep(600)
    vm.update()
    # HA must have restarted the VM elsewhere.
    if test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp == host_ip:
        test_util.test_fail("VM is expected to start running on another host")
    vm.set_state(vm_header.RUNNING)
    vm.check()
    vm.destroy()
    # Restore fencer settings and bring the fenced host back.
    test_lib.lib_set_ha_selffencer_maxattempts(max_attempts)
    test_lib.lib_set_ha_selffencer_storagechecker_timeout(storagechecker_timeout)
    os.system('bash -ex %s %s' % (os.environ.get('hostRecoverScript'), host_ip))
    host_ops.reconnect_host(host_uuid)
    test_util.test_pass('Test VM ha on host failure Success')
def test():
    """Test NeverStop HA recovery time after a cold host stop.

    Creates a VM on the management-node host, sets HA level NeverStop,
    cold-stops the host via the scenario framework, and checks the VM is
    reachable over SSH again within ~120s (30s settle + 90s wait).

    Fix: `test_host` is now initialized to None before the scenario-host
    search; previously a missing match either raised NameError or silently
    reused a stale value left in the module global by an earlier test.
    """
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout
    global test_host
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_s')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    # Ensure every virtual router of the L3 is up before the HA scenario.
    vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    vr_host_ips = []
    for vr in vrs:
        vr_host_ips.append(test_lib.lib_find_host_by_vr(vr).managementIp)
        if test_lib.lib_is_vm_running(vr) != True:
            vm_ops.start_vm(vr.uuid)
    time.sleep(60)
    # Deliberately place the VM on the management-node host.
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '=', mn_ip, conditions)
    #for vr_host_ip in vr_host_ips:
    #    conditions = res_ops.gen_query_conditions('managementIp', '!=', vr_host_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    if not test_lib.lib_check_vm_live_migration_cap(vm.vm):
        test_util.test_skip('skip ha if live migrate not supported')
    ps = test_lib.lib_get_primary_storage_by_uuid(
        vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on localstorage')
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    test_util.test_logger("host %s is disconnecting" % (host_ip))
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
    # BUGFIX: reset before searching so a stale global (or no match at all)
    # cannot leak through to stop_host() below.
    test_host = None
    host_list = test_stub.get_sce_hosts(test_lib.all_scenario_config, test_lib.scenario_file)
    for host in host_list:
        if host.ip_ == host_ip:
            test_host = host
            break
    if not test_host:
        test_util.test_fail('there is no host with ip %s in scenario file.' % (host_ip))
    test_stub.stop_host(test_host, test_lib.all_scenario_config, 'cold')
    test_util.test_logger(
        "wait for 30 seconds to ensure vm disconnected, then just wait 90s for target up."
    )
    time.sleep(30)
    if test_lib.lib_wait_target_up(vm.vm.vmNics[0].ip, '22', 90):
        test_util.test_logger("%s can be connected within 120s" % (vm.vm.vmNics[0].ip))
    test_stub.start_host(test_host, test_lib.all_scenario_config)
    vm.destroy()
    test_util.test_pass('Test VM ha change to running within 120s Success')
def test():
    """Test VM state after cold-stopping the management-node host.

    Creates a VM on the MN host, cold-stops and restarts that host via the
    scenario framework, restarts zstack-ctl on the MN, then verifies the VM
    is reported Stopped and that a destroy/create cycle still works (proof
    the MN recovered).

    Fix: `test_host` is now initialized to None before the scenario-host
    search; previously, if no scenario host matched `host_ip`, the
    `if not test_host` guard raised NameError instead of producing the
    intended test_fail message.
    """
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    #l3_name = os.environ.get('l3NoVlanNetworkName1')
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    # Remove existing virtual routers so none ends up on the host we stop.
    test_lib.clean_up_all_vr()
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    # Deliberately schedule the VM on the management-node host: the point of
    # the test is MN recovery after a cold stop.
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '=', mn_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    vm.check()
    test_stub.ensure_host_has_no_vr(host_uuid)
    if not test_lib.lib_check_vm_live_migration_cap(vm.vm):
        vm.destroy()
        test_util.test_skip('skip ha if live migrate not supported')
    ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        vm.destroy()
        test_util.test_skip('Skip test on localstorage')
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    host_port = test_lib.lib_get_host_port(host_ip)
    test_util.test_logger("host %s is disconnecting" % (host_ip))
    # BUGFIX: initialize before the search so "no matching host" reaches the
    # test_fail below instead of raising NameError.
    test_host = None
    host_list = test_stub.get_sce_hosts(test_lib.all_scenario_config, test_lib.scenario_file)
    for host in host_list:
        if host.ip_ == host_ip:
            test_host = host
            break
    if not test_host:
        test_util.test_fail('there is no host with ip %s in scenario file.' % (host_ip))
    #test_stub.down_host_network(host_ip, test_lib.all_scenario_config)
    test_stub.stop_host(test_host, test_lib.all_scenario_config, 'cold')
    time.sleep(30)
    #test_stub.up_host_network(host_ip, test_lib.all_scenario_config)
    test_stub.start_host(test_host, test_lib.all_scenario_config)
    test_stub.recover_host_vlan(test_host, test_lib.all_scenario_config, test_lib.deploy_config)
    time.sleep(120)
    # Restart the management node services after the host came back.
    cmd = "nohup zstack-ctl start &"
    host_username = os.environ.get('hostUsername')
    host_password = os.environ.get('hostPassword')
    if not test_lib.lib_execute_ssh_cmd(mn_ip, host_username, host_password, cmd, timeout=300):
        test_util.test_fail("CMD:%s execute failed on %s" % (cmd, mn_ip))
    #test_util.test_logger("wait for 480 seconds")
    #time.sleep(480)
    time.sleep(120)
    # The cold stop must leave the VM in Stopped state (no HA configured).
    cond = res_ops.gen_query_conditions('uuid', '=', vm.vm.uuid)
    if not res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == "Stopped":
        test_util.test_fail("vm is not stopped as expected.")
    vm.destroy()
    #this is used to design to check mn works normally
    time.sleep(20)
    vm.create()
    vm.check()
    vm.destroy()
    #host_ops.reconnect_host(host_uuid)
    test_util.test_pass('Test checking vm status after force stop and start success')
def test():
    """Test creating images from a root volume while snapshots come and go.

    Takes data- and root-volume snapshots on a stopped VM, deletes the
    latest root snapshot, builds an image from the root volume and boots a
    VM from it; then (non-local storage only) builds a second image from a
    fresh root snapshot and verifies data-snapshot trees still check out.
    """
    test_util.test_dsc('Create original vm')
    vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm)
    # vm1 serves as the utility VM for snapshot checking.
    vm1 = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm1)
    test_util.test_dsc('Create Data Volume obj.')
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_name('volume for create both root and data volume snapshot')
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.attach(vm)
    test_util.test_dsc('Construct root volume obj.')
    vm_root_volume_inv = test_lib.lib_get_root_volume(vm.get_vm())
    #root_volume = zstack_vol_header.ZstackTestVolume()
    #root_volume.set_volume(vm_root_volume_inv)
    #root_volume.set_target_vm(vm)
    #root_volume.set_state(vol_header.ATTACHED)
    root_volume_uuid = vm_root_volume_inv.uuid
    root_image_uuid = vm_root_volume_inv.rootImageUuid
    vm_img_inv = test_lib.lib_get_image_by_uuid(root_image_uuid)
    test_util.test_dsc('Stop vm before create snapshot.')
    vm.stop()
    test_util.test_dsc('create snapshot')
    # Data-volume snapshot chain 1-3; snapshot1 kept for later reuse.
    snapshots_data = test_obj_dict.get_volume_snapshot(volume.get_volume().uuid)
    snapshots_data.set_utility_vm(vm1)
    snapshots_data.create_snapshot('create_data_snapshot1')
    snapshot1 = snapshots_data.get_current_snapshot()
    snapshots_data.create_snapshot('create_data_snapshot2')
    snapshots_data.create_snapshot('create_data_snapshot3')
    #snapshots_root = zstack_sp_header.ZstackVolumeSnapshot()
    #snapshots_root.set_target_volume(root_volume)
    #test_obj_dict.add_volume_snapshot(snapshots_root)
    # Root-volume snapshot chain 1-3; snapshot3 deleted before imaging.
    snapshots_root = test_obj_dict.get_volume_snapshot(vm_root_volume_inv.uuid)
    snapshots_root.set_utility_vm(vm1)
    snapshots_root.create_snapshot('create_root_snapshot1')
    snapshots_root.create_snapshot('create_root_snapshot2')
    snapshot2 = snapshots_root.get_current_snapshot()
    snapshots_root.create_snapshot('create_root_snapshot3')
    snapshot3 = snapshots_root.get_current_snapshot()
    test_util.test_dsc('delete snapshot3 and create image tempalte from root')
    snapshots_root.delete_snapshot(snapshot3)
    image_option = test_util.ImageOption()
    image_option.set_name('creating_image_from_root_volume_after_creating_sp')
    image_option.set_guest_os_type(vm_img_inv.guestOsType)
    image_option.set_bits(vm_img_inv.bits)
    image_option.set_root_volume_uuid(root_volume_uuid)
    backup_storage_list = test_lib.lib_get_backup_storage_list_by_vm(vm.get_vm())
    bs_uuid_list = []
    for bs in backup_storage_list:
        bs_uuid_list.append(bs.uuid)
    image_option.set_backup_storage_uuid_list(bs_uuid_list)
    test_util.test_dsc('create image template from root volume')
    image2 = zstack_img_header.ZstackTestImage()
    image2.set_creation_option(image_option)
    image2.create()
    test_obj_dict.add_image(image2)
    image2.check()
    image2_uuid = image2.get_image().uuid
    test_util.test_dsc('create vm2 with new created template and check')
    vm_creation_option = vm.get_creation_option()
    vm_creation_option.set_image_uuid(image2_uuid)
    vm2 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm2)
    vm2.check()
    vm2.destroy()
    test_obj_dict.rm_vm(vm2)
    ps_uuid = vm_root_volume_inv.primaryStorageUuid
    ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid)
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        # LOCAL Storage do not support create volume and template from backuped snapshot
        test_lib.lib_robot_cleanup(test_obj_dict)
        test_util.test_pass('Create image from root volume with creating/destroying Snapshot Success')
    #check data snapshots
    # Revert the data chain to snapshot1 and branch new snapshots off it.
    snapshots_data.use_snapshot(snapshot1)
    snapshots_data.create_snapshot('create_snapshot1.1.1')
    snapshots_data.create_snapshot('create_snapshot1.1.2')
    test_util.test_dsc('create snapshot4 and finally delete all snapshots_root')
    snapshots_root.create_snapshot('create_snapshot4')
    snapshot4 = snapshots_root.get_current_snapshot()
    #snapshots_root.backup_snapshot(snapshot4)
    snapshots_root.check()
    #vm.destroy()
    #test_obj_dict.rm_vm(vm)
    test_util.test_dsc('create image template2 from root snapshot')
    image_option.set_root_volume_uuid(snapshot4.get_snapshot().uuid)
    snapshot4.set_image_creation_option(image_option)
    image3 = snapshot4.create_image_template()
    test_obj_dict.add_image(image3)
    image3.check()
    image3_uuid = image3.get_image().uuid
    test_util.test_dsc('create vm3 with new created template and check')
    vm_creation_option.set_image_uuid(image3_uuid)
    vm3 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm3)
    vm3.check()
    vm3.destroy()
    test_obj_dict.rm_vm(vm3)
    #check data snapshots
    snapshots_data.use_snapshot(snapshot1)
    snapshots_data.create_snapshot('create_snapshot1.2.1')
    snapshots_data.check()
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Create image from root volume with creating/destroying Snapshot Success')
def test():
    """Host maintenance-mode test.

    Creates two VMs plus a data volume attached to vm1, live-migrates both
    VMs onto one randomly chosen target host, puts that host into
    maintenance mode (evacuating the VMs), re-enables it, migrates the VMs
    back, and verifies every object after each transition.
    """
    vm1 = test_stub.create_vr_vm('maintain_host_vm1', 'imageName_s', 'l3VlanNetwork2')
    test_obj_dict.add_vm(vm1)
    vm2 = test_stub.create_vr_vm('maintain_host_vm2', 'imageName_s', 'l3VlanNetwork2')
    test_obj_dict.add_vm(vm2)
    vm1.check()
    vm2.check()

    # Live migration is required to consolidate both VMs onto one host.
    if not test_lib.lib_check_vm_live_migration_cap(vm1.vm) or not test_lib.lib_check_vm_live_migration_cap(vm2.vm):
        test_util.test_skip('skip migrate if live migrate not supported')

    # Local storage cannot migrate a VM that has an attached data volume.
    ps = test_lib.lib_get_primary_storage_by_uuid(vm1.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('skip migrate vm with data volume if localstorate is used')
    ps = test_lib.lib_get_primary_storage_by_uuid(vm2.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('skip migrate vm with data volume if localstorate is used')

    test_util.test_dsc('Create volume and check')
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)

    test_util.test_dsc('Attach volume and check')
    volume.attach(vm1)
    volume.check()

    # Pick a random Enabled+Connected host inside vm1's cluster as the
    # migration target; maintenance needs at least one other host to
    # evacuate to.
    current_host1 = test_lib.lib_get_vm_host(vm1.vm)
    conditions = res_ops.gen_query_conditions('clusterUuid', '=', vm1.vm.clusterUuid)
    conditions = res_ops.gen_query_conditions('state', '=', host_header.ENABLED, conditions)
    conditions = res_ops.gen_query_conditions('status', '=', host_header.CONNECTED, conditions)
    all_hosts = res_ops.query_resource(res_ops.HOST, conditions)
    if len(all_hosts) <= 1:
        test_util.test_fail('Not available host to do maintenance, since there is only %s host' % len(all_hosts))
    target_host = random.choice(all_hosts)

    # Consolidate both VMs on the target host so maintenance affects both.
    if current_host1.uuid != target_host.uuid:
        vm1.migrate(target_host.uuid)
    current_host2 = test_lib.lib_get_vm_host(vm2.vm)
    if current_host2.uuid != target_host.uuid:
        vm2.migrate(target_host.uuid)
    new_host = test_lib.lib_get_vm_host(vm1.vm)
    if new_host.uuid != target_host.uuid:
        test_util.test_fail('VM did not migrate to target [host:] %s, but to [host:] %s' % (target_host.uuid, new_host.uuid))
    volume.check()

    host = test_kvm_host.ZstackTestKvmHost()
    host.set_host(target_host)
    host.maintain()

    # Refresh the VM inventories: maintenance mode migrates the VMs, so the
    # cached inventories (host uuid etc.) are stale.
    vm1.update()
    vm2.update()
    vm1.check()
    vm2.check()
    volume.check()

    # Re-enable the host and wait until it is reconnected before migrating
    # the VMs back onto it.
    host.change_state(test_kvm_host.ENABLE_EVENT)
    if not linux.wait_callback_success(is_host_connected, host.get_host().uuid, 180):
        test_util.test_fail('host status is not changed to connected, after changing its state to Enable')

    vm1.migrate(target_host.uuid)
    vm2.migrate(target_host.uuid)
    vm1.check()
    vm2.check()
    volume.check()

    vm1.destroy()
    test_obj_dict.rm_vm(vm1)
    vm2.destroy()
    test_obj_dict.rm_vm(vm2)
    volume.delete()
    test_obj_dict.rm_volume(volume)
    test_util.test_pass('Maintain Host Test Success')
def test():
    """Cold-migrate a stopped VM's root volume while an ISO is attached.

    Publishes a fake ISO through the management node's static HTTP repo,
    adds it as an ISO image, attaches it to the VM, then stops the VM and
    migrates its (local-storage) root volume to a random other host. The VM
    must check out cleanly both while stopped and after restart.
    """
    global vm
    vm = test_stub.create_vr_vm('migrate_stopped_vm', 'imageName_s', 'l3VlanNetwork2')
    test_obj_dict.add_vm(vm)
    # Cold volume migration is only meaningful on local storage.
    ps = test_lib.lib_get_primary_storage_by_uuid(
        vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')
    vm_inv = vm.get_vm()
    vm_uuid = vm_inv.uuid

    test_util.test_dsc('Add ISO Image')
    cond = res_ops.gen_query_conditions('name', '=', 'sftp')
    bs_uuid = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0].uuid
    img_option = test_util.ImageOption()
    img_option.set_name('iso')
    img_option.set_backup_storage_uuid_list([bs_uuid])
    mn = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0]
    # Drop a fake ISO file into the management node's static repo so it can
    # be downloaded over HTTP below.
    cmd = "echo fake iso for test only > %s/apache-tomcat/webapps/zstack/static/zstack-repo/7/x86_64/os/test.iso" % (
        os.environ.get('zstackInstallPath'))
    if os.system("ip r | grep %s" % (mn.hostName)) == 0:
        # The management node IP is local to this machine: write directly.
        os.system(cmd)
    else:
        # Otherwise locate the host that owns the management node IP (by
        # management IP or its secondary IP) and write the file over SSH.
        # NOTE(review): the loop does not break after the first match —
        # presumably harmless since the command is idempotent; confirm.
        for host in test_lib.lib_get_all_hosts_from_plan():
            test_util.test_logger("host.managementIp_: %s" % (host.managementIp_))
            test_util.test_logger("mn.hostName: %s" % (mn.hostName))
            test_util.test_logger(
                "anotherIp: %s" % (test_stub.get_another_ip_of_host(
                    host.managementIp_, host.username_, host.password_)))
            if host.managementIp_ == mn.hostName or test_stub.get_another_ip_of_host(
                    host.managementIp_, host.username_, host.password_) == mn.hostName:
                out = test_lib.lib_execute_ssh_cmd(host.managementIp_,
                                                   host.username_,
                                                   host.password_,
                                                   cmd,
                                                   timeout=30)
    img_option.set_url(
        'http://%s:8080/zstack/static/zstack-repo/7/x86_64/os/test.iso' %
        (mn.hostName))
    image_inv = img_ops.add_iso_template(img_option)
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)
    test_obj_dict.add_image(image)

    test_util.test_dsc('Attach ISO to VM')
    cond = res_ops.gen_query_conditions('name', '=', 'iso')
    iso_uuid = res_ops.query_resource(res_ops.IMAGE, cond)[0].uuid
    img_ops.attach_iso(iso_uuid, vm_uuid)

    test_util.test_dsc('Migrate VM')
    vm.check()
    target_host = test_lib.lib_find_random_host(vm.vm)
    vm.stop()
    # Cold-migrate the stopped VM's root volume to the chosen host.
    vol_ops.migrate_volume(vm.get_vm().allVolumes[0].uuid, target_host.uuid)
    vm.check()
    vm.start()
    vm.check()

    img_ops.detach_iso(vm_uuid)
    image.delete()
    image.expunge()
    test_obj_dict.rm_image(image)
    vm.destroy()
    test_util.test_pass('Migrate Stopped VM Test Success When Attach ISO')
def _ensure_image_cache_removed(host, image_cache_path):
    """Poll `host` until `image_cache_path` disappears; fail the test if it
    is still present after 7 checks (5s apart, ~30s total).

    Preserves the original retry semantics: check, then fail once the
    counter exceeds 5, otherwise log, sleep 5s and retry.
    """
    count = 0
    while True:
        if not test_lib.lib_check_file_exist(host, image_cache_path):
            break
        elif count > 5:
            test_util.test_fail('image cache is expected to be deleted')
        test_util.test_logger('check %s times: image cache still exist' % (count))
        time.sleep(5)
        count += 1


def test():
    """Image-cache cleanup test.

    Adds a root-volume template, boots a VM from it, live-migrates the VM
    twice (populating image caches), then deletes the image and VM, runs
    cleanup_imagecache_on_primary_storage, and verifies that both cache
    copies (`imagecache/template` and `zstore-cache`) are removed from the
    VM's original host.
    """
    global vm
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, \
            None, fields=['uuid'])
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")

    image_option = test_util.ImageOption()
    image_option.set_name('test_image_cache_cleanup')
    image_option.set_format('qcow2')
    image_option.set_mediaType('RootVolumeTemplate')
    image_option.set_url(os.environ.get('imageUrl_s'))
    image_option.set_backup_storage_uuid_list([bss[0].uuid])
    new_image = zstack_image_header.ZstackTestImage()
    new_image.set_creation_option(image_option)
    new_image.add_root_volume_template()

    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid

    vm_creation_option = test_util.VmOption()
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(new_image.image.uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('test_image_cache_cleanup_migration_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()

    if not test_lib.lib_check_vm_live_migration_cap(vm.vm):
        test_util.test_skip('skip migrate if live migrate not supported')
    ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    test_obj_dict.add_vm(vm)
    vm.check()
    if ps.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
        test_util.test_skip('ceph is not directly using image cache, skip test.')
    # Remember the VM's original host: that is where the cache files were
    # created and where their removal must be verified.
    host = test_lib.lib_find_host_by_vm(vm.get_vm())

    test_stub.migrate_vm_to_random_host(vm)
    vm.check()
    test_stub.migrate_vm_to_random_host(vm)
    vm.check()

    new_image.delete()
    new_image.expunge()
    vm.destroy()
    vm.expunge()

    ps_ops.cleanup_imagecache_on_primary_storage(ps.uuid)
    # The template may be cached in two locations; both must disappear.
    # (The two previously-duplicated polling loops are now one helper.)
    _ensure_image_cache_removed(
        host, "%s/imagecache/template/%s" % (ps.mountPath, new_image.image.uuid))
    _ensure_image_cache_removed(
        host, "%s/zstore-cache/%s" % (ps.mountPath, new_image.image.uuid))
    test_util.test_pass('Migrate VM Test Success')
def test():
    """VM HA (NeverStop) recovery test.

    Creates a VM on the management-node host, sets its HA level to
    NeverStop, cold-shuts that host down via the scenario config, and
    verifies the VM becomes reachable on SSH again within ~120s
    (30s disconnect grace + 90s wait).
    """
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout
    global test_host
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_s')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    # Make sure every virtual router on the L3 is running before the test.
    vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    vr_host_ips = []
    for vr in vrs:
        vr_host_ips.append(test_lib.lib_find_host_by_vr(vr).managementIp)
        if test_lib.lib_is_vm_running(vr) != True:
            vm_ops.start_vm(vr.uuid)
    time.sleep(60)

    # Place the VM on the host carrying the management node IP.
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '=', mn_ip, conditions)
    #for vr_host_ip in vr_host_ips:
    #    conditions = res_ops.gen_query_conditions('managementIp', '!=', vr_host_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()

    if not test_lib.lib_check_vm_live_migration_cap(vm.vm):
        test_util.test_skip('skip ha if live migrate not supported')
    ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on localstorage')

    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    test_util.test_logger("host %s is disconnecting" %(host_ip))
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")

    # Locate the scenario-file host matching the VM's host management IP.
    # BUG FIX: initialize test_host locally so the not-found guard below
    # cannot be satisfied by a stale module-level value (or raise NameError
    # when the global was never set).
    test_host = None
    host_list = test_stub.get_sce_hosts(test_lib.all_scenario_config, test_lib.scenario_file)
    for host in host_list:
        if host.ip_ == host_ip:
            test_host = host
            break
    if not test_host:
        test_util.test_fail('there is no host with ip %s in scenario file.' %(host_ip))

    test_stub.stop_host(test_host, test_lib.all_scenario_config, 'cold')
    test_util.test_logger("wait for 30 seconds to ensure vm disconnected, then just wait 90s for target up.")
    time.sleep(30)
    if test_lib.lib_wait_target_up(vm.vm.vmNics[0].ip, '22', 90):
        test_util.test_logger("%s can be connected within 120s" %(vm.vm.vmNics[0].ip))
    else:
        # BUG FIX: lib_wait_target_up returns falsy on timeout (see the
        # change-vm-image test in this file); previously a timeout fell
        # through silently and the test passed without HA ever recovering
        # the VM. Restore the host, then fail explicitly.
        test_stub.start_host(test_host, test_lib.all_scenario_config)
        test_util.test_fail("%s can not be connected within 120s, ha did not recover the vm in time" %(vm.vm.vmNics[0].ip))
    test_stub.start_host(test_host, test_lib.all_scenario_config)
    vm.destroy()
    test_util.test_pass('Test VM ha change to running within 120s Success')
def check(self):
    """Verify that the volume's backing file exists on its primary storage.

    iSCSI-backed primary storage is delegated to check_iscsi(). For a
    vCenter primary storage the checker connects to vCenter, resolves which
    ESX host mounts the datastore, translates the '[datastore] rel/path'
    install path into an absolute path, and tests file existence over SSH.

    Judges True only when the backing file is found, False otherwise.
    """
    super(zstack_vcenter_volume_file_checker, self).check()
    volume = self.test_obj.volume
    volume_installPath = volume.installPath
    if not volume_installPath:
        test_util.test_logger(
            'Check result: [installPath] is Null for [volume uuid: ] %s. Can not check volume file existence'
            % volume.uuid)
        return self.judge(False)

    ps_uuid = volume.primaryStorageUuid
    ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid)
    if test_lib.lib_is_ps_iscsi_backend(ps_uuid):
        self.check_iscsi(volume, volume_installPath, ps)
    elif ps.type == inventory.VCENTER_PRIMARY_STORAGE_TYPE:
        cond = res_ops.gen_query_conditions('volume.uuid', '=', volume.uuid)
        vc_ps = res_ops.query_resource(res_ops.VCENTER_PRIMARY_STORAGE, cond)
        # Cache the last-seen PS so a volume that was already expunged
        # (no longer queryable) can still be checked; `sign` records which
        # path was taken (1 = fresh query, 0 = cached).
        global vc_ps_volume_expunged
        if vc_ps:
            vc_ps = vc_ps[0]
            sign = 1
            vc_ps_volume_expunged = vc_ps
        else:
            sign = 0
            vc_ps = vc_ps_volume_expunged

        # Connect to vCenter and resolve the datastore's backing host.
        import ssl
        from pyVmomi import vim
        import atexit
        from pyVim import connect
        import zstackwoodpecker.zstack_test.vcenter_checker.zstack_vcenter_vm_checker as vm_checker
        vcenter_password = os.environ['vcenterpwd']
        vcenter_server = os.environ['vcenter']
        vcenter_username = os.environ['vcenteruser']
        sslContext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        sslContext.verify_mode = ssl.CERT_NONE  # test env: skip cert verification
        SI = connect.SmartConnect(host=vcenter_server, user=vcenter_username,
                                  pwd=vcenter_password, port=443,
                                  sslContext=sslContext)
        if not SI:
            test_util.test_fail("Unable to connect to the vCenter")
        content = SI.RetrieveContent()
        datastore = vm_checker.get_obj(content, [vim.Datastore], name=vc_ps.name)
        test_util.test_logger(datastore)
        # datastore.host[0].key stringifies like "vim.HostSystem:host-123";
        # extract the managed-object ref value ("host-123").
        host = str(datastore.host[0].key)
        host_morval = host.split(':')[1][:-1]
        test_util.test_logger(host_morval)
        atexit.register(connect.Disconnect, SI)

        # Map the morval back to an ESX host management IP.
        # BUG FIX: the original loop rebound the loop variable `vc_host` to
        # managementIp on a match, so when nothing matched `vc_host` stayed
        # bound to the last (truthy) inventory object and the not-found
        # guard never fired; with an empty host list it raised NameError.
        # Track the match in a dedicated variable instead.
        cond = res_ops.gen_query_conditions('hypervisorType', '=', 'ESX')
        vc_hosts = res_ops.query_resource(res_ops.HOST, cond)
        vc_host_ip = None
        for vc_host in vc_hosts:
            if vc_host.morval == host_morval:
                vc_host_ip = vc_host.managementIp
                break
        if not vc_host_ip:
            return self.judge(False)

        # Translate "[datastore] rel/path" into an absolute host path.
        if volume_installPath.startswith('[' + vc_ps.name + ']'):
            test_util.test_logger(vc_ps.url)
            if sign:
                # Freshly queried PS urls carry a scheme prefix; strip it.
                vc_ps.url = vc_ps.url.split('//')[1]
                test_util.test_logger(vc_ps.url)
            volume_installPath = volume_installPath.split('[' + vc_ps.name + ']')[1].lstrip()
            volume_installPath = vc_ps.url + volume_installPath

        file_exist = "file_exist"
        cmd = '[ -f %s ] && echo %s' % (volume_installPath, file_exist)
        vchost_user = os.environ['vchostUser']
        vchost_password = os.environ['vchostpwd']
        result = test_lib.lib_execute_ssh_cmd(vc_host_ip, vchost_user, vchost_password, cmd, 180)
        test_util.test_logger(result)
        result = str(result)
        test_util.test_logger(result)
        if result.rstrip('\n') == "file_exist":
            test_util.test_logger(result.rstrip('\n'))
            return self.judge(True)
        else:
            return self.judge(False)
def test():
    """Backup-snapshot test: re-create data volumes from backed-up snapshots.

    Takes three snapshots of a data volume, backs up snapshots 1 and 3,
    deletes the source volume, then creates new data volumes from the two
    backed-up snapshots, snapshots those new volumes, and cleans everything
    up.
    """
    test_util.test_dsc('Create test vm as utility vm')
    # Ceph keeps every snapshot inside the same cluster, so backed-up
    # volume snapshots are not supported there.
    if res_ops.query_resource(res_ops.PRIMARY_STORAGE, [])[0].type \
            == inventory.CEPH_PRIMARY_STORAGE_TYPE:
        test_util.test_skip(
            'Ceph primary storage does not support backup volume snapshot, since all volume snapshots are save in same ceph'
        )
        return
    vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm)

    test_util.test_dsc('Create volume for snapshot testing')
    disk_offering = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('smallDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_name('volume for snapshot testing')
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    # Attach/detach once so the volume is initialized before snapshotting.
    volume.attach(vm)
    import time
    #time.sleep(10)
    volume.detach()

    ps_uuid = volume.get_volume().primaryStorageUuid
    ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid)
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        # LOCAL Storage does not support creating volume and template from
        # a backed-up snapshot.
        test_lib.lib_robot_cleanup(test_obj_dict)
        test_util.test_skip(
            'Skip test create volume from backuped storage, when volume is deleted.'
        )

    # Make sure the utility vm is started and running.
    vm.check()
    test_util.test_dsc('create snapshot and check')
    snapshots = test_obj_dict.get_volume_snapshot(volume.get_volume().uuid)
    snapshots.set_utility_vm(vm)
    snapshots.create_snapshot('create_snapshot1')
    snapshot1 = snapshots.get_current_snapshot()
    snapshot1.backup()
    original_checking_points1 = snapshots.get_checking_points(snapshot1)
    snapshots.create_snapshot('create_snapshot2')
    snapshots.create_snapshot('create_snapshot3')
    snapshot3 = snapshots.get_current_snapshot()
    snapshot3.backup()
    original_checking_points3 = snapshots.get_checking_points(snapshot3)
    # Delete the source volume: subsequent volume creation must come from
    # the backed-up snapshots only.
    volume.delete()

    test_util.test_dsc('create new data volume based on backuped snapshot1')
    volume1 = snapshot1.create_data_volume(name='snapshot1_volume')
    test_obj_dict.add_volume(volume1)
    volume1.attach(vm)
    volume1.check()
    volume1.detach()
    snapshots1 = test_obj_dict.get_volume_snapshot(volume1.get_volume().uuid)
    snapshots1.set_utility_vm(vm)

    test_util.test_dsc('create new data volume based on backuped snapshot3')
    volume3 = snapshot3.create_data_volume(name='snapshot3_volume')
    test_obj_dict.add_volume(volume3)
    # Creating a data volume from a snapshot doesn't need a pre-attach/detach.
    volume3.check()
    snapshots3 = test_obj_dict.get_volume_snapshot(volume3.get_volume().uuid)
    snapshots3.set_utility_vm(vm)

    snapshots1.create_snapshot('create_snapshot1-1')
    snapshots1.check()
    snapshots3.create_snapshot('create_snapshot3-1')
    snapshots3.create_snapshot('create_snapshot3-2')
    # Deleting the original snapshot3 must not break volume3's own snapshots.
    snapshots.delete_snapshot(snapshot3)
    snapshots3.check()

    test_util.test_dsc('Delete snapshot, volume and check')
    snapshots.delete()
    test_obj_dict.rm_volume_snapshot(snapshots)
    snapshots1.check()
    snapshots1.delete()
    test_obj_dict.rm_volume_snapshot(snapshots1)
    snapshots3.check()
    snapshots3.delete()
    test_obj_dict.rm_volume_snapshot(snapshots3)
    # NOTE(review): `volume` was already deleted above; this second delete()
    # appears redundant — presumably idempotent in the test framework, but
    # confirm before removing.
    volume.delete()
    test_obj_dict.rm_volume(volume)
    vm.destroy()
    test_util.test_pass('Backup Snapshot test Success')
def test():
    """Change-VM-image test.

    Creates a VM with three data volumes, changes its image to a larger one
    and then back to the small ttylinux image, and after each change checks:
      * the set of attached data volumes is unchanged,
      * the L3 network configuration is unchanged,
      * the root volume stays on the same primary storage,
      * total PS capacity is unchanged, and available capacity moves in the
        expected direction (down for a larger image, up for a smaller one).
    """
    test_util.test_dsc('Test Change VM Image Function')
    # Set overProvisioning.primaryStorage's value as 10.
    con_ops.change_global_config('mevoco', 'overProvisioning.primaryStorage', 10)
    global vm
    test_lib.lib_create_disk_offering(diskSize=1099511627776, name="1T")
    disk_offering_uuids = [
        test_lib.lib_get_disk_offering_by_name("smallDiskOffering").uuid,
        test_lib.lib_get_disk_offering_by_name("root-disk").uuid,
        test_lib.lib_get_disk_offering_by_name("1T").uuid
    ]
    vm = test_stub.create_vm(image_name="ttylinux",
                             vm_name="test-vm",
                             disk_offering_uuids=disk_offering_uuids)
    test_obj_dict.add_vm(vm)
    vm.check()
    vm_uuid = vm.get_vm().uuid

    # Record the pre-change state to compare against after each image change.
    last_data_volumes_uuids = []
    last_data_volumes = test_lib.lib_get_data_volumes(vm.get_vm())
    for data_volume in last_data_volumes:
        last_data_volumes_uuids.append(data_volume.uuid)
    last_l3network_uuid = test_lib.lib_get_l3s_uuid_by_vm(vm.get_vm())
    last_primarystorage_uuid = test_lib.lib_get_root_volume(
        vm.get_vm()).primaryStorageUuid
    ps = test_lib.lib_get_primary_storage_by_uuid(last_primarystorage_uuid)
    avail_cap = ps.availableCapacity
    total_cap = ps.totalCapacity

    # Round 1: change to the larger image.
    vm_ops.stop_vm(vm_uuid)
    image_uuid = test_lib.lib_get_image_by_name("image_for_sg_test").uuid
    vm_ops.change_vm_image(vm_uuid, image_uuid)
    vm_ops.start_vm(vm_uuid)
    vm.update()
    # Check whether the VM started successfully.
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 120):
        test_util.test_fail(
            'vm:%s is not startup in 120 seconds.Fail to reboot it.'
            % vm_uuid)
    # Check whether data volumes attached to the VM have changed.
    data_volumes_after_uuids = []
    data_volumes_after = test_lib.lib_get_data_volumes(vm.get_vm())
    for data_volume in data_volumes_after:
        data_volumes_after_uuids.append(data_volume.uuid)
    if set(last_data_volumes_uuids) != set(data_volumes_after_uuids):
        test_util.test_fail('Change Vm Image Failed.Data volumes changed.')
    # Check whether the network config has changed.
    l3network_uuid_after = test_lib.lib_get_l3s_uuid_by_vm(vm.get_vm())
    if l3network_uuid_after != last_l3network_uuid:
        test_util.test_fail(
            'Change VM Image Failed.The Network config has changed.')
    # Check whether the primary storage has changed.
    primarystorage_uuid_after = test_lib.lib_get_root_volume(
        vm.get_vm()).primaryStorageUuid
    if primarystorage_uuid_after != last_primarystorage_uuid:
        test_util.test_fail(
            'Change VM Image Failed.Primarystorage has changed.')
    ps = test_lib.lib_get_primary_storage_by_uuid(primarystorage_uuid_after)
    avail_cap1 = ps.availableCapacity
    total_cap1 = ps.totalCapacity
    if total_cap != total_cap1:
        test_util.test_fail(
            'Primary Storage total capacity is not same,after changing vm image:%s.The previous value:%s, the current value:%s'
            % (image_uuid, total_cap, total_cap1))
    # A larger image must consume more space: available capacity must drop.
    if avail_cap <= avail_cap1:
        test_util.test_fail(
            'Primary Storage available capacity is not correct,after changing larger image:%s.The previous value:%s, the current value:%s'
            % (image_uuid, avail_cap, avail_cap1))

    # Round 2: change back to the small ttylinux image.
    vm_ops.stop_vm(vm_uuid)
    image_tiny_uuid = test_lib.lib_get_image_by_name("ttylinux").uuid
    vm_ops.change_vm_image(vm_uuid, image_tiny_uuid)
    vm_ops.start_vm(vm_uuid)
    vm.update()
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 120):
        test_util.test_fail(
            'vm:%s is not startup in 120 seconds.Fail to reboot it.'
            % vm_uuid)
    data_volumes_after_uuids_tiny = []
    data_volumes_after_tiny = test_lib.lib_get_data_volumes(vm.get_vm())
    for data_volume in data_volumes_after_tiny:
        data_volumes_after_uuids_tiny.append(data_volume.uuid)
    if set(data_volumes_after_uuids_tiny) != set(data_volumes_after_uuids):
        test_util.test_fail('Change Vm Image Failed.Data volumes changed.')
    l3network_uuid_after_tiny = test_lib.lib_get_l3s_uuid_by_vm(vm.get_vm())
    if l3network_uuid_after_tiny != l3network_uuid_after:
        test_util.test_fail(
            'Change VM Image Failed.The Network config has changed.')
    primarystorage_uuid_after_tiny = test_lib.lib_get_root_volume(
        vm.get_vm()).primaryStorageUuid
    if primarystorage_uuid_after_tiny != primarystorage_uuid_after:
        test_util.test_fail(
            'Change VM Image Failed.Primarystorage has changed.')
    ps = test_lib.lib_get_primary_storage_by_uuid(
        primarystorage_uuid_after_tiny)
    avail_cap2 = ps.availableCapacity
    total_cap2 = ps.totalCapacity
    # BUG FIX: these two failure messages previously interpolated
    # image_uuid (the round-1 image); the image applied in this round is
    # image_tiny_uuid, so the diagnostics named the wrong image.
    if total_cap2 != total_cap1:
        test_util.test_fail(
            'Primary Storage total capacity is not same,after changing vm image:%s.The previous value:%s, the current value:%s'
            % (image_tiny_uuid, total_cap1, total_cap2))
    # A smaller image must free space: available capacity must rise.
    if avail_cap2 <= avail_cap1:
        test_util.test_fail(
            'Primary Storage available capacity is not correct,after changing smaller image:%s.The previous value:%s, the current value:%s'
            % (image_tiny_uuid, avail_cap1, avail_cap2))

    test_lib.lib_destroy_vm_and_data_volumes(vm.get_vm())
    vm.expunge()
    test_util.test_pass('Change Vm Image Test Success')