def test():
    """Create a VM with one data volume as a normal (non-admin) account.

    Creates a normal account with a random name, logs in as it, shares the
    admin resources with it, creates a VM carrying one data volume, verifies
    the VM has exactly 2 volumes (root + data), then destroys the VM and
    deletes the account.
    """
    global vm
    global test_account_uuid
    import uuid

    account_name = uuid.uuid1().get_hex()
    # Fix: a first `account_pass = uuid.uuid1().get_hex()` assignment was dead
    # code -- it was immediately overwritten by the sha512 digest below, which
    # is the value actually used for both create and login.
    account_pass = hashlib.sha512(account_name).hexdigest()
    test_account = acc_ops.create_normal_account(account_name, account_pass)
    test_account_uuid = test_account.uuid
    test_account_session = acc_ops.login_by_account(account_name, account_pass)
    test_stub.share_admin_resource([test_account_uuid])

    vm = test_stub.create_vm_with_volume(session_uuid=test_account_session)
    vm.check()

    # Expect root volume + the one data volume assigned at creation.
    volumes_number = len(test_lib.lib_get_all_volumes(vm.vm))
    if volumes_number != 2:
        test_util.test_fail('Did not find 2 volumes for [vm:] %s. But we assigned 1 data volume when create the vm. We only catch %s volumes' % (vm.vm.uuid, volumes_number))
    else:
        test_util.test_logger('Find 2 volumes for [vm:] %s.' % vm.vm.uuid)

    vm.destroy(test_account_session)
    vm.check()
    acc_ops.delete_account(test_account_uuid)
    test_util.test_pass('Create VM with volume by normal user account Success')
def test():
    """Full-clone a VM that has no extra data volume and verify the clone owns
    exactly 1 volume (its root volume)."""
    global test_obj_dict, bs, ps
    # Only the imagestore backup storage supports full clone.
    bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)
    for i in bs:
        if i.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
            break
    else:
        test_util.test_skip('Skip test on non-imagestore')
    # SharedBlock and AliyunNAS primary storage do not support this scenario.
    # Fix: the old skip message read 'Skip test on SharedBlock and PS', which
    # dropped AliyunNAS and did not match the condition or sibling tests.
    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
    for i in ps:
        if i.type in ['SharedBlock', 'AliyunNAS']:
            test_util.test_skip('Skip test on SharedBlock and AliyunNAS PS')

    image_name = os.environ.get('imageName_s')
    l3_name = os.environ.get('l3PublicNetworkName')
    vm = test_stub.create_vm("test_vm", image_name, l3_name)
    test_obj_dict.add_vm(vm)

    new_vm = vm.clone(['test_vm_clone_with_on_data_volume'], full=True)[0]
    test_obj_dict.add_vm(new_vm)

    # The source VM had no data volume, so the clone must have only its root.
    volumes_number = len(test_lib.lib_get_all_volumes(new_vm.vm))
    if volumes_number != 1:
        test_util.test_fail('Did not find 1 volumes for [vm:] %s. But we assigned 1 data volume when create the vm. We only catch %s volumes' % (new_vm.vm.uuid, volumes_number))
    else:
        test_util.test_logger('Find 1 volumes for [vm:] %s.' % new_vm.vm.uuid)

    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Test clone vm with one data volume Success')
def test():
    """Create a VM carrying 22 extra data volumes attached via virtio-scsi and
    verify both the management-side count (22 data + 1 root) and the in-guest
    /dev/sd* disk count."""
    global test_obj_dict
    # Fix: the old description claimed "3 additional data volumes with 1 of
    # them using virtio-scsi", which did not match the code (8 + 14 = 22).
    test_util.test_dsc('Create a VM with 22 additional data volumes using virtio-scsi')
    disk_offering1 = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    disk_offering_uuids = []
    for i in range(0, 8):
        disk_offering_uuids.append(disk_offering1.uuid)
    disk_offering2 = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName'))
    for i in range(0, 14):
        disk_offering_uuids.append(disk_offering2.uuid)
    # NOTE(review): both virtio system tags declare num::14, but only 8 volumes
    # use disk_offering1 -- confirm whether the second tag should be num::8.
    vm = test_stub.create_vlan_vm(system_tags=["virtio::diskOffering::%s::num::14" % (disk_offering2.uuid), "virtio::diskOffering::%s::num::14" % (disk_offering1.uuid)], l3_name=os.environ.get('l3VlanNetworkName1'), disk_offering_uuids=disk_offering_uuids)
    test_obj_dict.add_vm(vm)
    vm.check()
    # Expect root volume + 22 data volumes.
    volumes_number = len(test_lib.lib_get_all_volumes(vm.vm))
    if volumes_number != 23:
        test_util.test_fail('Did not find 23 volumes for [vm:] %s. But we assigned 22 data volume when create the vm. We only catch %s volumes' % (vm.vm.uuid, volumes_number))
    else:
        test_util.test_logger('Find 23 volumes for [vm:] %s.' % vm.vm.uuid)
    # All 22 data disks should show up in the guest as virtio-scsi /dev/sd*.
    scsi_cmd = 'ls /dev/sd* | wc -l'
    if test_lib.lib_execute_command_in_vm(vm.get_vm(), scsi_cmd).strip() != '22':
        test_util.test_fail('Only expect 22 disk in virtio scsi mode')
    vm.destroy()
    test_util.test_pass('Create a VM with 22 additional data volumes with 22 of them using virtio-scsi PASS')
def test():
    """Suspend a VM that has two attached data volumes, full-clone it into
    three VMs, verify each clone owns 3 volumes (root + 2 data), then resume
    the source VM and clean up."""
    global test_obj_dict, bs
    # Only the imagestore backup storage supports full clone.
    bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)
    for i in bs:
        if i.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
            break
    else:
        test_util.test_skip('Skip test on non-imagestore')
    # Skip for AliyunNAS PS. Fix: the condition only matches AliyunNAS, but
    # the old skip message wrongly mentioned SharedBlock as well.
    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
    for i in ps:
        if i.type == 'AliyunNAS':
            test_util.test_skip('Skip test on AliyunNAS PS')
    volume_creation_option = test_util.VolumeOption()
    test_util.test_dsc('Create volume and check')
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume = test_stub.create_volume(volume_creation_option)
    volume2 = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    test_obj_dict.add_volume(volume2)
    volume.check()
    volume2.check()
    image_name = os.environ.get('imageName_s')
    l3_name = os.environ.get('l3PublicNetworkName')
    vm = test_stub.create_vm("test_vm", image_name, l3_name)
    test_obj_dict.add_vm(vm)
    volume.attach(vm)
    volume2.attach(vm)
    # Clone from a suspended VM; resume it after verification.
    vm.suspend()
    new_vm = vm.clone(['test_vm_clone_vm1_with_two_data_volume',
                       'test_vm_clone_vm2_with_two_data_volume',
                       'test_vm_clone_vm3_with_two_data_volume'], full=True)
    for i in new_vm:
        test_obj_dict.add_vm(i)
        volumes_number = len(test_lib.lib_get_all_volumes(i.vm))
        if volumes_number != 3:
            test_util.test_fail('Did not find 3 volumes for [vm:] %s. But we assigned 3 data volume when create the vm. We only catch %s volumes' % (i.vm.uuid, volumes_number))
        else:
            test_util.test_logger('Find 3 volumes for [vm:] %s.' % i.vm.uuid)
    vm.resume()
    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Test full clone 3vms with two data volume Success')
def test():
    """Full-clone a stopped VM across primary-storage types (SharedBlock/Ceph).

    Driven by CASE_FLAVOR: creates the source VM on one PS type, attaches one
    SharedBlock and one Ceph data volume, stops the VM, full-clones it with the
    root/data volumes targeted at the opposite PS types, then verifies the
    clone has 3 volumes and that each volume landed on the expected PS type.
    """
    # Require a Ceph primary storage; without it the cross-PS clone is impossible.
    ps_inv = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
    ceph_ps = [ps for ps in ps_inv if ps.type == 'Ceph']
    if not ceph_ps:
        test_util.test_skip('Skip test as there is not Ceph primary storage')
    flavor = case_flavor[os.getenv('CASE_FLAVOR')]
    # Source VM root volume goes to SharedBlock or Ceph depending on the flavor.
    if flavor['shared_vm']:
        multi_ps.create_vm(ps_type="SharedBlock")
    else:
        multi_ps.create_vm(ps_type="Ceph")
    # Attach one data volume on each PS type to the created VM(s).
    multi_ps.create_data_volume(vms=multi_ps.vm, ps_type='SharedBlock')
    multi_ps.create_data_volume(vms=multi_ps.vm, ps_type='Ceph')
    vm = multi_ps.vm[0]
    vm.stop()
    shared_ps = multi_ps.get_ps(ps_type='SharedBlock')
    ceph_ps = multi_ps.get_ps(ps_type='Ceph')
    # NOTE(review): only the (to_shared_vm, not to_shared_volume) and
    # (not to_shared_vm, to_shared_volume) combinations bind the PS uuids; any
    # other flavor combination would raise NameError at the clone() call below.
    # Presumably case_flavor only contains these two cases -- verify.
    if flavor['to_shared_vm']:
        if not flavor['to_shared_volume']:
            ps_uuid_for_root_volume = shared_ps.uuid
            ps_uuid_for_data_volume = ceph_ps.uuid
    else:
        if flavor['to_shared_volume']:
            ps_uuid_for_root_volume = ceph_ps.uuid
            ps_uuid_for_data_volume = shared_ps.uuid
    root_volume_systag = []
    data_volume_systag = ["volumeProvisioningStrategy::ThinProvisioning"]
    cloned_vm = vm.clone(['test_stop_vm_full_clone'], full=True,
                         ps_uuid_for_root_volume=ps_uuid_for_root_volume,
                         ps_uuid_for_data_volume=ps_uuid_for_data_volume,
                         root_volume_systag=root_volume_systag,
                         data_volume_systag=data_volume_systag)[0]
    multi_ps.vm.append(cloned_vm.vm)
    # Expect exactly 3 volumes: root + the 2 data volumes attached above.
    volumes_list = test_lib.lib_get_all_volumes(cloned_vm.vm)
    volumes_number = len(volumes_list)
    if volumes_number != 3:
        test_util.test_fail('Did not just find 3 volumes for [vm:] %s. But we assigned 2 data volume to the vm. We only catch %s volumes' % (cloned_vm.vm.uuid, volumes_number))
    else:
        test_util.test_logger('Find 3 volumes for [vm:] %s.' % cloned_vm.vm.uuid)
    # Verify each cloned volume landed on the requested primary storage type.
    ps = test_lib.lib_get_primary_storage_by_uuid(test_lib.lib_get_root_volume(cloned_vm.vm).primaryStorageUuid)
    data_volume_ps1 = test_lib.lib_get_primary_storage_by_uuid(test_lib.lib_get_data_volumes(cloned_vm.vm)[0].primaryStorageUuid)
    data_volume_ps2 = test_lib.lib_get_primary_storage_by_uuid(test_lib.lib_get_data_volumes(cloned_vm.vm)[1].primaryStorageUuid)
    if flavor['to_shared_vm']:
        if not flavor['to_shared_volume']:
            test_util.test_logger(ps.type + data_volume_ps1.type + data_volume_ps2.type)
            assert ps.type == 'SharedBlock' and data_volume_ps1.type == 'Ceph' and data_volume_ps2.type == 'Ceph'
    else:
        if flavor['to_shared_volume']:
            test_util.test_logger(ps.type + data_volume_ps1.type + data_volume_ps2.type)
            assert ps.type == 'Ceph' and data_volume_ps1.type == 'SharedBlock' and data_volume_ps2.type == 'SharedBlock'
    test_util.test_pass('Full Clone Stopped VM Test Success')
def test():
    """Create a VM that carries one data volume, verify it ends up with exactly
    two volumes (root + data), then destroy it."""
    global vm
    vm = test_stub.create_vm_with_volume()
    vm.check()
    all_volumes = test_lib.lib_get_all_volumes(vm.vm)
    vol_count = len(all_volumes)
    if vol_count == 2:
        test_util.test_logger('Find 2 volumes for [vm:] %s.' % vm.vm.uuid)
    else:
        test_util.test_fail('Did not find 2 volumes for [vm:] %s. But we assigned 1 data volume when create the vm. We only catch %s volumes' % (vm.vm.uuid, vol_count))
    vm.destroy()
    vm.check()
    test_util.test_pass('Create VM Test Success')
def test():
    """Create a VR-backed VLAN VM with 22 data volumes and verify it owns 23
    volumes total (root + 22 data)."""
    global test_obj_dict
    test_util.test_dsc('Create test vm and check. VR only has DNS and DHCP services')
    vm = test_stub.create_vlan_vm_with_volume(os.environ.get('l3VlanNetworkName1'), None, 22)
    test_obj_dict.add_vm(vm)
    vm.check()
    volumes_number = len(test_lib.lib_get_all_volumes(vm.vm))
    if volumes_number != 23:
        test_util.test_fail('Did not find 23 volumes for [vm:] %s. But we assigned 22 data volume when create the vm. We only catch %s volumes' % (vm.vm.uuid, volumes_number))
    else:
        # Fix: this branch only runs when volumes_number == 23, but the old
        # message logged 'Find 22 volumes'.
        test_util.test_logger('Find 23 volumes for [vm:] %s.' % vm.vm.uuid)
    vm.destroy()
    test_util.test_pass('Create VirtualRouter VM DNS DHCP Test Success')
def test():
    """Full-clone a suspended Windows VM with two attached data volumes and
    verify the clone has 3 volumes (root + 2 data)."""
    global test_obj_dict, bs
    # Only the imagestore backup storage supports full clone.
    bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)
    for i in bs:
        if i.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
            break
    else:
        test_util.test_logger('BS is type %s.' % i.type)
        test_util.test_skip('Skip test on non-imagestore')
    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
    for i in ps:
        if i.type == 'AliyunNAS':
            test_util.test_skip('Skip test on AliyunNAS PS')
        elif i.type == 'AliyunEBS':
            test_util.test_skip('Skip test on AliyunEBS PS')
    volume_creation_option = test_util.VolumeOption()
    test_util.test_dsc('Create volume and check')
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume = test_stub.create_volume(volume_creation_option)
    volume2 = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    # Fix: volume2 was never registered with test_obj_dict, so it leaked on
    # error cleanup (sibling tests register both volumes).
    test_obj_dict.add_volume(volume2)
    volume.check()
    volume2.check()
    vm = test_stub.create_windows_vm()
    test_obj_dict.add_vm(vm)
    volume.attach(vm)
    volume2.attach(vm)
    # Clone from a suspended VM; resume it after verification.
    vm.suspend()
    new_vm = vm.clone(['test_vm_clone_with_one_data_volume'], full=True)[0]
    test_obj_dict.add_vm(new_vm)
    volumes_number = len(test_lib.lib_get_all_volumes(new_vm.vm))
    if volumes_number != 3:
        test_util.test_fail('Did not find 3 volumes for [vm:] %s. But we assigned 3 data volume when create the vm. We only catch %s volumes' % (new_vm.vm.uuid, volumes_number))
    else:
        test_util.test_logger('Find 3 volumes for [vm:] %s.' % new_vm.vm.uuid)
    vm.resume()
    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Test clone vm with windows two data volume Success')
def test():
    """Full-clone a VLAN VM with one attached data volume and verify the clone
    carries 2 volumes (root + data)."""
    global test_obj_dict, bs, ps
    # Only the imagestore backup storage supports full clone.
    bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)
    for i in bs:
        if i.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
            break
    else:
        test_util.test_logger('BS is type %s.' % i.type)
        test_util.test_skip('Skip test on non-imagestore')
    # SharedBlock and AliyunNAS primary storage do not support this scenario.
    # Fix: the condition matches both types, but the old skip message only
    # mentioned SharedBlock.
    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
    for i in ps:
        if i.type in ['SharedBlock', 'AliyunNAS']:
            test_util.test_skip('Skip test on SharedBlock and AliyunNAS PS')
    volume_creation_option = test_util.VolumeOption()
    test_util.test_dsc('Create volume and check')
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    # Removed: unused local `volume_uuid` and stale commented-out lines.
    vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm)
    volume.attach(vm)
    new_vm = vm.clone(['test_vm_clone_with_one_data_volume'], full=True)[0]
    test_obj_dict.add_vm(new_vm)
    volumes_number = len(test_lib.lib_get_all_volumes(new_vm.vm))
    if volumes_number != 2:
        test_util.test_fail('Did not find 2 volumes for [vm:] %s. But we assigned 2 data volume when create the vm. We only catch %s volumes' % (new_vm.vm.uuid, volumes_number))
    else:
        test_util.test_logger('Find 2 volumes for [vm:] %s.' % new_vm.vm.uuid)
    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Test clone vm with one data volume Success')
def test():
    """Verify a VM created together with one data volume owns exactly two
    volumes (its root plus the assigned data volume), then tear it down."""
    global vm
    vm = test_stub.create_vm_with_volume()
    vm.check()
    n_volumes = len(test_lib.lib_get_all_volumes(vm.vm))
    if n_volumes != 2:
        test_util.test_fail('Did not find 2 volumes for [vm:] %s. But we assigned 1 data volume when create the vm. We only catch %s volumes' % (vm.vm.uuid, n_volumes))
    else:
        test_util.test_logger('Find 2 volumes for [vm:] %s.' % vm.vm.uuid)
    vm.destroy()
    vm.check()
    test_util.test_pass('Create VM Test Success')
def test():
    """Attach two data volumes to a VM, full-clone it into three VMs, and check
    that every clone owns three volumes (one root plus the two data volumes)."""
    global test_obj_dict, bs, ps
    # Full clone is only available on imagestore backup storage.
    bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)
    for backup_storage in bs:
        if backup_storage.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
            break
    else:
        test_util.test_skip('Skip test on non-imagestore')
    # Neither SharedBlock nor AliyunNAS primary storage supports this scenario.
    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
    for primary_storage in ps:
        if primary_storage.type in ('SharedBlock', 'AliyunNAS'):
            test_util.test_skip('Skip test on SharedBlock and AliyunNAS PS')
    creation_option = test_util.VolumeOption()
    test_util.test_dsc('Create volume and check')
    offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    creation_option.set_disk_offering_uuid(offering.uuid)
    data_volumes = [test_stub.create_volume(creation_option) for _ in range(2)]
    for data_volume in data_volumes:
        test_obj_dict.add_volume(data_volume)
    for data_volume in data_volumes:
        data_volume.check()
    vm = test_stub.create_vm("test_vm", os.environ.get('imageName_s'), os.environ.get('l3PublicNetworkName'))
    test_obj_dict.add_vm(vm)
    for data_volume in data_volumes:
        data_volume.attach(vm)
    clone_names = ['test_vm_clone_vm1_with_two_data_volume',
                   'test_vm_clone_vm2_with_two_data_volume',
                   'test_vm_clone_vm3_with_two_data_volume']
    for cloned in vm.clone(clone_names, full=True):
        test_obj_dict.add_vm(cloned)
        found = len(test_lib.lib_get_all_volumes(cloned.vm))
        if found == 3:
            test_util.test_logger('Find 3 volumes for [vm:] %s.' % cloned.vm.uuid)
        else:
            test_util.test_fail('Did not find 3 volumes for [vm:] %s. But we assigned 3 data volume when create the vm. We only catch %s volumes' % (cloned.vm.uuid, found))
    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Test full clone 3vms with two data volume Success')
def test():
    """Create a VR-backed VLAN VM carrying one data volume and verify it has
    exactly two volumes (root + data)."""
    global test_obj_dict
    test_util.test_dsc('Create test vm and check. VR only has DNS and DHCP services')
    l3_network = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm_with_volume(l3_network)
    test_obj_dict.add_vm(vm)
    vm.check()
    found = len(test_lib.lib_get_all_volumes(vm.vm))
    if found == 2:
        test_util.test_logger('Find 2 volumes for [vm:] %s.' % vm.vm.uuid)
    else:
        test_util.test_fail('Did not find 2 volumes for [vm:] %s. But we assigned 1 data volume when create the vm. We only catch %s volumes' % (vm.vm.uuid, found))
    vm.destroy()
    test_util.test_pass('Create VirtualRouter VM DNS DHCP Test Success')
def test():
    """Create a VR-backed VLAN VM with two data volumes (from the small and the
    root-sized disk offerings) and verify it owns three volumes in total."""
    global test_obj_dict
    test_util.test_dsc('Create test vm and check. VR only has DNS and DHCP services')
    offering_uuids = [
        test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName')).uuid,
        test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName')).uuid,
    ]
    vm = test_stub.create_vlan_vm_with_volume(os.environ.get('l3VlanNetworkName1'), offering_uuids)
    test_obj_dict.add_vm(vm)
    vm.check()
    total = len(test_lib.lib_get_all_volumes(vm.vm))
    if total == 3:
        test_util.test_logger('Find 3 volumes for [vm:] %s.' % vm.vm.uuid)
    else:
        test_util.test_fail('Did not find 3 volumes for [vm:] %s. But we assigned 2 data volume when create the vm. We only catch %s volumes' % (vm.vm.uuid, total))
    vm.destroy()
    test_util.test_pass('Create VirtualRouter VM DNS DHCP Test Success')
def test():
    """Full-clone a VM (with no data volume) onto a flavor-selected primary
    storage and verify the clone's root volume landed on the expected PS type."""
    ps_inventory = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
    # A Ceph primary storage must exist for the cross-PS clone scenarios.
    if not [item for item in ps_inventory if item.type == 'Ceph']:
        test_util.test_skip('Skip test as there is not Ceph primary storage')
    flavor = case_flavor[os.getenv('CASE_FLAVOR')]
    multi_ps.create_vm(ps_type="SharedBlock" if flavor['shared_vm'] else "Ceph")
    vm = multi_ps.vm[0]
    if not flavor['running']:
        vm.stop()
    shared_ps = multi_ps.get_ps(ps_type='SharedBlock')
    ceph_ps = multi_ps.get_ps(ps_type='Ceph')
    target_ps_uuid = shared_ps.uuid if flavor['to_shared_vm'] else ceph_ps.uuid
    root_volume_systag = []
    cloned_vm = vm.clone(['test_vm_clone_without_volume'], full=True,
                         ps_uuid_for_root_volume=target_ps_uuid,
                         root_volume_systag=root_volume_systag)[0]
    multi_ps.vm.append(cloned_vm.vm)
    clone_volumes = test_lib.lib_get_all_volumes(cloned_vm.vm)
    if len(clone_volumes) != 1:
        test_util.test_fail('Did not just find 1 volumes for [vm:] %s.' % cloned_vm.vm.uuid)
    else:
        test_util.test_logger('Find 1 volumes for [vm:] %s.' % cloned_vm.vm.uuid)
    root_ps = test_lib.lib_get_primary_storage_by_uuid(test_lib.lib_get_root_volume(cloned_vm.vm).primaryStorageUuid)
    expected_type = 'SharedBlock' if flavor['to_shared_vm'] else 'Ceph'
    assert root_ps.type == expected_type
    test_util.test_pass('VM Clone Without Volume Test Success')
def test():
    """Create a VM with one data volume as a normal (non-admin) account.

    Creates a normal account with a random name, logs in as it, shares the
    admin resources with it, creates a VM carrying one data volume, verifies
    the VM has exactly 2 volumes (root + data), then destroys the VM and
    deletes the account.
    """
    global vm
    global test_account_uuid
    import uuid

    account_name = uuid.uuid1().get_hex()
    # Fix: a first `account_pass = uuid.uuid1().get_hex()` assignment was dead
    # code -- it was immediately overwritten by the sha512 digest below, which
    # is the value actually used for both create and login.
    account_pass = hashlib.sha512(account_name).hexdigest()
    test_account = acc_ops.create_normal_account(account_name, account_pass)
    test_account_uuid = test_account.uuid
    test_account_session = acc_ops.login_by_account(account_name, account_pass)
    test_stub.share_admin_resource([test_account_uuid])

    vm = test_stub.create_vm_with_volume(session_uuid=test_account_session)
    vm.check()

    # Expect root volume + the one data volume assigned at creation.
    volumes_number = len(test_lib.lib_get_all_volumes(vm.vm))
    if volumes_number != 2:
        test_util.test_fail('Did not find 2 volumes for [vm:] %s. But we assigned 1 data volume when create the vm. We only catch %s volumes' % (vm.vm.uuid, volumes_number))
    else:
        test_util.test_logger('Find 2 volumes for [vm:] %s.' % vm.vm.uuid)

    vm.destroy(test_account_session)
    vm.check()
    acc_ops.delete_account(test_account_uuid)
    test_util.test_pass('Create VM with volume by normal user account Success')
def test():
    """Full-clone a VM with one attached data volume and verify the clone owns
    2 volumes (root + data)."""
    global test_obj_dict, bs, ps
    # Only the imagestore backup storage supports full clone.
    bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)
    for i in bs:
        if i.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
            break
    else:
        test_util.test_skip('Skip test on non-imagestore')
    # SharedBlock and AliyunNAS primary storage do not support this scenario.
    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
    for i in ps:
        if i.type in ['SharedBlock', 'AliyunNAS']:
            test_util.test_skip('Skip test on SharedBlock and AliyunNAS PS')
    volume_creation_option = test_util.VolumeOption()
    test_util.test_dsc('Create volume and check')
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    # Fix: removed unused locals (volume_uuid, vol_size) and a stale,
    # case-mangled commented-out resize/migrate sequence that referenced them.
    image_name = os.environ.get('imageName_s')
    l3_name = os.environ.get('l3PublicNetworkName')
    vm = test_stub.create_vm("test_vm", image_name, l3_name)
    test_obj_dict.add_vm(vm)
    volume.attach(vm)
    new_vm = vm.clone(['test_vm_clone_with_one_data_volume'], full=True)[0]
    test_obj_dict.add_vm(new_vm)
    volumes_number = len(test_lib.lib_get_all_volumes(new_vm.vm))
    if volumes_number != 2:
        test_util.test_fail('Did not find 2 volumes for [vm:] %s. But we assigned 2 data volume when create the vm. We only catch %s volumes' % (new_vm.vm.uuid, volumes_number))
    else:
        test_util.test_logger('Find 2 volumes for [vm:] %s.' % new_vm.vm.uuid)
    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Test clone vm with one data volume Success')
def test():
    """VM data-volume write-bandwidth QoS with full clone.

    Creates a VM whose instance and disk offerings carry a write-bandwidth
    limit, doubles the write QoS on both volumes, full-clones the VM and
    verifies the clone inherits the QoS, then checks that *read* bandwidth is
    NOT limited (only write QoS was ever set) before cleaning up.
    """
    global volume_offering_uuid, new_offering_uuid
    test_util.test_dsc('Test VM data volume bandwidth QoS by 20MB')
    # Only imagestore supports full clone.
    bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)
    for i in bs:
        if i.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
            break
    else:
        test_util.test_skip('Skip test on non-imagestore')
    # SharedBlock and AliyunNAS do not support full clone.
    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
    for i in ps:
        if i.type in ['SharedBlock', 'AliyunNAS']:
            test_util.test_skip('Skip test on SharedBlock and AliyunNAS PS')
    # Unit is KB.
    write_bandwidth = 10*1024*1024
    new_offering = test_lib.lib_create_instance_offering(write_bandwidth=write_bandwidth)
    new_offering_uuid = new_offering.uuid
    new_volume_offering = test_lib.lib_create_disk_offering(write_bandwidth=write_bandwidth)
    volume_offering_uuid = new_volume_offering.uuid
    vm = test_stub.create_vm(vm_name='vm_volume_qos', instance_offering_uuid=new_offering_uuid, disk_offering_uuids=[volume_offering_uuid])
    test_obj_dict.add_vm(vm)
    vm.check()
    vm_inv = vm.get_vm()
    # Locate the VM's data volume, format it and mount it inside the guest.
    cond = res_ops.gen_query_conditions("vmInstanceUuid", '=', vm_inv.uuid)
    cond = res_ops.gen_query_conditions("type", '=', 'Data', cond)
    volume_uuid = res_ops.query_resource(res_ops.VOLUME, cond)[0].uuid
    test_lib.lib_mkfs_for_volume(volume_uuid, vm_inv)
    path = '/mnt'
    user_name = 'root'
    # NOTE(review): placeholder credential -- confirm the real guest password
    # is injected elsewhere in the environment.
    user_password = '******'
    os.system("sshpass -p '%s' ssh %s@%s 'mount /dev/vdb1 %s'" % (user_password, user_name, vm_inv.vmNics[0].ip, path))
    vm.check()
    test_stub.make_ssh_no_password(vm_inv)
    # Double the write QoS on both the data and the root volume.
    vm_ops.set_vm_disk_qos(test_lib.lib_get_data_volumes(vm_inv)[0].uuid, write_bandwidth*2, 'write')
    vm_ops.set_vm_disk_qos(test_lib.lib_get_root_volume(vm_inv).uuid, write_bandwidth*2, 'write')
    new_vm = vm.clone(['full_cloned_vm'], full=True)[0]
    new_vm.check()
    test_obj_dict.add_vm(new_vm)
    volumes_number = len(test_lib.lib_get_all_volumes(new_vm.vm))
    if volumes_number != 2:
        test_util.test_fail('Did not find 2 volumes for [vm:] %s. But we assigned 2 data volume when create the vm. We only catch %s volumes' % (new_vm.vm.uuid, volumes_number))
    else:
        test_util.test_logger('Find 2 volumes for [vm:] %s.' % new_vm.vm.uuid)
    new_vm_inv = new_vm.get_vm()
    test_stub.install_fio(new_vm_inv)
    # The clone must inherit the doubled write QoS.
    if vm_ops.get_vm_disk_qos(test_lib.lib_get_data_volumes(new_vm_inv)[0].uuid).volumeBandwidthWrite != write_bandwidth*2:
        test_util.test_fail('Retrieved disk qos not match')
    test_stub.test_fio_bandwidth(new_vm_inv, write_bandwidth*2)
    test_stub.test_fio_bandwidth(new_vm_inv, write_bandwidth*2, path)
    # Only *write* QoS was configured, so read bandwidth must be unlimited.
    # Fix: the old failure messages said "only read qos was set", which
    # contradicted the setup above (and the final pass message said "read").
    if test_stub.test_fio_bandwidth(vm_inv, write_bandwidth, '/dev/vda', raise_exception=False):
        test_util.test_fail('disk read qos is not expected to have limit as only write qos was set')
    if test_stub.test_fio_bandwidth(vm_inv, write_bandwidth, '/dev/vdb', raise_exception=False):
        test_util.test_fail('disk read qos is not expected to have limit as only write qos was set')
    vm_ops.delete_instance_offering(new_offering_uuid)
    vol_ops.delete_disk_offering(volume_offering_uuid)
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('VM data volume write QoS Test Pass')