def test():
    """Create a VM with 22 additional data volumes (8 small + 14 root-sized),
    all expected to be exposed through virtio-scsi, and verify the guest sees
    22 /dev/sd* disks.

    Fixes: the test_dsc banner claimed "3 additional data volumes with 1 of
    them using virtio-scsi", contradicting the 22-volume/22-disk checks below.
    """
    global test_obj_dict
    test_util.test_dsc('Create a VM with 22 additional data volumes, all of them using virtio-scsi')
    disk_offering1 = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    disk_offering2 = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName'))
    # 8 small-offering volumes followed by 14 root-offering volumes (22 total).
    disk_offering_uuids = [disk_offering1.uuid] * 8 + [disk_offering2.uuid] * 14
    # NOTE(review): both virtio tags say num::14, but only 8 volumes use
    # disk_offering1 — presumably the count is an upper bound so all 8 still
    # get virtio-scsi (the 22-disk check below passes); confirm tag semantics.
    vm = test_stub.create_vlan_vm(
        system_tags=["virtio::diskOffering::%s::num::14" % (disk_offering2.uuid),
                     "virtio::diskOffering::%s::num::14" % (disk_offering1.uuid)],
        l3_name=os.environ.get('l3VlanNetworkName1'),
        disk_offering_uuids=disk_offering_uuids)
    test_obj_dict.add_vm(vm)
    vm.check()
    # 22 data volumes + 1 root volume.
    volumes_number = len(test_lib.lib_get_all_volumes(vm.vm))
    if volumes_number != 23:
        test_util.test_fail('Did not find 23 volumes for [vm:] %s. But we assigned 22 data volume when create the vm. We only catch %s volumes' % (vm.vm.uuid, volumes_number))
    else:
        test_util.test_logger('Find 23 volumes for [vm:] %s.' % vm.vm.uuid)
    # All 22 data disks must appear as scsi (/dev/sd*) devices in the guest.
    scsi_cmd = 'ls /dev/sd* | wc -l'
    if test_lib.lib_execute_command_in_vm(vm.get_vm(), scsi_cmd).strip() != '22':
        test_util.test_fail('Only expect 22 disk in virtio scsi mode')
    vm.destroy()
    test_util.test_pass('Create a VM with 22 additional data volumes with 22 of them using virtio-scsi PASS')
def share_admin_resource(account_uuid_list):
    """Share the admin's basic resources with the given accounts.

    Shares the first instance offering, the first non-ISO image, the first L3
    network, and both the root and small disk offerings.
    """
    non_iso = res_ops.gen_query_conditions('mediaType', '!=', 'ISO')
    shared_uuids = [
        res_ops.get_resource(res_ops.INSTANCE_OFFERING)[0].uuid,
        res_ops.query_resource(res_ops.IMAGE, non_iso)[0].uuid,
        res_ops.get_resource(res_ops.L3_NETWORK)[0].uuid,
        test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName')).uuid,
        test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName')).uuid,
    ]
    acc_ops.share_resources(account_uuid_list, shared_uuids)
def test():
    """Delete a data volume while every primary storage is in maintain mode,
    then recover the environment (enable PS, reconnect host, restart VRs).
    """
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    #l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    #vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    #vr_uuid = vr.uuid
    #l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    disk_offering = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('rootDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    #volume_creation_option.set_system_tags(['ephemeral::shareable', 'capability::virtio-scsi'])
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    volume.attach(vm)
    #ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    #ps_uuid = ps.uuid
    #ps_ops.change_primary_storage_state(ps_uuid, 'maintain')
    test_stub.maintain_all_pss()
    # Maintain mode is expected to stop the VM; wait for the SSH port to drop.
    if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail(
            'VM is expected to stop when PS change to maintain state')
    vm.set_state(vm_header.STOPPED)
    vm.check()
    volume.detach(vm.get_vm().uuid)
    #ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    test_stub.enable_all_pss()
    host_ops.reconnect_host(host_uuid)
    #vm_ops.reconnect_vr(vr_uuid)
    time.sleep(5)
    # Virtual routers were stopped by the maintain; start them all again.
    vrs = test_lib.lib_get_all_vrs()
    for vr in vrs:
        vm_ops.start_vm(vr.uuid)
    vm.start()
    vm.check()
    volume.delete()
    #volume.expunge()
    volume.check()
    vm.destroy()
    test_util.test_pass('Delete volume under PS maintain mode Test Success')
def test():
    """Create a shareable data volume on a dedicated ceph pool, attach it to a
    stopped VM, verify its install path lands on the pool, then tear down.
    """
    test_util.test_dsc('Create Data Volume on ceph pool for VM Test')
    cond = res_ops.gen_query_conditions('type', '=', inventory.CEPH_PRIMARY_STORAGE_TYPE)
    ps = res_ops.query_resource_with_num(res_ops.PRIMARY_STORAGE, cond, limit=1)
    if not ps:
        test_util.test_skip('skip test that ceph ps not found.')
    ps = ps[0]
    vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm)
    # Dedicated pool so the installPath check below can identify it by name.
    pool = ps_ops.create_ceph_primary_storage_pool(ps.uuid, 'woodpecker_7')
    test_util.test_dsc('Create shareable volume and check')
    disk_offering = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('rootDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_primary_storage_uuid(ps.uuid)
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume_creation_option.set_system_tags([
        'ceph::pool::woodpecker_7', 'ephemeral::shareable', 'capability::virtio-scsi'
    ])
    #volume_creation_option.set_system_tags(['ephemeral::shareable', 'capability::virtio-scsi'])
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    test_util.test_dsc('Attach shareable volume to stopped VM and check')
    #mv vm checker later, to save some time.
    vm.check()
    vm.stop()
    # Attach happens while the VM is stopped; verify after restart.
    volume.attach(vm)
    #volume.check()
    vm.start()
    vm.check()
    volume.check()
    if volume.get_volume().installPath.find('woodpecker') < 0:
        test_util.test_fail(
            'data volume is expected to create on pool woodpecker, while its %s.'
            % (volume.get_volume().installPath))
    test_util.test_dsc('Detach volume and check')
    volume.detach(vm.get_vm().uuid)
    volume.check()
    test_util.test_dsc('Delete volume and check')
    volume.delete()
    volume.check()
    test_obj_dict.rm_volume(volume)
    ps_ops.delete_ceph_primary_storage_pool(pool.uuid)
    vm.destroy()
    vm.check()
    test_util.test_pass(
        'Create Shareable Data Volume on ceph pool and attach to stopped VM Test Success'
    )
def test():
    """Create a data volume, attach it to a running VM, detach it, and delete it."""
    test_util.test_dsc('Create test vm and check')
    vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm)

    test_util.test_dsc('Create volume and check')
    offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName'))
    creation_option = test_util.VolumeOption()
    creation_option.set_disk_offering_uuid(offering.uuid)
    data_volume = test_stub.create_volume(creation_option)
    test_obj_dict.add_volume(data_volume)
    data_volume.check()

    test_util.test_dsc('Attach volume and check')
    # The VM checker is deferred to this point to save some time.
    vm.check()
    data_volume.attach(vm)
    data_volume.check()

    test_util.test_dsc('Detach volume and check')
    data_volume.detach()
    data_volume.check()

    test_util.test_dsc('Delete volume and check')
    data_volume.delete()
    data_volume.check()
    test_obj_dict.rm_volume(data_volume)

    vm.destroy()
    vm.check()
    test_util.test_pass('Create Data Volume for VM Test Success')
def test():
    """Cold-migrate a detached data volume to another local-storage host."""
    global test_obj_dict
    creation_option = test_util.VolumeOption()
    test_util.test_dsc('Create volume and check')
    small_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    creation_option.set_disk_offering_uuid(small_offering.uuid)
    data_volume = test_stub.create_volume(creation_option)
    test_obj_dict.add_volume(data_volume)
    data_volume.check()
    data_volume_uuid = data_volume.volume.uuid

    test_util.test_dsc('Create vm and check')
    vm = test_stub.create_vr_vm('migrate_volume_vm', 'imageName_s', 'l3VlanNetwork2')
    test_obj_dict.add_vm(vm)
    vm.check()
    vm_uuid = vm.vm.uuid

    # Cold volume migration only applies to local storage.
    ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')

    test_util.test_dsc('Attach data volume to vm and check')
    data_volume.attach(vm)
    dst_host = test_lib.lib_find_random_host_by_volume_uuid(data_volume_uuid)

    test_util.test_dsc('Detach data volume from vm and check')
    data_volume.detach(vm_uuid)
    vol_ops.migrate_volume(data_volume_uuid, dst_host.uuid)

    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Cold migrate Data Volume Test Success')
def create_vm_with_previous_iso(vm_creation_option=None, session_uuid=None):
    """Create a VM booting from the previously-added image named 'iso'.

    Uses the root disk offering from the environment; no data volumes.
    """
    iso_cond = res_ops.gen_query_conditions('name', '=', 'iso')
    iso_uuid = res_ops.query_resource(res_ops.IMAGE, iso_cond)[0].uuid
    offering_name = os.environ.get('rootDiskOfferingName')
    root_offering_uuid = test_lib.lib_get_disk_offering_by_name(offering_name).uuid
    return create_vm(vm_creation_option, None, root_offering_uuid,
                     iso_uuid, session_uuid=session_uuid)
def test():
    """Delete and expunge a shareable data volume while all primary storages
    are disabled, with the 'Delay' delete policy, then restore the environment.
    Requires ceph or SharedBlock primary storage.
    """
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    # Shareable virtio-scsi volumes are only supported on these PS types.
    allow_ps_list = [inventory.CEPH_PRIMARY_STORAGE_TYPE, "SharedBlock"]
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)
    test_util.test_dsc('Create test vm and check')
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, None)
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")
    # 'Delay' keeps deleted resources recoverable until explicitly expunged.
    test_lib.lib_set_delete_policy('vm', 'Delay')
    test_lib.lib_set_delete_policy('volume', 'Delay')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    vr_uuid = vr.uuid
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume_creation_option.set_system_tags(['ephemeral::shareable', 'capability::virtio-scsi'])
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    volume.attach(vm)
    #ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    #ps_uuid = ps.uuid
    #ps_ops.change_primary_storage_state(ps_uuid, 'disable')
    test_stub.disable_all_pss()
    # Disabling PS must NOT stop a running VM; verify SSH stays reachable.
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to running when PS change to disable state')
    vm.set_state(vm_header.RUNNING)
    vm.check()
    # Delete then expunge while the PS is still disabled.
    volume.delete()
    volume.check()
    volume.expunge()
    volume.check()
    #ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    test_stub.enable_all_pss()
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)
    vm.destroy()
    # Restore the default delete policies for subsequent tests.
    test_lib.lib_set_delete_policy('vm', 'Direct')
    test_lib.lib_set_delete_policy('volume', 'Direct')
    test_util.test_pass('Delete volume under PS disable mode Test Success')
def create_vm_with_previous_iso(vm_creation_option=None, session_uuid=None):
    """Create a VM from the already-registered image named 'iso', using the
    root disk offering configured in the environment.
    """
    image_uuid = res_ops.query_resource(
        res_ops.IMAGE, res_ops.gen_query_conditions('name', '=', 'iso'))[0].uuid
    root_disk_uuid = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('rootDiskOfferingName')).uuid
    return create_vm(vm_creation_option, None, root_disk_uuid, image_uuid,
                     session_uuid=session_uuid)
def create_vm_with_volume(vm_creation_option=None, data_volume_uuids=None, session_uuid=None):
    """Create a VM with data volumes; defaults to one small-disk-offering volume.

    NOTE(review): when defaulting, a disk-offering uuid is passed in the
    data_volume_uuids slot -- confirm create_vm accepts offering uuids there.
    """
    if not data_volume_uuids:
        small_offering = test_lib.lib_get_disk_offering_by_name(
            os.environ.get('smallDiskOfferingName'), session_uuid)
        data_volume_uuids = [small_offering.uuid]
    return create_vm(vm_creation_option, data_volume_uuids,
                     session_uuid=session_uuid)
def test():
    """Resize a data volume that has a snapshot, while its VM is stopped."""
    global test_obj_dict
    creation_option = test_util.VolumeOption()
    test_util.test_dsc('Create volume and check')
    small_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    creation_option.set_disk_offering_uuid(small_offering.uuid)
    data_volume = test_stub.create_volume(creation_option)
    test_obj_dict.add_volume(data_volume)
    data_volume.check()
    data_volume_uuid = data_volume.volume.uuid
    original_size = data_volume.volume.size

    image_name = os.environ.get('imageName_net')
    l3_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vm("test_resize_vm", image_name, l3_name)
    vm.check()
    data_volume.attach(vm)
    test_obj_dict.add_vm(vm)

    # Snapshot the volume before resizing.
    snapshots = test_obj_dict.get_volume_snapshot(data_volume_uuid)
    snapshots.set_utility_vm(vm)
    snapshots.create_snapshot('create_snapshot1')
    snapshots.check()

    vm.stop()
    vm.check()

    new_size = 5 * 1024 * 1024 * 1024
    vol_ops.resize_data_volume(data_volume_uuid, new_size)
    vm.update()
    resized_size = test_lib.lib_get_data_volumes(vm.get_vm())[0].size
    if new_size != resized_size:
        test_util.test_fail('Resize Data Volume failed, size = %s' % resized_size)

    test_obj_dict.rm_volume_snapshot(snapshots)
    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Resize Data Volume and Snapshot Test Success')
def test():
    """Export an ISO image from an image-store backup storage and verify the
    exported URL ends with '.iso'.
    """
    bs = test_lib.lib_get_image_store_backup_storage()
    if not bs:
        test_util.test_skip('Not find image store type backup storage.')

    img_option = test_util.ImageOption()
    img_option.set_name('iso')
    root_disk_uuid = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName')).uuid
    img_option.set_backup_storage_uuid_list([bs.uuid])
    # Build a throwaway ISO under the MN's tomcat static dir so it is
    # downloadable over HTTP.
    os.system("genisoimage -o %s/apache-tomcat/webapps/zstack/static/test.iso /tmp/" % (os.environ.get('zstackInstallPath')))
    img_option.set_url('http://%s:8080/zstack/static/test.iso' % (os.environ.get('node1Ip')))

    image = test_image.ZstackTestImage()
    image.set_image(img_ops.add_iso_template(img_option))
    image.set_creation_option(img_option)
    test_obj_dict.add_image(image)

    exported_url = image.export()
    image.delete_exported_image()
    test_lib.lib_robot_cleanup(test_obj_dict)

    if exported_url.endswith('.iso'):
        test_util.test_pass('Export ISO Image Test Success')
    else:
        test_util.test_fail('Export ISO Image Test Fail, exported ISO image ends with %s' % (exported_url.split('.')[-1]))
def test():
    """Resize a data volume attached to a stopped VM and verify the new size."""
    global test_obj_dict
    creation_option = test_util.VolumeOption()
    test_util.test_dsc('Create volume and check')
    small_offering = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('smallDiskOfferingName'))
    creation_option.set_disk_offering_uuid(small_offering.uuid)
    data_volume = test_stub.create_volume(creation_option)
    test_obj_dict.add_volume(data_volume)
    data_volume.check()
    data_volume_uuid = data_volume.volume.uuid
    original_size = data_volume.volume.size

    image_name = os.environ.get('imageName_net')
    l3_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vm("test_resize_vm", image_name, l3_name)
    vm.check()
    test_obj_dict.add_vm(vm)
    data_volume.attach(vm)

    # Resize only after the VM has been stopped.
    vm.stop()
    vm.check()
    new_size = 5 * 1024 * 1024 * 1024
    vol_ops.resize_data_volume(data_volume_uuid, new_size)
    vm.update()
    resized_size = test_lib.lib_get_data_volumes(vm.get_vm())[0].size
    if new_size != resized_size:
        test_util.test_fail('Resize Data Volume failed, size = %s' % resized_size)

    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Resize Data Volume Test Success')
def test():
    """Delete (without expunge) a shareable data volume while its primary
    storage is disabled, under the 'Delay' delete policy. Requires a ceph
    backup storage.
    """
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, None)
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")
    if bss[0].type != inventory.CEPH_BACKUP_STORAGE_TYPE:
        test_util.test_skip("not find available ceph backup storage. Skip test")
    # 'Delay' keeps deleted resources recoverable until explicitly expunged.
    test_lib.lib_set_delete_policy('vm', 'Delay')
    test_lib.lib_set_delete_policy('volume', 'Delay')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    vr_uuid = vr.uuid
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume_creation_option.set_system_tags(['ephemeral::shareable', 'capability::virtio-scsi'])
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    volume.attach(vm)
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'disable')
    # Disabling PS must NOT stop a running VM; verify SSH stays reachable.
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to running when PS change to disable state')
    vm.set_state(vm_header.RUNNING)
    vm.check()
    volume.delete()
    #volume.expunge()
    volume.check()
    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)
    vm.destroy()
    # Restore the default delete policies for subsequent tests.
    test_lib.lib_set_delete_policy('vm', 'Direct')
    test_lib.lib_set_delete_policy('volume', 'Direct')
    test_util.test_pass('Delete volume under PS disable mode Test Success')
def test():
    """Sync the size of a vCenter data volume, both detached and attached."""
    global test_obj_dict
    template_name = os.environ['vcenterDefaultmplate']
    l3_name = 'L3-%s' % os.environ['dportgroup']
    if not vct_ops.lib_get_vcenter_l3_by_name(l3_name):
        l3_name = 'L3-%s' % os.environ['portgroup0']
    large_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('largeDiskOfferingName'))

    vm = test_stub.create_vm_in_vcenter(vm_name='test_for_sync_volume_size_vm',
                                        image_name=template_name,
                                        l3_name=l3_name)
    test_obj_dict.add_vm(vm)
    vm.check()
    ps_uuid = vm.vm.allVolumes[0].primaryStorageUuid

    creation_option = test_util.VolumeOption()
    creation_option.set_disk_offering_uuid(large_offering.uuid)
    creation_option.set_name('test_for_sync_volume_size_volume')
    creation_option.set_primary_storage_uuid(ps_uuid)
    data_volume = test_stub.create_volume(creation_option)
    test_obj_dict.add_volume(data_volume)
    data_volume.check()

    # SyncVolumeSize on the detached volume, then again once attached.
    vol_ops.sync_volume_size(data_volume.get_volume().uuid)
    data_volume.attach(vm)
    data_volume.check()
    vol_ops.sync_volume_size(data_volume.get_volume().uuid)

    # cleanup
    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass("Test sync volume size in vcenter passed.")
def test():
    """Delete a VM's data disks directly in vCenter, resync the vCenter, and
    verify ZStack no longer reports any data volume for the VM.
    """
    global test_obj_dict
    volumes = []
    ova_image_name = os.environ['vcenterDefaultmplate']
    network_pattern = 'L3-%s' % os.environ['dportgroup']
    if not vct_ops.lib_get_vcenter_l3_by_name(network_pattern):
        network_pattern = 'L3-%s' % os.environ['portgroup0']
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('largeDiskOfferingName'))
    #create vm with disk
    vm = test_stub.create_vm_in_vcenter(vm_name='vm_2', image_name=ova_image_name, l3_name=network_pattern, disk_offering_uuids=[disk_offering.uuid, disk_offering.uuid])
    test_obj_dict.add_vm(vm)
    vm.check()
    vcenter = os.environ.get('vcenter')
    SI = vct_ops.connect_vcenter(vcenter)
    content = SI.RetrieveContent()
    # NOTE: rebinds `vm` from the ZStack test object to the vSphere VM object.
    vm = vct_ops.get_vm(content, name='vm_2')[0]
    # Index 2 is deleted twice -- presumably the second data disk shifts into
    # slot 2 after the first delete; confirm against vct_ops.delete_virtual_disk.
    vct_ops.delete_virtual_disk(vm, 2)
    vct_ops.delete_virtual_disk(vm, 2)
    vcenter_uuid = vct_ops.lib_get_vcenter_by_name(vcenter).uuid
    vct_ops.sync_vcenter(vcenter_uuid)
    time.sleep(5)
    allvolumes = vct_ops.lib_get_vm_by_name('vm_2').allVolumes
    # Only the root volume should remain after the sync.
    assert len(allvolumes) == 1
    for volume in allvolumes:
        if volume.type == 'Data':
            volumes.append(volume.installPath)
    assert set(volumes) == set([])
    #cleanup
    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass("sync after detaching disk from vm in vmware test passed.")
def test():
    """Export an ISO image (generated on the built-in HTTP server over SSH)
    and verify the exported URL ends with '.iso'.
    """
    bs = test_lib.lib_get_image_store_backup_storage()
    if not bs:
        test_util.test_skip('Not find image store type backup storage.')

    img_option = test_util.ImageOption()
    img_option.set_name('iso')
    root_disk_uuid = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName')).uuid
    img_option.set_backup_storage_uuid_list([bs.uuid])

    http_server_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
    # Make sure genisoimage is available on the HTTP server; install if missing.
    command = "command -v genisoimage"
    result = test_lib.lib_execute_ssh_cmd(http_server_ip, 'root', 'password', command)
    if not result:
        command = "yum -y install genisoimage --disablerepo=* --enablerepo=zstack-local"
        test_lib.lib_execute_ssh_cmd(http_server_ip, 'root', 'password', command)
    # Build a throwaway ISO inside the served repo directory.
    command = "genisoimage -o %s/apache-tomcat/webapps/zstack/static/zstack-repo/7/x86_64/os/test.iso /tmp/" % os.environ.get('zstackInstallPath')
    test_lib.lib_execute_ssh_cmd(http_server_ip, 'root', 'password', command)
    img_option.set_url('http://%s:8080/zstack/static/zstack-repo/7/x86_64/os/test.iso' % (http_server_ip))

    image = test_image.ZstackTestImage()
    image.set_image(img_ops.add_iso_template(img_option))
    image.set_creation_option(img_option)
    test_obj_dict.add_image(image)

    exported_url = image.export()
    image.delete_exported_image()
    test_lib.lib_robot_cleanup(test_obj_dict)

    if exported_url.endswith('.iso'):
        test_util.test_pass('Export ISO Image Test Success')
    else:
        test_util.test_fail('Export ISO Image Test Fail, exported ISO image ends with %s' % (exported_url.split('.')[-1]))
def test():
    """Resize a detached data volume on the first primary storage and verify
    the new size via a volume query.
    """
    global test_obj_dict
    creation_option = test_util.VolumeOption()
    first_ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE, [])[0]
    test_util.test_dsc('Create volume and check')
    small_offering = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('smallDiskOfferingName'))
    creation_option.set_disk_offering_uuid(small_offering.uuid)
    creation_option.set_primary_storage_uuid(first_ps.uuid)
    if first_ps.type == "LocalStorage":
        # Local-storage volumes must be pinned to a host at creation time.
        host = test_lib.lib_find_random_host()
        creation_option.set_system_tags(
            ["localStorage::hostUuid::%s" % host.uuid])
    data_volume = test_stub.create_volume(creation_option)
    test_obj_dict.add_volume(data_volume)
    data_volume.check()
    data_volume_uuid = data_volume.volume.uuid
    original_size = data_volume.volume.size

    new_size = 5 * 1024 * 1024 * 1024
    vol_ops.resize_data_volume(data_volume_uuid, new_size)
    cond = res_ops.gen_query_conditions('type', '=', "Data")
    cond = res_ops.gen_query_conditions('status', '=', "Ready", cond)
    size_after = res_ops.query_resource(res_ops.VOLUME, cond)[0].size
    if new_size != size_after:
        test_util.test_fail('Resize Data Volume failed, size = %s' % size_after)

    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Resize Data Volume Test Success')
def test():
    """Migrate a vCenter VM (with and without an extra data disk) to a
    candidate host and verify placement consistency in ZStack and vSphere.
    Also verifies a suspended VM has no migration candidates.
    """
    global test_obj_dict
    #enable vmware vmotion
    SI = vct_ops.connect_vcenter(os.environ['vcenter'])
    content = SI.RetrieveContent()
    hosts = vct_ops.get_host(content)
    for host in hosts:
        vct_ops.enable_vmotion(host)
    network_pattern = 'L3-%s' % os.environ['dportgroup']
    if not vct_ops.lib_get_vcenter_l3_by_name(network_pattern):
        network_pattern = 'L3-%s' % os.environ['portgroup0']
    ova_image_name = os.environ['vcenterDefaultmplate']
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('largeDiskOfferingName'))
    #create vm
    vm = test_stub.create_vm_in_vcenter(vm_name='migrate_vm', image_name=ova_image_name, l3_name=network_pattern)
    vm.check()
    test_obj_dict.add_vm(vm)
    #check whether vm migration candidate hosts exist
    candidate_hosts = vm_ops.get_vm_migration_candidate_hosts(vm.vm.uuid).inventories
    if candidate_hosts == []:
        test_util.test_logger('Not find vm migration candidate hosts, skip test migrate vm')
    else:
        test_util.test_dsc('Migrate vm to the specified host')
        host_uuid = candidate_hosts[0].uuid
        vm_ops.migrate_vm(vm.vm.uuid, host_uuid)
        vm.update()
        vm.check()
        #check whether the specified host is effective
        assert candidate_hosts[0].name == test_lib.lib_find_host_by_vm(vm.vm).name
        #check the consistency of the migration in zstack and vmware
        assert candidate_hosts[0].name == vct_ops.find_host_by_vm(content, vm.vm.name)
    test_util.test_dsc('vm in suspended state does not allow migration')
    vm.suspend()
    candidate_host = vm_ops.get_vm_migration_candidate_hosts(vm.vm.uuid).inventories
    assert candidate_host == []
    #create vm with disk
    vm1 = test_stub.create_vm_in_vcenter(vm_name='migrate_vm_with_disk', image_name=ova_image_name, l3_name=network_pattern, disk_offering_uuids=[disk_offering.uuid])
    vm1.check()
    test_obj_dict.add_vm(vm1)
    #check whether vm migration candidate hosts exist
    candidate_hosts = vm_ops.get_vm_migration_candidate_hosts(vm1.vm.uuid).inventories
    if candidate_hosts == []:
        test_util.test_logger('Not find vm migration candidate hosts, skip test migrate vm with disk')
    else:
        test_util.test_dsc('Migrate vm with disk to the specified host')
        host_uuid = candidate_hosts[0].uuid
        vm_ops.migrate_vm(vm1.vm.uuid, host_uuid)
        vm1.update()
        vm1.check()
        assert candidate_hosts[0].name == test_lib.lib_find_host_by_vm(vm1.vm).name
        assert candidate_hosts[0].name == vct_ops.find_host_by_vm(content, vm1.vm.name)
    #cleanup
    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass("Migrate vm test passed.")
def test():
    """Run SyncVolumeSize on a vCenter data volume before and after attach."""
    global test_obj_dict
    image_name = os.environ['vcenterDefaultmplate']
    net_name = 'L3-%s' % os.environ['dportgroup']
    if not vct_ops.lib_get_vcenter_l3_by_name(net_name):
        net_name = 'L3-%s' % os.environ['portgroup0']
    offering = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('largeDiskOfferingName'))

    vm = test_stub.create_vm_in_vcenter(vm_name='test_for_sync_volume_size_vm',
                                        image_name=image_name,
                                        l3_name=net_name)
    test_obj_dict.add_vm(vm)
    vm.check()

    # Place the data volume on the same primary storage as the root volume.
    root_ps_uuid = vm.vm.allVolumes[0].primaryStorageUuid
    option = test_util.VolumeOption()
    option.set_disk_offering_uuid(offering.uuid)
    option.set_name('test_for_sync_volume_size_volume')
    option.set_primary_storage_uuid(root_ps_uuid)
    volume = test_stub.create_volume(option)
    test_obj_dict.add_volume(volume)
    volume.check()

    #SyncVolumeSize
    vol_ops.sync_volume_size(volume.get_volume().uuid)
    volume.attach(vm)
    volume.check()
    vol_ops.sync_volume_size(volume.get_volume().uuid)

    #cleanup
    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass("Test sync volume size in vcenter passed.")
def test():
    """Resize a standalone data volume and confirm the reported size changes."""
    global test_obj_dict
    option = test_util.VolumeOption()
    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE, [])[0]
    test_util.test_dsc('Create volume and check')
    offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    option.set_disk_offering_uuid(offering.uuid)
    option.set_primary_storage_uuid(ps.uuid)
    if ps.type == "LocalStorage":
        # On local storage the volume needs an explicit host binding.
        option.set_system_tags(
            ["localStorage::hostUuid::%s" % test_lib.lib_find_random_host().uuid])
    volume = test_stub.create_volume(option)
    test_obj_dict.add_volume(volume)
    volume.check()
    volume_uuid = volume.volume.uuid
    size_before = volume.volume.size

    target_size = 5 * 1024 * 1024 * 1024
    vol_ops.resize_data_volume(volume_uuid, target_size)
    query_cond = res_ops.gen_query_conditions('type', '=', "Data")
    query_cond = res_ops.gen_query_conditions('status', '=', "Ready", query_cond)
    size_now = res_ops.query_resource(res_ops.VOLUME, query_cond)[0].size
    if target_size != size_now:
        test_util.test_fail('Resize Data Volume failed, size = %s' % size_now)

    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Resize Data Volume Test Success')
def test():
    """Clone a VM that has a data volume attached and verify each clone got
    only a root volume (data volumes must not be cloned).

    Relies on the module-level `vm_names` list; names are removed from it as
    clones are matched, so it is consumed by this test.
    """
    vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm)
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    #volume_creation_option.set_system_tags(['ephemeral::shareable', 'capability::virtio-scsi'])
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    volume.attach(vm)

    new_vms = vm.clone(vm_names)
    for new_vm in new_vms:
        test_obj_dict.add_vm(new_vm)
    if len(new_vms) != len(vm_names):
        test_util.test_fail('only %s VMs have been cloned, which is less than required: %s' % (len(new_vms), vm_names))
    for new_vm in new_vms:
        new_vm = new_vm.get_vm()
        try:
            vm_names.remove(new_vm.name)
            test_util.test_logger('VM:%s name: %s is found' % (new_vm.uuid, new_vm.name))
        except ValueError:
            # BUG FIX: was a bare `except:`; only list.remove() failing means
            # the clone's name was not among the requested names.
            test_util.test_fail('%s vm name: %s is not in list: %s' % (new_vm.uuid, new_vm.name, vm_names))
    # BUG FIX: lib_get_data_volumes takes a VM inventory (cf. the other call
    # sites passing vm.get_vm()), not the ZstackTestVm wrapper.
    if test_lib.lib_get_data_volumes(new_vms[0].get_vm()) != []:
        test_util.test_fail('The cloned vm is still have data volume, the expected behavior is only clone root volume.')

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Clone VM Test Success')
def test():
    """Shareable-volume test on ocfs2: attach one shareable volume to two VMs
    (plus a normal volume to one of them), verify cross-VM visibility, then
    detach, delete, expunge and destroy everything.
    """
    test_util.test_dsc('Create test vm and check')
    vm1 = test_stub.create_vm(vm_name="vm1", image_name="ocfs2-host-image")
    test_obj_dict.add_vm(vm1)
    vm2 = test_stub.create_vm(vm_name="vm2", image_name="ocfs2-host-image")
    test_obj_dict.add_vm(vm2)
    vm1.check()
    vm2.check()
    test_util.test_dsc('Create volume and check')
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    normal_volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(normal_volume)
    normal_volume.check()
    # The same option object is reused, now tagged shareable + virtio-scsi.
    volume_creation_option.set_system_tags(['ephemeral::shareable', 'capability::virtio-scsi'])
    sharable_volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(sharable_volume)
    sharable_volume.check()
    normal_volume.check()
    test_util.test_dsc('Attach volume and check')
    #mv vm checker later, to save some time.
    normal_volume.attach(vm2)
    # A shareable volume may be attached to both VMs at once.
    sharable_volume.attach(vm1)
    sharable_volume.attach(vm2)
    sharable_volume.check()
    normal_volume.check()
    # Configure the ocfs2 cluster in the guests and verify both see the disk.
    config_ocfs2_vms(vm1, vm2)
    check_sharable_volume(vm1, vm2)
    test_util.test_dsc('Detach volume and check')
    sharable_volume.detach(vm1.get_vm().uuid)
    sharable_volume.detach(vm2.get_vm().uuid)
    normal_volume.detach(vm2.get_vm().uuid)
    sharable_volume.check()
    normal_volume.check()
    test_util.test_dsc('Delete volume and check')
    sharable_volume.delete()
    sharable_volume.expunge()
    normal_volume.delete()
    normal_volume.expunge()
    sharable_volume.check()
    normal_volume.check()
    test_obj_dict.rm_volume(sharable_volume)
    test_obj_dict.rm_volume(normal_volume)
    vm1.destroy()
    vm2.destroy()
    vm1.check()
    vm2.check()
    vm1.expunge()
    vm2.expunge()
    test_util.test_pass('Create Data Volume for VM Test Success')
def test():
    """Create a data volume from a volume template, snapshot it twice, then
    cold-migrate it to another local-storage host.
    """
    global test_obj_dict
    #volume_creation_option = test_util.VolumeOption()
    #test_util.test_dsc('Create volume and check')
    #disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    #volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    #volume = test_stub.create_volume(volume_creation_option)
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, None, fields=['uuid'])
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")
    volume_creation_option = test_util.VolumeOption()
    test_util.test_dsc('Create volume and check')
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume1 = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume1)
    volume1.check()
    volume_uuid = volume1.volume.uuid
    test_util.test_dsc('Create vm and check')
    vm = test_stub.create_vr_vm('migrate_volume_vm', 'imageName_net', 'l3VlanNetwork2')
    test_obj_dict.add_vm(vm)
    vm.check()
    vm_uuid = vm.vm.uuid
    # Attach once then detach so the volume is initialized but free.
    volume1.attach(vm)
    volume1.detach(vm_uuid)
    vm.stop()
    # Create a volume template on the backup storage while the VM is stopped.
    image_obj = volume1.create_template([bss[0].uuid])
    vm.start()
    host_uuid = vm.vm.hostUuid
    ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    # Instantiate a second data volume from the template on the VM's PS/host.
    volume2 = image_obj.create_data_volume(ps.uuid, 'volumeName', host_uuid)
    test_obj_dict.add_volume(volume2)
    volume2.check()
    # From here on, operate on the template-derived volume.
    volume_uuid = volume2.volume.uuid
    ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')
    snapshots = test_obj_dict.get_volume_snapshot(volume_uuid)
    snapshots.set_utility_vm(vm)
    snapshots.create_snapshot('create_snapshot1')
    snapshots.check()
    snapshots.create_snapshot('create_snapshot2')
    snapshots.check()
    target_host = test_lib.lib_find_random_host_by_volume_uuid(volume_uuid)
    target_host_uuid = target_host.uuid
    vol_ops.migrate_volume(volume_uuid, target_host_uuid)
    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Cold migrate Data Volume from Template with Snapshot Test Success')
def create_vm_with_iso(vm_name, l3_name, session_uuid=None):
    """Register a fake ISO (the ks.cfg served by the management node) and
    create a VM booting from it on the given L3 network.
    """
    img_option = test_util.ImageOption()
    img_option.set_name('fake_iso')
    root_disk_uuid = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('mediumDiskOfferingName')).uuid
    bs_uuid = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, [], session_uuid)[0].uuid
    img_option.set_backup_storage_uuid_list([bs_uuid])
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    img_option.set_url('http://%s:8080/zstack/static/zstack-dvd/ks.cfg' % (mn_ip))
    image_uuid = img_ops.add_iso_template(img_option).uuid

    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    user_vm_cond = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(
        res_ops.INSTANCE_OFFERING, user_vm_cond)[0].uuid

    vm_creation_option = test_util.VmOption()
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_root_disk_uuid(root_disk_uuid)
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name(vm_name)

    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    return vm
def test(): imagestore_backup_storage = test_lib.lib_get_image_store_backup_storage() if not imagestore_backup_storage: test_util.test_skip('Not find image store type backup storage.') img_option = test_util.ImageOption() img_option.set_name('iso') root_disk_uuid = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName')).uuid bs_uuid = imagestore_backup_storage.uuid img_option.set_backup_storage_uuid_list([bs_uuid]) os.system("genisoimage -o %s/apache-tomcat/webapps/zstack/static/test.iso /tmp/" % (os.environ.get('zstackInstallPath'))) img_option.set_url('http://%s:8080/zstack/static/test.iso' % (os.environ.get('node1Ip'))) image_inv = img_ops.add_iso_template(img_option) image = test_image.ZstackTestImage() image.set_image(image_inv) image.set_creation_option(img_option) test_obj_dict.add_image(image) image_url = image.export() image.delete_exported_image() test_lib.lib_robot_cleanup(test_obj_dict) if image_url.endswith('.iso'): test_util.test_pass('Export ISO Image Test Success') else: test_util.test_fail('Export ISO Image Test Fail, exported ISO image ends with %s' % (image_url.split('.')[-1]))
def test(): test_util.test_dsc('Create test vm and check') vm = test_stub.create_vlan_vm() test_obj_dict.add_vm(vm) test_util.test_dsc('Create volume and check') disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName')) volume_creation_option = test_util.VolumeOption() volume_creation_option.set_disk_offering_uuid(disk_offering.uuid) volume = test_stub.create_volume(volume_creation_option) test_obj_dict.add_volume(volume) volume.check() test_util.test_dsc('Attach volume and check') vm.check() volume.attach(vm) volume.check() test_util.test_dsc('Stop VM') vm.stop() test_util.test_dsc('Detach volume and check') volume.detach() volume.check() test_util.test_dsc('Attach volume to stopped VM and check') volume.attach(vm) volume.check() test_util.test_dsc('Detach volume from stopped VM again and check') volume.detach() volume.check() volume2 = test_stub.create_volume(volume_creation_option) test_obj_dict.add_volume(volume2) test_util.test_dsc('Attach new volume2 to stopped VM and check') volume2.attach(vm) volume2.check() test_util.test_dsc('Detach volume2 from stopped VM again and check') volume2.detach() volume2.check() test_util.test_dsc('Attach new volume2 to stopped VM again and start vm') volume2.attach(vm) vm.start() test_util.test_dsc('Detach volume2 from running VM again and check') volume2.detach() volume2.check() test_util.test_dsc('Delete volume and check') volume.delete() volume.check() test_obj_dict.rm_volume(volume) volume2.delete() test_obj_dict.rm_volume(volume2) vm.destroy() test_util.test_pass('Do Volumes ops on stopped VM Success')
def test(): test_util.test_dsc('Create test vm and check') vm = test_stub.create_vlan_vm() test_obj_dict.add_vm(vm) test_util.test_dsc('Create volume and check') disk_offering = test_lib.lib_get_disk_offering_by_name( os.environ.get('rootDiskOfferingName')) volume_creation_option = test_util.VolumeOption() volume_creation_option.set_disk_offering_uuid(disk_offering.uuid) volume = test_stub.create_volume(volume_creation_option) test_obj_dict.add_volume(volume) volume.check() test_util.test_dsc('Attach volume and check') #mv vm checker later, to save some time. vm.check() volume.attach(vm) volume.check() test_util.test_dsc('Detach volume and check') volume.detach() volume.check() test_util.test_dsc('Delete volume and check') volume.delete() volume.check() test_obj_dict.rm_volume(volume) vm.destroy() vm.check() test_util.test_pass('Create Data Volume for VM Test Success')
def test(): global new_offering_uuid test_util.test_dsc('Test VM disk bandwidth QoS by 20MB') #unit is KB volume_bandwidth = 25*1024*1024 new_offering = test_lib.lib_create_instance_offering(volume_bandwidth = volume_bandwidth) new_offering_uuid = new_offering.uuid vm = test_stub.create_vm(vm_name = 'vm_volume_qos', \ instance_offering_uuid = new_offering.uuid) test_obj_dict.add_vm(vm) vm.check() volume_creation_option = test_util.VolumeOption() disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('largeDiskOfferingName')) volume_creation_option.set_disk_offering_uuid(disk_offering.uuid) volume_creation_option.set_name('volume-1') volume = test_stub.create_volume(volume_creation_option) test_obj_dict.add_volume(volume) vm_inv = vm.get_vm() test_lib.lib_mkfs_for_volume(volume.get_volume().uuid, vm_inv) mount_point = '/tmp/zstack/test' test_stub.attach_mount_volume(volume, vm, mount_point) test_stub.make_ssh_no_password(vm_inv) test_stub.install_fio(vm_inv) test_stub.test_fio_bandwidth(vm_inv, volume_bandwidth, mount_point) vm_ops.delete_instance_offering(new_offering_uuid) test_lib.lib_robot_cleanup(test_obj_dict) test_util.test_pass('VM Disk QoS Test Pass')
def create_vm_with_iso(vm_creation_option=None, session_uuid=None): img_option = test_util.ImageOption() img_option.set_name('iso') root_disk_uuid = test_lib.lib_get_disk_offering_by_name( os.environ.get('rootDiskOfferingName')).uuid bs_uuid = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, [], session_uuid)[0].uuid img_option.set_backup_storage_uuid_list([bs_uuid]) if os.path.exists("%s/apache-tomcat/webapps/zstack/static/zstack-repo/" % (os.environ.get('zstackInstallPath'))): os.system( "genisoimage -o %s/apache-tomcat/webapps/zstack/static/zstack-repo/7/x86_64/os/test.iso /tmp/" % (os.environ.get('zstackInstallPath'))) img_option.set_url( 'http://%s:8080/zstack/static/zstack-repo/7/x86_64/os/test.iso' % (os.environ.get('node1Ip'))) else: os.system( "genisoimage -o %s/apache-tomcat/webapps/zstack/static/test.iso /tmp/" % (os.environ.get('zstackInstallPath'))) img_option.set_url('http://%s:8080/zstack/static/test.iso' % (os.environ.get('node1Ip'))) image_uuid = img_ops.add_iso_template(img_option).uuid return create_vm(vm_creation_option, None, root_disk_uuid, image_uuid, session_uuid=session_uuid)
def test(): global test_obj_dict global ps_uuid global host_uuid global vr_uuid test_lib.lib_set_delete_policy('vm', 'Delay') test_lib.lib_set_delete_policy('volume', 'Delay') test_util.test_dsc('Create test vm and check') l3_1_name = os.environ.get('l3VlanNetworkName1') vm = test_stub.create_vlan_vm(l3_name=l3_1_name) l3_1 = test_lib.lib_get_l3_by_name(l3_1_name) vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0] vr_uuid = vr.uuid host = test_lib.lib_get_vm_host(vm.get_vm()) host_uuid = host.uuid test_obj_dict.add_vm(vm) vm.check() disk_offering = test_lib.lib_get_disk_offering_by_name( os.environ.get('rootDiskOfferingName')) volume_creation_option = test_util.VolumeOption() volume_creation_option.set_disk_offering_uuid(disk_offering.uuid) volume_creation_option.set_system_tags( ['ephemeral::shareable', 'capability::virtio-scsi']) volume = test_stub.create_volume(volume_creation_option) test_obj_dict.add_volume(volume) volume.check() volume.delete() volume.check() ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm()) ps_uuid = ps.uuid ps_ops.change_primary_storage_state(ps_uuid, 'disable') if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 90): test_util.test_fail( 'VM is expected to running when PS change to disable state') vm.set_state(vm_header.RUNNING) vm.check() volume.recover() volume.check() ps_ops.change_primary_storage_state(ps_uuid, 'enable') host_ops.reconnect_host(host_uuid) vm_ops.reconnect_vr(vr_uuid) volume.delete() volume.expunge() volume.check() vm.destroy() test_lib.lib_set_delete_policy('vm', 'Direct') test_lib.lib_set_delete_policy('volume', 'Direct') test_util.test_pass('Delete volume under PS disable mode Test Success')
def test(): global test_obj_dict cond = res_ops.gen_query_conditions('name', '=', 'newdatastore') ps_uuid = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)[0].uuid cond = res_ops.gen_query_conditions('name', '=', 'newdatastore (1)') ps1_uuid = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)[0].uuid centos_image_name = os.environ['image_dhcp_name'] if os.environ['dportgroup']: network_pattern = os.environ['dportgroup'] network_pattern = 'L3-%s' % network_pattern else: network_pattern = os.environ['portgroup0'] network_pattern = 'L3-%s' % network_pattern disk_offering = test_lib.lib_get_disk_offering_by_name( os.environ.get('largeDiskOfferingName')) #create vm vm = test_stub.create_vm_in_vcenter(vm_name='vm-create', image_name=centos_image_name, l3_name=network_pattern) vm.check() test_util.test_dsc('Create volume and check') volume_creation_option = test_util.VolumeOption() volume_creation_option.set_disk_offering_uuid(disk_offering.uuid) volume_creation_option.set_name('vcenter_volume') volume = test_stub.create_volume(volume_creation_option) test_obj_dict.add_volume(volume) volume.check() volume_creation_option.set_primary_storage_uuid(ps_uuid) volume_creation_option.set_name('vcenter_volume_ps') volume_ps = test_stub.create_volume(volume_creation_option) test_obj_dict.add_volume(volume_ps) volume_ps.check() volume_creation_option.set_primary_storage_uuid(ps1_uuid) volume_creation_option.set_name('vcenter_volume_ps1') volume_ps1 = test_stub.create_volume(volume_creation_option) test_obj_dict.add_volume(volume_ps1) volume_ps1.check() test_util.test_dsc('Attach volume and check') volume.attach(vm) volume.check() volume_ps.attach(vm) volume_ps.check() try: volume_ps1.attach(vm) except: test_util.test_logger('test for volume_ps1 pass') else: test_util.test_fail('volume_ps1 should not attach to vm') #cleanup test_lib.lib_error_cleanup(test_obj_dict) test_util.test_pass("Attach data volumes to vm test passed.")
def test(): allow_ps_list = [inventory.CEPH_PRIMARY_STORAGE_TYPE, "SharedBlock"] test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list) flavor = case_flavor[os.environ.get('CASE_FLAVOR')] test_util.test_dsc('Create test vm and check') vm = test_stub.create_vlan_vm() test_obj_dict.add_vm(vm) test_util.test_dsc('Create volume and check') disk_offering = test_lib.lib_get_disk_offering_by_name( os.environ.get('smallDiskOfferingName')) volume_creation_option = test_util.VolumeOption() volume_creation_option.set_disk_offering_uuid(disk_offering.uuid) volume_creation_option.set_system_tags( ['ephemeral::shareable', 'capability::virtio-scsi']) volume = test_stub.create_volume(volume_creation_option) test_obj_dict.add_volume(volume) test_util.test_dsc('Attach volume and check') #vm.check() volume.attach(vm) if flavor['vm_running'] == False: vm.stop() if flavor['vm_running'] == True: allow_ps_list = [inventory.CEPH_PRIMARY_STORAGE_TYPE] test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list) ps_uuid = volume.get_volume().primaryStorageUuid ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid) test_util.test_dsc('Create volume template and check') bs_list = test_lib.lib_get_backup_storage_list_by_vm(vm.get_vm()) bs_uuid_list = [] for bs in bs_list: bs_uuid_list.append(bs.uuid) vol_tmpt = volume.create_template(bs_uuid_list, 'new_data_template') test_obj_dict.add_image(vol_tmpt) vol_tmpt.check() volume.check() volume.delete() test_obj_dict.rm_volume(volume) test_util.test_dsc('Create volume from template and check') volume2 = vol_tmpt.create_data_volume(ps_uuid, 'new_volume_from_template') test_obj_dict.add_volume(volume2) vol_tmpt.delete() test_obj_dict.rm_image(vol_tmpt) volume2.check() volume2.attach(vm) vm.check() volume2.check() volume2.detach() volume2.delete() test_obj_dict.rm_volume(volume2) vm.destroy() test_util.test_pass( 'Create Sharable Data Volume Template from Data Volume Success.')
def test():
    """Start a v2v migration long job from a vCenter VM, cancel it mid-flight,
    and verify the conversion cache and the target VM are cleaned up.

    Fixes vs original:
    - ps_uuid now comes from the PS the for/else loop actually matched
      (Ceph/Sharedblock), not blindly from ps[0];
    - the final check used ``vm != None`` on a query-result *list* (always
      true) and called the nonexistent ``test_util.fail`` — now ``if vm:``
      with ``test_util.test_fail``.
    """
    global vm
    ova_image_name = 'centos-dhcp'
    network_pattern1 = os.environ['vcenterDefaultNetwork']
    cpuNum = 2
    memorySize = 2*1024*1024*1024
    cond = res_ops.gen_query_conditions('type', '!=', 'Vcenter')
    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
    # pick a Ceph/Sharedblock PS; skip if none exists
    for i in ps:
        if (i.type == 'Ceph') or (i.type == 'Sharedblock'):
            break
    else:
        test_util.test_skip('Skip test on non ceph or sharedblock PS')
    # use the PS matched above (original erroneously took ps[0])
    ps_uuid = i.uuid
    cond = res_ops.gen_query_conditions('primaryStorage.uuid', '=', ps_uuid)
    cluster_uuid = res_ops.query_resource(res_ops.CLUSTER, cond)[0].uuid
    cond = res_ops.gen_query_conditions('clusterUuid', '=', cluster_uuid)
    host = res_ops.query_resource(res_ops.HOST, cond)[0]
    new_offering = test_lib.lib_create_instance_offering(cpuNum = cpuNum,
                                                         memorySize = memorySize)
    vm = test_stub.create_vm_in_vcenter(vm_name = 'v2v-test',
                                        image_name = ova_image_name,
                                        l3_name = network_pattern1,
                                        instance_offering_uuid = new_offering.uuid)
    vm.check()
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume_creation_option.set_name('vcenter_volume')
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    volume.attach(vm)
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    v2v_conversion_host = test_stub.add_v2v_conversion_host('v2v_host',
            host.uuid, '/tmp/zstack', 'VMWARE')
    url = 'vmware://%s' % vm.vm.uuid
    migrate_task = test_stub.convert_vm_from_foreign_hypervisor('test', url,
            cpuNum, memorySize, ps_uuid, [l3_uuid], cluster_uuid,
            v2v_conversion_host.uuid)
    cond = res_ops.gen_query_conditions('uuid', '=', migrate_task.uuid)
    long_job = res_ops.query_resource(res_ops.LONGJOB, cond)[0]
    assert long_job.state == 'Running'
    # while the job runs, the conversion cache must exist on the host
    if check_cache(vm.vm.uuid, host.ip) == 1:
        test_util.test_fail('There should be cache in installpath:/tmp/zstack.')
    time.sleep(5)
    longjob_ops.cancel_longjob(long_job.uuid)
    long_job = res_ops.query_resource(res_ops.LONGJOB, cond)[0]
    assert long_job.state == 'Canceled'
    # after cancel, the cache must be gone
    if check_cache(vm.vm.uuid, host.ip) == 0:
        test_util.test_fail('There are still cache in installpath: /tmp/zstack.')
    cond = res_ops.gen_query_conditions('name', '=', 'test')
    vm = res_ops.query_resource(res_ops.VM_INSTANCE, cond)
    # query_resource returns a list; non-empty means the converted VM leaked
    if vm:
        test_util.test_fail('There are still vm after cancel long job.')
    #cleanup
    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass("Cancel v2v long job successfully.")
def test(): global test_obj_dict global ps_uuid global host_uuid global vr_uuid test_util.test_dsc('Create test vm and check') test_lib.lib_set_delete_policy('vm', 'Delay') test_lib.lib_set_delete_policy('volume', 'Delay') l3_1_name = os.environ.get('l3VlanNetworkName1') vm = test_stub.create_vlan_vm(l3_name=l3_1_name) #l3_1 = test_lib.lib_get_l3_by_name(l3_1_name) #vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0] #vr_uuid = vr.uuid host = test_lib.lib_get_vm_host(vm.get_vm()) host_uuid = host.uuid test_obj_dict.add_vm(vm) vm.check() disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName')) volume_creation_option = test_util.VolumeOption() volume_creation_option.set_disk_offering_uuid(disk_offering.uuid) volume_creation_option.set_system_tags(['ephemeral::shareable', 'capability::virtio-scsi']) volume = test_stub.create_volume(volume_creation_option) test_obj_dict.add_volume(volume) volume.check() volume.delete() volume.check() ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm()) ps_uuid = ps.uuid ps_ops.change_primary_storage_state(ps_uuid, 'maintain') if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90): test_util.test_fail('VM is expected to stop when PS change to maintain state') vm.set_state(vm_header.STOPPED) vm.check() volume.recover() volume.check() ps_ops.change_primary_storage_state(ps_uuid, 'enable') host_ops.reconnect_host(host_uuid) #vm_ops.reconnect_vr(vr_uuid) vrs = test_lib.lib_get_all_vrs() for vr in vrs: vm_ops.start_vm(vr.uuid) vm.start() vm.check() volume.delete() #volume.expunge() volume.check() vm.destroy() test_lib.lib_set_delete_policy('vm', 'Direct') test_lib.lib_set_delete_policy('volume', 'Direct') test_util.test_pass('Delete volume under PS maintain mode Test Success')
def test(): global test_obj_dict, VOL_OPS, VM_STATE_OPS, utility_vm, backup ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)[0] vm_name = "test_vm" utility_vm_name = "utility_vm" cond = res_ops.gen_query_conditions("system", '=', "false") cond = res_ops.gen_query_conditions("mediaType", '=', "RootVolumeTemplate", cond) cond = res_ops.gen_query_conditions("platform", '=', "Linux", cond) img_name = res_ops.query_resource(res_ops.IMAGE, cond)[0].name cond = res_ops.gen_query_conditions("category", '=', "Private") l3_name = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0].name disk_offering = test_lib.lib_get_disk_offering_by_name( os.environ.get('smallDiskOfferingName')) disk_offering_uuids = [disk_offering.uuid] vm = test_stub.create_vm(vm_name, img_name, l3_name, disk_offering_uuids=disk_offering_uuids) utility_vm = test_stub.create_vm(utility_vm_name, img_name, l3_name) dvol = zstack_volume_header.ZstackTestVolume() dvol.set_volume(test_lib.lib_get_data_volumes(vm.get_vm())[0]) dvol.set_state(volume_header.ATTACHED) dvol.set_target_vm(vm) while True: OPS = VOL_OPS + VM_STATE_OPS if not backup_list: OPS.remove("VM_TEST_BACKUP_IMAGE") dvol = zstack_volume_header.ZstackTestVolume() dvol.set_volume(test_lib.lib_get_data_volumes(vm.get_vm())[0]) dvol.set_state(volume_header.ATTACHED) dvol.set_target_vm(vm) vm_op_test(vm, dvol, random.choice(OPS)) if vm.state == "Stopped": if backup_list: vm_op_test(vm, dvol, "VM_TEST_REVERT_BACKUP") vm.start() if test_lib.lib_is_vm_l3_has_vr(vm.vm): test_lib.TestHarness = test_lib.TestHarnessVR time.sleep(60) cmd = "dd if=/dev/urandom of=/dev/vdb bs=512k count=1" test_lib.lib_execute_command_in_vm(vm.vm, cmd) vm.suspend() vm_op_test(vm, dvol, "DVOL_BACKUP") compare(ps, vm, dvol, backup) vm.resume()
def test(): test_util.test_dsc('Create test vm and check') vm1 = test_stub.create_vm(vm_name="vm1", image_name="ocfs2-host-image") test_obj_dict.add_vm(vm1) vm2 = test_stub.create_vm(vm_name="vm2", image_name="ocfs2-host-image") test_obj_dict.add_vm(vm2) test_util.test_dsc('Create volume and check') disk_offering = test_lib.lib_get_disk_offering_by_name( os.environ.get('rootDiskOfferingName')) volume_creation_option = test_util.VolumeOption() volume_creation_option.set_disk_offering_uuid(disk_offering.uuid) volume_creation_option.set_system_tags( ['ephemeral::shareable', 'capability::virtio-scsi']) volume = test_stub.create_volume(volume_creation_option) test_obj_dict.add_volume(volume) volume.check() test_util.test_dsc('Attach volume and check') #mv vm checker later, to save some time. vm1.check() vm2.check() volume.attach(vm1) volume.attach(vm2) config_ocfs2_vms(vm1, vm2) check_sharable_volume(vm1, vm2) volume.check() vm1.stop() volume.check() vm2.stop() volume.check() vm1.start() volume.check() vm2.start() volume.check() test_util.test_dsc('Detach volume and check') volume.detach(vm1.get_vm().uuid) volume.detach(vm2.get_vm().uuid) volume.check() test_util.test_dsc('Delete volume and check') volume.delete() volume.expunge() volume.check() test_obj_dict.rm_volume(volume) vm1.destroy() vm2.destroy() vm1.check() vm2.check() vm1.expunge() vm2.expunge() test_util.test_pass('Create Data Volume for VM Test Success')
def test():
    """Scheduler test: a snapshot job with snapshotMaxNumber=3 fires every
    120s; verify the snapshot count right before and right after each trigger,
    and that the count caps at 3.

    Fixes vs original: 'sholuld' typos in the failure messages, and the final
    logger used the stale loop variable (120*i) while the sleep used 120*3 —
    both now report the actual check time.
    """
    global vm
    global schd_job
    global schd_trigger
    vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm)
    conditions = res_ops.gen_query_conditions('type', '=', 'Ceph')
    pss = res_ops.query_resource(res_ops.PRIMARY_STORAGE, conditions)
    if len(pss) == 0:
        test_util.test_skip('Skip due to no ceph storage available')
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_name('volume for snapshot scheduler testing')
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    # attach/detach once so the volume is instantiated before snapshotting
    volume.attach(vm)
    volume.detach()
    test_util.test_dsc('create snapshot and check')
    start_date = int(time.time())
    parameters = {"snapshotMaxNumber": "3"}
    schd_job = schd_ops.create_scheduler_job('simple_create_snapshot_scheduler',
            'simple_create_snapshot_scheduler', volume.get_volume().uuid,
            'volumeSnapshot', parameters)
    # first fire at start_date+60, then every 120s
    schd_trigger = schd_ops.create_scheduler_trigger(
            'simple_create_snapshot_scheduler', start_date+60, None, 120, 'simple')
    schd_ops.add_scheduler_job_to_trigger(schd_trigger.uuid, schd_job.uuid)
    snapshot_num = 0
    for i in range(0, 3):
        test_util.test_logger('round %s' % (i))
        # 2s before the i-th fire: count must still be the old value
        test_stub.sleep_util(start_date + 60 + 120*i - 2)
        test_util.test_logger('check volume snapshot number at %s, there should be %s' % (start_date + 60 + 120*i - 2, snapshot_num))
        new_snapshot_num = query_snapshot_number(volume.get_volume().uuid)
        if snapshot_num != new_snapshot_num:
            test_util.test_fail('there should be %s snapshots' % (snapshot_num))
        snapshot_num += 1
        # 65s after the fire: count must have grown by one
        test_stub.sleep_util(start_date + 60 + 120*i + 65)
        test_util.test_logger('check volume snapshot number at %s, there should be %s' % (start_date + 60 + 120*i + 65, snapshot_num))
        new_snapshot_num = query_snapshot_number(volume.get_volume().uuid)
        if snapshot_num != new_snapshot_num:
            test_util.test_fail('there should be %s snapshots' % (snapshot_num))
    # one more period: snapshotMaxNumber caps the count at 3
    test_stub.sleep_util(start_date + 60 + 120*3 + 65)
    test_util.test_logger('check volume snapshot number at %s, there should be %s' % (start_date + 60 + 120*3 + 65, 3))
    new_snapshot_num = query_snapshot_number(volume.get_volume().uuid)
    if new_snapshot_num != 3:
        test_util.test_fail('there should be 3 snapshots')
    schd_ops.del_scheduler_job(schd_job.uuid)
    schd_ops.del_scheduler_trigger(schd_trigger.uuid)
    vm.destroy()
    test_util.test_pass('Create limit create volume snapshot num success')
def test():
    """Snapshot lifecycle cleanup test (backup path disabled): create and
    delete snapshots in several orders, use (revert to) snapshot4, and finally
    delete the whole snapshot tree.
    """
    test_util.test_dsc('Create test vm as utility vm')
    vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm)
    test_util.test_dsc('Create volume for snapshot testing')
    disk_offering = test_lib.lib_get_disk_offering_by_name(
            os.environ.get('smallDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_name('volume for snapshot testing')
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    #make sure utility vm is starting and running
    vm.check()
    test_util.test_dsc('create snapshot and delete it')
    #snapshots = zstack_sp_header.ZstackVolumeSnapshot()
    #snapshots.set_target_volume(volume)
    #test_obj_dict.add_volume_snapshot(snapshots)
    snapshots = test_obj_dict.get_volume_snapshot(volume.get_volume().uuid)
    snapshots.set_utility_vm(vm)
    snapshots.create_snapshot('create_snapshot1')
    snapshot1 = snapshots.get_current_snapshot()
    snapshots.delete_snapshot(snapshot1)
    test_util.test_dsc('create 2 new snapshots, then delete the 1st one')
    snapshots.create_snapshot('create_snapshot2')
    snapshot2 = snapshots.get_current_snapshot()
    snapshots.create_snapshot('create_snapshot3')
    snapshot3 = snapshots.get_current_snapshot()
    snapshots.check()
    snapshots.delete_snapshot(snapshot2)
    test_util.test_dsc('create new snapshot to backup')
    snapshots.create_snapshot('create_snapshot4')
    snapshot4 = snapshots.get_current_snapshot()
    snapshots.create_snapshot('create_snapshot5')
    # backup branch disabled in this variant (see the sibling test with backup)
    #snapshot4.backup()
    #snapshots.check()
    #test_util.test_dsc('use new backuped snapshot4 and delete it later. ')
    snapshots.use_snapshot(snapshot4)
    snapshots.check()
    snapshots.delete_snapshot(snapshot4)
    test_util.test_dsc('try to create last snapshot and delete it. ')
    snapshots.create_snapshot('create_snapshot6')
    snapshots.check()
    snapshots.delete()
    test_obj_dict.rm_volume_snapshot(snapshots)
    volume.check()
    test_obj_dict.rm_volume(volume)
    vm.check()
    vm.destroy()
    test_util.test_pass('Cleanup all Snapshots test Success')
def test(): global test_obj_dict network_pattern = 'L3-%s'%os.environ['dportgroup'] if not vct_ops.lib_get_vcenter_l3_by_name(network_pattern): network_pattern = 'L3-%s'%os.environ['portgroup0'] ova_image_name = os.environ['vcenterDefaultmplate'] disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('largeDiskOfferingName')) #create vm vm = test_stub.create_vm_in_vcenter(vm_name = 'vm-create', image_name = ova_image_name, l3_name = network_pattern) vm.check() test_obj_dict.add_vm(vm) ps_uuid = vm.vm.allVolumes[0].primaryStorageUuid ps1_uuid = None for ps in res_ops.query_resource(res_ops.VCENTER_PRIMARY_STORAGE): if ps.uuid != ps_uuid: ps1_uuid = ps.uuid break test_util.test_dsc('Create volume and check') volume_creation_option = test_util.VolumeOption() volume_creation_option.set_disk_offering_uuid(disk_offering.uuid) volume_creation_option.set_name('vcenter_volume') volume = test_stub.create_volume(volume_creation_option) test_obj_dict.add_volume(volume) volume.check() volume_creation_option.set_primary_storage_uuid(ps_uuid) volume_creation_option.set_name('vcenter_volume_ps') volume_ps = test_stub.create_volume(volume_creation_option) test_obj_dict.add_volume(volume_ps) volume_ps.check() if ps1_uuid: volume_creation_option.set_primary_storage_uuid(ps1_uuid) volume_creation_option.set_name('vcenter_volume_ps1') volume_ps1 = test_stub.create_volume(volume_creation_option) test_obj_dict.add_volume(volume_ps1) volume_ps1.check() test_util.test_dsc('Attach volume and check') volume.attach(vm) volume.check() volume_ps.attach(vm) volume_ps.check() if vct_ops.get_datastore_type(os.environ['vcenter']) == 'local' and ps1_uuid != None: try: volume_ps1.attach(vm) except: test_util.test_logger('test for volume_ps1 pass') else: test_util.test_fail('volume_ps1 should not attach to vm') #cleanup test_lib.lib_error_cleanup(test_obj_dict) test_util.test_pass("Attach data volumes to vm test passed.")
def test(): global test_obj_dict test_util.test_dsc('Create test vm and check. VR only has DNS and DHCP services') disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName')) disk_offering_uuids = [disk_offering.uuid] disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName')) disk_offering_uuids.append(disk_offering.uuid) vm = test_stub.create_vlan_vm_with_volume(os.environ.get('l3VlanNetworkName1'), disk_offering_uuids) test_obj_dict.add_vm(vm) vm.check() volumes_number = len(test_lib.lib_get_all_volumes(vm.vm)) if volumes_number != 3: test_util.test_fail('Did not find 3 volumes for [vm:] %s. But we assigned 2 data volume when create the vm. We only catch %s volumes' % (vm.vm.uuid, volumes_number)) else: test_util.test_logger('Find 3 volumes for [vm:] %s.' % vm.vm.uuid) vm.destroy() test_util.test_pass('Create VirtualRouter VM DNS DHCP Test Success')
def create_attach_volume(vm_obj): global test_obj_dict volume_creation_option = test_util.VolumeOption() disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName')) volume_creation_option.set_disk_offering_uuid(disk_offering.uuid) volume = test_stub.create_volume(volume_creation_option) test_obj_dict.add_volume(volume) volume.check() volume.attach(vm_obj)
def test():
    """Snapshot lifecycle cleanup test (with backup): like the non-backup
    variant, but snapshot4 is backed up before being used and deleted.
    """
    test_util.test_dsc('Create test vm as utility vm')
    vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm)
    test_util.test_dsc('Create volume for snapshot testing')
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_name('volume for snapshot testing')
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    #make sure utility vm is starting and running
    vm.check()
    test_util.test_dsc('create snapshot and delete it')
    #snapshots = zstack_sp_header.ZstackVolumeSnapshot()
    #snapshots.set_target_volume(volume)
    #test_obj_dict.add_volume_snapshot(snapshots)
    snapshots = test_obj_dict.get_volume_snapshot(volume.get_volume().uuid)
    snapshots.set_utility_vm(vm)
    snapshots.create_snapshot('create_snapshot1')
    snapshot1 = snapshots.get_current_snapshot()
    snapshots.delete_snapshot(snapshot1)
    test_util.test_dsc('create 2 new snapshots, then delete the 1st one')
    snapshots.create_snapshot('create_snapshot2')
    snapshot2 = snapshots.get_current_snapshot()
    snapshots.create_snapshot('create_snapshot3')
    snapshot3 = snapshots.get_current_snapshot()
    snapshots.check()
    snapshots.delete_snapshot(snapshot2)
    test_util.test_dsc('create new snapshot to backup')
    snapshots.create_snapshot('create_snapshot4')
    snapshot4 = snapshots.get_current_snapshot()
    snapshots.create_snapshot('create_snapshot5')
    # back up snapshot4, then revert to it and delete it
    snapshot4.backup()
    snapshots.check()
    test_util.test_dsc('use new backuped snapshot4 and delete it later. ')
    snapshots.use_snapshot(snapshot4)
    snapshots.check()
    snapshots.delete_snapshot(snapshot4)
    test_util.test_dsc('try to create last snapshot and delete it. ')
    snapshots.create_snapshot('create_snapshot6')
    snapshots.check()
    snapshots.delete()
    test_obj_dict.rm_volume_snapshot(snapshots)
    volume.check()
    test_obj_dict.rm_volume(volume)
    vm.check()
    vm.destroy()
    test_util.test_pass('Cleanup all Snapshots test Success')
def create_vlan_vm_with_volume(l3_name=None, disk_offering_uuids=None, disk_number=None): if not disk_offering_uuids: disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName')) disk_offering_uuids = [disk_offering.uuid] if disk_number: for i in range(disk_number - 1): disk_offering_uuids.append(disk_offering.uuid) return create_vlan_vm(l3_name, disk_offering_uuids)
def test(): allow_ps_list = [inventory.CEPH_PRIMARY_STORAGE_TYPE, "SharedBlock"] test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list) flavor = case_flavor[os.environ.get('CASE_FLAVOR')] test_util.test_dsc('Create test vm and check') vm = test_stub.create_vlan_vm() test_obj_dict.add_vm(vm) test_util.test_dsc('Create volume and check') disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName')) volume_creation_option = test_util.VolumeOption() volume_creation_option.set_disk_offering_uuid(disk_offering.uuid) volume_creation_option.set_system_tags(['ephemeral::shareable', 'capability::virtio-scsi']) volume = test_stub.create_volume(volume_creation_option) test_obj_dict.add_volume(volume) test_util.test_dsc('Attach volume and check') #vm.check() volume.attach(vm) if flavor['vm_running'] == False: vm.stop() if flavor['vm_running'] == True: allow_ps_list = [inventory.CEPH_PRIMARY_STORAGE_TYPE] test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list) ps_uuid = volume.get_volume().primaryStorageUuid ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid) test_util.test_dsc('Create volume template and check') bs_list = test_lib.lib_get_backup_storage_list_by_vm(vm.get_vm()) bs_uuid_list = [] for bs in bs_list: bs_uuid_list.append(bs.uuid) vol_tmpt = volume.create_template(bs_uuid_list, 'new_data_template') test_obj_dict.add_image(vol_tmpt) vol_tmpt.check() volume.check() volume.delete() test_obj_dict.rm_volume(volume) test_util.test_dsc('Create volume from template and check') volume2 = vol_tmpt.create_data_volume(ps_uuid, 'new_volume_from_template') test_obj_dict.add_volume(volume2) vol_tmpt.delete() test_obj_dict.rm_image(vol_tmpt) volume2.check() volume2.attach(vm) vm.check() volume2.check() volume2.detach() volume2.delete() test_obj_dict.rm_volume(volume2) vm.destroy() test_util.test_pass('Create Sharable Data Volume Template from Data Volume Success.')
def test(): global test_obj_dict, bs #judge whether BS is imagestore bs = res_ops.query_resource(res_ops.BACKUP_STORAGE) for i in bs: if i.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE: break else: test_util.test_skip('Skip test on non-imagestore') #Skip for AliyunNAS PS ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE) for i in ps: if i.type == 'AliyunNAS': test_util.test_skip('Skip test on SharedBlock and AliyunNAS PS') volume_creation_option = test_util.VolumeOption() test_util.test_dsc('Create volume and check') disk_offering = test_lib.lib_get_disk_offering_by_name( os.environ.get('smallDiskOfferingName')) volume_creation_option.set_disk_offering_uuid(disk_offering.uuid) volume = test_stub.create_volume(volume_creation_option) volume2 = test_stub.create_volume(volume_creation_option) test_obj_dict.add_volume(volume) test_obj_dict.add_volume(volume2) volume.check() volume2.check() image_name = os.environ.get('imageName_s') l3_name = os.environ.get('l3PublicNetworkName') vm = test_stub.create_vm("test_vm", image_name, l3_name) #vm.check() test_obj_dict.add_vm(vm) volume.attach(vm) volume2.attach(vm) vm.suspend() new_vm = vm.clone([ 'test_vm_clone_vm1_with_two_data_volume', 'test_vm_clone_vm2_with_two_data_volume', 'test_vm_clone_vm3_with_two_data_volume' ], full=True) for i in new_vm: test_obj_dict.add_vm(i) volumes_number = len(test_lib.lib_get_all_volumes(i.vm)) if volumes_number != 3: test_util.test_fail( 'Did not find 3 volumes for [vm:] %s. But we assigned 3 data volume when create the vm. We only catch %s volumes' % (i.vm.uuid, volumes_number)) else: test_util.test_logger('Find 3 volumes for [vm:] %s.' % i.vm.uuid) vm.resume() test_lib.lib_error_cleanup(test_obj_dict) test_util.test_pass('Test full clone 3vms with two data volume Success')
def create_vlan_vm_with_volume(l3_name=None, disk_offering_uuids=None, disk_number=None):
    """Create a VLAN VM with one or more data volumes attached at creation.

    :param l3_name: L3 network name forwarded to create_vlan_vm().
    :param disk_offering_uuids: explicit disk offering uuid list; when None,
        a single-entry list is built from the 'diskOfferingName-s' offering.
    :param disk_number: total number of data volumes wanted; the offering uuid
        is replicated until the list holds disk_number entries.
    :return: whatever create_vlan_vm() returns (the test VM wrapper).
    """
    if not disk_offering_uuids:
        disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('diskOfferingName-s'))
        disk_offering_uuids = [disk_offering.uuid]
    else:
        # Work on a copy so the caller's list is not mutated below.
        disk_offering_uuids = list(disk_offering_uuids)
    #current ZStack doesn't support create several same volume when creating VM by providing several same uuids.
    if disk_number:
        # Replicate the last uuid to reach disk_number entries.
        # BUGFIX: the original appended `disk_offering.uuid` here, which is an
        # unbound local (NameError) whenever disk_offering_uuids was supplied
        # by the caller; use the list's own last entry instead.
        for _ in range(disk_number - 1):
            disk_offering_uuids.append(disk_offering_uuids[-1])
    return create_vlan_vm(l3_name, disk_offering_uuids)
def test():
    """Delete volume snapshots while the volume is attached to a running VM.

    Creates a utility VM plus a second VM, snapshots a detached data volume
    three times, attaches the volume to the second VM, then deletes the whole
    snapshot chain — exercising live merge. Skips on hosts lacking live
    snapshot capability or libvirt >= 1.2.7.
    """
    test_util.test_dsc('Create test vm as utility vm')
    vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm)
    #this test will rely on live snapshot capability supporting
    host_inv = test_lib.lib_find_host_by_vm(vm.get_vm())
    if not test_lib.lib_check_live_snapshot_cap(host_inv):
        vm.destroy()
        test_obj_dict.rm_vm(vm)
        # BUGFIX: the message contained a bare '%s' that was never formatted;
        # supply the host uuid, matching the libvirt skip message below.
        test_util.test_skip(
            'Skip test, since [host:] %s does not support live snapshot.'
            % host_inv.uuid)
    libvirt_ver = test_lib.lib_get_host_libvirt_tag(host_inv)
    # NOTE(review): this is a lexicographic string compare, which mis-orders
    # versions like '1.10.0' vs '1.2.7' — confirm the tag format upstream.
    if not libvirt_ver or libvirt_ver < '1.2.7':
        vm.destroy()
        test_obj_dict.rm_vm(vm)
        test_util.test_skip(
            "Skip test, since [host:] %s libvert version: %s is lower than 1.2.7, which doesn't support live merge, when doing snapshot deleting."
            % (host_inv.uuid, libvirt_ver))
    vm1 = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm1)
    test_util.test_dsc('Create volume for snapshot testing')
    disk_offering = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('smallDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_name('volume for snapshot testing')
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    #make sure utility vm is starting and running
    vm.check()
    test_util.test_dsc('create snapshot and check')
    #snapshots = zstack_sp_header.ZstackVolumeSnapshot()
    #snapshots.set_target_volume(volume)
    #test_obj_dict.add_volume_snapshot(snapshots)
    snapshots = test_obj_dict.get_volume_snapshot(volume.get_volume().uuid)
    snapshots.set_utility_vm(vm)
    # Build a three-deep snapshot chain before attaching the volume.
    snapshots.create_snapshot('create_snapshot1')
    snapshots.create_snapshot('create_snapshot2')
    snapshots.create_snapshot('create_snapshot3')
    volume.attach(vm1)
    # Delete the chain while the volume is attached — the live-merge path.
    snapshots.delete()
    test_obj_dict.rm_volume_snapshot(snapshots)
    vm1.start()
    volume.check()
    vm.destroy()
    test_obj_dict.rm_vm(vm)
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Delete Snapshot on running vm test Success')
def test():
    """Attach one shareable virtio-scsi data volume to two VMs at once, verify
    a second attach to either VM is rejected, then detach/delete the volume
    and expunge both VMs.
    """
    test_util.test_dsc('Create test vm and check')
    vm1 = test_stub.create_vm(vm_name="vm1", image_name="ocfs2-host-image")
    test_obj_dict.add_vm(vm1)
    vm2 = test_stub.create_vm(vm_name="vm2", image_name="ocfs2-host-image")
    test_obj_dict.add_vm(vm2)
    test_util.test_dsc('Create volume and check')
    disk_offering = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('rootDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    # Shareable volume on the virtio-scsi bus: may attach to multiple VMs.
    volume_creation_option.set_system_tags(
        ['ephemeral::shareable', 'capability::virtio-scsi'])
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    test_util.test_dsc('Attach volume and check')
    #mv vm checker later, to save some time.
    vm1.check()
    vm2.check()
    volume.attach(vm1)
    volume.attach(vm2)
    # Negative case: attaching the same volume to the same VM again must fail.
    try:
        volume.attach(vm1)
        volume.attach(vm2)
    except:
        pass
    else:
        test_util.test_fail(
            "negative test found attach twice is allowed, which should not be expected."
        )
    test_util.test_dsc('Detach volume and check')
    volume.detach(vm1.get_vm().uuid)
    volume.detach(vm2.get_vm().uuid)
    volume.check()
    test_util.test_dsc('Delete volume and check')
    volume.delete()
    volume.expunge()
    volume.check()
    test_obj_dict.rm_volume(volume)
    vm1.destroy()
    vm2.destroy()
    vm1.check()
    vm2.check()
    vm1.expunge()
    vm2.expunge()
    test_util.test_pass('Create Data Volume for VM Test Success')
def create_volume(volume_creation_option=None):
    """Build and create a test volume, returning the ZstackTestVolume wrapper.

    When no creation option is supplied, a default one is assembled from the
    'diskOfferingName-s' disk offering and named 'vr_test_volume'.
    """
    if volume_creation_option is None or not volume_creation_option:
        offering = test_lib.lib_get_disk_offering_by_name(
            os.environ.get('diskOfferingName-s'))
        volume_creation_option = test_util.VolumeOption()
        volume_creation_option.set_disk_offering_uuid(offering.uuid)
        volume_creation_option.set_name('vr_test_volume')
    test_volume = zstack_volume_header.ZstackTestVolume()
    test_volume.set_creation_option(volume_creation_option)
    test_volume.create()
    return test_volume
def test():
    """Clone a stopped VM that has a data volume attached and verify that all
    requested clones are produced, are correctly named, and carry only the
    root volume (data volumes must NOT be cloned).

    Uses module-level `vm_names` (the list is consumed via remove() during
    verification) and `test_obj_dict`.
    """
    vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm)
    backup_storage_list = test_lib.lib_get_backup_storage_list_by_vm(vm.vm)
    # Accept ImageStore or Ceph backup storage; otherwise skip.
    for bs in backup_storage_list:
        if bs.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
            break
        #if bs.type == inventory.SFTP_BACKUP_STORAGE_TYPE:
        #    break
        if bs.type == inventory.CEPH_BACKUP_STORAGE_TYPE:
            break
    else:
        # for/else: no suitable backup storage found.
        vm.destroy()
        test_util.test_skip('Not find image store type backup storage.')
    disk_offering = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('rootDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    #volume_creation_option.set_system_tags(['ephemeral::shareable', 'capability::virtio-scsi'])
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    volume.attach(vm)
    # Clone from a stopped VM.
    vm.stop()
    vm.check()
    new_vms = vm.clone(vm_names)
    for new_vm in new_vms:
        test_obj_dict.add_vm(new_vm)
    if len(new_vms) != len(vm_names):
        test_util.test_fail(
            'only %s VMs have been cloned, which is less than required: %s'
            % (len(new_vms), vm_names))
    # Each clone's name must match one requested name exactly once.
    for new_vm in new_vms:
        new_vm = new_vm.get_vm()
        try:
            vm_names.remove(new_vm.name)
            test_util.test_logger('VM:%s name: %s is found' % (new_vm.uuid, new_vm.name))
        except:
            test_util.test_fail('%s vm name: %s is not in list: %s'
                                % (new_vm.uuid, new_vm.name, vm_names))
    # Clones must have no data volumes — only the root volume is cloned.
    if test_lib.lib_get_data_volumes(new_vms[0].vm) != []:
        test_util.test_fail(
            'The cloned vm is still have data volume, the expected behavior is only clone root volume.'
        )
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Clone VM Test Success')
def create_volume(volume_creation_option=None):
    """Create a test data volume and return its ZstackTestVolume wrapper.

    Falls back to a default option built from the 'smallDiskOfferingName'
    disk offering, named 'vr_test_volume', when none is given.
    """
    if not volume_creation_option:
        small_offering = test_lib.lib_get_disk_offering_by_name(
            os.environ.get('smallDiskOfferingName'))
        default_option = test_util.VolumeOption()
        default_option.set_disk_offering_uuid(small_offering.uuid)
        default_option.set_name('vr_test_volume')
        volume_creation_option = default_option
    new_volume = zstack_volume_header.ZstackTestVolume()
    new_volume.set_creation_option(volume_creation_option)
    new_volume.create()
    return new_volume
def create_data_volume(self):
    """Create a data volume on the VM's primary storage, attach it to the VM,
    and verify it; stores the wrapper on self.data_volume.
    """
    offering = test_lib.lib_get_disk_offering_by_name(os.getenv('rootDiskOfferingName'))
    # Primary storage of the VM's first (root) volume — assumed to be where
    # the data volume will land as well.
    ps_uuid = self.vm.vm.allVolumes[0].primaryStorageUuid
    opts = test_util.VolumeOption()
    opts.set_name('data-volume-for-crt-image-test')
    opts.set_disk_offering_uuid(offering.uuid)
    # volume_option.set_primary_storage_uuid(ps_uuid)
    self.data_volume = create_volume(opts)
    self.set_ceph_mon_env(ps_uuid)
    self.data_volume.attach(self.vm)
    self.data_volume.check()
def test():
    """Verify a 'simple' snapshot scheduler fires on its 120-second interval.

    Starts the scheduler 60s in the future, then for three periods checks the
    snapshot count just before each expected firing (no new snapshot yet) and
    shortly after it (count incremented). Uses module-level `vm` and `schd`.
    """
    global vm
    global schd
    vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm)
    disk_offering = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('smallDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_name('volume for snapshot scheduler testing')
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    # Attach then detach so the volume has been initialized on a host but is
    # detached when snapshots are scheduled.
    volume.attach(vm)
    volume.detach()
    test_util.test_dsc('create snapshot and check')
    start_date = int(time.time())
    sp_option = test_util.SnapshotOption()
    sp_option.set_name('simple_schduler_snapshot')
    sp_option.set_volume_uuid(volume.get_volume().uuid)
    # First run at start_date+60, repeating every 120 seconds.
    schd = vol_ops.create_snapshot_scheduler(
        sp_option, 'simple', 'simple_create_snapshot_scheduler',
        start_date + 60, 120)
    snapshot_num = 0
    for i in range(0, 3):
        test_util.test_logger('round %s' % (i))
        # Wake 2s before the i-th firing: count must still be snapshot_num.
        test_stub.sleep_util(start_date + 60 + 120 * i - 2)
        test_util.test_logger(
            'check volume snapshot number at %s, there should be %s'
            % (start_date + 60 + 120 * i - 2, snapshot_num))
        new_snapshot_num = query_snapshot_number('simple_schduler_snapshot')
        if snapshot_num != new_snapshot_num:
            test_util.test_fail('there sholuld be %s snapshots' % (snapshot_num))
        snapshot_num += 1
        # Wake 60s after the firing: count must now include the new snapshot.
        test_stub.sleep_util(start_date + 60 + 120 * i + 60)
        # NOTE(review): the log message reports snapshot_num + 1 but the check
        # below compares against snapshot_num — log text looks off by one;
        # confirm intended wording.
        test_util.test_logger(
            'check volume snapshot number at %s, there should be %s'
            % (start_date + 60 + 120 * i + 65, snapshot_num + 1))
        new_snapshot_num = query_snapshot_number('simple_schduler_snapshot')
        if snapshot_num != new_snapshot_num:
            test_util.test_fail('there sholuld be %s snapshots' % (snapshot_num))
    schd_ops.delete_scheduler(schd.uuid)
    vm.destroy()
    test_util.test_pass('Create Simple VM Stop Start Scheduler Success')