def test():
    """Destroy every none-VR user VM and every VIP, then verify both are gone.

    Lengthens the admin session first so the bulk destroy cannot expire it,
    restores the session config and logs out before reporting, and fails the
    test if any VM or VIP survives.
    """
    global session_to
    global session_mc
    global session_uuid
    session_uuid = acc_ops.login_as_admin()
    # Lengthen the session so the long-running bulk destroy does not expire it.
    session_to = con_ops.change_global_config('identity', 'session.timeout', '720000', session_uuid)
    session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000', session_uuid)

    cond = res_ops.gen_query_conditions('type', '=', inventory.USER_VM_TYPE)
    num = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond, session_uuid)
    if num <= thread_threshold:
        vms = res_ops.query_resource(res_ops.VM_INSTANCE, cond, session_uuid)
    else:
        # Page through the VMs to avoid a single huge query.
        start = 0
        limit = thread_threshold - 1
        curr_num = start
        vms = []
        while curr_num < num:
            vms_temp = res_ops.query_resource_fields(res_ops.VM_INSTANCE,
                    cond, session_uuid, ['uuid'], start, limit)
            vms.extend(vms_temp)
            curr_num += limit
            start += limit
    destroy_vms(vms)

    vip_num = res_ops.query_resource_count(res_ops.VIP, [], session_uuid)
    if vip_num <= thread_threshold:
        vips = res_ops.query_resource(res_ops.VIP, [], session_uuid)
    else:
        start = 0
        limit = thread_threshold - 1
        curr_num = start
        # BUG FIX: the accumulator must be 'vips', not 'vms'; the original
        # paged branch raised NameError on 'vips.extend(...)'.
        vips = []
        while curr_num < vip_num:
            vips_temp = res_ops.query_resource_fields(res_ops.VIP,
                    [], session_uuid, ['uuid'], start, limit)
            vips.extend(vips_temp)
            curr_num += limit
            start += limit
    destroy_vips(vips)

    vm_left = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond, session_uuid)
    vip_left = res_ops.query_resource_count(res_ops.VIP, [], session_uuid)

    # BUG FIX: restore config and log out BEFORE pass/fail -- test_pass()
    # and test_fail() terminate the test, so the original left the global
    # config modified and the session open, and never reached the VIP check.
    con_ops.change_global_config('identity', 'session.timeout', session_to, session_uuid)
    con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc, session_uuid)
    acc_ops.logout(session_uuid)

    if vm_left != 0:
        test_util.test_fail('None VR VMs destroy Fail. %d VMs are not Destroied.' % vm_left)
    if vip_left != 0:
        test_util.test_fail('VIP destroy Fail. %d VIP are not Destroied.' % vip_left)
    test_util.test_pass('None VR VMs destroy Success. Destroy %d VMs.' % num)
def test():
    """Start an async add-image and verify the reported task progress is in 0..100.

    Skips unless the first backup storage is Ceph or ImageStore type.
    """
    global new_image
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, \
            None, fields=['uuid'])
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")
    if bss[0].type != inventory.CEPH_BACKUP_STORAGE_TYPE:
        if hasattr(inventory, 'IMAGE_STORE_BACKUP_STORAGE_TYPE') and bss[0].type != inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
            test_util.test_skip("not find available imagestore or ceph backup storage. Skip test")
    thread = threading.Thread(target=add_image, args=(bss[0].uuid, ))
    thread.start()
    time.sleep(5)
    image_cond = res_ops.gen_query_conditions("status", '=', "Downloading")
    image = res_ops.query_resource_fields(res_ops.IMAGE, image_cond, \
            None, fields=['uuid'])
    # BUG FIX: guard against an empty query result; the original raised a
    # bare IndexError on image[0] when no image was in Downloading state
    # (e.g. the add finished within the 5 second sleep).
    if not image:
        test_util.test_fail("no image in Downloading status 5 seconds after adding started")
    progress = res_ops.get_task_progress(image[0].uuid)
    if int(progress.progress) < 0 or int(progress.progress) > 100:
        test_util.test_fail("Progress of task should be between 0 and 100, while it actually is %s" % (progress.progress))
    thread.join()
    new_image.delete()
    if test_lib.lib_get_image_delete_policy() != 'Direct':
        new_image.expunge()
    test_util.test_pass('Add image Progress Test Success')
def test():
    """Stress-create up to *num* data volumes, 20 per host across up to 500 hosts.

    num comes from the CASE_FLAVOR environment ('vol_num'), default 10000.
    """
    if os.environ.get('CASE_FLAVOR'):
        flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
        num = flavor['vol_num']
    else:
        num = 10000
    diskOfferingUuid = res_ops.query_resource_fields(res_ops.DISK_OFFERING)[0].uuid
    hosts = res_ops.query_resource_fields(res_ops.HOST)
    counter = 0
    # BUG FIX: iterate the hosts actually present; the original
    # 'for i in range(0, 500)' raised IndexError when there were fewer hosts.
    for host in hosts[:500]:
        hostUuid = host.uuid
        hostName = host.name
        # Pick a primary storage from the host's cluster for the volume.
        cond = res_ops.gen_query_conditions('cluster.uuid', '=', host.clusterUuid)
        psUuid = res_ops.query_resource_fields(res_ops.PRIMARY_STORAGE, cond)[0].uuid
        for j in range(0, 20):
            volName = 'vol-' + str(j) + '-on-host-' + hostName
            thread = threading.Thread(target=create_vol, args=(volName, diskOfferingUuid, hostUuid, psUuid))
            # Throttle creation concurrency.
            while threading.active_count() > 100:
                time.sleep(5)
            thread.start()
            counter += 1
            # BUG FIX: the original tested 'counter > num' before creating,
            # which could never fire when num equals the 500*20 maximum, so
            # a fully successful run still ended in test_fail().
            if counter >= num:
                test_util.test_pass("Create %s volumes finished" % num)
    # BUG FIX: the failure message said "vms" in this volume test.
    test_util.test_fail("Fail to create volumes: only %d of %d created" % (counter, num))
def test():
    """Stress-create up to *num* VMs, 20 per host across up to 500 hosts.

    num comes from the CASE_FLAVOR environment ('vm_num'), default 10000.
    """
    if os.environ.get('CASE_FLAVOR'):
        flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
        num = flavor['vm_num']
    else:
        num = 10000
    cond = res_ops.gen_query_conditions('system', '=', 'false')
    imageUuid = res_ops.query_resource_fields(res_ops.IMAGE, cond)[0].uuid
    cond = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instanceOfferingUuid = res_ops.query_resource_fields(res_ops.INSTANCE_OFFERING, cond)[0].uuid
    cond = res_ops.gen_query_conditions('system', '=', 'false')
    l3NetworkUuids = res_ops.query_resource_fields(res_ops.L3_NETWORK, cond)[0].uuid
    hosts = res_ops.query_resource_fields(res_ops.HOST)
    counter = 0
    # BUG FIX: iterate the hosts actually present; the original
    # 'for i in range(0, 500)' raised IndexError when there were fewer hosts.
    for host in hosts[:500]:
        hostUuid = host.uuid
        hostName = host.name
        for j in range(0, 20):
            vm_name = 'vm-' + str(j) + '-on-host-' + hostName
            thread = threading.Thread(target=create_vm, args=(vm_name, imageUuid, hostUuid, instanceOfferingUuid, l3NetworkUuids))
            # Throttle creation concurrency.
            while threading.active_count() > 10:
                time.sleep(5)
            thread.start()
            counter += 1
            # BUG FIX: the original tested 'counter > num' before creating,
            # which could never fire when num equals the 500*20 maximum, so
            # a fully successful run still ended in test_fail().
            if counter >= num:
                test_util.test_pass("Create %s vms finished" % num)
    test_util.test_fail("Fail to create vms: only %d of %d created" % (counter, num))
def destroy_initial_database():
    """Tear down the initial environment: every zone, every backup storage, IAM2."""
    for zone in res_ops.query_resource_fields(res_ops.ZONE, [], None, ['uuid']):
        zone_operations.delete_zone(zone.uuid)
    for bs in res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, [], None, ['uuid']):
        bs_operations.delete_backup_storage(bs.uuid)
    iam2_ops.clean_iam2_enviroment()
def test():
    """Destroy all VMs and all VIPs, restore session settings, then report success."""
    global session_to
    global session_mc
    global session_uuid
    session_uuid = acc_ops.login_as_admin()
    # Lengthen the session so the bulk destroy does not expire it.
    session_to = con_ops.change_global_config('identity', 'session.timeout', '720000')
    session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000')
    cond = []
    num = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond)
    if num <= thread_threshold:
        vms = res_ops.query_resource(res_ops.VM_INSTANCE, cond)
    else:
        # Page through the VMs to avoid a single huge query.
        start = 0
        limit = thread_threshold - 1
        curr_num = start
        vms = []
        while curr_num < num:
            vms_temp = res_ops.query_resource_fields(res_ops.VM_INSTANCE,
                    cond, None, ['uuid'], start, limit)
            vms.extend(vms_temp)
            curr_num += limit
            start += limit
    destroy_vms(vms)
    vip_num = res_ops.query_resource_count(res_ops.VIP, [], session_uuid)
    if vip_num <= thread_threshold:
        vips = res_ops.query_resource(res_ops.VIP, [], session_uuid)
    else:
        start = 0
        limit = thread_threshold - 1
        curr_num = start
        # BUG FIX: the accumulator must be 'vips', not 'vms'; the original
        # paged branch raised NameError on 'vips.extend(...)'.
        vips = []
        while curr_num < vip_num:
            vips_temp = res_ops.query_resource_fields(res_ops.VIP,
                    [], session_uuid, ['uuid'], start, limit)
            vips.extend(vips_temp)
            curr_num += limit
            start += limit
    destroy_vips(vips)
    # BUG FIX: restore the global config and log out BEFORE test_pass();
    # test_pass() terminates the test, so the original never reached the
    # cleanup lines that followed it.
    con_ops.change_global_config('identity', 'session.timeout', session_to)
    con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc)
    acc_ops.logout(session_uuid)
    test_util.test_pass('vms destroy Success. Destroy %d VMs.' % num)
def test():
    """Memory-billing test: bulk-create billings, verify them, then check one VM's memory cost."""
    test_util.test_logger("start memory billing")
    test_util.test_logger("create man memory billing instantiation")
    bill_memory = test_stub.MemoryBilling()
    test_util.test_logger("loop 400 to create memory billing")
    test_stub.create_option_billing(bill_memory, count)
    test_util.test_logger("verify memory billing instantiation if is right,and then delete all")
    test_stub.verify_option_billing(count)
    test_util.test_logger("create memory billing instantiation")
    bill_memory.set_timeUnit("s")
    bill_memory.set_price("5")
    bill_memory.create_resource_type()
    test_util.test_logger("create vm instance")
    global vm
    # HOIST: set_vm_resource() was called five times for the same tuple;
    # calling it once guarantees every argument comes from one consistent
    # snapshot (and avoids repeated lookups).
    vm_resource = test_stub.set_vm_resource()
    vm = test_stub.create_vm_billing("test_vmm", vm_resource[0], None,
            vm_resource[1], vm_resource[2])
    # Instance-offering memory converted from bytes to GB.
    vm_memory_size_ratio = res_ops.query_resource_fields(res_ops.INSTANCE_OFFERING,
            res_ops.gen_query_conditions('uuid', '=',
            vm_resource[1]))[0].memorySize / 1024 / 1024 / float(1024)
    time.sleep(1)
    if float(bill_memory.get_price()) * vm_memory_size_ratio > bill_memory.get_price_total().total:
        test_util.test_fail("calculate memory cost fail,actual result is %s" % (bill_memory.get_price_total().total))
    vm.clean()
    bill_memory.delete_resource()
    test_util.test_pass("check memory billing pass")
def test():
    """Add a root-volume template from a file:/// URL and verify add + delete work.

    Picks the first connected ImageStore/SFTP/Ceph backup storage, or skips.
    """
    global new_image
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, \
            None)
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")
    for bs in bss:
        if bs.type in (inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE,
                       inventory.SFTP_BACKUP_STORAGE_TYPE,
                       inventory.CEPH_BACKUP_STORAGE_TYPE):
            break
    else:
        test_util.test_skip('Not find image store type backup storage.')
    image_option = test_util.ImageOption()
    image_option.set_format('raw')
    image_option.set_name('test_file_url_image')
    image_option.set_system_tags('qemuga')
    image_option.set_mediaType('RootVolumeTemplate')
    image_option.set_url("file:///etc/issue")
    # BUG FIX: use the backup storage the search loop actually found; the
    # original searched for a suitable 'bs' but then used bss[0].uuid,
    # which may be a storage of an unsupported type.
    image_option.set_backup_storage_uuid_list([bs.uuid])
    image_option.set_timeout(60000)
    new_image = zstack_image_header.ZstackTestImage()
    new_image.set_creation_option(image_option)
    new_image.add_root_volume_template()
    new_image.delete()
    test_util.test_pass('test add file:///image.raw passed.')
def test():
    """Concurrently add threads_num images and validate each add's progress reporting."""
    global threads
    global checker_threads
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, \
            None)
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")
    if bss[0].type != inventory.CEPH_BACKUP_STORAGE_TYPE:
        if hasattr(inventory, 'IMAGE_STORE_BACKUP_STORAGE_TYPE') and bss[0].type != inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
            test_util.test_skip("not find available imagestore or ceph backup storage. Skip test")
    bs_uuid = bss[0].uuid
    # Kick off every image add ...
    for idx in range(0, threads_num):
        threads[idx] = threading.Thread(target=add_image, args=(bs_uuid, idx, ))
        threads[idx].start()
    # ... then one progress checker per add.
    for idx in range(0, threads_num):
        checker_threads[idx] = threading.Thread(target=check_add_image_progress, args=(idx, ))
        checker_threads[idx].start()
    # Wait for each pair to finish, then verify and clean up its image.
    for idx in range(0, threads_num):
        checker_threads[idx].join()
        threads[idx].join()
        images[idx].check()
        images[idx].delete()
    test_util.test_pass('Add image Progress Test Success')
def test():
    """Add a vCenter, verify its VM network was synced in, then delete the vCenter."""
    global vcenter_uuid
    vcenter1_name = os.environ['vcenter1_name']
    vcenter1_domain_name = os.environ['vcenter1_ip']
    vcenter1_username = os.environ['vcenter1_domain_name']
    vcenter1_password = os.environ['vcenter1_password']
    vm_network_pattern1 = os.environ['vcenter1_network_pattern1']
    # add vcenter senario1:
    zone_uuid = res_ops.get_resource(res_ops.ZONE)[0].uuid
    inv = vct_ops.add_vcenter(vcenter1_name, vcenter1_domain_name, vcenter1_username, vcenter1_password, True, zone_uuid)
    vcenter_uuid = inv.uuid
    if vcenter_uuid == None:
        test_util.test_fail("vcenter_uuid is None")
    # Basic sanity check on the newly joined vcenter resources.
    networks = res_ops.query_resource_fields(res_ops.L3_NETWORK, [], None, fields=['name'])
    vm_network_list = [net.name for net in networks]
    test_util.test_logger(", ".join([str(name) for name in vm_network_list]))
    if vm_network_pattern1 not in vm_network_list:
        test_util.test_fail("newly joined vcenter missing vm network1, test failed")
    vct_ops.delete_vcenter(vcenter_uuid)
    test_util.test_pass("add && delete vcenter test passed.")
def test():
    """Add a vCenter, verify its OVA template image was synced in, then delete the vCenter."""
    global vcenter_uuid
    vcenter1_name = os.environ['vcenter1_name']
    vcenter1_domain_name = os.environ['vcenter1_ip']
    vcenter1_username = os.environ['vcenter1_domain_name']
    vcenter1_password = os.environ['vcenter1_password']
    ova_template_pattern1 = os.environ['vcenter1_template_exist']
    # add vcenter senario1:
    zone_uuid = res_ops.get_resource(res_ops.ZONE)[0].uuid
    inv = vct_ops.add_vcenter(vcenter1_name, vcenter1_domain_name, vcenter1_username, vcenter1_password, True, zone_uuid)
    vcenter_uuid = inv.uuid
    if vcenter_uuid == None:
        test_util.test_fail("vcenter_uuid is None")
    # Basic sanity check on the newly joined vcenter resources.
    image_invs = res_ops.query_resource_fields(res_ops.IMAGE, [], None, fields=['name'])
    image_list = [img.name for img in image_invs]
    test_util.test_logger(", ".join([str(name) for name in image_list]))
    if ova_template_pattern1 not in image_list:
        test_util.test_fail("newly joined vcenter missing fingerprint vm1, test failed")
    vct_ops.delete_vcenter(vcenter_uuid)
    test_util.test_pass("add && delete vcenter test passed.")
def test():
    """Cold-migrate a data volume created from a template and carrying snapshots.

    Only runs on local storage; skips otherwise.
    """
    global test_obj_dict
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, \
            None, fields=['uuid'])
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")
    vol_option = test_util.VolumeOption()
    test_util.test_dsc('Create volume and check')
    offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    vol_option.set_disk_offering_uuid(offering.uuid)
    src_volume = test_stub.create_volume(vol_option)
    test_obj_dict.add_volume(src_volume)
    src_volume.check()
    volume_uuid = src_volume.volume.uuid
    test_util.test_dsc('Create vm and check')
    vm = test_stub.create_vr_vm('migrate_volume_vm', 'imageName_net', 'l3VlanNetwork2')
    test_obj_dict.add_vm(vm)
    vm.check()
    vm_uuid = vm.vm.uuid
    # Attach/detach once, then template the volume with the VM stopped.
    src_volume.attach(vm)
    src_volume.detach(vm_uuid)
    vm.stop()
    image_obj = src_volume.create_template([bss[0].uuid])
    vm.start()
    host_uuid = vm.vm.hostUuid
    ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    new_volume = image_obj.create_data_volume(ps.uuid, 'volumeName', host_uuid)
    test_obj_dict.add_volume(new_volume)
    new_volume.check()
    volume_uuid = new_volume.volume.uuid
    ps = test_lib.lib_get_primary_storage_by_uuid(vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')
    # Put two snapshots on the new volume before migrating it.
    snapshots = test_obj_dict.get_volume_snapshot(volume_uuid)
    snapshots.set_utility_vm(vm)
    snapshots.create_snapshot('create_snapshot1')
    snapshots.check()
    snapshots.create_snapshot('create_snapshot2')
    snapshots.check()
    target_host = test_lib.lib_find_random_host_by_volume_uuid(volume_uuid)
    vol_ops.migrate_volume(volume_uuid, target_host.uuid)
    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Cold migrate Data Volume from Template with Snapshot Test Success')
def test():
    """Repeatedly create a memory billing with random price/units and verify VM spending."""
    success_round = 0
    for vm_idx in range(0, vm_max):
        test_util.test_logger("clear %s data" % billing_resource)
        test_stub.resource_price_clear(billing_resource)
        test_util.test_logger("=====SPENDING CHECK VM %s=====" % str(vm_idx+1))
        bill_memory = test_stub.MemoryBilling()
        # Randomize the billing parameters for this iteration.
        time_unit = random.choice(time_unit_dict)
        price = str(random.randint(0, 9999))
        resource_unit = random.choice(resource_unit_dict.keys())
        bill_memory.set_timeUnit(time_unit)
        bill_memory.set_price(price)
        bill_memory.set_resourceUnit(resource_unit)
        test_util.test_logger("create memory billing\n price=%s, timeUnit=%s resourceUnit=%s" % (price, time_unit, resource_unit))
        bill_memory.create_resource_type()
        test_util.test_logger("create vm instance")
        global vm
        vm = test_stub.create_vm_billing("test_vmm", test_stub.set_vm_resource()[0], None,
                test_stub.set_vm_resource()[1], test_stub.set_vm_resource()[2])
        # Offering memory scaled to the billing's resource unit.
        vm_memory_size_ratio = res_ops.query_resource_fields(res_ops.INSTANCE_OFFERING,
                res_ops.gen_query_conditions('uuid', '=',
                test_stub.set_vm_resource()[1]))[0].memorySize / resource_unit_dict[resource_unit]
        test_util.test_logger("====check vm spending====")
        for rnd in range(0, round_max):
            test_util.test_logger("===spending check round %s-%s===" % (str(vm_idx+1), str(rnd+1)))
            if not test_stub.check(bill_memory, billing_resource, random.choice(offset_unit_dict), random.randint(0, 3), vm_memory_size_ratio):
                test_util.test_fail("check vm billing spending finished\n success: %s/%s" % (success_round, round_sum))
            success_round += 1
    test_util.test_pass("check memory billing finished\n success: %s/%s" % (success_round, round_sum))
def test():
    """Delete every VIP and verify none remain."""
    global session_to
    global session_mc
    global session_uuid
    # BUG FIX: log in BEFORE touching the global config; the original passed
    # session_uuid to change_global_config() before login_as_admin() had
    # assigned it.
    session_uuid = acc_ops.login_as_admin()
    session_to = con_ops.change_global_config('identity', 'session.timeout', '720000', session_uuid)
    session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000', session_uuid)
    num = res_ops.query_resource_count(res_ops.VIP, [], session_uuid)
    if num <= thread_threshold:
        vips = res_ops.query_resource(res_ops.VIP, [], session_uuid)
    else:
        # Page through the VIPs to avoid a single huge query.
        start = 0
        limit = thread_threshold - 1
        curr_num = start
        vips = []
        while curr_num < num:
            vips_temp = res_ops.query_resource_fields(res_ops.VIP, [],
                    session_uuid, ['uuid'], start, limit)
            vips.extend(vips_temp)
            curr_num += limit
            start += limit
    delete_vips(vips)
    con_ops.change_global_config('identity', 'session.timeout', session_to, session_uuid)
    con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc, session_uuid)
    left_num = res_ops.query_resource_count(res_ops.VIP, [], session_uuid)
    acc_ops.logout(session_uuid)
    if left_num == 0:
        test_util.test_pass('Delete VIP Success. Delete %d VIPs.' % num)
    else:
        test_util.test_fail('Delete VIP Fail. %d VIPs are not deleted.' % left_num)
def test():
    """Delete and expunge a shareable volume while every primary storage is disabled.

    Verifies the attached VM stays reachable and the volume can still be
    removed with all PSs disabled, then re-enables the PSs and reconnects the
    host and the virtual router. Only runs on Ceph/SharedBlock primary storage.
    """
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    allow_ps_list = [inventory.CEPH_PRIMARY_STORAGE_TYPE, "SharedBlock"]
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)
    test_util.test_dsc('Create test vm and check')
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, \
            None)
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")
    # Delay-delete so the volume passes through the Deleted state before expunge.
    test_lib.lib_set_delete_policy('vm', 'Delay')
    test_lib.lib_set_delete_policy('volume', 'Delay')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    # Remember the VR/host so they can be reconnected at the end of the test.
    vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    vr_uuid = vr.uuid
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    # Shareable virtio-scsi data volume.
    volume_creation_option.set_system_tags(['ephemeral::shareable', 'capability::virtio-scsi'])
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    volume.attach(vm)
    #ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    #ps_uuid = ps.uuid
    #ps_ops.change_primary_storage_state(ps_uuid, 'disable')
    test_stub.disable_all_pss()
    # The VM must stay reachable (SSH port up) while the PSs are disabled.
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to running when PS change to disable state')
    vm.set_state(vm_header.RUNNING)
    vm.check()
    # Delete + expunge must succeed even with every PS disabled.
    volume.delete()
    volume.check()
    volume.expunge()
    volume.check()
    #ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    test_stub.enable_all_pss()
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)
    vm.destroy()
    # Restore the delete policies changed at the top of the test.
    test_lib.lib_set_delete_policy('vm', 'Direct')
    test_lib.lib_set_delete_policy('volume', 'Direct')
    test_util.test_pass('Delete volume under PS disable mode Test Success')
def delete_all_volumes(thread_threshold=1000):
    """Directly delete every non-Deleted volume, then restore policy and expunge time.

    thread_threshold bounds both the single-query size and the deletion
    concurrency passed through to do_delete_volumes().
    """
    session_uuid = acc_ops.login_as_admin()
    session_to = con_ops.change_global_config("identity", "session.timeout", "720000")
    session_mc = con_ops.change_global_config("identity", "session.maxConcurrent", "10000")
    # Switch to Direct delete so volumes are removed immediately.
    delete_policy = test_lib.lib_set_delete_policy("volume", "Direct")
    expunge_time = test_lib.lib_set_expunge_time("volume", 1)
    cond = res_ops.gen_query_conditions("status", "!=", "Deleted")
    total = res_ops.query_resource_count(res_ops.VOLUME, cond)
    if total <= thread_threshold:
        vols = res_ops.query_resource(res_ops.VOLUME, cond)
    else:
        # Page through the volumes instead of one huge query.
        page_start = 0
        page_size = thread_threshold - 1
        fetched = 0
        vols = []
        while fetched < total:
            vols.extend(res_ops.query_resource_fields(res_ops.VOLUME, cond, None, ["uuid"], page_start, page_size))
            fetched += page_size
            page_start += page_size
    do_delete_volumes(vols, thread_threshold)
    # Put the original policy and expunge time back.
    test_lib.lib_set_delete_policy("volume", delete_policy)
    test_lib.lib_set_expunge_time("volume", expunge_time)
    test_util.test_logger("Volumes destroy Success. Destroy %d Volumes." % total)
def test():
    """Add an image whose download takes ~150 minutes and check it took over two hours."""
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, \
            None, fields=['uuid'])
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")
    img_option = test_util.ImageOption()
    img_option.set_name('test_150min_downloading_image')
    img_option.set_format('qcow2')
    img_option.set_mediaType('RootVolumeTemplate')
    img_option.set_url(os.environ.get('timeout150MinImageUrl'))
    img_option.set_backup_storage_uuid_list([bss[0].uuid])
    new_image = zstack_image_header.ZstackTestImage()
    new_image.set_creation_option(img_option)
    begin = time.time()
    new_image.add_root_volume_template()
    cost_time = time.time() - begin
    # The add must not return before the 2-hour mark for this slow image.
    if cost_time < 7200:
        test_util.test_fail('The test image is added less than 2 hours: \
            %s, which does not meet the test criterial.' % cost_time)
    new_image.delete()
    new_image.expunge([bss[0].uuid])
    test_util.test_pass('Add Image with 150 mins Pass.')
def test():
    """Create a UEFI-boot linux VM from a UEFI image and verify it answers ping."""
    img_option = test_util.ImageOption()
    UEFI_image_url = os.environ.get('imageUrl_linux_UEFI')
    image_name = os.environ.get('imageName_linux_UEFI')
    img_option.set_name(image_name)
    bs_uuid = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, [], None)[0].uuid
    img_option.set_backup_storage_uuid_list([bs_uuid])
    img_option.set_format('qcow2')
    img_option.set_url(UEFI_image_url)
    # Tag the image so the VM boots through UEFI instead of legacy BIOS.
    img_option.set_system_tags("bootMode::UEFI")
    image_inv = img_ops.add_root_volume_template(img_option)
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    test_obj_dict.add_image(image)
    vm = test_stub.create_vm(image_name=os.environ.get('imageName_linux_UEFI'))
    test_obj_dict.add_vm(vm)
    vm.check()
    vm_ip = vm.get_vm().vmNics[0].ip
    retcode = subprocess.call(["ping", "-c", "4", vm_ip])
    if retcode != 0:
        test_util.test_fail('Create VM Test linux UEFI failed.')
    # BUG FIX: test_pass() terminates the test, so the original's
    # vm.destroy() after the if/else was unreachable -- destroy the VM
    # before reporting success.
    vm.destroy()
    test_util.test_pass('Create VM Test linux UEFI Success.')
def check(self):
    """DB checker: verify the share volume's attachment records match the test object.

    Queries the ShareVolume table for rows referencing this volume and judges
    True only when the test object's target VM appears among the attached VMs.
    """
    super(zstack_share_volume_attach_db_checker, self).check()
    volume = self.test_obj.volume
    try:
        sv_cond = res_ops.gen_query_conditions("volumeUuid", '=', volume.uuid)
        share_volume_vm_uuids = res_ops.query_resource_fields(res_ops.SHARE_VOLUME, sv_cond, None, fields=['vmInstanceUuid'])
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
        test_util.test_logger('Check result: [volumeInventory uuid:] %s does not exist in database.' % self.test_obj.volume.uuid)
        return self.judge(False)
    if not share_volume_vm_uuids:
        # update self.test_obj, due to vm destroyed.
        if self.test_obj.target_vm.state == vm_header.DESTROYED or \
                self.test_obj.target_vm.state == vm_header.EXPUNGED:
            test_util.test_warn('Update test [volume:] %s state, since attached VM was destroyed.' % volume.uuid)
            self.test_obj.update()
        else:
            test_util.test_warn('Check warn: [volume:] %s state is not aligned with DB. DB did not record any attached VM, but test volume has attached vm record: %s.' % (volume.uuid, volume.vmInstanceUuid))
        test_util.test_logger('Check result: [volume:] %s does NOT have vmInstanceUuid in Database. It is not attached to any vm.' % volume.uuid)
        return self.judge(False)
    if not self.test_obj.target_vm:
        test_util.test_logger('Check result: test [volume:] %s does NOT have vmInstance record in test structure. Can not do furture checking.' % volume.uuid)
        return self.judge(False)
    vm = self.test_obj.target_vm.vm
    # BUG FIX: the query returns inventory objects, not raw uuid strings, so
    # the original 'vm.uuid not in share_volume_vm_uuids' was always True,
    # inverting the attached/not-attached verdict. Compare against the
    # extracted vmInstanceUuid values and judge True when the vm IS listed.
    attached_vm_uuids = [sv.vmInstanceUuid for sv in share_volume_vm_uuids]
    if vm.uuid in attached_vm_uuids:
        test_util.test_logger('Check result: [volume:] %s is attached to [vm:] %s in zstack database.' % (volume.uuid, vm.uuid))
        return self.judge(True)
    else:
        test_util.test_logger('Check result: [volume:] %s is NOT attached to [vm:] %s in zstack database.' % (volume.uuid, vm.uuid))
        return self.judge(False)
def test():
    """Delete every security group and verify none remain."""
    global session_to
    global session_mc
    global session_uuid
    # BUG FIX: log in BEFORE touching the global config; the original passed
    # session_uuid to change_global_config() before login_as_admin() had
    # assigned it.
    session_uuid = acc_ops.login_as_admin()
    session_to = con_ops.change_global_config("identity", "session.timeout", "720000", session_uuid)
    session_mc = con_ops.change_global_config("identity", "session.maxConcurrent", "10000", session_uuid)
    num = res_ops.query_resource_count(res_ops.SECURITY_GROUP, [], session_uuid)
    if num <= thread_threshold:
        sgs = res_ops.query_resource(res_ops.SECURITY_GROUP, [], session_uuid)
    else:
        # Page through the SGs to avoid a single huge query.
        start = 0
        limit = thread_threshold - 1
        curr_num = start
        sgs = []
        while curr_num < num:
            sgs_tmp = res_ops.query_resource_fields(res_ops.SECURITY_GROUP, [], session_uuid, ["uuid"], start, limit)
            sgs.extend(sgs_tmp)
            curr_num += limit
            start += limit
    delete_sgs(sgs)
    con_ops.change_global_config("identity", "session.timeout", session_to, session_uuid)
    con_ops.change_global_config("identity", "session.maxConcurrent", session_mc, session_uuid)
    left_num = res_ops.query_resource_count(res_ops.SECURITY_GROUP, [], session_uuid)
    acc_ops.logout(session_uuid)
    if left_num == 0:
        test_util.test_pass("Delete SG Success. Delete %d SGs." % num)
    else:
        test_util.test_fail("Delete SG Fail. %d SGs are not deleted." % left_num)
def delete_all_volumes(thread_threshold = 1000):
    """Switch volume delete policy to Direct and remove every non-Deleted volume."""
    session_uuid = acc_ops.login_as_admin()
    session_to = con_ops.change_global_config('identity', 'session.timeout', '720000')
    session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000')
    # Direct delete with a 1-second expunge so volumes disappear immediately.
    delete_policy = test_lib.lib_set_delete_policy('volume', 'Direct')
    expunge_time = test_lib.lib_set_expunge_time('volume', 1)
    cond = res_ops.gen_query_conditions('status', '!=', 'Deleted')
    num = res_ops.query_resource_count(res_ops.VOLUME, cond)
    if num <= thread_threshold:
        do_delete_volumes(res_ops.query_resource(res_ops.VOLUME, cond), thread_threshold)
    else:
        # Collect uuids page by page, then delete the whole batch.
        offset = 0
        batch = thread_threshold - 1
        seen = 0
        collected = []
        while seen < num:
            collected.extend(res_ops.query_resource_fields(res_ops.VOLUME,
                    cond, None, ['uuid'], offset, batch))
            seen += batch
            offset += batch
        do_delete_volumes(collected, thread_threshold)
    # Restore the original policy and expunge time.
    test_lib.lib_set_delete_policy('volume', delete_policy)
    test_lib.lib_set_expunge_time('volume', expunge_time)
    test_util.test_logger('Volumes destroy Success. Destroy %d Volumes.' % num)
def test():
    """CPU-billing test: bulk-create billings, verify them, then check one VM's CPU cost."""
    test_util.test_logger("start cpu billing")
    test_util.test_logger("create man cpu billing instantiation")
    bill_cpu = test_stub.CpuBilling()
    test_util.test_logger("loop 400 to create cpu billing")
    test_stub.create_option_billing(bill_cpu, count)
    test_util.test_logger("verify cpu billing instantiation if is right,and then delete all")
    test_stub.verify_option_billing(count)
    test_util.test_logger("create cpu billing instantiation")
    bill_cpu.set_timeUnit("s")
    bill_cpu.set_price("5")
    bill_cpu.create_resource_type()
    test_util.test_logger("create vm instance")
    global vm
    # HOIST: set_vm_resource() was called four times for the same tuple;
    # calling it once guarantees every argument comes from one consistent
    # snapshot (and avoids repeated lookups).
    vm_resource = test_stub.set_vm_resource()
    vm = test_stub.create_vm_billing("test_vmm", vm_resource[0], None,
            vm_resource[1], vm_resource[2])
    cpuNum = res_ops.query_resource_fields(res_ops.INSTANCE_OFFERING,
            res_ops.gen_query_conditions('uuid', '=',
            vm_resource[1]))[0].cpuNum
    time.sleep(1)
    if bill_cpu.get_price_total().total < cpuNum * int(bill_cpu.get_price()):
        test_util.test_fail("calculate cpu cost fail,actual result is %s" % (bill_cpu.get_price_total().total))
    vm.clean()
    bill_cpu.delete_resource()
    test_util.test_pass("check cpu billing pass")
def test():
    """Add a vCenter, verify its backup storage was synced in, then delete the vCenter."""
    global vcenter_uuid
    vcenter1_name = os.environ['vcenter1_name']
    vcenter1_domain_name = os.environ['vcenter1_ip']
    vcenter1_username = os.environ['vcenter1_domain_name']
    vcenter1_password = os.environ['vcenter1_password']
    zone_uuid = res_ops.get_resource(res_ops.ZONE)[0].uuid
    inv = vct_ops.add_vcenter(vcenter1_name, vcenter1_domain_name, vcenter1_username, vcenter1_password, True, zone_uuid)
    vcenter_uuid = inv.uuid
    if vcenter_uuid == None:
        test_util.test_fail("vcenter_uuid is None")
    vcenter_backup_storage_cond = res_ops.gen_query_conditions("name", '=', vcenter_backup_storage_name)
    # BUG FIX: the original indexed [0] on the query result before checking
    # emptiness, so the "not found" branch was unreachable -- an empty
    # result raised a bare IndexError instead of a clear test failure.
    vcbs_invs = res_ops.query_resource_fields(res_ops.VCENTER_BACKUP_STORAGE, vcenter_backup_storage_cond, None, fields=['uuid'])
    if not vcbs_invs:
        test_util.test_fail("not found vcenter backup storage")
    vcbs_uuid = vcbs_invs[0].uuid
    vct_ops.delete_vcenter(vcenter_uuid)
    test_util.test_pass("add && delete vcenter test passed.")
def test(): img_option = test_util.ImageOption() ipv6_image_url = os.environ.get('ipv6ImageUrl') image_name = os.environ.get('ipv6ImageName') img_option.set_name(image_name) bs_uuid = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, [], None)[0].uuid img_option.set_backup_storage_uuid_list([bs_uuid]) img_option.set_format('qcow2') img_option.set_url(ipv6_image_url) image_inv = img_ops.add_root_volume_template(img_option) image = test_image.ZstackTestImage() image.set_image(image_inv) image.set_creation_option(img_option) test_obj_dict.add_image(image) vm1 = test_stub.create_vm(l3_name = "%s,%s" %(os.environ.get('l3PublicNetworkName1'), os.environ.get('l3PublicNetworkName')), vm_name = 'IPv6 2 stack test ipv4 and ipv6', image_name = image_name) vm2 = test_stub.create_vm(l3_name = os.environ.get('l3PublicNetworkName1'), vm_name = 'IPv6 2 stack test ipv6', image_name = image_name) time.sleep(90) #waiting for vm bootup vm1_nic1 = vm1.get_vm().vmNics[0].ip vm1_nic2 = vm1.get_vm().vmNics[1].ip vm2_nic1 = vm2.get_vm().vmNics[0].ip for ip in (vm1_nic1, vm1_nic2): if "." in ip: ipv4 = ip print "vm1_nic1 : %s, vm1_nic2: %s, vm2_nic1 :%s, ipv4 :%s." %(vm1_nic1, vm1_nic2, vm2_nic1,ipv4) cmd = "ping6 -c 4 %s" %(vm2_nic1) (retcode, output, erroutput) = ssh.execute(cmd, ipv4, "root", "password", True, 22) print "retcode is: %s; output is : %s.; erroutput is: %s" %(retcode, output , erroutput) if retcode != 0: test_util.test_fail('Test Create IPv6 VM Failed.')
def check_add_image_progress(index):
    """Poll the add-image task at *index* and validate its progress reporting.

    Runs in a checker thread. Reads the module-level image_jobs list for the
    task handle and records 'pass' into checker_results[index] on success.
    Progress must start within 0..100 and never decrease until the record
    disappears, after which the image must be Ready.
    """
    image_cond = res_ops.gen_query_conditions("status", '=', "Downloading")
    image_cond = res_ops.gen_query_conditions("name", '=', 'test_add_image_progress%s' % (index), image_cond)
    image = res_ops.query_resource_fields(res_ops.IMAGE, image_cond, \
            None, fields=['uuid'])
    if len(image) <= 0:
        test_util.test_fail("image is not in creating after 10 seconds")
        exit()
    if image[0].status == "Ready":
        # NOTE(review): the query above restricts fields to ['uuid'], so
        # 'status' here may not be populated -- confirm against res_ops.
        test_util.test_logger("image has been added")
        exit()
    # Wait up to ~60s (600 * 0.1s) for the first progress record with content.
    for i in range(0, 600):
        progresses = res_ops.get_progress(image_jobs[index])
        if len(progresses) <= 0:
            time.sleep(0.1)
            continue
        progress = progresses[0]
        if progress.content != None:
            break
        else:
            test_util.test_logger('task progress still not ready')
            time.sleep(0.1)
    if int(progress.content) < 0 or int(progress.content) > 100:
        test_util.test_fail("Progress of task should be between 0 and 100, while it actually is %s" % (progress.content))
    # Track the task for up to an hour; progress must be non-decreasing.
    for i in range(0, 3600):
        time.sleep(1)
        test_util.test_logger(i)
        last_progress = progress
        progresses = res_ops.get_progress(image_jobs[index])
        if len(progresses) <= 0:
            # No progress record any more: the task is presumed finished.
            break
        progress = progresses[0]
        test_util.test_logger(progress.content)
        if progress.content == None:
            break
        if int(progress.content) < int(last_progress.content):
            test_util.test_fail("Progress (%s) of task is smaller than last time (%s)" % (progress.content, last_progress.content))
    # Once progress is gone, the image itself must have reached Ready.
    image_cond = res_ops.gen_query_conditions("uuid", '=', image[0].uuid)
    image_query2 = res_ops.query_resource_fields(res_ops.IMAGE, image_cond)
    time.sleep(1)
    if image_query2[0].status != "Ready":
        test_util.test_fail("Image should be ready when no progress anymore")
    checker_results[index] = 'pass'
def test():
    """Create ZSTACK_TEST_NUM VMs concurrently (bounded by thread_threshold
    threads), then verify the expected number of VMs exists.

    Fix: the instance-offering and L3 queries previously ran BEFORE
    acc_ops.login_as_admin(), passing an unset/stale global session_uuid.
    Login and the session config changes now happen first.
    """
    global session_uuid
    global session_to
    global session_mc
    vm_num = os.environ.get('ZSTACK_TEST_NUM')
    if not vm_num:
        vm_num = 0
    else:
        vm_num = int(vm_num)
    test_util.test_logger('ZSTACK_THREAD_THRESHOLD is %d' % thread_threshold)
    test_util.test_logger('ZSTACK_TEST_NUM is %d' % vm_num)
    org_num = vm_num
    session_uuid = acc_ops.login_as_admin()
    #change account session timeout.
    session_to = con_ops.change_global_config('identity', 'session.timeout', '720000', session_uuid)
    session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000', session_uuid)
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    l3_name = os.environ.get('l3PublicNetworkName')
    # one private L3 per VM; exclude the public network
    conditions = res_ops.gen_query_conditions('name', '!=', l3_name)
    l3s = res_ops.query_resource_fields(res_ops.L3_NETWORK, conditions, \
            session_uuid, ['uuid'], start = 0, limit = org_num)
    random_name = random.random()
    vm_name = 'multihost_basic_vm_%s' % str(random_name)
    for vm_n in range(org_num):
        vm_creation_option = test_util.VmOption()
        image_name = os.environ.get('imageName_s')
        image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
        vm_creation_option.set_l3_uuids([l3s[vm_n].uuid])
        vm_creation_option.set_image_uuid(image_uuid)
        vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
        vm_creation_option.set_session_uuid(session_uuid)
        vm = test_vm_header.ZstackTestVm()
        vm_creation_option.set_name(vm_name)
        check_thread_exception()
        vm.set_creation_option(vm_creation_option)
        thread = threading.Thread(target=create_vm, args=(vm,))
        # throttle: never run more than thread_threshold creator threads
        while threading.active_count() > thread_threshold:
            time.sleep(1)
        thread.start()
    # wait for all creator threads to finish
    while threading.active_count() > 1:
        time.sleep(0.01)
    cond = res_ops.gen_query_conditions('name', '=', vm_name)
    vms = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond, session_uuid)
    # restore session config before logout
    con_ops.change_global_config('identity', 'session.timeout', session_to, session_uuid)
    con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc, session_uuid)
    acc_ops.logout(session_uuid)
    if vms == org_num:
        test_util.test_pass('Create %d VMs Test Success' % org_num)
    else:
        test_util.test_fail('Create %d VMs Test Failed. Only find %d VMs.' % (org_num, vms))
def skip_if_vr_not_vyos(vr_image_name):
    """Skip the current test unless an image named *vr_image_name* exists whose
    URL contains 'vrouter' (i.e. a vyos virtual-router image is present)."""
    name_cond = res_ops.gen_query_conditions('name', '=', vr_image_name)
    candidates = res_ops.query_resource_fields(res_ops.IMAGE, name_cond, None, ['url'])
    if any("vrouter" in candidate.url for candidate in candidates):
        test_util.test_logger("find vrouter image. Therefore, no need to skip")
    else:
        test_util.test_skip("not found vrouter image based on image name judgement. Therefore, skip test")
def test():
    """Create a VM with a userdata system tag on a Flat-DHCP network and verify
    the userdata was applied inside the guest (config file and directory exist,
    checked over key-based ssh)."""
    img_option = test_util.ImageOption()
    image_name = 'userdata-image'
    image_url = os.environ.get('userdataImageUrl')
    img_option.set_name(image_name)
    # use the first backup storage for the raw test image
    bs_uuid = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, [], None)[0].uuid
    img_option.set_backup_storage_uuid_list([bs_uuid])
    img_option.set_format('raw')
    img_option.set_url(image_url)
    image_inv = img_ops.add_root_volume_template(img_option)
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)
    test_obj_dict.add_image(image)
    l3_name = os.environ.get('l3VlanNetworkName5')
    l3_net = test_lib.lib_get_l3_by_name(l3_name)
    l3_net_uuid = l3_net.uuid
    # userdata is only delivered by the Flat-network DHCP provider
    if 'DHCP' not in test_lib.lib_get_l3_service_type(l3_net_uuid):
        test_util.test_skip('Only DHCP support userdata')
    for ns in l3_net.networkServices:
        if ns.networkServiceType == 'DHCP':
            sp_uuid = ns.networkServiceProviderUuid
            sp = test_lib.lib_get_network_service_provider_by_uuid(sp_uuid)
            if sp.type != 'Flat':
                test_util.test_skip('Only Flat DHCP support userdata')
    vm = test_stub.create_vm(l3_uuid_list = [l3_net_uuid], vm_name = 'userdata-vm',image_uuid = image.get_image().uuid,system_tags = ["userdata::%s" % os.environ.get('userdata_systemTags')])
    test_obj_dict.add_vm(vm)
    time.sleep(60)  # give cloud-init/userdata time to run inside the guest
    # vm.check() may fail because the image only allows key-based login
    try:
        vm.check()
    except:
        test_util.test_logger("expected failure to connect VM")
    vm_ip = vm.get_vm().vmNics[0].ip
    # key-based ssh as the user created by the userdata script
    ssh_cmd = 'ssh -i %s -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null test@%s' % (os.environ.get('sshkeyPriKey_file'), vm_ip)
    cmd = '%s cat /tmp/helloworld_config' % ssh_cmd
    process_result = test_stub.execute_shell_in_process(cmd, tmp_file)
    if process_result != 0:
        test_util.test_fail("fail to cat /tmp/helloworld_config")
    cmd = '%s find /tmp/temp' % ssh_cmd
    process_result = test_stub.execute_shell_in_process(cmd, tmp_file)
    if process_result != 0:
        test_util.test_fail("fail to find /tmp/temp")
    vm.destroy()
    test_obj_dict.rm_vm(vm)
    image.delete()
    if test_lib.lib_get_image_delete_policy() != 'Direct':
        image.expunge()
    test_obj_dict.rm_image(image)
    test_util.test_pass('Create VM with userdata Success')
def test():
    """Clone a VM, then delete and expunge the independently-created image while
    exercising VM operations on the clone.

    Fix: the backup-storage query only fetched ['uuid'] but bss[0].type is read
    below for the export decision; 'type' added to the field list.
    """
    global image1
    global test_obj_dict
    #run condition
    allow_bs_list = [inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE, inventory.CEPH_BACKUP_STORAGE_TYPE]
    test_lib.skip_test_when_bs_type_not_in_list(allow_bs_list)
    hosts = res_ops.query_resource(res_ops.HOST)
    if len(hosts) <= 1:
        test_util.test_skip("skip for host_num is not satisfy condition host_num>1")
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, None, fields=['uuid', 'type'])
    image_name1 = 'image1_a'
    image_option = test_util.ImageOption()
    image_option.set_format('qcow2')
    image_option.set_name(image_name1)
    #image_option.set_system_tags('qemuga')
    image_option.set_mediaType('RootVolumeTemplate')
    image_option.set_url(os.environ.get('imageUrl_s'))
    image_option.set_backup_storage_uuid_list([bss[0].uuid])
    image_option.set_timeout(3600*1000)
    image1 = zstack_image_header.ZstackTestImage()
    image1.set_creation_option(image_option)
    image1.add_root_volume_template()
    image1.check()
    #export image (image-store BS only)
    if bss[0].type in [inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE]:
        image1.export()
    image_name = os.environ.get('imageName_net')
    l3_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vm('test-vm', image_name, l3_name)
    test_obj_dict.add_vm(vm)
    # clone vm
    cloned_vm_name = ['cloned_vm']
    cloned_vm_obj = vm.clone(cloned_vm_name)[0]
    test_obj_dict.add_vm(cloned_vm_obj)
    # delete image
    image1.delete()
    # vm ops test
    test_stub.vm_ops_test(cloned_vm_obj, "VM_TEST_STATE")
    # expunge image
    image1.expunge()
    # vm ops test
    test_stub.vm_ops_test(cloned_vm_obj, "VM_TEST_RESIZE_RVOL")
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Cloned VM ops for BS Success')
def test():
    """Create a VM from a fresh image, destroy/expunge both, then clean the
    image cache on the primary storage and verify the cached template file is
    gone (local and NFS primary storage only)."""
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, \
            None, fields=['uuid'])
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")
    image_option = test_util.ImageOption()
    image_option.set_name('test_image_cache_cleanup')
    image_option.set_format('qcow2')
    image_option.set_mediaType('RootVolumeTemplate')
    image_option.set_url(os.environ.get('imageUrl_s'))
    image_option.set_backup_storage_uuid_list([bss[0].uuid])
    new_image = zstack_image_header.ZstackTestImage()
    new_image.set_creation_option(image_option)
    new_image.add_root_volume_template()
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    vm = test_stub.create_vm([l3_net_uuid], new_image.image.uuid, 'imagecache_vm', \
            default_l3_uuid = l3_net_uuid)
    test_obj_dict.add_vm(vm)
    vm.check()
    # capture host and PS before the VM is destroyed; both are needed for the
    # cache-path existence check at the end
    host = test_lib.lib_find_host_by_vm(vm.get_vm())
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    vm.destroy()
    if test_lib.lib_get_vm_delete_policy() != 'Direct':
        vm.expunge()
    new_image.delete()
    if test_lib.lib_get_image_delete_policy() != 'Direct':
        new_image.expunge()
    # NOTE(review): these skips run only after resources were created and torn
    # down — the PS type is not known until a VM has been placed on it
    if ps.type == 'SharedMountPoint':
        test_util.test_skip('CleanUpImageCacheOnPrimaryStorage not supported on SMP storage, skip test.')
    elif ps.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
        test_util.test_skip('ceph is not directly using image cache, skip test.')
    ps_ops.cleanup_imagecache_on_primary_storage(ps.uuid)
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        image_cache_path = "%s/imagecache/template/%s/%s.qcow2" % (ps.mountPath, new_image.image.uuid, new_image.image.uuid)
        if test_lib.lib_check_file_exist(host, image_cache_path):
            test_util.test_fail('image cache is expected to be deleted')
    elif ps.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
        image_cache_path = "%s/imagecache/template/%s/%s.qcow2" % (ps.mountPath, new_image.image.uuid, new_image.image.uuid)
        if test_lib.lib_check_file_exist(host, image_cache_path):
            test_util.test_fail('image cache is expected to be deleted')
    # elif ps.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
    # elif ps.type == 'SharedMountPoint':
    test_util.test_pass('imagecache cleanup Pass.')
def test():
    """Create a VM that installs its OS from an ISO, wait for the installation
    to finish (ssh probe through the host agent), then tear everything down.

    Fixes:
    - garbled failure message ("iso has not been failed to installed.") replaced
      with a clear one;
    - SSH_TIMEOUT restore moved into try/finally so the module-level timeout is
      restored even if the probe or test_fail raises.
    """
    global ssh_timeout
    data_volume_size = 10737418240
    disk_offering_option = test_util.DiskOfferingOption()
    disk_offering_option.set_name('root-disk-iso')
    disk_offering_option.set_diskSize(data_volume_size)
    data_volume_offering = vol_ops.create_volume_offering(disk_offering_option)
    test_obj_dict.add_disk_offering(data_volume_offering)
    cpuNum = 1
    memorySize = 536870912
    name = 'vm-offering-iso'
    new_offering_option = test_util.InstanceOfferingOption()
    new_offering_option.set_cpuNum(cpuNum)
    new_offering_option.set_memorySize(memorySize)
    new_offering_option.set_name(name)
    new_offering = vm_ops.create_instance_offering(new_offering_option)
    test_obj_dict.add_instance_offering(new_offering)
    img_option = test_util.ImageOption()
    img_option.set_name('image-iso')
    bs_uuid = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, [], None)[0].uuid
    img_option.set_backup_storage_uuid_list([bs_uuid])
    img_option.set_url(os.environ.get('imageServer') + '/iso/iso_for_install_vm_test.iso')
    image_inv = img_ops.add_iso_template(img_option)
    image_uuid = image_inv.uuid
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)
    test_obj_dict.add_image(image)
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    root_disk_uuid = data_volume_offering.uuid
    vm = test_stub.create_vm_with_iso([l3_net_uuid], image_uuid, 'vm-iso', root_disk_uuid, new_offering.uuid)
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    test_obj_dict.add_vm(vm)
    test_util.test_dsc('wait for iso installation')
    vm_inv = vm.get_vm()
    vm_ip = vm_inv.vmNics[0].ip
    # ssh probe: /root exists once the installed OS is up
    cmd = '[ -e /root ]'
    ssh_timeout = test_lib.SSH_TIMEOUT
    test_lib.SSH_TIMEOUT = 3600
    test_lib.lib_set_vm_host_l2_ip(vm_inv)
    try:
        if not test_lib.lib_ssh_vm_cmd_by_agent_with_retry(host_ip, vm_ip, 'root', 'password', cmd):
            test_util.test_fail("failed to install OS from ISO.")
    finally:
        test_lib.SSH_TIMEOUT = ssh_timeout
    vm.destroy()
    test_obj_dict.rm_vm(vm)
    image.delete()
    test_obj_dict.rm_image(image)
    vol_ops.delete_disk_offering(root_disk_uuid)
    test_obj_dict.rm_disk_offering(data_volume_offering)
    vm_ops.delete_instance_offering(new_offering.uuid)
    test_obj_dict.rm_instance_offering(new_offering)
    test_util.test_pass('Create VM with ISO Installation Test Success')
def test():
    """Clone a VM, then delete and expunge the independently-created image while
    exercising VM operations on the clone.

    Fix: the backup-storage query only fetched ['uuid'] but bss[0].type is read
    below for the export decision; 'type' added to the field list.
    """
    global image1
    global test_obj_dict
    #run condition
    allow_bs_list = [
        inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE,
        inventory.CEPH_BACKUP_STORAGE_TYPE
    ]
    test_lib.skip_test_when_bs_type_not_in_list(allow_bs_list)
    hosts = res_ops.query_resource(res_ops.HOST)
    if len(hosts) <= 1:
        test_util.test_skip(
            "skip for host_num is not satisfy condition host_num>1")
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE,
                                        bs_cond,
                                        None,
                                        fields=['uuid', 'type'])
    image_name1 = 'image1_a'
    image_option = test_util.ImageOption()
    image_option.set_format('qcow2')
    image_option.set_name(image_name1)
    #image_option.set_system_tags('qemuga')
    image_option.set_mediaType('RootVolumeTemplate')
    image_option.set_url(os.environ.get('imageUrl_s'))
    image_option.set_backup_storage_uuid_list([bss[0].uuid])
    image_option.set_timeout(3600 * 1000)
    image1 = zstack_image_header.ZstackTestImage()
    image1.set_creation_option(image_option)
    image1.add_root_volume_template()
    image1.check()
    #export image (image-store BS only)
    if bss[0].type in [inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE]:
        image1.export()
    image_name = os.environ.get('imageName_net')
    l3_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vm('test-vm', image_name, l3_name)
    test_obj_dict.add_vm(vm)
    # clone vm
    cloned_vm_name = ['cloned_vm']
    cloned_vm_obj = vm.clone(cloned_vm_name)[0]
    test_obj_dict.add_vm(cloned_vm_obj)
    # delete image
    image1.delete()
    # vm ops test
    test_stub.vm_ops_test(cloned_vm_obj, "VM_TEST_STATE")
    # expunge image
    image1.expunge()
    # vm ops test
    test_stub.vm_ops_test(cloned_vm_obj, "VM_TEST_RESIZE_RVOL")
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Cloned VM ops for BS Success')
def test():
    """Create a VM installed from an ISO, clone it, delete/expunge the ISO, then
    build an image from the clone's root volume and exercise VM ops.

    Fixes:
    - 'inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE == bs_list[0]' compared the
      type STRING against a backup-storage inventory OBJECT, so the branch was
      unreachable; compare against bs_list[0].type instead.
    - vm2 is created inside that branch, so its destroy is scoped to the branch
      as well (previously vm2.destroy() ran unconditionally and would raise
      NameError whenever the branch was skipped).
    """
    global image
    global test_obj_dict
    allow_bs_list = [
        inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE,
        inventory.CEPH_BACKUP_STORAGE_TYPE
    ]
    test_lib.skip_test_when_bs_type_not_in_list(allow_bs_list)
    allow_ps_list = [
        inventory.CEPH_PRIMARY_STORAGE_TYPE,
        inventory.NFS_PRIMARY_STORAGE_TYPE, 'SharedMountPoint'
    ]
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)
    #run condition
    hosts = res_ops.query_resource(res_ops.HOST)
    if len(hosts) <= 1:
        test_util.test_skip(
            "skip for host_num is not satisfy condition host_num>1")
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE,
                                        bs_cond,
                                        None,
                                        fields=['uuid'])
    #create disk offering
    data_volume_size = 10737418240
    disk_offering_option = test_util.DiskOfferingOption()
    disk_offering_option.set_name('root-disk-iso')
    disk_offering_option.set_diskSize(data_volume_size)
    data_volume_offering = vol_ops.create_volume_offering(disk_offering_option)
    test_obj_dict.add_disk_offering(data_volume_offering)
    #create instance offering
    cpuNum = 2
    memorySize = 1024 * 1024 * 1024
    name = 'iso-vm-offering'
    new_offering_option = test_util.InstanceOfferingOption()
    new_offering_option.set_cpuNum(cpuNum)
    new_offering_option.set_memorySize(memorySize)
    new_offering_option.set_name(name)
    new_offering = vm_ops.create_instance_offering(new_offering_option)
    test_obj_dict.add_instance_offering(new_offering)
    #add iso
    img_option = test_util.ImageOption()
    img_option.set_name('iso1')
    bs_uuid = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, [], None)[0].uuid
    img_option.set_backup_storage_uuid_list([bs_uuid])
    img_option.set_url(os.environ.get('isoForVmUrl'))
    image_inv = img_ops.add_iso_template(img_option)
    image_uuid = image_inv.uuid
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)
    test_obj_dict.add_image(image)
    #create vm by iso
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    root_disk_uuid = data_volume_offering.uuid
    vm = test_stub.create_vm_with_iso([l3_net_uuid], image_uuid, 'iso-vm', root_disk_uuid, new_offering.uuid)
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    test_obj_dict.add_vm(vm)
    #check vm: wait for the installed OS to answer on ssh
    vm_inv = vm.get_vm()
    vm_ip = vm_inv.vmNics[0].ip
    test_lib.lib_set_vm_host_l2_ip(vm_inv)
    test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, 22, 1800)
    #clone vm
    cloned_vm_name = ['cloned_vm_name']
    cloned_vm_obj = vm.clone(cloned_vm_name)[0]
    test_obj_dict.add_vm(cloned_vm_obj)
    #delete iso
    image.delete()
    test_obj_dict.rm_image(image)
    #vm ops test
    test_stub.vm_ops_test(cloned_vm_obj, "VM_TEST_ALL")
    #expunge iso
    image.expunge()
    #detach iso
    img_ops.detach_iso(vm.vm.uuid)
    #vm ops test
    test_stub.vm_ops_test(cloned_vm_obj, "VM_TEST_ALL")
    #create image by vm root volume
    cloned_vm_img_name = "cloned_vm_image1"
    img_option2 = test_util.ImageOption()
    img_option2.set_backup_storage_uuid_list([bs_uuid])
    img_option2.set_root_volume_uuid(cloned_vm_obj.vm.rootVolumeUuid)
    img_option2.set_name(cloned_vm_img_name)
    image1 = test_image.ZstackTestImage()
    image1.set_creation_option(img_option2)
    image1.create()
    image1.check()
    test_obj_dict.add_image(image1)
    bs_list = res_ops.query_resource(res_ops.BACKUP_STORAGE)
    if len(bs_list) == 1 and bs_list[0].type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
        #export image
        image1.export()
        #create vm
        vm2 = test_stub.create_vm('image-vm', cloned_vm_img_name, l3_name)
        #delete image
        image1.delete()
        test_obj_dict.rm_image(image1)
        #vm ops test
        test_stub.vm_ops_test(vm2, "VM_TEST_ALL")
        #expunge image
        image1.expunge()
        #vm ops test
        test_stub.vm_ops_test(vm2, "VM_TEST_ALL")
        vm2.destroy()
    vm.destroy()
    cloned_vm_obj.destroy()
    vol_ops.delete_disk_offering(root_disk_uuid)
    vm_ops.delete_instance_offering(new_offering.uuid)
    test_obj_dict.rm_vm(vm)
    test_obj_dict.rm_disk_offering(data_volume_offering)
    test_obj_dict.rm_instance_offering(new_offering)
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Create VM Image in Image Store Success')
def test():
    """Create a VM from a minimal ISO, build an image from its root volume,
    start a second VM from that image, then delete/expunge ISO and image while
    exercising VM operations.

    Fix: the backup-storage query only fetched ['uuid'] but bss[0].type is read
    below for the export decision; 'type' added to the field list.
    """
    global image
    global test_obj_dict
    # run condition
    hosts = res_ops.query_resource(res_ops.HOST)
    if len(hosts) <= 1:
        test_util.test_skip(
            "skip for host_num is not satisfy condition host_num>1")
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE,
                                        bs_cond,
                                        None,
                                        fields=['uuid', 'type'])
    # add iso and create vm from iso
    iso = test_stub.add_test_minimal_iso('minimal_iso')
    test_obj_dict.add_image(iso)
    root_volume_offering = test_stub.add_test_root_volume_offering(
        'root-disk-iso', 10737418240)
    test_obj_dict.add_disk_offering(root_volume_offering)
    vm_offering = test_stub.add_test_vm_offering(2, 1024 * 1024 * 1024,
                                                 'iso-vm-offering')
    test_obj_dict.add_instance_offering(vm_offering)
    vm = test_stub.create_vm_with_iso_for_test(vm_offering.uuid,
                                               iso.image.uuid,
                                               root_volume_offering.uuid,
                                               'iso-vm')
    test_obj_dict.add_vm(vm)
    # check vm
    vm_inv = vm.get_vm()
    test_lib.lib_set_vm_host_l2_ip(vm_inv)
    test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, 22, 1800)
    #create image by vm root volume
    created_vm_img_name = "created_vm_image1"
    img_option = test_util.ImageOption()
    img_option.set_backup_storage_uuid_list([bss[0].uuid])
    img_option.set_root_volume_uuid(vm.vm.rootVolumeUuid)
    img_option.set_name(created_vm_img_name)
    image = test_image.ZstackTestImage()
    image.set_creation_option(img_option)
    image.create()
    test_obj_dict.add_image(image)
    #export image (image-store BS only)
    if bss[0].type in [inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE]:
        image.export()
    #create vm
    l3_name = os.environ.get('l3VlanNetworkName1')
    vm2 = test_stub.create_vm('image-vm', created_vm_img_name, l3_name)
    #del expunge and detach iso
    iso.delete()
    iso.expunge()
    img_ops.detach_iso(vm.vm.uuid)
    # vm ops test
    test_stub.vm_ops_test(vm2, "VM_TEST_STATE")
    # del and expunge image2
    image.delete()
    # vm ops test
    test_stub.vm_ops_test(vm2, "VM_TEST_RESIZE_DVOL")
    image.expunge()
    # vm ops test
    test_stub.vm_ops_test(vm2, "VM_TEST_CHANGE_OS")
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Cloned VM ops for BS Success')
def test():
    """Live-migrate a VM twice between hosts, tear everything down, then clean
    the primary-storage image cache and verify the cached template directory is
    gone (NFS primary storage only)."""
    global vm
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, \
            None, fields=['uuid'])
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")
    image_option = test_util.ImageOption()
    image_option.set_name('test_image_cache_cleanup')
    image_option.set_format('qcow2')
    image_option.set_mediaType('RootVolumeTemplate')
    image_option.set_url(os.environ.get('imageUrl_s'))
    image_option.set_backup_storage_uuid_list([bss[0].uuid])
    new_image = zstack_image_header.ZstackTestImage()
    new_image.set_creation_option(image_option)
    new_image.add_root_volume_template()
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    # pin the VM to an enabled, connected host so migration has a source
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option = test_util.VmOption()
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(new_image.image.uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('test_image_cache_cleanup_migration_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    if not test_lib.lib_check_vm_live_migration_cap(vm.vm):
        test_util.test_skip('skip migrate if live migrate not supported')
    ps = test_lib.lib_get_primary_storage_by_uuid(
        vm.get_vm().allVolumes[0].primaryStorageUuid)
    test_obj_dict.add_vm(vm)
    vm.check()
    # imagecache cleanup is only meaningful on storages that keep a cache
    if ps.type == 'SharedMountPoint':
        test_util.test_skip(
            'CleanUpImageCacheOnPrimaryStorage not supported on SMP storage, skip test.'
        )
    elif ps.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
        test_util.test_skip(
            'ceph is not directly using image cache, skip test.')
    # remember the original host for the cache-path check after cleanup
    host = test_lib.lib_find_host_by_vm(vm.get_vm())
    test_stub.migrate_vm_to_random_host(vm)
    vm.check()
    test_stub.migrate_vm_to_random_host(vm)
    vm.check()
    new_image.delete()
    new_image.expunge()
    vm.destroy()
    vm.expunge()
    ps_ops.cleanup_imagecache_on_primary_storage(ps.uuid)
    if ps.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
        image_cache_path = "%s/imagecache/template/%s" % (ps.mountPath, new_image.image.uuid)
        if test_lib.lib_check_file_exist(host, image_cache_path):
            test_util.test_fail('image cache is expected to be deleted')
    # elif ps.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
    # elif ps.type == 'SharedMountPoint':
    test_util.test_pass('Migrate VM Test Success')
def test(): global role_uuid, project_uuid, project_02_uuid, project_template_01_uuid, project_template_02_uuid, \ company_uuid_01, company_uuid_02, department_01_uuid, department_02_uuid, virtual_id_group_uuid, \ virtual_id_uuid # 1 create role and add/remove policy statements = [{"effect": "Allow", "actions": ["org.zstack.header.vm.**"]}] role_uuid = iam2_ops.create_role('test_role', statements).uuid action = "org.zstack.header.image.**" statements = [{"effect": "Allow", "actions": [action]}] iam2_ops.add_policy_statements_to_role(role_uuid, statements) statement_uuid = iam2_ops.get_policy_statement_uuid_of_role( role_uuid, action) # statement_uuid= res_ops.get_resource(res_ops.ROLE, uuid=role_uuid)[0].statements[0].uuid iam2_ops.remove_policy_statements_from_role(role_uuid, [statement_uuid]) # 2 create 1001 projects and add/remove attributes to/from it for i in range(0, 1001): project_name = 'test_project-' + str(i) project_uuid = iam2_ops.create_iam2_project(project_name).uuid #iam2_ops.create_iam2_project(project_name) # TODO:there is nothing to do with the below api in the first version of iam2 # iam2_ops.add_attributes_to_iam2_project(project_uuid,attributes='') # iam2_ops.remove_attributes_from_iam2_project(project_uuid,attributes='') # 3 create project template from project #project_template_01_uuid = iam2_ops.create_iam2_project_template_from_project('project_template', project_uuid, # 'this is a template ' # 'description').uuid #project_template_inv = res_ops.get_resource(res_ops.IAM2_PROJECT_TEMPLATE, uuid=project_template_01_uuid) #if not project_template_inv: # test_util.test_fail("create template from project fail") # 4 create project template and then create project from template #project_template_02_uuid = iam2_ops.create_iam2_project_template('project_template_02').uuid #project_02_uuid = iam2_ops.create_iam2_project_from_template('project_02', project_template_02_uuid).uuid #project_inv = res_ops.get_resource(res_ops.IAM2_PROJECT, 
uuid=project_02_uuid) #if not project_inv: # test_util.test_fail("create project from template fail") # 5 create Company and Department (organization) company_uuid_01 = iam2_ops.create_iam2_organization( 'test_company_01', 'Company').uuid company_uuid_02 = iam2_ops.create_iam2_organization( 'test_company_02', 'Company').uuid department_01_uuid = iam2_ops.create_iam2_organization( 'test_department_01', 'Department', parent_uuid=company_uuid_01).uuid department_02_uuid = iam2_ops.create_iam2_organization( 'test_department_02', 'Department').uuid # 6 organization change parent iam2_ops.change_iam2_organization_parent(company_uuid_02, [department_02_uuid]) iam2_ops.change_iam2_organization_parent(company_uuid_02, [department_01_uuid]) department_inv = res_ops.get_resource(res_ops.IAM2_ORGANIZATION, uuid=department_01_uuid)[0] if department_inv.parentUuid != company_uuid_02: test_util.test_fail('change organization parent fail') department_inv = res_ops.get_resource(res_ops.IAM2_ORGANIZATION, uuid=department_02_uuid)[0] if department_inv.parentUuid != company_uuid_02: test_util.test_fail('change organization parent fail') # 7 create virtual id group and add/remove role and attributes to/from it #virtual_id_group_uuid = iam2_ops.create_iam2_virtual_id_group(project_uuid, 'test_virtual_id_group').uuid #iam2_ops.add_roles_to_iam2_virtual_id_group([role_uuid], virtual_id_group_uuid) #iam2_ops.remove_roles_from_iam2_virtual_idgroup([role_uuid], virtual_id_group_uuid) # TODO:there is nothing to do with the below api in the first version of iam2 # iam2_ops.add_attributes_to_iam2_virtual_id_group() # iam2_ops.remove_attributes_from_iam2_virtual_id_group() # 8 create 10001 virtual ids for i in range(0, 10001): (name, email, phone) = user_info_generator.generate_user_info() print name print email print phone virtual_id_uuid = iam2_ops.create_iam2_virtual_id( 'user-' + str(i), 
'b109f3bbbc244eb82441917ed06d618b9008dd09b3befd1b5e07394c706a8bb980b1d7785e5976ec049b46df5f1326af5a2ea6d103fd07c95385ffab0cacbc86' ).uuid iam2_ops.add_roles_to_iam2_virtual_id([role_uuid], virtual_id_uuid) #iam2_ops.remove_roles_from_iam2_virtual_id([role_uuid], virtual_id_uuid) attributes = [{ "name": "fullname", "value": name }, { "name": "phone", "value": phone }, { "name": "mail", "value": email }, { "name": "identifier", "value": str(i + 10000) }] iam2_ops.add_attributes_to_iam2_virtual_id(virtual_id_uuid, attributes) #cond = res_ops.gen_query_conditions('virtualIDUuid', '=', virtual_id_uuid) if i < 21: attributes = [{"name": "__PlatformAdmin__"}] iam2_ops.add_attributes_to_iam2_virtual_id(virtual_id_uuid, attributes) #cond_01 = res_ops.gen_query_conditions('name', '=', "__PlatformAdmin__", cond) #attribute_uuid = res_ops.query_resource_fields(res_ops.IAM2_VIRTUAL_ID_ATTRIBUTE, cond_01)[0].uuid #iam2_ops.remove_attributes_from_iam2_virtual_id(virtual_id_uuid, [attribute_uuid]) attributes = [{"name": "__ProjectAdmin__", "value": project_uuid}] iam2_ops.add_attributes_to_iam2_virtual_id(virtual_id_uuid, attributes) cond_02 = res_ops.gen_query_conditions('name', '=', "__ProjectAdmin__", cond) attribute_uuid = res_ops.query_resource_fields( res_ops.IAM2_VIRTUAL_ID_ATTRIBUTE, cond_02)[0].uuid iam2_ops.remove_attributes_from_iam2_virtual_id(virtual_id_uuid, [attribute_uuid]) # admin can't create Project operator # attributes = [{"name": "__ProjectOperator__", "value": project_uuid}] # iam2_ops.add_attributes_to_iam2_virtual_id(virtual_id_uuid, attributes) # iam2_ops.remove_attributes_from_iam2_virtual_id(virtual_id_uuid, attributes) # 9 add virtual id to organization and set it as OrganizationSupervisor iam2_ops.add_iam2_virtual_ids_to_organization([virtual_id_uuid], department_01_uuid) attributes = [{ "name": "__OrganizationSupervisor__", "value": virtual_id_uuid }] iam2_ops.add_attributes_to_iam2_organization(department_01_uuid, attributes) cond_03 = 
res_ops.gen_query_conditions('name', '=', "__OrganizationSupervisor__") cond_03 = res_ops.gen_query_conditions('value', '=', virtual_id_uuid, cond_03) attribute_uuid = res_ops.query_resource( res_ops.IAM2_ORGANIZATION_ATTRIBUTE, cond_03)[0].uuid iam2_ops.remove_attributes_from_iam2_organization(department_01_uuid, [attribute_uuid]) iam2_ops.remove_iam2_virtual_ids_from_organization([virtual_id_uuid], department_01_uuid) # 10 add virtual id to group and project iam2_ops.add_iam2_virtual_ids_to_project([virtual_id_uuid], project_uuid) iam2_ops.add_iam2_virtual_ids_to_group([virtual_id_uuid], virtual_id_group_uuid) iam2_ops.remove_iam2_virtual_ids_from_group([virtual_id_uuid], virtual_id_group_uuid) iam2_ops.remove_iam2_virtual_ids_from_project([virtual_id_uuid], project_uuid) # 11 change state disable = 'disable' enable = 'enable' Disabled = 'Disabled' iam2_ops.change_iam2_organization_state(company_uuid_01, disable) res_inv = res_ops.get_resource(res_ops.IAM2_ORGANIZATION, uuid=company_uuid_01)[0] if res_inv.state != Disabled: test_util.test_fail("test change iam2 organization state fail") iam2_ops.change_iam2_organization_state(company_uuid_01, enable) iam2_ops.change_iam2_organization_state(department_01_uuid, disable) iam2_ops.change_iam2_organization_state(department_01_uuid, enable) iam2_ops.change_iam2_project_state(project_uuid, disable) res_inv = res_ops.get_resource(res_ops.IAM2_PROJECT, uuid=project_uuid)[0] if res_inv.state != Disabled: test_util.test_fail("test change iam2 project state fail") iam2_ops.change_iam2_project_state(project_uuid, enable) iam2_ops.change_iam2_virtual_id_state(virtual_id_uuid, disable) res_inv = res_ops.get_resource(res_ops.IAM2_VIRTUAL_ID, uuid=virtual_id_uuid)[0] if res_inv.state != Disabled: test_util.test_fail("test change iam2 virtual id state fail") iam2_ops.change_iam2_virtual_id_state(virtual_id_uuid, enable) iam2_ops.change_iam2_virtual_id_group_state(virtual_id_group_uuid, disable) res_inv = 
res_ops.get_resource(res_ops.IAM2_VIRTUAL_ID_GROUP, uuid=virtual_id_group_uuid)[0] if res_inv.state != Disabled: test_util.test_fail("test change iam2 virtual id group state fail") iam2_ops.change_iam2_virtual_id_group_state(virtual_id_group_uuid, enable) iam2_ops.change_role_state(role_uuid, disable) res_inv = res_ops.get_resource(res_ops.ROLE, uuid=role_uuid)[0] if res_inv.state != Disabled: test_util.test_fail("test change iam2 role state fail") iam2_ops.change_role_state(role_uuid, enable) # 12 update virtual_id_new_name = 'virtual_id_new_name' virtual_id_new_des = 'virtual_id_new_des' virtual_id_new_password = '******' iam2_ops.update_iam2_virtual_id(virtual_id_uuid, virtual_id_new_name, virtual_id_new_des, virtual_id_new_password) virtual_id_inv = res_ops.get_resource(res_ops.IAM2_VIRTUAL_ID, uuid=virtual_id_uuid)[0] if virtual_id_inv.name != virtual_id_new_name: test_util.test_fail("update iam2 virtual id name fail") try: iam2_ops.login_iam2_virtual_id('username', 'password') except: test_util.test_logger("the old username and password can't login") try: virtual_id_session_uuid = iam2_ops.login_iam2_virtual_id( virtual_id_new_name, virtual_id_new_password) acc_ops.logout(virtual_id_session_uuid) except: test_util.test_fail("update iam2 virtual id name or password fail.") virtual_id_group_new_name = 'virtual_id_group_new_name' virtual_id_group_new_des = 'virtual_id_group_new_des' iam2_ops.update_iam2_virtual_id_group(virtual_id_group_uuid, virtual_id_group_new_name, virtual_id_group_new_des) virtual_id_group_inv = res_ops.get_resource(res_ops.IAM2_VIRTUAL_ID_GROUP, uuid=virtual_id_group_uuid)[0] if virtual_id_group_inv.name != virtual_id_group_new_name: test_util.test_fail("update iam2 virtual id group name fail") project_new_name = 'project_new_name' project_new_dsc = 'project_new_dsc' iam2_ops.update_iam2_project(project_uuid, project_new_name, project_new_dsc) project_inv = res_ops.get_resource(res_ops.IAM2_PROJECT, uuid=project_uuid)[0] if 
project_inv.name != project_new_name or project_inv.description != project_new_dsc: test_util.test_fail("update project information fail") company_new_name = 'company_new_name' company_new_dsc = 'company_new_dsc' iam2_ops.update_iam2_organization(company_uuid_02, company_new_name, company_new_dsc) organization_inv = res_ops.get_resource(res_ops.IAM2_ORGANIZATION, uuid=company_uuid_02)[0] if organization_inv.name != company_new_name or organization_inv.description != company_new_dsc: test_util.test_fail("update organization name fail") # 13 delete iam2_ops.delete_iam2_organization(company_uuid_01) iam2_ops.delete_iam2_organization(company_uuid_02) iam2_ops.delete_iam2_organization(department_01_uuid) iam2_ops.delete_iam2_organization(department_02_uuid) iam2_ops.delete_iam2_virtual_id_group(virtual_id_group_uuid) iam2_ops.delete_iam2_project(project_uuid) iam2_ops.delete_iam2_project(project_02_uuid) iam2_ops.expunge_iam2_project(project_uuid) iam2_ops.expunge_iam2_project(project_02_uuid) iam2_ops.delete_iam2_project_template(project_template_01_uuid) iam2_ops.delete_iam2_project_template(project_template_02_uuid) iam2_ops.delete_iam2_virtual_id(virtual_id_uuid) iam2_ops.delete_role(role_uuid) test_util.test_fail('success test iam2 login in by admin!')
def test():
    """Verify a shareable data volume can be deleted while every primary
    storage is disabled; the VM must keep running the whole time."""
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    l3_name = os.environ.get('l3VlanNetworkName1')
    test_vm = test_stub.create_vlan_vm(l3_name=l3_name)
    l3_net = test_lib.lib_get_l3_by_name(l3_name)
    vr_uuid = test_lib.lib_find_vr_by_l3_uuid(l3_net.uuid)[0].uuid
    connected_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    backup_storages = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE,
                                                    connected_cond, None)
    if not backup_storages:
        test_util.test_skip("not find available backup storage. Skip test")
    if backup_storages[0].type != inventory.CEPH_BACKUP_STORAGE_TYPE:
        test_util.test_skip(
            "not find available ceph backup storage. Skip test")
    host_uuid = test_lib.lib_get_vm_host(test_vm.get_vm()).uuid
    test_obj_dict.add_vm(test_vm)
    test_vm.check()
    # build a shareable virtio-scsi data volume and attach it to the VM
    offering = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('rootDiskOfferingName'))
    vol_option = test_util.VolumeOption()
    vol_option.set_disk_offering_uuid(offering.uuid)
    vol_option.set_system_tags(
        ['ephemeral::shareable', 'capability::virtio-scsi'])
    data_volume = test_stub.create_volume(vol_option)
    test_obj_dict.add_volume(data_volume)
    data_volume.check()
    data_volume.attach(test_vm)
    # take every primary storage offline, then confirm the VM stayed up
    test_stub.disable_all_pss()
    if not test_lib.lib_wait_target_up(test_vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail(
            'VM is expected to running when PS change to disable state')
    test_vm.set_state(vm_header.RUNNING)
    test_vm.check()
    data_volume.detach(test_vm.get_vm().uuid)
    # bring the storages back and refresh host / virtual-router connectivity
    test_stub.enable_all_pss()
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)
    data_volume.delete()
    data_volume.check()
    test_vm.destroy()
    test_util.test_pass('Delete volume under PS disable mode Test Success')
def test(): global role_uuid, project_uuid, project_02_uuid, project_template_01_uuid, project_template_02_uuid, \ company_uuid_01, company_uuid_02, department_01_uuid, department_02_uuid, virtual_id_group_uuid, \ virtual_id_uuid cond = res_ops.gen_query_conditions('name', '=', 'zone1') zone1_uuid = res_ops.query_resource_fields(res_ops.ZONE, cond)[0].uuid cond = res_ops.gen_query_conditions('name', '=', 'zone2') zone2_uuid = res_ops.query_resource_fields(res_ops.ZONE, cond)[0].uuid cond = res_ops.gen_query_conditions('name', '=', 'zone3') zone3_uuid = res_ops.query_resource_fields(res_ops.ZONE, cond)[0].uuid for char in ['A', 'B', 'C', 'D', 'E']: project_name = 'Project-' + char project_uuid = iam2_ops.create_iam2_project(project_name).uuid if project_name == 'Project-A' or project_name == 'Project-C': attributes = [{ "name": "__ProjectRelatedZone__", "value": zone1_uuid }, { "name": "__ProjectRelatedZone__", "value": zone2_uuid }] iam2_ops.add_attributes_to_iam2_project(project_uuid, attributes) if project_name == 'Project-B': attributes = [{ "name": "__ProjectRelatedZone__", "value": zone1_uuid }] iam2_ops.add_attributes_to_iam2_project(project_uuid, attributes) if project_name == 'Project-D': attributes = [{ "name": "__ProjectRelatedZone__", "value": zone2_uuid }, { "name": "__ProjectRelatedZone__", "value": zone3_uuid }] iam2_ops.add_attributes_to_iam2_project(project_uuid, attributes) if project_name == 'Project-E': attributes = [{ "name": "__ProjectRelatedZone__", "value": zone3_uuid }] iam2_ops.add_attributes_to_iam2_project(project_uuid, attributes) for i in range(1, 61): (name, email, phone) = user_info_generator.generate_user_info() print name print email print phone virtual_id_uuid = iam2_ops.create_iam2_virtual_id( 'user-' + str(i), 'b109f3bbbc244eb82441917ed06d618b9008dd09b3befd1b5e07394c706a8bb980b1d7785e5976ec049b46df5f1326af5a2ea6d103fd07c95385ffab0cacbc86' ).uuid #iam2_ops.add_roles_to_iam2_virtual_id([role_uuid], virtual_id_uuid) vid_attributes = 
[{ "name": "fullname", "value": name }, { "name": "phone", "value": phone }, { "name": "mail", "value": email }, { "name": "identifier", "value": str(i + 10000) }] iam2_ops.add_attributes_to_iam2_virtual_id(virtual_id_uuid, vid_attributes) if i > 50: platform_attributes = [{"name": "__PlatformAdmin__"}] iam2_ops.add_attributes_to_iam2_virtual_id(virtual_id_uuid, platform_attributes) project_inv = res_ops.query_resource_fields(res_ops.IAM2_PROJECT) for j in range(0, 5): project_uuid = project_inv[j].uuid if i == 1: iam2_ops.add_iam2_virtual_ids_to_project([virtual_id_uuid], project_uuid) proj_attributes = [{ "name": "__ProjectAdmin__", "value": project_uuid }] iam2_ops.add_attributes_to_iam2_virtual_id( virtual_id_uuid, proj_attributes) if i <= 10 and j == 0: iam2_ops.add_iam2_virtual_ids_to_project([virtual_id_uuid], project_uuid) if i > 10 and i <= 20 and j == 1: iam2_ops.add_iam2_virtual_ids_to_project([virtual_id_uuid], project_uuid) if i > 20 and i <= 30 and j == 2: iam2_ops.add_iam2_virtual_ids_to_project([virtual_id_uuid], project_uuid) if i > 30 and i <= 40 and j == 3: iam2_ops.add_iam2_virtual_ids_to_project([virtual_id_uuid], project_uuid) if i > 40 and i <= 50 and j == 4: iam2_ops.add_iam2_virtual_ids_to_project([virtual_id_uuid], project_uuid) test_util.test_fail( 'Create environment with 3 zones 5 projects and 50 users seccess!')
def test():
    """Create a VM from an ISO on an image-store/ceph backup storage, then
    exercise delete/expunge of the ISO, detach, and a change-OS VM op.

    Requires at least two connected hosts and one Connected backup storage;
    skips otherwise.
    """
    global image
    global test_obj_dict
    # run condition: the change-OS ops test needs more than one host
    hosts = res_ops.query_resource(res_ops.HOST)
    if len(hosts) <= 1:
        test_util.test_skip("skip for host_num is not satisfy condition host_num>1")
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, None, fields=['uuid'])
    # BUG FIX: the Connected-BS query result was previously ignored and the
    # ISO was added to the first backup storage regardless of its status
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")
    bs_uuid = bss[0].uuid
    # create disk offering used as the VM root disk (10 GiB)
    data_volume_size = 10737418240
    disk_offering_option = test_util.DiskOfferingOption()
    disk_offering_option.set_name('root-disk-iso')
    disk_offering_option.set_diskSize(data_volume_size)
    data_volume_offering = vol_ops.create_volume_offering(disk_offering_option)
    test_obj_dict.add_disk_offering(data_volume_offering)
    # create instance offering: 2 CPUs, 2 GiB memory
    cpuNum = 2
    memorySize = 2147483648
    name = 'iso-vm-offering'
    new_offering_option = test_util.InstanceOfferingOption()
    new_offering_option.set_cpuNum(cpuNum)
    new_offering_option.set_memorySize(memorySize)
    new_offering_option.set_name(name)
    new_offering = vm_ops.create_instance_offering(new_offering_option)
    test_obj_dict.add_instance_offering(new_offering)
    # add the installation ISO to the connected backup storage
    img_option = test_util.ImageOption()
    img_option.set_name('iso1')
    img_option.set_backup_storage_uuid_list([bs_uuid])
    img_option.set_url('http://172.20.1.15:7480/iso/CentOS-x86_64-7.2-Minimal.iso')
    image_inv = img_ops.add_iso_template(img_option)
    image_uuid = image_inv.uuid
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)
    test_obj_dict.add_image(image)
    # create the VM booting from the ISO
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    root_disk_uuid = data_volume_offering.uuid
    vm = test_stub.create_vm_with_iso([l3_net_uuid], image_uuid, 'iso-vm', root_disk_uuid, new_offering.uuid)
    test_obj_dict.add_vm(vm)
    # wait (up to 30 min) for the installed guest to come up on ssh
    vm_inv = vm.get_vm()
    test_lib.lib_set_vm_host_l2_ip(vm_inv)
    test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, 22, 1800)
    # delete + expunge the ISO while the VM still references it
    image.delete()
    test_obj_dict.rm_image(image)
    image.expunge()
    # detach the (now expunged) ISO from the VM
    img_ops.detach_iso(vm.vm.uuid)
    # vm ops test: change OS
    test_stub.vm_ops_test(vm, "VM_TEST_CHANGE_OS")
    vm.destroy()
    vol_ops.delete_disk_offering(root_disk_uuid)
    vm_ops.delete_instance_offering(new_offering.uuid)
    test_obj_dict.rm_vm(vm)
    test_obj_dict.rm_disk_offering(data_volume_offering)
    test_obj_dict.rm_instance_offering(new_offering)
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Create VM Image in Image Store Success')
def test():
    """Mass-destroy all non-VR user VMs and all VIPs, in batches of
    `thread_threshold`, then verify nothing is left.

    Temporarily raises session.timeout / session.maxConcurrent so the
    threaded destroy can run inside one admin session; the original values
    are restored (and the session logged out) before the verdict.
    """
    global session_to
    global session_mc
    global session_uuid
    session_uuid = acc_ops.login_as_admin()
    # widen session limits for the duration of the mass destroy;
    # change_global_config returns the previous value for later restore
    session_to = con_ops.change_global_config('identity', 'session.timeout', '720000', session_uuid)
    session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000', session_uuid)
    cond = res_ops.gen_query_conditions('type', '=', inventory.USER_VM_TYPE)
    cond = res_ops.gen_query_conditions('state', '!=', 'Destroyed', cond)
    num = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond, session_uuid)
    if num <= thread_threshold:
        vms = res_ops.query_resource(res_ops.VM_INSTANCE, cond, session_uuid)
        destroy_vms(vms)
    else:
        # page through the VMs, fetching only uuids, then destroy in bulk
        start = 0
        limit = thread_threshold - 1
        curr_num = start
        vms = []
        while curr_num < num:
            vms_temp = res_ops.query_resource_fields(res_ops.VM_INSTANCE, \
                    cond, session_uuid, ['uuid'], start, limit)
            vms.extend(vms_temp)
            curr_num += limit
            start += limit
        destroy_vms(vms)
    vip_num = res_ops.query_resource_count(res_ops.VIP, [], session_uuid)
    if vip_num <= thread_threshold:
        vips = res_ops.query_resource(res_ops.VIP, [], session_uuid)
        destroy_vips(vips)
    else:
        start = 0
        limit = thread_threshold - 1
        curr_num = start
        # BUG FIX: this was `vms = []`, leaving `vips` undefined and
        # raising NameError on the first vips.extend() below
        vips = []
        while curr_num < vip_num:
            vips_temp = res_ops.query_resource_fields(res_ops.VIP, \
                    [], session_uuid, ['uuid'], start, limit)
            vips.extend(vips_temp)
            curr_num += limit
            start += limit
        destroy_vips(vips)
    left_num = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond, session_uuid)
    left_vip_num = res_ops.query_resource_count(res_ops.VIP, [], session_uuid)
    # Restore the original session settings and log out BEFORE the verdict:
    # test_pass/test_fail end the test run, so the restore/logout lines that
    # previously followed the first test_pass never executed.
    con_ops.change_global_config('identity', 'session.timeout', session_to, session_uuid)
    con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc, session_uuid)
    acc_ops.logout(session_uuid)
    if left_num != 0:
        test_util.test_fail('None VR VMs destroy Fail. %d VMs are not Destroied.' % left_num)
    if left_vip_num != 0:
        test_util.test_fail('VIP destroy Fail. %d VIP are not Destroied.' % left_vip_num)
    # BUG FIX: the VIP success message previously interpolated the VM count
    # (`num`) instead of `vip_num`
    test_util.test_pass('None VR VMs destroy Success. Destroy %d VMs. '
                        'VIP destroy Success. Destroy %d VIP.' % (num, vip_num))