def test():
    """Create a VPC IPsec connection on a VPC vrouter, then clean up.

    Flow: create a VIP on the public network, create a VPC vrouter, attach
    l3VlanNetwork11, create the ipsec connection against a fixed peer, then
    delete the VIP, destroy the vrouter and delete the ipsec connection.
    """
    global ipsec
    global vip1_uuid
    global vpc_vr
    cond = res_ops.gen_query_conditions('name', '=', 'public network')
    public_network = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0]
    vip1 = test_stub.create_vip('vip_ipsec', public_network.uuid)
    vip1_uuid = vip1.get_vip().uuid

    test_util.test_dsc('Create vpc vr and attach networks')
    vpc_vr = test_stub.create_vpc_vrouter()
    cond = res_ops.gen_query_conditions('name', '=', 'l3VlanNetwork11')
    l3_vlan_network11 = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0]
    vpc_vr.add_nic(l3_vlan_network11.uuid)

    peer_address = '10.94.10.10'
    try:
        ipsec = ipsec_ops.create_ipsec_connection('ipsec', None, peer_address, '123456', vip1_uuid, None)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed; any API failure still fails the case.
        test_util.test_fail('Failed to create vpc ipsec')

    # Cleanup on the success path.
    test_stub.delete_vip(vip1_uuid)
    vpc_vr.destroy()
    ipsec_ops.delete_ipsec_connection(ipsec.uuid)
    test_util.test_pass('Create VPC Ipsec Success')
def add_storage_for_backup(deployConfig): print "try to add backup storage" if xmlobject.has_element(deployConfig, 'backupStorages.imageStoreBackupStorage'): print "find image store backup storage" for bs in xmlobject.safe_list(deployConfig.backupStorages.imageStoreBackupStorage): if hasattr(bs, 'local_backup_storage_'): print "find local_backup_storage" cond = res_ops.gen_query_conditions('tag', '=', "allowbackup") tags = res_ops.query_resource(res_ops.SYSTEM_TAG, cond) if len(tags) > 0: print "local backup storage already exists" break cond = res_ops.gen_query_conditions('name', '=', bs.name_) bss = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond) print bss add_local_bs_tag = tag_ops.create_system_tag('ImageStoreBackupStorageVO', bss[0].uuid,'allowbackup') if xmlobject.has_element(deployConfig, 'backupStorages.imageStoreBackupStorage'): for bs in xmlobject.safe_list(deployConfig.backupStorages.imageStoreBackupStorage): if hasattr(bs, 'remote_backup_storage_'): print "find remote_backup_storage" cond = res_ops.gen_query_conditions('tag', '=', "remotebackup") tags = res_ops.query_resource(res_ops.SYSTEM_TAG, cond) if len(tags) > 0: print "remote backup storage already exists" break cond = res_ops.gen_query_conditions('name', '=', bs.name_) bss = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond) print bss add_local_bs_tag = tag_ops.create_system_tag('ImageStoreBackupStorageVO', bss[0].uuid,'remotebackup')
def get_host_by_mn_vm_process(scenarioConfig, scenarioFile):
    """Return the scenario host VMs on which a management-node VM process runs.

    Looks up each MN-HA candidate host from the scenario file, resolves its
    VM and physical-host inventories, then greps for a kvm process backed by
    mnvm.img inside each host VM's console. Hosts whose console command fails
    are silently skipped (best-effort probing).
    """
    zstack_management_ip = scenarioConfig.basicConfig.zstackManagementIp.text_
    mha_s_vm_list = get_mha_s_vm_list_from_scenario_file(scenarioConfig, scenarioFile)
    if len(mha_s_vm_list) < 1:
        return []
    # Map each candidate host to its VM inventory and the physical host inventory.
    host_vm_inv = dict()
    host_inv = dict()
    for host in mha_s_vm_list:
        cond = res_ops.gen_query_conditions('vmNics.ip', '=', host.ip_)
        host_vm_inv[host] = sce_ops.query_resource(zstack_management_ip, res_ops.VM_INSTANCE, cond).inventories[0]
        cond = res_ops.gen_query_conditions('uuid', '=', host_vm_inv[host].hostUuid)
        host_inv[host] = sce_ops.query_resource(zstack_management_ip, res_ops.HOST, cond).inventories[0]
    host_list = []
    for host in mha_s_vm_list:
        # Probe for a running kvm process backed by the MN VM image.
        cmd = "ps axjf |grep kvm | grep mnvm.img | grep -v grep"
        try:
            query_kvm_process = sce_ops.execute_in_vm_console(zstack_management_ip, host_inv[host].managementIp, host_vm_inv[host].uuid, host, cmd)
            test_util.test_logger("check mn vm kvm process on host %s: %s" % (host.ip_, query_kvm_process))
            if query_kvm_process.find('zstack/mnvm.img') >= 0:
                host_list.append(host)
        except:
            # Deliberate best-effort: an unreachable console just means the
            # host is not counted; do not abort the scan.
            continue
    return host_list
def shutdown_host_network(host_vm, scenarioConfig, downMagt=True):
    '''
    Bring a NIC down inside the scenario host VM via its console.

    `l2network_nic` is deliberately a module-level global: once all MN nodes
    are disconnected, lib_get_l2_magt_nic_by_vr_offering() would fail (it
    needs a reachable MN), so the management NIC name must be resolved once
    while the MN is still up and reused afterwards.

    :param host_vm: scenario host VM descriptor (must expose .ip_)
    :param scenarioConfig: parsed scenario config (provides the MN IP)
    :param downMagt: True -> shut the management network NIC,
                     False -> shut the public network NIC
    '''
    global l2network_nic
    zstack_management_ip = scenarioConfig.basicConfig.zstackManagementIp.text_
    # Resolve the host VM inventory by its IP, then the physical host it runs on.
    cond = res_ops.gen_query_conditions('vmNics.ip', '=', host_vm.ip_)
    host_vm_inv = sce_ops.query_resource(zstack_management_ip, res_ops.VM_INSTANCE, cond).inventories[0]
    cond = res_ops.gen_query_conditions('uuid', '=', host_vm_inv.hostUuid)
    host_inv = sce_ops.query_resource(zstack_management_ip, res_ops.HOST, cond).inventories[0]
    host_vm_config = sce_ops.get_scenario_config_vm(host_vm_inv.name_, scenarioConfig)
    if not l2network_nic:
        if downMagt:
            l2network_nic = test_lib.lib_get_l2_magt_nic_by_vr_offering()
        else:
            l2network_nic = test_lib.lib_get_l2_pub_nic_by_vr_offering()
        if not l2network_nic:
            test_util.test_fail("fail to get management l2 by vr offering")
    #l2network_nic = os.environ.get('l2ManagementNetworkInterface').replace("eth", "zsn")
    cmd = "ifdown %s" % (l2network_nic)
    sce_ops.execute_in_vm_console(zstack_management_ip, host_inv.managementIp, host_vm_inv.uuid, host_vm_config, cmd)
def test():
    """Set HA level 'NeverStop' on a stopped VM and expect it to auto-start.

    Requires an image store backup storage and a SharedMountPoint primary
    storage; otherwise the case is skipped.
    """
    global agent_url
    global vm
    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore is None:
        test_util.test_skip('Required imagestore to test')
    image_uuid = test_stub.get_image_by_bs(imagestore.uuid)

    ps_type = 'SharedMountPoint'
    cond = res_ops.gen_query_conditions('type', '=', ps_type)
    pss = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
    if len(pss) == 0:
        # BUG FIX: ps_type was previously undefined here, raising NameError
        # instead of skipping the case.
        test_util.test_skip('Required %s ps to test' % (ps_type))
    ps_uuid = pss[0].uuid

    vm = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)
    vm.stop()
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, 'NeverStop')

    # Poll up to 5s for the HA engine to restart the stopped VM.
    cond = res_ops.gen_query_conditions('uuid', '=', vm.get_vm().uuid)
    for i in range(5):
        time.sleep(1)
        try:
            if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == vm_header.RUNNING:
                break
        except Exception:
            test_util.test_logger('Retry until VM change to running')
    if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == vm_header.RUNNING:
        test_util.test_pass('set HA after stopped VM test pass')
    test_util.test_fail('set HA after stopped VM test fail')
def test():
    """A 'NeverStop' HA VM should migrate to another cluster's host when its
    host is powered off and 'allow.slibing.cross.clusters' is enabled.
    """
    global vm, host3_uuid
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")
    conf_ops.change_global_config('ha', 'allow.slibing.cross.clusters', 'true')
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_s')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    #l3_name = os.environ.get('l3NoVlanNetworkName1')
    #l3_name = os.environ.get('l3VlanNetworkName1')
    l3_name = os.environ.get('l3PublicNetworkName')
    host3_name = os.environ.get('hostName3')
    host4_name = os.environ.get('hostName4')
    # host3 is the VM's initial host (will be powered off); host4 is the
    # expected HA migration target in the sibling cluster.
    conditions1 = res_ops.gen_query_conditions('name', '=', host3_name)
    host3_uuid = res_ops.query_resource(res_ops.HOST, conditions1)[0].uuid
    host3_ip = res_ops.query_resource(res_ops.HOST, conditions1)[0].managementIp
    conditions2 = res_ops.gen_query_conditions('name', '=', host4_name)
    host4_uuid = res_ops.query_resource(res_ops.HOST, conditions2)[0].uuid
    host4_ip = res_ops.query_resource(res_ops.HOST, conditions2)[0].managementIp
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm')
    vm_creation_option.set_host_uuid(host3_uuid)
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    time.sleep(30)
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
    time.sleep(5)
    vm.check()
    # Power off host3 over ssh, then wait for the HA engine to react.
    ssh_cmd1 = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % host3_ip
    cmd = '%s "poweroff" ' % ssh_cmd1
    # NOTE(review): tmp_file is not defined in this function — presumably a
    # module-level output-capture path; confirm it exists at module scope.
    process_result = test_stub.execute_shell_in_process(cmd, tmp_file)
    time.sleep(360)
    host3_status = res_ops.query_resource(res_ops.HOST, conditions1)[0].status
    if host3_status == "Disconnected":
        conditions3 = res_ops.gen_query_conditions('uuid', '=', vm.vm.uuid)
        vm_status = res_ops.query_resource(res_ops.VM_INSTANCE, conditions3)[0].state
        vm_host_uuid = res_ops.query_resource(res_ops.VM_INSTANCE, conditions3)[0].hostUuid
        if vm_status != "Running" or vm_host_uuid != host4_uuid:
            test_util.test_fail('Test fail vm status: %s, vm_host_uuid: %s,' %(vm_status, vm_host_uuid))
    # Cleanup: destroy the test VM, restore config, restart the host3 VM
    # in the nested-scenario environment.
    vm.destroy()
    conf_ops.change_global_config('ha', 'allow.slibing.cross.clusters', 'false')
    conditions4 = res_ops.gen_query_conditions('vmNics.ip', '=', host3_ip)
    # NOTE(review): zstack_management_ip is not defined in this function —
    # confirm it is a module-level global in the original file.
    vm3_uuid = sce_ops.query_resource(zstack_management_ip, res_ops.VM_INSTANCE, conditions4).inventories[0].uuid
    sce_ops.start_vm(zstack_management_ip, vm3_uuid)
    test_util.test_pass('VM auto ha across cluster Test Success')
def test():
    """Create a data volume, resize it to 5G and verify the new size.

    BUG FIX: the post-resize check used to query the first Ready Data volume
    in the system, which may not be the volume just resized; it now queries
    by the volume's own uuid.
    """
    global test_obj_dict
    volume_creation_option = test_util.VolumeOption()
    ps_uuid = res_ops.query_resource(res_ops.PRIMARY_STORAGE, [])[0].uuid
    test_util.test_dsc('Create volume and check')
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume_creation_option.set_primary_storage_uuid(ps_uuid)
    if res_ops.query_resource(res_ops.PRIMARY_STORAGE, [])[0].type == "LocalStorage":
        # Local storage volumes must be pinned to a host.
        host = test_lib.lib_find_random_host()
        volume_creation_option.set_system_tags(["localStorage::hostUuid::%s" % host.uuid])
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()

    volume_uuid = volume.volume.uuid
    set_size = 1024 * 1024 * 1024 * 5  # 5G in bytes
    vol_ops.resize_data_volume(volume_uuid, set_size)

    # Verify the size on the exact volume we resized.
    cond = res_ops.gen_query_conditions('uuid', '=', volume_uuid)
    vol_size_after = res_ops.query_resource(res_ops.VOLUME, cond)[0].size
    if set_size != vol_size_after:
        test_util.test_fail('Resize Data Volume failed, size = %s' % vol_size_after)
    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Resize Data Volume Test Success')
def test():
    """Spread the creation of up to `num` VMs over the available hosts,
    20 VMs per host, using worker threads (max 10 concurrent).

    BUG FIX: the host loop was a fixed range(0, 500) and raised IndexError
    when fewer than 500 hosts exist; it is now bounded by the host count.
    """
    if os.environ.get('CASE_FLAVOR'):
        flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
        num = flavor['vm_num']
    else:
        num = 10000
    cond = res_ops.gen_query_conditions('system', '=', 'false')
    imageUuid = res_ops.query_resource_fields(res_ops.IMAGE, cond)[0].uuid
    hostUuid = ''
    hostName = ''
    cond = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instanceOfferingUuid = res_ops.query_resource_fields(res_ops.INSTANCE_OFFERING, cond)[0].uuid
    cond = res_ops.gen_query_conditions('system', '=', 'false')
    l3NetworkUuids = res_ops.query_resource_fields(res_ops.L3_NETWORK, cond)[0].uuid
    hosts = res_ops.query_resource_fields(res_ops.HOST)
    counter = 0
    # Cap at 500 hosts as before, but never exceed what actually exists.
    for i in range(0, min(500, len(hosts))):
        hostUuid = hosts[i].uuid
        hostName = hosts[i].name
        for j in range(0, 20):
            counter += 1
            if counter > num:
                test_util.test_pass("Create %s vms finished" % num)
            vm_name = 'vm-' + str(j) + '-on-host-' + hostName
            thread = threading.Thread(target=create_vm, args=(vm_name, imageUuid, hostUuid, instanceOfferingUuid, l3NetworkUuids))
            # Throttle: at most 10 creation threads in flight.
            while threading.active_count() > 10:
                time.sleep(5)
            thread.start()
    test_util.test_fail("Fail to create vms")
def test():
    """Create a VM with a data volume sized so it only fits when both local
    primary storages are considered (capacity spans the smaller PS).

    BUG FIX: the 'state=Enabled' condition was built and then discarded
    (the next gen_query_conditions call did not chain it); all three
    conditions are now chained. The unreachable `return False` after
    test_pass was removed.
    """
    global test_obj_dict
    cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
    cond = res_ops.gen_query_conditions('type', '=', 'LocalStorage', cond)
    cond = res_ops.gen_query_conditions('status', '=', 'Connected', cond)
    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
    if len(ps) < 2:
        test_util.test_skip("Requres at least two local ps")
    ps1_res = vol_ops.get_local_storage_capacity(None, ps[0].uuid)[0]
    ps2_res = vol_ops.get_local_storage_capacity(None, ps[1].uuid)[0]
    # Pick a size halfway between the smaller and larger available capacity,
    # i.e. bigger than the smaller PS alone can hold.
    if ps1_res.availableCapacity > ps2_res.availableCapacity:
        data_volume_size = ps2_res.availableCapacity + (ps1_res.availableCapacity - ps2_res.availableCapacity) / 2
    else:
        data_volume_size = ps1_res.availableCapacity + (ps2_res.availableCapacity - ps1_res.availableCapacity) / 2
    disk_offering_option = test_util.DiskOfferingOption()
    disk_offering_option.set_name('2-local-ps-test')
    disk_offering_option.set_diskSize(data_volume_size)
    data_volume_offering = vol_ops.create_volume_offering(disk_offering_option)
    test_obj_dict.add_disk_offering(data_volume_offering)
    vm = test_stub.create_vlan_vm(disk_offering_uuids=[data_volume_offering.uuid])
    test_obj_dict.add_vm(vm)
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('2 Local PS Test Pass')
def compare(ps, vm, dvol, backup):
    """Pull the backup image from the image store onto the VM's host and
    qemu-img compare it against the data volume's install path.

    Fails the case unless qemu-img reports the images identical.
    IDIOM FIX: locals `id` and `name` shadowed builtins; renamed.
    """
    test_util.test_logger("-----------------compare----------------")
    # find vm_host
    host = test_lib.lib_find_host_by_vm(vm.vm)
    cond = res_ops.gen_query_conditions("type", '=', "ImageStoreBackupStorage")
    bs = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0]
    cond = res_ops.gen_query_conditions("uuid", '=', dvol.volume.uuid)
    current_volume = res_ops.query_resource(res_ops.VOLUME, cond)[0]
    vol_path = current_volume.installPath
    if ps.type == "SharedBlock":
        # SharedBlock install paths map to /dev/<vg>/<lv>.
        vol_path = "/dev/" + current_volume.installPath.split("/")[2] + "/" + current_volume.installPath.split("/")[3]
    test_util.test_logger(vol_path)
    # installPath layout: .../<image_name>/<image_id>
    image_name = backup.backupStorageRefs[0].installPath.split("/")[2]
    image_id = backup.backupStorageRefs[0].installPath.split("/")[3]
    # compare vm_root_volume & image
    cmd = "mkdir /root/%s;" \
          "/usr/local/zstack/imagestore/bin/zstcli " \
          "-rootca=/var/lib/zstack/imagestorebackupstorage/package/certs/ca.pem " \
          "-url=%s:8000 " \
          "pull -installpath /root/%s/old.qcow2 %s:%s;" \
          "qemu-img compare %s /root/%s/old.qcow2;" % (image_id, bs.hostname, image_id, image_name, image_id, vol_path, image_id)
    # clean image
    result = test_lib.lib_execute_ssh_cmd(host.managementIp, "root", "password", cmd, timeout=300)
    if result != "Images are identical.\n":
        test_util.test_fail("compare vm_root_volume & image created by backup")
def test():
    """Verify data-volume bandwidth QoS configured via a disk offering,
    measured with fio inside the guest."""
    global volume_offering_uuid
    test_util.test_dsc('Test VM data volume bandwidth QoS by 20MB')
    # QoS value, in KB.
    volume_bandwidth = 5 * 1024 * 1024
    offering = test_lib.lib_create_disk_offering(volume_bandwidth=volume_bandwidth)
    volume_offering_uuid = offering.uuid

    vm = test_stub.create_vm(vm_name='vm_volume_qos', disk_offering_uuids=[volume_offering_uuid])
    vm.check()
    test_obj_dict.add_vm(vm)
    vm_inv = vm.get_vm()

    # Locate the VM's data volume and prepare a filesystem on it.
    query = res_ops.gen_query_conditions("vmInstanceUuid", '=', vm_inv.uuid)
    query = res_ops.gen_query_conditions("type", '=', 'Data', query)
    data_volume_uuid = res_ops.query_resource(res_ops.VOLUME, query)[0].uuid
    test_lib.lib_mkfs_for_volume(data_volume_uuid, vm_inv)

    mount_point = '/mnt'
    ssh_user = 'root'
    ssh_pass = '******'
    os.system("sshpass -p '%s' ssh %s@%s 'mount /dev/vdb1 %s'" % (ssh_pass, ssh_user, vm_inv.vmNics[0].ip, mount_point))
    vm.check()

    # Run fio against the mounted data volume and check the observed bandwidth.
    test_stub.make_ssh_no_password(vm_inv)
    test_stub.install_fio(vm_inv)
    test_stub.test_fio_bandwidth(vm_inv, volume_bandwidth, mount_point)

    vol_ops.delete_disk_offering(volume_offering_uuid)
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('VM data volume QoS Test Pass')
def create_vm(vm_creation_option=None, volume_uuids=None, root_disk_uuid=None, \
        image_uuid=None, session_uuid=None):
    """Create a test VM, building a default VmOption when none is supplied.

    The default option uses the 'instanceOfferingName_s' offering, the first
    non-ISO Linux image and the first available L3 network. Explicit
    volume_uuids / root_disk_uuid / image_uuid / session_uuid override the
    corresponding option fields.
    """
    if not vm_creation_option:
        offering_uuid = test_lib.lib_get_instance_offering_by_name(os.environ.get('instanceOfferingName_s')).uuid
        cond = res_ops.gen_query_conditions('mediaType', '!=', 'ISO')
        cond = res_ops.gen_query_conditions('platform', '=', 'Linux', cond)
        image_uuid = res_ops.query_resource(res_ops.IMAGE, cond, session_uuid)[0].uuid
        l3net_uuid = res_ops.get_resource(res_ops.L3_NETWORK, session_uuid)[0].uuid
        vm_creation_option = test_util.VmOption()
        vm_creation_option.set_instance_offering_uuid(offering_uuid)
        vm_creation_option.set_image_uuid(image_uuid)
        vm_creation_option.set_l3_uuids([l3net_uuid])

    if volume_uuids:
        # Guard clause: reject anything that is not a list.
        if not isinstance(volume_uuids, list):
            test_util.test_fail('volume_uuids type: %s is not "list".' % type(volume_uuids))
        vm_creation_option.set_data_disk_uuids(volume_uuids)
    if root_disk_uuid:
        vm_creation_option.set_root_disk_uuid(root_disk_uuid)
    if image_uuid:
        vm_creation_option.set_image_uuid(image_uuid)
    if session_uuid:
        vm_creation_option.set_session_uuid(session_uuid)

    new_vm = test_vm.ZstackTestVm()
    new_vm.set_creation_option(vm_creation_option)
    new_vm.create()
    return new_vm
def test():
    """Primary-storage maintain-mode scenario.

    Create a VM, add a fake ISO image, put the VM's PS into 'maintain'
    (the VM must stop), attach the ISO to the stopped VM, re-enable the PS,
    reconnect the host, restart all VRs and verify the VM starts again.
    """
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    #l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    #vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    #vr_uuid = vr.uuid
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    test_util.test_dsc('Add ISO Image')
    cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bs_uuid = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0].uuid
    img_option = test_util.ImageOption()
    img_option.set_name('iso')
    img_option.set_backup_storage_uuid_list([bs_uuid])
    # Serve a fake ISO from the local management node's tomcat static dir.
    os.system("echo fake iso for test only > %s/apache-tomcat/webapps/zstack/static/test.iso" % (os.environ.get('zstackInstallPath')))
    img_option.set_url('http://%s:8080/zstack/static/test.iso' % (os.environ.get('node1Ip')))
    image_inv = img_ops.add_iso_template(img_option)
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)
    test_obj_dict.add_image(image)
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    # Maintain mode should force the VM down; wait up to 90s for ssh to drop.
    ps_ops.change_primary_storage_state(ps_uuid, 'maintain')
    if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to stop when PS change to maintain state')
    vm.set_state(vm_header.STOPPED)
    vm.check()
    test_util.test_dsc('Attach ISO to VM')
    cond = res_ops.gen_query_conditions('name', '=', 'iso')
    iso_uuid = res_ops.query_resource(res_ops.IMAGE, cond)[0].uuid
    img_ops.attach_iso(iso_uuid, vm.vm.uuid)
    # Recover: re-enable PS, reconnect host, restart every virtual router.
    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    #vm_ops.reconnect_vr(vr_uuid)
    vrs = test_lib.lib_get_all_vrs()
    for vr in vrs:
        vm_ops.start_vm(vr.uuid)
    vm.start()
    vm.check()
    vm.destroy()
    vm.check()
    #vm.expunge()
    #vm.check()
    test_util.test_pass('PS maintain mode Test Success')
def check(self):
    """Verify the test object's virtual ID exists in the DB and that every
    expected statement action appears in the role bound to it.

    IDIOM FIXES: renamed misspelled local `atatement_dict`; `== False`
    replaced with `not`; exception no longer bound to an unused name.
    """
    import json
    import zstacklib.utils.jsonobject as jsonobject
    super(zstack_vid_policy_db_checker, self).check()
    try:
        conditions = res_ops.gen_query_conditions('uuid', '=', self.test_obj.get_vid().uuid)
        vid = res_ops.query_resource(res_ops.IAM2_VIRTUAL_ID, conditions)[0]
    except Exception:
        traceback.print_exc(file=sys.stdout)
        test_util.test_logger('Check result: [vid Inventory uuid:] %s does not exist in database.' % self.test_obj.get_vid().uuid)
        return self.judge(False)
    conditions = res_ops.gen_query_conditions('virtualIDs.uuid', '=', self.test_obj.get_vid().uuid)
    role_statements = res_ops.query_resource(res_ops.ROLE, conditions)[0].statements
    # Each expected action must appear in at least one role statement.
    for expected_action in self.test_obj.get_vid_statements()[0]['actions']:
        found = False
        for statement in role_statements:
            statement_dict = json.loads(jsonobject.dumps(statement))['statement']
            for action in statement_dict['actions']:
                if action == expected_action:
                    found = True
        if not found:
            test_util.test_logger('Check result: [vid Inventory statement:] does not exist in database.')
            return self.judge(False)
    return self.judge(True)
def test():
    """Local-storage capacity must be unchanged after create + expunge of a VM.

    BUG FIX: avail_cap2 previously read host_res (the pre-create snapshot)
    instead of host_res2, so the comparison was always equal and the test
    could never fail.
    """
    test_util.test_dsc('Test storage capacity when using expunge vm')
    if conf_ops.get_global_config_value('vm', 'deletionPolicy') != 'Delay':
        test_util.test_skip('vm delete_policy is not Delay, skip test.')
        return

    zone_uuid = res_ops.query_resource(res_ops.ZONE)[0].uuid
    cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
    cond = res_ops.gen_query_conditions('status', '=', 'Connected', cond)
    host = res_ops.query_resource_with_num(res_ops.HOST, cond, limit=1)
    if not host:
        test_util.test_skip('No Enabled/Connected host was found, skip test.')
        return True
    ps = res_ops.query_resource_with_num(res_ops.PRIMARY_STORAGE, cond, limit=1)
    if not ps:
        test_util.test_skip('No Enabled/Connected primary storage was found, skip test.')
        return True
    host = host[0]
    ps = ps[0]

    # Snapshot available capacity before creating the VM.
    host_res = vol_ops.get_local_storage_capacity(host.uuid, ps.uuid)[0]
    avail_cap = host_res.availableCapacity

    vm = test_stub.create_vm(vm_name='basic-test-vm', host_uuid=host.uuid)
    test_obj_dict.add_vm(vm)
    time.sleep(1)
    vm.destroy()
    vm.expunge()

    # Snapshot again after expunge; capacity must be fully reclaimed.
    host_res2 = vol_ops.get_local_storage_capacity(host.uuid, ps.uuid)[0]
    avail_cap2 = host_res2.availableCapacity
    if avail_cap != avail_cap2:
        test_util.test_fail('PS capacity is not same after create/expunge vm on host: %s. Capacity before create vm: %s, after expunge vm: %s ' % (host.uuid, avail_cap, avail_cap2))
    test_util.test_pass('Expunge VM Test Success')
def path():
    """Build a robot-test action path mixing SharedBlock and Ceph primary
    storages.

    Returns a dict with the initial formation and a path_list of TestAction
    steps; the list is empty when either a SharedBlock or a Ceph PS is
    missing, which effectively skips the scenario.
    """
    # Collect all Enabled/Connected primary storages.
    cond = res_ops.gen_query_conditions('state', '=', "Enabled")
    cond = res_ops.gen_query_conditions('status', '=', "Connected", cond)
    ps_inv = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
    # Backup storages, split by type.
    cond_imagestore = res_ops.gen_query_conditions('type', '=', "ImageStoreBackupStorage", cond)
    cond_ceph = res_ops.gen_query_conditions('type', '=', "Ceph", cond)
    imagestore = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond_imagestore)
    ceph_bs = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond_ceph)
    # PS uuids and the per-PS utility VM names used by the robot framework.
    san_ps = [ps.uuid for ps in ps_inv if ps.type == 'SharedBlock']
    ceph_ps = [ps.uuid for ps in ps_inv if ps.type == 'Ceph']
    san_vms = ['utility_vm_for_robot_test' + '-' + ps.name for ps in ps_inv if ps.type == 'SharedBlock']
    ceph_vms = ['utility_vm_for_robot_test' + '-' + ps.name for ps in ps_inv if ps.type == 'Ceph']
    if san_ps and ceph_ps:
        return dict(initial_formation="template3",
                    path_list=[[TestAction.create_volume, "volume1", "=ps_uuid::%s" % san_ps[0]],
                               [TestAction.attach_volume, san_vms[-1], "volume1"],
                               [TestAction.resize_volume, san_vms[-1], 5*1024*1024],
                               [TestAction.clone_vm, san_vms[-1], "vm2", "=full"],
                               [TestAction.detach_volume, "volume1"],
                               [TestAction.attach_volume, ceph_vms[0], "volume1"],
                               [TestAction.stop_vm, san_vms[-1]],
                               [TestAction.reinit_vm, san_vms[-1]],
                               [TestAction.start_vm, san_vms[-1]],
                               [TestAction.clone_vm, ceph_vms[0], "vm3"],
                               [TestAction.resize_volume, ceph_vms[0], 5*1024*1024],
                               [TestAction.detach_volume, "volume1"],
                               [TestAction.create_image_from_volume, ceph_vms[0], 'image_created_from_%s' % ceph_vms[0], "=bs_uuid::%s" % imagestore[0].uuid],
                               [TestAction.create_vm_by_image, 'image_created_from_%s' % ceph_vms[0], 'qcow2', 'vm4', '=ps_uuid::%s' % random.choice(san_ps)],
                               [TestAction.stop_vm, 'vm4'],
                               [TestAction.ps_migrage_vm, 'vm4'],
                               [TestAction.attach_volume, 'vm4', "volume1"]])
    else:
        # Environment lacks one of the required PS types; no steps to run.
        return dict(initial_formation="template3", path_list=[])
def get_bs(self):
    """Resolve the configured ceph backup storage name(s) into inventory
    objects stored on self (ceph_bs / ceph_bs_2)."""
    def _lookup(bs_name):
        # Fetch the first backup storage whose name matches.
        cond = res_ops.gen_query_conditions('name', '=', bs_name)
        return res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0]

    if self.ceph_bs_name:
        self.ceph_bs = _lookup(self.ceph_bs_name)
    if self.ceph_bs_name_2:
        self.ceph_bs_2 = _lookup(self.ceph_bs_name_2)
def test():
    """Start an add-image task in a worker thread and verify that the task
    progress reported while downloading is a sane percentage (0..100)."""
    global new_image
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, None, fields=['uuid'])
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")
    # NOTE(review): only 'uuid' is requested above, yet '.type' is read below —
    # confirm query_resource_fields still populates the type attribute.
    if bss[0].type != inventory.CEPH_BACKUP_STORAGE_TYPE \
            and hasattr(inventory, 'IMAGE_STORE_BACKUP_STORAGE_TYPE') \
            and bss[0].type != inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
        test_util.test_skip("not find available imagestore or ceph backup storage. Skip test")

    worker = threading.Thread(target=add_image, args=(bss[0].uuid,))
    worker.start()
    time.sleep(5)

    # The image should now be mid-download; sample its task progress.
    image_cond = res_ops.gen_query_conditions("status", '=', "Downloading")
    downloading = res_ops.query_resource_fields(res_ops.IMAGE, image_cond, None, fields=['uuid'])
    progress = res_ops.get_task_progress(downloading[0].uuid)
    if not 0 <= int(progress.progress) <= 100:
        test_util.test_fail("Progress of task should be between 0 and 100, while it actually is %s" % (progress.progress))

    worker.join()
    new_image.delete()
    if test_lib.lib_get_image_delete_policy() != 'Direct':
        new_image.expunge()
    test_util.test_pass('Add image Progress Test Success')
def check(self):
    """Verify every security group attached to the nic exists in the DB.

    BUG FIX: the success `return self.judge(True)` was inside the for loop,
    so only the first security group in sg_list was ever verified; it now
    runs after all groups have been checked.
    """
    super(zstack_kvm_sg_db_exist_checker, self).check()
    sg_list = self.test_obj.get_sg_list_by_nic(self.nic_uuid)
    if not sg_list:
        # Test record says no SG: the DB must agree.
        conditions = res_ops.gen_query_conditions('vmNicUuid', '=', self.nic_uuid)
        nic_sg = res_ops.query_resource(res_ops.VM_SECURITY_GROUP, conditions)
        if not nic_sg:
            test_util.test_logger('Check result: No [Security Group] is found in database for [nic:] %s.' % self.nic_uuid)
            return self.judge(False)
        else:
            test_util.test_warn('Check result: [Security Group] is found in database for [nic:] %s. It is not consistent with test_sg record.' % self.nic_uuid)
            return self.judge(True)
    for test_sg in sg_list:
        try:
            conditions = res_ops.gen_query_conditions('uuid', '=', test_sg.security_group.uuid)
            sg = res_ops.query_resource(res_ops.SECURITY_GROUP, conditions)[0]
        except Exception:
            traceback.print_exc(file=sys.stdout)
            test_util.test_logger('Check result: [Security Group Inventory uuid:] %s does not exist in database.' % test_sg.security_group.uuid)
            return self.judge(False)
        test_util.test_logger('Check result: [SecurityGroup Inventory uuid:] %s exist in database.' % sg.uuid)
    return self.judge(True)
def check(self):
    """Exercise the load balancer listener according to its algorithm.

    Collects the running VMs behind the listener's nics, resolves the VIP
    ip, then dispatches to the round-robin / single-origin check helpers.
    """
    super(zstack_kvm_lbl_checker, self).check()
    self.vm_nic_uuids = self.lbl.get_vm_nics_uuid()
    self.algorithm = self.lbl.get_algorithm()
    self.vm_list = []
    # Map of backend nic ip -> hit counter, used by the traffic checks.
    self.vm_ip_test_dict = {}
    for vm_nic_uuid in self.vm_nic_uuids:
        vm = test_lib.lib_get_vm_by_nic(vm_nic_uuid)
        if vm.state == 'Running':
            nic_ip = test_lib.lib_get_nic_by_uuid(vm_nic_uuid).ip
            self.vm_ip_test_dict[nic_ip] = 0
            self.vm_list.append(vm)
    if not self.vm_list:
        test_util.test_logger('There is not living vm for load balancer test')
        return self.judge(False)
    # Resolve the VIP ip via the load balancer that owns this listener.
    cond = res_ops.gen_query_conditions('listeners.uuid', '=', self.lbl_uuid)
    vip_uuid = res_ops.query_resource(res_ops.LOAD_BALANCER, cond)[0].vipUuid
    cond = res_ops.gen_query_conditions('uuid', '=', vip_uuid)
    self.vip_ip = res_ops.query_resource(res_ops.VIP, cond)[0].ip
    # With a single backend every algorithm degenerates to single-origin.
    if not len(self.vm_list) > 1:
        self.do_so_check()
        return
    if self.algorithm == lb_header.LB_ALGORITHM_RR:
        self.do_rr_check()
    elif self.algorithm == lb_header.LB_ALGORITHM_LC:
        #self.do_lc_check()
        #If not consider long connection, leastconn is same as round robin.
        self.do_rr_check()
    elif self.algorithm == lb_header.LB_ALGORITHM_SO:
        self.do_so_check()
def add_image_config(root_xml, original_images_setting, session_uuid = None):
    """Append an <images> section describing all Enabled images to root_xml.

    Credentials (username/password) are carried over from the original
    settings when an image with the same url existed there.
    IDIOM FIX: the inner loop used to rebind its own loop variable `bs`,
    shadowing the backupStorageRef entry; it now uses distinct names.
    """
    images_xml = etree.SubElement(root_xml, "images")
    cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
    images = res_ops.query_resource(res_ops.IMAGE, cond, session_uuid)
    # Index the pre-existing image settings by url for credential lookup.
    pre_images = {}
    if original_images_setting:
        pre_images_list = \
                original_images_setting.get_child_node_as_list('image')
        for pre_image in pre_images_list:
            pre_images[pre_image.url_] = pre_image
    for image in images:
        image_xml = etree.SubElement(images_xml, "image")
        set_xml_item_attr(image_xml, 'name', image.name)
        set_xml_item_attr(image_xml, 'description', image.description)
        set_xml_item_attr(image_xml, 'url', image.url)
        set_xml_item_attr(image_xml, 'format', image.format)
        set_xml_item_attr(image_xml, 'mediaType', image.mediaType)
        set_xml_item_attr(image_xml, 'guestOsType', image.guestOsType)
        set_xml_item_attr(image_xml, 'hypervisorType', image.hypervisorType)
        set_xml_item_attr(image_xml, 'bits', image.bits)
        if pre_images.has_key(image.url):
            set_xml_item_attr(image_xml, 'username', \
                    pre_images[image.url].username__)
            set_xml_item_attr(image_xml, 'password', \
                    pre_images[image.url].password__)
        for bs_ref in image.backupStorageRefs:
            cond = res_ops.gen_query_conditions('uuid', '=', \
                    bs_ref.backupStorageUuid)
            bs_inv = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond, \
                    session_uuid)[0]
            add_xml_item_value(image_xml, 'backupStorageRef', bs_inv.name)
def test():
    """Update a custom instance offering while its VM is stopped, then
    restart the VM and verify it comes up healthy."""
    test_util.test_dsc('Test update instance offering')
    cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
    cond = res_ops.gen_query_conditions('status', '=', 'Connected', cond)
    hosts = res_ops.query_resource_with_num(res_ops.HOST, cond, limit=1)
    if not hosts:
        test_util.test_skip('No Enabled/Connected host was found, skip test.')
        return True
    host_uuid = hosts[0].uuid

    offering = test_lib.lib_create_instance_offering(
        cpuNum=1, cpuSpeed=16, memorySize=536870912,
        name='orgin_instance_name')
    test_obj_dict.add_instance_offering(offering)

    vm = test_stub.create_vm(vm_name='test_update_instance_offering',
                             host_uuid=host_uuid,
                             instance_offering_uuid=offering.uuid)
    test_obj_dict.add_vm(vm)
    vm.stop()

    # The update API requires all of these parameters to be supplied.
    test_lib.lib_update_instance_offering(
        offering.uuid, cpuNum=2, cpuSpeed=16,
        memorySize=1073741824, name='updated_instance_name',
        volume_iops=None, volume_bandwidth=None,
        net_outbound_bandwidth=None, net_inbound_bandwidth=None)

    vm.start()
    vm.check()
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Test updated instance offering Pass')
def test():
    """antiHard affinity: two group members land on distinct hosts, and a
    third member pinned to an occupied host must fail to create."""
    cond = res_ops.gen_query_conditions('name', '=', os.environ.get("hostName"))
    h1 = res_ops.query_resource(res_ops.HOST, cond)
    cond = res_ops.gen_query_conditions('name', '=', os.environ.get("hostName2"))
    h2 = res_ops.query_resource(res_ops.HOST, cond)

    ag1 = ag_ops.create_affinity_group(name="ag1", policy="antiHard")

    # First two members: one per host, placement must be honored.
    vm1 = test_stub.create_ag_vm(affinitygroup_uuid=ag1.uuid, host_uuid=h1[0].uuid)
    assert vm1.get_vm().hostUuid == h1[0].uuid
    test_obj_dict.add_vm(vm1)
    vm2 = test_stub.create_ag_vm(affinitygroup_uuid=ag1.uuid, host_uuid=h2[0].uuid)
    assert vm2.get_vm().hostUuid == h2[0].uuid
    test_obj_dict.add_vm(vm2)

    # Third member on an already-occupied host must be rejected.
    try:
        vm3 = None
        vm3 = test_stub.create_ag_vm(affinitygroup_uuid=ag1.uuid, host_uuid=h2[0].uuid)
    except:
        if not vm3:
            test_util.test_logger("vm3 isn't created as expected")
    finally:
        if vm3:
            test_util.test_fail("Test Fail, vm3 [uuid:%s] is not expected to be created" % vm3.get_vm().uuid)

    test_lib.lib_error_cleanup(test_obj_dict)
    ag_ops.delete_affinity_group(ag1.uuid)
    test_util.test_pass("Affinity Group antiHard policy pass")
def test():
    """Verify write-only bandwidth QoS on a data volume: the write limit is
    enforced while the read side remains unlimited.

    BUG FIX: the retrieved-qos validation joined its two conditions with
    `and`, so it only failed when BOTH the read and write values were wrong;
    either mismatch must fail the case, hence `or`.
    """
    global volume_offering_uuid
    test_util.test_dsc('Test VM data volume bandwidth QoS by 20MB')
    # Unit is KB.
    write_bandwidth = 5*1024*1024
    new_volume_offering = test_lib.lib_create_disk_offering(write_bandwidth = write_bandwidth)
    volume_offering_uuid = new_volume_offering.uuid
    vm = test_stub.create_vm(vm_name='vm_volume_qos', disk_offering_uuids = [volume_offering_uuid])
    vm.check()
    test_obj_dict.add_vm(vm)
    vm_inv = vm.get_vm()
    cond = res_ops.gen_query_conditions("vmInstanceUuid", '=', vm_inv.uuid)
    cond = res_ops.gen_query_conditions("type", '=', 'Data', cond)
    volume_uuid = res_ops.query_resource(res_ops.VOLUME, cond)[0].uuid
    test_lib.lib_mkfs_for_volume(volume_uuid, vm_inv)
    path = '/mnt'
    user_name = 'root'
    user_password = '******'
    os.system("sshpass -p '%s' ssh %s@%s 'mount /dev/vdb1 %s'"%(user_password, user_name, vm_inv.vmNics[0].ip, path))
    vm.check()
    test_stub.make_ssh_no_password(vm_inv)
    test_stub.install_fio(vm_inv)
    # Read qos must be unset (-1) and write qos must equal the configured value.
    data_volume_uuid = test_lib.lib_get_data_volumes(vm_inv)[0].uuid
    qos = vm_ops.get_vm_disk_qos(data_volume_uuid)
    if qos.volumeBandwidthRead != -1 or qos.volumeBandwidthWrite != write_bandwidth:
        test_util.test_fail('Retrieved disk qos not match')
    test_stub.test_fio_bandwidth(vm_inv, write_bandwidth, path)
    # Reads must NOT be limited, since only the write qos was set.
    if test_stub.test_fio_bandwidth(vm_inv, write_bandwidth/2, '/dev/vdb', raise_exception=False):
        test_util.test_fail('disk read qos is not expected to have limit as only write qos was set')
    vol_ops.delete_disk_offering(volume_offering_uuid)
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('VM data volume write QoS Test Pass')
def test():
    """Create ZSTACK_TEST_NUM VMs concurrently and verify the final count.

    Spawns one creator thread per VM (throttled by thread_threshold), using
    a single shared ZstackTestVm template object, then queries how many VMs
    with the generated name exist and compares against the requested number.
    Temporarily raises the session timeout / max-concurrency limits so the
    flood of API calls does not exhaust sessions; restores them afterwards.
    """
    global session_uuid
    global session_to
    global session_mc
    vm_num = os.environ.get('ZSTACK_TEST_NUM')
    if not vm_num:
        vm_num = 0
    else:
        vm_num = int(vm_num)
    test_util.test_logger('ZSTACK_THREAD_THRESHOLD is %d' % thread_threshold)
    test_util.test_logger('ZSTACK_TEST_NUM is %d' % vm_num)
    org_num = vm_num
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_s')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    conditions = res_ops.gen_query_conditions('name', '=', l3_name)
    l3_uuid = res_ops.query_resource_with_num(res_ops.L3_NETWORK, conditions, \
            session_uuid, start = 0, limit = 1)[0].uuid
    vm_creation_option.set_l3_uuids([l3_uuid])
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    session_uuid = acc_ops.login_as_admin()
    #change account session timeout.
    session_to = con_ops.change_global_config('identity', 'session.timeout', '720000', session_uuid)
    session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000', session_uuid)
    vm_creation_option.set_session_uuid(session_uuid)
    vm = test_vm_header.ZstackTestVm()
    random_name = random.random()
    vm_name = 'multihost_basic_vm_%s' % str(random_name)
    vm_creation_option.set_name(vm_name)
    while vm_num > 0:
        check_thread_exception()
        vm.set_creation_option(vm_creation_option)
        vm_num -= 1
        thread = threading.Thread(target=create_vm, args=(vm,))
        # Throttle: do not exceed thread_threshold concurrent creators.
        while threading.active_count() > thread_threshold:
            time.sleep(1)
        thread.start()
    # Wait for all creator threads to finish (only main thread left).
    while threading.active_count() > 1:
        time.sleep(0.01)
    cond = res_ops.gen_query_conditions('name', '=', vm_name)
    vms = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond, session_uuid)
    # Restore the original session settings before verdict.
    con_ops.change_global_config('identity', 'session.timeout', session_to, session_uuid)
    con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc, session_uuid)
    acc_ops.logout(session_uuid)
    if vms == org_num:
        test_util.test_pass('Create %d VMs Test Success' % org_num)
    else:
        test_util.test_fail('Create %d VMs Test Failed. Only find %d VMs.' % (org_num, vms))
def test():
    """Affinity group 'antiHard' policy: migration onto an occupied host must fail.

    Places vm1 on $hostName and vm2 on $hostName2 inside one antiHard group,
    then verifies that migrating vm1 onto vm2's host is rejected, while
    migrating vm1 to a third, unused host ($hostName3) and back succeeds.
    """
    h1_name = os.environ.get("hostName")
    cond = res_ops.gen_query_conditions('name', '=', h1_name)
    h1 = res_ops.query_resource(res_ops.HOST, cond)
    ag1 = ag_ops.create_affinity_group(name="ag1", policy="antiHard")
    vm1 = test_stub.create_ag_vm(affinitygroup_uuid=ag1.uuid, host_uuid=h1[0].uuid)
    assert vm1.get_vm().hostUuid == h1[0].uuid
    test_obj_dict.add_vm(vm1)
    h2_name = os.environ.get("hostName2")
    cond = res_ops.gen_query_conditions('name', '=', h2_name)
    h2 = res_ops.query_resource(res_ops.HOST, cond)
    vm2 = test_stub.create_ag_vm(affinitygroup_uuid=ag1.uuid, host_uuid=h2[0].uuid)
    assert vm2.get_vm().hostUuid == h2[0].uuid
    test_obj_dict.add_vm(vm2)
    # Migrating vm1 onto vm2's host violates antiHard and is EXPECTED to raise.
    try:
        vm1.migrate(vm2.get_vm().hostUuid)
    except:
        test_util.test_logger("vm1 is not expected to migrate to host2 [uuid: %s]" % vm2.get_vm().hostUuid)
    # A third host is outside the group's placements, so migration there works.
    h3_name = os.environ.get("hostName3")
    cond = res_ops.gen_query_conditions('name', '=', h3_name)
    h3 = res_ops.query_resource(res_ops.HOST, cond)
    vm1.migrate(h3[0].uuid)
    vm1.migrate(h1[0].uuid)
    test_lib.lib_error_cleanup(test_obj_dict)
    ag_ops.delete_affinity_group(ag1.uuid)
    test_util.test_pass("Affinity Group antiHard policy pass")
def test(): global price #1.create root volume price with systemtags bill_rootvolume = test_lib.RootVolumeBilling() time_unit = random.choice(time_unit_dict) price = str(random.randint(0,9999)) resource_unit = random.choice(resource_unit_dict.keys()) bill_rootvolume.set_timeUnit(time_unit) bill_rootvolume.set_price(price) bill_rootvolume.set_resourceUnit(resource_unit) bill_rootvolume.set_price_system_tags(price_system_tags) test_util.test_logger("create root volume price with systemtags") price = bill_rootvolume.create_resource_type() if not price: test_util.test_fail("fail: create rootvolume price") else: test_util.test_logger("success: create rootvolume price %s" % price.uuid) #2.query root volume price cond = res_ops.gen_query_conditions('uuid', '=', price.uuid) price_query = bill_ops.query_resource_price(cond)[0] if price_query.uuid == price.uuid: test_util.test_logger("success: query rootvolume price %s" % price_query.uuid) else: test_util.test_fail("fail: query rootvolume price %s" % price.uuid) #3.delete root volume price delete_result = bill_ops.delete_resource_price(price.uuid) cond = res_ops.gen_query_conditions('uuid', '=', price.uuid) #delete check,because delete API always return success delete_check = bill_ops.query_resource_price(cond) if delete_check: test_util.test_fail("fail: delete rootvolume price %s" % price.uuid) else: test_util.test_logger("success: delete rootvolume price %s" % price_query.uuid)
def update(self, update_utility=False):
    """Refresh this snapshot-tree helper's cached state.

    :param update_utility: when True, re-resolve the utility VM even if one
        is already bound.

    Steps:
      1. Bind self.utility_vm to a Running VM named
         'utility_vm_for_robot_test' whose first volume lives on the same
         primary storage as the target volume (only when unset or forced).
      2. Update every tracked snapshot and point it at the utility VM.
      3. Register any newly auto-created snapshot found on the backend.
    """
    if not self.utility_vm or update_utility:
        cond = res_ops.gen_query_conditions('name', '=', "utility_vm_for_robot_test")
        cond = res_ops.gen_query_conditions('state', '=', "Running", cond)
        vms = res_ops.query_resource(res_ops.VM_INSTANCE, cond)
        for vm in vms:
            # Utility VM must share primary storage with the target volume
            # so snapshot checks can run against the same backend.
            if self.get_target_volume().get_volume().primaryStorageUuid == vm.allVolumes[0].primaryStorageUuid:
                import zstackwoodpecker.zstack_test.zstack_test_vm as zstack_vm_header
                utility_vm_uuid = vm.uuid
                utility_vm = zstack_vm_header.ZstackTestVm()
                utility_vm.create_from(utility_vm_uuid)
                self.utility_vm = utility_vm
    for snapshot in self.snapshot_list:
        snapshot.update()
        snapshot.set_utility_vm(self.utility_vm)
        test_util.test_logger("children for snapshot: %s" % snapshot)
        test_util.test_logger(snapshot.get_children_tree_list())
    new_snap = self.find_new_auto_create_snapshot()
    if new_snap:
        self.add_snapshot(new_snap.uuid)
    test_util.test_logger(self.snapshot_list)
def Create(vm_name_prefix):
    """Concurrently create ZSTACK_TEST_NUM (default 1000) VMs named
    '<vm_name_prefix>_<n>'.

    Spawns one creator thread per VM (throttled by thread_threshold), using
    a shared ZstackTestVm template object on the first Private-category L3.
    Temporarily raises session timeout / max-concurrency and restores them
    after all threads finish; leaves session_uuid/session_to/session_mc in
    module globals for the caller's cleanup path.
    """
    global session_uuid
    global session_to
    global session_mc
    session_uuid = None
    session_to = None
    session_mc = None
    vm_num = os.environ.get('ZSTACK_TEST_NUM')
    if not vm_num:
        vm_num = 1000
    else:
        vm_num = int(vm_num)
    test_util.test_logger('ZSTACK_THREAD_THRESHOLD is %d' % thread_threshold)
    test_util.test_logger('ZSTACK_TEST_NUM is %d' % vm_num)
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_s')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    cond = res_ops.gen_query_conditions('category', '=', 'Private')
    l3net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, cond, session_uuid)[0].uuid
    l3s = test_lib.lib_get_l3s()
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    #change account session timeout.
    # NOTE(review): these two config changes run while session_uuid is still
    # None (login_as_admin happens below) — confirm change_global_config
    # tolerates a None session; the sibling test() logs in first.
    session_to = con_ops.change_global_config('identity', 'session.timeout', '720000', session_uuid)
    session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000', session_uuid)
    session_uuid = acc_ops.login_as_admin()
    vm_creation_option.set_session_uuid(session_uuid)
    vm = test_vm_header.ZstackTestVm()
    vm_creation_option.set_l3_uuids([l3net_uuid])
    while vm_num > 0:
        check_thread_exception()
        vm_name = '%s_%s' % (vm_name_prefix, str(vm_num))
        vm_creation_option.set_name(vm_name)
        vm.set_creation_option(vm_creation_option)
        vm_num -= 1
        thread = threading.Thread(target=create_vm, args=(vm,))
        # Throttle: cap concurrent creator threads at thread_threshold.
        while threading.active_count() > thread_threshold:
            time.sleep(1)
        thread.start()
    # Wait until only the main thread remains.
    while threading.active_count() > 1:
        time.sleep(0.05)
    cond = res_ops.gen_query_conditions('name', '=', vm_name)
    vms = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond, session_uuid)
    # Restore original session settings.
    con_ops.change_global_config('identity', 'session.timeout', session_to, session_uuid)
    con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc, session_uuid)
    acc_ops.logout(session_uuid)
def test():
    """Endless randomized VM operation / backup stress loop.

    Tailors the allowed op lists to the primary-storage type (migration is
    appended for both running and stopped VMs on local storage, running-only
    otherwise), creates a VM from the first non-system Linux root-volume
    template on a Private L3, then loops forever: every 10th round does a
    stop/reset/start cycle; each round applies a random state op, a random
    op valid for the current VM state, writes a marker file in the guest,
    suspends, takes a backup, compares VM vs backup image, and resumes.
    Runs until the harness terminates it (no normal exit from the loop).
    """
    # NOTE(review): backup_list and backup appear to be module globals
    # maintained elsewhere (presumably by vm_op_test) — confirm.
    global test_obj_dict, VM_RUNGGING_OPS, VM_STOPPED_OPS, VM_STATE_OPS, backup
    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)[0]
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        VM_RUNGGING_OPS.append("VM_TEST_MIGRATE")
        VM_STOPPED_OPS.append("VM_TEST_MIGRATE")
    else:
        VM_RUNGGING_OPS.append("VM_TEST_MIGRATE")
    vm_name = "test_vm"
    cond = res_ops.gen_query_conditions("system", '=', "false")
    cond = res_ops.gen_query_conditions("mediaType", '=', "RootVolumeTemplate", cond)
    cond = res_ops.gen_query_conditions("platform", '=', "Linux", cond)
    img_name = res_ops.query_resource(res_ops.IMAGE, cond)[0].name
    cond = res_ops.gen_query_conditions("category", '=', "Private")
    l3_name = res_ops.query_resource(res_ops.L3_NETWORK,cond)[0].name
    vm = test_stub.create_vm(vm_name, img_name, l3_name)
    i = 0
    while True:
        i += 1
        # Every 10 rounds: full stop/reset/start cycle to churn VM state.
        if i == 10:
            vm_op_test(vm, "VM_TEST_STOP")
            vm_op_test(vm, "VM_TEST_RESET")
            vm.start()
            time.sleep(60)
            vm.check()
            i = 0
        vm_op_test(vm, random.choice(VM_STATE_OPS))
        # Pick the op pool matching the VM's current state; drop backup ops
        # when no backup exists yet.
        VM_OPS = VM_STATE_OPS
        if vm.state == "Running":
            VM_OPS = VM_RUNGGING_OPS
            if not backup_list:
                VM_OPS.remove("VM_TEST_BACKUP_IMAGE")
        elif vm.state == "Stopped":
            VM_OPS = VM_STOPPED_OPS
            if not backup_list:
                VM_OPS.remove("VM_TEST_REVERT_BACKUP")
                VM_OPS.remove("VM_TEST_BACKUP_IMAGE")
        vm_op_test(vm, random.choice(VM_OPS))
        if vm.state == "Stopped":
            vm.start()
        if test_lib.lib_is_vm_l3_has_vr(vm.vm):
            test_lib.TestHarness = test_lib.TestHarnessVR
        time.sleep(60)
        # Drop a marker file in the guest so backup contents differ per round.
        cmd = "echo 111 > /root/" + str(int(time.time()))
        test_lib.lib_execute_command_in_vm(vm.vm,cmd)
        vm.suspend()
        # create_snapshot/backup
        vm_op_test(vm, "VM_TEST_BACKUP")
        # compare vm & image created by backup
        compare(ps, vm, backup)
        vm.resume()
def create_vm_with_previous_iso(vm_creation_option = None, session_uuid = None):
    """Create a VM from the image named 'iso' with the configured root disk offering.

    Resolves the ISO image uuid and the root disk offering named by the
    rootDiskOfferingName environment variable, then delegates to create_vm.
    """
    iso_cond = res_ops.gen_query_conditions('name', '=', 'iso')
    iso_image_uuid = res_ops.query_resource(res_ops.IMAGE, iso_cond)[0].uuid
    offering_name = os.environ.get('rootDiskOfferingName')
    root_offering_uuid = test_lib.lib_get_disk_offering_by_name(offering_name).uuid
    return create_vm(vm_creation_option, None, root_offering_uuid,
                     iso_image_uuid, session_uuid = session_uuid)
def test():
    """VM console (VNC) password test.

    Creates a VM with console password password1 and verifies, via
    vncdotool, that the console:
      * rejects connections with no password;
      * accepts password1;
      * after the password is changed to password2 and the VM rebooted,
        accepts password2 and rejects password1 (a SIGALRM-based 30s
        timeout guards the final connect attempt, which may hang instead
        of failing fast).
    """
    from vncdotool import api
    global vm
    session_uuid = None
    instance_offering_uuid = test_lib.lib_get_instance_offering_by_name(os.environ.get('instanceOfferingName_s')).uuid
    cond = res_ops.gen_query_conditions('mediaType', '!=', 'ISO')
    cond = res_ops.gen_query_conditions('platform', '=', 'Linux', cond)
    image_uuid = res_ops.query_resource(res_ops.IMAGE, cond, session_uuid)[0].uuid
    l3net_uuid = res_ops.get_resource(res_ops.L3_NETWORK, session_uuid)[0].uuid
    vm_creation_option = test_util.VmOption()
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_l3_uuids([l3net_uuid])
    vm_creation_option.set_console_password(password1)
    vm = test_vm.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    vm.check()
    console = test_lib.lib_get_vm_console_address(vm.get_vm().uuid)
    test_util.test_logger('[vm:] %s console is on %s:%s' % (vm.get_vm().uuid, console.hostIp, console.port))
    # vncdotool addresses displays, not ports: display = port - 5900.
    display = str(int(console.port) - 5900)
    # 1) No password: connecting must fail.
    try:
        client = api.connect(console.hostIp + ":" + display)
        client.keyPress('k')
        test_util.test_fail('[vm:] %s console on %s:%s is connectable without password' % (vm.get_vm().uuid, console.hostIp, console.port))
    except:
        test_util.test_logger('[vm:] %s console on %s:%s is not connectable without password' % (vm.get_vm().uuid, console.hostIp, console.port))
    # 2) password1: connecting must succeed.
    try:
        client = api.connect(console.hostIp + ":" + display, password1)
        client.keyPress('k')
        test_util.test_logger('[vm:] %s console on %s:%s is connectable with password %s' % (vm.get_vm().uuid, console.hostIp, console.port, password1))
    except:
        test_util.test_fail('[vm:] %s console on %s:%s is not connectable with password %s' % (vm.get_vm().uuid, console.hostIp, console.port, password1))
    # Change the console password; reboot so the new password takes effect.
    test_lib.lib_set_vm_console_password(vm.get_vm().uuid, password2)
    test_util.test_logger('set [vm:] %s console with password %s' % (vm.get_vm().uuid, password2))
    vm.reboot()
    # 3) password2: connecting must succeed after the change.
    try:
        client = api.connect(console.hostIp + ":" + display, password2)
        client.keyPress('k')
        test_util.test_logger('[vm:] %s console on %s:%s is connectable with password %s' % (vm.get_vm().uuid, console.hostIp, console.port, password2))
    except:
        test_util.test_fail('[vm:] %s console on %s:%s is not connectable with password %s' % (vm.get_vm().uuid, console.hostIp, console.port, password2))
    # 4) Old password1 must now fail; a bad-password connect can hang, so
    # arm a 30s SIGALRM that raises to force the except branch.
    import signal
    def handler(signum, frame):
        raise Exception()
    signal.signal(signal.SIGALRM, handler)
    signal.alarm(30)
    try:
        client = api.connect(console.hostIp + ":" + display, password1)
        client.keyPress('k')
        test_util.test_fail('[vm:] %s console on %s:%s is connectable with password %s' % (vm.get_vm().uuid, console.hostIp, console.port, password1))
    except:
        test_util.test_logger('[vm:] %s console on %s:%s is not connectable with password %s' % (vm.get_vm().uuid, console.hostIp, console.port, password1))
    vm.destroy()
    test_util.test_pass('Set VM Console Password Test Success')
def test():
    """HA: VM must return to Running after its host's network drops and recovers.

    Requires local-storage AND SharedMountPoint primary storage deployed and
    HA enabled.  Creates a VM on a non-MN host (and re-ensures it avoids
    VR/MN/NFS hosts), cuts that host's network, waits (up to 300s) for the
    VM to go Unknown, restores the network / SMP NFS server / reconnects the
    host, then waits for the VM to be Running again within the same 300s
    budget.
    """
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout
    must_ps_list = [inventory.LOCAL_STORAGE_TYPE, 'SharedMountPoint']
    test_lib.skip_test_if_any_ps_not_deployed(must_ps_list)
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    test_lib.clean_up_all_vr()
    #vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    #vr_host_ips = []
    #for vr in vrs:
    #    vr_host_ips.append(test_lib.lib_find_host_by_vr(vr).managementIp)
    #    if test_lib.lib_is_vm_running(vr) != True:
    #        vm_ops.start_vm(vr.uuid)
    #time.sleep(60)
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    # Pick an Enabled+Connected host that is not the management node.
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip, conditions)
    #for vr_host_ip in vr_host_ips:
    #    conditions = res_ops.gen_query_conditions('managementIp', '!=', vr_host_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('ls_vm_none_status')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    # The victim host must not also carry a VR, the MN, or the NFS server,
    # otherwise cutting its network breaks more than the test target.
    vr_hosts = test_stub.get_host_has_vr()
    mn_hosts = test_stub.get_host_has_mn()
    nfs_hosts = test_stub.get_host_has_nfs()
    if not test_stub.ensure_vm_not_on(vm.get_vm().uuid, vm.get_vm().hostUuid, vr_hosts+mn_hosts+nfs_hosts):
        test_util.test_fail("Not find out a suitable host")
    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    test_util.test_logger("host %s is disconnecting" %(host_ip))
    test_stub.down_host_network(host_ip, test_lib.all_scenario_config)
    cond = res_ops.gen_query_conditions('name', '=', 'ls_vm_none_status')
    cond = res_ops.gen_query_conditions('uuid', '=', vm.vm.uuid, cond)
    # Phase 1: wait (<=300s) for the VM to be marked Unknown, then recover
    # the host network, NFS server, and reconnect the host.
    for i in range(0, 300):
        vm_stop_time = i
        if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == "Unknown":
            test_stub.up_host_network(host_ip, test_lib.all_scenario_config)
            time.sleep(1)
            test_stub.recover_smp_nfs_server(host_ip)
            conditions = res_ops.gen_query_conditions('managementIp', '=', host_ip)
            kvm_host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
            host_ops.reconnect_host(kvm_host_uuid)
            break
        time.sleep(1)
    if vm_stop_time is None:
        vm_stop_time = 300
    # Phase 2: within the remaining budget, the VM must return to Running.
    for i in range(vm_stop_time, 300):
        if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == "Running":
            break
        time.sleep(1)
    else:
        test_util.test_fail("vm has not been changed to running as expected within 300s.")
    vm.destroy()
    test_util.test_pass('Test VM none change to Stopped within 300s Success')
def query_snapshot_number(snapshot_name):
    """Return the number of volume snapshots named *snapshot_name*."""
    name_cond = res_ops.gen_query_conditions('name', '=', snapshot_name)
    snapshot_count = res_ops.query_resource_count(res_ops.VOLUME_SNAPSHOT, name_cond)
    return snapshot_count
def test():
    """Run the parallel simple-scheduler snapshot scenario against all UserVm
    instances and verify every scheduled operation succeeded."""
    user_vm_cond = res_ops.gen_query_conditions('type', '=', "UserVm")
    runner = Snapshot_VM_Simple_Scheduler_Parall(user_vm_cond, "UserVm")
    runner.parall_test_run()
    runner.check_operation_result()
def test():
    """Host maintenance with two VMs: evacuate, maintain, restore.

    Creates two VR-backed VMs, live-migrates both onto a spare host (one
    that holds neither VM nor the VR), puts that host into maintenance,
    verifies the resulting VM state (Stopped on local storage, otherwise
    unchanged), re-enables the host, restarts the VMs if needed, and
    migrates each VM back to its original host.
    """
    vm1 = test_stub.create_vr_vm('maintain_host_vm1', 'imageName_s', 'l3VlanNetwork2')
    test_obj_dict.add_vm(vm1)
    vm2 = test_stub.create_vr_vm('maintain_host_vm2', 'imageName_s', 'l3VlanNetwork2')
    test_obj_dict.add_vm(vm2)
    vm1.check()
    vm2.check()
    if not test_lib.lib_check_vm_live_migration_cap(vm1.vm) or not test_lib.lib_check_vm_live_migration_cap(vm2.vm):
        test_util.test_skip('skip migrate if live migrate not supported')
    current_host1 = test_lib.lib_get_vm_host(vm1.vm)
    current_host2 = test_lib.lib_get_vm_host(vm2.vm)
    conditions = res_ops.gen_query_conditions('clusterUuid', '=', vm1.vm.clusterUuid)
    conditions = res_ops.gen_query_conditions('state', '=', host_header.ENABLED, conditions)
    conditions = res_ops.gen_query_conditions('status', '=', host_header.CONNECTED, conditions)
    all_hosts = res_ops.query_resource(res_ops.HOST, conditions)
    if len(all_hosts) <= 1:
        test_util.test_fail('Not available host to do maintenance, since there is only %s host' % len(all_hosts))
    vr = test_lib.lib_get_all_vrs()
    if len(vr) == 0:
        test_util.test_skip('Skip test if not using vr')
    vr_uuid = vr[0].uuid
    vr_host_uuid = test_lib.lib_get_vm_host(vr[0]).uuid
    # Find a host that carries neither VM nor the VR and move both VMs
    # there; the for/else skips the test when no such host exists.
    for host_n in all_hosts:
        print 'host_n%s' % (host_n.uuid)
        if host_n.uuid != current_host1.uuid:
            if host_n.uuid != current_host2.uuid:
                if host_n.uuid != vr_host_uuid:
                    target_host = host_n
                    print 'target_host_uuid%s' % (target_host.uuid)
                    vm1.migrate(target_host.uuid)
                    vm2.migrate(target_host.uuid)
                    break
    else:
        test_util.test_skip('can not find a host to migrate two host')
    new_host = test_lib.lib_get_vm_host(vm1.vm)
    if new_host.uuid != target_host.uuid:
        test_util.test_fail('VM did not migrate to target [host:] %s, but to [host:] %s' % (target_host.uuid, new_host.uuid))
    new_host1 = test_lib.lib_get_vm_host(vm2.vm)
    if new_host1.uuid != target_host.uuid:
        test_util.test_fail('VM did not migrate to target [host:] %s, but to [host:] %s' % (target_host.uuid, new_host1.uuid))
    host = test_kvm_host.ZstackTestKvmHost()
    host.set_host(target_host)
    host.maintain()
    #need to update vm's inventory, since they will be changed by maintenace mode
    vm1.update()
    vm2.update()
    ps = test_lib.lib_get_primary_storage_by_vm(vm1.get_vm())
    # On local storage maintenance stops the VMs instead of migrating them.
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        vm1.set_state(vm_header.STOPPED)
        vm2.set_state(vm_header.STOPPED)
    vm1.check()
    vm2.check()
    host.change_state(test_kvm_host.ENABLE_EVENT)
    if not linux.wait_callback_success(is_host_connected, host.get_host().uuid, 120):
        test_util.test_fail('host status is not changed to connected or host state is not changed to Enabled within 120s')
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        vm1.start()
        vm2.start()
        vm1.set_state(vm_header.RUNNING)
        vm2.set_state(vm_header.RUNNING)
    vm1.check()
    vm2.check()
    # Move each VM back to its original host if maintenance displaced it.
    post_host1 = test_lib.lib_get_vm_host(vm1.vm)
    post_host2 = test_lib.lib_get_vm_host(vm2.vm)
    if post_host1.uuid != current_host1.uuid:
        vm1.migrate(current_host1.uuid)
    if post_host2.uuid != current_host2.uuid:
        vm2.migrate(current_host2.uuid)
    vm1.check()
    vm2.check()
    vm1.destroy()
    test_obj_dict.rm_vm(vm1)
    vm2.destroy()
    test_obj_dict.rm_vm(vm2)
    test_util.test_pass('Maintain Host Test Success')
def test():
    """HA NeverStop: VM must self-start after its host is cold-stopped.

    Requires local-storage AND NFS primary storage deployed and HA enabled.
    Creates a VM on a non-MN host avoiding VR/MN/NFS hosts, sets HA level
    NeverStop, cold-stops the scenario host, waits (<=300s) for the VM to
    reach Stopped, restarts/recovers the host, then waits for the VM to
    reach Starting within the same 300s budget.
    """
    global vm
    global host_uuid
    global test_host
    global host_ip
    global max_attempts
    global storagechecker_timeout
    must_ps_list = [inventory.LOCAL_STORAGE_TYPE, inventory.NFS_PRIMARY_STORAGE_TYPE]
    test_lib.skip_test_if_any_ps_not_deployed(must_ps_list)
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    test_lib.clean_up_all_vr()
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    # Pick an Enabled+Connected host that is not the management node.
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('ls_vm_ha_self_start')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    # The victim host must not also carry a VR, the MN, or the NFS server.
    vr_hosts = test_stub.get_host_has_vr()
    mn_hosts = test_stub.get_host_has_mn()
    nfs_hosts = test_stub.get_host_has_nfs()
    if not test_stub.ensure_vm_not_on(vm.get_vm().uuid, vm.get_vm().hostUuid, vr_hosts+mn_hosts+nfs_hosts):
        test_util.test_fail("Not find out a suitable host")
    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    test_util.test_logger("host %s is disconnecting" %(host_ip))
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
    # Map the management IP back to its scenario-file host object.
    host_list = test_stub.get_sce_hosts(test_lib.all_scenario_config, test_lib.scenario_file)
    for host in host_list:
        if host.ip_ == host_ip:
            test_host = host
            break
    if not test_host:
        test_util.test_fail('there is no host with ip %s in scenario file.' %(host_ip))
    test_stub.stop_host(test_host, test_lib.all_scenario_config, 'cold')
    vm_stop_time = None
    cond = res_ops.gen_query_conditions('name', '=', 'ls_vm_ha_self_start')
    cond = res_ops.gen_query_conditions('uuid', '=', vm.vm.uuid, cond)
    # Phase 1: wait (<=300s) for Stopped, then bring the host back and
    # reconnect it.
    for i in range(0, 300):
        vm_stop_time = i
        if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == "Stopped":
            test_stub.start_host(test_host, test_lib.all_scenario_config)
            test_stub.recover_host_vlan(test_host, test_lib.all_scenario_config, test_lib.deploy_config)
            conditions = res_ops.gen_query_conditions('managementIp', '=', host_ip)
            kvm_host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
            host_ops.reconnect_host(kvm_host_uuid)
            break
        time.sleep(1)
    if vm_stop_time is None:
        vm_stop_time = 300
    # Phase 2: within the remaining budget, HA must move the VM to Starting.
    for i in range(vm_stop_time, 300):
        if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == "Starting":
            break
        time.sleep(1)
    else:
        test_util.test_fail("vm has not been changed to running as expected within 300s.")
    vm.destroy()
    test_util.test_pass('Test checking VM ha and none status when force stop vm Success.')
def test():
    """Storage over-provision rate: capacity accounting must balance.

    Sizes a data-volume disk offering so that, at over-provision rate 2.5,
    it consumes a known share of the zone's available capacity; creates and
    attaches one volume at rate 2.5 and a second at rate 1.5, deletes both
    (each under a different rate), restores the original rate, and verifies
    the zone's available capacity returns exactly to its starting value.

    Fix: the final pass message previously said 'Memory Over Provision';
    this is the storage over-provision test.
    """
    global original_rate
    test_util.test_dsc('Test change storage over provision rate method')
    zone_uuid = res_ops.query_resource(res_ops.ZONE)[0].uuid
    cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
    cond = res_ops.gen_query_conditions('status', '=', 'Connected', cond)
    host = res_ops.query_resource_with_num(res_ops.HOST, cond, limit=1)
    if not host:
        test_util.test_skip('No Enabled/Connected host was found, skip test.')
        return True
    host = host[0]
    over_provision_rate1 = 2.5
    over_provision_rate2 = 1.5
    target_volume_num = 12
    # Leave this much capacity untouched so other operations don't starve.
    kept_disk_size = 10 * 1024 * 1024
    vm = test_stub.create_vm(vm_name = 'storage_over_prs_vm_1', \
            host_uuid = host.uuid)
    test_obj_dict.add_vm(vm)
    host_res = test_lib.lib_get_cpu_memory_capacity(host_uuids=[host.uuid])
    ps_res = test_lib.lib_get_storage_capacity(zone_uuids=[zone_uuid])
    avail_cap = ps_res.availableCapacity
    if avail_cap < kept_disk_size:
        test_util.test_skip('available disk capacity:%d is too small, skip test.' % avail_cap)
        return True
    original_rate = test_lib.lib_set_provision_storage_rate(over_provision_rate1)
    # Size each volume so target_volume_num of them would fill the
    # over-provisioned capacity (minus the kept reserve).
    data_volume_size = int(over_provision_rate1 * (avail_cap - kept_disk_size) / target_volume_num)
    disk_offering_option = test_util.DiskOfferingOption()
    disk_offering_option.set_name('storage-over-ps-test')
    disk_offering_option.set_diskSize(data_volume_size)
    data_volume_offering = vol_ops.create_volume_offering(disk_offering_option)
    test_obj_dict.add_disk_offering(data_volume_offering)
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(data_volume_offering.uuid)
    volume_creation_option.set_name('volume-1')
    volume1 = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume1)
    #res = test_lib.lib_get_storage_capacity(zone_uuids = [zone_uuid])
    #test_util.test_logger('Current available storage size: %d' % res.availableCapacity)
    volume1.attach(vm)
    # Create the second volume under a different over-provision rate, then
    # delete each volume while yet another rate is active, to exercise the
    # accounting across rate changes.
    test_lib.lib_set_provision_storage_rate(over_provision_rate2)
    volume_creation_option.set_name('volume-2')
    volume2 = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume2)
    #res = test_lib.lib_get_storage_capacity(zone_uuids = [zone_uuid])
    #test_util.test_logger('Current available storage size: %d' % res.availableCapacity)
    volume2.attach(vm)
    volume1.delete()
    test_lib.lib_set_provision_storage_rate(over_provision_rate1)
    volume2.delete()
    test_lib.lib_set_provision_storage_rate(original_rate)
    # After all volumes are gone the available capacity must be unchanged.
    ps_res2 = test_lib.lib_get_storage_capacity(zone_uuids=[zone_uuid])
    avail_cap2 = ps_res2.availableCapacity
    if avail_cap2 != avail_cap:
        test_util.test_fail('Available disk size: %d is different with original size: %d, after creating volume under different over rate.' % (avail_cap2, avail_cap))
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Storage Over Provision Test Pass')
def is_host_connected(host_uuid):
    """Return True when the host is Connected and Enabled, else False.

    Used as a poll callback (e.g. with linux.wait_callback_success).
    The original returned an implicit None on the negative path; an
    explicit boolean is equivalent for truthiness-based callers.
    """
    cond = res_ops.gen_query_conditions('uuid', '=', host_uuid)
    host = res_ops.query_resource(res_ops.HOST, cond)[0]
    return host.status == 'Connected' and host.state == 'Enabled'
def test():
    """HA NeverStop on local storage: VM self-starts after a network blip.

    Only runs on local-storage primary storage with HA enabled.  Creates a
    VM on a non-MN host with no VR, sets HA level NeverStop, downs the
    host's network, waits (<=180s) for the VM to go Unknown, restores the
    network, then waits for the VM to return to Running within the same
    180s budget.
    """
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout
    allow_ps_list = [inventory.LOCAL_STORAGE_TYPE]
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    test_lib.clean_up_all_vr()
    #vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    #vr_host_ips = []
    #for vr in vrs:
    #    vr_host_ips.append(test_lib.lib_find_host_by_vr(vr).managementIp)
    #    if test_lib.lib_is_vm_running(vr) != True:
    #        vm_ops.start_vm(vr.uuid)
    #time.sleep(60)
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    # Pick an Enabled+Connected host that is not the management node.
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip, conditions)
    #for vr_host_ip in vr_host_ips:
    #    conditions = res_ops.gen_query_conditions('managementIp', '!=', vr_host_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('ls_vm_ha_self_start')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    test_stub.ensure_host_has_no_vr(host_uuid)
    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    test_util.test_logger("host %s is disconnecting" %(host_ip))
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
    test_stub.down_host_network(host_ip, test_lib.all_scenario_config)
    #test_util.test_logger("wait for 180 seconds")
    #time.sleep(180)
    vm_stop_time = None
    cond = res_ops.gen_query_conditions('name', '=', 'ls_vm_ha_self_start')
    cond = res_ops.gen_query_conditions('uuid', '=', vm.vm.uuid, cond)
    # Phase 1: wait (<=180s) for Unknown, then restore the host network.
    for i in range(0, 180):
        vm_stop_time = i
        if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == "Unknown":
            test_stub.up_host_network(host_ip, test_lib.all_scenario_config)
            break
        time.sleep(1)
    if vm_stop_time is None:
        vm_stop_time = 180
    # Phase 2: within the remaining budget, HA must bring the VM back up.
    for i in range(vm_stop_time, 180):
        if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == "Running":
            break
        time.sleep(1)
    else:
        test_util.test_fail("vm has not been changed to running as expected within 180s.")
    vm.destroy()
    test_util.test_pass('Test checking VM ha self-start after host is disconnect and recover Success')
def test():
    """Host self-fencer: a brief network outage must NOT shut the host down.

    Tightens the self-fencer knobs (12 attempts, 15s storage-checker
    timeout), creates an HA 'OnHostFailure' VM on a non-MN host, bounces the
    host's management interface for 30 seconds via the agent, and verifies
    the host survives both the bounce and a follow-up probe.  Restores the
    original fencer settings afterwards.

    Fix: the VR warm-up loop previously started a VR only when
    lib_is_vm_running(vr) was True (i.e. restarted running VRs and skipped
    stopped ones); the sibling tests use '!= True', starting only
    non-running VRs.
    """
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")
    # Save and tighten the self-fencer settings; restored before test_pass.
    max_attempts = test_lib.lib_get_ha_selffencer_maxattempts()
    test_lib.lib_set_ha_selffencer_maxattempts('12')
    storagechecker_timeout = test_lib.lib_get_ha_selffencer_storagechecker_timeout()
    test_lib.lib_set_ha_selffencer_storagechecker_timeout('15')
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_s')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    #l3_name = os.environ.get('l3NoVlanNetworkName1')
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    # Make sure every VR on the test L3 is running before fencing begins.
    vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    for vr in vrs:
        if test_lib.lib_is_vm_running(vr) != True:
            vm_ops.start_vm(vr.uuid)
    time.sleep(60)
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    # Pick an Enabled+Connected host other than the configured MN host.
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', os.environ.get('hostIp'), conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    host_uuid = test_lib.lib_find_host_by_vm(vm.get_vm()).uuid
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "OnHostFailure")
    # Bounce the management interface for 30s; the self-fencer must NOT
    # shut the host down for such a short outage.
    l2_network_interface = os.environ.get('l2ManagementNetworkInterface')
    cmd = "ifdown %s && sleep 30 && ifup %s" % (l2_network_interface, l2_network_interface)
    try:
        rsp = test_lib.lib_execute_sh_cmd_by_agent(host_ip, cmd)
        test_util.test_logger("host is not expected to shutdown after its network down just for a little while")
    except:
        test_util.test_fail("host may have been shutdown, while it's not expected to shutdown")
    # Probe the host again to confirm it is still alive.
    cmd = "date"
    try:
        rsp = test_lib.lib_execute_sh_cmd_by_agent(host_ip, cmd)
        test_util.test_logger("host is still alive")
    except:
        test_util.test_fail("host is not expected to shutdown after its network down just for a little while")
    vm.destroy()
    # Restore the original self-fencer settings.
    test_lib.lib_set_ha_selffencer_maxattempts(max_attempts)
    test_lib.lib_set_ha_selffencer_storagechecker_timeout(storagechecker_timeout)
    time.sleep(60)
    test_util.test_pass('Test Host Self fence Success')
def test():
    """Build an IPsec tunnel between two separate ZStack deployments and verify
    VM-to-VM connectivity over it.

    Creates one VM per deployment, takes each VR's public L3 for a VIP, creates
    a symmetric IPsec connection pair, checks bidirectional ping, then deletes
    ipsec1 and confirms the tunnel is actually torn down (ping must fail).

    Side effects: repeatedly rewrites ZSTACK_BUILT_IN_HTTP_SERVER_IP in
    os.environ to point API calls at one management node or the other.
    """
    global mevoco1_ip
    global mevoco2_ip
    global ipsec1
    global ipsec2
    mevoco1_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
    mevoco2_ip = os.environ['secondZStackMnIp']
    test_util.test_dsc('Create test vm in mevoco1')
    vm1 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    test_obj_dict1.add_vm(vm1)
    vm1.check()
    pri_l3_uuid1 = vm1.vm.vmNics[0].l3NetworkUuid
    vr1 = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid1)[0]
    l3_uuid1 = test_lib.lib_find_vr_pub_nic(vr1).l3NetworkUuid
    vip1 = test_stub.create_vip('ipsec1_vip', l3_uuid1)
    cond = res_ops.gen_query_conditions('uuid', '=', pri_l3_uuid1)
    # CIDR of mevoco1's private network; advertised as the remote CIDR on mevoco2.
    first_zstack_cidrs = res_ops.query_resource(
        res_ops.L3_NETWORK, cond)[0].ipRanges[0].networkCidr
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    test_util.test_dsc('Create test vm in mevoco2')
    vm2 = test_stub.create_vlan_vm(os.environ.get('l3VlanDNATNetworkName'))
    test_obj_dict2.add_vm(vm2)
    vm2.check()
    pri_l3_uuid2 = vm2.vm.vmNics[0].l3NetworkUuid
    vr2 = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid2)[0]
    l3_uuid2 = test_lib.lib_find_vr_pub_nic(vr2).l3NetworkUuid
    vip2 = test_stub.create_vip('ipsec2_vip', l3_uuid2)
    cond = res_ops.gen_query_conditions('uuid', '=', pri_l3_uuid2)
    second_zstack_cidrs = res_ops.query_resource(
        res_ops.L3_NETWORK, cond)[0].ipRanges[0].networkCidr
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    test_util.test_dsc('Create ipsec in mevoco1')
    # Each side peers with the other side's VIP, sharing PSK '123456'.
    ipsec1 = ipsec_ops.create_ipsec_connection('ipsec1', pri_l3_uuid1,
                                               vip2.get_vip().ip, '123456',
                                               vip1.get_vip().uuid,
                                               [second_zstack_cidrs])
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    test_util.test_dsc('Create ipsec in mevoco2')
    ipsec2 = ipsec_ops.create_ipsec_connection('ipsec2', pri_l3_uuid2,
                                               vip1.get_vip().ip, '123456',
                                               vip2.get_vip().uuid,
                                               [first_zstack_cidrs])
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    if not test_lib.lib_check_ping(vm1.vm, vm2.vm.vmNics[0].ip):
        test_util.test_fail(
            'vm in mevoco1[MN:%s] could not connect to vm in mevoco2[MN:%s]' %
            (mevoco1_ip, mevoco2_ip))
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    if not test_lib.lib_check_ping(vm2.vm, vm1.vm.vmNics[0].ip):
        test_util.test_fail(
            'vm in mevoco1[MN:%s] could not connect to vm in mevoco2[MN:%s]' %
            (mevoco2_ip, mevoco1_ip))
    # Negative check: after deleting one side of the tunnel, ping must fail.
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    ipsec_ops.delete_ipsec_connection(ipsec1.uuid)
    if test_lib.lib_check_ping(vm1.vm, vm2.vm.vmNics[0].ip, no_exception=True):
        test_util.test_fail(
            'vm in mevoco1[MN:%s] could still connect to vm in mevoco2[MN:%s] after Ipsec is deleted' %
            (mevoco1_ip, mevoco2_ip))
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    if test_lib.lib_check_ping(vm2.vm, vm1.vm.vmNics[0].ip, no_exception=True):
        test_util.test_fail(
            'vm in mevoco2[MN:%s] could still connect to vm in mevoco1[MN:%s] after Ipsec is deleted' %
            (mevoco2_ip, mevoco1_ip))
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    test_lib.lib_error_cleanup(test_obj_dict1)
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    ipsec_ops.delete_ipsec_connection(ipsec2.uuid)
    test_lib.lib_error_cleanup(test_obj_dict2)
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    test_util.test_pass('Create Ipsec Success')
def test():
    """Verify CleanUpImageCacheOnPrimaryStorage removes stale image caches.

    Adds a template, boots a VM from it on one host, expunges the VM, boots a
    second VM from the same template on a different host, deletes and expunges
    the template, then runs the cache cleanup twice (while vm2 exists, and
    after vm2 is expunged) checking on local/NFS storage that the cached qcow2
    is gone. Skips on SMP and Ceph, where image cache cleanup does not apply.
    """
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, \
            None, fields=['uuid'])
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")
    image_option = test_util.ImageOption()
    image_option.set_name('test_image_cache_cleanup')
    image_option.set_format('qcow2')
    image_option.set_mediaType('RootVolumeTemplate')
    image_option.set_url(os.environ.get('imageUrl_s'))
    image_option.set_backup_storage_uuid_list([bss[0].uuid])
    new_image = zstack_image_header.ZstackTestImage()
    new_image.set_creation_option(image_option)
    new_image.add_root_volume_template()
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option = test_util.VmOption()
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(new_image.image.uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('test_image_cache_cleanup_vm1')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    test_obj_dict.add_vm(vm)
    vm.check()
    host = test_lib.lib_find_host_by_vm(vm.get_vm())
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    # Expunge vm1 so its host's image cache becomes unreferenced.
    vm.destroy()
    vm.expunge()
    # Place the second VM on a different host than vm1 used.
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('uuid', '!=', host.uuid, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_name('test_image_cache_cleanup_vm2')
    vm2 = test_vm_header.ZstackTestVm()
    vm2.set_creation_option(vm_creation_option)
    vm2.create()
    host2 = test_lib.lib_find_host_by_vm(vm2.get_vm())
    test_obj_dict.add_vm(vm2)
    new_image.delete()
    new_image.expunge()
    if ps.type == 'SharedMountPoint':
        test_util.test_skip(
            'CleanUpImageCacheOnPrimaryStorage not supported on SMP storage, skip test.')
    elif ps.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
        test_util.test_skip('ceph is not directly using image cache, skip test.')
    # First cleanup: vm2 still runs on host2, but host's cache (vm1's) is stale.
    ps_ops.cleanup_imagecache_on_primary_storage(ps.uuid)
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        image_cache_path = "%s/imagecache/template/%s/%s.qcow2" % (
            ps.mountPath, new_image.image.uuid, new_image.image.uuid)
        if test_lib.lib_check_file_exist(host, image_cache_path):
            test_util.test_fail('image cache is expected to be deleted')
    # Second cleanup after vm2 is expunged: host2's cache must go as well.
    vm2.destroy()
    vm2.expunge()
    ps_ops.cleanup_imagecache_on_primary_storage(ps.uuid)
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        image_cache_path = "%s/imagecache/template/%s" % (ps.mountPath,
                                                          new_image.image.uuid)
        if test_lib.lib_check_file_exist(host2, image_cache_path):
            test_util.test_fail('image cache is expected to be deleted')
    elif ps.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
        image_cache_path = "%s/imagecache/template/%s" % (ps.mountPath,
                                                          new_image.image.uuid)
        if test_lib.lib_check_file_exist(host2, image_cache_path):
            test_util.test_fail('image cache is expected to be deleted')
    # elif ps.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
    # elif ps.type == 'SharedMountPoint':
    test_util.test_pass('imagecache cleanup Pass.')
def test():
    """Verify an IPsec tunnel between a native ZStack network and a vCenter
    network, using each virtual router's SNAT IP as the VIP.

    Binds each guest L3 to its VR instance offering via the guestL3Network
    system tag, creates one VM on each side, builds a symmetric IPsec pair,
    and checks ping in both directions before cleaning up.
    """
    zstack_pri_name = os.environ['l3VlanDNATNetworkName']
    zstack_image = os.environ['imageName_net']
    zstack_vr_name = os.environ['virtualRouterOfferingName_s']
    cond = res_ops.gen_query_conditions('name', '=', zstack_vr_name)
    zstack_vr_instance = res_ops.query_resource(res_ops.INSTANCE_OFFERING, cond)[0]
    cond = res_ops.gen_query_conditions('name', '=', zstack_pri_name)
    zstack_pri = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0]
    zstack_pri_uuid = zstack_pri.uuid
    # Pin the guest L3 to this VR offering so the VR serves that network.
    zstack_tag = "guestL3Network::" + zstack_pri_uuid
    tag_ops.create_system_tag("InstanceOfferingVO", zstack_vr_instance.uuid,
                              zstack_tag)
    vcenter_pri_name = os.environ['l3vCenterNoVlanNetworkName']
    vcenter_image = os.environ['image_dhcp_name']
    vcenter_vr_name = os.environ['vCenterVirtualRouterOfferingName']
    cond = res_ops.gen_query_conditions('name', '=', vcenter_vr_name)
    vcenter_vr_instance = res_ops.query_resource(res_ops.INSTANCE_OFFERING, cond)[0]
    cond = res_ops.gen_query_conditions('name', '=', vcenter_pri_name)
    vcenter_pri = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0]
    vcenter_pri_uuid = vcenter_pri.uuid
    vcenter_tag = "guestL3Network::" + vcenter_pri_uuid
    tag_ops.create_system_tag("InstanceOfferingVO", vcenter_vr_instance.uuid,
                              vcenter_tag)
    test_util.test_dsc('Create test vm')
    vm1 = test_stub.create_vm(vm_name='test_ipsec_1', image_name=zstack_image,
                              l3_name=zstack_pri_name)
    test_obj_dict.add_vm(vm1)
    vm2 = test_stub.create_vm_in_vcenter(vm_name='test_ipsec_2',
                                         image_name=vcenter_image,
                                         l3_name=vcenter_pri_name)
    test_obj_dict.add_vm(vm2)
    # NOTE(review): fixed sleep presumably waits for vCenter VM/VR to settle —
    # confirm whether a status poll would be more reliable.
    time.sleep(50)
    test_util.test_dsc('Create 2 vip with 2 snat ip')
    pri_l3_uuid1 = vm1.vm.vmNics[0].l3NetworkUuid
    vr1 = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid1)[0]
    l3_uuid1 = test_lib.lib_find_vr_pub_nic(vr1).l3NetworkUuid
    vr1_pub_ip = test_lib.lib_find_vr_pub_ip(vr1)
    vip1 = zstack_vip_header.ZstackTestVip()
    vip1.get_snat_ip_as_vip(vr1_pub_ip)
    vip1.isVcenter = True
    test_obj_dict.add_vip(vip1)
    pri_l3_uuid2 = vm2.vm.vmNics[0].l3NetworkUuid
    vr2 = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid2)[0]
    l3_uuid2 = test_lib.lib_find_vr_pub_nic(vr2).l3NetworkUuid
    vr2_pub_ip = test_lib.lib_find_vr_pub_ip(vr2)
    vip2 = zstack_vip_header.ZstackTestVip()
    vip2.get_snat_ip_as_vip(vr2_pub_ip)
    vip2.isVcenter = True
    test_obj_dict.add_vip(vip2)
    test_util.test_dsc('Create ipsec with 2 vip')
    zstack_pri_cidr = zstack_pri.ipRanges[0].networkCidr
    vcenter_pri_cidr = vcenter_pri.ipRanges[0].networkCidr
    # Symmetric pair: each side peers with the other's VIP, PSK '123456'.
    ipsec1 = ipsec_ops.create_ipsec_connection('zstack_vcenter', pri_l3_uuid1,
                                               vip2.get_vip().ip, '123456',
                                               vip1.get_vip().uuid,
                                               [vcenter_pri_cidr])
    ipsec2 = ipsec_ops.create_ipsec_connection('vcenter_zstack', pri_l3_uuid2,
                                               vip1.get_vip().ip, '123456',
                                               vip2.get_vip().uuid,
                                               [zstack_pri_cidr])
    #conditions = res_ops.gen_query_conditions('name', '=', 'test_ipsec_1')
    #vm1 = res_ops.query_resource(res_ops.VM_INSTANCE, conditions)[0]
    #conditions = res_ops.gen_query_conditions('name', '=', 'test_ipsec_2')
    #vm2 = res_ops.query_resource(res_ops.VM_INSTANCE, conditions)[0]
    if not test_lib.lib_check_ping(vm1.vm, vm2.vm.vmNics[0].ip):
        test_util.test_fail(
            'vm1 in zstack could not connect to vm2 in vcenter with IPsec')
    if not test_lib.lib_check_ping(vm2.vm, vm1.vm.vmNics[0].ip):
        test_util.test_fail(
            'vm2 in vcenter could not connect to vm1 in zstack with IPsec')
    ipsec_ops.delete_ipsec_connection(ipsec1.uuid)
    ipsec_ops.delete_ipsec_connection(ipsec2.uuid)
    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Create Ipsec Success')
def test():
    """Install a two-node ZStack management cluster inside two CentOS 7 VMs
    and verify the nodes stay consistent through stop/start cycles.

    Installs zstack on vm1, then uses `zstack-ctl install_management_node` to
    add vm2, and checks the installation while alternately stopping/starting
    each node. Destroys both VMs at the end.
    """
    global vm1_inv
    global vm2_inv
    zstack_latest_version = os.environ.get('zstackLatestVersion')
    zstack_latest_path = os.environ.get('zstackLatestInstaller')
    iso_path = os.environ.get('iso_path')
    upgrade_script_path = os.environ.get('upgradeScript')
    test_util.test_dsc(
        'Create 2 CentOS7 vm to test install management node installation')
    conditions = res_ops.gen_query_conditions(
        'name', '=', os.environ.get('imageNameBase_21_ex'))
    image = res_ops.query_resource(res_ops.IMAGE, conditions)[0]
    vm1_inv = create_vm(image)
    vm2_inv = create_vm(image)
    vm1_ip = vm1_inv.vmNics[0].ip
    vm2_ip = vm2_inv.vmNics[0].ip
    # Give the guests time to boot before ssh provisioning.
    time.sleep(60)
    test_stub.make_ssh_no_password(vm1_ip, tmp_file)
    test_stub.update_iso(vm1_ip, tmp_file, iso_path, upgrade_script_path)
    target_file = '/root/zstack-all-in-one.tgz'
    test_stub.prepare_test_env(vm1_inv, target_file)
    ssh_cmd1 = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % vm1_ip
    ssh_cmd2 = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % vm2_ip
    test_util.test_dsc('Install zstack 2.1.1 master mangement node on vm1')
    # Wipe any leftover installation on vm1 before the fresh install.
    cmd = '%s "[ -e /usr/local/zstack ] && echo yes || echo no"' % ssh_cmd1
    (process_result, cmd_stdout) = test_stub.execute_shell_in_process_stdout(cmd, tmp_file)
    if process_result != 0:
        test_util.test_fail('check /usr/local/zstack fail, cmd_stdout:%s' % cmd_stdout)
    cmd_stdout = cmd_stdout[:-1]  # strip trailing newline from remote echo
    if cmd_stdout == "yes":
        # BUG FIX: originally referenced the undefined name `ssh_cmd`, which
        # raised NameError whenever /usr/local/zstack already existed on vm1.
        cmd = '%s "rm -rf /usr/local/zstack"' % ssh_cmd1
        (process_result, cmd_stdout) = test_stub.execute_shell_in_process_stdout(
            cmd, tmp_file)
        if process_result != 0:
            test_util.test_fail('delete /usr/local/zstack fail')
    test_stub.execute_all_install(ssh_cmd1, target_file, tmp_file)
    #test_util.test_dsc('Upgrade the latest master zstack')
    #test_stub.upgrade_zstack(vm1_ip, zstack_latest_path, tmp_file)
    #test_stub.check_zstack_version(vm1_ip, tmp_file, zstack_latest_version)
    #test_stub.start_mn(vm1_ip, tmp_file)
    #test_stub.check_installation(vm1_ip, tmp_file)
    test_util.test_dsc('only Install one management node on vm2')
    host_list = 'root:password@%s ' % vm2_ip
    cmd = '%s "zstack-ctl install_management_node --host=%s"' % (ssh_cmd1, host_list)
    process_result = test_stub.execute_shell_in_process(cmd, tmp_file)
    test_util.test_dsc('Check installation on vm1')
    test_stub.check_installation(vm1_ip, tmp_file)
    test_util.test_dsc('Check installation on vm2')
    test_stub.make_ssh_no_password(vm2_ip, tmp_file)
    cmd = '%s "zstack-ctl start_node"' % ssh_cmd2
    process_result = test_stub.execute_shell_in_process(cmd, tmp_file)
    test_stub.check_installation(vm1_ip, tmp_file)
    # Alternate stopping each node; the surviving node must stay healthy.
    test_util.test_dsc('check installation stop_node one ')
    test_stub.stop_node(vm1_ip, tmp_file)
    test_stub.start_node(vm2_ip, tmp_file)
    test_stub.check_installation(vm2_ip, tmp_file)
    test_util.test_dsc('check installation stop_node the second one ')
    test_stub.start_node(vm1_ip, tmp_file)
    test_stub.stop_node(vm2_ip, tmp_file)
    test_stub.check_installation(vm1_ip, tmp_file)
    os.system('rm -f %s' % tmp_file)
    sce_ops.destroy_vm(zstack_management_ip, vm1_inv.uuid)
    sce_ops.destroy_vm(zstack_management_ip, vm2_inv.uuid)
    test_util.test_pass(
        'ZStack install management nodes installation Test Success')
def test():
    """Verify NeverStop-HA restarts a VM on another host after host power loss.

    Requires NFS primary storage and HA enabled. Places a VM on a host holding
    no VR, no MN, and no NFS server, force-powers that host off, waits, and
    checks the VM is Running on a different host with the failed host recorded
    as its last host. Finally recovers and reconnects the failed host.
    """
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout
    must_ps_list = [inventory.NFS_PRIMARY_STORAGE_TYPE]
    test_lib.skip_test_if_any_ps_not_deployed(must_ps_list)
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    #l3_name = os.environ.get('l3NoVlanNetworkName1')
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    test_lib.clean_up_all_vr()
    #vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    #vr_host_ips = []
    #for vr in vrs:
    #    vr_host_ips.append(test_lib.lib_find_host_by_vr(vr).managementIp)
    #    if test_lib.lib_is_vm_running(vr) != True:
    #        vm_ops.start_vm(vr.uuid)
    #time.sleep(60)
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    # Candidate host: enabled, connected, and not the MN host (it will be
    # powered off later).
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip, conditions)
    #for vr_host_ip in vr_host_ips:
    #    conditions = res_ops.gen_query_conditions('managementIp', '!=', vr_host_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    # The VM must not share a host with a VR, the MN, or the NFS server —
    # killing such a host would break more than the VM under test.
    vr_hosts = test_stub.get_host_has_vr()
    mn_hosts = test_stub.get_host_has_mn()
    nfs_hosts = test_stub.get_host_has_nfs()
    if not test_stub.ensure_vm_not_on(vm.get_vm().uuid, vm.get_vm().hostUuid,
                                      vr_hosts + mn_hosts + nfs_hosts):
        test_util.test_fail("Not find out a suitable host")
    host_uuid = test_lib.lib_find_host_by_vm(vm.get_vm()).uuid
    test_stub.ensure_all_vrs_on_host(host_uuid)
    #vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    #target_host_uuid = test_lib.lib_find_host_by_vm(vm.get_vm()).uuid
    #for vr in vrs:
    #    if test_lib.lib_find_host_by_vr(vr).managementIp != test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp:
    #        vm_ops.migrate_vm(vr.uuid, target_host_uuid)
    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    host_port = test_lib.lib_get_host_port(host_ip)
    test_util.test_logger("host %s is disconnecting" % (host_ip))
    host_uuid = test_lib.lib_find_host_by_vm(vm.get_vm()).uuid
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
    # Simulate a hard host failure via an external force-stop script.
    test_util.test_logger("force stop host: %s" % (host_ip))
    os.system('bash -ex %s %s' % (os.environ.get('hostForceStopScript'), host_ip))
    test_util.test_logger("host is expected to shutdown for a while")
    test_util.test_logger("wait for 300 seconds")
    time.sleep(300)
    vm.update()
    if test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp == host_ip:
        test_util.test_fail("VM is expected to start running on another host")
    vm.set_state(vm_header.RUNNING)
    vm.check()
    if test_lib.lib_get_vm_last_host(vm.get_vm()).managementIp != host_ip:
        test_util.test_fail(
            "Migrated VM's last host is expected to be the last host[ip:%s]" %
            (host_ip))
    vm.destroy()
    # Bring the failed host back and reconnect it in ZStack.
    os.system('bash -ex %s %s' % (os.environ.get('hostRecoverScript'), host_ip))
    host_ops.reconnect_host(host_uuid)
    test_util.test_pass('Test VM ha on host failure Success')
def test():
    """Reboot all Running VMs in parallel and verify they end up Running."""
    running_cond = res_ops.gen_query_conditions('state', '=', "Running")
    parallel_reboot = Reboot_VM_Parall(running_cond, "Running")
    parallel_reboot.parall_test_run()
def query_monitor_trigger_action(monitor_trigger_action_uuid):
    """Query monitor trigger actions by uuid; returns the matching inventories."""
    by_uuid = res_ops.gen_query_conditions('uuid', '=',
                                           monitor_trigger_action_uuid)
    return res_ops.query_resource(res_ops.MONITOR_TRIGGER_ACTION, by_uuid)
def test():
    """End-to-end check of image-related ZWatch alarms delivered via SNS email.

    Creates an SNS email platform/endpoint/topic, registers alarms for every
    image metric in the ZStack/Image namespace, creates enough templates and
    ISOs to trip them, and verifies an alarm email arrived for each metric.
    Then it installs custom alarm/event text templates, triggers the data
    volume template alarms, and passes only if the template keyword shows up
    in the resulting emails. Cleans up all SNS resources at the end.

    Relies on module globals: test_dict, alarm_uuid_list (assumed initialized
    at module level — TODO confirm).
    """
    global my_sns_topic_uuid, email_endpoint_uuid, email_platform_uuid, event_template_uuid, \
        alarm_template_uuid, alarm_uuid_list, test_dict
    smtp_server = 'smtp.zstack.io'
    pop_server = 'pop3.zstack.io'
    smtp_port = 25
    username = '******'
    password = '******'
    email_platform_name = 'Alarm_email'
    email_platform = zwt_ops.create_sns_email_platform(smtp_server, smtp_port,
                                                       email_platform_name,
                                                       username, password)
    email_platform_uuid = email_platform.uuid
    try:
        zwt_ops.validate_sns_email_platform(email_platform_uuid)
    except:
        test_util.test_fail(
            'Validate SNS Email Platform Failed, Email Plarform: %s' %
            email_platform_uuid)
    email_endpoint_uuid = zwt_ops.create_sns_email_endpoint(
        username, 'test_qa', email_platform_uuid).uuid
    my_sns_topic = zwt_ops.create_sns_topic('my_sns_topic')
    my_sns_topic_uuid = my_sns_topic.uuid
    zwt_ops.subscribe_sns_topic(my_sns_topic_uuid, email_endpoint_uuid)
    # create alarm — one alarm per image metric, all publishing to the topic.
    namespace = 'ZStack/Image'
    greater_than_or_equal_to = 'GreaterThanOrEqualTo'
    greater_than = 'GreaterThan'
    actions = [{"actionUuid": my_sns_topic_uuid, "actionType": "sns"}]
    period = 10
    threshold_1 = 1
    threshold_3 = 3
    threshold_10 = 10
    threshold_50 = 50
    total_image_count = 'TotalImageCount'
    total_image_count_alarm_uuid = zwt_ops.create_alarm(
        greater_than_or_equal_to, period, threshold_3, namespace,
        total_image_count, name='total-count-image', repeat_interval=600,
        actions=actions).uuid
    alarm_uuid_list.append(total_image_count_alarm_uuid)
    ready_image_count = 'ReadyImageCount'
    ready_image_count_alarm_uuid = zwt_ops.create_alarm(
        greater_than_or_equal_to, period, threshold_3, namespace,
        ready_image_count, name='ready_image_count', repeat_interval=600,
        actions=actions).uuid
    alarm_uuid_list.append(ready_image_count_alarm_uuid)
    ready_image_in_percent = 'ReadyImageInPercent'
    ready_image_in_percent_alarm_uuid = zwt_ops.create_alarm(
        greater_than_or_equal_to, period, threshold_1, namespace,
        ready_image_in_percent, name='ready_image_in_percent',
        repeat_interval=600, actions=actions).uuid
    alarm_uuid_list.append(ready_image_in_percent_alarm_uuid)
    root_volume_template_count = 'RootVolumeTemplateCount'
    root_volume_template_count_alarm_uuid = zwt_ops.create_alarm(
        greater_than_or_equal_to, period, threshold_3, namespace,
        root_volume_template_count, name='root_volume_template_count',
        repeat_interval=600, actions=actions, ).uuid
    alarm_uuid_list.append(root_volume_template_count_alarm_uuid)
    root_volume_template_in_percent = 'RootVolumeTemplateInPercent'
    root_volume_template_in_percent_alarm_uuid = zwt_ops.create_alarm(
        greater_than, period, threshold_1, namespace,
        root_volume_template_in_percent, name='root_volume_template_in_percent',
        repeat_interval=600, actions=actions).uuid
    alarm_uuid_list.append(root_volume_template_in_percent_alarm_uuid)
    data_volume_template_count = 'DataVolumeTemplateCount'
    data_volume_template_count_alarm_uuid = zwt_ops.create_alarm(
        greater_than_or_equal_to, period, threshold_3, namespace,
        data_volume_template_count, name='data_volume_template_count',
        repeat_interval=600, actions=actions).uuid
    alarm_uuid_list.append(data_volume_template_count_alarm_uuid)
    data_volume_template_in_percent = 'DataVolumeTemplateInPercent'
    data_volume_template_in_percent_alarm_uuid = zwt_ops.create_alarm(
        greater_than, period, threshold_1, namespace,
        data_volume_template_in_percent, name='data_volume_template_in_percent',
        repeat_interval=600, actions=actions).uuid
    alarm_uuid_list.append(data_volume_template_in_percent_alarm_uuid)
    iso_count = 'ISOCount'
    iso_count_alarm_uuid = zwt_ops.create_alarm(greater_than_or_equal_to,
                                                period, threshold_3, namespace,
                                                iso_count, name='iso_count',
                                                repeat_interval=600,
                                                actions=actions).uuid
    alarm_uuid_list.append(iso_count_alarm_uuid)
    iso_in_percent = 'ISOInPercent'
    iso_in_percent_alarm_uuid = zwt_ops.create_alarm(greater_than, period,
                                                     threshold_1, namespace,
                                                     iso_in_percent,
                                                     name='iso_in_percent',
                                                     repeat_interval=600,
                                                     actions=actions).uuid
    alarm_uuid_list.append(iso_in_percent_alarm_uuid)
    # create Image — enough templates/ISOs to cross every threshold above.
    image_name = os.environ.get('imageName_s')
    l3_name = os.environ.get('l3VlanNetworkName1')
    vm_name = 'multihost_basic_vm'
    vm = test_stub.create_vm(vm_name, image_name, l3_name)
    test_dict.add_vm(vm)
    volume = test_stub.create_volume()
    test_dict.add_volume(volume)
    volume.attach(vm)
    zone_uuid = vm.get_vm().zoneUuid
    root_volume_uuid = test_lib.lib_get_root_volume_uuid(vm.get_vm())
    bs_uuid_list = test_lib.lib_get_backup_storage_uuid_list_by_zone(zone_uuid)
    image_option = test_util.ImageOption()
    image_option.set_root_volume_uuid(root_volume_uuid)
    image_option.set_format('qcow2')
    image_option.set_backup_storage_uuid_list(bs_uuid_list)
    # image_option.set_mediaType('ISO')
    for i in range(threshold_3):
        image_option.set_name('root_volume_template_for_test_' + str(i))
        root_volume_template = zstack_image_header.ZstackTestImage()
        root_volume_template.set_creation_option(image_option)
        root_volume_template.create()
        test_dict.add_image(root_volume_template)
        iso = test_stub.add_test_minimal_iso("iso_for_test_" + str(i))
        test_dict.add_image(iso)
    # Allow time for the alarms to fire and the emails to be delivered.
    time.sleep(30)
    # before change template — default-template emails must exist per metric.
    flag = zwt_ops.check_sns_email(pop_server, username, password,
                                   total_image_count,
                                   total_image_count_alarm_uuid)
    if flag != 1:
        test_util.test_fail('No send event email')
    flag = zwt_ops.check_sns_email(pop_server, username, password,
                                   ready_image_count,
                                   ready_image_count_alarm_uuid)
    if flag != 1:
        test_util.test_fail('No send event email')
    flag = zwt_ops.check_sns_email(pop_server, username, password,
                                   ready_image_in_percent,
                                   ready_image_in_percent_alarm_uuid)
    if flag != 1:
        test_util.test_fail('No send event email')
    flag = zwt_ops.check_sns_email(pop_server, username, password,
                                   root_volume_template_count,
                                   root_volume_template_count_alarm_uuid)
    if flag != 1:
        test_util.test_fail('No send event email')
    flag = zwt_ops.check_sns_email(pop_server, username, password,
                                   root_volume_template_in_percent,
                                   root_volume_template_in_percent_alarm_uuid)
    if flag != 1:
        test_util.test_fail('No send event email')
    flag = zwt_ops.check_sns_email(pop_server, username, password, iso_count,
                                   iso_count_alarm_uuid)
    if flag != 1:
        test_util.test_fail('No send event email')
    flag = zwt_ops.check_sns_email(pop_server, username, password,
                                   iso_in_percent, iso_in_percent_alarm_uuid)
    if flag != 1:
        test_util.test_fail('No send event email')
    # The custom-template keyword must NOT appear yet — templates not installed.
    alarm_keywords = 'TemplateForAlarmOn'
    if zwt_ops.check_keywords_in_email(pop_server, username, password,
                                       alarm_keywords,
                                       total_image_count_alarm_uuid):
        test_util.test_fail('email already exsist before test')
    if zwt_ops.check_keywords_in_email(pop_server, username, password,
                                       alarm_keywords,
                                       ready_image_count_alarm_uuid):
        test_util.test_fail('email already exsist before test')
    if zwt_ops.check_keywords_in_email(pop_server, username, password,
                                       alarm_keywords,
                                       ready_image_in_percent_alarm_uuid):
        test_util.test_fail('email already exsist before test')
    if zwt_ops.check_keywords_in_email(pop_server, username, password,
                                       alarm_keywords,
                                       root_volume_template_count_alarm_uuid):
        test_util.test_fail('email already exsist before test')
    if zwt_ops.check_keywords_in_email(
            pop_server, username, password, alarm_keywords,
            root_volume_template_in_percent_alarm_uuid):
        test_util.test_fail('email already exsist before test')
    if zwt_ops.check_keywords_in_email(pop_server, username, password,
                                       alarm_keywords,
                                       data_volume_template_count_alarm_uuid):
        test_util.test_fail('email already exsist before test')
    if zwt_ops.check_keywords_in_email(
            pop_server, username, password, alarm_keywords,
            data_volume_template_in_percent_alarm_uuid):
        test_util.test_fail('email already exsist before test')
    if zwt_ops.check_keywords_in_email(pop_server, username, password,
                                       alarm_keywords, iso_count_alarm_uuid):
        test_util.test_fail('email already exsist before test')
    if zwt_ops.check_keywords_in_email(pop_server, username, password,
                                       alarm_keywords,
                                       iso_in_percent_alarm_uuid):
        test_util.test_fail('email already exsist before test')
    # Install custom alarm/event text templates carrying unique keywords.
    application_platform_type = 'Email'
    alarm_template_name = 'my-alarm-template'
    alarm_template = '${ALARM_NAME} Change status to ${ALARM_CURRENT_STATUS}' \
                     'ALARM_UUID:${ALARM_UUID}' \
                     'keyword1:ThisWordIsKeyWord' \
                     'keyword2:TemplateForAlarmOn' \
                     '(Using for template changes email check)'
    alarm_template_uuid = zwt_ops.create_sns_text_template(
        alarm_template_name, application_platform_type, alarm_template,
        default_template=False).uuid
    event_template_name = 'my-event-template'
    event_keywords = 'TemplateForEventHappened'
    event_template = '${EVENT_NAME} IN ${EVENT_NAMESPACE}' \
                     'keyword1:ThisWordIsKeyWord' \
                     'keyword2:TemplateForEventHappened' \
                     'keyword3{PARAM_EVENT_SUBSCRIPTION_UUID}' \
                     '(Using for template changes email check)'
    event_template_uuid = zwt_ops.create_sns_text_template(
        event_template_name, application_platform_type, event_template,
        default_template=True).uuid
    # test update text template
    zwt_ops.update_sns_text_template(alarm_template_uuid,
                                     description='this is a new description',
                                     default_template=True)
    cond = res_ops.gen_query_conditions('uuid', '=', alarm_template_uuid)
    inv = res_ops.query_resource(res_ops.SNS_TEXT_TEMPLATE, cond)[0]
    if inv.defaultTemplate == False or inv.description != 'this is a new description':
        test_util.test_fail('change template fail')
    # Trip the data-volume-template alarms now that custom templates are active.
    for i in range(threshold_3):
        data_volume_template = volume.create_template(
            bs_uuid_list, name="vol_temp_for_volume_test_" + str(i))
        test_dict.add_image(data_volume_template)
    # wait for reboot and send email
    time.sleep(30)
    test_lib.lib_robot_cleanup(test_dict)
    zwt_ops.delete_sns_text_template(alarm_template_uuid)
    zwt_ops.delete_sns_text_template(event_template_uuid)
    for alarm_uuid in alarm_uuid_list:
        zwt_ops.delete_alarm(alarm_uuid)
    zwt_ops.delete_sns_topic(my_sns_topic_uuid)
    zwt_ops.delete_sns_application_endpoint(email_endpoint_uuid)
    zwt_ops.delete_sns_application_platform(email_platform_uuid)
    # The custom-template keyword must now be present in both alarm emails.
    if zwt_ops.check_keywords_in_email(
            pop_server, username, password, alarm_keywords,
            data_volume_template_count_alarm_uuid) and zwt_ops.check_keywords_in_email(
            pop_server, username, password, alarm_keywords,
            data_volume_template_in_percent_alarm_uuid):
        test_util.test_pass('success check all keywords in the email')
    else:
        test_util.test_fail('cannt check all mail')
def query_vmnic(ip):
    """Query VM nics whose ip equals *ip*; returns the matching inventories."""
    ip_condition = res_ops.gen_query_conditions('ip', '=', ip)
    return res_ops.query_resource(res_ops.VM_NIC, ip_condition)
def test():
    """Cold-migrate a stopped VM (root + detached data volume) that has
    snapshots, then verify the snapshot tree still works after migration.

    Needs at least two active hosts. Creates a snapshot on a data volume,
    stops the VM, migrates root and data volumes to another host, restarts,
    then exercises snapshot create/revert/branch/delete on the migrated
    volumes.
    """
    if test_lib.lib_get_active_host_number() < 2:
        test_util.test_fail(
            'Not available host to do maintenance, since there are not 2 hosts')
    vm = test_stub.create_vm(vm_name='migrate_stopped_vm_with_snapshot')
    host_uuid = vm.get_vm().hostUuid
    root_volume_uuid = test_lib.lib_get_root_volume_uuid(vm.get_vm())
    test_obj_dict.add_vm(vm)
    test_util.test_dsc('Create volume for snapshot testing')
    disk_offering = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('smallDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_name('volume for snapshot testing')
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    #make sure utility vm is starting and running
    vm.check()
    # Attach/detach so the volume is initialized but not in use during snapshot.
    volume.attach(vm)
    volume.detach()
    test_util.test_dsc('create snapshot and check')
    snapshots = test_obj_dict.get_volume_snapshot(volume.get_volume().uuid)
    snapshots.set_utility_vm(vm)
    snapshots.create_snapshot('create_snapshot1')
    snapshots.check()
    vm.stop()
    # Pick a random other host that is enabled and connected.
    conditions = res_ops.gen_query_conditions('uuid', '!=', host_uuid)
    conditions = res_ops.gen_query_conditions('state', '=', host_header.ENABLED,
                                              conditions)
    conditions = res_ops.gen_query_conditions('status', '=',
                                              host_header.CONNECTED, conditions)
    rest_hosts = res_ops.query_resource(res_ops.HOST, conditions)
    target_host = random.choice(rest_hosts)
    test_util.test_dsc('migrate vm and volumes')
    vol_ops.migrate_volume(root_volume_uuid, target_host.uuid)
    vol_ops.migrate_volume(volume.get_volume().uuid, target_host.uuid)
    vm.start()
    vm.check()
    snapshots.check()
    # Exercise the snapshot tree after migration: branch twice off snapshot1.
    snapshot1 = snapshots.get_current_snapshot()
    snapshots.create_snapshot('create_snapshot2')
    snapshots.check()
    snapshots.use_snapshot(snapshot1)
    snapshots.create_snapshot('create_snapshot1.1.1')
    snapshots.check()
    snapshots.use_snapshot(snapshot1)
    snapshots.create_snapshot('create_snapshot1.2.1')
    snapshots.check()
    snapshots.delete()
    test_obj_dict.rm_volume_snapshot(snapshots)
    volume.check()
    volume.delete()
    test_obj_dict.rm_volume(volume)
    vm.destroy()
    test_util.test_pass('Create Snapshot with VM migration test Success')
def test():
    """Exercise the disaster (remote) image-store backup storage workflow.

    Adds a disaster image-store BS, creates a data-volume template on it,
    verifies the template's media type, the 'remote' system tag, and that a
    volume can be created from it; then recovers the image to the local BS
    and checks the recovery status/media type. Ends with a negative test:
    recovering the same image twice must raise an error (message matched by
    the Chinese word for 'contains').
    """
    global disaster_bs_uuid
    global data_volume_uuid
    global image_uuid
    # disasterBsUrls format assumed: 'user:password@hostname' — TODO confirm.
    disasterBsUrls = os.environ.get('disasterBsUrls')
    name = 'disaster_bs'
    description = 'backup storage for disaster'
    url = '/zstack_bs'
    sshport = 22
    hostname = disasterBsUrls.split('@')[1]
    username = disasterBsUrls.split(':')[0]
    password = disasterBsUrls.split('@')[0].split(':')[1]
    test_util.test_logger(
        'Disaster bs server hostname is %s, username is %s, password is %s' %
        (hostname, username, password))
    #AddDisasterImageStoreBackupStorage
    disaster_backup_storage = bs_ops.add_disaster_image_store_bs(
        url, hostname, username, password, sshport, name, description)
    disaster_bs_uuid = disaster_backup_storage.uuid
    #AttachBackupStorageToZone
    zone_uuid = res_ops.query_resource(res_ops.ZONE)[0].uuid
    bs_ops.attach_backup_storage(disaster_bs_uuid, zone_uuid)
    #Create data volume
    cond = res_ops.gen_query_conditions(
        'name', '=', os.environ.get('nfsPrimaryStorageName'))
    primary_storage_uuid = res_ops.query_resource(res_ops.PRIMARY_STORAGE,
                                                  cond)[0].uuid
    disk_offering_uuid = res_ops.query_resource(res_ops.DISK_OFFERING)[0].uuid
    cond = res_ops.gen_query_conditions(
        'name', '=', os.environ.get('imageStoreBackupStorageName'))
    local_bs_uuid = res_ops.query_resource(res_ops.BACKUP_STORAGE,
                                           cond)[0].uuid
    volume_option = test_util.VolumeOption()
    volume_option.set_disk_offering_uuid(disk_offering_uuid)
    volume_option.set_name('data_volume_for_data_protect_test')
    volume_option.set_primary_storage_uuid(primary_storage_uuid)
    data_volume = vol_ops.create_volume_from_offering(volume_option)
    #CreateDataVolumeTemplateFromVolume
    data_volume_uuid = data_volume.uuid
    image_option = test_util.ImageOption()
    image_option.set_data_volume_uuid(data_volume_uuid)
    image_option.set_name('create_data_iso_to_image_store')
    image_option.set_backup_storage_uuid_list([disaster_bs_uuid])
    image = img_ops.create_data_volume_template(image_option)
    disaster_bs_image_uuid = image.uuid
    #Check if the image's media_type correct
    cond = res_ops.gen_query_conditions('uuid', '=', disaster_bs_image_uuid)
    media_type = res_ops.query_resource(res_ops.IMAGE, cond)[0].mediaType
    if media_type != 'DataVolumeTemplate':
        test_util.test_fail(
            'Wrong image media type, the expect is "DataVolumeTemplate", the real is "%s"'
            % media_type)
    #Check if create data volume with volume template success
    cond = res_ops.gen_query_conditions(
        'name', '=', os.environ.get('nfsPrimaryStorageName'))
    ps_uuid = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)[0].uuid
    new_volume = vol_ops.create_volume_from_template(disaster_bs_image_uuid,
                                                     ps_uuid)
    vol_ops.delete_volume(new_volume.uuid)
    #Check if the system tag of the image in disaster bs is 'remote'
    cond = res_ops.gen_query_conditions('resourceUuid', '=',
                                        disaster_bs_image_uuid)
    system_tag = res_ops.query_resource(res_ops.SYSTEM_TAG, cond)[0]
    if system_tag.tag != "remote":
        test_util.test_fail(
            "Here isn't 'remote' system tag for image in data protect bs")
    #Check recovery data volume
    recovery_image = img_ops.recovery_image_from_image_store_backup_storage(
        local_bs_uuid, disaster_bs_uuid, disaster_bs_image_uuid)
    #Check the process status when recoverying image
    # NOTE(review): the recovery progress is read from the 8th '::'-separated
    # field of the local BS's system tag — confirm the tag layout.
    cond = res_ops.gen_query_conditions('resourceUuid', '=', local_bs_uuid)
    system_tag = res_ops.query_resource(res_ops.SYSTEM_TAG, cond)[0].tag
    status = system_tag.split('::')[7]
    if status not in ['running', 'success']:
        test_util.test_fail('Error status for recovery image, status: %s' %
                            status)
    #Check if recovery data volume success
    if recovery_image.backupStorageRefs[0].backupStorageUuid != local_bs_uuid:
        test_util.test_fail('Recovery image failed, image uuid is %s' %
                            recovery_image.uuid)
    if recovery_image.mediaType != 'DataVolumeTemplate':
        test_util.test_fail(
            'Wrong image media type after recovery, the expect is "DataVolumeTemplate", the real is "%s"'
            % media_type)
    image_uuid = recovery_image.uuid
    # NOTE(review): if the expected error does not occur (or lacks the keyword)
    # the function ends without test_pass/test_fail — confirm intended.
    try:
        #Try to recovery the same image again, it's negative test
        recovery_image = img_ops.recovery_image_from_image_store_backup_storage(
            local_bs_uuid, disaster_bs_uuid, disaster_bs_image_uuid)
    except Exception, e:
        if unicode(e).encode("utf-8").find('包含') != -1:
            test_util.test_pass(
                'Try to recovery the same image again and get the error info expectly: %s'
                % unicode(e).encode("utf-8"))
def test():
    """Verify image-cache creation and `cleanup_imagecache_on_primary_storage`.

    Flow: add an image, boot a VM from it on one host (populating the image
    cache), destroy the VM, boot a second VM on a *different* host, then delete
    the image and clean the cache, polling until the cache files disappear.
    Branches on primary-storage type (Ceph skipped, SharedBlock/AliyunNAS/
    local/NFS paths differ).
    """
    # Need at least one connected backup storage to add the test image.
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond,
                                        None, fields=['uuid'])
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")

    # Add a root-volume template image used to seed the image cache.
    image_option = test_util.ImageOption()
    image_option.set_name('test_image_cache_cleanup')
    image_option.set_format('qcow2')
    image_option.set_mediaType('RootVolumeTemplate')
    image_option.set_url(os.environ.get('imageUrl_s'))
    image_option.set_backup_storage_uuid_list([bss[0].uuid])
    new_image = zstack_image_header.ZstackTestImage()
    new_image.set_creation_option(image_option)
    new_image.add_root_volume_template()

    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    # Pick any enabled + connected host for the first VM.
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid

    # VM1: boot from the new image so the host's image cache gets populated.
    vm_creation_option = test_util.VmOption()
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(new_image.image.uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('test_image_cache_cleanup_vm1')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    test_obj_dict.add_vm(vm)
    vm.check()
    # Remember where the cache was created before the VM goes away.
    host = test_lib.lib_find_host_by_vm(vm.get_vm())
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    vm.destroy()
    vm.expunge()

    # VM2: same image, but forced onto a different host than VM1's.
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('uuid', '!=', host.uuid, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_name('test_image_cache_cleanup_vm2')
    vm2 = test_vm_header.ZstackTestVm()
    vm2.set_creation_option(vm_creation_option)
    vm2.create()
    host2 = test_lib.lib_find_host_by_vm(vm2.get_vm())
    test_obj_dict.add_vm(vm2)

    # Attach a data volume to VM2; keeps VM2's storage busy during cleanup.
    disk_offering = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('smallDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume1 = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume1)
    volume1.attach(vm2)

    # Ceph does not keep a host-local image cache, so nothing to verify.
    if ps.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
        test_util.test_skip(
            'ceph is not directly using image cache, skip test.')
    # Cache location differs per primary-storage type.
    if ps.type == "SharedBlock":
        path = "/dev/" + ps.uuid + '/' + new_image.image.uuid
        if not test_lib.lib_check_sharedblock_file_exist(host, path):
            test_util.test_fail('image cache is expected to exist')
    else:
        if ps.type == "AliyunNAS":
            image_cache_path = "%s/datas/imagecache/template/%s" % (
                ps.mountPath, new_image.image.uuid)
        else:
            image_cache_path = "%s/imagecache/template/%s" % (
                ps.mountPath, new_image.image.uuid)
        if not test_lib.lib_check_file_exist(host, image_cache_path):
            test_util.test_fail('image cache is expected to exist')
    # ImageStore BS additionally maintains a zstore cache on the PS mount.
    if bss[0].type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
        image_cache_path = "%s/zstore-cache/%s" % (ps.mountPath,
                                                   new_image.image.uuid)
        if not test_lib.lib_check_file_exist(host, image_cache_path):
            test_util.test_fail('image cache is expected to exist')

    # Remove the image, then ask the PS to clean its cache.
    new_image.delete()
    new_image.expunge()
    ps_ops.cleanup_imagecache_on_primary_storage(ps.uuid)

    # Local storage: poll until the qcow2 cache file disappears (max ~30s).
    if ps.type == inventory.LOCAL_STORAGE_TYPE:
        count = 0
        while True:
            image_cache_path = "%s/imagecache/template/%s/%s.qcow2" % (
                ps.mountPath, new_image.image.uuid, new_image.image.uuid)
            if not test_lib.lib_check_file_exist(host, image_cache_path):
                break
            elif count > 5:
                test_util.test_fail('image cache is expected to be deleted')
            test_util.test_logger('check %s times: image cache still exist' % (count))
            time.sleep(5)
            count += 1

    # Drop VM2 as well, then clean the cache again for the remaining checks.
    vm2.destroy()
    vm2.expunge()
    ps_ops.cleanup_imagecache_on_primary_storage(ps.uuid)

    if ps.type == "SharedBlock":
        image_cache_path = "/dev/" + ps.uuid + '/' + new_image.image.uuid
        count = 0
        while True:
            if not test_lib.lib_check_sharedblock_file_exist(
                    host, image_cache_path):
                break
            elif count > 5:
                test_util.test_fail('image cache is expected to be deleted')
            test_util.test_logger('check %s times: image cache still exist' % (count))
            time.sleep(10)
            count += 1
        # test_pass raises/exits, so the generic checks below are skipped
        # for SharedBlock.
        test_util.test_pass('imagecache cleanup Pass.')

    # Generic (file-based PS) check: template cache dir must be gone.
    count = 0
    while True:
        image_cache_path = "%s/imagecache/template/%s" % (ps.mountPath,
                                                          new_image.image.uuid)
        if not test_lib.lib_check_file_exist(host, image_cache_path):
            break
        elif count > 5:
            test_util.test_fail('image cache is expected to be deleted')
        test_util.test_logger('check %s times: image cache still exist' % (count))
        time.sleep(5)
        count += 1

    # And the zstore cache entry must be gone too.
    count = 0
    while True:
        image_cache_path = "%s/zstore-cache/%s" % (ps.mountPath,
                                                   new_image.image.uuid)
        if not test_lib.lib_check_file_exist(host, image_cache_path):
            break
        elif count > 5:
            test_util.test_fail('image cache is expected to be deleted')
        test_util.test_logger('check %s times: image cache still exist' % (count))
        time.sleep(5)
        count += 1
    test_util.test_pass('imagecache cleanup Pass.')
def test():
    """End-to-end install test: deploy ZStack MN inside a CentOS 7.2 VM,
    then build a minimal cloud (zone/cluster/host/PS/BS/image/offering/
    L2/L3) through that MN and boot a VM on it.

    Globals are exported so the module-level error/cleanup handlers can
    tear down whatever was created before a failure.
    """
    global vm_inv
    global zone_inv
    global cluster_inv
    global host_inv
    global ps_inv
    global bs_inv
    global image_inv
    global vmoffering_inv
    global l2_inv
    global l3_inv
    global l3_uuid
    global image_uuid  # fix: was declared twice; one declaration suffices
    global vmoffering_uuid
    global vm_ip

    test_util.test_dsc(
        'Create test vm to test zstack install MN on centos7.1 and add the HOST'
    )
    # Boot the carrier VM from the CentOS 7.2 base image.
    conditions = res_ops.gen_query_conditions(
        'name', '=', os.environ.get('imageNameBase_c72'))
    image = res_ops.query_resource(res_ops.IMAGE, conditions)[0]
    vm_inv = create_vm(image)
    time.sleep(100)  # wait for the guest OS to finish booting

    iso_path = os.environ.get('iso_path')
    upgrade_script_path = os.environ.get('upgradeScript')
    test_util.test_dsc('Install zstack with -o')
    vm_ip = vm_inv.vmNics[0].ip
    test_stub.make_ssh_no_password(vm_ip, tmp_file)

    # Bring the pre-installed MN in the image up to date before reinstalling.
    test_util.test_dsc('Upgrade master iso')
    test_util.test_logger('Update MN IP')
    test_stub.update_mn_hostname(vm_ip, tmp_file)
    test_stub.update_mn_ip(vm_ip, tmp_file)
    test_stub.start_mn(vm_ip, tmp_file)
    test_stub.update_iso(vm_ip, tmp_file, iso_path, upgrade_script_path)

    test_util.test_dsc('Install zstack with default path')
    ssh_cmd = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % vm_ip
    # Remove any previous installation at /usr/local/zstack.
    cmd = '%s "[ -e /usr/local/zstack ] && echo yes || echo no"' % ssh_cmd
    (process_result, cmd_stdout) = test_stub.execute_shell_in_process_stdout(cmd, tmp_file)
    if process_result != 0:
        test_util.test_fail('check /usr/local/zstack fail, cmd_stdout:%s' % cmd_stdout)
    cmd_stdout = cmd_stdout[:-1]  # strip trailing newline from the echo
    if cmd_stdout == "yes":
        cmd = '%s "rm -rf /usr/local/zstack"' % ssh_cmd
        (process_result, cmd_stdout) = test_stub.execute_shell_in_process_stdout(
            cmd, tmp_file)
        if process_result != 0:
            test_util.test_fail('delete /usr/local/zstack fail')

    # Upload the all-in-one bundle and run the installer with -D.
    target_file = '/root/zstack-all-in-one.tgz'
    test_stub.prepare_test_env(vm_inv, target_file)
    ssh_cmd = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % vm_ip
    args = "-D"
    test_util.test_dsc('start install the latest zstack-MN')
    test_stub.execute_install_with_args(ssh_cmd, args, target_file, tmp_file)

    # Build the minimal cloud topology through the freshly installed MN.
    test_util.test_dsc('create zone names is zone1')
    zone_inv = test_stub.create_zone1(vm_ip, tmp_file)
    zone_uuid = zone_inv.uuid
    test_util.test_dsc('create cluster names is clsuter1')
    cluster_inv = test_stub.create_cluster1(vm_ip, zone_uuid, tmp_file)
    cluster_uuid = cluster_inv.uuid
    test_util.test_dsc('add HOST names is HOST1')
    host_inv = test_stub.add_kvm_host1(vm_ip, cluster_uuid, tmp_file)
    host_uuid = host_inv.uuid
    test_util.test_dsc('add ps names is PS1')
    ps_inv = test_stub.create_local_ps(vm_ip, zone_uuid, tmp_file)
    ps_uuid = ps_inv.uuid
    test_stub.attach_ps(vm_ip, ps_uuid, cluster_uuid, tmp_file)
    test_util.test_dsc('add BS names is bs1')
    bs_inv = test_stub.create_sftp_backup_storage(vm_ip, tmp_file)
    bs_uuid = bs_inv.uuid
    test_stub.attach_bs(vm_ip, bs_uuid, zone_uuid, tmp_file)
    test_util.test_dsc('add image names is image1.4')
    image_inv = test_stub.add_image_local(vm_ip, bs_uuid, tmp_file)
    image_uuid = image_inv.uuid
    test_util.test_dsc('add vm instance offering names is 1-1G')
    vmoffering_inv = test_stub.create_vm_offering(vm_ip, tmp_file)
    vmoffering_uuid = vmoffering_inv.uuid

    # Networking: L2 vlan -> attach to cluster -> flat L3 with DNS + IP range.
    test_util.test_dsc('create L2_vlan network names is L2_vlan')
    l2_inv = sce_ops.create_l2_vlan(vm_ip, 'L2_vlan', 'eth0', '2200', zone_uuid)
    l2_uuid = l2_inv.inventory.uuid
    test_util.test_dsc('attach L2 netowrk to cluster')
    sce_ops.attach_l2(vm_ip, l2_uuid, cluster_uuid)
    test_util.test_dsc('create L3_flat_network names is L3_flat_network')
    l3_inv = sce_ops.create_l3(vm_ip, 'l3_flat_network', 'L3BasicNetwork',
                               l2_uuid, 'local.com')
    l3_uuid = l3_inv.inventory.uuid
    l3_dns = '223.5.5.5'
    start_ip = '192.168.109.5'
    end_ip = '192.168.109.200'
    gateway = '192.168.109.1'
    netmask = '255.255.255.0'
    test_util.test_dsc('add DNS and IP_Range for L3_flat_network')
    sce_ops.add_dns_to_l3(vm_ip, l3_uuid, l3_dns)
    sce_ops.add_ip_range(vm_ip, 'IP_range', l3_uuid, start_ip, end_ip,
                         gateway, netmask)
    test_util.test_dsc(
        'query flat provider and attach network service to L3_flat_network')
    provider_name = 'Flat Network Service Provider'
    conditions = res_ops.gen_query_conditions('name', '=', provider_name)
    net_provider_list = sce_ops.query_resource(
        vm_ip, res_ops.NETWORK_SERVICE_PROVIDER, conditions).inventories[0]
    pro_uuid = net_provider_list.uuid
    sce_ops.attach_flat_network_service_to_l3network(vm_ip, l3_uuid, pro_uuid)

    # Final check: a VM can actually be created on the new cloud.
    test_util.test_dsc('create a vm with L3_flat_network')
    new_vm_inv = create_new_vm(image_inv)

    os.system('rm -f %s' % tmp_file)
    sce_ops.destroy_vm(zstack_management_ip, vm_inv.uuid)
    test_util.test_pass(
        'Install ZStack with -o on centos7.2 and create a vmSuccess')
def test():
    """Resource-stack (template) API test: create the same stack 5 times,
    verify the stacks and the resources they declare exist, then delete all
    stacks and verify everything they created is gone.

    Fix: the two final failure messages applied ``%`` to the *return value*
    of ``test_util.test_fail(...)`` instead of to the message string, so the
    counts were never interpolated; the ``%`` now formats the string itself.
    """
    test_util.test_dsc("Test Resource template Apis")
    # Need a usable backup storage and an L3 network to parameterize the stack.
    cond = res_ops.gen_query_conditions('status', '=', 'Connected')
    cond = res_ops.gen_query_conditions('state', '=', 'Enabled', cond)
    bs_queried = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)
    cond = res_ops.gen_query_conditions("category", '=', "Public")
    l3_queried = res_ops.query_resource(res_ops.L3_NETWORK, cond)
    if len(l3_queried) == 0:
        # Fall back to a private network when no public one exists.
        cond = res_ops.gen_query_conditions("category", '=', "Private")
        l3_queried = res_ops.query_resource(res_ops.L3_NETWORK, cond)

    resource_stack_option = test_util.ResourceStackOption()
    resource_stack_option.set_name("Create_VM")
    # Stack template: one VM + one instance offering + one image.
    templateContent = '''
{
    "ZStackTemplateFormatVersion": "2018-06-18",
    "Description": "Just create a flat network & VM",
    "Parameters": {
        "L3NetworkUuid": {
            "Type": "String",
            "Label": "三层网络"
        },
        "BackupStorageUuid": {
            "Type": "CommaDelimitedList",
            "Label": "镜像服务器"
        }
    },
    "Resources": {
        "VmInstance": {
            "Type": "ZStack::Resource::VmInstance",
            "Properties": {
                "name": "VM-STACK",
                "instanceOfferingUuid": {"Fn::GetAtt": ["InstanceOffering", "uuid"]},
                "imageUuid": {"Fn::GetAtt": ["Image", "uuid"]},
                "l3NetworkUuids": [{"Ref": "L3NetworkUuid"}]
            }
        },
        "InstanceOffering": {
            "Type": "ZStack::Resource::InstanceOffering",
            "Properties": {
                "name": "1G-1CPU-STACK",
                "description": "测试创建计算规格",
                "cpuNum": 1,
                "memorySize": 1073741824
            }
        },
        "Image": {
            "Type": "ZStack::Resource::Image",
            "Properties": {
                "name": "IMAGE-STACK",
                "backupStorageUuids": {"Ref": "BackupStorageUuid"},
                "url": "file:///opt/zstack-dvd/zstack-image-1.4.qcow2",
                "format": "qcow2"
            }
        }
    },
    "Outputs": {
        "VmInstance": {
            "Value": {
                "Ref": "VmInstance"
            }
        }
    }
}
'''
    # 1. create resource stack 5 times with the same name
    parameter = '{"L3NetworkUuid":"%s","BackupStorageUuid":"%s"}' % (
        l3_queried[0].uuid, bs_queried[0].uuid)
    for i in range(5):
        resource_stack_option.set_templateContent(templateContent)
        resource_stack_option.set_parameters(parameter)
        # Preview is exercised for API coverage; its result is not asserted.
        preview_resource_stack = resource_stack_ops.preview_resource_stack(
            resource_stack_option)
        resource_stack = resource_stack_ops.create_resource_stack(
            resource_stack_option)

    # 2. query resource stack and every resource kind the template creates
    cond = res_ops.gen_query_conditions('name', '=', 'Create_VM')
    resource_stack_queried = res_ops.query_resource(res_ops.RESOURCE_STACK, cond)
    cond = res_ops.gen_query_conditions('name', '=', 'VM-STACK')
    vm_queried = res_ops.query_resource(res_ops.VM_INSTANCE, cond)
    cond = res_ops.gen_query_conditions('name', '=', '1G-1CPU-STACK')
    instance_offering_queried = res_ops.query_resource(
        res_ops.INSTANCE_OFFERING, cond)
    cond = res_ops.gen_query_conditions('name', '=', 'IMAGE-STACK')
    image_queried = res_ops.query_resource(res_ops.IMAGE, cond)
    test_util.test_logger(len(image_queried))
    if len(resource_stack_queried) != 5:
        test_util.test_fail("Fail to query 5 resource stacks")
    for i in range(5):
        if resource_stack_queried[i].status == 'Created':
            # A 'Created' stack implies all 5 copies of each resource exist.
            if len(vm_queried) != 5 or len(
                    instance_offering_queried) != 5 or len(image_queried) != 5:
                test_util.test_fail(
                    "Fail to create all resource when resource stack status is Created"
                )

    # 5. delete resource stack and verify cascading deletion of resources
    for i in range(5):
        resource_stack_ops.delete_resource_stack(
            resource_stack_queried[i].uuid)
    cond = res_ops.gen_query_conditions('name', '=', 'Create_VM')
    resource_stack_queried = res_ops.query_resource(res_ops.RESOURCE_STACK, cond)
    cond = res_ops.gen_query_conditions('name', '=', 'VM-STACK')
    vm_queried = res_ops.query_resource(res_ops.VM_INSTANCE, cond)
    cond = res_ops.gen_query_conditions('name', '=', '1G-1CPU-STACK')
    instance_offering_queried = res_ops.query_resource(
        res_ops.INSTANCE_OFFERING, cond)
    cond = res_ops.gen_query_conditions('name', '=', 'IMAGE-STACK')
    image_queried = res_ops.query_resource(res_ops.IMAGE, cond)
    if len(resource_stack_queried) != 0:
        # fix: '%' now formats the message instead of test_fail's return value
        test_util.test_fail(
            "Fail to delete all resource stack and there are still %s resource stacks not deleted."
            % len(resource_stack_queried))
    elif len(vm_queried) != 0 or len(instance_offering_queried) != 0 or len(
            image_queried) != 0:
        test_util.test_fail(
            "Fail to delete resource when resource stack is deleted and there are still %s vm, %s instance offering, %s image not deleted."
            % (len(vm_queried), len(instance_offering_queried),
               len(image_queried)))
    test_util.test_pass(
        'Create Resource Stack With The Same Name Test Success')
def test():
    """Run ``zstack-ctl collect_log`` and verify the expected log files exist
    for every SFTP backup storage and every management node.

    Relies on module-level TEMPT_FOLDER / MEVOCO_LOG_FOLDER / MEVOCO_LOG_PATH
    and the helper find_mevoco_log_folder_name() defined elsewhere in the file.
    """
    global MEVOCO_LOG_FOLDER, MEVOCO_LOG_PATH
    # Non-mevoco builds use a different collected-folder naming pattern.
    if not test_lib.lib_check_version_is_mevoco():
        MEVOCO_LOG_FOLDER = r"collect-log-zstack_*"
        # NOTE(review): assigned but never read below — looks vestigial.
        MEVOCO_LOG_FOLDER_PATTERN = "collect-log-zstack"

    # Step1: clean env below TEMPT_FOLDER and run collect log
    retVal = os.system(" cd " + TEMPT_FOLDER + "; rm -rf " + MEVOCO_LOG_FOLDER
                       + "; zstack-ctl collect_log")
    if retVal != 0:
        test_util.test_logger("os.system return value: %d" % (retVal))
        test_util.test_fail('run zstack-ctl collect_log failed.')

    # Step2: refresh MEVOCO_LOG_FOLDER value to real one below TEMPT_FOLDER
    find_mevoco_log_folder_name()

    # Step3: verify sftpbackupstorage logs are saved
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond,
                                        None, fields=['uuid', 'type'])
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")
    for bs in bss:
        #test_util.test_logger(bs.dump())
        if bs.type == "SftpBackupStorage":
            #bs_sftp_cond = res_ops.gen_query_conditions("type", "=", "SftpBackupStorage")
            #hostIP = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_sftp_cond, None, fields=["hostname"])
            hostIP = test_lib.lib_get_backup_storage_host(bs.uuid)
            # Collected folder is named after the BS host's management IP.
            sftpLogFolderName = "sftp_bs-" + hostIP.managementIp
            dmesgFilePath = MEVOCO_LOG_PATH + sftpLogFolderName + "/dmesg"
            hostInfoFilePath = MEVOCO_LOG_PATH + sftpLogFolderName + "/host_info"
            messagesFilePath = MEVOCO_LOG_PATH + sftpLogFolderName + "/messages"
            sftpbackupstorageFilePath = MEVOCO_LOG_PATH + sftpLogFolderName + "/zstack-sftpbackupstorage.log"
            if not os.path.exists(dmesgFilePath):
                test_util.test_fail(dmesgFilePath + ' is not exist.')
            if not os.path.exists(hostInfoFilePath):
                test_util.test_fail(hostInfoFilePath + ' is not exist.')
            if not os.path.exists(messagesFilePath):
                test_util.test_fail(messagesFilePath + ' is not exist.')
            if not os.path.exists(sftpbackupstorageFilePath):
                test_util.test_fail(sftpbackupstorageFilePath + ' is not exist.')
        elif bs.type == "ImageStoreBackupStorage":
            # ImageStore logs are intentionally not checked here.
            pass

    # Step4: verify hosts logs are saved (currently disabled)
    #conditions = res_ops.gen_query_conditions('state', '=', host_header.ENABLED)
    #conditions = res_ops.gen_query_conditions('status', '=', host_header.CONNECTED, conditions)
    #all_hosts = res_ops.query_resource(res_ops.HOST, conditions)
    #if len(all_hosts) < 1:
    #    test_util.test_skip('Not available host to check')
    #for host in all_hosts:
    #    hostFolderName = host.managementIp
    #    dmesgFilePath = MEVOCO_LOG_PATH + hostFolderName + "/dmesg"
    #    hostInfoFilePath = MEVOCO_LOG_PATH + hostFolderName + "/host_info"
    #    messagesFilePath = MEVOCO_LOG_PATH + hostFolderName + "/messages"
    #    kvmagentFilePath = MEVOCO_LOG_PATH + hostFolderName + "/zstack-kvmagent.log"
    #    zstackFilePath = MEVOCO_LOG_PATH + hostFolderName + "/zstack.log"
    #    if not os.path.exists(dmesgFilePath):
    #        test_util.test_fail( dmesgFilePath + ' is not exist.')
    #    if not os.path.exists(hostInfoFilePath):
    #        test_util.test_fail( hostInfoFilePath + ' is not exist.')
    #    if not os.path.exists(messagesFilePath):
    #        test_util.test_fail( messagesFilePath + ' is not exist.')
    #    if not os.path.exists(kvmagentFilePath):
    #        test_util.test_fail( kvmagentFilePath + ' is not exist.')
    #    if not os.path.exists(zstackFilePath):
    #        test_util.test_fail( zstackFilePath + 'is not exist.')

    # Step5: verify management node logs are saved
    #conditions = res_ops.gen_query_conditions("status", '=', "Connected")
    all_mn = res_ops.query_resource(res_ops.MANAGEMENT_NODE)
    if len(all_mn) < 1:
        test_util.test_skip('Not available mn to check')
    for mn in all_mn:
        mnHostIP = mn.hostName
        mnFolderName = "management-node-" + mnHostIP
        dmesgFilePath = MEVOCO_LOG_PATH + mnFolderName + "/dmesg"
        hostInfoFilePath = MEVOCO_LOG_PATH + mnFolderName + "/host_info"
        messagesFilePath = MEVOCO_LOG_PATH + mnFolderName + "/messages"
        manageServerPath = MEVOCO_LOG_PATH + mnFolderName + "/management-server.log"
        if not os.path.exists(dmesgFilePath):
            test_util.test_fail(dmesgFilePath + ' is not exist.')
        if not os.path.exists(hostInfoFilePath):
            test_util.test_fail(hostInfoFilePath + ' is not exist.')
        if not os.path.exists(messagesFilePath):
            test_util.test_fail(messagesFilePath + ' is not exist.')
        if not os.path.exists(manageServerPath):
            test_util.test_fail(manageServerPath + ' is not exist.')
def test():
    """HA test (NFS PS, separated storage network): set a VM to NeverStop,
    power off its host, verify the VM is NOT restarted on the original host,
    then bring the host back and verify the VM is running on a different host.

    Globals are exported for the module's error/cleanup handlers.
    """
    global vm
    global test_host
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout
    # Preconditions: NFS primary storage, separated storage network, HA on.
    allow_ps_list = [inventory.NFS_PRIMARY_STORAGE_TYPE]
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)
    test_stub.skip_if_not_storage_network_separate(test_lib.all_scenario_config)
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    test_lib.clean_up_all_vr()
    # Pick a connected host that is NOT the management node's host,
    # so powering it off does not take the MN down.
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    vm.check()

    # Make sure the target host carries no VR, MN or NFS role, so stopping it
    # only affects the test VM.
    vr_hosts = test_stub.get_host_has_vr()
    mn_hosts = test_stub.get_host_has_mn()
    nfs_hosts = test_stub.get_host_has_nfs()
    if not test_stub.ensure_vm_not_on(vm.get_vm().uuid, vm.get_vm().hostUuid,
                                      vr_hosts + mn_hosts + nfs_hosts):
        test_util.test_fail("Not find out a suitable host")

    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    host_port = test_lib.lib_get_host_port(host_ip)
    test_util.test_logger("host %s is disconnecting" % (host_ip))

    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")

    # Map the management IP back to the scenario-file host object.
    host_list = test_stub.get_sce_hosts(test_lib.all_scenario_config,
                                        test_lib.scenario_file)
    for host in host_list:
        if host.ip_ == host_ip:
            test_host = host
            break
    if not test_host:
        test_util.test_fail('there is no host with ip %s in scenario file.' % (host_ip))

    # Power the host off; HA must NOT restart the VM on that same host.
    test_stub.stop_host(test_host, test_lib.all_scenario_config)
    test_stub.check_if_vm_starting_incorrectly_on_original_host(
        vm.get_vm().uuid, host_uuid, max_count=300)

    # Bring the host back and reconnect it in ZStack.
    test_stub.start_host(test_host, test_lib.all_scenario_config)
    test_stub.recover_host_vlan(test_host, test_lib.all_scenario_config,
                                test_lib.deploy_config)
    conditions = res_ops.gen_query_conditions('managementIp', '=', host_ip)
    kvm_host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    host_ops.reconnect_host(kvm_host_uuid)

    # HA should have restarted the VM — on a different host.
    vm.set_state(vm_header.RUNNING)
    vm.check()
    vm.update()
    if test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp == host_ip:
        test_util.test_fail("VM is expected to start running on another host")
    vm.destroy()
    test_util.test_pass('Test checking vm status after graceful stop and start success')
def test():
    """Delete the host a virtual router runs on and verify the VR migrates to
    a surviving host in the cluster (VM1 keeps connectivity, a new VM2 can be
    created on the same L3), then re-add the deleted host.

    Cleanup of dead code: the unused ``curr_deploy_conf`` binding and the
    unused ``host2`` selection loop were removed; behavior is unchanged.
    """
    global host_config
    # Return value was never used; keep the call in case exporting snapshots
    # state as a side effect — TODO confirm whether it can be dropped entirely.
    exp_ops.export_zstack_deployment_config(test_lib.deploy_config)

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    # Pick up host1 (VM placement) and host3 (VR placement / deletion target).
    host1_name = os.environ.get('hostName')
    host1 = res_ops.get_resource(res_ops.HOST, name=host1_name)[0]
    host3_name = os.environ.get('hostName3')
    host3 = res_ops.get_resource(res_ops.HOST, name=host3_name)[0]

    # Need at least one other host in host1's cluster for the VR to land on.
    cond = res_ops.gen_query_conditions('clusterUuid', '=', host1.clusterUuid)
    cluster_hosts = res_ops.query_resource(res_ops.HOST, cond)
    if not len(cluster_hosts) > 1:
        test_util.test_skip(
            'Skip test, since [cluster:] %s did not include more than 1 host'
            % host1.clusterUuid)

    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multizones_basic_vm')
    vm_creation_option.set_host_uuid(host1.uuid)
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3 = res_ops.get_resource(res_ops.L3_NETWORK, name=l3_name)[0]
    vm_creation_option.set_l3_uuids([l3.uuid])
    vm1 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm1)

    # Ensure the VR runs on host3 so deleting host3 forces a VR migration.
    vr_vm_inv = test_lib.lib_find_vr_by_vm(vm1.get_vm())[0]
    vr_host_uuid = vr_vm_inv.hostUuid
    if vr_host_uuid != host3.uuid:
        vm_ops.migrate_vm(vr_vm_inv.uuid, host3.uuid)

    # Remember host3's settings so it can be re-added after deletion.
    host_config.set_cluster_uuid(host3.clusterUuid)
    host_config.set_username(os.environ.get('hostUsername'))
    host_config.set_password(os.environ.get('hostPassword'))
    host_config.set_name(host3_name)
    host_config.set_management_ip(host3.managementIp)
    target_host_uuid = host3.uuid

    test_util.test_dsc("Delete VR VM's host. VR should be migrated to Cluster1")
    host_ops.delete_host(target_host_uuid)
    vm1.check()

    # Using the same L3 to create VM2 to check if VR is working well.
    vm_creation_option.set_host_uuid(None)
    vm2 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm2)
    vm2.check()

    # Restore the deleted host and clean up.
    host_ops.add_kvm_host(host_config)
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Test VR migration when deleting Host is Success')
def test():
    """Host maintenance test: put two VMs (one with a data volume) on one
    host, enter maintenance mode (VMs get stopped), re-enable the host,
    migrate all volumes to another host, restart the VMs and verify.

    `host` is a global so the module's error handler can restore it.
    """
    global host
    if test_lib.lib_get_active_host_number() < 2:
        test_util.test_fail(
            'Not available host to do maintenance, since there are not 2 hosts'
        )
    vm1 = test_stub.create_vm(vm_name='maintain_host_vm1')
    test_obj_dict.add_vm(vm1)
    vm2 = test_stub.create_vm(vm_name='maintain_host_vm2')
    test_obj_dict.add_vm(vm2)
    #vm1.check()
    #vm2.check()
    test_util.test_dsc('Create volume and check')
    disk_offering = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('smallDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    test_util.test_dsc('Attach volume and check')
    volume.attach(vm1)
    volume.check()

    # Consolidate both VMs onto one randomly chosen enabled+connected host.
    current_host1 = test_lib.lib_get_vm_host(vm1.vm)
    conditions = res_ops.gen_query_conditions('state', '=', host_header.ENABLED)
    conditions = res_ops.gen_query_conditions('status', '=',
                                              host_header.CONNECTED, conditions)
    all_hosts = res_ops.query_resource(res_ops.HOST, conditions)
    target_host = random.choice(all_hosts)
    if current_host1.uuid != target_host.uuid:
        vm1.migrate(target_host.uuid)
    current_host2 = test_lib.lib_get_vm_host(vm2.vm)
    if current_host2.uuid != target_host.uuid:
        vm2.migrate(target_host.uuid)
    new_host = test_lib.lib_get_vm_host(vm1.vm)
    if new_host.uuid != target_host.uuid:
        test_util.test_fail(
            'VM did not migrate to target [host:] %s, but to [host:] %s'
            % (target_host.uuid, new_host.uuid))
    volume.check()

    # Enter maintenance mode on the host carrying both VMs.
    host = test_kvm_host.ZstackTestKvmHost()
    host.set_host(target_host)
    host.maintain()
    # need to update vm's inventory, since they will be changed by maintenace mode
    vm1.update()
    vm2.update()
    vm1.set_state(vm_header.STOPPED)
    vm2.set_state(vm_header.STOPPED)
    vm1.check()
    vm2.check()
    volume.check()

    # Re-enable the host and wait (up to 120s) for it to reconnect.
    host.change_state(test_kvm_host.ENABLE_EVENT)
    if not linux.wait_callback_success(is_host_connected,
                                       host.get_host().uuid, 120):
        test_util.test_fail(
            'host status is not changed to connected, after changing its state to Enable'
        )

    # Cold-migrate all volumes to a different host, then restart the VMs.
    volume.detach()
    vm1_root_volume = test_lib.lib_get_root_volume(vm1.get_vm())
    vm2_root_volume = test_lib.lib_get_root_volume(vm2.get_vm())
    # Reuses the enabled+connected conditions built above, excluding the
    # maintained host.
    conditions = res_ops.gen_query_conditions('uuid', '!=', target_host.uuid,
                                              conditions)
    rest_hosts = res_ops.query_resource(res_ops.HOST, conditions)
    new_target_host = random.choice(rest_hosts)
    vol_ops.migrate_volume(vm1_root_volume.uuid, new_target_host.uuid)
    vol_ops.migrate_volume(vm2_root_volume.uuid, new_target_host.uuid)
    vol_ops.migrate_volume(volume.get_volume().uuid, new_target_host.uuid)
    volume.attach(vm1)
    vm1.start()
    vm2.start()
    vm1.check()
    vm2.check()
    volume.check()

    # Teardown.
    vm1.destroy()
    test_obj_dict.rm_vm(vm1)
    vm2.destroy()
    test_obj_dict.rm_vm(vm2)
    volume.delete()
    test_obj_dict.rm_volume(volume)
    test_util.test_pass('Maintain Host Test Success')
def test():
    """IAM2 platform-admin zone scoping: a platform admin bound to zone1 must
    see only zone1 clusters/hosts, and a zone2 platform admin must not see
    resources created in zone1.
    """
    global test_obj_dict
    # need at least 2 zones
    zones_inv = res_ops.query_resource(res_ops.ZONE)
    if len(zones_inv) < 2:
        test_util.test_skip('test need at least 2 zones')
    zone1_uuid = zones_inv[0].uuid
    zone2_uuid = zones_inv[1].uuid
    iam2_ops.clean_iam2_enviroment()

    # Platform admin #1, bound to zone1.
    username = '******'
    password = '******'
    platform_admin_uuid = iam2_ops.create_iam2_virtual_id(username,
                                                          password).uuid
    attributes = [{"name": "__PlatformAdmin__"},
                  {"name": "__PlatformAdminRelatedZone__",
                   "value": zone1_uuid}]
    iam2_ops.add_attributes_to_iam2_virtual_id(platform_admin_uuid, attributes)

    # Platform admin #2, bound to zone2 (password is a pre-hashed credential).
    username2 = 'username2'
    password2 = 'b109f3bbbc244eb82441917ed06d618b9008dd09b3befd1b5e07394c706a8bb980b1d7785e5976ec049b46df5f1326af5a2ea6d103fd07c95385ffab0cacbc86'
    platform_admin2_uuid = iam2_ops.create_iam2_virtual_id(username2,
                                                           password2).uuid
    attributes = [{"name": "__PlatformAdmin__"},
                  {"name": "__PlatformAdminRelatedZone__",
                   "value": zone2_uuid}]
    iam2_ops.add_attributes_to_iam2_virtual_id(platform_admin2_uuid,
                                               attributes)

    # Collect the cluster uuids of each zone (admin view) for comparison.
    zone1_cluster = []
    zone2_cluster = []
    cond = res_ops.gen_query_conditions('zoneUuid', '=', zone1_uuid)
    cluster_inv = res_ops.query_resource(res_ops.CLUSTER, cond)
    for cluster in cluster_inv:
        zone1_cluster.append(cluster.uuid)
    cond = res_ops.gen_query_conditions('zoneUuid', '=', zone2_uuid)
    cluster_inv = res_ops.query_resource(res_ops.CLUSTER, cond)
    for cluster in cluster_inv:
        zone2_cluster.append(cluster.uuid)

    # Zone1 admin must see exactly zone1's clusters, never zone2's.
    platform_admin_session_uuid = iam2_ops.login_iam2_virtual_id(username,
                                                                 password)
    cluster_list = res_ops.query_resource(
        res_ops.CLUSTER, session_uuid=platform_admin_session_uuid)
    for cluster in cluster_list:
        if cluster.uuid not in zone1_cluster:
            test_util.test_fail("can't get zone1:[%s] cluster [%s]"
                                % (zone1_uuid, cluster.uuid))
        if cluster.uuid in zone2_cluster:
            test_util.test_fail(
                "platformadmin has no permission get zone2:[%s] cluster [%s]"
                % (zone2_uuid, cluster.uuid))

    # Same check for hosts.
    zone1_hosts = []
    zone2_hosts = []
    cond = res_ops.gen_query_conditions('zoneUuid', '=', zone1_uuid)
    hosts_inv = res_ops.query_resource(res_ops.HOST, cond)
    for host in hosts_inv:
        zone1_hosts.append(host.uuid)
    cond = res_ops.gen_query_conditions('zoneUuid', '=', zone2_uuid)
    hosts_inv = res_ops.query_resource(res_ops.HOST, cond)
    for host in hosts_inv:
        zone2_hosts.append(host.uuid)
    host_list = res_ops.query_resource_fields(
        res_ops.HOST, session_uuid=platform_admin_session_uuid)
    for host in host_list:
        if host.uuid not in zone1_hosts:
            test_util.test_fail("can't get zone1:[%s] host [%s]"
                                % (zone1_uuid, host.uuid))
        if host.uuid in zone2_hosts:
            test_util.test_fail(
                "platformadmin has no permission get zone2:[%s] host [%s]"
                % (zone2_uuid, host.uuid))

    # Create a VM and a volume as the zone1 admin; the zone2 admin must not
    # be able to query them.  vm_uuid is kept for the disabled VM check below.
    vm = test_stub.create_vm(session_uuid=platform_admin_session_uuid)
    test_obj_dict.add_vm(vm)
    vm_uuid = vm.get_vm().uuid
    volume = test_stub.create_volume(session_uuid=platform_admin_session_uuid)
    test_obj_dict.add_volume(volume)
    volume_uuid = volume.get_volume().uuid
    acc_ops.logout(platform_admin_session_uuid)

    platform_admin2_session_uuid = iam2_ops.login_iam2_virtual_id(username2,
                                                                  password2)
    # TODO:there is a bug below this operation ZSTAC-13105
    # vm_inv=res_ops.query_resource(res_ops.VM_INSTANCE,session_uuid=platform_admin2_session_uuid)
    # if vm_inv:
    #     if vm_inv.uuid == vm_uuid:
    #         test_util.test_fail("zone2:[%s] platformadmin can't query zone1 vm "%zone2_uuid)
    volume_inv = res_ops.query_resource(
        res_ops.VOLUME, session_uuid=platform_admin2_session_uuid)
    if volume_inv:
        if volume_inv[0].uuid == volume_uuid:
            test_util.test_fail(
                "zone2:[%s] platformadmin can't query zone1 volume "
                % zone2_uuid)

    test_lib.lib_robot_cleanup(test_obj_dict)
    iam2_ops.clean_iam2_enviroment()
    test_util.test_pass('success test iam2 login in by admin!')