def check(self):
    """Checker for a KVM VM expected to be in STOPPED state.

    NOTE(review): the unconditional `return self.judge(self.exp_result)`
    immediately after the super() call short-circuits this checker, so
    everything below it is currently unreachable.  Presumably this is
    deliberate (a stopped VM has no qemu process on the host, so the
    host-side status query may be meaningless) -- TODO confirm before
    re-enabling the dead code below.
    """
    super(zstack_kvm_vm_stopped_checker, self).check()
    return self.judge(self.exp_result)
    # --- unreachable from here on; kept as-is (see docstring) ---
    vm = self.test_obj.vm
    host = test_lib.lib_get_vm_host(vm)
    test_lib.lib_install_testagent_to_host(host)
    test_lib.lib_set_vm_host_l2_ip(vm)
    cmd = vm_plugin.VmStatusCmd()
    cmd.vm_uuids = [vm.uuid]
    test_util.test_logger(
        'Check [vm:] %s stopped status on host [name:] %s [uuid:] %s.' %
        (vm.uuid, host.name, host.uuid))
    # Query the VM status through the host's test agent.
    rspstr = http.json_dump_post(
        testagent.build_http_path(host.managementIp, vm_plugin.VM_STATUS),
        cmd)
    rsp = jsonobject.loads(rspstr)
    check_result = rsp.vm_status[vm.uuid].strip()
    if check_result == vm_plugin.VmAgent.VM_STATUS_STOPPED:
        test_util.test_logger(
            'Check result: [vm:] %s is STOPPED on [host:] %s .' %
            (vm.uuid, host.name))
        return self.judge(True)
    else:
        test_util.test_logger(
            'Check result: [vm:] %s is NOT STOPPED on [host:] %s . ; Expected status: %s ; Actual status: %s' %
            (vm.uuid, host.name, vm_plugin.VmAgent.VM_STATUS_STOPPED, check_result))
        return self.judge(False)
def test():
    """Create 100 snapshots of a VM root volume concurrently.

    Fires one thread per snapshot, waits for all of them, then verifies
    the snapshot count and -- on NFS/local primary storage -- that each
    snapshot file actually exists on the host.
    """
    global session_to
    global session_mc
    # Raise session limits so 100 concurrent snapshot API calls succeed.
    session_to = con_ops.change_global_config('identity', 'session.timeout', '720000')
    session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000')
    test_util.test_dsc('Create test vm as utility vm')
    vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm)
    # Use root volume to skip add_checking_point.
    test_util.test_dsc('Use root volume for snapshot testing')
    root_volume_inv = test_lib.lib_get_root_volume(vm.get_vm())
    root_volume = zstack_volume_header.ZstackTestVolume()
    root_volume.set_volume(root_volume_inv)
    root_volume.set_state(volume_header.ATTACHED)
    root_volume.set_target_vm(vm)
    test_obj_dict.add_volume(root_volume)
    vm.check()
    snapshots = test_obj_dict.get_volume_snapshot(root_volume.get_volume().uuid)
    snapshots.set_utility_vm(vm)
    ori_num = 100
    index = 1
    # One snapshot-creation worker thread per snapshot.
    while index < 101:
        thread = threading.Thread(target=create_snapshot, args=(snapshots, index,))
        thread.start()
        index += 1
    # Wait until only the main thread is left.
    while threading.activeCount() > 1:
        time.sleep(0.1)
    #snapshot.check() doesn't work for root volume
    #snapshots.check()
    # Check that each snapshot exists at its install_path (file-backed PS only).
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    if ps.type == inventory.NFS_PRIMARY_STORAGE_TYPE or ps.type == inventory.LOCAL_STORAGE_TYPE:
        host = test_lib.lib_get_vm_host(vm.get_vm())
        for snapshot in snapshots.get_snapshot_list():
            snapshot_inv = snapshot.get_snapshot()
            sp_ps_install_path = snapshot_inv.primaryStorageInstallPath
            if test_lib.lib_check_file_exist(host, sp_ps_install_path):
                test_util.test_logger('Check result: snapshot %s is found in host %s in path %s' % (snapshot_inv.name, host.managementIp, sp_ps_install_path))
            else:
                test_lib.lib_robot_cleanup(test_obj_dict)
                test_util.test_fail('Check result: snapshot %s is not found in host %s in path %s' % (snapshot_inv.name, host.managementIp, sp_ps_install_path))
    else:
        test_util.test_logger('Skip check file install path for %s primary storage' % (ps.type))
    cond = res_ops.gen_query_conditions('volumeUuid', '=', root_volume.get_volume().uuid)
    sps_num = res_ops.query_resource_count(res_ops.VOLUME_SNAPSHOT, cond)
    if sps_num != ori_num:
        test_util.test_fail('Create %d snapshots, but only %d snapshots were successfully created' % (ori_num, sps_num))
    try:
        test_lib.lib_robot_cleanup(test_obj_dict)
    except Exception:
        # Fix: was `test_lib.test_logger`, which raised AttributeError --
        # test_logger lives in test_util.
        test_util.test_logger('Delete VM may timeout')
    test_util.test_pass('Test create 100 snapshots simultaneously success')
def test():
    """Inject an ssh public key into a VM and verify key-based login works."""
    offering_name = os.environ.get('instanceOfferingName_m')
    offering_uuid = test_lib.lib_get_instance_offering_by_name(offering_name).uuid
    vm = test_stub.create_vm(image_name=os.environ.get('sshkeyImageName'),
                             instance_offering_uuid=offering_uuid)
    test_obj_dict.add_vm(vm)
    guest_ip = vm.get_vm().vmNics[0].ip
    time.sleep(10)
    test_lib.lib_add_vm_sshkey(vm.get_vm().uuid, os.environ.get('sshkeyPubKey'))
    host_ops.reconnect_host(test_lib.lib_get_vm_host(vm.get_vm()).uuid)
    vm.reboot()
    test_lib.lib_wait_target_up(guest_ip, '22', 240)
    time.sleep(10)
    probe = ('timeout 5 ssh -i %s -oPasswordAuthentication=no '
             '-oStrictHostKeyChecking=no -oCheckHostIP=no '
             '-oUserKnownHostsFile=/dev/null %s echo pass'
             % (os.environ.get('sshkeyPriKey_file'), guest_ip))
    # Retry a few times; the guest may still be applying the key after reboot.
    attempts = 0
    connected = False
    while attempts < 5 and not connected:
        if test_stub.execute_shell_in_process(probe, tmp_file) == 0:
            connected = True
        else:
            time.sleep(10)
            attempts += 1
    if not connected:
        test_util.test_fail("fail to use ssh key connect to VM")
    vm.destroy()
    test_util.test_pass('Create VM Test Success')
def get_ssh_ip_result(self):
    """Probe the load-balancer VIP once and record which backend answered.

    For instance port 22 the probe is an ssh command listing the guest's
    IPs; for instance port 80 it is a curl against the VIP.  When the
    response contains one of the tracked guest IPs, that VM's hit counter
    in self.vm_ip_test_dict is incremented.

    Returns False when the probe produced no output, True otherwise.
    """
    vm = self.vm_list[0]
    host = test_lib.lib_get_vm_host(vm)
    port = self.lbl.get_creation_option().get_load_balancer_port()
    iport = self.lbl.get_creation_option().get_instance_port()
    # Fix: initialize so an unexpected instance port does not raise
    # NameError at the `if not vm_cmd_result` test below.
    vm_cmd_result = None
    if iport == 22:
        vm_command = '/sbin/ip a|grep inet'
        vm_cmd_result = test_lib.lib_execute_ssh_cmd(self.vip_ip, \
                test_lib.lib_get_vm_username(vm), \
                test_lib.lib_get_vm_password(vm), \
                vm_command, \
                port=port)
    if iport == 80:
        vm_command = 'curl %s:%s' % (self.vip_ip, port)
        vm_cmd_result = shell.call('%s' % vm_command)
    if not vm_cmd_result:
        test_util.test_logger(
            'Checker result: FAIL to execute test ssh command in vip: %s for lb: %s.'
            % (self.vip_ip, self.lbl_uuid))
        return False
    # Credit the first tracked backend IP found in the response.
    for key, values in self.vm_ip_test_dict.iteritems():
        if key in vm_cmd_result:
            self.vm_ip_test_dict[key] += 1
            break
    return True
def test():
    """Disable all primary storage and verify a running VM keeps running,
    can be suspended, and everything recovers after re-enable."""
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    vr_uuid = vr.uuid
    # Fix: dropped a duplicated lib_get_l3_by_name() lookup that was here.
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    #ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    #ps_uuid = ps.uuid
    #ps_ops.change_primary_storage_state(ps_uuid, 'disable')
    test_stub.disable_all_pss()
    # Disabling PS must not stop a running VM.
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 90):
        # Fix: typo "runnning" in the failure message.
        test_util.test_fail('VM is expected to running when PS change to disable state')
    vm.set_state(vm_header.RUNNING)
    vm.check()
    vm.suspend()
    vm.check()
    #ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    test_stub.enable_all_pss()
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)
    test_util.test_pass('PS disable mode Test Success')
def test():
    """Put the VM's primary storage into maintain mode and verify the VM stops."""
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    vm = test_stub.create_vlan_vm(l3_name=os.environ.get('l3VlanNetworkName1'))
    host_uuid = test_lib.lib_get_vm_host(vm.get_vm()).uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    ps_uuid = test_lib.lib_get_primary_storage_by_vm(vm.get_vm()).uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'maintain')
    # Maintain mode must force the VM down.
    if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to stop when PS change to maintain state')
    vm.set_state(vm_header.STOPPED)
    vm.check()
    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    # Maintain mode also stopped the virtual routers; bring them all back.
    for router in test_lib.lib_get_all_vrs():
        vm_ops.start_vm(router.uuid)
    vm.start()
    vm.check()
    vm.destroy()
    test_util.test_pass('PS maintain mode Test Success')
def test():
    """Delete a shareable virtio-scsi volume while its ceph PS is disabled (Delay policy)."""
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, None)
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")
    if bss[0].type != inventory.CEPH_BACKUP_STORAGE_TYPE:
        test_util.test_skip("not find available ceph backup storage. Skip test")
    test_lib.lib_set_delete_policy('vm', 'Delay')
    test_lib.lib_set_delete_policy('volume', 'Delay')
    net_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=net_name)
    l3_net = test_lib.lib_get_l3_by_name(net_name)
    vr_uuid = test_lib.lib_find_vr_by_l3_uuid(l3_net.uuid)[0].uuid
    host_uuid = test_lib.lib_get_vm_host(vm.get_vm()).uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    # Build a shareable virtio-scsi data volume and attach it.
    offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName'))
    vol_option = test_util.VolumeOption()
    vol_option.set_disk_offering_uuid(offering.uuid)
    vol_option.set_system_tags(['ephemeral::shareable', 'capability::virtio-scsi'])
    volume = test_stub.create_volume(vol_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    volume.attach(vm)
    ps_uuid = test_lib.lib_get_primary_storage_by_vm(vm.get_vm()).uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'disable')
    # Disabling PS must not stop a running VM.
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to running when PS change to disable state')
    vm.set_state(vm_header.RUNNING)
    vm.check()
    volume.delete()
    #volume.expunge()
    volume.check()
    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)
    vm.destroy()
    test_lib.lib_set_delete_policy('vm', 'Direct')
    test_lib.lib_set_delete_policy('volume', 'Direct')
    test_util.test_pass('Delete volume under PS disable mode Test Success')
def test():
    """Delete and expunge a shareable volume while every PS is disabled (Delay policy)."""
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    # Only Ceph / SharedBlock primary storage supports this scenario.
    test_lib.skip_test_when_ps_type_not_in_list(
        [inventory.CEPH_PRIMARY_STORAGE_TYPE, "SharedBlock"])
    test_util.test_dsc('Create test vm and check')
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    if not res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, None):
        test_util.test_skip("not find available backup storage. Skip test")
    test_lib.lib_set_delete_policy('vm', 'Delay')
    test_lib.lib_set_delete_policy('volume', 'Delay')
    net_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=net_name)
    l3_net = test_lib.lib_get_l3_by_name(net_name)
    vr_uuid = test_lib.lib_find_vr_by_l3_uuid(l3_net.uuid)[0].uuid
    host_uuid = test_lib.lib_get_vm_host(vm.get_vm()).uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    # Build a shareable virtio-scsi data volume and attach it.
    offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName'))
    vol_option = test_util.VolumeOption()
    vol_option.set_disk_offering_uuid(offering.uuid)
    vol_option.set_system_tags(['ephemeral::shareable', 'capability::virtio-scsi'])
    volume = test_stub.create_volume(vol_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    volume.attach(vm)
    #ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    #ps_uuid = ps.uuid
    #ps_ops.change_primary_storage_state(ps_uuid, 'disable')
    test_stub.disable_all_pss()
    # Disabling PS must not stop a running VM.
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to running when PS change to disable state')
    vm.set_state(vm_header.RUNNING)
    vm.check()
    volume.delete()
    volume.check()
    volume.expunge()
    volume.check()
    #ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    test_stub.enable_all_pss()
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)
    vm.destroy()
    test_lib.lib_set_delete_policy('vm', 'Direct')
    test_lib.lib_set_delete_policy('volume', 'Direct')
    test_util.test_pass('Delete volume under PS disable mode Test Success')
def check(self):
    """Verify through the host agent that the volume's file shows up among
    the VM's block devices."""
    super(zstack_kvm_volume_attach_checker, self).check()
    volume = self.test_obj.volume
    # Guard clauses: all three pieces of state must be present before the
    # host-side check makes sense.
    if not volume.vmInstanceUuid:
        test_util.test_logger('Check result: [volume:] %s does NOT have vmInstanceUuid. It is not attached to any vm.' % volume.uuid)
        return self.judge(False)
    if not self.test_obj.target_vm:
        test_util.test_logger('Check result: test [volume:] %s does NOT have vmInstance record in test structure. Can not do furture checking.' % volume.uuid)
        return self.judge(False)
    vm = self.test_obj.target_vm.vm
    install_path = volume.installPath
    if not install_path:
        test_util.test_logger('Check result: [installPath] is Null for [volume uuid: ] %s. Can not check if volume is attached to vm.' % volume.uuid)
        return self.judge(False)
    target_host = test_lib.lib_get_vm_host(vm)
    status_cmd = vm_plugin.VmStatusCmd()
    status_cmd.vm_uuids = [vm.uuid]
    raw_rsp = http.json_dump_post(testagent.build_http_path(target_host.managementIp, vm_plugin.VM_BLK_STATUS), status_cmd)
    blk_output = jsonobject.dumps(jsonobject.loads(raw_rsp).vm_status[vm.uuid])
    found = install_path in blk_output
    if found:
        test_util.test_logger('Check result: [volume file:] %s is found in [vm:] %s on [host:] %s .' % (volume.uuid, vm.uuid, target_host.managementIp))
    else:
        test_util.test_logger('Check result: [volume file:] %s is not found in [vm:] %s on [host:] %s .' % (volume.uuid, vm.uuid, target_host.managementIp))
    return self.judge(found)
def test():
    """Maintain a host, verify its VM stops, then recover after re-enable."""
    global host
    if test_lib.lib_get_active_host_number() < 2:
        test_util.test_fail('Not available host to do maintenance, since there are not 2 hosts')
    image_cond = res_ops.gen_query_conditions('name', '=', 'ttylinux')
    image_uuid = res_ops.query_resource(res_ops.IMAGE, image_cond)[0].uuid
    l3_uuid = res_ops.query_resource(res_ops.L3_NETWORK)[0].uuid
    # Mini VM: 1 CPU, 1G memory.
    vm = test_stub.create_mini_vm([l3_uuid], image_uuid,
                                  cpu_num=1, memory_size=1073741824)
    test_obj_dict.add_vm(vm)
    host_uuid = test_lib.lib_get_vm_host(vm.vm).uuid
    host_ops.change_host_state(host_uuid, 'maintain')
    # Maintenance changes the VM's inventory; refresh our cached copy first.
    vm.update()
    vm.set_state(vm_header.STOPPED)
    vm.check()
    host_ops.change_host_state(host_uuid, 'enable')
    if not linux.wait_callback_success(is_host_connected, host_uuid, 120):
        test_util.test_fail('host status is not changed to connected, after changing its state to Enable')
    vm.start()
    vm.check()
    vm.destroy()
    test_obj_dict.rm_vm(vm)
    test_util.test_pass('Maintain Host Test Success')
def check(self):
    """Verify SNAT works for the VM by pinging an external target from
    inside the guest, going through each SNAT-providing virtual router."""
    super(zstack_kvm_vm_snat_checker, self).check()
    vm = self.test_obj.vm
    test_lib.lib_install_testagent_to_vr(vm)
    host = test_lib.lib_get_vm_host(vm)
    vm_cmd_result = None
    vr_vms = test_lib.lib_find_vr_by_vm(vm)
    test_lib.lib_set_vm_host_l2_ip(vm)
    for vr_vm in vr_vms:
        test_util.test_logger("Begin to check [vm:] %s SNAT" % vm.uuid)
        nic = test_lib.lib_get_vm_nic_by_vr(vm, vr_vm)
        if not 'SNAT' in test_lib.lib_get_l3_service_type(nic.l3NetworkUuid):
            test_util.test_logger("Skip [VR:] %s, since it doesn't provide SNAT service" % vr_vm.uuid)
            continue
        ping_target = test_lib.test_config.pingTestTarget.text_
        # Check if there is a SG rule that would block the ICMP probe.
        if test_lib.lib_is_sg_rule_exist(nic.uuid, None, None, inventory.EGRESS):
            if not test_lib.lib_is_sg_rule_exist(nic.uuid, inventory.ICMP, ping_target, inventory.EGRESS):
                test_util.test_warn('Skip SNAT checker: because there is ICMP Egress Rule was assigned to [nic:] %s and the allowed target ip is not %s' % (nic.uuid, ping_target))
                return self.judge(self.exp_result)
        # Fix: removed unused local `guest_ip` (nic.ip is passed directly below).
        vm_command = 'ping -c 5 -W 5 %s >/tmp/ping_result 2>&1; ret=$?; cat /tmp/ping_result; exit $ret' % ping_target
        vm_cmd_result = test_lib.lib_ssh_vm_cmd_by_agent_with_retry(host.managementIp, nic.ip, test_lib.lib_get_vm_username(vm), test_lib.lib_get_vm_password(vm), vm_command, self.exp_result)
        if not vm_cmd_result:
            test_util.test_logger('Checker result: FAIL to ping [target:] %s from [vm:] %s .' % (ping_target, vm.uuid))
            if self.exp_result == True:
                test_util.test_logger("network connection result is not expected pass, will print VR's network configuration:")
                test_lib.lib_print_vr_network_conf(vr_vm)
            return self.judge(False)
        else:
            test_util.test_logger('Checker result: SUCCEED to ping [target:] %s from [vm:] %s .' % (ping_target, vm.uuid))
    return self.judge(True)
def test():
    """Inject an ssh public key into a VM and verify key-based login works."""
    offering = test_lib.lib_get_instance_offering_by_name(
        os.environ.get('instanceOfferingName_m'))
    vm = test_stub.create_vm(image_name=os.environ.get('sshkeyImageName'),
                             instance_offering_uuid=offering.uuid)
    test_obj_dict.add_vm(vm)
    guest_ip = vm.get_vm().vmNics[0].ip
    time.sleep(10)
    test_lib.lib_add_vm_sshkey(vm.get_vm().uuid, os.environ.get('sshkeyPubKey'))
    vm_host = test_lib.lib_get_vm_host(vm.get_vm())
    host_ops.reconnect_host(vm_host.uuid)
    vm.reboot()
    test_lib.lib_wait_target_up(guest_ip, '22', 240)
    time.sleep(10)
    key_file = os.environ.get('sshkeyPriKey_file')
    probe = ('timeout 5 ssh -i %s -oPasswordAuthentication=no '
             '-oStrictHostKeyChecking=no -oCheckHostIP=no '
             '-oUserKnownHostsFile=/dev/null %s echo pass' % (key_file, guest_ip))
    # Retry a few times; the guest may still be applying the key after reboot.
    for _ in range(5):
        if test_stub.execute_shell_in_process(probe, tmp_file) == 0:
            break
        time.sleep(10)
    else:
        test_util.test_fail("fail to use ssh key connect to VM")
    vm.destroy()
    test_util.test_pass('Create VM Test Success')
def test():
    """Detach and delete a data volume around all-PS maintain mode."""
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    vm = test_stub.create_vlan_vm(l3_name=os.environ.get('l3VlanNetworkName1'))
    host_uuid = test_lib.lib_get_vm_host(vm.get_vm()).uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    # Plain data volume (no shareable/virtio-scsi tags).
    offering = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('rootDiskOfferingName'))
    vol_option = test_util.VolumeOption()
    vol_option.set_disk_offering_uuid(offering.uuid)
    volume = test_stub.create_volume(vol_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    volume.attach(vm)
    test_stub.maintain_all_pss()
    # Maintain mode must force the VM down.
    if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail(
            'VM is expected to stop when PS change to maintain state')
    vm.set_state(vm_header.STOPPED)
    vm.check()
    volume.detach(vm.get_vm().uuid)
    test_stub.enable_all_pss()
    host_ops.reconnect_host(host_uuid)
    # Give the management node a moment, then restart every virtual router
    # that maintain mode stopped.
    time.sleep(5)
    for router in test_lib.lib_get_all_vrs():
        vm_ops.start_vm(router.uuid)
    vm.start()
    vm.check()
    volume.delete()
    volume.check()
    vm.destroy()
    test_util.test_pass('Delete volume under PS maintain mode Test Success')
def test():
    """Attach an ISO to a stopped VM while its PS is in maintain mode."""
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    vm = test_stub.create_vlan_vm(l3_name=os.environ.get('l3VlanNetworkName1'))
    host_uuid = test_lib.lib_get_vm_host(vm.get_vm()).uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    test_util.test_dsc('Add ISO Image')
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bs_uuid = res_ops.query_resource(res_ops.BACKUP_STORAGE, bs_cond)[0].uuid
    iso_option = test_util.ImageOption()
    iso_option.set_name('iso')
    iso_option.set_backup_storage_uuid_list([bs_uuid])
    # Publish a fake ISO through the management node's tomcat.
    os.system("echo fake iso for test only > %s/apache-tomcat/webapps/zstack/static/test.iso" % (os.environ.get('zstackInstallPath')))
    iso_option.set_url('http://%s:8080/zstack/static/test.iso' % (os.environ.get('node1Ip')))
    image = test_image.ZstackTestImage()
    image.set_image(img_ops.add_iso_template(iso_option))
    image.set_creation_option(iso_option)
    test_obj_dict.add_image(image)
    ps_uuid = test_lib.lib_get_primary_storage_by_vm(vm.get_vm()).uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'maintain')
    # Maintain mode must force the VM down.
    if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to stop when PS change to maintain state')
    vm.set_state(vm_header.STOPPED)
    vm.check()
    test_util.test_dsc('Attach ISO to VM')
    iso_cond = res_ops.gen_query_conditions('name', '=', 'iso')
    iso_uuid = res_ops.query_resource(res_ops.IMAGE, iso_cond)[0].uuid
    img_ops.attach_iso(iso_uuid, vm.vm.uuid)
    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    # Restart the virtual routers stopped by maintain mode.
    for router in test_lib.lib_get_all_vrs():
        vm_ops.start_vm(router.uuid)
    vm.start()
    vm.check()
    vm.destroy()
    vm.check()
    test_util.test_pass('PS maintain mode Test Success')
def check(self):
    """Ping each DHCP-assigned guest IP from the VM's host to verify
    basic network connectivity."""
    super(zstack_kvm_vm_network_checker, self).check()
    vm = self.test_obj.vm
    host = test_lib.lib_get_vm_host(vm)
    test_lib.lib_install_testagent_to_host(host)
    test_lib.lib_set_vm_host_l2_ip(vm)
    routers = test_lib.lib_find_vr_by_vm(vm)
    if not routers:
        test_util.test_warn('No Virtual Router was found for VM: %s. Skip testing.' % vm.uuid)
        return self.judge(self.exp_result)
    for router in routers:
        nic = test_lib.lib_get_vm_nic_by_vr(vm, router)
        # Only networks providing DHCP hand out the IP we ping.
        if not 'DHCP' in test_lib.lib_get_l3_service_type(nic.l3NetworkUuid):
            test_util.test_logger("Skip [VR:] %s, since it doesn't provide DHCP service" % router.uuid)
            continue
        guest_ip = nic.ip
        command = 'ping -c 5 -W 5 %s >/tmp/ping_result 2>&1; ret=$?; cat /tmp/ping_result; exit $ret' % guest_ip
        if not test_lib.lib_execute_sh_cmd_by_agent_with_retry(host.managementIp, command, self.exp_result):
            test_util.test_logger('Checker result: FAIL to ping [target:] %s [ip:] %s from [host:] %s' % (vm.uuid, guest_ip, host.uuid))
            if self.exp_result == True:
                test_util.test_logger("network connection result is not expected pass, will print VR's network configuration:")
                test_lib.lib_print_vr_network_conf(router)
            return self.judge(False)
        test_util.test_logger('Checker result: SUCCESSFULLY ping [target:] %s [ip:] %s from [host:] %s' % (vm.uuid, guest_ip, host.uuid))
    test_util.test_logger("Checker result: ping target [vm:] %s from [host:] %s SUCCESS" % (vm.uuid, host.uuid))
    return self.judge(True)
def test():
    """Detach an ISO from a running VM while its PS is disabled."""
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    net_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=net_name)
    l3_net = test_lib.lib_get_l3_by_name(net_name)
    vr_uuid = test_lib.lib_find_vr_by_l3_uuid(l3_net.uuid)[0].uuid
    host_uuid = test_lib.lib_get_vm_host(vm.get_vm()).uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    test_util.test_dsc('Add ISO Image')
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bs_uuid = res_ops.query_resource(res_ops.BACKUP_STORAGE, bs_cond)[0].uuid
    iso_option = test_util.ImageOption()
    iso_option.set_name('iso')
    iso_option.set_backup_storage_uuid_list([bs_uuid])
    # Publish a fake ISO through the management node's tomcat.
    os.system("echo fake iso for test only > %s/apache-tomcat/webapps/zstack/static/test.iso" % (os.environ.get('zstackInstallPath')))
    iso_option.set_url('http://%s:8080/zstack/static/test.iso' % (os.environ.get('node1Ip')))
    image = test_image.ZstackTestImage()
    image.set_image(img_ops.add_iso_template(iso_option))
    image.set_creation_option(iso_option)
    test_obj_dict.add_image(image)
    test_util.test_dsc('Attach ISO to VM')
    iso_cond = res_ops.gen_query_conditions('name', '=', 'iso')
    iso_uuid = res_ops.query_resource(res_ops.IMAGE, iso_cond)[0].uuid
    img_ops.attach_iso(iso_uuid, vm.vm.uuid)
    ps_uuid = test_lib.lib_get_primary_storage_by_vm(vm.get_vm()).uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'disable')
    # Disabling PS must not stop a running VM.
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to running when PS change to disable state')
    vm.set_state(vm_header.RUNNING)
    vm.check()
    test_util.test_dsc('Detach ISO to VM')
    img_ops.detach_iso(vm.vm.uuid)
    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)
    vm.destroy()
    vm.check()
    # Second check kept from the original flow (expunge step is disabled).
    vm.check()
    test_util.test_pass('PS disable mode Test Success')
def test():
    """Recover a deleted volume while its PS is disabled, then expunge it."""
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_lib.lib_set_delete_policy('vm', 'Delay')
    test_lib.lib_set_delete_policy('volume', 'Delay')
    test_util.test_dsc('Create test vm and check')
    net_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=net_name)
    l3_net = test_lib.lib_get_l3_by_name(net_name)
    vr_uuid = test_lib.lib_find_vr_by_l3_uuid(l3_net.uuid)[0].uuid
    host_uuid = test_lib.lib_get_vm_host(vm.get_vm()).uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    offering = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('rootDiskOfferingName'))
    vol_option = test_util.VolumeOption()
    vol_option.set_disk_offering_uuid(offering.uuid)
    vol_option.set_system_tags(
        ['ephemeral::shareable', 'capability::virtio-scsi'])
    volume = test_stub.create_volume(vol_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    # Delete first -- the Delay policy keeps the volume recoverable.
    volume.delete()
    volume.check()
    ps_uuid = test_lib.lib_get_primary_storage_by_vm(vm.get_vm()).uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'disable')
    # Disabling PS must not stop a running VM.
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail(
            'VM is expected to running when PS change to disable state')
    vm.set_state(vm_header.RUNNING)
    vm.check()
    volume.recover()
    volume.check()
    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)
    volume.delete()
    volume.expunge()
    volume.check()
    vm.destroy()
    test_lib.lib_set_delete_policy('vm', 'Direct')
    test_lib.lib_set_delete_policy('volume', 'Direct')
    test_util.test_pass('Delete volume under PS disable mode Test Success')
def check(self):
    """Verify a shared volume's device appears in the attached VM's block
    device list as reported by the host test agent."""
    super(zstack_kvm_share_volume_attach_checker, self).check()
    volume = self.test_obj.volume
    # Shared volumes track attachment in the SHARE_VOLUME table, not in
    # volume.vmInstanceUuid.
    sv_cond = res_ops.gen_query_conditions("volumeUuid", '=', volume.uuid)
    attached_vm_uuids = res_ops.query_resource_fields(
        res_ops.SHARE_VOLUME, sv_cond, None, fields=['vmInstanceUuid'])
    if not attached_vm_uuids:
        test_util.test_logger(
            'Check result: [volume:] %s does NOT have vmInstanceUuid. It is not attached to any vm.'
            % volume.uuid)
        return self.judge(False)
    if not self.test_obj.target_vm:
        test_util.test_logger(
            'Check result: test [volume:] %s does NOT have vmInstance record in test structure. Can not do furture checking.'
            % volume.uuid)
        return self.judge(False)
    vm = self.test_obj.target_vm.vm
    install_path = volume.installPath
    if not install_path:
        test_util.test_logger(
            'Check result: [installPath] is Null for [volume uuid: ] %s. Can not check if volume is attached to vm.'
            % volume.uuid)
        return self.judge(False)
    host = test_lib.lib_get_vm_host(vm)
    cmd = vm_plugin.VmStatusCmd()
    cmd.vm_uuids = [vm.uuid]
    rspstr = http.json_dump_post(
        testagent.build_http_path(host.managementIp, vm_plugin.VM_BLK_STATUS),
        cmd)
    output = jsonobject.dumps(jsonobject.loads(rspstr).vm_status[vm.uuid])
    # Normalize the install path into the device string the agent reports.
    if install_path.startswith('ceph'):
        install_path = install_path.split('ceph://')[1]
    elif install_path.startswith('sharedblock'):
        install_path = "/dev/" + install_path.split('sharedblock://')[1]
    if install_path in output:
        test_util.test_logger(
            'Check result: [volume:] %s [file:] %s is found in [vm:] %s on [host:] %s .'
            % (volume.uuid, install_path, vm.uuid, host.managementIp))
        return self.judge(True)
    else:
        test_util.test_logger(
            'Check result: [volume:] %s [file:] %s is not found in [vm:] %s on [host:] %s .'
            % (volume.uuid, install_path, vm.uuid, host.managementIp))
        return self.judge(False)
def test():
    """Recover a deleted volume while its PS is in maintain mode."""
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    test_lib.lib_set_delete_policy('vm', 'Delay')
    test_lib.lib_set_delete_policy('volume', 'Delay')
    vm = test_stub.create_vlan_vm(l3_name=os.environ.get('l3VlanNetworkName1'))
    host_uuid = test_lib.lib_get_vm_host(vm.get_vm()).uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    offering = test_lib.lib_get_disk_offering_by_name(
        os.environ.get('rootDiskOfferingName'))
    vol_option = test_util.VolumeOption()
    vol_option.set_disk_offering_uuid(offering.uuid)
    vol_option.set_system_tags(
        ['ephemeral::shareable', 'capability::virtio-scsi'])
    volume = test_stub.create_volume(vol_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    # Delete first -- the Delay policy keeps the volume recoverable.
    volume.delete()
    volume.check()
    ps_uuid = test_lib.lib_get_primary_storage_by_vm(vm.get_vm()).uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'maintain')
    # Maintain mode must force the VM down.
    if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to stop when PS change to maintain state')
    vm.set_state(vm_header.STOPPED)
    vm.check()
    volume.recover()
    volume.check()
    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    # Restart the virtual routers stopped by maintain mode.
    for router in test_lib.lib_get_all_vrs():
        vm_ops.start_vm(router.uuid)
    vm.start()
    vm.check()
    volume.delete()
    volume.check()
    vm.destroy()
    test_lib.lib_set_delete_policy('vm', 'Direct')
    test_lib.lib_set_delete_policy('volume', 'Direct')
    test_util.test_pass('Delete volume under PS maintain mode Test Success')
def test():
    """Attach/detach volumes on two primary storages, then cold-migrate
    root and data volumes to another host and re-attach."""
    env = test_stub.PSEnvChecker()
    on_local_nfs = env.is_local_nfs_env
    on_local_smp = env.is_local_smp_env
    local_ps, other_ps = env.get_two_ps()
    vm = test_stub.create_multi_vms(name_prefix='test-', count=1)[0]
    test_obj_dict.add_vm(vm)
    local_vols = []
    # Local+NFS environments cannot host the first volume set.
    if not on_local_nfs:
        local_vols = test_stub.create_multi_volumes(
            count=VOLUME_NUMBER, ps=local_ps,
            host_uuid=test_lib.lib_get_vm_host(vm.get_vm()).uuid)
    other_vols = test_stub.create_multi_volumes(
        count=VOLUME_NUMBER, ps=other_ps,
        host_uuid=None if on_local_nfs else test_lib.lib_get_vm_host(vm.get_vm()).uuid)
    all_vols = local_vols + other_vols
    for vol in all_vols:
        test_obj_dict.add_volume(vol)
    for vol in all_vols:
        vol.attach(vm)
        vol.check()
    vm.check()
    for vol in all_vols:
        vol.detach()
        vol.check()
    vm.check()
    target_host = test_lib.lib_find_random_host(vm.get_vm())
    vm.stop()
    vm.check()
    # Cold-migrate the root volume plus every migratable data volume.
    vol_ops.migrate_volume(test_lib.lib_get_root_volume(vm.get_vm()).uuid,
                           target_host.uuid)
    for vol in local_vols:
        vol_ops.migrate_volume(vol.get_volume().uuid, target_host.uuid)
    if not (on_local_nfs or on_local_smp):
        for vol in other_vols:
            vol_ops.migrate_volume(vol.get_volume().uuid, target_host.uuid)
    for vol in all_vols:
        vol.attach(vm)
        vol.check()
    vm.start()
    vm.check()
    for vol in all_vols:
        assert vol.get_volume().vmInstanceUuid == vm.get_vm().uuid
    test_util.test_pass('Multi PrimaryStorage Test Pass')
def check(self): super(zstack_kvm_virtioscsi_shareable_checker, self).check() volume = self.test_obj.volume has_volume = False shareable = False check_result = False #sv_cond = res_ops.gen_query_conditions("volumeUuid", '=', volume.uuid) #share_volume_vm_uuids = res_ops.query_resource_fields(res_ops.SHARE_VOLUME, sv_cond, None, fields=['vmInstanceUuid']) #test_util.test_logger('share_volume_vm_uuids is %s' %share_volume_vm_uuids) print "volume_uuid= %s" % (volume.uuid) sv_cond = res_ops.gen_query_conditions("volumeUuid", '=', volume.uuid) volume_vmInstanceUuid = res_ops.query_resource_fields( res_ops.SHARE_VOLUME, sv_cond, None, fields=['vmInstanceUuid'])[0].vmInstanceUuid pv_cond = res_ops.gen_query_conditions("volume.uuid", '=', volume.uuid) volume_ps_type = res_ops.query_resource_fields(res_ops.PRIMARY_STORAGE, pv_cond, None, fields=['type'])[0].type host = test_lib.lib_get_vm_host( test_lib.lib_get_vm_by_uuid(volume_vmInstanceUuid)) test_util.test_logger('vmInstanceUuid_host.ip is %s' % host.managementIp) test_util.test_logger('vmInstanceUuid is %s' % volume_vmInstanceUuid) #xml = os.popen('virsh dumpxml %s' % volume.vmInstanceUuid) xml = os.popen( 'sshpass -p password ssh root@%s -p %s "virsh dumpxml %s"' % (host.managementIp, host.sshPort, volume_vmInstanceUuid)) tree = ET.parse(xml) root = tree.getroot() for domain in root: if domain.tag == "devices": for device in domain: if device.tag == "disk": for disk in device: if disk.tag == "source": if volume_ps_type == "Ceph": if disk.get("name").find(volume.uuid) > 0: has_volume = True if volume_ps_type == "SharedBlock": if disk.get("file").find(volume.uuid) > 0: has_volume = True if disk.tag == "shareable": shareable = True if has_volume and shareable: check_result = True break test_util.test_logger( 'Check result: The result of check VirtioSCSI shareable label is %s' % check_result) return self.judge(check_result)
def check(self):
    """Verify a volume is really attached to its target VM on the hypervisor.

    Translates the volume's PS-specific installPath into the device path
    visible inside the KVM domain, asks the host test agent for the VM's
    block devices, and judges by whether the path appears in the response.
    """
    super(zstack_kvm_volume_attach_checker, self).check()
    volume = self.test_obj.volume
    if not volume.vmInstanceUuid:
        test_util.test_logger('Check result: [volume:] %s does NOT have vmInstanceUuid. It is not attached to any vm.' % volume.uuid)
        return self.judge(False)
    if not self.test_obj.target_vm:
        test_util.test_logger('Check result: test [volume:] %s does NOT have vmInstance record in test structure. Can not do furture checking.' % volume.uuid)
        return self.judge(False)
    vm = self.test_obj.target_vm.vm
    volume_installPath = volume.installPath
    if not volume_installPath:
        test_util.test_logger('Check result: [installPath] is Null for [volume uuid: ] %s. Can not check if volume is attached to vm.' % volume.uuid)
        return self.judge(False)
    host = test_lib.lib_get_vm_host(vm)
    cmd = vm_plugin.VmStatusCmd()
    cmd.vm_uuids = [vm.uuid]
    # ask the host agent for the VM's block-device status
    rspstr = http.json_dump_post(testagent.build_http_path(host.managementIp, vm_plugin.VM_BLK_STATUS), cmd)
    rsp = jsonobject.loads(rspstr)
    output = jsonobject.dumps(rsp.vm_status[vm.uuid])
    # normalize the installPath per primary-storage flavor into the
    # device/target string that shows up in the agent output
    if volume_installPath.startswith('iscsi'):
        # keep only the iqn part, dropping the trailing lun suffix
        volume_installPath = volume_installPath.split(';')[0].split('/iqn')[1]
        volume_installPath = 'iqn%s' % volume_installPath
        volume_installPath = volume_installPath[:-2]
    elif volume_installPath.startswith('ceph'):
        volume_installPath = volume_installPath.split('ceph://')[1]
    elif volume_installPath.startswith('fusionstor'):
        volume_installPath = volume_installPath.split('fusionstor://')[1]
    elif volume_installPath.startswith('sharedblock'):
        volume_installPath = "/dev/" + volume_installPath.split('sharedblock://')[1]
    elif volume_installPath.startswith('mini'):
        # mini storage maps to a drbd device; resolve its minor number on the host
        _cmd = "drbdsetup show %s | grep device | awk -F';' '{print $1}' | awk '{print $3}'" % volume.uuid
        result = test_lib.lib_execute_ssh_cmd(host.managementIp, host.username, host.password, _cmd, 180)
        volume_installPath = '/dev/drbd' + result.strip()
    elif volume_installPath.startswith('ebs'):
        # ebs: ask the storage gateway for the local device name of the volume id
        ps_uuid = volume.primaryStorageUuid
        ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid)
        url = ps.url.replace('ocean/api', 'dev/name')
        vol_id = volume_installPath.split(';')[1].split('volumeId=')[-1]
        req = urllib2.Request(url, headers={'Volumeid': vol_id})
        volume_installPath = '/dev/' + urllib2.urlopen(req).read().split('"')[-2]
    if volume_installPath in output:
        test_util.test_logger('Check result: [volume:] %s [file:] %s is found in [vm:] %s on [host:] %s .' % (volume.uuid, volume_installPath, vm.uuid, host.managementIp))
        return self.judge(True)
    else:
        test_util.test_logger('Check result: [volume:] %s [file:] %s is not found in [vm:] %s on [host:] %s .' % (volume.uuid, volume_installPath, vm.uuid, host.managementIp))
        return self.judge(False)
def check(self):
    """Verify the guest VM's DNS settings in /etc/resolv.conf.

    Skips when the VR does not offer DNS on the VM's default L3. Otherwise
    reads /etc/resolv.conf inside the guest over the host agent and checks
    the VR's private IP (and the L3 domain name, if configured) is present.
    """
    super(zstack_kvm_vm_dns_checker, self).check()
    vm = self.test_obj.vm
    test_lib.lib_install_testagent_to_vr(vm)
    host = test_lib.lib_get_vm_host(vm)
    test_lib.lib_install_testagent_to_host(host)
    test_lib.lib_set_vm_host_l2_ip(vm)
    default_l3_uuid = vm.defaultL3NetworkUuid
    vr = test_lib.lib_find_vr_by_pri_l3(default_l3_uuid)
    nic = test_lib.lib_get_vm_nic_by_vr(vm, vr)
    test_util.test_logger("Begin to check [vm:] %s DNS setting" % vm.uuid)
    if not 'DNS' in test_lib.lib_get_l3_service_type(nic.l3NetworkUuid):
        # no DNS service on this L3 -> nothing to verify
        test_util.test_logger(
            'Checker result: SKIP DNS checker, since VM [VR:] %s does not provide DNS service. '
            % vr.uuid)
        return self.judge(self.exp_result)
    command = 'cat /etc/resolv.conf'
    cmd_result = test_lib.lib_ssh_vm_cmd_by_agent_with_retry(
        host.managementIp, nic.ip, test_lib.lib_get_vm_username(vm),
        test_lib.lib_get_vm_password(vm), command, self.exp_result)
    if not cmd_result:
        test_util.test_logger(
            'Checker result: FAIL to execute test ssh command in test [vm:] %s throught [host:] %s.'
            % (vm.uuid, host.name))
        return self.judge(False)
    # the VR's private IP should be listed as a nameserver
    vr_guest_ip = test_lib.lib_find_vr_private_ip(vr)
    if isinstance(cmd_result, str) and vr_guest_ip in cmd_result:
        test_util.test_logger(
            'Checker result: VR [IP:] %s is SUCCESSFULLY set in guest [vm:] %s /etc/resolv.conf. '
            % (vr_guest_ip, vm.uuid))
    else:
        test_util.test_logger(
            'Checker result: VR [IP:] %s is NOT set in guest [vm:] %s /etc/resolv.conf'
            % (vr_guest_ip, vm.uuid))
        return self.judge(False)
    # when the L3 configures a search domain, it must also show up
    l3_inv = test_lib.lib_get_l3_by_uuid(default_l3_uuid)
    if l3_inv.domainName:
        if not l3_inv.domainName in cmd_result:
            test_util.test_logger(
                'Checker result: L3: %s, Domain Name: %s is NOT set in guest [vm:] %s /etc/resolv.conf'
                % (l3_inv.uuid, l3_inv.domainName, vm.uuid))
            return self.judge(False)
        else:
            test_util.test_logger(
                'Checker result: L3: %s, Domain Name: %s is set in guest [vm:] %s /etc/resolv.conf'
                % (l3_inv.uuid, l3_inv.domainName, vm.uuid))
    return self.judge(True)
def migrate_vm_to_random_host(vm):
    """Migrate pf_vm to a randomly chosen host and verify it landed there.

    :param vm: test VM wrapper (vm.vm is the VM inventory)
    Fails the test when no destination host exists or the VM ends up on a
    host other than the chosen target.
    """
    test_util.test_dsc("migrate pf_vm to random host")
    target_host = test_lib.lib_find_random_host(vm.vm)
    # record the source host before migrating so it can be reported;
    # the original referenced an undefined `current_host` (NameError)
    current_host = test_lib.lib_find_host_by_vm(vm.vm)
    vm.migrate(target_host.uuid)
    new_host = test_lib.lib_get_vm_host(vm.vm)
    if not new_host:
        test_util.test_fail('Not find available Hosts to do migration')
    if new_host.uuid != target_host.uuid:
        # original format string had 3 placeholders for 4 arguments (TypeError);
        # the vm uuid placeholder was missing
        test_util.test_fail('[vm:] %s did not migrate from [host:] %s to target [host:] %s, but to [host:] %s' % (vm.vm.uuid, current_host.uuid, target_host.uuid, new_host.uuid))
    else:
        test_util.test_logger('[vm:] %s has been migrated from [host:] %s to [host:] %s' % (vm.vm.uuid, current_host.uuid, target_host.uuid))
def test():
    """vCenter VM start/stop test.

    Adds a vCenter, creates a VM from an existing template, stops it,
    starts it again bound to an explicit host uuid, then destroys,
    expunges and removes the vCenter.
    """
    global vcenter_uuid, vm
    vcenter1_name = os.environ['vcenter2_name']
    vcenter1_domain_name = os.environ['vcenter2_ip']
    vcenter1_username = os.environ['vcenter2_domain_name']
    vcenter1_password = os.environ['vcenter2_password']
    ova_image_name = os.environ['vcenter2_template_exist']
    network_pattern1 = os.environ['vcenter2_network_pattern1']
    zone_uuid = res_ops.get_resource(res_ops.ZONE)[0].uuid
    inv = vct_ops.add_vcenter(vcenter1_name, vcenter1_domain_name, vcenter1_username, vcenter1_password, True, zone_uuid)
    vcenter_uuid = inv.uuid
    if vcenter_uuid == None:
        test_util.test_fail("vcenter_uuid is None")
    # this test supposes the user already deployed a cluster with more than
    # 2 hosts; the vm is created on one host, then started with a host
    # uuid, which can trigger a vm migration.
    vm = test_stub.create_vm_in_vcenter(vm_name='vm-start-stop-test', image_name=ova_image_name, l3_name=network_pattern1)
    vm.check()
    vm.stop()
    vm.check()
    vm_host_uuid = test_lib.lib_get_vm_host(vm.get_vm()).uuid
    host_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    host_invs = res_ops.query_resource_fields(res_ops.HOST, host_cond, None, fields=['uuid'])
    another_host_uuid = None
    for host_inv in host_invs:
        # query_resource_fields returns inventory objects; compare their
        # .uuid (the original compared the object itself to a uuid string,
        # which was always unequal, so the first host always matched)
        if host_inv.uuid != vm_host_uuid:
            another_host_uuid = host_inv.uuid
            break
    # NOTE(review): the intro comment suggests starting on another_host_uuid
    # to trigger migration, but the original starts on vm_host_uuid; kept
    # as-is — confirm intended behavior
    test_stub.start_vm_with_host_uuid(vm.get_vm(), vm_host_uuid)
    vm.check()
    vm.destroy()
    vm.check()
    vm.expunge()
    vct_ops.delete_vcenter(vcenter_uuid)
    test_util.test_pass("vm start and stop of vcenter test passed.")
def test():
    """Check zone CPU/memory capacity stays aligned across host reconnect,
    VM destroy, and VM re-create."""
    test_util.test_dsc('Test Host Reconnect function and check if the available CPU and memory number are aligned between before and after reconnect action')
    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    test_obj_dict.add_vm(vm)
    zone_uuid = vm.get_vm().zoneUuid
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    # baseline capacity with the VM running
    tot_res1 = test_lib.lib_get_cpu_memory_capacity([zone_uuid])
    host_ops.reconnect_host(host_uuid)
    # brief settle time after reconnect before re-reading capacity
    time.sleep(5)
    tot_res2 = test_lib.lib_get_cpu_memory_capacity([zone_uuid])
    if compare_capacity(tot_res1, tot_res2):
        test_util.test_logger("the resource consumption are same after reconnect host")
    else:
        test_util.test_fail("the resource consumption are different after reconnect host: %s " % host_uuid)
    # remember the VM's offering so the freed amount can be accounted for
    vm_offering_uuid = vm.get_vm().instanceOfferingUuid
    cond = res_ops.gen_query_conditions('uuid', '=', vm_offering_uuid)
    vm_offering = res_ops.query_resource(res_ops.INSTANCE_OFFERING, cond)[0]
    vm_cpu = vm_offering.cpuNum
    vm_memory = vm_offering.memorySize
    vm.destroy()
    test_obj_dict.rm_vm(vm)
    tot_res3 = test_lib.lib_get_cpu_memory_capacity([zone_uuid])
    # after destroy, capacity should differ by exactly the offering's cpu/mem
    if compare_capacity(tot_res1, tot_res3, vm_cpu, vm_memory):
        test_util.test_logger("the resource consumption are aligned after destroy a vm")
    else:
        test_util.test_fail("the resource consumption are not aligned after destroy vm: %s on host: %s" % (vm.get_vm().uuid, host_uuid))
    test_stub.ensure_hosts_connected(120)
    test_stub.ensure_pss_connected()
    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    test_obj_dict.add_vm(vm)
    tot_res4 = test_lib.lib_get_cpu_memory_capacity([zone_uuid])
    # a same-offering VM should bring capacity back to the baseline
    if compare_capacity(tot_res1, tot_res4):
        test_util.test_logger("the resource consumption are aligned after create a new vm")
    else:
        test_util.test_fail("the resource consumption are not aligned after create a new vm: %s " % vm.get_vm().uuid)
    vm.destroy()
    test_util.test_pass('Reconnect Host and Test CPU/Memory Capacity Pass')
def test():
    """Management-node host cold-shutdown HA test.

    Ensures the HA VM shares the host with the MN VM, cold-stops that
    host, then verifies the MN VM comes back on exactly one other host
    and the management server recovers.
    """
    global vm
    global ha_vm
    global mn_host
    ha_vm = test_stub.create_ha_vm()
    ha_vm.check()
    ha_vm_host = test_lib.lib_get_vm_host(ha_vm.vm)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(mn_host) != 1:
        test_util.test_fail('MN VM is running on %d host(s)' % len(mn_host))
    if ha_vm_host.managementIp != mn_host[0].ip_:
        # co-locate the HA VM with the MN VM's host before the shutdown
        conditions = res_ops.gen_query_conditions('managementIp', '=', mn_host[0].ip_)
        host = res_ops.query_resource(res_ops.HOST, conditions)
        # NOTE(review): migrate() is invoked on the host inventory object,
        # not on ha_vm — looks suspicious; confirm it should be ha_vm.migrate
        ha_vm_host.migrate(host[0].uuid)
    test_util.test_logger("force shutdown host [%s] that mn vm is running on" % (mn_host[0].ip_))
    test_stub.stop_host(mn_host[0], test_lib.all_scenario_config, 'cold')
    test_util.test_logger("wait for 20 seconds to see if management node VM starts on another host")
    time.sleep(20)
    # consul leader should have moved off the dead host by now
    new_mn_host_ip = test_stub.get_host_by_consul_leader(test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "" or new_mn_host_ip == mn_host[0].ip_:
        test_util.test_fail("management node VM not run correctly on [%s] after its former host [%s] down for 20s" % (new_mn_host_ip, mn_host[0].ip_))
    # poll up to 60 * 5s for the MN VM to settle on exactly one host
    count = 60
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger("management node VM run after its former host down for 30s")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail("management node VM runs on more than one host after its former host down")
        time.sleep(5)
        count -= 1
    if len(new_mn_host) == 0:
        test_util.test_fail("management node VM does not run after its former host down for 30s")
    elif len(new_mn_host) > 1:
        test_util.test_fail("management node VM runs on more than one host after its former host down")
    #node_ops.wait_for_management_server_start(300)
    test_stub.wrapper_of_wait_for_management_server_start(600)
    # the cold-stopped host is expected to be reported Connected again
    conditions = res_ops.gen_query_conditions('managementIp', '=', mn_host[0].ip_)
    host = res_ops.query_resource(res_ops.HOST, conditions)
    if host[0].status != "Connected":
        test_util.test_fail("Target host:%s is not connected as expected." % (host[0].uuid))
    ha_vm.destroy()
    test_util.test_pass('Create VM Test Success')
def migrate_vm_to_random_host(vm, timeout = None):
    """Migrate a VM to a randomly chosen host and verify it landed there.

    :param vm: test VM wrapper (vm.vm is the VM inventory)
    :param timeout: optional migration timeout passed through to vm.migrate
    Fails the test when no destination host exists or the VM ends up on a
    host other than the chosen target.
    """
    test_util.test_dsc("migrate vm to random host")
    target_host = test_lib.lib_find_random_host(vm.vm)
    current_host = test_lib.lib_find_host_by_vm(vm.vm)
    vm.migrate(target_host.uuid, timeout)
    new_host = test_lib.lib_get_vm_host(vm.vm)
    if not new_host:
        test_util.test_fail('Not find available Hosts to do migration')
    if new_host.uuid != target_host.uuid:
        # original format string had 3 placeholders for 4 arguments (TypeError);
        # the vm uuid placeholder was missing
        test_util.test_fail('[vm:] %s did not migrate from [host:] %s to target [host:] %s, but to [host:] %s' % (vm.vm.uuid, current_host.uuid, target_host.uuid, new_host.uuid))
    else:
        test_util.test_logger('[vm:] %s has been migrated from [host:] %s to [host:] %s' % (vm.vm.uuid, current_host.uuid, target_host.uuid))
def test():
    """Delete a data volume while all primary storages are disabled.

    The VM is expected to keep running in the disabled state; afterwards
    PSs are re-enabled and host/VR reconnected.
    """
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    vr_uuid = vr.uuid
    #l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    #volume_creation_option.set_system_tags(['ephemeral::shareable', 'capability::virtio-scsi'])
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()
    volume.attach(vm)
    #ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    #ps_uuid = ps.uuid
    #ps_ops.change_primary_storage_state(ps_uuid, 'disable')
    test_stub.disable_all_pss()
    # disabling a PS must not stop already-running VMs
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to running when PS change to disable state')
    vm.set_state(vm_header.RUNNING)
    vm.check()
    volume.detach(vm.get_vm().uuid)
    #ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    test_stub.enable_all_pss()
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)
    # deleting the detached volume while PSs are back online
    volume.delete()
    #volume.expunge()
    volume.check()
    vm.destroy()
    test_util.test_pass('Delete volume under PS disable mode Test Success')
def test():
    """Use (revert to) a root-volume snapshot while the PS is disabled.

    Creates a snapshot, disables the PS (VM must keep running), stops the
    VM, reverts to the snapshot, then re-enables the PS and reconnects
    host/VR.
    """
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    vr_uuid = vr.uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    root_volume_uuid = test_lib.lib_get_root_volume_uuid(vm.get_vm())
    test_util.test_dsc('create snapshot and check')
    snapshots = test_obj_dict.get_volume_snapshot(root_volume_uuid)
    snapshots.set_utility_vm(vm)
    vm.check()
    snapshots.create_snapshot('create_root_snapshot1')
    snapshots.check()
    snapshot1 = snapshots.get_current_snapshot()
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'disable')
    # disabling a PS must not stop already-running VMs
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail(
            'VM is expected to running when PS change to disable state')
    vm.set_state(vm_header.RUNNING)
    vm.check()
    vm.stop()
    vm.check()
    test_util.test_dsc('Use snapshot, volume and check')
    snapshots.use_snapshot(snapshot1)
    snapshots.check()
    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)
    vm.destroy()
    test_util.test_pass('PS disable mode Test Success')
def test():
    """Use (revert to) a root-volume snapshot while the PS is disabled.

    Creates a snapshot, disables the PS (VM must keep running), stops the
    VM, reverts to the snapshot, then re-enables the PS and reconnects
    host/VR.
    """
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    vr_uuid = vr.uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    root_volume_uuid = test_lib.lib_get_root_volume_uuid(vm.get_vm())
    test_util.test_dsc('create snapshot and check')
    snapshots = test_obj_dict.get_volume_snapshot(root_volume_uuid)
    snapshots.set_utility_vm(vm)
    vm.check()
    snapshots.create_snapshot('create_root_snapshot1')
    snapshots.check()
    snapshot1 = snapshots.get_current_snapshot()
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'disable')
    # disabling a PS must not stop already-running VMs
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to running when PS change to disable state')
    vm.set_state(vm_header.RUNNING)
    vm.check()
    vm.stop()
    vm.check()
    test_util.test_dsc('Use snapshot, volume and check')
    snapshots.use_snapshot(snapshot1)
    snapshots.check()
    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)
    vm.destroy()
    test_util.test_pass('PS disable mode Test Success')
def test():
    """Check zone CPU/memory capacity stays aligned across host reconnect,
    VM destroy, and VM re-create (no settle delay / reconnect-wait variant)."""
    test_util.test_dsc('Test Host Reconnect function and check if the available CPU and memory number are aligned between before and after reconnect action')
    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    test_obj_dict.add_vm(vm)
    zone_uuid = vm.get_vm().zoneUuid
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    # baseline capacity with the VM running
    tot_res1 = test_lib.lib_get_cpu_memory_capacity([zone_uuid])
    host_ops.reconnect_host(host_uuid)
    tot_res2 = test_lib.lib_get_cpu_memory_capacity([zone_uuid])
    if compare_capacity(tot_res1, tot_res2):
        test_util.test_logger("the resource consumption are same after reconnect host")
    else:
        test_util.test_fail("the resource consumption are different after reconnect host: %s " % host_uuid)
    vm_offering_uuid = vm.get_vm().instanceOfferingUuid
    cond = res_ops.gen_query_conditions('uuid', '=', vm_offering_uuid)
    vm_offering = res_ops.query_resource(res_ops.INSTANCE_OFFERING, cond)[0]
    # NOTE(review): here vm_cpu is cpuNum * cpuSpeed, while the sibling
    # variant of this test uses cpuNum alone — confirm which unit
    # compare_capacity expects
    vm_cpu = vm_offering.cpuNum * vm_offering.cpuSpeed
    vm_memory = vm_offering.memorySize
    vm.destroy()
    test_obj_dict.rm_vm(vm)
    tot_res3 = test_lib.lib_get_cpu_memory_capacity([zone_uuid])
    # after destroy, capacity should differ by exactly the offering's cpu/mem
    if compare_capacity(tot_res1, tot_res3, vm_cpu, vm_memory):
        test_util.test_logger("the resource consumption are aligned after destroy a vm")
    else:
        test_util.test_fail("the resource consumption are not aligned after destroy vm: %s on host: %s" % (vm.get_vm().uuid, host_uuid))
    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    test_obj_dict.add_vm(vm)
    tot_res4 = test_lib.lib_get_cpu_memory_capacity([zone_uuid])
    # a same-offering VM should bring capacity back to the baseline
    if compare_capacity(tot_res1, tot_res4):
        test_util.test_logger("the resource consumption are aligned after create a new vm")
    else:
        test_util.test_fail("the resource consumption are not aligned after create a new vm: %s " % vm.get_vm().uuid)
    vm.destroy()
    test_util.test_pass('Reconnect Host and Test CPU/Memory Capacity Pass')
def test():
    """Put all primary storages into maintain mode and verify the VM stops,
    then re-enable everything and bring the VM (and VRs) back up."""
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    #l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    #vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    #vr_uuid = vr.uuid
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    #ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    #ps_uuid = ps.uuid
    #ps_ops.change_primary_storage_state(ps_uuid, 'maintain')
    test_stub.maintain_all_pss()
    # maintain mode must stop running VMs
    if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to stop when PS change to maintain state')
    vm.set_state(vm_header.STOPPED)
    vm.check()
    #ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    test_stub.enable_all_pss()
    host_ops.reconnect_host(host_uuid)
    #vm_ops.reconnect_vr(vr_uuid)
    test_stub.ensure_pss_connected()
    # restart any VRs stopped by maintain mode and wait until Running
    vrs = test_lib.lib_get_all_vrs()
    for vr in vrs:
        vr_cond = res_ops.gen_query_conditions('uuid', '=', vr.uuid)
        vr_inv = res_ops.query_resource(res_ops.VM_INSTANCE, vr_cond)[0]
        if vr_inv.state == 'Stopped':
            vm_ops.start_vm(vr.uuid)
        else:
            test_lib.lib_wait_target_up(vr_inv.vmNics[0].ip, '22', 360)
        # poll up to 100 * 3s for the VR to report Running
        for _ in xrange(100):
            if res_ops.query_resource(res_ops.VM_INSTANCE, vr_cond)[0].state != 'Running':
                time.sleep(3)
            else:
                break
    vm.start()
    vm.check()
    vm.destroy()
    test_util.test_pass('PS maintain mode Test Success')
def check(self):
    """Verify the VM's default L3 network and its default route.

    Confirms the VM inventory's defaultL3NetworkUuid matches the creation
    option, finds the nic on that L3, then checks inside the guest that
    the default route points at that nic's gateway.
    """
    super(zstack_kvm_vm_default_l3_checker, self).check()
    vm = self.test_obj.vm
    default_l3_uuid \
        = self.test_obj.get_creation_option().get_default_l3_uuid()
    if vm.defaultL3NetworkUuid != default_l3_uuid:
        test_util.test_logger(
            'Checker Fail: VM: %s setting default l3 uuid: %s is different with the one in database: %s'
            % (vm.uuid, default_l3_uuid, vm.defaultL3NetworkUuid))
        return self.judge(False)
    # for-else: the else branch runs only when no nic sits on the default L3
    for vm_nic in vm.vmNics:
        if vm_nic.l3NetworkUuid == default_l3_uuid:
            gateway = vm_nic.gateway
            break
    else:
        test_util.test_logger(
            'Checker Fail: Did not find default l3: %s is belonged to any VM: %s vmNics: %s'
            % (default_l3_uuid, vm.uuid, vm.vmNics))
        return self.judge(False)
    test_lib.lib_install_testagent_to_vr(vm)
    host = test_lib.lib_get_vm_host(vm)
    test_lib.lib_install_testagent_to_host(host)
    test_lib.lib_set_vm_host_l2_ip(vm)
    nic = test_lib.lib_get_vm_nic_by_l3(vm, default_l3_uuid)
    # read the guest's default route table entry
    command = 'route|grep default'
    cmd_result = test_lib.lib_ssh_vm_cmd_by_agent_with_retry(
        host.managementIp, nic.ip, test_lib.lib_get_vm_username(vm),
        test_lib.lib_get_vm_password(vm), command, self.exp_result)
    if not cmd_result:
        test_util.test_logger(
            'Checker result: FAIL to execute test ssh command in test [vm:] %s throught [host:] %s.'
            % (vm.uuid, host.name))
        return self.judge(False)
    if isinstance(cmd_result, str) and gateway in cmd_result:
        test_util.test_logger(
            'Checker result: gateway %s is SUCCESSFULLY set in guest [vm:] %s default router. '
            % (gateway, vm.uuid))
        return self.judge(True)
    else:
        test_util.test_logger(
            'Checker result: gateway: %s is NOT set in guest [vm:] %s default router. The default route is : %s'
            % (gateway, vm.uuid, cmd_result))
        return self.judge(False)
def migrate_vm_to_random_host(vm):
    """Migrate a VM to a randomly chosen host and verify it landed there.

    Skips the test when the VM does not support live migration.
    :param vm: test VM wrapper (vm.vm is the VM inventory)
    Fails the test when no destination host exists or the VM ends up on a
    host other than the chosen target.
    """
    test_util.test_dsc("migrate vm to random host")
    if not test_lib.lib_check_vm_live_migration_cap(vm.vm):
        test_util.test_skip('skip migrate if live migrate not supported')
    target_host = test_lib.lib_find_random_host(vm.vm)
    current_host = test_lib.lib_find_host_by_vm(vm.vm)
    vm.migrate(target_host.uuid)
    new_host = test_lib.lib_get_vm_host(vm.vm)
    if not new_host:
        test_util.test_fail('Not find available Hosts to do migration')
    if new_host.uuid != target_host.uuid:
        # original format string had 3 placeholders for 4 arguments (TypeError);
        # the vm uuid placeholder was missing
        test_util.test_fail('[vm:] %s did not migrate from [host:] %s to target [host:] %s, but to [host:] %s' % (vm.vm.uuid, current_host.uuid, target_host.uuid, new_host.uuid))
    else:
        test_util.test_logger('[vm:] %s has been migrated from [host:] %s to [host:] %s' % (vm.vm.uuid, current_host.uuid, target_host.uuid))
def check(self):
    """Check a guest VM is directly reachable over ssh (no VR involved).

    Runs a no-op shell command inside the guest through the host's test
    agent and judges success by whether the command executed at all.
    """
    super(zstack_kvm_vm_ssh_no_vr_checker, self).check()
    vm = self.test_obj.vm
    host = test_lib.lib_get_vm_host(vm)
    # make sure the agent is present and the host can reach the VM's L2
    test_lib.lib_install_testagent_to_host(host)
    test_lib.lib_set_vm_host_l2_ip(vm)
    guest_ip = vm.vmNics[0].ip
    noop_command = 'exit 0'
    ssh_ok = test_lib.lib_ssh_vm_cmd_by_agent_with_retry(
        host.managementIp, guest_ip,
        test_lib.lib_get_vm_username(vm),
        test_lib.lib_get_vm_password(vm),
        noop_command, self.exp_result)
    if ssh_ok:
        test_util.test_logger('Checker result: Success to execute test ssh command in test [vm:] %s throught [host:] %s.' % (vm.uuid, host.name))
        return self.judge(True)
    test_util.test_logger('Checker result: FAIL to execute test ssh command in test [vm:] %s throught [host:] %s.' % (vm.uuid, host.name))
    return self.judge(False)
def test():
    """Reconnect the host of a freshly created VM within a 5000 ms timeout."""
    reconnect_limit_ms = 5000
    test_util.test_dsc('Test Host Reconnect within %s ms' % reconnect_limit_ms)
    # a VM is created first so the host carries a real workload
    test_vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    test_obj_dict.add_vm(test_vm)
    zone_uuid = test_vm.get_vm().zoneUuid
    vm_host = test_lib.lib_get_vm_host(test_vm.get_vm())
    vm_host_uuid = vm_host.uuid
    # the reconnect itself must finish inside the timeout or it raises
    host_ops.reconnect_host(vm_host_uuid, timeout=reconnect_limit_ms)
    test_vm.destroy()
    test_obj_dict.rm_vm(test_vm)
    test_util.test_pass('Reconnect Host within %s ms' % reconnect_limit_ms)
def get_ssh_ip_result(self):
    """SSH through the LB vip and record which backend VM answered.

    Runs '/sbin/ip a|grep inet' via the vip address; the first entry of
    vm_ip_test_dict whose ip appears in the output gets its hit counter
    incremented.
    :return: True when the ssh command produced output, False otherwise.
    """
    vm = self.vm_list[0]
    # NOTE(review): host is looked up but never used below — confirm
    host = test_lib.lib_get_vm_host(vm)
    vm_command = '/sbin/ip a|grep inet'
    vm_cmd_result = test_lib.lib_execute_ssh_cmd(self.vip_ip, \
                        test_lib.lib_get_vm_username(vm), \
                        test_lib.lib_get_vm_password(vm), \
                        vm_command)
    if not vm_cmd_result:
        test_util.test_logger('Checker result: FAIL to execute test ssh command in vip: %s for lb: %s.' % (self.vip_ip, self.lbl_uuid))
        return False
    # iterate keys only; the original fetched values via iteritems() and
    # never used them
    for key in self.vm_ip_test_dict:
        if key in vm_cmd_result:
            self.vm_ip_test_dict[key] += 1
            break
    return True
def check(self):
    """Judge a VM destroyed when the host agent reports it neither running
    nor stopped on its (former) host."""
    super(zstack_kvm_vm_destroyed_checker, self).check()
    vm = self.test_obj.vm
    host = test_lib.lib_get_vm_host(vm)
    # prepare agent access to the host before querying the VM status
    test_lib.lib_install_testagent_to_host(host)
    test_lib.lib_set_vm_host_l2_ip(vm)
    status_cmd = vm_plugin.VmStatusCmd()
    status_cmd.vm_uuids = [vm.uuid]
    test_util.test_logger('Check [vm:] %s status on host [name:] %s [uuid:] %s.' % (vm.uuid, host.name, host.uuid))
    raw_rsp = http.json_dump_post(testagent.build_http_path(host.managementIp, vm_plugin.VM_STATUS), status_cmd)
    vm_state = jsonobject.loads(raw_rsp).vm_status[vm.uuid].strip()
    # a destroyed VM is reported as anything other than running/stopped
    alive_states = (vm_plugin.VmAgent.VM_STATUS_RUNNING,
                    vm_plugin.VmAgent.VM_STATUS_STOPPED)
    if vm_state in alive_states:
        test_util.test_logger('Check result: [vm:] %s is NOT DESTROYED on [host:] %s . vm status is: %s' % (vm.uuid, host.name, vm_state))
        return self.judge(False)
    test_util.test_logger('Check result: [vm:] %s is DESTROYED on [host:] %s .' % (vm.uuid, host.name))
    return self.judge(True)
def test():
    """PS disable-mode test on a ceph-backed VM.

    Disables the primary storage, expects the VM to stop, migrates it to a
    random host, then re-enables the PS and reconnects host/VR. Skips when
    no ceph backup storage exists.
    """
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    vm = test_stub.create_vr_vm('vm1', 'imageName_net', 'l3VlanNetwork3')
    test_obj_dict.add_vm(vm)
    backup_storage_list = test_lib.lib_get_backup_storage_list_by_vm(vm.vm)
    # for-else: skip unless at least one ceph backup storage is present
    for bs in backup_storage_list:
        if bs.type == inventory.CEPH_BACKUP_STORAGE_TYPE:
            break
    else:
        vm.destroy()
        test_util.test_skip('Not find ceph type backup storage.')
    l3_1_name = os.environ.get('l3VlanNetwork3')
    l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    vr_uuid = vr.uuid
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'disable')
    if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail(
            'VM is expected to stop when PS change to disable state')
    vm.set_state(vm_header.STOPPED)
    vm.check()
    test_stub.migrate_vm_to_random_host(vm)
    vm.check()
    # the original called volume.check() on an undefined name (NameError);
    # no data volume is created in this case, so that line is dropped
    # use the lowercase 'enable' state event, consistent with the
    # 'disable' call above (the original passed the state name 'Enabled')
    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)
    test_util.test_pass('PS disable mode Test Success')
def test():
    """Create an EIP for a VM, reconnect the host, and verify the vip is
    pingable while the VM lives and unreachable after the VM is destroyed."""
    test_util.test_dsc('Create test vm with EIP and check.')
    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    test_obj_dict.add_vm(vm)
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    pri_l3_name = os.environ.get('l3VlanNetworkName1')
    pri_l3_uuid = test_lib.lib_get_l3_by_name(pri_l3_name).uuid
    pub_l3_name = os.environ.get('l3PublicNetworkName')
    pub_l3_uuid = test_lib.lib_get_l3_by_name(pub_l3_name).uuid
    vm_nic = vm.vm.vmNics[0]
    vm_nic_uuid = vm_nic.uuid
    # vip on the public L3, then an EIP binding vip <-> vm nic
    vip = test_stub.create_vip('create_eip_test', pub_l3_uuid)
    test_obj_dict.add_vip(vip)
    eip = test_stub.create_eip('create eip test', vip_uuid=vip.get_vip().uuid, vnic_uuid=vm_nic_uuid, vm_obj=vm)
    vip.attach_eip(eip)
    vm.check()
    # the EIP rules must survive a host reconnect
    host_ops.reconnect_host(host_uuid)
    if not test_lib.lib_check_directly_ping(vip.get_vip().ip):
        test_util.test_fail('expected to be able to ping vip while it fail')
    vm.destroy()
    test_obj_dict.rm_vm(vm)
    # once the backing VM is gone the vip must stop answering
    if test_lib.lib_check_directly_ping(vip.get_vip().ip):
        test_util.test_fail(
            'not expected to be able to ping vip while it succeed')
    eip.delete()
    vip.delete()
    test_obj_dict.rm_vip(vip)
    test_util.test_pass('Create EIP for VM Success')
def test():
    """vCenter VM start/stop test.

    Adds a vCenter, creates a VM from an existing template, stops it,
    starts it again bound to an explicit host uuid, then destroys,
    expunges and removes the vCenter.
    """
    global vcenter_uuid, vm
    vcenter1_name = os.environ['vcenter2_name']
    vcenter1_domain_name = os.environ['vcenter2_ip']
    vcenter1_username = os.environ['vcenter2_domain_name']
    vcenter1_password = os.environ['vcenter2_password']
    ova_image_name = os.environ['vcenter2_template_exist']
    network_pattern1 = os.environ['vcenter2_network_pattern1']
    zone_uuid = res_ops.get_resource(res_ops.ZONE)[0].uuid
    inv = vct_ops.add_vcenter(vcenter1_name, vcenter1_domain_name, vcenter1_username, vcenter1_password, True, zone_uuid)
    vcenter_uuid = inv.uuid
    if vcenter_uuid == None:
        test_util.test_fail("vcenter_uuid is None")
    # this test supposes the user already deployed a cluster with more than
    # 2 hosts; the vm is created on one host, then started with a host
    # uuid, which can trigger a vm migration.
    vm = test_stub.create_vm_in_vcenter(vm_name = 'vm-start-stop-test', image_name = ova_image_name, l3_name = network_pattern1)
    vm.check()
    vm.stop()
    vm.check()
    vm_host_uuid = test_lib.lib_get_vm_host(vm.get_vm()).uuid
    host_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    host_invs = res_ops.query_resource_fields(res_ops.HOST, host_cond, None, fields=['uuid'])
    another_host_uuid = None
    for host_inv in host_invs:
        # query_resource_fields returns inventory objects; compare their
        # .uuid (the original compared the object itself to a uuid string,
        # which was always unequal, so the first host always matched)
        if host_inv.uuid != vm_host_uuid:
            another_host_uuid = host_inv.uuid
            break
    # NOTE(review): the intro comment suggests starting on another_host_uuid
    # to trigger migration, but the original starts on vm_host_uuid; kept
    # as-is — confirm intended behavior
    test_stub.start_vm_with_host_uuid(vm.get_vm(), vm_host_uuid)
    vm.check()
    vm.destroy()
    vm.check()
    vm.expunge()
    vct_ops.delete_vcenter(vcenter_uuid)
    test_util.test_pass("vm start and stop of vcenter test passed.")
def test():
    """Reconnect a VM's host within 30000 ms, retrying once on any failure."""
    reconnect_limit_ms = 30000
    test_util.test_dsc('Test Host Reconnect within %s ms' % reconnect_limit_ms)
    # a VM is created first so the host carries a real workload
    test_vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    test_obj_dict.add_vm(test_vm)
    zone_uuid = test_vm.get_vm().zoneUuid
    vm_host = test_lib.lib_get_vm_host(test_vm.get_vm())
    vm_host_uuid = vm_host.uuid
    # first failure is swallowed; a second failure propagates
    try:
        host_ops.reconnect_host(vm_host_uuid, timeout=reconnect_limit_ms)
    except:
        host_ops.reconnect_host(vm_host_uuid, timeout=reconnect_limit_ms)
    test_vm.destroy()
    test_obj_dict.rm_vm(test_vm)
    test_util.test_pass('Reconnect Host within %s ms' % reconnect_limit_ms)
def test():
    """PS disable-mode test on a ceph-backed VM.

    Disables the primary storage, expects the VM to stop, migrates it to a
    random host, then re-enables the PS and reconnects host/VR. Skips when
    no ceph backup storage exists.
    """
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    vm = test_stub.create_vr_vm('vm1', 'imageName_net', 'l3VlanNetwork3')
    test_obj_dict.add_vm(vm)
    backup_storage_list = test_lib.lib_get_backup_storage_list_by_vm(vm.vm)
    # for-else: skip unless at least one ceph backup storage is present
    for bs in backup_storage_list:
        if bs.type == inventory.CEPH_BACKUP_STORAGE_TYPE:
            break
    else:
        vm.destroy()
        test_util.test_skip('Not find ceph type backup storage.')
    l3_1_name = os.environ.get('l3VlanNetwork3')
    l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    vr_uuid = vr.uuid
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'disable')
    if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to stop when PS change to disable state')
    vm.set_state(vm_header.STOPPED)
    vm.check()
    test_stub.migrate_vm_to_random_host(vm)
    vm.check()
    # the original called volume.check() on an undefined name (NameError);
    # no data volume is created in this case, so that line is dropped
    # use the lowercase 'enable' state event, consistent with the
    # 'disable' call above (the original passed the state name 'Enabled')
    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)
    test_util.test_pass('PS disable mode Test Success')
def vm_check(self, test_result):
    """Check the VM's DHCP-assigned IP shows up in the guest's ifconfig.

    Connects to the guest through its vip over the host agent, runs
    /sbin/ifconfig and judges whether the nic's expected guest ip appears
    in the output.
    """
    vm = self.test_obj.vm
    test_util.test_logger("Begin to check VM DHCP in VM: %s" % vm.uuid)
    nic = test_lib.lib_get_nic_by_uuid(self.test_obj.get_creation_option().get_vm_nic_uuid())
    test_lib.lib_find_vr_by_vm(vm)
    guest_ip = nic.ip
    host = test_lib.lib_get_vm_host(vm)
    vm_command = '/sbin/ifconfig'
    vm_cmd_result = test_lib.lib_ssh_vm_cmd_by_agent_with_retry(host.managementIp, self.test_obj.vip.ip, test_lib.lib_get_vm_username(vm), test_lib.lib_get_vm_password(vm), vm_command)
    if not vm_cmd_result:
        test_util.test_logger('Checker result: FAIL to execute test ssh command in test [vm:] %s throught [host:] %s.' % (vm.uuid, host.name))
        return self.judge(False)
    if guest_ip in vm_cmd_result:
        test_util.test_logger('Checker result: guest [ip:] %s is SET in guest [vm:] %s.' % (guest_ip, vm.uuid))
    else:
        # ifconfig may lag the DHCP lease; the message records the raw output
        test_util.test_logger('Checker result: guest [ip:] %s is NOT found in guest [vm:] %s. \n It might be because the ifconfig is not reflect the ip address yet. \n The current ifconfig result is: %s' % (guest_ip, vm.uuid, vm_cmd_result))
        return self.judge(False)
    return self.judge(True)
def get_ssh_ip_result(self):
    """SSH through the LB vip and record which backend VM answered.

    Runs '/sbin/ip a|grep inet' via the vip address; the first entry of
    vm_ip_test_dict whose ip appears in the output gets its hit counter
    incremented.
    :return: True when the ssh command produced output, False otherwise.
    """
    vm = self.vm_list[0]
    # NOTE(review): host is looked up but never used below — confirm
    host = test_lib.lib_get_vm_host(vm)
    vm_command = '/sbin/ip a|grep inet'
    vm_cmd_result = test_lib.lib_execute_ssh_cmd(self.vip_ip, \
                        test_lib.lib_get_vm_username(vm), \
                        test_lib.lib_get_vm_password(vm), \
                        vm_command)
    if not vm_cmd_result:
        test_util.test_logger(
            'Checker result: FAIL to execute test ssh command in vip: %s for lb: %s.'
            % (self.vip_ip, self.lbl_uuid))
        return False
    # iterate keys only; the original fetched values via iteritems() and
    # never used them
    for key in self.vm_ip_test_dict:
        if key in vm_cmd_result:
            self.vm_ip_test_dict[key] += 1
            break
    return True
def test():
    """Reconnect a KVM host within ``recnt_timeout`` ms.

    First seeds the ebtables lock files on the host (some distros miss
    them and the reconnect flow trips over that), then issues the
    reconnect with a hard timeout. Both remote operations get one retry,
    matching the original flaky-ssh tolerance.
    """
    recnt_timeout = 30000
    test_util.test_dsc('Test Host Reconnect within %s ms' % recnt_timeout)
    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    test_obj_dict.add_vm(vm)
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    host_management_ip = host.managementIp
    # NOTE(fix): removed the unused local ``zone_uuid`` the original computed.

    def retry_once(action):
        # Best-effort: retry exactly once on failure. Narrowed from the
        # original bare ``except:`` so KeyboardInterrupt still propagates.
        try:
            action()
        except Exception:
            action()

    # Pre-create the ebtables lock files so the reconnect does not race on
    # them.
    cmd = "mkdir /var/lib/ebtables/; touch /var/lib/ebtables/lock; touch /run/ebtables.lock"
    retry_once(lambda: ssh.execute(cmd, host_management_ip, "root", "password", True, 22))
    retry_once(lambda: host_ops.reconnect_host(host_uuid, timeout=recnt_timeout))
    test_util.test_pass('Reconnect Host within %s ms' % recnt_timeout)
def test():
    """Create an EIP for a VM, verify vip reachability, and tear down.

    The vip must be pingable while the EIP is attached to a running VM,
    and must stop answering once the VM is destroyed.
    """
    test_util.test_dsc('Create test vm with EIP and check.')
    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    test_obj_dict.add_vm(vm)
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    # NOTE(fix): removed dead locals pri_l3_name/pri_l3_uuid — the original
    # looked up the private L3 uuid and never used it.
    pub_l3_name = os.environ.get('l3PublicNetworkName')
    pub_l3_uuid = test_lib.lib_get_l3_by_name(pub_l3_name).uuid

    vm_nic = vm.vm.vmNics[0]
    vm_nic_uuid = vm_nic.uuid
    vip = test_stub.create_vip('create_eip_test', pub_l3_uuid)
    test_obj_dict.add_vip(vip)
    eip = test_stub.create_eip('create eip test', vip_uuid=vip.get_vip().uuid,
                               vnic_uuid=vm_nic_uuid, vm_obj=vm)
    vip.attach_eip(eip)
    vm.check()

    # Reconnecting the host re-applies firewall/EIP rules; the vip must
    # still answer afterwards.
    host_ops.reconnect_host(host_uuid)
    if not test_lib.lib_check_directly_ping(vip.get_vip().ip):
        test_util.test_fail('expected to be able to ping vip while it fail')

    vm.destroy()
    test_obj_dict.rm_vm(vm)
    # With the backing VM gone, the vip must no longer respond.
    if test_lib.lib_check_directly_ping(vip.get_vip().ip):
        test_util.test_fail('not expected to be able to ping vip while it succeed')

    eip.delete()
    vip.delete()
    test_obj_dict.rm_vip(vip)
    test_util.test_pass('Create EIP for VM Success')
def test():
    """Disable then re-enable the host of a freshly created mini VM.

    Waits (up to 120s each way) for the host state to actually flip, and
    fails the case if either transition does not land.
    """
    test_util.test_dsc('Test Host Start/Stop function')
    cpu_count = 1
    mem_bytes = 1073741824  # 1G
    image_cond = res_ops.gen_query_conditions('name', '=', 'ttylinux')
    img_uuid = res_ops.query_resource(res_ops.IMAGE, image_cond)[0].uuid
    l3_uuid = res_ops.query_resource(res_ops.L3_NETWORK)[0].uuid
    vm = test_stub.create_mini_vm([l3_uuid], img_uuid,
                                  cpu_num=cpu_count, memory_size=mem_bytes)
    test_obj_dict.add_vm(vm)
    target_host_uuid = test_lib.lib_get_vm_host(vm.vm).uuid

    host_ops.change_host_state(target_host_uuid, 'disable')
    if not linux.wait_callback_success(is_host_disabled, target_host_uuid, 120):
        test_util.test_fail('host state is not changed to disabled')

    host_ops.change_host_state(target_host_uuid, 'enable')
    if not linux.wait_callback_success(is_host_enabled, target_host_uuid, 120):
        test_util.test_fail('host state is not changed to enabled')

    vm.destroy()
    test_obj_dict.rm_vm(vm)
    test_util.test_pass('Stop/Start Host Test Pass')
def test():
    """Exercise data volumes across two primary storages.

    Creates volumes on both PSes (the local-PS batch is skipped in a
    local+NFS environment), attaches/detaches them, cold-migrates the
    root and the eligible data volumes to a random host, re-attaches
    everything, and finally verifies each volume is bound to the VM.
    """
    env = test_stub.PSEnvChecker()
    is_local_nfs = env.is_local_nfs_env
    is_local_smp = env.is_local_smp_env
    first_ps, second_ps = env.get_two_ps()

    vm = test_stub.create_multi_vms(name_prefix='test-', count=1)[0]
    test_obj_dict.add_vm(vm)

    # In a local+NFS environment no volumes are created on the local PS.
    first_ps_volumes = []
    if not is_local_nfs:
        first_ps_volumes = test_stub.create_multi_volumes(
            count=VOLUME_NUMBER,
            ps=first_ps,
            host_uuid=test_lib.lib_get_vm_host(vm.get_vm()).uuid)
    second_ps_volumes = test_stub.create_multi_volumes(
        count=VOLUME_NUMBER,
        ps=second_ps,
        host_uuid=None if is_local_nfs else test_lib.lib_get_vm_host(vm.get_vm()).uuid)

    all_volumes = first_ps_volumes + second_ps_volumes
    for vol in all_volumes:
        test_obj_dict.add_volume(vol)

    for vol in all_volumes:
        vol.attach(vm)
        vol.check()
    vm.check()

    for vol in all_volumes:
        vol.detach()
        vol.check()
    vm.check()

    target_host = test_lib.lib_find_random_host(vm.get_vm())
    vm.stop()
    vm.check()

    # Cold-migrate the root volume, then the data volumes that support it.
    vol_ops.migrate_volume(
        test_lib.lib_get_root_volume(vm.get_vm()).uuid, target_host.uuid)
    for vol in first_ps_volumes:
        vol_ops.migrate_volume(vol.get_volume().uuid, target_host.uuid)
    if not (is_local_nfs or is_local_smp):
        for vol in second_ps_volumes:
            vol_ops.migrate_volume(vol.get_volume().uuid, target_host.uuid)

    for vol in all_volumes:
        vol.attach(vm)
        vol.check()
    vm.start()
    vm.check()

    for vol in all_volumes:
        assert vol.get_volume().vmInstanceUuid == vm.get_vm().uuid
    test_util.test_pass('Multi PrimaryStorage Test Pass')
def check(self):
    """Verify DHCP for the VM on every VR that serves it.

    Two checks per DHCP-providing VR:
      1. the VR's DHCP config contains the guest nic's mac and ip;
      2. '/sbin/ip a' inside the guest shows the expected ip, with one
         retry since the lease may not be applied yet.
    Returns self.judge(True) only if every applicable VR passes; any
    failure short-circuits with self.judge(False).
    """
    super(zstack_kvm_vm_dhcp_checker, self).check()
    vm = self.test_obj.vm
    # Test agents must be present on both the VR and the host before
    # remote commands can be issued through them.
    test_lib.lib_install_testagent_to_vr(vm)
    host = test_lib.lib_get_vm_host(vm)
    test_lib.lib_install_testagent_to_host(host)
    test_lib.lib_set_vm_host_l2_ip(vm)
    # Cached guest-side output: the in-guest ssh check is executed at most
    # once even when the VM is served by several VRs.
    vm_cmd_result = None
    vr_vms = test_lib.lib_find_vr_by_vm(vm)
    print('find %d vr vms.' % len(vr_vms))
    for vr_vm in vr_vms:
        test_util.test_logger(
            "Begin to check [vm:] %s DHCP binding setting in [VR:] %s"
            % (vm.uuid, vr_vm.uuid))
        nic = test_lib.lib_get_vm_nic_by_vr(vm, vr_vm)
        # Only VRs whose L3 network actually offers DHCP are relevant.
        if not 'DHCP' in \
                test_lib.lib_get_l3_service_type(nic.l3NetworkUuid):
            test_util.test_logger(
                "Skip [VR:] %s, since it doesn't provide DHCP service"
                % vr_vm.uuid)
            continue
        guest_ip = nic.ip
        guest_mac = nic.mac
        vr_ip = test_lib.lib_find_vr_mgmt_ip(vr_vm)
        # vyatta-based 'vrouter' appliances expose config via cli-shell-api;
        # other appliance VMs keep DHCP bindings in /etc/hosts.dhcp.
        if vr_vm.hasattr(
                'applianceVmType') and vr_vm.applianceVmType == 'vrouter':
            command = '/bin/cli-shell-api showCfg'
        else:
            command = 'cat /etc/hosts.dhcp'
        vr_cmd_result = test_lib.lib_execute_sh_cmd_by_agent_with_retry(
            vr_ip, command, self.exp_result)
        if not vr_cmd_result:
            test_util.test_logger(
                'Checker result: FAIL to execute shell commaond in [vm:] %s'
                % vr_vm.uuid)
            return self.judge(False)
        # A literal True means the agent ran but returned no text to parse.
        if vr_cmd_result == True:
            test_util.test_logger(
                'Checker result: FAIL to get ssh result in [vm:] %s'
                % vr_vm.uuid)
            return self.judge(False)
        if not guest_mac in vr_cmd_result or not guest_ip in vr_cmd_result:
            test_util.test_logger(
                'Checker result: [vm:] %s [mac:] %s is not found in [vr:] %s. VR ip/mac result is %s.'
                % (vm.uuid, guest_mac, vr_vm.uuid, vr_cmd_result))
            return self.judge(False)
        else:
            test_util.test_logger(
                'Checker result: [vm:] %s [mac:] %s is found in VR %s .'
                % (vm.uuid, guest_mac, vr_vm.uuid))
        test_util.test_logger("Begin to check VM DHCP in VM: %s" % vm.uuid)
        # Run the in-guest check only on the first DHCP-providing VR; the
        # cached output is reused for subsequent iterations.
        if not vm_cmd_result:
            vm_command = '/sbin/ip a'
            vm_cmd_result = test_lib.lib_ssh_vm_cmd_by_agent_with_retry(
                host.managementIp, nic.ip, test_lib.lib_get_vm_username(vm),
                test_lib.lib_get_vm_password(vm), vm_command,
                self.exp_result)
            if not vm_cmd_result:
                test_util.test_logger(
                    'Checker result: FAIL to execute test ssh command in test [vm:] %s throught [host:] %s.'
                    % (vm.uuid, host.name))
                return self.judge(False)
        if isinstance(vm_cmd_result, str) and guest_ip in vm_cmd_result:
            test_util.test_logger(
                'Checker result: guest [ip:] %s is SET in guest [vm:] %s.'
                % (guest_ip, vm.uuid))
        else:
            # The guest may not have applied its lease yet — retry once.
            test_util.test_logger(
                'Checker result: guest [ip:] %s is NOT found in guest [vm:] %s. \n Will Try again. It might be because the ifconfig is not reflect the ip address yet. \n The current ifconfig result is: %s'
                % (guest_ip, vm.uuid, vm_cmd_result))
            vm_cmd_result = test_lib.lib_ssh_vm_cmd_by_agent_with_retry(
                host.managementIp, nic.ip, test_lib.lib_get_vm_username(vm),
                test_lib.lib_get_vm_password(vm), vm_command,
                self.exp_result)
            if not vm_cmd_result:
                test_util.test_logger(
                    'Checker result: FAIL to execute test ssh command in test [vm:] %s throught [host:] %s.'
                    % (vm.uuid, host.name))
                return self.judge(False)
            if isinstance(vm_cmd_result, str) and guest_ip in vm_cmd_result:
                test_util.test_logger(
                    'Checker result: guest [ip:] %s is SET in guest [vm:] %s.'
                    % (guest_ip, vm.uuid))
            else:
                # Distinguish "ip really missing" from "result is not a
                # string at all" for the failure log.
                if not guest_ip in vm_cmd_result:
                    test_util.test_logger(
                        'Checker result: guest [ip:] %s is NOT found in guest [vm:] %s. The current ifconfig result is: %s'
                        % (guest_ip, vm.uuid, vm_cmd_result))
                else:
                    test_util.test_logger(
                        'vm_cmd_result: %s is not string type. It is: %s .'
                        % (vm_cmd_result, type(vm_cmd_result)))
                return self.judge(False)
    return self.judge(True)
def test():
    """Create 100 snapshots of a VM root volume concurrently and verify
    every snapshot exists — on disk where checkable, and in the database —
    then clean up.
    """
    global session_to
    global session_mc
    # Raise session limits so 100 parallel API calls do not exhaust them.
    session_to = con_ops.change_global_config('identity', 'session.timeout',
                                              '720000')
    session_mc = con_ops.change_global_config('identity',
                                              'session.maxConcurrent', '10000')
    test_util.test_dsc('Create test vm as utility vm')
    vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm)
    # use root volume to skip add_checking_point
    test_util.test_dsc('Use root volume for snapshot testing')
    root_volume_inv = test_lib.lib_get_root_volume(vm.get_vm())
    root_volume = zstack_volume_header.ZstackTestVolume()
    root_volume.set_volume(root_volume_inv)
    root_volume.set_state(volume_header.ATTACHED)
    root_volume.set_target_vm(vm)
    test_obj_dict.add_volume(root_volume)
    vm.check()

    snapshots = test_obj_dict.get_volume_snapshot(
        root_volume.get_volume().uuid)
    snapshots.set_utility_vm(vm)

    # Fire all snapshot creations concurrently, then wait for every worker
    # thread to finish (only the main thread remains active).
    ori_num = 100
    for index in range(1, ori_num + 1):
        threading.Thread(target=create_snapshot,
                         args=(snapshots, index,)).start()
    while threading.activeCount() > 1:
        time.sleep(0.1)

    # snapshot.check() doesn't work for root volume, so verify install
    # paths directly where the PS type allows file-existence checks.
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    if ps.type == inventory.NFS_PRIMARY_STORAGE_TYPE \
            or ps.type == inventory.LOCAL_STORAGE_TYPE:
        host = test_lib.lib_get_vm_host(vm.get_vm())
        for snapshot in snapshots.get_snapshot_list():
            snapshot_inv = snapshot.get_snapshot()
            sp_ps_install_path = snapshot_inv.primaryStorageInstallPath
            if test_lib.lib_check_file_exist(host, sp_ps_install_path):
                test_util.test_logger(
                    'Check result: snapshot %s is found in host %s in path %s'
                    % (snapshot_inv.name, host.managementIp,
                       sp_ps_install_path))
            else:
                test_lib.lib_robot_cleanup(test_obj_dict)
                test_util.test_fail(
                    'Check result: snapshot %s is not found in host %s in path %s'
                    % (snapshot_inv.name, host.managementIp,
                       sp_ps_install_path))
    else:
        test_util.test_logger(
            'Skip check file install path for %s primary storage' % (ps.type))

    # Database must report exactly ori_num snapshots for the volume.
    cond = res_ops.gen_query_conditions('volumeUuid', '=',
                                        root_volume.get_volume().uuid)
    sps_num = res_ops.query_resource_count(res_ops.VOLUME_SNAPSHOT, cond)
    if sps_num != ori_num:
        test_util.test_fail(
            'Create %d snapshots, but only %d snapshots were successfully created'
            % (ori_num, sps_num))
    try:
        test_lib.lib_robot_cleanup(test_obj_dict)
    except Exception:
        # BUG FIX: the original called test_lib.test_logger, which does not
        # exist — test_logger lives in test_util (as used everywhere else in
        # this file), so the handler itself would have raised.
        test_util.test_logger('Delete VM may timeout')
    test_util.test_pass('Test create 100 snapshots simultaneously success')