def test():
    """Resize a VM's root volume offline (stopped) and then online (running).

    Fix: removed unused locals `vm_creation_option` and `vol_size`.
    """
    global vm
    image_name = os.environ.get('imageName_net')
    l3_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vm("test_resize_vm", image_name, l3_name)
    vm.check()

    # Offline resize: 5 GiB while the VM is stopped.
    vm.stop()
    vm.check()
    volume_uuid = test_lib.lib_get_root_volume(vm.get_vm()).uuid
    set_size = 1024 * 1024 * 1024 * 5
    vol_ops.resize_volume(volume_uuid, set_size)
    vm.update()
    vol_size_after = test_lib.lib_get_root_volume(vm.get_vm()).size
    if set_size != vol_size_after:
        test_util.test_fail('Resize Root Volume failed, size = %s' % vol_size_after)

    # Online resize: grow to 6 GiB while the VM is running.
    vm.start()
    set_size = 1024 * 1024 * 1024 * 6
    vol_ops.resize_volume(volume_uuid, set_size)
    vm.update()
    vol_size_after = test_lib.lib_get_root_volume(vm.get_vm()).size
    if set_size != vol_size_after:
        test_util.test_fail('Resize Root Volume failed, size = %s' % vol_size_after)

    vm.destroy()
    test_util.test_pass('Resize VM Test Success')
def execute_shell_in_process(cmd, tmp_file, timeout = 1200, no_timeout_excep = False):
    """Run *cmd* via /bin/sh, teeing stdout/stderr into *tmp_file*.

    Polls the child once per second. On timeout the child is killed and its
    log is dumped; the function then either returns 1 (no_timeout_excep=True)
    or fails the test. Otherwise returns the child's return code.
    """
    logfd = open(tmp_file, 'w', 0)  # unbuffered so logs survive a kill
    process = subprocess.Popen(cmd, executable='/bin/sh', shell=True,
                               stdout=logfd, stderr=logfd,
                               universal_newlines=True)
    start_time = time.time()
    while process.poll() is None:
        curr_time = time.time()
        test_time = curr_time - start_time
        if test_time > timeout:
            process.kill()
            logfd.close()
            logfd = open(tmp_file, 'r')
            test_util.test_logger('[shell:] %s [timeout logs:] %s' % (cmd, '\n'.join(logfd.readlines())))
            logfd.close()
            if no_timeout_excep:
                test_util.test_logger('[shell:] %s timeout, after %d seconds' % (cmd, test_time))
                return 1
            else:
                os.system('rm -f %s' % tmp_file)
                test_util.test_fail('[shell:] %s timeout, after %d seconds' % (cmd, timeout))
        # Bug fix: test_time is a float, so `test_time % 10 == 0` was almost
        # never true and the progress line never printed; compare on whole
        # seconds instead.
        if int(test_time) % 10 == 0:
            print('shell script used: %ds' % int(test_time))
        time.sleep(1)
    logfd.close()
    logfd = open(tmp_file, 'r')
    test_util.test_logger('[shell:] %s [logs]: %s' % (cmd, '\n'.join(logfd.readlines())))
    logfd.close()
    return process.returncode
def test_scp_vm_inbound_speed(vm_inv, bandwidth):
    '''Check inbound network QOS by scp-ing a file sized to need TEST_TIME seconds.

    bandwidth unit is KB
    '''
    timeout = TEST_TIME + 30
    vm_ip = vm_inv.vmNics[0].ip

    # Build a sparse file of bandwidth * TEST_TIME KB.
    file_size = bandwidth * TEST_TIME
    seek_size = file_size / 1024 - 1
    os.system('dd if=/dev/zero of=%s bs=1M count=1 seek=%d' % (test_file, seek_size))

    scp_cmd = ('scp -oStrictHostKeyChecking=no -oCheckHostIP=no '
               '-oUserKnownHostsFile=/dev/null %s %s:/dev/null') % (test_file, vm_ip)
    # NOTE(review): execute_shell_in_process is called here with (cmd, timeout)
    # -- assumes the local helper takes timeout as its second argument; confirm.
    start_time = time.time()
    if execute_shell_in_process(scp_cmd, timeout) != 0:
        test_util.test_fail('scp test file failed')
    end_time = time.time()
    os.system('rm -f %s' % test_file)

    scp_time = end_time - start_time
    if scp_time < TEST_TIME:
        test_util.test_fail('network inbound QOS test file failed, since the scp time: %d is smaller than the expected test time: %d. It means the bandwidth limitation: %d KB/s is not effect. ' % (scp_time, TEST_TIME, bandwidth))
    else:
        test_util.test_logger('network inbound QOS test file pass, since the scp time: %d is bigger than the expected test time: %d. It means the bandwidth limitation: %d KB/s is effect. ' % (scp_time, TEST_TIME, bandwidth))
    return True
def test():
    """Add a vCenter, create a VM on it, exercise start/stop, then clean up."""
    global vcenter_uuid, vm
    # NOTE(review): the env-var mapping below looks shuffled
    # (domain_name <- vcenter2_ip, username <- vcenter2_domain_name);
    # preserved as-is -- confirm against the test config.
    vcenter1_name = os.environ['vcenter2_name']
    vcenter1_domain_name = os.environ['vcenter2_ip']
    vcenter1_username = os.environ['vcenter2_domain_name']
    vcenter1_password = os.environ['vcenter2_password']
    ova_image_name = os.environ['vcenter2_template_exist']
    network_pattern1 = os.environ['vcenter2_network_pattern1']

    zone_uuid = res_ops.get_resource(res_ops.ZONE)[0].uuid
    inv = vct_ops.add_vcenter(vcenter1_name, vcenter1_domain_name,
                              vcenter1_username, vcenter1_password,
                              True, zone_uuid)
    vcenter_uuid = inv.uuid
    if vcenter_uuid == None:
        test_util.test_fail("vcenter_uuid is None")

    vm = test_stub.create_vm_in_vcenter(vm_name='vm-start-stop-test',
                                        image_name=ova_image_name,
                                        l3_name=network_pattern1)
    vm.check()
    vm.stop()
    vm.check()
    vm.start()
    vm.check()

    vm.destroy()
    vm.check()
    vm.expunge()
    vct_ops.delete_vcenter(vcenter_uuid)
    test_util.test_pass("vm start and stop of vcenter test passed.")
def test():
    """Toggle the VPC SNAT network service repeatedly and verify its state.

    Fix: `range(1, 100)` only iterated 99 times while the test description
    promises 100; use `range(100)`.
    """
    if "test-config-vpc-dns.xml" != os.path.basename(os.environ.get('WOODPECKER_TEST_CONFIG_FILE')).strip():
        test_util.test_skip('Skip test on test config except test-config-vpc-dns.xml')

    test_util.test_dsc("1. create vpc vrouter")
    vr = test_stub.create_vpc_vrouter(vpc_vr_name)
    vr_uuid = vr.inv.uuid
    vpc_l3_uuid = test_lib.lib_get_l3_by_name(vpc_l3_name).uuid

    test_util.test_dsc("2. attach vpc network to vpc router")
    test_stub.attach_l3_to_vpc_vr_by_uuid(vr, vpc_l3_uuid)

    test_util.test_dsc("3. disable and enable vpc snat service for 100 times")
    for i in range(100):
        vpc_ops.set_vpc_vrouter_network_service_state(vr_uuid, networkService='SNAT', state='disable')
        vpc_ops.set_vpc_vrouter_network_service_state(vr_uuid, networkService='SNAT', state='enable')
        serviceState = vpc_ops.get_vpc_vrouter_network_service_state(vr_uuid, networkService='SNAT')
        if serviceState.env.state != 'enable':
            test_util.test_fail("enable SNAT failed.")

    test_util.test_dsc("4. enable and disable vpc snat service for 100 times")
    for i in range(100):
        vpc_ops.set_vpc_vrouter_network_service_state(vr_uuid, networkService='SNAT', state='enable')
        vpc_ops.set_vpc_vrouter_network_service_state(vr_uuid, networkService='SNAT', state='disable')
        serviceState = vpc_ops.get_vpc_vrouter_network_service_state(vr_uuid, networkService='SNAT')
        if serviceState.env.state != 'disable':
            test_util.test_fail("disable SNAT failed.")
def test():
    """get_current_time must report a time inside the local sampling window.

    Fix: the original only passed when the reported seconds exactly equaled
    one of the two local samples; since time advances between samples, a
    correct value strictly between them caused a false failure. Accept any
    value within [system_time1, system_time2].
    """
    system_time1 = int(time.time())
    current_time = schd_ops.get_current_time().currentTime
    system_time2 = int(time.time())
    if not (system_time1 <= current_time.Seconds <= system_time2):
        test_util.test_fail('get_current_time not get expected time')
    test_util.test_pass('Create VM Test Success')
def test(): global vm test_util.test_dsc('create VM with setting password') for root_password in root_password_list: test_util.test_dsc("root_password: \"%s\"" %(root_password)) #vm = test_stub.create_vm(vm_name = 'c7-vm-no-sys-tag', image_name = "imageName_i_c7_no_tag", root_password=root_password) vm = test_stub.create_vm(vm_name = 'c7-vm-no-sys-tag', image_name = "imageName_i_c7_no_tag") backup_storage_list = test_lib.lib_get_backup_storage_list_by_vm(vm.vm) for bs in backup_storage_list: if bs.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE: break if bs.type == inventory.SFTP_BACKUP_STORAGE_TYPE: break if bs.type == inventory.CEPH_BACKUP_STORAGE_TYPE: break else: vm.destroy() test_util.test_skip('Not find image store type backup storage.') #if not test_lib.lib_check_login_in_vm(vm.get_vm(), "root", root_password): # test_util.test_fail("create vm with root password: %s failed", root_password) # stop vm && change vm password #vm.stop() vm.check() try: vm_ops.change_vm_password(vm.get_vm().uuid, "root", root_password) except Exception, e: if "CreateSystemTag" in str(e): test_util.test_pass("negative test of change a no system tag image passed.") else: test_util.test_fail("negative test failed with not expected log: %s", str(e))
def test():
    test_util.test_dsc(''' Test Description: Will create 1 VM with 3 l3 networks. 1 l3_network is not using VR; 1 l3_network is using novlan VR; 1 l3_network is using vlan VR. Resource required: Need support 3 VMs (1 test VM + 2 VR VMs) existing at the same time. This test required a special image, which was configed with at least 3 enabled NICs (e.g. eth0, eth1, eth2). ''')
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid

    # Collect the three L3 networks the VM should attach to.
    l3_net_list = []
    for env_key in ('l3VlanNetworkName1', 'l3VlanNetworkName3', 'l3VlanNetworkName4'):
        l3_name = os.environ.get(env_key)
        l3_net_list.append(test_lib.lib_get_l3_by_name(l3_name).uuid)

    vm = test_stub.create_vm(l3_net_list, image_uuid, '3_l3_vm')
    test_obj_dict.add_vm(vm)
    vm.check()

    if len(vm.vm.vmNics) == 3:
        test_util.test_logger("Find 3 expected Nics in new created VM.")
    else:
        test_util.test_fail("New create VM doesn't not have 3 Nics. It only have %s" % len(vm.get_vm().vmNics))

    vm.destroy()
    test_util.test_pass('Create 1 VM with 3 l3_network (1 vlan VR, 1 novlan VR and 1 no VR L3network) successfully.')
def test():
    # Validate ldapCleanBindingFilter: a binding matched by the clean filter
    # (uidNumber=1002) is removed by clean_invalid_ldap_binding and can no
    # longer log in, while an unmatched binding still can.
    global ldap_server_uuid
    global new_account_uuid
    global new_account_uuid2
    system_tags = ["ldapCleanBindingFilter::(uidNumber=1002)", "ldapUseAsLoginName::uid"]
    ldap_server = ldp_ops.add_ldap_server('ldap1', 'ldap for test', os.environ.get('ldapServerUrl'), os.environ.get('ldapServerBase'), os.environ.get('ldapServerUsername'), os.environ.get('ldapServerPassword'), 'None', system_tags)
    ldap_server_uuid = ldap_server.inventory.uuid
    conditions = res_ops.gen_query_conditions('type', '=', 'SystemAdmin')
    account = res_ops.query_resource(res_ops.ACCOUNT, conditions)[0]
    new_account = acc_ops.create_account('new_account', 'password', 'Normal')
    new_account_uuid = new_account.uuid
    ldap_account = ldp_ops.bind_ldap_account(os.environ.get('ldapDn'), new_account.uuid)
    ldap_account_uuid = ldap_account.inventory.uuid
    # The binding works before the clean-up.
    session_uuid = acc_ops.login_by_ldap(os.environ.get('ldapUid'), os.environ.get('ldapPassword'))
    acc_ops.logout(session_uuid)
    ldp_ops.clean_invalid_ldap_binding()
    # After cleaning, the filtered binding must no longer be able to log in.
    get_excepted_exception = False
    try:
        session_uuid = acc_ops.login_by_ldap(os.environ.get('ldapUid'), os.environ.get('ldapPassword'))
        acc_ops.logout(session_uuid)
    except:
        get_excepted_exception = True
    if not get_excepted_exception:
        test_util.test_fail('should not be able to login with filter account')
    # A second, unfiltered binding must still be able to log in.
    new_account2 = acc_ops.create_account('new_account2', 'password', 'Normal')
    new_account_uuid2 = new_account2.uuid
    ldap_account2 = ldp_ops.bind_ldap_account('uid=ldapuser3,ou=People,dc=mevoco,dc=com', new_account2.uuid)
    ldap_account_uuid2 = ldap_account2.inventory.uuid
    session_uuid2 = acc_ops.login_by_ldap('ldapuser3', 'password')
    # NOTE(review): this logs out session_uuid, not session_uuid2 -- looks
    # like a typo; confirm before changing.
    acc_ops.logout(session_uuid)
    # NOTE(review): the trailing triple quote below is unterminated in this
    # view -- the remainder of the block appears truncated.
    '''
def test():
    """Provision a virtual-BMC chassis with PXE stopped and verify the
    PxeBootFailed status, then restore PXE and clean up."""
    global vm
    pxe_name = os.environ.get('pxename')
    pxe_uuid = test_lib.lib_get_pxe_by_name(pxe_name).uuid

    # Back the chassis with a VM exposing a virtual BMC.
    vm = test_stub.create_vm()
    vm.check()
    test_stub.create_vbmc(vm=vm, port=6230)

    chassis = os.environ.get('ipminame')
    test_stub.create_chassis(chassis_name=chassis)
    test_stub.hack_ks(port=6230)
    chassis_uuid = test_lib.lib_get_chassis_by_name(chassis).uuid

    # Provision while PXE is stopped: chassis must report PxeBootFailed.
    bare_operations.provision_baremetal(chassis_uuid)
    bare_operations.stop_pxe(pxe_uuid)
    if not test_stub.verify_chassis_status(chassis_uuid, "PxeBootFailed"):
        test_util.test_fail('Chassis failed to get PxeBootFailed after the first provision')

    bare_operations.start_pxe(pxe_uuid)
    if test_lib.lib_get_pxe_by_name(pxe_name).status != "Running":
        test_util.test_fail('Fail to start PXE')

    test_stub.delete_vbmc(vm=vm)
    bare_operations.delete_chassis(chassis_uuid)
    vm.destroy()
    test_util.test_pass('Create chassis Test Success')
def test():
    """Create VMs with preset root passwords and verify login with each.

    Fix: `test_fail` was called with the format argument passed separately
    instead of interpolated with `%`.
    """
    global vm
    test_util.test_dsc('create VM with setting password')
    for root_password in root_password_list:
        test_util.test_dsc("root_password: \"%s\"" % (root_password))
        vm = test_stub.create_vm(vm_name='u13-vm', image_name="imageName_i_u13", root_password=root_password)

        # Require an image-store / sftp / ceph backup storage; otherwise skip.
        backup_storage_list = test_lib.lib_get_backup_storage_list_by_vm(vm.vm)
        for bs in backup_storage_list:
            if bs.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
                break
            if bs.type == inventory.SFTP_BACKUP_STORAGE_TYPE:
                break
            if bs.type == inventory.CEPH_BACKUP_STORAGE_TYPE:
                break
        else:
            vm.destroy()
            test_util.test_skip('Not find image store type backup storage.')

        if not test_lib.lib_check_login_in_vm(vm.get_vm(), "root", root_password):
            # Bug fix: interpolate the password into the single message string.
            test_util.test_fail("create vm with root password: %s failed" % root_password)
        vm.destroy()
        vm.check()
        vm.expunge()
        vm.check()
    test_util.test_pass('Set password when VM is creating is successful.')
def test():
    """Create a VM with an EIP and check VIP pingability across the VM lifecycle."""
    test_util.test_dsc('Create test vm with EIP and check.')
    pri_l3_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(pri_l3_name)
    test_obj_dict.add_vm(vm)
    pri_l3_uuid = test_lib.lib_get_l3_by_name(pri_l3_name).uuid

    pub_l3_name = os.environ.get('l3PublicNetworkName')
    pub_l3_uuid = test_lib.lib_get_l3_by_name(pub_l3_name).uuid
    nic_uuid = vm.vm.vmNics[0].uuid

    # VIP + EIP bound to the VM's first nic.
    vip = test_stub.create_vip('create_eip_test', pub_l3_uuid)
    test_obj_dict.add_vip(vip)
    eip = test_stub.create_eip('create eip test', vip_uuid=vip.get_vip().uuid, vnic_uuid=nic_uuid, vm_obj=vm)
    vip.attach_eip(eip)

    vm.check()
    if not test_lib.lib_check_directly_ping(vip.get_vip().ip):
        test_util.test_fail('expected to be able to ping vip while it fail')

    # After the VM is destroyed the VIP must stop answering.
    vm.destroy()
    test_obj_dict.rm_vm(vm)
    if test_lib.lib_check_directly_ping(vip.get_vip().ip):
        test_util.test_fail('not expected to be able to ping vip while it succeed')

    eip.delete()
    vip.delete()
    test_obj_dict.rm_vip(vip)
    test_util.test_pass('Create EIP for VM Success')
def _get(self, uri):
    """GET *uri* and return the parsed response; fail the test on non-zero status."""
    raw = json_post(uri=uri, headers={'charset': 'utf-8'}, method='GET', fail_soon=True)
    # The endpoint emits bare JSON nulls; quote them so jsonobject can load.
    rsp = jsonobject.loads(raw.replace('null', '"null"'))
    if rsp.status == 0:
        return rsp
    test_util.test_fail('URL request failed! uri: %s, reason: %s' % (uri, rsp.value.message))
def create_vm(vm_creation_option=None, volume_uuids=None, root_disk_uuid=None, \
        image_uuid=None, session_uuid=None):
    """Create a test VM.

    If *vm_creation_option* is omitted, a default option is built from the
    first instance offering, first non-ISO image and first L3 network.
    Explicit *volume_uuids*, *root_disk_uuid*, *image_uuid* and
    *session_uuid* override the option's corresponding fields.

    Fix: the default branch previously reassigned the *image_uuid* parameter,
    clobbering a caller-supplied image before the override below could apply
    it; use a local name for the queried default instead.
    """
    if not vm_creation_option:
        instance_offering_uuid = res_ops.get_resource(res_ops.INSTANCE_OFFERING, session_uuid)[0].uuid
        cond = res_ops.gen_query_conditions('mediaType', '!=', 'ISO')
        default_image_uuid = res_ops.query_resource(res_ops.IMAGE, cond, session_uuid)[0].uuid
        l3net_uuid = res_ops.get_resource(res_ops.L3_NETWORK, session_uuid)[0].uuid
        vm_creation_option = test_util.VmOption()
        vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
        vm_creation_option.set_image_uuid(default_image_uuid)
        vm_creation_option.set_l3_uuids([l3net_uuid])

    if volume_uuids:
        if isinstance(volume_uuids, list):
            vm_creation_option.set_data_disk_uuids(volume_uuids)
        else:
            test_util.test_fail('volume_uuids type: %s is not "list".' % type(volume_uuids))
    if root_disk_uuid:
        vm_creation_option.set_root_disk_uuid(root_disk_uuid)
    if image_uuid:
        vm_creation_option.set_image_uuid(image_uuid)
    if session_uuid:
        vm_creation_option.set_session_uuid(session_uuid)

    vm = test_vm.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    return vm
def test():
    """Snapshot a stopped VM's root volume, then resize it and verify the size.

    Fix: removed unused locals `vm_creation_option` and `vol_size`.
    """
    global vm
    image_name = os.environ.get('imageName_net')
    l3_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vm("test_resize_vm", image_name, l3_name)
    test_obj_dict.add_vm(vm)
    vm.check()
    vm.stop()
    vm.check()

    volume_uuid = test_lib.lib_get_root_volume(vm.get_vm()).uuid
    set_size = 1024 * 1024 * 1024 * 5

    # Snapshot first so the resize happens on a volume with history.
    snapshots = test_obj_dict.get_volume_snapshot(volume_uuid)
    snapshots.set_utility_vm(vm)
    snapshots.create_snapshot('create_snapshot1')
    snapshots.check()

    vol_ops.resize_volume(volume_uuid, set_size)
    vm.update()
    vol_size_after = test_lib.lib_get_root_volume(vm.get_vm()).size
    if set_size != vol_size_after:
        test_util.test_fail('Resize Root Volume failed, size = %s' % vol_size_after)

    snapshots.delete()
    test_obj_dict.rm_volume_snapshot(snapshots)
    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Resize VM Snapshot Test Success')
def test():
    """Resize a root volume, clone the VM, and verify the clone keeps the size.

    Fix: removed unused locals `vm_creation_option`, `vol_size` and
    `new_volume_uuid`.
    """
    if res_ops.query_resource(res_ops.SFTP_BACKUP_STORAGE):
        test_util.test_skip("sftp backupstorage doesn't support for clone test. Skip test")
    global vm
    image_name = os.environ.get('imageName_net')
    l3_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vm("test_resize_vm", image_name, l3_name)
    test_obj_dict.add_vm(vm)
    vm.check()

    volume_uuid = test_lib.lib_get_root_volume(vm.get_vm()).uuid
    set_size = 1024 * 1024 * 1024 * 5
    vol_ops.resize_volume(volume_uuid, set_size)
    vm.update()
    vol_size_after = test_lib.lib_get_root_volume(vm.get_vm()).size
    if set_size != vol_size_after:
        test_util.test_fail('Resize Root Volume failed, size = %s' % vol_size_after)

    # The clone's root volume must inherit the resized capacity.
    new_vm = vm.clone(['vm_clone'])[0]
    test_obj_dict.add_vm(new_vm)
    vol_size_after = test_lib.lib_get_root_volume(new_vm.get_vm()).size
    if set_size != vol_size_after:
        test_util.test_fail('Resize Root Volume failed, size = %s' % vol_size_after)

    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Resize VM Snapshot Test Success')
def test():
    """Resize a thin-provisioned root volume offline, then online.

    Fix: removed unused local `vol_size`.
    """
    global vm
    create_vm_option = test_util.VmOption()
    create_vm_option.set_rootVolume_systemTags(["volumeProvisioningStrategy::ThinProvisioning"])
    create_vm_option.set_name('test_resize_vm_root_volume')
    vm = test_lib.lib_create_vm(create_vm_option)
    vm.check()
    vm.stop()
    vm.check()

    volume_uuid = test_lib.lib_get_root_volume(vm.get_vm()).uuid

    # Offline resize: 5 GiB while stopped.
    set_size = 1024 * 1024 * 1024 * 5
    vol_ops.resize_volume(volume_uuid, set_size)
    vm.update()
    vol_size_after = test_lib.lib_get_root_volume(vm.get_vm()).size
    if set_size != vol_size_after:
        test_util.test_fail('Resize Root Volume failed, size = %s' % vol_size_after)

    # Online resize: 6 GiB while running.
    vm.start()
    set_size = 1024 * 1024 * 1024 * 6
    vol_ops.resize_volume(volume_uuid, set_size)
    vm.update()
    vol_size_after = test_lib.lib_get_root_volume(vm.get_vm()).size
    if set_size != vol_size_after:
        test_util.test_fail('Resize Root Volume failed, size = %s' % vol_size_after)

    vm.destroy()
    test_util.test_pass('Resize VM Test Success')
def test():
    """antiHard affinity: three VMs land on three distinct hosts; a fourth
    VM creation must fail."""
    ag1 = ag_ops.create_affinity_group(name="ag1", policy="antiHard")

    # Create three VMs, asserting pairwise host separation as we go.
    placed = []
    for _ in range(3):
        new_vm = test_stub.create_ag_vm(affinitygroup_uuid=ag1.uuid)
        test_obj_dict.add_vm(new_vm)
        for prev in placed:
            assert prev.get_vm().hostUuid != new_vm.get_vm().hostUuid
        placed.append(new_vm)

    # A fourth VM cannot be placed anywhere.
    vm4 = None
    try:
        vm4 = test_stub.create_ag_vm(affinitygroup_uuid=ag1.uuid)
    except:
        if not vm4:
            test_util.test_logger("vm4 isn't created as expected")
    finally:
        if vm4:
            test_util.test_fail("Test Fail, vm4 [uuid:%s] is not expected to be created" % vm4.get_vm().uuid)

    test_lib.lib_error_cleanup(test_obj_dict)
    ag_ops.delete_affinity_group(ag1.uuid)
    test_util.test_pass("Affinity Group antiHard policy pass")
def test():
    """Build a test ISO on the built-in HTTP server, add it as an image,
    export it, and verify the exported URL ends with .iso."""
    imagestore_backup_storage = test_lib.lib_get_image_store_backup_storage()
    if not imagestore_backup_storage:
        test_util.test_skip('Not find image store type backup storage.')

    img_option = test_util.ImageOption()
    img_option.set_name('iso')
    root_disk_uuid = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName')).uuid
    img_option.set_backup_storage_uuid_list([imagestore_backup_storage.uuid])

    server_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
    # Ensure genisoimage is present on the HTTP server, then build the ISO
    # under the static repo directory so the URL below resolves.
    if not test_lib.lib_execute_ssh_cmd(server_ip, 'root', 'password', "command -v genisoimage"):
        test_lib.lib_execute_ssh_cmd(server_ip, 'root', 'password',
                                     "yum -y install genisoimage --disablerepo=* --enablerepo=zstack-local")
    test_lib.lib_execute_ssh_cmd(server_ip, 'root', 'password',
                                 "genisoimage -o %s/apache-tomcat/webapps/zstack/static/zstack-repo/7/x86_64/os/test.iso /tmp/" % os.environ.get('zstackInstallPath'))
    img_option.set_url('http://%s:8080/zstack/static/zstack-repo/7/x86_64/os/test.iso' % (server_ip))

    image_inv = img_ops.add_iso_template(img_option)
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)
    test_obj_dict.add_image(image)

    image_url = image.export()
    image.delete_exported_image()
    test_lib.lib_robot_cleanup(test_obj_dict)

    if image_url.endswith('.iso'):
        test_util.test_pass('Export ISO Image Test Success')
    else:
        test_util.test_fail('Export ISO Image Test Fail, exported ISO image ends with %s' % (image_url.split('.')[-1]))
def test():
    """get_current_time must report a time inside the local sampling window.

    Fix: the original condition `system_time1 > X and system_time2 < X` can
    never be true (time is sampled before and after the call, so
    system_time1 <= system_time2), making the check dead code. Fail when the
    reported time falls outside [system_time1, system_time2].
    """
    system_time1 = int(time.time())
    current_time = schd_ops.get_current_time().currentTime
    system_time2 = int(time.time())
    if current_time.Seconds < system_time1 or current_time.Seconds > system_time2:
        test_util.test_fail('get_current_time not get expected time[%s, %s]: %s' % (system_time1, system_time2, current_time.Seconds))
    test_util.test_pass('Create VM Test Success')
def test():
    # Verify a VM is stopped when its primary storage enters maintain state,
    # that an ISO can still be attached while stopped, and that everything
    # recovers after the PS is re-enabled.
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    #l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    #vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    #vr_uuid = vr.uuid
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()

    test_util.test_dsc('Add ISO Image')
    cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bs_uuid = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0].uuid
    img_option = test_util.ImageOption()
    img_option.set_name('iso')
    img_option.set_backup_storage_uuid_list([bs_uuid])
    # Fabricate a tiny fake ISO under the management node's static dir so
    # the add-image URL below resolves.
    os.system("echo fake iso for test only > %s/apache-tomcat/webapps/zstack/static/test.iso" % (os.environ.get('zstackInstallPath')))
    img_option.set_url('http://%s:8080/zstack/static/test.iso' % (os.environ.get('node1Ip')))
    image_inv = img_ops.add_iso_template(img_option)
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)
    test_obj_dict.add_image(image)

    # Putting the PS into maintain state is expected to stop the VM.
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'maintain')
    if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to stop when PS change to maintain state')
    vm.set_state(vm_header.STOPPED)
    vm.check()

    test_util.test_dsc('Attach ISO to VM')
    cond = res_ops.gen_query_conditions('name', '=', 'iso')
    iso_uuid = res_ops.query_resource(res_ops.IMAGE, cond)[0].uuid
    img_ops.attach_iso(iso_uuid, vm.vm.uuid)

    # Recover: re-enable the PS, reconnect the host and restart all VRs.
    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    #vm_ops.reconnect_vr(vr_uuid)
    vrs = test_lib.lib_get_all_vrs()
    for vr in vrs:
        vm_ops.start_vm(vr.uuid)

    vm.start()
    vm.check()
    vm.destroy()
    vm.check()
    #vm.expunge()
    #vm.check()
    test_util.test_pass('PS maintain mode Test Success')
def test():
    """Power off a virtual-BMC chassis, then power-reset it back on."""
    global vm
    vm = test_stub.create_vm()
    vm.check()
    test_stub.create_vbmc(vm=vm, port=6230)

    chassis = os.environ.get('ipminame')
    test_stub.create_chassis(chassis_name=chassis)
    test_stub.hack_ks(port=6230)
    chassis_uuid = test_lib.lib_get_chassis_by_name(chassis).uuid

    bare_operations.power_off_baremetal(chassis_uuid)
    status = bare_operations.get_power_status(chassis_uuid)
    if status.status != "Chassis Power is off":
        test_util.test_fail('Failed to power off chassis')

    # Off confirmed; a reset must bring the chassis back on.
    bare_operations.power_reset_baremetal(chassis_uuid)
    status = bare_operations.get_power_status(chassis_uuid)
    if status.status != "Chassis Power is on":
        test_util.test_fail('Failed to power reset chassis')

    test_stub.delete_vbmc(vm=vm)
    bare_operations.delete_chassis(chassis_uuid)
    vm.destroy()
    test_util.test_pass('Test Power Reset Success')
def check_scheduler_state(schd, target_state):
    """Query the scheduler by uuid and fail unless its state is *target_state*."""
    cond = res_ops.gen_query_conditions('uuid', '=', schd.uuid)
    actual_state = res_ops.query_resource(res_ops.SCHEDULER, cond)[0].state
    if actual_state != target_state:
        test_util.test_fail('check scheduler state, it is expected to be %s, but it is %s' % (target_state, actual_state))
    return True
def test():
    """Full-clone a VM and verify the clone ends up with exactly one volume."""
    global test_obj_dict, bs, ps

    # Imagestore-only test.
    bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)
    for i in bs:
        if i.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
            break
    else:
        test_util.test_skip('Skip test on non-imagestore')

    # Not supported on SharedBlock / AliyunNAS primary storage.
    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
    for i in ps:
        if i.type in ['SharedBlock', 'AliyunNAS']:
            test_util.test_skip('Skip test on SharedBlock and PS')

    image_name = os.environ.get('imageName_s')
    l3_name = os.environ.get('l3PublicNetworkName')
    vm = test_stub.create_vm("test_vm", image_name, l3_name)
    #vm.check()
    test_obj_dict.add_vm(vm)

    new_vm = vm.clone(['test_vm_clone_with_on_data_volume'], full=True)[0]
    test_obj_dict.add_vm(new_vm)

    volumes_number = len(test_lib.lib_get_all_volumes(new_vm.vm))
    if volumes_number != 1:
        test_util.test_fail('Did not find 1 volumes for [vm:] %s. But we assigned 1 data volume when create the vm. We only catch %s volumes' % (new_vm.vm.uuid, volumes_number))
    else:
        test_util.test_logger('Find 1 volumes for [vm:] %s.' % new_vm.vm.uuid)

    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Test clone vm with one data volume Success')
def test():
    """Local-storage capacity must be unchanged after create + expunge of a VM.

    Fix: the post-expunge capacity was read from `host_res` (the pre-create
    snapshot) instead of `host_res2`, so the comparison could never fail.
    Also removed the unused `zone_uuid` local.
    """
    test_util.test_dsc('Test storage capacity when using expunge vm')
    if conf_ops.get_global_config_value('vm', 'deletionPolicy') != 'Delay':
        test_util.test_skip('vm delete_policy is not Delay, skip test.')
        return

    cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
    cond = res_ops.gen_query_conditions('status', '=', 'Connected', cond)
    host = res_ops.query_resource_with_num(res_ops.HOST, cond, limit=1)
    if not host:
        test_util.test_skip('No Enabled/Connected host was found, skip test.')
        return True
    ps = res_ops.query_resource_with_num(res_ops.PRIMARY_STORAGE, cond, limit=1)
    if not ps:
        test_util.test_skip('No Enabled/Connected primary storage was found, skip test.')
        return True
    host = host[0]
    ps = ps[0]

    host_res = vol_ops.get_local_storage_capacity(host.uuid, ps.uuid)[0]
    avail_cap = host_res.availableCapacity

    vm = test_stub.create_vm(vm_name='basic-test-vm', host_uuid=host.uuid)
    test_obj_dict.add_vm(vm)
    time.sleep(1)
    vm.destroy()
    vm.expunge()

    host_res2 = vol_ops.get_local_storage_capacity(host.uuid, ps.uuid)[0]
    avail_cap2 = host_res2.availableCapacity

    if avail_cap != avail_cap2:
        test_util.test_fail('PS capacity is not same after create/expunge vm on host: %s. Capacity before create vm: %s, after expunge vm: %s ' % (host.uuid, avail_cap, avail_cap2))
    test_util.test_pass('Expunge VM Test Success')
def test():
    """Add a raw root-volume template from a local file:// URL, boot a VM from
    it, and verify the source file survives the add-image operation."""
    os.system('dd if=/dev/zero of=%s bs=1M count=1 seek=300' % test_image)
    time.sleep(10)

    image_name = 'test-image-%s' % time.time()
    image_option = test_util.ImageOption()
    image_option.set_name(image_name)
    image_option.set_description('test image which is upload from local filesystem.')
    image_option.set_url('file://%s' % test_image)
    bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)[0]
    image_option.set_backup_storage_uuid_list([bs.uuid])
    image_option.set_format('raw')
    image_option.set_mediaType('RootVolumeTemplate')

    image_inv = img_ops.add_root_volume_template(image_option)
    time.sleep(10)
    image = zstack_image_header.ZstackTestImage()
    image.set_creation_option(image_option)
    image.set_image(image_inv)
    test_obj_dict.add_image(image)
    image.check()

    vm = test_stub.create_vm(image_name=image_name)
    vm.destroy()
    image.delete()

    # Adding an image must not consume/remove the source file.
    if not os.path.exists(test_image):
        test_util.test_fail('test image disappeared, after add image.')
    os.system('rm -f %s' % test_image)
    test_util.test_pass('Test adding image from local stroage pass.')
def test():
    """Reinit a VM back to its image and verify files created afterwards vanish."""
    vm = test_stub.create_user_vlan_vm()
    test_obj_dict.add_vm(vm)
    vm.check()
    vm_inv = vm.get_vm()
    vm_ip = vm_inv.vmNics[0].ip

    # Drop a marker file that must disappear after reinit.
    marker_cmd = 'touch /root/test-file-for-reinit'
    if test_lib.lib_execute_ssh_cmd(vm_ip, 'root', 'password', marker_cmd, 180) == False:
        test_util.test_fail('Fail to create file in VM')

    vm.stop()
    vm.reinit()
    vm.update()
    vm.check()
    vm.start()

    check_cmd = '[ -e /root/test-file-for-reinit ] && echo yes || echo no'
    result = test_lib.lib_execute_ssh_cmd(vm_ip, 'root', 'password', check_cmd, 180)
    # NOTE(review): assumes lib_execute_ssh_cmd returns the bare command
    # output 'yes' / 'no' without a trailing newline -- confirm.
    if result == 'yes':
        test_util.test_fail('VM does not be reverted to image used for creating the VM, the later file still exists')

    vm.destroy()
    test_util.test_pass('Re-init VM Test Success')
def ensure_storage_online(vm):
    """SSH into *vm* and verify the o2cb cluster stack reports storage online.

    Fix: the failure message referenced an undefined name `cmd`
    (NameError on the error path); bind the command string to a variable.
    """
    cmd = "o2cb.init status"
    ret, output, stderr = ssh.execute(cmd, vm.get_vm().vmNics[0].ip, "root", "password", False, 22)
    if ret != 0:
        test_util.test_fail(cmd + " failed")
    if "online" not in output.lower():
        test_util.test_fail("not found storage online")
def remove_vswitch(host, name=None):
    """Remove the virtual switch called *name* from *host*; fail the test if
    no busy NIC matches that name."""
    for nic in get_busy_nics(host):
        if ''.join(nic.values()) == name:
            get_host_networkSystem(host).RemoveVirtualSwitch(name)
            return
    test_util.test_fail("no vswicth named %s" % name)
def test():
    """VM must keep running while all primary storage is disabled; recover
    the environment (enable PS, reconnect host/VR) afterwards.

    Fix: removed a duplicated `lib_get_l3_by_name` lookup.
    """
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    vr_uuid = vr.uuid
    host = test_lib.lib_get_vm_host(vm.get_vm())
    host_uuid = host.uuid
    test_obj_dict.add_vm(vm)
    vm.check()

    #ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    #ps_uuid = ps.uuid
    #ps_ops.change_primary_storage_state(ps_uuid, 'disable')
    test_stub.disable_all_pss()
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to runnning when PS change to disable state')
    vm.set_state(vm_header.RUNNING)
    vm.check()
    vm.suspend()
    vm.check()

    #ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    test_stub.enable_all_pss()
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)
    test_util.test_pass('PS disable mode Test Success')
def test():
    # Kill / stop the VR agent on a virtual router (flavor-dependent) and
    # verify the management node notices the disconnect and auto-reconnects
    # the VR, each phase within 120 seconds.
    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    test_util.test_dsc('vr auto reconnection check test')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
    if not l3_1:
        test_util.test_skip('No network for vr auto reconnect test')
    #create VRs.
    vrs = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)
    if not vrs:
        # No VR yet: create (and discard) a VM just to spawn the router.
        image_name = os.environ.get('imageName_net')
        vm = test_stub.create_vr_vm('vm_for_vr', image_name, l3_1_name)
        vm.destroy()
        vr1 = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
    else:
        vr1 = vrs[0]
    resource_type = res_ops.VIRTUALROUTER_VM
    # Skip when the existing VR's type does not match the flavor under test.
    if flavor['virtualrouter']:
        if vr1.applianceVmType != "VirtualRouter":
            test_util.test_skip('No network for vr auto reconnect test')
    if flavor['vrouter']:
        if vr1.applianceVmType != "vrouter":
            test_util.test_skip('No network for vr auto reconnect test')
    test_lib.lib_install_testagent_to_vr_with_vr_vm(vr1)
    test_util.test_logger("kill vr agent on vr %s" % (vr1.uuid))
    # Pick the agent-stopping command for the flavor under test.
    # NOTE(review): for flavor 'vrouter' without 'kill' no cmd is assigned
    # (the else-branch is commented out), which would raise NameError below --
    # presumably that flavor combination is never configured; confirm.
    if flavor['virtualrouter']:
        if flavor['kill']:
            cmd = "pkill -9 -f 'from virtualrouter import virtualrouterdaemon'"
        else:
            cmd = "service zstack-virtualrouter stop"
    elif flavor['vrouter']:
        if flavor['kill']:
            cmd = "pkill -9 -f '/opt/vyatta/sbin/zvr -i'"
#        else:
#            cmd = "service zstack-imagestorebackupstorage stop"
    vr_ip = test_lib.lib_find_vr_pub_ip(vr1)
    if test_lib.lib_execute_sh_cmd_by_agent(vr_ip, cmd) == False:
        test_util.test_fail("CMD:%s execute failed on %s" % (cmd, vr_ip))
    test_util.test_logger("vr %s is expected to disconnect and start reconnect automatically" % (vr1.uuid))
    # Poll up to 24 * 5s = 120s for the VR to enter Connecting.
    conditions = res_ops.gen_query_conditions('uuid', '=', vr1.uuid)
    count = 0
    while count < 24:
        vr = res_ops.query_resource(resource_type, conditions)[0]
        if vr.status == "Connecting":
            break
        time.sleep(5)
        count += 1
    if vr.status != "Connecting":
        test_util.test_fail("vr %s is not disconnect and start reconnect automatically in 120 seconds" % (vr1.uuid))
    test_util.test_logger("vr %s is expected to reconnect success automatically" % (vr1.uuid))
    # Poll again (another 120s budget) for the VR to return to Connected.
    count = 0
    while count < 24:
        vr = res_ops.query_resource(resource_type, conditions)[0]
        if vr.status == "Connected":
            break
        time.sleep(5)
        count += 1
    if vr.status != "Connected":
        test_util.test_fail("vr %s not reconnect success automatically in 120 seconds" % (vr.uuid))
    test_util.test_pass("Auto reconnect backup storage pass")
def test():
    # Exercise the resource-stack (template) APIs end to end: preview,
    # create, query, fetch resources/events, then delete and verify removal.
    test_util.test_dsc("Test Resource template Apis")
    resource_stack_option = test_util.ResourceStackOption()
    resource_stack_option.set_name("test")
    # Minimal stack template: one InstanceOffering resource, one echo string
    # parameter, and an output referencing the offering.
    templateContent = '''
{
    "ZStackTemplateFormatVersion": "2018-06-18",
    "Description": "test",
    "Parameters": {
        "TestStringBasicEcho": {
            "Type": "String",
            "DefaultValue": "testonly"
        }
    },
    "Resources": {
        "InstanceOffering": {
            "Type": "ZStack::Resource::InstanceOffering",
            "Properties": {
                "name": "8cpu-8g",
                "cpuNum": 8,
                "memorySize": 8589934592
            }
        }
    },
    "Outputs": {
        "InstanceOffering": {
            "Value": {
                "Ref": "InstanceOffering"
            }
        }
    }
}
'''
    parameter = '''
{
    "TestStringBasicEcho": "Just a string Possiple"
}
'''
    resource_stack_option.set_templateContent(templateContent)
    resource_stack_option.set_parameters(parameter)
    preview_resource_stack = resource_stack_ops.preview_resource_stack(resource_stack_option)
    resource_stack = resource_stack_ops.create_resource_stack(resource_stack_option)

    # The created stack must be queryable.
    cond = res_ops.gen_query_conditions('uuid', '=', resource_stack.uuid)
    resource_stack_queried = res_ops.query_resource(res_ops.RESOURCE_STACK, cond)
    if len(resource_stack_queried) == 0:
        test_util.test_fail("Fail to query stack template")

    #Add a template via text.
    resource = resource_stack_ops.get_resource_from_resource_stack(resource_stack.uuid)
    print resource
    if resource == None:
        test_util.test_fail("Fail to get resource from resource_stack")
    print resource_stack.uuid

    # Stack creation must have produced events.
    cond = res_ops.gen_query_conditions('stackUuid', '=', resource_stack.uuid)
    event = res_ops.query_event_from_resource_stack(cond)
    print event
    if event == None:
        test_util.test_fail("Fail to get event from resource_stack")

    # Delete and verify the stack is gone.
    resource_stack_ops.delete_resource_stack(resource_stack.uuid)
    cond = res_ops.gen_query_conditions('uuid', '=', resource_stack.uuid)
    resource_stack_queried = res_ops.query_resource(res_ops.RESOURCE_STACK, cond)
    if len(resource_stack_queried) != 0:
        test_util.test_fail("Fail to query stack template")
    test_util.test_pass('Create Stack Template Test Success')
def test():
    """NeverStop HA on local storage: cut the chosen host's network, wait for
    the VM to be marked Unknown, restore the network, and require the VM to
    return to Running within the overall time budget.
    """
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout
    # Only meaningful for local storage; other PS types are tested elsewhere.
    allow_ps_list = [inventory.LOCAL_STORAGE_TYPE]
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")
    # This scenario expects exactly two primary storages to be deployed.
    test_lib.lib_skip_if_ps_num_is_not_eq_number(2)
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    test_lib.clean_up_all_vr()
    # Pick an enabled, connected host that is NOT the management node, so that
    # taking its network down does not also take the MN down.
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected',
                                              conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip,
                                              conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('ls_vm_ha_self_start')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    # Keep virtual routers off the target host so only the test VM is hit.
    test_stub.ensure_host_has_no_vr(host_uuid)
    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    test_util.test_logger("host %s is disconnecting" % (host_ip))
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
    test_stub.down_host_network(host_ip, test_lib.all_scenario_config)
    vm_stop_time = None
    cond = res_ops.gen_query_conditions('name', '=', 'ls_vm_ha_self_start')
    cond = res_ops.gen_query_conditions('uuid', '=', vm.vm.uuid, cond)
    # Phase 1: poll (1s interval) until the VM turns Unknown, then bring the
    # host network back.  NOTE(review): max_time is a module-level constant
    # defined outside this chunk — presumably the seconds budget; confirm.
    for i in range(0, max_time):
        vm_stop_time = i
        if res_ops.query_resource(res_ops.VM_INSTANCE,
                                  cond)[0].state == "Unknown":
            test_stub.up_host_network(host_ip, test_lib.all_scenario_config)
            break
        time.sleep(1)
    if vm_stop_time is None:
        vm_stop_time = max_time
    # Phase 2: within the remaining budget, HA must restart the VM.  The
    # for/else fires only when the loop exhausts without seeing Running.
    for i in range(vm_stop_time, max_time):
        if res_ops.query_resource(res_ops.VM_INSTANCE,
                                  cond)[0].state == "Running":
            break
        time.sleep(1)
    else:
        test_util.test_fail(
            "vm has not been changed to running as expected within %s s." %
            (max_time))
    vm.destroy()
    test_util.test_pass('Test VM ha change to running within %s s Success' %
                        (max_time))
if bs.type == inventory.CEPH_BACKUP_STORAGE_TYPE: break else: vm.destroy() test_util.test_skip('Not find image store type backup storage.') #if not test_lib.lib_check_login_in_vm(vm.get_vm(), "root", root_password): # test_util.test_fail("create vm with root password: %s failed", root_password) # stop vm && change vm password #vm.stop() vm.check() try: vm_ops.change_vm_password(vm.get_vm().uuid, "root", root_password) except Exception, e: test_util.test_pass( "negative test of change a no system tag image passed.") test_util.test_fail( 'negative test failed because no system tag image has been set vm password successfully, but it should be a failure.' ) #Will be called only if exception happens in test(). def error_cleanup(): global vm pass if vm: vm.destroy() vm.expunge()
def test():
    """OnHostFailure HA on shared storage: blackhole the host's management NIC
    long enough for self-fencing to shut the host down, then verify the VM was
    restarted on a different host, and finally recover the original host.
    """
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout
    # HA across hosts requires shared storage; skip on anything else.
    allow_ps_list = [
        inventory.CEPH_PRIMARY_STORAGE_TYPE,
        inventory.NFS_PRIMARY_STORAGE_TYPE, 'SharedMountPoint'
    ]
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    #l3_name = os.environ.get('l3NoVlanNetworkName1')
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    # Make sure all virtual routers of the L3 are up before creating the VM.
    vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    for vr in vrs:
        if test_lib.lib_is_vm_running(vr) != True:
            vm_ops.start_vm(vr.uuid)
    time.sleep(60)
    # Pick an enabled, connected host that is not the management node.
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected',
                                              conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip,
                                              conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    # Keep VRs off the victim host so only the test VM is affected.
    test_stub.ensure_host_has_no_vr(host_uuid)
    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    host_uuid = test_lib.lib_find_host_by_vm(vm.get_vm()).uuid
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "OnHostFailure")
    # Take the host's management interface down for 180s over SSH.  The self
    # fencer is expected to power the host off before the command completes,
    # so a successful SSH return actually means the test failed.
    l2_network_interface = os.environ.get('l2ManagementNetworkInterface')
    cmd = "ifconfig %s down && sleep 180 && ifconfig %s up" % (
        l2_network_interface, l2_network_interface)
    host_username = os.environ.get('hostUsername')
    host_password = os.environ.get('hostPassword')
    rsp = test_lib.lib_execute_ssh_cmd(host_ip, host_username, host_password,
                                       cmd, 180)
    if rsp:
        test_util.test_fail(
            "host is expected to shutdown after its network down for a while")
    else:
        test_util.test_logger("host may have been shutdown")
    # Give HA time to detect the failure and reschedule the VM elsewhere.
    test_util.test_logger("wait for 600 seconds")
    time.sleep(600)
    vm.update()
    if test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp == host_ip:
        test_util.test_fail("VM is expected to start running on another host")
    vm.set_state(vm_header.RUNNING)
    vm.check()
    vm.destroy()
    # Bring the powered-off host back via the recovery script, then reconnect.
    os.system('bash -ex %s %s' %
              (os.environ.get('hostRecoverScript'), host_ip))
    host_ops.reconnect_host(host_uuid)
    test_util.test_pass('Test VM ha on host failure Success')
def _wait_image_cache_removed(host, image_cache_path):
    """Poll (6 tries, 5s apart) until image_cache_path is gone on host; fail the test otherwise."""
    count = 0
    while True:
        if not test_lib.lib_check_file_exist(host, image_cache_path):
            break
        elif count > 5:
            test_util.test_fail('image cache is expected to be deleted')
        test_util.test_logger('check %s times: image cache still exist' %
                              (count))
        time.sleep(5)
        count += 1


def test():
    """Image-cache cleanup on local storage: create an image and a VM from it,
    migrate the root volume away, expunge the image, trigger the imagecache GC
    and verify both cache locations on the original host are purged.
    """
    global vm
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, \
            None, fields=['uuid'])
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")
    # Upload a throwaway root-volume template to seed the image cache.
    image_option = test_util.ImageOption()
    image_option.set_name('test_image_cache_cleanup')
    image_option.set_format('qcow2')
    image_option.set_mediaType('RootVolumeTemplate')
    image_option.set_url(os.environ.get('imageUrl_s'))
    image_option.set_backup_storage_uuid_list([bss[0].uuid])
    new_image = zstack_image_header.ZstackTestImage()
    new_image.set_creation_option(image_option)
    new_image.add_root_volume_template()
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected',
                                              conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option = test_util.VmOption()
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(new_image.image.uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('test_image_cache_cleanup_vm1')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    ps = test_lib.lib_get_primary_storage_by_uuid(
        vm.get_vm().allVolumes[0].primaryStorageUuid)
    if ps.type != inventory.LOCAL_STORAGE_TYPE:
        test_util.test_skip('Skip test on non-localstorage')
    test_obj_dict.add_vm(vm)
    vm.check()
    host = test_lib.lib_find_host_by_vm(vm.get_vm())
    target_host = test_lib.lib_find_random_host(vm.vm)
    # Move the (stopped) VM's root volume off the original host so its cache
    # entry there becomes garbage-collectable.
    vm.stop()
    vol_ops.migrate_volume(vm.get_vm().allVolumes[0].uuid, target_host.uuid)
    vm.check()
    vm.start()
    vm.check()
    # Remove the source image entirely, then ask the PS to GC its cache.
    new_image.delete()
    new_image.expunge()
    ps_ops.cleanup_imagecache_on_primary_storage(ps.uuid)
    # Both the qcow2 imagecache entry and the image-store cache entry on the
    # original host must disappear.  (Shared helper replaces two duplicated
    # polling loops.)
    _wait_image_cache_removed(
        host, "%s/imagecache/template/%s" % (ps.mountPath,
                                             new_image.image.uuid))
    _wait_image_cache_removed(
        host, "%s/zstore-cache/%s" % (ps.mountPath, new_image.image.uuid))
    vm.destroy()
    test_util.test_pass('Migrate VM Test Success')
def test():
    """Mass VM creation: spawn ZSTACK_TEST_NUM VMs concurrently (bounded by
    thread_threshold worker threads), each attached to three port-forwarding
    capable L3 networks, then verify the exact number of VMs was created.

    Session timeout/maxConcurrent are raised for the duration and restored at
    the end; originals are kept in module globals for the suite teardown.
    """
    global session_uuid
    global session_to
    global session_mc
    vm_num = os.environ.get('ZSTACK_TEST_NUM')
    if not vm_num:
        vm_num = 0
    else:
        vm_num = int(vm_num)
    # thread_threshold is a module-level value defined outside this chunk.
    test_util.test_logger('ZSTACK_THREAD_THRESHOLD is %d' % thread_threshold)
    test_util.test_logger('ZSTACK_TEST_NUM is %d' % vm_num)
    org_num = vm_num
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_s')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    # NOTE(review): these queries run before login_as_admin() below, so they
    # use whatever session_uuid currently holds (module global) — confirm.
    cond = res_ops.gen_query_conditions('networkServiceType', '=', \
            'PortForwarding')
    service_providers = res_ops.query_resource_fields(
        res_ops.NETWORK_SERVICE_PROVIDER_L3_REF, cond, session_uuid,
        ['l3NetworkUuid'], start = 0, limit = 2)
    l3_name = os.environ.get('l3VlanNetworkName1')
    conditions = res_ops.gen_query_conditions('name', '=', l3_name)
    l3_uuids = [res_ops.query_resource_with_num(res_ops.L3_NETWORK, \
            conditions, session_uuid, start = 0, limit = 1)[0].uuid]
    # Need the named L3 plus two more PortForwarding-capable L3s (3 total).
    if len(service_providers) < 2:
        test_util.test_fail('Do not find at least 3 L3 service provider could support port forwarding service')
    else:
        for sp in service_providers:
            l3_uuids.append(sp.l3NetworkUuid)
    vm_creation_option.set_l3_uuids(l3_uuids)
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    session_uuid = acc_ops.login_as_admin()
    # Raise account session limits so thousands of concurrent API calls fit;
    # the original values are restored before logout.
    session_to = con_ops.change_global_config('identity', 'session.timeout',
                                              '720000', session_uuid)
    session_mc = con_ops.change_global_config('identity',
                                              'session.maxConcurrent',
                                              '10000', session_uuid)
    vm_creation_option.set_session_uuid(session_uuid)
    vm = test_vm_header.ZstackTestVm()
    random_name = random.random()
    vm_name = 'multihost_basic_vm_%s' % str(random_name)
    vm_creation_option.set_name(vm_name)
    # NOTE(review): all worker threads share this single vm wrapper object;
    # presumably create_vm only reads the creation option — confirm.
    while vm_num > 0:
        check_thread_exception()
        vm.set_creation_option(vm_creation_option)
        vm_num -= 1
        thread = threading.Thread(target=create_vm, args=(vm,))
        # Throttle: never run more than thread_threshold threads at once.
        while threading.active_count() > thread_threshold:
            time.sleep(1)
        thread.start()
    # Wait for every worker thread to finish before counting results.
    while threading.active_count() > 1:
        time.sleep(0.01)
    cond = res_ops.gen_query_conditions('name', '=', vm_name)
    vms = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond,
                                       session_uuid)
    # Restore the original session limits and log out.
    con_ops.change_global_config('identity', 'session.timeout', session_to,
                                 session_uuid)
    con_ops.change_global_config('identity', 'session.maxConcurrent',
                                 session_mc, session_uuid)
    acc_ops.logout(session_uuid)
    if vms == org_num:
        test_util.test_pass('Create %d VMs Test Success' % org_num)
    else:
        test_util.test_fail('Create %d VMs Test Failed. Only find %d VMs.' %
                            (org_num, vms))
def test():
    """NeverStop HA versus a cold host stop: force-stop the scenario host the
    VM runs on, wait for the VM to be reported Stopped, recover the host, and
    require the VM to enter Starting within the 180s budget.
    """
    global vm
    global host_uuid
    global test_host
    global host_ip
    global max_attempts
    global storagechecker_timeout
    # Needs both local storage and NFS deployed in the environment.
    must_ps_list = [inventory.LOCAL_STORAGE_TYPE,
                    inventory.NFS_PRIMARY_STORAGE_TYPE]
    test_lib.skip_test_if_any_ps_not_deployed(must_ps_list)
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    test_lib.clean_up_all_vr()
    #vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    #vr_host_ips = []
    #for vr in vrs:
    #    vr_host_ips.append(test_lib.lib_find_host_by_vr(vr).managementIp)
    #    if test_lib.lib_is_vm_running(vr) != True:
    #        vm_ops.start_vm(vr.uuid)
    #time.sleep(60)
    # Choose an enabled, connected host that is not the management node.
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected',
                                              conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip,
                                              conditions)
    #for vr_host_ip in vr_host_ips:
    #    conditions = res_ops.gen_query_conditions('managementIp', '=', vr_host_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('ls_vm_ha_self_start')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    # The victim host must not also carry a VR, the MN, or the NFS server,
    # otherwise stopping it breaks more than the VM under test.
    vr_hosts = test_stub.get_host_has_vr()
    mn_hosts = test_stub.get_host_has_mn()
    nfs_hosts = test_stub.get_host_has_nfs()
    if not test_stub.ensure_vm_not_on(vm.get_vm().uuid, vm.get_vm().hostUuid,
                                      vr_hosts+mn_hosts+nfs_hosts):
        test_util.test_fail("Not find out a suitable host")
    host_uuid = test_lib.lib_find_host_by_vm(vm.get_vm()).uuid
    test_stub.ensure_all_vrs_on_host(host_uuid)
    #vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    #target_host_uuid = test_lib.lib_find_host_by_vm(vm.get_vm()).uuid
    #for vr in vrs:
    #    if test_lib.lib_find_host_by_vr(vr).managementIp != test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp:
    #        vm_ops.migrate_vm(vr.uuid, target_host_uuid)
    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    test_util.test_logger("host %s is disconnecting" %(host_ip))
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
    # Map the management IP back to the scenario-file host object so it can
    # be force ("cold") stopped at the scenario layer.
    host_list = test_stub.get_sce_hosts(test_lib.all_scenario_config,
                                        test_lib.scenario_file)
    for host in host_list:
        if host.ip_ == host_ip:
            test_host = host
            break
    if not test_host:
        test_util.test_fail('there is no host with ip %s in scenario file.'
                            %(host_ip))
    test_stub.stop_host(test_host, test_lib.all_scenario_config, 'cold')
    vm_stop_time = None
    cond = res_ops.gen_query_conditions('name', '=', 'ls_vm_ha_self_start')
    cond = res_ops.gen_query_conditions('uuid', '=', vm.vm.uuid, cond)
    # Phase 1: wait up to 180s for the VM to be reported Stopped, then bring
    # the host back, restore its vlan config and reconnect it.
    for i in range(0, 180):
        vm_stop_time = i
        if res_ops.query_resource(res_ops.VM_INSTANCE,
                                  cond)[0].state == "Stopped":
            test_stub.start_host(test_host, test_lib.all_scenario_config)
            test_stub.recover_host_vlan(test_host,
                                        test_lib.all_scenario_config,
                                        test_lib.deploy_config)
            conditions = res_ops.gen_query_conditions('managementIp', '=',
                                                      host_ip)
            kvm_host_uuid = res_ops.query_resource(res_ops.HOST,
                                                   conditions)[0].uuid
            host_ops.reconnect_host(kvm_host_uuid)
            break
        time.sleep(1)
    if vm_stop_time is None:
        vm_stop_time = 180
    # Phase 2: in the remaining budget HA must begin restarting the VM; the
    # for/else fires only if Starting is never observed.
    for i in range(vm_stop_time, 180):
        if res_ops.query_resource(res_ops.VM_INSTANCE,
                                  cond)[0].state == "Starting":
            break
        time.sleep(1)
    else:
        test_util.test_fail(
            "vm has not been changed to running as expected within 180s.")
    vm.destroy()
    test_util.test_pass(
        'Test checking VM ha and none status when force stop vm Success.')
def test():
    """Suite setup: deploy the nested-virt scenario (first run), prepare vlan
    devices and test agents on every host, deploy the HA environment and the
    initial database, then apply suite-wide global settings.

    First invocation (scenario file absent) only deploys the scenario and
    skips out; subsequent invocations perform the full setup.
    """
    if test_lib.scenario_config == None or test_lib.scenario_file == None:
        test_util.test_fail('Suite Setup Fail without scenario')
    # First run: deploy the scenario and stop here (test_skip raises, so
    # nothing below executes on this pass).
    if test_lib.scenario_config != None and test_lib.scenario_file != None and not os.path.exists(
            test_lib.scenario_file):
        scenario_operations.deploy_scenario(test_lib.all_scenario_config,
                                            test_lib.scenario_file,
                                            test_lib.deploy_config)
        test_util.test_skip('Suite Setup Success')
    if test_lib.scenario_config != None and test_lib.scenario_destroy != None:
        scenario_operations.destroy_scenario(test_lib.all_scenario_config,
                                             test_lib.scenario_destroy)
    # This vlan creation is not a must if testing is under a nested virt env,
    # but it is required on a physical host without enough physical network
    # devices when the test execution machine is not the Host machine.
    # Regardless, create 2 vlan devs for future no-vlan test case connections.
    linux.create_vlan_eth("eth0", 10)
    linux.create_vlan_eth("eth0", 11)
    # If the test execution machine is not the Host machine, deployment must
    # be split into 2 steps (deploy_test_agent, then
    # execute_plan_without_deploy_test_agent) rather than SetupAction.run().
    test_lib.setup_plan.deploy_test_agent()
    cmd = host_plugin.CreateVlanDeviceCmd()
    cmd.ethname = 'eth0'
    cmd.vlan = 10
    cmd2 = host_plugin.CreateVlanDeviceCmd()
    cmd2.ethname = 'eth0'
    cmd2.vlan = 11
    testHosts = test_lib.lib_get_all_hosts_from_plan()
    # lib_get_all_hosts_from_plan may return a single host; normalize to list.
    if type(testHosts) != type([]):
        testHosts = [testHosts]
    # Create both vlan devices on every host through the test agent.
    for host in testHosts:
        http.json_dump_post(
            testagent.build_http_path(host.managementIp_,
                                      host_plugin.CREATE_VLAN_DEVICE_PATH),
            cmd)
        http.json_dump_post(
            testagent.build_http_path(host.managementIp_,
                                      host_plugin.CREATE_VLAN_DEVICE_PATH),
            cmd2)
    # Deploy the ZStack HA environment (MN VM image, installer from env vars).
    config_json = os.environ.get('configJson')
    ha_deploy_tool = os.environ.get('zstackHaInstaller')
    mn_img = os.environ.get('mnImage')
    test_stub.deploy_ha_env(test_lib.all_scenario_config,
                            test_lib.scenario_file, test_lib.deploy_config,
                            config_json, ha_deploy_tool, mn_img)
    # Optional site-specific hooks.
    if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
        os.system("bash %s" % EXTRA_SUITE_SETUP_SCRIPT)
    deploy_operations.deploy_initial_database(test_lib.deploy_config,
                                              test_lib.all_scenario_config,
                                              test_lib.scenario_file)
    for host in testHosts:
        os.system("bash %s %s" % (EXTRA_HOST_SETUP_SCRIPT,
                                  host.managementIp_))
    # Loosen self-fencer settings and enable frequent imagecache GC for the
    # suite; reserve memory on hosts.
    if test_lib.lib_get_ha_selffencer_maxattempts() != None:
        test_lib.lib_set_ha_selffencer_maxattempts('60')
        test_lib.lib_set_ha_selffencer_storagechecker_timeout('60')
    test_lib.lib_set_primary_storage_imagecache_gc_interval(1)
    test_lib.lib_set_reserved_memory('8G')
    test_util.test_pass('Suite Setup Success')
def test():
    """MN-host failover: cold-stop half (+1) of the management-node hosts,
    verify the MN VM dies, recover one host, and verify exactly one MN VM
    comes back, the management server restarts, and a VM can be created.

    NOTE: currently disabled — the test_skip call below raises immediately,
    so everything after it is unreachable until the skip is removed.
    """
    global vm
    global mn_host_list
    global test_mn_host_list
    test_util.test_skip("2 hosts down at the same time is not support")
    test_stub.skip_if_scenario_is_multiple_networks()
    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config,
                                         test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    # Pick a majority of the MN hosts ((n+1)/2 is Py2 integer division).
    test_mn_host_list = random.sample(range(mn_host_num),
                                      (mn_host_num + 1) / 2)
    for index in test_mn_host_list:
        test_util.test_logger("force stop host [%s]" %
                              (mn_host_list[index].ip_))
        test_stub.stop_host(mn_host_list[index], test_lib.all_scenario_config,
                            'cold')
    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    # With a majority of MN hosts down, the MN VM must not be running anywhere.
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                          test_lib.scenario_file)
    if len(mn_host) != 0:
        test_util.test_fail('MN VM is still running on %d host(s)' %
                            len(mn_host))
    # Recover the last of the stopped hosts and expect the MN VM to restart.
    test_util.test_logger("recover host [%s]" %
                          (mn_host_list[test_mn_host_list[-1]]))
    test_stub.recover_host(mn_host_list[test_mn_host_list[-1]],
                           test_lib.all_scenario_config,
                           test_lib.deploy_config)
    test_mn_host_list.pop()
    test_util.test_logger(
        "wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)
    new_mn_host_ip = test_stub.get_host_by_consul_leader(
        test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail(
            "management node VM not run correctly on [%s] after its former host [%s] down for 20s"
            % (new_mn_host_ip, mn_host_list[0].ip_))
    # Poll up to 60*5s for exactly one MN VM host to reappear.
    count = 60
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(
            test_lib.all_scenario_config, test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger(
                "management node VM run after its former host down for 30s")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail(
                "management node VM runs on more than one host after its former host down"
            )
        time.sleep(5)
        count -= 1
    if len(new_mn_host) == 0:
        test_util.test_fail(
            "management node VM does not run after its former host down for 30s"
        )
    elif len(new_mn_host) > 1:
        test_util.test_fail(
            "management node VM runs on more than one host after its former host down"
        )
    test_util.test_logger(
        "wait for 5 minutes to see if management node starts again")
    #node_ops.wait_for_management_server_start(300)
    test_stub.wrapper_of_wait_for_management_server_start(600)
    # Sanity-check infrastructure connectivity, then prove the cloud works by
    # creating and destroying a basic VM.
    test_stub.ensure_hosts_connected()
    test_stub.ensure_bss_host_connected_from_stop(test_lib.scenario_file,
                                                  test_lib.all_scenario_config,
                                                  test_lib.deploy_config)
    test_stub.ensure_pss_connected()
    test_stub.ensure_bss_connected()
    vm = test_stub.create_basic_vm()
    vm.check()
    vm.destroy()
    test_util.test_pass('Create VM Test Success')
def exec_cmd_in_vm(vm, cmd, fail_msg):
    """Run cmd as root over SSH inside the test VM's first NIC address.

    Aborts the whole test case with fail_msg when the command exits non-zero;
    stdout/stderr are otherwise discarded.
    """
    guest_ip = vm.get_vm().vmNics[0].ip
    status, _, _ = ssh.execute(cmd, guest_ip, "root", "password", False, 22)
    if status != 0:
        test_util.test_fail(fail_msg)
def test():
    """Create 100 snapshots of a utility VM's root volume concurrently, verify
    each snapshot file exists on the primary storage (NFS/local only) and that
    exactly 100 snapshot records were created, then clean up.
    """
    global session_to
    global session_mc
    # Raise session limits so 100 concurrent snapshot calls do not exhaust
    # sessions; originals are kept in globals for the suite teardown.
    session_to = con_ops.change_global_config('identity', 'session.timeout',
                                              '720000')
    session_mc = con_ops.change_global_config('identity',
                                              'session.maxConcurrent',
                                              '10000')
    test_util.test_dsc('Create test vm as utility vm')
    vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm)
    #use root volume to skip add_checking_point
    test_util.test_dsc('Use root volume for snapshot testing')
    root_volume_inv = test_lib.lib_get_root_volume(vm.get_vm())
    root_volume = zstack_volume_header.ZstackTestVolume()
    root_volume.set_volume(root_volume_inv)
    root_volume.set_state(volume_header.ATTACHED)
    root_volume.set_target_vm(vm)
    test_obj_dict.add_volume(root_volume)
    vm.check()
    snapshots = test_obj_dict.get_volume_snapshot(
        root_volume.get_volume().uuid)
    snapshots.set_utility_vm(vm)
    # Fire 100 snapshot-creation threads (indexes 1..100), then wait for all
    # of them to finish.
    ori_num = 100
    index = 1
    while index < 101:
        thread = threading.Thread(target=create_snapshot, args=(
            snapshots,
            index,
        ))
        thread.start()
        index += 1
    while threading.activeCount() > 1:
        time.sleep(0.1)
    #snapshot.check() doesn't work for root volume
    #snapshots.check()
    #check if snapshot exists in install_path
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    if ps.type == inventory.NFS_PRIMARY_STORAGE_TYPE or ps.type == inventory.LOCAL_STORAGE_TYPE:
        host = test_lib.lib_get_vm_host(vm.get_vm())
        for snapshot in snapshots.get_snapshot_list():
            snapshot_inv = snapshot.get_snapshot()
            sp_ps_install_path = snapshot_inv.primaryStorageInstallPath
            if test_lib.lib_check_file_exist(host, sp_ps_install_path):
                test_util.test_logger(
                    'Check result: snapshot %s is found in host %s in path %s'
                    % (snapshot_inv.name, host.managementIp,
                       sp_ps_install_path))
            else:
                test_lib.lib_robot_cleanup(test_obj_dict)
                test_util.test_fail(
                    'Check result: snapshot %s is not found in host %s in path %s'
                    % (snapshot_inv.name, host.managementIp,
                       sp_ps_install_path))
    else:
        test_util.test_logger(
            'Skip check file install path for %s primary storage' % (ps.type))
    cond = res_ops.gen_query_conditions('volumeUuid', '=',
                                        root_volume.get_volume().uuid)
    sps_num = res_ops.query_resource_count(res_ops.VOLUME_SNAPSHOT, cond)
    if sps_num != ori_num:
        test_util.test_fail(
            'Create %d snapshots, but only %d snapshots were successfully created'
            % (ori_num, sps_num))
    try:
        test_lib.lib_robot_cleanup(test_obj_dict)
    except:
        # BUGFIX: was test_lib.test_logger, which does not exist (test_logger
        # lives in test_util) and would raise AttributeError out of this
        # best-effort cleanup handler.
        test_util.test_logger('Delete VM may timeout')
    test_util.test_pass('Test create 100 snapshots simultaneously success')
def test():
    """Full resource-stack lifecycle with real resources: create a stack that
    builds a VM + VIP + EIP, verify the resources/events exist, then delete
    the stack and verify everything is torn down.
    """
    test_util.test_dsc("Test Resource template Apis")
    # Pick a usable image (Ready/Enabled/non-system).
    cond = res_ops.gen_query_conditions('status', '=', 'Ready')
    cond = res_ops.gen_query_conditions('state', '=', 'Enabled', cond)
    cond = res_ops.gen_query_conditions('system', '=', 'false', cond)
    image_queried = res_ops.query_resource(res_ops.IMAGE, cond)
    # Public L3 for the VIP.
    cond = res_ops.gen_query_conditions("category", '=', "Public")
    l3_pub_queried = res_ops.query_resource(res_ops.L3_NETWORK, cond)
    # Private L3 that provides the EIP network service.
    cond = res_ops.gen_query_conditions("category", '=', "Private")
    # BUGFIX: the EIP condition previously did not pass `cond`, silently
    # discarding the Private-category filter; chain it like every other query.
    cond = res_ops.gen_query_conditions('networkServices.networkServiceType',
                                        '=', 'EIP', cond)
    l3_pri_queried = res_ops.query_resource(res_ops.L3_NETWORK, cond)
    cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
    cond = res_ops.gen_query_conditions('type', '=', 'UserVm', cond)
    instance_offering_queried = res_ops.query_resource(
        res_ops.INSTANCE_OFFERING, cond)
    resource_stack_option = test_util.ResourceStackOption()
    resource_stack_option.set_name("Create_STACK")
    resource_stack_option.set_rollback("true")
    # Template: a VM on the private L3, a VIP on the public L3, and an EIP
    # binding the VIP to the VM's first NIC.
    templateContent = '''
{
    "ZStackTemplateFormatVersion": "2018-06-18",
    "Description": "Just create a VM with eip",
    "Parameters": {
        "InstanceOfferingUuid": {
            "Type": "String",
            "Label": "vm instance offering"
        },
        "ImageUuid": {
            "Type": "String"
        },
        "PrivateNetworkUuid": {
            "Type": "String"
        },
        "PublicNetworkUuid": {
            "Type": "String"
        }
    },
    "Resources": {
        "VmInstance": {
            "Type": "ZStack::Resource::VmInstance",
            "Properties": {
                "name": {"Fn::Join":["-",[{"Ref":"ZStack::StackName"},"VM"]]},
                "instanceOfferingUuid": {"Ref":"InstanceOfferingUuid"},
                "imageUuid":{"Ref":"ImageUuid"},
                "l3NetworkUuids":[{"Ref":"PrivateNetworkUuid"}]
            }
        },
        "VIP": {
            "Type": "ZStack::Resource::Vip",
            "Properties": {
                "name": {"Fn::Join":["-",[{"Ref":"ZStack::StackName"},"VIP"]]},
                "l3NetworkUuid":{"Ref":"PublicNetworkUuid"}
            }
        },
        "EIP":{
            "Type": "ZStack::Resource::Eip",
            "Properties": {
                "name": {"Fn::Join":["-",[{"Ref":"ZStack::StackName"},"EIP"]]},
                "vipUuid":{"Fn::GetAtt":["VIP","uuid"]},
                "vmNicUuid":{"Fn::GetAtt":[{"Fn::Select":[0,{"Fn::GetAtt":["VmInstance","vmNics"]}]},"uuid"]}
            }
        }
    },
    "Outputs": {
        "VmInstance": {
            "Value": {
                "Ref": "VmInstance"
            }
        }
    }
}
'''
    #1.create resource stack
    test_util.test_logger(
        '{"PrivateNetworkUuid":"%s","PublicNetworkUuid":"%s","ImageUuid":"%s","InstanceOfferingUuid":"%s"}'
        % (l3_pri_queried[0].uuid, l3_pub_queried[0].uuid,
           image_queried[0].uuid, instance_offering_queried[0].uuid))
    parameter = '{"PrivateNetworkUuid":"%s","PublicNetworkUuid":"%s","ImageUuid":"%s","InstanceOfferingUuid":"%s"}' % (
        l3_pri_queried[0].uuid, l3_pub_queried[0].uuid, image_queried[0].uuid,
        instance_offering_queried[0].uuid)
    resource_stack_option.set_templateContent(templateContent)
    resource_stack_option.set_parameters(parameter)
    preview_resource_stack = resource_stack_ops.preview_resource_stack(
        resource_stack_option)
    resource_stack = resource_stack_ops.create_resource_stack(
        resource_stack_option)
    #2.query resource stack
    cond = res_ops.gen_query_conditions('uuid', '=', resource_stack.uuid)
    resource_stack_queried = res_ops.query_resource(res_ops.RESOURCE_STACK,
                                                    cond)
    cond = res_ops.gen_query_conditions('name', '=', 'Create_STACK-VM')
    vm_queried = res_ops.query_resource(res_ops.VM_INSTANCE, cond)
    cond = res_ops.gen_query_conditions('name', '=', 'Create_STACK-VIP')
    vip_queried = res_ops.query_resource(res_ops.VIP, cond)
    cond = res_ops.gen_query_conditions('name', '=', 'Create_STACK-EIP')
    eip_queried = res_ops.query_resource(res_ops.EIP, cond)
    if len(resource_stack_queried) == 0:
        test_util.test_fail("Fail to query resource stack")
    # When the stack reports Created every resource must exist; on rollback or
    # deletion none of them may remain.
    if resource_stack_queried[0].status == 'Created':
        if len(vm_queried) == 0 or len(vip_queried) == 0 or len(
                eip_queried) == 0:
            test_util.test_fail(
                "Fail to create all resource when resource stack status is Created"
            )
    elif len(vm_queried) != 0 or len(vip_queried) != 0 or len(
            eip_queried) != 0:
        test_util.test_fail(
            "Fail to delete all resource when resource stack status is Rollbacked or Deleted"
        )
    #3.get resource from resource stack
    resource = resource_stack_ops.get_resource_from_resource_stack(
        resource_stack.uuid)
    if resource == None or len(resource) != 3:
        test_util.test_fail("Fail to get resource from resource_stack")
    #4.query event from resource stack
    cond = res_ops.gen_query_conditions('stackUuid', '=', resource_stack.uuid)
    event = res_ops.query_event_from_resource_stack(cond)
    if event == None or len(event) != 6:
        test_util.test_fail("Fail to get event from resource_stack")
    #5.delete resource stack
    resource_stack_ops.delete_resource_stack(resource_stack.uuid)
    cond = res_ops.gen_query_conditions('uuid', '=', resource_stack.uuid)
    resource_stack_queried = res_ops.query_resource(res_ops.RESOURCE_STACK,
                                                    cond)
    cond = res_ops.gen_query_conditions('name', '=', 'Create_STACK-VM')
    vm_queried = res_ops.query_resource(res_ops.VM_INSTANCE, cond)
    cond = res_ops.gen_query_conditions('name', '=', 'Create_STACK-VIP')
    vip_queried = res_ops.query_resource(res_ops.VIP, cond)
    cond = res_ops.gen_query_conditions('name', '=', 'Create_STACK-EIP')
    eip_queried = res_ops.query_resource(res_ops.EIP, cond)
    if len(resource_stack_queried) != 0:
        test_util.test_fail("Fail to delete resource stack")
    elif len(vm_queried) != 0 or len(vip_queried) != 0 or len(
            eip_queried) != 0:
        test_util.test_fail(
            "Fail to delete resource when resource stack is deleted")
    test_util.test_pass('Create Resource Stack Test Success')
def test():
    """Upgrade ZSHA and verify the management-node VM survives in place:
    exactly one MN VM host before and after, same host IP, and the cloud can
    still create a basic VM afterwards.
    """
    hosts_before = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config,
                                               test_lib.scenario_file)
    if not test_stub.upgrade_zsha(test_lib.all_scenario_config,
                                  test_lib.scenario_file):
        test_util.test_fail('Fail to upgrade zsha')
    if len(hosts_before) != 1:
        test_util.test_fail('MN VM is running on %d host(s)' %
                            len(hosts_before))
    test_util.test_logger('wait for 10s to see if something happens')
    time.sleep(10)
    # Re-locate the MN VM; any exception while doing so is treated the same
    # as the VM having been destroyed by the upgrade.
    try:
        hosts_after = test_stub.get_host_by_mn_vm(
            test_lib.all_scenario_config, test_lib.scenario_file)
        after_count = len(hosts_after)
        if after_count == 0:
            test_util.test_fail(
                "management node VM was destroyed after upgrade zsha")
        elif after_count > 1:
            test_util.test_fail(
                "management node VM starts on more than one host after upgrade zsha"
            )
    except:
        test_util.test_fail(
            "management node VM was destroyed after upgrade zsha")
    if hosts_after[0].ip_ != hosts_before[0].ip_:
        test_util.test_fail(
            'management node VM starts on another host after upgrade zsha')
    else:
        # Functional smoke test: the upgraded MN must still serve requests.
        try:
            probe_vm = test_stub.create_basic_vm()
            probe_vm.check()
            probe_vm.destroy()
        except:
            test_util.test_fail('Fail to create vm after mn is ready')
    test_util.test_pass('Create VM Test Success')
def test():
    # Purpose: verify NeverStop HA for a VM with two L3 networks — when the
    # host's network goes down and later comes back, the VM must be restarted
    # on a different host and remain reachable on both of its nics.
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout
    # Only PS types that support HA restart on another host are eligible.
    allow_ps_list = [inventory.CEPH_PRIMARY_STORAGE_TYPE, inventory.NFS_PRIMARY_STORAGE_TYPE, 'SharedMountPoint']
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    #l3_name = os.environ.get('l3NoVlanNetworkName1')
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    l3_name2 = os.environ.get('l3VlanNetwork2')
    l3_net_uuid2 = test_lib.lib_get_l3_by_name(l3_name2).uuid
    test_lib.clean_up_all_vr()
    #vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    #vr_host_ips = []
    #for vr in vrs:
    #    vr_host_ips.append(test_lib.lib_find_host_by_vr(vr).managementIp)
    #    if test_lib.lib_is_vm_running(vr) != True:
    #        vm_ops.start_vm(vr.uuid)
    #time.sleep(60)
    # Pick a connected, enabled host that is NOT the management node so that
    # taking its network down does not kill the MN itself.
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip, conditions)
    #for vr_host_ip in vr_host_ips:
    #    conditions = res_ops.gen_query_conditions('managementIp', '!=', vr_host_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid, l3_net_uuid2])
    vm_creation_option.set_default_l3_uuid(l3_net_uuid)
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    test_stub.ensure_host_has_no_vr(host_uuid)
    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    host_port = test_lib.lib_get_host_port(host_ip)
    test_util.test_logger("host %s is disconnecting" % (host_ip))
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
    # Simulate a host network outage long enough for HA to notice and act.
    test_stub.down_host_network(host_ip, test_lib.all_scenario_config)
    test_util.test_logger("wait for 240 seconds")
    time.sleep(240)
    test_stub.up_host_network(host_ip, test_lib.all_scenario_config)
    #vm.update() #bug for host uuid is not updated
    cond = res_ops.gen_query_conditions('uuid', '=', vm.vm.uuid)
    vm_inv = res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0]
    vm_host_ip = test_lib.lib_find_host_by_vm(vm_inv).managementIp
    # Poll up to max_time seconds (module-level constant — defined outside
    # this view) for the VM to show up on a different host; the for/else
    # fails the test if the host never changes.
    for i in range(0, max_time):
        test_util.test_logger("vm_host_ip:%s; host_ip:%s" % (vm_host_ip, host_ip))
        time.sleep(1)
        vm_inv = res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0]
        vm_host_ip = test_lib.lib_find_host_by_vm(vm_inv).managementIp
        if vm_host_ip != host_ip:
            break
    else:
        test_util.test_fail("VM is expected to start running on another host")
    test_lib.lib_set_vm_host_l2_ip(vm_inv)
    #vm.check() #bug when multi-networks
    #if test_lib.lib_wait_target_up(vm_inv.vmNics[0].ip, '22', 120):
    #    test_util.test_logger("%s can be connected within 120s" %(vm_inv.vmNics[0].ip))
    #elif test_lib.lib_wait_target_up(vm_inv.vmNics[1].ip, '22', 120):
    #    test_util.test_logger("%s can be connected within 120s" %(vm_inv.vmNics[1].ip))
    #else:
    #    test_util.test_fail("Both %s and %s can't be connected." %(vm_inv.vmNics[0].ip, vm_inv.vmNics[1].ip))
    # Temporarily raise the module-wide SSH timeout; restore it on every exit
    # path (test_fail raises, so the restore precedes each fail call).
    ssh_timeout = test_lib.SSH_TIMEOUT
    test_lib.SSH_TIMEOUT = 120
    if not test_lib.lib_ssh_vm_cmd_by_agent_with_retry(vm_host_ip, vm_inv.vmNics[0].ip, 'root', 'password', "pwd"):
        test_lib.SSH_TIMEOUT = ssh_timeout
        test_util.test_fail("vm can't be access by %s." % (vm_inv.vmNics[0].ip))
    if not test_lib.lib_ssh_vm_cmd_by_agent_with_retry(vm_host_ip, vm_inv.vmNics[1].ip, 'root', 'password', "pwd"):
        test_lib.SSH_TIMEOUT = ssh_timeout
        test_util.test_fail("vm can't be access by %s." % (vm_inv.vmNics[1].ip))
    test_lib.SSH_TIMEOUT = ssh_timeout
    vm.destroy()
    test_util.test_pass('Test VM ha with multiple networks disconnect host Success')
def test():
    """Verify scheduler Enabled/Disabled state transitions.

    Creates a stop-VM and a start-VM simple scheduler (10s/20s offsets, 20s
    interval), then checks that jobs fire while Enabled, stop firing after
    'disable', and resume after 'enable'. Relies on module-level helpers
    check_scheduler_state / check_scheduler_msg.
    """
    global vm
    global schd1
    global schd2
    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    start_date = int(time.time())
    schd1 = vm_ops.stop_vm_scheduler(vm.get_vm().uuid, 'simple', 'simple_stop_vm_scheduler', start_date+10, 20)
    schd2 = vm_ops.start_vm_scheduler(vm.get_vm().uuid, 'simple', 'simple_start_vm_scheduler', start_date+20, 20)
    test_stub.sleep_util(start_date+45)
    test_util.test_dsc('check scheduler state after create scheduler')
    check_scheduler_state(schd1, 'Enabled')
    check_scheduler_state(schd2, 'Enabled')
    if not check_scheduler_msg('run scheduler for job: StopVmInstanceJob', start_date+10):
        test_util.test_fail('StopVmInstanceJob not executed at expected timestamp range')
    if not check_scheduler_msg('run scheduler for job: StartVmInstanceJob', start_date+20):
        test_util.test_fail('StartVmInstanceJob not executed at expected timestamp range')
    schd_ops.change_scheduler_state(schd1.uuid, 'disable')
    schd_ops.change_scheduler_state(schd2.uuid, 'disable')
    current_time = int(time.time())
    # FIX: use floor division to compute the NEXT 20s cycle boundary after
    # now. The original used '%', whose result (elapsed % 20 + 1 cycles past
    # start_date) can already lie in the past — e.g. elapsed=60 gives
    # start_date+20 — so the "no job while disabled" message checks would
    # inspect timestamps from when the schedulers were still enabled.
    except_start_time = start_date + 20 * (((current_time - start_date) // 20) + 1)
    test_stub.sleep_util(except_start_time+45)
    test_util.test_dsc('check scheduler state after pause scheduler')
    check_scheduler_state(schd1, 'Disabled')
    check_scheduler_state(schd2, 'Disabled')
    # While disabled, neither job may fire in the new cycle window.
    if check_scheduler_msg('run scheduler for job: StopVmInstanceJob', except_start_time+10):
        test_util.test_fail('StopVmInstanceJob executed at unexpected timestamp range')
    if check_scheduler_msg('run scheduler for job: StartVmInstanceJob', except_start_time+20):
        test_util.test_fail('StartVmInstanceJob executed at unexpected timestamp range')
    schd_ops.change_scheduler_state(schd1.uuid, 'enable')
    schd_ops.change_scheduler_state(schd2.uuid, 'enable')
    current_time = int(time.time())
    # Same fix as above for the resume window.
    except_start_time = start_date + 20 * (((current_time - start_date) // 20) + 1)
    test_stub.sleep_util(except_start_time+45)
    test_util.test_dsc('check scheduler state after resume scheduler')
    check_scheduler_state(schd1, 'Enabled')
    check_scheduler_state(schd2, 'Enabled')
    if not check_scheduler_msg('run scheduler for job: StopVmInstanceJob', except_start_time+10):
        test_util.test_fail('StopVmInstanceJob not executed at expected timestamp range')
    if not check_scheduler_msg('run scheduler for job: StartVmInstanceJob', except_start_time+20):
        test_util.test_fail('StartVmInstanceJob not executed at expected timestamp range')
    schd_ops.delete_scheduler(schd1.uuid)
    schd_ops.delete_scheduler(schd2.uuid)
    vm.destroy()
    test_util.test_pass('Check Scheduler State after Pause and Resume Scheduler Success')
def test():
    """NeverStop HA across a graceful host stop/start (NFS, separated storage network).

    Creates a VM on a host that carries no VR/MN/NFS role, stops that host
    gracefully, verifies the VM does not restart on the original host, then
    restarts the host and verifies the VM ended up running elsewhere.
    """
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout
    allow_ps_list = [inventory.NFS_PRIMARY_STORAGE_TYPE]
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)
    test_stub.skip_if_not_storage_network_separate(test_lib.all_scenario_config)
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    test_lib.clean_up_all_vr()
    # Pick a connected, enabled host that is not the management node.
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    vm.check()
    # Move the VM off any host that runs a VR, the MN, or the NFS server so
    # that stopping its host only affects the VM under test.
    vr_hosts = test_stub.get_host_has_vr()
    mn_hosts = test_stub.get_host_has_mn()
    nfs_hosts = test_stub.get_host_has_nfs()
    if not test_stub.ensure_vm_not_on(vm.get_vm().uuid, vm.get_vm().hostUuid, vr_hosts+mn_hosts+nfs_hosts):
        test_util.test_fail("Not find out a suitable host")
    host_uuid = test_lib.lib_find_host_by_vm(vm.get_vm()).uuid
    test_stub.ensure_all_vrs_on_host(host_uuid)
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    host_port = test_lib.lib_get_host_port(host_ip)
    test_util.test_logger("host %s is disconnecting" %(host_ip))
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
    host_list = test_stub.get_sce_hosts(test_lib.all_scenario_config, test_lib.scenario_file)
    # FIX: pre-initialize; without this, when no scenario host matches
    # host_ip the "if not test_host" below raised NameError instead of
    # reporting the intended failure message.
    test_host = None
    for host in host_list:
        if host.ip_ == host_ip:
            test_host = host
            break
    if not test_host:
        test_util.test_fail('there is no host with ip %s in scenario file.' %(host_ip))
    test_stub.stop_host(test_host, test_lib.all_scenario_config)
    test_stub.check_if_vm_starting_incorrectly_on_original_host(vm.get_vm().uuid, host_uuid, max_count=300)
    test_stub.start_host(test_host, test_lib.all_scenario_config)
    test_stub.recover_host_vlan(test_host, test_lib.all_scenario_config, test_lib.deploy_config)
    conditions = res_ops.gen_query_conditions('managementIp', '=', host_ip)
    kvm_host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    host_ops.reconnect_host(kvm_host_uuid)
    vm.set_state(vm_header.RUNNING)
    vm.check()
    vm.update()
    if test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp == host_ip:
        test_util.test_fail("VM is expected to start running on another host")
    vm.destroy()
    test_util.test_pass('Test checking vm status after graceful stop and start success')
def test():
    """Exercise updating KVM host credentials: password, sshPort, username.

    For each field: set a wrong value in ZStack and expect reconnect to fail,
    then change the real host (via os.system) to match and expect reconnect
    to succeed, then restore both sides. NOTE: mutates the local machine's
    root password, sshd port and user list — intended for disposable test hosts.
    """
    global kvm_host_uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    if res_ops.query_resource(res_ops.HOST, conditions):
        kvm_host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    else:
        test_util.test_skip("There is no host. Skip test")

    def expect_reconnect_failure(field):
        # Reconnect must fail while the ZStack host record and the real host
        # disagree on this credential field. (try/except/else replaces the
        # original flag-variable pattern, duplicated three times.)
        try:
            host_ops.reconnect_host(kvm_host_uuid)
        except:
            test_util.test_dsc('catch the exception, cannot reconnect KVM host after updating the %s of KVM host' % field)
        else:
            test_util.test_fail('not catch the exception, but shuold fail to reconnect KVM host after updating the %s of KVM host' % field)

    test_util.test_dsc('Test KVM Host Infomation: password, sshPort, username')
    #====================== Password ======================
    test_util.test_dsc('Update Password')
    host_ops.update_kvm_host(kvm_host_uuid, 'password', 'zstackmevoco')
    expect_reconnect_failure('password')
    test_util.test_dsc('Update KVM Host Password')
    os.system('echo "zstackmevoco"| passwd --stdin root')
    host_ops.reconnect_host(kvm_host_uuid)
    test_util.test_dsc('Recover KVM Host Password')
    host_ops.update_kvm_host(kvm_host_uuid, 'password', 'password')
    os.system('echo "password"| passwd --stdin root')
    #====================== sshPort ======================
    test_util.test_dsc('Update sshPort')
    host_ops.update_kvm_host(kvm_host_uuid, 'sshPort', '23')
    expect_reconnect_failure('sshPort')
    test_util.test_dsc('Update KVM Host SSH Port')
    os.system('sed -i \'/#Port 22/ i Port 23\' /etc/ssh/sshd_config')
    os.system('service sshd restart')
    host_ops.reconnect_host(kvm_host_uuid)
    test_util.test_dsc('Recover KVM Host SSH Port')
    host_ops.update_kvm_host(kvm_host_uuid, 'sshPort', '22')
    os.system('sed -i \'/Port 23/d\' /etc/ssh/sshd_config')
    os.system('service sshd restart')
    #====================== username ======================
    test_util.test_dsc('Update Username')
    host_ops.update_kvm_host(kvm_host_uuid, 'username', 'test')
    expect_reconnect_failure('username')
    test_util.test_dsc('Update KVM Host username')
    os.system('adduser test')
    os.system('echo "password"| passwd --stdin test')
    host_ops.reconnect_host(kvm_host_uuid)
    test_util.test_dsc('Recover KVM Host username')
    host_ops.update_kvm_host(kvm_host_uuid, 'username', 'root')
    os.system('userdel test')
    test_util.test_pass('KVM Host Update Infomation SUCCESS')
def test():
    # Purpose: verify a NeverStop-HA VM whose host loses its network goes to
    # Unknown, and returns to Running within max_time seconds once the host
    # network is restored.
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout
    allow_ps_list = [inventory.CEPH_PRIMARY_STORAGE_TYPE, inventory.NFS_PRIMARY_STORAGE_TYPE, 'SharedMountPoint']
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    #l3_name = os.environ.get('l3NoVlanNetworkName1')
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    test_lib.clean_up_all_vr()
    #vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    #vr_host_ips = []
    #for vr in vrs:
    #    vr_host_ips.append(test_lib.lib_find_host_by_vr(vr).managementIp)
    #    if test_lib.lib_is_vm_running(vr) != True:
    #        vm_ops.start_vm(vr.uuid)
    #time.sleep(60)
    # Choose a connected, enabled host that is not the management node.
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip, conditions)
    #for vr_host_ip in vr_host_ips:
    #    conditions = res_ops.gen_query_conditions('managementIp', '!=', vr_host_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    test_stub.ensure_host_has_no_vr(host_uuid)
    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    test_util.test_logger("host %s is disconnecting" % (host_ip))
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
    test_stub.down_host_network(host_ip, test_lib.all_scenario_config)
    #Here we wait for 180 seconds for all vms have been killed, but test result show:
    #no need to wait, the reaction of killing the vm is very quickly.
    #test_util.test_logger("wait for 180 seconds")
    #time.sleep(180)
    # Phase 1: wait (up to max_time — module-level constant defined outside
    # this view) for the VM to be marked Unknown, then restore the network.
    vm_stop_time = None
    cond = res_ops.gen_query_conditions('uuid', '=', vm.vm.uuid)
    for i in range(0, max_time):
        vm_stop_time = i
        if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == "Unknown":
            test_stub.up_host_network(host_ip, test_lib.all_scenario_config)
            break
        time.sleep(1)
    # NOTE(review): vm_stop_time is assigned on every iteration, so it is
    # only None here when max_time == 0; if the VM never reached Unknown the
    # network is never brought back up and phase 2 will time out — confirm
    # that is the intended failure mode.
    if vm_stop_time is None:
        vm_stop_time = max_time
    # Phase 2: within the remaining budget, wait for HA to bring the VM back
    # to Running; the for/else fails the test on timeout.
    for i in range(vm_stop_time, max_time):
        if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == "Running":
            break
        time.sleep(1)
    else:
        test_util.test_fail("vm has not been changed to running as expected within %s s." % (max_time))
    vm.destroy()
    test_util.test_pass('Test VM ha change to running within 120s Success')
def test():
    # Purpose: performance test — create one snapshot per root volume from
    # ZSTACK_TEST_NUM worker threads (bounded by ZSTACK_THREAD_THRESHOLD
    # concurrent threads) and verify the expected number of snapshots exists.
    global session_uuid
    global session_to
    global session_mc
    global sp_name
    # Max concurrent worker threads; defaults to 1000.
    thread_threshold = os.environ.get('ZSTACK_THREAD_THRESHOLD')
    if not thread_threshold:
        thread_threshold = 1000
    else:
        thread_threshold = int(thread_threshold)
    # Total snapshots to create; defaults to 0 (no-op run).
    sp_num = os.environ.get('ZSTACK_TEST_NUM')
    if not sp_num:
        sp_num = 0
    else:
        sp_num = int(sp_num)
    #change account session timeout.
    # NOTE(review): these two calls use the module-level session_uuid BEFORE
    # login_as_admin() below rebinds it — presumably a valid session exists
    # from module setup; verify against the suite's setup hook.
    session_to = con_ops.change_global_config('identity', 'session.timeout', '720000', session_uuid)
    session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000', session_uuid)
    session_uuid = acc_ops.login_as_admin()
    cond = res_ops.gen_query_conditions('type', '=', 'Root')
    vol_num = res_ops.query_resource_count(res_ops.VOLUME, cond, session_uuid)
    if vol_num < thread_threshold:
        test_util.test_fail('This test needs: %d VM instances for volume attaching and detaching operations. But there are only %d VMs root volumes. Please use this case: test_crt_basic_vm_with_max_threads.py to create required VMs.' % (thread_threshold, vol_num))
    vols = res_ops.query_resource_fields(res_ops.VOLUME, cond, session_uuid, \
            ['uuid'], start = 0, limit = thread_threshold)
    test_util.test_logger('ZSTACK_THREAD_THRESHOLD is %d' % thread_threshold)
    test_util.test_logger('ZSTACK_TEST_NUM is %d' % sp_num)
    org_num = sp_num
    # Randomized shared snapshot name lets the final count query isolate
    # snapshots created by this run.
    random_name = random.random()
    sp_name = 'perf_sp_%s' % str(random_name)
    vol_num = 0
    while sp_num > 0:
        # check_thread_exception / create_sp are module-level helpers defined
        # outside this view.
        check_thread_exception()
        sp_num -= 1
        # Wrap around so volumes are reused once thread_threshold is exceeded.
        if vol_num > (thread_threshold - 1):
            vol_num = 0
        thread = threading.Thread(target=create_sp, \
                args = (vols[vol_num].uuid, ))
        vol_num += 1
        # Throttle: don't start a new thread until below the concurrency cap.
        while threading.active_count() > thread_threshold:
            time.sleep(0.1)
        thread.start()
    # Wait for all workers to finish (only the main thread left).
    while threading.active_count() > 1:
        time.sleep(0.1)
    cond = res_ops.gen_query_conditions('name', '=', sp_name)
    sp_num = res_ops.query_resource_count(res_ops.VOLUME_SNAPSHOT, cond, session_uuid)
    # Restore the global config values saved above before judging the result.
    con_ops.change_global_config('identity', 'session.timeout', session_to, session_uuid)
    con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc, session_uuid)
    acc_ops.logout(session_uuid)
    if sp_num == org_num:
        test_util.test_pass('Create %d Volumes Snapshot Perf Test Success' % org_num)
    else:
        test_util.test_fail('Create %d Volumes Snapshot Perf Test Failed. Only find %d Volume Snapshots.' % (org_num, sp_num))
def test():
    """Change-password test across two generations of cloned VMs.

    Creates a base VM, adds test users, clones it, changes each clone's
    password (running and stopped), clones a clone again and repeats, then
    cleans everything up. Requires an image-store backup storage.
    """
    global vm, exist_users
    test_util.test_dsc('cloned vm change password test')
    vm = test_stub.create_vm(vm_name='1st-created-vm-u12', image_name="imageName_i_u12")
    test_obj_dict.add_vm(vm)
    vm.check()
    force_vm_auto_boot(vm)
    test_util.test_logger("change vm password for initial created vm")
    vm_ops.change_vm_password(vm.get_vm().uuid, "root", "password", skip_stopped_vm=None, session_uuid=None)
    # Only image-store backup storage supports this flow; skip otherwise.
    backup_storage_list = test_lib.lib_get_backup_storage_list_by_vm(vm.vm)
    for bs in backup_storage_list:
        if bs.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
            break
        #if bs.type == inventory.SFTP_BACKUP_STORAGE_TYPE:
        #    break
        #if bs.type == inventory.CEPH_BACKUP_STORAGE_TYPE:
        #    break
    else:
        vm.destroy()
        test_util.test_skip('Not find image store type backup storage.')
    # users/passwds/exist_users are module-level lists defined outside this view.
    for (usr, passwd) in zip(users, passwds):
        if usr not in exist_users:
            test_util.test_logger("find new account: <%s:%s>" % (usr, passwd))
            test_stub.create_user_in_vm(vm.get_vm(), usr, passwd)
            exist_users.append(usr)
    #new vm->cloned new_vm1/new_vm2
    test_util.test_logger("1st clone")
    new_vms = vm.clone(vm_names)
    if len(new_vms) != len(vm_names):
        test_util.test_fail('only %s VMs have been cloned, which is less than required: %s' % (len(new_vms), vm_names))
    # NOTE: usr/passwd deliberately carry the last values from the loop above.
    for new_vm in new_vms:
        new_vm.update()
        #new_vm.check()
        test_obj_dict.add_vm(new_vm)
        #When vm is running:
        test_util.test_logger("vm running && change 1st cloned vm password:<%s:%s:%s>" % (new_vm, usr, passwd))
        vm_ops.change_vm_password(new_vm.get_vm().uuid, usr, passwd, skip_stopped_vm=None, session_uuid=None)
        if not test_lib.lib_check_login_in_vm(new_vm.get_vm(), usr, passwd):
            # FIX: the message must be %-formatted; the original passed usr
            # and passwd as extra positional arguments to test_fail.
            test_util.test_fail("check login cloned vm with user:%s password: %s failed" % (usr, passwd))
        #When vm is stopped:
        #new_vm.stop()
        test_util.test_logger("vm stopped && change 1st cloned vm password:<%s:%s:%s>" % (new_vm, usr, passwd))
        vm_ops.change_vm_password(new_vm.get_vm().uuid, "root", test_stub.original_root_password)
        #new_vm.start()
        new_vm.check()
        #test use the cloned vm change password to clone new vm and then change password
        test_util.test_logger("2nd cloned")
        in_new_vms = new_vm.clone(in_vm_names)
        new_vm.destroy()
        new_vm.check()
        new_vm.expunge()
        new_vm.check()
        for in_new_vm in in_new_vms:
            in_new_vm.update()
            test_obj_dict.add_vm(in_new_vm)
            test_util.test_logger("vm running && change 2nd cloned vm password:<%s:%s:%s>" % (new_vm, usr, passwd))
            vm_ops.change_vm_password(in_new_vm.get_vm().uuid, usr, passwd, skip_stopped_vm=None, session_uuid=None)
            if not test_lib.lib_check_login_in_vm(in_new_vm.get_vm(), usr, passwd):
                # FIX: same %-formatting defect as above.
                test_util.test_fail("check login cloned in_vm with user:%s password: %s failed" % (usr, passwd))
            #When vm is stopped:
            #in_new_vm.stop()
            test_util.test_logger("vm stopped && change 2nd cloned vm password:<%s:%s:%s>" % (new_vm, usr, passwd))
            vm_ops.change_vm_password(in_new_vm.get_vm().uuid, "root", test_stub.original_root_password)
            #in_new_vm.start()
            in_new_vm.check()
            in_new_vm.destroy()
            in_new_vm.check()
            in_new_vm.expunge()
            in_new_vm.check()
    vm.destroy()
    vm.check()
    vm.expunge()
    vm.check()
    test_util.test_pass('Set password when VM is creating is successful.')
def test():
    # Purpose: exercise one SNAT VIP carrying multiple services at once
    # (port forwarding + load balancer + IPsec) between two ZStack
    # deployments, then delete each service and verify the VIP's useFor
    # field is cleaned up accordingly.
    # NOTE: the active deployment is switched by rewriting the
    # ZSTACK_BUILT_IN_HTTP_SERVER_IP environment variable before each call.
    global mevoco1_ip
    global mevoco2_ip
    global ipsec1
    global ipsec2
    mevoco1_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
    mevoco2_ip = os.environ['secondZStackMnIp']
    test_util.test_dsc('Create test vm in mevoco1')
    vm1 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    test_obj_dict1.add_vm(vm1)
    vm1.check()
    vm_nic1 = vm1.get_vm().vmNics[0]
    vm_nic1_uuid = vm_nic1.uuid
    # Second VM on the same L3 as an additional LB backend nic.
    vm3 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    test_obj_dict1.add_vm(vm3)
    vm3.check()
    vm_nic3 = vm3.get_vm().vmNics[0]
    vm_nic3_uuid = vm_nic3.uuid
    pri_l3_uuid1 = vm1.vm.vmNics[0].l3NetworkUuid
    vr1 = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid1)[0]
    l3_uuid1 = test_lib.lib_find_vr_pub_nic(vr1).l3NetworkUuid
    vr1_pub_ip = test_lib.lib_find_vr_pub_ip(vr1)
    # The single VIP all services (PF, LB, IPsec) will share.
    vip1 = test_stub.create_vip('vip for multi-services', l3_uuid1)
    vip_uuid = vip1.get_vip().uuid
    cond = res_ops.gen_query_conditions('uuid', '=', pri_l3_uuid1)
    first_zstack_cidrs = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0].ipRanges[0].networkCidr
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    test_util.test_dsc('Create test vm in mevoco2')
    vm2 = test_stub.create_vlan_vm(os.environ.get('l3VlanDNATNetworkName'))
    test_obj_dict2.add_vm(vm2)
    vm2.check()
    pri_l3_uuid2 = vm2.vm.vmNics[0].l3NetworkUuid
    vr2 = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid2)[0]
    l3_uuid2 = test_lib.lib_find_vr_pub_nic(vr2).l3NetworkUuid
    vip2 = test_stub.create_vip('ipsec2_vip', l3_uuid2)
    cond = res_ops.gen_query_conditions('uuid', '=', pri_l3_uuid2)
    second_zstack_cidrs = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0].ipRanges[0].networkCidr
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    test_util.test_dsc('Create PF in mevoco1')
    l3_name = os.environ.get('l3NoVlanNetworkName1')
    vr = test_stub.create_vr_vm(test_obj_dict1, l3_name)
    l3_name = os.environ.get('l3VlanNetworkName4')
    vr = test_stub.create_vr_vm(test_obj_dict1, l3_name)
    vr_pub_ip = test_lib.lib_find_vr_pub_ip(vr)
    pf_creation_opt1 = PfRule.generate_pf_rule_option(vr_pub_ip, protocol=inventory.TCP, vip_target_rule=Port.rule4_ports, private_target_rule=Port.rule4_ports, vip_uuid=vip_uuid)
    test_pf1 = zstack_pf_header.ZstackTestPortForwarding()
    test_pf1.set_creation_option(pf_creation_opt1)
    test_pf1.create()
    vip1.attach_pf(test_pf1)
    vip1.check()
    test_pf1.attach(vm_nic1_uuid, vm1)
    vip1.check()
    test_util.test_dsc('Create LB in mevoco1')
    lb = zstack_lb_header.ZstackTestLoadBalancer()
    lb.create('create lb test', vip1.get_vip().uuid)
    test_obj_dict1.add_load_balancer(lb)
    vip1.attach_lb(lb)
    lb_creation_option = test_lib.lib_create_lb_listener_option(lbl_port=222, lbi_port=22)
    lbl = lb.create_listener(lb_creation_option)
    lbl.add_nics([vm_nic1_uuid, vm_nic3_uuid])
    lb.check()
    vip1.check()
    test_util.test_dsc('Create ipsec in mevoco1')
    ipsec1 = ipsec_ops.create_ipsec_connection('ipsec1', pri_l3_uuid1, vip2.get_vip().ip, '123456', vip1.get_vip().uuid, [second_zstack_cidrs])
    # The VIP must report the IPsec use exactly once.
    vip1_db = test_lib.lib_get_vip_by_uuid(vip_uuid)
    assert "IPsec" in vip1_db.useFor
    assert vip1_db.useFor.count("IPsec") == 1
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    test_util.test_dsc('Create ipsec in mevoco2')
    ipsec2 = ipsec_ops.create_ipsec_connection('ipsec2', pri_l3_uuid2, vip1.get_vip().ip, '123456', vip2.get_vip().uuid, [first_zstack_cidrs])
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    # Tunnel up: private networks must be mutually reachable.
    if not test_lib.lib_check_ping(vm1.vm, vm2.vm.vmNics[0].ip):
        test_util.test_fail('vm in mevoco1[MN:%s] could not connect to vm in mevoco2[MN:%s]' % (mevoco1_ip, mevoco2_ip))
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    if not test_lib.lib_check_ping(vm2.vm, vm1.vm.vmNics[0].ip):
        test_util.test_fail('vm in mevoco1[MN:%s] could not connect to vm in mevoco2[MN:%s]' % (mevoco2_ip, mevoco1_ip))
    # delete ipsec
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    ipsec_ops.delete_ipsec_connection(ipsec1.uuid)
    # Tunnel torn down on one side: traffic must stop in both directions.
    if test_lib.lib_check_ping(vm1.vm, vm2.vm.vmNics[0].ip, no_exception=True):
        test_util.test_fail('vm in mevoco1[MN:%s] could still connect to vm in mevoco2[MN:%s] after Ipsec is deleted' % (mevoco1_ip, mevoco2_ip))
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    if test_lib.lib_check_ping(vm2.vm, vm1.vm.vmNics[0].ip, no_exception=True):
        test_util.test_fail('vm in mevoco2[MN:%s] could still connect to vm in mevoco1[MN:%s] after Ipsec is deleted' % (mevoco2_ip, mevoco1_ip))
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    # Each service removal must drop its tag from the VIP's useFor list.
    vip1_db = test_lib.lib_get_vip_by_uuid(vip_uuid)
    assert "IPsec" not in vip1_db.useFor
    # delete PF
    test_pf1.delete()
    vip1_db = test_lib.lib_get_vip_by_uuid(vip_uuid)
    assert "PortForwarding" not in vip1_db.useFor
    # delete LB
    lb.delete()
    vip1_db = test_lib.lib_get_vip_by_uuid(vip_uuid)
    assert vip1_db.useFor is None
    test_lib.lib_error_cleanup(test_obj_dict1)
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    ipsec_ops.delete_ipsec_connection(ipsec2.uuid)
    vip2.delete()
    test_lib.lib_error_cleanup(test_obj_dict2)
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    vip1.delete()
    test_util.test_pass('Create multiple service with 1 snat IP Success')
def test():
    # Purpose: shut down a majority ((n+1)/2) of the MN hosts, confirm the
    # MN VM stops, recover one host, and verify the MN VM and management
    # server come back and can serve a VM-creation request.
    global vm
    global mn_host_list
    global test_mn_host_list
    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config, test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    # Python-2 integer division: majority of the MN hosts, chosen at random.
    test_mn_host_list = random.sample(range(mn_host_num), (mn_host_num + 1) / 2)
    for index in test_mn_host_list:
        test_util.test_logger("shutdown host [%s]" % (mn_host_list[index].ip_))
        test_stub.stop_host(mn_host_list[index], test_lib.all_scenario_config)
    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(mn_host) != 0:
        test_util.test_fail('MN VM is still running on %d host(s)' % len(mn_host))
    # Bring back the last host that was shut down, then drop it from the
    # to-recover list so teardown elsewhere does not touch it again.
    test_util.test_logger("recover host [%s]" % (mn_host_list[test_mn_host_list[-1]]))
    test_stub.recover_host(mn_host_list[test_mn_host_list[-1]], test_lib.all_scenario_config, test_lib.deploy_config)
    test_mn_host_list.pop()
    test_util.test_logger("wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)
    new_mn_host_ip = test_stub.get_host_by_consul_leader(test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail("management node VM not run correctly on [%s] after its former host [%s] down for 20s" % (new_mn_host_ip, mn_host_list[0].ip_))
    # Poll up to 60 * 5s for the MN VM to run on exactly one host.
    count = 60
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger("management node VM run after its former host down for 30s")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail("management node VM runs on more than one host after its former host down")
        time.sleep(5)
        count -= 1
    if len(new_mn_host) == 0:
        test_util.test_fail("management node VM does not run after its former host down for 30s")
    elif len(new_mn_host) > 1:
        test_util.test_fail("management node VM runs on more than one host after its former host down")
    test_util.test_logger("wait for 5 minutes to see if management node starts again")
    try:
        node_ops.wait_for_management_server_start(300)
    except:
        test_util.test_fail("management node does not recover after recover one mn host")
    # The API may not accept requests immediately; retry creation for 30s.
    test_util.test_logger("try to create vm, timeout is 30s")
    time_out = 30
    while time_out > 0:
        try:
            vm = test_stub.create_basic_vm()
            break
        except:
            time.sleep(1)
            time_out -= 1
    if time_out == 0:
        test_util.test_fail('Fail to create vm after mn is ready')
    vm.check()
    vm.destroy()
    test_util.test_pass('Create VM Test Success')
def test():
    """Double a VM's cpuNum and memorySize via update_vm and verify the new
    values persist while running, after stop, and after restart."""
    test_util.test_dsc('Test update instance offering')
    vm = test_stub.create_basic_vm()
    offering = test_lib.lib_get_instance_offering_by_uuid(vm.get_vm().instanceOfferingUuid)
    test_obj_dict.add_vm(vm)

    target_cpu = offering.cpuNum * 2
    target_mem = offering.memorySize * 2

    def verify(cpu_msg, mem_msg):
        # Re-read the cached inventory and compare against the doubled spec.
        if vm.get_vm().cpuNum != target_cpu:
            test_util.test_fail(cpu_msg)
        if vm.get_vm().memorySize != target_mem:
            test_util.test_fail(mem_msg)

    # Apply cpu and memory changes in two separate update calls.
    vm_ops.update_vm(vm.get_vm().uuid, target_cpu, None)
    vm_ops.update_vm(vm.get_vm().uuid, None, target_mem)
    vm.update()
    verify("cpuNum is expected to change", "memorySize is expected to change")

    vm.stop()
    vm.update()
    verify("cpuNum is expected to change", "memorySize is expected to change")

    vm.start()
    verify("cpuNum change is expected to take effect after Vm restart",
           "memorySize change is expected to take effect after Vm restart")

    vm.check()
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Test update instance cpu memory Pass')
def test():
    # Purpose: verify the host.disk.io (read bandwidth) monitor trigger fires
    # under generated disk load and that the alert email is delivered.
    global trigger
    global media
    global trigger_action
    test_item = "host.disk.io"
    resource_type = "HostVO"
    vm_monitor_item = test_stub.get_monitor_item(resource_type)
    if test_item not in vm_monitor_item:
        test_util.test_fail('%s is not available for monitor' % test_item)
    hosts = res_ops.get_resource(res_ops.HOST)
    host = hosts[0]
    # Trigger when read bandwidth exceeds 2000 for 60 seconds.
    duration = 60
    expression = "host.disk.io{type=\"bandwidth\", direction=\"read\"} > 2000.0"
    monitor_trigger = mon_ops.create_monitor_trigger(host.uuid, duration, expression)
    send_email = test_stub.create_email_media()
    media = send_email.uuid
    # Random hex suffix keeps the action name unique across runs.
    trigger_action_name = "trigger" + ''.join(map(lambda xx: (hex(ord(xx))[2:]), os.urandom(8)))
    trigger = monitor_trigger.uuid
    receive_email = os.environ.get('receive_email')
    monitor_trigger_action = mon_ops.create_email_monitor_trigger_action(trigger_action_name, send_email.uuid, trigger.split(), receive_email)
    trigger_action = monitor_trigger_action.uuid
    host.password = os.environ.get('hostPassword')
    ssh_cmd = test_stub.ssh_cmd_line(host.managementIp, host.username, host.password, port=int(host.sshPort))
    # Generate read-side disk load on the host in the background, long enough
    # to exceed the 60s trigger duration, then kill it so the trigger recovers.
    rw = 'read'
    t = threading.Thread(target=test_stub.run_disk_load1, args=(ssh_cmd, rw, ))
    t.start()
    time.sleep(110)
    test_stub.kill(ssh_cmd)
    # Expect exactly one Problem (fired) and one OK (recovered) transition.
    status_problem, status_ok = test_stub.query_trigger_in_loop(trigger, 50)
    test_util.action_logger('Trigger old status: %s triggered. Trigger new status: %s recovered' % (status_problem, status_ok))
    if status_problem != 1 or status_ok != 1:
        test_util.test_fail('%s Monitor Test failed, expected Problem or OK status not triggered' % test_item)
    mail_list = test_stub.receive_email()
    keywords = "fired"
    mail_flag = test_stub.check_email(mail_list, keywords, trigger, host.uuid)
    if mail_flag == 0:
        test_util.test_fail('Failed to Get Target: %s for: %s Trigger Mail' % (host.uuid, test_item))
    # Cleanup of trigger action, trigger and email media.
    mon_ops.delete_monitor_trigger_action(trigger_action)
    mon_ops.delete_monitor_trigger(trigger)
    mon_ops.delete_email_media(media)
def test():
    """Host self-fence test: a brief management-NIC flap must NOT fence the host.

    Creates an HA-enabled VM on a non-MN host, bounces that host's
    management interface down for 30s, and verifies the host is neither
    shut down nor disconnected afterwards.
    """
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout
    # Self-fencing behavior only applies to these primary storage types.
    allow_ps_list = [inventory.CEPH_PRIMARY_STORAGE_TYPE, inventory.NFS_PRIMARY_STORAGE_TYPE, 'SharedMountPoint']
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    #l3_name = os.environ.get('l3NoVlanNetworkName1')
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    # Make sure every virtual router on the L3 is up before creating the VM.
    vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    for vr in vrs:
        if test_lib.lib_is_vm_running(vr) != True:
            vm_ops.start_vm(vr.uuid)
    time.sleep(60)
    # Pick a connected, enabled host that is NOT the management node, so
    # taking its NIC down does not kill the MN itself.
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    test_stub.ensure_host_has_no_vr(host_uuid)
    #vm.check()
    # Re-resolve host info from the VM in case the scheduler moved it.
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    host_uuid = test_lib.lib_find_host_by_vm(vm.get_vm()).uuid
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "OnHostFailure")
    # Flap the host's management NIC: down for 30s, then back up. The
    # agent command blocks until the sequence finishes.
    l2_network_interface = os.environ.get('l2ManagementNetworkInterface')
    cmd = "ifconfig %s down && sleep 30 && ifconfig %s up" % (l2_network_interface, l2_network_interface)
    try:
        rsp = test_lib.lib_execute_sh_cmd_by_agent(host_ip, cmd)
        # NOTE(review): this success-path log message reads like a failure
        # message; it appears copy-pasted from the check below.
        test_util.test_logger("host is not expected to shutdown after its network down just for a little while")
    except:
        test_util.test_fail("host may have been shutdown, while it's not expected to shutdown")
    # A trivial follow-up command proves the host agent is still reachable.
    cmd = "date"
    try:
        rsp = test_lib.lib_execute_sh_cmd_by_agent(host_ip, cmd)
        test_util.test_logger("host is still alive")
    except:
        test_util.test_fail("host is not expected to shutdown after its network down just for a little while")
    # Exercise MN <-> agent connectivity to confirm the cluster recovered.
    test_stub.exercise_connection(100, 10)
    vm.destroy()
    time.sleep(60)
    test_util.test_pass('Test Host Self fence Success')
def test():
    """Cross-management-node IPsec test with mismatched PFS groups.

    NOTE: this test is unconditionally skipped (issue #2720, won't-fix);
    everything after the test_skip call is effectively dead code kept for
    reference.

    Scenario: two mevoco MNs each host a VM behind a VR. IPsec tunnels are
    created with different pfs groups (dh-group2 vs dh-group5) — ping must
    FAIL; then mevoco2's connection is recreated with the default pfs —
    ping must SUCCEED; finally connections are deleted — ping must FAIL
    again. ZSTACK_BUILT_IN_HTTP_SERVER_IP is swapped to point the client
    library at one MN or the other.
    """
    global mevoco1_ip
    global mevoco2_ip
    global ipsec1
    global ipsec2
    test_util.test_skip('According issue #2720, the similar issue are won\'t fix ')
    mevoco1_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
    mevoco2_ip = os.environ['secondZStackMnIp']
    # --- setup on mevoco1: VM, its VR's public L3, a VIP, and the CIDR ---
    test_util.test_dsc('Create test vm in mevoco1')
    vm1 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    test_obj_dict1.add_vm(vm1)
    vm1.check()
    pri_l3_uuid1 = vm1.vm.vmNics[0].l3NetworkUuid
    vr1 = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid1)[0]
    l3_uuid1 = test_lib.lib_find_vr_pub_nic(vr1).l3NetworkUuid
    vip1 = test_stub.create_vip('ipsec1_vip', l3_uuid1)
    cond = res_ops.gen_query_conditions('uuid', '=', pri_l3_uuid1)
    first_zstack_cidrs = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0].ipRanges[0].networkCidr
    # --- same setup on mevoco2 ---
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    test_util.test_dsc('Create test vm in mevoco2')
    vm2 = test_stub.create_vlan_vm(os.environ.get('l3VlanDNATNetworkName'))
    test_obj_dict2.add_vm(vm2)
    vm2.check()
    pri_l3_uuid2 = vm2.vm.vmNics[0].l3NetworkUuid
    vr2 = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid2)[0]
    l3_uuid2 = test_lib.lib_find_vr_pub_nic(vr2).l3NetworkUuid
    vip2 = test_stub.create_vip('ipsec2_vip', l3_uuid2)
    cond = res_ops.gen_query_conditions('uuid', '=', pri_l3_uuid2)
    second_zstack_cidrs = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0].ipRanges[0].networkCidr
    # --- create IPsec connections with DIFFERENT pfs groups on each side ---
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    test_util.test_dsc('Create ipsec in mevoco1')
    ipsec1 = ipsec_ops.create_ipsec_connection('ipsec1', pri_l3_uuid1, vip2.get_vip().ip, '123456', vip1.get_vip().uuid, [second_zstack_cidrs], pfs="dh-group2")
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    test_util.test_dsc('Create ipsec in mevoco2')
    ipsec2 = ipsec_ops.create_ipsec_connection('ipsec2', pri_l3_uuid2, vip1.get_vip().ip, '123456', vip2.get_vip().uuid, [first_zstack_cidrs], pfs="dh-group5")
    # Mismatched pfs: tunnels must not come up, so pings must fail both ways.
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    if test_lib.lib_check_ping(vm1.vm, vm2.vm.vmNics[0].ip, no_exception=True):
        test_util.test_fail('vm in mevoco1[MN:%s] could still connect to vm in mevoco2[MN:%s] even pfs is different' % (mevoco1_ip, mevoco2_ip))
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    if test_lib.lib_check_ping(vm2.vm, vm1.vm.vmNics[0].ip, no_exception=True):
        test_util.test_fail('vm in mevoco2[MN:%s] could still connect to vm in mevoco1[MN:%s] even pfs is different' % (mevoco2_ip, mevoco1_ip))
    # Recreate mevoco2's connection without pfs override: tunnel should work.
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    ipsec_ops.delete_ipsec_connection(ipsec2.uuid)
    test_util.test_dsc('Create ipsec in mevoco2')
    ipsec2 = ipsec_ops.create_ipsec_connection('ipsec2', pri_l3_uuid2, vip1.get_vip().ip, '123456', vip2.get_vip().uuid, [first_zstack_cidrs])
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    if not test_lib.lib_check_ping(vm1.vm, vm2.vm.vmNics[0].ip):
        test_util.test_fail('vm in mevoco1[MN:%s] could not connect to vm in mevoco2[MN:%s]' % (mevoco1_ip, mevoco2_ip))
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    if not test_lib.lib_check_ping(vm2.vm, vm1.vm.vmNics[0].ip):
        test_util.test_fail('vm in mevoco1[MN:%s] could not connect to vm in mevoco2[MN:%s]' % (mevoco2_ip, mevoco1_ip))
    # Delete mevoco1's connection: pings must fail again in both directions.
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    ipsec_ops.delete_ipsec_connection(ipsec1.uuid)
    if test_lib.lib_check_ping(vm1.vm, vm2.vm.vmNics[0].ip, no_exception=True):
        test_util.test_fail('vm in mevoco1[MN:%s] could still connect to vm in mevoco2[MN:%s] after Ipsec is deleted' % (mevoco1_ip, mevoco2_ip))
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    if test_lib.lib_check_ping(vm2.vm, vm1.vm.vmNics[0].ip, no_exception=True):
        test_util.test_fail('vm in mevoco2[MN:%s] could still connect to vm in mevoco1[MN:%s] after Ipsec is deleted' % (mevoco2_ip, mevoco1_ip))
    # --- cleanup on both MNs ---
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    test_lib.lib_error_cleanup(test_obj_dict1)
    vip1.delete()
    test_obj_dict1.rm_vip(vip1)
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    ipsec_ops.delete_ipsec_connection(ipsec2.uuid)
    test_lib.lib_error_cleanup(test_obj_dict2)
    vip2.delete()
    test_obj_dict2.rm_vip(vip2)
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    test_util.test_pass('Create Ipsec Success')
def test():
    """MN VM failover test: network-down on all MN-capable hosts.

    Shuts down the network of every MN-capable host (which should stop the
    management-node VM), recovers a random majority of the hosts, and
    verifies the MN VM is restarted on exactly one host by the consul
    leader. Globals are set for env_recover() to restore the rest.
    """
    global vm
    global mn_host_list
    global need_recover_mn_host_list
    test_stub.skip_if_scenario_is_multiple_networks()
    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config, test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    # Random majority: (n + 1) / 2 hosts to recover later (Python 2 integer
    # division — ceil(n/2), enough for consul quorum).
    test_mn_host_list = random.sample(range(mn_host_num), (mn_host_num + 1) / 2)
    for host in mn_host_list:
        test_util.test_logger("shutdown host's network [%s]" % (host.ip_))
        test_stub.shutdown_host_network(host, test_lib.all_scenario_config)
    need_recover_mn_host_list = range(mn_host_num)
    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    # With every host's network down, the MN VM must not be running anywhere.
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(mn_host) != 0:
        test_util.test_fail('MN VM is still running on %d host(s)' % len(mn_host))
    # Recover the selected majority; remaining indexes are left for
    # env_recover() via need_recover_mn_host_list.
    for index in test_mn_host_list:
        test_util.test_logger("recover host [%s]" % (mn_host_list[index].ip_))
        test_stub.recover_host(mn_host_list[index], test_lib.all_scenario_config, test_lib.deploy_config)
        need_recover_mn_host_list.remove(index)
    test_util.test_logger("wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)
    new_mn_host_ip = test_stub.get_host_by_consul_leader(test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail("management node VM not run correctly on [%s] after its former host [%s] down for 20s" % (new_mn_host_ip, mn_host_list[0].ip_))
    # Poll up to 60 * 5s for the MN VM to appear on exactly one host.
    count = 60
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger("management node VM run after its former host down for 30s")
            break
        elif len(new_mn_host) > 1:
            # Split-brain: MN VM must never run on more than one host.
            test_util.test_fail("management node VM runs on more than one host after its former host down")
        time.sleep(5)
        count -= 1
    if len(new_mn_host) == 0:
        test_util.test_fail("management node VM does not run after its former host down for 30s")
    elif len(new_mn_host) > 1:
        test_util.test_fail("management node VM runs on more than one host after its former host down")
    #node_ops.wait_for_management_server_start(300)
    test_stub.wrapper_of_wait_for_management_server_start(600)
    # Exclude the first still-down host from the connectivity check.
    test_stub.ensure_hosts_connected(exclude_host=[mn_host_list[need_recover_mn_host_list[0]]])
    test_stub.ensure_bss_host_connected_from_sep_net_down(test_lib.scenario_file, test_lib.all_scenario_config, downMagt=False)
    test_stub.ensure_bss_connected()
    test_stub.ensure_pss_connected()
    test_stub.return_pass_ahead_if_3sites("TEST PASS")
    # Smoke test: the recovered MN can still create/destroy a VM.
    vm = test_stub.create_basic_vm()
    vm.check()
    vm.destroy()
    test_util.test_pass('Create VM Test Success')
    # NOTE(review): the enclosing `def test():` header for this fragment is
    # not visible in this chunk; `bss` is defined in that missing part.
    # Negative test: adding a deliberately broken qcow2 (declared as iso)
    # image is expected to FAIL with an "Inject" error.
    image_option = test_util.ImageOption()
    image_option.set_format('iso')
    image_option.set_name('test_negative_qcow2')
    image_option.set_system_tags('qemuga')
    image_option.set_mediaType('RootVolumeTemplate')
    image_option.set_url(os.environ.get('negativeQcow2Url'))
    image_option.set_backup_storage_uuid_list([bss[0].uuid])
    image_option.set_timeout(3600 * 1000)
    new_image = zstack_image_header.ZstackTestImage()
    new_image.set_creation_option(image_option)
    try:
        # The add is expected to raise; a message containing "Inject"
        # means the backend rejected the bad image as intended.
        new_image.add_root_volume_template()
    except Exception, e:
        if "Inject" in str(e):
            test_util.test_pass('test add negative image passed.')
    # Reached only if the add unexpectedly succeeded (or raised without
    # "Inject"); test_fail raises, so the delete below is unreachable.
    test_util.test_fail('test add negative image failed.')
    new_image.delete()
    #new_image.expunge([bss[0].uuid])


#Will be called only if exception happens in test().
def error_cleanup():
    """Best-effort cleanup of the test image after a test() failure."""
    global new_image
    if new_image:
        new_image.delete()
    pass
def test():
    """vCenter image-sync test across two mevoco management nodes.

    Adds the same vCenter to two MNs, uploads a vmtx image via mevoco2,
    verifies it appears on mevoco1 after a backup-storage reconnect, then
    deletes/expunges it via mevoco2 and verifies the deletion is synced to
    mevoco1 as well. Globals allow env_recover() to restore delete policies
    and remove the vCenters on failure.
    """
    global vcenter_uuid1
    global vcenter_uuid2
    global mevoco1_ip
    global mevoco2_ip
    global img_uuid
    global delete_policy1
    global delete_policy2
    print os.environ
    # NOTE(review): vcenter1_username is read from 'vcenter2_domain_name'
    # (and domain name from 'vcenter2_ip'). The username mapping looks like
    # an off-by-one copy/paste — one would expect 'vcenter2_username'.
    # TODO confirm against the test environment's variable schema before
    # changing; a rename would KeyError if the env var does not exist.
    vcenter1_name = os.environ['vcenter2_name']
    vcenter1_domain_name = os.environ['vcenter2_ip']
    vcenter1_username = os.environ['vcenter2_domain_name']
    vcenter1_password = os.environ['vcenter2_password']
    sync_image_url = os.environ['vcenter2_sync_image_url']
    image_name = os.environ['vcenter2_sync_image_name']
    mevoco1_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
    mevoco2_ip = os.environ['serverIp2']
    # --- register the vCenter on mevoco1 (delete policy -> Delay) ---
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    delete_policy1 = test_lib.lib_set_delete_policy('image', 'Delay')
    zone_uuid = res_ops.get_resource(res_ops.ZONE)[0].uuid
    inv = vct_ops.add_vcenter(vcenter1_name, vcenter1_domain_name, vcenter1_username, vcenter1_password, True, zone_uuid)
    vcenter_uuid1 = inv.uuid
    if vcenter_uuid1 == None:
        test_util.test_fail("vcenter_uuid is None")
    # --- register the SAME vCenter on mevoco2 ---
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    delete_policy2 = test_lib.lib_set_delete_policy('image', 'Delay')
    zone_uuid = res_ops.get_resource(res_ops.ZONE)[0].uuid
    inv = vct_ops.add_vcenter(vcenter1_name, vcenter1_domain_name, vcenter1_username, vcenter1_password, True, zone_uuid)
    vcenter_uuid2 = inv.uuid
    if vcenter_uuid2 == None:
        test_util.test_fail("vcenter_uuid is None")
    #bs_cond = res_ops.gen_query_conditions("name", '=', "vCenter[vm-center]")
    bs_cond = res_ops.gen_query_conditions("type", '=', "VCenter")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, \
            None, fields=['uuid'])
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")
    #add sync image in mevoco2
    image_option = test_util.ImageOption()
    image_option.set_name(image_name)
    #image_option.set_mediaType('RootVolumeTemplate')
    image_option.set_format('vmtx')
    image_option.set_system_tags('vcenter::datacenter::datacenter1')
    #image_option.set_url(os.environ.get(sync_image_url))
    image_option.set_url(sync_image_url)
    image_option.set_backup_storage_uuid_list([bss[0].uuid])
    new_image = zstack_image_header.ZstackTestImage()
    new_image.set_creation_option(image_option)
    #if a error happens here, check whether the image with the same name is already
    #exist in vcenter, which is also raise exception about can't download on all backup storage
    test_util.test_logger("add image from url:%s" % (sync_image_url))
    new_image.add_root_volume_template()
    #reconnect vcenter and check newly add image in mevoco1
    test_util.test_logger("check image sync from mevoco1")
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    bs_cond = res_ops.gen_query_conditions("type", '=', "VCenter")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, None, fields=['uuid'])
    # Reconnecting the backup storage forces a sync of the vCenter inventory.
    bs_ops.reconnect_backup_storage(bss[0].uuid)
    image_cond = res_ops.gen_query_conditions("name", '=', image_name)
    img_inv = res_ops.query_resource_fields(res_ops.IMAGE, image_cond, None, fields=['uuid'])[0]
    img_uuid = img_inv.uuid
    if not img_uuid:
        test_util.test_fail("local woodpecker image uuid is null")
    #delete image in mevoco2
    test_util.test_logger("delete image from mevoco2")
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    image_cond = res_ops.gen_query_conditions("name", '=', image_name)
    img_inv = res_ops.query_resource_fields(res_ops.IMAGE, image_cond, None, fields=['uuid'])[0]
    img_uuid = img_inv.uuid
    # Delete AND expunge so the image is really gone (policy is Delay).
    img_ops.delete_image(img_uuid)
    img_ops.expunge_image(img_uuid)
    #check image in mevoco1
    test_util.test_logger("check image delete sync from mevoco1")
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    bs_cond = res_ops.gen_query_conditions("type", '=', "VCenter")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, None, fields=['uuid'])
    bs_ops.reconnect_backup_storage(bss[0].uuid)
    image_cond = res_ops.gen_query_conditions("name", '=', image_name)
    #img_inv = res_ops.query_resource_fields(res_ops.IMAGE, image_cond, None, fields=['uuid'])[0]
    #img_uuid = img_inv.uuid
    img_inv = res_ops.query_resource_fields(res_ops.IMAGE, image_cond, None, fields=['uuid'])
    if img_inv:
        test_util.test_fail("local woodpecker image is not deleted as expected")
    # --- cleanup: restore delete policies and remove both vCenters ---
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    test_lib.lib_set_delete_policy('image', delete_policy2)
    if vcenter_uuid2:
        vct_ops.delete_vcenter(vcenter_uuid2)
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    test_lib.lib_set_delete_policy('image', delete_policy1)
    if vcenter_uuid1:
        vct_ops.delete_vcenter(vcenter_uuid1)
    test_util.test_pass("vcenter sync image test passed.")