def test():
    """Delete any leftover PXE servers, then verify a new PXE server can be created."""
    pxe_servers = res_ops.query_resource(res_ops.PXE_SERVER)
    # Clean up PXE servers left by earlier runs so the creation below starts fresh.
    # Bug fix: identity comparison 'is not None' instead of '!= None'.
    if pxe_servers is not None:
        for pxe in pxe_servers:
            bare_ops.delete_pxe(pxe.uuid)
    test_stub.create_pxe()
    test_util.test_pass('Create PXE Test Success')
def test():
    """Full-clone a VM and verify the clone carries exactly one volume."""
    global test_obj_dict, bs, ps
    # judge whether BS is imagestore
    bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)
    for storage in bs:
        if storage.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
            break
    else:
        test_util.test_skip('Skip test on non-imagestore')

    # judge whether PS is SharedBlock / AliyunNAS (unsupported here)
    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
    for storage in ps:
        if storage.type in ['SharedBlock', 'AliyunNAS']:
            test_util.test_skip('Skip test on SharedBlock and PS')

    image_name = os.environ.get('imageName_s')
    l3_name = os.environ.get('l3PublicNetworkName')
    vm = test_stub.create_vm("test_vm", image_name, l3_name)
    test_obj_dict.add_vm(vm)

    # Full clone, then the clone must own exactly one volume (its root).
    new_vm = vm.clone(['test_vm_clone_with_on_data_volume'], full=True)[0]
    test_obj_dict.add_vm(new_vm)
    volumes_number = len(test_lib.lib_get_all_volumes(new_vm.vm))
    if volumes_number != 1:
        test_util.test_fail('Did not find 1 volumes for [vm:] %s. But we assigned 1 data volume when create the vm. We only catch %s volumes' % (new_vm.vm.uuid, volumes_number))
    else:
        test_util.test_logger('Find 1 volumes for [vm:] %s.' % new_vm.vm.uuid)

    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Test clone vm with one data volume Success')
def test():
    """Verify reconnecting an imagestore BS is rejected under the community license.

    Loads a paid license, creates a zone plus an imagestore backup storage,
    reverts to the default (community) license, then expects the BS reconnect
    to fail with a 'commercial license required' error.
    """
    global bs_username, bs_hostname, bs_password, bs_name, bs_url, bs_sshport
    global new_image
    file_path = test_stub.gen_license('woodpecker', '*****@*****.**', '1', 'Prepaid', '1', '')
    test_stub.load_license(file_path)
    issued_date = test_stub.get_license_info().issuedDate
    expired_date = test_stub.license_date_cal(issued_date, 86400 * 1)
    test_stub.check_license("*****@*****.**", 1, None, False, 'Paid', issued_date=issued_date, expired_date=expired_date)

    test_util.test_logger('create zone and add the bs of the imagestore')
    node_uuid = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].uuid
    test_stub.create_zone()
    zone_uuid = res_ops.query_resource(res_ops.ZONE)[0].uuid
    bs_name = 'BS1'
    bs_hostname = os.environ.get('node1Ip')
    bs_username = os.environ.get('nodeUserName')
    bs_password = os.environ.get('nodePassword')
    bs_url = '/zstack_bs'
    bs_sshport = '22'
    test_stub.create_image_store_backup_storage(bs_name, bs_hostname, bs_username, bs_password, bs_url, bs_sshport)
    bs_uuid = res_ops.query_resource(res_ops.BACKUP_STORAGE)[0].uuid

    test_stub.reload_default_license()
    test_util.test_logger('Check default community license')
    try:
        bs_ops.reconnect_backup_storage(bs_uuid)
    except Exception as e:
        if "commercial" in str(e):
            test_util.test_pass('test reconnect bs failed, An operation failed, details: commercial license is required to use ImageStore.')
        # Bug fix: an unrelated error must not let the test end without a verdict.
        test_util.test_fail('reconnect bs failed with unexpected error: %s' % str(e))
    # Bug fix: reconnect succeeding under a community license is a failure;
    # previously the test simply returned with neither pass nor fail.
    test_util.test_fail('reconnect bs is expected to fail under community license')
def test():
    """Resize a VM's root volume while stopped (5G), then while running (6G)."""
    global vm
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    l3_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vm("test_resize_vm", image_name, l3_name)
    vm.check()
    vm.stop()
    vm.check()

    root = test_lib.lib_get_root_volume(vm.get_vm())
    vol_size = root.size
    volume_uuid = root.uuid

    def _resize_and_verify(target_size):
        # Resize and confirm the root volume reports exactly the requested size.
        vol_ops.resize_volume(volume_uuid, target_size)
        vm.update()
        actual = test_lib.lib_get_root_volume(vm.get_vm()).size
        if target_size != actual:
            test_util.test_fail('Resize Root Volume failed, size = %s' % actual)

    _resize_and_verify(1024 * 1024 * 1024 * 5)   # offline resize
    vm.start()
    _resize_and_verify(1024 * 1024 * 1024 * 6)   # online resize
    vm.destroy()
    test_util.test_pass('Resize VM Test Success')
def test():
    """Two-PS environment: detach one PS and verify VM states and new-VM placement."""
    ps_env = test_stub.PSEnvChecker()
    if ps_env.is_sb_ceph_env:
        env = test_stub.SanAndCephPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                                    first_ps_vm_number=VM_COUNT,
                                                    second_ps_vm_number=VM_COUNT,
                                                    first_ps_volume_number=VOLUME_NUMBER,
                                                    second_ps_volume_number=VOLUME_NUMBER)
    else:
        env = test_stub.TwoPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                             first_ps_vm_number=VM_COUNT,
                                             second_ps_vm_number=VM_COUNT,
                                             first_ps_volume_number=VOLUME_NUMBER,
                                             second_ps_volume_number=VOLUME_NUMBER)
    env.check_env()
    env.deploy_env()
    first_ps_vm_list = env.first_ps_vm_list
    second_ps_vm_list = env.second_ps_vm_list
    if env.new_ps:
        new_ps_list.append(env.second_ps)

    test_util.test_dsc('detach random one Primary Storage from cluster')
    selected_ps = random.choice([env.first_ps, env.second_ps])
    another_ps = env.second_ps if selected_ps is env.first_ps else env.first_ps
    ps_ops.detach_primary_storage(selected_ps.uuid, res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
    detached_ps_list.append(selected_ps)

    test_util.test_dsc('All vm in selected ps should STOP')
    for vm in first_ps_vm_list + second_ps_vm_list:
        vm.update()
    for vm in env.get_vm_list_from_ps(selected_ps):
        assert vm.get_vm().state == inventory.STOPPED
    for vm in env.get_vm_list_from_ps(another_ps):
        assert vm.get_vm().state == inventory.RUNNING

    # Operations against the detached PS must fail.
    with test_stub.expected_failure('start vm in ps that not attached to cluster', Exception):
        random.choice(env.get_vm_list_from_ps(selected_ps)).start()
    with test_stub.expected_failure("Create vm in detached ps", Exception):
        test_stub.create_multi_vms(name_prefix='test-vm', count=1, ps_uuid=selected_ps.uuid)

    test_util.test_dsc("Create 5 vms and check all should be in enabled PS")
    if ps_env.is_sb_ceph_env:
        # Pick the BS type that matches whichever PS is still attached.
        bs_type = 'Ceph' if selected_ps.type == "SharedBlock" else 'ImageStoreBackupStorage'
        vm_list = test_stub.create_multi_vms(name_prefix='test_vm', count=5, bs_type=bs_type)
    else:
        vm_list = test_stub.create_multi_vms(name_prefix='test_vm', count=5)
    for vm in vm_list:
        test_obj_dict.add_vm(vm)
    for vm in vm_list:
        assert vm.get_vm().allVolumes[0].primaryStorageUuid == another_ps.uuid
    test_util.test_pass('Multi PrimaryStorage Test Pass')
def test():
    """Provision-rate stress test: create, destroy and expunge 1000 VMs, timing creation."""
    os.environ['ZSTACK_THREAD_THRESHOLD'] = '1000'
    os.environ['ZSTACK_TEST_NUM'] = '1000'
    test_lib.lib_set_provision_memory_rate(20)
    test_lib.lib_set_provision_storage_rate(20)
    # Bug fix: was a bare lib_set_provision_cpu_rate(20) call (NameError) —
    # made consistent with the memory/storage setters above.
    test_lib.lib_set_provision_cpu_rate(20)
    Create()
    time.sleep(180)
    create_vm_begin_time = get_begin_time()
    create_vm_end_time = get_end_time()
    print("begin time = %s" % create_vm_begin_time)
    print("end time = %s" % create_vm_end_time)
    # Bug fix: pre-initialize so the final test_pass below cannot raise a
    # NameError when either timestamp is 0.
    create_1000_vm_time = 0
    if create_vm_end_time != 0 and create_vm_begin_time != 0:
        create_1000_vm_time = create_vm_end_time - create_vm_begin_time
        test_util.test_dsc("create_vm_time is " + str(create_1000_vm_time))
    Destroy_VM()
    time.sleep(180)
    Expunge_VM()
    time.sleep(180)
    zone_name = os.environ.get('zoneName')
    zone = res_ops.get_resource(res_ops.ZONE, name=zone_name)[0]
    zone_ops.delete_zone(zone.uuid)
    test_util.test_pass('Create 1000 vms success,takes %s time' % create_1000_vm_time)
def test():
    """Tear down the hybrid (Aliyun) test environment and clean every leftover resource."""
    # Clear ECS instance remained in Aliyun
    hybrid.add_datacenter_iz()
    hybrid.tear_down()
    clean_util.cleanup_all_vms_violently()
    clean_util.cleanup_none_vm_volumes_violently()
    clean_util.umount_all_primary_storages_violently()
    clean_util.cleanup_backup_storage()

    hosts = test_lib.lib_get_all_hosts_from_plan()
    if type(hosts) is not list:
        hosts = [hosts]
    cmd = host_plugin.DeleteVlanDeviceCmd()
    # Remove both test vlan devices from every host.
    for vlan_dev in ('eth0.10', 'eth0.11'):
        cmd.vlan_ethname = vlan_dev
        for host in hosts:
            http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.DELETE_VLAN_DEVICE_PATH), cmd)

    test_lib.setup_plan.stop_node()
    test_lib.lib_cleanup_host_ip_dict()
    test_util.test_pass('Hybrid Teardown Success')
def test():
    """Add a raw root-volume template from the local filesystem and boot a VM from it."""
    # Extend the backing file to ~300MB so it behaves like a real image file.
    os.system('dd if=/dev/zero of=%s bs=1M count=1 seek=300' % test_image)
    time.sleep(10)
    image_name = 'test-image-%s' % time.time()
    image_option = test_util.ImageOption()
    image_option.set_name(image_name)
    image_option.set_description('test image which is upload from local filesystem.')
    image_option.set_url('file://%s' % test_image)
    bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)[0]
    image_option.set_backup_storage_uuid_list([bs.uuid])
    image_option.set_format('raw')
    image_option.set_mediaType('RootVolumeTemplate')
    image_inv = img_ops.add_root_volume_template(image_option)
    time.sleep(10)

    image = zstack_image_header.ZstackTestImage()
    image.set_creation_option(image_option)
    image.set_image(image_inv)
    test_obj_dict.add_image(image)
    image.check()

    vm = test_stub.create_vm(image_name=image_name)
    vm.destroy()
    image.delete()
    # Adding the image must not consume/remove the local source file.
    if not os.path.exists(test_image):
        test_util.test_fail('test image disappeared, after add image.')
    os.system('rm -f %s' % test_image)
    test_util.test_pass('Test adding image from local stroage pass.')
def test():
    """Random robot test: mixed VM/volume operations until 4 running VMs coexist."""
    test_util.test_dsc('''
        Will doing random test operations, including vm create/stop/start/reboot
        /destroy, volume create/attach/detach/delete. It doesn't include SG VIP and
        snapshots operations. If reach max 4 coexisting running vm, testing will success and quit.
        ''')
    target_running_vm = 4
    test_util.test_dsc('Random Test Begin. Test target: 4 coexisting running VM (not include VR).')
    robot_test_obj = test_util.Robot_Test_Object()
    robot_test_obj.set_test_dict(test_dict)
    # Exclude SG / VIP / snapshot actions from the random action pool.
    robot_test_obj.set_exclusive_actions_list(
        test_state.TestAction.sg_actions
        + test_state.TestAction.vip_actions
        + test_state.TestAction.snapshot_actions)
    # Weight volume actions 4x so they are picked more frequently.
    priority_actions = test_state.TestAction.volume_actions * 4
    priority_action_obj = action_select.ActionPriority()
    priority_action_obj.add_priority_action_list(priority_actions)
    robot_test_obj.set_priority_actions(priority_action_obj)

    rounds = 1
    while len(test_dict.get_vm_list(vm_header.RUNNING)) < target_running_vm:
        print("test_dict: %s" % test_dict)
        test_util.test_dsc('New round %s starts: random operation pickup.' % rounds)
        test_lib.lib_vm_random_operation(robot_test_obj)
        test_util.test_dsc('Round %s finished. Begin status checking.' % rounds)
        rounds += 1
        test_lib.lib_robot_status_check(test_dict)

    test_util.test_dsc('Reach test pass exit criterial.')
    test_lib.lib_robot_cleanup(test_dict)
    test_util.test_pass('Create random VM Test Success')
def test():
    """Disable all primary storages and verify a running VM stays up and can suspend."""
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    l3_net_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_net_name)
    l3_net = test_lib.lib_get_l3_by_name(l3_net_name)
    vr_uuid = test_lib.lib_find_vr_by_l3_uuid(l3_net.uuid)[0].uuid
    host_uuid = test_lib.lib_get_vm_host(vm.get_vm()).uuid
    test_obj_dict.add_vm(vm)
    vm.check()

    test_stub.disable_all_pss()
    # A running VM must remain reachable while its PS is merely disabled.
    if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to runnning when PS change to disable state')
    vm.set_state(vm_header.RUNNING)
    vm.check()
    vm.suspend()
    vm.check()

    # Restore the environment for subsequent tests.
    test_stub.enable_all_pss()
    host_ops.reconnect_host(host_uuid)
    vm_ops.reconnect_vr(vr_uuid)
    test_util.test_pass('PS disable mode Test Success')
def test():
    """PS maintain mode: VM must stop, an ISO can still be attached, VM restarts after enable."""
    global test_obj_dict
    global ps_uuid
    global host_uuid
    global vr_uuid
    test_util.test_dsc('Create test vm and check')
    l3_1_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
    host_uuid = test_lib.lib_get_vm_host(vm.get_vm()).uuid
    test_obj_dict.add_vm(vm)
    vm.check()

    test_util.test_dsc('Add ISO Image')
    cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bs_uuid = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0].uuid
    img_option = test_util.ImageOption()
    img_option.set_name('iso')
    img_option.set_backup_storage_uuid_list([bs_uuid])
    # Serve a fake ISO from the MN's built-in tomcat.
    os.system("echo fake iso for test only > %s/apache-tomcat/webapps/zstack/static/test.iso" % (os.environ.get('zstackInstallPath')))
    img_option.set_url('http://%s:8080/zstack/static/test.iso' % (os.environ.get('node1Ip')))
    image_inv = img_ops.add_iso_template(img_option)
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)
    test_obj_dict.add_image(image)

    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    ps_uuid = ps.uuid
    ps_ops.change_primary_storage_state(ps_uuid, 'maintain')
    # Maintain mode must force the VM down.
    if not test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, '22', 90):
        test_util.test_fail('VM is expected to stop when PS change to maintain state')
    vm.set_state(vm_header.STOPPED)
    vm.check()

    test_util.test_dsc('Attach ISO to VM')
    cond = res_ops.gen_query_conditions('name', '=', 'iso')
    iso_uuid = res_ops.query_resource(res_ops.IMAGE, cond)[0].uuid
    img_ops.attach_iso(iso_uuid, vm.vm.uuid)

    ps_ops.change_primary_storage_state(ps_uuid, 'enable')
    host_ops.reconnect_host(host_uuid)
    # Restart every virtual router before starting the test VM again.
    for vr in test_lib.lib_get_all_vrs():
        vm_ops.start_vm(vr.uuid)
    vm.start()
    vm.check()
    vm.destroy()
    vm.check()
    test_util.test_pass('PS maintain mode Test Success')
def test():
    """Re-init a VM from its image and verify a file created beforehand is gone."""
    vm = test_stub.create_user_vlan_vm()
    test_obj_dict.add_vm(vm)
    vm.check()
    vm_ip = vm.get_vm().vmNics[0].ip
    # Drop a marker file that re-init is expected to wipe.
    rsp = test_lib.lib_execute_ssh_cmd(vm_ip, 'root', 'password', 'touch /root/test-file-for-reinit', 180)
    if rsp == False:
        test_util.test_fail('Fail to create file in VM')

    vm.stop()
    vm.reinit()
    vm.update()
    vm.check()
    vm.start()

    probe = '[ -e /root/test-file-for-reinit ] && echo yes || echo no'
    rsp = test_lib.lib_execute_ssh_cmd(vm_ip, 'root', 'password', probe, 180)
    if rsp == 'yes':
        test_util.test_fail('VM does not be reverted to image used for creating the VM, the later file still exists')
    vm.destroy()
    test_util.test_pass('Re-init VM Test Success')
def test():
    """Detach a VM's only nic, destroy/recover the VM, then verify start fails without a nic."""
    global test_obj_dict
    global delete_policy
    delete_policy = test_lib.lib_set_delete_policy('vm', 'Delay')
    l3_name = os.environ.get('l3VlanNetworkName1')
    image_name = os.environ.get('imageName_net')
    vm = test_stub.create_vm(l3_name=l3_name, image_name=image_name, vm_name='basic-test-vm')
    test_obj_dict.add_vm(vm)
    vm.check()
    vm_nic_uuid = vm.vm.vmNics[0].uuid
    net_ops.detach_l3(vm_nic_uuid)
    vm.destroy()
    vm.set_state(vm_header.DESTROYED)
    vm.check()
    vm.recover()
    vm.set_state(vm_header.STOPPED)
    vm.check()
    test_lib.lib_set_delete_policy('vm', delete_policy)
    try:
        vm.start()
    except Exception as e:
        # Expected: the recovered VM has no nic, so start must be rejected.
        test_util.test_pass('test detach l3 check vm passed.')
    # Bug fix: previously a successful start fell through with neither pass
    # nor fail; a nic-less VM starting successfully is a test failure.
    test_util.test_fail('vm start is expected to fail when vm has no nic attached')
def test():
    """antiHard affinity group: clones join the group via system tag; the source VM cannot."""
    h1_name = os.environ.get("hostName")
    cond = res_ops.gen_query_conditions('name', '=', h1_name)
    h1 = res_ops.query_resource(res_ops.HOST, cond)
    ag1 = ag_ops.create_affinity_group(name="ag1", policy="antiHard")
    vm1 = test_stub.create_ag_vm(host_uuid=h1[0].uuid)
    assert vm1.get_vm().hostUuid == h1[0].uuid
    test_obj_dict.add_vm(vm1)

    # Clone three VMs directly into the affinity group via system tag.
    new_vm = vm1.clone(names=["clone-vm1", "clone-vm2", "clone-vm3"],
                       systemtag=["affinityGroupUuid::%s" % ag1.uuid])
    for clone in new_vm:
        test_obj_dict.add_vm(clone)

    def _group_vm_uuids():
        # Collect resourceUuids currently recorded in the affinity group.
        ag = test_lib.lib_get_affinity_group_by_name(name="ag1")
        return [usage.resourceUuid for usage in ag.usages]

    vmuuids = _group_vm_uuids()
    assert new_vm[0].get_vm().uuid in vmuuids
    assert new_vm[1].get_vm().uuid in vmuuids
    assert new_vm[2].get_vm().uuid in vmuuids
    assert len(vmuuids) == 3

    # vm1 shares a host with a group member, so antiHard must reject it.
    try:
        ag_ops.add_vm_to_affinity_group(ag1.uuid, vm1.get_vm().uuid)
    except:
        test_util.test_logger("vm1 is not expected to add into affinity group [uuid: %s]" % ag1.uuid)
    vmuuids = _group_vm_uuids()
    assert vm1.get_vm().uuid not in vmuuids

    test_lib.lib_error_cleanup(test_obj_dict)
    ag_ops.delete_affinity_group(ag1.uuid)
    test_util.test_pass("Affinity Group antiHard policy pass")
def test():
    """vCenter VM lifecycle: create, stop, start, destroy, expunge, then remove vCenter."""
    global vcenter_uuid, vm
    vcenter1_name = os.environ['vcenter2_name']
    vcenter1_domain_name = os.environ['vcenter2_ip']
    # NOTE(review): the username is read from 'vcenter2_domain_name' — the env
    # keys look shifted; confirm against the environment's variable layout.
    vcenter1_username = os.environ['vcenter2_domain_name']
    vcenter1_password = os.environ['vcenter2_password']
    ova_image_name = os.environ['vcenter2_template_exist']
    network_pattern1 = os.environ['vcenter2_network_pattern1']

    zone_uuid = res_ops.get_resource(res_ops.ZONE)[0].uuid
    inv = vct_ops.add_vcenter(vcenter1_name, vcenter1_domain_name, vcenter1_username, vcenter1_password, True, zone_uuid)
    vcenter_uuid = inv.uuid
    if vcenter_uuid is None:
        test_util.test_fail("vcenter_uuid is None")

    vm = test_stub.create_vm_in_vcenter(vm_name='vm-start-stop-test',
                                        image_name=ova_image_name,
                                        l3_name=network_pattern1)
    # Exercise the full lifecycle, checking state after each transition.
    vm.check()
    vm.stop()
    vm.check()
    vm.start()
    vm.check()
    vm.destroy()
    vm.check()
    vm.expunge()
    vct_ops.delete_vcenter(vcenter_uuid)
    test_util.test_pass("vm start and stop of vcenter test passed.")
def test():
    """Export an ISO image from an imagestore BS and verify the exported URL ends in .iso."""
    imagestore_backup_storage = test_lib.lib_get_image_store_backup_storage()
    if not imagestore_backup_storage:
        test_util.test_skip('Not find image store type backup storage.')
    img_option = test_util.ImageOption()
    img_option.set_name('iso')
    root_disk_uuid = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName')).uuid
    bs_uuid = imagestore_backup_storage.uuid
    img_option.set_backup_storage_uuid_list([bs_uuid])

    mn_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
    # Ensure genisoimage is available on the MN, then build a small test ISO
    # under the built-in web server's static path.
    if not test_lib.lib_execute_ssh_cmd(mn_ip, 'root', 'password', "command -v genisoimage"):
        test_lib.lib_execute_ssh_cmd(mn_ip, 'root', 'password',
                                     "yum -y install genisoimage --disablerepo=* --enablerepo=zstack-local")
    build_iso_cmd = "genisoimage -o %s/apache-tomcat/webapps/zstack/static/zstack-repo/7/x86_64/os/test.iso /tmp/" % os.environ.get('zstackInstallPath')
    test_lib.lib_execute_ssh_cmd(mn_ip, 'root', 'password', build_iso_cmd)
    img_option.set_url('http://%s:8080/zstack/static/zstack-repo/7/x86_64/os/test.iso' % (mn_ip))

    image_inv = img_ops.add_iso_template(img_option)
    image = test_image.ZstackTestImage()
    image.set_image(image_inv)
    image.set_creation_option(img_option)
    test_obj_dict.add_image(image)

    image_url = image.export()
    image.delete_exported_image()
    test_lib.lib_robot_cleanup(test_obj_dict)
    if image_url.endswith('.iso'):
        test_util.test_pass('Export ISO Image Test Success')
    else:
        test_util.test_fail('Export ISO Image Test Fail, exported ISO image ends with %s' % (image_url.split('.')[-1]))
def test():
    """Upgrade a ZStack 1.9 MN VM to the latest version via repo and re-verify the MN."""
    global vm_inv
    test_util.test_dsc('Create test vm to test zstack upgrade by -u.')
    image_name = os.environ.get('imageName_i_c7_z_1.9')
    zstack_latest_version = os.environ.get('zstackLatestVersion')
    zstack_latest_path = os.environ.get('zstackLatestInstaller')
    vm_name = os.environ.get('vmName')

    vm_inv = test_stub.create_vm_scenario(image_name, vm_name)
    vm_ip = vm_inv.vmNics[0].ip
    test_lib.lib_wait_target_up(vm_ip, 22)
    test_stub.make_ssh_no_password(vm_ip, tmp_file)

    test_util.test_logger('Update MN IP')
    test_stub.update_mn_hostname(vm_ip, tmp_file)
    test_stub.update_mn_ip(vm_ip, tmp_file)
    test_stub.start_mn(vm_ip, tmp_file)
    test_stub.check_installation(vm_ip, tmp_file)

    test_util.test_logger('Upgrade zstack to latest with repo')
    test_stub.update_repo(vm_ip, tmp_file)
    test_stub.upgrade_zstack(vm_ip, zstack_latest_path, tmp_file)
    test_stub.check_zstack_version(vm_ip, tmp_file, zstack_latest_version)
    test_stub.start_mn(vm_ip, tmp_file)
    test_stub.check_mn_running(vm_ip, tmp_file)
    test_stub.check_installation(vm_ip, tmp_file)

    os.system('rm -f %s' % tmp_file)
    test_stub.destroy_vm_scenario(vm_inv.uuid)
    test_util.test_pass('ZStack upgrade Test Success')
def test():
    """Resize a thin-provisioned root volume offline (5G) then online (6G)."""
    global vm
    create_vm_option = test_util.VmOption()
    create_vm_option.set_rootVolume_systemTags(["volumeProvisioningStrategy::ThinProvisioning"])
    create_vm_option.set_name('test_resize_vm_root_volume')
    vm = test_lib.lib_create_vm(create_vm_option)
    vm.check()
    vm.stop()
    vm.check()

    root = test_lib.lib_get_root_volume(vm.get_vm())
    vol_size = root.size
    volume_uuid = root.uuid
    gig = 1024 * 1024 * 1024

    # First pass resizes the stopped VM, second pass resizes while running.
    for target, start_after in ((gig * 5, True), (gig * 6, False)):
        vol_ops.resize_volume(volume_uuid, target)
        vm.update()
        actual = test_lib.lib_get_root_volume(vm.get_vm()).size
        if target != actual:
            test_util.test_fail('Resize Root Volume failed, size = %s' % actual)
        if start_after:
            vm.start()

    vm.destroy()
    test_util.test_pass('Resize VM Test Success')
def test():
    """Snapshot a root volume, resize the volume, verify the size, then delete the snapshot."""
    global vm
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    l3_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vm("test_resize_vm", image_name, l3_name)
    test_obj_dict.add_vm(vm)
    vm.check()
    vm.stop()
    vm.check()

    root = test_lib.lib_get_root_volume(vm.get_vm())
    vol_size = root.size
    volume_uuid = root.uuid
    set_size = 1024 * 1024 * 1024 * 5

    # Take a snapshot first so the resize runs with snapshots present.
    snapshots = test_obj_dict.get_volume_snapshot(volume_uuid)
    snapshots.set_utility_vm(vm)
    snapshots.create_snapshot('create_snapshot1')
    snapshots.check()

    vol_ops.resize_volume(volume_uuid, set_size)
    vm.update()
    vol_size_after = test_lib.lib_get_root_volume(vm.get_vm()).size
    if set_size != vol_size_after:
        test_util.test_fail('Resize Root Volume failed, size = %s' % vol_size_after)

    snapshots.delete()
    test_obj_dict.rm_volume_snapshot(snapshots)
    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Resize VM Snapshot Test Success')
def test():
    """Negative test: changing the password of a VM whose image lacks the
    qemu-guest-agent system tag must fail with a CreateSystemTag error."""
    global vm
    test_util.test_dsc('create VM with setting password')
    for root_password in root_password_list:
        test_util.test_dsc("root_password: \"%s\"" % (root_password))
        vm = test_stub.create_vm(vm_name='c7-vm-no-sys-tag', image_name="imageName_i_c7_no_tag")
        # Require an imagestore / sftp / ceph backup storage; otherwise skip.
        backup_storage_list = test_lib.lib_get_backup_storage_list_by_vm(vm.vm)
        for bs in backup_storage_list:
            if bs.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
                break
            if bs.type == inventory.SFTP_BACKUP_STORAGE_TYPE:
                break
            if bs.type == inventory.CEPH_BACKUP_STORAGE_TYPE:
                break
        else:
            vm.destroy()
            test_util.test_skip('Not find image store type backup storage.')
        vm.check()
        try:
            vm_ops.change_vm_password(vm.get_vm().uuid, "root", root_password)
        except Exception as e:
            if "CreateSystemTag" in str(e):
                test_util.test_pass("negative test of change a no system tag image passed.")
            else:
                # Bug fix: the message must be %-formatted; previously str(e)
                # was passed as a stray second positional argument.
                test_util.test_fail("negative test failed with not expected log: %s" % str(e))
    # Bug fix: the loop previously fell through silently when no exception
    # was ever raised; success of change_vm_password is a failure here.
    test_util.test_fail("negative test failed: change_vm_password unexpectedly succeeded")
def test():
    """Verify the scheduler's reported current time falls inside the local sampling window."""
    system_time1 = int(time.time())
    current_time = schd_ops.get_current_time().currentTime
    system_time2 = int(time.time())
    # Bug fix: the old check (t1 != cur and t2 != cur) failed whenever the
    # clock ticked between the two samples; accept any value in [t1, t2].
    if not (system_time1 <= current_time.Seconds <= system_time2):
        test_util.test_fail('get_current_time not get expected time')
    test_util.test_pass('Create VM Test Success')
def test():
    """Local-storage capacity must be identical before creating and after expunging a VM."""
    test_util.test_dsc('Test storage capacity when using expunge vm')
    if conf_ops.get_global_config_value('vm', 'deletionPolicy') != 'Delay':
        test_util.test_skip('vm delete_policy is not Delay, skip test.')
        return
    zone_uuid = res_ops.query_resource(res_ops.ZONE)[0].uuid
    cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
    cond = res_ops.gen_query_conditions('status', '=', 'Connected', cond)
    host = res_ops.query_resource_with_num(res_ops.HOST, cond, limit=1)
    if not host:
        test_util.test_skip('No Enabled/Connected host was found, skip test.')
        return True
    ps = res_ops.query_resource_with_num(res_ops.PRIMARY_STORAGE, cond, limit=1)
    if not ps:
        test_util.test_skip('No Enabled/Connected primary storage was found, skip test.')
        return True
    host = host[0]
    ps = ps[0]
    host_res = vol_ops.get_local_storage_capacity(host.uuid, ps.uuid)[0]
    avail_cap = host_res.availableCapacity

    vm = test_stub.create_vm(vm_name='basic-test-vm', host_uuid=host.uuid)
    test_obj_dict.add_vm(vm)
    time.sleep(1)
    vm.destroy()
    vm.expunge()

    host_res2 = vol_ops.get_local_storage_capacity(host.uuid, ps.uuid)[0]
    # Bug fix: read the post-expunge capacity from host_res2; it previously
    # re-read host_res, making the comparison below always pass.
    avail_cap2 = host_res2.availableCapacity
    if avail_cap != avail_cap2:
        test_util.test_fail('PS capacity is not same after create/expunge vm on host: %s. Capacity before create vm: %s, after expunge vm: %s ' % (host.uuid, avail_cap, avail_cap2))
    test_util.test_pass('Expunge VM Test Success')
def test():
    """Disable an NFS PS: existing VM keeps running, volume creation fails, new VM still works."""
    ps_env = test_stub.PSEnvChecker()
    nfs_ps = ps_env.get_random_nfs()

    test_util.test_dsc("Create 1 vm with {} data volume".format(VOLUME_NUMBER))
    data_vm = test_stub.create_multi_vms(name_prefix='test-', count=1, data_volume_number=VOLUME_NUMBER)[0]
    test_obj_dict.add_vm(data_vm)

    test_util.test_dsc("disable NFS PS")
    ps_ops.change_primary_storage_state(nfs_ps.uuid, state='disable')
    disabled_ps_list.append(nfs_ps)

    test_util.test_dsc("make sure VM till OK and running")
    data_vm.update()
    data_vm.check()
    assert data_vm.get_vm().state == inventory.RUNNING

    # Creating a data volume on the disabled NFS PS must be rejected.
    with test_stub.expected_failure("Create datavol in nfs-local env when nfs disabled", Exception):
        test_stub.create_multi_volumes(count=1, ps=nfs_ps)

    test_util.test_dsc("Try to create vm")
    extra_vm = test_stub.create_multi_vms(name_prefix='test-vm', count=1)[0]
    test_obj_dict.add_vm(extra_vm)
    test_util.test_pass('Multi PrimaryStorage Test Pass')
def test():
    """Verify the scheduler's current time lies within the sampled system-time window."""
    system_time1 = int(time.time())
    current_time = schd_ops.get_current_time().currentTime
    system_time2 = int(time.time())
    # Bug fix: with 'and' the failure branch was unreachable (t1 <= t2 always,
    # so t1 > cur and t2 < cur can never both hold); the test must fail when
    # the reported time falls outside [t1, t2] on either side.
    if system_time1 > current_time.Seconds or system_time2 < current_time.Seconds:
        test_util.test_fail('get_current_time not get expected time[%s, %s]: %s' % (system_time1, system_time2, current_time.Seconds))
    test_util.test_pass('Create VM Test Success')
def test():
    """antiHard affinity group: 3 VMs land on distinct hosts, a 4th creation must fail."""
    ag1 = ag_ops.create_affinity_group(name="ag1", policy="antiHard")
    vm1 = test_stub.create_ag_vm(affinitygroup_uuid=ag1.uuid)
    test_obj_dict.add_vm(vm1)
    vm2 = test_stub.create_ag_vm(affinitygroup_uuid=ag1.uuid)
    test_obj_dict.add_vm(vm2)
    # antiHard: every pair of group members must be on different hosts.
    assert vm1.get_vm().hostUuid != vm2.get_vm().hostUuid
    vm3 = test_stub.create_ag_vm(affinitygroup_uuid=ag1.uuid)
    test_obj_dict.add_vm(vm3)
    assert vm1.get_vm().hostUuid != vm3.get_vm().hostUuid
    assert vm2.get_vm().hostUuid != vm3.get_vm().hostUuid

    # With all hosts taken, a fourth member cannot be placed.
    vm4 = None
    try:
        vm4 = test_stub.create_ag_vm(affinitygroup_uuid=ag1.uuid)
    except:
        if not vm4:
            test_util.test_logger("vm4 isn't created as expected")
    finally:
        if vm4:
            test_util.test_fail("Test Fail, vm4 [uuid:%s] is not expected to be created" % vm4.get_vm().uuid)

    test_lib.lib_error_cleanup(test_obj_dict)
    ag_ops.delete_affinity_group(ag1.uuid)
    test_util.test_pass("Affinity Group antiHard policy pass")
def test():
    """Demote the VIP-holding HA node, verify the VIP drifts, then create a VM."""
    global vm
    global vip_s_vm_cfg_lst
    vip_s_vm_cfg_lst = test_stub.get_s_vm_cfg_lst_vip_bind(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(vip_s_vm_cfg_lst) != 1:
        test_util.test_fail('vip has been running on %d host(s)' % len(vip_s_vm_cfg_lst))

    vip_host = vip_s_vm_cfg_lst[0]
    test_util.test_logger("disconnect host [%s]" % (vip_host.ip_))
    test_stub.exec_zsha2_demote(vip_host.ip_, "root", "password")
    time.sleep(5)

    # After the demotion the VIP must land on the predicted peer.
    expected_vip_s_vm_cfg_lst_ip = test_stub.get_expected_vip_s_vm_cfg_lst_after_switch(
        test_lib.all_scenario_config, test_lib.scenario_file, vip_host.ip_)
    if not test_stub.check_if_vip_is_on_host(test_lib.all_scenario_config,
                                             test_lib.scenario_file,
                                             expected_vip_s_vm_cfg_lst_ip):
        test_util.test_fail("find vip should drift on ip %s, but is not on it." % (expected_vip_s_vm_cfg_lst_ip))
    vip_s_vm_cfg_lst_new = test_stub.get_s_vm_cfg_lst_vip_bind(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(vip_s_vm_cfg_lst_new) != 1:
        test_util.test_fail('vip has been running on %d host(s)' % len(vip_s_vm_cfg_lst_new))

    # Wait for the MN to come back, then sanity-check the whole environment.
    test_stub.wrapper_of_wait_for_management_server_start(600)
    test_stub.ensure_hosts_connected(exclude_host=[vip_host])
    test_stub.ensure_bss_connected()
    test_stub.ensure_pss_connected()
    vm = test_stub.create_basic_vm()
    vm.check()
    vm.destroy()
    test_util.test_pass('Create VM Test Success')
def test():
    """Add images concurrently while checker threads track add-image progress."""
    global threads
    global checker_threads
    bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
    bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, None)
    if not bss:
        test_util.test_skip("not find available backup storage. Skip test")
    # Only ceph or imagestore backup storage reports add-image progress.
    if bss[0].type != inventory.CEPH_BACKUP_STORAGE_TYPE:
        if hasattr(inventory, 'IMAGE_STORE_BACKUP_STORAGE_TYPE') and bss[0].type != inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
            test_util.test_skip("not find available imagestore or ceph backup storage. Skip test")

    for idx in range(threads_num):
        threads[idx] = threading.Thread(target=add_image, args=(bss[0].uuid, idx, ))
        threads[idx].start()
    for idx in range(threads_num):
        checker_threads[idx] = threading.Thread(target=check_add_image_progress, args=(idx, ))
        checker_threads[idx].start()

    # Join each pair, then validate and remove the image it produced.
    for idx in range(threads_num):
        checker_threads[idx].join()
        threads[idx].join()
        images[idx].check()
        images[idx].delete()
    test_util.test_pass('Add image Progress Test Success')
def test():
    """Create one VM on 3 L3 networks (no-VR, novlan VR, vlan VR) and expect 3 nics."""
    test_util.test_dsc('''
        Test Description: Will create 1 VM with 3 l3 networks. 1 l3_network
        is not using VR; 1 l3_network is using novlan VR; 1 l3_network is
        using vlan VR.
        Resource required: Need support 3 VMs (1 test VM + 2 VR VMs) existing
        at the same time. This test required a special image, which was
        configed with at least 3 enabled NICs (e.g. eth0, eth1, eth2).
        ''')
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    # Gather the three L3 networks the VM will attach to.
    l3_net_list = []
    for env_key in ('l3VlanNetworkName1', 'l3VlanNetworkName3', 'l3VlanNetworkName4'):
        l3_name = os.environ.get(env_key)
        l3_net_list.append(test_lib.lib_get_l3_by_name(l3_name).uuid)

    vm = test_stub.create_vm(l3_net_list, image_uuid, '3_l3_vm')
    test_obj_dict.add_vm(vm)
    vm.check()
    if len(vm.vm.vmNics) == 3:
        test_util.test_logger("Find 3 expected Nics in new created VM.")
    else:
        test_util.test_fail("New create VM doesn't not have 3 Nics. It only have %s" % len(vm.get_vm().vmNics))
    vm.destroy()
    test_util.test_pass('Create 1 VM with 3 l3_network (1 vlan VR, 1 novlan VR and 1 no VR L3network) successfully.')
def test():
    """Resize a root volume, clone the VM, and verify the clone inherits the new size."""
    if res_ops.query_resource(res_ops.SFTP_BACKUP_STORAGE):
        test_util.test_skip("sftp backupstorage doesn't support for clone test. Skip test")
    global vm
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    l3_name = os.environ.get('l3VlanNetworkName1')
    vm = test_stub.create_vm("test_resize_vm", image_name, l3_name)
    test_obj_dict.add_vm(vm)
    vm.check()

    root = test_lib.lib_get_root_volume(vm.get_vm())
    vol_size = root.size
    volume_uuid = root.uuid
    set_size = 1024 * 1024 * 1024 * 5
    vol_ops.resize_volume(volume_uuid, set_size)
    vm.update()
    vol_size_after = test_lib.lib_get_root_volume(vm.get_vm()).size
    if set_size != vol_size_after:
        test_util.test_fail('Resize Root Volume failed, size = %s' % vol_size_after)

    # The clone's root volume must report the resized capacity too.
    new_vm = vm.clone(['vm_clone'])[0]
    test_obj_dict.add_vm(new_vm)
    new_volume_uuid = test_lib.lib_get_root_volume_uuid(new_vm.get_vm())
    vol_size_after = test_lib.lib_get_root_volume(new_vm.get_vm()).size
    if set_size != vol_size_after:
        test_util.test_fail('Resize Root Volume failed, size = %s' % vol_size_after)

    test_lib.lib_error_cleanup(test_obj_dict)
    test_util.test_pass('Resize VM Snapshot Test Success')
def test():
    """Suite setup: deploy scenario if needed, run the setup plan, seed the DB and defaults."""
    if test_lib.scenario_config != None and test_lib.scenario_file != None and not os.path.exists(test_lib.scenario_file):
        scenario_operations.deploy_scenario(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config)
        test_util.test_skip('Suite Setup Success')
    if test_lib.scenario_config != None and test_lib.scenario_destroy != None:
        scenario_operations.destroy_scenario(test_lib.all_scenario_config, test_lib.scenario_destroy)

    setup = setup_actions.SetupAction()
    setup.plan = test_lib.all_config
    setup.run()

    # In scenario mode pass the management-node IPs to the extra setup
    # script; otherwise run it without arguments.
    if test_lib.scenario_config != None and test_lib.scenario_file != None and os.path.exists(test_lib.scenario_file):
        mn_ips = deploy_operations.get_nodes_from_scenario_file(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config)
        if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
            os.system("bash %s '%s'" % (EXTRA_SUITE_SETUP_SCRIPT, mn_ips))
    elif os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
        os.system("bash %s" % (EXTRA_SUITE_SETUP_SCRIPT))

    deploy_operations.deploy_initial_database(test_lib.deploy_config, test_lib.all_scenario_config, test_lib.scenario_file)
    delete_policy = test_lib.lib_set_delete_policy('vm', 'Direct')
    delete_policy = test_lib.lib_set_delete_policy('volume', 'Direct')
    delete_policy = test_lib.lib_set_delete_policy('image', 'Direct')
    if test_lib.lib_get_ha_selffencer_maxattempts() != None:
        test_lib.lib_set_ha_selffencer_maxattempts('60')
        test_lib.lib_set_ha_selffencer_storagechecker_timeout('60')
    test_lib.lib_set_primary_storage_imagecache_gc_interval(1)
    test_util.test_pass('Suite Setup Success')
def test():
    """Storage over-provision test.

    Sets the storage provision rate to 2.5, sizes data-volume offerings so
    that `target_volume_num` volumes consume (almost) all over-provisioned
    capacity, creates/attaches them, then verifies that one more volume
    cannot be attached.

    Skips on SharedBlock/AliyunNAS/Ceph/SharedMountPoint primary storage and
    when more than one PS is deployed.
    """
    global res
    global original_rate
    test_util.test_dsc('Test storage over provision method')
    primary_storage_list = res_ops.query_resource(res_ops.PRIMARY_STORAGE)
    for ps in primary_storage_list:
        # if ps.type == "SharedBlock":
        if ps.type in ["SharedBlock", "AliyunNAS"]:
            test_util.test_skip(
                'SharedBlock primary storage does not support overProvision')
    test_lib.lib_skip_if_ps_num_is_not_eq_number(1)
    zone_uuid = res_ops.query_resource(res_ops.ZONE)[0].uuid
    cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
    cond = res_ops.gen_query_conditions('status', '=', 'Connected', cond)
    host = res_ops.query_resource_with_num(res_ops.HOST, cond, limit=1)
    if not host:
        test_util.test_skip('No Enabled/Connected host was found, skip test.')
        return True
    ps = res_ops.query_resource_with_num(res_ops.PRIMARY_STORAGE, cond, limit=1)
    if not ps:
        test_util.test_skip(
            'No Enabled/Connected primary storage was found, skip test.')
        return True
    host = host[0]
    ps = ps[0]
    ps_type = ps.type
    #TODO: Fix ceph testing
    if ps_type == 'Ceph' or ps_type == 'SharedMountPoint':
        test_util.test_skip('skip test for ceph and smp.')
    over_provision_rate = 2.5
    target_volume_num = 12
    kept_disk_size = 10 * 1024 * 1024
    vm = test_stub.create_vm(vm_name = 'storage_over_prs_vm_1', \
            host_uuid = host.uuid)
    test_obj_dict.add_vm(vm)
    vm.check()
    avail_cap = get_storage_capacity(ps_type, host.uuid, ps.uuid)
    if avail_cap < kept_disk_size:
        test_util.test_skip(
            'available disk capacity:%d is too small, skip test.' % avail_cap)
        return True
    # 'res' is the reserved primary-storage size; it is excluded from the
    # capacity used to size the per-volume disk offering.
    res = sizeunit.get_size(test_lib.lib_get_reserved_primary_storage())
    original_rate = test_lib.lib_set_provision_storage_rate(
        over_provision_rate)
    #data_volume_size = int(over_provision_rate * (avail_cap - kept_disk_size) / target_volume_num)
    data_volume_size = int(over_provision_rate * (avail_cap - res) / target_volume_num)
    #will change the rate back to check if available capacity is same with original one. This was a bug, that only happened when system create 1 vm.
    test_lib.lib_set_provision_storage_rate(original_rate)
    avail_cap_tmp = get_storage_capacity(ps_type, host.uuid, ps.uuid)
    if avail_cap != avail_cap_tmp:
        test_util.test_fail(
            'disk size is not same, between 2 times provision. Before change over rate, 1st cap: %d; 2nd cap: %d' % (avail_cap, avail_cap_tmp))
    test_lib.lib_set_provision_storage_rate(over_provision_rate)
    test_util.test_logger(
        'Will create a serial of volume. Each of them will have %d size.' % data_volume_size)
    disk_offering_option = test_util.DiskOfferingOption()
    disk_offering_option.set_name('storage-over-ps-test')
    disk_offering_option.set_diskSize(data_volume_size)
    data_volume_offering = vol_ops.create_volume_offering(disk_offering_option)
    test_obj_dict.add_disk_offering(data_volume_offering)
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(data_volume_offering.uuid)
    # Create and attach volumes until the over-provisioned capacity is used up;
    # any failure inside this loop is unexpected and re-raised.
    times = 1
    while (times <= target_volume_num):
        try:
            volume_creation_option.set_name('volume-%d' % times)
            volume = test_stub.create_volume(volume_creation_option)
            test_obj_dict.add_volume(volume)
            test_util.test_logger(
                'Current available storage size: %d' % get_storage_capacity(ps_type, host.uuid, ps.uuid))
            volume.attach(vm)
        except Exception as e:
            test_util.test_logger(
                "Unexpected volume Creation Failure in storage over provision test. ")
            raise e
        times += 1
    time.sleep(2)
    avail_cap2 = (get_storage_capacity(ps_type, host.uuid, ps.uuid) - res)
    if avail_cap2 > data_volume_size:
        test_util.test_fail(
            'Available disk size: %d is still bigger than offering disk size: %d , after creating %d volumes.' % (avail_cap2, data_volume_size, target_volume_num))
    # One extra volume must fail to attach: remaining capacity is below the
    # offering size.
    try:
        volume_creation_option.set_name('volume-%d' % (times + 1))
        volume = test_stub.create_volume(volume_creation_option)
        test_obj_dict.add_volume(volume)
        volume.attach(vm)
    except:
        test_util.test_logger(
            "Expected Volume Creation Failure in storage over provision test. ")
    else:
        test_util.test_fail(
            "The %dth Volume is still attachable, which is wrong" % (target_volume_num + 1))
    test_lib.lib_set_provision_storage_rate(original_rate)
    test_lib.lib_robot_cleanup(test_obj_dict)
    # NOTE(review): message says 'Memory Over Provision' but this test covers
    # storage over-provision — looks like a copy/paste; confirm before fixing.
    test_util.test_pass('Memory Over Provision Test Pass')
def test():
    """On local storage: gracefully stop a host carrying a NeverStop-HA VM and
    a plain VM, then verify the HA VM self-starts after host recovery while
    the plain VM stays Stopped.
    """
    global vm
    global host_uuid
    global test_host
    global host_ip
    global max_attempts
    global storagechecker_timeout
    allow_ps_list = [inventory.LOCAL_STORAGE_TYPE]
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    test_lib.clean_up_all_vr()
    #vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    #vr_host_ips = []
    #for vr in vrs:
    #    vr_host_ips.append(test_lib.lib_find_host_by_vr(vr).managementIp)
    #    if test_lib.lib_is_vm_running(vr) != True:
    #        vm_ops.start_vm(vr.uuid)
    #time.sleep(60)
    # Pick an Enabled/Connected host that is not the management node so the
    # MN survives the host shutdown below.
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip, conditions)
    #for vr_host_ip in vr_host_ips:
    #    conditions = res_ops.gen_query_conditions('managementIp', '!=', vr_host_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    # vm: gets HA NeverStop below; vm2: no HA level, used as the control case.
    vm_creation_option.set_name('ls_vm_ha_self_start')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    vm_creation_option.set_name('ls_vm_none_status')
    vm2 = test_vm_header.ZstackTestVm()
    vm2.set_creation_option(vm_creation_option)
    vm2.create()
    test_stub.ensure_host_has_no_vr(host_uuid)
    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    test_util.test_logger("host %s is disconnecting" % (host_ip))
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
    host_list = test_stub.get_sce_hosts(test_lib.all_scenario_config, test_lib.scenario_file)
    for host in host_list:
        if host.ip_ == host_ip:
            test_host = host
            break
    if not test_host:
        test_util.test_fail('there is no host with ip %s in scenario file.' % (host_ip))
    test_stub.stop_host(test_host, test_lib.all_scenario_config)
    vm_stop_time = None
    cond = res_ops.gen_query_conditions('name', '=', 'ls_vm_ha_self_start')
    cond = res_ops.gen_query_conditions('uuid', '=', vm.vm.uuid, cond)
    # Wait up to 300s for the HA VM to be marked Stopped, then bring the host
    # back (including its vlan config).
    for i in range(0, 300):
        vm_stop_time = i
        if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == "Stopped":
            test_stub.start_host(test_host, test_lib.all_scenario_config)
            test_stub.recover_host_vlan(test_host, test_lib.all_scenario_config, test_lib.deploy_config)
            break
        time.sleep(1)
    if vm_stop_time is None:
        vm_stop_time = 300
    # Use the remaining budget of the 300s window to wait for the HA restart.
    for i in range(vm_stop_time, 300):
        if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == "Running":
            break
        time.sleep(1)
    else:
        test_util.test_fail(
            "vm has not been changed to running as expected within 300s.")
    vm.destroy()
    # The non-HA VM must have stayed Stopped.
    cond = res_ops.gen_query_conditions('name', '=', 'ls_vm_none_status')
    cond = res_ops.gen_query_conditions('uuid', '=', vm2.vm.uuid, cond)
    if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state != "Stopped":
        test_util.test_fail("vm none is not change to Stopped as expected.")
    test_util.test_pass(
        'Test checking VM ha and none after host graceful stop Success')
def test():
    """On shared storage (Ceph/NFS/SMP): force-stop the host of a NeverStop-HA
    VM and verify the VM restarts on another host within 480s (and within
    240s), keeping the dead host recorded as its last host.
    """
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout
    allow_ps_list = [
        inventory.CEPH_PRIMARY_STORAGE_TYPE,
        inventory.NFS_PRIMARY_STORAGE_TYPE, 'SharedMountPoint'
    ]
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    #l3_name = os.environ.get('l3NoVlanNetworkName1')
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    test_lib.clean_up_all_vr()
    #vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    #vr_host_ips = []
    #for vr in vrs:
    #    vr_host_ips.append(test_lib.lib_find_host_by_vr(vr).managementIp)
    #    if test_lib.lib_is_vm_running(vr) != True:
    #        vm_ops.start_vm(vr.uuid)
    #time.sleep(60)
    # Place the VM on an Enabled/Connected host other than the MN host.
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip, conditions)
    #for vr_host_ip in vr_host_ips:
    #    conditions = res_ops.gen_query_conditions('managementIp', '=', vr_host_ip, conditions)
    #    break
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    #vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    #target_host_uuid = test_lib.lib_find_host_by_vm(vm.get_vm()).uuid
    #for vr in vrs:
    #    if test_lib.lib_find_host_by_vr(vr).managementIp != test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp:
    #        vm_ops.migrate_vm(vr.uuid, target_host_uuid)
    test_stub.ensure_all_vrs_on_host(host_uuid)
    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    host_port = test_lib.lib_get_host_port(host_ip)
    test_util.test_logger("host %s is disconnecting" % (host_ip))
    host_uuid = test_lib.lib_find_host_by_vm(vm.get_vm()).uuid
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
    # Kill the host out-of-band via the configured force-stop script.
    test_util.test_logger("force stop host: %s" % (host_ip))
    os.system('bash -ex %s %s' % (os.environ.get('hostForceStopScript'), host_ip))
    test_util.test_logger("host is expected to shutdown for a while")
    # Poll once per second for up to 480s until the VM reports a new host IP.
    cost_time = 0
    for i in range(240 * 2):
        cost_time = i
        time.sleep(1)
        vm.update()
        new_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
        if new_ip != host_ip:
            break
    else:
        test_util.test_fail(
            "VM is expected to start running on another host within 480s.")
    if cost_time > 240:
        test_util.test_fail("Running on another host cost time:%s >240 sec." % (str(cost_time)))
    vm.set_state(vm_header.RUNNING)
    vm.check()
    # HA restart must record the failed host as the VM's last host.
    if test_lib.lib_get_vm_last_host(vm.get_vm()).managementIp != host_ip:
        test_util.test_fail(
            "Migrated VM's last host is expected to be the last host[ip:%s]" % (host_ip))
    vm.destroy()
    # Bring the killed host back and reconnect it in zstack.
    os.system('bash -ex %s %s' % (os.environ.get('hostRecoverScript'), host_ip))
    host_ops.reconnect_host(host_uuid)
    test_util.test_pass('Test VM ha on host failure Success')
def test():
    """Shut down every management-node host, recover a majority of them, and
    verify the MN VM is restarted on exactly one host by the consul-based HA
    mechanism; finally confirm the management server works by creating a VM.
    """
    global vm
    global mn_host_list
    global need_recover_mn_host_list
    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config, test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    # Random majority of host indexes to recover (py2 integer division).
    test_mn_host_list = random.sample(range(mn_host_num), (mn_host_num + 1) / 2)
    for host in mn_host_list:
        test_util.test_logger("shutdown host [%s]" % (host.ip_))
        test_stub.stop_host(host, test_lib.all_scenario_config)
    need_recover_mn_host_list = range(mn_host_num)
    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    # With all hosts down, the MN VM must not be running anywhere.
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(mn_host) != 0:
        test_util.test_fail('MN VM is still running on %d host(s)' % len(mn_host))
    for index in test_mn_host_list:
        test_util.test_logger("recover host [%s]" % (mn_host_list[index].ip_))
        test_stub.recover_host(mn_host_list[index], test_lib.all_scenario_config, test_lib.deploy_config)
        need_recover_mn_host_list.remove(index)
    test_util.test_logger(
        "wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)
    new_mn_host_ip = test_stub.get_host_by_consul_leader(
        test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail(
            "management node VM not run correctly on [%s] after its former host [%s] down for 20s" % (new_mn_host_ip, mn_host_list[0].ip_))
    # Poll up to 60 * 5s for exactly one host carrying the MN VM.
    count = 60
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger(
                "management node VM run after its former host down for 30s")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail(
                "management node VM runs on more than one host after its former host down")
        time.sleep(5)
        count -= 1
    if len(new_mn_host) == 0:
        test_util.test_fail(
            "management node VM does not run after its former host down for 30s")
    elif len(new_mn_host) > 1:
        test_util.test_fail(
            "management node VM runs on more than one host after its former host down")
    try:
        node_ops.wait_for_management_server_start(300)
    except:
        test_util.test_fail(
            "management node does not recover after MN VM is running")
    # Smoke-test the recovered MN by creating a VM, retrying for 30s.
    test_util.test_logger("try to create vm, timeout is 30s")
    time_out = 30
    while time_out > 0:
        try:
            vm = test_stub.create_basic_vm()
            break
        except:
            time.sleep(1)
            time_out -= 1
    if time_out == 0:
        test_util.test_fail('Fail to create vm after mn is ready')
    vm.check()
    vm.destroy()
    test_util.test_pass('Create VM Test Success')
def test():
    """Delete an L2 network that a two-NIC VM uses, verify the VM only loses
    the corresponding NIC (not the whole VM), then re-add the L2/L3 and create
    another VM on both networks.

    Relies on module-level l3_name1 / l3_name2 naming the two test L3s.
    """
    global curr_deploy_conf
    global l2_name2
    # Snapshot the deployment config so the deleted L2 can be re-added later.
    curr_deploy_conf = exp_ops.export_zstack_deployment_config(
        test_lib.deploy_config)
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    #pick up l3
    l3_1 = res_ops.get_resource(res_ops.L3_NETWORK, name=l3_name1)[0]
    l3_2 = res_ops.get_resource(res_ops.L3_NETWORK, name=l3_name2)[0]
    l2_2 = res_ops.get_resource(res_ops.L2_NETWORK, \
            uuid = l3_2.l2NetworkUuid)[0]
    l2_name2 = l2_2.name
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING,
                                                    conditions)[0].uuid
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multizones_basic_vm')
    vm_creation_option.set_l3_uuids([l3_1.uuid, l3_2.uuid])
    cluster1_name = os.environ.get('clusterName2')
    cluster1 = res_ops.get_resource(res_ops.CLUSTER, name=cluster1_name)[0]
    vm_creation_option.set_cluster_uuid(cluster1.uuid)
    vm1 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm1)
    test_util.test_dsc('Delete l2_2')
    net_ops.delete_l2(l2_2.uuid)
    #Since 0.8, delete L3 won't delete VM. It will just detach L3 nic.
    #test_obj_dict.mv_vm(vm1, vm_header.RUNNING, vm_header.STOPPED)
    vm1.update()
    #vm1.set_state(vm_header.STOPPED)
    vm1.check()
    #test_util.test_dsc('start vm again. vm should remove the deleted l2')
    #vm1.start()
    #add l2 resource will also add l3 resource
    net_ops.add_l2_resource(curr_deploy_conf, l2_name=l2_name2)
    #update l3_2, since it is readded.
    l3_2 = res_ops.get_resource(res_ops.L3_NETWORK, name=l3_name2)[0]
    vm_creation_option.set_l3_uuids([l3_1.uuid, l3_2.uuid])
    vm2 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm2)
    #check vm1 vm2 status.
    vm1.check()
    # After the L2 delete, vm1 must be left with exactly one NIC.
    if not len(vm1.get_vm().vmNics) == 1:
        test_util.test_fail(
            'vm1 vmNics still have L3: %s, even if it is deleted' % l3_2.uuid)
    vm2.check()
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Delete L2 Test Success')
def test():
    """Stress test: register 1000 start-VM schedulers against one VM via
    worker threads, verify the StartVmInstanceMsg timestamps in the MN log,
    then delete all scheduler jobs/triggers concurrently.

    Uses module-level helpers create_start_vm_scheduler /
    delete_scheduler_job / delete_scheduler_trigger and the schd_jobs /
    schd_triggers collections they populate.
    """
    global vm
    global schds
    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    start_date = int(time.time())
    test_util.test_logger('Setup start VM scheduler')
    # Fan out scheduler creation, throttled to at most ~10 live threads.
    for ops_id in range(1000):
        thread = threading.Thread(target=create_start_vm_scheduler,
                                  args=(vm.get_vm().uuid, start_date, ops_id, ))
        while threading.active_count() > 10:
            time.sleep(0.5)
        exc = sys.exc_info()
        thread.start()
    # Drain: wait for all worker threads to finish.
    while threading.activeCount() > 1:
        exc = sys.exc_info()
        time.sleep(0.1)
    test_stub.sleep_util(start_date + 200)
    # Check the MN log for the expected StartVmInstanceMsg at each second in
    # [start_date+100, start_date+200); tolerate up to 5 misses.
    start_msg_mismatch = 0
    for i in range(0, 100):
        if not test_lib.lib_find_in_local_management_server_log(
                start_date + 100 + i,
                '[msg send]: {"org.zstack.header.vm.StartVmInstanceMsg',
                vm.get_vm().uuid):
            start_msg_mismatch += 1
            test_util.test_warn(
                'StartVmInstanceMsg is expected to execute at %s' % (start_date + 100 + i))
    if start_msg_mismatch > 5:
        test_util.test_fail(
            '%s of 58 StartVmInstanceMsg not executed at expected timestamp' % (start_msg_mismatch))
    # Concurrently delete all scheduler jobs, same throttling pattern.
    for schd_job in schd_jobs:
        thread = threading.Thread(target=delete_scheduler_job,
                                  args=(schd_job.uuid, ))
        while threading.active_count() > 10:
            time.sleep(0.5)
        exc = sys.exc_info()
        thread.start()
    while threading.activeCount() > 1:
        exc = sys.exc_info()
        time.sleep(0.1)
    # Concurrently delete all scheduler triggers.
    for schd_trigger in schd_triggers:
        thread = threading.Thread(target=delete_scheduler_trigger,
                                  args=(schd_trigger.uuid, ))
        while threading.active_count() > 10:
            time.sleep(0.5)
        exc = sys.exc_info()
        thread.start()
    while threading.activeCount() > 1:
        exc = sys.exc_info()
        time.sleep(0.1)
    try:
        vm.destroy()
    except:
        test_util.test_logger(
            'expected exception when destroy VM since too many queued task')
    test_util.test_pass('Create 1000 Simple VM Start Scheduler Success')
def test():
    """Bulk-destroy every user VM and every VIP, then verify none remain.

    Raises the admin session timeout/maxConcurrent limits so the mass destroy
    does not expire or exhaust the session, and restores them (and logs out)
    at the end. Large result sets are paged by `thread_threshold` to avoid
    one huge query.
    """
    global session_to
    global session_mc
    global session_uuid
    session_uuid = acc_ops.login_as_admin()
    # Keep the original values so they can be restored at the end.
    session_to = con_ops.change_global_config('identity', 'session.timeout', '720000', session_uuid)
    session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000', session_uuid)
    cond = res_ops.gen_query_conditions('type', '=', inventory.USER_VM_TYPE)
    num = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond, session_uuid)
    if num <= thread_threshold:
        vms = res_ops.query_resource(res_ops.VM_INSTANCE, cond, session_uuid)
        destroy_vms(vms)
    else:
        # Page through VM uuids.
        start = 0
        limit = thread_threshold - 1
        curr_num = start
        vms = []
        while curr_num < num:
            vms_temp = res_ops.query_resource_fields(res_ops.VM_INSTANCE, \
                    cond, session_uuid, ['uuid'], start, limit)
            vms.extend(vms_temp)
            curr_num += limit
            start += limit
        destroy_vms(vms)
    vip_num = res_ops.query_resource_count(res_ops.VIP, [], session_uuid)
    if vip_num <= thread_threshold:
        vips = res_ops.query_resource(res_ops.VIP, [], session_uuid)
        destroy_vips(vips)
    else:
        # Page through VIP uuids.
        start = 0
        limit = thread_threshold - 1
        curr_num = start
        # Bug fix: this accumulator was initialized as 'vms = []', leaving
        # 'vips' undefined and raising NameError on the first extend below.
        vips = []
        while curr_num < vip_num:
            vips_temp = res_ops.query_resource_fields(res_ops.VIP, \
                    [], session_uuid, ['uuid'], start, limit)
            vips.extend(vips_temp)
            curr_num += limit
            start += limit
        destroy_vips(vips)
    #con_ops.change_global_config('identity', 'session.timeout', session_to)
    #con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc)
    left_num = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond, session_uuid)
    if left_num == 0:
        test_util.test_pass('None VR VMs destroy Success. Destroy %d VMs.' % num)
    else:
        test_util.test_fail(
            'None VR VMs destroy Fail. %d VMs are not Destroied.' % left_num)
    # NOTE(review): if test_util.test_pass ends the test by raising, the VIP
    # verification and the session restore below never execute — confirm.
    left_num = res_ops.query_resource_count(res_ops.VIP, [], session_uuid)
    if left_num == 0:
        # Bug fix: report the VIP count (vip_num), not the VM count (num).
        test_util.test_pass('VIP destroy Success. Destroy %d VIP.' % vip_num)
    else:
        test_util.test_fail('VIP destroy Fail. %d VIP are not Destroied.'
                            % left_num)
    # Restore original session settings and log out.
    con_ops.change_global_config('identity', 'session.timeout', session_to, session_uuid)
    con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc, session_uuid)
    acc_ops.logout(session_uuid)
def test():
    """Cold-stop the host of a NeverStop-HA VM (after a stop/start sanity
    cycle) and verify the VM goes Stopped and then re-enters Starting once
    the host is recovered and reconnected.

    Requires both local-storage and NFS primary storage to be deployed, and
    must not run in a scenario-free environment.
    """
    global vm
    global host_uuid
    global test_host
    global host_ip
    global max_attempts
    global storagechecker_timeout
    must_ps_list = [
        inventory.LOCAL_STORAGE_TYPE, inventory.NFS_PRIMARY_STORAGE_TYPE
    ]
    test_lib.skip_test_if_any_ps_not_deployed(must_ps_list)
    test_lib.lib_cur_env_is_not_scenario()
    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    test_lib.clean_up_all_vr()
    # Place the VM on an Enabled/Connected non-MN host.
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('ls_vm_ha_self_start')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    # Make sure the VM's host carries no VR, MN, or NFS server.
    vr_hosts = test_stub.get_host_has_vr()
    mn_hosts = test_stub.get_host_has_mn()
    nfs_hosts = test_stub.get_host_has_nfs()
    if not test_stub.ensure_vm_not_on(vm.get_vm().uuid, vm.get_vm().hostUuid,
                                      vr_hosts + mn_hosts + nfs_hosts):
        test_util.test_fail("Not find out a suitable host")
    #vm.check()
    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    test_util.test_logger("host %s is disconnecting" % (host_ip))
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
    # Sanity cycle: a NeverStop VM stopped through the HA-aware stop helper
    # must reach Stopped, then start and check cleanly.
    test_stub.stop_ha_vm(vm.get_vm().uuid)
    vm.set_state(vm_header.STOPPED)
    vm.check()
    vm.start()
    vm.check()
    host_list = test_stub.get_sce_hosts(test_lib.all_scenario_config, test_lib.scenario_file)
    for host in host_list:
        if host.ip_ == host_ip or host.managementIp_ == host_ip:
            test_host = host
            break
    if not test_host:
        test_util.test_fail('there is no host with ip %s in scenario file.' % (host_ip))
    # Cold (non-graceful) host stop.
    test_stub.stop_host(test_host, test_lib.all_scenario_config, 'cold')
    vm_stop_time = None
    cond = res_ops.gen_query_conditions('name', '=', 'ls_vm_ha_self_start')
    cond = res_ops.gen_query_conditions('uuid', '=', vm.vm.uuid, cond)
    # Wait up to 180s for Stopped, then recover and reconnect the host.
    for i in range(0, 180):
        vm_stop_time = i
        if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == "Stopped":
            test_stub.start_host(test_host, test_lib.all_scenario_config)
            test_stub.recover_host_vlan(test_host, test_lib.all_scenario_config,
                                        test_lib.deploy_config)
            conditions = res_ops.gen_query_conditions('managementIp', '=', host_ip)
            kvm_host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
            host_ops.reconnect_host(kvm_host_uuid)
            break
        time.sleep(1)
    if vm_stop_time is None:
        vm_stop_time = 180
    # Remaining budget of the 180s window: wait for HA to begin restarting.
    for i in range(vm_stop_time, 180):
        if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == "Starting":
            break
        time.sleep(1)
    else:
        test_util.test_fail(
            "vm has not been changed to running as expected within 180s.")
    vm.destroy()
    test_util.test_pass(
        'Test checking VM ha and none status when force stop vm Success.')
def test():
    """Detach then delete a primary storage that a VM was pinned to (via an
    instance-offering system tag) and verify VM behavior: detach stops the
    VM, delete destroys the VMs, and creating a new VM afterwards fails.

    Re-attaches the storage through recover_ps() before finishing.
    """
    global ps_inv
    global ps_uuid
    global cluster_uuid
    global tag
    curr_deploy_conf = exp_ops.export_zstack_deployment_config(test_lib.deploy_config)
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_s')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    #pick up primary storage 1 and set system tag for instance offering.
    ps_name1 = os.environ.get('nfsPrimaryStorageName1')
    ps_inv = res_ops.get_resource(res_ops.PRIMARY_STORAGE, name = ps_name1)[0]
    ps_uuid = ps_inv.uuid
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, \
            conditions)[0].uuid
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multizones_vm_ps_ops')
    # Pin VMs created from this offering to the chosen primary storage.
    tag = tag_ops.create_system_tag('InstanceOfferingVO', \
            instance_offering_uuid, \
            'primaryStorage::allocator::uuid::%s' % ps_uuid)
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3 = res_ops.get_resource(res_ops.L3_NETWORK, name = l3_name)[0]
    vm_creation_option.set_l3_uuids([l3.uuid])
    vm1 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm1)
    cluster_uuid = vm1.get_vm().clusterUuid
    test_util.test_dsc("Detach Primary Storage")
    # Detaching the PS from the cluster is expected to stop vm1.
    ps_ops.detach_primary_storage(ps_uuid, cluster_uuid)
    test_obj_dict.mv_vm(vm1, vm_header.RUNNING, vm_header.STOPPED)
    vm1.update()
    vm1.set_state(vm_header.STOPPED)
    vm1.check()
    vm1.start()
    vm2 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm2)
    test_util.test_dsc("Delete Primary Storage")
    # Remove the pin tag before deleting the storage itself.
    tag_ops.delete_tag(tag.uuid)
    ps_ops.delete_primary_storage(ps_inv.uuid)
    # Deleting the PS destroys both VMs that lived on it.
    test_obj_dict.mv_vm(vm1, vm_header.RUNNING, vm_header.DESTROYED)
    vm1.set_state(vm_header.DESTROYED)
    vm1.check()
    test_obj_dict.mv_vm(vm2, vm_header.RUNNING, vm_header.DESTROYED)
    vm2.set_state(vm_header.DESTROYED)
    vm2.check()
    # With the PS gone, VM creation must fail.
    try:
        vm3 = test_lib.lib_create_vm(vm_creation_option)
    except:
        test_util.test_logger('Catch expected vm creation exception, since primary storage has been deleted. ')
    else:
        test_util.test_fail('Fail: Primary Storage has been deleted. But vm is still created with it.')
    recover_ps()
    test_util.test_dsc("Attach Primary Storage")
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Test primary storage operations Success')
def test():
    """Create two backend VMs and two load balancers sharing one VIP, each
    with a listener over both VM NICs, verify traffic checks pass, then
    delete the LBs/VIP and confirm the checks still pass post-delete.
    """
    test_util.test_dsc('Create test vm with lb.')
    vm1 = test_stub.create_lb_vm()
    test_obj_dict.add_vm(vm1)
    vm2 = test_stub.create_lb_vm()
    test_obj_dict.add_vm(vm2)
    #l3_name = os.environ.get('l3VlanNetworkName1')
    #vr1 = test_stub.get_vr_by_private_l3_name(l3_name)
    #l3_name = os.environ.get('l3NoVlanNetworkName1')
    #vr2 = test_stub.get_vr_by_private_l3_name(l3_name)
    vm_nic1 = vm1.get_vm().vmNics[0]
    vm_nic1_uuid = vm_nic1.uuid
    vm_nic1_ip = vm_nic1.ip
    vm_nic2 = vm2.get_vm().vmNics[0]
    vm_nic2_uuid = vm_nic2.uuid
    vm_nic2_ip = vm_nic2.ip
    vm1.check()
    vm2.check()
    #test_lib.lib_wait_target_up(vm_nic1_ip, "root", 120)
    #test_lib.lib_wait_target_up(vm_nic2_ip, "root", 120)
    # Start httpd in both guests so the LB has live backends to balance.
    test_stub.set_httpd_in_vm(vm_nic1_ip, "root", "password")
    test_stub.set_httpd_in_vm(vm_nic2_ip, "root", "password")
    # Allocate a VIP on the public L3 of the VMs' virtual router.
    pri_l3_uuid = vm_nic1.l3NetworkUuid
    vr = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid)[0]
    vr_pub_nic = test_lib.lib_find_vr_pub_nic(vr)
    l3_uuid = vr_pub_nic.l3NetworkUuid
    vip = test_stub.create_vip('vip_for_lb_test', l3_uuid)
    test_obj_dict.add_vip(vip)
    # Two load balancers on the same VIP, with different listener ports.
    lb = zstack_lb_header.ZstackTestLoadBalancer()
    lb2 = zstack_lb_header.ZstackTestLoadBalancer()
    lb.create('create lb test', vip.get_vip().uuid)
    lb2.create('create lb2 test', vip.get_vip().uuid)
    test_obj_dict.add_load_balancer(lb)
    test_obj_dict.add_load_balancer(lb2)
    vip.attach_lb(lb)
    vip.attach_lb(lb2)
    lb_creation_option = test_lib.lib_create_lb_listener_option()
    lb2_creation_option = test_lib.lib_create_lb_listener_option(lbl_port=2222, lbi_port=80)
    lbl = lb.create_listener(lb_creation_option)
    lbl2 = lb2.create_listener(lb2_creation_option)
    lbl.add_nics([vm_nic1_uuid, vm_nic2_uuid])
    lbl2.add_nics([vm_nic1_uuid, vm_nic2_uuid])
    vm1.check()
    vm2.check()
    lb.check()
    lb2.check()
    vip.check()
    # Delete everything and re-run the LB checks to validate the delete path.
    lb.delete()
    lb2.delete()
    vip.delete()
    test_obj_dict.rm_vip(vip)
    test_obj_dict.rm_load_balancer(lb)
    test_obj_dict.rm_load_balancer(lb2)
    lb.check()
    lb2.check()
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Create Load Balancer Test Success')
def test():
    """Build an IPsec tunnel between two mevoco deployments and verify VMs
    on each side can ping each other; after deleting the connections, verify
    the ping fails again.

    The target MN is selected by rewriting ZSTACK_BUILT_IN_HTTP_SERVER_IP in
    the environment before each group of API calls.
    """
    global mevoco1_ip
    global mevoco2_ip
    global ipsec1
    global ipsec2
    mevoco1_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
    mevoco2_ip = os.environ['secondZStackMnIp']
    test_util.test_dsc('Create test vm in mevoco1')
    vm1 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    test_obj_dict1.add_vm(vm1)
    vm1.check()
    pri_l3_uuid1 = vm1.vm.vmNics[0].l3NetworkUuid
    vr1 = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid1)[0]
    l3_uuid1 = test_lib.lib_find_vr_pub_nic(vr1).l3NetworkUuid
    vip1 = test_stub.create_vip('ipsec1_vip', l3_uuid1)
    # Switch API target to mevoco2 for the peer-side resources.
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    test_util.test_dsc('Create test vm in mevoco2')
    vm2 = test_stub.create_vlan_vm(os.environ.get('l3VlanDNATNetworkName'))
    test_obj_dict2.add_vm(vm2)
    vm2.check()
    pri_l3_uuid2 = vm2.vm.vmNics[0].l3NetworkUuid
    vr2 = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid2)[0]
    l3_uuid2 = test_lib.lib_find_vr_pub_nic(vr2).l3NetworkUuid
    vip2 = test_stub.create_vip('ipsec2_vip', l3_uuid2)
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    test_util.test_dsc('Create ipsec in mevoco1')
    # Each side's connection points at the peer's VIP and advertises the
    # peer's CIDR; both use sha512 for the policy auth algorithm.
    ipsec1 = ipsec_ops.create_ipsec_connection(
        'ipsec1', pri_l3_uuid1, vip2.get_vip().ip, '123456',
        vip1.get_vip().uuid, [os.environ['secondZStackCidrs']],
        policy_auth_algorithm="sha512")
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    test_util.test_dsc('Create ipsec in mevoco2')
    ipsec2 = ipsec_ops.create_ipsec_connection(
        'ipsec2', pri_l3_uuid2, vip1.get_vip().ip, '123456',
        vip2.get_vip().uuid, [os.environ['firstZStackCidrs']],
        policy_auth_algorithm="sha512")
    # Cross-site connectivity must work in both directions.
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    if not test_lib.lib_check_ping(vm1.vm, vm2.vm.vmNics[0].ip):
        test_util.test_fail(
            'vm in mevoco1[MN:%s] could not connect to vm in mevoco2[MN:%s]' % (mevoco1_ip, mevoco2_ip))
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    if not test_lib.lib_check_ping(vm2.vm, vm1.vm.vmNics[0].ip):
        test_util.test_fail(
            'vm in mevoco1[MN:%s] could not connect to vm in mevoco2[MN:%s]' % (mevoco2_ip, mevoco1_ip))
    # After deleting the mevoco1-side connection, pings must fail both ways.
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    ipsec_ops.delete_ipsec_connection(ipsec1.uuid)
    if test_lib.lib_check_ping(vm1.vm, vm2.vm.vmNics[0].ip, no_exception=True):
        test_util.test_fail(
            'vm in mevoco1[MN:%s] could still connect to vm in mevoco2[MN:%s] after Ipsec is deleted' % (mevoco1_ip, mevoco2_ip))
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    if test_lib.lib_check_ping(vm2.vm, vm1.vm.vmNics[0].ip, no_exception=True):
        test_util.test_fail(
            'vm in mevoco2[MN:%s] could still connect to vm in mevoco1[MN:%s] after Ipsec is deleted' % (mevoco2_ip, mevoco1_ip))
    # Cleanup on both sides.
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    test_lib.lib_error_cleanup(test_obj_dict1)
    vip1.delete()
    test_obj_dict1.rm_vip(vip1)
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
    ipsec_ops.delete_ipsec_connection(ipsec2.uuid)
    test_lib.lib_error_cleanup(test_obj_dict2)
    vip2.delete()
    test_obj_dict2.rm_vip(vip2)
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
    test_util.test_pass('Create Ipsec Success')
def test():
    """Verify a NeverStop-HA VM restarts on another host after its host is
    gracefully stopped and started again.

    Flow: pick a connected host that is not the MN, create a VM pinned to it,
    set HA level NeverStop, stop/start that host via the scenario config, and
    assert the VM comes back Running on a different host.

    Fix over original: test_host is initialized to None before the search
    loop; previously, if no scenario host matched host_ip, the
    `if not test_host` check raised NameError instead of the intended
    test_fail message.
    """
    global vm
    global host_uuid
    global host_ip
    global max_attempts
    global storagechecker_timeout

    # Only meaningful on Ceph primary storage; skip otherwise.
    allow_ps_list = [inventory.CEPH_PRIMARY_STORAGE_TYPE]
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)

    if test_lib.lib_get_ha_enable() != 'true':
        test_util.test_skip("vm ha not enabled. Skip test")

    # Shorten HA detection intervals so the test completes quickly.
    set_quick_ha_properties()
    set_quick_ha_params()

    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_net')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    test_lib.clean_up_all_vr()

    # Choose an Enabled+Connected host that is not the management node,
    # so stopping it cannot take the MN down with it.
    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
    conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
    conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip, conditions)
    host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid

    vm_creation_option.set_host_uuid(host_uuid)
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multihost_basic_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    vm.check()
    # Make sure no virtual router lives on the target host before we kill it.
    test_stub.ensure_host_has_no_vr(host_uuid)

    host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
    # NOTE(review): host_port is looked up but not used afterwards; kept for
    # parity with the original in case lib_get_host_port has side effects.
    host_port = test_lib.lib_get_host_port(host_ip)
    test_util.test_logger("host %s is disconnecting" % (host_ip))
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")

    # Find the scenario-file host matching the VM's host IP.
    test_host = None  # fix: ensure a clean test_fail (not NameError) when absent
    host_list = test_stub.get_sce_hosts(test_lib.all_scenario_config, test_lib.scenario_file)
    for host in host_list:
        if host.ip_ == host_ip:
            test_host = host
            break
    if not test_host:
        test_util.test_fail('there is no host with ip %s in scenario file.' % (host_ip))

    # Gracefully stop the host, wait for HA to notice, then bring it back.
    test_stub.stop_host(test_host, test_lib.all_scenario_config)
    test_util.test_logger("wait for 120 seconds")
    time.sleep(120)
    test_stub.start_host(test_host, test_lib.all_scenario_config)
    test_stub.recover_host_vlan(test_host, test_lib.all_scenario_config, test_lib.deploy_config)

    # The NeverStop VM must be Running again — and on a different host.
    vm.set_state(vm_header.RUNNING)
    vm.check()
    vm.update()
    if test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp == host_ip:
        test_util.test_fail("VM is expected to start running on another host")
    vm.destroy()

    test_util.test_pass(
        'Test checking vm status after graceful stop and start success')
def test():
    """Exercise the ZStack upgrade path release-by-release inside a scenario VM.

    Flow: create a scenario VM from a 1.6 image, bring up a management node in
    it, then upgrade sequentially through each release in release_ver (swapping
    in the matching ISO where a release requires one), verifying version and
    installation after every step, and finally upgrade to the latest build.
    """
    global vm_inv
    test_util.test_dsc('Create test vm to test zstack upgrade by -u.')
    # Environment-provided artifacts: base image, per-release ISOs, installers.
    image_name = os.environ.get('imageName_i_c7_z_1.6')
    iso_path = os.environ.get('iso_path')
    iso_19_path = os.environ.get('iso_19_path')
    iso_10_path = os.environ.get('iso_10_path')
    iso_20_path = os.environ.get('iso_20_path')
    iso_21_path = os.environ.get('iso_21_path')
    zstack_latest_version = os.environ.get('zstackLatestVersion')
    zstack_latest_path = os.environ.get('zstackLatestInstaller')
    vm_name = os.environ.get('vmName')
    upgrade_script_path = os.environ.get('upgradeScript')

    # Boot the scenario VM and wait until SSH is reachable.
    vm_inv = test_stub.create_vm_scenario(image_name, vm_name)
    vm_ip = vm_inv.vmNics[0].ip
    test_lib.lib_wait_target_up(vm_ip, 22)
    test_stub.make_ssh_no_password(vm_ip, tmp_file)

    # Point the in-VM management node at its own hostname/IP and start it.
    test_util.test_dsc('Update MN IP')
    test_stub.update_mn_hostname(vm_ip, tmp_file)
    test_stub.update_mn_ip(vm_ip, tmp_file)
    test_stub.reset_rabbitmq(vm_ip, tmp_file)
    test_stub.start_mn(vm_ip, tmp_file)
    test_stub.check_installation(vm_ip, tmp_file)
    # Base ISO for the 1.9-era repo, required before the upgrade chain starts.
    test_stub.update_19_iso(vm_ip, tmp_file, iso_19_path, upgrade_script_path)

    #pkg_num = 1.7
    release_ver = ['1.7', '1.8', '1.9', '1.10', '2.0.0', '2.1.0', '2.2.0']
    # NOTE(review): curren_num is a leftover from the commented-out while-loop
    # version below; it is unused by the for-loop.
    curren_num = float(os.environ.get('releasePkgNum'))
    for pkg_num in release_ver:
        #while pkg_num <= curren_num:
        test_util.test_logger('Upgrade zstack to %s' % pkg_num)
        #if str(pkg_num) == '1.7':
        #    test_stub.update_19_iso(vm_ip, tmp_file, iso_19_path, upgrade_script_path)
        # Certain releases ship with a new repo ISO; swap it in first.
        if str(pkg_num) == '1.10':
            test_stub.update_10_iso(vm_ip, tmp_file, iso_10_path, upgrade_script_path)
        if str(pkg_num) == '2.0.0':
            test_stub.update_20_iso(vm_ip, tmp_file, iso_20_path, upgrade_script_path)
        if str(pkg_num) == '2.1.0':
            test_stub.update_21_iso(vm_ip, tmp_file, iso_21_path, upgrade_script_path)
        # Upgrade, restart the MN, and verify both version and installation.
        upgrade_pkg = os.environ.get('zstackPkg_%s' % pkg_num)
        test_stub.upgrade_zstack(vm_ip, upgrade_pkg, tmp_file)
        test_stub.start_mn(vm_ip, tmp_file)
        test_stub.check_zstack_version(vm_ip, tmp_file, str(pkg_num))
        test_stub.check_installation(vm_ip, tmp_file)
        #pkg_num = pkg_num + 0.1

    # Final hop: upgrade to the latest installer and verify.
    test_util.test_dsc('Upgrade zstack to latest')
    test_stub.update_iso(vm_ip, tmp_file, iso_path, upgrade_script_path)
    test_stub.upgrade_zstack(vm_ip, zstack_latest_path, tmp_file)
    test_stub.start_mn(vm_ip, tmp_file)
    test_stub.check_zstack_version(vm_ip, tmp_file, zstack_latest_version)
    test_stub.check_installation(vm_ip, tmp_file)

    # Teardown: remove the temp ssh file and the scenario VM.
    os.system('rm -f %s' % tmp_file)
    test_stub.destroy_vm_scenario(vm_inv.uuid)
    test_util.test_pass('ZStack upgrade Test Success')
def test():
    """Stop three virtual routers concurrently, then start the non-HA ones
    concurrently, verifying each stop completed and no thread raised.

    VRs tagged ha::NeverStop are skipped for both the Stopped check and the
    explicit start, since HA restarts them on its own.

    Improvements over original: the VR lookup-or-create sequence (previously
    copy-pasted per network) and the NeverStop tag check (previously running
    the identical query twice per VR) are factored into helpers; behavior is
    unchanged.
    """
    global session_uuid
    session_uuid = acc_ops.login_as_admin()

    def _get_or_create_vr(l3_name):
        # Return a VR serving the named L3; create (and discard) a VM to
        # force one into existence if none is present yet.
        l3 = test_lib.lib_get_l3_by_name(l3_name)
        vrs = test_lib.lib_find_vr_by_l3_uuid(l3.uuid)
        if not vrs:
            tmp_vm = test_stub.create_vlan_vm(l3_name=l3_name)
            tmp_vm.destroy()
            return test_lib.lib_find_vr_by_l3_uuid(l3.uuid)[0]
        return vrs[0]

    def _is_never_stop(vr_uuid):
        # True when the VR carries the ha::NeverStop system tag
        # (single query; the original issued the same query twice).
        cond = res_ops.gen_query_conditions('resourceUuid', '=', vr_uuid)
        cond = res_ops.gen_query_conditions('tag', '=', "ha::NeverStop", cond)
        tags = res_ops.query_resource(res_ops.SYSTEM_TAG, cond)
        return bool(tags and tags[0])

    # Collect one VR per test network.
    vrs = [_get_or_create_vr(name) for name in (
        os.environ.get('l3VlanNetworkName1'),
        os.environ.get('l3VlanDNATNetworkName'),
        os.environ.get('l3VlanNetworkName3'),
    )]

    # Fire all stop operations in parallel and wait for the threads to drain,
    # surfacing any exception a worker recorded.
    for vr in vrs:
        thread = threading.Thread(target=stop_vm, args=(vr.uuid,))
        thread.start()
    while threading.activeCount() > 1:
        check_exception()
        time.sleep(0.1)

    # Non-HA VRs must actually reach Stopped; HA ones may already be restarting.
    for vr in vrs:
        if _is_never_stop(vr.uuid):
            continue
        if not linux.wait_callback_success(check_status, (vr.uuid, 'Stopped'), 10):
            test_util.test_fail('VM: %s is not stopped, after waiting for extra 10s' % vr.uuid)
    check_exception()

    # Start the non-HA VRs in parallel.
    for vr in vrs:
        if _is_never_stop(vr.uuid):
            continue
        thread = threading.Thread(target=start_vm, args=(vr.uuid,))
        thread.start()
    time.sleep(1)
    # Log out while the start threads run; they hold their own sessions.
    acc_ops.logout(session_uuid)
    while threading.activeCount() > 1:
        check_exception()
        time.sleep(0.1)
    check_exception()
    test_util.test_pass('Test start VRs simultaneously success')