def test():
    """Verify SetVMHostname survives host reconnect, VM reboot, and both.

    For each scenario the hostname must be readable back via
    GetVmHostname and the VM nic's IP address must stay unchanged.
    """
    global vm
    vm = test_stub.create_vm()

    def _check(expected_hostname, scenario):
        # Shared verification: hostname round-trips and the nic IP is stable.
        hostname_inv = vm_ops.get_vm_hostname(vm.get_vm().uuid)
        if hostname_inv != expected_hostname:
            test_util.test_fail(
                'can not get the vm hostname after set vm hostname')
        vm_inv = res_ops.get_resource(res_ops.VM_INSTANCE,
                                      uuid=vm.get_vm().uuid)[0]
        if vm_inv.vmNics[0].ip != vm.get_vm().vmNics[0].ip:
            test_util.test_fail(
                'can not get the correct ip address after set vm hostname and %s' % scenario)

    #1 set hostname, then reconnect the hosting KVM host
    hostname = 'vm123.zstack.org'
    vm_ops.set_vm_hostname(vm.get_vm().uuid, hostname)
    host = test_lib.lib_find_host_by_vm(vm.get_vm())
    host_ops.reconnect_host(host.uuid)
    _check(hostname, 'reconnected host')

    #2 set hostname, then reboot the VM
    hostname = 'vm1234.zstack.org'
    vm_ops.set_vm_hostname(vm.get_vm().uuid, hostname)
    vm_ops.reboot_vm(vm.get_vm().uuid)
    _check(hostname, 'reboot vm')

    #3 set hostname, then reconnect the host AND reboot the VM
    hostname = 'vm12345.zstack.org'
    vm_ops.set_vm_hostname(vm.get_vm().uuid, hostname)
    host = test_lib.lib_find_host_by_vm(vm.get_vm())
    host_ops.reconnect_host(host.uuid)
    vm_ops.reboot_vm(vm.get_vm().uuid)
    _check(hostname, 'reboot vm and reconnect host')

    test_util.test_pass('SetVMHostname and get vm\'s correct ip')
def create_vm(vm_creation_option=None, volume_uuids=None, root_disk_uuid=None, \
        image_uuid=None, session_uuid=None):
    """Create and return a ZstackTestVm.

    When no vm_creation_option is given, a default one is built from the
    first instance offering, the first non-ISO image and the first L3
    network visible to the session.

    :param vm_creation_option: pre-built test_util.VmOption, or None
    :param volume_uuids: optional list of data-volume-offering uuids
    :param root_disk_uuid: optional root disk offering uuid
    :param image_uuid: optional image uuid; always wins over the default
    :param session_uuid: optional session for resource lookups
    :returns: created test_vm.ZstackTestVm

    Bug fix: previously a caller-supplied image_uuid was silently
    overwritten by the first queried non-ISO image whenever
    vm_creation_option was None; now the default is only queried when the
    caller did not provide one.
    """
    if not vm_creation_option:
        instance_offering_uuid = res_ops.get_resource(
            res_ops.INSTANCE_OFFERING, session_uuid)[0].uuid
        if not image_uuid:
            # default to the first non-ISO image
            cond = res_ops.gen_query_conditions('mediaType', '!=', 'ISO')
            image_uuid = res_ops.query_resource(
                res_ops.IMAGE, cond, session_uuid)[0].uuid
        l3net_uuid = res_ops.get_resource(
            res_ops.L3_NETWORK, session_uuid)[0].uuid
        vm_creation_option = test_util.VmOption()
        vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
        vm_creation_option.set_image_uuid(image_uuid)
        vm_creation_option.set_l3_uuids([l3net_uuid])

    if volume_uuids:
        if isinstance(volume_uuids, list):
            vm_creation_option.set_data_disk_uuids(volume_uuids)
        else:
            test_util.test_fail('volume_uuids type: %s is not "list".'
                                % type(volume_uuids))
    if root_disk_uuid:
        vm_creation_option.set_root_disk_uuid(root_disk_uuid)
    if image_uuid:
        # explicit image always overrides whatever the option carries
        vm_creation_option.set_image_uuid(image_uuid)
    if session_uuid:
        vm_creation_option.set_session_uuid(session_uuid)

    vm = test_vm.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    return vm
def test():
    """Delete a host and an NFS primary storage, then re-add both."""
    global host_config
    global ps_inv
    curr_deploy_conf = exp_ops.export_zstack_deployment_config(
        test_lib.deploy_config)

    target_host = res_ops.get_resource(res_ops.HOST, name=host1_name)[0]
    # capture the host's identity so it can be re-added after deletion
    host_config.set_name(host1_name)
    host_config.set_cluster_uuid(target_host.clusterUuid)
    host_config.set_management_ip(target_host.managementIp)
    host_config.set_username(os.environ.get('hostUsername'))
    host_config.set_password(os.environ.get('hostPassword'))

    test_util.test_dsc('delete host')
    host_ops.delete_host(target_host.uuid)

    test_util.test_dsc('delete primary storage')
    ps_name = os.environ.get('nfsPrimaryStorageName1')
    ps_inv = res_ops.get_resource(res_ops.PRIMARY_STORAGE, name=ps_name)[0]
    ps_ops.delete_primary_storage(ps_inv.uuid)

    test_util.test_dsc("Recover Primary Storage")
    recover_ps()
    test_util.test_dsc("Recover Host")
    host_ops.add_kvm_host(host_config)

    # re-query both resources; an IndexError here means recovery failed
    host1 = res_ops.get_resource(res_ops.HOST, name=host1_name)[0]
    ps1 = res_ops.get_resource(res_ops.PRIMARY_STORAGE, name=ps_name)[0]

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Delete Host and Primary Storage Test Success')
def test():
    """Delete the L2 behind one of a VM's two L3s; the VM must drop that
    nic, then the L2 is re-added and a fresh VM on both L3s must work."""
    global curr_deploy_conf
    global l2_name2
    curr_deploy_conf = exp_ops.export_zstack_deployment_config(test_lib.deploy_config)

    vm_creation_option = test_util.VmOption()
    image_uuid = test_lib.lib_get_image_by_name(os.environ.get('imageName_s')).uuid

    # pick up the two L3s; remember the L2 behind the second one
    l3_1 = res_ops.get_resource(res_ops.L3_NETWORK, name=l3_name1)[0]
    l3_2 = res_ops.get_resource(res_ops.L3_NETWORK, name=l3_name2)[0]
    l2_2 = res_ops.get_resource(res_ops.L2_NETWORK, uuid=l3_2.l2NetworkUuid)[0]
    l2_name2 = l2_2.name

    user_vm_cond = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(
        res_ops.INSTANCE_OFFERING, user_vm_cond)[0].uuid

    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multizones_basic_vm')
    vm_creation_option.set_l3_uuids([l3_1.uuid, l3_2.uuid])
    cluster1 = res_ops.get_resource(
        res_ops.CLUSTER, name=os.environ.get('clusterName2'))[0]
    vm_creation_option.set_cluster_uuid(cluster1.uuid)

    vm1 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm1)

    test_util.test_dsc('Delete l2_2')
    net_ops.delete_l2(l2_2.uuid)
    test_obj_dict.mv_vm(vm1, vm_header.RUNNING, vm_header.STOPPED)
    vm1.update()
    vm1.set_state(vm_header.STOPPED)
    vm1.check()

    test_util.test_dsc('start vm again. vm should remove the deleted l2')
    vm1.start()

    net_ops.add_l2_resource(curr_deploy_conf, l2_name=l2_name2)
    # l3_2 was re-created along with the L2, so refresh its uuid
    l3_2 = res_ops.get_resource(res_ops.L3_NETWORK, name=l3_name2)[0]
    vm_creation_option.set_l3_uuids([l3_1.uuid, l3_2.uuid])
    vm2 = test_lib.lib_create_vm(vm_creation_option)
    test_obj_dict.add_vm(vm2)

    # vm1 must have dropped the nic on the deleted L3; vm2 must be healthy
    vm1.check()
    if len(vm1.get_vm().vmNics) != 1:
        test_util.test_fail('vm1 vmNics still have L3: %s, even if it is deleted' % l3_2.uuid)
    vm2.check()

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Delete L2 Test Success')
def revoke_admin_resource(account_uuid_list, session_uuid=None):
    """Revoke admin-shared resources from the given accounts.

    Covers instance offerings, non-ISO images, L3 networks, the first
    L2 vxlan network pool, virtual-router offerings and disk offerings —
    one revoke call per resource, matching the original call pattern.
    """
    for offering in res_ops.get_resource(res_ops.INSTANCE_OFFERING):
        acc_ops.revoke_resources(account_uuid_list, [offering.uuid], session_uuid)

    non_iso = res_ops.gen_query_conditions('mediaType', '!=', 'ISO')
    for image in res_ops.query_resource(res_ops.IMAGE, non_iso):
        acc_ops.revoke_resources(account_uuid_list, [image.uuid], session_uuid)

    for l3 in res_ops.get_resource(res_ops.L3_NETWORK):
        acc_ops.revoke_resources(account_uuid_list, [l3.uuid], session_uuid)

    # only the first vxlan pool is revoked, mirroring the sharing side
    pool_uuid = res_ops.get_resource(res_ops.L2_VXLAN_NETWORK_POOL)[0].uuid
    acc_ops.revoke_resources(account_uuid_list, [pool_uuid], session_uuid)

    for vr_offering in res_ops.get_resource(res_ops.VR_OFFERING):
        acc_ops.revoke_resources(account_uuid_list, [vr_offering.uuid], session_uuid)

    for disk_offering in res_ops.get_resource(res_ops.DISK_OFFERING):
        acc_ops.revoke_resources(account_uuid_list, [disk_offering.uuid], session_uuid)
def test():
    """Delete host and NFS primary storage, recover both, then clean up."""
    global host_config
    global ps_inv
    curr_deploy_conf = exp_ops.export_zstack_deployment_config(test_lib.deploy_config)

    doomed_host = res_ops.get_resource(res_ops.HOST, name=host1_name)[0]
    # record everything needed to re-add the host afterwards
    for setter, value in ((host_config.set_name, host1_name),
                          (host_config.set_cluster_uuid, doomed_host.clusterUuid),
                          (host_config.set_management_ip, doomed_host.managementIp),
                          (host_config.set_username, os.environ.get('hostUsername')),
                          (host_config.set_password, os.environ.get('hostPassword'))):
        setter(value)

    test_util.test_dsc('delete host')
    host_ops.delete_host(doomed_host.uuid)

    test_util.test_dsc('delete primary storage')
    ps_name = os.environ.get('nfsPrimaryStorageName1')
    ps_inv = res_ops.get_resource(res_ops.PRIMARY_STORAGE, name=ps_name)[0]
    ps_ops.delete_primary_storage(ps_inv.uuid)

    test_util.test_dsc("Recover Primary Storage")
    recover_ps()
    test_util.test_dsc("Recover Host")
    host_ops.add_kvm_host(host_config)
    # confirm both resources are queryable again before cleanup
    host1 = res_ops.get_resource(res_ops.HOST, name=host1_name)[0]
    ps1 = res_ops.get_resource(res_ops.PRIMARY_STORAGE, name=ps_name)[0]

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Delete Host and Primary Storage Test Success')
def test():
    """Create VMs with data volumes; verify each root volume landed on the
    local PS and every data volume landed on the NFS PS."""
    test_util.test_dsc("Create {} vm each with {} data volume".format(
        VM_COUNT, DATA_VOLUME_NUMBER))
    vm_list = test_stub.create_multi_vms(name_prefix='test-',
                                         count=VM_COUNT,
                                         data_volume_number=DATA_VOLUME_NUMBER)
    for vm in vm_list:
        test_obj_dict.add_vm(vm)

    test_util.test_dsc(
        "Check all root volumes in LOCAL PS, all data volumes in NFS PS")

    def _ps_type_of(volume):
        # resolve a volume's primary-storage type via its uuid
        return res_ops.get_resource(res_ops.PRIMARY_STORAGE,
                                    uuid=volume.primaryStorageUuid)[0].type

    for vm in vm_list:
        root_volume = test_lib.lib_get_root_volume(vm.get_vm())
        assert _ps_type_of(root_volume) == inventory.LOCAL_STORAGE_TYPE
        for data_volume in vm.get_vm().allVolumes:
            if data_volume.type != 'Root':
                assert _ps_type_of(data_volume) == inventory.NFS_PRIMARY_STORAGE_TYPE

    test_util.test_pass('Multi PrimaryStorage Test Pass')
def test():
    """AutoScaling scale-out test: build a group with alarms and rules, raise
    the new-instance rule's adjustment, and verify the group grows to max.

    Relies on module-level config: maxvm_number, minvm_number, initvm_number,
    period, memory_threshold, adjustment_number, cooldown_time.
    """
    test_util.test_dsc("create autoscaling group")
    test_util.test_dsc("create alarm")
    # alarm_1 (memory > 99%) drives scale-out; alarm_2 (memory < 1%) drives scale-in
    alarm_1Uuid = autoscaling.create_alarm('GreaterThan', 60, 99, 'ZStack/VM', 'MemoryUsedInPercent','Average','alarm_add',10).uuid
    alarm_2Uuid = autoscaling.create_alarm('LessThan', 60, 1, 'ZStack/VM', 'MemoryUsedInPercent','Average','alarm_removal',10).uuid
    test_util.test_dsc("get l3 network uuid")
    l3_public_name = os.environ.get(test_stub.L3_SYSTEM_NAME_LIST[0])
    test_util.test_logger("%s" %(l3_public_name))
    l3NetworkUuids = test_lib.lib_get_l3_by_name(l3_public_name).uuid
    test_util.test_logger("%s" %(l3NetworkUuids))
    test_util.test_logger("get vm InstanceOffer uuid")
    vmInstanceOfferingUuid = res_ops.get_resource(res_ops.INSTANCE_OFFERING,None,None,os.environ.get('instanceOfferingName_s'))[0].uuid
    test_util.test_logger("%s" %(vmInstanceOfferingUuid))
    test_util.test_logger("get vm Image uuid")
    imageUuid = res_ops.get_resource(res_ops.IMAGE,None,None,os.environ.get('imageName_s'))[0].uuid
    test_util.test_logger("%s" %(imageUuid))
    test_util.test_logger("get vm template uuid")
    # the template binds scaled VMs to a load balancer listener
    listerUuid = res_ops.get_resource(res_ops.LOAD_BALANCER_LISTENER)[0].uuid
    vm_templateUuid = autoscaling.create_autoScaling_vmTemplate([l3NetworkUuids],vmInstanceOfferingUuid,imageUuid,l3NetworkUuids,["loadBalancerListenerUuids::"+listerUuid]).uuid
    test_util.test_logger("%s" %(vm_templateUuid))
    test_util.test_logger("get autoscaling group uuid")
    # group starts with 3 instances (initialInstanceNumber system tag)
    autoscaling_groupUuid = autoscaling.create_autoScaling_group(maxvm_number,minvm_number,["initialInstanceNumber::3"]).uuid
    test_util.test_logger("%s" %(autoscaling_groupUuid))
    test_util.test_logger("attach vm template to autoscaling group")
    autoscaling.attach_autoScaling_templateToGroup(autoscaling_groupUuid,vm_templateUuid)
    test_util.test_logger("add removal rule to autoscaling group")
    groupremovalinstanceruleUuid = autoscaling.create_autoScaling_group_removalInstanceRule(1,30,autoscaling_groupUuid).uuid
    autoscaling.create_autoScaling_ruleAlarmTrigger(alarm_2Uuid,groupremovalinstanceruleUuid)
    test_util.test_logger("add new instance rule to autoscaling group")
    groupnewinstanceruleUuid = autoscaling.create_autoScaling_group_addingNewInstanceRule(1,autoscaling_groupUuid,30).uuid
    autoscaling.create_autoScaling_ruleAlarmTrigger(alarm_1Uuid,groupnewinstanceruleUuid)
    test_util.test_logger("check autoscaling init")
    test_stub.check_autoscaling_init_vmm_number(initvm_number,autoscaling_groupUuid)
    test_util.test_logger("update memory percent")
    # lower the thresholds so the scale-out alarm actually fires
    autoscaling.update_alarm(alarm_1Uuid,period,memory_threshold)
    autoscaling.update_alarm(alarm_2Uuid,period,memory_threshold)
    test_util.test_logger("update modify add new instance number")
    autoscaling.update_autoscalinggroup_addingnewinstance(groupnewinstanceruleUuid,adjustment_number,cooldown_time)
    # give the scaling engine time to react before checking
    time.sleep(30)
    test_util.test_logger("check new instance number")
    test_stub.check_add_newinstance_vmm_number(maxvm_number,maxvm_number,autoscaling_groupUuid)
    test_util.test_dsc("Delete autoscaling group")
    autoscaling.delete_autoScaling_group(autoscaling_groupUuid)
    test_stub.check_deleteautoscaling_vmm_number()
    test_util.test_pass("Test AutoScaling Group Successfully")
def share_admin_resource(account_uuid_list):
    """Share one default resource of each kind with the given accounts:
    first instance offering, first non-ISO image, first L3 network, and
    the root/small disk offerings named in the environment."""
    non_iso = res_ops.gen_query_conditions('mediaType', '!=', 'ISO')
    shared_uuids = [
        res_ops.get_resource(res_ops.INSTANCE_OFFERING)[0].uuid,
        res_ops.query_resource(res_ops.IMAGE, non_iso)[0].uuid,
        res_ops.get_resource(res_ops.L3_NETWORK)[0].uuid,
        test_lib.lib_get_disk_offering_by_name(
            os.environ.get('rootDiskOfferingName')).uuid,
        test_lib.lib_get_disk_offering_by_name(
            os.environ.get('smallDiskOfferingName')).uuid,
    ]
    acc_ops.share_resources(account_uuid_list, shared_uuids)
def _get_host_from_primary_storage(primary_storage_uuid, session_uuid):
    """Return the first host in the same zone as the given primary storage.

    Returns None implicitly when no host shares the zone, matching the
    original fall-through behavior.
    """
    ps = res_ops.get_resource(res_ops.PRIMARY_STORAGE, session_uuid,
                              uuid=primary_storage_uuid)[0]
    candidates = res_ops.get_resource(res_ops.HOST, session_uuid)
    return next((h for h in candidates if h.zoneUuid == ps.zoneUuid), None)
def test():
    """Detach/attach one of two primary storages five times, then verify VM
    state: VMs on the detached PS stop, the others keep running; stopped
    VMs recover, and a new VM can be created on the detached PS."""
    ps_env = test_stub.PSEnvChecker()
    env_cls = (test_stub.SanAndCephPrimaryStorageEnv if ps_env.is_sb_ceph_env
               else test_stub.TwoPrimaryStorageEnv)
    env = env_cls(test_object_dict=test_obj_dict,
                  first_ps_vm_number=VM_COUNT,
                  second_ps_vm_number=VM_COUNT,
                  first_ps_volume_number=VOLUME_NUMBER,
                  second_ps_volume_number=VOLUME_NUMBER)
    env.check_env()
    env.deploy_env()
    first_ps_vm_list = env.first_ps_vm_list
    second_ps_vm_list = env.second_ps_vm_list
    if env.new_ps:
        new_ps_list.append(env.second_ps)

    test_util.test_dsc('detach random one Primary Storage from cluster')
    selected_ps = random.choice([env.first_ps, env.second_ps])
    another_ps = env.second_ps if selected_ps is env.first_ps else env.first_ps

    for _ in xrange(5):
        # track the PS in detached_ps_list only while it is actually detached
        ps_ops.detach_primary_storage(
            selected_ps.uuid, res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        detached_ps_list.append(selected_ps)
        ps_ops.attach_primary_storage(
            selected_ps.uuid, res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        detached_ps_list.pop()

    test_util.test_dsc('All vm in selected ps should STOP')
    for each_vm in first_ps_vm_list + second_ps_vm_list:
        each_vm.update()
    for each_vm in env.get_vm_list_from_ps(selected_ps):
        assert each_vm.get_vm().state == inventory.STOPPED
    for each_vm in env.get_vm_list_from_ps(another_ps):
        assert each_vm.get_vm().state == inventory.RUNNING

    test_util.test_dsc("Recover the vm in the selected ps")
    for each_vm in env.get_vm_list_from_ps(selected_ps):
        each_vm.start()
    for each_vm in env.get_vm_list_from_ps(selected_ps):
        each_vm.check()
        each_vm.update()
        assert each_vm.get_vm().state == inventory.RUNNING

    test_util.test_dsc("Create one vm in selected ps")
    new_vm = test_stub.create_multi_vms(name_prefix='test-vm', count=1,
                                        ps_uuid=selected_ps.uuid)[0]
    test_obj_dict.add_vm(new_vm)
    test_util.test_pass('Multi PrimaryStorage Test Pass')
def test():
    """Delete a cluster with four running VMs (they must stop), re-add the
    cluster from the exported config, and restart all VMs."""
    global curr_deploy_conf
    # Exported inside test() because test_lib.deploy_config is only
    # populated by woodpecker before the test runs.
    curr_deploy_conf = exp_ops.export_zstack_deployment_config(
        test_lib.deploy_config)

    image_uuid = test_lib.lib_get_image_by_name(
        os.environ.get('imageName_s')).uuid
    cluster1 = res_ops.get_resource(res_ops.CLUSTER, name=cluster1_name)[0]
    user_vm_cond = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(
        res_ops.INSTANCE_OFFERING, user_vm_cond)[0].uuid

    vm_creation_option = test_util.VmOption()
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multizones_basic_vm')
    vm_creation_option.set_cluster_uuid(cluster1.uuid)

    vms = []
    for _ in range(4):
        each_vm = test_lib.lib_create_vm(vm_creation_option)
        test_obj_dict.add_vm(each_vm)
        vms.append(each_vm)

    test_util.test_dsc('delete cluster')
    cluster_ops.delete_cluster(cluster1.uuid)
    # deleting the cluster stops every VM it hosted
    for each_vm in vms:
        test_obj_dict.mv_vm(each_vm, vm_header.RUNNING, vm_header.STOPPED)
    for each_vm in vms:
        each_vm.update()
    test_lib.lib_robot_status_check(test_obj_dict)

    cluster_ops.add_cluster_resource(curr_deploy_conf, cluster1_name)
    cluster1 = res_ops.get_resource(res_ops.CLUSTER, name=cluster1_name)[0]
    vm_creation_option.set_cluster_uuid(cluster1.uuid)
    vm_creation_option.set_l3_uuids([])

    for each_vm in vms:
        each_vm.start()
    test_lib.lib_robot_status_check(test_obj_dict)
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Delete Cluster Test Success')
def test():
    """Delete the host running two VMs (they stop), restart the VMs on other
    hosts, then re-add the deleted host."""
    global host_config
    curr_deploy_conf = exp_ops.export_zstack_deployment_config(
        test_lib.deploy_config)

    image_uuid = test_lib.lib_get_image_by_name(
        os.environ.get('imageName_s')).uuid
    host1 = res_ops.get_resource(res_ops.HOST, name=host1_name)[0]
    user_vm_cond = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(
        res_ops.INSTANCE_OFFERING, user_vm_cond)[0].uuid

    vm_creation_option = test_util.VmOption()
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multizones_basic_vm')
    vm_creation_option.set_host_uuid(host1.uuid)

    vms = []
    for _ in range(2):
        each_vm = test_lib.lib_create_vm(vm_creation_option)
        test_obj_dict.add_vm(each_vm)
        vms.append(each_vm)

    # record the host's credentials so it can be re-added after deletion
    host_config.set_name(host1_name)
    host_config.set_cluster_uuid(host1.clusterUuid)
    host_config.set_management_ip(host1.managementIp)
    host_config.set_username(os.environ.get('hostUsername'))
    host_config.set_password(os.environ.get('hostPassword'))

    test_util.test_dsc('delete host')
    host_ops.delete_host(host1.uuid)
    for each_vm in vms:
        test_obj_dict.mv_vm(each_vm, vm_header.RUNNING, vm_header.STOPPED)
    for each_vm in vms:
        each_vm.update()
        each_vm.set_state(vm_header.STOPPED)
    test_lib.lib_robot_status_check(test_obj_dict)

    test_util.test_dsc('start vm on other host')
    for each_vm in vms:
        each_vm.start()
    test_lib.lib_robot_status_check(test_obj_dict)

    host_ops.add_kvm_host(host_config)
    host1 = res_ops.get_resource(res_ops.HOST, name=host1_name)[0]

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Delete Host Test Success')
def test():
    """Two-PS environment: detach/attach one PS five times; VMs on that PS
    must stop while the others keep running; recover and create a new VM."""
    env = test_stub.TwoPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                         first_ps_vm_number=VM_COUNT,
                                         second_ps_vm_number=VM_COUNT,
                                         first_ps_volume_number=VOLUME_NUMBER,
                                         second_ps_volume_number=VOLUME_NUMBER)
    env.check_env()
    env.deploy_env()
    first_ps_vm_list = env.first_ps_vm_list
    second_ps_vm_list = env.second_ps_vm_list
    if env.new_ps:
        new_ps_list.append(env.second_ps)

    test_util.test_dsc('detach random one Primary Storage from cluster')
    selected_ps = random.choice([env.first_ps, env.second_ps])
    if selected_ps is env.first_ps:
        another_ps = env.second_ps
    else:
        another_ps = env.first_ps

    for _ in xrange(5):
        # PS sits in detached_ps_list only between detach and re-attach
        ps_ops.detach_primary_storage(
            selected_ps.uuid, res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        detached_ps_list.append(selected_ps)
        ps_ops.attach_primary_storage(
            selected_ps.uuid, res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        detached_ps_list.pop()

    test_util.test_dsc('All vm in selected ps should STOP')
    for each_vm in first_ps_vm_list + second_ps_vm_list:
        each_vm.update()
    for each_vm in env.get_vm_list_from_ps(selected_ps):
        assert each_vm.get_vm().state == inventory.STOPPED
    for each_vm in env.get_vm_list_from_ps(another_ps):
        assert each_vm.get_vm().state == inventory.RUNNING

    test_util.test_dsc("Recover the vm in the selected ps")
    for each_vm in env.get_vm_list_from_ps(selected_ps):
        each_vm.start()
    for each_vm in env.get_vm_list_from_ps(selected_ps):
        each_vm.check()
        each_vm.update()
        assert each_vm.get_vm().state == inventory.RUNNING

    test_util.test_dsc("Create one vm in selected ps")
    new_vm = test_stub.create_multi_vms(name_prefix='test-vm', count=1,
                                        ps_uuid=selected_ps.uuid)[0]
    test_obj_dict.add_vm(new_vm)
    test_util.test_pass('Multi PrimaryStorage Test Pass')
def test():
    """AutoScaling health-strategy test: build a group that automatically
    removes unhealthy instances, stop a scaled VM, and verify the group
    shrinks back to its minimum size.

    Relies on module-level config: maxvm_number, minvm_number, initvm_number.
    """
    test_util.test_dsc("create autoscaling group")
    test_util.test_dsc("create alarm")
    # alarm_1 (memory > 99%) drives scale-out; alarm_2 (memory < 1%) drives scale-in
    alarm_1Uuid = autoscaling.create_alarm('GreaterThan', 60, 99, 'ZStack/VM', 'MemoryUsedInPercent').uuid
    alarm_2Uuid = autoscaling.create_alarm('LessThan', 60, 1, 'ZStack/VM', 'MemoryUsedInPercent').uuid
    test_util.test_dsc("get l3 network uuid")
    l3_public_name = os.environ.get(test_stub.L3_SYSTEM_NAME_LIST[0])
    test_util.test_logger("%s" %(l3_public_name))
    l3NetworkUuids = test_lib.lib_get_l3_by_name(l3_public_name).uuid
    test_util.test_logger("%s" %(l3NetworkUuids))
    test_util.test_logger("get vm InstanceOffer uuid")
    vmInstanceOfferingUuid = res_ops.get_resource(res_ops.INSTANCE_OFFERING,None,None,os.environ.get('instanceOfferingName_s'))[0].uuid
    test_util.test_logger("%s" %(vmInstanceOfferingUuid))
    test_util.test_logger("get vm Image uuid")
    imageUuid = res_ops.get_resource(res_ops.IMAGE,None,None,os.environ.get('imageName3'))[0].uuid
    test_util.test_logger("%s" %(imageUuid))
    test_util.test_logger("get vm template uuid")
    # the template binds scaled VMs to a load balancer listener
    listerUuid = res_ops.get_resource(res_ops.LOAD_BALANCER_LISTENER)[0].uuid
    vm_templateUuid = autoscaling.create_autoScaling_vmTemplate([l3NetworkUuids],vmInstanceOfferingUuid,imageUuid,l3NetworkUuids,["loadBalancerListenerUuids::"+listerUuid]).uuid
    test_util.test_logger("%s" %(vm_templateUuid))
    test_util.test_logger("get autoscaling group uuid")
    # health strategy VmInstanceStatus + automatic removal of unhealthy VMs
    autoscaling_groupUuid = autoscaling.create_autoScaling_group(maxvm_number,minvm_number,["initialInstanceNumber::3","vmInstanceHealthStrategy::VmInstanceStatus","automaticallyRemoveUnhealthyInstance::true"]).uuid
    test_util.test_logger("%s" %(autoscaling_groupUuid))
    test_util.test_logger("attach vm template to autoscaling group")
    autoscaling.attach_autoScaling_templateToGroup(autoscaling_groupUuid,vm_templateUuid)
    test_util.test_logger("add removal rule to autoscaling group")
    groupremovalinstanceruleUuid = autoscaling.create_autoScaling_group_removalInstanceRule(1,30,autoscaling_groupUuid).uuid
    autoscaling.create_autoScaling_ruleAlarmTrigger(alarm_2Uuid,groupremovalinstanceruleUuid)
    test_util.test_logger("add new instance rule to autoscaling group")
    groupnewinstanceruleUuid = autoscaling.create_autoScaling_group_addingNewInstanceRule(1,autoscaling_groupUuid,30).uuid
    autoscaling.create_autoScaling_ruleAlarmTrigger(alarm_1Uuid,groupnewinstanceruleUuid)
    test_util.test_logger("check vmm instance number")
    test_stub.check_autoscaling_init_vmm_number(initvm_number,autoscaling_groupUuid)
    test_util.test_logger("stop autoscaling vmm instance")
    # presumably this stops a scaled VM so the health strategy removes it
    # — TODO confirm against test_stub.query_autoscaling_vm_instance
    test_stub.query_autoscaling_vm_instance(imageUuid)
    # give the health checker time to react
    time.sleep(60)
    test_util.test_logger("check vmm instance numkber")
    test_stub.check_autoscaling_init_vmm_number(minvm_number,autoscaling_groupUuid)
    test_util.test_dsc("Delete autoscaling group")
    autoscaling.delete_autoScaling_group(autoscaling_groupUuid)
    test_stub.check_deleteautoscaling_vmm_number()
    test_util.test_pass("Test AutoScaling Group Successfully")
def test():
    """Delete a cluster hosting four VMs (all must stop), re-add it from the
    exported deployment config, and restart the VMs."""
    global curr_deploy_conf
    # Exported inside test() because test_lib.deploy_config is only
    # populated by woodpecker before the test runs.
    curr_deploy_conf = exp_ops.export_zstack_deployment_config(
        test_lib.deploy_config)

    image_uuid = test_lib.lib_get_image_by_name(
        os.environ.get('imageName_net')).uuid
    cluster1 = res_ops.get_resource(res_ops.CLUSTER, name=cluster1_name)[0]
    user_vm_cond = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(
        res_ops.INSTANCE_OFFERING, user_vm_cond)[0].uuid

    vm_creation_option = test_util.VmOption()
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multizones_basic_vm')
    vm_creation_option.set_cluster_uuid(cluster1.uuid)

    vms = []
    for _ in range(4):
        each_vm = test_lib.lib_create_vm(vm_creation_option)
        test_obj_dict.add_vm(each_vm)
        vms.append(each_vm)

    test_util.test_dsc('delete cluster')
    cluster_ops.delete_cluster(cluster1.uuid)
    # deleting the cluster stops every VM it hosted
    for each_vm in vms:
        test_obj_dict.mv_vm(each_vm, vm_header.RUNNING, vm_header.STOPPED)
    for each_vm in vms:
        each_vm.update()
    test_lib.lib_robot_status_check(test_obj_dict)

    cluster_ops.add_cluster_resource(curr_deploy_conf, cluster1_name)
    cluster1 = res_ops.get_resource(res_ops.CLUSTER, name=cluster1_name)[0]
    vm_creation_option.set_cluster_uuid(cluster1.uuid)
    vm_creation_option.set_l3_uuids([])

    for each_vm in vms:
        each_vm.start()
    test_lib.lib_robot_status_check(test_obj_dict)
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Delete Cluster Test Success')
def test():
    """Delete the host pinned to two VMs (they stop), restart them on other
    hosts, then re-add the deleted host."""
    global host_config
    curr_deploy_conf = exp_ops.export_zstack_deployment_config(
        test_lib.deploy_config)

    image_uuid = test_lib.lib_get_image_by_name(
        os.environ.get('imageName_s')).uuid
    host1 = res_ops.get_resource(res_ops.HOST, name=host1_name)[0]
    user_vm_cond = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(
        res_ops.INSTANCE_OFFERING, user_vm_cond)[0].uuid

    vm_creation_option = test_util.VmOption()
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multizones_basic_vm')
    vm_creation_option.set_host_uuid(host1.uuid)

    vms = []
    for _ in range(2):
        each_vm = test_lib.lib_create_vm(vm_creation_option)
        test_obj_dict.add_vm(each_vm)
        vms.append(each_vm)

    # capture the host's identity so it can be re-added later
    host_config.set_name(host1_name)
    host_config.set_cluster_uuid(host1.clusterUuid)
    host_config.set_management_ip(host1.managementIp)
    host_config.set_username(os.environ.get('hostUsername'))
    host_config.set_password(os.environ.get('hostPassword'))

    test_util.test_dsc('delete host')
    host_ops.delete_host(host1.uuid)
    for each_vm in vms:
        test_obj_dict.mv_vm(each_vm, vm_header.RUNNING, vm_header.STOPPED)
    for each_vm in vms:
        each_vm.update()
        each_vm.set_state(vm_header.STOPPED)
    test_lib.lib_robot_status_check(test_obj_dict)

    test_util.test_dsc('start vm on other host')
    for each_vm in vms:
        each_vm.start()
    test_lib.lib_robot_status_check(test_obj_dict)

    host_ops.add_kvm_host(host_config)
    host1 = res_ops.get_resource(res_ops.HOST, name=host1_name)[0]

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Delete Host Test Success')
def create_random_vm():
    """Create a VM from a randomly chosen instance offering, image and L3
    network, and return the ZstackTestVm wrapper."""
    def _pick(resource_type):
        # choose a random resource of the given type and return its uuid
        return random.choice(
            res_ops.get_resource(resource_type, session_uuid=None)).uuid

    vm_creation_option = test_util.VmOption()
    vm_creation_option.set_instance_offering_uuid(_pick(res_ops.INSTANCE_OFFERING))
    vm_creation_option.set_image_uuid(_pick(res_ops.IMAGE))
    vm_creation_option.set_l3_uuids([_pick(res_ops.L3_NETWORK)])

    new_vm = test_vm.ZstackTestVm()
    new_vm.set_creation_option(vm_creation_option)
    new_vm.create()
    return new_vm
def test():
    """Delete and re-add ps2 five times, then verify a default VM can still
    be created, plus a VM or volumes on ps2 depending on the environment."""
    ps_env = test_stub.PSEnvChecker()
    ps1, ps2 = ps_env.get_two_ps()

    # map PS type -> creation call; unknown types yield None like the original
    for _ in xrange(5):
        test_util.test_dsc('Remove ps2')
        ps_ops.detach_primary_storage(
            ps2.uuid, res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        ps_ops.delete_primary_storage(ps2.uuid)
        delete_ps_list.append(ps2)

        test_util.test_dsc('Add ps2 back')
        ps_config = test_util.PrimaryStorageOption()
        for setter, value in ((ps_config.set_name, ps2.name),
                              (ps_config.set_description, ps2.description),
                              (ps_config.set_zone_uuid, ps2.zoneUuid),
                              (ps_config.set_type, ps2.type),
                              (ps_config.set_url, ps2.url)):
            setter(value)
        creators = {
            inventory.LOCAL_STORAGE_TYPE: ps_ops.create_local_primary_storage,
            inventory.NFS_PRIMARY_STORAGE_TYPE: ps_ops.create_nfs_primary_storage,
        }
        ps2 = creators.get(ps2.type, lambda _cfg: None)(ps_config)
        time.sleep(5)
        ps_ops.attach_primary_storage(
            ps2.uuid, res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
        time.sleep(5)
        delete_ps_list.pop()

    test_util.test_dsc('create VM by default para')
    vm1 = test_stub.create_multi_vms(name_prefix='vm1', count=1,
                                     data_volume_number=VOLUME_NUMBER)[0]
    test_obj_dict.add_vm(vm1)

    if ps_env.is_local_nfs_env:
        test_util.test_dsc('create date volume in ps2')
        volume = test_stub.create_multi_volumes(count=VOLUME_NUMBER, ps=ps2)
        # NOTE(review): create_multi_volumes appears to return a collection;
        # confirm add_volume is meant to receive it whole rather than per item.
        test_obj_dict.add_volume(volume)
    else:
        test_util.test_dsc('create VM in ps2')
        vm2 = test_stub.create_multi_vms(name_prefix='vm2', count=1,
                                         ps_uuid=ps2.uuid,
                                         data_volume_number=VOLUME_NUMBER)[0]
        test_obj_dict.add_vm(vm2)
    test_util.test_pass('Multi PrimaryStorage Test Pass')
def share_admin_resource(account_uuid_list):
    """Share every instance offering, image, L3 network and disk offering
    with the given accounts in one call."""
    resource_types = (res_ops.INSTANCE_OFFERING, res_ops.IMAGE,
                      res_ops.L3_NETWORK, res_ops.DISK_OFFERING)
    # flatten all uuids across the four resource types, preserving order
    resource_list = [inv.uuid
                     for rtype in resource_types
                     for inv in res_ops.get_resource(rtype)]
    acc_ops.share_resources(account_uuid_list, resource_list)
def test():
    """Detach an L2 network from the cluster hosting its VR and check the VR
    migrates to the other cluster.

    Skips when the L2 is a vxlan network (detach unsupported) or when the L3
    has no virtual router. Leaves globals l2_net_uuid / cluster_uuid / vm set
    for the suite's teardown.
    """
    global l2_net_uuid
    global cluster_uuid
    global vm
    # two clusters are required: one VM in each so the VR has somewhere to go
    cluster1 = res_ops.get_resource(res_ops.CLUSTER)[0]
    cluster2 = res_ops.get_resource(res_ops.CLUSTER)[1]
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_s')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    l2_net_uuid = test_lib.lib_get_l3_by_name(l3_name).l2NetworkUuid
    l2_net_type = res_ops.get_resource(res_ops.L2_NETWORK, uuid=l2_net_uuid)[0].type
    test_util.test_logger("l2_network.type@@:%s" %(l2_net_type))
    if l2_net_type == "VxlanNetwork":
        test_util.test_skip("Vxlan network not support detach l2 network, therefore, skip the test")
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    vm_creation_option.set_l3_uuids([l3_net_uuid])
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_cluster_uuid(cluster1.uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    vm_creation_option.set_name('multicluster_basic_vm')
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(vm_creation_option)
    vm.create()
    # second VM on the other cluster keeps the L3 in use there
    vm2 = test_vm_header.ZstackTestVm()
    vm_creation_option.set_cluster_uuid(cluster2.uuid)
    vm2.set_creation_option(vm_creation_option)
    vm2.create()
    vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    if len(vrs) == 0:
        test_util.test_skip("skip the test for non vr")
    vr = vrs[0]
    cluster_uuid = vr.clusterUuid
    # detach the L2 from the cluster currently hosting the VR
    net_ops.detach_l2(l2_net_uuid, cluster_uuid)
    vrs = test_lib.lib_find_vr_by_l3_uuid(l3_net_uuid)
    if len(vrs) == 0:
        test_util.test_skip("skip the test for non vr")
    vr = vrs[0]
    if vr.clusterUuid == cluster_uuid:
        # NOTE(review): this branch fires when the VR did NOT migrate, yet it
        # only logs — looks like it should be test_fail; confirm intent.
        test_util.test_logger('vr is expected to migrate to another cluster')
    vm.destroy()
    vm2.destroy()
    # restore the L2 attachment for subsequent tests
    net_ops.attach_l2(l2_net_uuid, cluster_uuid)
    test_util.test_pass('Create detach l2 from clsuter vr migrate Test Success')
def share_admin_resource(account_uuid_list):
    """Grant the given accounts access to all admin offerings, images and
    L3 networks."""
    def _uuids_of(resource_type):
        # Collect the uuids of every resource of one type.
        return [entry.uuid for entry in res_ops.get_resource(resource_type)]

    to_share = []
    to_share += _uuids_of(res_ops.INSTANCE_OFFERING)
    to_share += _uuids_of(res_ops.IMAGE)
    to_share += _uuids_of(res_ops.L3_NETWORK)
    to_share += _uuids_of(res_ops.DISK_OFFERING)
    acc_ops.share_resources(account_uuid_list, to_share)
def test():
    """Set a VM hostname and verify both the hostname and the nic IP survive
    a host reconnect, a VM reboot, and a reconnect followed by a reboot."""
    global vm
    vm = test_stub.create_vm()
    vm_uuid = vm.get_vm().uuid

    def _assert_hostname_and_ip(expected_hostname, ip_fail_msg):
        # Hostname must match what was set; nic IP must be unchanged.
        if vm_ops.get_vm_hostname(vm_uuid) != expected_hostname:
            test_util.test_fail('can not get the vm hostname after set vm hostname')
        vm_inv = res_ops.get_resource(res_ops.VM_INSTANCE, uuid=vm_uuid)[0]
        if vm_inv.vmNics[0].ip != vm.get_vm().vmNics[0].ip:
            test_util.test_fail(ip_fail_msg)

    # 1: set hostname, then reconnect the hosting host
    hostname = 'vm123.zstack.org'
    vm_ops.set_vm_hostname(vm_uuid, 'vm123.zstack.org')
    host = test_lib.lib_find_host_by_vm(vm.get_vm())
    host_ops.reconnect_host(host.uuid)
    _assert_hostname_and_ip(
        hostname,
        'can not get the correct ip address after set vm hostname and reconnected host')

    # 2: set a new hostname, then reboot the VM
    hostname = 'vm1234.zstack.org'
    vm_ops.set_vm_hostname(vm_uuid, hostname)
    host = test_lib.lib_find_host_by_vm(vm.get_vm())
    vm_ops.reboot_vm(vm_uuid)
    _assert_hostname_and_ip(
        hostname,
        'can not get the correct ip address after set vm hostname and reboot vm')

    # 3: set a new hostname, reconnect the host and reboot the VM
    hostname = 'vm12345.zstack.org'
    vm_ops.set_vm_hostname(vm_uuid, hostname)
    host = test_lib.lib_find_host_by_vm(vm.get_vm())
    host_ops.reconnect_host(host.uuid)
    vm_ops.reboot_vm(vm_uuid)
    _assert_hostname_and_ip(
        hostname,
        'can not get the correct ip address after set vm hostname and reboot vm and reconnect host')

    test_util.test_pass('SetVMHostname and get vm\'s correct ip')
def test():
    """Add vcenter1 and verify both fingerprint VMs get synced in."""
    global vcenter_uuid
    name = os.environ['vcenter1_name']
    domain = os.environ['vcenter1_ip']
    user = os.environ['vcenter1_domain_name']
    password = os.environ['vcenter1_password']
    pattern1 = os.environ['vcenter1_vm_pattern1']
    pattern2 = os.environ['vcenter1_vm_pattern2']

    #add vcenter senario1:
    zone_uuid = res_ops.get_resource(res_ops.ZONE)[0].uuid
    inv = vct_ops.add_vcenter(name, domain, user, password, True, zone_uuid)
    vcenter_uuid = inv.uuid
    if vcenter_uuid == None:
        test_util.test_fail("vcenter_uuid is None")

    #insert the basic operations for the newly join in vcenter resourse
    vm_names = res_ops.query_resource_fields(res_ops.VM_INSTANCE, [], None,
                                             fields=['name'])
    vm_list = [entry.name for entry in vm_names]
    test_util.test_logger(", ".join([str(vm_name_tmp) for vm_name_tmp in vm_list]))
    if pattern1 not in vm_list:
        test_util.test_fail("newly joined vcenter missing fingerprint vm1, test failed")
    if pattern2 not in vm_list:
        test_util.test_fail("newly joined vcenter missing fingerprint vm2, test failed")

    vct_ops.delete_vcenter(vcenter_uuid)
    test_util.test_pass("add && delete vcenter test passed.")
def check_detach_l2(pre_cluster_uuid, l2_uuid, vm, is_other_cluster):
    """Verify VM start behavior after an L2 network was detached from a cluster.

    Checks the cluster is gone from the L2's attached list, then starts the
    VM: with another attached cluster available the VM must start there; with
    none available the start must fail.

    @param pre_cluster_uuid: uuid of the cluster the L2 was detached from
    @param l2_uuid: uuid of the detached L2 network
    @param vm: zstack test VM object to start and check
    @param is_other_cluster: whether another cluster should still be attached
    """
    l2 = res_ops.get_resource(res_ops.L2_NETWORK, uuid=l2_uuid)[0]
    attached_clusters = l2.attachedClusterUuids
    if pre_cluster_uuid in attached_clusters:
        test_util.test_fail('[cluster:] %s is still in [l2:] %s attached list.'
                            % (pre_cluster_uuid, l2_uuid))
    test_util.test_dsc('start vm again. vm should be started in different cluster, if there has.')
    if attached_clusters:
        if not is_other_cluster:
            test_util.test_fail('There should not be available cluster for [l2:] %s. But catch some.' % l2_uuid)
        vm.start()
        new_cluster_uuid = vm.get_vm().clusterUuid
        if new_cluster_uuid == pre_cluster_uuid:
            # BUG FIX: the original format string had two %s placeholders but
            # three arguments, which raises TypeError instead of failing the
            # test; add the missing placeholder for the VM uuid.
            test_util.test_fail(
                '[vm:] %s started on old [cluster]: %s, which is detached by [l2:] %s '
                % (vm.get_vm().uuid, new_cluster_uuid, l2_uuid))
        vm.check()
    else:
        if is_other_cluster:
            test_util.test_fail('There should be available cluster for [l2:] %s. But did not catch.' % l2_uuid)
        # No cluster is attached with the l2; the VM start must fail.
        try:
            vm.start()
        except:
            test_util.test_logger('\
Expected: VM start failed, since there is not cluster is attached to [l2]: %s, \
after [cluster:] %s is detached' % (l2_uuid, pre_cluster_uuid))
        else:
            test_util.test_fail('[vm]: %s is Wrongly started up, since there is\
 not cluster is attached with [l2]: %s, after previous detaching ops' %
                                (vm.get_vm().uuid, l2_uuid))
def add_l3_resource(deploy_config, l3_name, l2_name = None, zone_name = None, \
        session_uuid = None):
    """Add an L3 network (and its virtual router offering) from deploy_config.

    Logs in as admin when no session_uuid is given and logs that temporary
    session out again in the finally block; a caller-supplied session is
    left untouched.

    @param deploy_config: parsed deployment config to read the L3 from
    @param l3_name: name of the L3 network to add
    @param l2_name: optional L2 network name to attach to
    @param zone_name: optional zone scoping the lookup
    @param session_uuid: optional existing admin session to reuse
    @raise Exception: re-raised after logging if any add step fails
    """
    # Track whether the session was supplied by the caller, so we only log
    # out sessions we created ourselves.
    session_uuid_flag = True
    if not session_uuid:
        session_uuid = acc_ops.login_as_admin()
        session_uuid_flag = False
    try:
        dep_ops.add_l3_network(None, None, deploy_config, session_uuid, l3_name = l3_name, \
                l2_name = l2_name, zone_name = zone_name)
        dep_ops.add_virtual_router(None, None, deploy_config, session_uuid, \
                l3_name = l3_name, zone_name = zone_name)
        # Only reached on success; used by the final log statement below.
        l3_uuid = res_ops.get_resource(res_ops.L3_NETWORK, session_uuid, \
                name = l3_name)[0].uuid
    except Exception as e:
        test_util.test_logger(
            '[Error] zstack deployment meets exception when adding l3 resource .'
        )
        traceback.print_exc(file=sys.stdout)
        raise e
    finally:
        if not session_uuid_flag:
            acc_ops.logout(session_uuid)
    test_util.action_logger('Complete add l3 resources for [uuid:] %s' \
            % l3_uuid)
def test():
    """Check the default community license, load a 10-day/3-CPU paid license,
    then add and remove a vcenter under that license."""
    test_stub.reload_default_license()
    test_util.test_logger('Check default community license')
    test_stub.check_license(None, None, 2147483647, False, 'Community')

    test_util.test_logger('Load and Check TrialExt license with 10 day and 3 CPU')
    license_path = test_stub.gen_license('woodpecker', '*****@*****.**', '10',
                                         'Prepaid', '3', '')
    test_stub.load_license(license_path)
    issued_date = test_stub.get_license_info().issuedDate
    # The license is valid for 10 days from its issue date.
    expired_date = test_stub.license_date_cal(issued_date, 86400 * 10)
    test_stub.check_license("*****@*****.**", 3, None, False, 'Paid',
                            issued_date=issued_date, expired_date=expired_date)

    # add the vcenter 1.203
    test_stub.create_zone()
    username = os.environ.get("vcenteruser")
    password = os.environ.get("vcenterpwd")
    zone_name = "ZONE1"
    zone_cond = res_ops.gen_query_conditions('name', '=', zone_name)
    zone_uuid = res_ops.query_resource(res_ops.ZONE, zone_cond)[0].uuid
    https = "true"
    vcenterdomain = "172.20.0.50"
    vct_ops.add_vcenter("vcenter_test", vcenterdomain, username, password,
                        https, zone_uuid)
    vcenter_uuid = res_ops.get_resource(res_ops.VCENTER)[0].uuid
    time.sleep(5)
    vct_ops.delete_vcenter(vcenter_uuid)
    time.sleep(5)
    zone_ops.delete_zone(zone_uuid)
    test_util.test_pass('Check License and add the vcenter Test Success')
def test():
    """Create a CPU-util email trigger, wait for it to fire, and verify the
    alert mail arrives; clean up the trigger, action and media afterwards."""
    global trigger
    global media
    global trigger_action
    test_item = "create.email.server"
    host = res_ops.get_resource(res_ops.HOST)[0]
    duration = 60
    # cpu=-1 aggregates all cpus; >200% is a deliberately loose threshold.
    expression = "host.cpu.util{cpu=-1,type=\"used\"}>200"
    monitor_trigger = mon_ops.create_monitor_trigger(host.uuid, duration, expression)
    send_email = test_stub.create_email_media()
    media = send_email.uuid
    # Random hex suffix keeps the action name unique per run.
    trigger_action_name = "trigger" + ''.join(
        map(lambda xx: (hex(ord(xx))[2:]), os.urandom(8)))
    trigger = monitor_trigger.uuid
    receive_email = os.environ.get('receive_email')
    monitor_trigger_action = mon_ops.create_email_monitor_trigger_action(
        trigger_action_name, send_email.uuid, trigger.split(), receive_email)
    trigger_action = monitor_trigger_action.uuid
    time.sleep(100)
    mail_list = test_stub.receive_email()
    keywords = "ZStack-Monitor"
    result = 'success'
    if test_stub.check_email(mail_list, keywords, trigger_action, result) == 0:
        test_util.test_fail('Failed to Get Target: %s for: %s Trigger Mail'
                            % (host.uuid, test_item))
    mon_ops.delete_monitor_trigger_action(trigger_action)
    mon_ops.delete_monitor_trigger(trigger)
    mon_ops.delete_email_media(media)
def env_recover():
    """Recreate the primary storages deleted during the test and re-attach
    each new one to the first cluster."""
    test_lib.lib_error_cleanup(test_obj_dict)
    for delete_ps in delete_ps_list:
        # Rebuild a PS option mirroring the deleted storage's settings.
        ps_config = test_util.PrimaryStorageOption()
        ps_config.set_name(delete_ps.name)
        ps_config.set_description(delete_ps.description)
        ps_config.set_zone_uuid(delete_ps.zoneUuid)
        ps_config.set_type(delete_ps.type)
        ps_config.set_url(delete_ps.url)
        if delete_ps.type == inventory.LOCAL_STORAGE_TYPE:
            new_ps = ps_ops.create_local_primary_storage(ps_config)
        elif delete_ps.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
            new_ps = ps_ops.create_nfs_primary_storage(ps_config)
        elif delete_ps.type == "SharedBlock":
            # SharedBlock leaves an LVM volume group named after the PS uuid
            # on the hosts; remove it via ssh before re-creating the storage.
            host = random.choice(res_ops.query_resource(res_ops.HOST))
            cmd = "vgchange --lock-start %s && vgremove %s -y" % (
                delete_ps.uuid, delete_ps.uuid)
            host_username = os.environ.get('hostUsername')
            host_password = os.environ.get('hostPassword')
            rsp = test_lib.lib_execute_ssh_cmd(host.managementIp, host_username,
                                               host_password, cmd, 240)
            if not rsp:
                test_util.test_logger("vgremove failed")
            # NOTE(review): disk_uuid is not defined in this function —
            # presumably a module-level global set earlier in the test; confirm.
            new_ps = ps_ops.create_sharedblock_primary_storage(
                ps_config, disk_uuid)
        else:
            # NOTE(review): for any other PS type new_ps stays None and the
            # attach call below would raise AttributeError — confirm this
            # branch is unreachable in practice.
            new_ps = None
        ps_ops.attach_primary_storage(
            new_ps.uuid, res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
def test():
    """Create, check, destroy and expunge a VM on vcenter2."""
    global vcenter_uuid, vm
    name = os.environ['vcenter2_name']
    domain = os.environ['vcenter2_ip']
    user = os.environ['vcenter2_domain_name']
    password = os.environ['vcenter2_password']
    ova_image_name = os.environ['vcenter2_template_exist']
    network_pattern1 = os.environ['vcenter2_network_pattern1']

    zone_uuid = res_ops.get_resource(res_ops.ZONE)[0].uuid
    inv = vct_ops.add_vcenter(name, domain, user, password, True, zone_uuid)
    vcenter_uuid = inv.uuid
    if vcenter_uuid == None:
        test_util.test_fail("vcenter_uuid is None")

    vm = test_stub.create_vm_in_vcenter(vm_name='vm-create-test',
                                        image_name=ova_image_name,
                                        l3_name=network_pattern1)
    vm.check()
    vm.destroy()
    vm.check()
    vm.expunge()

    vct_ops.delete_vcenter(vcenter_uuid)
    test_util.test_pass("Creating vm of vcenter test passed.")
def test():
    """Detach a VM's nic, migrate its root volume to another host, re-attach
    the L3 and start the VM again."""
    global test_obj_dict
    # This scenario only makes sense on local storage (per-host volumes).
    pss = res_ops.get_resource(res_ops.PRIMARY_STORAGE)
    if pss[0].type != "LocalStorage":
        test_util.test_skip(
            "this test is designed to run on localstorage, skip on other ps type."
        )
    vm = test_stub.create_vr_vm('vm1', 'imageName_net', 'l3VlanNetwork3')
    test_obj_dict.add_vm(vm)
    vm.check()
    net_ops.detach_l3(vm.vm.vmNics[0].uuid)
    vm.stop()
    vm.check()
    # Move the root volume to a random other host while stopped.
    target_host = test_lib.lib_find_random_host(vm.vm)
    vol_ops.migrate_volume(vm.get_vm().allVolumes[0].uuid, target_host.uuid)
    l3_uuid = test_lib.lib_get_l3_by_name(os.environ.get('l3VlanNetwork3')).uuid
    net_ops.attach_l3(l3_uuid, vm.vm.uuid)
    vm.start()
    vm.check()
    test_util.test_pass('test detach l3, migrate and attaching passed.')
def test():
    """Create ISO-based VMs across two primary storages and verify root/data
    volumes land on the requested PS for every placement combination."""
    ps_env = test_stub.PSEnvChecker()
    ps, another = ps_env.get_two_ps()
    disk_offering_uuids = [random.choice(res_ops.get_resource(res_ops.DISK_OFFERING)).uuid]
    vm = test_stub.create_iso_vm_with_random_offering(vm_name='iso_vm', l3_name='l3VlanNetworkName1')
    test_obj_dict.add_vm(vm)
    # Exercise each combination of (root PS specified?, data PS specified?).
    for root_volume_ps_uuid in [None, ps.uuid]:
        for data_vol_ps_uuid in [None, another.uuid]:
            if ps_env.is_local_shared_env or ps_env.is_sb_ceph_env:
                # Skip when exactly one of the two is specified (the types
                # differ: NoneType vs str) — only both-default or
                # both-specified combinations are run in mixed environments.
                if type(root_volume_ps_uuid) != type(data_vol_ps_uuid):
                    continue
            vm = test_stub.create_iso_vm_with_random_offering(
                vm_name='test_iso_vm',
                disk_offering_uuids=disk_offering_uuids if data_vol_ps_uuid else None,
                ps_uuid=root_volume_ps_uuid,
                l3_name='l3VlanNetworkName1',
                system_tags=['primaryStorageUuidForDataVolume::{}'.format(data_vol_ps_uuid)] if data_vol_ps_uuid else None)
            if root_volume_ps_uuid:
                # Root volume must be on the first PS when explicitly requested.
                root_vol = test_lib.lib_get_root_volume(vm.get_vm())
                assert root_vol.primaryStorageUuid == ps.uuid
            if data_vol_ps_uuid:
                # Every non-root volume must be on the second PS.
                data_vol_list = [vol for vol in vm.get_vm().allVolumes if vol.type != 'Root']
                for data_vol in data_vol_list:
                    assert data_vol.primaryStorageUuid == another.uuid
            test_obj_dict.add_vm(vm)
    test_util.test_pass('Multi PrimaryStorage Test Pass')
def test():
    """Create a batch of volumes on each of two primary storages, plus one
    extra volume on the first; capacity checking is still a TODO."""
    test_util.test_dsc(
        "Create {0} volume in the first primaryStorage".format(VOLUME_NUMBER))
    ps_list = res_ops.get_resource(res_ops.PRIMARY_STORAGE)
    first_ps = random.choice(ps_list)
    for vol in test_stub.create_multi_volumes(count=VOLUME_NUMBER, ps=first_ps):
        test_obj_dict.add_volume(vol)

    if len(ps_list) == 1:
        # Single-PS environment: create a second PS on the fly.
        test_util.test_dsc("Add Another primaryStorage")
        second_ps = test_stub.add_primaryStorage(first_ps=first_ps)
        new_ps_list.append(second_ps)
    else:
        candidates = [ps for ps in ps_list if ps.uuid != first_ps.uuid]
        second_ps = random.choice(candidates)

    test_util.test_dsc(
        "Create {0} volume in the second primaryStorage".format(VOLUME_NUMBER))
    for vol in test_stub.create_multi_volumes(count=VOLUME_NUMBER, ps=second_ps):
        test_obj_dict.add_volume(vol)

    test_util.test_dsc("Create one more volume in the first primaryStorage")
    extra_volume = test_stub.create_multi_volumes(count=1, ps=first_ps)[0]
    test_obj_dict.add_volume(extra_volume)

    test_util.test_dsc("Check the capacity")
    #To do
    test_util.test_pass('Multi PrimaryStorage Test Pass')
def test():
    """Add vcenter1 and verify its pre-existing ova template is synced in as
    an image."""
    global vcenter_uuid
    name = os.environ['vcenter1_name']
    domain = os.environ['vcenter1_ip']
    user = os.environ['vcenter1_domain_name']
    password = os.environ['vcenter1_password']
    expected_template = os.environ['vcenter1_template_exist']

    #add vcenter senario1:
    zone_uuid = res_ops.get_resource(res_ops.ZONE)[0].uuid
    inv = vct_ops.add_vcenter(name, domain, user, password, True, zone_uuid)
    vcenter_uuid = inv.uuid
    if vcenter_uuid == None:
        test_util.test_fail("vcenter_uuid is None")

    #insert the basic operations for the newly join in vcenter resourse
    image_names = res_ops.query_resource_fields(res_ops.IMAGE, [], None,
                                                fields=['name'])
    image_list = [entry.name for entry in image_names]
    test_util.test_logger(", ".join([str(image_name_tmp) for image_name_tmp in image_list]))
    if expected_template not in image_list:
        test_util.test_fail("newly joined vcenter missing fingerprint vm1, test failed")

    vct_ops.delete_vcenter(vcenter_uuid)
    test_util.test_pass("add && delete vcenter test passed.")
def test():
    """Add vcenter1 and check its backup storage gets registered."""
    global vcenter_uuid
    name = os.environ['vcenter1_name']
    domain = os.environ['vcenter1_ip']
    user = os.environ['vcenter1_domain_name']
    password = os.environ['vcenter1_password']

    zone_uuid = res_ops.get_resource(res_ops.ZONE)[0].uuid
    inv = vct_ops.add_vcenter(name, domain, user, password, True, zone_uuid)
    vcenter_uuid = inv.uuid
    if vcenter_uuid == None:
        test_util.test_fail("vcenter_uuid is None")

    cond = res_ops.gen_query_conditions("name", '=', vcenter_backup_storage_name)
    vcbs_inv = res_ops.query_resource_fields(res_ops.VCENTER_BACKUP_STORAGE,
                                             cond, None, fields=['uuid'])[0]
    if not vcbs_inv.uuid:
        test_util.test_fail("not found vcenter backup storage")

    vct_ops.delete_vcenter(vcenter_uuid)
    test_util.test_pass("add && delete vcenter test passed.")
def add_zone_resource(deploy_config, zone_name):
    """Deploy a full zone (l2, primary storage, cluster, hosts, l3, VR
    offering) from deploy_config under a temporary admin session.

    @param deploy_config: parsed deployment config to read resources from
    @param zone_name: name of the zone to create and populate
    @raise Exception: re-raised after logging if any deployment step fails
    """
    session_uuid = acc_ops.login_as_admin()
    try:
        # The steps are order-dependent: each later resource references the
        # ones created before it.
        test_util.test_dsc('-------add zone operation-------')
        dep_ops.add_zone(deploy_config, session_uuid, zone_name = zone_name)
        test_util.test_dsc('-------add l2 operation-------')
        dep_ops.add_l2_network(deploy_config, session_uuid, \
                zone_name = zone_name)
        test_util.test_dsc('-------add primary stroage operation-------')
        dep_ops.add_primary_storage(deploy_config, session_uuid, \
                zone_name = zone_name)
        test_util.test_dsc('-------add cluster operation-------')
        dep_ops.add_cluster(deploy_config, session_uuid, \
                zone_name = zone_name)
        test_util.test_dsc('-------add host operation-------')
        dep_ops.add_host(deploy_config, session_uuid, \
                zone_name = zone_name)
        test_util.test_dsc('-------add l3 operation-------')
        dep_ops.add_l3_network(deploy_config, session_uuid, \
                zone_name = zone_name)
        test_util.test_dsc('-------add virtual router offering operation-------')
        dep_ops.add_virtual_router(deploy_config, session_uuid, \
                zone_name = zone_name)
        # Only reached on success; used by the final log statement below.
        zone = res_ops.get_resource(res_ops.ZONE, session_uuid, \
                name = zone_name)[0]
    except Exception as e:
        test_util.test_logger('[Error] zstack deployment meets exception when adding zone resource .')
        traceback.print_exc(file=sys.stdout)
        raise e
    finally:
        acc_ops.logout(session_uuid)
    test_util.action_logger('Complete add zone resources for [uuid:] %s' \
            % zone.uuid)
def test():
    """Fire a host.mem.util trigger by loading memory on the host and verify
    both the status transitions and the alert mail."""
    global trigger
    global media
    global trigger_action
    test_item = "host.mem.util"
    resource_type = "HostVO"
    host_monitor_item = test_stub.get_monitor_item(resource_type)
    if test_item not in host_monitor_item:
        test_util.test_fail('%s is not available for monitor' % test_item)
    hosts = res_ops.get_resource(res_ops.HOST)
    host = hosts[0]
    duration = 300
    # NOTE(review): `free` runs on the machine executing the test, not on the
    # monitored host — confirm this is intentional (it is correct when both
    # are the same box).
    total = os.popen('free|grep Mem|awk \'{print $2}\'').read().replace(
        '\n', '')
    free = os.popen('free|grep Mem|awk \'{print $4}\'').read().replace(
        '\n', '')
    rate = float(free) / float(total)
    # Alert as soon as the host's free-memory ratio drops below the current one.
    expression = "host.mem.util{type=\"free\"} < " + str(rate)
    monitor_trigger = mon_ops.create_monitor_trigger(host.uuid, duration,
                                                     expression)
    send_email = test_stub.create_email_media()
    media = send_email.uuid
    # Random hex suffix keeps the action name unique per run.
    trigger_action_name = "trigger" + ''.join(
        map(lambda xx: (hex(ord(xx))[2:]), os.urandom(8)))
    trigger = monitor_trigger.uuid
    receive_email = os.environ.get('receive_email')
    monitor_trigger_action = mon_ops.create_email_monitor_trigger_action(
        trigger_action_name, send_email.uuid, trigger.split(), receive_email)
    trigger_action = monitor_trigger_action.uuid
    host.password = os.environ.get('hostPassword')
    ssh_cmd = test_stub.ssh_cmd_line(host.managementIp, host.username,
                                     host.password, port=int(host.sshPort))
    test_stub.run_mem_load(ssh_cmd, 320)
    status_problem, status_ok = test_stub.query_trigger_in_loop(trigger, 80)
    # BUG FIX: the original message contained a raw (unescaped) newline inside
    # a single-quoted string literal, which is a SyntaxError; use '\n'.
    test_util.action_logger(
        'Trigger old status: %s triggered. \nTrigger new status: %s recovered'
        % (status_problem, status_ok))
    if status_problem != 1 or status_ok != 1:
        test_util.test_fail(
            '%s Monitor Test failed, expected Problem or OK status not triggered'
            % test_item)
    mail_list = test_stub.receive_email()
    keywords = "fired"
    mail_flag = test_stub.check_email(mail_list, keywords, trigger, host.uuid)
    if mail_flag == 0:
        test_util.test_fail('Failed to Get Target: %s for: %s Trigger Mail' %
                            (host.uuid, test_item))
    mon_ops.delete_monitor_trigger_action(trigger_action)
    mon_ops.delete_monitor_trigger(trigger)
    mon_ops.delete_email_media(media)
def test():
    """Create, destroy and expunge 1000 VMs, timing the create phase, then
    delete the zone."""
    os.environ['ZSTACK_THREAD_THRESHOLD'] = '1000'
    os.environ['ZSTACK_TEST_NUM'] = '1000'
    test_lib.lib_set_provision_memory_rate(20)
    test_lib.lib_set_provision_storage_rate(20)
    # NOTE(review): unlike the two calls above, this one has no test_lib.
    # prefix — confirm it is a module-level helper in this file.
    lib_set_provision_cpu_rate(20)
    Create()
    time.sleep(180)
    create_vm_begin_time = get_begin_time()
    create_vm_end_time = get_end_time()
    # BUG FIX: the original used `print("... %s") % value`, which only works
    # as a Python 2 print statement and raises TypeError under Python 3;
    # format inside the call so it works in both.
    print("begin time = %s" % create_vm_begin_time)
    print("end time = %s" % create_vm_end_time)
    # BUG FIX: initialize so test_pass below cannot hit an unbound local
    # (NameError) when either timestamp is 0.
    create_1000_vm_time = 0
    if create_vm_end_time != 0 and create_vm_begin_time != 0:
        create_1000_vm_time = create_vm_end_time - create_vm_begin_time
        test_util.test_dsc("create_vm_time is " + str(create_1000_vm_time))
    Destroy_VM()
    time.sleep(180)
    Expunge_VM()
    time.sleep(180)
    zone_name = os.environ.get('zoneName')
    zone = res_ops.get_resource(res_ops.ZONE, name=zone_name)[0]
    zone_ops.delete_zone(zone.uuid)
    test_util.test_pass('Create 1000 vms success,takes %s time'
                        % create_1000_vm_time)
def cleanup_none_vm_volumes_violently():
    """Delete the install directories of volumes not attached to any VM by
    ssh-ing to the hosting primary storage's host.

    Spawns one deletion thread per volume directory and busy-waits until all
    threads finish; the admin session is always logged out in finally.
    """
    session_uuid = acc_ops.login_as_admin()
    try:
        # Cache primary-storage-uuid -> host ip so each PS is resolved once.
        priSto_host_list = {}
        result = res_ops.get_resource(res_ops.VOLUME, session_uuid)
        for volume in result:
            # Skip volumes that were never instantiated on storage.
            if not volume.installPath:
                continue
            volume_path = os.path.dirname(volume.installPath)
            #VM volume has been cleanup in destroy_vm_and_storage_violently()
            # NOTE(review): hasattr is invoked as a method on the inventory
            # object (project-specific API), not the hasattr builtin.
            if not volume.hasattr('vmInstanceUuid'):
                pri_sto_uuid = volume.primaryStorageUuid
                if priSto_host_list.has_key(pri_sto_uuid):
                    host_ip = priSto_host_list[pri_sto_uuid]
                else:
                    #TODO: need to add multi hosts, if primary storage is local storage.
                    host = _get_host_from_primary_storage(
                        pri_sto_uuid, session_uuid)
                    host_ip = host.managementIp
                    priSto_host_list[pri_sto_uuid] = host_ip
                thread = threading.Thread(target = _delete_file, \
                        args = (host_ip, volume_path))
                thread.start()
        # Busy-wait until only the main thread remains.
        while threading.active_count() > 1:
            time.sleep(0.1)
    except Exception as e:
        test_util.test_logger("cleanup volumes violently meet exception")
        traceback.print_exc(file=sys.stdout)
        raise e
    finally:
        acc_ops.logout(session_uuid)
def test():
    """Create volume batches on two primary storages (ceph-aware variant of
    the multi-PS volume test); capacity checking is still a TODO."""
    test_util.test_dsc("Create {0} volume in the first primaryStorage".format(VOLUME_NUMBER))
    ps_env = test_stub.PSEnvChecker()
    ps_list = res_ops.get_resource(res_ops.PRIMARY_STORAGE)
    if ps_env.is_sb_ceph_env:
        # In the SharedBlock+Ceph mix, the first batch goes to a Ceph PS.
        ceph_pss = [ps for ps in ps_list if ps.type == inventory.CEPH_PRIMARY_STORAGE_TYPE]
        first_ps = random.choice(ceph_pss)
    else:
        first_ps = random.choice(ps_list)
    for vol in test_stub.create_multi_volumes(count=VOLUME_NUMBER, ps=first_ps):
        test_obj_dict.add_volume(vol)

    if len(ps_list) == 1:
        # Single-PS environment: create a second PS on the fly.
        test_util.test_dsc("Add Another primaryStorage")
        second_ps = test_stub.add_primaryStorage(first_ps=first_ps)
        new_ps_list.append(second_ps)
    else:
        others = [ps for ps in ps_list if ps.uuid != first_ps.uuid]
        second_ps = random.choice(others)

    test_util.test_dsc("Create {0} volume in the second primaryStorage".format(VOLUME_NUMBER))
    for vol in test_stub.create_multi_volumes(count=VOLUME_NUMBER, ps=second_ps):
        test_obj_dict.add_volume(vol)

    test_util.test_dsc("Create one more volume in the first primaryStorage")
    extra_volume = test_stub.create_multi_volumes(count=1, ps=first_ps)[0]
    test_obj_dict.add_volume(extra_volume)

    test_util.test_dsc("Check the capacity")
    #To do
    test_util.test_pass('Multi PrimaryStorage Test Pass')
def env_recover():
    """Restore primary storages removed by the test: recreate each deleted
    PS with its original settings and attach it to the first cluster."""
    test_lib.lib_error_cleanup(test_obj_dict)
    for delete_ps in delete_ps_list:
        # Clone the deleted storage's configuration into a fresh PS option.
        ps_config = test_util.PrimaryStorageOption()
        ps_config.set_name(delete_ps.name)
        ps_config.set_description(delete_ps.description)
        ps_config.set_zone_uuid(delete_ps.zoneUuid)
        ps_config.set_type(delete_ps.type)
        ps_config.set_url(delete_ps.url)
        if delete_ps.type == inventory.LOCAL_STORAGE_TYPE:
            new_ps = ps_ops.create_local_primary_storage(ps_config)
        elif delete_ps.type == inventory.NFS_PRIMARY_STORAGE_TYPE:
            new_ps = ps_ops.create_nfs_primary_storage(ps_config)
        elif delete_ps.type == "SharedBlock":
            # Remove the stale LVM volume group (named after the PS uuid)
            # from a host before re-creating the SharedBlock storage.
            host = random.choice(res_ops.query_resource(res_ops.HOST))
            cmd = "vgchange --lock-start %s && vgremove %s -y" % (delete_ps.uuid, delete_ps.uuid)
            host_username = os.environ.get('hostUsername')
            host_password = os.environ.get('hostPassword')
            rsp = test_lib.lib_execute_ssh_cmd(host.managementIp, host_username, host_password, cmd, 240)
            if not rsp:
                test_util.test_logger("vgremove failed")
            # NOTE(review): disk_uuid comes from outside this function —
            # presumably a module global set during the test; confirm.
            new_ps = ps_ops.create_sharedblock_primary_storage(ps_config, disk_uuid)
        else:
            # NOTE(review): unknown PS types leave new_ps as None and the
            # attach below would raise AttributeError — confirm unreachable.
            new_ps = None
        ps_ops.attach_primary_storage(new_ps.uuid, res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
def test():
    """For every combination of volume/network QoS settings, create an
    offering, attach and detach volumes on a VM using it, and also create a
    VM that starts out with a data volume."""
    qos_combos = itertools.product((None, BANDWIDTH), (None, NET_OUT), (None, NET_IN))
    for bandwidth, net_out, net_in in qos_combos:
        instance_offering = test_lib.lib_create_instance_offering(
            name='test_offering',
            volume_bandwidth=bandwidth,
            net_outbound_bandwidth=net_out,
            net_inbound_bandwidth=net_in)
        test_obj_dict.add_instance_offering(instance_offering)
        chosen_ps = random.choice(res_ops.get_resource(res_ops.PRIMARY_STORAGE))
        volume_list = test_stub.create_multi_volumes(ps=chosen_ps)
        for volume in volume_list:
            test_obj_dict.add_volume(volume)
        test_vm = test_stub.create_vm_with_random_offering(
            vm_name='test_vm',
            instance_offering_uuid=instance_offering.uuid,
            l3_name='l3VlanNetwork2',
            image_name='imageName_net')
        test_obj_dict.add_vm(test_vm)
        # Attach every volume, verify, then detach every volume and re-verify.
        for volume in volume_list:
            volume.attach(test_vm)
            volume.check()
        test_vm.check()
        for volume in volume_list:
            volume.detach()
            volume.check()
        test_vm.check()
        data_disk_uuid = random.choice(res_ops.get_resource(res_ops.DISK_OFFERING)).uuid
        test_vm_with_datavol = test_stub.create_vm_with_random_offering(
            vm_name='test_vm_datavol',
            instance_offering_uuid=instance_offering.uuid,
            disk_offering_uuids=[data_disk_uuid],
            l3_name='l3VlanNetwork2',
            image_name='imageName_net')
        test_obj_dict.add_vm(test_vm_with_datavol)
    test_util.test_pass('Volume attach on QOS vm TEST PASS')
def cleanup_none_vm_volumes_violently():
    """Remove the install directories of VM-less volumes via per-volume
    deletion threads, one ssh target per primary storage."""
    session_uuid = acc_ops.login_as_admin()
    try:
        # primary-storage-uuid -> host ip cache, one resolution per PS
        ps_to_host_ip = {}
        for volume in res_ops.get_resource(res_ops.VOLUME, session_uuid):
            if not volume.installPath:
                continue
            volume_path = os.path.dirname(volume.installPath)
            # VM volume has been cleanup in destroy_vm_and_storage_violently()
            if volume.hasattr("vmInstanceUuid"):
                continue
            ps_uuid = volume.primaryStorageUuid
            if ps_uuid in ps_to_host_ip:
                host_ip = ps_to_host_ip[ps_uuid]
            else:
                # TODO: need to add multi hosts, if primary storage is local storage.
                host = _get_host_from_primary_storage(ps_uuid, session_uuid)
                host_ip = host.managementIp
                ps_to_host_ip[ps_uuid] = host_ip
            worker = threading.Thread(target=_delete_file,
                                      args=(host_ip, volume_path))
            worker.start()
        # Wait until every deletion thread has finished.
        while threading.active_count() > 1:
            time.sleep(0.1)
    except Exception as e:
        test_util.test_logger("cleanup volumes violently meet exception")
        traceback.print_exc(file=sys.stdout)
        raise e
    finally:
        acc_ops.logout(session_uuid)
def test():
    """Add vcenter1, confirm its backup storage exists, then remove it."""
    global vcenter_uuid
    vc_name = os.environ['vcenter1_name']
    vc_domain = os.environ['vcenter1_ip']
    vc_user = os.environ['vcenter1_domain_name']
    vc_password = os.environ['vcenter1_password']

    zone_uuid = res_ops.get_resource(res_ops.ZONE)[0].uuid
    vcenter_uuid = vct_ops.add_vcenter(vc_name, vc_domain, vc_user,
                                       vc_password, True, zone_uuid).uuid
    if vcenter_uuid == None:
        test_util.test_fail("vcenter_uuid is None")

    bs_cond = res_ops.gen_query_conditions("name", '=',
                                           vcenter_backup_storage_name)
    vcbs_uuid = res_ops.query_resource_fields(res_ops.VCENTER_BACKUP_STORAGE,
                                              bs_cond, None,
                                              fields=['uuid'])[0].uuid
    if not vcbs_uuid:
        test_util.test_fail("not found vcenter backup storage")

    vct_ops.delete_vcenter(vcenter_uuid)
    test_util.test_pass("add && delete vcenter test passed.")
def create_image_template(self):
    '''
    Build a new template image from this snapshot.

    @return: zstack_test_image() object
    '''
    import zstackwoodpecker.zstack_test.zstack_test_image as \
            zstack_image_header
    # A deleted snapshot can no longer back a template.
    if self.state == sp_header.DELETED:
        test_util.test_fail(
            'Should not be called, as snapshot volume:%s has been deleted. Snapshot can not be created to a new template'
            % self.target_volume.get_volume().uuid)
    # Fill in defaults the caller did not set on the image option.
    if not self.image_option.get_root_volume_uuid():
        self.image_option.set_root_volume_uuid(self.snapshot.uuid)
    if not self.image_option.get_backup_storage_uuid_list():
        default_bs_uuid = res_ops.get_resource(res_ops.BACKUP_STORAGE)[0].uuid
        self.image_option.set_backup_storage_uuid_list([default_bs_uuid])
    image_inv = img_ops.create_template_from_snapshot(self.image_option)
    super(ZstackTestSnapshot, self).create_image_template()
    new_image = zstack_image_header.ZstackTestImage()
    new_image.set_image(image_inv)
    new_image.set_state(image_header.CREATED)
    #ROOT Volume won't create checking point. So skip.
    if self.get_volume_type() != volume_header.ROOT_VOLUME:
        new_image.set_original_checking_points(self.get_checking_points())
    return new_image
def test():
    """Delete an L3 a VM uses, verify the L3 is gone, exercise the VM's
    destroy/recover lifecycle, and confirm starting it then fails."""
    global curr_deploy_conf
    global test_obj_dict
    global l3_name
    global l3
    curr_deploy_conf = exp_ops.export_zstack_deployment_config(
        test_lib.deploy_config)
    delete_policy = test_lib.lib_set_delete_policy('vm', 'Delay')
    l3_name = os.environ.get('l3VlanNetworkName1')
    l3 = res_ops.get_resource(res_ops.L3_NETWORK, name=l3_name)[0]
    vm = test_stub.create_vlan_vm(l3_name)
    vm.check()
    test_obj_dict.add_vm(vm)
    net_ops.delete_l3(l3.uuid)
    if test_lib.lib_get_l3_by_uuid(l3.uuid):
        test_util.test_fail(
            'l3 should not be found when associated L3 is deleted')
    #vm_nic_uuid = vm.vm.vmNics[0].uuid
    #net_ops.detach_l3(vm_nic_uuid)
    vm.destroy()
    vm.set_state(vm_header.DESTROYED)
    vm.check()
    vm.recover()
    vm.set_state(vm_header.STOPPED)
    vm.check()
    test_lib.lib_set_delete_policy('vm', delete_policy)
    # Starting a VM whose only nic's L3 was deleted is expected to fail.
    # CONSISTENCY FIX: use the `except ... as e` form used elsewhere in this
    # file instead of the legacy comma syntax (also Python 3 compatible).
    try:
        vm.start()
    except Exception as e:
        #if "please attach a nic and try again" in str(e):
        test_util.test_pass('test detach l3 check vm passed.')
def test():
    """Run a stop/start/destroy/expunge lifecycle on a VM in vcenter2."""
    global vcenter_uuid, vm
    vc_name = os.environ['vcenter2_name']
    vc_domain = os.environ['vcenter2_ip']
    vc_user = os.environ['vcenter2_domain_name']
    vc_password = os.environ['vcenter2_password']
    ova_image_name = os.environ['vcenter2_template_exist']
    network_pattern1 = os.environ['vcenter2_network_pattern1']

    zone_uuid = res_ops.get_resource(res_ops.ZONE)[0].uuid
    vcenter_uuid = vct_ops.add_vcenter(vc_name, vc_domain, vc_user,
                                       vc_password, True, zone_uuid).uuid
    if vcenter_uuid == None:
        test_util.test_fail("vcenter_uuid is None")

    vm = test_stub.create_vm_in_vcenter(vm_name='vm-start-stop-test',
                                        image_name=ova_image_name,
                                        l3_name=network_pattern1)
    # Each lifecycle transition is followed by a consistency check.
    vm.check()
    vm.stop()
    vm.check()
    vm.start()
    vm.check()
    vm.destroy()
    vm.check()
    vm.expunge()

    vct_ops.delete_vcenter(vcenter_uuid)
    test_util.test_pass("vm start and stop of vcenter test passed.")
def test():
    """Create a VM whose root/data volumes are placed on local or shared PS
    according to the CASE_FLAVOR environment flavor, then run basic ops."""
    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    ps_env = test_stub.PSEnvChecker()
    local_ps, shared_ps = ps_env.get_two_ps()
    disk_offering_uuids = [random.choice(res_ops.get_resource(res_ops.DISK_OFFERING)).uuid]
    test_util.test_dsc("Create VM: {}".format(os.environ.get('CASE_FLAVOR')))
    # flavor["root_vol"] picks local vs shared PS for the root volume;
    # flavor["data_vol"] (falsy / LOCAL / MIXED / other) decides whether a
    # data volume is created and which PS its system tag points at.
    vm = test_stub.create_vm_with_random_offering(
        vm_name='test_vm',
        disk_offering_uuids=disk_offering_uuids if flavor["data_vol"] else None,
        ps_uuid=local_ps.uuid if flavor["root_vol"] is LOCAL else shared_ps.uuid,
        l3_name='l3VlanNetworkName1',
        image_name='imageName_net',
        system_tags=['primaryStorageUuidForDataVolume::{}'.format(
            local_ps.uuid if flavor["data_vol"] in (LOCAL, MIXED) else shared_ps.uuid)]
            if flavor["data_vol"] else None)
    test_obj_dict.add_vm(vm)
    vm.check()
    if flavor['data_vol'] is MIXED:
        # MIXED additionally attaches a shared-PS volume to the local-data VM.
        test_util.test_dsc("Create volume from shared_ps and attached to VM")
        volume = test_stub.create_multi_volumes(count=1, ps=shared_ps)[0]
        test_obj_dict.add_volume(volume)
        volume.attach(vm)
        vm.check()
    test_util.test_dsc("perform basic ops on vm")
    for action in ('stop', 'start', 'check', 'reboot', 'check', 'suspend',
                   'resume', 'check'):
        getattr(vm, action)()
    test_lib.lib_error_cleanup(test_obj_dict)
def test():
    """Add vcenter1 and verify its VM network appears among L3 networks."""
    global vcenter_uuid
    vc_name = os.environ['vcenter1_name']
    vc_domain = os.environ['vcenter1_ip']
    vc_user = os.environ['vcenter1_domain_name']
    vc_password = os.environ['vcenter1_password']
    expected_network = os.environ['vcenter1_network_pattern1']

    #add vcenter senario1:
    zone_uuid = res_ops.get_resource(res_ops.ZONE)[0].uuid
    vcenter_uuid = vct_ops.add_vcenter(vc_name, vc_domain, vc_user,
                                       vc_password, True, zone_uuid).uuid
    if vcenter_uuid == None:
        test_util.test_fail("vcenter_uuid is None")

    #insert the basic operations for the newly join in vcenter resourse
    network_entries = res_ops.query_resource_fields(res_ops.L3_NETWORK, [],
                                                    None, fields=['name'])
    vm_network_list = [entry.name for entry in network_entries]
    test_util.test_logger(", ".join([str(n) for n in vm_network_list]))
    if expected_network not in vm_network_list:
        test_util.test_fail("newly joined vcenter missing vm network1, test failed")

    vct_ops.delete_vcenter(vcenter_uuid)
    test_util.test_pass("add && delete vcenter test passed.")
def test():
    """Detach one of two primary storages from the cluster and verify behavior.

    Expectations after the detach:
      * every VM on the detached PS is STOPPED, VMs on the other PS keep RUNNING;
      * starting a VM on the detached PS fails;
      * creating a VM on the detached PS fails;
      * newly created VMs all land on the still-attached PS.
    """
    ps_env = test_stub.PSEnvChecker()
    # Two deployment shapes: SharedBlock+Ceph vs a generic two-PS setup.
    if ps_env.is_sb_ceph_env:
        env = test_stub.SanAndCephPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                                    first_ps_vm_number=VM_COUNT,
                                                    second_ps_vm_number=VM_COUNT,
                                                    first_ps_volume_number=VOLUME_NUMBER,
                                                    second_ps_volume_number=VOLUME_NUMBER)
    else:
        env = test_stub.TwoPrimaryStorageEnv(test_object_dict=test_obj_dict,
                                             first_ps_vm_number=VM_COUNT,
                                             second_ps_vm_number=VM_COUNT,
                                             first_ps_volume_number=VOLUME_NUMBER,
                                             second_ps_volume_number=VOLUME_NUMBER)
    env.check_env()
    env.deploy_env()
    first_ps_vm_list = env.first_ps_vm_list
    second_ps_vm_list = env.second_ps_vm_list
    # Track a PS that the env itself created so teardown elsewhere can remove it.
    if env.new_ps:
        new_ps_list.append(env.second_ps)

    test_util.test_dsc('detach random one Primary Storage from cluster')
    # Randomly pick the PS to detach; the other one stays attached.
    selected_ps = random.choice([env.first_ps, env.second_ps])
    if selected_ps is env.first_ps:
        another_ps = env.second_ps
    else:
        another_ps = env.first_ps
    ps_ops.detach_primary_storage(selected_ps.uuid,
                                  res_ops.get_resource(res_ops.CLUSTER)[0].uuid)
    # Remember it for re-attach during cleanup.
    detached_ps_list.append(selected_ps)

    test_util.test_dsc('All vm in selected ps should STOP')
    # Refresh cached inventories before asserting on VM state.
    for vm in first_ps_vm_list + second_ps_vm_list:
        vm.update()
    for vm in env.get_vm_list_from_ps(selected_ps):
        assert vm.get_vm().state == inventory.STOPPED
    for vm in env.get_vm_list_from_ps(another_ps):
        assert vm.get_vm().state == inventory.RUNNING

    # Operations against the detached PS must fail.
    with test_stub.expected_failure('start vm in ps that not attached to cluster',
                                    Exception):
        random.choice(env.get_vm_list_from_ps(selected_ps)).start()
    with test_stub.expected_failure("Create vm in detached ps", Exception):
        test_stub.create_multi_vms(name_prefix='test-vm', count=1,
                                   ps_uuid=selected_ps.uuid)

    test_util.test_dsc("Create 5 vms and check all should be in enabled PS")
    # In the sb/ceph env the backup-storage type must match the surviving PS.
    if ps_env.is_sb_ceph_env:
        if selected_ps.type == "SharedBlock":
            vm_list = test_stub.create_multi_vms(name_prefix='test_vm', count=5,
                                                 bs_type='Ceph')
        else:
            vm_list = test_stub.create_multi_vms(name_prefix='test_vm', count=5,
                                                 bs_type='ImageStoreBackupStorage')
    else:
        vm_list = test_stub.create_multi_vms(name_prefix='test_vm', count=5)
    for vm in vm_list:
        test_obj_dict.add_vm(vm)
    # All new root volumes must live on the PS that is still attached.
    for vm in vm_list:
        assert vm.get_vm().allVolumes[0].primaryStorageUuid == another_ps.uuid
    test_util.test_pass('Multi PrimaryStorage Test Pass')
def test():
    """Verify the VM console address switches after tagging a display CIDR.

    Records the console host IP before and after creating the cluster-level
    display-network system tag; both the console-address query and the
    console-access request must then report a different IP.
    """
    global vm
    global tag_uuid

    test_stub.skip_if_scenario_not_multiple_networks()

    vm = test_stub.create_basic_vm()
    vm.check()
    vm_uuid = vm.get_vm().uuid

    # Console IP before the display-network tag exists.
    before = test_lib.lib_get_vm_console_address(vm_uuid)
    console_management_ip = before.hostIp

    # Tag the cluster with a display-network CIDR, which should re-route
    # console traffic onto that network.
    cluster = res_ops.get_resource(res_ops.CLUSTER)[0]
    tag_uuid = tag_ops.create_system_tag(
        'ClusterVO', cluster.uuid,
        "display::network::cidr::172.20.0.0/16").uuid

    after = test_lib.lib_get_vm_console_address(vm_uuid)
    console_display_ip = after.hostIp
    if console_management_ip == console_display_ip:
        test_util.test_fail("console ip has not been switched as expected: %s and %s" %(console_management_ip, console_display_ip))

    # The console-access request must also report the switched IP.
    access = test_lib.lib_request_console_access(vm_uuid)
    console_req_ip = access.hostname
    if console_management_ip == console_req_ip:
        test_util.test_fail("console ip has not been switched as expected: %s and %s" %(console_management_ip, console_req_ip))

    test_util.test_pass('Create VM Test Success')
def test():
    """Create 1000 VMs, measure how long creation took, then tear down.

    Raises provision over-commit rates so the environment can host the load,
    creates the VMs, computes the create duration from the recorded begin/end
    timestamps, then destroys, expunges and deletes the zone.
    """
    os.environ['ZSTACK_THREAD_THRESHOLD'] = '1000'
    os.environ['ZSTACK_TEST_NUM'] = '1000'

    test_lib.lib_set_provision_memory_rate(20)
    test_lib.lib_set_provision_storage_rate(20)
    # BUG FIX: this call was missing the test_lib. prefix (NameError).
    test_lib.lib_set_provision_cpu_rate(20)

    Create()
    time.sleep(180)

    create_vm_begin_time = get_begin_time()
    create_vm_end_time = get_end_time()
    # Works under both Python 2 and 3 (the original "print(...) % x" form
    # only parsed as intended as a Python 2 print statement).
    print("begin time = %s" % create_vm_begin_time)
    print("end time = %s" % create_vm_end_time)

    # BUG FIX: initialize so the final test_pass below cannot hit a
    # NameError when either timestamp is 0.
    create_1000_vm_time = 0
    if create_vm_end_time != 0 and create_vm_begin_time != 0:
        create_1000_vm_time = create_vm_end_time - create_vm_begin_time
        test_util.test_dsc("create_vm_time is " + str(create_1000_vm_time))

    Destroy_VM()
    time.sleep(180)
    Expunge_VM()
    time.sleep(180)

    zone_name = os.environ.get('zoneName')
    zone = res_ops.get_resource(res_ops.ZONE, name=zone_name)[0]
    zone_ops.delete_zone(zone.uuid)
    test_util.test_pass('Create 1000 vms success,takes %s time' % create_1000_vm_time)
def add_zone_resource(deploy_config, zone_name):
    """Deploy a full set of resources for one zone from a deploy config.

    Adds the zone itself, then its L2 networks, primary storage, cluster,
    hosts, L3 networks and virtual router, all under an admin session that
    is always logged out afterwards. Re-raises any deployment exception
    after logging it.
    """
    session_uuid = acc_ops.login_as_admin()
    try:
        dep_ops.add_zone(deploy_config, session_uuid, zone_name=zone_name)
        # The remaining resources share an identical call signature, so
        # drive them through one loop in the required deployment order.
        for add_resource in (dep_ops.add_l2_network,
                             dep_ops.add_primary_storage,
                             dep_ops.add_cluster,
                             dep_ops.add_host,
                             dep_ops.add_l3_network,
                             dep_ops.add_virtual_router):
            add_resource(deploy_config, session_uuid, zone_name=zone_name)
        zone = res_ops.get_resource(res_ops.ZONE, session_uuid,
                                    name=zone_name)[0]
    except Exception as e:
        test_util.test_logger(
            '[Error] zstack deployment meets exception when adding zone resource .'
        )
        traceback.print_exc(file=sys.stdout)
        raise e
    finally:
        # Always release the admin session, success or failure.
        acc_ops.logout(session_uuid)

    test_util.action_logger('Complete add zone resources for [uuid:] %s' \
            % zone.uuid)
def test():
    """End-to-end check of the host.network.io monitor trigger with email action.

    Creates an rx-throughput trigger on the first host, drives network load
    over ssh to fire it, then verifies both the trigger state transitions
    (problem -> ok) and that the alert email was delivered, before cleaning
    up the trigger, action and media.
    """
    # Globals so the suite-level error cleanup can delete these on failure.
    global trigger
    global media
    global trigger_action

    test_item = "host.network.io"
    resource_type = "HostVO"
    # Ensure the metric is actually exposed for hosts before testing it.
    vm_monitor_item = test_stub.get_monitor_item(resource_type)
    if test_item not in vm_monitor_item:
        test_util.test_fail('%s is not available for monitor' % test_item)

    hosts = res_ops.get_resource(res_ops.HOST)
    host = hosts[0]
    duration = 300
    # Fire when received network IO exceeds 200 for the duration window.
    expression = "host.network.io{direction=\"rx\"} > 200"
    monitor_trigger = mon_ops.create_monitor_trigger(host.uuid, duration,
                                                     expression)

    send_email = test_stub.create_email_media()
    media = send_email.uuid
    # Random hex suffix keeps the action name unique across runs.
    trigger_action_name = "trigger_" + ''.join(
        map(lambda xx: (hex(ord(xx))[2:]), os.urandom(8)))
    trigger = monitor_trigger.uuid
    receive_email = os.environ.get('receive_email')
    monitor_trigger_action = mon_ops.create_email_monitor_trigger_action(
        trigger_action_name, send_email.uuid, trigger.split(), receive_email)
    trigger_action = monitor_trigger_action.uuid

    # Generate rx traffic on the host over ssh to push the metric over the
    # threshold; run it in a thread so we can bound it with sleep + kill.
    host.password = os.environ.get('hostPassword')
    ssh_cmd = test_stub.ssh_cmd_line(host.managementIp, host.username,
                                     host.password, port=int(host.sshPort))
    test_stub.yum_install_stress_tool(ssh_cmd)
    t = threading.Thread(target=test_stub.run_network_rx, args=(ssh_cmd, ))
    t.start()
    # Longer than the 300s trigger duration so both the problem state and
    # the recovery back to ok can be observed.
    time.sleep(320)
    test_stub.kill(ssh_cmd)

    # Expect exactly one problem event and one recovery event.
    status_problem, status_ok = test_stub.query_trigger_in_loop(trigger, 50)
    test_util.action_logger(
        'Trigger old status: %s triggered. Trigger new status: %s recovered' %
        (status_problem, status_ok))
    if status_problem != 1 or status_ok != 1:
        test_util.test_fail(
            '%s Monitor Test failed, expected Problem or OK status not triggered'
            % test_item)

    # The alert mail must reference this trigger and host.
    mail_list = test_stub.receive_email()
    keywords = "fired"
    mail_flag = test_stub.check_email(mail_list, keywords, trigger, host.uuid)
    if mail_flag == 0:
        test_util.test_fail('Failed to Get Target: %s for: %s Trigger Mail' %
                            (host.uuid, test_item))

    # Cleanup in reverse creation order.
    mon_ops.delete_monitor_trigger_action(trigger_action)
    mon_ops.delete_monitor_trigger(trigger)
    mon_ops.delete_email_media(media)