def __init__(self, get_host_con = None, justify_con = None):
    self.exc_info = []
    self.hosts = []
    self.i = 0
    self.session_uuid = None
    self.session_to = None
    self.session_mc = None
    self.host_num = os.environ.get('ZSTACK_TEST_NUM')
    self.thread_threshold = os.environ.get('ZSTACK_THREAD_THRESHOLD')
    self.get_host_con = get_host_con
    self.justify_con = justify_con

    if not self.host_num:
        self.host_num = 0
    else:
        self.host_num = int(self.host_num)

    if not self.thread_threshold:
        self.thread_threshold = 1000
    else:
        self.thread_threshold = int(self.thread_threshold)

    self.hosts = res_ops.query_resource(res_ops.HOST, self.get_host_con)
    if self.host_num > len(self.hosts):
        self.host_num = len(self.hosts)
        test_util.test_warn('ZSTACK_TEST_NUM is forcibly set as %d\n' % len(self.hosts))

    # Log in first, so the admin session can be used to relax the session limits.
    self.session_uuid = acc_ops.login_as_admin()
    self.session_to = con_ops.change_global_config('identity', 'session.timeout', \
            '720000', self.session_uuid)
    self.session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', \
            '10000', self.session_uuid)
def check(self):
    super(zstack_kvm_sg_db_exist_checker, self).check()
    sg_list = self.test_obj.get_sg_list_by_nic(self.nic_uuid)
    if not sg_list:
        conditions = res_ops.gen_query_conditions('vmNicUuid', '=', self.nic_uuid)
        nic_sg = res_ops.query_resource(res_ops.VM_SECURITY_GROUP, conditions)
        if not nic_sg:
            test_util.test_logger('Check result: No [Security Group] is found in database for [nic:] %s.' % self.nic_uuid)
            return self.judge(False)
        else:
            test_util.test_warn('Check result: [Security Group] is found in database for [nic:] %s. It is not consistent with test_sg record.' % self.nic_uuid)
            return self.judge(True)

    for test_sg in sg_list:
        try:
            conditions = res_ops.gen_query_conditions('uuid', '=', test_sg.security_group.uuid)
            sg = res_ops.query_resource(res_ops.SECURITY_GROUP, conditions)[0]
        except Exception as e:
            traceback.print_exc(file=sys.stdout)
            test_util.test_logger('Check result: [Security Group Inventory uuid:] %s does not exist in database.' % test_sg.security_group.uuid)
            return self.judge(False)
        test_util.test_logger('Check result: [SecurityGroup Inventory uuid:] %s exist in database.' % sg.uuid)

    return self.judge(True)
def check(self):
    super(zstack_kvm_vm_network_checker, self).check()
    vm = self.test_obj.vm
    host = test_lib.lib_get_vm_host(vm)
    test_lib.lib_install_testagent_to_host(host)
    test_lib.lib_set_vm_host_l2_ip(vm)

    vr_vms = test_lib.lib_find_vr_by_vm(vm)
    if not vr_vms:
        test_util.test_warn('No Virtual Router was found for VM: %s. Skip testing.' % vm.uuid)
        return self.judge(self.exp_result)

    for vr_vm in vr_vms:
        nic = test_lib.lib_get_vm_nic_by_vr(vm, vr_vm)
        if not 'DHCP' in test_lib.lib_get_l3_service_type(nic.l3NetworkUuid):
            test_util.test_logger("Skip [VR:] %s, since it doesn't provide DHCP service" % vr_vm.uuid)
            continue
        guest_ip = nic.ip
        command = 'ping -c 5 -W 5 %s >/tmp/ping_result 2>&1; ret=$?; cat /tmp/ping_result; exit $ret' % guest_ip
        if not test_lib.lib_execute_sh_cmd_by_agent_with_retry(host.managementIp, command, self.exp_result):
            test_util.test_logger('Checker result: FAIL to ping [target:] %s [ip:] %s from [host:] %s' % (vm.uuid, guest_ip, host.uuid))
            if self.exp_result == True:
                test_util.test_logger("network connection result is not expected pass, will print VR's network configuration:")
                test_lib.lib_print_vr_network_conf(vr_vm)
            return self.judge(False)
        else:
            test_util.test_logger('Checker result: SUCCESSFULLY ping [target:] %s [ip:] %s from [host:] %s' % (vm.uuid, guest_ip, host.uuid))

    test_util.test_logger("Checker result: ping target [vm:] %s from [host:] %s SUCCESS" % (vm.uuid, host.uuid))
    return self.judge(True)
def add_root_volume_template(image_creation_option):
    '''
    Add root volume template
    '''
    action = api_actions.AddImageAction()
    action.name = image_creation_option.get_name()
    action.guest_os_type = image_creation_option.get_guest_os_type()
    action.mediaType = 'RootVolumeTemplate'
    if image_creation_option.get_mediaType() and \
            action.mediaType != image_creation_option.get_mediaType():
        test_util.test_warn('image type %s was not %s' % \
                (image_creation_option.get_mediaType(), action.mediaType))

    action.backupStorageUuids = \
            image_creation_option.get_backup_storage_uuid_list()
    action.bits = image_creation_option.get_bits()
    action.description = image_creation_option.get_description()
    action.format = image_creation_option.get_format()
    if image_creation_option.get_system_tags() != None:
        action.systemTags = image_creation_option.get_system_tags().split(',')
    action.url = image_creation_option.get_url()
    action.timeout = image_creation_option.get_timeout()

    test_util.action_logger('Add Root Volume Template from url: %s in [backup Storage:] %s' % (action.url, action.backupStorageUuids))
    evt = account_operations.execute_action_with_session(action, \
            image_creation_option.get_session_uuid())
    return evt.inventory
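# A hedged usage sketch for add_root_volume_template(). The ImageOption helper
# and its set_* methods are assumptions inferred from the get_* accessors used
# above, not a confirmed API; bs_uuid and session_uuid are placeholders:
#
#   img_opt = test_util.ImageOption()
#   img_opt.set_name('test-root-template')
#   img_opt.set_format('qcow2')
#   img_opt.set_mediaType('RootVolumeTemplate')
#   img_opt.set_backup_storage_uuid_list([bs_uuid])
#   img_opt.set_url('http://example.com/images/test.qcow2')
#   img_opt.set_session_uuid(session_uuid)
#   image_inv = add_root_volume_template(img_opt)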
def test():
    global vm
    global schd
    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    start_date = int(time.time())
    test_util.test_logger('Setup stop and start VM scheduler')
    schd = vm_ops.start_vm_scheduler(vm.get_vm().uuid, 'simple', 'simple_start_vm_scheduler', 0, 1)
    actual_startDate = time.mktime(time.strptime(schd.startTime, '%b %d, %Y %I:%M:%S %p'))
    if actual_startDate != start_date and actual_startDate != start_date + 1:
        test_util.test_fail('startDate is expected to set to now, which should be around %s' % (start_date))

    test_stub.sleep_util(start_date+58)

    start_msg_mismatch = 0
    for i in range(1, 58):
        if not test_lib.lib_find_in_local_management_server_log(start_date+i, '[msg send]: {"org.zstack.header.vm.StartVmInstanceMsg', vm.get_vm().uuid):
            start_msg_mismatch += 1
            test_util.test_warn('StartVmInstanceMsg is expected to execute at %s' % (start_date+i))

    if start_msg_mismatch > 5:
        test_util.test_fail('%s of 58 StartVmInstanceMsg not executed at expected timestamp' % (start_msg_mismatch))

    schd_ops.delete_scheduler(schd.uuid)

    try:
        vm.destroy()
    except:
        test_util.test_logger('expected exception when destroy VM since too many queued task')

    test_util.test_pass('Create Simple VM Start Scheduler Success')
def check(self):
    super(zstack_share_volume_attach_db_checker, self).check()
    volume = self.test_obj.volume
    try:
        sv_cond = res_ops.gen_query_conditions("volumeUuid", '=', volume.uuid)
        share_volume_vm_uuids = res_ops.query_resource_fields(res_ops.SHARE_VOLUME, sv_cond, None, fields=['vmInstanceUuid'])
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
        test_util.test_logger('Check result: [volumeInventory uuid:] %s does not exist in database.' % self.test_obj.volume.uuid)
        return self.judge(False)

    if not share_volume_vm_uuids:
        #update self.test_obj, due to vm destroyed.
        if self.test_obj.target_vm.state == vm_header.DESTROYED or \
                self.test_obj.target_vm.state == vm_header.EXPUNGED:
            test_util.test_warn('Update test [volume:] %s state, since attached VM was destroyed.' % volume.uuid)
            self.test_obj.update()
        else:
            test_util.test_warn('Check warn: [volume:] %s state is not aligned with DB. DB did not record any attached VM, but test volume has attached vm record: %s.' % (volume.uuid, volume.vmInstanceUuid))
        test_util.test_logger('Check result: [volume:] %s does NOT have vmInstanceUuid in Database. It is not attached to any vm.' % volume.uuid)
        return self.judge(False)

    if not self.test_obj.target_vm:
        test_util.test_logger('Check result: test [volume:] %s does NOT have vmInstance record in test structure. Can not do further checking.' % volume.uuid)
        return self.judge(False)

    vm = self.test_obj.target_vm.vm
    # query_resource_fields() returns inventory objects, so compare against
    # their vmInstanceUuid values rather than the objects themselves.
    attached_vm_uuids = [record.vmInstanceUuid for record in share_volume_vm_uuids]
    if vm.uuid in attached_vm_uuids:
        test_util.test_logger('Check result: [volume:] %s is attached to [vm:] %s in zstack database.' % (volume.uuid, vm.uuid))
        return self.judge(True)
    else:
        test_util.test_logger('Check result: [volume:] %s is NOT attached to [vm:] %s in zstack database.' % (volume.uuid, vm.uuid))
        return self.judge(False)
def check(self):
    super(zstack_kvm_vm_snat_checker, self).check()
    vm = self.test_obj.vm
    test_lib.lib_install_testagent_to_vr(vm)
    host = test_lib.lib_get_vm_host(vm)
    vm_cmd_result = None
    vr_vms = test_lib.lib_find_vr_by_vm(vm)
    test_lib.lib_set_vm_host_l2_ip(vm)

    for vr_vm in vr_vms:
        test_util.test_logger("Begin to check [vm:] %s SNAT" % vm.uuid)
        nic = test_lib.lib_get_vm_nic_by_vr(vm, vr_vm)
        if not 'SNAT' in test_lib.lib_get_l3_service_type(nic.l3NetworkUuid):
            test_util.test_logger("Skip [VR:] %s, since it doesn't provide SNAT service" % vr_vm.uuid)
            continue

        ping_target = test_lib.test_config.pingTestTarget.text_

        #Check if there is a SG rule to block ICMP checking
        if test_lib.lib_is_sg_rule_exist(nic.uuid, None, None, inventory.EGRESS):
            if not test_lib.lib_is_sg_rule_exist(nic.uuid, inventory.ICMP, ping_target, inventory.EGRESS):
                test_util.test_warn('Skip SNAT checker: because there is ICMP Egress Rule was assigned to [nic:] %s and the allowed target ip is not %s' % (nic.uuid, ping_target))
                return self.judge(self.exp_result)

        guest_ip = nic.ip
        vm_command = 'ping -c 5 -W 5 %s >/tmp/ping_result 2>&1; ret=$?; cat /tmp/ping_result; exit $ret' % ping_target
        vm_cmd_result = test_lib.lib_ssh_vm_cmd_by_agent_with_retry(host.managementIp, nic.ip, test_lib.lib_get_vm_username(vm), test_lib.lib_get_vm_password(vm), vm_command, self.exp_result)
        if not vm_cmd_result:
            test_util.test_logger('Checker result: FAIL to ping [target:] %s from [vm:] %s .' % (ping_target, vm.uuid))
            if self.exp_result == True:
                test_util.test_logger("network connection result is not expected pass, will print VR's network configuration:")
                test_lib.lib_print_vr_network_conf(vr_vm)
            return self.judge(False)
        else:
            test_util.test_logger('Checker result: SUCCEED to ping [target:] %s from [vm:] %s .' % (ping_target, vm.uuid))

    return self.judge(True)
def check(self):
    if not self.checker_chain:
        test_util.test_warn('Did not find any checker!')
        return

    for checker in self.checker_chain:
        checker.check()
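# A hedged usage sketch for the checker chain above. The CheckerChain class
# name and the add_checker() signature are assumptions drawn from the
# surrounding checker code, not a confirmed API:
#
#   chain = CheckerChain()
#   chain.add_checker(zstack_kvm_vm_network_checker(), True, test_vm)
#   chain.add_checker(zstack_kvm_vm_snat_checker(), True, test_vm)
#   chain.check()   # runs every registered checker in registration order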
def error_cleanup():
    global image_obj
    test_lib.lib_error_cleanup(test_obj_dict)
    try:
        image_obj.add_root_volume_template()
    except Exception as e:
        test_util.test_warn('meet exception when try to recover image template')
        raise e
def check(self):
    super(zstack_volume_attach_db_checker, self).check()
    volume = self.test_obj.volume
    if not volume.vmInstanceUuid:
        test_util.test_logger("Check result: test [volume:] %s does NOT record any vmInstanceUuid. It is not attached to any vm yet." % volume.uuid)
        return self.judge(False)

    try:
        conditions = res_ops.gen_query_conditions("uuid", "=", self.test_obj.volume.uuid)
        db_volume = res_ops.query_resource(res_ops.VOLUME, conditions)[0]
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
        test_util.test_logger("Check result: [volumeInventory uuid:] %s does not exist in database." % self.test_obj.volume.uuid)
        return self.judge(False)

    if not db_volume.vmInstanceUuid:
        # update self.test_obj, due to vm destroyed.
        if (self.test_obj.target_vm.state == vm_header.DESTROYED
                or self.test_obj.target_vm.state == vm_header.EXPUNGED):
            test_util.test_warn("Update test [volume:] %s state, since attached VM was destroyed." % volume.uuid)
            self.test_obj.update()
        else:
            test_util.test_warn("Check warn: [volume:] %s state is not aligned with DB. DB did not record any attached VM, but test volume has attached vm record: %s." % (volume.uuid, volume.vmInstanceUuid))
        test_util.test_logger("Check result: [volume:] %s does NOT have vmInstanceUuid in Database. It is not attached to any vm." % volume.uuid)
        return self.judge(False)

    if not self.test_obj.target_vm:
        test_util.test_logger("Check result: test [volume:] %s does NOT have vmInstance record in test structure. Can not do further checking." % volume.uuid)
        return self.judge(False)

    vm = self.test_obj.target_vm.vm
    if db_volume.vmInstanceUuid == vm.uuid:
        test_util.test_logger("Check result: [volume:] %s is attached to [vm:] %s in zstack database." % (volume.uuid, vm.uuid))
        return self.judge(True)
    else:
        test_util.test_logger("Check result: [volume:] %s is NOT attached to [vm:] %s in zstack database." % (volume.uuid, vm.uuid))
        return self.judge(False)
def error_cleanup():
    global host_config
    test_lib.lib_error_cleanup(test_obj_dict)
    if not host1:
        try:
            host_ops.add_kvm_host(host_config)
        except Exception as e:
            test_util.test_warn('Fail to recover all [host:] %s resource. It will impact later test case.' % host1_name)
            raise e
def error_cleanup():
    global ir_option
    test_lib.lib_error_cleanup(test_obj_dict)
    ir = res_ops.get_resource(res_ops.IP_RANGE, name = ir1_name)
    if not ir:
        try:
            net_ops.add_ip_range(ir_option)
        except Exception as e:
            test_util.test_warn('Fail to recover [ip range:] %s resource. It will impact later test case.' % ir1_name)
            raise e
def error_cleanup():
    global curr_deploy_conf
    test_lib.lib_error_cleanup(test_obj_dict)
    # get_resource() returns a list; indexing it would raise IndexError when the
    # l3 network is already gone, so check the list itself.
    l3_2 = res_ops.get_resource(res_ops.L3_NETWORK, name = l3_name2)
    if not l3_2:
        try:
            net_ops.add_l3_resource(curr_deploy_conf, l3_name = l3_name2)
        except Exception as e:
            test_util.test_warn('Fail to recover [l3:] %s resource. It will impact later test case.' % l3_name2)
            raise e
def test():
    global vm
    global schds
    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    start_date = int(time.time())
    test_util.test_logger('Setup start VM scheduler')

    for ops_id in range(1000):
        thread = threading.Thread(target=create_start_vm_scheduler, args=(vm.get_vm().uuid, start_date, ops_id, ))
        while threading.active_count() > 10:
            time.sleep(0.5)
        exc = sys.exc_info()
        thread.start()

    while threading.activeCount() > 1:
        exc = sys.exc_info()
        time.sleep(0.1)

    test_stub.sleep_util(start_date+200)

    start_msg_mismatch = 0
    for i in range(0, 100):
        if not test_lib.lib_find_in_local_management_server_log(start_date+100+i, '[msg send]: org.zstack.header.vm.StartVmInstanceMsg {"org.zstack.header.vm.StartVmInstanceMsg', vm.get_vm().uuid):
            start_msg_mismatch += 1
            test_util.test_warn('StartVmInstanceMsg is expected to execute at %s' % (start_date+100+i))

    if start_msg_mismatch > 5:
        test_util.test_fail('%s of 58 StartVmInstanceMsg not executed at expected timestamp' % (start_msg_mismatch))

    for schd_job in schd_jobs:
        thread = threading.Thread(target=delete_scheduler_job, args=(schd_job.uuid, ))
        while threading.active_count() > 10:
            time.sleep(0.5)
        exc = sys.exc_info()
        thread.start()

    while threading.activeCount() > 1:
        exc = sys.exc_info()
        time.sleep(0.1)

    for schd_trigger in schd_triggers:
        thread = threading.Thread(target=delete_scheduler_trigger, args=(schd_trigger.uuid, ))
        while threading.active_count() > 10:
            time.sleep(0.5)
        exc = sys.exc_info()
        thread.start()

    while threading.activeCount() > 1:
        exc = sys.exc_info()
        time.sleep(0.1)

    try:
        vm.destroy()
    except:
        test_util.test_logger('expected exception when destroy VM since too many queued task')

    test_util.test_pass('Create 1000 Simple VM Start Scheduler Success')
def error_cleanup():
    global host_config
    test_lib.lib_error_cleanup(test_obj_dict)
    host1_name = os.environ.get('hostName')
    # get_resource() returns a list; indexing it would raise IndexError when the
    # host has already been deleted, so check the list itself.
    host1 = res_ops.get_resource(res_ops.HOST, name = host1_name)
    if not host1:
        try:
            host_ops.add_kvm_host(host_config)
        except Exception as e:
            test_util.test_warn('Fail to recover all [host:] %s resource. It will impact later test case.' % host1_name)
            raise e
def error_cleanup():
    global curr_deploy_conf
    zone1 = res_ops.get_resource(res_ops.ZONE, name = zone1_name)
    if not zone1:
        try:
            zone_ops.add_zone_resource(curr_deploy_conf, zone1_name)
        except Exception as e:
            test_util.test_warn('Fail to recover all [zone:] %s resource. It will impact later test case.' % zone1_name)
            raise e
    test_lib.lib_error_cleanup(test_obj_dict)
def error_cleanup():
    global curr_deploy_conf
    cluster1 = res_ops.get_resource(res_ops.CLUSTER, name = cluster1_name)
    if not cluster1:
        try:
            cluster_ops.add_cluster_resource(curr_deploy_conf, cluster1_name)
        except Exception as e:
            test_util.test_warn('Fail to recover all [cluster:] %s resource. It will impact later test case.' % cluster1_name)
            raise e
    test_lib.lib_error_cleanup(test_obj_dict)
def check(self):
    super(zstack_volume_attach_db_checker, self).check()
    volume = self.test_obj.volume
    if not volume.vmInstanceUuid:
        test_util.test_logger('Check result: test [volume:] %s does NOT record any vmInstanceUuid. It is not attached to any vm yet.' % volume.uuid)
        return self.judge(False)

    try:
        conditions = res_ops.gen_query_conditions('uuid', '=', self.test_obj.volume.uuid)
        db_volume = res_ops.query_resource(res_ops.VOLUME, conditions)[0]
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
        test_util.test_logger('Check result: [volumeInventory uuid:] %s does not exist in database.' % self.test_obj.volume.uuid)
        return self.judge(False)

    if not db_volume.vmInstanceUuid:
        #update self.test_obj, due to vm destroyed.
        if self.test_obj.target_vm.state == vm_header.DESTROYED:
            test_util.test_warn('Update test [volume:] %s state, since attached VM was destroyed.' % volume.uuid)
            self.test_obj.update()
        else:
            test_util.test_warn('Check warn: [volume:] %s state is not aligned with DB. DB did not record any attached VM, but test volume has attached vm record: %s.' % (volume.uuid, volume.vmInstanceUuid))
        test_util.test_logger('Check result: [volume:] %s does NOT have vmInstanceUuid in Database. It is not attached to any vm.' % volume.uuid)
        return self.judge(False)

    if not self.test_obj.target_vm:
        test_util.test_logger('Check result: test [volume:] %s does NOT have vmInstance record in test structure. Can not do further checking.' % volume.uuid)
        return self.judge(False)

    vm = self.test_obj.target_vm.vm
    if db_volume.vmInstanceUuid == vm.uuid:
        test_util.test_logger('Check result: [volume:] %s is attached to [vm:] %s in zstack database.' % (volume.uuid, vm.uuid))
        return self.judge(True)
    else:
        test_util.test_logger('Check result: [volume:] %s is NOT attached to [vm:] %s in zstack database.' % (volume.uuid, vm.uuid))
        return self.judge(False)
def error_cleanup():
    global curr_deploy_conf
    global l3_name
    global l3
    test_lib.lib_error_cleanup(test_obj_dict)
    l3 = res_ops.get_resource(res_ops.L3_NETWORK, name = l3_name)
    if not l3:
        try:
            net_ops.add_l3_resource(curr_deploy_conf, l3_name)
        except Exception as e:
            test_util.test_warn('Fail to recover [l3:] %s resource. It will impact later test case.' % l3_name)
            raise e
def error_cleanup():
    global ir_option
    test_lib.lib_error_cleanup(test_obj_dict)
    ir = res_ops.get_resource(res_ops.IP_RANGE, name=ir1_name)
    if not ir:
        try:
            net_ops.add_ip_range(ir_option)
        except Exception as e:
            test_util.test_warn('Fail to recover [ip range:] %s resource. It will impact later test case.' % ir1_name)
            raise e
def error_cleanup():
    global host_config
    test_lib.lib_error_cleanup(test_obj_dict)
    host1 = res_ops.get_resource(res_ops.HOST, name=host1_name)
    if not host1:
        try:
            host_ops.add_kvm_host(host_config)
        except Exception as e:
            test_util.test_warn('Fail to recover all [host:] %s resource. It will impact later test case.' % host1_name)
            raise e
def test():
    global vm
    global schds
    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    start_date = int(time.time())
    test_util.test_logger('Setup start VM scheduler')

    for ops_id in range(1000):
        thread = threading.Thread(target=create_start_vm_scheduler, args=(vm.get_vm().uuid, start_date, ops_id, ))
        while threading.active_count() > 10:
            time.sleep(0.5)
        exc = sys.exc_info()
        thread.start()

    while threading.activeCount() > 1:
        exc = sys.exc_info()
        time.sleep(0.1)

    test_stub.sleep_util(start_date+200)

    start_msg_mismatch = 0
    for i in range(0, 99):
        if test_lib.lib_find_in_local_management_server_log(start_date+i, '[msg send]: {"org.zstack.header.vm.StartVmInstanceMsg', vm.get_vm().uuid):
            start_msg_mismatch += 1
            test_util.test_warn('StartVmInstanceMsg is not expected to execute at %s' % (start_date+i))

    if start_msg_mismatch > 5:
        test_util.test_fail('%s of 58 StartVmInstanceMsg not executed at expected timestamp' % (start_msg_mismatch))

    count0 = test_lib.lib_count_in_local_management_server_log(start_date+100, '[msg send]: {"org.zstack.header.vm.StartVmInstanceMsg', vm.get_vm().uuid)
    test_util.test_logger('%s of 1000 scheduler executed at the same second' % count0)
    count1 = test_lib.lib_count_in_local_management_server_log(start_date+100+1, '[msg send]: {"org.zstack.header.vm.StartVmInstanceMsg', vm.get_vm().uuid)
    test_util.test_logger('%s of 1000 scheduler executed at the next second' % count1)
    count2 = test_lib.lib_count_in_local_management_server_log(start_date+100+2, '[msg send]: {"org.zstack.header.vm.StartVmInstanceMsg', vm.get_vm().uuid)
    test_util.test_logger('%s of 1000 scheduler executed at the third second' % count2)
    if count0 + count1 + count2 < 900:
        test_util.test_fail('only %s of 1000 scheduler executed at the specified first 3 seconds' % (count0 + count1 + count2))

    for schd in schds:
        thread = threading.Thread(target=delete_start_vm_scheduler, args=(schd.uuid, ))
        while threading.active_count() > 10:
            time.sleep(0.5)
        exc = sys.exc_info()
        thread.start()

    while threading.activeCount() > 1:
        exc = sys.exc_info()
        time.sleep(0.1)

    try:
        vm.destroy()
    except:
        test_util.test_logger('expected exception when destroy VM since too many queued task')

    test_util.test_pass('Create Simple VM Stop Start Scheduler Success')
def check(self):
    super(zstack_kvm_sg_tcp_ingress_checker, self).check()
    all_ports = port_header.all_ports
    test_result = True
    test_util.test_dsc('Check TCP ingress rules')
    nic = test_lib.lib_get_nic_by_uuid(self.nic_uuid)
    l3_uuid = nic.l3NetworkUuid
    if not 'DHCP' in test_lib.lib_get_l3_service_type(l3_uuid):
        test_util.test_logger("Skip SG test for [l3:] %s. Since it doesn't provide DHCP service, there isn't stable IP address for testing." % l3_uuid)
        return self.judge(self.exp_result)

    stub_vm = self.test_obj.get_stub_vm(l3_uuid)
    if not stub_vm:
        test_util.test_warn('Did not find test stub vm for [nic:] %s. Skip TCP ingress port checking for this nic.' % self.nic_uuid)
        return self.judge(self.exp_result)

    stub_vm = stub_vm.vm
    stub_vm_ip = test_lib.lib_get_vm_nic_by_l3(stub_vm, l3_uuid).ip
    target_addr = '%s/32' % stub_vm_ip
    rules = self.test_obj.get_nic_tcp_ingress_rule_by_addr(self.nic_uuid, target_addr)

    allowed_ports = []
    for rule in rules:
        rule_allowed_ports = port_header.get_ports(port_header.get_port_rule(rule.startPort))
        test_util.test_logger('[SG:] %s [ingress rule]: %s allow to access [nic:] %s [ports]: %s from [vm:] %s' % (rule.securityGroupUuid, rule.uuid, self.nic_uuid, rule_allowed_ports, stub_vm.uuid))
        for port in rule_allowed_ports:
            if not port in allowed_ports:
                allowed_ports.append(port)

    if not allowed_ports:
        #If no allowed port, it means all denied.
        denied_ports = list(all_ports)
    else:
        denied_ports = list_ops.list_minus(all_ports, allowed_ports)

    test_vm = test_lib.lib_get_vm_by_nic(nic.uuid)
    if test_vm.state == inventory.RUNNING:
        try:
            test_lib.lib_open_vm_listen_ports(test_vm, all_ports, l3_uuid)
            test_lib.lib_check_vm_ports_in_a_command(stub_vm, test_vm, allowed_ports, denied_ports)
        except:
            traceback.print_exc(file=sys.stdout)
            test_util.test_logger('Check result: [Security Group] meets failure when checking TCP ingress rule for [vm:] %s [nic:] %s. ' % (test_vm.uuid, self.nic_uuid))
            test_result = False
    else:
        test_util.test_warn('Test [vm:] %s is not running. Skip SG TCP ingress connection checker for this vm.' % test_vm.uuid)

    test_util.test_logger('Check result: [Security Group] finishes TCP ingress testing for [nic:] %s' % self.nic_uuid)
    print_iptables(test_vm)
    return self.judge(test_result)
def check(self):
    super(zstack_vcenter_sg_tcp_ingress_checker, self).check()
    all_ports = port_header.all_ports
    test_result = True
    test_util.test_dsc('Check TCP ingress rules')
    nic = test_lib.lib_get_nic_by_uuid(self.nic_uuid)
    l3_uuid = nic.l3NetworkUuid
    if not 'DHCP' in test_lib.lib_get_l3_service_type(l3_uuid):
        test_util.test_logger("Skip SG test for [l3:] %s. Since it doesn't provide DHCP service, there isn't stable IP address for testing." % l3_uuid)
        return self.judge(self.exp_result)

    stub_vm = self.test_obj.get_stub_vm(l3_uuid)
    if not stub_vm:
        test_util.test_warn('Did not find test stub vm for [nic:] %s. Skip TCP ingress port checking for this nic.' % self.nic_uuid)
        return self.judge(self.exp_result)

    stub_vm = stub_vm.vm
    stub_vm_ip = test_lib.lib_get_vm_nic_by_l3(stub_vm, l3_uuid).ip
    target_addr = '%s/32' % stub_vm_ip
    rules = self.test_obj.get_nic_tcp_ingress_rule_by_addr(self.nic_uuid, target_addr)

    allowed_ports = []
    for rule in rules:
        rule_allowed_ports = port_header.get_ports(port_header.get_port_rule(rule.startPort))
        test_util.test_logger('[SG:] %s [ingress rule]: %s allow to access [nic:] %s [ports]: %s from [vm:] %s' % (rule.securityGroupUuid, rule.uuid, self.nic_uuid, rule_allowed_ports, stub_vm.uuid))
        for port in rule_allowed_ports:
            if not port in allowed_ports:
                allowed_ports.append(port)

    if not allowed_ports:
        #If no allowed port, it means all denied.
        denied_ports = list(all_ports)
    else:
        denied_ports = list_ops.list_minus(all_ports, allowed_ports)

    test_vm = test_lib.lib_get_vm_by_nic(nic.uuid)
    if test_vm.state == inventory.RUNNING:
        try:
            test_lib.lib_open_vm_listen_ports(test_vm, all_ports, l3_uuid)
            test_lib.lib_check_vm_ports_in_a_command(stub_vm, test_vm, allowed_ports, denied_ports)
        except:
            traceback.print_exc(file=sys.stdout)
            test_util.test_logger('Check result: [Security Group] meets failure when checking TCP ingress rule for [vm:] %s [nic:] %s. ' % (test_vm.uuid, self.nic_uuid))
            test_result = False
    else:
        test_util.test_warn('Test [vm:] %s is not running. Skip SG TCP ingress connection checker for this vm.' % test_vm.uuid)

    test_util.test_logger('Check result: [Security Group] finishes TCP ingress testing for [nic:] %s' % self.nic_uuid)
    print_iptables(test_vm)
    return self.judge(test_result)
def recalc_allowed_denied_ports(self, allowed_ports, denied_ports, \
        pf_allowed_ports, pf_denied_ports):
    new_allowed_ports = list(allowed_ports)
    new_denied_ports = list(denied_ports)
    for allowed_port in pf_allowed_ports:
        if allowed_port in allowed_ports:
            #this should not happen
            test_util.test_warn('Same VIP ports are assigned more than once. ')
        else:
            new_allowed_ports.append(allowed_port)
            new_denied_ports.remove(allowed_port)

    return new_allowed_ports, new_denied_ports
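# Hedged example of how the recalculation merges port-forwarding results into
# the security-group results; the concrete port numbers are illustrative only:
#
#   allowed, denied = checker.recalc_allowed_denied_ports(
#           allowed_ports=[22, 80],          # ports already allowed by SG rules
#           denied_ports=[23, 443, 8080],    # ports currently expected to be blocked
#           pf_allowed_ports=[443],          # ports newly opened by a PF/VIP rule
#           pf_denied_ports=[])
#   # allowed == [22, 80, 443]; denied == [23, 8080]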
def error_cleanup():
    global curr_deploy_conf
    cluster1 = res_ops.get_resource(res_ops.CLUSTER, name=cluster1_name)
    if not cluster1:
        try:
            cluster_ops.add_cluster_resource(curr_deploy_conf, cluster1_name)
        except Exception as e:
            test_util.test_warn('Fail to recover all [cluster:] %s resource. It will impact later test case.' % cluster1_name)
            raise e
    test_lib.lib_error_cleanup(test_obj_dict)
def error_cleanup():
    global curr_deploy_conf
    zone1 = res_ops.get_resource(res_ops.ZONE, name=zone1_name)
    if not zone1:
        try:
            zone_ops.add_zone_resource(curr_deploy_conf, zone1_name)
        except Exception as e:
            test_util.test_warn('Fail to recover all [zone:] %s resource. It will impact later test case.' % zone1_name)
            raise e
    test_lib.lib_error_cleanup(test_obj_dict)
def check_operation_result(self):
    time.sleep(30)
    start_msg_mismatch = 1
    for k in range(0, 100):
        for i in range(0, self.i):
            vm_stat_flag = 0
            if not test_lib.lib_find_in_local_management_server_log(self.date+k, '[msg send]: {"org.zstack.header.vm.RebootVmInstanceMsg', self.vms[i].uuid):
                test_util.test_warn('RebootVmInstanceMsg is expected to execute at %s' % (self.date+k))
                vm_stat_flag = 1
                start_msg_mismatch += 1
        if vm_stat_flag == 0:
            break

    if start_msg_mismatch > 1000:
        test_util.test_fail('%s of 1000 RebootVmInstanceMsg not executed at expected timestamp' % (start_msg_mismatch))
def error_cleanup():
    global host_config
    global ps_inv
    test_lib.lib_error_cleanup(test_obj_dict)
    host1 = res_ops.get_resource(res_ops.HOST, name = host1_name)
    if not host1:
        try:
            test_stub.recover_ps(ps_inv)
        except Exception as e:
            test_util.test_warn('Fail to recover all primary storage %s resource. It might impact later test case.' % ps_inv.name)
        try:
            host_ops.add_kvm_host(host_config)
        except Exception as e:
            test_util.test_warn('Fail to recover all [host:] %s resource. It will impact later test case.' % host1_name)
            raise e
def check_operation_result(self):
    time.sleep(30)
    start_msg_mismatch = 1
    for k in range(0, 1000):
        for i in range(0, self.i):
            vm_stat_flag = 0
            vm = test_lib.lib_get_vm_by_uuid(self.vms[i].uuid)
            root_volume_uuid = test_lib.lib_get_root_volume_uuid(vm)
            if not test_lib.lib_find_in_local_management_server_log(self.date+k, '[msg send]: {"org.zstack.header.volume.CreateVolumeSnapshotMsg', self.vms[i].uuid):
                test_util.test_warn('CreateVolumeSnapshotMsg is expected to execute at %s' % (self.date+k))
                vm_stat_flag = 1
                start_msg_mismatch += 1
        if vm_stat_flag == 0:
            break

    if start_msg_mismatch > 1000:
        test_util.test_fail('%s of 1000 CreateVolumeSnapshotMsg not executed at expected timestamp' % (start_msg_mismatch))
def add_data_volume_template(image_option):
    action = api_actions.AddImageAction()
    action.name = image_option.get_name()
    action.url = image_option.get_url()
    action.mediaType = 'DataVolumeTemplate'
    if image_option.get_mediaType() and \
            action.mediaType != image_option.get_mediaType():
        test_util.test_warn('image type %s was not %s' % \
                (image_option.get_mediaType(), action.mediaType))

    action.format = image_option.get_format()
    action.backupStorageUuids = image_option.get_backup_storage_list()
    test_util.action_logger('Add [Volume:] %s from [url:] %s ' % (action.name, action.url))
    evt = account_operations.execute_action_with_session(action, image_option.get_session_uuid())
    test_util.test_logger('[volume:] %s is added.' % evt.inventory.uuid)
    return evt.inventory
def add_data_volume_template(image_option):
    action = api_actions.AddImageAction()
    action.name = image_option.get_name()
    action.url = image_option.get_url()
    action.mediaType = 'DataVolumeTemplate'
    if image_option.get_mediaType() and \
            action.mediaType != image_option.get_mediaType():
        test_util.test_warn('image type %s was not %s' % \
                (image_option.get_mediaType(), action.mediaType))

    action.format = image_option.get_format()
    action.backupStorageUuids = image_option.get_backup_storage_list()
    test_util.action_logger('Add [Volume:] %s from [url:] %s ' % (action.name, action.url))
    evt = account_operations.execute_action_with_session(action, image_option.get_session_uuid())
    test_util.test_logger('[volume:] %s is added.' % evt.inventory.uuid)
    return evt.inventory
def test():
    global vm
    global schds
    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    start_date = int(time.time())
    test_util.test_logger('Setup start VM scheduler')

    for ops_id in range(1000):
        thread = threading.Thread(target=create_start_vm_scheduler, args=(vm.get_vm().uuid, start_date, ops_id, ))
        while threading.active_count() > 10:
            time.sleep(0.5)
        exc = sys.exc_info()
        thread.start()

    while threading.activeCount() > 1:
        exc = sys.exc_info()
        time.sleep(0.1)

    test_stub.sleep_util(start_date+200)

    start_msg_mismatch = 0
    for i in range(0, 100):
        if not test_lib.lib_find_in_local_management_server_log(start_date+100+i, '[msg send]: {"org.zstack.header.vm.StartVmInstanceMsg', vm.get_vm().uuid):
            start_msg_mismatch += 1
            test_util.test_warn('StartVmInstanceMsg is expected to execute at %s' % (start_date+100+i))

    if start_msg_mismatch > 5:
        test_util.test_fail('%s of 58 StartVmInstanceMsg not executed at expected timestamp' % (start_msg_mismatch))

    for schd in schds:
        thread = threading.Thread(target=delete_start_vm_scheduler, args=(schd.uuid, ))
        while threading.active_count() > 10:
            time.sleep(0.5)
        exc = sys.exc_info()
        thread.start()

    while threading.activeCount() > 1:
        exc = sys.exc_info()
        time.sleep(0.1)

    try:
        vm.destroy()
    except:
        test_util.test_logger('expected exception when destroy VM since too many queued task')

    test_util.test_pass('Create 1000 Simple VM Start Scheduler Success')
def check(self):
    super(zstack_kvm_sg_icmp_ingress_checker, self).check()
    test_result = True
    nic = test_lib.lib_get_nic_by_uuid(self.nic_uuid)
    l3_uuid = nic.l3NetworkUuid
    test_util.test_dsc('Check ICMP ingress rules')
    if not 'DHCP' in test_lib.lib_get_l3_service_type(l3_uuid):
        test_util.test_logger("Skip SG test for [l3:] %s. Since it doesn't provide DHCP service, there isn't stable IP address for testing." % l3_uuid)
        return self.judge(self.exp_result)

    stub_vm = self.test_obj.get_stub_vm(l3_uuid)
    if not stub_vm:
        #test_util.test_warn('Did not find test stub vm for [target address:] %s. Skip testing some TCP rules' % target_addr)
        test_util.test_warn('Did not find test stub vm for [l3:] %s. Skip testing some TCP rules' % l3_uuid)
        return self.judge(self.exp_result)

    stub_vm = stub_vm.vm
    stub_vm_ip = test_lib.lib_get_vm_nic_by_l3(stub_vm, l3_uuid).ip
    target_addr = '%s/32' % stub_vm_ip

    test_vm = test_lib.lib_get_vm_by_nic(nic.uuid)
    if test_vm.state == inventory.RUNNING:
        rules = self.test_obj.get_nic_icmp_ingress_rule_by_addr(self.nic_uuid, target_addr)
        target_ip = test_lib.lib_get_vm_ip_by_l3(test_vm, l3_uuid)
        if rules:
            if test_lib.lib_check_ping(stub_vm, target_ip, no_exception=True):
                test_util.test_logger('Check result: [Security Group] pass ICMP ingress rule checking to ping [vm:] %s from [vm:] %s' % (test_vm.uuid, stub_vm.uuid))
            else:
                test_util.test_logger('Check result: [Security Group] meets failure to ping [vm:] %s from [vm:] %s when checking ICMP ingress rule. ' % (test_vm.uuid, stub_vm.uuid))
                test_result = False
        else:
            if not test_lib.lib_check_ping(stub_vm, target_ip, no_exception=True):
                test_util.test_logger('Check result: [Security Group] pass ICMP ingress rule checking to ping [vm:] %s from [vm:] %s. Expected failure.' % (test_vm.uuid, stub_vm.uuid))
            else:
                test_util.test_logger('Check result: [Security Group] meet failure when checking ICMP ingress rule to ping [vm:] %s from [vm:] %s. Unexpected ping successfully.' % (test_vm.uuid, stub_vm.uuid))
    else:
        test_util.test_warn('Test [vm:] %s is not running. Skip SG ICMP ingress checker for this vm.' % test_vm.uuid)

    test_util.test_logger('Check result: [Security Group] pass ICMP ingress testing for [vm:] %s [nic:] %s' % (test_vm.uuid, self.nic_uuid))
    print_iptables(test_vm)
    return self.judge(test_result)
def error_cleanup():
    global host_config
    global ps_inv
    test_lib.lib_error_cleanup(test_obj_dict)
    host1 = res_ops.get_resource(res_ops.HOST, name=host1_name)
    if not host1:
        try:
            test_stub.recover_ps(ps_inv)
        except Exception as e:
            test_util.test_warn('Fail to recover all primary storage %s resource. It might impact later test case.' % ps_inv.name)
        try:
            host_ops.add_kvm_host(host_config)
        except Exception as e:
            test_util.test_warn('Fail to recover all [host:] %s resource. It will impact later test case.' % host1_name)
            raise e
def check(self):
    super(zstack_vcenter_sg_icmp_ingress_checker, self).check()
    test_result = True
    nic = test_lib.lib_get_nic_by_uuid(self.nic_uuid)
    l3_uuid = nic.l3NetworkUuid
    test_util.test_dsc('Check ICMP ingress rules')
    if not 'DHCP' in test_lib.lib_get_l3_service_type(l3_uuid):
        test_util.test_logger("Skip SG test for [l3:] %s. Since it doesn't provide DHCP service, there isn't stable IP address for testing." % l3_uuid)
        return self.judge(self.exp_result)

    stub_vm = self.test_obj.get_stub_vm(l3_uuid)
    if not stub_vm:
        # target_addr is not defined yet at this point, so report the l3 uuid instead.
        test_util.test_warn('Did not find test stub vm for [l3:] %s. Skip testing some TCP rules' % l3_uuid)
        return self.judge(self.exp_result)

    stub_vm = stub_vm.vm
    stub_vm_ip = test_lib.lib_get_vm_nic_by_l3(stub_vm, l3_uuid).ip
    target_addr = '%s/32' % stub_vm_ip

    test_vm = test_lib.lib_get_vm_by_nic(nic.uuid)
    if test_vm.state == inventory.RUNNING:
        rules = self.test_obj.get_nic_icmp_ingress_rule_by_addr(self.nic_uuid, target_addr)
        target_ip = test_lib.lib_get_vm_ip_by_l3(test_vm, l3_uuid)
        if rules:
            if test_lib.lib_check_ping(stub_vm, target_ip, no_exception=True):
                test_util.test_logger('Check result: [Security Group] pass ICMP ingress rule checking to ping [vm:] %s from [vm:] %s' % (test_vm.uuid, stub_vm.uuid))
            else:
                test_util.test_logger('Check result: [Security Group] meets failure to ping [vm:] %s from [vm:] %s when checking ICMP ingress rule. ' % (test_vm.uuid, stub_vm.uuid))
                test_result = False
        else:
            if not test_lib.lib_check_ping(stub_vm, target_ip, no_exception=True):
                test_util.test_logger('Check result: [Security Group] pass ICMP ingress rule checking to ping [vm:] %s from [vm:] %s. Expected failure.' % (test_vm.uuid, stub_vm.uuid))
            else:
                test_util.test_logger('Check result: [Security Group] meet failure when checking ICMP ingress rule to ping [vm:] %s from [vm:] %s. Unexpected ping successfully.' % (test_vm.uuid, stub_vm.uuid))
    else:
        test_util.test_warn('Test [vm:] %s is not running. Skip SG ICMP ingress checker for this vm.' % test_vm.uuid)

    test_util.test_logger('Check result: [Security Group] pass ICMP ingress testing for [vm:] %s [nic:] %s' % (test_vm.uuid, self.nic_uuid))
    print_iptables(test_vm)
    return self.judge(test_result)
def _check_sg_exist(self, test_sg):
    try:
        conditions = res_ops.gen_query_conditions('uuid', '=', test_sg.security_group.uuid)
        sg = res_ops.query_resource(res_ops.SECURITY_GROUP, conditions)[0]
    except Exception as e:
        test_util.test_logger('Check result: [SG uuid:] %s does not exist in database.' % test_sg.security_group.uuid)
        if test_sg.state == sg_header.DELETED:
            return self.judge(True)
        else:
            traceback.print_exc(file=sys.stdout)
            return self.judge(False)

    test_util.test_logger('Check result: [SG uuid:] %s is found in database.' % test_sg.security_group.uuid)
    if test_sg.state == sg_header.DELETED:
        test_util.test_warn('[SG uuid:] %s should not be found in database, since it is deleted.' % test_sg.security_group.uuid)
        return self.judge(False)

    rules = sg.rules
    test_rules = test_sg.get_all_rules()
    rule_id_list = []
    for rule in rules:
        rule_id_list.append(rule.uuid)

    if len(rules) != len(test_rules):
        test_util.test_warn('[SG uuid:] %s rules number: %s is not aligned with the record: %s in DB.' % (test_sg.security_group.uuid, len(test_rules), len(rules)))
        return self.judge(False)

    for rule in test_rules:
        if not rule.uuid in rule_id_list:
            test_util.test_warn('[SG uuid:] %s rule: %s is not found in DB.' % (test_sg.security_group.uuid, rule.uuid))
            return self.judge(False)

    test_util.test_logger('Check result: [SG uuid:] %s rules are all found in database.' % test_sg.security_group.uuid)
    return self.judge(True)
def check_eip_icmp(self, expected_result):
    vip_ip = self.test_obj.get_vip().ip
    eip = self.test_obj.get_eip().get_eip()
    try:
        if not self.allowed_vr:
            test_util.test_warn("Did not find a suitable VR vm to do EIP testing. Please make sure there are at least 3 VR VMs existing for EIP testing.")
        test_lib.lib_check_ping(self.allowed_vr, vip_ip)
    except:
        if expected_result:
            test_util.test_logger("Unexpected Result: catch failure when checking EIP: %s ICMP for target ip: %s from [vm:] %s. " % (eip.uuid, vip_ip, self.allowed_vr.uuid))
            return False
        else:
            if self.allowed_vr:
                test_util.test_logger("Expected Result: catch failure when checking EIP: %s ICMP for target ip: %s from [vm:] %s. " % (eip.uuid, vip_ip, self.allowed_vr.uuid))
            else:
                test_util.test_logger("can not do test, due to missing allowed vr. ")

    if expected_result:
        test_util.test_logger("Expected Result: Ping successfully checking EIP: %s ICMP for target ip: %s from [vm:] %s" % (eip.uuid, vip_ip, self.allowed_vr.uuid))
    else:
        test_util.test_logger("Unexpected Result: Ping successfully checking EIP: %s ICMP for target ip: %s from [vm:] %s" % (eip.uuid, vip_ip, self.allowed_vr.uuid))
    return True
def test():
    global vm
    global schd
    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    start_date = int(time.time())
    test_util.test_logger('Setup stop and start VM scheduler')
    schd = vm_ops.start_vm_scheduler(vm.get_vm().uuid, 'simple', 'simple_start_vm_scheduler', 0, 1)
    actual_startDate = time.mktime(time.strptime(schd.startTime, '%b %d, %Y %I:%M:%S %p'))
    if actual_startDate != start_date and actual_startDate != start_date + 1.0:
        test_util.test_fail('startDate is expected to set to now, which should be around %s' % (start_date))

    test_stub.sleep_util(start_date + 58)

    start_msg_mismatch = 0
    for i in range(1, 58):
        if not test_lib.lib_find_in_local_management_server_log(start_date + i, '[msg send]: {"org.zstack.header.vm.StartVmInstanceMsg', vm.get_vm().uuid):
            start_msg_mismatch += 1
            test_util.test_warn('StartVmInstanceMsg is expected to execute at %s' % (start_date + i))

    if start_msg_mismatch > 5:
        test_util.test_fail('%s of 58 StartVmInstanceMsg not executed at expected timestamp' % (start_msg_mismatch))

    schd_ops.delete_scheduler(schd.uuid)

    try:
        vm.destroy()
    except:
        test_util.test_logger('expected exception when destroy VM since too many queued task')

    test_util.test_pass('Create Simple VM Start Scheduler Success')
def add_root_volume_template(image_creation_option):
    '''
    Add root volume template
    '''
    action = api_actions.AddImageAction()
    action.name = image_creation_option.get_name()
    action.guest_os_type = image_creation_option.get_guest_os_type()
    action.mediaType = 'RootVolumeTemplate'
    if image_creation_option.get_mediaType() and \
            action.mediaType != image_creation_option.get_mediaType():
        test_util.test_warn('image type %s was not %s' % \
                (image_creation_option.get_mediaType(), action.mediaType))

    action.backupStorageUuids = \
            image_creation_option.get_backup_storage_uuid_list()
    action.bits = image_creation_option.get_bits()
    action.description = image_creation_option.get_description()
    action.format = image_creation_option.get_format()
    action.url = image_creation_option.get_url()
    action.timeout = image_creation_option.get_timeout()

    test_util.action_logger('Add Root Volume Template from url: %s in [backup Storage:] %s' % (action.url, action.backupStorageUuids))
    evt = account_operations.execute_action_with_session(action, \
            image_creation_option.get_session_uuid())
    return evt.inventory
def _check_sg_destroyed(self):
    for sg in self.sg_nic_dict.keys():
        if sg.state == sg_header.DELETED:
            self._remove_sg(sg)
            test_util.test_warn("Catch undeleted SG. It might be because SG deletion is not called by delete_sg() API.")
def test():
    global vm
    global schd1
    global schd2
    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    test_util.test_logger('Update stop and start VM scheduler to cron type')
    start_date = int(time.time())
    test_stub.sleep_util((start_date+59)/60*60)
    schd1 = vm_ops.stop_vm_scheduler(vm.get_vm().uuid, 'cron', 'cron_stop_vm_scheduler', None, None, None, '0 * * * * ?')
    schd2 = vm_ops.start_vm_scheduler(vm.get_vm().uuid, 'cron', 'cron_start_vm_scheduler', None, None, None, '0 * * * * ?')
    change_date = int(time.time())
    test_stub.sleep_util(start_date+30)

    for i in range(2, 30):
        if test_lib.lib_find_in_local_management_server_log(change_date+i, '[msg received]: {"org.zstack.header.vm.StopVmInstanceMsg', vm.get_vm().uuid):
            test_util.test_fail('StopVmInstanceMsg is not expected to execute at %s' % (change_date+i))
        if test_lib.lib_find_in_local_management_server_log(change_date+i, '[msg received]: {"org.zstack.header.vm.StartVmInstanceMsg', vm.get_vm().uuid):
            test_util.test_fail('StartVmInstanceMsg is not expected to execute at %s' % (change_date+i))

    schd_ops.update_scheduler(schd1.uuid, 'simple', 'simple_stop_vm_scheduler2', start_date, 2, 10)
    schd_ops.update_scheduler(schd2.uuid, 'simple', 'simple_start_vm_scheduler2', start_date, 1, 20)
    change_date = int(time.time())
    test_stub.sleep_util(change_date+60)

    stop_msg_mismatch = 0
    start_msg_mismatch = 0
    for i in range(1, 15):
        if (change_date+i-start_date) % 2 == 0:
            if not test_lib.lib_find_in_local_management_server_log(change_date+i, '[msg received]: {"org.zstack.header.vm.StopVmInstanceMsg', vm.get_vm().uuid):
                stop_msg_mismatch += 1
                test_util.test_warn('StopVmInstanceMsg is expected to execute at %s' % (change_date+i))
        else:
            if test_lib.lib_find_in_local_management_server_log(change_date+i, '[msg received]: {"org.zstack.header.vm.StopVmInstanceMsg', vm.get_vm().uuid):
                stop_msg_mismatch += 1
                test_util.test_warn('StopVmInstanceMsg is not expected to execute at %s' % (change_date+i))
        if not test_lib.lib_find_in_local_management_server_log(change_date+i, '[msg received]: {"org.zstack.header.vm.StartVmInstanceMsg', vm.get_vm().uuid):
            start_msg_mismatch += 1
            test_util.test_warn('StartVmInstanceMsg is expected to execute at %s' % (change_date+i))

    for i in range(21, 60):
        if test_lib.lib_find_in_local_management_server_log(change_date+i, '[msg received]: {"org.zstack.header.vm.StopVmInstanceMsg', vm.get_vm().uuid):
            stop_msg_mismatch += 1
            test_util.test_warn('StopVmInstanceMsg is not expected to execute at %s' % (change_date+i))
        if test_lib.lib_find_in_local_management_server_log(change_date+i, '[msg received]: {"org.zstack.header.vm.StartVmInstanceMsg', vm.get_vm().uuid):
            start_msg_mismatch += 1
            test_util.test_warn('StartVmInstanceMsg is not expected to execute at %s' % (change_date+i))

    if stop_msg_mismatch > 5:
        test_util.test_fail('%s of 58 StopVmInstanceMsg not executed at expected timestamp' % (stop_msg_mismatch))
    if start_msg_mismatch > 10:
        test_util.test_fail('%s of 58 StartVmInstanceMsg not executed at expected timestamp' % (start_msg_mismatch))

    schd_ops.delete_scheduler(schd1.uuid)
    schd_ops.delete_scheduler(schd2.uuid)

    try:
        vm.destroy()
    except:
        test_util.test_logger('expected exception when destroy VM since too many queued task')

    test_util.test_pass('Create Simple VM Stop Start Scheduler Success')
def test():
    global vm
    global schds
    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    start_date = int(time.time())
    test_util.test_logger('Setup start VM scheduler')

    for ops_id in range(1000):
        thread = threading.Thread(target=create_start_vm_scheduler, args=(vm.get_vm().uuid, start_date, ops_id, ))
        while threading.active_count() > 10:
            time.sleep(0.5)
        exc = sys.exc_info()
        thread.start()

    while threading.activeCount() > 1:
        exc = sys.exc_info()
        time.sleep(0.1)

    test_stub.sleep_util(start_date + 400)

    start_msg_mismatch = 0
    for i in range(0, 299):
        if test_lib.lib_find_in_local_management_server_log(start_date + i, '[msg send]: {"org.zstack.header.vm.StartVmInstanceMsg', vm.get_vm().uuid):
            start_msg_mismatch += 1
            test_util.test_warn('StartVmInstanceMsg is not expected to execute at %s' % (start_date + i))

    if start_msg_mismatch > 5:
        test_util.test_fail('%s of 58 StartVmInstanceMsg not executed at expected timestamp' % (start_msg_mismatch))

    count0 = test_lib.lib_count_in_local_management_server_log(start_date + 300 - 1, '[msg send]: {"org.zstack.header.vm.StartVmInstanceMsg', vm.get_vm().uuid)
    test_util.test_logger('%s of 1000 scheduler executed at the same second' % count0)
    count1 = test_lib.lib_count_in_local_management_server_log(start_date + 300, '[msg send]: {"org.zstack.header.vm.StartVmInstanceMsg', vm.get_vm().uuid)
    test_util.test_logger('%s of 1000 scheduler executed at the next second' % count1)
    count2 = test_lib.lib_count_in_local_management_server_log(start_date + 300 + 1, '[msg send]: {"org.zstack.header.vm.StartVmInstanceMsg', vm.get_vm().uuid)
    test_util.test_logger('%s of 1000 scheduler executed at the third second' % count2)
    count3 = test_lib.lib_count_in_local_management_server_log(start_date + 300 + 2, '[msg send]: {"org.zstack.header.vm.StartVmInstanceMsg', vm.get_vm().uuid)
    test_util.test_logger('%s of 1000 scheduler executed at the fourth second' % count3)
    if count0 + count1 + count2 + count3 < 900:
        test_util.test_fail('only %s of 1000 scheduler executed at the specified first 4 seconds' % (count0 + count1 + count2 + count3))

    for schd_job in schd_jobs:
        thread = threading.Thread(target=delete_scheduler_job, args=(schd_job.uuid, ))
        while threading.active_count() > 10:
            time.sleep(0.5)
        exc = sys.exc_info()
        thread.start()

    while threading.activeCount() > 1:
        exc = sys.exc_info()
        time.sleep(0.1)

    for schd_trigger in schd_triggers:
        thread = threading.Thread(target=delete_scheduler_trigger, args=(schd_trigger.uuid, ))
        while threading.active_count() > 10:
            time.sleep(0.5)
        exc = sys.exc_info()
        thread.start()

    while threading.activeCount() > 1:
        exc = sys.exc_info()
        time.sleep(0.1)

    try:
        vm.destroy()
    except:
        test_util.test_logger('expected exception when destroy VM since too many queued task')

    test_util.test_pass('Create Simple VM Stop Start Scheduler Success')
def test():
    global vm
    global schd1
    global schd2
    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    start_date = int(time.time())
    test_util.test_logger('Setup stop and start VM scheduler')
    schd1 = vm_ops.stop_vm_scheduler(vm.get_vm().uuid, 'simple', 'simple_stop_vm_scheduler', start_date, 1)
    schd2 = vm_ops.start_vm_scheduler(vm.get_vm().uuid, 'simple', 'simple_start_vm_scheduler', start_date, 2)
    test_stub.sleep_util(start_date + 58)

    stop_msg_mismatch = 0
    start_msg_mismatch = 0
    for i in range(0, 58):
        if not test_lib.lib_find_in_local_management_server_log(start_date + i, '[msg received]: {"org.zstack.header.vm.StopVmInstanceMsg', vm.get_vm().uuid):
            stop_msg_mismatch += 1
            test_util.test_warn('StopVmInstanceMsg is expected to execute at %s' % (start_date + i))
        if i % 2 == 0:
            if not test_lib.lib_find_in_local_management_server_log(start_date + i, '[msg received]: {"org.zstack.header.vm.StartVmInstanceMsg', vm.get_vm().uuid):
                start_msg_mismatch += 1
                test_util.test_warn('StartVmInstanceMsg is expected to execute at %s' % (start_date + i))
        else:
            if test_lib.lib_find_in_local_management_server_log(start_date + i, '[msg received]: {"org.zstack.header.vm.StartVmInstanceMsg', vm.get_vm().uuid):
                start_msg_mismatch += 1
                test_util.test_warn('StartVmInstanceMsg is not expected to execute at %s' % (start_date + i))

    if stop_msg_mismatch > 10:
        test_util.test_fail('%s of 58 StopVmInstanceMsg not executed at expected timestamp' % (stop_msg_mismatch))
    if start_msg_mismatch > 5:
        test_util.test_fail('%s of 58 StartVmInstanceMsg not executed at expected timestamp' % (start_msg_mismatch))

    test_util.test_logger('Update stop and start VM scheduler to cron type')
    start_date = int(time.time())
    test_stub.sleep_util((start_date + 59) / 60 * 60)
    schd_ops.update_scheduler(schd1.uuid, 'cron', 'cron_stop_vm_scheduler2', None, None, None, '0 * * * * ?')
    schd_ops.update_scheduler(schd2.uuid, 'cron', 'cron_start_vm_scheduler2', None, None, None, '0 * * * * ?')
    change_date = int(time.time())
    test_stub.sleep_util(start_date + 59)

    for i in range(2, 58):
        if test_lib.lib_find_in_local_management_server_log(change_date + i, '[msg received]: {"org.zstack.header.vm.StopVmInstanceMsg', vm.get_vm().uuid):
            test_util.test_fail('StopVmInstanceMsg is not expected to execute at %s' % (change_date + i))
        if test_lib.lib_find_in_local_management_server_log(change_date + i, '[msg received]: {"org.zstack.header.vm.StartVmInstanceMsg', vm.get_vm().uuid):
            test_util.test_fail('StartVmInstanceMsg is not expected to execute at %s' % (change_date + i))

    schd_ops.update_scheduler(schd1.uuid, 'cron', 'cron_stop_vm_scheduler3', None, None, None, '0/2 * * * * ?')
    schd_ops.update_scheduler(schd2.uuid, 'cron', 'cron_start_vm_scheduler3', None, None, None, '* * * * * ?')
    change_date = int(time.time())
    test_stub.sleep_util(change_date + 60)

    stop_msg_mismatch = 0
    start_msg_mismatch = 0
    for i in range(1, 58):
        if (change_date + i) % 2 == 0:
            if not test_lib.lib_find_in_local_management_server_log(change_date + i, '[msg received]: {"org.zstack.header.vm.StopVmInstanceMsg', vm.get_vm().uuid):
                stop_msg_mismatch += 1
                test_util.test_warn('StopVmInstanceMsg is expected to execute at %s' % (change_date + i))
        else:
            if test_lib.lib_find_in_local_management_server_log(change_date + i, '[msg received]: {"org.zstack.header.vm.StopVmInstanceMsg', vm.get_vm().uuid):
                stop_msg_mismatch += 1
                test_util.test_warn('StopVmInstanceMsg is not expected to execute at %s' % (change_date + i))
        if not test_lib.lib_find_in_local_management_server_log(change_date + i, '[msg received]: {"org.zstack.header.vm.StartVmInstanceMsg', vm.get_vm().uuid):
            start_msg_mismatch += 1
            test_util.test_warn('StartVmInstanceMsg is expected to execute at %s' % (change_date + i))

    if stop_msg_mismatch > 5:
        test_util.test_fail('%s of 58 StopVmInstanceMsg not executed at expected timestamp' % (stop_msg_mismatch))
    if start_msg_mismatch > 10:
        test_util.test_fail('%s of 58 StartVmInstanceMsg not executed at expected timestamp' % (start_msg_mismatch))

    schd_ops.delete_scheduler(schd1.uuid)
    schd_ops.delete_scheduler(schd2.uuid)

    try:
        vm.destroy()
    except:
        test_util.test_logger('expected exception when destroy VM since too many queued task')

    test_util.test_pass('Create Simple VM Stop Start Scheduler Success')