def CleanupTestbed(self):
    msg = self.__prepare_TestBedMsg(self.prev_ts)
    resp = api.CleanupTestbed(msg)
    if resp is None:
        Logger.error("Failed to cleanup testbed")
        return types.status.FAILURE
    return types.status.SUCCESS
def __parse_setup_topology(self):
    topospec = getattr(self.__spec.setup, 'topology', None)
    if not topospec:
        Logger.error("Error: No topology specified in the testsuite.")
        assert(0)
    self.__topology = topology.Topology(topospec, self.ProvisionInfo())
    store.SetTopology(self.__topology)
    return types.status.SUCCESS
def AllocateInstance(self, type, tag=None):
    for instance in self.__instpool:
        if instance.Type == type:
            if tag is not None and tag != getattr(instance, "Tag", None):
                continue
            self.__instpool.remove(instance)
            return instance
    Logger.error("No Nodes available in Testbed of type : %s" % type)
    sys.exit(1)
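# Illustrative usage only (hypothetical values, not harness code): topology
# setup might draw nodes from the pool like this. Note the allocation is
# destructive (the matched instance is removed from __instpool), and the
# process exits if no instance of the requested type remains.
#
#   instance = testbed.AllocateInstance("bm", tag="naples")
#   vm_inst = testbed.AllocateInstance("vm")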
def __setup_config(self):
    for s in self.__spec.setup.config:
        # Reset the running directory before every step
        Logger.info("Starting Config Step: ", s.step)
        api.ChangeDirectory(None)
        status = loader.RunCallback(s.step, 'Main', True, getattr(s, "args", None))
        if status != types.status.SUCCESS:
            Logger.error("ERROR: Failed to run config step", s.step)
            return status
    return types.status.SUCCESS
def __invoke_teardown(self):
    teardown_spec = getattr(self.__spec, 'teardown', [])
    if teardown_spec is None:
        return types.status.SUCCESS
    for s in teardown_spec:
        Logger.info("Starting Teardown Step: ", s.step)
        status = loader.RunCallback(s.step, 'Main', True, getattr(s, "args", None))
        if status != types.status.SUCCESS:
            Logger.error("ERROR: Failed to run teardown step", s.step)
            return status
    return types.status.SUCCESS
def UpdateNaplesPipelines(self, pipelines=None):
    if GlobalOptions.skip_firmware_upgrade:
        Logger.debug("user requested to skip firmware upgrade so skipping naples pipeline install")
        return
    if not pipelines:
        pipelines = self.GetNaplesPipelines()
    if not pipelines:
        Logger.debug("no pipelines found")
        return
    nwarmd = "{0}/iota/warmd.json".format(api.GetTopDir())
    with open(GlobalOptions.testbed_json, "r") as warmdFile:
        updated = False
        alreadyDownloaded = []
        warmd = json.load(warmdFile)
        for pl in pipelines:
            if not types.nicModes.valid(pl.mode.upper()):
                raise ValueError("nic mode {0} is not valid. must be one of: {1}".format(pl.mode, types.nicModes.str_enums.values()))
            if not types.pipelines.valid(pl.pipeline.upper()):
                raise ValueError("nic pipeline {0} is not valid. must be one of: {1}".format(pl.pipeline, types.pipelines.str_enums.values()))
            if pl.nicNumber < 1:
                raise ValueError("nic number must be >= 1. value from testsuite files was {0}".format(pl.nicNumber))
            Logger.debug("checking pipeline info for {0}".format(pl))
            topoNode = self.__getNodeByName(pl.node)
            if not topoNode:
                Logger.warn("failed to find node {0} in topology node list".format(pl.node))
                continue
            instId = topoNode.GetNodeInfo()["InstanceID"]
            for node in warmd['Instances']:
                if instId == node.get('ID', None):
                    device = topoNode.GetDeviceByNicNumber(pl.nicNumber)
                    device.SetMode(pl.mode)
                    device.SetNaplesPipeline(pl.pipeline)
                    nic = node['Nics'][pl.nicNumber - 1]
                    nic['version'] = pl.version
                    nic['pipeline'] = pl.pipeline
                    nic['mode'] = pl.mode
                    updated = True
                    if pl.version not in alreadyDownloaded:
                        api.DownloadAssets(pl.version)
                        alreadyDownloaded.append(pl.version)
                    Logger.info("upgrading node:nic {0}:{1}".format(topoNode.MgmtIpAddress(), pl.nicNumber))
                    devices = {instId: {"nics": [pl.nicNumber], "nodeName": pl.node, "pipeline": pl.pipeline}}
                    Logger.debug("writing updated warmd.json to {0}".format(nwarmd))
                    with open(nwarmd, 'w') as outfile:
                        json.dump(warmd, outfile, indent=4)
                    resp = api.ReInstallImage(fw_version=pl.version, dr_version=pl.version, devices=devices)
                    if resp != api.types.status.SUCCESS:
                        Logger.error("Failed to install images on the node:nic {0}:{1}".format(topoNode.MgmtIpAddress(), pl.nicNumber))
                    break
            else:
                Logger.warn("failed to find node {0} / id {1} in warmd".format(topoNode.MgmtIpAddress(), instId))
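# Illustrative only: the pipeline entries consumed above are assumed to carry
# the fields read from `pl` (node, nicNumber, mode, pipeline, version),
# presumably coming from a testsuite spec. The layout and values below are
# hypothetical, not an actual spec excerpt.
#
#   pipelines:
#     - node: node1
#       nicNumber: 1
#       mode: unified
#       pipeline: iris
#       version: latest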
def CollectTechSupport(tsName):
    try:
        #global __CURREN_TECHSUPPORT_CNT
        #__CURREN_TECHSUPPORT_CNT = __CURREN_TECHSUPPORT_CNT + 1
        if GlobalOptions.pipeline in ["apulu"]:
            return types.status.SUCCESS
        Logger.info("Collecting techsupport for testsuite {0}".format(tsName))
        tsName = re.sub(r'\W', '_', tsName)
        logDir = GlobalOptions.logdir
        if not logDir.endswith('/'):
            logDir += '/'
        logDir += 'techsupport/'
        if not os.path.exists(logDir):
            os.mkdir(logDir)
        nodes = api.GetNaplesHostnames()
        req = api.Trigger_CreateExecuteCommandsRequest()
        for n in nodes:
            Logger.info("Techsupport for node: %s" % n)
            common.AddPenctlCommand(req, n, "system tech-support -b %s-tech-support" % (n))
        resp = api.Trigger(req)
        result = types.status.SUCCESS
        for n, cmd in zip(nodes, resp.commands):
            #api.PrintCommandResults(cmd)
            if cmd.exit_code != 0:
                Logger.error("Failed to execute penctl system tech-support on node: %s. err: %d" % (n, cmd.exit_code))
                result = types.status.FAILURE
                continue
            # Copy tech support tar out
            # TAR files are created at: pensando/iota/entities/node1_host/<test_case>
            ntsn = "%s-tech-support.tar.gz" % (n)
            resp = api.CopyFromHost(n, [ntsn], logDir)
            if resp is None or resp.api_response.api_status != types_pb2.API_STATUS_OK:
                Logger.error("Failed to copy techsupport file %s from node: %s" % (ntsn, n))
                result = types.status.FAILURE
                continue
            os.rename(logDir + ntsn, logDir + tsName + '_' + ntsn)
        #if __CURREN_TECHSUPPORT_CNT > __MAX_TECHSUPPORT_PER_RUN:
        #    return types.status.CRITICAL
        return result
    except AttributeError:
        Logger.debug('failed to collect tech support. node list not setup yet')
    except:
        Logger.debug('failed to collect tech support. error was: {0}'.format(traceback.format_exc()))
    return types.status.CRITICAL
def __verifyImagePath(self):
    data = json.load(open(self.__image_manifest_file, 'r'))
    if "Drivers" not in data:
        Logger.error("failed to find key Drivers in {0}".format(self.__image_manifest_file))
    else:
        for drv in data["Drivers"]:
            if "drivers_pkg" not in drv:
                Logger.error("failed to find drivers_pkg key in Drivers {0}".format(drv))
            elif not os.path.exists(os.path.join(GlobalOptions.topdir, drv["drivers_pkg"])):
                Logger.warn("###############################################")
                Logger.warn("failed to find driver {0}".format(drv["drivers_pkg"]))
                Logger.warn("###############################################")
    if "Firmwares" not in data:
        Logger.error("failed to find key Firmwares in {0}".format(self.__image_manifest_file))
    else:
        for fw in data["Firmwares"]:
            if "image" not in fw:
                Logger.error("failed to find image key in Firmware {0}".format(fw))
            elif not os.path.exists(os.path.join(GlobalOptions.topdir, fw["image"])):
                Logger.warn("###############################################")
                Logger.warn("failed to find firmware {0}".format(fw["image"]))
                Logger.warn("###############################################")
def checkPci(self):
    result = types.status.SUCCESS
    if GlobalOptions.dryrun:
        return result
    for node in self.GetTopology().GetNodes():
        msg = "calling verify_pci.verify_errors_lspci() for node {0}".format(node.Name())
        Logger.debug(msg)
        result = verify_pci.verify_errors_lspci(node.Name(), node.GetOs())
        if result != types.status.SUCCESS:
            msg = "PCIe Failure detected on node {0} with OS {1}".format(node.Name(), node.GetOs())
            print(msg)
            Logger.error(msg)
            return result
            # raise OfflineTestbedException
    return result
def __mk_testcase_directory(self, newdir):
    Logger.debug("Creating Testcase directory: %s" % newdir)
    command = "mkdir -p %s && chmod 777 %s" % (newdir, newdir)
    req = api.Trigger_CreateAllParallelCommandsRequest()
    for nodename in api.GetWorkloadNodeHostnames():
        api.Trigger_AddHostCommand(req, nodename, command)
    for wl in api.GetWorkloads():
        if api.IsWorkloadRunning(wl.workload_name):
            api.Trigger_AddCommand(req, wl.node_name, wl.workload_name, command, timeout=60)
    resp = api.Trigger(req)
    if not api.Trigger_IsSuccess(resp):
        Logger.error("Failed to create destination directory %s" % newdir)
        return types.status.FAILURE
    return types.status.SUCCESS
def Import(modname, packages=[]):
    for pkg in packages:
        imp_path = "%s.%s" % (pkg, modname)
        try:
            module = importlib.import_module(imp_path)
        except ModuleNotFoundError:
            continue
        except Exception as e:
            Logger.error(e)
            continue
        if module:
            return module
    Logger.error("Failed to import module: %s in packages: %s" % (modname, packages))
    #assert(0)
    #return None
    raise Exception("failed to import test module")
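# Illustrative usage only: callers hand Import() a module name plus an ordered
# list of candidate packages, and the first package containing the module wins.
# The package paths and the `tc` argument below are hypothetical.
#
#   module = Import("ping", ["iota.test.iris.testcases", "iota.test.common"])
#   status = module.Main(tc)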
def __import_testbundles(self):
    if not GlobalOptions.skip_sanity:
        for bunfile in self.__spec.testbundles.sanity:
            self.__append_testbundle(bunfile)
    if GlobalOptions.extended:
        for bunfile in getattr(self.__spec.testbundles, 'extended', []):
            self.__append_testbundle(bunfile)
    if GlobalOptions.regression:
        for bunfile in getattr(self.__spec.testbundles, 'regression', []):
            self.__append_testbundle(bunfile)
    if GlobalOptions.precheckin:
        for bunfile in getattr(self.__spec.testbundles, 'precheckin', []):
            self.__append_testbundle(bunfile)
    if GlobalOptions.compat_test:
        Logger.debug("For compat-test with fw-version %s and driver-version: %s" % (self.GetFirmwareVersion(), self.GetDriverVersion()))
        if hasattr(self.__spec.testbundles, 'compat'):
            # first load "common" testbundles for compat-regression
            for bunfile in getattr(self.__spec.testbundles.compat, 'common', []):
                self.__append_testbundle(bunfile)
            # Append with fw-version specific bundles to be included
            for bunfile in getattr(self.__spec.testbundles.compat, self.GetFirmwareVersion(), []):
                self.__append_testbundle(bunfile)
            # Append with driver-version specific bundles to be included
            for bunfile in getattr(self.__spec.testbundles.compat, self.GetDriverVersion(), []):
                self.__append_testbundle(bunfile)
        else:
            Logger.error('Missing compat section')
def Main(self):
    if GlobalOptions.testcases and self.Name() not in GlobalOptions.testcases:
        Logger.info("Skipping Testcase: %s due to cmdline filter." % self.Name())
        self.__enable = False
        return types.status.SUCCESS
    if GlobalOptions.markers_present:
        if self.Name() == GlobalOptions.testcase_begin:
            Logger.debug("Match found for Testcase starting marker %s" % self.Name())
            GlobalOptions.inb_markers = True
    if GlobalOptions.markers_present and not GlobalOptions.inb_markers:
        Logger.info("Skipping Testcase: %s due to cmdline testcase begin/end markers." % self.Name())
        self.__enable = False
        return types.status.SUCCESS
    if self.__enable:
        Logger.SetTestcase(self.Name())
        Logger.info("Starting Testcase: %s" % self.Name())
        Logger.info("Testcase {0} timestamp: {1}".format(self.Name(), time.asctime()))
        self.__timer.Start()
        try:
            self.status = self.__execute()
        except OfflineTestbedException:
            utils.LogException(Logger)
            Logger.error("EXCEPTION: Aborting Testcase Execution. Reason: testbed failure")
            raise
        except:
            utils.LogException(Logger)
            Logger.error("EXCEPTION: Aborting Testcase Execution.")
            self.status = types.status.ERROR
            if self.__ignored:
                Logger.error("IGNORE: testcase in ignore mode, ignoring exception.")
                self.status = types.status.SUCCESS
        self.__timer.Stop()
        Logger.info("Testcase %s FINAL RESULT = %d" % (self.Name(), self.status))
    else:
        self.status = types.status.SUCCESS
    if GlobalOptions.markers_present:
        if self.Name() == GlobalOptions.testcase_end:
            Logger.debug("Match found for Testcase ending marker %s" % self.Name())
            GlobalOptions.inb_markers = False
    return self.status
def __init_testbed(self):
    self.__tbid = getattr(self.__tbspec, 'TestbedID', 1)
    self.__vlan_base = getattr(self.__tbspec, 'TestbedVlanBase', 1)
    self.__vlan_allocator = resmgr.TestbedVlanAllocator(self.__vlan_base, self.curr_ts.GetDefaultNicMode())
    #TODO: merge single allocator into list below
    self.__multi_vlan_allocators = []
    self.__nextVlanAllocator = 0
    self.__image_manifest_file = self.curr_ts.GetImageManifestFile()
    self.curr_ts.DownloadReleaseImages()
    resp = None
    msg = self.__prepare_TestBedMsg(self.curr_ts)
    if not GlobalOptions.skip_setup:
        status = self.CleanupTestbed()
        if status != types.status.SUCCESS:
            return status
        try:
            self.__recover_testbed(self.__image_manifest_file)
        except:
            utils.LogException(Logger)
            Logger.error("Failed to recover testbed")
            Logger.debug(traceback.format_exc())
            return types.status.TESTBED_INIT_FAILURE
        if GlobalOptions.dryrun:
            status = types.status.SUCCESS
        resp = api.InitTestbed(msg)
    else:
        resp = api.GetTestbed(msg)
    if resp is None:
        Logger.error("Failed to initialize testbed: ")
        raise OfflineTestbedException
        #return types.status.FAILURE
    if not api.IsApiResponseOk(resp):
        Logger.error("Failed to initialize testbed: ")
        raise OfflineTestbedException
        #return types.status.FAILURE
    for instance, node in zip(self.__tbspec.Instances, resp.nodes):
        if getattr(instance, 'NodeOs', None) == "esx":
            instance.esx_ctrl_vm_ip = node.esx_ctrl_node_ip_address
    Logger.info("Testbed allocated vlans {}".format(resp.allocated_vlans))
    if resp.allocated_vlans:
        tbvlans = []
        for vlan in resp.allocated_vlans:
            tbvlans.append(vlan)
        self.__vlan_allocator = resmgr.TestbedVlanManager(tbvlans)
    self.__instpool = copy.deepcopy(self.__tbspec.Instances)
    return types.status.SUCCESS
def __recover_testbed(self, manifest_file, **kwargs):
    if GlobalOptions.dryrun or GlobalOptions.skip_setup:
        return
    proc_hdls = []
    logfiles = []
    logfile = ''
    naples_host_only = kwargs.get('naples_host_only', False)
    firmware_reimage_only = kwargs.get('firmware_reimage_only', False)
    driver_reimage_only = kwargs.get('driver_reimage_only', False)
    devices = kwargs.get('devices', {})
    devicePipeline = None
    #if [n for n in self.__tbspec.Instances if n.NodeOs in ["linux","freebsd"]]:
    #    self.__verifyImagePath()
    for instance in self.__tbspec.Instances:
        if devices:
            if instance.ID not in devices:
                Logger.debug("skipping recover testbed for device {0}".format(instance))
                continue
            else:
                devicePipeline = devices[instance.ID]['pipeline']
        cmd = ["timeout", "3000"]
        if self.__has_naples_device(instance):
            cmd.extend(["%s/iota/scripts/boot_naples_v2.py" % GlobalOptions.topdir])
            if self.curr_ts.GetDefaultNicMode() == "bitw":
                mem_size = None
                if GlobalOptions.pipeline in ["iris", "apollo", "artemis", "apulu"]:
                    mem_size = "8g"
                mem_size = getattr(instance, "NicMemorySize", mem_size)
                if mem_size is not None:
                    cmd.extend(["--naples-mem-size", mem_size])
            if firmware_reimage_only:
                cmd.extend(["--naples-only-setup"])
            elif driver_reimage_only:
                cmd.extend(["--only-init"])
            # XXX workaround: remove when host mgmt interface works for apulu
            if GlobalOptions.pipeline in ["apulu"]:
                cmd.extend(["--no-mgmt"])
            cmd.extend(["--testbed", GlobalOptions.testbed_json])
            cmd.extend(["--instance-name", instance.Name])
            # this will need to be changed when multi nics are supported.
            if devicePipeline:
                cmd.extend(["--pipeline", devicePipeline])
            else:
                cmd.extend(["--pipeline", GlobalOptions.pipeline])
            # cmd.extend(["--mnic-ip", instance.NicIntMgmtIP])
            nics = getattr(instance, "Nics", None)
            if nics is not None and len(nics) != 0:
                for nic in nics:
                    for port in getattr(nic, "Ports", []):
                        cmd.extend(["--mac-hint", port.MAC])
                        break
                    break
            cmd.extend(["--mode", "%s" % self.curr_ts.GetDefaultNicMode()])
            if GlobalOptions.skip_driver_install:
                cmd.extend(["--skip-driver-install"])
            if GlobalOptions.use_gold_firmware:
                cmd.extend(["--use-gold-firmware"])
            if GlobalOptions.fast_upgrade:
                cmd.extend(["--fast-upgrade"])
            nic_uuid = None
            if hasattr(instance.Resource, 'NICUuid'):
                nic_uuid = instance.Resource.NICUuid
            elif hasattr(instance, 'Nics'):
                # Look for oob_mnic0 in the first NIC. FIXME: revisit for dual-nic
                nic = instance.Nics[0]
                oob_ports = list(filter(lambda x: x.Name == "oob_mnic0", nic.Ports))
                if oob_ports:
                    nic_uuid = oob_ports[0].MAC
                else:
                    Logger.error("Missing NICUuid for %s" % (instance.Name))
            if nic_uuid:
                cmd.extend(["--uuid", "%s" % nic_uuid])
            cmd.extend(["--image-manifest", manifest_file])
            if self.curr_ts.ProvisionInfo():
                cmd.extend(["--provision-spec", self.curr_ts.ProvisionInfo()])
            if GlobalOptions.only_reboot:
                logfile = "%s/%s-%s-reboot.log" % (GlobalOptions.logdir, self.curr_ts.Name(), instance.Name)
                Logger.info("Rebooting Node %s (logfile = %s)" % (instance.Name, logfile))
                cmd.extend(["--only-mode-change"])
            elif GlobalOptions.skip_firmware_upgrade:
                logfile = "%s/%s-%s-reinit.log" % (GlobalOptions.logdir, self.curr_ts.Name(), instance.Name)
                Logger.info("Reiniting Node %s (logfile = %s)" % (instance.Name, logfile))
                cmd.extend(["--only-init"])
            else:
                logfile = "%s/%s-firmware-upgrade.log" % (GlobalOptions.logdir, instance.Name)
                Logger.info("Updating Firmware on %s (logfile = %s)" % (instance.Name, logfile))
            if GlobalOptions.netagent:
                cmd.extend(["--auto-discover-on-install"])
        else:
            if GlobalOptions.skip_firmware_upgrade or instance.Type == "vm" or naples_host_only:
                continue
            cmd.extend(["%s/iota/scripts/reboot_node.py" % GlobalOptions.topdir])
            cmd.extend(["--host-ip", instance.NodeMgmtIP])
            cmd.extend(["--cimc-ip", instance.NodeCimcIP])
            if hasattr(instance, "NodeCimcUsername"):
                cmd.extend(["--cimc-username", instance.NodeCimcUsername])
            cmd.extend(["--os", "%s" % instance.NodeOs])
            if instance.NodeOs == "esx":
                cmd.extend(["--host-username", self.__tbspec.Provision.Vars.EsxUsername])
                cmd.extend(["--host-password", self.__tbspec.Provision.Vars.EsxPassword])
            else:
                cmd.extend(["--host-username", self.__tbspec.Provision.Username])
                cmd.extend(["--host-password", self.__tbspec.Provision.Password])
            logfile = "%s/%s-%s-reboot.log" % (GlobalOptions.logdir, self.curr_ts.Name(), instance.Name)
            Logger.info("Rebooting Node %s (logfile = %s)" % (instance.Name, logfile))
        if logfile:
            logfiles.append(logfile)
        cmdstring = ""
        for c in cmd:
            cmdstring += "%s " % c
        Logger.info("Command = ", cmdstring)
        loghdl = open(logfile, "w")
        proc_hdl = subprocess.Popen(cmd, stdout=loghdl, stderr=subprocess.PIPE)
        proc_hdls.append(proc_hdl)
    result = 0
    starttime = time.asctime()
    try:
        for idx in range(len(proc_hdls)):
            proc_hdl = proc_hdls[idx]
            Logger.debug('Firmware upgrade started at time: {0}'.format(starttime))
            while proc_hdl.poll() is None:
                time.sleep(5)
            if proc_hdl.returncode != 0:
                result = proc_hdl.returncode
                _, err = proc_hdl.communicate()
                Logger.header("FIRMWARE UPGRADE / MODE CHANGE / REBOOT FAILED: LOGFILE = %s" % logfiles[idx])
                Logger.error("Firmware upgrade failed : %d " % result)
                Logger.error("Firmware upgrade failed : " + err.decode())
                if proc_hdl.returncode == 124:
                    Logger.error("TIMEOUT RUNNING BOOTNAPLES.PY. Verify host recovered from reboot/upgrade")
                    result = 124
            else:
                Logger.debug('Firmware upgrade finished at time: {0}'.format(time.asctime()))
                if len(logfiles[idx]) > 1:
                    self.__checkNodeInitLogs(logfiles[idx])
    except KeyboardInterrupt:
        result = 2
        err = "SIGINT detected. terminating boot_naples_v2 scripts"
        Logger.debug(err)
        for proc in proc_hdls:
            Logger.debug("sending SIGTERM to pid {0}".format(proc.pid))
            proc.terminate()
    if result != 0:
        sys.exit(result)
    self.__fw_upgrade_done = True
    if GlobalOptions.only_firmware_upgrade:
        Logger.info("Stopping after firmware upgrade based on cmdline options.")
        sys.exit(0)
    return
def __execute(self):
    final_result = types.status.SUCCESS
    for iter_data in self.__iters:
        self.__iterid += 1
        Logger.debug("Create new iter TestcaseData. ID:%d" % self.__iterid)
        iter_data.StartTime()
        api.ChangeDirectory("")
        instance_id = self.__get_instance_id(self.__iterid)
        iter_data.SetInstanceId(instance_id)
        iter_data.SetSelected(self.selected)
        iter_data.SetBundleStore(self.bundle_store)
        Logger.SetTestcase(instance_id)
        Logger.debug("Testcase Iteration directory = %s" % instance_id)
        ret = self.__mk_testcase_directory(instance_id)
        if ret != types.status.SUCCESS:
            iter_data.SetStatus(ret)
            iter_data.StopTime()
            return ret
        api.ChangeDirectory(instance_id)
        result = types.status.SUCCESS
        setup_result = self.__run_common_setups(iter_data)
        setup_result = loader.RunCallback(self.__tc, 'Setup', False, iter_data)
        if setup_result != types.status.SUCCESS:
            Logger.error("Setup callback failed, Cannot continue, switching to Teardown")
            loader.RunCallback(self.__tc, 'Teardown', False, iter_data)
            result = setup_result
        else:
            for task_name, bt in self.__background_tasks.items():
                if bt.IsAutoTriggerEnabled():
                    Logger.debug("Triggering BackgroundTask %s - auto trigger" % task_name)
                    bt_trigger_result = bt.StartTask(self.__tc, iter_data)
                    if bt_trigger_result != types.status.SUCCESS:
                        result = bt_trigger_result
                else:
                    Logger.debug("Skipping BackgroundTask %s - manual trigger" % task_name)
            trigger_result = self.__run_common_triggers(iter_data)
            trigger_result = loader.RunCallback(self.__tc, 'Trigger', True, iter_data)
            if trigger_result != types.status.SUCCESS:
                result = trigger_result
            for task_name, bt in self.__background_tasks.items():
                bt_stop_result = bt.StopTask('after_trigger')
            verify_result = loader.RunCallback(self.__tc, 'Verify', True, iter_data)
            if verify_result != types.status.SUCCESS:
                result = verify_result
            for task_name, bt in self.__background_tasks.items():
                bt_stop_result = bt.StopTask('verify')
            verify_result = self.__run_common_verifs(iter_data)
            if verify_result != types.status.SUCCESS:
                Logger.error("Common verifs failed.")
                result = verify_result
            self.iterRes.addResults(iter_data.GetInstanceId(), result)
            iter_data.SetStatus(result)
            for task_name, bt in self.__background_tasks.items():
                bt_stop_result = bt.StopTask('teardown')
                bt.CollectTask()
            teardown_result = self.__run_common_teardowns(iter_data)
            teardown_result = loader.RunCallback(self.__tc, 'Teardown', False, iter_data)
            if teardown_result != types.status.SUCCESS:
                Logger.error("Teardown callback failed.")
                result = teardown_result
        # If the tests have failed (at any stage), let's run debug actions.
        if result != types.status.SUCCESS:
            debug_result = self.__run_common_debugs(iter_data)
            if debug_result != types.status.SUCCESS:
                Logger.error(f"Common debugs failed, ret {debug_result}")
                result = debug_result
        iter_data.StopTime()
        if self.__aborted:
            Logger.info("Iteration Instance: %s FINAL RESULT = %d" % (instance_id, types.status.ABORTED))
            iter_data.SetStatus(types.status.ABORTED)
            return types.status.FAILURE
        if result != types.status.SUCCESS and GlobalOptions.no_keep_going:
            Logger.info("Iteration Instance: %s FINAL RESULT = %d" % (instance_id, result))
            Logger.error("Error: STOPPING ON FIRST FAILURE.")
            iter_data.SetStatus(result)
            raise OfflineTestbedException
            #return types.status.FAILURE
        if result == types.status.CRITICAL and GlobalOptions.stop_on_critical:
            Logger.info("Iteration Instance: %s FINAL RESULT = %d" % (instance_id, result))
            Logger.error("Error: STOPPING ON CRITICAL FAILURE.")
            iter_data.SetStatus(result)
            return types.status.CRITICAL
        if result == types.status.OFFLINE_TESTBED:
            Logger.info("Iteration Instance: %s FINAL RESULT = %d" % (instance_id, result))
            Logger.error("Error: STOPPING ON OFFLINE TESTBED REQUEST.")
            iter_data.SetStatus(result)
            raise OfflineTestbedException
        iter_data.SetStatus(result)
        Logger.info("Iteration Instance: %s FINAL RESULT = %d" % (instance_id, result))
        if result != types.status.SUCCESS or GlobalOptions.dryrun:
            if self.__ignored or result == types.status.IGNORED:
                Logger.info("Test failed, marking as ignored")
                Logger.info("Iteration Instance: %s FINAL RESULT = %d" % (instance_id, result))
                iter_data.SetStatus(types.status.IGNORED)
                final_result = types.status.SUCCESS
            else:
                final_result = result
    api.ChangeDirectory("")
    return final_result
def alarmHandler(sig, frame):
    Logger.error("testcase {0} timed out".format(func.__name__))
    raise TestcaseTimeoutException()
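# alarmHandler closes over `func`, so it presumably lives inside a SIGALRM-based
# timeout decorator. Below is a minimal sketch of such a decorator, assuming
# CPython on a Unix host; `timeout_decorator` and its structure are hypothetical,
# not the harness's actual implementation.
import signal
from functools import wraps

def timeout_decorator(seconds):
    def decorator(func):
        @wraps(func)
        def wrapped(*args, **kwargs):
            def alarmHandler(sig, frame):
                Logger.error("testcase {0} timed out".format(func.__name__))
                raise TestcaseTimeoutException()
            old_handler = signal.signal(signal.SIGALRM, alarmHandler)  # install handler
            signal.alarm(seconds)  # arm the one-shot timer
            try:
                return func(*args, **kwargs)
            finally:
                signal.alarm(0)  # disarm the timer
                signal.signal(signal.SIGALRM, old_handler)  # restore previous handler
        return wrapped
    return decorator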