def lineComparison(self, expLines, outLines, stream=""):
    same = True
    if self.ignoreEmptyLines:
        while expLines.count("") > 0:
            expLines.remove("")
        while outLines.count("") > 0:
            outLines.remove("")
    for line in difflib.unified_diff(expLines, outLines, stream, "expectation"):
        col = TermColor.White
        uline = unicode(line, encoding="utf-8", errors="ignore") if type(line) is not unicode else line
        if uline.startswith("+"):
            same = False
            col = TermColor.Green
        elif uline.startswith("-"):
            same = False
            col = TermColor.Red
        elif uline.startswith("@"):
            same = False
            col = TermColor.Cyan
        if self.diff:
            logger.log(TermColor.colorText(uline.rstrip(), col))
    return same
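# A standalone sketch (the names here are illustrative, not part of the
# runner) of the difflib behaviour lineComparison relies on: unified_diff
# yields lines prefixed with '+' (additions), '-' (removals) and '@@' (hunk
# headers), which the method colors green, red and cyan respectively.
import difflib

def demoUnifiedDiff():
    expected = ["alpha", "beta", "gamma"]
    actual = ["alpha", "BETA", "gamma"]
    for line in difflib.unified_diff(expected, actual, "stdout", "expectation", lineterm=""):
        print line  # '-beta' would be red, '+BETA' green, '@@ ... @@' cyan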
def __init__(self):
    """Initialises the test runner"""
    # Thread.__init__(self)
    logger.log("Welcome to pyTest Version 2")
    self.suite = "suite"
    """Test suite selector"""
    self.test = -1
    """Single test selector"""
    self.quiet = False
    """Definition of the program's verbosity"""
    self.mode = None
    """Mode for the test suite"""
    self.file = ""
    """Testbench file"""
    self.lengthOnly = False
    """Print only the number of tests"""
    self.infoOnly = False
    """Print only the test information"""
    self.DUT = None
    self.testCount = 0
    self.runsuite = None
    self.finished = None
    self.pipe = None
    self.out = None
    self.timeout = None
    self.linesep = os.linesep
    self.classpath = "."
def __init__(self, flush=False):
    """Initialises the test runner"""
    # Thread.__init__(self)
    logger.log(
        TermColor.colorText("NIGHTMARE I", TermColor.Red, style=TermColor.Bold) +
        TermColor.colorText("s of ", TermColor.White) +
        TermColor.colorText("G", TermColor.Red, style=TermColor.Bold) +
        TermColor.colorText("enerous ", TermColor.White) +
        TermColor.colorText("H", TermColor.Red, style=TermColor.Bold) +
        TermColor.colorText("elp when ", TermColor.White) +
        TermColor.colorText("T", TermColor.Red, style=TermColor.Bold) +
        TermColor.colorText("esting; ", TermColor.White) +
        TermColor.colorText("M", TermColor.Red, style=TermColor.Bold) +
        TermColor.colorText("ay ", TermColor.White) +
        TermColor.colorText("A", TermColor.Red, style=TermColor.Bold) +
        TermColor.colorText("rnold be ", TermColor.White) +
        TermColor.colorText("R", TermColor.Red, style=TermColor.Bold) +
        TermColor.colorText("emembered ", TermColor.White) +
        TermColor.colorText("E", TermColor.Red, style=TermColor.Bold) +
        TermColor.colorText("ternally", TermColor.White)
    )
    logger.log("Welcome to nightmare Version {}".format(version.Version))
    if flush:
        logger.flush(quiet=False)
    self.options = dict()
    self.testCount = 0
    self.runsuite = None
    self.finished = None
def loadSuite(self, fname=None):
    """Loads a python based suite from a file"""
    if fname is not None:
        self.options['bench'] = fname
    if self.options['bench'] is not None and self.options['bench'] != "" and os.path.exists(self.options['bench']):
        logger.log("\nReading testfile ...")
        if self.options['relative']:
            os.chdir(os.path.dirname(os.path.abspath(self.options['bench'])))
            logger.log("Current Working Dir is: {}".format(os.getcwd()))
            self.options['bench'] = os.path.basename(self.options['bench'])
        if self.options['arnold']:
            self.runsuite = self.loadArnold()
        else:
            self.runsuite = self.loadPython()
        if self.runsuite is not None:
            self.runsuite.options['commands'] = self.options['commands']
            self.runsuite.setAll(
                state=TestState.InfoOnly if self.options['info'] else TestState.Waiting,
                pipe=self.options['pipe'],
                out=self.options['output'],
                diff=self.options['diff'],
                timeout=self.options['timeout'],
                linesep=self.options['linesep'],
                ignoreEmptyLines=self.options['ignoreEmptyLines'],
            )
            self.testCount = len(self.runsuite.testList)
            logger.log("I have loaded {} Testcase{}".format(self.testCount, "" if self.testCount == 1 else "s"))
        else:
            logger.log("Sorry, but I failed to load the requested suite")
    else:
        logger.log("Sorry, but I couldn't find the file '{}'".format(self.options['bench']))
    logger.flush(self.options['quiet'])
    return self.runsuite
def run(self):
    """Runs the test"""
    if self.state == TestState.Disabled:
        return TestState.Disabled
    if self.state == TestState.InfoOnly:
        if self.descr is None:
            print "{}".format(self.name)
        else:
            print "{} - {}".format(self.name, self.descr)
        return TestState.InfoOnly
    if self.name == "Badword":
        # Bad word detection mode:
        #   - the description holds a file-matching pattern
        #   - the directory of the DUT is searched recursively
        #   - the command is treated as a list of bad-word regexes
        words = map(lambda s: re.compile(s), self.cmd)
        searchpath = os.path.abspath(os.path.dirname(self.DUT))
        searchpattern = re.compile(self.descr)
        hits = []
        for dirpath, dirnames, filenames in os.walk(searchpath):
            for file in filenames:
                if searchpattern.match(file) is not None:
                    fname = os.path.join(dirpath, file)
                    fHnd = open(fname, "rb")
                    for nr, line in enumerate(fHnd.readlines()):
                        for word in words:
                            if word.search(line) is not None:
                                hits.append((os.path.relpath(fname), nr, line.rstrip(), word.pattern))
                    fHnd.close()
        if len(hits) > 0:
            for file, lineno, text, pattern in hits:
                logger.log("{} {}[{}]: '{}' matches '{}'".format(
                    TestState.toString(TestState.BadWord), file, lineno, text, pattern))
            self.state = TestState.BadWord
        else:
            self.state = TestState.Clean
        return self.state
    if self.cmd is not None:
        if isinstance(self.cmd, list):
            for cmd_ in self.cmd:
                self.runCmd(cmd_)
        else:
            self.runCmd(self.cmd)
    else:
        self.state = TestState.Error
    return self.state
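# A hypothetical bench-file entry for the bad-word scan above; it relies on
# loadPython (below) injecting 'Test' into the bench file's globals. The
# magic name "Badword" switches run() into scan mode: the description holds
# the file pattern, the command the list of bad-word regexes. The keyword
# names are inferred from the attributes run() reads (name/descr/cmd) and
# may not match the real Test constructor.
badwordScan = Test(
    name="Badword",            # magic name that triggers bad word detection
    descr=r".*\.c$",           # regex: which files near the DUT to scan
    cmd=[r"goto\s", r"TODO"],  # regexes treated as bad words
)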
def run(self):
    """Thread run function"""
    if self.options['length']:
        print len(self.runsuite.getTests())
    elif len(self.options['save']) == 1:
        logger.log("Saving Suite to {}".format(self.options['save'][0]))
        self.saveToFile(self.options['save'][0])
    else:
        logger.flush(self.options['quiet'])
        self.runsuite.setMode(self.options['mode'])
        for test in self.runsuite.run(self.options['quiet'], tests=self.options['test']):
            yield test
        self.runsuite.stats(self.options['quiet'])
        if self.finished is not None:
            self.finished()
        logger.flush(self.options['quiet'])
    raise StopIteration()
def loadPython(self):
    glb = {
        "__builtins__": __builtins__,
        # External / standard libraries
        "parser": pyparsing,
        "os": os,
        "regex": re,
        "math": math,
        "itertools": itertools,
        "struct": struct,
        "collections": collections,
        "fractions": fractions,
        # nightmare specific things
        "Test": Test,
        "Suite": TestSuite,
        "Mode": TestSuiteMode,
        "State": TestState,
        "Expectation": Expectation,
        "ExpectFile": ExpectFile,
        "Stringifier": Stringifier,
        "StringifiedFile": StringifiedFile,
        "CompareFiles": CompareFiles,
        # Helper functions
        "readFile": lambda fname: open(fname).read().rstrip() if os.path.exists(fname) else "File not found",
    }
    ctx = {self.options['suite']: None, "DUT": None}
    execfile(self.options['bench'], glb, ctx)
    suite = None  # ensure a defined return value on every path
    if self.options['suite'] in ctx:
        if 'DUT' in ctx and ctx['DUT'] is not None and self.options['dut'] is None:
            self.setDUT(ctx['DUT'])
        if ctx[self.options['suite']] is not None:
            if ctx[self.options['suite']].__class__ == TestSuite:
                suite = ctx[self.options['suite']]
                if suite.DUT is None:
                    suite.setDUT(self.options['dut'])
                if self.options['mode'] is None:
                    self.options['mode'] = suite.mode
                elif suite.mode is None:
                    suite.mode = self.options['mode']
            else:
                suite = TestSuite(*ctx[self.options['suite']], **{'DUT': self.options['dut'], 'mode': self.options['mode']})
        else:
            logger.log("Sorry, but I can't find any tests inside the suite '{}'".format(self.options['suite']))
    else:
        logger.log("Sorry, but there was no test-suite in the file")
    return suite
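# A minimal testbench file of the kind loadPython consumes, given as a
# hedged sketch: the loader executes the bench with Test/Suite/Mode/State
# and friends injected as globals, then looks up a variable named after
# --suite (default 'suite') and an optional DUT. The Test keyword names used
# here are assumptions, not verified against the real constructor.
DUT = "./myprogram"  # only picked up if no --dut was passed on the command line

suite = [
    # a plain list works too; loadPython wraps it via TestSuite(*suite)
    Test(name="smoke test",                   # kwarg names assumed
         descr="program prints its version",
         cmd="./myprogram --version"),
]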
def loadArnold(self):
    if syntax is not None:
        logger.log("\t...using Arnold-Mode")
        syn = syntax()
        fileHnd = open(self.options['bench'])
        content = []
        for line in fileHnd:
            if not line.startswith("#") and not line.strip() == "":
                content.append(line.replace("ä", "ae").replace("Ä", "Ae")
                                   .replace("ö", "oe").replace("Ö", "Oe")
                                   .replace("ü", "ue").replace("Ü", "Ue")
                                   .replace("ß", "ss"))
        s = "".join(content)
        ast = syn.parseString(s)
        testList = buildTestList(ast)
        suite = TestSuite(*testList)
        suite.setDUT(self.options['dut'])
    else:
        logger.log("\t... could not init arnold mode due to missing pyparsing package")
        suite = None
    return suite
def loadSuite(self, fname=None):
    """Loads a python based suite from a file"""
    if fname is not None:
        self.file = fname
    logger.log("\nReading testfile ...")
    if self.file is not None and self.file != "" and os.path.exists(self.file):
        glb = {"__builtins__": __builtins__, "math": math, "Test": Test,
               "Suite": TestSuite, "Mode": TestSuiteMode, "State": TestState}
        ctx = {self.suite: None, "DUT": None}
        execfile(self.file, glb, ctx)
        if self.suite in ctx:
            self.runsuite = None
            if ctx[self.suite] is not None:
                if ctx[self.suite].__class__ == TestSuite:
                    self.runsuite = ctx[self.suite]
                    self.runsuite.setDUT(self.DUT)
                    if self.mode is None:
                        self.mode = self.runsuite.mode
                    elif self.runsuite.mode is None:
                        self.runsuite.mode = self.mode
                else:
                    self.runsuite = TestSuite(*ctx[self.suite], **{'DUT': self.DUT, 'mode': self.mode})
                self.runsuite.setAll(
                    state=TestState.InfoOnly if self.infoOnly else TestState.Waiting,
                    pipe=self.pipe,
                    out=self.out,
                    timeout=self.timeout,
                    linesep=self.linesep,
                )
                self.testCount = len(self.runsuite.testList)
                if 'DUT' in ctx and ctx['DUT'] is not None and self.DUT is None:
                    self.setDUT(ctx['DUT'])
                logger.log("I could load {} testcase{}".format(self.testCount, "" if self.testCount == 1 else "s"))
            else:
                logger.log("Sorry, but I can't find any tests inside the suite '{}'".format(self.suite))
        else:
            logger.log("Sorry, but there was no test-suite in the file")
    else:
        logger.log("Sorry, but I couldn't find the file '{}'".format(self.file))
    logger.flush(self.quiet)
    return self.runsuite
def runAll(self, quiet=False):
    """
    Runs the whole suite of tests

    @type quiet: Boolean
    @param quiet: Flag, passed along to the logger
    """
    self.success = 0
    self.failed = 0
    self.count = 0
    self.error = 0
    self.lastResult = TestState.Waiting
    for t in self.testList:
        self.count = self.count + 1
        self.lastResult = t.run()
        if t.descr is not None:
            logger.log("Test[{:02}] {} - {}: {}".format(self.count, t.name, t.descr, TestState.toString(t.state)))
        else:
            logger.log("Test[{:02}] {}: {}".format(self.count, t.name, TestState.toString(t.state)))
        logger.flush(quiet)
        if self.lastResult == TestState.Success:
            self.success += 1
        elif self.lastResult == TestState.Fail:
            self.failed += 1
        elif self.lastResult == TestState.Error:
            self.error += 1
        elif self.lastResult == TestState.Timeout:
            self.timedout += 1
        elif self.lastResult == TestState.SegFault:
            self.segfaults += 1
        elif self.lastResult == TestState.Assertion:
            self.assertions += 1
        yield t
        if self.lastResult != TestState.Disabled:
            if (self.mode == TestSuiteMode.BreakOnFail) and (self.lastResult != TestState.Success):
                break
            if (self.mode == TestSuiteMode.BreakOnError) and (self.lastResult == TestState.Error):
                break
    raise StopIteration()
def run(self, quiet=False, tests=[]):
    """
    Runs the whole suite of tests

    @type quiet: Boolean
    @param quiet: Flag, passed along to the logger
    """
    self.success = 0
    self.failed = 0
    self.count = 0
    self.error = 0
    self.lastResult = TestState.Waiting
    for t in self._getTests(tests):
        self.lastResult = t.run()
        if t.descr is not None:
            logger.log("{}[{: 03}] {} - {}: {}".format(
                TermColor.colorText("Test", TermColor.Purple),
                self.count, t.name, t.descr, TestState.toString(t.state)))
        else:
            logger.log("{}[{: 03}] {}: {}".format(
                TermColor.colorText("Test", TermColor.Purple),
                self.count, t.name, TestState.toString(t.state)))
        if self.options['commands']:
            logger.log(" --> {}".format(t.cmd), showTime=False)
        logger.flush(quiet)
        if self.lastResult in [TestState.Success, TestState.Clean]:
            self.success += 1
        elif self.lastResult == TestState.Fail:
            self.failed += 1
        elif self.lastResult == TestState.Error:
            self.error += 1
        elif self.lastResult == TestState.Timeout:
            self.timedout += 1
        elif self.lastResult == TestState.SegFault:
            self.segfaults += 1
        elif self.lastResult == TestState.Assertion:
            self.assertions += 1
        self.count = self.count + 1
        yield t
        if self.lastResult != TestState.Disabled:
            if (self.mode == TestSuiteMode.BreakOnFail) and (self.lastResult != TestState.Success):
                break
            if (self.mode == TestSuiteMode.BreakOnError) and (self.lastResult == TestState.Error):
                break
    raise StopIteration()
def runOne(self, n):
    """
    Run one single test

    @type n: int
    @param n: Number of the test
    """
    if n < len(self):
        t = self.testList[n]
        self.lastResult = t.run()
        if t.descr is not None:
            logger.log("Test[{:02}] {} - {}: {}".format(n, t.name, t.descr, TestState.toString(t.state)))
        else:
            logger.log("Test[{:02}] {}: {}".format(n, t.name, TestState.toString(t.state)))
        return t
    else:
        logger.log("\tSorry but there is no test #{}".format(n))
        self.lastResult = TestState.Error
        return None
def stats(self, quiet=False):
    """
    Generate and write the stats

    @type quiet: Boolean
    @param quiet: Flag, passed along to the logger
    """
    logger.log("I ran {} out of {} tests in total".format(self.count, len(self.testList)))
    logger.log(TermColor.colorText("\tSuccess: {}".format(self.success), TermColor.Green))
    if self.failed > 0:
        logger.log(TermColor.colorText("\tFailed: {}".format(self.failed), TermColor.Red))
    if self.error > 0:
        logger.log(TermColor.colorText("\tErrors: {}".format(self.error), TermColor.Yellow))
    if self.assertions > 0:
        logger.log(TermColor.colorText("\tAssertions: {}".format(self.assertions), TermColor.Yellow))
    if self.segfaults > 0:
        logger.log(TermColor.colorText("\tSegFaults: {}".format(self.segfaults), TermColor.Yellow))
    if self.timedout > 0:
        logger.log(TermColor.colorText("\tTimeouts: {}".format(self.timedout), TermColor.Purple))
    if self.error == 0 and self.failed == 0 and self.timedout == 0:
        logger.log("\tCongratulations, you passed all tests!")
    return self.calcRate()
def stats(self, quiet=False):
    """
    Generate and write the stats

    @type quiet: Boolean
    @param quiet: Flag, passed along to the logger
    """
    if self.lastResult != TestState.InfoOnly:
        logger.log("I ran {} out of {} tests in total".format(self.count, len(self.testList)))
        fails = self.count - self.success
        logger.log(TermColor.colorText("\tSuccess: {}".format(self.success), TermColor.Green))
        if self.failed > 0:
            logger.log(TermColor.colorText("\tFailed: {}".format(self.failed), TermColor.Red))
        if self.error > 0:
            logger.log(TermColor.colorText("\tErrors: {}".format(self.error), TermColor.Yellow))
        if self.assertions > 0:
            logger.log(TermColor.colorText("\tAssertions: {}".format(self.assertions), TermColor.Yellow))
        if self.segfaults > 0:
            logger.log(TermColor.colorText("\tSegFaults: {}".format(self.segfaults), TermColor.Yellow))
        if self.timedout > 0:
            logger.log(TermColor.colorText("\tTimeouts: {}".format(self.timedout), TermColor.Purple))
        # A little bit of fun
        if self.success == len(self) and self.count > 3:
            logger.log("\tCongratulations, you passed all tests!")
            logger.log("\t`grep` yourself a refreshing " + TermColor.colorText("Beer", TermColor.Yellow, style=TermColor.Bold))
            logger.log("")
            logger.log("    \033[1;37m,%%%%.\033[0m")
            logger.log("    \033[1;37mi\033[36m====\033[1;37mi\033[1;36m_\033[0m")
            logger.log("    \033[1;36m|\033[1;33m####\033[36m| |\033[0m")
            logger.log("    \033[1;36m|\033[1;33m####\033[36m|-'\033[0m")
            logger.log("    \033[1;36m`-==-'\033[0m")
            logger.log("")
        elif self.success == 0 and self.count > 3 and self.failed > 0:
            logger.log("\tWhat is wrong with you, not even a single test?")
        elif fails > 0 and self.count > 3:
            # force float division; in Python 2 int / int floors to 0
            if float(self.assertions) / fails > 0.6:
                logger.log("\tYou do realise that assertions do not replace error handling?")
            elif float(self.assertions) / fails > 0.3:
                logger.log("\tWe're a bit lazy with calling environments, aren't we?")
            if float(self.segfaults) / fails > 0.6:
                logger.log("\tMay the CPU-Gods have mercy on that poor memory management unit's soul!")
            elif float(self.segfaults) / fails > 0.3:
                logger.log("\tYou know, memory garbage doesn't collect itself?!")
    return self.calcRate()
def parseArgv(self):
    """Parses the argument vector"""
    argv = sys.argv
    for arg in argv:
        if arg == "-c":
            logger.log("\tI'm running in continuous mode now")
            self.mode = TestSuiteMode.Continuous
        elif arg == "-e":
            logger.log("\tI'm running in continuous mode now, but will halt if an error occurs")
            self.mode = TestSuiteMode.BreakOnError
        elif arg == "-q":
            self.quiet = True
        elif arg == "-v":
            self.quiet = False
        elif arg.startswith("--suite="):
            self.suite = arg[8:]
            logger.log("\tI'm using the testsuite '{}'".format(self.suite))
        elif arg == "--no-color":
            TermColor.active = False
        elif arg.startswith("--test="):
            self.test = int(arg[7:])
            logger.log("\tI'm only running test #{}".format(self.test))
        elif arg == "-l":
            self.lengthOnly = True
            logger.log("\tI will only print the number of tests")
        elif arg.startswith("--bench="):
            self.file = str(arg[8:])
            logger.log("\tI'm using testbench '{}'".format(self.file))
        elif arg.startswith("--timeout="):
            self.timeout = int(arg[10:])
            logger.log("\tSetting global timeout to {}".format(self.timeout))
        elif arg.startswith("--dut=") or arg.startswith("--DUT="):
            self.setDUT(arg[6:])
            logger.log("\tDevice under Test is: {}".format(self.DUT))
        elif arg.startswith("--info-only"):
            self.infoOnly = True
            self.mode = TestSuiteMode.Continuous
            logger.log("\tI will only print the test information.")
        elif arg.startswith("--crln"):
            self.linesep = "\r\n"
        elif arg.startswith("--ln"):
            self.linesep = "\n"
        elif arg.startswith("--cr"):
            self.linesep = "\r"
        elif arg == "-p":
            self.pipe = True
            logger.log("\tI will pipe all tests outputs to their respective streams")
        elif arg == "-o":
            self.out = True
            logger.log("\tI will pipe failed tests outputs to their respective streams")
def parseArgv(self):
    """Parses the argument vector"""
    args = argparse.ArgumentParser(description="A test tool for non-interactive commandline programs")
    group = args.add_argument_group("Test selection")
    group.add_argument("--bench", action="store", nargs=1, help="File which contains the testbench.")
    group.add_argument("--suite", action="store", nargs=1, help="Use testsuite SUITE from the testbench.", metavar="SUITE")
    group.add_argument("--dut", "--DUT", action="store", nargs=1, help="Set the device under test.")
    group.add_argument("--test", action="store", nargs="+", type=int, help="Run only the specified tests.")
    group.add_argument("--timeout", action="store", nargs=1, type=float, help="Set a global timeout for all tests.")
    group.add_argument("--arnold", "-a", action="store_true", default=False, dest="arnold", help="Use the arnold mode (requires pyparsing module).")
    group.add_argument("--save", action="store", nargs=1, help="Save the testsuite as FILE.", metavar="FILE")
    group = args.add_argument_group("Output Control")
    group.add_argument("--limit", action="store", nargs=1, type=int, default=2000, help="Set a (soft) limit for the number of bytes after which output piping will be stopped. Checks are made after each line.")
    group.add_argument("--quiet", "-q", action="store_const", const=True, default=False, dest="quiet", help="Quiet mode. There will be no output except results.")
    group.add_argument("--verbose", "-v", action="store_const", const=False, dest="quiet", help="Verbose mode. The program gets chatty (default).")
    group.add_argument("--commands", "-C", action="store_true", default=False, dest="commands", help="Show the command executed for each test.")
    group.add_argument("--length", "-l", action="store_true", default=False, dest="length", help="Print only the number of tests in the suite.")
    group.add_argument("--info-only", "-i", action="store_true", default=False, dest="info", help="Display only test information, but don't run them.")
    group.add_argument("--pipe-streams", "-p", action="store_true", default=None, dest="pipe", help="Redirect DUT output to their respective streams.")
    group.add_argument("--output-fails", "-o", action="store_true", default=None, dest="output", help="Redirect DUT output from failed tests to their respective streams.")
    group.add_argument("--unify-fails", "-u", action="store_true", default=None, dest="diff", help="Display the unified diff of output and expectation.")
    group.add_argument("--no-color", action="store_false", default=True, dest="color", help="Don't use any colored output.")
    group = args.add_argument_group("Test Flow")
    group.add_argument("--continue", "-c", action="store_const", const=TestSuiteMode.Continuous, dest="mode", help="Continuous mode (don't halt on failed tests).")
    group.add_argument("--error", "-e", action="store_const", const=TestSuiteMode.BreakOnError, dest="mode", help="Same as '-c', but will halt if an error occurs.")
    group.add_argument("--ignoreEmptyLines", "-L", action="store_true", default=None, dest="ignoreEmptyLines", help="Ignore empty lines when comparing output.")
    group.add_argument("--relative", "-r", action="store_true", default=False, dest="relative", help="Use a path relative to the testbench path.")
    group.add_argument("--cr", action="store_const", const="\r", dest="linesep", help="Force the line separation character (Mac OS).")
    group.add_argument("--ln", action="store_const", const="\n", dest="linesep", help="Force the line separation character (Unix / Mac OS X).")
    group.add_argument("--crln", action="store_const", const="\r\n", dest="linesep", help="Force the line separation character (Windows).")
    args.add_argument("--gui", action="store_true", default=False, dest="gui", help="Use the GUI (experimental and unstable).")
    args.add_argument("--no-gui", action="store_false", dest="gui", help="Don't use the GUI.")
    args.add_argument("--version", action="store_const", const=True, default=False, help="Display version information.")
    args.set_defaults(linesep=os.linesep, bench=[""], save=[], suite=["suite"], dut=[None], timeout=[None], test=[])
    self.options.update(vars(args.parse_args()))
    self.options['bench'] = self.options['bench'][0]
    self.options['suite'] = self.options['suite'][0]
    self.options['dut'] = self.options['dut'][0]
    self.options['timeout'] = self.options['timeout'][0]
    logMessages = [
        ('mode', lambda v: "I'm running in continuous mode now" if v == TestSuiteMode.Continuous
                 else "I'm running in continuous mode now, but will halt if an error occurs" if v == TestSuiteMode.BreakOnError
                 else "I will halt on first fail."),
        ('suite', lambda v: "I'm using the testsuite '{}'".format(v)),
        ('test', lambda v: "I'm only running test {}".format(v) if len(v) > 0 else ""),
        ('bench', lambda v: "I'm using testbench '{}'".format(v)),
        ('timeout', lambda v: "Setting global timeout to {}".format(v)),
        ('dut', lambda v: "Device under Test is: {}".format(v)),
        ('commands', lambda v: "I will print every command I'll execute." if v else ""),
        ('length', lambda v: "I will only print the number of tests" if v else ""),
        ('info', lambda v: "I will only print the test information." if v else ""),
        ('pipe', lambda v: "I will pipe all tests outputs to their respective streams" if v else ""),
        ('output', lambda v: "I will pipe failed tests outputs to their respective streams" if v else ""),
        ('diff', lambda v: "I will show the differences in output and expectations" if v else ""),
    ]
    for option, msgFunc in logMessages:
        if self.options[option] is not None:
            msg = msgFunc(self.options[option])
            if len(msg) > 0:
                logger.log("\t{}".format(msg))
    logger.flush(self.options['quiet'])