def testSuccessfullRun(self):
    """The performance runner reports success when base and test runs agree."""
    with DisposableDirectory("testdir", True) as testDir, \
            DisposableDirectory("basedir") as baseDir:
        conf = performanceregressionconfig.PerformanceRegressionConfig({})
        runner = performancerunner.PerformanceRunner(
            lambda d, a: self.runfuncSucceedAlwaysSame(d, a),
            "basebin", "testbin", testDir.name(), conf)
        result, message = runner.runTestCase(
            "testname",
            self.makeCommand("infile.graph", "outfile.graph", "visibility"))
        self.assertTrue(result)
def testRunWithDiff(self):
    """Differing base/test outputs must fail with the diff message."""
    self.__outContent = "abc"
    with DisposableDirectory("testdir", True) as workDir:
        runner = depthmaprunner.DepthmapRegressionRunner(
            lambda d, a: self.runfuncDifferentResults(d, a),
            "basebin", "testbin", workDir.name())
        result, message = runner.runTestCase(
            "testname",
            self.makeCommand("infile.graph", "outfile.graph", "visibility"))
        self.assertFalse(result)
        self.assertEqual(message, "Test outputs differ")
def test_aggregation(self):
    """Aggregation yields per-action and total min/max/average rows."""
    with DisposableDirectory("testdir") as statsDir:
        os.makedirs(statsDir.name())
        nameTemplate = "test{0}{1}.csv"
        # One CSV per simulated run; durations chosen so min/max/avg differ.
        runContents = [
            "action,duration\nt1,3\nt2,2\n",
            "action,duration\nt1,1.5\nt2,2.5\n",
            "action,duration\nt1,2\nt2,1.5\n",
        ]
        for runIndex, content in enumerate(runContents):
            filePath = os.path.join(statsDir.name(),
                                    nameTemplate.format(runIndex, 0))
            with open(filePath, "w") as f:
                f.write(content)

        resFile = performancerunner.aggregatePerformanceStats(
            statsDir.name(), 3, 1, nameTemplate)

        # Expected rows in file order: (action, max, min, average).
        expectedRows = [
            ("t1", 3, 1.5, 6.5 / 3),
            ("t2", 2.5, 1.5, 2),
            ("total", 5, 3.5, 12.5 / 3),
        ]
        with open(resFile, "r") as f:
            reader = csv.DictReader(f)
            for action, maxVal, minVal, avgVal in expectedRows:
                line = next(reader)
                self.assertEqual(line["action"], action)
                self.assertEqual(float(line["max"]), maxVal)
                self.assertEqual(float(line["min"]), minVal)
                self.assertEqual(float(line["average"]), avgVal)
def test_RegressionTestRunnerOneRunFails(self):
    """run() reports overall failure when one test case fails."""
    with DisposableFile("testconfig.json") as configFile, \
            DisposableDirectory("testrundir") as runDir:
        writeConfig(configFile.filename(), runDir.name())
        runner = RegressionTestRunner(configFile.filename(),
                                      lambda w, a: self.runfunc(w, a))
        # runfunc fails on the run selected by this counter.
        self.counter = 2
        self.assertFalse(runner.run())
def test_RegressionTestRunnerAllGoesWell(self):
    """run() reports success when every test case passes."""
    with DisposableFile("testconfig.json") as configFile, \
            DisposableDirectory("testrundir") as runDir:
        writeConfig(configFile.filename(), runDir.name())
        runner = RegressionTestRunner(configFile.filename(),
                                      lambda w, a: self.runfunc(w, a))
        # -1 means runfunc never hits its failure trigger.
        self.counter = -1
        self.assertTrue(runner.run())
def testSuccessfullRun(self):
    """Identical base and test outputs are reported as a pass."""
    with DisposableDirectory("testdir", True) as workDir:
        runner = depthmaprunner.DepthmapRegressionRunner(
            lambda d, a: self.runfuncSucceedAlwaysSame(d, a),
            "basebin", "testbin", workDir.name())
        result, message = runner.runTestCase(
            "testname",
            self.makeCommand("infile.graph", "outfile.graph", "visibility"))
        self.assertTrue(result)
def test_capture_pass(self):
    """A passing child test process yields a truthy return code."""
    with DisposableDirectory("testdir_pass", True) as workDir:
        retcode, output = runhelpers.runExecutable(
            workDir.name(),
            [sys.executable, "../test_main.py", "-f", "../pass"])
        if not retcode:
            # Surface the child's output so an unexpected failure here
            # is diagnosable from the test log.
            print("printing the underlying test output to help diagnose the issue:")
            print(output)
        self.assertTrue(retcode)
def test_prepareDirectory(self):
    """prepareDirectory() recreates an existing directory empty."""
    with DisposableDirectory("testdir", True) as workDir:
        self.assertTrue(os.path.isdir(workDir.name()))
        # Plant a file, then verify prepareDirectory wipes it.
        testfile = os.path.join(workDir.name(), "testfile.txt")
        with open(testfile, "w") as f:
            f.write("123")
        self.assertTrue(os.path.exists(testfile))

        runhelpers.prepareDirectory(workDir.name())

        self.assertTrue(os.path.isdir(workDir.name()))
        self.assertFalse(os.path.exists(testfile))
def testTestRunFail(self):
    """A failing test-binary run is reported with the run-failure message."""
    with DisposableDirectory("testdir", True) as workDir:
        runner = depthmaprunner.DepthmapRegressionRunner(
            lambda d, a: self.runfuncFail(d, a, "testbin", True),
            "basebin", "testbin", workDir.name())
        result, message = runner.runTestCase(
            "testname",
            self.makeCommand("infile.graph", "outfile.graph", "visibility"))
        self.assertFalse(result)
        self.assertEqual(message, "Test run failed")
def testTestRunOutputMissing(self):
    """A run producing no output file fails with a path-specific message."""
    with DisposableDirectory("testdir", True) as workDir:
        runner = depthmaprunner.DepthmapRegressionRunner(
            lambda d, a: self.runfuncWriteNoFile(d, a, "testbin"),
            "basebin", "testbin", workDir.name())
        result, message = runner.runTestCase(
            "testname",
            self.makeCommand("infile.graph", "outfile.graph", "visibility"))
        self.assertFalse(result)
        missingPath = os.path.join(workDir.name(), "testname" + "_test",
                                   "outfile.graph")
        self.assertEqual(
            message,
            "Test output {0} does not exist".format(missingPath))
def test_runExecutableException(self):
    """An uncaught exception in the child gives a falsy code plus traceback.

    NOTE(review): the expected traceback text below is compared verbatim
    and is reproduced exactly as in the original test.
    """
    with DisposableDirectory("testdir") as workDir:
        runhelpers.prepareDirectory(workDir.name())
        retcode, output = runhelpers.runExecutable(
            workDir.name(), [sys.executable, "-c", "raise Exception()"])
        self.assertFalse(retcode)
        self.assertEqual(output, 'Traceback (most recent call last):\n File "<string>", line 1, in <module>\nException\n')
def test_runExecutableFail(self):
    """A non-zero child exit code yields a falsy code and no output."""
    with DisposableDirectory("testdir") as workDir:
        runhelpers.prepareDirectory(workDir.name())
        retcode, output = runhelpers.runExecutable(
            workDir.name(), [sys.executable, "-c", "exit(-1)"])
        self.assertFalse(retcode)
        self.assertEqual(output, "")
def test_runExecutable(self):
    """A successful child run yields a truthy code and captured stdout."""
    with DisposableDirectory("testdir", True) as workDir:
        retcode, output = runhelpers.runExecutable(
            workDir.name(), [sys.executable, "-c", "print('foo')"])
        self.assertTrue(retcode)
        self.assertEqual(output, "foo\n")
def test_cd(self):
    """cd() switches into the directory and restores the cwd on exit.

    Bug fix: the original final assertion passed d.name() as the third
    positional argument to assertEqual, which is its ``msg`` parameter —
    it read like a third compared value but only altered the failure
    message. The stray argument is removed so the assertion clearly
    compares the restored cwd against the starting path.
    """
    currentpath = os.getcwd()
    with DisposableDirectory("testdir", True) as d:
        with runhelpers.cd("testdir"):
            # Inside the context the cwd is the new directory.
            self.assertEqual(os.getcwd(),
                             os.path.join(currentpath, d.name()))
    # After the context exits the original cwd must be restored.
    self.assertEqual(os.getcwd(), currentpath)