if not nm.startswith("test_") and not nm in tested_examples: suite.addTest(test_example(f, nm, self._excluded_modules)) return suite def parse_options(): parser = OptionParser() parser.add_option("--results", dest="results", type="string", default="-", help="write details of the test failures to this file") parser.add_option("--excluded", dest="excluded", type="string", default="-", help="list of modules to exclude") return parser.parse_args() if __name__ == "__main__": opts, args = parse_options() if opts.excluded: excluded_modules = opts.excluded.split(":") else: excluded_modules=[] files = args sys.argv=[sys.argv[0], "-v"] r = RegressionTest(files, excluded_modules) main=unittest.main(defaultTest="r", testRunner=IMP.test._TestRunner) if opts.results: out= file(opts.results, "w") if len(main.result.errors) > 0: print >> out, "Errors:",", ".join([main.result.getDescription(r[0]) for r in main.result.errors]) if len(main.result.skipped) > 0: print >> out, "Skips:",", ".join([main.result.getDescription(r[0]) for r in main.result.skipped]) sys.exit(not main.result.wasSuccessful())
os.chdir("%(dir)s") cmd="python %(script)s %(args)s" app=self.run_script("%(script)s", %(args)s) out, err = app.communicate() self.assertApplicationExitedCleanly(app.returncode, err) return""" % {"shortname":shortname, "dir":dir, "script":os.path.join(dir,filename), "args":str(args)}) return RunExample("test_run_example") global files global excluded_modules global working_dir def regressionTest(): modobjs = [] suite = unittest.TestSuite() # For all examples that don't have an explicit test to exercise them, # just run them to make sure they don't crash for i,f in enumerate(files): nm= os.path.split(f)[1] dir= os.path.split(f)[0] suite.addTest(_test_example(dir, working_dir, i, f, nm)) return suite if __name__ == "__main__": files = sys.argv[2:] working_dir= sys.argv[1] sys.argv=[sys.argv[0], "-v"] unittest.main(defaultTest="regressionTest", testRunner=IMP.test._TestRunner)
# Some test cases don't clean up memory properly when run as part
# of run-all-tests, so suppress the leaked-object report in non-fast builds.
if IMP.build != "fast":
    IMP.base.set_show_leaked_objects(False)

if __name__ == "__main__":
    opts, args = parse_options()
    covtest = None
    if opts.pycoverage != 'no':
        if coverage:
            covtest = CoverageTester(opts)
        else:
            # Coverage was asked for but the module is missing/too old;
            # warn and continue without it rather than failing the run.
            print >> sys.stderr, "Python coverage was requested but a " \
                                 + "new enough 'coverage' module could not " \
                                 + "be found on your system"
    import_imp_modules(covtest)
    r = RegressionTest(args)
    # Hide our command line options from any module we import
    sys.argv = [sys.argv[0]]
    # exit=False so control returns here for the results file and the
    # coverage report below.
    main = unittest.main(defaultTest="r", testRunner=IMP.test._TestRunner,
                         argv=[sys.argv[0], "-v"], exit=False)
    if opts.results:
        # open() rather than the Python-2-only file(); close deterministically.
        out = open(opts.results, "w")
        try:
            # Failures are reported together with errors.
            if len(main.result.errors + main.result.failures) > 0:
                print >> out, "Errors:", ", ".join(
                    [main.result.getDescription(e[0])
                     for e in main.result.errors + main.result.failures])
            if len(main.result.skipped) > 0:
                print >> out, "Skips:", ", ".join(
                    [main.result.getDescription(s[0])
                     for s in main.result.skipped])
        finally:
            out.close()
    if covtest:
        covtest.report()
    # Exit status 0 only if every test passed.
    sys.exit(not main.result.wasSuccessful())