def run_tests(modnames, other_files, use_coverage, specific_mods):
    """Run tests on the specified modules.

    Returns a list of modules which were tested.
    """
    # Check command line for overrides to module names
    if specific_mods:
        newnames = []
        for arg in specific_mods:
            if arg in modnames:
                newnames.append(arg)
            else:
                print "Module `%s' not known" % arg
                sys.exit(1)
        modnames = newnames

    modules, suite = make_suite(modnames, other_files, use_coverage,
                                specific_mods)

    # Now, run everything.
    runner = unittest.TextTestRunner()
    runner.run(suite)

    if use_coverage:
        # Finished run - stop the coverage tests
        import coverage
        coverage.stop()

    return modules

def stop(stream, packages, target, collect=False):
    """Stop coverage"""
    # Call stop
    coverage.stop()
    # Collect if asked
    if collect:
        coverage.the_coverage.collect()
    # Get the file descriptor for the report
    fd = StringIO.StringIO()
    # Get the report
    coverage.report(packages, file=fd)
    # Flush
    fd.flush()
    # Write on stream
    stream.write(fd.getvalue())
    # Write in target
    ntools.save(fd.getvalue(), os.path.join(target, "coverage.dat"),
                binary=False)
    # Get the sources
    sources = parse(fd.getvalue())
    # Close the file descriptor
    fd.close()
    # Annotate source files
    annotate(packages, target, ignore_errors=True)
    # Create report
    report(target, sources)

def main():
    from optparse import OptionParser
    usage = ('Usage: %prog [option] [modules to be tested]\n'
             'Module names have to be given in the form utils.mail (without '
             'zine.)\nIf no module names are given, all tests are run')
    parser = OptionParser(usage=usage)
    parser.add_option('-c', '--coverage', action='store_true',
                      dest='coverage',
                      help='show coverage information (slow!)')
    parser.add_option('-v', '--verbose', action='store_true',
                      dest='verbose', default=False,
                      help='show which tests are run')

    options, args = parser.parse_args(sys.argv[1:])
    modnames = ['zine.' + modname for modname in args]

    if options.coverage:
        if coverage is not None:
            use_coverage = True
        else:
            sys.stderr.write("coverage information requires Ned Batchelder's "
                             "coverage.py to be installed!\n")
            sys.exit(1)
    else:
        use_coverage = False

    if use_coverage:
        coverage.erase()
        coverage.start()
        s, covermods = suite(modnames, True)
    else:
        s = suite(modnames)

    TextTestRunner(verbosity=options.verbose + 1).run(s)

    if use_coverage:
        coverage.stop()
        print '\n\n' + '=' * 25 + ' coverage information ' + '=' * 25
        coverage.report(covermods)

def run_tests(self, test_labels, verbosity=1, interactive=True,
              extra_tests=[]):
    coveragemodules = getattr(settings, 'COVERAGE_MODULES', [])
    if coveragemodules:
        coverage.start()

    self.setup_test_environment()
    suite = self.build_suite(test_labels, extra_tests)
    old_config = self.setup_databases()
    result = self.run_suite(suite)

    if coveragemodules:
        coverage.stop()
        coveragedir = getattr(settings, 'COVERAGE_DIR', './build/coverage')
        if not os.path.exists(coveragedir):
            os.makedirs(coveragedir)
        modules = []
        for module_string in coveragemodules:
            module = __import__(module_string, globals(), locals(), [""])
            modules.append(module)
            f, s, m, mf = coverage.analysis(module)
            fp = file(os.path.join(coveragedir, module_string + ".html"), "wb")
            colorize.colorize_file(f, outstream=fp, not_covered=mf)
            fp.close()
        coverage.report(modules, show_missing=0)
        coverage.erase()

    self.teardown_databases(old_config)
    self.teardown_test_environment()
    return len(result.failures) + len(result.errors)

def run(self):
    testfiles = []
    testdirs = ["koan"]

    for d in testdirs:
        testdir = os.path.join(os.getcwd(), "tests", d)

        for t in _glob.glob(os.path.join(testdir, '*.py')):
            if t.endswith('__init__.py'):
                continue
            testfile = '.'.join(['tests', d,
                                 os.path.splitext(os.path.basename(t))[0]])
            testfiles.append(testfile)

    tests = unittest.TestLoader().loadTestsFromNames(testfiles)
    runner = unittest.TextTestRunner(verbosity=1)

    if coverage:
        coverage.erase()
        coverage.start()

    result = runner.run(tests)

    if coverage:
        coverage.stop()

    sys.exit(int(bool(len(result.failures) > 0 or len(result.errors) > 0)))

def report_coverage(packages):
    test_re = re.compile('.+test[^%s.]*\..+' % os.sep)
    coverage.stop()
    files = set()
    for name in sys.modules:
        mod = sys.modules.get(name)
        if mod is None:
            continue
        elif not is_in_packages(name, packages):
            continue
        elif is_in_packages(name, ['django']):
            continue
        filename = mod.__file__.replace('.pyc', '.py')
        if test_re.match(filename):
            continue
        st = os.stat(filename)
        if st.st_size > 1:
            files.add(filename)
    if files:
        coverage.report(list(files))
    coverage.erase()

def _run_with_coverage(self):
    import coverage
    coverage.use_cache(False)
    coverage.start()
    try:
        self._run_tests()
    finally:
        coverage.stop()

    modules = [m for _, m in sys.modules.items()
               if m is not None and hasattr(m, '__file__')
               and os.path.splitext(m.__file__)[-1] in ('.py', '.pyc')]

    # Generate summary file
    buf = StringIO()
    coverage.report(modules, file=buf)
    buf.seek(0)
    fileobj = open(self.coverage_summary, 'w')
    try:
        filter_coverage(buf, fileobj)
    finally:
        fileobj.close()

    if self.coverage_dir:
        if not os.path.exists(self.coverage_dir):
            os.makedirs(self.coverage_dir)
        coverage.annotate(modules, directory=self.coverage_dir,
                          ignore_errors=True)

def doctest(self):
    import doctest, coverage
    # (f,t) = doctest.testmod(eval('isconf.Kernel'))
    # doctest.master.summarize()
    # sys.exit(f)
    modules = []
    olddir = os.getcwd()
    os.chdir('lib/python')
    os.path.walk('.', getmods, modules)
    os.chdir(olddir)
    print modules
    # modules=[rpc822]
    fail = 0
    total = 0
    coverage.erase()
    coverage.start()
    for mod in modules:
        (f, t) = doctest.testmod(mod, report=0)
        fail += f
        total += t
    doctest.master.summarize()
    coverage.stop()
    for mod in modules:
        coverage.analysis(mod)
    coverage.report(modules)
    sys.exit(fail)

def run_tests(*args, **kwargs):
    """Custom test runner.  Follows the django.test.simple.run_tests()
    interface."""
    coverage.use_cache(0)  # Do not cache any of the coverage.py stuff
    coverage.start()

    test_results = django_test_runner(*args, **kwargs)

    # Stop code coverage after tests have completed
    coverage.stop()

    # Print code metrics header
    print ''
    print '----------------------------------------------------------------------'
    print ' Unit Test Code Coverage Results'
    print '----------------------------------------------------------------------'

    # Report code coverage metrics
    coverage_modules = []
    for module in settings.COVERAGE_MODULES:
        coverage_modules.append(__import__(module, globals(), locals(), ['']))
    coverage.report(coverage_modules, show_missing=1)

    # Print code metrics footer
    print '----------------------------------------------------------------------'

    return test_results

def handle(self, *test_labels, **options):
    """ Run pylint and test with coverage and xml reports """
    patch_for_test_db_setup()

    verbosity = int(options.get('verbosity', 1))
    interactive = options.get('interactive', True)
    excludes = options.get('excludes', '').split(',')
    excludes = [exclude.strip() for exclude in excludes]
    tasks = getattr(settings, 'HUDSON_TASKS', ['pylint', 'coverage', 'tests'])

    output_dir = options.get('output_dir')
    if not path.exists(output_dir):
        os.makedirs(output_dir)

    if not test_labels:
        test_labels = Command.test_labels()

    if verbosity > 0:
        print "Testing and covering the following apps:\n %s" % \
            pprint.pformat(test_labels)

    # TODO: Make lint work with an external rc file
    if 'pylint' in tasks:
        pylint().handle(output_file=path.join(output_dir, 'pylint.report'),
                        *test_labels)

    if 'coverage' in tasks:
        coverage.exclude('#pragma[: ]+[nN][oO] [cC][oO][vV][eE][rR]')
        coverage.start()

    failures = 0
    if 'tests' in tasks:
        test_runner = XmlDjangoTestSuiteRunner(output_dir=output_dir,
                                               interactive=interactive,
                                               verbosity=verbosity)
        failures = test_runner.run_tests(test_labels)

    # save coverage report
    if 'coverage' in tasks:
        coverage.stop()
        modules = [module for name, module in sys.modules.items()
                   if hasattr(module, "__file__")
                   and self.want_module(module, test_labels, excludes)]
        morfs = [self.src(m.__file__) for m in modules
                 if self.src(m.__file__).endswith(".py")]

        if verbosity > 0:
            if excludes:
                print "Excluding any module containing one of these words:"
                pprint.pprint(excludes)
            print "Coverage being generated for:"
            pprint.pprint(morfs)

    if 'coverage' in tasks:
        coverage._the_coverage.xml_report(
            morfs, outfile=path.join(output_dir, 'coverage.xml'))

    if failures:
        sys.exit(bool(failures))

def test_runner_with_coverage(test_labels, verbosity=1, interactive=True,
                              extra_tests=[]):
    # This doesn't work with Django 1.4
    from django.test.simple import run_tests as django_test_runner

    import coverage
    coverage.use_cache(0)
    coverage.start()

    test_results = django_test_runner(test_labels, verbosity, interactive,
                                      extra_tests)

    coverage.stop()

    coverage_modules = [m.__file__ for k, m in sys.modules.iteritems()
                        if m and k.split('.')[0] in test_labels
                        and 'test' not in k]

    print
    print '=' * 80
    print 'Coverage results for %s' % ', '.join(test_labels)
    print '=' * 80
    coverage.report(coverage_modules, show_missing=1)

    coverage_html_dir = getattr(settings, 'COVERAGE_HTML_DIR', None)
    if coverage_html_dir is not None:
        coverage._the_coverage.html_report(coverage_modules, coverage_html_dir)

    return test_results

def test_runner_with_coverage(test_labels, verbosity=1, interactive=True,
                              extra_tests=[]):
    """Custom test runner.  Follows the django.test.simple.run_tests()
    interface."""
    # Start code coverage before anything else if necessary
    coverage_modules = []
    for module_name in settings.COVERAGE_MODULES:
        app_name = module_name.split(".")[0]
        if (not test_labels) or (app_name in test_labels):
            coverage_modules.append(__import__(module_name, globals(),
                                               locals(), ['']))

    if hasattr(settings, 'COVERAGE_MODULES'):
        coverage.use_cache(0)  # Do not cache any of the coverage.py stuff
        coverage.start()

    test_results = django_test_runner(test_labels, verbosity, interactive,
                                      extra_tests)

    # Stop code coverage after tests have completed
    if hasattr(settings, 'COVERAGE_MODULES'):
        coverage.stop()

        # Print code metrics header
        print ''
        print '----------------------------------------------------------------------'
        print ' Unit Test Code Coverage Results'
        print '----------------------------------------------------------------------'

        coverage.report(coverage_modules, show_missing=1)

        # Print code metrics footer
        print '----------------------------------------------------------------------'
    else:
        print "No coverage modules defined in settings.py"

    return test_results

def test_runner_with_coverage(test_labels, verbosity=1, interactive=True,
                              extra_tests=[]):
    """Custom test runner.  Follows the django.test.simple.run_tests()
    interface."""
    coverage.use_cache(0)  # Do not cache any of the coverage.py stuff
    coverage.start()

    test_results = django_test_runner(test_labels, verbosity, interactive,
                                      extra_tests)

    coverage.stop()

    coverage_modules = []
    if test_labels:
        for label in test_labels:
            # Don't report coverage if you're only running a single
            # test case.
            if '.' not in label:
                app = get_app(label)
                coverage_modules.extend(get_coverage_modules(app))
    else:
        for app in get_apps():
            coverage_modules.extend(get_coverage_modules(app))

    if coverage_modules:
        # Print code metrics header
        print ''
        print '----------------------------------------------------------------------'
        print ' Unit Test Code Coverage Results'
        print '----------------------------------------------------------------------'

        coverage.report(coverage_modules, show_missing=1)

        # Print code metrics footer
        print '----------------------------------------------------------------------'

    return test_results

def main_coverage(TESTS):
    modulenames = MODULE_NAMES

    coverage.erase()
    coverage.start()
    coverage.exclude('#pragma[: ]+[nN][oO] [cC][oO][vV][eE][rR]')

    modules = []
    for modulename in modulenames:
        print modulename
        mod = my_import(modulename)
        modules.append(mod)

    if 'unittest' in TESTS:
        print "***** Unittest *****"
        test_args = {'verbosity': 1}
        suite = unittest.TestLoader().loadTestsFromNames(TEST_NAMES)
        unittest.TextTestRunner(**test_args).run(suite)

    if 'doctest' in TESTS:
        t0 = time.time()
        print "\n***** Doctest *****"
        for mod in modules:
            doctest.testmod(mod, verbose=VERBOSE)
        td = time.time() - t0
        print " Tests took %.3f seconds" % (td, )

    print "\n***** Coverage Python *****"
    coverage.stop()
    coverage.report(modules, ignore_errors=1, show_missing=1)
    coverage.erase()

def run(self):
    try:
        # Use system 'coverage' if available
        import coverage
        use_coverage = True
    except:
        use_coverage = False

    tests = unittest.TestLoader().loadTestsFromNames(self._testfiles)
    t = unittest.TextTestRunner(verbosity=1)

    if use_coverage:
        coverage.erase()
        coverage.start()

    if hasattr(unittest, "installHandler"):
        try:
            unittest.installHandler()
        except:
            print "installHandler hack failed"

    try:
        result = t.run(tests)
    except KeyboardInterrupt:
        sys.exit(1)

    if use_coverage:
        coverage.stop()

    sys.exit(int(bool(len(result.failures) > 0 or len(result.errors) > 0)))

def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[]):
    """
    Test runner which displays a code coverage report at the end of the
    run.
    """
    django_test_runner = get_runner(settings)

    coverage.use_cache(0)
    coverage.start()
    results = django_test_runner(test_labels, verbosity, interactive,
                                 extra_tests)
    coverage.stop()

    coverage_modules = []
    if test_labels:
        for label in test_labels:
            # Don't report coverage if you're only running a single
            # test case.
            if '.' not in label:
                app = get_app(label)
                coverage_modules.extend(get_all_coverage_modules(app))
    else:
        for app in get_apps():
            coverage_modules.extend(get_all_coverage_modules(app))

    if coverage_modules:
        coverage.report(coverage_modules, show_missing=1)

    return results

def test_runner_with_coverage(test_labels, verbosity=1, interactive=True,
                              extra_tests=[]):
    """Custom test runner.  Follows the django.test.simple.run_tests()
    interface."""
    do_coverage = (hasattr(settings, 'COVERAGE_MODULES')
                   and settings.COVERAGE_MODULES)

    # Start code coverage before anything else if necessary
    if do_coverage:
        coverage.use_cache(0)  # Do not cache any of the coverage.py stuff
        coverage.start()

    test_results = django_test_runner(test_labels, verbosity, interactive,
                                      extra_tests)

    if do_coverage:
        coverage.stop()

        # Print code metrics header
        print
        print '-' * 60
        print ' Unit Test Code Coverage Results'
        print '-' * 60

        coverage_modules = []
        for module in settings.COVERAGE_MODULES:
            coverage_modules.append(__import__(module, globals(), locals(),
                                               ['']))
        coverage.report(coverage_modules, show_missing=1)

        # Print code metrics footer
        print '-' * 60

    return test_results

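# Not taken from any of the snippets above: a minimal, hedged sketch of the
# Django settings that the COVERAGE_MODULES-driven runners above expect to
# find.  The setting names come from the snippets; the module paths and the
# runner path are hypothetical placeholders.
COVERAGE_MODULES = [
    'myapp.models',   # hypothetical: dotted path of a module to include in the report
    'myapp.views',
]
TEST_RUNNER = 'myproject.testrunner.test_runner_with_coverage'  # hypothetical dotted path
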
def report():
    if run:
        run()
    if success.count and not failure.count:
        print("ok")
        # only clean out tempfiles if test passed
        import os, os.path, tempfile
        for file in _tempfiles:
            try:
                os.remove(file)
            except OSError:
                pass  # report?
        temp_root = os.path.join(tempfile.gettempdir(), 'pillow-tests')
        try:
            os.rmdir(temp_root)
        except OSError:
            pass
    if "--coverage" in sys.argv:
        import coverage
        coverage.stop()
        # The coverage module messes up when used from inside an
        # atexit handler.  Do an explicit save to make sure that
        # we actually flush the coverage cache.
        coverage.the_coverage.save()

def handle(self, *args, **options):
    """ Run pylint and test with coverage and xml reports """
    output_dir = options.get('output_dir')
    if not path.exists(output_dir):
        os.makedirs(output_dir)

    app_labels = Command.app_list()

    # pylint
    pylint().handle(*app_labels,
                    output_file=path.join(output_dir, 'pylint.report'))

    # coverage
    coverage.exclude('#pragma[: ]+[nN][oO] [cC][oO][vV][eE][rR]')
    coverage.start()

    # tests
    test_runner = XmlDjangoTestSuiteRunner(output_dir=output_dir)
    failures = test_runner.run_tests(app_labels)

    # save coverage report
    coverage.stop()
    modules = [module for name, module in sys.modules.items()
               if module and any([name.startswith(label)
                                  for label in app_labels])]
    morfs = [m.__file__ for m in modules if hasattr(m, '__file__')]
    coverage._the_coverage.xml_report(
        morfs, outfile=path.join(output_dir, 'coverage.xml'))

def run_tests(verbosity):
    "Run test suite"
    # list all the files in the top level directory
    file_list = os.listdir(os.path.join(os.path.abspath(
        os.path.dirname(os.path.realpath(__file__)))))
    # list all the files in the tests directory
    test_list = os.listdir(os.path.join(os.path.abspath(
        os.path.dirname(os.path.realpath(__file__))), 'tests'))

    code_modules = []
    # loop over all the file names
    for file_name in file_list:
        extension = os.path.splitext(file_name)[-1]
        # if they are python files, but not the test runner itself
        if extension == '.py' and file_name != 'test.py':
            # work out the module name
            code_module_name = os.path.splitext(file_name)[0:-1][0]
            # now import the module
            module = __import__(code_module_name, globals(), locals(),
                                code_module_name)
            # and add it to the list of available modules
            code_modules.append(module)

    test_modules = []
    # loop over all the file names
    for file_name in test_list:
        extension = os.path.splitext(file_name)[-1]
        # if they are python files
        if extension == '.py':
            # work out the module name
            test_module_name = "tests." + os.path.splitext(file_name)[0:-1][0]
            # now import the module
            module = __import__(test_module_name, globals(), locals(),
                                test_module_name)
            # and add it to the list of available modules
            test_modules.append(module)

    # populate a test suite from the individual tests from the list of modules
    suite = unittest.TestSuite(map(
        unittest.defaultTestLoader.loadTestsFromModule, test_modules))

    # set up the test runner
    runner = unittest.TextTestRunner(verbosity=int(verbosity))

    # set up coverage reporting
    coverage.use_cache(0)
    coverage.start()

    # run the tests
    runner.run(suite)

    # stop coverage reporting
    coverage.stop()

    # output coverage report
    coverage.report(code_modules, show_missing=1)

def report(self, stream):
    log.debug("Coverage report")
    import coverage
    coverage.stop()
    modules = [module for name, module in sys.modules.items()
               if self.wantModuleCoverage(name, module)]
    log.debug("Coverage report will cover modules: %s", modules)
    coverage.report(modules, file=stream)

def report_coverage():
    coverage.stop()
    module_list = [
        mod for name, mod in sys.modules.copy().iteritems()
        if getattr(mod, '__file__', None) and
           name.startswith('jinja2.') and
           name not in IGNORED_MODULES
    ]
    module_list.sort()
    coverage.report(module_list)

def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[]):
    """
    Test runner which displays a code coverage report at the end of the
    run.
    """
    coverage.use_cache(0)
    for e in settings.COVERAGE_CODE_EXCLUDES:
        coverage.exclude(e)
    coverage.start()

    results = base_run_tests(test_labels, verbosity, interactive, extra_tests)

    coverage.stop()

    coverage_modules = []
    if test_labels:
        for label in test_labels:
            label = label.split('.')[0]
            app = get_app(label)
            coverage_modules.append(_get_app_package(app))
    else:
        for app in get_apps():
            coverage_modules.append(_get_app_package(app))

    coverage_modules.extend(settings.COVERAGE_ADDITIONAL_MODULES)

    packages, modules, excludes, errors = get_all_modules(
        coverage_modules, settings.COVERAGE_MODULE_EXCLUDES,
        settings.COVERAGE_PATH_EXCLUDES)

    outdir = settings.COVERAGE_REPORT_HTML_OUTPUT_DIR
    if outdir is None:
        coverage.report(modules.values(), show_missing=1)
        if excludes:
            print >>sys.stdout
            print >>sys.stdout, "The following packages or modules were excluded:",
            for e in excludes:
                print >>sys.stdout, e,
            print >>sys.stdout
        if errors:
            print >>sys.stdout
            print >>sys.stderr, "There were problems with the following packages or modules:",
            for e in errors:
                print >>sys.stderr, e,
            print >>sys.stdout
    else:
        outdir = os.path.abspath(outdir)
        if settings.COVERAGE_CUSTOM_REPORTS:
            html_report(outdir, modules, excludes, errors)
        else:
            coverage._the_coverage.html_report(modules.values(), outdir)
            coverage._the_coverage.xml_report(modules.values(),
                                              os.path.join(outdir, 'coverage.xml'))
        print >>sys.stdout
        print >>sys.stdout, "HTML reports were output to '%s'" % outdir

    return results

def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[]):
    """
    Run the unit tests for all the test labels in the provided list.
    Labels must be of the form:
     - app.TestClass.test_method
       Run a single specific test method
     - app.TestClass
       Run all the test methods in a given class
     - app
       Search for doctests and unittests in the named application.

    When looking for tests, the test runner will look in the models and
    tests modules for the application.

    A list of 'extra' tests may also be provided; these tests
    will be added to the test suite.

    If the settings file has an entry for COVERAGE_MODULES, or test_labels
    is true, it will print the coverage report for modules/apps.

    Returns number of tests that failed.
    """
    do_coverage = hasattr(settings, 'COVERAGE_MODULES') or bool(test_labels)
    if do_coverage:
        coverage.erase()
        coverage.start()

    from django.test import simple
    retval = simple.run_tests(test_labels, verbosity, interactive, extra_tests)

    if do_coverage:
        coverage.stop()

        # Print code metrics header
        print ''
        print '----------------------------------------------------------------------'
        print ' Unit Test Code Coverage Results'
        print '----------------------------------------------------------------------'

        # try to import all modules for the coverage report.
        modules = []
        if getattr(settings, 'COVERAGE_MODULES', None):
            modules = [__import__(module, {}, {}, [''])
                       for module in settings.COVERAGE_MODULES]
        elif test_labels:
            modules = []
            for label in test_labels:
                # remove test class or test method from label
                label = label.split('.')[0]
                pkg = _get_app_package(label)
                modules.extend(_package_modules(*pkg))

        coverage.report(modules, show_missing=1)

    return retval

def main():
    import unittest
    from getopt import getopt, GetoptError

    try:
        options, arguments = getopt(sys.argv[1:], "d:v",
                                    ["database=", "extra-verbose",
                                     "super-verbose", "inserts", "coverage"])
    except GetoptError:
        sys.exit("Usage: %s [-d|--database all|type] [-v[v]] "
                 "[--extra-verbose|--super-verbose] [--inserts] [--coverage]"
                 % sys.argv[0])

    dbs = []
    newArgs = []
    doCoverage = False
    verbose = 0
    for option, value in options:
        if option in ('-d', '--database'):
            dbs.append(value)
        elif option == '--inserts':
            SQLObjectTest.debugInserts = True
        elif option == '--coverage':
            # Handled earlier, so we get better coverage
            doCoverage = True
        elif option == '--extra-verbose':
            verbose = 1
        elif option == '--super-verbose':
            verbose = 2
        elif option == '-v':
            verbose += 1

    if verbose >= 1:
        SQLObjectTest.debugSQL = True
    if verbose >= 2:
        SQLObjectTest.debugOutput = True
        newArgs.append('-vv')

    newArgs.extend(arguments)
    sys.argv = [sys.argv[0]] + newArgs

    if not dbs:
        dbs = ['mysql']
    if dbs == ['all']:
        dbs = supportedDatabases()

    for db in dbs:
        print 'Testing %s' % db
        curr_db = db
        setDatabaseType(db)
        try:
            unittest.main()
        except SystemExit:
            pass

    if doCoverage:
        coverage.stop()
        coverModules()

def run_tests(self, test_labels, extra_tests=None, **kwargs):
    coverage.use_cache(settings.COVERAGE_USE_CACHE)
    for e in settings.COVERAGE_CODE_EXCLUDES:
        coverage.exclude(e)
    coverage.start()

    results = super(CoverageRunner, self).run_tests(test_labels,
                                                    extra_tests, **kwargs)

    coverage.stop()

    coverage_modules = []
    if test_labels:
        for label in test_labels:
            label = label.split('.')[0]
            app = get_app(label)
            coverage_modules.append(self._get_app_package(app))
    else:
        for app in get_apps():
            coverage_modules.append(self._get_app_package(app))

    coverage_modules.extend(settings.COVERAGE_ADDITIONAL_MODULES)

    packages, modules, excludes, errors = get_all_modules(
        coverage_modules, settings.COVERAGE_MODULE_EXCLUDES,
        settings.COVERAGE_PATH_EXCLUDES)

    if settings.COVERAGE_USE_STDOUT:
        coverage.report(modules.values(), show_missing=1)
        if excludes:
            message = "The following packages or modules were excluded:"
            print >>sys.stdout
            print >>sys.stdout, message,
            for e in excludes:
                print >>sys.stdout, e,
            print >>sys.stdout
        if errors:
            message = "There were problems with the following packages "
            message += "or modules:"
            print >>sys.stdout
            print >>sys.stderr, message,
            for e in errors:
                print >>sys.stderr, e,
            print >>sys.stdout

    outdir = settings.COVERAGE_REPORT_HTML_OUTPUT_DIR
    if outdir:
        outdir = os.path.abspath(outdir)
        if settings.COVERAGE_CUSTOM_REPORTS:
            html_report(outdir, modules, excludes, errors)
        else:
            coverage._the_coverage.html_report(modules.values(), outdir)
        print >>sys.stdout
        print >>sys.stdout, "HTML reports were output to '%s'" % outdir

    return results

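# Not taken from any of the snippets above: a hedged sketch of the settings
# that django-coverage style runners such as the CoverageRunner above read.
# The setting names appear in the snippets; every value here is illustrative.
COVERAGE_USE_CACHE = False
COVERAGE_USE_STDOUT = True
COVERAGE_CODE_EXCLUDES = ['#pragma[: ]+[nN][oO] [cC][oO][vV][eE][rR]']
COVERAGE_ADDITIONAL_MODULES = []                # e.g. ['myproject.utils'] (hypothetical)
COVERAGE_MODULE_EXCLUDES = ['tests$', 'urls$']  # illustrative patterns
COVERAGE_PATH_EXCLUDES = [r'\.svn']             # illustrative pattern
COVERAGE_REPORT_HTML_OUTPUT_DIR = None          # set to a directory to write HTML output
COVERAGE_CUSTOM_REPORTS = False
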
def report(self, stream):
    """
    Output code coverage report.
    """
    import coverage
    coverage.stop()
    modules = [module for name, module in sys.modules.items()
               if self.wantModuleCoverage(name, module)]
    log.debug("Coverage report will cover modules: %s", modules)
    morfs = [m.__file__ for m in modules if hasattr(m, '__file__')]
    coverage._the_coverage.xml_report(morfs, outfile=self.xcoverageFile)

def nose_start(LOG, REPO_PATH):  # test names
    """ Find all python modules in REPO_PATH """
    WORKING_DIR = getcwd()
    chdir(REPO_PATH)
    coverage.erase()
    coverage.start()
    nose.run()
    coverage.stop()
    LOG.info(coverage.analysis(nose))
    LOG.info(coverage.report())
    chdir(WORKING_DIR)

def report(self, stream):
    log.debug("Coverage report")
    import coverage
    coverage.stop()
    modules = [module for name, module in sys.modules.items()
               if self.wantModuleCoverage(name, module)]
    log.debug("Coverage report will cover modules: %s", modules)
    if self.coverDir and self.coverAnnotate:
        coverage.annotate(modules, self.coverDir)
    fd = open("%s/cover.report" % self.coverDir, "w")
    coverage.report(modules, file=fd)
    fd.close()

def runTests(self):
    testrunner = TextTestRunnerWithTimings(
        verbosity=self._options.verbosity,
        timeTests=self._options.time,
        nrTestsToReport=self._options.time_reports)

    if self._options.coverage:
        coverage.erase()
        coverage.start()

    result = testrunner.run(self)

    if self._options.coverage:
        coverage.stop()
        print coverage.report(self.getPyFilesFromDir(
            os.path.join(projectRoot, 'taskcoachlib')))

    return result

    print >> sys.stderr, '=' * WIDTH
    if failed:
        print >> sys.stderr, '%d of %d tests failed.' % \
            (error_test_count, total_test_count)
        print >> sys.stderr, 'Tests failed in:', ', '.join(failed)
        ret = 1
    else:
        if total_test_count == 1:
            print >> sys.stderr, '1 test happy.'
        else:
            print >> sys.stderr, 'All %d tests happy.' % total_test_count
        ret = 0

    if with_coverage:
        coverage.stop()
        modules = [mod for name, mod in sys.modules.iteritems()
                   if name.startswith('pygments.') and mod]
        coverage.report(modules)

    return ret


if __name__ == '__main__':
    with_coverage = False
    if sys.argv[1:2] == ['-C']:
        with_coverage = bool(coverage)
        del sys.argv[1]
    sys.exit(run_tests(with_coverage))

def main(self, *args):
    #
    # Strip arguments from the command line, so our
    # args do not confuse another app or library.
    #
    sys.argv = [sys.argv[0]]
    #
    # If they asked for a list of the tests, print that
    # first.
    #
    if self.list_mode:
        module_tree = self.getModuleTree(args)
        success = self.listTests(module_tree)
    #
    # If they asked for a list of test categories,
    # print that here.
    #
    elif self.list_categories_mode:
        module_tree = self.getModuleTree(args)
        success = self.listCategories(module_tree)
    #
    # If they asked to have tests run, do that
    # last.
    #
    elif self.run_mode:
        if coverage and self.code_coverage:
            #
            # Possibly override the coverage filename
            #
            if self.coverage_filename:
                self.statusMessage('Writing coverage output to %s'
                                   % self.coverage_filename)
                import os
                os.environ['COVERAGE_FILE'] = self.coverage_filename
            #
            # Clean up in case we have previous coverage data
            #
            coverage.erase()
            #
            # Add exclude patterns
            #
            for pattern in self.coverage_exclude_patterns:
                coverage.exclude(pattern)
            #
            # Start code coverage counter
            #
            coverage.start()
        #
        # Get the module tree.  This needs to be done *after*
        # coverage monitoring is started so we monitor those
        # modules, too.
        #
        module_tree = self.getModuleTree(args)
        #
        # Run the tests
        #
        result = self.runTests(module_tree)
        if coverage and self.code_coverage:
            #
            # Stop coverage counter and save its results
            #
            coverage.stop()
            coverage.the_coverage.save()
        #
        # Report our success/failure
        #
        success = result.wasSuccessful()

    return success

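# Not taken from any of the snippets above: a minimal, self-contained sketch
# of the legacy module-level coverage.py API (erase/start/stop/report) that
# every runner in this collection builds on.  `mypackage` and `test_mypackage`
# are hypothetical module names.
import unittest
import coverage

import mypackage
import test_mypackage

coverage.erase()    # drop any previously collected coverage data
coverage.start()    # begin tracing executed lines
try:
    suite = unittest.TestLoader().loadTestsFromModule(test_mypackage)
    unittest.TextTestRunner(verbosity=1).run(suite)
finally:
    coverage.stop()  # always stop tracing, even if the run raises
coverage.report([mypackage], show_missing=1)   # line-coverage summary to stdout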