def test_noexclude_stdlib(self): """Check that stdlib files are covered if not excluded. Only works for python >= 2.5, because of some changes in the way frame objs f_code.co_filename is reported. """ if version_string in ('2.3', '2.4'): # CTB return figleaf.start(ignore_python_lib=False) os.path.dirname('/some/weird/path') # use some stdlib code figleaf.stop() coverage = figleaf.get_data().gather_files() print 'sysdir is:', sysdir found = False for k in coverage: print 'checking:', k if k.startswith(sysdir): found = True break assert found
def generate_coverage(func, path, *args, **kwds):
    """Run *func* and write an HTML coverage report into *path*.

    Any pre-existing report directory at *path* is removed first.
    Note: this helper does not start/stop figleaf itself.  Returns
    whatever *func* returns.
    """
    import figleaf
    from figleaf import annotate_html

    # Start from a clean report directory.
    if os.path.isdir(path):
        shutil.rmtree(path)

    # Execute the wrapped callable.
    return_vals = func(*args, **kwds)

    logger.info('generating coverage')
    gathered = figleaf.get_data().gather_files()
    annotate_html.prepare_reportdir(path)

    # Exclude python internals and the test suite from the report.
    excludes = [re.compile(p, re.IGNORECASE) for p in ('python', 'tests')]
    annotate_html.report_as_html(gathered, path,
                                 exclude_patterns=excludes, files_list='')
    return return_vals
def generate_coverage_report(self):
    """Generate an HTML coverage report and return its absolute path.

    The report is written into a fresh temp directory, so callers must
    move or delete it themselves.  Returns None when figleaf is not
    available.
    """
    if not has_figleaf:
        return

    figleaf.stop()

    tempdir = tempfile.mkdtemp()
    coverage_file = os.path.join(tempdir, "coverage.txt")
    logging.info("Writing coverage to %s" % coverage_file)
    logging.info("coverage info = %r" % figleaf.get_data())
    figleaf.write_coverage(coverage_file)

    # Round-trip through the coverage file, merging into an empty dict.
    coverage_data = figleaf.combine_coverage(
        {}, figleaf.read_coverage(coverage_file))

    logging.info("Preparing to write report...")
    report_dir = os.path.join(tempdir, "figleaf-html-report")
    if not os.path.exists(report_dir):
        os.makedirs(report_dir)

    excludes = [re.compile(".*site-packages.*"),
                re.compile(".*pubsub.*"),
                re.compile(".*pew/.*")]
    html_report.report_as_html(coverage_data, report_dir, excludes, {})
    logging.info("Writing report to %s" % report_dir)
    return os.path.join(report_dir, "index.html")
def generate_coverage(func, path, *args, **kwds):
    """Execute *func* and place an HTML coverage report at *path*.

    A stale report directory is wiped first.  Coverage collection is
    not started here; this only gathers whatever figleaf has already
    recorded.  Returns *func*'s return value.
    """
    import figleaf
    from figleaf import annotate_html

    if os.path.isdir(path):
        shutil.rmtree(path)

    # Execute the function itself.
    result = func(*args, **kwds)

    logger.info('generating coverage')
    file_coverage = figleaf.get_data().gather_files()
    annotate_html.prepare_reportdir(path)

    # Skip python modules and the test modules.
    make_pattern = lambda p: re.compile(p, re.IGNORECASE)
    skip_patterns = map(make_pattern, ['python', 'tests'])
    annotate_html.report_as_html(file_coverage, path,
                                 exclude_patterns=skip_patterns,
                                 files_list='')
    return result
def test_exclude_stdlib(self): "Check that stdlib files are not covered if excluded." if version_string in ('2.3', '2.4'): # CTB return ### first check that they would be covered ordinarily! figleaf.start(ignore_python_lib=False) os.path.dirname('/some/weird/path') # use some stdlib code figleaf.stop() coverage = figleaf.get_data().gather_files() print 'sysdir is:', sysdir found = False for k in coverage: print 'checking:', k if k.startswith(sysdir): found = True break assert found # this will fail in 2.3, 2.4 ### ok, now check that they're ignored properly. figleaf._t = None # RESET figleaf.start(ignore_python_lib=True) os.path.dirname('/some/weird/path') # use some stdlib code figleaf.stop() coverage = figleaf.get_data().gather_files() for k in coverage: # posixpath & threading somehow evade the discrimination!? if k.startswith(sysdir) and (k.endswith('posixpath.py') or k.endswith('threading.py')): continue assert not k.startswith(sysdir), '%s starts with %s' % (k, sysdir,)
def test_noexclude_misc_path(self):
    "Check that tests/tst_exclude1.py is covered, if no excl specified"
    # No exclude or include filters at all.
    figleaf.init(None, None)

    figleaf.start()
    execfile('tests/tst_exclude1.py')
    figleaf.stop()

    covered = figleaf.get_data().gather_files()
    assert 'tests/tst_exclude1.py' in covered.keys()
def test_include_misc_path(self):
    "Check that tests/tst_include1.py is the only thing covered, if set"
    # Only paths under 'tests' are included.
    figleaf.init(None, 'tests')

    figleaf.start()
    execfile('tests/tst_include1.py')
    figleaf.stop()

    covered = figleaf.get_data().gather_files()
    assert 'tests/tst_include1.py' in covered.keys()
    assert len(covered) == 1
def test_exclude_stdlib(self): "Check that stdlib files are not covered if excluded." if version_string in ('2.3', '2.4'): # CTB return ### first check that they would be covered ordinarily! figleaf.start(ignore_python_lib=False) os.path.dirname('/some/weird/path') # use some stdlib code figleaf.stop() coverage = figleaf.get_data().gather_files() found = False for k in coverage: print k if k.startswith(sysdir): found = True break assert found # this will fail in 2.3, 2.4 ### ok, now check that they're ignored properly. figleaf._t = None # RESET figleaf.start(ignore_python_lib=True) os.path.dirname('/some/weird/path') # use some stdlib code figleaf.stop() coverage = figleaf.get_data().gather_files() for k in coverage: assert not k.startswith(sysdir)
def test_exclude_misc_path(self): "Check that tests/tst_exclude1.py is not covered, if excl specified" testspath = os.path.abspath('tests/') print 'IGNORING', testspath figleaf.init(testspath, None) figleaf.start() execfile('tests/tst_exclude1.py') figleaf.stop() coverage = figleaf.get_data().gather_files() print coverage.keys() assert not 'tests/tst_exclude1.py' in coverage.keys()
def post_run(self, runner):
    """Stop figleaf, write an HTML coverage report, and open it."""
    figleaf.stop()
    gathered = figleaf.get_data().gather_files()

    self.coverageDir = os.path.join(runner.setupDir, 'report', 'pycoverage')
    # Create the report directory on first use.
    if not os.path.exists(self.coverageDir):
        os.mkdir(self.coverageDir)

    figleaf.annotate_html.report_as_html(
        gathered, self.coverageDir, [], {})

    # Open a browser window with the report.
    openBrowser(runner.config.getBrowserPath(),
                os.path.join(self.coverageDir, "index.html"))
def generate_coverage(func, path, *args, **kwds):
    """Run *func* under figleaf and write an HTML coverage report to *path*.

    Any existing report directory at *path* is removed first.  Returns
    whatever *func* returns.
    """
    import figleaf
    from figleaf import annotate_html

    # Fix for figleaf misbehaving.  It is adding a logger at root level
    # and that will add a handler to all subloggers (ours as well).
    # Needs to be fixed in figleaf.
    import logging
    root = logging.getLogger()
    # BUG FIX: iterate over a *copy* of the handler list.  Removing
    # entries from the list being iterated skips every other handler.
    for hand in list(root.handlers):
        root.removeHandler(hand)

    if os.path.isdir(path):
        shutil.rmtree(path)

    info("collecting coverage information")
    figleaf.start()
    # Execute the function itself.
    return_vals = func(*args, **kwds)
    figleaf.stop()

    info('generating coverage')
    coverage = figleaf.get_data().gather_files()
    annotate_html.prepare_reportdir(path)

    # Skip python modules and the test modules.
    regpatt = lambda patt: re.compile(patt, re.IGNORECASE)
    patterns = map(regpatt, ['python', 'tests', 'django', 'path*'])
    annotate_html.report_as_html(coverage, path,
                                 exclude_patterns=patterns, files_list='')
    return return_vals
def test_dos_eol():
    """Make sure that we can annotate files with DOS EOL characters in them."""
    import figleaf, figleaf.annotate_html
    import shutil

    figleaf.start()
    execfile(os.path.join(thisdir, "tst_dos_eol.py"))
    figleaf.stop()

    coverage = figleaf.get_data().gather_files()

    tmpdir = tempfile.mkdtemp(".figleaf")
    try:
        figleaf.annotate_html.report_as_html(coverage, tmpdir, [], {})
    finally:
        # BUG FIX: the old glob+unlink+rmdir cleanup failed (and leaked
        # the tempdir) whenever the report contained a subdirectory;
        # rmtree removes nested content and never raises here.
        shutil.rmtree(tmpdir, ignore_errors=True)
def generate_coverage( func, path, *args, **kwds):
    """Run *func* under figleaf and write an HTML coverage report to *path*.

    Any existing report directory at *path* is removed first.  Returns
    whatever *func* returns.
    """
    import figleaf
    from figleaf import annotate_html

    # Fix for figleaf misbehaving.  It is adding a logger at root level
    # and that will add a handler to all subloggers (ours as well).
    # Needs to be fixed in figleaf.
    import logging
    root = logging.getLogger()
    # BUG FIX: iterate over a *copy* of the handler list.  Removing
    # entries from the list being iterated skips every other handler.
    for hand in list(root.handlers):
        root.removeHandler(hand)

    if os.path.isdir(path):
        shutil.rmtree(path)

    info("collecting coverage information")
    figleaf.start()
    # Execute the function itself.
    return_vals = func(*args, **kwds)
    figleaf.stop()

    info('generating coverage')
    coverage = figleaf.get_data().gather_files()
    annotate_html.prepare_reportdir(path)

    # Skip python modules and the test modules.
    regpatt = lambda patt: re.compile(patt, re.IGNORECASE)
    patterns = map(regpatt, ['python', 'tests', 'django', 'path*'])
    annotate_html.report_as_html(coverage, path,
                                 exclude_patterns=patterns, files_list='')
    return return_vals
def coro(dirpath_src, build_monitor):
    """
    Send errors to build_monitor if the unit tests for supplied modules fail.

    Coroutine: each value sent in is a build_element dict carrying at
    least 'filepath', 'dirpath_log', 'relpath' and 'ast' keys.  For
    each python module with a test specification file, a py.test
    session is run; failures and missing coverage are reported to
    build_monitor.
    """
    dirpath_internal = da.lwc.discover.path(
                            'internal', dirpath_lwc_root = dirpath_src)
    filepath_ini = os.path.join(
                            dirpath_internal, 'da', 'check', 'pytest.ini')
    while True:
        build_element = (yield)
        filepath_module = build_element['filepath']

        # Ignore non-python design documents.
        if not da.lwc.file.is_python_file(filepath_module):
            continue

        # Ignore experimental design documents.
        if da.lwc.file.is_experimental(filepath_module):
            continue

        # Check to ensure that the test files, classes and methods
        # are present.
        _check_static_coverage(build_element, build_monitor)
        filepath_test = da.lwc.file.specification_filepath_for(
                                                        filepath_module)
        if not os.path.isfile(filepath_test):
            continue

        # Ensure the test results dir exists.
        dirpath_log = build_element['dirpath_log']
        da.util.ensure_dir_exists(dirpath_log)

        # Define test result files.
        filepath_pytest_log = os.path.join(dirpath_log, 'pytest.log')
        filepath_pytest_out = os.path.join(dirpath_log, 'pytest_out.log')
        filepath_pytest_err = os.path.join(dirpath_log, 'pytest_err.log')
        filepath_junit_xml = os.path.join(dirpath_log, 'pytest.junit.xml')
        filepath_pytest_json = os.path.join(dirpath_log, 'pytest.json')
        filepath_cover_pickle = os.path.join(dirpath_log, 'test_cover.pickle')
        filepath_cover_json = os.path.join(dirpath_log, 'test_cover.json')

        # Run a py.test session for the current module's test cases.
        with _pytest_context(dirpath_cwd = dirpath_src,
                             filepath_stdout = filepath_pytest_out,
                             filepath_stderr = filepath_pytest_err):
            exit_code = pytest.main(
                [filepath_test,
                 '-p', 'da.check.pytest_da',
                 '--capture=no',
                 '-c=' + filepath_ini,
                 '--result-log=' + filepath_pytest_log,
                 '--junit-xml=' + filepath_junit_xml,
                 '--json=' + filepath_pytest_json,
                 '--coverage-log=' + filepath_cover_pickle])

        # Communicate any test case failures.
        if exit_code != 0:
            _report_unit_test_failure(filepath_test,
                                      filepath_pytest_json,
                                      dirpath_src,
                                      build_monitor)

            # The coverage metric is not valid if
            # the test aborted early due to a test
            # criterion failing or some sort of
            # error. We therefore only collect
            # coverage metrics when the test
            # passes.
            #
            continue

        # Get coverage data grouped by file.
        with open(filepath_cover_pickle, 'rb') as figleaf_pickle:
            figleaf.load_pickled_coverage(figleaf_pickle)
        coverage = figleaf.get_data()
        cov_by_file = coverage.gather_files()

        # Persist a JSON copy of the coverage (sets -> lists so it
        # serializes).
        cov_log = dict()
        for key, value in cov_by_file.items():
            cov_log[key] = list(value)
        with open(filepath_cover_json, 'wt') as file_cover_json:
            file_cover_json.write(json.dumps(cov_log,
                                             indent = 4,
                                             sort_keys = True))

        # Get the design elements in the current document that require
        # test coverage.
        #
        # NOTE(review): this reads build_element['ast'] unguarded, so a
        # document that failed to parse would raise KeyError here --
        # confirm upstream always supplies the key.
        ast_module = build_element['ast']
        module_name = da.python_source.get_module_name(filepath_module)
        design_elements = list(da.python_source.gen_ast_paths_depth_first(
                                                    ast_module, module_name))

        # Work out if the coverage provided by the
        # unit tests is sufficient.
        #
        # Initially, we just check to see that
        # *some* coverage is given for each
        # document. (Module-level coverage)
        #
        # As we mature this system, we will
        # (conditionally) extend checks to
        # ensure minimum standards for function-
        # level coverage and line-level coverage.
        #
        if len(design_elements) > 1 and filepath_module not in cov_by_file:
            relpath_module = build_element['relpath']
            build_monitor.report_nonconformity(
                tool = 'pytest',
                msg_id = 'E0204',
                msg = 'No test coverage for module: ' + relpath_module,
                file = filepath_test)
            continue
def compare_coverage(filename): print filename fullpath = os.path.abspath(os.path.join(testdir, filename)) ### run file & record coverage maindict = {} maindict.update(__main__.__dict__) figleaf.start() try: execfile(fullpath, maindict) except: pass figleaf.stop() d = figleaf.get_data().gather_files() coverage_info = d.get(fullpath, None) ### ok, now get the interesting lines from our just-executed python script f1 = open(fullpath) SYNTAX_ERROR = False try: line_info = figleaf.get_lines(f1) except SyntaxError: SYNTAX_ERROR = True # not supported in this ver? ### load in the previously annotated lines coverage_file = filename + '.cover.' + version_string try: f2 = open(os.path.join(testdir, coverage_file)) assert not SYNTAX_ERROR except IOError: assert SYNTAX_ERROR # it's ok, skip this test return ########## No syntax error, check aginst previously annotated stuff. actual_lines = f2.readlines() f2.close() ### annotate lines with coverage style 'cover' f1.seek(0) (_, _, output) = figleaf.annotate_cover.make_cover_lines(line_info, coverage_info, f1) ### compare! f1.close() for (i, (check_line, recorded_line)) in enumerate(zip(output, actual_lines)): check_line = check_line.strip() recorded_line = recorded_line.strip() assert check_line == recorded_line, "regression mismatch, file %s:%d\ncurrent '%s'\nactual '%s'\n" % (fullpath, i, check_line, recorded_line,)
def coro(dirpath_src, build_monitor):
    """
    Send errors to build_monitor if the unit tests for supplied modules fail.

    Coroutine: each value sent in is a build_unit dict carrying at
    least 'filepath', 'dirpath_log' and 'relpath' keys, plus 'ast' when
    the document parsed.  For each python module with a test
    specification file, a py.test session is run; failures and missing
    coverage are reported to build_monitor.
    """
    dirpath_internal = da.lwc.discover.path('internal',
                                            dirpath_lwc_root=dirpath_src)
    filepath_ini = os.path.join(dirpath_internal,
                                'da', 'check', 'pytest.ini')
    while True:
        build_unit = (yield)
        filepath_module = build_unit['filepath']

        # Ignore non-python design documents.
        if not da.lwc.file.is_python_file(filepath_module):
            continue

        # Ignore experimental design documents.
        if da.lwc.file.is_experimental(filepath_module):
            continue

        # Ignore documents that failed to parse..
        if 'ast' not in build_unit:
            continue

        # Check to ensure that the test files, classes and methods
        # are present.
        _check_static_coverage(build_unit, build_monitor)
        filepath_test = da.lwc.file.specification_filepath_for(
                                                        filepath_module)
        if not os.path.isfile(filepath_test):
            continue

        # Ensure the test results dir exists.
        dirpath_log = build_unit['dirpath_log']
        da.util.ensure_dir_exists(dirpath_log)

        # Define test result files.
        filepath_pytest_log = os.path.join(dirpath_log, 'pytest.log')
        filepath_pytest_out = os.path.join(dirpath_log, 'pytest_out.log')
        filepath_pytest_err = os.path.join(dirpath_log, 'pytest_err.log')
        filepath_junit_xml = os.path.join(dirpath_log, 'pytest.junit.xml')
        filepath_pytest_json = os.path.join(dirpath_log, 'pytest.json')
        filepath_cover_pickle = os.path.join(dirpath_log, 'test_cover.pickle')
        filepath_cover_json = os.path.join(dirpath_log, 'test_cover.json')

        # Run a py.test session for the current module's test cases.
        with _pytest_context(dirpath_cwd=dirpath_src,
                             filepath_stdout=filepath_pytest_out,
                             filepath_stderr=filepath_pytest_err):
            exit_code = pytest.main([
                filepath_test,
                '-p', 'da.check.pytest_da',
                # '-m', 'ci'
                '--capture=no',
                '-c=' + filepath_ini,
                '--result-log=' + filepath_pytest_log,
                '--junit-xml=' + filepath_junit_xml,
                '--json=' + filepath_pytest_json,
                '--coverage-log=' + filepath_cover_pickle
            ])

        # Communicate any test case failures.
        if exit_code != 0:
            _report_unit_test_failure(filepath_test,
                                      filepath_pytest_json,
                                      dirpath_src,
                                      build_monitor)

            # The coverage metric is not valid if
            # the test aborted early due to a test
            # criterion failing or some sort of
            # error. We therefore only collect
            # coverage metrics when the test
            # passes.
            #
            continue

        # Get coverage data grouped by file.
        with open(filepath_cover_pickle, 'rb') as figleaf_pickle:
            figleaf.load_pickled_coverage(figleaf_pickle)
        coverage = figleaf.get_data()
        cov_by_file = coverage.gather_files()

        # Persist a JSON copy of the coverage (sets -> lists so it
        # serializes).
        cov_log = dict()
        for key, value in cov_by_file.items():
            cov_log[key] = list(value)
        with open(filepath_cover_json, 'wt') as file_cover_json:
            file_cover_json.write(json.dumps(cov_log,
                                             indent=4,
                                             sort_keys=True))

        # Get the design elements in the current document that require
        # test coverage.
        #
        ast_module = build_unit['ast']
        module_name = da.python_source.get_module_name(filepath_module)
        design_elements = list(
            da.python_source.gen_ast_paths_depth_first(ast_module,
                                                       module_name))

        # Work out if the coverage provided by the
        # unit tests is sufficient.
        #
        # Initially, we just check to see that
        # *some* coverage is given for each
        # document. (Module-level coverage)
        #
        # As we mature this system, we will
        # (conditionally) extend checks to
        # ensure minimum standards for function-
        # level coverage and line-level coverage.
        #
        if len(design_elements) > 1 and filepath_module not in cov_by_file:
            relpath_module = build_unit['relpath']
            build_monitor.report_nonconformity(
                tool='pytest',
                msg_id=da.check.constants.PYTEST_NO_COVERAGE,
                msg='No test coverage for module: ' + relpath_module,
                path=filepath_test)
            continue
if MODE == 'coverage': # coverage with coverage module, will write to standard output import coverage coverage.erase() coverage.start() import test_all from statlib import stats, pstat suite = test_all.get_suite() unittest.TextTestRunner(verbosity=2).run(suite) coverage.report( [ stats, pstat ] ) elif MODE == 'figleaf': # coverage with figleaf, will write to the html directrory import figleaf from figleaf import annotate_html figleaf.start() import test_all from statlib import stats, pstat suite = test_all.get_suite() unittest.TextTestRunner(verbosity=2).run(suite) figleaf.stop() figleaf.write_coverage('.figleaf') data = figleaf.get_data().gather_files() annotate_html.report_as_html( data, 'cover-reports', [] ) else: print 'Invalid mode %s' % mode
if MODE == 'coverage': # coverage with coverage module, will write to standard output import coverage coverage.erase() coverage.start() import test_all from statlib import stats, pstat suite = test_all.get_suite() unittest.TextTestRunner(verbosity=2).run(suite) coverage.report([stats, pstat]) elif MODE == 'figleaf': # coverage with figleaf, will write to the html directrory import figleaf from figleaf import annotate_html figleaf.start() import test_all from statlib import stats, pstat suite = test_all.get_suite() unittest.TextTestRunner(verbosity=2).run(suite) figleaf.stop() figleaf.write_coverage('.figleaf') data = figleaf.get_data().gather_files() annotate_html.report_as_html(data, 'html', []) else: print 'Invalid mode %s' % mode