def test_subprocess_with_pth_files(self):           # pragma: no metacov
    """A pre-existing data file must not leak into auto-measured subprocess data."""
    if env.METACOV:
        # Fixed typo: "suppport" -> "support".
        self.skipTest(
            "Can't test sub-process pth file support during metacoverage")
    # An existing data file should not be read when a subprocess gets
    # measured automatically.  Create the data file here with bogus data in
    # it.
    data = coverage.CoverageData()
    data.add_lines({os.path.abspath('sub.py'): dict.fromkeys(range(100))})
    data.write_file(".mycovdata")
    self.make_file("coverage.ini", """\
        [run]
        data_file = .mycovdata
        """)
    self.set_environ("COVERAGE_PROCESS_START", "coverage.ini")
    import main             # pylint: disable=import-error, unused-variable
    with open("out.txt") as f:
        self.assertEqual(f.read(), "Hello, world!\n")
    # Read the data from .coverage: the bogus 100-line record must be gone,
    # replaced by the 3 lines sub.py really executed.
    self.assert_exists(".mycovdata")
    data = coverage.CoverageData()
    data.read_file(".mycovdata")
    self.assertEqual(data.line_counts()['sub.py'], 3)
def __init__(self, funcinv, solver="z3", query_store=None, solvetimeouts=None, pathtimeout=None, coverage_pruning=None, workers=1, scheduling_policy="central_queue"): self.invocation = funcinv # the input to the function self.symbolic_inputs = {} # string -> SymbolicType self.one_execution_coverage = coverage.CoverageData() self.global_execution_coverage = coverage.CoverageData() # initialize for n in funcinv.getNames(): self.symbolic_inputs[n] = funcinv.createArgumentValue(n) self.constraints_to_solve = PriorityQueue() self.new_constraints = [] self.num_processed_constraints = 0 self.path = PathToConstraint(lambda c: self.addConstraint(c), funcinv.name) # link up SymbolicObject to PathToConstraint in order to intercept control-flow symbolic_type.SymbolicObject.SI = self.path if solvetimeouts is None: solvetimeouts = ExplorationEngine.DEFAULT_SOLVE_TIMEOUTS self.solvetimeouts = sorted(set(solvetimeouts)) self.pathtimeout = pathtimeout self.coverage_pruning = coverage_pruning self.solver = solver self.total_solve_time = 0 self.last_solve_time = 0 self.worker_pool = {i: None for i in range(1, workers + 1)} self.worker_jobs = {i: None for i in range(1, workers + 1)} log.info("Using {} solver workers".format(len(self.worker_pool))) self.finished_queries = Queue() self.scheduling_policy = attrgetter(scheduling_policy)( symbolic.scheduling_policies) self.query_store = query_store if self.query_store is not None: if not path.isdir(self.query_store): raise IOError("Query folder {} not found".format( self.query_store)) # outputs self.solved_constraints = set() self.outstanding_constraint_attempts = {} self.generated_inputs = [] self.execution_return_values = []
def _coverage_wrapper(paths):
    """Collect, combine, and report coverage around the wrapped body.

    This function yields once, so it is presumably decorated with
    ``@contextlib.contextmanager`` at the (unseen) definition site —
    TODO confirm.  ``paths`` is forwarded to ``devmode.CoverageConfig``.
    """
    try:
        import coverage  # NoQA
    except ImportError:
        click.secho(
            'Error: "coverage" package is missing, cannot run tests '
            'with --cov')
        sys.exit(1)
    # Locate the .coveragerc that sits next to the edb package directory.
    for path in edb.__path__:
        cov_rc = pathlib.Path(path).parent / '.coveragerc'
        if cov_rc.exists():
            break
    else:
        raise RuntimeError('cannot locate the .coveragerc file')
    with tempfile.TemporaryDirectory() as td:
        cov_config = devmode.CoverageConfig(
            paths=paths, config=str(cov_rc), datadir=td)
        # Expose the config to subprocesses via the environment.
        cov_config.save_to_environ()
        main_cov = cov_config.new_coverage_object()
        main_cov.start()
        try:
            yield
        finally:
            main_cov.stop()
            main_cov.save()
            # Merge every data file written into the temp dir (main process
            # plus any subprocesses) into a single combined data set.
            data = coverage.CoverageData()
            with os.scandir(td) as it:
                for entry in it:
                    new_data = coverage.CoverageData()
                    new_data.read_file(entry.path)
                    data.update(new_data)
            covfile = str(pathlib.Path(td) / '.coverage')
            data.write_file(covfile)
            report_cov = cov_config.new_custom_coverage_object(
                config_file=str(cov_rc),
                data_file=covfile,
            )
            report_cov.load()
            click.secho('Coverage:')
            report_cov.report()
            # store the coverage file in cwd, so it can be used to produce
            # additional reports with coverage cli
            shutil.copy(covfile, '.')
def test_subprocess_with_pth_files(self):           # pragma: not covered
    """Subprocesses started by os.system must be measured automatically."""
    if os.environ.get('COVERAGE_COVERAGE', ''):
        # Fixed typo: "suppport" -> "support".
        raise SkipTest(
            "Can't test subprocess pth file support during metacoverage")
    # Main will run sub.py
    self.make_file("main.py", """\
        import os, os.path, sys
        ex = os.path.basename(sys.executable)
        os.system(ex + " sub.py")
        """)
    # sub.py will write a few lines.
    self.make_file("sub.py", """\
        f = open("out.txt", "w")
        f.write("Hello, world!\\n")
        f.close()
        """)
    self.make_file("coverage.ini", """\
        [run]
        data_file = .mycovdata
        """)
    self.set_environ("COVERAGE_PROCESS_START", "coverage.ini")
    import main  # pylint: disable=F0401,W0612
    # Use a context manager so the file handle is closed (the original
    # open("out.txt").read() leaked the handle).
    with open("out.txt") as f:
        self.assertEqual(f.read(), "Hello, world!\n")
    # Read the data from .coverage
    self.assert_exists(".mycovdata")
    data = coverage.CoverageData()
    data.read_file(".mycovdata")
    self.assertEqual(data.summary()['sub.py'], 3)
def test_excepthook(self):
    """`coverage run` must mirror plain Python for a custom sys.excepthook."""
    self.make_file("excepthook.py", """\
        import sys

        def excepthook(*args):
            print('in excepthook')
            if maybe == 2:
                print('definitely')

        sys.excepthook = excepthook

        maybe = 1
        raise RuntimeError('Error Outside')
        """)
    cov_status, cov_stdout = self.run_command_status("coverage run excepthook.py")
    py_status, py_stdout = self.run_command_status("python excepthook.py")
    if not env.JYTHON:
        # Exit status must match bare Python, which exits 1 here.
        assert cov_status == py_status
        assert cov_status == 1
    assert "in excepthook" in py_stdout
    assert cov_stdout == py_stdout
    # Seven lines of excepthook.py should be recorded as executed.
    data = coverage.CoverageData()
    data.read()
    assert line_counts(data)['excepthook.py'] == 7
def test_subprocess_with_pth_files_and_parallel(self):
    """Parallel-mode data from a subprocess must combine cleanly.

    Regression test for https://github.com/nedbat/coveragepy/issues/492
    """
    self.make_file("coverage.ini", """\
        [run]
        parallel = true
        """)
    self.set_environ("COVERAGE_PROCESS_START", "coverage.ini")
    self.run_command("coverage run main.py")
    with open("out.txt") as out:
        assert out.read() == "Hello, world!\n"
    self.run_command("coverage combine")

    # The combined .coverage data file must exist and be correct.
    self.assert_exists(".coverage")
    combined = coverage.CoverageData()
    combined.read()
    assert line_counts(combined)['sub.py'] == 3

    # No stray data files may survive the combine.
    data_files = glob.glob(os.getcwd() + '/.coverage*')
    msg = (
        "Expected only .coverage after combine, looks like there are " +
        f"extra data files that were not cleaned up: {data_files!r}"
    )
    assert len(data_files) == 1, msg
def test_combine_parallel_data(self):
    """Two `-x -p` runs followed by `-c` combine into one full data file."""
    self.make_file("b_or_c.py", """\
        import sys
        a = 1
        if sys.argv[1] == 'b':
            b = 1
        else:
            c = 1
        d = 1
        print ('done')
        """)

    # Run each branch in parallel mode; neither run may write .coverage.
    for arg in ('b', 'c'):
        out = self.run_command("coverage -x -p b_or_c.py " + arg)
        self.assertEqual(out, 'done\n')
        self.assertFalse(os.path.exists(".coverage"))

    # After two -p runs, there should be two .coverage.machine.123 files.
    self.assertEqual(self.number_of_data_files(), 2)

    # Combine the parallel coverage data files into .coverage .
    self.run_command("coverage -c")
    self.assertTrue(os.path.exists(".coverage"))

    # After combining, there should be only the .coverage file.
    self.assertEqual(self.number_of_data_files(), 1)

    # Both branches together execute all 7 lines of b_or_c.py.
    data = coverage.CoverageData()
    data.read_file(".coverage")
    self.assertEqual(data.summary()['b_or_c.py'], 7)
def test_excepthook(self):
    """`coverage run` must mirror plain Python for a custom sys.excepthook."""
    self.make_file("excepthook.py", """\
        import sys

        def excepthook(*args):
            print('in excepthook')
            if maybe == 2:
                print('definitely')

        sys.excepthook = excepthook

        maybe = 1
        raise RuntimeError('Error Outside')
        """)
    cov_status, cov_stdout = self.run_command_status("coverage run excepthook.py")
    py_status, py_stdout = self.run_command_status("python excepthook.py")
    if not env.JYTHON:
        # Exit status must match bare Python, which exits 1 here.
        self.assertEqual(cov_status, py_status)
        self.assertEqual(cov_status, 1)
    self.assertIn("in excepthook", py_stdout)
    self.assertEqual(cov_stdout, py_stdout)
    # Seven lines of excepthook.py should be recorded as executed.
    data = coverage.CoverageData()
    data.read_file(".coverage")
    self.assertEqual(data.line_counts()['excepthook.py'], 7)
def main(coverage_file):
    """Prune the source tree using coverage data.

    Files with no executed lines are deleted; covered files are rewritten
    (via ``rewrite``) to keep only executed code.

    Args:
        coverage_file: path to a coverage.py data file to read.
    """
    data = coverage.CoverageData()
    data.read_file(coverage_file)
    # Use the public measured_files() API instead of the private
    # data._lines attribute the original code reached into.
    for filename in data.measured_files():
        lines = data.lines(filename)
        assert lines is not None
        if not os.path.exists(filename):
            # It could be unlinked before
            continue
        if not lines:
            print(filename, 'not covered, removing')
            os.unlink(filename)
            continue
        with open(filename) as fp:
            tree = pasta.parse(fp.read())
        new_tree = rewrite(tree, lines)
        try:
            to_write = pasta.dump(new_tree)
        except pasta.base.codegen.PrintError:
            # Keep the original file when it can't be re-serialized.
            print("Error with file", filename)
            continue
        with open(filename, 'w') as fp:
            fp.write(to_write)
def test_combine_parallel_data_with_a_corrupt_file(self):
    """`coverage combine` must warn about, and keep, an unreadable data file."""
    self.make_b_or_c_py()
    for arg in ('b', 'c'):
        out = self.run_command("coverage run -p b_or_c.py " + arg)
        self.assertEqual(out, 'done\n')
        self.assert_doesnt_exist(".coverage")
    # After two -p runs, there should be two .coverage.machine.123 files.
    self.assertEqual(self.number_of_data_files(), 2)

    # Make a bogus data file.
    self.make_file(".coverage.bad", "This isn't a coverage data file.")

    # Combine the parallel coverage data files into .coverage .
    out = self.run_command("coverage combine")
    self.assert_exists(".coverage")
    self.assert_exists(".coverage.bad")
    warning_regex = (
        r"Coverage.py warning: Couldn't read data from '.*\.coverage\.bad': "
        r"CoverageException: Doesn't seem to be a coverage\.py data file"
    )
    self.assertRegex(out, warning_regex)
    # After combining, those two should be the only data files.
    self.assertEqual(self.number_of_data_files(), 2)

    # The good runs still combine: all 7 lines of b_or_c.py executed.
    data = coverage.CoverageData()
    data.read_file(".coverage")
    self.assertEqual(data.line_counts()['b_or_c.py'], 7)
def test_subprocess_with_pth_files(self):           # pragma: not covered
    """Subprocesses started by os.system must be measured automatically."""
    if env.METACOV:
        # Fixed typo: "suppport" -> "support".
        self.skip("Can't test sub-process pth file support during metacoverage")
    # Main will run sub.py
    self.make_file("main.py", """\
        import os, os.path, sys
        ex = os.path.basename(sys.executable)
        os.system(ex + " sub.py")
        """)
    # sub.py will write a few lines.
    self.make_file("sub.py", """\
        with open("out.txt", "w") as f:
            f.write("Hello, world!\\n")
        """)
    self.make_file("coverage.ini", """\
        [run]
        data_file = .mycovdata
        """)
    self.set_environ("COVERAGE_PROCESS_START", "coverage.ini")
    import main  # pylint: disable=import-error,unused-variable
    with open("out.txt") as f:
        self.assertEqual(f.read(), "Hello, world!\n")
    # Read the data from .coverage
    self.assert_exists(".mycovdata")
    data = coverage.CoverageData()
    data.read_file(".mycovdata")
    self.assertEqual(data.line_counts()['sub.py'], 2)
def test_subprocess_with_pth_files_and_parallel(self):  # pragma: no metacov
    """Parallel-mode data from a subprocess must combine cleanly.

    Regression test for
    https://bitbucket.org/ned/coveragepy/issues/492/subprocess-coverage-strange-detection-of
    """
    if env.METACOV:
        # Fixed typo: "suppport" -> "support".
        self.skipTest(
            "Can't test sub-process pth file support during metacoverage")
    self.make_file("coverage.ini", """\
        [run]
        parallel = true
        """)
    self.set_environ("COVERAGE_PROCESS_START", "coverage.ini")
    self.run_command("coverage run main.py")
    with open("out.txt") as f:
        self.assertEqual(f.read(), "Hello, world!\n")
    self.run_command("coverage combine")

    # assert that the combined .coverage data file is correct
    self.assert_exists(".coverage")
    data = coverage.CoverageData()
    data.read_file(".coverage")
    self.assertEqual(data.line_counts()['sub.py'], 3)

    # assert that there are *no* extra data files left over after a combine
    data_files = glob.glob(os.getcwd() + '/.coverage*')
    self.assertEqual(
        len(data_files), 1,
        "Expected only .coverage after combine, looks like there are "
        "extra data files that were not cleaned up: %r" % data_files)
def test_combine_parallel_data_in_two_steps(self):
    """Combine one run, then append a second combine on top of it."""
    self.make_b_or_c_py()

    out = self.run_command("coverage run -p b_or_c.py b")
    self.assertEqual(out, 'done\n')
    self.assert_doesnt_exist(".coverage")
    self.assertEqual(self.number_of_data_files(), 1)

    # Combine the (one) parallel coverage data file into .coverage .
    self.run_command("coverage combine")
    self.assert_exists(".coverage")
    self.assertEqual(self.number_of_data_files(), 1)

    out = self.run_command("coverage run -p b_or_c.py c")
    self.assertEqual(out, 'done\n')
    self.assert_exists(".coverage")
    self.assertEqual(self.number_of_data_files(), 2)

    # Append the second run's data onto the existing combined file.
    self.run_command("coverage combine --append")
    self.assert_exists(".coverage")
    # Only the single .coverage file remains after combining.
    self.assertEqual(self.number_of_data_files(), 1)

    # Both branches together execute all 7 lines of b_or_c.py.
    data = coverage.CoverageData()
    data.read_file(".coverage")
    self.assertEqual(data.line_counts()['b_or_c.py'], 7)
def test_fullcoverage(self):                        # pragma: no metacov
    """The fullcoverage trick should measure stdlib lines from process start."""
    if env.PY2:
        # The trick is not available on Python 2.
        self.skipTest("fullcoverage doesn't work on Python 2.")
    if not env.C_TRACER or env.METACOV:
        # It needs the C tracer, and can't run while measuring ourselves.
        self.skipTest("fullcoverage only works with the C tracer.")

    # fullcoverage gets stdlib modules measured from the very beginning of
    # the process.  Import os via a tiny script and see how much of it was
    # recorded.
    self.make_file("getenv.py", """\
        import os
        print("FOOEY == %s" % os.getenv("FOOEY"))
        """)
    fullcov_dir = os.path.join(os.path.dirname(coverage.__file__), "fullcoverage")
    self.set_environ("FOOEY", "BOO")
    self.set_environ("PYTHONPATH", fullcov_dir)
    out = self.run_command("python -m coverage run -L getenv.py")
    self.assertEqual(out, "FOOEY == BOO\n")
    data = coverage.CoverageData()
    data.read_file(".coverage")
    # Importing os executes roughly 120 lines; os.getenv alone about 5.
    # Anything well above that threshold proves early measurement worked.
    self.assertGreater(data.line_counts()['os.py'], 50)
def _merge_nbval_coverage_data(cov):
    """Merge nbval coverage data into pytest-cov data."""
    if not cov:
        return

    suffix = _make_suffix(cov)
    if suffix is True:
        # A literal True suffix means parallel mode: several files are
        # generated and handled elsewhere (possibly with "no coverage"
        # warnings), so there is nothing for us to do here.
        return

    # Locate and read the coverage file nbval produced during this run.
    nbval_file = cov.data_files.filename + '.' + suffix
    nbval_data = coverage.CoverageData(debug=cov.debug)
    try:
        nbval_data.read_file(os.path.abspath(nbval_file))
    except coverage.CoverageException:
        return

    # Build path aliases the same way coverage.py does internally.
    aliases = None
    if cov.config.paths:
        aliases = coverage.files.PathAliases()
        for paths in cov.config.paths.values():
            result = paths[0]
            for pattern in paths[1:]:
                aliases.add(pattern, result)

    # Fold the nbval data into the pytest-cov data, then drop our file.
    cov.data.update(nbval_data, aliases=aliases)
    coverage.misc.file_be_gone(nbval_file)
def enumerate_python_arcs(
        path,  # type: str
        coverage,  # type: coverage_module
        modules,  # type: t.Dict[str, str]
        collection_search_re,  # type: t.Optional[t.Pattern]
        collection_sub_re,  # type: t.Optional[t.Pattern]
):  # type: (...) -> t.Generator[t.Tuple[str, t.Set[t.Tuple[int, int]]]]
    """Enumerate Python code coverage arcs in the given file."""
    if os.path.getsize(path) == 0:
        display.warning('Empty coverage file: %s' % path, verbosity=2)
        return

    original = coverage.CoverageData()
    try:
        original.read_file(path)
    except Exception as ex:  # pylint: disable=locally-disabled, broad-except
        display.error(u'%s' % ex)
        return

    for filename in original.measured_files():
        arcs = original.arcs(filename)
        if not arcs:
            # This is most likely due to using an unsupported version of coverage.
            display.warning('No arcs found for "%s" in coverage file: %s' % (filename, path))
            continue
        filename = sanitize_filename(
            filename,
            modules=modules,
            collection_search_re=collection_search_re,
            collection_sub_re=collection_sub_re,
        )
        # Sanitizing may reject the file entirely; only yield survivors.
        if filename:
            yield filename, set(arcs)
def __init2__(self):
    """Reset the per-run exploration state."""
    # Constraints queued to be handed to the solver.
    self.constraints_to_solve = []
    self.path = PathToConstraint()
    # Recorded (input, output) pairs from executions.
    self.in_out = []
    self.coverage_data = coverage.CoverageData()
    self.coverage_accumulated_missing_lines = {}
    self.var_to_types = {}
def test_coverage():
    """Branch-run answer.py's tests and check function.py's executed lines."""
    answer_path = os.path.join(BASE_DIR, 'answer.py')
    cmd = ['coverage', 'run', '--branch', '-m', 'unittest', answer_path]
    subprocess.check_call(cmd, cwd=BASE_DIR)
    cov_data = coverage.CoverageData()
    cov_data.read_file(os.path.join(BASE_DIR, '.coverage'))
    assert cov_data.line_counts()['function.py'] == 4
def assert_pth_and_source_work_together(self, dashm, package, source):  # pragma: not covered
    """Check that the pth-file auto-start and a `source` setting cooperate.

    Args:
        dashm: "-m" to run main as a module, or "" to run it as a script.
        package: package name containing main/sub, or "" for top level.
        source: the module name to put in the [run] source setting.
    """
    if env.METACOV:
        raise SkipTest(
            "Can't test sub-process pth file suppport during metacoverage")

    def fullname(modname):
        # Qualify modname with the package only when running with -m.
        if package and dashm:
            return '.'.join((package, modname))
        else:
            return modname

    def path(basename):
        # File-system path of basename inside the (possibly empty) package.
        return os.path.join(package, basename)

    # Main will run sub.py.
    self.make_file(
        path("main.py"), """\
        import %s
        if True: pass
        """ % fullname('sub'))
    if package:
        self.make_file(path("__init__.py"), "")
    # sub.py will write a few lines.
    self.make_file(
        path("sub.py"), """\
        with open("out.txt", "w") as f:
            f.write("Hello, world!")
        """)
    self.make_file(
        "coverage.ini", """\
        [run]
        source = %s
        """ % fullname(source))
    self.set_environ("COVERAGE_PROCESS_START", "coverage.ini")
    if dashm:
        cmd = (sys.executable, dashm, fullname('main'))
    else:
        cmd = (sys.executable, path('main.py'))
    # TODO: can we use run_command here instead of Popen?
    from subprocess import Popen
    Popen(cmd).wait()
    with open("out.txt") as f:
        self.assertEqual(f.read(), "Hello, world!")
    # Read the data from .coverage: only sub.py (2 lines) should have been
    # measured, because of the source= setting.
    self.assert_exists(".coverage")
    data = coverage.CoverageData()
    data.read_file(".coverage")
    summary = data.summary()
    print(summary)
    self.assertEqual(summary[source + '.py'], 2)
    self.assertEqual(len(summary), 1)
def make_data_file():
    """Write a .coverage.1 data file marking 10 lines in two girder files."""
    data = coverage.CoverageData(".coverage.1")
    for source in ('ci/girder/g1.py', 'ci/girder/plugins/p1.py'):
        data.add_lines({abs_file(source): dict.fromkeys(range(10))})
    data.write()
def make_data_file(self, basename=None, suffix=None, lines=None, file_tracers=None):
    """Write some data into a coverage data file."""
    cov_data = coverage.CoverageData(basename=basename, suffix=suffix)
    # Only record sections that were actually provided.
    if lines:
        cov_data.add_lines(lines)
    if file_tracers:
        cov_data.add_file_tracers(file_tracers)
    cov_data.write()
    return cov_data
def read_coverage_data(use_coverage):
    """Load recorded data from the .coverage file, or None when disabled."""
    if not use_coverage:
        return None
    print('Using coverage data from .coverage file')
    # noinspection PyPackageRequirements,PyUnresolvedReferences
    import coverage
    coverage_data = coverage.CoverageData()
    coverage_data.read_file('.coverage')
    return coverage_data
def run(self, test):
    """Run the given test, optionally under coverage measurement.

    When ``self.coverage`` is falsy, this simply delegates to
    ``self._run(test)``.  Otherwise it measures the run, combines data
    files from the temp dir (main process plus subprocesses), and prints
    a coverage report before returning the test result.
    """
    if self.coverage:
        import coverage

        # Find the .coveragerc that sits next to the edb package directory.
        for path in edb.__path__:
            cov_rc = pathlib.Path(path).parent / '.coveragerc'
            if cov_rc.exists():
                break
        else:
            raise RuntimeError('cannot locate the .coveragerc file')

        with tempfile.TemporaryDirectory() as td:
            cov_config = devmode.CoverageConfig(
                paths=self.coverage, config=str(cov_rc), datadir=td)
            # Expose the config to subprocesses via the environment.
            cov_config.save_to_environ()

            main_cov = cov_config.new_coverage_object()
            main_cov.start()

            try:
                # NOTE: the return value escapes the try; the finally block
                # still runs (combining + reporting) before it is returned.
                return self._run(test)
            finally:
                main_cov.stop()
                main_cov.save()

                # Merge every data file written into the temp dir into a
                # single combined data set, then write it to cwd.
                data = coverage.CoverageData()
                with os.scandir(td) as it:
                    for entry in it:
                        new_data = coverage.CoverageData()
                        new_data.read_file(entry.path)
                        data.update(new_data)
                data.write_file('.coverage')

                report_cov = cov_config.new_custom_coverage_object(
                    config_file=str(cov_rc))
                report_cov.load()
                report_cov.report()
    else:
        return self._run(test)
def read_python_coverage_native(path: str, coverage: coverage_module) -> PythonArcs:
    """Return coverage arcs from the specified coverage file using the coverage API."""
    try:
        data = coverage.CoverageData(path)
        data.read()
        # Map every measured file to its recorded arcs.
        arcs = {measured: data.arcs(measured) for measured in data.measured_files()}
    except Exception as ex:
        raise CoverageError(path, f'Error reading coverage file using coverage API: {ex}') from ex
    return arcs
def write_cov_file(line_data: Dict[str, List[int]], fname: str) -> None:
    """Write a coverage file supporting both Coverage v4 and v5.

    Args:
        line_data: Dictionary of line data for the coverage file.
        fname: string filename for output location (absolute path)

    Returns:
        None
    """
    if coverage.version_info[0] == 4:
        # v4: the target filename is passed at write time.
        covdata = coverage.CoverageData()
        covdata.add_lines(line_data)
        covdata.write_file(fname)
        return
    # assume coverage v 5: the filename is bound at construction time.
    covdata = coverage.CoverageData(basename=fname)
    covdata.add_lines(line_data)
    covdata.write()
def test_combine_with_aliases(self):
    """[paths] aliases must merge same-named files from different dirs."""
    self.make_file("d1/x.py", """\
        a = 1
        b = 2
        print("%s %s" % (a, b))
        """)
    self.make_file("d2/x.py", """\
        # 1
        # 2
        # 3
        c = 4
        d = 5
        print("%s %s" % (c, d))
        """)
    self.make_file(".coveragerc", """\
        [run]
        parallel = True

        [paths]
        source =
            src
            */d1
            */d2
        """)

    out = self.run_command("coverage run " + os.path.normpath("d1/x.py"))
    self.assertEqual(out, '1 2\n')
    out = self.run_command("coverage run " + os.path.normpath("d2/x.py"))
    self.assertEqual(out, '4 5\n')
    self.assertEqual(self.number_of_data_files(), 2)

    self.run_command("coverage combine")
    self.assert_exists(".coverage")
    # Combining leaves only the single .coverage file behind.
    self.assertEqual(self.number_of_data_files(), 1)

    # The two different x.py files must have been merged under src/x.py.
    data = coverage.CoverageData()
    data.read_file(".coverage")
    counts = data.line_counts(fullpath=True)
    self.assertEqual(len(counts), 1)
    got = os.path.normcase(os.path.abspath(list(counts.keys())[0]))
    want = os.path.normcase(os.path.abspath('src/x.py'))
    self.assertEqual(got, want)
    self.assertEqual(list(counts.values())[0], 6)
def test_combine_with_rc(self):
    """Parallel mode from a .coveragerc must combine and still report."""
    self.make_file("b_or_c.py", """\
        import sys
        a = 1
        if sys.argv[1] == 'b':
            b = 1
        else:
            c = 1
        d = 1
        print('done')
        """)
    self.make_file(".coveragerc", """\
        [run]
        parallel = true
        """)

    # Two ordinary runs; parallel=true keeps .coverage from appearing.
    for arg in ('b', 'c'):
        out = self.run_command("coverage run b_or_c.py " + arg)
        self.assertEqual(out, 'done\n')
        self.assert_doesnt_exist(".coverage")
    # After two runs, there should be two .coverage.machine.123 files.
    self.assertEqual(self.number_of_data_files(), 2)

    # Combine the parallel coverage data files into .coverage .
    self.run_command("coverage combine")
    self.assert_exists(".coverage")
    self.assert_exists(".coveragerc")
    # After combining, there should be only the .coverage file.
    self.assertEqual(self.number_of_data_files(), 1)

    # Both branches together execute all 7 lines of b_or_c.py.
    data = coverage.CoverageData()
    data.read_file(".coverage")
    self.assertEqual(data.summary()['b_or_c.py'], 7)

    # Reporting should still work even with the .rc file
    out = self.run_command("coverage report")
    self.assertMultiLineEqual(
        out,
        textwrap.dedent("""\
            Name        Stmts   Miss  Cover
            -------------------------------
            b_or_c.py       7      0   100%
            """))
def test_append_can_create_a_data_file(self):
    """`run --append` with no prior data file must simply create one."""
    self.make_b_or_c_py()
    out = self.run_command("coverage run --append b_or_c.py b")
    self.assertEqual(out, 'done\n')
    self.assert_exists(".coverage")
    self.assertEqual(self.number_of_data_files(), 1)

    # Only the 'b' branch ran, so just 6 of the 7 lines executed.
    data = coverage.CoverageData()
    data.read_file(".coverage")
    self.assertEqual(data.line_counts()['b_or_c.py'], 6)
def test_append_can_create_a_data_file(self):
    """`run --append` with no prior data file must simply create one."""
    self.make_b_or_c_py()
    out = self.run_command("coverage run --append b_or_c.py b")
    assert out == 'done\n'
    self.assert_exists(".coverage")
    self.assert_file_count(".coverage.*", 0)

    # Only the 'b' branch ran, so just 6 of the 7 lines executed.
    data = coverage.CoverageData()
    data.read()
    assert line_counts(data)['b_or_c.py'] == 6
def test_combine_with_aliases(self):
    """[paths] aliases must merge same-named files from different dirs."""
    self.make_file("d1/x.py", """\
        a = 1
        b = 2
        print(f"{a} {b}")
        """)
    self.make_file("d2/x.py", """\
        # 1
        # 2
        # 3
        c = 4
        d = 5
        print(f"{c} {d}")
        """)
    self.make_file(".coveragerc", """\
        [run]
        source = .
        parallel = True

        [paths]
        source =
            src
            */d1
            */d2
        """)

    out = self.run_command("coverage run " + os.path.normpath("d1/x.py"))
    assert out == '1 2\n'
    out = self.run_command("coverage run " + os.path.normpath("d2/x.py"))
    assert out == '4 5\n'
    self.assert_file_count(".coverage.*", 2)

    self.run_command("coverage combine")
    self.assert_exists(".coverage")
    # Combining leaves only the single .coverage file behind.
    self.assert_file_count(".coverage.*", 0)

    # The two different x.py files must have been merged under src/x.py.
    data = coverage.CoverageData()
    data.read()
    counts = line_counts(data, fullpath=True)
    assert len(counts) == 1
    actual = abs_file(list(counts.keys())[0])
    expected = abs_file('src/x.py')
    assert expected == actual
    assert list(counts.values())[0] == 6