def test_thread_safe_save_data(tmpdir):
    """Stress `cov.get_data()` with importer threads still running.

    Non-regression test for: https://github.com/nedbat/coveragepy/issues/581
    """
    # Create some Python modules and put them in the path
    modules_dir = tmpdir.mkdir('test_modules')
    module_names = [f"m{i:03d}" for i in range(1000)]
    for module_name in module_names:
        modules_dir.join(module_name + ".py").write("def f(): pass\n")

    # Shared variables for threads.  A one-element list is used so the
    # nested function can mutate the flag without `nonlocal`.
    should_run = [True]
    imported = []

    old_dir = os.getcwd()
    os.chdir(modules_dir.strpath)
    try:
        # Make sure that all dummy modules can be imported.
        for module_name in module_names:
            import_local_file(module_name)

        def random_load():                              # pragma: nested
            """Import modules randomly to stress coverage."""
            while should_run[0]:
                module_name = random.choice(module_names)
                mod = import_local_file(module_name)
                mod.f()
                imported.append(mod)

        # Spawn some threads with coverage enabled and attempt to read the
        # results right after stopping coverage collection with the threads
        # still running.
        duration = 0.01
        for _ in range(3):
            cov = coverage.Coverage()
            cov.start()

            threads = [threading.Thread(target=random_load) for _ in range(10)]     # pragma: nested
            should_run[0] = True                        # pragma: nested
            for t in threads:                           # pragma: nested
                t.start()

            time.sleep(duration)                        # pragma: nested
            cov.stop()                                  # pragma: nested

            # The following call used to crash with running background threads.
            cov.get_data()

            # Stop the threads
            should_run[0] = False
            for t in threads:
                t.join()

            # If the sleep was too short for any import to happen, retry
            # with a longer sleep (capped at 10 seconds).
            if (not imported) and duration < 10:        # pragma: only failure
                duration *= 2

    finally:
        os.chdir(old_dir)
        # Make sure any straggler threads terminate.
        should_run[0] = False
def test_start_save_stop(self):
    """Saving mid-measurement keeps data traced both before and after the save."""
    self.make_code1_code2()
    cov = coverage.Coverage()
    cov.start()
    import_local_file("code1")                          # pragma: nested
    cov.save()                                          # pragma: nested
    import_local_file("code2")                          # pragma: nested
    cov.stop()                                          # pragma: nested
    self.check_code1_code2(cov)
def test_start_save_nostop(self):
    """Data can be checked while measurement is still running (no stop yet)."""
    self.make_code1_code2()
    cov = coverage.Coverage()
    cov.start()
    import_local_file("code1")                          # pragma: nested
    cov.save()                                          # pragma: nested
    import_local_file("code2")                          # pragma: nested
    self.check_code1_code2(cov)                         # pragma: nested
    # Then stop it, or the test suite gets out of whack.
    cov.stop()                                          # pragma: nested
def test_cleanup_and_reimport(self):
    """After cleaning local imports, a rewritten module file is re-executed."""
    self.make_file("xyzzy.py", "A = 17")
    module = import_local_file("xyzzy")
    assert module.A == 17

    # Drop the cached import, rewrite the file, and import it again:
    # the new contents should be seen.
    self.clean_local_file_imports()
    self.make_file("xyzzy.py", "A = 42")
    module = import_local_file("xyzzy")
    assert module.A == 42
def test_two_getdata_only_warn_once(self):
    """`get_data` warns about empty data once, not on a repeated call."""
    self.make_code1_code2()
    # Omit the only module we trace, so no data will be collected.
    cov = coverage.Coverage(source=["."], omit=["code1.py"])
    cov.start()
    import_local_file("code1")                          # pragma: nested
    cov.stop()                                          # pragma: nested
    # We didn't collect any data, so we should get a warning.
    with self.assert_warnings(cov, ["No data was collected"]):
        cov.get_data()
    # But calling get_data a second time with no intervening activity
    # won't make another warning.
    with self.assert_warnings(cov, []):
        cov.get_data()
def test_two_getdata_warn_twice(self):
    """Reading empty data warns again if more (empty) tracing happened between reads."""
    self.make_code1_code2()
    # Omit both modules, so nothing is ever collected.
    cov = coverage.Coverage(source=["."], omit=["code1.py", "code2.py"])
    cov.start()
    import_local_file("code1")                          # pragma: nested
    # We didn't collect any data, so we should get a warning.
    with self.assert_warnings(cov, ["No data was collected"]):  # pragma: nested
        cov.save()                                      # pragma: nested
    import_local_file("code2")                          # pragma: nested
    # Calling get_data a second time after tracing some more will warn again.
    with self.assert_warnings(cov, ["No data was collected"]):  # pragma: nested
        cov.get_data()                                  # pragma: nested
    # Then stop it, or the test suite gets out of whack.
    cov.stop()                                          # pragma: nested
def random_load():                                      # pragma: nested
    """Import modules randomly to stress coverage.

    Loops until the shared `should_run` flag is cleared; each pass picks
    a random module name, imports it, calls its `f()`, and records the
    module in the shared `imported` list.
    """
    while should_run[0]:
        name = random.choice(module_names)
        module = import_local_file(name)
        module.f()
        imported.append(module)
def test_deep_source(self):
    # When using source=, the XML report needs to mention those directories
    # in the <source> elements.
    # https://github.com/nedbat/coveragepy/issues/439
    self.make_file("src/main/foo.py", "a = 1")
    self.make_file("also/over/there/bar.py", "b = 2")
    cov = coverage.Coverage(
        source=["src/main", "also/over/there", "not/really"])
    cov.start()
    mod_foo = import_local_file("foo", "src/main/foo.py")               # pragma: nested
    mod_bar = import_local_file("bar", "also/over/there/bar.py")        # pragma: nested
    cov.stop()                                                          # pragma: nested

    # "not/really" was never imported, so the report should warn about it.
    with pytest.warns(Warning) as warns:
        cov.xml_report([mod_foo, mod_bar])
    assert_coverage_warnings(
        warns,
        "Module not/really was never imported. (module-not-imported)",
    )

    dom = ElementTree.parse("coverage.xml")
    self.assert_source(dom, "src/main")
    self.assert_source(dom, "also/over/there")
    sources = dom.findall(".//source")
    assert len(sources) == 2

    # Each module appears as a <class> keyed by its base file name,
    # with fully covered (line-rate 1) one-line bodies.
    foo_class = dom.findall(".//class[@name='foo.py']")
    assert len(foo_class) == 1
    assert foo_class[0].attrib == {
        'branch-rate': '0',
        'complexity': '0',
        'filename': 'foo.py',
        'line-rate': '1',
        'name': 'foo.py',
    }

    bar_class = dom.findall(".//class[@name='bar.py']")
    assert len(bar_class) == 1
    assert bar_class[0].attrib == {
        'branch-rate': '0',
        'complexity': '0',
        'filename': 'bar.py',
        'line-rate': '1',
        'name': 'bar.py',
    }
def run_all_functions(self, cov, suite_name):           # pragma: nested
    """Run all functions in `suite_name` under coverage."""
    cov.start()
    suite = import_local_file(suite_name)
    try:
        # Invoke every function defined on the imported module.
        for attr_name in dir(suite):
            attr = getattr(suite, attr_name)
            if inspect.isfunction(attr):
                attr()
    finally:
        cov.stop()
def _run_scenario(self, file_count, call_count, line_count):
    """Time one benchmark scenario.

    Returns a tuple `(baseline, covered, stats)`: wall-clock time without
    coverage, wall-clock time with coverage, and the tracer's stats dict
    (or a falsy value if the tracer reports none).
    """
    self.module_cleaner.clean_local_file_imports()

    # Generate the test files for this scenario.
    for idx in range(file_count):
        make_file(f'test{idx}.py', TEST_FILE)
    make_file('testmain.py', mk_main(file_count, call_count, line_count))

    # Run it once just to get the disk caches loaded up.
    import_local_file("testmain")
    self.module_cleaner.clean_local_file_imports()

    # Run it to get the baseline time.
    start = time.perf_counter()
    import_local_file("testmain")
    baseline = time.perf_counter() - start
    self.module_cleaner.clean_local_file_imports()

    # Run it to get the covered time.
    start = time.perf_counter()
    cov = coverage.Coverage()
    cov.start()
    try:                                                # pragma: nested
        # Import the Python file, executing it.
        import_local_file("testmain")
    finally:                                            # pragma: nested
        # Stop coverage.py.
        covered = time.perf_counter() - start
        # NOTE(review): `cov._collector` is a private attribute — this
        # benchmark reaches into coverage.py internals for tracer stats.
        stats = cov._collector.tracers[0].get_stats()
        if stats:
            # Copy before stopping, since stopping may mutate the dict.
            stats = stats.copy()
        cov.stop()

    return baseline, covered, stats
def start_import_stop(self, cov, modname, modfile=None):
    """Start coverage, import a file, then stop coverage.

    `cov` is started and stopped, with an `import_local_file` of
    `modname` in the middle. `modfile` is the file to import as
    `modname` if it isn't in the current directory.

    The imported module is returned.
    """
    cov.start()
    try:                                                # pragma: nested
        # Execute the module by importing it.
        module = import_local_file(modname, modfile)
    finally:                                            # pragma: nested
        # Always stop measurement, even if the import raised.
        cov.stop()
    return module
def test_switch_context_with_static(self):
    # This test simulates a coverage-aware test runner,
    # measuring labeled coverage via public API,
    # with static label prefix.
    self.make_test_files()

    # Test runner starts
    cov = coverage.Coverage(context="mysuite")
    cov.start()

    # Always-true guard: the string is also a marker so the suite's own
    # coverage config can exclude this nested-measurement region.
    if "pragma: nested":
        # Imports the test suite
        suite = import_local_file("testsuite")

        # Measures test case 1
        cov.switch_context('multiply_zero')
        suite.test_multiply_zero()

        # Measures test case 2
        cov.switch_context('multiply_six')
        suite.test_multiply_six()

        # Runner finishes
        cov.save()
        cov.stop()

    # Labeled data is collected: the static prefix plus each switched label.
    data = cov.get_data()
    expected = ['mysuite', 'mysuite|multiply_six', 'mysuite|multiply_zero']
    assert expected == sorted(data.measured_contexts())

    filenames = self.get_measured_filenames(data)
    suite_filename = filenames['testsuite.py']

    # Each context captured only the lines its test case executed.
    data.set_query_context("mysuite|multiply_six")
    assert [2, 8] == sorted(data.lines(suite_filename))
    data.set_query_context("mysuite|multiply_zero")
    assert [2, 5] == sorted(data.lines(suite_filename))
def test_exception(self):
    # Python 2.3's trace function doesn't get called with "return" if the
    # scope is exiting due to an exception.  This confounds our trace
    # function which relies on scope announcements to track which files to
    # trace.
    #
    # This test is designed to sniff this out.  Each function in the call
    # stack is in a different file, to try to trip up the tracer.  Each
    # file has active lines in a different range so we'll see if the lines
    # get attributed to the wrong file.
    self.make_file(
        "oops.py",
        """\
        def oops(args):
            a = 2
            raise Exception("oops")
            a = 4
        """)

    # Pad each file with blank lines so its active lines fall in a
    # distinct numeric range (fly: 100s, catch: 200s, doit: 300s).
    self.make_file(
        "fly.py",
        "\n" * 100 + """\
        def fly(calls):
            a = 2
            calls[0](calls[1:])
            a = 4
        """)

    self.make_file(
        "catch.py",
        "\n" * 200 + """\
        def catch(calls):
            try:
                a = 3
                calls[0](calls[1:])
                a = 5
            except:
                a = 7
        """)

    self.make_file(
        "doit.py",
        "\n" * 300 + """\
        def doit(calls):
            try:
                calls[0](calls[1:])
            except:
                a = 5
        """)

    # Import all the modules before starting coverage, so the def lines
    # won't be in all the results.
    for mod in "oops fly catch oops doit".split() if False else "oops fly catch doit".split():
        import_local_file(mod)

    # Each run nests the functions differently to get different
    # combinations of catching exceptions and letting them fly.
    runs = [
        ("doit fly oops", {
            'doit.py': [302, 303, 304, 305],
            'fly.py': [102, 103],
            'oops.py': [2, 3],
        }),
        ("doit catch oops", {
            'doit.py': [302, 303],
            'catch.py': [202, 203, 204, 206, 207],
            'oops.py': [2, 3],
        }),
        ("doit fly catch oops", {
            'doit.py': [302, 303],
            'fly.py': [102, 103, 104],
            'catch.py': [202, 203, 204, 206, 207],
            'oops.py': [2, 3],
        }),
        ("doit catch fly oops", {
            'doit.py': [302, 303],
            'catch.py': [202, 203, 204, 206, 207],
            'fly.py': [102, 103],
            'oops.py': [2, 3],
        }),
    ]

    for callnames, lines_expected in runs:
        # Make the list of functions we'll call for this test.
        callnames = callnames.split()
        calls = [getattr(sys.modules[cn], cn) for cn in callnames]

        cov = coverage.Coverage()
        cov.start()
        # Call our list of functions: invoke the first, with the rest as
        # an argument.
        calls[0](calls[1:])                             # pragma: nested
        cov.stop()                                      # pragma: nested

        # Clean the line data and compare to expected results.
        # The file names are absolute, so keep just the base.
        clean_lines = {}
        data = cov.get_data()
        for callname in callnames:
            filename = callname + ".py"
            lines = data.lines(abs_file(filename))
            clean_lines[filename] = sorted(lines)

        if env.JYTHON:                  # pragma: only jython
            # Jython doesn't report on try or except lines, so take those
            # out of the expected lines.
            invisible = [202, 206, 302, 304]
            for lines in lines_expected.values():
                lines[:] = [l for l in lines if l not in invisible]

        assert clean_lines == lines_expected