def test_update_run_info(self):
    self.skip_unless_data_storage_is("json")
    covdata1 = CoverageData()
    covdata1.add_arcs(ARCS_3)
    covdata1.add_run_info(hello="there", count=17)

    covdata2 = CoverageData()
    covdata2.add_arcs(ARCS_4)
    covdata2.add_run_info(hello="goodbye", count=23)

    covdata3 = CoverageData()
    covdata3.update(covdata1)
    covdata3.update(covdata2)

    self.assertEqual(covdata3.run_infos(), [
        {'hello': 'there', 'count': 17},
        {'hello': 'goodbye', 'count': 23},
    ])
def test_combining_with_aliases(self):
    covdata1 = CoverageData()
    covdata1.add_line_data({
        '/home/ned/proj/src/a.py': {1: None, 2: None},
        '/home/ned/proj/src/sub/b.py': {3: None},
    })
    covdata1.write(suffix='1')

    covdata2 = CoverageData()
    covdata2.add_line_data({
        r'c:\ned\test\a.py': {4: None, 5: None},
        r'c:\ned\test\sub\b.py': {6: None},
    })
    covdata2.write(suffix='2')

    covdata3 = CoverageData()
    aliases = PathAliases()
    aliases.add("/home/ned/proj/src/", "./")
    aliases.add(r"c:\ned\test", "./")
    covdata3.combine_parallel_data(aliases=aliases)

    self.assert_summary(covdata3, {'./a.py': 4, './sub/b.py': 2}, fullpath=True)
    self.assert_measured_files(covdata3, ['./a.py', './sub/b.py'])
def test_combining_from_different_directories(self):
    os.makedirs('cov1')
    covdata1 = CoverageData('cov1/.coverage.1')
    covdata1.add_lines(LINES_1)
    covdata1.write()

    os.makedirs('cov2')
    covdata2 = CoverageData('cov2/.coverage.2')
    covdata2.add_lines(LINES_2)
    covdata2.write()

    # This data won't be included.
    covdata_xxx = CoverageData('.coverage.xxx')
    covdata_xxx.add_arcs(ARCS_3)
    covdata_xxx.write()

    covdata3 = CoverageData()
    combine_parallel_data(covdata3, data_paths=['cov1', 'cov2'])

    self.assert_line_counts(covdata3, SUMMARY_1_2)
    self.assert_measured_files(covdata3, MEASURED_FILES_1_2)
    self.assert_doesnt_exist("cov1/.coverage.1")
    self.assert_doesnt_exist("cov2/.coverage.2")
    self.assert_exists(".coverage.xxx")
def test_file_format_with_arcs(self):
    # Write with CoverageData, then read the JSON explicitly.
    covdata = CoverageData()
    covdata.add_arcs(ARCS_3)
    self.data_files.write(covdata)

    data = self.read_json_data_file(".coverage")

    self.assertNotIn('lines', data)
    arcs = data['arcs']
    self.assertCountEqual(arcs.keys(), MEASURED_FILES_3)
    self.assertCountEqual(arcs['x.py'], map(list, X_PY_ARCS_3))
    self.assertCountEqual(arcs['y.py'], map(list, Y_PY_ARCS_3))

    # If no file tracers were involved, there's no file_tracers entry.
    self.assertNotIn('file_tracers', data)
def test_file_format(self):
    # Write with CoverageData, then read the pickle explicitly.
    covdata = CoverageData()
    covdata.add_line_data(DATA_1)
    covdata.write()

    with open(".coverage", 'rb') as fdata:
        data = pickle.load(fdata)

    lines = data['lines']
    self.assertCountEqual(lines.keys(), MEASURED_FILES_1)
    self.assertCountEqual(lines['a.py'], A_PY_LINES_1)
    self.assertCountEqual(lines['b.py'], B_PY_LINES_1)
    # If not measuring branches, there's no arcs entry.
    self.assertEqual(data.get('arcs', 'not there'), 'not there')
def test_file_format_with_arcs(self):
    # Write with CoverageData, then read the pickle explicitly.
    covdata = CoverageData()
    covdata.add_arc_data(ARC_DATA_3)
    covdata.write()

    with open(".coverage", 'rb') as fdata:
        data = pickle.load(fdata)

    self.assertSameElements(data['lines'].keys(), [])
    arcs = data['arcs']
    self.assertSameElements(arcs['x.py'], X_PY_ARCS_3)
    self.assertSameElements(arcs['y.py'], Y_PY_ARCS_3)
def test_file_format(self):
    # Write with CoverageData, then read the JSON explicitly.
    covdata = CoverageData()
    covdata.add_lines(LINES_1)
    covdata.write()

    data = self.read_json_data_file(".coverage")

    lines = data['lines']
    self.assertCountEqual(lines.keys(), MEASURED_FILES_1)
    self.assertCountEqual(lines['a.py'], A_PY_LINES_1)
    self.assertCountEqual(lines['b.py'], B_PY_LINES_1)

    # If not measuring branches, there's no arcs entry.
    self.assertNotIn('arcs', data)
    # If no file tracers were involved, there's no file_tracers entry.
    self.assertNotIn('file_tracers', data)
def merge_coverage(coverage_data, from_path, to_path):
    """Return a new CoverageData with files under ``from_path`` re-rooted into ``to_path``."""
    new_coverage_data = CoverageData()
    assert coverage_data._filename != new_coverage_data._filename
    for filename in coverage_data.measured_files():
        # Strip everything up to and including from_path; paths that don't
        # contain from_path pass through unchanged.
        result_filename = filename.split(from_path)[-1]
        if filename != result_filename:
            result_filename = result_filename.lstrip('/')
            result_filename = os.path.join(to_path, result_filename)
            result_filename = os.path.abspath(result_filename)
        assert os.path.exists(result_filename), result_filename
        new_coverage_data.add_arcs(
            {result_filename: coverage_data.arcs(filename)}
        )
    return new_coverage_data
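# A minimal usage sketch for merge_coverage above, not from the original
# source: it assumes the coverage 4.x CoverageData API (read_file/write_file),
# and the file names and project path are hypothetical.
def example_merge_coverage_usage():
    from coverage.data import CoverageData
    orig = CoverageData()
    orig.read_file('.coverage.orig')  # hypothetical input data file
    # Re-root everything measured under /site-packages/ into the project tree.
    merged = merge_coverage(orig, '/site-packages/', '/home/user/project')
    merged.write_file('.coverage')  # write the remapped data back out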
def raise_exc(exc, content, ex, ex2, outfile, destcov, source, dests, inter, cov):
    from coverage.data import CoverageData

    def shorten(t):
        """Truncate long text so the error message stays readable."""
        if len(t) > 2000:
            return t[:2000] + "\n..."
        return t

    content = shorten(content)
    ex = "\n-\n".join(shorten(_) for _ in ex)
    ex2 = "\n-\n".join(shorten(_) for _ in ex2)
    rows = [
        "destcov='{0}'".format(destcov),
        "outfile='{0}'".format(outfile),
        "source='{0}'".format(source),
        "cov.source={0}".format(cov.source if cov is not None else None),
        "dests='{0}'".format(';'.join(dests)),
        "inter={0}".format(inter),
    ]
    if cov is not None and cov.data is not None and cov.data._lines is not None:
        rows.append("----- LINES")
        end = min(5, len(cov.data._lines))
        for k, v in list(sorted(cov.data._lines.items()))[:end]:
            rows.append(' {0}:{1}'.format(k, v))
        rows.append("----- RUNS")
        end = min(5, len(cov.data._runs))
        for k in cov.data._runs[:end]:
            rows.append(' {0}'.format(k))
        rows.append("----- END")
    for d in dests:
        dd = CoverageData()
        dd.read_file(d + "~")
        rows.append("------- LINES - '{0}'".format(d))
        end = min(5, len(dd._lines))
        for k, v in list(sorted(dd._lines.items()))[:end]:
            rows.append(' {0}:{1}'.format(k, v))
        rows.append("------- RUNS - '{0}'".format(d))
        end = min(5, len(dd._runs))
        for k in dd._runs[:end]:
            rows.append(' {0}'.format(k))
        rows.append("------- END")
    mes = "{5}. In '{0}'.\n{1}\n{2}\n---AFTER---\n{3}\n---BEGIN---\n{4}"
    raise RuntimeError(
        mes.format(outfile, "\n".join(rows), content, ex, ex2, exc, cov)) from exc
def test_file_format(self):
    # Write with CoverageData, then read the pickle explicitly.
    covdata = CoverageData()
    covdata.add_line_data(DATA_1)
    covdata.write()

    with open(".coverage", 'rb') as fdata:
        data = pickle.load(fdata)

    lines = data['lines']
    self.assertSameElements(lines.keys(), EXECED_FILES_1)
    self.assertSameElements(lines['a.py'], A_PY_LINES_1)
    self.assertSameElements(lines['b.py'], B_PY_LINES_1)
    # If not measuring branches, there's no arcs entry.
    self.assertEqual(data.get('arcs', 'not there'), 'not there')
def test_debug_data(self):
    data = CoverageData()
    data.add_lines({
        "file1.py": dict.fromkeys(range(1, 18)),
        "file2.py": dict.fromkeys(range(1, 24)),
    })
    data.add_file_tracers({"file1.py": "a_plugin"})
    data.write()

    self.command_line("debug data")
    assert self.stdout() == textwrap.dedent("""\
        -- data ------------------------------------------------------
        path: FILENAME
        has_arcs: False

        2 files:
        file1.py: 17 lines [a_plugin]
        file2.py: 23 lines
        """).replace("FILENAME", data.data_filename())
def test_thread_stress(self):
    covdata = CoverageData()
    exceptions = []

    def thread_main():
        """Every thread will try to add the same data."""
        try:
            covdata.add_lines(LINES_1)
        except Exception as ex:
            exceptions.append(ex)

    threads = [threading.Thread(target=thread_main) for _ in range(10)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    self.assert_lines1_data(covdata)
    assert exceptions == []
def test_run_omit_vs_report_omit(self):
    # https://github.com/nedbat/coveragepy/issues/622
    # report:omit shouldn't clobber run:omit.
    self.make_mycode()
    self.make_file(".coveragerc", """\
        [run]
        omit = */covmodzip1.py

        [report]
        omit = */covmod1.py
        """)
    self.run_command("coverage run mycode.py")

    # Read the data written, to see that the right files have been
    # omitted from running.
    covdata = CoverageData()
    covdata.read()
    files = [os.path.basename(p) for p in covdata.measured_files()]
    assert "covmod1.py" in files
    assert "covmodzip1.py" not in files
def test_run_omit_vs_report_omit(self):
    # https://bitbucket.org/ned/coveragepy/issues/622/report-omit-overwrites-run-omit
    # report:omit shouldn't clobber run:omit.
    self.make_mycode()
    self.make_file(".coveragerc", """\
        [run]
        omit = */covmodzip1.py

        [report]
        omit = */covmod1.py
        """)
    self.run_command("coverage run mycode.py")

    # Read the data written, to see that the right files have been
    # omitted from running.
    covdata = CoverageData()
    covdata.read_file(".coverage")
    files = [os.path.basename(p) for p in covdata.measured_files()]
    self.assertIn("covmod1.py", files)
    self.assertNotIn("covmodzip1.py", files)
def test_combining_line_contexts(self):
    red_data, blue_data = self.run_red_blue()
    for datas in [[red_data, blue_data], [blue_data, red_data]]:
        combined = CoverageData(suffix="combined")
        for data in datas:
            combined.update(data)
        self.assertEqual(combined.measured_contexts(), {'red', 'blue'})

        full_names = {os.path.basename(f): f for f in combined.measured_files()}
        self.assertCountEqual(full_names, ['red.py', 'blue.py'])

        fred = full_names['red.py']
        fblue = full_names['blue.py']
        self.assertEqual(combined.lines(fred, context='red'), self.LINES)
        self.assertEqual(combined.lines(fred, context='blue'), [])
        self.assertEqual(combined.lines(fblue, context='red'), [])
        self.assertEqual(combined.lines(fblue, context='blue'), self.LINES)
def coverage_data(self) -> CoverageData:
    """Read the coverage file for lines and arcs data.

    This is cached locally and updated if the coverage_file is changed.

    Returns:
        A CoverageData object based on the ``coverage_file``.

    Raises:
        FileNotFoundError: if coverage_file does not exist.
    """
    if not self.coverage_file.exists():
        raise FileNotFoundError(
            f"{self.coverage_file.resolve()} does not exist. "
            "Set the coverage_file property to a valid file."
        )
    if self._coverage_data is None:
        self._coverage_data = CoverageData()
        self._coverage_data.read_file(self.coverage_file)
    return self._coverage_data
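# Hedged usage sketch, not from the original source: `report` stands in for a
# hypothetical object exposing the coverage_data() accessor above, with
# coverage_file set to a pathlib.Path.
def example_coverage_data_usage(report):
    data = report.coverage_data()  # reads the file once, then serves the cache
    for measured in data.measured_files():
        print(measured, sorted(data.lines(measured)))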
def test_read_errors(self):
    covdata = CoverageData()
    msg = r"Couldn't read data from '{0}': \S+"

    self.make_file("xyzzy.dat", "xyzzy")
    with self.assertRaisesRegex(CoverageException, msg.format("xyzzy.dat")):
        covdata.read_file("xyzzy.dat")

    self.make_file("empty.dat", "")
    with self.assertRaisesRegex(CoverageException, msg.format("empty.dat")):
        covdata.read_file("empty.dat")

    with self.assertRaisesRegex(CoverageException, msg.format("nonexistent.dat")):
        covdata.read_file("nonexistent.dat")

    self.make_file("misleading.dat", CoverageData._GO_AWAY + " this isn't JSON")
    with self.assertRaisesRegex(CoverageException, msg.format("misleading.dat")):
        covdata.read_file("misleading.dat")

    # After all that, no data should be in our CoverageData.
    self.assertFalse(covdata)
def testnodedown(self, node, error):
    """Collect data file name from slave."""
    # If slave doesn't return any data then it is likely that this
    # plugin didn't get activated on the slave side.
    if not (hasattr(node, 'slaveoutput') and 'cov_slave_node_id' in node.slaveoutput):
        self.failed_slaves.append(node)
        return

    # If slave is not collocated then we must save the data file
    # that it returns to us.
    if 'cov_slave_data' in node.slaveoutput:
        data_suffix = '%s.%s.%06d.%s' % (
            socket.gethostname(), os.getpid(),
            random.randint(0, 999999),
            node.slaveoutput['cov_slave_node_id'])

        cov = coverage.coverage(source=self.cov_source,
                                branch=self.cov_branch,
                                data_suffix=data_suffix,
                                config_file=self.cov_config)
        cov.start()
        if hasattr(self.cov.data, 'read_fileobj'):  # for coverage 4.0
            data = CoverageData()
            data.read_fileobj(StringIO(node.slaveoutput['cov_slave_data']))
            cov.data.update(data)
        else:
            cov.data.lines, cov.data.arcs = node.slaveoutput['cov_slave_data']
        cov.stop()
        cov.save()
        path = node.slaveoutput['cov_slave_path']
        self.cov.config.paths['source'].append(path)

    # Record the slave types that contribute to the data file.
    rinfo = node.gateway._rinfo()
    node_desc = self.get_node_desc(rinfo.platform, rinfo.version_info)
    self.node_descs.add(node_desc)
def test_combining_arc_contexts(self):
    red_data, blue_data = self.run_red_blue(branch=True)
    for datas in [[red_data, blue_data], [blue_data, red_data]]:
        combined = CoverageData(suffix="combined")
        for data in datas:
            combined.update(data)
        assert combined.measured_contexts() == {'red', 'blue'}

        full_names = {os.path.basename(f): f for f in combined.measured_files()}
        self.assertCountEqual(full_names, ['red.py', 'blue.py'])

        fred = full_names['red.py']
        fblue = full_names['blue.py']

        def assert_combined_lines(filename, context, lines):
            # pylint: disable=cell-var-from-loop
            combined.set_query_context(context)
            assert combined.lines(filename) == lines

        assert_combined_lines(fred, 'red', self.LINES)
        assert_combined_lines(fred, 'blue', [])
        assert_combined_lines(fblue, 'red', [])
        assert_combined_lines(fblue, 'blue', self.LINES)

        def assert_combined_arcs(filename, context, lines):
            # pylint: disable=cell-var-from-loop
            combined.set_query_context(context)
            assert combined.arcs(filename) == lines

        assert_combined_arcs(fred, 'red', self.ARCS)
        assert_combined_arcs(fred, 'blue', [])
        assert_combined_arcs(fblue, 'red', [])
        assert_combined_arcs(fblue, 'blue', self.ARCS)
def test_fix_coverage(tmpdir):
    base_file = tmpdir.join('foo.py')
    base_file.ensure()
    sub_file = tmpdir.join('site-packages/foo.py')
    sub_file.ensure()
    unrelated_file = tmpdir.join('bar.py')
    unrelated_file.ensure()

    coverage_data = CoverageData(basename='.coverage.orig')
    coverage_data.add_arcs({
        str(base_file): {(1, 2): None},
        str(sub_file): {(3, 4): None},
        str(unrelated_file): {(5, 6): None},
    })
    assert coverage_data.lines(base_file) == [1, 2]
    assert coverage_data.lines(sub_file) == [3, 4]
    assert coverage_data.lines(unrelated_file) == [5, 6]

    new_coverage_data = merge_coverage(coverage_data, '/site-packages/', str(tmpdir))

    # The new file should contain all the lines and arcs.
    assert new_coverage_data.lines(base_file) == [1, 2, 3, 4]
    assert new_coverage_data.arcs(base_file) == [(1, 2), (3, 4)]
    assert new_coverage_data.lines(unrelated_file) == [5, 6]
    assert new_coverage_data.arcs(unrelated_file) == [(5, 6)]

    # And it should not contain the original, un-merged names.
    assert sub_file not in new_coverage_data.measured_files()
def test_cant_add_lines_with_arcs(self):
    covdata = CoverageData()
    covdata.add_arcs(ARCS_3)
    with self.assertRaisesRegex(CoverageException, "Can't add lines to existing arc data"):
        covdata.add_lines(LINES_1)
def get_coverage_data(self, lines):
    """Get a CoverageData object that includes the requested lines."""
    data = CoverageData()
    data.add_lines(lines)
    return data
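# A minimal sketch of the `lines` mapping get_coverage_data expects, assuming
# the coverage 4.x add_lines() API of {filename: {lineno: None}}; not part of
# the original source.
def example_lines_mapping():
    from coverage.data import CoverageData
    data = CoverageData()
    data.add_lines({
        "a.py": {1: None, 2: None, 3: None},
        "b.py": {17: None},
    })
    assert sorted(data.lines("a.py")) == [1, 2, 3]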
def test_combining_from_nonexistent_directories(self):
    covdata = CoverageData()
    msg = "Couldn't combine from non-existent path 'xyzzy'"
    with self.assertRaisesRegex(CoverageException, msg):
        combine_parallel_data(covdata, data_paths=['xyzzy'])
def test_reading_missing(self):
    self.assert_doesnt_exist(".coverage")
    covdata = CoverageData()
    covdata.read()
    self.assert_line_counts(covdata, {})
def test_empty_arcs_are_still_arcs(self):
    covdata = CoverageData()
    covdata.add_arcs({})
    covdata.touch_file("abc.py")
    self.assertTrue(covdata.has_arcs())
def test_asking_isnt_measuring(self):
    # Asking about an unmeasured file shouldn't make it seem measured.
    covdata = CoverageData()
    self.assert_measured_files(covdata, [])
    self.assertEqual(covdata.arcs("missing.py"), None)
    self.assert_measured_files(covdata, [])
def test_contexts_by_lineno_with_unknown_file(self):
    covdata = CoverageData()
    self.assertDictEqual(covdata.contexts_by_lineno('xyz.py'), {})
def test_no_lines_vs_unmeasured_file(self):
    covdata = CoverageData()
    covdata.add_lines(LINES_1)
    covdata.touch_file('zzz.py')
    self.assertEqual(covdata.lines('zzz.py'), [])
    self.assertIsNone(covdata.lines('no_such_file.py'))
def test_touch_file_with_arcs(self):
    covdata = CoverageData()
    covdata.add_arcs(ARCS_3)
    covdata.touch_file('zzz.py')
    self.assert_measured_files(covdata, MEASURED_FILES_3 + ['zzz.py'])
def test_touch_file_with_lines(self):
    covdata = CoverageData()
    covdata.add_lines(LINES_1)
    covdata.touch_file('zzz.py')
    self.assert_measured_files(covdata, MEASURED_FILES_1 + ['zzz.py'])