def test_no_duplicate_lines(self):
    """Recording the same lines under two contexts must not duplicate them."""
    data = CoverageData()
    data.set_context("context1")
    data.add_lines(LINES_1)
    data.set_context("context2")
    data.add_lines(LINES_1)
    self.assertEqual(data.lines('a.py'), A_PY_LINES_1)
def test_explicit_suffix(self):
    """Writing with an explicit suffix creates only the suffixed file."""
    self.assert_doesnt_exist(".coverage.SUFFIX")
    data = CoverageData()
    data.set_lines(LINES_1)
    self.data_files.write(data, suffix='SUFFIX')
    self.assert_exists(".coverage.SUFFIX")
    self.assert_doesnt_exist(".coverage")
def test_no_duplicate_arcs(self):
    """Recording the same arcs under two contexts must not duplicate them."""
    data = CoverageData()
    data.set_context("context1")
    data.add_arcs(ARCS_3)
    data.set_context("context2")
    data.add_arcs(ARCS_3)
    self.assertEqual(data.arcs('x.py'), X_PY_ARCS_3)
def test_explicit_suffix(self):
    """A suffix given at construction names the data file, not .coverage."""
    self.assert_doesnt_exist(".coverage.SUFFIX")
    data = CoverageData(suffix='SUFFIX')
    data.add_lines(LINES_1)
    data.write()
    self.assert_exists(".coverage.SUFFIX")
    self.assert_doesnt_exist(".coverage")
def test_adding_lines(self):
    """set_lines records line data without implying arc measurement."""
    data = CoverageData()
    data.set_lines(LINES_1)
    self.assert_line_counts(data, SUMMARY_1)
    self.assert_measured_files(data, MEASURED_FILES_1)
    self.assertCountEqual(data.lines("a.py"), A_PY_LINES_1)
    self.assertFalse(data.has_arcs())
def test_combining_with_aliases(self):
    """Combining parallel data remaps filenames through path aliases."""
    data1 = CoverageData()
    data1.set_lines({
        '/home/ned/proj/src/a.py': {1: None, 2: None},
        '/home/ned/proj/src/sub/b.py': {3: None},
        '/home/ned/proj/src/template.html': {10: None},
    })
    data1.set_file_tracers({
        '/home/ned/proj/src/template.html': 'html.plugin',
    })
    self.data_files.write(data1, suffix='1')

    data2 = CoverageData()
    data2.set_lines({
        r'c:\ned\test\a.py': {4: None, 5: None},
        r'c:\ned\test\sub\b.py': {3: None, 6: None},
    })
    self.data_files.write(data2, suffix='2')

    combined = CoverageData()
    aliases = PathAliases()
    aliases.add("/home/ned/proj/src/", "./")
    aliases.add(r"c:\ned\test", "./")
    self.data_files.combine_parallel_data(combined, aliases=aliases)

    # Both source trees should now map onto the same local paths.
    apy = canonical_filename('./a.py')
    sub_bpy = canonical_filename('./sub/b.py')
    template_html = canonical_filename('./template.html')
    self.assert_line_counts(combined, {apy: 4, sub_bpy: 2, template_html: 1}, fullpath=True)
    self.assert_measured_files(combined, [apy, sub_bpy, template_html])
    self.assertEqual(combined.file_tracer(template_html), 'html.plugin')
def testnodedown(self, node, error): """Collect data file name from slave.""" # If slave doesn't return any data then it is likely that this # plugin didn't get activated on the slave side. if not (hasattr(node, 'slaveoutput') and 'cov_slave_node_id' in node.slaveoutput): self.failed_slaves.append(node) return # If slave is not collocated then we must save the data file # that it returns to us. if 'cov_slave_data' in node.slaveoutput: data_suffix = '%s.%s.%06d.%s' % ( socket.gethostname(), os.getpid(), random.randint(0, 999999), node.slaveoutput['cov_slave_node_id'] ) cov = coverage.coverage(source=self.cov_source, branch=self.cov_branch, data_suffix=data_suffix, config_file=self.cov_config) cov.start() data = CoverageData() data.read_fileobj(StringIO(node.slaveoutput['cov_slave_data'])) cov.data.update(data) cov.stop() cov.save() path = node.slaveoutput['cov_slave_path'] self.cov.config.paths['source'].append(path) # Record the slave types that contribute to the data file. rinfo = node.gateway._rinfo() node_desc = self.get_node_desc(rinfo.platform, rinfo.version_info) self.node_descs.add(node_desc)
def apply_path_aliases(cov, alias_map):
    """Adjust filenames in coverage data.

    `alias_map` maps path patterns to their replacements; the coverage
    object's data is rebuilt with the remapped filenames.
    """
    aliases = PathAliases()
    for pattern, result in alias_map.items():
        aliases.add(pattern, result)
    remapped = CoverageData()
    remapped.update(cov.data, aliases)
    cov.data = remapped
def test_writing_and_reading(self):
    """Data written to disk reads back with the same summary."""
    written = CoverageData()
    written.add_lines(DATA_1)
    self.data_files.write(written)
    read_back = CoverageData()
    self.data_files.read(read_back)
    self.assert_summary(read_back, SUMMARY_1)
def test_read_json_errors(self):
    """A data file that isn't JSON raises a CoverageException naming it."""
    self.skip_unless_data_storage_is("json")
    self.make_file("misleading.dat", CoverageData._GO_AWAY + " this isn't JSON")
    msg = r"Couldn't .* '.*[/\\]{0}': \S+"
    with self.assertRaisesRegex(CoverageException, msg.format("misleading.dat")):
        data = CoverageData("misleading.dat")
        data.read()
    # The failed read must leave the object empty.
    self.assertFalse(data)
def test_erasing_parallel(self):
    """erase(parallel=True) removes suffixed files but not .coverage."""
    for fname in ("datafile.1", "datafile.2", ".coverage"):
        self.make_file(fname)
    covdata = CoverageData("datafile")
    covdata.erase(parallel=True)
    self.assert_file_count("datafile.*", 0)
    self.assert_exists(".coverage")
def test_writing_and_reading(self):
    """Line data written to disk reads back with the same counts."""
    written = CoverageData()
    written.set_lines(LINES_1)
    self.data_files.write(written)
    read_back = CoverageData()
    self.data_files.read(read_back)
    self.assert_line_counts(read_back, SUMMARY_1)
def test_debug_data_with_no_data(self):
    """`debug data` reports the data file path and that nothing was collected."""
    data = CoverageData()
    self.command_line("debug data")
    expected = textwrap.dedent("""\
        -- data ------------------------------------------------------
        path: FILENAME
        No data collected
        """).replace("FILENAME", data.data_filename())
    self.assertMultiLineEqual(self.stdout(), expected)
def test_add_to_hash_with_lines(self):
    """Hashing line data feeds the lines, then the file tracer name."""
    data = CoverageData()
    data.set_lines(LINES_1)
    hasher = mock.Mock()
    data.add_to_hash("a.py", hasher)
    expected = [
        mock.call.update([1, 2]),   # lines
        mock.call.update(""),       # file_tracer name
    ]
    self.assertEqual(hasher.method_calls, expected)
def test_erasing(self):
    """erase() empties both the in-memory data and the written file."""
    written = CoverageData()
    written.add_line_data(DATA_1)
    written.write()
    written.erase()
    self.assert_summary(written, {})
    reread = CoverageData()
    reread.read()
    self.assert_summary(reread, {})
def test_add_to_lines_hash_with_missing_file(self):
    """Hashing an unmeasured file feeds empty lines and a None tracer."""
    # https://bitbucket.org/ned/coveragepy/issues/403
    data = CoverageData()
    data.add_lines(LINES_1)
    hasher = mock.Mock()
    data.add_to_hash("missing.py", hasher)
    expected = [
        mock.call.update([]),
        mock.call.update(None),
    ]
    self.assertEqual(hasher.method_calls, expected)
def test_add_to_hash_with_arcs(self):
    """Hashing arc data feeds the arcs, then the file tracer name."""
    data = CoverageData()
    data.add_arcs(ARCS_3)
    data.add_file_tracers({"y.py": "hologram_plugin"})
    hasher = mock.Mock()
    add_data_to_hash(data, "y.py", hasher)
    expected = [
        mock.call.update([(-1, 17), (17, 23), (23, -1)]),   # arcs
        mock.call.update("hologram_plugin"),                # file_tracer name
    ]
    self.assertEqual(hasher.method_calls, expected)
def test_writing_to_other_file(self):
    """A CoverageDataFiles with another basename never touches .coverage."""
    files = CoverageDataFiles(".otherfile")
    data = CoverageData()
    data.set_lines(LINES_1)
    files.write(data)
    self.assert_doesnt_exist(".coverage")
    self.assert_exists(".otherfile")

    # Suffixed writes also use the alternate basename.
    files.write(data, suffix="extra")
    self.assert_exists(".otherfile.extra")
    self.assert_doesnt_exist(".coverage")
def test_read_write_lines(self):
    """write_file/read_file round-trip line data to a named file."""
    source = CoverageData()
    source.set_lines(LINES_1)
    source.write_file("lines.dat")

    target = CoverageData()
    target.read_file("lines.dat")
    self.assert_line_counts(target, SUMMARY_1)
    self.assert_measured_files(target, MEASURED_FILES_1)
    self.assertCountEqual(target.lines("a.py"), A_PY_LINES_1)
    self.assertEqual(target.run_infos(), [])
def test_erasing(self):
    """erase() empties both the in-memory data and the data file on disk."""
    written = CoverageData()
    written.add_lines(LINES_1)
    written.write()
    written.erase()
    self.assert_line_counts(written, {})
    reread = CoverageData()
    reread.read()
    self.assert_line_counts(reread, {})
def test_add_to_arcs_hash_with_missing_file(self):
    """Hashing an unmeasured file feeds empty arcs and a None tracer."""
    # https://bitbucket.org/ned/coveragepy/issues/403
    data = CoverageData()
    data.add_arcs(ARCS_3)
    data.add_file_tracers({"y.py": "hologram_plugin"})
    hasher = mock.Mock()
    add_data_to_hash(data, "missing.py", hasher)
    expected = [
        mock.call.update([]),
        mock.call.update(None),
    ]
    self.assertEqual(hasher.method_calls, expected)
def test_no_arcs_vs_unmeasured_file(self):
    """A touched-but-empty file returns [], an unknown file returns None."""
    data = CoverageData()
    data.set_arcs(ARCS_3)
    data.touch_file('zzz.py')
    self.assertEqual(data.lines('zzz.py'), [])
    self.assertIsNone(data.lines('no_such_file.py'))
    self.assertEqual(data.arcs('zzz.py'), [])
    self.assertIsNone(data.arcs('no_such_file.py'))
def test_update_cant_mix_lines_and_arcs(self):
    """Merging line data with arc data is rejected in both directions."""
    line_data = CoverageData()
    line_data.set_lines(LINES_1)
    arc_data = CoverageData()
    arc_data.set_arcs(ARCS_3)

    with self.assertRaisesRegex(CoverageException, "Can't combine arc data with line data"):
        line_data.update(arc_data)

    with self.assertRaisesRegex(CoverageException, "Can't combine line data with arc data"):
        arc_data.update(line_data)
def test_erasing(self):
    """Erasing both the data object and the files leaves everything empty."""
    written = CoverageData()
    written.set_lines(LINES_1)
    self.data_files.write(written)

    written.erase()
    self.assert_line_counts(written, {})

    self.data_files.erase()
    reread = CoverageData()
    self.data_files.read(reread)
    self.assert_line_counts(reread, {})
def test_erasing(self):
    """Erasing both the data object and the files leaves everything empty."""
    written = CoverageData()
    written.add_lines(DATA_1)
    self.data_files.write(written)

    written.erase()
    self.assert_summary(written, {})

    self.data_files.erase()
    reread = CoverageData()
    self.data_files.read(reread)
    self.assert_summary(reread, {})
def test_debug_output_without_debug_option(self):
    # With a debug object, but not the dataio option, we don't get debug
    # output.
    debug = DebugControlString(options=[])
    written = CoverageData(debug=debug)
    written.set_lines(LINES_1)
    self.data_files.write(written)

    read_back = CoverageData(debug=debug)
    self.data_files.read(read_back)
    self.assert_line_counts(read_back, SUMMARY_1)

    # Nothing should have been logged.
    self.assertEqual(debug.get_output(), "")
def test_file_format_with_arcs(self):
    """The on-disk pickle for arc data has empty lines and the full arcs."""
    # Write with CoverageData, then read the pickle explicitly.
    data = CoverageData()
    data.add_arc_data(ARC_DATA_3)
    data.write()

    with open(".coverage", 'rb') as fdata:
        raw = pickle.load(fdata)

    self.assertCountEqual(raw['lines'].keys(), [])
    arcs = raw['arcs']
    self.assertCountEqual(arcs['x.py'], X_PY_ARCS_3)
    self.assertCountEqual(arcs['y.py'], Y_PY_ARCS_3)
def test_update_arcs(self):
    """Updating from two arc datasets merges their files and counts."""
    first = CoverageData()
    first.set_arcs(ARCS_3)
    second = CoverageData()
    second.set_arcs(ARCS_4)

    merged = CoverageData()
    merged.update(first)
    merged.update(second)
    self.assert_line_counts(merged, SUMMARY_3_4)
    self.assert_measured_files(merged, MEASURED_FILES_3_4)
    self.assertEqual(merged.run_infos(), [])
def pickle2json(infile, outfile):
    """Convert a coverage.py 3.x pickle data file to a 4.x JSON data file.

    Temporarily monkeypatches CoverageData to read the old pickle
    format, and restores the original reader afterwards.
    """
    old_read_raw_data = CoverageData._read_raw_data
    try:
        CoverageData._read_raw_data = pickle_read_raw_data
        covdata = CoverageData()
        with open(infile, 'rb') as inf:
            covdata.read(inf)
        covdata.write_file(outfile)
    finally:
        # Always undo the monkeypatch, even on failure.
        CoverageData._read_raw_data = old_read_raw_data
def test_run_info(self):
    """add_run_info accumulates key/value pairs into a single run record."""
    data = CoverageData()
    self.assertEqual(data.run_infos(), [])
    data.add_run_info(hello="there")
    self.assertEqual(data.run_infos(), [{"hello": "there"}])
    data.add_run_info(count=17)
    self.assertEqual(data.run_infos(), [{"hello": "there", "count": 17}])
def test_combining(self):
    """Combining parallel data merges the parts and deletes their files."""
    self.assert_file_count(".coverage.*", 0)

    part1 = CoverageData(suffix='1')
    part1.add_lines(LINES_1)
    part1.write()
    self.assert_exists(".coverage.1")
    self.assert_file_count(".coverage.*", 1)

    part2 = CoverageData(suffix='2')
    part2.add_lines(LINES_2)
    part2.write()
    self.assert_exists(".coverage.2")
    self.assert_file_count(".coverage.*", 2)

    combined = CoverageData()
    combine_parallel_data(combined)
    self.assert_line_counts(combined, SUMMARY_1_2)
    self.assert_measured_files(combined, MEASURED_FILES_1_2)
    # The part files are consumed by combining.
    self.assert_file_count(".coverage.*", 0)
def test_reading_missing(self):
    """Reading a nonexistent data file yields empty data, not an error."""
    self.assert_doesnt_exist(".coverage")
    data = CoverageData()
    data.read()
    self.assert_line_counts(data, {})
def test_update_file_tracers(self):
    """Merging data keeps each file's tracer; untraced files get ''."""
    data1 = CoverageData(suffix='1')
    data1.add_lines({
        "p1.html": dict.fromkeys([1, 2, 3, 4]),
        "p2.html": dict.fromkeys([5, 6, 7]),
        "main.py": dict.fromkeys([10, 11, 12]),
    })
    data1.add_file_tracers({
        "p1.html": "html.plugin",
        "p2.html": "html.plugin2",
    })

    data2 = CoverageData(suffix='2')
    data2.add_lines({
        "p1.html": dict.fromkeys([3, 4, 5, 6]),
        "p2.html": dict.fromkeys([7, 8, 9]),
        "p3.foo": dict.fromkeys([1000, 1001]),
        "main.py": dict.fromkeys([10, 11, 12]),
    })
    data2.add_file_tracers({
        "p1.html": "html.plugin",
        "p2.html": "html.plugin2",
        "p3.foo": "foo_plugin",
    })

    merged = CoverageData(suffix='3')
    merged.update(data1)
    merged.update(data2)
    self.assertEqual(merged.file_tracer("p1.html"), "html.plugin")
    self.assertEqual(merged.file_tracer("p2.html"), "html.plugin2")
    self.assertEqual(merged.file_tracer("p3.foo"), "foo_plugin")
    self.assertEqual(merged.file_tracer("main.py"), "")
def test_true_suffix(self):
    """suffix=True generates distinct, pid-bearing data file names."""
    self.assert_file_count(".coverage.*", 0)

    # suffix=True will make a randomly named data file.
    first = CoverageData(suffix=True)
    first.add_lines(LINES_1)
    first.write()
    self.assert_doesnt_exist(".coverage")
    found = glob.glob(".coverage.*")
    self.assertEqual(len(found), 1)

    # Another suffix=True will choose a different name.
    second = CoverageData(suffix=True)
    second.add_lines(LINES_1)
    second.write()
    self.assert_doesnt_exist(".coverage")
    found = glob.glob(".coverage.*")
    self.assertEqual(len(found), 2)

    # In addition to being different, the suffixes have the pid in them.
    pid = str(os.getpid())
    self.assertTrue(all(pid in fn for fn in found))
def test_touch_file_with_arcs(self):
    """touch_file adds a file to the measured set of arc data."""
    data = CoverageData()
    data.add_arcs(ARCS_3)
    data.touch_file('zzz.py')
    self.assert_measured_files(data, MEASURED_FILES_3 + ['zzz.py'])
def test_line_data_is_true(self):
    """A CoverageData holding line data is truthy."""
    data = CoverageData()
    data.add_lines(LINES_1)
    self.assertTrue(data)
def test_no_lines_vs_unmeasured_file(self):
    """A touched-but-empty file returns [], an unknown file returns None."""
    data = CoverageData()
    data.add_lines(LINES_1)
    data.touch_file('zzz.py')
    self.assertEqual(data.lines('zzz.py'), [])
    self.assertIsNone(data.lines('no_such_file.py'))
def test_touch_file_with_lines(self):
    """touch_file adds a file to the measured set of line data."""
    data = CoverageData()
    data.add_lines(LINES_1)
    data.touch_file('zzz.py')
    self.assert_measured_files(data, MEASURED_FILES_1 + ['zzz.py'])
def test_empty_arcs_are_still_arcs(self):
    """Adding an empty arc mapping still marks the data as arc-based."""
    data = CoverageData()
    data.add_arcs({})
    data.touch_file("abc.py")
    self.assertTrue(data.has_arcs())
def test_combining_with_aliases(self):
    """combine_parallel_data applies path aliases and removes the parts."""
    data1 = CoverageData(suffix='1')
    data1.add_lines({
        '/home/ned/proj/src/a.py': {1: None, 2: None},
        '/home/ned/proj/src/sub/b.py': {3: None},
        '/home/ned/proj/src/template.html': {10: None},
    })
    data1.add_file_tracers({
        '/home/ned/proj/src/template.html': 'html.plugin',
    })
    data1.write()

    data2 = CoverageData(suffix='2')
    data2.add_lines({
        r'c:\ned\test\a.py': {4: None, 5: None},
        r'c:\ned\test\sub\b.py': {3: None, 6: None},
    })
    data2.write()
    self.assert_file_count(".coverage.*", 2)

    combined = CoverageData()
    aliases = PathAliases()
    aliases.add("/home/ned/proj/src/", "./")
    aliases.add(r"c:\ned\test", "./")
    combine_parallel_data(combined, aliases=aliases)
    self.assert_file_count(".coverage.*", 0)
    # The combined data hasn't been written yet. Should this file exist or not?
    #self.assert_exists(".coverage")

    # Both source trees should now map onto the same local paths.
    apy = canonical_filename('./a.py')
    sub_bpy = canonical_filename('./sub/b.py')
    template_html = canonical_filename('./template.html')
    self.assert_line_counts(combined, {apy: 4, sub_bpy: 2, template_html: 1}, fullpath=True)
    self.assert_measured_files(combined, [apy, sub_bpy, template_html])
    self.assertEqual(combined.file_tracer(template_html), 'html.plugin')
def test_combining_from_files(self):
    """combine_parallel_data honors explicit data_paths and skips others."""
    os.makedirs('cov1')
    part1 = CoverageData('cov1/.coverage.1')
    part1.add_lines(LINES_1)
    part1.write()

    os.makedirs('cov2')
    part2 = CoverageData('cov2/.coverage.2')
    part2.add_lines(LINES_2)
    part2.write()

    # This data won't be included.
    stray1 = CoverageData('.coverage.xxx')
    stray1.add_arcs(ARCS_3)
    stray1.write()
    stray2 = CoverageData('cov2/.coverage.xxx')
    stray2.add_arcs(ARCS_3)
    stray2.write()

    combined = CoverageData()
    combine_parallel_data(combined, data_paths=['cov1', 'cov2/.coverage.2'])
    self.assert_line_counts(combined, SUMMARY_1_2)
    self.assert_measured_files(combined, MEASURED_FILES_1_2)

    # Combined part files are consumed; ignored files stay in place.
    self.assert_doesnt_exist("cov1/.coverage.1")
    self.assert_doesnt_exist("cov2/.coverage.2")
    self.assert_exists(".coverage.xxx")
    self.assert_exists("cov2/.coverage.xxx")
def test_combining_from_nonexistent_directories(self):
    """A nonexistent data path is an explicit error, not silently skipped."""
    data = CoverageData()
    msg = "Couldn't combine from non-existent path 'xyzzy'"
    with self.assertRaisesRegex(CoverageException, msg):
        combine_parallel_data(data, data_paths=['xyzzy'])
def test_contexts_by_lineno_with_unknown_file(self):
    """An unmeasured file yields an empty context mapping."""
    data = CoverageData()
    self.assertDictEqual(data.contexts_by_lineno('xyz.py'), {})
def test_empty_line_data_is_false(self):
    """Adding an empty line mapping leaves the data falsy."""
    data = CoverageData()
    data.add_lines({})
    self.assertFalse(data)
def test_update_lines(self):
    """Updating from two line datasets merges their files and counts."""
    first = CoverageData(suffix='1')
    first.add_lines(LINES_1)
    second = CoverageData(suffix='2')
    second.add_lines(LINES_2)

    merged = CoverageData(suffix='3')
    merged.update(first)
    merged.update(second)
    self.assert_line_counts(merged, SUMMARY_1_2)
    self.assert_measured_files(merged, MEASURED_FILES_1_2)
def test_asking_isnt_measuring(self):
    # Asking about an unmeasured file shouldn't make it seem measured.
    data = CoverageData()
    self.assert_measured_files(data, [])
    self.assertEqual(data.arcs("missing.py"), None)
    self.assert_measured_files(data, [])
def test_lines_with_contexts(self):
    """Line queries are filtered by the configured query contexts."""
    data = CoverageData()
    data.set_context('test_a')
    data.add_lines(LINES_1)
    self.assertEqual(data.lines('a.py'), [1, 2])

    # A matching context pattern keeps the lines visible.
    data.set_query_contexts(['test*'])
    self.assertEqual(data.lines('a.py'), [1, 2])

    # A non-matching pattern hides them.
    data.set_query_contexts(['other*'])
    self.assertEqual(data.lines('a.py'), [])
def test_arcs_with_contexts(self):
    """Arc queries are filtered by the configured query contexts."""
    data = CoverageData()
    data.set_context('test_x')
    data.add_arcs(ARCS_3)
    self.assertEqual(data.arcs('x.py'), [(-1, 1), (1, 2), (2, 3), (3, -1)])

    # A matching context pattern keeps the arcs visible.
    data.set_query_contexts(['test*'])
    self.assertEqual(data.arcs('x.py'), [(-1, 1), (1, 2), (2, 3), (3, -1)])

    # A non-matching pattern hides them.
    data.set_query_contexts(['other*'])
    self.assertEqual(data.arcs('x.py'), [])
def test_empty_data_is_false(self):
    """A fresh CoverageData with no measurements is falsy."""
    data = CoverageData()
    self.assertFalse(data)
def test_cant_add_lines_with_arcs(self):
    """Adding line data on top of arc data is rejected."""
    data = CoverageData()
    data.add_arcs(ARCS_3)
    with self.assertRaisesRegex(CoverageException, "Can't add lines to existing arc data"):
        data.add_lines(LINES_1)
def test_ok_to_add_arcs_twice(self):
    """Successive add_arcs calls accumulate into one dataset."""
    data = CoverageData()
    data.add_arcs(ARCS_3)
    data.add_arcs(ARCS_4)
    self.assert_line_counts(data, SUMMARY_3_4)
    self.assert_measured_files(data, MEASURED_FILES_3_4)
def test_adding_arcs(self):
    """add_arcs records arc data as expected."""
    data = CoverageData()
    data.add_arcs(ARCS_3)
    self.assert_arcs3_data(data)
def test_ok_to_add_lines_twice(self):
    """Successive add_lines calls accumulate into one dataset."""
    data = CoverageData()
    data.add_lines(LINES_1)
    data.add_lines(LINES_2)
    self.assert_line_counts(data, SUMMARY_1_2)
    self.assert_measured_files(data, MEASURED_FILES_1_2)
def test_update_cant_mix_lines_and_arcs(self):
    """Merging line data with arc data is rejected in both directions."""
    line_data = CoverageData(suffix='1')
    line_data.add_lines(LINES_1)
    arc_data = CoverageData(suffix='2')
    arc_data.add_arcs(ARCS_3)

    with self.assertRaisesRegex(CoverageException, "Can't combine arc data with line data"):
        line_data.update(arc_data)

    with self.assertRaisesRegex(CoverageException, "Can't combine line data with arc data"):
        arc_data.update(line_data)
def test_update_file_tracer_vs_no_file_tracer(self):
    """Merging traced with untraced data for the same file is an error."""
    traced = CoverageData(suffix="1")
    traced.add_lines({"p1.html": dict.fromkeys([1, 2, 3])})
    traced.add_file_tracers({"p1.html": "html.plugin"})

    untraced = CoverageData(suffix="2")
    untraced.add_lines({"p1.html": dict.fromkeys([1, 2, 3])})

    msg = "Conflicting file tracer name for 'p1.html': u?'html.plugin' vs u?''"
    with self.assertRaisesRegex(CoverageException, msg):
        traced.update(untraced)

    msg = "Conflicting file tracer name for 'p1.html': u?'' vs u?'html.plugin'"
    with self.assertRaisesRegex(CoverageException, msg):
        untraced.update(traced)
def test_arc_data_is_true(self):
    """A CoverageData holding arc data is truthy."""
    data = CoverageData()
    data.add_arcs(ARCS_3)
    self.assertTrue(data)
def test_update_arcs(self):
    """Updating from two arc datasets merges their files and counts."""
    first = CoverageData(suffix='1')
    first.add_arcs(ARCS_3)
    second = CoverageData(suffix='2')
    second.add_arcs(ARCS_4)

    merged = CoverageData(suffix='3')
    merged.update(first)
    merged.update(second)
    self.assert_line_counts(merged, SUMMARY_3_4)
    self.assert_measured_files(merged, MEASURED_FILES_3_4)