def test_result_setup_passes(testdir, tests_filename):
    """A passing setup phase produces exactly one JSON record with
    when == "setup" and outcome == "passed", mirrored in the plain log."""
    test_to_run = "test_setup_passes"
    result = testdir.runpytest(
        "-vs", "--instrument=json,log", f"{tests_filename}::{test_to_run}"
    )
    result.assert_outcomes(error=0, failed=0, passed=1)

    json_records = helpers.get_json_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    helpers.json_validate_each_record(json_records)

    expected_when = "setup"
    expected_outcome = "passed"
    setup_passed_records = [
        record
        for record in json_records
        if record["when"] == expected_when
        and record["outcome"] == expected_outcome
    ]
    assert len(setup_passed_records) == 1

    log_records = helpers.get_plain_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    # First plain-log line is the session header; record counts must match.
    assert len(log_records[1:]) == len(json_records)
    assert any(
        f"{expected_when} {expected_outcome}" in record for record in log_records
    )
def test_with_single_arg_and_single_kwarg_in_mark_instrument(
    testdir, tests_filename
):
    """A mark with one positional arg and one kwarg shows up on every record
    as labels == ["a_mark"] and tags == {"my_mark": "a_mark"}."""
    result = testdir.runpytest(
        "-vs",
        "--instrument=json,log",
        f"{tests_filename}::test_with_args_and_kwargs_in_mark",
    )
    result.assert_outcomes(passed=1)

    json_records = helpers.get_json_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    helpers.json_validate_each_record(json_records)

    expected_labels = ["a_mark"]
    with_labels = [r for r in json_records if r["labels"] == expected_labels]
    assert len(with_labels) == len(json_records)

    expected_tags = {"my_mark": "a_mark"}
    with_tags = [r for r in json_records if r["tags"] == expected_tags]
    assert len(with_tags) == len(json_records)

    log_records = helpers.get_plain_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    # First plain-log line is the session header; record counts must match.
    assert len(log_records[1:]) == len(json_records)
def test_tag_hook_adds_tag(testdir):
    """The tag hook merges a CLI-supplied tag (--env=test) into every
    record's tags alongside the mark-derived tag."""
    tests_folder = "tag_hook"
    tests_filename = "test_tag_hook_examples.py"
    test_to_run = "test_pass_with_tag"
    tag_key = "env"
    tag_value = "test"

    testdir.copy_example(tests_folder)
    result = testdir.runpytest(
        "-vs",
        "--instrument=json,log",
        f"--{tag_key}={tag_value}",
        f"{tests_filename}::{test_to_run}",
    )
    result.assert_outcomes(passed=1)

    json_records = helpers.get_json_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    helpers.json_validate_each_record(json_records)

    expected_tags = {"my_mark": "a_mark", tag_key: tag_value}
    tagged = [r for r in json_records if r["tags"] == expected_tags]
    assert len(tagged) == len(json_records)

    log_records = helpers.get_plain_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    # First plain-log line is the session header; record counts must match.
    assert len(log_records[1:]) == len(json_records)
def test_two_log_files_are_created_with_json_and_log_instrument_option(
    testdir, tests_filename
):
    """--instrument=json,log creates one .json and one .log artifact sharing
    the same basename and the same session id."""
    test_to_run = "test_passes"
    result = testdir.runpytest(
        "-vs", "--instrument=json,log", f"{tests_filename}::{test_to_run}"
    )
    result.assert_outcomes(error=0, failed=0, passed=1)

    json_log_files = helpers.get_files_from_artifacts_dir_by_extension(
        testdir, "json"
    )
    assert len(json_log_files) == 1
    plain_log_files = helpers.get_files_from_artifacts_dir_by_extension(
        testdir, "log"
    )
    assert len(plain_log_files) == 1

    # Both artifacts must share the same stem (timestamp + session prefix).
    assert PurePath(json_log_files[0]).stem == PurePath(plain_log_files[0]).stem

    records = helpers.get_json_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    session_id_json = records[0]["session_id"]

    records = helpers.get_plain_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    # The plain log's first line announces the session id; extract and compare.
    pattern = re.compile(r"^.+ session id: (.+)$")
    match = pattern.search(records[0])
    session_id_plain = match[1]
    assert session_id_json == session_id_plain
def test_label_hook_adds_label(testdir):
    """The label hook appends a CLI-supplied label (--env=test) after the
    mark-derived label on every record."""
    tests_folder = "label_hook"
    tests_filename = "test_label_hook_examples.py"
    test_to_run = "test_pass_with_label"
    label = "test"

    testdir.copy_example(tests_folder)
    result = testdir.runpytest(
        "-vs",
        "--instrument=json,log",
        f"--env={label}",
        f"{tests_filename}::{test_to_run}",
    )
    result.assert_outcomes(passed=1)

    json_records = helpers.get_json_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    helpers.json_validate_each_record(json_records)

    expected_labels = ["a_mark", label]
    labelled = [r for r in json_records if r["labels"] == expected_labels]
    assert len(labelled) == len(json_records)

    log_records = helpers.get_plain_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    # First plain-log line is the session header; record counts must match.
    assert len(log_records[1:]) == len(json_records)
def test_with_test_in_folder(testdir):
    """A test living in a subdirectory gets its full relative node id
    (folder/file::test) on every JSON record and every plain-log line."""
    example_folder = "subdir_example"
    tests_folder = "subdir"
    tests_filename = "test_single_test_in_subdir_examples.py"
    test_to_run = "test_with_logger_passes_in_subdir"

    testdir.copy_example(example_folder)
    result = testdir.runpytest(
        "-vs",
        "--instrument=json,log",
        f"{tests_folder}/{tests_filename}::{test_to_run}",
    )
    result.assert_outcomes(passed=1)

    json_records = helpers.get_json_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    helpers.json_validate_each_record(json_records)

    expected_node_id = f"{tests_folder}/{tests_filename}::{test_to_run}"
    with_node_id = [r for r in json_records if r["node_id"] == expected_node_id]
    assert len(with_node_id) == len(json_records)

    log_records = helpers.get_plain_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    # Skip the session-header line, then every line should carry the node id.
    assert all(expected_node_id in record for record in log_records[1:])
    assert len(log_records[1:]) == len(json_records)
def test_record_duration(testdir, tests_filename):
    """Every JSON record carries a measured (non-integral float) duration."""
    test_to_run = "test_passes"
    result = testdir.runpytest(
        "-vs", "--instrument=json,log", f"{tests_filename}::{test_to_run}"
    )
    result.assert_outcomes(error=0, failed=0, passed=1)

    json_records = helpers.get_json_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    helpers.json_validate_each_record(json_records)

    # A real measured duration should never be a whole number of seconds.
    for record in json_records:
        assert float(record["duration"]).is_integer() is False

    log_records = helpers.get_plain_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    # First plain-log line is the session header; record counts must match.
    assert len(log_records[1:]) == len(json_records)
def test_logger_in_different_module_from_test(
    testdir, tests_filename, additional_module_filename
):
    """A log record emitted from a helper module (not the test file) is
    attributed to that module's file/logger but to the running test's node id."""
    test_to_run = "test_logger_from_different_module"
    result = testdir.runpytest(
        "-vs",
        "--instrument=json,log",
        "--log-cli-level=debug",
        f"{tests_filename}::{test_to_run}",
    )
    result.assert_outcomes(error=0, failed=0, passed=1)

    json_records = helpers.get_json_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    json_records_instr_log = [
        record
        for record in json_records
        if record["name"].startswith("instr.log")
    ]
    assert len(json_records_instr_log) == 1
    helpers.json_validate_each_record(json_records)

    # Logger name is derived from the module filename minus its ".py" suffix.
    record_name = f"instr.log.{additional_module_filename[:-3]}"
    record_level = "WARNING"
    record_lineno = 5
    record_message = "different module"
    result.stdout.fnmatch_lines(
        f"{record_level} {record_name}:{additional_module_filename}:{record_lineno} {record_message}"
    )

    instr_record = json_records_instr_log[0]
    assert instr_record["filename"] == additional_module_filename
    assert instr_record["funcName"] == "log_warning_from_child"
    assert instr_record["lineno"] == record_lineno
    assert instr_record["node_id"] == f"{tests_filename}::{test_to_run}"

    log_records = helpers.get_plain_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    # First plain-log line is the session header; record counts must match.
    assert len(log_records[1:]) == len(json_records)

    log_records_instr_log = [
        record for record in log_records if "instr.log" in record
    ]
    assert len(log_records_instr_log) == len(json_records_instr_log)
    for record in log_records_instr_log:
        assert (
            f"{record_name} - {record_level} - {tests_filename}::{test_to_run} - {record_message}"
            in record
        )
def test_get_logger_from_request_fixture_and_emit_log_record(
    testdir, tests_filename
):
    """A logger obtained via the request fixture emits exactly one
    "instr.log" record with the expected message, level, and location."""
    test_to_run = "test_logger_from_request"
    result = testdir.runpytest(
        "-vs",
        "--instrument=json,log",
        "--log-cli-level=debug",
        f"{tests_filename}::{test_to_run}",
    )
    result.assert_outcomes(error=0, failed=0, passed=1)

    json_records = helpers.get_json_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    json_records_instr_log = [
        record for record in json_records if record["name"] == "instr.log"
    ]
    assert len(json_records_instr_log) == 1
    helpers.json_validate_each_record(json_records)

    record_name = "instr.log"
    record_level = "ERROR"
    record_lineno = 5
    record_message = "Oh no, there is an error!"
    result.stdout.fnmatch_lines(
        f"{record_level} {record_name}:{tests_filename}:{record_lineno} {record_message}"
    )

    instr_record = json_records_instr_log[0]
    assert instr_record["message"] == record_message
    assert instr_record["level"] == record_level.lower()
    assert instr_record["lineno"] == record_lineno
    assert instr_record["name"] == record_name
    assert instr_record["filename"] == tests_filename
    assert instr_record["funcName"] == test_to_run

    log_records = helpers.get_plain_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    # First plain-log line is the session header; record counts must match.
    assert len(log_records[1:]) == len(json_records)

    log_records_instr_log = [
        record for record in log_records if "instr.log" in record
    ]
    assert len(log_records_instr_log) == len(json_records_instr_log)
    for record in log_records_instr_log:
        assert (
            f"{record_name} - {record_level} - {tests_filename}::{test_to_run} - {record_message}"
            in record
        )
def test_without_fixtures(testdir, tests_filename):
    """A test that uses no fixtures yields records whose "fixtures" field
    is None on every record."""
    test_to_run = "test_with_no_fixtures"
    result = testdir.runpytest(
        "-vs", "--instrument=json,log", f"{tests_filename}::{test_to_run}"
    )
    result.assert_outcomes(error=0, failed=0, passed=1)

    json_records = helpers.get_json_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    helpers.json_validate_each_record(json_records)

    fixtureless = [r for r in json_records if r["fixtures"] is None]
    assert len(fixtureless) == len(json_records)

    log_records = helpers.get_plain_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    # First plain-log line is the session header; record counts must match.
    assert len(log_records[1:]) == len(json_records)
def test_logger_using_extra_kwarg(testdir, tests_filename):
    """Keys passed via the logger's extra= kwarg are flattened into the
    JSON record (here: "a little" -> "a lot")."""
    test_to_run = "test_logger_with_extra"
    result = testdir.runpytest(
        "-vs",
        "--instrument=json,log",
        "--log-cli-level=debug",
        f"{tests_filename}::{test_to_run}",
    )
    result.assert_outcomes(error=0, failed=0, passed=1)

    json_records = helpers.get_json_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    json_records_instr_log = [
        record for record in json_records if record["name"] == "instr.log"
    ]
    assert len(json_records_instr_log) == 1
    helpers.json_validate_each_record(json_records)

    record_name = "instr.log"
    record_level = "INFO"
    record_lineno = 24
    record_message = "This should have something extra."
    result.stdout.fnmatch_lines(
        f"{record_level} {record_name}:{tests_filename}:{record_lineno} {record_message}"
    )

    instr_record = json_records_instr_log[0]
    assert instr_record["a little"] == "a lot"
    assert instr_record["filename"] == tests_filename
    assert instr_record["funcName"] == test_to_run
    assert instr_record["lineno"] == record_lineno

    log_records = helpers.get_plain_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    # First plain-log line is the session header; record counts must match.
    assert len(log_records[1:]) == len(json_records)

    log_records_instr_log = [
        record for record in log_records if "instr.log" in record
    ]
    assert len(log_records_instr_log) == len(json_records_instr_log)
    for record in log_records_instr_log:
        assert (
            f"{record_name} - {record_level} - {tests_filename}::{test_to_run} - {record_message}"
            in record
        )
def test_with_test_using_all_fixtures_and_loggers(testdir, tests_filename):
    """A test combining all fixtures and loggers still stamps the correct
    node id on every JSON record and plain-log line."""
    test_to_run = "test_with_all_fixtures_and_logger"
    result = testdir.runpytest(
        "-vs", "--instrument=json,log", f"{tests_filename}::{test_to_run}"
    )
    result.assert_outcomes(passed=1)

    json_records = helpers.get_json_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    helpers.json_validate_each_record(json_records)

    expected_node_id = f"{tests_filename}::{test_to_run}"
    with_node_id = [r for r in json_records if r["node_id"] == expected_node_id]
    assert len(with_node_id) == len(json_records)

    log_records = helpers.get_plain_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    # Skip the session-header line, then every line should carry the node id.
    assert all(expected_node_id in record for record in log_records[1:])
    assert len(log_records[1:]) == len(json_records)
def test_teardown_fixtures_with_different_scopes(
    testdir, tests_filename, fixture_scope
):
    """A teardown fixture of any scope (parametrized via fixture_scope) is
    listed in the "fixtures" field of every record."""
    test_to_run = f"test_teardown_fixture_{fixture_scope}_scope"
    result = testdir.runpytest(
        "-vs", "--instrument=json,log", f"{tests_filename}::{test_to_run}"
    )
    result.assert_outcomes(error=0, failed=0, passed=1)

    json_records = helpers.get_json_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    helpers.json_validate_each_record(json_records)

    expected_fixtures = [f"teardown_fixture_with_{fixture_scope}_scope"]
    with_fixtures = [
        r for r in json_records if r["fixtures"] == expected_fixtures
    ]
    assert len(with_fixtures) == len(json_records)

    log_records = helpers.get_plain_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    # First plain-log line is the session header; record counts must match.
    assert len(log_records[1:]) == len(json_records)
def test_update_node_id(testdir, tests_filename):
    """When two tests share a fixture, each emitted log record is attributed
    to the node id of the test that was running at emission time:
    fixture setup + first message -> first test; second message +
    fixture teardown -> second test."""
    result = testdir.runpytest(
        "-vs",
        "--instrument=json,log",
        "--log-cli-level=debug",
        f"{tests_filename}",
    )
    result.assert_outcomes(error=0, failed=0, passed=2)

    json_records = helpers.get_json_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    json_records_instr_log = [
        record
        for record in json_records
        if record["name"].startswith("instr.log")
    ]
    assert len(json_records_instr_log) == 4
    helpers.json_validate_each_record(json_records)

    node_id_first_test = f"{tests_filename}::test_first_test"
    node_id_second_test = f"{tests_filename}::test_second_test"
    for record in json_records_instr_log:
        message = record["message"]
        if message in ["fixture setup", "first test"]:
            assert record["node_id"] == node_id_first_test
        elif message in ["second test", "fixture teardown"]:
            assert record["node_id"] == node_id_second_test
        else:
            assert False, f"Unexpected message in record {record}"

    log_records = helpers.get_plain_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    # First plain-log line is the session header; record counts must match.
    assert len(log_records[1:]) == len(json_records)

    log_records_instr_log = [
        record for record in log_records if "instr.log" in record
    ]
    assert len(log_records_instr_log) == len(json_records_instr_log)
    for record in log_records_instr_log:
        if "fixture setup" in record or "first test" in record:
            assert node_id_first_test in record
        elif "second test" in record or "fixture teardown" in record:
            assert node_id_second_test in record
        else:
            assert False, f"Unexpected message in record {record}"
def test_single_plain_log_file_is_created_with_log_instrument_option(
    testdir, tests_filename
):
    """--instrument=log creates exactly one .log artifact whose basename is
    "<timestamp>_<first 8 chars of session id>"."""
    test_to_run = "test_passes"
    result = testdir.runpytest(
        "-vs", "--instrument=log", f"{tests_filename}::{test_to_run}"
    )
    result.assert_outcomes(error=0, failed=0, passed=1)

    log_files = helpers.get_files_from_artifacts_dir_by_extension(testdir, "log")
    assert len(log_files) == 1

    # Basename layout: "<timestamp>_<session-id-prefix>".
    split_log_file_basename = PurePath(log_files[0]).stem.split("_", maxsplit=1)
    helpers.validate_timestamp(split_log_file_basename[0], "%Y%m%dT%H%M%S")

    records = helpers.get_plain_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    # The plain log's first line announces the session id; extract it.
    pattern = re.compile(r"^.+ session id: (.+)$")
    match = pattern.search(records[0])
    session_id = match[1]
    assert split_log_file_basename[1] == session_id[:8]
def test_record_id(testdir, tests_filename):
    """Every JSON record carries a unique, valid v4 UUID record_id."""
    result = testdir.runpytest(
        "-vs", "--instrument=json,log", f"{tests_filename}"
    )
    result.assert_outcomes(error=0, failed=0, passed=4)

    json_records = helpers.get_json_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    helpers.json_validate_each_record(json_records)

    for record in json_records:
        try:
            UUID(record["record_id"], version=4)
        except (AttributeError, ValueError):
            assert False, f"Record id {record['record_id']} is not a valid v4 UUID."

    record_ids = [record["record_id"] for record in json_records]
    assert len(record_ids) == len(json_records)
    # Uniqueness: no id may repeat across records.
    assert len(record_ids) == len(set(record_ids))

    log_records = helpers.get_plain_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    # First plain-log line is the session header; record counts must match.
    assert len(log_records[1:]) == len(json_records)
def test_fixture_hook_removes_fixture(testdir):
    """The fixture hook strips fixture names from records: even though the
    test uses a fixture, every record's "fixtures" field is None."""
    tests_folder = "fixture_hook"
    tests_filename = "test_fixture_hook_examples.py"
    test_to_run = "test_using_fixture"

    testdir.copy_example(tests_folder)
    result = testdir.runpytest(
        "-vs", "--instrument=json,log", f"{tests_filename}::{test_to_run}"
    )
    result.assert_outcomes(passed=1)

    json_records = helpers.get_json_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    helpers.json_validate_each_record(json_records)

    fixtureless = [r for r in json_records if r["fixtures"] is None]
    assert len(fixtureless) == len(json_records)

    log_records = helpers.get_plain_log_file_from_artifacts_dir_and_return_records(
        testdir
    )
    # First plain-log line is the session header; record counts must match.
    assert len(log_records[1:]) == len(json_records)