Example #1
def parse_run_tests(kernel_output: Iterable[str]) -> Test:
	"""
	Using kernel output, extract KTAP lines, parse the lines for test
	results, and print condensed test results and a summary line.

	Parameters:
	kernel_output - Iterable object containing lines of kernel output

	Return:
	Test - the main test object with all subtests.
	"""
	stdout.print_with_timestamp(DIVIDER)
	lines = extract_tap_lines(kernel_output)
	test = Test()
	if not lines:
		test.name = '<missing>'
		test.add_error('could not find any KTAP output!')
		test.status = TestStatus.FAILURE_TO_PARSE_TESTS
	else:
		test = parse_test(lines, 0, [])
		if test.status != TestStatus.NO_TESTS:
			test.status = test.counts.get_status()
	stdout.print_with_timestamp(DIVIDER)
	print_summary_line(test)
	return test
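
A minimal usage sketch for parse_run_tests(), assuming the kernel tree's tools/testing/kunit directory is importable; the KTAP lines and the test name example_test are made up, and the exact KTAP framing accepted by the parser can differ between kernel versions.

import sys
sys.path.append('tools/testing/kunit')  # KUnit tooling directory inside a kernel tree

import kunit_parser

# Made-up KTAP output; real lines would be captured from a kernel run.
ktap_lines = [
	'KTAP version 1',
	'1..1',
	'ok 1 example_test',
]

test = kunit_parser.parse_run_tests(ktap_lines)
print(test.status, test.counts)
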
Example #2
def print_test_result(test: Test) -> None:
	"""
	Prints result line with status of test.

	Example:
	'[PASSED] example'

	Parameters:
	test - Test object representing current test being printed
	"""
	stdout.print_with_timestamp(format_test_result(test))
Example #3
def config_tests(linux: kunit_kernel.LinuxSourceTree,
		 request: KunitConfigRequest) -> KunitResult:
	stdout.print_with_timestamp('Configuring KUnit Kernel ...')

	config_start = time.time()
	success = linux.build_reconfig(request.build_dir, request.make_options)
	config_end = time.time()
	if not success:
		return KunitResult(KunitStatus.CONFIG_FAILURE,
				   config_end - config_start)
	return KunitResult(KunitStatus.SUCCESS,
			   config_end - config_start)
Example #4
def print_test_footer(test: Test) -> None:
	"""
	Prints test footer with status of test.

	Example:
	'===================== [PASSED] example ====================='

	Parameters:
	test - Test object representing current test being printed
	"""
	message = format_test_result(test)
	stdout.print_with_timestamp(format_test_divider(message,
		len(message) - stdout.color_len()))
Example #5
def build_tests(linux: kunit_kernel.LinuxSourceTree,
		request: KunitBuildRequest) -> KunitResult:
	stdout.print_with_timestamp('Building KUnit Kernel ...')

	build_start = time.time()
	success = linux.build_kernel(request.alltests,
				     request.jobs,
				     request.build_dir,
				     request.make_options)
	build_end = time.time()
	if not success:
		return KunitResult(KunitStatus.BUILD_FAILURE,
				   build_end - build_start)
	return KunitResult(KunitStatus.SUCCESS,
			   build_end - build_start)
Example #6
def print_test_header(test: Test) -> None:
	"""
	Prints test header with test name and optionally the expected number
	of subtests.

	Example:
	'=================== example (2 subtests) ==================='

	Parameters:
	test - Test object representing current test being printed
	"""
	message = test.name
	if test.expected_count:
		if test.expected_count == 1:
			message += ' (1 subtest)'
		else:
			message += f' ({test.expected_count} subtests)'
	stdout.print_with_timestamp(format_test_divider(message, len(message)))
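
The pluralization above can be checked in isolation; this standalone sketch (test name made up, format_test_divider() left out) prints the three possible header messages:

# Standalone sketch of the header message construction in print_test_header().
for expected_count in (1, 2, None):
	message = 'example'
	if expected_count:
		if expected_count == 1:
			message += ' (1 subtest)'
		else:
			message += f' ({expected_count} subtests)'
	print(message)
# -> example (1 subtest) / example (2 subtests) / example
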
Example #7
def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> KunitResult:
	filter_globs = [request.filter_glob]
	if request.run_isolated:
		tests = _list_tests(linux, request)
		if request.run_isolated == 'test':
			filter_globs = tests
		if request.run_isolated == 'suite':
			filter_globs = _suites_from_test_list(tests)
			# Apply the test-part of the user's glob, if present.
			if '.' in request.filter_glob:
				test_glob = request.filter_glob.split('.', maxsplit=2)[1]
				filter_globs = [g + '.' + test_glob for g in filter_globs]

	metadata = kunit_json.Metadata(arch=linux.arch(), build_dir=request.build_dir, def_config='kunit_defconfig')

	test_counts = kunit_parser.TestCounts()
	exec_time = 0.0
	for i, filter_glob in enumerate(filter_globs):
		stdout.print_with_timestamp('Starting KUnit Kernel ({}/{})...'.format(i+1, len(filter_globs)))

		test_start = time.time()
		run_result = linux.run_kernel(
			args=request.kernel_args,
			timeout=None if request.alltests else request.timeout,
			filter_glob=filter_glob,
			build_dir=request.build_dir)

		_, test_result = parse_tests(request, metadata, run_result)
		# run_kernel() doesn't block on the kernel exiting.
		# That only happens after we get the last line of output from `run_result`.
		# So exec_time here actually contains parsing + execution time, which is fine.
		test_end = time.time()
		exec_time += test_end - test_start

		test_counts.add_subtest_counts(test_result.counts)

	if len(filter_globs) == 1 and test_counts.crashed > 0:
		bd = request.build_dir
		print('The kernel seems to have crashed; you can decode the stack traces with:')
		print('$ scripts/decode_stacktrace.sh {}/vmlinux {} < {} | tee {}/decoded.log | {} parse'.format(
				bd, bd, kunit_kernel.get_outfile_path(bd), bd, sys.argv[0]))

	kunit_status = _map_to_overall_status(test_counts.get_status())
	return KunitResult(status=kunit_status, elapsed_time=exec_time)
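
The --run_isolated=suite branch above rewrites the user's suite.test glob into one glob per suite; this standalone sketch of that rewriting uses hypothetical suite names in place of what _suites_from_test_list() would return.

# Standalone sketch of the glob rewriting done for --run_isolated=suite.
filter_glob = 'example.*_timeout'                # hypothetical suite.test glob
filter_globs = ['example', 'example-lists']      # hypothetical suite list

if '.' in filter_glob:
	# Keep only the test part of the user's glob and append it to each suite.
	test_glob = filter_glob.split('.', maxsplit=2)[1]
	filter_globs = [g + '.' + test_glob for g in filter_globs]

print(filter_globs)  # ['example.*_timeout', 'example-lists.*_timeout']
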
Example #8
def print_summary_line(test: Test) -> None:
	"""
	Prints summary line of test object. Color of line is dependent on
	status of test. Color is green if test passes, yellow if test is
	skipped, and red if the test fails or crashes. Summary line contains
	counts of the statuses of the test's subtests, or of the test itself
	if it has no subtests.

	Example:
	'Testing complete. Passed: 2, Failed: 0, Crashed: 0, Skipped: 0,
	Errors: 0'

	Parameters:
	test - Test object representing current test being printed
	"""
	if test.status == TestStatus.SUCCESS:
		color = stdout.green
	elif test.status in (TestStatus.SKIPPED, TestStatus.NO_TESTS):
		color = stdout.yellow
	else:
		color = stdout.red
	stdout.print_with_timestamp(color(f'Testing complete. {test.counts}'))
Example #9
def parse_tests(request: KunitParseRequest, metadata: kunit_json.Metadata, input_data: Iterable[str]) -> Tuple[KunitResult, kunit_parser.Test]:
	parse_start = time.time()

	test_result = kunit_parser.Test()

	if request.raw_output:
		# Treat unparsed results as one passing test.
		test_result.status = kunit_parser.TestStatus.SUCCESS
		test_result.counts.passed = 1

		output: Iterable[str] = input_data
		if request.raw_output == 'all':
			pass
		elif request.raw_output == 'kunit':
			output = kunit_parser.extract_tap_lines(output)
		for line in output:
			print(line.rstrip())

	else:
		test_result = kunit_parser.parse_run_tests(input_data)
	parse_end = time.time()

	if request.json:
		json_str = kunit_json.get_json_result(
					test=test_result,
					metadata=metadata)
		if request.json == 'stdout':
			print(json_str)
		else:
			with open(request.json, 'w') as f:
				f.write(json_str)
			stdout.print_with_timestamp("Test results stored in %s" %
				os.path.abspath(request.json))

	if test_result.status != kunit_parser.TestStatus.SUCCESS:
		return KunitResult(KunitStatus.TEST_FAILURE, parse_end - parse_start), test_result

	return KunitResult(KunitStatus.SUCCESS, parse_end - parse_start), test_result
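
A hedged sketch of calling parse_tests() directly on a saved log, mirroring the 'parse' branch of main() further below; the log file name kunit_output.log is hypothetical, and this file's module-level imports (sys, kunit_json, KunitParseRequest, KunitStatus) are assumed to be available.

metadata = kunit_json.Metadata()   # nothing is known about how the log was produced
request = KunitParseRequest(raw_output=None, json=None)
with open('kunit_output.log', 'r', errors='backslashreplace') as f:  # hypothetical log
	result, test = parse_tests(request, metadata, f.read().splitlines())
if result.status != KunitStatus.SUCCESS:
	sys.exit(1)
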
Example #10
def run_tests(linux: kunit_kernel.LinuxSourceTree,
	      request: KunitRequest) -> KunitResult:
	run_start = time.time()

	config_result = config_tests(linux, request)
	if config_result.status != KunitStatus.SUCCESS:
		return config_result

	build_result = build_tests(linux, request)
	if build_result.status != KunitStatus.SUCCESS:
		return build_result

	exec_result = exec_tests(linux, request)

	run_end = time.time()

	stdout.print_with_timestamp((
		'Elapsed time: %.3fs total, %.3fs configuring, %.3fs ' +
		'building, %.3fs running\n') % (
				run_end - run_start,
				config_result.elapsed_time,
				build_result.elapsed_time,
				exec_result.elapsed_time))
	return exec_result
Example #11
    def make_allyesconfig(self, build_dir: str, make_options) -> None:
        stdout.print_with_timestamp('Enabling all CONFIGs for UML...')
        command = ['make', 'ARCH=um', 'O=' + build_dir, 'allyesconfig']
        if make_options:
            command.extend(make_options)
        process = subprocess.Popen(command,
                                   stdout=subprocess.DEVNULL,
                                   stderr=subprocess.STDOUT)
        process.wait()
        stdout.print_with_timestamp(
            'Disabling broken configs to run KUnit tests...')

        with open(get_kconfig_path(build_dir), 'a') as config:
            with open(BROKEN_ALLCONFIG_PATH, 'r') as disable:
                config.write(disable.read())
        stdout.print_with_timestamp(
            'Starting Kernel with all configs takes a few minutes...')
Example #12
def main(argv):
	parser = argparse.ArgumentParser(
			description='Helps writing and running KUnit tests.')
	subparser = parser.add_subparsers(dest='subcommand')

	# The 'run' command will config, build, exec, and parse in one go.
	run_parser = subparser.add_parser('run', help='Runs KUnit tests.')
	add_common_opts(run_parser)
	add_build_opts(run_parser)
	add_exec_opts(run_parser)
	add_parse_opts(run_parser)

	config_parser = subparser.add_parser('config',
						help='Ensures that .config contains all of '
						'the options in .kunitconfig')
	add_common_opts(config_parser)

	build_parser = subparser.add_parser('build', help='Builds a kernel with KUnit tests')
	add_common_opts(build_parser)
	add_build_opts(build_parser)

	exec_parser = subparser.add_parser('exec', help='Runs a kernel with KUnit tests')
	add_common_opts(exec_parser)
	add_exec_opts(exec_parser)
	add_parse_opts(exec_parser)

	# The 'parse' option is special, as it doesn't need the kernel source
	# (therefore there is no need for a build_dir, hence no add_common_opts)
	# and the '--file' argument is not relevant to 'run', so isn't in
	# add_parse_opts()
	parse_parser = subparser.add_parser('parse',
					    help='Parses KUnit results from a file, '
					    'and prints the parsed results.')
	add_parse_opts(parse_parser)
	parse_parser.add_argument('file',
				  help='Specifies the file to read results from.',
				  type=str, nargs='?', metavar='input_file')

	cli_args = parser.parse_args(massage_argv(argv))

	if get_kernel_root_path():
		os.chdir(get_kernel_root_path())

	if cli_args.subcommand == 'run':
		if not os.path.exists(cli_args.build_dir):
			os.mkdir(cli_args.build_dir)

		linux = tree_from_args(cli_args)
		request = KunitRequest(build_dir=cli_args.build_dir,
				       make_options=cli_args.make_options,
				       jobs=cli_args.jobs,
				       alltests=cli_args.alltests,
				       raw_output=cli_args.raw_output,
				       json=cli_args.json,
				       timeout=cli_args.timeout,
				       filter_glob=cli_args.filter_glob,
				       kernel_args=cli_args.kernel_args,
				       run_isolated=cli_args.run_isolated)
		result = run_tests(linux, request)
		if result.status != KunitStatus.SUCCESS:
			sys.exit(1)
	elif cli_args.subcommand == 'config':
		if cli_args.build_dir and (
				not os.path.exists(cli_args.build_dir)):
			os.mkdir(cli_args.build_dir)

		linux = tree_from_args(cli_args)
		request = KunitConfigRequest(build_dir=cli_args.build_dir,
					     make_options=cli_args.make_options)
		result = config_tests(linux, request)
		stdout.print_with_timestamp((
			'Elapsed time: %.3fs\n') % (
				result.elapsed_time))
		if result.status != KunitStatus.SUCCESS:
			sys.exit(1)
	elif cli_args.subcommand == 'build':
		linux = tree_from_args(cli_args)
		request = KunitBuildRequest(build_dir=cli_args.build_dir,
					    make_options=cli_args.make_options,
					    jobs=cli_args.jobs,
					    alltests=cli_args.alltests)
		result = config_and_build_tests(linux, request)
		stdout.print_with_timestamp((
			'Elapsed time: %.3fs\n') % (
				result.elapsed_time))
		if result.status != KunitStatus.SUCCESS:
			sys.exit(1)
	elif cli_args.subcommand == 'exec':
		linux = tree_from_args(cli_args)
		exec_request = KunitExecRequest(raw_output=cli_args.raw_output,
						build_dir=cli_args.build_dir,
						json=cli_args.json,
						timeout=cli_args.timeout,
						alltests=cli_args.alltests,
						filter_glob=cli_args.filter_glob,
						kernel_args=cli_args.kernel_args,
						run_isolated=cli_args.run_isolated)
		result = exec_tests(linux, exec_request)
		stdout.print_with_timestamp((
			'Elapsed time: %.3fs\n') % (result.elapsed_time))
		if result.status != KunitStatus.SUCCESS:
			sys.exit(1)
	elif cli_args.subcommand == 'parse':
		if cli_args.file is None:
			sys.stdin.reconfigure(errors='backslashreplace')  # pytype: disable=attribute-error
			kunit_output = sys.stdin
		else:
			with open(cli_args.file, 'r', errors='backslashreplace') as f:
				kunit_output = f.read().splitlines()
		# We know nothing about how the result was created!
		metadata = kunit_json.Metadata()
		request = KunitParseRequest(raw_output=cli_args.raw_output,
					    json=cli_args.json)
		result, _ = parse_tests(request, metadata, kunit_output)
		if result.status != KunitStatus.SUCCESS:
			sys.exit(1)
	else:
		parser.print_help()
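
For completeness, a sketch of the usual entry point: kunit.py is normally run from the kernel source root, and main() receives sys.argv[1:]. The subcommand strings come from the subparsers above; the log file name is hypothetical.

if __name__ == '__main__':
	main(sys.argv[1:])

# e.g. programmatically:
#   main(['run'])                        # config + build + exec + parse
#   main(['parse', 'kunit_output.log'])  # re-parse a saved kernel log
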
Example #13
	def add_error(self, error_message: str) -> None:
		"""Records an error that occurred while parsing this test."""
		self.counts.errors += 1
		stdout.print_with_timestamp(stdout.red('[ERROR]') + f' Test: {self.name}: {error_message}')
Example #14
def print_log(log: Iterable[str]) -> None:
	"""Prints all strings in saved log for test in yellow."""
	for m in log:
		stdout.print_with_timestamp(stdout.yellow(m))