def view_test_report(self, logger, source_dir, branch, commit, tag):
    """Aggregate stored test results and print the full-text report.

    Results are read from a Git repository when a commit or tag is
    given, otherwise from plain result files under source_dir.  One
    summary entry is produced per result id.
    """
    reports = []
    if commit:
        if tag:
            # --commit wins over --tag when both are supplied
            logger.warning("Ignoring --tag as --commit was specified")
        tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
        repo = GitRepo(source_dir)
        revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
        found = gitarchive.rev_find(revs, 'commit', commit)
        testresults = resultutils.git_get_result(repo, revs[found][2])
    elif tag:
        testresults = resultutils.git_get_result(GitRepo(source_dir), [tag])
    else:
        testresults = resultutils.load_resultsdata(source_dir)

    for suite_results in testresults.values():
        for res_id, result in suite_results.items():
            summary = self.get_aggregated_test_result(logger, result)
            summary['testseries'] = result['configuration']['TESTSERIES']
            summary['result_id'] = res_id
            reports.append(summary)
    self.print_test_report('test_report_full_text.txt', reports)
def view_test_report(self, logger, source_dir, branch, commit, tag):
    """Aggregate stored test results and print the full-text report.

    Results are read from a Git repository when commit or tag is given,
    otherwise from plain result files under source_dir.  For each machine,
    ptest/ltp/ltp-posix style results are only reported once; duplicates
    are announced on stdout and skipped.
    """
    test_count_reports = []
    if commit:
        if tag:
            # --commit wins over --tag when both are supplied
            logger.warning("Ignoring --tag as --commit was specified")
        tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
        repo = GitRepo(source_dir)
        revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
        rev_index = gitarchive.rev_find(revs, 'commit', commit)
        testresults = resultutils.git_get_result(repo, revs[rev_index][2])
    elif tag:
        repo = GitRepo(source_dir)
        testresults = resultutils.git_get_result(repo, [tag])
    else:
        testresults = resultutils.load_resultsdata(source_dir)
    for testsuite in testresults:
        for resultid in testresults[testsuite]:
            skip = False
            result = testresults[testsuite][resultid]
            machine = result['configuration']['MACHINE']
            # Check to see if there is already results for these kinds of tests for the machine
            for key in result['result'].keys():
                # Test type is the prefix of the result key, e.g. "ptestresult.foo"
                testtype = str(key).split('.')[0]
                # NOTE(review): "ltpiresult" looks like a typo for "ltpresult" — confirm
                # against what resultutils actually stores as key prefixes.
                if ((machine in self.ptests and testtype == "ptestresult" and self.ptests[machine]) or
                        (machine in self.ltptests and testtype == "ltpiresult" and self.ltptests[machine]) or
                        (machine in self.ltpposixtests and testtype == "ltpposixresult" and self.ltpposixtests[machine])):
                    print("Already have test results for %s on %s, skipping %s" % (str(key).split('.')[0], machine, resultid))
                    skip = True
                    break
            if skip:
                # NOTE(review): this break abandons the remaining result ids of the
                # whole testsuite, not just the duplicate one — 'continue' may have
                # been intended; confirm before changing.
                break
            test_count_report = self.get_aggregated_test_result(logger, result, machine)
            test_count_report['machine'] = machine
            test_count_report['testseries'] = result['configuration']['TESTSERIES']
            test_count_report['result_id'] = resultid
            test_count_reports.append(test_count_report)
    self.print_test_report('test_report_full_text.txt', test_count_reports)
def init_git_repo(path, no_create, bare, log):
    """Return a GitRepo rooted at *path*, creating and initializing it
    when the path is missing or an empty directory.

    Raises ArchiveError when the path is a regular file, when creation
    would be needed but no_create is set, when mkdir fails, or when a
    non-empty directory at the path is not a Git repository.
    """
    path = os.path.abspath(path)
    if os.path.isfile(path):
        raise ArchiveError("Invalid Git repo at {}: path exists but is not a "
                           "directory".format(path))

    is_dir = os.path.isdir(path)
    if not is_dir or not os.listdir(path):
        # Path is missing or an empty dir: a repo would have to be created
        if no_create:
            raise ArchiveError("No git repo at {}, refusing to create "
                               "one".format(path))
        if not is_dir:
            try:
                os.mkdir(path)
            except (FileNotFoundError, PermissionError) as err:
                raise ArchiveError("Failed to mkdir {}: {}".format(path, err))
        if not os.listdir(path):
            log.info("Initializing a new Git repo at %s", path)
            GitRepo.init(path, bare)

    try:
        return GitRepo(path, is_topdir=True)
    except GitError:
        raise ArchiveError("Non-empty directory that is not a Git repository "
                           "at {}\nPlease specify an existing Git repository, "
                           "an empty directory or a non-existing directory "
                           "path.".format(path))
def git_commit_results(self, repo_path, branch=None, tag=None):
    """Commit results into a Git repository.

    Commits the contents of self.out_dir onto *branch* of the repo at
    *repo_path* using git plumbing commands and a temporary index, so
    the repository's real index and work tree are left untouched.
    Optionally creates an annotated tag; '{tag_num}' in the tag pattern
    is replaced with the count of already-existing matching tags.
    """
    repo = GitRepo(repo_path, is_topdir=True)
    if not branch:
        branch = self.git_branch
    else:
        # Replace keywords
        branch = branch.format(git_branch=self.git_branch,
                               tester_host=self.hostname)

    log.info("Committing test results into %s %s", repo_path, branch)
    # Temporary index keeps this commit from clobbering the repo's own index
    tmp_index = os.path.join(repo_path, '.git', 'index.oe-build-perf')
    try:
        # Create new commit object from the new results
        env_update = {'GIT_INDEX_FILE': tmp_index,
                      'GIT_WORK_TREE': self.out_dir}
        repo.run_cmd('add .', env_update)
        tree = repo.run_cmd('write-tree', env_update)
        parent = repo.rev_parse(branch)
        msg = "Results of {}:{}\n".format(self.git_branch, self.git_commit)
        git_cmd = ['commit-tree', tree, '-m', msg]
        if parent:
            # Chain onto the existing branch head when the branch exists
            git_cmd += ['-p', parent]
        commit = repo.run_cmd(git_cmd, env_update)

        # Update branch head
        git_cmd = ['update-ref', 'refs/heads/' + branch, commit]
        if parent:
            # Old value guard: update-ref fails if the branch moved meanwhile
            git_cmd.append(parent)
        repo.run_cmd(git_cmd)

        # Update current HEAD, if we're on branch 'branch'
        if repo.get_current_branch() == branch:
            log.info("Updating %s HEAD to latest commit", repo_path)
            repo.run_cmd('reset --hard')

        # Create (annotated) tag
        if tag:
            # Find tags matching the pattern
            tag_keywords = dict(git_branch=self.git_branch,
                                git_commit=self.git_commit,
                                git_commit_count=self.git_commit_count,
                                tester_host=self.hostname,
                                tag_num='[0-9]{1,5}')
            # First pass: treat the pattern as a regex with tag_num wildcarded
            tag_re = re.compile(tag.format(**tag_keywords) + '$')
            tag_keywords['tag_num'] = 0
            for existing_tag in repo.run_cmd('tag').splitlines():
                if tag_re.match(existing_tag):
                    tag_keywords['tag_num'] += 1
            # Second pass: substitute the final run number into the tag name
            tag = tag.format(**tag_keywords)
            msg = "Test run #{} of {}:{}\n".format(tag_keywords['tag_num'],
                                                   self.git_branch,
                                                   self.git_commit)
            repo.run_cmd(['tag', '-a', '-m', msg, tag, commit])
    finally:
        if os.path.exists(tmp_index):
            os.unlink(tmp_index)
def __init__(self, out_dir):
    """Set up the results store, creating *out_dir* if needed, and
    record the Git branch/revision of the current working directory
    (repo is None when '.' is not inside a Git checkout)."""
    self.results = {}
    self.out_dir = os.path.abspath(out_dir)
    if not os.path.exists(self.out_dir):
        os.makedirs(self.out_dir)

    # Get Git parameters
    try:
        self.repo = GitRepo('.')
    except GitError:
        self.repo = None
    self.git_rev, self.git_branch = self.get_git_revision()
    log.info("Using Git branch:revision %s:%s",
             self.git_branch, self.git_rev)
def __init__(self, out_dir, *args, **kwargs):
    """Initialize the result collector and capture Git metadata of the
    current working directory (repo is None when '.' is not a Git
    checkout)."""
    super(BuildPerfTestResult, self).__init__(*args, **kwargs)
    self.out_dir = out_dir
    self.hostname = socket.gethostname()
    self.start_time = self.elapsed_time = None
    self.successes = []

    # Get Git parameters
    try:
        self.repo = GitRepo('.')
    except GitError:
        self.repo = None
    self.git_revision, self.git_branch = self.get_git_revision()
    log.info("Using Git branch:revision %s:%s",
             self.git_branch, self.git_revision)
def view_test_report(self, logger, source_dir, tag):
    """Aggregate stored test results and print the full-text report.

    Results come from the Git repo at source_dir when *tag* is given,
    otherwise from plain result files under source_dir.
    """
    reports = []
    if tag:
        testresults = resultutils.git_get_result(GitRepo(source_dir), [tag])
    else:
        testresults = resultutils.load_resultsdata(source_dir)

    for suite_results in testresults.values():
        for res_id, result in suite_results.items():
            summary = self.get_aggregated_test_result(logger, result)
            summary['testseries'] = result['configuration']['TESTSERIES']
            summary['result_id'] = res_id
            reports.append(summary)
    self.print_test_report('test_report_full_text.txt', reports)
def __init__(self, out_dir, *args, **kwargs):
    """Initialize the result collector: capture Git commit/branch data
    of the current working directory (repo is None outside a checkout),
    the host name, and the product under test (OE_BUILDPERFTEST_PRODUCT
    env var, defaulting to 'oe-core')."""
    super(BuildPerfTestResult, self).__init__(*args, **kwargs)
    self.out_dir = out_dir
    self.hostname = socket.gethostname()
    self.product = os.getenv('OE_BUILDPERFTEST_PRODUCT', 'oe-core')
    self.start_time = self.elapsed_time = None
    self.successes = []

    # Get Git parameters
    try:
        self.repo = GitRepo('.')
    except GitError:
        self.repo = None
    self.git_commit, self.git_commit_count, self.git_branch = \
        self.get_git_revision()
    log.info("Using Git branch:commit %s:%s (%s)",
             self.git_branch, self.git_commit, self.git_commit_count)
def test_apply_patches(self):
    """
    Summary:     Able to apply a single patch to the Linux kernel source
    Expected:    The README file should exist and the patch changes should be
                 displayed at the end of the file.
    Product:     Kernel Development
    Author:      Yeoh Ee Peng <*****@*****.**>
    AutomatedBy: Mazliana Mohamad <*****@*****.**>
    """
    runCmd('bitbake virtual/kernel -c patch')
    kernel_source = get_bb_var('STAGING_KERNEL_DIR')
    readme = os.path.join(kernel_source, 'README')

    # Append a marker line to README, commit it in the kernel source tree
    # and turn that commit into '0001-KERNEL_DEV_TEST_CASE.patch' next to it
    patch_content = 'This is a test to apply a patch to the kernel'
    with open(readme, 'a+') as f:
        f.write(patch_content)
    repo = GitRepo('%s' % kernel_source, is_topdir=True)
    repo.run_cmd('add %s' % readme)
    repo.run_cmd(['commit', '-m', 'KERNEL_DEV_TEST_CASE'])
    repo.run_cmd(['format-patch', '-1'])
    patch_name = '0001-KERNEL_DEV_TEST_CASE.patch'
    patchpath = os.path.join(kernel_source, patch_name)

    # Move the patch into the recipe layer and remove the modified README
    # so that only re-applying the patch can bring the change back
    runCmd('mv %s %s' % (patchpath, self.recipes_linuxyocto_dir))
    runCmd('rm %s ' % readme)
    self.assertFalse(os.path.exists(readme))

    # Hook the patch into the kernel recipe via a bbappend
    recipe_append = os.path.join(self.recipeskernel_dir, 'linux-yocto_%.bbappend')
    with open(recipe_append, 'w+') as fh:
        fh.write('SRC_URI += "file://%s"\n' % patch_name)
        fh.write('FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"')

    # Re-run the patch task and verify the marker line made it back
    runCmd('bitbake virtual/kernel -c clean')
    runCmd('bitbake virtual/kernel -c patch')
    self.assertTrue(os.path.exists(readme))
    result = runCmd('tail -n 1 %s' % readme)
    self.assertEqual(result.output, patch_content)
def regression_git(args, logger):
    """Run a regression comparison between two test revisions stored in a
    Git results repository.

    The "base" revision is chosen from args.commit / args.commit_number
    (defaulting to the newest revision); the "target" is chosen from
    args.commit2 / args.commit_number2 (defaulting to the revision just
    before the base).  With args.branch2, the base is taken from
    args.branch and compared against revisions on args.branch2.
    Returns 0 on success, 1 when suitable revisions cannot be found.
    """
    base_results = {}
    target_results = {}

    tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
    repo = GitRepo(args.repo)

    revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch)

    if args.branch2:
        # Comparing across two branches: need at least one rev on each
        revs2 = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch2)
        if not len(revs2):
            logger.error("No revisions found to compare against")
            return 1
        if not len(revs):
            logger.error("No revision to report on found")
            return 1
    else:
        # Same branch: need two revs to compare against each other
        if len(revs) < 2:
            logger.error("Only %d tester revisions found, unable to generate report" % len(revs))
            return 1

    # Pick revisions
    if args.commit:
        if args.commit_number:
            logger.warning("Ignoring --commit-number as --commit was specified")
        index1 = gitarchive.rev_find(revs, 'commit', args.commit)
    elif args.commit_number:
        index1 = gitarchive.rev_find(revs, 'commit_number', args.commit_number)
    else:
        # Default to the newest revision on the primary branch
        index1 = len(revs) - 1

    if args.branch2:
        # Append the chosen base rev to the branch2 list and compare within it
        revs2.append(revs[index1])
        index1 = len(revs2) - 1
        revs = revs2

    if args.commit2:
        if args.commit_number2:
            logger.warning("Ignoring --commit-number2 as --commit2 was specified")
        index2 = gitarchive.rev_find(revs, 'commit', args.commit2)
    elif args.commit_number2:
        index2 = gitarchive.rev_find(revs, 'commit_number', args.commit_number2)
    else:
        if index1 > 0:
            index2 = index1 - 1
            # Find the closest matching commit number for comparision
            # In future we could check the commit is a common ancestor and
            # continue back if not but this good enough for now
            # NOTE(review): assumes commit_number values compare sensibly with
            # '>' — if they are strings the ordering is lexicographic; confirm
            # the type produced by gitarchive.get_test_revs.
            while index2 > 0 and revs[index2].commit_number > revs[index1].commit_number:
                index2 = index2 - 1
        else:
            logger.error("Unable to determine the other commit, use "
                         "--commit2 or --commit-number2 to specify it")
            return 1

    logger.info("Comparing:\n%s\nto\n%s\n" % (revs[index1], revs[index2]))

    base_results = resultutils.git_get_result(repo, revs[index1][2])
    target_results = resultutils.git_get_result(repo, revs[index2][2])

    regression_common(args, logger, base_results, target_results)

    return 0
def view_test_report(self, logger, source_dir, branch, commit, tag, use_regression_map, raw_test, selected_test_case_only):
    """Print test results in one of several forms.

    Depending on the arguments this either dumps a single raw test
    result (raw_test), prints one selected test case's status/log
    (selected_test_case_only), or aggregates everything into the
    full-text report.  Results are loaded from a Git repo when commit
    or tag is given, otherwise from plain files under source_dir.
    With use_regression_map the regression configuration map is used
    instead of the default store map.
    """
    def print_selected_testcase_result(testresults, selected_test_case_only):
        # Print status (and log, when present) of one test case per result id
        for testsuite in testresults:
            for resultid in testresults[testsuite]:
                result = testresults[testsuite][resultid]['result']
                test_case_result = result.get(selected_test_case_only, {})
                if test_case_result.get('status'):
                    print('Found selected test case result for %s from %s' % (selected_test_case_only, resultid))
                    print(test_case_result['status'])
                else:
                    print('Could not find selected test case result for %s from %s' % (selected_test_case_only, resultid))
                if test_case_result.get('log'):
                    print(test_case_result['log'])

    test_count_reports = []
    configmap = resultutils.store_map
    if use_regression_map:
        configmap = resultutils.regression_map
    if commit:
        if tag:
            # --commit wins over --tag when both are supplied
            logger.warning("Ignoring --tag as --commit was specified")
        tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
        repo = GitRepo(source_dir)
        revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
        rev_index = gitarchive.rev_find(revs, 'commit', commit)
        testresults = resultutils.git_get_result(repo, revs[rev_index][2], configmap=configmap)
    elif tag:
        repo = GitRepo(source_dir)
        testresults = resultutils.git_get_result(repo, [tag], configmap=configmap)
    else:
        testresults = resultutils.load_resultsdata(source_dir, configmap=configmap)
    if raw_test:
        # Dump only the named raw test result (or the selected case within it)
        raw_results = {}
        for testsuite in testresults:
            result = testresults[testsuite].get(raw_test, {})
            if result:
                raw_results[testsuite] = {raw_test: result}
        if raw_results:
            if selected_test_case_only:
                print_selected_testcase_result(raw_results, selected_test_case_only)
            else:
                print(json.dumps(raw_results, sort_keys=True, indent=4))
        else:
            print('Could not find raw test result for %s' % raw_test)
        return 0
    if selected_test_case_only:
        print_selected_testcase_result(testresults, selected_test_case_only)
        return 0
    for testsuite in testresults:
        for resultid in testresults[testsuite]:
            skip = False
            result = testresults[testsuite][resultid]
            machine = result['configuration']['MACHINE']
            # Check to see if there is already results for these kinds of tests for the machine
            for key in result['result'].keys():
                # Test type is the prefix of the result key, e.g. "ltpresult.foo"
                testtype = str(key).split('.')[0]
                # NOTE(review): "ltpiresult" looks like a typo for "ltpresult" —
                # confirm against the key prefixes resultutils actually stores.
                if ((machine in self.ltptests and testtype == "ltpiresult" and self.ltptests[machine]) or
                        (machine in self.ltpposixtests and testtype == "ltpposixresult" and self.ltpposixtests[machine])):
                    print("Already have test results for %s on %s, skipping %s" % (str(key).split('.')[0], machine, resultid))
                    skip = True
                    break
            if skip:
                # NOTE(review): this break abandons the remaining result ids of the
                # whole testsuite, not just the duplicate one — 'continue' may have
                # been intended; confirm before changing.
                break
            test_count_report = self.get_aggregated_test_result(logger, result, machine)
            test_count_report['machine'] = machine
            test_count_report['testseries'] = result['configuration']['TESTSERIES']
            test_count_report['result_id'] = resultid
            test_count_reports.append(test_count_report)
    self.print_test_report('test_report_full_text.txt', test_count_reports)