def init_git_repo(path, no_create, bare, log):
    """Initialize a local Git repository and return a GitRepo for it.

    Args:
        path: directory of the repository; created and initialized if
            missing or empty (unless no_create is set)
        no_create: if true, refuse to create/initialize a new repository
        bare: passed to GitRepo.init() to create a bare repository
        log: logger used for informational messages

    Returns:
        GitRepo instance for the (possibly newly created) repository.

    Raises:
        ArchiveError: if path is a file, creation is refused or fails, or
            the directory is non-empty but not a Git repository.
    """
    path = os.path.abspath(path)
    if os.path.isfile(path):
        raise ArchiveError("Invalid Git repo at {}: path exists but is not a "
                           "directory".format(path))
    if not os.path.isdir(path) or not os.listdir(path):
        if no_create:
            raise ArchiveError("No git repo at {}, refusing to create "
                               "one".format(path))
        if not os.path.isdir(path):
            try:
                os.mkdir(path)
            except (FileNotFoundError, PermissionError) as err:
                # Chain the original OS error so the traceback shows the
                # real cause of the failure
                raise ArchiveError(
                    "Failed to mkdir {}: {}".format(path, err)) from err
        if not os.listdir(path):
            log.info("Initializing a new Git repo at %s", path)
            repo = GitRepo.init(path, bare)
    try:
        repo = GitRepo(path, is_topdir=True)
    except GitError as err:
        raise ArchiveError("Non-empty directory that is not a Git repository "
                           "at {}\nPlease specify an existing Git repository, "
                           "an empty directory or a non-existing directory "
                           "path.".format(path)) from err
    return repo
def view_test_report(self, logger, source_dir, branch, commit, tag):
    """Print a full-text test report from stored results.

    Results are loaded from a specific commit in a results repository,
    from a specific tag, or from plain result files in source_dir, in
    that order of precedence.
    """
    test_count_reports = []
    if commit:
        if tag:
            logger.warning("Ignoring --tag as --commit was specified")
        tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
        repo = GitRepo(source_dir)
        revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
        rev_index = gitarchive.rev_find(revs, 'commit', commit)
        testresults = resultutils.git_get_result(repo, revs[rev_index][2])
    elif tag:
        repo = GitRepo(source_dir)
        testresults = resultutils.git_get_result(repo, [tag])
    else:
        testresults = resultutils.load_resultsdata(source_dir)

    # Aggregate one report entry per stored result id
    for suite_results in testresults.values():
        for result_id, result in suite_results.items():
            report = self.get_aggregated_test_result(logger, result)
            report['testseries'] = result['configuration']['TESTSERIES']
            report['result_id'] = result_id
            test_count_reports.append(report)
    self.print_test_report('test_report_full_text.txt', test_count_reports)
def __init__(self, out_dir):
    """Set up the runner: prepare the output dir and detect Git state."""
    self.results = {}
    self.out_dir = os.path.abspath(out_dir)
    if not os.path.exists(self.out_dir):
        os.makedirs(self.out_dir)

    # Determine the Git repository being tested; None when the current
    # working directory is not inside a Git clone
    try:
        repo = GitRepo('.')
    except GitError:
        repo = None
    self.repo = repo

    self.git_rev, self.git_branch = self.get_git_revision()
    log.info("Using Git branch:revision %s:%s",
             self.git_branch, self.git_rev)
def view_test_report(self, logger, source_dir, branch, commit, tag):
    """Print a full-text test report from stored results.

    Results are loaded from a specific commit in a results repository,
    from a specific tag, or from plain result files in source_dir, in
    that order of precedence. Results of a test type already seen for a
    machine are skipped.
    """
    test_count_reports = []
    if commit:
        if tag:
            logger.warning("Ignoring --tag as --commit was specified")
        tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
        repo = GitRepo(source_dir)
        revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
        rev_index = gitarchive.rev_find(revs, 'commit', commit)
        testresults = resultutils.git_get_result(repo, revs[rev_index][2])
    elif tag:
        repo = GitRepo(source_dir)
        testresults = resultutils.git_get_result(repo, [tag])
    else:
        testresults = resultutils.load_resultsdata(source_dir)
    for testsuite in testresults:
        for resultid in testresults[testsuite]:
            skip = False
            result = testresults[testsuite][resultid]
            machine = result['configuration']['MACHINE']
            # Check to see if there is already results for these kinds of tests for the machine
            for key in result['result'].keys():
                testtype = str(key).split('.')[0]
                # NOTE(review): "ltpiresult" looks like a typo for
                # "ltpresult" -- confirm against how these keys are written
                if ((machine in self.ptests and testtype == "ptestresult" and self.ptests[machine]) or
                        (machine in self.ltptests and testtype == "ltpiresult" and self.ltptests[machine]) or
                        (machine in self.ltpposixtests and testtype == "ltpposixresult" and self.ltpposixtests[machine])):
                    print("Already have test results for %s on %s, skipping %s" % (str(key).split('.')[0], machine, resultid))
                    skip = True
                    break
            if skip:
                # NOTE(review): this breaks out of the resultid loop, so all
                # remaining result ids of this testsuite are skipped too --
                # confirm 'continue' was not intended
                break
            test_count_report = self.get_aggregated_test_result(logger, result, machine)
            test_count_report['machine'] = machine
            test_count_report['testseries'] = result['configuration']['TESTSERIES']
            test_count_report['result_id'] = resultid
            test_count_reports.append(test_count_report)
    self.print_test_report('test_report_full_text.txt', test_count_reports)
def __init__(self, out_dir, *args, **kwargs):
    """Initialise the result collector and detect Git parameters.

    out_dir: directory for test output; remaining arguments are passed
    through to the base unittest result class.
    """
    super(BuildPerfTestResult, self).__init__(*args, **kwargs)
    self.out_dir = out_dir

    # Determine the Git repository being tested; None when the current
    # working directory is not inside a Git clone
    try:
        repo = GitRepo('.')
    except GitError:
        repo = None
    self.repo = repo

    self.git_revision, self.git_branch = self.get_git_revision()
    self.hostname = socket.gethostname()
    self.start_time = None
    self.elapsed_time = None
    self.successes = []
    log.info("Using Git branch:revision %s:%s",
             self.git_branch, self.git_revision)
def __init__(self, out_dir, *args, **kwargs):
    """Initialise the result collector and detect Git/product parameters.

    out_dir: directory for test output; remaining arguments are passed
    through to the base unittest result class.
    """
    super(BuildPerfTestResult, self).__init__(*args, **kwargs)
    self.out_dir = out_dir

    # Determine the Git repository being tested; None when the current
    # working directory is not inside a Git clone
    try:
        repo = GitRepo('.')
    except GitError:
        repo = None
    self.repo = repo

    (self.git_commit,
     self.git_commit_count,
     self.git_branch) = self.get_git_revision()
    self.hostname = socket.gethostname()
    self.product = os.getenv('OE_BUILDPERFTEST_PRODUCT', 'oe-core')
    self.start_time = None
    self.elapsed_time = None
    self.successes = []
    log.info("Using Git branch:commit %s:%s (%s)",
             self.git_branch, self.git_commit, self.git_commit_count)
def view_test_report(self, logger, source_dir, tag):
    """Aggregate results from source_dir (or a Git tag) into a text report."""
    if tag:
        # Results live in a Git results repository under the given tag
        testresults = resultutils.git_get_result(GitRepo(source_dir), [tag])
    else:
        # Plain result files on disk
        testresults = resultutils.load_resultsdata(source_dir)

    test_count_reports = []
    for suite_results in testresults.values():
        for result_id, result in suite_results.items():
            report = self.get_aggregated_test_result(logger, result)
            report['testseries'] = result['configuration']['TESTSERIES']
            report['result_id'] = result_id
            test_count_reports.append(report)
    self.print_test_report('test_report_full_text.txt', test_count_reports)
def test_apply_patches(self):
    """
    Summary:     Able to apply a single patch to the Linux kernel source
    Expected:    The README file should exist and the patch changes should be
                 displayed at the end of the file.
    Product:     Kernel Development
    Author:      Yeoh Ee Peng <*****@*****.**>
    AutomatedBy: Mazliana Mohamad <*****@*****.**>
    """
    runCmd('bitbake virtual/kernel -c patch')
    kernel_source = get_bb_var('STAGING_KERNEL_DIR')
    readme = os.path.join(kernel_source, 'README')

    # This test step adds modified file 'README' to git and creates a
    # patch file '0001-KERNEL_DEV_TEST_CASE.patch' at the same location as file
    patch_content = 'This is a test to apply a patch to the kernel'
    with open(readme, 'a+') as f:
        f.write(patch_content)
    repo = GitRepo('%s' % kernel_source, is_topdir=True)
    repo.run_cmd('add %s' % readme)
    repo.run_cmd(['commit', '-m', 'KERNEL_DEV_TEST_CASE'])
    repo.run_cmd(['format-patch', '-1'])
    patch_name = '0001-KERNEL_DEV_TEST_CASE.patch'
    patchpath = os.path.join(kernel_source, patch_name)
    # Move the generated patch into the recipe's files directory so the
    # bbappend's SRC_URI can pick it up
    runCmd('mv %s %s' % (patchpath, self.recipes_linuxyocto_dir))
    # Remove the local modification; a successful do_patch run below must
    # recreate the file from the patch
    runCmd('rm %s ' % readme)
    self.assertFalse(os.path.exists(readme))
    # The '%' wildcard makes the bbappend apply to any linux-yocto version
    recipe_append = os.path.join(self.recipeskernel_dir, 'linux-yocto_%.bbappend')
    with open(recipe_append, 'w+') as fh:
        fh.write('SRC_URI += "file://%s"\n' % patch_name)
        fh.write('FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"')
    runCmd('bitbake virtual/kernel -c clean')
    runCmd('bitbake virtual/kernel -c patch')
    self.assertTrue(os.path.exists(readme))
    # The patch appended patch_content as the last line of README
    result = runCmd('tail -n 1 %s' % readme)
    self.assertEqual(result.output, patch_content)
def git_commit_results(self, repo_path, branch=None, tag=None):
    """Commit results into a Git repository.

    repo_path: top directory of the target results repository
    branch: branch to commit to; defaults to the branch under test.
        May contain {git_branch} and {tester_host} keywords.
    tag: optional tag name pattern; may contain {git_branch},
        {git_commit}, {git_commit_count}, {tester_host} and {tag_num}
        keywords, where {tag_num} becomes the count of already-existing
        matching tags, making the new tag unique.
    """
    repo = GitRepo(repo_path, is_topdir=True)
    if not branch:
        branch = self.git_branch
    else:
        # Replace keywords
        branch = branch.format(git_branch=self.git_branch,
                               tester_host=self.hostname)

    log.info("Committing test results into %s %s", repo_path, branch)
    tmp_index = os.path.join(repo_path, '.git', 'index.oe-build-perf')
    try:
        # Create new commit object from the new results, using a temporary
        # index and self.out_dir as the work tree so the repository's real
        # index and checkout are left untouched
        env_update = {'GIT_INDEX_FILE': tmp_index,
                      'GIT_WORK_TREE': self.out_dir}
        repo.run_cmd('add .', env_update)
        tree = repo.run_cmd('write-tree', env_update)
        parent = repo.rev_parse(branch)
        msg = "Results of {}:{}\n".format(self.git_branch, self.git_commit)
        git_cmd = ['commit-tree', tree, '-m', msg]
        if parent:
            git_cmd += ['-p', parent]
        commit = repo.run_cmd(git_cmd, env_update)

        # Update branch head; giving update-ref the old value makes the
        # update fail if the branch moved in the meantime
        git_cmd = ['update-ref', 'refs/heads/' + branch, commit]
        if parent:
            git_cmd.append(parent)
        repo.run_cmd(git_cmd)

        # Update current HEAD, if we're on branch 'branch'
        if repo.get_current_branch() == branch:
            log.info("Updating %s HEAD to latest commit", repo_path)
            repo.run_cmd('reset --hard')

        # Create (annotated) tag
        if tag:
            # Find tags matching the pattern to determine the next tag_num
            tag_keywords = dict(git_branch=self.git_branch,
                                git_commit=self.git_commit,
                                git_commit_count=self.git_commit_count,
                                tester_host=self.hostname,
                                tag_num='[0-9]{1,5}')
            tag_re = re.compile(tag.format(**tag_keywords) + '$')
            tag_keywords['tag_num'] = 0
            for existing_tag in repo.run_cmd('tag').splitlines():
                if tag_re.match(existing_tag):
                    tag_keywords['tag_num'] += 1

            tag = tag.format(**tag_keywords)
            msg = "Test run #{} of {}:{}\n".format(tag_keywords['tag_num'],
                                                   self.git_branch,
                                                   self.git_commit)
            repo.run_cmd(['tag', '-a', '-m', msg, tag, commit])
    finally:
        # Always remove the temporary index
        if os.path.exists(tmp_index):
            os.unlink(tmp_index)
class BuildPerfTestResult(unittest.TextTestResult):
    """Runner class for executing the individual tests"""
    # List of test cases to run
    test_run_queue = []

    def __init__(self, out_dir, *args, **kwargs):
        """Initialise the result collector.

        out_dir: directory for test output; remaining arguments are
        passed through to unittest.TextTestResult.
        """
        super(BuildPerfTestResult, self).__init__(*args, **kwargs)
        self.out_dir = out_dir
        # Get Git parameters of the tree under test; self.repo stays None
        # when the cwd is not inside a Git repository
        try:
            self.repo = GitRepo('.')
        except GitError:
            self.repo = None
        self.git_revision, self.git_branch = self.get_git_revision()
        self.hostname = socket.gethostname()
        self.start_time = self.elapsed_time = None
        self.successes = []
        log.info("Using Git branch:revision %s:%s", self.git_branch,
                 self.git_revision)

    def get_git_revision(self):
        """Get git branch and revision under testing.

        The OE_BUILDPERFTEST_GIT_REVISION and OE_BUILDPERFTEST_GIT_BRANCH
        environment variables override autodetection from the repository.
        Returns a (revision, branch) tuple of strings; note that
        undetermined values come back as the string 'None'.
        """
        rev = os.getenv('OE_BUILDPERFTEST_GIT_REVISION')
        branch = os.getenv('OE_BUILDPERFTEST_GIT_BRANCH')
        if not self.repo and (not rev or not branch):
            log.info("The current working directory doesn't seem to be a Git "
                     "repository clone. You can specify branch and revision "
                     "used in test results with OE_BUILDPERFTEST_GIT_REVISION "
                     "and OE_BUILDPERFTEST_GIT_BRANCH environment variables")
        else:
            if not rev:
                rev = self.repo.run_cmd(['rev-parse', 'HEAD'])
            if not branch:
                try:
                    # Strip the 11-char 'refs/heads/' prefix from the
                    # symbolic ref to get the bare branch name
                    branch = self.repo.run_cmd(['symbolic-ref', 'HEAD'])[11:]
                except GitError:
                    log.debug('Currently on detached HEAD')
                    branch = None
        return str(rev), str(branch)

    def addSuccess(self, test):
        """Record results from successful tests"""
        super(BuildPerfTestResult, self).addSuccess(test)
        # Store (test, message) pairs to mirror the shape of unittest's
        # failures/errors lists
        self.successes.append((test, None))

    def startTest(self, test):
        """Pre-test hook"""
        test.out_dir = self.out_dir
        log.info("Executing test %s: %s", test.name, test.shortDescription())
        # Timestamp prefix for the console progress line
        self.stream.write(datetime.now().strftime("[%Y-%m-%d %H:%M:%S] "))
        super(BuildPerfTestResult, self).startTest(test)

    def startTestRun(self):
        """Pre-run hook"""
        self.start_time = datetime.utcnow()

    def stopTestRun(self):
        """Post-run hook: record total elapsed time"""
        self.elapsed_time = datetime.utcnow() - self.start_time

    def all_results(self):
        """Yield (status, (test, message)) pairs for all recorded results."""
        result_map = {'SUCCESS': self.successes,
                      'FAIL': self.failures,
                      'ERROR': self.errors,
                      'EXP_FAIL': self.expectedFailures,
                      'UNEXP_SUCCESS': self.unexpectedSuccesses}
        for status, tests in result_map.items():
            for test in tests:
                yield (status, test)

    def update_globalres_file(self, filename):
        """Write results to globalres csv file"""
        # Map test names to time and size columns in globalres
        # The tuples represent index and length of times and sizes
        # respectively
        gr_map = {'test1': ((0, 1), (8, 1)),
                  'test12': ((1, 1), (None, None)),
                  'test13': ((2, 1), (9, 1)),
                  'test2': ((3, 1), (None, None)),
                  'test3': ((4, 3), (None, None)),
                  'test4': ((7, 1), (10, 2))}

        if self.repo:
            git_tag_rev = self.repo.run_cmd(['describe', self.git_revision])
        else:
            git_tag_rev = self.git_revision
        values = ['0'] * 12
        for status, (test, msg) in self.all_results():
            # NOTE(review): all_results() yields 'FAIL' and 'EXP_FAIL', but
            # this filter tests for 'FAILURE' and 'EXP_SUCCESS', so only
            # 'SUCCESS' results can ever pass -- confirm which status keys
            # were intended
            if status not in ['SUCCESS', 'FAILURE', 'EXP_SUCCESS']:
                continue
            (t_ind, t_len), (s_ind, s_len) = gr_map[test.name]
            if t_ind is not None:
                values[t_ind:t_ind + t_len] = test.times
            if s_ind is not None:
                values[s_ind:s_ind + s_len] = test.sizes

        log.debug("Writing globalres log to %s", filename)
        with open(filename, 'a') as fobj:
            fobj.write('{},{}:{},{},'.format(self.hostname,
                                             self.git_branch,
                                             self.git_revision,
                                             git_tag_rev))
            fobj.write(','.join(values) + '\n')
def regression_git(args, logger):
    """Generate a regression report between two result revisions in a Git repo.

    The base and target revisions are picked from the tagged test
    revisions of args.branch (and optionally args.branch2), selected by
    --commit/--commit-number options or defaulting to the two most
    recent revisions. Returns 0 on success, 1 when suitable revisions
    cannot be determined.
    """
    base_results = {}
    target_results = {}

    tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
    repo = GitRepo(args.repo)

    revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch)
    if args.branch2:
        revs2 = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch2)
        if not len(revs2):
            logger.error("No revisions found to compare against")
            return 1
        if not len(revs):
            logger.error("No revision to report on found")
            return 1
    else:
        if len(revs) < 2:
            logger.error("Only %d tester revisions found, unable to generate report" % len(revs))
            return 1

    # Pick revisions
    if args.commit:
        if args.commit_number:
            logger.warning("Ignoring --commit-number as --commit was specified")
        index1 = gitarchive.rev_find(revs, 'commit', args.commit)
    elif args.commit_number:
        index1 = gitarchive.rev_find(revs, 'commit_number', args.commit_number)
    else:
        # Default to the most recent revision of the base branch
        index1 = len(revs) - 1

    if args.branch2:
        # Append the chosen base revision to the comparison branch's list
        # so that both indices refer into the same list from here on
        revs2.append(revs[index1])
        index1 = len(revs2) - 1
        revs = revs2

    if args.commit2:
        if args.commit_number2:
            logger.warning("Ignoring --commit-number2 as --commit2 was specified")
        index2 = gitarchive.rev_find(revs, 'commit', args.commit2)
    elif args.commit_number2:
        index2 = gitarchive.rev_find(revs, 'commit_number', args.commit_number2)
    else:
        if index1 > 0:
            index2 = index1 - 1
            # Find the closest matching commit number for comparision
            # In future we could check the commit is a common ancestor and
            # continue back if not but this good enough for now
            while index2 > 0 and revs[index2].commit_number > revs[index1].commit_number:
                index2 = index2 - 1
        else:
            logger.error("Unable to determine the other commit, use "
                         "--commit2 or --commit-number2 to specify it")
            return 1

    logger.info("Comparing:\n%s\nto\n%s\n" % (revs[index1], revs[index2]))

    base_results = resultutils.git_get_result(repo, revs[index1][2])
    target_results = resultutils.git_get_result(repo, revs[index2][2])

    regression_common(args, logger, base_results, target_results)
    return 0
class BuildPerfTestResult(unittest.TextTestResult):
    """Runner class for executing the individual tests"""
    # List of test cases to run
    test_run_queue = []

    def __init__(self, out_dir, *args, **kwargs):
        """Initialise the result collector.

        out_dir: base directory for test output; remaining arguments are
        passed through to unittest.TextTestResult.
        """
        super(BuildPerfTestResult, self).__init__(*args, **kwargs)
        self.out_dir = out_dir
        # Get Git parameters of the tree under test; self.repo stays None
        # when the cwd is not inside a Git repository
        try:
            self.repo = GitRepo('.')
        except GitError:
            self.repo = None
        self.git_commit, self.git_commit_count, self.git_branch = \
                self.get_git_revision()
        self.hostname = socket.gethostname()
        self.product = os.getenv('OE_BUILDPERFTEST_PRODUCT', 'oe-core')
        self.start_time = self.elapsed_time = None
        self.successes = []
        log.info("Using Git branch:commit %s:%s (%s)", self.git_branch,
                 self.git_commit, self.git_commit_count)

    def get_git_revision(self):
        """Get git branch and commit under testing.

        The OE_BUILDPERFTEST_GIT_COMMIT, OE_BUILDPERFTEST_GIT_COMMIT_COUNT
        and OE_BUILDPERFTEST_GIT_BRANCH environment variables override
        autodetection. Returns a (commit, commit_count, branch) tuple of
        strings; note that undetermined values come back as 'None'.
        """
        commit = os.getenv('OE_BUILDPERFTEST_GIT_COMMIT')
        commit_cnt = os.getenv('OE_BUILDPERFTEST_GIT_COMMIT_COUNT')
        branch = os.getenv('OE_BUILDPERFTEST_GIT_BRANCH')
        if not self.repo and (not commit or not commit_cnt or not branch):
            log.info("The current working directory doesn't seem to be a Git "
                     "repository clone. You can specify branch and commit "
                     "displayed in test results with OE_BUILDPERFTEST_GIT_BRANCH, "
                     "OE_BUILDPERFTEST_GIT_COMMIT and "
                     "OE_BUILDPERFTEST_GIT_COMMIT_COUNT environment variables")
        else:
            if not commit:
                # HEAD^0 resolves to the commit object even if HEAD points
                # at an annotated tag
                commit = self.repo.rev_parse('HEAD^0')
                commit_cnt = self.repo.run_cmd(['rev-list', '--count', 'HEAD^0'])
            if not branch:
                branch = self.repo.get_current_branch()
                if not branch:
                    log.debug('Currently on detached HEAD')
        return str(commit), str(commit_cnt), str(branch)

    def addSuccess(self, test):
        """Record results from successful tests"""
        super(BuildPerfTestResult, self).addSuccess(test)
        # Store (test, message) pairs to mirror the shape of unittest's
        # failures/errors lists
        self.successes.append((test, None))

    def startTest(self, test):
        """Pre-test hook"""
        test.base_dir = self.out_dir
        # NOTE(review): assumes test.out_dir is derived from the base_dir
        # assigned just above -- confirm against the test case class
        os.mkdir(test.out_dir)
        log.info("Executing test %s: %s", test.name, test.shortDescription())
        # Timestamp prefix for the console progress line
        self.stream.write(datetime.now().strftime("[%Y-%m-%d %H:%M:%S] "))
        super(BuildPerfTestResult, self).startTest(test)

    def startTestRun(self):
        """Pre-run hook"""
        self.start_time = datetime.utcnow()

    def stopTestRun(self):
        """Post-run hook: record elapsed time and write the json report"""
        self.elapsed_time = datetime.utcnow() - self.start_time
        self.write_results_json()

    def all_results(self):
        """Yield (status, (test, message)) pairs for all recorded results."""
        result_map = {'SUCCESS': self.successes,
                      'FAIL': self.failures,
                      'ERROR': self.errors,
                      'EXP_FAIL': self.expectedFailures,
                      'UNEXP_SUCCESS': self.unexpectedSuccesses,
                      'SKIPPED': self.skipped}
        for status, tests in result_map.items():
            for test in tests:
                yield (status, test)

    def update_globalres_file(self, filename):
        """Write results to globalres csv file"""
        # Map test names to time and size columns in globalres
        # The tuples represent index and length of times and sizes
        # respectively
        gr_map = {'test1': ((0, 1), (8, 1)),
                  'test12': ((1, 1), (None, None)),
                  'test13': ((2, 1), (9, 1)),
                  'test2': ((3, 1), (None, None)),
                  'test3': ((4, 3), (None, None)),
                  'test4': ((7, 1), (10, 2))}

        if self.repo:
            git_tag_rev = self.repo.run_cmd(['describe', self.git_commit])
        else:
            git_tag_rev = self.git_commit
        values = ['0'] * 12
        for status, (test, msg) in self.all_results():
            # Errored and skipped tests produced no measurements
            if status in ['ERROR', 'SKIPPED']:
                continue
            (t_ind, t_len), (s_ind, s_len) = gr_map[test.name]
            if t_ind is not None:
                values[t_ind:t_ind + t_len] = test.times
            if s_ind is not None:
                values[s_ind:s_ind + s_len] = test.sizes

        log.debug("Writing globalres log to %s", filename)
        with open(filename, 'a') as fobj:
            fobj.write('{},{}:{},{},'.format(self.hostname,
                                             self.git_branch,
                                             self.git_commit,
                                             git_tag_rev))
            fobj.write(','.join(values) + '\n')

    def write_results_json(self):
        """Write test results into a json-formatted file"""
        results = {'tester_host': self.hostname,
                   'git_branch': self.git_branch,
                   'git_commit': self.git_commit,
                   'git_commit_count': self.git_commit_count,
                   'product': self.product,
                   'start_time': self.start_time,
                   'elapsed_time': self.elapsed_time}
        tests = {}
        for status, (test, reason) in self.all_results():
            tests[test.name] = {'name': test.name,
                                'description': test.shortDescription(),
                                'status': status,
                                'start_time': test.start_time,
                                'elapsed_time': test.elapsed_time,
                                'cmd_log_file': os.path.relpath(test.cmd_log_file,
                                                                self.out_dir),
                                'measurements': test.measurements}
        results['tests'] = tests

        with open(os.path.join(self.out_dir, 'results.json'), 'w') as fobj:
            # ResultsJsonEncoder presumably serializes the datetime/timedelta
            # values above -- confirm its implementation
            json.dump(results, fobj, indent=4, sort_keys=True,
                      cls=ResultsJsonEncoder)

    def git_commit_results(self, repo_path, branch=None, tag=None):
        """Commit results into a Git repository.

        branch may contain {git_branch} and {tester_host} keywords; tag
        may additionally contain {git_commit}, {git_commit_count} and
        {tag_num}, where {tag_num} becomes the count of already-existing
        matching tags, making the new tag unique.
        """
        repo = GitRepo(repo_path, is_topdir=True)
        if not branch:
            branch = self.git_branch
        else:
            # Replace keywords
            branch = branch.format(git_branch=self.git_branch,
                                   tester_host=self.hostname)

        log.info("Committing test results into %s %s", repo_path, branch)
        tmp_index = os.path.join(repo_path, '.git', 'index.oe-build-perf')
        try:
            # Create new commit object from the new results, using a
            # temporary index and self.out_dir as the work tree so the
            # repository's real index and checkout are left untouched
            env_update = {'GIT_INDEX_FILE': tmp_index,
                          'GIT_WORK_TREE': self.out_dir}
            repo.run_cmd('add .', env_update)
            tree = repo.run_cmd('write-tree', env_update)
            parent = repo.rev_parse(branch)
            msg = "Results of {}:{}\n".format(self.git_branch, self.git_commit)
            git_cmd = ['commit-tree', tree, '-m', msg]
            if parent:
                git_cmd += ['-p', parent]
            commit = repo.run_cmd(git_cmd, env_update)

            # Update branch head; giving update-ref the old value makes the
            # update fail if the branch moved in the meantime
            git_cmd = ['update-ref', 'refs/heads/' + branch, commit]
            if parent:
                git_cmd.append(parent)
            repo.run_cmd(git_cmd)

            # Update current HEAD, if we're on branch 'branch'
            if repo.get_current_branch() == branch:
                log.info("Updating %s HEAD to latest commit", repo_path)
                repo.run_cmd('reset --hard')

            # Create (annotated) tag
            if tag:
                # Find tags matching the pattern to determine the next tag_num
                tag_keywords = dict(git_branch=self.git_branch,
                                    git_commit=self.git_commit,
                                    git_commit_count=self.git_commit_count,
                                    tester_host=self.hostname,
                                    tag_num='[0-9]{1,5}')
                tag_re = re.compile(tag.format(**tag_keywords) + '$')
                tag_keywords['tag_num'] = 0
                for existing_tag in repo.run_cmd('tag').splitlines():
                    if tag_re.match(existing_tag):
                        tag_keywords['tag_num'] += 1

                tag = tag.format(**tag_keywords)
                msg = "Test run #{} of {}:{}\n".format(tag_keywords['tag_num'],
                                                       self.git_branch,
                                                       self.git_commit)
                repo.run_cmd(['tag', '-a', '-m', msg, tag, commit])
        finally:
            # Always remove the temporary index
            if os.path.exists(tmp_index):
                os.unlink(tmp_index)
def view_test_report(self, logger, source_dir, branch, commit, tag,
                     use_regression_map, raw_test, selected_test_case_only):
    """Print test reports from stored results.

    Results are loaded from a specific commit in a results repository,
    from a tag, or from plain result files, in that order of precedence.
    raw_test restricts output to the raw json of a single result id;
    selected_test_case_only restricts output to a single test case.
    """
    def print_selected_testcase_result(testresults, selected_test_case_only):
        # Print status (and log, if present) of one named test case from
        # every result id that contains it
        for testsuite in testresults:
            for resultid in testresults[testsuite]:
                result = testresults[testsuite][resultid]['result']
                test_case_result = result.get(selected_test_case_only, {})
                if test_case_result.get('status'):
                    print('Found selected test case result for %s from %s' % (selected_test_case_only, resultid))
                    print(test_case_result['status'])
                else:
                    print('Could not find selected test case result for %s from %s' % (selected_test_case_only, resultid))
                if test_case_result.get('log'):
                    print(test_case_result['log'])
    test_count_reports = []
    # Select which configuration map is used to key the results
    configmap = resultutils.store_map
    if use_regression_map:
        configmap = resultutils.regression_map
    if commit:
        if tag:
            logger.warning("Ignoring --tag as --commit was specified")
        tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
        repo = GitRepo(source_dir)
        revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
        rev_index = gitarchive.rev_find(revs, 'commit', commit)
        testresults = resultutils.git_get_result(repo, revs[rev_index][2], configmap=configmap)
    elif tag:
        repo = GitRepo(source_dir)
        testresults = resultutils.git_get_result(repo, [tag], configmap=configmap)
    else:
        testresults = resultutils.load_resultsdata(source_dir, configmap=configmap)
    if raw_test:
        # Dump (or filter) the raw result data of a single result id
        raw_results = {}
        for testsuite in testresults:
            result = testresults[testsuite].get(raw_test, {})
            if result:
                raw_results[testsuite] = {raw_test: result}
        if raw_results:
            if selected_test_case_only:
                print_selected_testcase_result(raw_results, selected_test_case_only)
            else:
                print(json.dumps(raw_results, sort_keys=True, indent=4))
        else:
            print('Could not find raw test result for %s' % raw_test)
        return 0
    if selected_test_case_only:
        print_selected_testcase_result(testresults, selected_test_case_only)
        return 0
    for testsuite in testresults:
        for resultid in testresults[testsuite]:
            skip = False
            result = testresults[testsuite][resultid]
            machine = result['configuration']['MACHINE']
            # Check to see if there is already results for these kinds of tests for the machine
            for key in result['result'].keys():
                testtype = str(key).split('.')[0]
                # NOTE(review): "ltpiresult" looks like a typo for
                # "ltpresult" -- confirm against how these keys are written
                if ((machine in self.ltptests and testtype == "ltpiresult" and self.ltptests[machine]) or
                        (machine in self.ltpposixtests and testtype == "ltpposixresult" and self.ltpposixtests[machine])):
                    print("Already have test results for %s on %s, skipping %s" % (str(key).split('.')[0], machine, resultid))
                    skip = True
                    break
            if skip:
                # NOTE(review): this breaks out of the resultid loop, so all
                # remaining result ids of this testsuite are skipped too --
                # confirm 'continue' was not intended
                break
            test_count_report = self.get_aggregated_test_result(logger, result, machine)
            test_count_report['machine'] = machine
            test_count_report['testseries'] = result['configuration']['TESTSERIES']
            test_count_report['result_id'] = resultid
            test_count_reports.append(test_count_report)
    self.print_test_report('test_report_full_text.txt', test_count_reports)
class BuildPerfTestRunner(object):
    """Runner class for executing the individual tests"""
    # List of test cases to run
    test_run_queue = []

    def __init__(self, out_dir):
        """Set up the runner: prepare out_dir and detect Git parameters."""
        self.results = {}
        self.out_dir = os.path.abspath(out_dir)
        if not os.path.exists(self.out_dir):
            os.makedirs(self.out_dir)
        # Get Git parameters; self.repo stays None when the cwd is not
        # inside a Git repository
        try:
            self.repo = GitRepo('.')
        except GitError:
            self.repo = None
        self.git_rev, self.git_branch = self.get_git_revision()
        log.info("Using Git branch:revision %s:%s", self.git_branch,
                 self.git_rev)

    def get_git_revision(self):
        """Get git branch and revision under testing.

        The OE_BUILDPERFTEST_GIT_REVISION and OE_BUILDPERFTEST_GIT_BRANCH
        environment variables override autodetection. Returns a
        (revision, branch) tuple of strings.
        """
        rev = os.getenv('OE_BUILDPERFTEST_GIT_REVISION')
        branch = os.getenv('OE_BUILDPERFTEST_GIT_BRANCH')
        if not self.repo and (not rev or not branch):
            log.info("The current working directory doesn't seem to be a Git "
                     "repository clone. You can specify branch and revision "
                     "used in test results with OE_BUILDPERFTEST_GIT_REVISION "
                     "and OE_BUILDPERFTEST_GIT_BRANCH environment variables")
        else:
            if not rev:
                rev = self.repo.run_cmd(['rev-parse', 'HEAD'])
            if not branch:
                try:
                    # Strip the 11-char 'refs/heads/' prefix from the
                    # symbolic ref to get the bare branch name
                    branch = self.repo.run_cmd(['symbolic-ref', 'HEAD'])[11:]
                except GitError:
                    log.debug('Currently on detached HEAD')
                    branch = None
        return str(rev), str(branch)

    def run_tests(self):
        """Method that actually runs the tests"""
        self.results['schema_version'] = 1
        self.results['git_revision'] = self.git_rev
        self.results['git_branch'] = self.git_branch
        self.results['tester_host'] = socket.gethostname()
        start_time = datetime.utcnow()
        self.results['start_time'] = start_time
        self.results['tests'] = {}

        self.archive_build_conf()
        for test_class in self.test_run_queue:
            log.info("Executing test %s: %s", test_class.name,
                     test_class.description)

            test = test_class(self.out_dir)
            try:
                test.run()
            except Exception:
                # Catch all exceptions. This way e.g buggy tests won't scrap
                # the whole test run
                sep = '-' * 5 + ' TRACEBACK ' + '-' * 60 + '\n'
                tb_msg = sep + traceback.format_exc() + sep
                log.error("Test execution failed with:\n" + tb_msg)
            self.results['tests'][test.name] = test.results

        self.results['elapsed_time'] = datetime.utcnow() - start_time
        return 0

    def archive_build_conf(self):
        """Archive build/conf to test results"""
        src_dir = os.path.join(os.environ['BUILDDIR'], 'conf')
        tgt_dir = os.path.join(self.out_dir, 'build', 'conf')
        # copytree requires the destination itself to not exist yet, so
        # only create its parent directory here
        os.makedirs(os.path.dirname(tgt_dir))
        shutil.copytree(src_dir, tgt_dir)

    def update_globalres_file(self, filename):
        """Write results to globalres csv file"""
        if self.repo:
            git_tag_rev = self.repo.run_cmd(['describe', self.git_rev])
        else:
            git_tag_rev = self.git_rev

        times = []
        sizes = []
        for test in self.results['tests'].values():
            for measurement in test['measurements']:
                res_type = measurement['type']
                values = measurement['values']
                if res_type == BuildPerfTest.SYSRES:
                    # Format elapsed time as H:MM:SS.ss
                    e_sec = values['elapsed_time'].total_seconds()
                    times.append('{:d}:{:02d}:{:.2f}'.format(
                        int(e_sec / 3600),
                        int((e_sec % 3600) / 60),
                        e_sec % 60))
                elif res_type == BuildPerfTest.DISKUSAGE:
                    sizes.append(str(values['size']))
                else:
                    log.warning("Unable to handle '%s' values in "
                                "globalres.log", res_type)

        log.debug("Writing globalres log to %s", filename)
        with open(filename, 'a') as fobj:
            fobj.write('{},{}:{},{},'.format(self.results['tester_host'],
                                             self.results['git_branch'],
                                             self.results['git_revision'],
                                             git_tag_rev))
            fobj.write(','.join(times + sizes) + '\n')
class BuildPerfTestResult(unittest.TextTestResult):
    """Runner class for executing the individual tests"""
    # List of test cases to run
    test_run_queue = []

    def __init__(self, out_dir, *args, **kwargs):
        """Initialise the result collector.

        out_dir: directory for test output; remaining arguments are
        passed through to unittest.TextTestResult.
        """
        super(BuildPerfTestResult, self).__init__(*args, **kwargs)
        self.out_dir = out_dir
        # Get Git parameters of the tree under test; self.repo stays None
        # when the cwd is not inside a Git repository
        try:
            self.repo = GitRepo('.')
        except GitError:
            self.repo = None
        self.git_revision, self.git_branch = self.get_git_revision()
        self.hostname = socket.gethostname()
        self.start_time = self.elapsed_time = None
        self.successes = []
        log.info("Using Git branch:revision %s:%s", self.git_branch,
                 self.git_revision)

    def get_git_revision(self):
        """Get git branch and revision under testing.

        The OE_BUILDPERFTEST_GIT_REVISION and OE_BUILDPERFTEST_GIT_BRANCH
        environment variables override autodetection from the repository.
        Returns a (revision, branch) tuple of strings; note that
        undetermined values come back as the string 'None'.
        """
        rev = os.getenv('OE_BUILDPERFTEST_GIT_REVISION')
        branch = os.getenv('OE_BUILDPERFTEST_GIT_BRANCH')
        if not self.repo and (not rev or not branch):
            log.info("The current working directory doesn't seem to be a Git "
                     "repository clone. You can specify branch and revision "
                     "used in test results with OE_BUILDPERFTEST_GIT_REVISION "
                     "and OE_BUILDPERFTEST_GIT_BRANCH environment variables")
        else:
            if not rev:
                rev = self.repo.run_cmd(['rev-parse', 'HEAD'])
            if not branch:
                try:
                    # Strip the 11-char 'refs/heads/' prefix from the
                    # symbolic ref to get the bare branch name
                    branch = self.repo.run_cmd(['symbolic-ref', 'HEAD'])[11:]
                except GitError:
                    log.debug('Currently on detached HEAD')
                    branch = None
        return str(rev), str(branch)

    def addSuccess(self, test):
        """Record results from successful tests"""
        super(BuildPerfTestResult, self).addSuccess(test)
        # Store (test, message) pairs to mirror the shape of unittest's
        # failures/errors lists
        self.successes.append((test, None))

    def startTest(self, test):
        """Pre-test hook"""
        test.out_dir = self.out_dir
        log.info("Executing test %s: %s", test.name, test.shortDescription())
        # Timestamp prefix for the console progress line
        self.stream.write(datetime.now().strftime("[%Y-%m-%d %H:%M:%S] "))
        super(BuildPerfTestResult, self).startTest(test)

    def startTestRun(self):
        """Pre-run hook"""
        self.start_time = datetime.utcnow()

    def stopTestRun(self):
        """Post-run hook: record total elapsed time"""
        self.elapsed_time = datetime.utcnow() - self.start_time

    def all_results(self):
        """Yield (status, (test, message)) pairs for all recorded results."""
        result_map = {
            'SUCCESS': self.successes,
            'FAIL': self.failures,
            'ERROR': self.errors,
            'EXP_FAIL': self.expectedFailures,
            'UNEXP_SUCCESS': self.unexpectedSuccesses
        }
        for status, tests in result_map.items():
            for test in tests:
                yield (status, test)

    def update_globalres_file(self, filename):
        """Write results to globalres csv file"""
        # Map test names to time and size columns in globalres
        # The tuples represent index and length of times and sizes
        # respectively
        gr_map = {
            'test1': ((0, 1), (8, 1)),
            'test12': ((1, 1), (None, None)),
            'test13': ((2, 1), (9, 1)),
            'test2': ((3, 1), (None, None)),
            'test3': ((4, 3), (None, None)),
            'test4': ((7, 1), (10, 2))
        }

        if self.repo:
            git_tag_rev = self.repo.run_cmd(['describe', self.git_revision])
        else:
            git_tag_rev = self.git_revision
        values = ['0'] * 12
        for status, (test, msg) in self.all_results():
            # NOTE(review): all_results() yields 'FAIL' and 'EXP_FAIL', but
            # this filter tests for 'FAILURE' and 'EXP_SUCCESS', so only
            # 'SUCCESS' results can ever pass -- confirm which status keys
            # were intended
            if status not in ['SUCCESS', 'FAILURE', 'EXP_SUCCESS']:
                continue
            (t_ind, t_len), (s_ind, s_len) = gr_map[test.name]
            if t_ind is not None:
                values[t_ind:t_ind + t_len] = test.times
            if s_ind is not None:
                values[s_ind:s_ind + s_len] = test.sizes

        log.debug("Writing globalres log to %s", filename)
        with open(filename, 'a') as fobj:
            fobj.write('{},{}:{},{},'.format(self.hostname,
                                             self.git_branch,
                                             self.git_revision,
                                             git_tag_rev))
            fobj.write(','.join(values) + '\n')