def test_apply_patches(self):
    """
    Summary:     Able to apply a single patch to the Linux kernel source
    Expected:    The README file should exist and the patch changes should be displayed at the end of the file.
    Product:     Kernel Development
    Author:      Yeoh Ee Peng <*****@*****.**>
    AutomatedBy: Mazliana Mohamad <*****@*****.**>
    """
    # Make sure the kernel source tree is unpacked and patched first.
    runCmd('bitbake virtual/kernel -c patch')
    src_dir = get_bb_var('STAGING_KERNEL_DIR')
    readme_path = os.path.join(src_dir, 'README')

    # Append a marker line to README, commit it, and let git produce
    # '0001-KERNEL_DEV_TEST_CASE.patch' next to the kernel source.
    marker_text = 'This is a test to apply a patch to the kernel'
    with open(readme_path, 'a+') as readme_file:
        readme_file.write(marker_text)
    kernel_repo = GitRepo('%s' % src_dir, is_topdir=True)
    kernel_repo.run_cmd('add %s' % readme_path)
    kernel_repo.run_cmd(['commit', '-m', 'KERNEL_DEV_TEST_CASE'])
    kernel_repo.run_cmd(['format-patch', '-1'])
    patch_name = '0001-KERNEL_DEV_TEST_CASE.patch'

    # Move the generated patch into the test layer and drop the modified
    # README so the re-patch below has to recreate it.
    generated_patch = os.path.join(src_dir, patch_name)
    runCmd('mv %s %s' % (generated_patch, self.recipes_linuxyocto_dir))
    runCmd('rm %s ' % readme_path)
    self.assertFalse(os.path.exists(readme_path))

    # Hook the patch into the build via a bbappend, then re-run do_patch.
    recipe_append = os.path.join(self.recipeskernel_dir,
                                 'linux-yocto_%.bbappend')
    with open(recipe_append, 'w+') as append_file:
        append_file.write('SRC_URI += "file://%s"\n' % patch_name)
        append_file.write('FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"')
    runCmd('bitbake virtual/kernel -c clean')
    runCmd('bitbake virtual/kernel -c patch')

    # The patch must have restored README with the marker as its last line.
    self.assertTrue(os.path.exists(readme_path))
    result = runCmd('tail -n 1 %s' % readme_path)
    self.assertEqual(result.output, marker_text)
class BuildPerfTestResult(unittest.TextTestResult):
    """Runner class for executing the individual tests"""
    # List of test cases to run
    # NOTE(review): appears unused in this result class -- looks like
    # copy-paste residue from the runner class; confirm before removing
    test_run_queue = []

    def __init__(self, out_dir, *args, **kwargs):
        # out_dir: directory where per-test output and results.json land;
        # remaining args are passed through to unittest.TextTestResult
        super(BuildPerfTestResult, self).__init__(*args, **kwargs)
        self.out_dir = out_dir
        # Get Git parameters
        try:
            self.repo = GitRepo('.')
        except GitError:
            # Not inside a Git clone; get_git_revision() then relies on
            # environment variables only
            self.repo = None
        self.git_commit, self.git_commit_count, self.git_branch = \
            self.get_git_revision()
        self.hostname = socket.gethostname()
        self.product = os.getenv('OE_BUILDPERFTEST_PRODUCT', 'oe-core')
        self.start_time = self.elapsed_time = None
        # TextTestResult tracks failures/errors/skips but not successes,
        # so keep our own (test, message) list for all_results()
        self.successes = []
        log.info("Using Git branch:commit %s:%s (%s)", self.git_branch,
                 self.git_commit, self.git_commit_count)

    def get_git_revision(self):
        """Get git branch and commit under testing.

        Environment variables take precedence over repository
        introspection.  Returns a (commit, commit_count, branch) tuple of
        strings.
        """
        commit = os.getenv('OE_BUILDPERFTEST_GIT_COMMIT')
        commit_cnt = os.getenv('OE_BUILDPERFTEST_GIT_COMMIT_COUNT')
        branch = os.getenv('OE_BUILDPERFTEST_GIT_BRANCH')
        if not self.repo and (not commit or not commit_cnt or not branch):
            log.info("The current working directory doesn't seem to be a Git "
                     "repository clone. You can specify branch and commit "
                     "displayed in test results with OE_BUILDPERFTEST_GIT_BRANCH, "
                     "OE_BUILDPERFTEST_GIT_COMMIT and "
                     "OE_BUILDPERFTEST_GIT_COMMIT_COUNT environment variables")
        else:
            if not commit:
                # 'HEAD^0' peels an annotated tag to the underlying commit
                commit = self.repo.rev_parse('HEAD^0')
                commit_cnt = self.repo.run_cmd(['rev-list', '--count',
                                                'HEAD^0'])
            if not branch:
                branch = self.repo.get_current_branch()
                if not branch:
                    # Detached HEAD: no branch name available
                    log.debug('Currently on detached HEAD')
        # str() also normalizes a possible None into the string 'None'
        return str(commit), str(commit_cnt), str(branch)

    def addSuccess(self, test):
        """Record results from successful tests"""
        super(BuildPerfTestResult, self).addSuccess(test)
        # (test, message) shape matches the failures/errors lists
        self.successes.append((test, None))

    def startTest(self, test):
        """Pre-test hook"""
        # Setting base_dir presumably makes test.out_dir resolve under it
        # (looks like a derived property) -- TODO confirm on the test class
        test.base_dir = self.out_dir
        os.mkdir(test.out_dir)
        log.info("Executing test %s: %s", test.name, test.shortDescription())
        self.stream.write(datetime.now().strftime("[%Y-%m-%d %H:%M:%S] "))
        super(BuildPerfTestResult, self).startTest(test)

    def startTestRun(self):
        """Pre-run hook"""
        self.start_time = datetime.utcnow()

    def stopTestRun(self):
        """Post-run hook: record total wall time and dump results to JSON"""
        self.elapsed_time = datetime.utcnow() - self.start_time
        self.write_results_json()

    def all_results(self):
        # Yield (status, (test, message)) for every recorded outcome
        result_map = {'SUCCESS': self.successes,
                      'FAIL': self.failures,
                      'ERROR': self.errors,
                      'EXP_FAIL': self.expectedFailures,
                      'UNEXP_SUCCESS': self.unexpectedSuccesses,
                      'SKIPPED': self.skipped}
        for status, tests in result_map.items():
            for test in tests:
                yield (status, test)

    def update_globalres_file(self, filename):
        """Write results to globalres csv file"""
        # Map test names to time and size columns in globalres
        # The tuples represent index and length of times and sizes
        # respectively
        gr_map = {'test1': ((0, 1), (8, 1)),
                  'test12': ((1, 1), (None, None)),
                  'test13': ((2, 1), (9, 1)),
                  'test2': ((3, 1), (None, None)),
                  'test3': ((4, 3), (None, None)),
                  'test4': ((7, 1), (10, 2))}
        if self.repo:
            git_tag_rev = self.repo.run_cmd(['describe', self.git_commit])
        else:
            git_tag_rev = self.git_commit
        # 12 columns total; untouched columns stay '0'
        values = ['0'] * 12
        for status, (test, msg) in self.all_results():
            # Errored/skipped tests have no usable measurements
            if status in ['ERROR', 'SKIPPED']:
                continue
            (t_ind, t_len), (s_ind, s_len) = gr_map[test.name]
            if t_ind is not None:
                values[t_ind:t_ind + t_len] = test.times
            if s_ind is not None:
                values[s_ind:s_ind + s_len] = test.sizes
        log.debug("Writing globalres log to %s", filename)
        with open(filename, 'a') as fobj:
            fobj.write('{},{}:{},{},'.format(self.hostname, self.git_branch,
                                             self.git_commit, git_tag_rev))
            fobj.write(','.join(values) + '\n')

    def write_results_json(self):
        """Write test results into a json-formatted file"""
        results = {'tester_host': self.hostname,
                   'git_branch': self.git_branch,
                   'git_commit': self.git_commit,
                   'git_commit_count': self.git_commit_count,
                   'product': self.product,
                   'start_time': self.start_time,
                   'elapsed_time': self.elapsed_time}
        tests = {}
        for status, (test, reason) in self.all_results():
            tests[test.name] = {'name': test.name,
                                'description': test.shortDescription(),
                                'status': status,
                                'start_time': test.start_time,
                                'elapsed_time': test.elapsed_time,
                                'cmd_log_file': os.path.relpath(
                                    test.cmd_log_file, self.out_dir),
                                'measurements': test.measurements}
        results['tests'] = tests
        # ResultsJsonEncoder presumably serializes datetime/timedelta
        # values above -- defined elsewhere in this module
        with open(os.path.join(self.out_dir, 'results.json'), 'w') as fobj:
            json.dump(results, fobj, indent=4, sort_keys=True,
                      cls=ResultsJsonEncoder)

    def git_commit_results(self, repo_path, branch=None, tag=None):
        """Commit results into a Git repository.

        repo_path: results repository (must already be a Git clone).
        branch/tag: optional name patterns; '{git_branch}', '{tester_host}'
        (and for tags '{git_commit}', '{git_commit_count}', '{tag_num}')
        keywords are substituted.
        """
        repo = GitRepo(repo_path, is_topdir=True)
        if not branch:
            branch = self.git_branch
        else:
            # Replace keywords
            branch = branch.format(git_branch=self.git_branch,
                                   tester_host=self.hostname)
        log.info("Committing test results into %s %s", repo_path, branch)
        # Use a throwaway index so the results repo's real index is untouched
        tmp_index = os.path.join(repo_path, '.git', 'index.oe-build-perf')
        try:
            # Create new commit object from the new results
            env_update = {'GIT_INDEX_FILE': tmp_index,
                          'GIT_WORK_TREE': self.out_dir}
            repo.run_cmd('add .', env_update)
            tree = repo.run_cmd('write-tree', env_update)
            parent = repo.rev_parse(branch)
            msg = "Results of {}:{}\n".format(self.git_branch,
                                              self.git_commit)
            git_cmd = ['commit-tree', tree, '-m', msg]
            if parent:
                git_cmd += ['-p', parent]
            commit = repo.run_cmd(git_cmd, env_update)
            # Update branch head; passing the old value makes update-ref
            # refuse to move a ref that changed under us
            git_cmd = ['update-ref', 'refs/heads/' + branch, commit]
            if parent:
                git_cmd.append(parent)
            repo.run_cmd(git_cmd)
            # Update current HEAD, if we're on branch 'branch'
            if repo.get_current_branch() == branch:
                log.info("Updating %s HEAD to latest commit", repo_path)
                repo.run_cmd('reset --hard')
            # Create (annotated) tag
            if tag:
                # Find tags matching the pattern; '{tag_num}' becomes a
                # digit regex so existing run numbers can be counted
                tag_keywords = dict(git_branch=self.git_branch,
                                    git_commit=self.git_commit,
                                    git_commit_count=self.git_commit_count,
                                    tester_host=self.hostname,
                                    tag_num='[0-9]{1,5}')
                tag_re = re.compile(tag.format(**tag_keywords) + '$')
                tag_keywords['tag_num'] = 0
                for existing_tag in repo.run_cmd('tag').splitlines():
                    if tag_re.match(existing_tag):
                        tag_keywords['tag_num'] += 1
                tag = tag.format(**tag_keywords)
                msg = "Test run #{} of {}:{}\n".format(tag_keywords['tag_num'],
                                                       self.git_branch,
                                                       self.git_commit)
                repo.run_cmd(['tag', '-a', '-m', msg, tag, commit])
        finally:
            # Always drop the temporary index, even on failure
            if os.path.exists(tmp_index):
                os.unlink(tmp_index)
def git_commit_results(self, repo_path, branch=None, tag=None):
    """Commit results into a Git repository.

    repo_path: results repository (must already be a Git clone).
    branch/tag: optional name patterns; '{git_branch}', '{tester_host}'
    (and for tags '{git_commit}', '{git_commit_count}', '{tag_num}')
    keywords are substituted.
    """
    repo = GitRepo(repo_path, is_topdir=True)
    if not branch:
        branch = self.git_branch
    else:
        # Replace keywords
        branch = branch.format(git_branch=self.git_branch,
                               tester_host=self.hostname)
    log.info("Committing test results into %s %s", repo_path, branch)
    # Use a throwaway index so the results repo's real index is untouched
    tmp_index = os.path.join(repo_path, '.git', 'index.oe-build-perf')
    try:
        # Create new commit object from the new results
        env_update = {'GIT_INDEX_FILE': tmp_index,
                      'GIT_WORK_TREE': self.out_dir}
        repo.run_cmd('add .', env_update)
        tree = repo.run_cmd('write-tree', env_update)
        parent = repo.rev_parse(branch)
        msg = "Results of {}:{}\n".format(self.git_branch, self.git_commit)
        git_cmd = ['commit-tree', tree, '-m', msg]
        if parent:
            git_cmd += ['-p', parent]
        commit = repo.run_cmd(git_cmd, env_update)
        # Update branch head; passing the old value makes update-ref
        # refuse to move a ref that changed under us
        git_cmd = ['update-ref', 'refs/heads/' + branch, commit]
        if parent:
            git_cmd.append(parent)
        repo.run_cmd(git_cmd)
        # Update current HEAD, if we're on branch 'branch'
        if repo.get_current_branch() == branch:
            log.info("Updating %s HEAD to latest commit", repo_path)
            repo.run_cmd('reset --hard')
        # Create (annotated) tag
        if tag:
            # Find tags matching the pattern; '{tag_num}' becomes a digit
            # regex so existing run numbers can be counted
            tag_keywords = dict(git_branch=self.git_branch,
                                git_commit=self.git_commit,
                                git_commit_count=self.git_commit_count,
                                tester_host=self.hostname,
                                tag_num='[0-9]{1,5}')
            tag_re = re.compile(tag.format(**tag_keywords) + '$')
            tag_keywords['tag_num'] = 0
            for existing_tag in repo.run_cmd('tag').splitlines():
                if tag_re.match(existing_tag):
                    tag_keywords['tag_num'] += 1
            tag = tag.format(**tag_keywords)
            msg = "Test run #{} of {}:{}\n".format(tag_keywords['tag_num'],
                                                   self.git_branch,
                                                   self.git_commit)
            repo.run_cmd(['tag', '-a', '-m', msg, tag, commit])
    finally:
        # Always drop the temporary index, even on failure
        if os.path.exists(tmp_index):
            os.unlink(tmp_index)
class BuildPerfTestRunner(object):
    """Runner class for executing the individual tests"""
    # List of test cases to run
    test_run_queue = []

    def __init__(self, out_dir):
        # out_dir: where per-test results and the archived build/conf go;
        # created if it does not exist yet
        self.results = {}
        self.out_dir = os.path.abspath(out_dir)
        if not os.path.exists(self.out_dir):
            os.makedirs(self.out_dir)
        # Get Git parameters
        try:
            self.repo = GitRepo('.')
        except GitError:
            # Not inside a Git clone; get_git_revision() then relies on
            # environment variables only
            self.repo = None
        self.git_rev, self.git_branch = self.get_git_revision()
        log.info("Using Git branch:revision %s:%s", self.git_branch,
                 self.git_rev)

    def get_git_revision(self):
        """Get git branch and revision under testing.

        Environment variables take precedence over repository
        introspection.  Returns a (revision, branch) tuple of strings.
        """
        rev = os.getenv('OE_BUILDPERFTEST_GIT_REVISION')
        branch = os.getenv('OE_BUILDPERFTEST_GIT_BRANCH')
        if not self.repo and (not rev or not branch):
            log.info("The current working directory doesn't seem to be a Git "
                     "repository clone. You can specify branch and revision "
                     "used in test results with OE_BUILDPERFTEST_GIT_REVISION "
                     "and OE_BUILDPERFTEST_GIT_BRANCH environment variables")
        else:
            if not rev:
                rev = self.repo.run_cmd(['rev-parse', 'HEAD'])
            if not branch:
                try:
                    # Strip the leading 'refs/heads/' (11 chars) from the ref
                    branch = self.repo.run_cmd(['symbolic-ref', 'HEAD'])[11:]
                except GitError:
                    # symbolic-ref fails on a detached HEAD
                    log.debug('Currently on detached HEAD')
                    branch = None
        # str() also normalizes a possible None into the string 'None'
        return str(rev), str(branch)

    def run_tests(self):
        """Method that actually runs the tests"""
        self.results['schema_version'] = 1
        self.results['git_revision'] = self.git_rev
        self.results['git_branch'] = self.git_branch
        self.results['tester_host'] = socket.gethostname()
        start_time = datetime.utcnow()
        self.results['start_time'] = start_time
        self.results['tests'] = {}
        self.archive_build_conf()
        for test_class in self.test_run_queue:
            log.info("Executing test %s: %s", test_class.name,
                     test_class.description)
            test = test_class(self.out_dir)
            try:
                test.run()
            except Exception:
                # Catch all exceptions. This way e.g buggy tests won't scrap
                # the whole test run
                sep = '-' * 5 + ' TRACEBACK ' + '-' * 60 + '\n'
                tb_msg = sep + traceback.format_exc() + sep
                log.error("Test execution failed with:\n" + tb_msg)
            # Record whatever results the test produced, even after failure
            self.results['tests'][test.name] = test.results
        self.results['elapsed_time'] = datetime.utcnow() - start_time
        return 0

    def archive_build_conf(self):
        """Archive build/conf to test results"""
        src_dir = os.path.join(os.environ['BUILDDIR'], 'conf')
        tgt_dir = os.path.join(self.out_dir, 'build', 'conf')
        # Create the parent only; copytree creates tgt_dir itself
        os.makedirs(os.path.dirname(tgt_dir))
        shutil.copytree(src_dir, tgt_dir)

    def update_globalres_file(self, filename):
        """Write results to globalres csv file"""
        if self.repo:
            git_tag_rev = self.repo.run_cmd(['describe', self.git_rev])
        else:
            git_tag_rev = self.git_rev
        times = []
        sizes = []
        for test in self.results['tests'].values():
            for measurement in test['measurements']:
                res_type = measurement['type']
                values = measurement['values']
                if res_type == BuildPerfTest.SYSRES:
                    # Format elapsed time as H:MM:SS.ss
                    e_sec = values['elapsed_time'].total_seconds()
                    times.append('{:d}:{:02d}:{:.2f}'.format(
                        int(e_sec / 3600),
                        int((e_sec % 3600) / 60),
                        e_sec % 60))
                elif res_type == BuildPerfTest.DISKUSAGE:
                    sizes.append(str(values['size']))
                else:
                    log.warning("Unable to handle '%s' values in "
                                "globalres.log", res_type)
        log.debug("Writing globalres log to %s", filename)
        with open(filename, 'a') as fobj:
            fobj.write('{},{}:{},{},'.format(self.results['tester_host'],
                                             self.results['git_branch'],
                                             self.results['git_revision'],
                                             git_tag_rev))
            fobj.write(','.join(times + sizes) + '\n')
class BuildPerfTestResult(unittest.TextTestResult):
    """TestResult that records build-perf outcomes with Git/host metadata.

    Collects per-test timing/size measurements and can append a summary
    line to a globalres CSV file.
    """

    # List of test cases to run
    # NOTE(review): appears unused in this result class -- looks like
    # copy-paste residue from the runner class; confirm before removing
    test_run_queue = []

    def __init__(self, out_dir, *args, **kwargs):
        """out_dir: directory for per-test output; remaining arguments are
        passed through to unittest.TextTestResult."""
        super(BuildPerfTestResult, self).__init__(*args, **kwargs)
        self.out_dir = out_dir
        # Get Git parameters
        try:
            self.repo = GitRepo('.')
        except GitError:
            # Not inside a Git clone; get_git_revision() then relies on
            # environment variables only
            self.repo = None
        self.git_revision, self.git_branch = self.get_git_revision()
        self.hostname = socket.gethostname()
        self.start_time = self.elapsed_time = None
        # TextTestResult tracks failures/errors but not successes, so keep
        # our own (test, message) list for all_results()
        self.successes = []
        log.info("Using Git branch:revision %s:%s", self.git_branch,
                 self.git_revision)

    def get_git_revision(self):
        """Get git branch and revision under testing.

        Environment variables take precedence over repository
        introspection.  Returns a (revision, branch) tuple of strings.
        """
        rev = os.getenv('OE_BUILDPERFTEST_GIT_REVISION')
        branch = os.getenv('OE_BUILDPERFTEST_GIT_BRANCH')
        if not self.repo and (not rev or not branch):
            log.info("The current working directory doesn't seem to be a Git "
                     "repository clone. You can specify branch and revision "
                     "used in test results with OE_BUILDPERFTEST_GIT_REVISION "
                     "and OE_BUILDPERFTEST_GIT_BRANCH environment variables")
        else:
            if not rev:
                rev = self.repo.run_cmd(['rev-parse', 'HEAD'])
            if not branch:
                try:
                    # Strip the leading 'refs/heads/' (11 chars) from the ref
                    branch = self.repo.run_cmd(['symbolic-ref', 'HEAD'])[11:]
                except GitError:
                    # symbolic-ref fails on a detached HEAD
                    log.debug('Currently on detached HEAD')
                    branch = None
        # str() also normalizes a possible None into the string 'None'
        return str(rev), str(branch)

    def addSuccess(self, test):
        """Record results from successful tests"""
        super(BuildPerfTestResult, self).addSuccess(test)
        # (test, message) shape matches the failures/errors lists
        self.successes.append((test, None))

    def startTest(self, test):
        """Pre-test hook: point the test at out_dir and log a timestamp"""
        test.out_dir = self.out_dir
        log.info("Executing test %s: %s", test.name, test.shortDescription())
        self.stream.write(datetime.now().strftime("[%Y-%m-%d %H:%M:%S] "))
        super(BuildPerfTestResult, self).startTest(test)

    def startTestRun(self):
        """Pre-run hook"""
        self.start_time = datetime.utcnow()

    def stopTestRun(self):
        """Post-run hook"""
        self.elapsed_time = datetime.utcnow() - self.start_time

    def all_results(self):
        """Yield (status, (test, message)) for every recorded outcome"""
        result_map = {'SUCCESS': self.successes,
                      'FAIL': self.failures,
                      'ERROR': self.errors,
                      'EXP_FAIL': self.expectedFailures,
                      'UNEXP_SUCCESS': self.unexpectedSuccesses}
        for status, tests in result_map.items():
            for test in tests:
                yield (status, test)

    def update_globalres_file(self, filename):
        """Append one CSV line of results to the globalres file"""
        # Map test names to time and size columns in globalres
        # The tuples represent index and length of times and sizes
        # respectively
        gr_map = {'test1': ((0, 1), (8, 1)),
                  'test12': ((1, 1), (None, None)),
                  'test13': ((2, 1), (9, 1)),
                  'test2': ((3, 1), (None, None)),
                  'test3': ((4, 3), (None, None)),
                  'test4': ((7, 1), (10, 2))}
        if self.repo:
            git_tag_rev = self.repo.run_cmd(['describe', self.git_revision])
        else:
            git_tag_rev = self.git_revision
        # 12 columns total; untouched columns stay '0'
        values = ['0'] * 12
        for status, (test, msg) in self.all_results():
            # BUGFIX: the filter used to test membership in
            # ['SUCCESS', 'FAILURE', 'EXP_SUCCESS'], but all_results()
            # yields 'FAIL', 'EXP_FAIL' and 'UNEXP_SUCCESS' (never
            # 'FAILURE'/'EXP_SUCCESS'), so every non-success result was
            # silently dropped.  Only errored tests lack usable
            # measurements, so skip just those.
            if status == 'ERROR':
                continue
            (t_ind, t_len), (s_ind, s_len) = gr_map[test.name]
            if t_ind is not None:
                values[t_ind:t_ind + t_len] = test.times
            if s_ind is not None:
                values[s_ind:s_ind + s_len] = test.sizes
        log.debug("Writing globalres log to %s", filename)
        with open(filename, 'a') as fobj:
            fobj.write('{},{}:{},{},'.format(self.hostname, self.git_branch,
                                             self.git_revision, git_tag_rev))
            fobj.write(','.join(values) + '\n')
class BuildPerfTestResult(unittest.TextTestResult):
    """TestResult that records build-perf outcomes with Git/host metadata.

    Collects per-test timing/size measurements and can append a summary
    line to a globalres CSV file.
    """

    # List of test cases to run
    # NOTE(review): appears unused in this result class -- looks like
    # copy-paste residue from the runner class; confirm before removing
    test_run_queue = []

    def __init__(self, out_dir, *args, **kwargs):
        """out_dir: directory for per-test output; remaining arguments are
        passed through to unittest.TextTestResult."""
        super(BuildPerfTestResult, self).__init__(*args, **kwargs)
        self.out_dir = out_dir
        # Get Git parameters
        try:
            self.repo = GitRepo('.')
        except GitError:
            # Not inside a Git clone; get_git_revision() then relies on
            # environment variables only
            self.repo = None
        self.git_revision, self.git_branch = self.get_git_revision()
        self.hostname = socket.gethostname()
        self.start_time = self.elapsed_time = None
        # TextTestResult tracks failures/errors but not successes, so keep
        # our own (test, message) list for all_results()
        self.successes = []
        log.info("Using Git branch:revision %s:%s", self.git_branch,
                 self.git_revision)

    def get_git_revision(self):
        """Get git branch and revision under testing.

        Environment variables take precedence over repository
        introspection.  Returns a (revision, branch) tuple of strings.
        """
        rev = os.getenv('OE_BUILDPERFTEST_GIT_REVISION')
        branch = os.getenv('OE_BUILDPERFTEST_GIT_BRANCH')
        if not self.repo and (not rev or not branch):
            log.info("The current working directory doesn't seem to be a Git "
                     "repository clone. You can specify branch and revision "
                     "used in test results with OE_BUILDPERFTEST_GIT_REVISION "
                     "and OE_BUILDPERFTEST_GIT_BRANCH environment variables")
        else:
            if not rev:
                rev = self.repo.run_cmd(['rev-parse', 'HEAD'])
            if not branch:
                try:
                    # Strip the leading 'refs/heads/' (11 chars) from the ref
                    branch = self.repo.run_cmd(['symbolic-ref', 'HEAD'])[11:]
                except GitError:
                    # symbolic-ref fails on a detached HEAD
                    log.debug('Currently on detached HEAD')
                    branch = None
        # str() also normalizes a possible None into the string 'None'
        return str(rev), str(branch)

    def addSuccess(self, test):
        """Record results from successful tests"""
        super(BuildPerfTestResult, self).addSuccess(test)
        # (test, message) shape matches the failures/errors lists
        self.successes.append((test, None))

    def startTest(self, test):
        """Pre-test hook: point the test at out_dir and log a timestamp"""
        test.out_dir = self.out_dir
        log.info("Executing test %s: %s", test.name, test.shortDescription())
        self.stream.write(datetime.now().strftime("[%Y-%m-%d %H:%M:%S] "))
        super(BuildPerfTestResult, self).startTest(test)

    def startTestRun(self):
        """Pre-run hook"""
        self.start_time = datetime.utcnow()

    def stopTestRun(self):
        """Post-run hook"""
        self.elapsed_time = datetime.utcnow() - self.start_time

    def all_results(self):
        """Yield (status, (test, message)) for every recorded outcome"""
        result_map = {
            'SUCCESS': self.successes,
            'FAIL': self.failures,
            'ERROR': self.errors,
            'EXP_FAIL': self.expectedFailures,
            'UNEXP_SUCCESS': self.unexpectedSuccesses
        }
        for status, tests in result_map.items():
            for test in tests:
                yield (status, test)

    def update_globalres_file(self, filename):
        """Append one CSV line of results to the globalres file"""
        # Map test names to time and size columns in globalres
        # The tuples represent index and length of times and sizes
        # respectively
        gr_map = {
            'test1': ((0, 1), (8, 1)),
            'test12': ((1, 1), (None, None)),
            'test13': ((2, 1), (9, 1)),
            'test2': ((3, 1), (None, None)),
            'test3': ((4, 3), (None, None)),
            'test4': ((7, 1), (10, 2))
        }
        if self.repo:
            git_tag_rev = self.repo.run_cmd(['describe', self.git_revision])
        else:
            git_tag_rev = self.git_revision
        # 12 columns total; untouched columns stay '0'
        values = ['0'] * 12
        for status, (test, msg) in self.all_results():
            # BUGFIX: the filter used to test membership in
            # ['SUCCESS', 'FAILURE', 'EXP_SUCCESS'], but all_results()
            # yields 'FAIL', 'EXP_FAIL' and 'UNEXP_SUCCESS' (never
            # 'FAILURE'/'EXP_SUCCESS'), so every non-success result was
            # silently dropped.  Only errored tests lack usable
            # measurements, so skip just those.
            if status == 'ERROR':
                continue
            (t_ind, t_len), (s_ind, s_len) = gr_map[test.name]
            if t_ind is not None:
                values[t_ind:t_ind + t_len] = test.times
            if s_ind is not None:
                values[s_ind:s_ind + s_len] = test.sizes
        log.debug("Writing globalres log to %s", filename)
        with open(filename, 'a') as fobj:
            fobj.write('{},{}:{},{},'.format(self.hostname, self.git_branch,
                                             self.git_revision, git_tag_rev))
            fobj.write(','.join(values) + '\n')