def __init_tree(self, version=None):
    """Obtain and extract a kernel source tree, returning a kernel object.

    If we have a local copy of the default tarball, use that, else let
    the kernel object use the defined mirrors to obtain it, e.g.:

        http://kernel.org/pub/linux/kernel/v2.6/linux-2.6.14.tar.bz2

    @param version: optional kernel version string; when omitted the
            default is 2.6.14 (2.6.20 on ia64, which can't compile
            2.6.14).
    @return: the configured kernel object created by self.job.kernel().
    """
    if version:
        default_ver = version
    elif utils.get_current_kernel_arch() == 'ia64':
        default_ver = '2.6.20'
    else:
        default_ver = '2.6.14'

    # The tarball name is loop-invariant; compute it once.
    tar = 'linux-%s.tar.bz2' % default_ver
    tarball = None
    # 'directory' rather than 'dir' to avoid shadowing the builtin.
    for directory in (self.bindir, '/usr/local/src'):
        path = os.path.join(directory, tar)
        if os.path.exists(path):
            tarball = path
            break
    if not tarball:
        # No local copy found; pass the bare version so the kernel
        # object fetches it from its mirrors.
        tarball = default_ver

    # Do the extraction of the kernel tree.
    kernel = self.job.kernel(tarball, self.outputdir, self.tmpdir)
    kernel.config(defconfig=True, logged=False)
    return kernel
def set_cross_cc(self, target_arch=None, cross_compile=None,
                 build_target='bzImage'):
    """Set up to cross-compile.

    This is broken. We need to work out what the default compile
    produces, and if not, THEN set the cross compiler.

    @param target_arch: architecture to build for; defaults to the
            currently running kernel architecture (native build).
    @param cross_compile: CROSS_COMPILE toolchain prefix; falls back to
            the job config value 'kernel.cross_cc'.
    @param build_target: make target to use if none was set already.
    """
    # A previous call already configured the target arch; don't redo it.
    if self.target_arch:
        return

    # if someone has set build_target, don't clobber in set_cross_cc
    # run set_build_target before calling set_cross_cc
    if not self.build_target:
        self.set_build_target(build_target)

    # If no 'target_arch' given assume native compilation
    if target_arch is None:
        target_arch = utils.get_current_kernel_arch()
        if target_arch == 'ppc64':
            if self.build_target == 'bzImage':
                self.build_target = 'vmlinux'

    if not cross_compile:
        cross_compile = self.job.config_get('kernel.cross_cc')

    if cross_compile:
        os.environ['CROSS_COMPILE'] = cross_compile
    else:
        # dict.has_key() was removed in Python 3; 'in' works everywhere.
        if 'CROSS_COMPILE' in os.environ:
            del os.environ['CROSS_COMPILE']

    return                 # HACK. Crap out for now.
    # At this point I know what arch I *want* to build for
    # but have no way of working out what arch the default
    # compiler DOES build for.

    def install_package(package):
        raise NotImplementedError("I don't exist yet!")

    if target_arch == 'ppc64':
        install_package('ppc64-cross')
        cross_compile = os.path.join(self.autodir,
                                     'sources/ppc64-cross/bin')
    elif target_arch == 'x86_64':
        install_package('x86_64-cross')
        cross_compile = os.path.join(self.autodir,
                                     'sources/x86_64-cross/bin')

    os.environ['ARCH'] = self.target_arch = target_arch

    self.cross_compile = cross_compile
    if self.cross_compile:
        os.environ['CROSS_COMPILE'] = self.cross_compile
def setup(self, tarball='iozone3_283.tar'):
    """Unpack the IOzone tarball and build the arch-specific target.

    @param tarball: IOzone source tarball to build.
    """
    tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
    utils.extract_tarball_to_dir(tarball, self.srcdir)
    os.chdir(os.path.join(self.srcdir, 'src/current'))

    # Map each kernel architecture to its IOzone make invocation;
    # anything unlisted builds the generic 'linux' target.
    make_commands = {
        'ppc': 'make linux-powerpc',
        'ppc64': 'make linux-powerpc64',
        'x86_64': 'make linux-AMD64',
    }
    command = make_commands.get(utils.get_current_kernel_arch(),
                                'make linux')
    utils.system(command)
def run_once(self):
    """
    Collect a perf callchain profile and check the detailed perf report.

    @raises error.TestFail: if 'perf record' fails, its output file is
            missing, or the report lacks a callchain of length >= 3.
    """
    import tempfile

    # Waiting on ARM/perf support
    if not utils.get_current_kernel_arch().startswith('x86'):
        return

    # These boards are not supported
    unsupported_boards = ['gizmo']
    board = utils.get_board()
    if board in unsupported_boards:
        return

    graph = os.path.join(self.srcdir, 'graph')
    # Use tempfile.mkstemp() instead of the deprecated and insecure
    # os.tempnam(). Close our fd immediately: perf opens the file by
    # path itself. Creating the file BEFORE the try block also means
    # the finally clause always has a valid path to clean up.
    fd, perf_file_path = tempfile.mkstemp()
    os.close(fd)
    result = False
    try:
        perf_record_args = ['perf', 'record', '-e', 'cycles', '-g',
                            '-o', perf_file_path, '--', graph]
        perf_report_args = ['perf', 'report', '-D', '-i',
                            perf_file_path]

        try:
            subprocess.check_output(perf_record_args,
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as cmd_error:
            raise error.TestFail("Running command [%s] failed: %s" %
                                 (' '.join(perf_record_args),
                                  cmd_error.output))

        # Make sure the file still exists.
        if not os.path.isfile(perf_file_path):
            raise error.TestFail('Could not find perf output file: ' +
                                 perf_file_path)

        p = subprocess.Popen(perf_report_args, stdout=subprocess.PIPE)
        result = self.report_has_callchain_length_at_least(p.stdout, 3)
        # Drain the rest of the output so perf report can exit cleanly.
        for _ in p.stdout:
            pass
        p.wait()
    finally:
        # Guard the removal: previously an unconditional os.remove()
        # could raise here and mask the real TestFail raised above.
        if os.path.exists(perf_file_path):
            os.remove(perf_file_path)

    if not result:
        raise error.TestFail('Callchain not found')
def initialize(self, config):
    """Resolve the LSB architecture name and prepare config/cache state.

    @param config: path/handle passed through to config_loader.
    @raises error.TestError: if the running architecture is not
            supported by LSB.
    """
    kernel_arch = utils.get_current_kernel_arch()
    x86_32_variants = ('i386', 'i486', 'i586', 'i686', 'athlon')
    passthrough = ('s390', 's390x', 'ia64', 'x86_64', 'ppc64')

    if kernel_arch in x86_32_variants:
        self.arch = 'ia32'
    elif kernel_arch == 'ppc':
        self.arch = 'ppc32'
    elif kernel_arch in passthrough:
        # These architectures keep their kernel name under LSB.
        self.arch = kernel_arch
    else:
        raise error.TestError('Architecture %s not supported by LSB' %
                              kernel_arch)

    self.config = config_loader(config, self.tmpdir)
    self.cachedir = os.path.join(self.bindir, 'cache')
    if not os.path.isdir(self.cachedir):
        os.makedirs(self.cachedir)

    # Track setup progress so later phases know what remains to be done.
    self.packages_installed = False
    self.libraries_linked = False
def setup(self, tarball='iozone3_347.tar'):
    """
    Builds the given version of IOzone from a tarball.

    @param tarball: Tarball with IOzone
    @see: http://www.iozone.org/src/current/iozone3_347.tar
    """
    tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
    utils.extract_tarball_to_dir(tarball, self.srcdir)
    os.chdir(os.path.join(self.srcdir, 'src/current'))

    # Pick the IOzone make target for this architecture; fall back to
    # the generic 'linux' target for anything not listed.
    targets_by_arch = {
        'ppc': 'linux-powerpc',
        'ppc64': 'linux-powerpc64',
        'x86_64': 'linux-AMD64',
    }
    current_arch = utils.get_current_kernel_arch()
    utils.make(targets_by_arch.get(current_arch, 'linux'))
def get_kernel_build_arch(self, arch=None):
    """
    Work out the current kernel architecture (as a kernel arch)

    @param arch: machine architecture name; defaults to the running
            kernel's architecture when not given.
    @return: the kernel build architecture string.
    """
    if not arch:
        arch = utils.get_current_kernel_arch()

    # Ordered (pattern, kernel arch) table; first match wins, and an
    # unrecognized architecture name is passed through unchanged.
    translations = (
        (r'i.86', 'i386'),
        (r'sun4u', 'sparc64'),
        (r'arm.*', 'arm'),
        (r'sa110', 'arm'),
        (r's390x', 's390'),
        (r'parisc64', 'parisc'),
        (r'ppc.*', 'powerpc'),
        (r'mips.*', 'mips'),
    )
    for pattern, build_arch in translations:
        if re.match(pattern, arch):
            return build_arch
    return arch
def run_once(self, rootdir="/", args=None):
    """
    Do a find for all the ELF files on the system.
    For each one, test for compiler options that should have been used
    when compiling the file.

    For missing compiler options, print the files.

    @param rootdir: kept for caller compatibility (not referenced here).
    @param args: optional list of command-line flags to parse
            (supports --hardfp).
    @raises error.TestFail: if any ELF fails one of the option checks.
    """
    # Avoid the mutable-default-argument pitfall: previously args=[]
    # shared one list across all calls. Default to a fresh list.
    if args is None:
        args = []

    parser = OptionParser()
    parser.add_option('--hardfp',
                      dest='enable_hardfp',
                      default=False,
                      action='store_true',
                      help='Whether to check for hardfp binaries.')
    (options, args) = parser.parse_args(args)

    option_sets = []

    readelf_cmd = glob.glob("/usr/local/*/binutils-bin/*/readelf")[0]

    # We do not test binaries if they are built with Address Sanitizer
    # because it is a separate testing tool.
    no_asan_used = utils.system_output("%s -s "
                                       "/opt/google/chrome/chrome | "
                                       "egrep -q \"__asan_init\" || "
                                       "echo no ASAN" % readelf_cmd)
    if not no_asan_used:
        logging.debug("ASAN detected on /opt/google/chrome/chrome. "
                      "Will skip all checks.")
        return

    # Check that gold was used to build binaries.
    # TODO(jorgelo): re-enable this check once crbug.com/417912 is fixed.
    # gold_cmd = ("%s -S {} 2>&1 | "
    #             "egrep -q \".note.gnu.gold-ve\"" % readelf_cmd)
    # gold_find_options = ""
    # if utils.get_cpu_arch() == "arm":
    #     # gold is only enabled for Chrome on ARM.
    #     gold_find_options = "-path \"/opt/google/chrome/chrome\""
    # gold_whitelist = os.path.join(self.bindir, "gold_whitelist")
    # option_sets.append(self.create_and_filter("gold",
    #                                           gold_cmd,
    #                                           gold_whitelist,
    #                                           gold_find_options))

    # Verify non-static binaries have BIND_NOW in dynamic section.
    now_cmd = ("(%s {} | grep -q statically) ||"
               "%s -d {} 2>&1 | "
               "egrep -q \"BIND_NOW\"" % (FILE_CMD, readelf_cmd))
    now_whitelist = os.path.join(self.bindir, "now_whitelist")
    option_sets.append(self.create_and_filter("-Wl,-z,now",
                                              now_cmd,
                                              now_whitelist))

    # Verify non-static binaries have RELRO program header.
    relro_cmd = ("(%s {} | grep -q statically) ||"
                 "%s -l {} 2>&1 | "
                 "egrep -q \"GNU_RELRO\"" % (FILE_CMD, readelf_cmd))
    relro_whitelist = os.path.join(self.bindir, "relro_whitelist")
    option_sets.append(self.create_and_filter("-Wl,-z,relro",
                                              relro_cmd,
                                              relro_whitelist))

    # Verify non-static binaries are dynamic (built PIE).
    pie_cmd = ("(%s {} | grep -q statically) ||"
               "%s -l {} 2>&1 | "
               "egrep -q \"Elf file type is DYN\"" % (FILE_CMD,
                                                     readelf_cmd))
    pie_whitelist = os.path.join(self.bindir, "pie_whitelist")
    option_sets.append(self.create_and_filter("-fPIE",
                                              pie_cmd,
                                              pie_whitelist))

    # Verify ELFs don't include TEXTRELs.
    # FIXME: Remove the i?86 filter after the bug is fixed.
    # crbug.com/686926
    if (utils.get_current_kernel_arch() not in
            ('i%d86' % i for i in xrange(3, 7))):
        textrel_cmd = ("(%s {} | grep -q statically) ||"
                       "%s -d {} 2>&1 | "
                       "(egrep -q \"0x0+16..TEXTREL\"; [ $? -ne 0 ])"
                       % (FILE_CMD, readelf_cmd))
        textrel_whitelist = os.path.join(self.bindir,
                                         "textrel_whitelist")
        option_sets.append(self.create_and_filter("TEXTREL",
                                                  textrel_cmd,
                                                  textrel_whitelist))

    # Verify all binaries have non-exec STACK program header.
    stack_cmd = ("%s -lW {} 2>&1 | "
                 "egrep -q \"GNU_STACK.*RW \"" % readelf_cmd)
    stack_whitelist = os.path.join(self.bindir, "stack_whitelist")
    option_sets.append(self.create_and_filter("Executable Stack",
                                              stack_cmd,
                                              stack_whitelist))

    # Verify all binaries have W^X LOAD program headers.
    loadwx_cmd = ("%s -lW {} 2>&1 | "
                  "grep \"LOAD\" | egrep -v \"(RW |R E)\" | "
                  "wc -l | grep -q \"^0$\"" % readelf_cmd)
    loadwx_whitelist = os.path.join(self.bindir, "loadwx_whitelist")
    option_sets.append(self.create_and_filter("LOAD Writable and Exec",
                                              loadwx_cmd,
                                              loadwx_whitelist))

    # Verify ARM binaries are all using VFP registers.
    if (options.enable_hardfp and utils.get_cpu_arch() == 'arm'):
        hardfp_cmd = ("%s -A {} 2>&1 | "
                      "egrep -q \"Tag_ABI_VFP_args: VFP registers\"" %
                      readelf_cmd)
        hardfp_whitelist = os.path.join(self.bindir, "hardfp_whitelist")
        option_sets.append(self.create_and_filter("hardfp",
                                                  hardfp_cmd,
                                                  hardfp_whitelist))

    fail_msg = ""

    # There is currently no way to clear binary prebuilts for all devs.
    # Thus, when a new check is added to this test, the test might fail
    # for users who have old prebuilts which have not been compiled
    # in the correct manner.
    fail_summaries = []
    full_msg = "Test results:"
    num_fails = 0
    for cos in option_sets:
        if len(cos.filtered_set):
            num_fails += 1
            fail_msg += cos.get_fail_message() + "\n"
            fail_summaries.append(cos.get_fail_summary_message())
        full_msg += str(cos) + "\n\n"
    fail_summary_msg = ", ".join(fail_summaries)

    logging.error(fail_msg)
    logging.debug(full_msg)
    if num_fails:
        raise error.TestFail(fail_summary_msg)
def __init__(self, job, base_tree, subdir, tmp_dir, build_dir,
             leave=False):
    """Initialize the kernel build environment

    job
            which job this build is part of
    base_tree
            base kernel tree. Can be one of the following:
                    1. A local tarball
                    2. A URL to a tarball
                    3. A local directory (will symlink it)
                    4. A shorthand expandable (eg '2.6.11-git3')
    subdir
            subdir in the results directory (eg "build")
            (holds config/, debug/, results/)
    tmp_dir
            scratch directory holding the src/ and build trees
    leave
            Boolean, whether to leave existing tmpdir or not
    """
    super(kernel, self).__init__(job)
    self.autodir = job.autodir

    # Scratch trees live under tmp_dir; results/config/logs under subdir.
    self.src_dir = os.path.join(tmp_dir, 'src')
    self.build_dir = os.path.join(tmp_dir, build_dir)
    # created by get_kernel_tree
    self.config_dir = os.path.join(subdir, 'config')
    self.log_dir = os.path.join(subdir, 'debug')
    self.results_dir = os.path.join(subdir, 'results')
    self.subdir = os.path.basename(subdir)

    # Unless asked to keep it, wipe any stale source/build state so the
    # extraction below starts from a clean tree.
    if not leave:
        if os.path.isdir(self.src_dir):
            utils.system('rm -rf ' + self.src_dir)
        if os.path.isdir(self.build_dir):
            utils.system('rm -rf ' + self.build_dir)

    if not os.path.exists(self.src_dir):
        os.mkdir(self.src_dir)
    # The per-run output directories are always recreated from scratch.
    for path in [self.config_dir, self.log_dir, self.results_dir]:
        if os.path.exists(path):
            utils.system('rm -rf ' + path)
        os.mkdir(path)

    logpath = os.path.join(self.log_dir, 'build_log')
    self.logfile = open(logpath, 'w+')
    self.applied_patches = []

    # Architecture-specific make target and (optionally) image name.
    self.target_arch = None
    self.build_target = 'bzImage'
    self.build_image = None

    arch = utils.get_current_kernel_arch()
    if arch == 's390' or arch == 's390x':
        self.build_target = 'image'
    elif arch == 'ia64':
        self.build_target = 'all'
        self.build_image = 'vmlinux.gz'

    if not leave:
        self.logfile.write('BASE: %s\n' % base_tree)

        # Where we have direct version hint record that
        # for later configuration selection.
        shorthand = re.compile(r'^\d+\.\d+\.\d+')
        if shorthand.match(base_tree):
            self.base_tree_version = base_tree
        else:
            self.base_tree_version = None

        # Actually extract the tree. Make sure we know it occurred
        self.extract(base_tree)