def run_once(self):
    """Check for accelerometers, and if present, check data is valid."""
    # First make sure that the motion sensors are active. If this
    # check fails it means the EC motion sense task is not running and
    # therefore not updating acceleration values in shared memory.
    # Note that this check only works for x86 boards.
    if utils.get_arch().startswith('x86'):
        if utils.system_output('ectool motionsense active') == "0":
            raise error.TestFail("Motion sensing is inactive")

    # Find the iio sysfs directory for EC accels.
    self._find_sysfs_accel_dir()

    if not self.sysfs_accel_old_path:
        # New-style sysfs layout: verify every accelerometer found.
        for location in self.sysfs_accel_paths.keys():
            self._verify_accel_data(location)
        return

    # Old-style layout: ask ectool which accelerometers are present.
    lines = utils.system_output('ectool motionsense').splitlines()

    # If the base accelerometer is present, then verify data.
    if 'None' not in lines[1]:
        self._verify_accel_data(self._ACCEL_BASE_LOC)

    # If the lid accelerometer is present, then verify data.
    if 'None' not in lines[2]:
        self._verify_accel_data(self._ACCEL_LID_LOC)
def setup(self):
    """Test setup: cache kernel/userspace architecture and log uname."""
    self.arch = utils.get_arch()
    self.userspace_arch = utils.get_arch_userspace()
    # Report the full uname for anyone reading logs.
    full_uname = utils.system_output('uname -a')
    logging.info('Running %s kernel, %s userspace: %s',
                 self.arch, self.userspace_arch, full_uname)
def alsa_rms_test_setup():
    """Setup for alsa_rms_test.

    Different boards/chipsets have different set of mixer controls. Even
    controls that have the same name on different boards might have
    different capabilities. The following is a general idea to setup a
    given class of boards, and some specialized setup for certain boards.
    """
    card_id = alsa_utils.get_first_soundcard_with_control('Mic Jack', 'Mic')
    arch = utils.get_arch()
    board = utils.get_board()
    uses_max98090 = os.path.exists('/sys/module/snd_soc_max98090')

    # Pick the (control, value) pairs appropriate for this platform.
    if board in ['daisy_spring', 'daisy_skate']:
        # The MIC controls of the boards do not support dB syntax.
        settings = [('Headphone', _DEFAULT_ALSA_MAX_VOLUME),
                    ('MIC1', _DEFAULT_ALSA_MAX_VOLUME),
                    ('MIC2', _DEFAULT_ALSA_MAX_VOLUME)]
    elif arch in ['armv7l', 'aarch64'] or uses_max98090:
        # ARM platforms or Intel platforms that uses max98090 codec driver.
        settings = [('Headphone', _DEFAULT_ALSA_MAX_VOLUME),
                    ('MIC1', _DEFAULT_ALSA_CAPTURE_GAIN),
                    ('MIC2', _DEFAULT_ALSA_CAPTURE_GAIN)]
    else:
        # The rest of Intel platforms.
        settings = [('Master', _DEFAULT_ALSA_MAX_VOLUME),
                    ('Capture', _DEFAULT_ALSA_CAPTURE_GAIN)]

    for control, value in settings:
        alsa_utils.mixer_cmd(card_id, 'sset ' + control + ' ' + value)
def run_once(self, host=None):
    """Run the test.

    Reboots the client repeatedly, recording the address of the target
    kernel symbol after each boot, then fails if the observed addresses
    show kASLR missing, low-entropy, or clumped.

    @param host: The client machine to connect to; should be a Host object.
    """
    assert host is not None, "The host must be specified."
    self._client = host

    # Report client configuration, to help debug any problems.
    kernel_ver = self._client.run('uname -r').stdout.rstrip()
    arch = utils.get_arch(self._client.run)
    logging.info("Starting kASLR tests for '%s' on '%s'",
                 kernel_ver, arch)

    # Make sure we're expecting kernel ASLR at all.
    if utils.compare_versions(kernel_ver, "3.8") < 0:
        logging.info("kASLR not available on this kernel")
        return
    if arch.startswith('arm'):
        logging.info("kASLR not available on this architecture")
        return

    kallsyms_filename = os.path.join(self.resultsdir, 'kallsyms')
    address_count = {}
    count = 0
    while True:
        kallsyms = self._read_kallsyms(kallsyms_filename)
        symbols = self._parse_kallsyms(kallsyms)
        # BUG FIX: dict.has_key() is deprecated and Python-2-only; the
        # "in" operator is equivalent and forward-compatible.
        assert self.target_symbol in symbols, \
            "The '%s' symbol is missing!?" % (self.target_symbol)

        addr = symbols[self.target_symbol]
        logging.debug("Reboot %d: Symbol %s @ %s",
                      count, self.target_symbol, addr)

        # Tally how often each load address was observed.
        address_count.setdefault(addr, 0)
        address_count[addr] += 1

        count += 1
        if count == self.reboot_count:
            break
        self._reboot_machine()

    unique = len(address_count)
    logging.info("Unique kernel offsets: %d", unique)
    # Find the most frequently seen address to detect clumping.
    highest = 0
    for addr in address_count:
        logging.debug("Address %s: %d", addr, address_count[addr])
        if address_count[addr] > highest:
            highest = address_count[addr]
    if unique < 2:
        raise error.TestFail("kASLR not functioning")
    if unique < (self.reboot_count / 3):
        raise error.TestFail("kASLR entropy seems very low")
    if highest > (unique / 10):
        raise error.TestFail("kASLR entropy seems to clump")
def _firmware_resume_time(self):
    """Calculate seconds for firmware resume from logged TSC. (x86 only)"""
    if utils.get_arch() not in ['i686', 'x86_64']:
        # TODO: support this on ARM somehow
        return 0
    tsc_pattern = re.compile(r'TSC at resume: (\d+)$')
    # cpuinfo_max_freq is reported in kHz; scale to Hz.
    max_freq_khz = utils.read_one_line(
        '/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq')
    freq = 1000 * int(max_freq_khz)
    # Walk the log backwards so the most recent resume record wins.
    for line in reversed(self._logs):
        found = tsc_pattern.search(line)
        if found:
            return float(found.group(1)) / freq
    raise error.TestError('Failed to find TSC resume value in syslog.')
def __install_packages(self):
    """
    Install all relevant packages from the build that was just downloaded.

    Scans self.srcdir for *.rpm files and passes to yum only the ones
    that match a requested package name in self.pkg_list, are built for
    this machine's architecture, and are not source RPMs.
    """
    os.chdir(self.srcdir)
    arch = utils.get_arch()
    installable_packages = []
    for rpm in glob.glob("*.rpm"):
        for pkg in self.pkg_list:
            # Pass to yum only appropriate packages (ie, non-source and
            # compatible with the machine's architecture).
            if (rpm.startswith(pkg) and
                    rpm.endswith(".%s.rpm" % arch) and
                    not rpm.endswith(".src.rpm")):
                installable_packages.append(rpm)
                # BUG FIX: stop after the first match so an rpm whose
                # name matches more than one requested package is not
                # passed to yum twice.
                break
    utils.system("yum install --nogpgcheck -y %s" %
                 " ".join(installable_packages))
def run_once(self):
    """Verify Vboot Context read/write through the cros-ec-vbc sysfs entry.

    Skips unless running on ARM with Vboot Context stored in cros-ec.
    Reads the context via sysfs and compares it against mosys, writes a
    random value and reads it back, then restores the original context.
    """
    arch = utils.get_arch()
    if not arch.startswith('arm'):
        logging.info('Skip test for non-ARM arch %s', arch)
        return

    media = utils.read_file(self.dts_node_path).strip('\n\x00')
    if media != 'cros-ec':
        logging.info('Skip test: Vboot Context storage media is "%s"', media)
        return

    # Locate the cros-ec-vbc sysfs entry.
    sysfs_entry = None
    for name in os.listdir(self.sys_vbootcontext_path):
        if name.startswith('cros-ec-vbc'):
            sysfs_entry = os.path.join(self.sys_vbootcontext_path, name,
                                       'vboot_context')
            break
    else:
        # BUG FIX: TestFail does not %-format extra arguments, so the
        # original two-argument form never interpolated the path into
        # the failure message; format it explicitly.
        raise error.TestFail('Could not find sysfs entry under %s' %
                             self.sys_vbootcontext_path)

    # Retrieve Vboot Context so it can be restored afterwards.
    vboot_context = utils.system_output('mosys nvram vboot read').strip()
    try:
        # Test read
        vc_expect = vboot_context
        vc_got = utils.read_file(sysfs_entry).strip('\n\x00')
        if vc_got != vc_expect:
            raise error.TestFail('Could not read Vboot Context: '
                                 'Expect "%s" but got "%s"' % (vc_expect,
                                                               vc_got))
        # Test write of a random hex string
        vc_expect = ''.join(random.choice('0123456789abcdef')
                            for _ in xrange(32))
        utils.open_write_close(sysfs_entry, vc_expect)
        vc_got = utils.system_output('mosys nvram vboot read').strip()
        if vc_got != vc_expect:
            raise error.TestFail('Could not write Vboot Context: '
                                 'Expect "%s" but got "%s"' % (vc_expect,
                                                               vc_got))
    finally:
        # Restore Vboot Context
        utils.run('mosys nvram vboot write "%s"' % vboot_context)
def run_once(self):
    """Check the running kernel's config against this test's expectations.

    Loads the kernel config, adjusts the expectation lists for the
    running kernel version, runs the static list checks, then several
    dynamic value checks, and fails on any recorded mismatch.
    """
    # Cache the architecture to avoid redundant execs to "uname".
    arch = utils.get_arch()
    userspace_arch = utils.get_arch_userspace()
    # Report the full uname for anyone reading logs.
    logging.info('Running %s kernel, %s userspace: %s',
                 arch, userspace_arch,
                 utils.system_output('uname -a'))

    # Load the list of kernel config variables.
    config = kernel_config.KernelConfig()
    config.initialize()

    # Adjust for kernel-version-specific changes
    kernel_ver = os.uname()[2]
    if utils.compare_versions(kernel_ver, "3.10") >= 0:
        # BINFMT_SCRIPT must be builtin on 3.10+.
        for entry in self.IS_EXCLUSIVE:
            if entry['regex'] == 'BINFMT_':
                entry['builtin'].append('BINFMT_SCRIPT')
    if utils.compare_versions(kernel_ver, "3.14") >= 0:
        self.IS_MODULE.append('TEST_ASYNC_DRIVER_PROBE')
        for entry in self.IS_EXCLUSIVE:
            if entry['regex'] == 'BINFMT_':
                entry['builtin'].append('BINFMT_MISC')
    if utils.compare_versions(kernel_ver, "3.18") >= 0:
        for entry in self.IS_EXCLUSIVE:
            if entry['regex'] == '.*_FS$':
                entry['builtin'].append('SND_PROC_FS')
    if utils.compare_versions(kernel_ver, "4.4") < 0:
        # EXT4_USE_FOR_EXT23 was removed in 4.4.
        for entry in self.IS_EXCLUSIVE:
            if entry['regex'] == '.*_FS$':
                entry['builtin'].append('EXT4_USE_FOR_EXT23')

    # Run the static checks.
    # NOTE: map() is used for its side effects here (Python 2 idiom);
    # each call records failures inside 'config'.
    map(config.has_builtin, self.IS_BUILTIN)
    map(config.has_module, self.IS_MODULE)
    map(config.is_enabled, self.IS_ENABLED)
    map(config.is_missing, self.IS_MISSING)
    map(config.is_exclusive, self.IS_EXCLUSIVE)

    # Run the dynamic checks.

    # Security; NULL-address hole should be as large as possible.
    # Upstream kernel recommends 64k, which should be large enough to
    # catch nearly all dereferenced structures.
    wanted = '65536'
    if self.is_arm_family(arch):
        # ... except on ARM where it shouldn't be larger than 32k due
        # to historical ELF load location.
        wanted = '32768'
    config.has_value('DEFAULT_MMAP_MIN_ADDR', [wanted])

    # Security; make sure NX page table bits are usable.
    if self.is_x86_family(arch):
        if arch == "i386":
            config.has_builtin('X86_PAE')
        else:
            config.has_builtin('X86_64')

    # Security; marks data segments as RO/NX, text as RO.
    if (arch == 'armv7l' and
            utils.compare_versions(kernel_ver, "3.8") < 0):
        # Pre-3.8 ARM kernels lack these options entirely.
        config.is_missing('DEBUG_RODATA')
        config.is_missing('DEBUG_SET_MODULE_RONX')
    else:
        config.has_builtin('DEBUG_RODATA')
        config.has_builtin('DEBUG_SET_MODULE_RONX')
        if arch == 'aarch64':
            config.has_builtin('DEBUG_ALIGN_RODATA')

    # NaCl; allow mprotect+PROT_EXEC on noexec mapped files.
    config.has_value('MMAP_NOEXEC_TAINT', ['0'])

    # Kernel: make sure port 0xED is the one used for I/O delay
    if self.is_x86_family(arch):
        config.has_builtin('IO_DELAY_0XED')
        needed = config.get('CONFIG_IO_DELAY_TYPE_0XED', None)
        config.has_value('DEFAULT_IO_DELAY_TYPE', [needed])

    # Raise a failure if anything unexpected was seen.
    if len(config.failures()):
        raise error.TestFail((", ".join(config.failures())))
def run_once(self):
    """Compare kernel limits and sysctl values against reference values.

    Builds minimum-value and exact-value reference tables, adjusts them
    for architecture / ARC / kernel version, reads each value via
    self.get_limit(), and fails listing every key that was wrong or
    never checked.
    """
    # Keys that failed their check; reported all at once at the end.
    errors = set()

    # Max procs, max threads, and file max are dependent upon total memory.
    # The kernel uses a formula similar to:
    #   MemTotal-kb / 128 = max procs
    #   MemTotal-kb / 64  = max threads
    #   MemTotal-kb / 10  = file_max
    # But note that MemTotal changes at the end of initialization.
    # The values used below for these settings should give sufficient head
    # room for usage and kernel allocation.

    # Values that must be at least this large.
    ref_min = {
        'file_max': 50000,
        'kptr_restrict': 1,
        'max_open': 1024,
        'max_procs': 3000,
        'max_threads': 7000,
        'ngroups_max': 65536,
        'nr_open': 1048576,
        'pid_max': 32768,
        'mmap_min_addr': 65536,
    }

    # Values that must match exactly.
    ref_equal = {
        'leases': 1,
        'panic': -1,
        'protected_hardlinks': 1,
        'protected_symlinks': 1,
        'ptrace_scope': 1,
        'randomize_va_space': 2,
        'sched_rt_period_us': 1000000,
        'sched_rt_runtime_us': 800000,
        'sysrq': 1,
        'suid-dump': 2,
        'tcp_syncookies': 1,
    }

    # Where each value is read from.
    refpath = {
        'file_max': '/proc/sys/fs/file-max',
        'leases': '/proc/sys/fs/leases-enable',
        'max_open': '/proc/self/limits',
        'max_procs': '/proc/self/limits',
        'max_threads': '/proc/sys/kernel/threads-max',
        'mmap_min_addr': '/proc/sys/vm/mmap_min_addr',
        'kptr_restrict': '/proc/sys/kernel/kptr_restrict',
        'ngroups_max': '/proc/sys/kernel/ngroups_max',
        'nr_open': '/proc/sys/fs/nr_open',
        'panic': '/proc/sys/kernel/panic',
        'pid_max': '/proc/sys/kernel/pid_max',
        'protected_hardlinks': '/proc/sys/fs/protected_hardlinks',
        'protected_symlinks': '/proc/sys/fs/protected_symlinks',
        'ptrace_scope': '/proc/sys/kernel/yama/ptrace_scope',
        'randomize_va_space': '/proc/sys/kernel/randomize_va_space',
        'sched_rt_period_us': '/proc/sys/kernel/sched_rt_period_us',
        'sched_rt_runtime_us': '/proc/sys/kernel/sched_rt_runtime_us',
        'suid-dump': '/proc/sys/fs/suid_dumpable',
        'sysrq': '/proc/sys/kernel/sysrq',
        'tcp_syncookies': '/proc/sys/net/ipv4/tcp_syncookies',
    }

    # Adjust arch-specific values.
    if utils.get_arch().startswith('arm'):
        ref_min['mmap_min_addr'] = 32768
    if utils.get_arch().startswith('aarch64'):
        ref_min['mmap_min_addr'] = 32768

    # ARM-compatible limit on x86 if ARC++ is present (b/30146997)
    if utils.is_arc_available():
        ref_min['mmap_min_addr'] = 32768

    # Adjust version-specific details.
    kernel_ver = os.uname()[2]
    if utils.compare_versions(kernel_ver, "3.6") < 0:
        # Prior to kernel version 3.6, Yama handled link restrictions.
        refpath['protected_hardlinks'] = \
            '/proc/sys/kernel/yama/protected_nonaccess_hardlinks'
        refpath['protected_symlinks'] = \
            '/proc/sys/kernel/yama/protected_sticky_symlinks'

    # Create osvalue dictionary with the same keys as refpath.
    # A value left at None below means the key was never checked.
    osvalue = {}
    for key in refpath:
        osvalue[key] = None

    # Check the minimum-value table.
    for key in ref_min:
        osvalue[key] = self.get_limit(key, refpath[key])
        if osvalue[key] < ref_min[key]:
            logging.warning('%s is %d', refpath[key], osvalue[key])
            logging.warning('%s should be at least %d', refpath[key],
                            ref_min[key])
            errors.add(key)
        else:
            logging.info('%s is %d >= %d', refpath[key], osvalue[key],
                         ref_min[key])

    # Check the exact-value table.
    for key in ref_equal:
        osvalue[key] = self.get_limit(key, refpath[key])
        if osvalue[key] != ref_equal[key]:
            logging.warning('%s is set to %d', refpath[key], osvalue[key])
            logging.warning('Expected %d', ref_equal[key])
            errors.add(key)
        else:
            logging.info('%s is %d', refpath[key], osvalue[key])

    # Look for anything from refpath that wasn't checked yet:
    for key in osvalue:
        if osvalue[key] == None:
            logging.warning('%s was never checked', key)
            errors.add(key)

    # If self.error is not zero, there were errors.
    if len(errors) > 0:
        raise error.TestFail('Found incorrect values: %s' %
                             ', '.join(errors))
def run_once(self):
    """ The actual test.

    Loads the kernel config, adjusts the expectation lists for the
    running kernel version, runs the static list checks, then several
    dynamic value checks, and fails on any recorded mismatch.
    """
    # Cache the architecture to avoid redundant execs to "uname".
    arch = utils.get_arch()
    userspace_arch = utils.get_arch_userspace()
    # Report the full uname for anyone reading logs.
    logging.info('Running %s kernel, %s userspace: %s',
                 arch, userspace_arch,
                 utils.system_output('uname -a'))

    # Load the list of kernel config variables.
    config = kernel_config.KernelConfig()
    config.initialize(missing_ok=self.MISSING_OK)

    # Adjust for kernel-version-specific changes
    kernel_ver = os.uname()[2]

    # For linux-3.10 or newer.
    if utils.compare_versions(kernel_ver, "3.10") >= 0:
        for entry in self.IS_EXCLUSIVE:
            if entry['regex'] == 'BINFMT_':
                entry['builtin'].append('BINFMT_SCRIPT')

    # For linux-3.14 or newer.
    if utils.compare_versions(kernel_ver, "3.14") >= 0:
        self.IS_MODULE.append('TEST_ASYNC_DRIVER_PROBE')
        self.IS_MISSING.remove('INET_DIAG')
        for entry in self.IS_EXCLUSIVE:
            if entry['regex'] == 'BINFMT_':
                entry['builtin'].append('BINFMT_MISC')
            if entry['regex'] == '.*_FS$':
                entry['module'].append('NFS_FS')

    # For linux-3.18 or newer.
    if utils.compare_versions(kernel_ver, "3.18") >= 0:
        for entry in self.IS_EXCLUSIVE:
            if entry['regex'] == '.*_FS$':
                entry['builtin'].append('SND_PROC_FS')
                entry['builtin'].append('USB_CONFIGFS_F_FS')
                entry['builtin'].append('ESD_FS')
                entry['enabled'].append('CONFIGFS_FS')
                entry['module'].append('USB_F_FS')
        # Like FW_LOADER_USER_HELPER, these may be exploited by userspace.
        # We run udev everywhere which uses netlink sockets for event
        # propagation rather than executing programs, so don't need this.
        self.IS_MISSING.append('UEVENT_HELPER')
        self.IS_MISSING.append('UEVENT_HELPER_PATH')

    # For kernels older than linux-4.4.
    if utils.compare_versions(kernel_ver, "4.4") < 0:
        for entry in self.IS_EXCLUSIVE:
            if entry['regex'] == '.*_FS$':
                entry['builtin'].append('EXT4_USE_FOR_EXT23')

    # For linux-4.4 or newer.
    if utils.compare_versions(kernel_ver, '4.4') >= 0:
        self.IS_BUILTIN.append('STATIC_USERMODEHELPER')

    # For linux-4.19 or newer.
    if utils.compare_versions(kernel_ver, "4.19") >= 0:
        self.IS_MISSING.remove('BPF_SYSCALL')
        self.IS_BUILTIN.append('HAVE_EBPF_JIT')
        self.IS_BUILTIN.append('BPF_JIT_ALWAYS_ON')
        # CC_STACKPROTECTOR was renamed to STACKPROTECTOR.
        self.IS_BUILTIN.remove('CC_STACKPROTECTOR')
        self.IS_BUILTIN.append('STACKPROTECTOR')

    # Run the static checks.
    # NOTE: map() is used for its side effects here (Python 2 idiom);
    # each call records failures inside 'config'.
    map(config.has_builtin, self.IS_BUILTIN)
    map(config.has_module, self.IS_MODULE)
    map(config.is_enabled, self.IS_ENABLED)
    map(config.is_missing, self.IS_MISSING)
    map(config.is_exclusive, self.IS_EXCLUSIVE)

    # Run the dynamic checks.

    # Security; NULL-address hole should be as large as possible.
    # Upstream kernel recommends 64k, which should be large enough
    # to catch nearly all dereferenced structures. For
    # compatibility with ARM binaries (even on x86) this needs to
    # be 32k.
    wanted = '32768'
    config.has_value('DEFAULT_MMAP_MIN_ADDR', [wanted])

    # Security; make sure usermode helper is our tool for linux-4.4+.
    if utils.compare_versions(kernel_ver, '4.4') >= 0:
        wanted = '"/sbin/usermode-helper"'
        config.has_value('STATIC_USERMODEHELPER_PATH', [wanted])

    # Security; make sure NX page table bits are usable.
    if self.is_x86_family(arch):
        if arch == "i386":
            config.has_builtin('X86_PAE')
        else:
            config.has_builtin('X86_64')

    # Security; marks data segments as RO/NX, text as RO.
    if utils.compare_versions(kernel_ver, "4.11") < 0:
        config.has_builtin('DEBUG_RODATA')
        config.has_builtin('DEBUG_SET_MODULE_RONX')
    else:
        # The options were renamed in linux-4.11.
        config.has_builtin('STRICT_KERNEL_RWX')
        config.has_builtin('STRICT_MODULE_RWX')
    if arch == 'aarch64':
        config.has_builtin('DEBUG_ALIGN_RODATA')

    # NaCl; allow mprotect+PROT_EXEC on noexec mapped files.
    config.has_value('MMAP_NOEXEC_TAINT', ['0'])

    # Kernel: make sure port 0xED is the one used for I/O delay.
    if self.is_x86_family(arch):
        config.has_builtin('IO_DELAY_0XED')
        needed = config.get('CONFIG_IO_DELAY_TYPE_0XED', None)
        config.has_value('DEFAULT_IO_DELAY_TYPE', [needed])

    # Raise a failure if anything unexpected was seen.
    if len(config.failures()):
        raise error.TestFail((", ".join(config.failures())))
def get(self, src_package, dst_dir, rfilter=None, tag=None, build=None,
        arch=None):
    """
    Download a list of packages from the build system.

    This will download all packages originated from source package [package]
    with given [tag] or [build] for the architecture reported by the machine.

    @param src_package: Source package name.
    @param dst_dir: Destination directory for the downloaded packages.
    @param rfilter: Regexp filter, only download the packages that match
            that particular filter.
    @param tag: Build system tag.
    @param build: Build system ID.
    @param arch: Package arch. Useful when you want to download noarch
            packages.

    @return: List of paths with the downloaded rpm packages.
    @raise ValueError: If neither tag nor build is given, the tag has no
            builds, the build does not exist, or no packages match.
    """
    if build and build.isdigit():
        build = int(build)

    if tag and build:
        logging.info("Both tag and build parameters provided, ignoring tag "
                     "parameter...")

    if not tag and not build:
        raise ValueError("Koji install selected but neither koji_tag "
                         "nor koji_build parameters provided. Please "
                         "provide an appropriate tag or build name.")

    if not build:
        builds = self.session.listTagged(tag, latest=True, inherit=True,
                                         package=src_package)
        if not builds:
            raise ValueError("Tag %s has no builds of %s" % (tag,
                                                             src_package))
        info = builds[0]
    else:
        info = self.session.getBuild(build)

    if info is None:
        raise ValueError('No such brew/koji build: %s' % build)

    if arch is None:
        arch = utils.get_arch()

    rpms = self.session.listRPMs(buildID=info['id'], arches=arch)
    if not rpms:
        # BUG FIX: the original applied % only to 'arch' and passed
        # koji.buildLabel(info) as a stray second ValueError argument,
        # leaving '%s' unformatted; parenthesize the format tuple.
        raise ValueError("No %s packages available for %s" %
                         (arch, koji.buildLabel(info)))

    # Compile the filter once instead of once per rpm.
    filter_regexp = re.compile(rfilter, re.IGNORECASE) if rfilter else None

    rpm_paths = []
    for rpm in rpms:
        rpm_name = koji.pathinfo.rpm(rpm)
        url = ("%s/%s/%s/%s/%s" % (self.koji_options['pkgurl'],
                                   info['package_name'],
                                   info['version'], info['release'],
                                   rpm_name))
        if filter_regexp:
            download = bool(filter_regexp.match(os.path.basename(rpm_name)))
        else:
            download = True

        if download:
            r = utils.get_file(url,
                               os.path.join(dst_dir, os.path.basename(url)))
            rpm_paths.append(r)

    return rpm_paths