def run_once(self):
    """Verify x86 security register settings before and after suspend.

    Skips (returns) on ARM, raises TestNAError for any other non-x86
    CPU, and fails if any register check reports a mismatch either on
    the running machine or after a suspend/resume cycle.
    """
    errors = 0
    cpu_arch = power_utils.get_x86_cpu_arch()
    if not cpu_arch:
        # Not a recognized x86 CPU; fall back to the generic arch string
        # to distinguish "ARM, skip" from "unknown, not applicable".
        cpu_arch = utils.get_cpu_arch()
        if cpu_arch == "arm":
            logging.info('OK: skipping x86-only test on %s.', cpu_arch)
            return
        raise error.TestNAError('Unknown CPU with arch "%s".' % (cpu_arch))
    # _check_all presumably selects register baselines by CPU type.
    if cpu_arch == 'Stoney':
        self._cpu_type = 'Stoney'
    elif cpu_arch == 'Atom':
        self._cpu_type = 'Atom'
    else:
        self._cpu_type = 'Non-Atom'
    self._registers = power_utils.Registers()
    # Check running machine.
    errors += self._check_all()
    # Pause briefly to make sure the RTC is ready for suspend/resume.
    time.sleep(3)
    # Suspend the system to RAM and return after 10 seconds.
    sys_power.do_suspend(10)
    # Check resumed machine.
    errors += self._check_all()
    if errors > 0:
        raise error.TestFail('x86 register mismatch detected')
def get_memory_keyvals(self):
    """
    Reads the graphics memory values and returns them as keyvals.

    Determines the GPU driver ("soc") to decide which sysfs fields to
    read, parses each field into keyvals, and appends derived meminfo
    usage values.

    @raises error.TestFail: if the detected soc has no field baseline.
    """
    keyvals = {}

    # Get architecture type and list of sysfs fields to read.
    soc = utils.get_cpu_soc_family()

    arch = utils.get_cpu_arch()
    if arch in ('x86_64', 'i386'):
        pci_vga_device = utils.run("lspci | grep VGA").stdout.rstrip('\n')
        if "Advanced Micro Devices" in pci_vga_device:
            soc = 'amdgpu'
        elif "Intel Corporation" in pci_vga_device:
            soc = 'i915'
        elif "Cirrus Logic" in pci_vga_device:
            # Used on qemu with kernels 3.18 and lower. Limited to 800x600
            # resolution.
            soc = 'cirrus'
        else:
            # Fall back to lshw output to detect a virtio GPU.
            pci_vga_device = utils.run('lshw -c video').stdout.rstrip()
            # Raw string so `\S` is a regex class, not a (deprecated)
            # string escape.
            groups = re.search(r'configuration:.*driver=(\S*)',
                               pci_vga_device)
            if groups and 'virtio' in groups.group(1):
                soc = 'virtio'

    if soc not in self.arch_fields:
        raise error.TestFail('Error: Architecture "%s" not yet supported.' %
                             soc)
    fields = self.arch_fields[soc]

    for field_name in fields:
        possible_field_paths = fields[field_name]
        field_value = None
        for path in possible_field_paths:
            # Non-zero status means this candidate path does not exist;
            # try the next one. (Assumes utils.system returns the exit
            # status here -- TODO confirm against the utils wrapper.)
            if utils.system('ls %s' % path):
                continue
            field_value = utils.system_output('cat %s' % path)
            break

        if not field_value:
            logging.error('Unable to find any sysfs paths for field "%s"',
                          field_name)
            self.num_errors += 1
            continue

        parsed_results = GraphicsKernelMemory._parse_sysfs(field_value)

        for key in parsed_results:
            keyvals['%s_%s' % (field_name, key)] = parsed_results[key]

        # A zero byte count from the driver is treated as an error.
        if 'bytes' in parsed_results and parsed_results['bytes'] == 0:
            logging.error('%s reported 0 bytes', field_name)
            self.num_errors += 1

    keyvals['meminfo_MemUsed'] = (utils.read_from_meminfo('MemTotal') -
                                  utils.read_from_meminfo('MemFree'))
    keyvals['meminfo_SwapUsed'] = (utils.read_from_meminfo('SwapTotal') -
                                   utils.read_from_meminfo('SwapFree'))
    return keyvals
def run_once(self):
    """Check that SMM is locked, via MSR on Stoney or a helper binary.

    On ARM the test is skipped. On Stoney the SMMLock MSR is verified
    directly; on other x86 CPUs an external checker binary is run and
    its output inspected.

    @raises error.TestFail: on any reported SMM lock violation.
    """
    cpu_arch = utils.get_cpu_arch()
    if cpu_arch == "arm":
        # SMM is an x86 concept; nothing to verify on ARM.
        logging.debug('ok: skipping SMM test for %s.', cpu_arch)
        return

    cpu_arch = power_utils.get_x86_cpu_arch()
    if cpu_arch == 'Stoney':
        # The SMM registers (MSRC001_0112 and MSRC001_0113) can be
        # locked from being altered by setting MSRC001_0015[SmmLock].
        # Bit 0 : 1=SMM code in the ASeg and TSeg range and the SMM
        #         registers are read-only and SMI interrupts are
        #         not intercepted in SVM for Stoney.
        Stoney_SMMLock = {'0xc0010015': [('0', 1)]}
        self._registers = power_utils.Registers()
        errors = self._registers.verify_msr(Stoney_SMMLock)
        # BUG FIX: the original raised error.TestFail(r.stdout) here,
        # but `r` is only defined in the else branch below, so a
        # failure produced a NameError instead of a TestFail.
        if errors:
            logging.error('Test Failed for %s', cpu_arch)
            raise error.TestFail('SMM registers are not locked on %s.' %
                                 cpu_arch)
    else:
        r = utils.run("%s/%s" % (self.srcdir, self.executable),
                      stdout_tee=utils.TEE_TO_LOGS,
                      stderr_tee=utils.TEE_TO_LOGS,
                      ignore_status=True)
        if r.exit_status != 0 or len(r.stderr) > 0:
            raise error.TestFail(r.stderr)
        if 'skipping' in r.stdout:
            logging.debug(r.stdout)
            return
        if 'ok' not in r.stdout:
            raise error.TestFail(r.stdout)
def run_once(self):
    # Target page and the JS expression polled to detect test completion.
    url = 'http://azlaba29.mtv.corp.google.com:9380/auto/google3/java/'\
          'com/google/caribou/ui/pinto/modules/auto/tests/'\
          'latencytest_auto.html'
    js_expr = 'domAutomationController.send(!!window.G_testRunner'\
              '&& window.G_testRunner.isFinished())'
    # timeout is in ms, so allow a 5 minute timeout
    # as of jan-11 it normally takes about 2 minutes on x86-mario
    timeout = 5 * 60 * 1000
    os.chdir(self.bindir)
    # Select correct binary.
    cpuType = utils.get_cpu_arch()
    url_fetch_test = 'url_fetch_test'
    if cpuType == "arm":
        url_fetch_test += '.arm'
    # Stop chrome from restarting and kill login manager.
    try:
        # NOTE(review): orig_pid appears unused in this chunk; it is
        # presumably compared later to detect a session-manager restart.
        orig_pid = utils.system_output('pgrep %s' %
                                       constants.SESSION_MANAGER)
        # Creating the magic file prevents the browser from being
        # relaunched; only the file's existence matters.
        open(constants.DISABLE_BROWSER_RESTART_MAGIC_FILE, 'w').close()
    except IOError, e:
        # If we cannot create the magic file we cannot guarantee the
        # browser stays down, so abort rather than run a flaky test.
        logging.debug(e)
        raise error.TestError('Failed to disable browser restarting.')
def run_once(self, expect_me_present=True):
    """Fail unless the ME is locked.

    @param expect_me_present: False means the system has no ME.

    @raises error.TestNAError: on ARM, or when no ME is found but one
            was expected.
    @raises error.TestFail: if software write-protect is on, if ME
            presence does not match expectations, or if an ME region
            turns out to be writable.
    """
    cpu_arch = utils.get_cpu_arch()
    if cpu_arch == "arm":
        raise error.TestNAError(
            'This test is not applicable, '
            'because an ARM device has been detected. '
            'ARM devices do not have an ME (Management Engine)')

    # If sw wp is on, and the ME regions are unlocked, they won't be
    # writable so will appear locked.
    if self.determine_sw_wp_status():
        raise error.TestFail('Software wp is enabled. Please disable '
                             'software wp prior to running this test.')

    # See if the system even has an ME, and whether we expected that.
    if self.has_ME():
        if not expect_me_present:
            raise error.TestFail('We expected no ME, but found one anyway')
    else:
        if expect_me_present:
            raise error.TestNAError("No ME found. That's probably wrong.")
        else:
            logging.info('We expected no ME and we have no ME, so pass.')
            return

    # Make sure manufacturing mode is off.
    self.check_manufacturing_mode()

    # Read the image using flashrom.
    self.flashrom(args=('-r', self.BIOS_FILE))

    # Use 'IFWI' fmap region as a proxy for a device which doesn't
    # have a dedicated ME region in the boot media.
    r = utils.run('dump_fmap', args=('-p', self.BIOS_FILE))
    is_IFWI_platform = r.stdout.find("IFWI") >= 0

    # Get the bios image and extract the ME components
    logging.info('Pull the ME components from the BIOS...')
    dump_fmap_args = ['-x', self.BIOS_FILE, 'SI_DESC']
    inaccessible_sections = []
    if is_IFWI_platform:
        inaccessible_sections.append('DEVICE_EXTENSION')
    else:
        inaccessible_sections.append('SI_ME')
    dump_fmap_args.extend(inaccessible_sections)
    utils.run('dump_fmap', args=tuple(dump_fmap_args))

    # So far, so good, but we need to be certain. Rather than parse what
    # flashrom tells us about the ME-related registers, we'll just try to
    # change the ME components. We shouldn't be able to.
    self.try_to_rewrite('SI_DESC')
    for sectname in inaccessible_sections:
        self.check_region_inaccessible(sectname)
def setup(self, tarball='ctcs2.tar.bz2', length='4h', tc_opt='-k',
          tcf_contents=None):
    """
    Builds the test suite, and sets up the control file that is going to
    be processed by the ctcs2 engine.

    @param tarball: CTCS2 tarball
    @param length: The amount of time we'll run the test suite
    @param tc_opt: Option(s) passed to the newburn generator (default
            '-k' skips the kernel compilation test).
    @param tcf_contents: If the user wants to specify the contents of
            the CTCS2 control file, he could do so trough this
            parameter. If this parameter is provided, length is ignored.
    """
    cerberus2_tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
    utils.extract_tarball_to_dir(cerberus2_tarball, self.srcdir)

    os.chdir(self.srcdir)
    # Apply patch to fix build problems on newer distros (absence of
    # asm/page.h include, and platform(32/64bit) related issues.
    p1 = 'patch -p1 < ../0001-Fix-CTCS2-Build.patch'
    utils.system(p1)
    if utils.get_cpu_arch() == 'x86_64':
        p2 = 'patch -p1 < ../0002-Fix-CTCS2-build-in-64-bit-boxes.patch'
        utils.system(p2)
    utils.make()

    # Here we define the cerberus suite control file that will be used.
    # It will be kept on the debug directory for further analysis.
    self.tcf_path = os.path.join(self.debugdir, 'autotest.tcf')

    if not tcf_contents:
        logging.info('Generating cerberus control file')
        # Note about the control file generation command - we are creating
        # a control file with the default tests, except for the kernel
        # compilation test (flag -k).
        g_cmd = './newburn-generator %s %s> %s' % \
                (tc_opt, length, self.tcf_path)
        utils.system(g_cmd)
    else:
        logging.debug('TCF file contents supplied, ignoring test length'
                      ' altogether')
        # BUG FIX: the original never closed this write handle, so the
        # re-read below only worked via CPython refcount finalization.
        # 'with' guarantees the data is flushed to disk first.
        with open(self.tcf_path, 'w') as tcf:
            tcf.write(tcf_contents)

    logging.debug('Contents of the control file that will be passed to'
                  ' CTCS 2:')
    with open(self.tcf_path, 'r') as tcf:
        logging.debug(tcf.read())
def _get_graphics_memory_usage():
    """Get the memory usage (in KB) of the graphics module.

    Reads the platform-specific GEM objects sysfs/debugfs file and
    returns the byte count from the first matching line, converted
    to KB.

    @raises error.TestError: for an unknown platform or unparsable file.
    """
    arch = utils.get_cpu_arch()
    try:
        path = GEM_OBJECTS_PATH[arch]
    except KeyError:
        raise error.TestError('unknown platform: %s' % arch)

    # Renamed from `input`, which shadowed the Python builtin.
    with open(path, 'r') as gem_file:
        for line in gem_file:
            result = GEM_OBJECTS_RE.match(line)
            if result:
                return int(result.group(2)) / 1024  # in KB
    raise error.TestError('Cannot parse the content')
def setup(self, tarball='ctcs2.tar.bz2', length='4h', tc_opt='-k',
          tcf_contents=None):
    """
    Builds the test suite, and sets up the control file that is going to
    be processed by the ctcs2 engine.

    @param tarball: CTCS2 tarball
    @param length: The amount of time we'll run the test suite
    @param tc_opt: Option(s) passed to the newburn generator (default
            '-k' skips the kernel compilation test).
    @param tcf_contents: If the user wants to specify the contents of
            the CTCS2 control file, he could do so trough this
            parameter. If this parameter is provided, length is ignored.
    """
    cerberus2_tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
    utils.extract_tarball_to_dir(cerberus2_tarball, self.srcdir)

    os.chdir(self.srcdir)
    # Apply patch to fix build problems on newer distros (absence of
    # asm/page.h include, and platform(32/64bit) related issues.
    p1 = 'patch -p1 < ../0001-Fix-CTCS2-Build.patch'
    utils.system(p1)
    if utils.get_cpu_arch() == 'x86_64':
        p2 = 'patch -p1 < ../0002-Fix-CTCS2-build-in-64-bit-boxes.patch'
        utils.system(p2)
    utils.make()

    # Here we define the cerberus suite control file that will be used.
    # It will be kept on the debug directory for further analysis.
    self.tcf_path = os.path.join(self.debugdir, 'autotest.tcf')

    if not tcf_contents:
        logging.info('Generating cerberus control file')
        # Note about the control file generation command - we are creating
        # a control file with the default tests, except for the kernel
        # compilation test (flag -k).
        g_cmd = './newburn-generator %s %s> %s' % \
                (tc_opt, length, self.tcf_path)
        utils.system(g_cmd)
    else:
        logging.debug('TCF file contents supplied, ignoring test length'
                      ' altogether')
        # BUG FIX: the original never closed this write handle, so the
        # re-read below only worked via CPython refcount finalization.
        # 'with' guarantees the data is flushed to disk first.
        with open(self.tcf_path, 'w') as tcf:
            tcf.write(tcf_contents)

    logging.debug('Contents of the control file that will be passed to'
                  ' CTCS 2:')
    with open(self.tcf_path, 'r') as tcf:
        logging.debug(tcf.read())
def run_once(self):
    """Run the SMM-lock checker binary and fail on any reported error."""
    arch = utils.get_cpu_arch()
    if arch == "arm":
        # SMM is an x86 concept; there is nothing to check on ARM.
        logging.debug('ok: skipping SMM test for %s.', arch)
        return

    checker = "%s/%s" % (self.srcdir, self.executable)
    result = utils.run(checker,
                       stdout_tee=utils.TEE_TO_LOGS,
                       stderr_tee=utils.TEE_TO_LOGS,
                       ignore_status=True)

    # Any non-zero exit or stderr output is an outright failure.
    binary_failed = result.exit_status != 0 or len(result.stderr) > 0
    if binary_failed:
        raise error.TestFail(result.stderr)

    # The checker may decide the test is not applicable.
    if 'skipping' in result.stdout:
        logging.debug(result.stdout)
        return

    # Otherwise an explicit 'ok' must appear in the output.
    if 'ok' not in result.stdout:
        raise error.TestFail(result.stdout)
def run_once(self):
    """Record total system memory and fail if below the arch minimum."""
    # TODO(zmo@): this may not get total physical memory size on ARM
    # or some x86 machines.
    total_kb = utils.memtotal()
    total_gb = total_kb / 1024.0 / 1024.0
    self.write_perf_keyval({"gb_memory_total": total_gb})
    logging.info("MemTotal: %.3f GB" % total_gb)

    # x86 and ARM SDRAM configurations differ significantly from each other.
    # Use a value specific to the architecture.
    # On x86, I see 1.85GiB (2GiB - reserved memory).
    # On ARM, I see 0.72GiB (1GiB - 256MiB carveout).
    arch = utils.get_cpu_arch()
    limit = 0.65 if arch == "arm" else 1.65

    if total_gb <= limit:
        raise error.TestFail("total system memory size < %.3f GB" % limit)
def __init__(self, raise_error_on_hang=True):
    """ Analyzes the initial state of the GPU and log history.

    Records pre-existing GPU hang messages so finalize() can tell old
    hangs apart from ones that occur during the test.

    @param raise_error_on_hang: raise an error (not just a warning)
            when a new hang is detected later.
    """
    # Attempt flushing system logs every second instead of every 10 minutes.
    self.dirty_writeback_centisecs = utils.get_dirty_writeback_centisecs()
    utils.set_dirty_writeback_centisecs(100)
    self._raise_error_on_hang = raise_error_on_hang
    logging.info(utils.get_board_with_frequency_and_memory())
    self.graphics_kernel_memory = GraphicsKernelMemory()

    if utils.get_cpu_arch() != 'arm':
        if is_sw_rasterizer():
            raise error.TestFail('Refusing to run on SW rasterizer.')
        logging.info('Initialize: Checking for old GPU hangs...')
        # BUG FIX: use 'with' so the log file is closed even if a check
        # below raises (the original leaked the handle on exception).
        with open(self._MESSAGES_FILE, 'r') as messages:
            for line in messages:
                for hang in self._HANGCHECK:
                    if hang in line:
                        logging.info(line)
                        # NOTE(review): self.existing_hangs is presumably
                        # initialized elsewhere (e.g. as a class
                        # attribute) -- confirm before refactoring.
                        self.existing_hangs[line] = line
def finalize(self):
    """ Analyzes the state of the GPU, log history and emits warnings or
    errors if the state changed since initialize. Also makes a note of the
    Chrome version for later usage in the perf-dashboard.

    @raises error.TestError: on a new GPU hang when
            _raise_error_on_hang is set.
    @raises error.TestWarn: on a new GPU hang or hang warning otherwise.
    @raises error.TestFail: if the test finished on the SW rasterizer.
    """
    utils.set_dirty_writeback_centisecs(self.dirty_writeback_centisecs)
    new_gpu_hang = False
    new_gpu_warning = False
    if utils.get_cpu_arch() != 'arm':
        logging.info('Cleanup: Checking for new GPU hangs...')
        # BUG FIX: use 'with' so the log file is closed even on an
        # exception (the original leaked the handle in that case).
        with open(self._MESSAGES_FILE, 'r') as messages:
            for line in messages:
                for hang in self._HANGCHECK:
                    if hang in line:
                        # Only hangs not seen during initialize count.
                        if line not in self.existing_hangs:
                            logging.info(line)
                            for warn in self._HANGCHECK_WARNING:
                                if warn in line:
                                    new_gpu_warning = True
                                    logging.warning(
                                        'Saw GPU hang warning during test.')
                                else:
                                    logging.warning(
                                        'Saw GPU hang during test.')
                                    new_gpu_hang = True

        if not self._run_on_sw_rasterizer and is_sw_rasterizer():
            logging.warning('Finished test on SW rasterizer.')
            raise error.TestFail('Finished test on SW rasterizer.')
    if self._raise_error_on_hang and new_gpu_hang:
        raise error.TestError('Detected GPU hang during test.')
    if new_gpu_hang:
        raise error.TestWarn('Detected GPU hang during test.')
    if new_gpu_warning:
        raise error.TestWarn('Detected GPU warning during test.')
def run_once(self, rootdir="/", args=[]):
    """
    Do a find for all the ELF files on the system.
    For each one, test for compiler options that should have been used
    when compiling the file.

    For missing compiler options, print the files.
    """
    # NOTE(review): args=[] is a mutable default, but it is never
    # mutated here (parse_args rebinds the name), so it is harmless.
    parser = OptionParser()
    parser.add_option('--hardfp',
                      dest='enable_hardfp',
                      default=False,
                      action='store_true',
                      help='Whether to check for hardfp binaries.')
    (options, args) = parser.parse_args(args)

    option_sets = []

    libc_glob = "/lib/libc-[0-9]*"

    readelf_cmd = glob.glob("/usr/local/*/binutils-bin/*/readelf")[0]

    # We do not test binaries if they are built with Address Sanitizer
    # because it is a separate testing tool.
    # Empty output means __asan_init was found (the `|| echo` never ran),
    # i.e. ASAN is in use and all checks are skipped.
    no_asan_used = utils.system_output("%s -s "
                                       "/opt/google/chrome/chrome | "
                                       "egrep -q \"__asan_init\" || "
                                       "echo no ASAN" % readelf_cmd)
    if not no_asan_used:
        logging.debug("ASAN detected on /opt/google/chrome/chrome. "
                      "Will skip all checks.")
        return

    # Check that gold was used to build binaries.
    # TODO(jorgelo): re-enable this check once crbug.com/417912 is fixed.
    # gold_cmd = ("%s -S {} 2>&1 | "
    #             "egrep -q \".note.gnu.gold-ve\"" % readelf_cmd)
    # gold_find_options = ""
    # if utils.get_cpu_arch() == "arm":
    #     # gold is only enabled for Chrome on ARM.
    #     gold_find_options = "-path \"/opt/google/chrome/chrome\""
    # gold_whitelist = os.path.join(self.bindir, "gold_whitelist")
    # option_sets.append(self.create_and_filter("gold",
    #                                           gold_cmd,
    #                                           gold_whitelist,
    #                                           gold_find_options))

    # Verify non-static binaries have BIND_NOW in dynamic section.
    now_cmd = ("(%s {} | grep -q statically) ||"
               "%s -d {} 2>&1 | "
               "egrep -q \"BIND_NOW\"" % (FILE_CMD, readelf_cmd))
    now_whitelist = os.path.join(self.bindir, "now_whitelist")
    option_sets.append(self.create_and_filter("-Wl,-z,now",
                                              now_cmd,
                                              now_whitelist))

    # Verify non-static binaries have RELRO program header.
    relro_cmd = ("(%s {} | grep -q statically) ||"
                 "%s -l {} 2>&1 | "
                 "egrep -q \"GNU_RELRO\"" % (FILE_CMD, readelf_cmd))
    relro_whitelist = os.path.join(self.bindir, "relro_whitelist")
    option_sets.append(self.create_and_filter("-Wl,-z,relro",
                                              relro_cmd,
                                              relro_whitelist))

    # Verify non-static binaries are dynamic (built PIE).
    pie_cmd = ("(%s {} | grep -q statically) ||"
               "%s -l {} 2>&1 | "
               "egrep -q \"Elf file type is DYN\"" % (FILE_CMD,
                                                     readelf_cmd))
    pie_whitelist = os.path.join(self.bindir, "pie_whitelist")
    option_sets.append(self.create_and_filter("-fPIE",
                                              pie_cmd,
                                              pie_whitelist))

    # Verify ELFs don't include TEXTRELs.
    # FIXME: Remove the i?86 filter after the bug is fixed.
    # crbug.com/686926
    if (utils.get_current_kernel_arch() not in
        ('i%d86' % i for i in xrange(3,7))):
        textrel_cmd = ("(%s {} | grep -q statically) ||"
                       "%s -d {} 2>&1 | "
                       "(egrep -q \"0x0+16..TEXTREL\"; [ $? -ne 0 ])"
                       % (FILE_CMD, readelf_cmd))
        textrel_whitelist = os.path.join(self.bindir, "textrel_whitelist")
        option_sets.append(self.create_and_filter("TEXTREL",
                                                  textrel_cmd,
                                                  textrel_whitelist))

    # Verify all binaries have non-exec STACK program header.
    stack_cmd = ("%s -lW {} 2>&1 | "
                 "egrep -q \"GNU_STACK.*RW \"" % readelf_cmd)
    stack_whitelist = os.path.join(self.bindir, "stack_whitelist")
    option_sets.append(self.create_and_filter("Executable Stack",
                                              stack_cmd,
                                              stack_whitelist))

    # Verify all binaries have W^X LOAD program headers.
    loadwx_cmd = ("%s -lW {} 2>&1 | "
                  "grep \"LOAD\" | egrep -v \"(RW |R E)\" | "
                  "wc -l | grep -q \"^0$\"" % readelf_cmd)
    loadwx_whitelist = os.path.join(self.bindir, "loadwx_whitelist")
    option_sets.append(self.create_and_filter("LOAD Writable and Exec",
                                              loadwx_cmd,
                                              loadwx_whitelist))

    # Verify ARM binaries are all using VFP registers.
    if (options.enable_hardfp and utils.get_cpu_arch() == 'arm'):
        hardfp_cmd = ("%s -A {} 2>&1 | "
                      "egrep -q \"Tag_ABI_VFP_args: VFP registers\"" %
                      readelf_cmd)
        hardfp_whitelist = os.path.join(self.bindir, "hardfp_whitelist")
        option_sets.append(self.create_and_filter("hardfp",
                                                  hardfp_cmd,
                                                  hardfp_whitelist))

    fail_msg = ""

    # There is currently no way to clear binary prebuilts for all devs.
    # Thus, when a new check is added to this test, the test might fail
    # for users who have old prebuilts which have not been compiled
    # in the correct manner.
    fail_summaries = []
    full_msg = "Test results:"
    num_fails = 0
    for cos in option_sets:
        if len(cos.filtered_set):
            num_fails += 1
            fail_msg += cos.get_fail_message() + "\n"
            fail_summaries.append(cos.get_fail_summary_message())
        full_msg += str(cos) + "\n\n"
    fail_summary_msg = ", ".join(fail_summaries)

    logging.error(fail_msg)
    logging.debug(full_msg)
    if num_fails:
        raise error.TestFail(fail_summary_msg)
def run_once(self):
    """Validate /proc/meminfo values and SPD RAM timings against
    per-platform reference minimums, recording everything as keyvals.
    """
    errors = 0
    keyval = dict()

    # The total memory will shrink if the system bios grabs more of the
    # reserved memory. We derived the value below by giving a small
    # cushion to allow for more system BIOS usage of ram. The memref value
    # is driven by the supported netbook model with the least amount of
    # total memory. ARM and x86 values differ considerably.
    cpuType = utils.get_cpu_arch()
    memref = 986392
    vmemref = 102400
    if cpuType == "arm":
        memref = 700000
        vmemref = 210000
    speedref = 1333
    os_reserve = 600000

    # size reported in /sys/block/zram0/disksize is in byte
    swapref = int(utils.read_one_line(self.swap_disksize_file)) / 1024

    less_refs = ['MemTotal', 'MemFree', 'VmallocTotal']
    approx_refs = ['SwapTotal']

    # read physical HW size from mosys and adjust memref if need
    cmd = 'mosys memory spd print geometry -s size_mb'
    phy_size_run = utils.run(cmd)
    phy_size = 0
    for line in phy_size_run.stdout.split():
        phy_size += int(line)
    # memref is in KB but phy_size is in MB
    phy_size *= 1024
    keyval['PhysicalSize'] = phy_size
    memref = max(memref, phy_size - os_reserve)
    freeref = memref / 2

    # Special rule for free memory size for parrot and butterfly
    board = utils.get_board()
    if board.startswith('parrot'):
        freeref = 100 * 1024
    elif board.startswith('butterfly'):
        freeref = freeref - 400 * 1024
    elif board.startswith('rambi') or board.startswith('expresso'):
        logging.info('Skipping test on rambi and expresso, '
                     'see crbug.com/411401')
        return

    ref = {
        'MemTotal': memref,
        'MemFree': freeref,
        'SwapTotal': swapref,
        'VmallocTotal': vmemref,
    }

    logging.info('board: %s, phy_size: %d memref: %d freeref: %d',
                 board, phy_size, memref, freeref)

    error_list = []

    for k in ref:
        value = utils.read_from_meminfo(k)
        keyval[k] = value
        if k in less_refs:
            # These fields only need to meet a minimum.
            if value < ref[k]:
                logging.warning('%s is %d', k, value)
                logging.warning('%s should be at least %d', k, ref[k])
                errors += 1
                error_list += [k]
        elif k in approx_refs:
            # These fields must be within 10% of the reference.
            if value < ref[k] * 0.9 or ref[k] * 1.1 < value:
                logging.warning('%s is %d', k, value)
                logging.warning('%s should be within 10%% of %d', k,
                                ref[k])
                errors += 1
                error_list += [k]

    # read spd timings
    cmd = 'mosys memory spd print timings -s speeds'
    # result example
    # DDR3-800, DDR3-1066, DDR3-1333, DDR3-1600
    pattern = '[A-Z]*DDR([3-9]|[1-9]\d+)[A-Z]*-(?P<speed>\d+)'
    timing_run = utils.run(cmd)
    keyval['speedref'] = speedref
    for dimm, line in enumerate(timing_run.stdout.split('\n')):
        if not line:
            continue
        # The highest supported speed is the last entry on the line.
        max_timing = line.split(', ')[-1]
        keyval['timing_dimm_%d' % dimm] = max_timing
        m = re.match(pattern, max_timing)
        if not m:
            logging.warning('Error parsing timings for dimm #%d (%s)',
                            dimm, max_timing)
            errors += 1
            continue
        logging.info('dimm #%d timings: %s', dimm, max_timing)
        max_speed = int(m.group('speed'))
        keyval['speed_dimm_%d' % dimm] = max_speed
        if max_speed < speedref:
            logging.warning('ram speed is %s', max_timing)
            logging.warning('ram speed should be at least %d', speedref)
            error_list += ['speed_dimm_%d' % dimm]
            errors += 1

    # If self.error is not zero, there were errors.
    if errors > 0:
        error_list_str = ', '.join(error_list)
        raise error.TestFail('Found incorrect values: %s' % error_list_str)

    self.write_perf_keyval(keyval)
def run_once(self):
    """Runs the test.

    Compares each file under /sys/devices/system/cpu/vulnerabilities
    against the per-arch baseline in self.TESTS, skipping mitigations
    introduced after the DUT's kernel version, and fails if any
    expected mitigation is missing.
    """
    arch = utils.get_cpu_arch()
    if arch == 'x86_64':
        # x86_64 baselines are keyed by SoC family rather than arch.
        arch = utils.get_cpu_soc_family()
    curr_kernel = utils.get_kernel_version()

    logging.debug('CPU arch is "%s"', arch)
    logging.debug('Kernel version is "%s"', curr_kernel)

    if arch not in self.TESTS:
        raise error.TestNAError('"%s" arch not in test baseline' % arch)

    # Kernels <= 3.14 don't have this directory and are expected to abort
    # with TestNA.
    if not os.path.exists(self.SYSTEM_CPU_VULNERABILITIES):
        raise error.TestNAError('"%s" directory not present, not testing' %
                                self.SYSTEM_CPU_VULNERABILITIES)

    failures = []

    for filename, expected in self.TESTS[arch].items():
        file = os.path.join(self.SYSTEM_CPU_VULNERABILITIES, filename)
        if not os.path.exists(file):
            raise error.TestError('"%s" file does not exist, cannot test' %
                                  file)

        # expected is (min_kernel_version, set_of_expected_mitigations).
        min_kernel = expected[0]
        if utils.compare_versions(curr_kernel, min_kernel) == -1:
            # The kernel on the DUT is older than the version where
            # the mitigation was introduced.
            info_message = 'DUT kernel version "%s"' % curr_kernel
            info_message += ' is older than "%s"' % min_kernel
            info_message += ', skipping "%s" test' % filename
            logging.info(info_message)
            continue

        # E.g.:
        # Not affected
        # $ cat /sys/devices/system/cpu/vulnerabilities/meltdown
        # Not affected
        #
        # One mitigation
        # $ cat /sys/devices/system/cpu/vulnerabilities/meltdown
        # Mitigation: PTI
        #
        # Several mitigations
        # $ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
        # Mitigation: Full generic retpoline, IBPB, IBRS_FW
        with open(file) as f:
            lines = f.readlines()
            if len(lines) > 1:
                logging.warning('"%s" has more than one line', file)
            actual = lines[0].strip()
            logging.debug('"%s" -> "%s"', file, actual)

        expected_mitigations = expected[1]
        if not expected_mitigations:
            # Baseline says this CPU should not be affected at all.
            if actual != 'Not affected':
                failures.append((file, actual, expected_mitigations))
        else:
            # CPU is affected.
            if 'Mitigation' not in actual:
                failures.append((file, actual, expected_mitigations))
            else:
                mit_list = actual.split(':', 1)[1].split(',')
                actual_mitigations = set(t.strip() for t in mit_list)
                # Test set inclusion.
                if actual_mitigations < expected_mitigations:
                    failures.append((file, actual_mitigations,
                                     expected_mitigations))

    if failures:
        for failure in failures:
            logging.error('"%s" was "%s", expected "%s"', *failure)
        raise error.TestFail('CPU vulnerabilities not mitigated properly')
def run_once(self, force_suspend_to_idle=False):
    """Main test method.

    Checks x86_64 S0ix (suspend-to-idle) prerequisites, performs one
    suspend, and verifies DC6/S0ix/C10/RC6 residency counters advanced.

    @param force_suspend_to_idle: suspend to idle even when the system
            default sleep state is suspend-to-RAM.
    """
    if utils.get_cpu_arch() != 'x86_64':
        raise error.TestNAError('This test only supports x86_64 CPU.')

    if power_utils.get_sleep_state() != 'freeze':
        if not force_suspend_to_idle:
            raise error.TestNAError(
                'System default config is not suspend to idle.')
        else:
            logging.info('System default config is suspend to ram. '
                         'Force suspend to idle')

    self._error_count = 0
    self._error_message = []
    dmc_firmware_stats = None
    s0ix_residency_stats = None
    cpu_packages_stats = None
    rc6_residency_stats = None

    # NOTE(review): _log_error_message presumably catches the TestFail
    # raised inside each block and tallies it into _error_count /
    # _error_message so all checks run -- confirm in its definition.
    with self._log_error_message():
        dmc_firmware_stats = power_status.DMCFirmwareStats()
        if not dmc_firmware_stats.check_fw_loaded():
            raise error.TestFail('DMC firmware not loaded.')

    with self._log_error_message():
        pch_powergating_stats = power_status.PCHPowergatingStats()
        pch_powergating_stats.read_pch_powergating_info()
        if not pch_powergating_stats.check_s0ix_requirement():
            raise error.TestFail('PCH powergating check failed.')

    # Snapshot the residency counters before suspending.
    with self._log_error_message():
        s0ix_residency_stats = power_status.S0ixResidencyStats()

    with self._log_error_message():
        cpu_packages_stats = power_status.CPUPackageStats()

    with self._log_error_message():
        rc6_residency_stats = power_status.RC6ResidencyStats()

    with self._log_error_message():
        suspender = power_suspend.Suspender(self.resultsdir,
                                            suspend_state='freeze')
        suspender.suspend()

    # Each accumulated counter must have advanced during the suspend.
    with self._log_error_message():
        if (dmc_firmware_stats and dmc_firmware_stats.is_dc6_supported() and
                dmc_firmware_stats.get_accumulated_dc6_entry() <= 0):
            raise error.TestFail('DC6 entry check failed.')

    with self._log_error_message():
        if (s0ix_residency_stats and
                s0ix_residency_stats.get_accumulated_residency_secs() <= 0):
            raise error.TestFail('S0ix residency check failed.')

    with self._log_error_message():
        if (cpu_packages_stats and
                cpu_packages_stats.refresh().get('C10', 0) <= 0):
            raise error.TestFail('C10 state check failed.')

    with self._log_error_message():
        if (rc6_residency_stats and
                rc6_residency_stats.get_accumulated_residency_secs() <= 0):
            raise error.TestFail('RC6 residency check failed.')

    if self._error_count > 0:
        raise error.TestFail('Found %d errors: ' % self._error_count,
                             ', '.join(self._error_message))