def _check_collection_failure(self, test_option, failure_string):
    """Force a crash-collection failure and verify it is handled sanely.

    Temporarily appends @test_option to the kernel core_pattern so that
    the collector fails, crashes a test process, and checks that the
    failure is logged, that a log (not a minidump) is produced, and that
    the resulting report would be sent with the collection-error
    signature.

    @param test_option: extra core_pattern argument that makes the
            crash collector fail.
    @param failure_string: error text the failed collection must log.
    """
    # Add parameter to core_pattern.
    # [:-1] drops the trailing newline read from the core_pattern file.
    old_core_pattern = utils.read_file(self._CORE_PATTERN)[:-1]
    try:
        utils.system('echo "%s %s" > %s' %
                     (old_core_pattern, test_option, self._CORE_PATTERN))
        result = self._run_crasher_process_and_analyze('root', consent=True)
        self._check_crashed_and_caught(result)
        if not self._log_reader.can_find(failure_string):
            raise error.TestFail('Did not find fail string in log %s' %
                                 failure_string)
        # A failed collection must fall back to a log file, never a
        # minidump.
        if result['minidump']:
            raise error.TestFail('failed collection resulted in minidump')
        if not result['log']:
            raise error.TestFail('failed collection had no log')
        log_contents = utils.read_file(result['log'])
        logging.debug('Log contents were: ' + log_contents)
        if not failure_string in log_contents:
            raise error.TestFail('Expected logged error '
                                 '\"%s\" was \"%s\"' %
                                 (failure_string, log_contents))
        # Verify we are generating appropriate diagnostic output.
        if ((not '===ps output===' in log_contents) or
                (not '===meminfo===' in log_contents)):
            raise error.TestFail('Expected full logs, got: ' + log_contents)
        self._check_generated_report_sending(result['meta'], result['log'],
                                             'root', result['basename'],
                                             'log',
                                             _COLLECTION_ERROR_SIGNATURE)
    finally:
        # Always restore the original core_pattern, even on failure.
        utils.system('echo "%s" > %s' % (old_core_pattern,
                                         self._CORE_PATTERN))
def _test_crash_logs_creation(self):
    """Verify crash log creation for a specially named crasher binary.

    Runs a crasher whose basename ('crash_log_test') is expected to match
    a crash_reporter_logs.conf rule, then checks the crash log contents
    and that the .meta file references the log.
    """
    # Copy and rename crasher to trigger crash_reporter_logs.conf rule.
    # NOTE(review): no copy actually happens here — only the destination
    # path is built. A sibling variant of this test does
    # 'cp -a <crasher> <logs_triggering_crasher>' first. Confirm that
    # _run_crasher_process_and_analyze() stages the binary at
    # crasher_path; otherwise this runs a nonexistent file.
    logs_triggering_crasher = os.path.join(os.path.dirname(self.bindir),
                                           'crash_log_test')
    result = self._run_crasher_process_and_analyze(
        'root', crasher_path=logs_triggering_crasher)
    self._check_crashed_and_caught(result)
    contents = utils.read_file(result['log'])
    # Expected crash log contents produced by the matched logs.conf rule.
    if contents != 'hello world\n':
        raise error.TestFail('Crash log contents unexpected: %s' % contents)
    # The meta file must point at the collected log.
    if not ('log=' + result['log']) in utils.read_file(result['meta']):
        raise error.TestFail('Meta file does not reference log')
def _test_crash_logs_creation(self):
    """Verify that crashing a specially named binary creates a crash log.

    Stages a copy of the crasher under a name that matches a
    crash_reporter_logs.conf rule, crashes it, and checks both the log
    contents and the meta file's reference to the log.
    """
    destination = os.path.join(os.path.dirname(self.bindir),
                               'crash_log_test')
    # -a preserves mode/ownership so the staged copy crashes the same way.
    utils.system('cp -a "%s" "%s"' % (self._crasher_path, destination))
    result = self._run_crasher_process_and_analyze(
        'root', crasher_path=destination)
    self._check_crashed_and_caught(result)

    log_contents = utils.read_file(result['log'])
    if log_contents != 'hello world\n':
        raise error.TestFail('Crash log contents unexpected: %s' %
                             log_contents)

    meta_contents = utils.read_file(result['meta'])
    if ('log=' + result['log']) not in meta_contents:
        raise error.TestFail('Meta file does not reference log')
def _hwclock_ts(self, not_before, retries=3):
    """Read the RTC resume timestamp saved by powerd_suspend.

    Polls HWCLOCK_FILE for a timestamp at or after @not_before. If a
    timestamp before @not_before is seen the wake is treated as early,
    and known-spurious S3 wakes (matched against
    sys_power.SpuriousWakeupError.S3_WHITELIST) are tolerated.

    @param not_before: earliest acceptable resume time, seconds since
            the epoch.
    @param retries: extra polling attempts before giving up.
    @return: resume time in seconds (float), or None for a whitelisted
            spurious wake or a known RTC-read failure board.
    @raise sys_power.SpuriousWakeupError: non-whitelisted early wake.
    @raise error.TestError: RTC timestamp present but unparseable.
    """
    for retry in xrange(retries + 1):
        early_wakeup = False
        if os.path.exists(self.HWCLOCK_FILE):
            # TODO(crbug.com/733773): Still fragile see bug.
            # Expected shape: "<date time>(.frac)(+|-)HH:MM" — group(1)
            # is the whole-second part, group(2) the fraction.
            match = re.search(r'(.+)(\.\d+)[+-]\d+:?\d+$',
                              utils.read_file(self.HWCLOCK_FILE),
                              re.DOTALL)
            if match:
                timeval = time.strptime(match.group(1),
                                        "%Y-%m-%d %H:%M:%S")
                seconds = time.mktime(timeval)
                seconds += float(match.group(2))
                logging.debug('RTC resume timestamp read: %f', seconds)
                if seconds >= not_before:
                    return seconds
                # Timestamp predates the suspend: woke too early.
                early_wakeup = True
        # Back off a little more on each retry (0 on the first pass).
        time.sleep(0.05 * retry)
    if early_wakeup:
        logging.debug('Early wakeup, dumping eventlog if it exists:\n')
        elog = utils.system_output('mosys eventlog list | tail -n %d' %
                                   self._RELEVANT_EVENTLOG_LINES,
                                   ignore_status=True)
        # Last 'Wake Source' entry, or 'unknown' if none was logged.
        wake_elog = (['unknown'] + re.findall(r'Wake Source.*', elog))[-1]
        # Most recent PM1_STS wake line from the collected syslog lines.
        for line in reversed(self._logs):
            match = re.search(r'PM1_STS: WAK.*', line)
            if match:
                wake_syslog = match.group(0)
                break
        else:
            wake_syslog = 'unknown'
        # (board, eventlog, syslog) regex triples of known-spurious wakes.
        for b, e, s in sys_power.SpuriousWakeupError.S3_WHITELIST:
            if (re.search(b, utils.get_board()) and
                    re.search(e, wake_elog) and
                    re.search(s, wake_syslog)):
                logging.warning('Whitelisted spurious wake in S3: %s | %s',
                                wake_elog, wake_syslog)
                return None
        raise sys_power.SpuriousWakeupError('Spurious wake in S3: %s | %s'
                                            % (wake_elog, wake_syslog))
    # Known RTC-read-failure boards: tolerate and dump nvram for debug.
    if self._get_board() in ['lumpy', 'stumpy', 'kiev']:
        logging.debug('RTC read failure (crosbug/36004), dumping nvram:\n' +
                      utils.system_output('mosys nvram dump',
                                          ignore_status=True))
        return None
    raise error.TestError('Broken RTC timestamp: ' +
                          utils.read_file(self.HWCLOCK_FILE))
def run_once(self):
    """Verify ownership is re-taken with a fresh key on owner sign-in.

    Pushes an initial device policy signed with the known mock owner
    key, confirms the key on disk is unchanged, then starts a new owner
    session and verifies that a new (different) owner key and a
    retrievable, matching policy are generated.
    """
    pkey = ownership.known_privkey()
    pubkey = ownership.known_pubkey()
    # Pre-configure some owner settings, including initial key.
    poldata = policy.build_policy_data(self.srcdir,
                                       owner=ownership.TESTUSER,
                                       guests=False,
                                       new_users=True,
                                       roaming=True,
                                       whitelist=(ownership.TESTUSER,
                                                  '[email protected]'))
    policy_string = policy.generate_policy(self.srcdir, pkey, pubkey,
                                           poldata)
    policy.push_policy_and_verify(policy_string, self._sm)

    # grab key, ensure that it's the same as the known key.
    if (utils.read_file(constants.OWNER_KEY_FILE) != pubkey):
        raise error.TestFail('Owner key should not have changed!')

    # Start a new session, which will trigger the re-taking of ownership.
    listener = session_manager.OwnershipSignalListener(gobject.MainLoop())
    listener.listen_for_new_key_and_policy()
    self._cryptohome_proxy.mount(ownership.TESTUSER,
                                 ownership.TESTPASS,
                                 create=True)
    if not self._sm.StartSession(ownership.TESTUSER, ''):
        raise error.TestError('Could not start session for owner')
    # Blocks until both the new-key and new-policy signals arrive.
    listener.wait_for_signals(desc='Re-taking of ownership complete.')

    # grab key, ensure that it's different than known key
    if (utils.read_file(constants.OWNER_KEY_FILE) == pubkey):
        raise error.TestFail('Owner key should have changed!')

    # RetrievePolicy, check sig against new key, check properties
    retrieved_policy = self._sm.RetrievePolicy(byte_arrays=True)
    if retrieved_policy is None:
        raise error.TestError('Policy not found')
    policy.compare_policy_response(self.srcdir, retrieved_policy,
                                   owner=ownership.TESTUSER,
                                   guests=False,
                                   new_users=True,
                                   roaming=True,
                                   whitelist=(ownership.TESTUSER,
                                              '[email protected]'))
def start(self, test):
    """
    Start the ftrace profiler via trace-cmd.

    Resets tracing state, launches 'trace-cmd record' in the background
    writing to <profdir>/ftrace/trace.dat, and blocks until the kernel
    reports tracing is enabled (or trace-cmd dies, which is an error).

    @param test: Autotest test in which the profiler will operate on.
    """
    # Make sure debugfs is mounted and tracing disabled.
    utils.system('%s reset' % self.trace_cmd)

    out_dir = os.path.join(test.profdir, 'ftrace')
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    self.output = os.path.join(out_dir, 'trace.dat')

    record_cmd = [self.trace_cmd, 'record', '-o', self.output]
    record_cmd += self.trace_cmd_args
    self.record_job = utils.BgJob(self.join_command(record_cmd),
                                  stderr_tee=utils.TEE_TO_LOGS)

    # Wait for tracing to be enabled. If trace-cmd dies before enabling
    # tracing, then there was a problem.
    tracing_on = os.path.join(self.tracing_dir, 'tracing_on')
    while True:
        if self.record_job.sp.poll() is not None:
            break
        if utils.read_file(tracing_on).strip() == '1':
            break
        time.sleep(0.1)
    if self.record_job.sp.poll() is not None:
        utils.join_bg_jobs([self.record_job])
        raise error.CmdError(self.record_job.command,
                             self.record_job.sp.returncode,
                             'trace-cmd exited early.')
def _log_remove_if_exists(self, filename, message):
    """Consume @filename, logging its contents as a shutdown problem.

    The file is removed once read. For the killed-processes log, lines
    whose command is in PROCESS_WHITELIST are filtered out first; if
    only the 'COMMAND PID USER ...' header remains, nothing is reported.

    @param filename: log file to read and delete, if present.
    @param message: short description recorded in self._errors.
    """
    if not os.path.exists(filename):
        return
    contents = utils.read_file(filename).strip()
    os.remove(filename)

    if filename == SHUTDOWN_KILLED_PROCESSES_LOG:
        # Drop whitelisted processes; the first line is the lsof header.
        remaining = [line for line in contents.splitlines()
                     if not line.startswith(PROCESS_WHITELIST)]
        if len(remaining) <= 1:
            # Only the header (or nothing) left: not a problem.
            return
        contents = '\n'.join(remaining)

    logging.error('Last shutdown problem: %s. Detailed output was:\n%s' %
                  (message, contents))
    self._errors.append(message)
def get_x86_cpu_arch():
    """Identify CPU architectural type.

    Intel's processor naming conventions is a mine field of
    inconsistencies. Armed with that, this method simply tries to
    identify the architecture of systems we care about.

    TODO(tbroch) grow method to cover processors numbers outlined in:
        http://www.intel.com/content/www/us/en/processors/processor-numbers.html
        perhaps returning more information ( brand, generation, features )

    Returns:
        String, explicitly (Atom, Core, Celeron) or None
    """
    cpuinfo = utils.read_file('/proc/cpuinfo')

    # Ordered: the specific Celeron N-series patterns must be tried
    # before the generic Celeron pattern.
    arch_patterns = (
        (r'Intel.*Atom.*[NZ][2-6]', 'Atom'),
        (r'Intel.*Celeron.*N2[89][0-9][0-9]', 'Celeron N2000'),
        (r'Intel.*Celeron.*N3[0-9][0-9][0-9]', 'Celeron N3000'),
        (r'Intel.*Celeron.*[0-9]{3,4}', 'Celeron'),
        (r'Intel.*Core.*i[357]-[234][0-9][0-9][0-9]', 'Core'),
    )
    for pattern, arch in arch_patterns:
        if re.search(pattern, cpuinfo):
            return arch

    logging.info(cpuinfo)
    return None
def known_privkey():
    """Returns the mock owner private key in PEM format.

    @return: mock owner private key in PEM format.
    """
    key_path = os.path.join(os.path.dirname(__file__),
                            constants.MOCK_OWNER_KEY)
    return utils.read_file(key_path)
def _find_sysfs_accel_dir(self):
    """Return the sysfs directory for accessing EC accels.

    Scans self.sysfs_accel_search_path for a subdirectory whose 'name'
    file contains 'cros-ec-accel'.

    @return: full path of the EC accel sysfs directory.
    @raise error.TestFail: on an unexpected I/O error or if no matching
            directory is found.
    """
    for _, dirs, _ in os.walk(self.sysfs_accel_search_path):
        for d in dirs:
            namepath = os.path.join(self.sysfs_accel_search_path, d,
                                    'name')
            try:
                content = utils.read_file(namepath)
            except IOError as err:
                # errno 2 (ENOENT) just means this directory has no
                # 'name' file; keep scanning. Any other error is a
                # real problem.
                if err.errno == 2:
                    continue
                # Fix: interpolate into the message ourselves —
                # TestFail does not apply printf-style extra args —
                # and keep the space between 'accel' and 'sysfs'.
                raise error.TestFail('IOError %d while searching for '
                                     'accel sysfs dir in %s' %
                                     (err.errno, namepath))
            # Correct directory has a file called 'name' with contents
            # 'cros-ec-accel'
            if content.strip() == 'cros-ec-accel':
                return os.path.join(self.sysfs_accel_search_path, d)
    raise error.TestFail('No sysfs interface to EC accels (cros-ec-accel)')
def _populate_symbols(self):
    """Set up Breakpad's symbol structure.

    Breakpad's minidump processor expects symbols to be in a directory
    hierarchy:
      <symbol-root>/<module_name>/<file_id>/<module_name>.sym
    """
    self._symbol_dir = os.path.join(os.path.dirname(self._crasher_path),
                                    'symbols')
    utils.system('rm -rf %s' % self._symbol_dir)
    os.mkdir(self._symbol_dir)

    basename = os.path.basename(self._crasher_path)
    utils.system('/usr/bin/dump_syms %s > %s.sym' %
                 (self._crasher_path, basename))
    sym_name = '%s.sym' % basename
    symbols = utils.read_file(sym_name)

    # First line should be like:
    # MODULE Linux x86 7BC3323FBDBA2002601FA5BA3186D6540 crasher_XXX
    # or
    # MODULE Linux arm C2FE4895B203D87DD4D9227D5209F7890 crasher_XXX
    first_line = symbols.split('\n')[0]
    tokens = first_line.split()
    if tokens[0] != 'MODULE' or tokens[1] != 'Linux':
        # Fix: interpolate the line into the message — TestError does
        # not format printf-style extra arguments itself.
        raise error.TestError('Unexpected symbols format: %s' % first_line)
    file_id = tokens[3]
    target_dir = os.path.join(self._symbol_dir, basename, file_id)
    os.makedirs(target_dir)
    os.rename(sym_name, os.path.join(target_dir, sym_name))
def _check_generated_report_sending(self, meta_path, payload_path,
                                    username, exec_name, report_kind,
                                    expected_sig=None):
    """Verify crash_sender sends the generated report with the right
    metadata.

    @param meta_path: expected .meta file path.
    @param payload_path: report payload whose basename is sent.
    @param username: user to send the crash as.
    @param exec_name: expected executable name in the report.
    @param report_kind: expected report kind (e.g. 'log').
    @param expected_sig: expected signature, or None if the report must
            carry no signature.
    """
    # Now check that the sending works
    result = self._call_sender_one_crash(
        username=username,
        report=os.path.basename(payload_path))

    sent_ok = (result['send_attempt'] and result['send_success'] and
               not result['report_exists'])
    if not sent_ok:
        raise error.TestFail('Report not sent properly')
    if result['exec_name'] != exec_name:
        raise error.TestFail('Executable name incorrect')
    if result['report_kind'] != report_kind:
        raise error.TestFail('Expected a minidump report')
    if result['report_payload'] != payload_path:
        raise error.TestFail('Sent the wrong minidump payload')
    if result['meta_path'] != meta_path:
        raise error.TestFail('Used the wrong meta file')

    if expected_sig is None:
        if result['sig'] is not None:
            raise error.TestFail('Report should not have signature')
    else:
        if 'sig' not in result or result['sig'] != expected_sig:
            raise error.TestFail('Report signature mismatch: %s vs %s' %
                                 (result['sig'], expected_sig))

    # Check version matches.
    lsb_release = utils.read_file('/etc/lsb-release')
    version_match = re.search(r'CHROMEOS_RELEASE_VERSION=(.*)', lsb_release)
    version = version_match.group(1)
    if ('Version: %s' % version) not in result['output']:
        raise error.TestFail('Did not find version %s in log output' %
                             version)
def get_system_times():
    """Parse the aggregate CPU jiffy counters from /proc/stat.

    @return: dict mapping PROC_STAT_CPU_FIELDS to the corresponding
            jiffy counts from the 'cpu ' summary line, or None if that
            line is absent.
    """
    proc_stat = utils.read_file('/proc/stat')
    for line in proc_stat.split('\n'):
        if line.startswith('cpu '):
            # split() tolerates any run of whitespace between fields;
            # split(' ') on the raw slice breaks if spacing varies.
            # [1:] drops the 'cpu' tag itself.
            times = [int(jiffies) for jiffies in line.split()[1:]]
            return dict(zip(PROC_STAT_CPU_FIELDS, times))
    # Make the no-match result explicit (previously an implicit None).
    return None
def _test_reporter_shutdown(self):
    """Test the crash_reporter shutdown code works.

    After a clean shutdown, the kernel core_pattern must be restored to
    the default 'core'.
    """
    self._log_reader.set_start_by_current()
    utils.system('%s --clean_shutdown' % self._CRASH_REPORTER_PATH)
    core_pattern = utils.read_file(self._CORE_PATTERN).rstrip()
    if core_pattern != 'core':
        raise error.TestFail('core pattern should have been core, not %s' %
                             core_pattern)
def run_once(self):
    """Verify ASAN catches and logs a simulated browser memory bug.

    Requires an ASAN-built image. Triggers
    chrome.autotestPrivate.simulateAsanMemoryBug() from a test
    extension, then polls the per-PID ASAN log for the AddressSanitizer
    error report and its cause.
    """
    # ASAN-built images advertise the flag here; bail out otherwise.
    if not 'asan' in utils.read_file('/etc/ui_use_flags.txt'):
        raise error.TestFail('Current image not built with ASAN')

    extension_path = os.path.join(os.path.dirname(__file__),
                                  'asan_crash_ext')

    with chrome.Chrome(extension_paths=[extension_path]) as cr:
        # The oldest chrome process is the browser process; ASAN writes
        # its log to a file suffixed with that PID.
        pid = utils.get_oldest_pid_by_name('chrome')
        asan_log_name = '/var/log/chrome/asan_log.%d' % pid
        logging.info('Browser PID under telemetry control is %d. '
                     'So ASAN log is expected at %s.', pid, asan_log_name)

        logging.info('Initiate simulating memory bug to be caught by '
                     'ASAN.')
        extension = cr.get_extension(extension_path)
        if not extension:
            raise error.TestFail('Failed to find extension %s' %
                                 extension_path)

        # Catch the exception raised when the browser crashes.
        cr.did_browser_crash(lambda: extension.ExecuteJavaScript(
            'chrome.autotestPrivate.simulateAsanMemoryBug();'))

        utils.poll_for_condition(
            lambda: os.path.isfile(asan_log_name),
            timeout=10,
            exception=error.TestFail('Found no asan log file %s during 10s'
                                     % asan_log_name))
        ui_log = cros_logging.LogReader(asan_log_name)
        ui_log.read_all_logs()

        # We must wait some time until memory bug is simulated (happens
        # immediately after the return on the call) and caught by ASAN.
        try:
            utils.poll_for_condition(
                lambda: ui_log.can_find('ERROR: AddressSanitizer'),
                timeout=10,
                exception=error.TestFail('Found no asan log message about '
                                         'Address Sanitizer catch'))

            # An example error string is like this
            # 'testarray:228' <== Memory access at offset 52 overflows
            # this variable
            utils.poll_for_condition(
                lambda: ui_log.can_find("'testarray"),
                timeout=10,
                exception=error.TestFail('ASAN caught bug but did not '
                                         'mention the cause in the log'))
        except:
            # Dump the log for debugging, then re-raise the TestFail.
            logging.debug('ASAN log content: ' + ui_log.get_logs())
            raise

        # The cbuildbot logic will look for asan logs and process them.
        # Remove the simulated log file to avoid that.
        os.remove(asan_log_name)
def _find_sysfs_accel_dir(self):
    """Locate the sysfs directory(ies) for accessing EC accels.

    Scans self.sysfs_accel_search_path for directories whose 'name'
    file contains 'cros-ec-accel'. With the new layout each such
    directory also has a 'location' file and is recorded in
    self.sysfs_accel_paths keyed by location; if the 'location' file is
    absent the older single-directory layout is assumed and recorded in
    self.sysfs_accel_old_path.

    @raise error.TestFail: on unexpected I/O errors or if no EC accel
            interface is found at all.
    """
    for _, dirs, _ in os.walk(self.sysfs_accel_search_path):
        for d in dirs:
            dirpath = os.path.join(self.sysfs_accel_search_path, d)
            namepath = os.path.join(dirpath, 'name')
            try:
                content = utils.read_file(namepath)
            except IOError as err:
                # errno 2 is code for file does not exist, which is ok
                # here, just continue on to next directory. Any other
                # error is a problem, raise an error.
                if err.errno == 2:
                    continue
                # Fix: interpolate into the message ourselves —
                # TestFail does not apply printf-style extra args —
                # and keep the space between 'accel' and 'sysfs'.
                raise error.TestFail('IOError %d while searching for '
                                     'accel sysfs dir in %s' %
                                     (err.errno, namepath))

            # Correct directory has a file called 'name' with contents
            # 'cros-ec-accel'
            if content.strip() != 'cros-ec-accel':
                continue

            locpath = os.path.join(dirpath, 'location')
            try:
                location = utils.read_file(locpath)
            except IOError as err:
                if err.errno == 2:
                    # We have an older scheme
                    self.new_sysfs_layout = False
                    self.sysfs_accel_old_path = dirpath
                    return
                # Fix: same printf-style-args bug as above.
                raise error.TestFail('IOError %d while reading %s' %
                                     (err.errno, locpath))

            loc = location.strip()
            if loc in self._ACCEL_LOCS:
                self.sysfs_accel_paths[loc] = dirpath

    if (not self.sysfs_accel_old_path and
            len(self.sysfs_accel_paths) == 0):
        raise error.TestFail(
                'No sysfs interface to EC accels (cros-ec-accel)')
def read_dmesg(self, filename):
    """Put the contents of 'dmesg -cT' into the given file.

    @param filename: The file to write 'dmesg -cT' into.
    @return: the captured dmesg text.
    """
    with open(filename, 'w') as dmesg_file:
        self._client.run('dmesg -cT', stdout_tee=dmesg_file)
    return utils.read_file(filename)
def run_once(self):
    """Exercise read and write of the cros-ec vboot_context sysfs entry.

    Skips on non-ARM systems and when Vboot Context is not stored in
    cros-ec. Verifies the sysfs read matches 'mosys nvram vboot read',
    then writes a random value through sysfs and checks mosys sees it.
    The original context is restored afterwards.
    """
    arch = utils.get_arch()
    if not arch.startswith('arm'):
        logging.info('Skip test for non-ARM arch %s', arch)
        return

    media = utils.read_file(self.dts_node_path).strip('\n\x00')
    if media != 'cros-ec':
        logging.info('Skip test: Vboot Context storage media is "%s"',
                     media)
        return

    sysfs_entry = None
    for name in os.listdir(self.sys_vbootcontext_path):
        if name.startswith('cros-ec-vbc'):
            sysfs_entry = os.path.join(self.sys_vbootcontext_path, name,
                                       'vboot_context')
            break
    else:
        # Fix: interpolate the path into the message — TestFail does
        # not format printf-style extra arguments itself.
        raise error.TestFail('Could not find sysfs entry under %s' %
                             self.sys_vbootcontext_path)

    # Retrieve Vboot Context
    vboot_context = utils.system_output('mosys nvram vboot read').strip()
    try:
        # Test read
        vc_expect = vboot_context
        vc_got = utils.read_file(sysfs_entry).strip('\n\x00')
        if vc_got != vc_expect:
            raise error.TestFail('Could not read Vboot Context: '
                                 'Expect "%s" but got "%s"' %
                                 (vc_expect, vc_got))
        # Test write of a random hex string
        vc_expect = ''.join(random.choice('0123456789abcdef')
                            for _ in xrange(32))
        utils.open_write_close(sysfs_entry, vc_expect)
        vc_got = utils.system_output('mosys nvram vboot read').strip()
        if vc_got != vc_expect:
            raise error.TestFail('Could not write Vboot Context: '
                                 'Expect "%s" but got "%s"' %
                                 (vc_expect, vc_got))
    finally:
        # Restore Vboot Context
        utils.run('mosys nvram vboot write "%s"' % vboot_context)
def _read_dmesg(self, filename):
    """Put the contents of 'dmesg -r' into the given file.

    @param filename: The file to write 'dmesg -r' into.
    @return: the captured dmesg text.
    """
    # Fix: use a context manager so the file handle is closed even if
    # run() raises; the bare open()/close() pair leaked it on error.
    with open(filename, 'w') as f:
        self._client.run('dmesg -r', stdout_tee=f)
    return utils.read_file(filename)
def _check_cpu_type(self):
    """Check /proc/cpuinfo for a supported ARM CPU.

    @return: True iff the CPU reports as ARMv4 through ARMv7; the full
            cpuinfo is logged before returning False.
    """
    cpuinfo = utils.read_file('/proc/cpuinfo')
    # Look for ARM
    if re.search(r'ARMv[4-7]', cpuinfo):
        return True
    logging.info(cpuinfo)
    return False
def _get_system_times(self):
    """Get the CPU information from the system times.

    @return: dict mapping self.cpu_fields to the jiffy counts from the
            aggregate 'cpu ' line of /proc/stat, or None if that line
            is absent.
    """
    proc_stat = utils.read_file('/proc/stat')
    for line in proc_stat.split('\n'):
        if line.startswith('cpu '):
            # split() tolerates any run of whitespace between fields;
            # split(' ') on the raw slice breaks if spacing varies.
            # [1:] drops the 'cpu' tag itself.
            times = [int(jiffies) for jiffies in line.split()[1:]]
            return dict(zip(self.cpu_fields, times))
    # Make the no-match result explicit (previously an implicit None).
    return None
def _read_file(self, filename):
    """Return the contents of the given file or fail.

    @param filename Full path to the file to be read
    """
    try:
        return utils.read_file(filename)
    except Exception as err:
        # Surface any read problem as a test failure.
        raise error.TestFail('sysfs file problem: %s' % err)
def _read_kallsyms(self, filename):
    """Fetch /proc/kallsyms from client and return lines in the file

    @param filename: The file to write 'cat /proc/kallsyms' into.
    @return: the captured kallsyms text.
    """
    # Fix: use a context manager so the file handle is closed even if
    # run() raises; the bare open()/close() pair leaked it on error.
    with open(filename, 'w') as f:
        self._client.run('cat /proc/kallsyms', stdout_tee=f)
    return utils.read_file(filename)
def _get_perf_count(self):
    """Read the PSR performance counter from the i915 debugfs entry.

    @return: the counter value as an int.
    @raise error.TestFail: if the debugfs entry is missing or its
            format is not the expected four lines with 'label value'
            on the last one.
    """
    debugfs_file_path = '/sys/kernel/debug/dri/0/i915_edp_psr_status'
    if not os.path.exists(debugfs_file_path):
        raise error.TestFail('debugfs entry for PSR status is missing.')

    status_lines = utils.read_file(debugfs_file_path).splitlines()
    if len(status_lines) != 4:
        raise error.TestFail('Incorrect number of lines in %s.' %
                             debugfs_file_path)

    # The counter lives on the last line as '<label> <value>'.
    perf_fields = status_lines[3].split()
    if len(perf_fields) != 2:
        raise error.TestFail('Unknown format in %s.' % debugfs_file_path)
    return int(perf_fields[1])
def _kill_running_sender(self):
    """Kill any running crash_sender process.

    The PID file at self._CRASH_SENDER_RUN_PATH gives the process ID;
    the process is killed with signal 9 and the stale PID file removed.
    """
    if not os.path.exists(self._CRASH_SENDER_RUN_PATH):
        return
    sender_pid = int(utils.read_file(self._CRASH_SENDER_RUN_PATH))
    logging.warning('Detected running crash sender (%d), killing',
                    sender_pid)
    utils.system('kill -9 %d' % sender_pid)
    os.remove(self._CRASH_SENDER_RUN_PATH)
def _read_sysfs_accel_file(cls, fullpath):
    """Read the contents of the given accel sysfs file or fail

    @param fullpath Name of the file within the accel sysfs interface
                    directory
    """
    try:
        return utils.read_file(fullpath)
    except Exception as err:
        # Surface any read problem as a test failure.
        raise error.TestFail('sysfs file problem: %s' % err)
def pairgen_as_data():
    """Generates keypair, returns keys as data.

    Generates a fresh owner keypair and then passes back the
    PEM-encoded private key and the DER-encoded public key.

    @return: (PEM-encoded private key, DER-encoded public key)
    """
    keypath, certpath = pairgen()
    # Wrap in scoped_tempfile so the on-disk files are cleaned up when
    # these objects go out of scope.
    key_tmp = scoped_tempfile(keypath)
    cert_tmp = scoped_tempfile(certpath)
    pem_private = utils.read_file(key_tmp.name)
    der_public = cert_extract_pubkey_der(cert_tmp.name)
    return (pem_private, der_public)
def gather_stats(self, results):
    """Collect per-CPU ftrace stats and per-key totals into @results.

    Each 'key: value' line of every per_cpu/<cpu>/stats file is stored
    as '<cpu>_<key>'; a running 'total_<key>' sums the value across
    CPUs.

    @param results: dict to populate with the counters.
    """
    per_cpu = os.path.join(self.tracing_dir, 'per_cpu')
    for cpu in os.listdir(per_cpu):
        stats_path = os.path.join(per_cpu, cpu, 'stats')
        for line in utils.read_file(stats_path).splitlines():
            key, val = line.split(': ')
            key = key.replace(' ', '_')
            count = int(val)
            results['%s_%s' % (cpu, key)] = count
            total_key = 'total_' + key
            results[total_key] = results.get(total_key, 0) + count
def _read_sysfs_accel_file(self, filename):
    """Read the contents of the given accel sysfs file or fail

    @param filename Name of the file within the accel sysfs interface
                    directory
    """
    fullpath = os.path.join(self.sysfs_accel_path, filename)
    try:
        return utils.read_file(fullpath)
    except Exception as err:
        # Surface any read problem as a test failure.
        raise error.TestFail('sysfs file problem: %s' % err)
def _check_hardware_info(self, result):
    """Verify the sender output reports the board name and HWID.

    @param result: sender result dict whose 'output' text is checked.
    """
    # Get board name
    lsb_release = utils.read_file('/etc/lsb-release')
    board_match = re.search(r'CHROMEOS_RELEASE_BOARD=(.*)', lsb_release)
    board = board_match.group(1)
    if ('Board: %s' % board) not in result['output']:
        raise error.TestFail('Missing board name %s in output' % board)

    # Get hwid
    with os.popen("crossystem hwid 2>/dev/null", "r") as hwid_proc:
        hwclass = hwid_proc.read()
    if not hwclass:
        hwclass = 'undefined'
    if ('HWClass: %s' % hwclass) not in result['output']:
        raise error.TestFail('Expected hwclass %s in output' % hwclass)