def test_simple_functionality(self):
    data = "\n\nwhee\n"
    test_file = mock.SaveDataAfterCloseStringIO()
    utils.open.expect_call("filename", "w").and_return(test_file)

    utils.open_write_close("filename", data)

    self.god.check_playback()
    self.assertEqual(data, test_file.final_data)
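For context, a minimal sketch of what utils.open_write_close is expected to do, inferred from the test above (the real implementation lives in autotest's utils module and may differ):

def open_write_close(filename, data):
    # Open for writing (truncating), write the payload, and close the
    # handle even if write() raises.
    f = open(filename, 'w')
    try:
        f.write(data)
    finally:
        f.close()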
def _execute_code(self, code_file, namespace, protect=True):
    """
    Execute code using a copy of namespace as a server control script.

    Unless protect is explicitly set to False, the dict will not be
    modified.

    Args:
      code_file: The filename of the control file to execute.
      namespace: A dict containing names to make available during
          execution.
      protect: Boolean.  If True (the default) a copy of the namespace
          dict is used during execution to prevent the code from
          modifying its contents outside of this function.  If False
          the raw dict is passed in and modifications will be allowed.
    """
    if protect:
        namespace = namespace.copy()
    self._fill_server_control_namespace(namespace, protect=protect)
    # TODO: Simplify and get rid of the special cases for only 1 machine.
    if len(self.machines) > 1:
        machines_text = "\n".join(self.machines) + "\n"
        # Only rewrite the file if it does not match our machine list.
        try:
            machines_f = open(MACHINES_FILENAME, "r")
            existing_machines_text = machines_f.read()
            machines_f.close()
        except EnvironmentError:
            existing_machines_text = None
        if machines_text != existing_machines_text:
            utils.open_write_close(MACHINES_FILENAME, machines_text)
    execfile(code_file, namespace, namespace)
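To illustrate the protect semantics, a self-contained sketch (the helper run_in_copy is hypothetical, not part of autotest; note dict.copy() is shallow, so only name rebindings are isolated, not in-place mutation of contained objects):

def run_in_copy(code, namespace, protect=True):
    # Hypothetical standalone analogue of _execute_code's protect logic.
    if protect:
        namespace = namespace.copy()  # name rebindings stay in the copy
    exec(code, namespace, namespace)

ns = {'machines': ['host1']}
run_in_copy("machines = machines + ['host2']", ns, protect=True)
assert ns['machines'] == ['host1']            # caller's dict untouched
run_in_copy("machines = machines + ['host2']", ns, protect=False)
assert ns['machines'] == ['host1', 'host2']   # raw dict was modified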
def configure_crash_handler(self):
    """
    Configure the crash handler by:
     * Setting core size to unlimited
     * Putting an appropriate crash handler on
       /proc/sys/kernel/core_pattern
     * Creating files that the crash handler will use to figure out
       which tests are active at a given moment

    The crash handler will pick up the core file and write it to
    self.debugdir, and perform analysis on it to generate a report.  The
    program also outputs some results to syslog.

    If multiple tests are running, the handler attempts to verify
    whether the old PID is still in the system process table, to
    determine whether it is a parent of the current test execution.  If
    this can't be determined, the core file and the report file are
    copied to all test debug dirs.
    """
    self.crash_handling_enabled = False

    # make sure this script will run with a new enough python to work
    cmd = ("python -c 'import sys; "
           "print sys.version_info[0], sys.version_info[1]'")
    result = utils.run(cmd, ignore_status=True, verbose=False)
    if result.exit_status != 0:
        logging.warning('System python is too old, crash handling disabled')
        return
    major, minor = [int(x) for x in result.stdout.strip().split()]
    if (major, minor) < (2, 4):
        logging.warning('System python is too old, crash handling disabled')
        return

    self.pattern_file = '/proc/sys/kernel/core_pattern'
    try:
        # Enable core dumps
        resource.setrlimit(resource.RLIMIT_CORE, (-1, -1))
        # Try to back up the core pattern and register our script
        self.core_pattern_backup = open(self.pattern_file, 'r').read()
        pattern_file = open(self.pattern_file, 'w')
        tools_dir = os.path.join(self.autodir, 'tools')
        crash_handler_path = os.path.join(tools_dir, 'crash_handler.py')
        pattern_file.write('|' + crash_handler_path + ' %p %t %u %s %h %e')
        pattern_file.close()
        # Write the files that the crash handler is going to use
        self.debugdir_tmp_file = ('/tmp/autotest_results_dir.%s'
                                  % os.getpid())
        utils.open_write_close(self.debugdir_tmp_file,
                               self.debugdir + "\n")
    except Exception, e:
        logging.warning('Crash handling disabled: %s', e)
    else:
        # Only report crashes if the handler was fully registered
        self.crash_handling_enabled = True
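For reference, the leading '|' registered in core_pattern tells the kernel to pipe core dumps of crashing processes into the named program, and the % fields are standard core(5) template specifiers: %p PID, %t dump time, %u UID, %s signal number, %h hostname, %e executable name.  A minimal sketch of a pipe handler in the same spirit as crash_handler.py (argument handling and output path are illustrative, not autotest's actual behavior):

#!/usr/bin/env python
import sys

# Minimal sketch of a core_pattern pipe handler.  The kernel execs the
# registered program with the expanded %-fields as argv and streams the
# core image on stdin.  Field order matches the pattern written above.
def main():
    pid, dump_time, uid, signal, hostname, executable = sys.argv[1:7]
    core_data = sys.stdin.read()   # the core dump itself arrives on stdin
    out = open('/tmp/core.%s.%s' % (executable, pid), 'wb')
    try:
        out.write(core_data)
    finally:
        out.close()

if __name__ == '__main__':
    main()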
def crash_handler_report(self):
    """
    If core dumps are found in the debugdir after the execution of the
    test, let the user know.
    """
    if self.crash_handling_enabled:
        # Remove the debugdir info file
        os.unlink(self.debugdir_tmp_file)
        # Restore the core pattern backup
        try:
            utils.open_write_close(self.pattern_file,
                                   self.core_pattern_backup)
        except EnvironmentError:
            pass
        # Let the user know if core dumps were generated during the test
        core_dirs = glob.glob('%s/crash.*' % self.debugdir)
        if core_dirs:
            logging.warning('Programs crashed during test execution')
            for core_dir in core_dirs:
                logging.warning('Please verify %s for more info', core_dir)
def run(self, cleanup=False, install_before=False, install_after=False,
        collect_crashdumps=True, namespace={}, control=None,
        control_file_dir=None, only_collect_crashinfo=False):
    # for a normal job, make sure the uncollected logs file exists
    # for a crashinfo-only run it should already exist, bail out otherwise
    created_uncollected_logs = False
    if self.resultdir and not os.path.exists(self._uncollected_log_file):
        if only_collect_crashinfo:
            # if this is a crashinfo-only run, and there were no existing
            # uncollected logs, just bail out early
            logging.info("No existing uncollected logs, "
                         "skipping crashinfo collection")
            return
        else:
            log_file = open(self._uncollected_log_file, "w")
            pickle.dump([], log_file)
            log_file.close()
            created_uncollected_logs = True

    # use a copy so changes don't affect the original dictionary
    namespace = namespace.copy()
    machines = self.machines
    if control is None:
        if self.control is None:
            control = ''
        else:
            control = self._load_control_file(self.control)
    if control_file_dir is None:
        control_file_dir = self.resultdir

    self.aborted = False
    namespace['machines'] = machines
    namespace['args'] = self.args
    namespace['job'] = self
    namespace['ssh_user'] = self._ssh_user
    namespace['ssh_port'] = self._ssh_port
    namespace['ssh_pass'] = self._ssh_pass
    test_start_time = int(time.time())

    if self.resultdir:
        os.chdir(self.resultdir)
        # touch status.log so that the parser knows a job is running here
        open(self.get_status_log_path(), 'a').close()
        self.enable_external_logging()

    collect_crashinfo = True
    temp_control_file_dir = None
    try:
        try:
            if install_before and machines:
                self._execute_code(INSTALL_CONTROL_FILE, namespace)

            if only_collect_crashinfo:
                return

            # determine the dir to write the control files to
            cfd_specified = (control_file_dir
                             and control_file_dir is not self._USE_TEMP_DIR)
            if cfd_specified:
                temp_control_file_dir = None
            else:
                temp_control_file_dir = tempfile.mkdtemp(
                    suffix='temp_control_file_dir')
                control_file_dir = temp_control_file_dir
            server_control_file = os.path.join(control_file_dir,
                                               self._control_filename)
            client_control_file = os.path.join(control_file_dir,
                                               CLIENT_CONTROL_FILENAME)
            if self._client:
                namespace['control'] = control
                utils.open_write_close(client_control_file, control)
                shutil.copyfile(CLIENT_WRAPPER_CONTROL_FILE,
                                server_control_file)
            else:
                utils.open_write_close(server_control_file, control)
            logging.info("Processing control file")
            self._execute_code(server_control_file, namespace)
            logging.info("Finished processing control file")

            # no error occurred, so we don't need to collect crashinfo
            collect_crashinfo = False
        except Exception, e:
            try:
                logging.exception(
                        'Exception escaped control file, job aborting:')
                self.record('INFO', None, None, str(e),
                            {'job_abort_reason': str(e)})
            except:
                pass  # don't let logging exceptions here interfere
            raise
    finally:
        if temp_control_file_dir:
            # Clean up temp directory used for copies of the control files
            try:
                shutil.rmtree(temp_control_file_dir)
            except Exception, e:
                logging.warn('Could not remove temp directory %s: %s',
                             temp_control_file_dir, e)

        if machines and (collect_crashdumps or collect_crashinfo):
            namespace['test_start_time'] = test_start_time
            if collect_crashinfo:
                # includes crashdumps
                self._execute_code(CRASHINFO_CONTROL_FILE, namespace)
            else:
                self._execute_code(CRASHDUMPS_CONTROL_FILE, namespace)
        if self._uncollected_log_file and created_uncollected_logs:
            os.remove(self._uncollected_log_file)
        self.disable_external_logging()
        if cleanup and machines:
            self._execute_code(CLEANUP_CONTROL_FILE, namespace)
        if install_after and machines:
            self._execute_code(INSTALL_CONTROL_FILE, namespace)
def __write_file(self, file_name, data):
    path = os.path.join(self.__base_path, file_name)
    try:
        utils.open_write_close(path, data)
    except IOError as e:
        logging.warn('write of %s failed: %s', path, str(e))
def set_simple_switch(value, filename):
    """
    Write a simple switch value (e.g. '1') to the given file, registering
    an atexit handler that restores the original contents on exit.
    """
    orig = common_lib_utils.read_file(filename).strip()
    atexit.register(common_lib_utils.open_write_close, filename, orig)
    common_lib_utils.open_write_close(filename, value)
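A hypothetical call site, assuming a sysfs toggle such as KSM's run switch (the path is illustrative, not taken from this code):

# Illustrative usage: flip KSM on for the duration of the process; the
# atexit hook registered inside set_simple_switch writes the original
# value back when the process exits.
set_simple_switch('1', '/sys/kernel/mm/ksm/run')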
def __write_file(self, file_name, data):
    path = os.path.join(self.__base_path, file_name)
    utils.open_write_close(path, data)
def cleanup(self):
    # Restore the original setting if the system has the CPUQuiet feature
    if os.path.exists(SYSFS_CPUQUIET_ENABLE):
        utils.open_write_close(SYSFS_CPUQUIET_ENABLE,
                               self.is_cpuquiet_enabled)
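For completeness, a hedged sketch of the setup counterpart this cleanup implies: somewhere earlier the test presumably recorded the current value, roughly like the following (the attribute name matches the cleanup above; everything else is an assumption):

# Hypothetical setup-side snippet: save the current CPUQuiet state so
# cleanup() can restore it.  utils.read_file is autotest's file-reading
# helper; SYSFS_CPUQUIET_ENABLE is the same sysfs path used in cleanup().
if os.path.exists(SYSFS_CPUQUIET_ENABLE):
    self.is_cpuquiet_enabled = utils.read_file(SYSFS_CPUQUIET_ENABLE)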