def test_enable(self): import faulthandler, sys faulthandler.enable() assert faulthandler.is_enabled() is True faulthandler.enable(file=sys.stderr, all_threads=True) faulthandler.disable() assert faulthandler.is_enabled() is False
def test_is_enabled(self): was_enabled = faulthandler.is_enabled() try: faulthandler.enable() self.assertTrue(faulthandler.is_enabled()) faulthandler.disable() self.assertFalse(faulthandler.is_enabled()) finally: if was_enabled: faulthandler.enable() else: faulthandler.disable()
def initFaulthandler(sigusr1_chain=False): """ Maybe installs signal handlers, SIGUSR1 and SIGUSR2 and others. If no signals handlers are installed yet for SIGUSR1/2, we try to install our own Python handler. This also tries to install the handler from the fauldhandler module, esp for SIGSEGV and others. :param bool sigusr1_chain: whether the default SIGUSR1 handler should also be called. """ # In case that sigusr1_chain, we expect that there is already some handler # for SIGUSR1, and then this will not overwrite this handler. if install_signal_handler_if_default(signal.SIGUSR1): # There is already some handler or we installed our own handler now, # so in any case, it's save that we chain then handler. sigusr1_chain = True # Why not also SIGUSR2... SGE can also send this signal. install_signal_handler_if_default(signal.SIGUSR2) try: import faulthandler except ImportError as e: print("faulthandler import error. %s" % e) else: # Only enable if not yet enabled -- otherwise, leave it in its current state. if not faulthandler.is_enabled(): faulthandler.enable() if os.name != 'nt': faulthandler.register(signal.SIGUSR1, all_threads=True, chain=sigusr1_chain) from Util import to_bool if os.environ.get("DEBUG_SIGNAL_HANDLER") and to_bool( os.environ.get("DEBUG_SIGNAL_HANDLER")): installLibSigSegfault() installNativeSignalHandler()
def register():
    """Add-on entry point: set up logging and faulthandler, then register sub-modules.

    Registers a cleanup() callback via atexit so the setup is undone on exit.
    """
    from mixer import bl_panels
    from mixer import bl_operators
    from mixer import bl_properties, bl_preferences
    from mixer.blender_data import debug_addon
    from mixer.log_utils import Formatter, get_log_file

    if len(logger.handlers) == 0:
        # First registration only: route log records both to the console and
        # to the add-on's log file, with a shared format.
        logger.setLevel(logging.WARNING)
        formatter = Formatter("{asctime} {levelname[0]} {name:<36} - {message:<80}", style="{")
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        handler = logging.FileHandler(get_log_file())
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    if not faulthandler.is_enabled():
        faulthandler.enable()
        # Remember that *we* enabled it, so cleanup can disable it again
        # without clobbering somebody else's faulthandler configuration.
        global _disable_fault_handler
        _disable_fault_handler = True

    debug_addon.register()

    bl_preferences.register()
    bl_properties.register()
    bl_panels.register()
    bl_operators.register()

    atexit.register(cleanup)
def SIGs_enabled(*sigs):
    '''Return the signals handled by the C{faulthandler}.

       @return: An L{Adict} with the C{SIG*} name and value of the
                currently handled signals, if any.
    '''
    if _fh.is_enabled():
        handled = _SIGnals
    else:
        handled = ()
    return _SIGdict(sigs, handled)
def init_faulthandler(sigusr1_chain=False):
    """
    Maybe installs signal handlers, SIGUSR1 and SIGUSR2 and others.
    If no signal handlers are installed yet for SIGUSR1/2, we try to install our own Python handler.
    This also tries to install the handler from the faulthandler module,
    esp for SIGSEGV and others.

    :param bool sigusr1_chain: whether the default SIGUSR1 handler should also be called.
    """
    from Util import to_bool
    # Enable libSigSegfault first, so that we can have both,
    # because faulthandler will also call the original sig handler.
    if os.environ.get("DEBUG_SIGNAL_HANDLER") and to_bool(os.environ.get("DEBUG_SIGNAL_HANDLER")):
        install_lib_sig_segfault()
        install_native_signal_handler()
    # SIGUSR1/SIGUSR2 only exist on POSIX platforms.
    if sys.platform != 'win32':
        # In case that sigusr1_chain, we expect that there is already some handler
        # for SIGUSR1, and then this will not overwrite this handler.
        if install_signal_handler_if_default(signal.SIGUSR1):
            # There is already some handler or we installed our own handler now,
            # so in any case, it's safe to chain the handler.
            sigusr1_chain = True
        # Why not also SIGUSR2... SGE can also send this signal.
        install_signal_handler_if_default(signal.SIGUSR2)
    try:
        import faulthandler
    except ImportError as e:
        print("faulthandler import error. %s" % e)
    else:
        # Only enable if not yet enabled -- otherwise, leave it in its current state.
        if not faulthandler.is_enabled():
            faulthandler.enable()
            if sys.platform != 'win32':
                faulthandler.register(signal.SIGUSR1, all_threads=True, chain=sigusr1_chain)
def __init__(
    self,
    pattern=None,
    top_level=None,
    verbosity=1,
    interactive=True,
    failfast=False,
    keepdb=False,
    reverse=False,
    debug_mode=False,
    debug_sql=False,
    parallel=0,
    tags=None,
    exclude_tags=None,
    test_name_patterns=None,
    pdb=False,
    buffer=False,
    enable_faulthandler=True,
    timing=False,
    shuffle=False,
    logger=None,
    **kwargs,
):
    """Store test-runner options and perform option-dependent setup.

    Side effects: may enable faulthandler on stderr; raises ValueError if
    --pdb is combined with --parallel > 1.
    """
    self.pattern = pattern
    self.top_level = top_level
    self.verbosity = verbosity
    self.interactive = interactive
    self.failfast = failfast
    self.keepdb = keepdb
    self.reverse = reverse
    self.debug_mode = debug_mode
    self.debug_sql = debug_sql
    self.parallel = parallel
    self.tags = set(tags or [])
    self.exclude_tags = set(exclude_tags or [])
    if not faulthandler.is_enabled() and enable_faulthandler:
        try:
            faulthandler.enable(file=sys.stderr.fileno())
        except (AttributeError, io.UnsupportedOperation):
            # sys.stderr may have been replaced by an object without a
            # usable fileno() (e.g. captured output); fall back to the
            # original process stderr.
            faulthandler.enable(file=sys.__stderr__.fileno())
    self.pdb = pdb
    if self.pdb and self.parallel > 1:
        raise ValueError(
            "You cannot use --pdb with parallel tests; pass --parallel=1 to use it."
        )
    self.buffer = buffer
    self.test_name_patterns = None
    self.time_keeper = TimeKeeper() if timing else NullTimeKeeper()
    if test_name_patterns:
        # unittest does not export the _convert_select_pattern function
        # that converts command-line arguments to patterns.
        self.test_name_patterns = {
            pattern if "*" in pattern else "*%s*" % pattern
            for pattern in test_name_patterns
        }
    self.shuffle = shuffle
    self._shuffler = None
    self.logger = logger
def pytest_configure(config: Config) -> None:
    """Enable faulthandler on a private duplicate of the stderr descriptor.

    The fd is dup()ed so the stream faulthandler writes to stays valid even
    if sys.stderr is later replaced (e.g. by output capturing).
    """
    import faulthandler

    stderr_fd_copy = os.dup(get_stderr_fileno())
    config._store[fault_handler_stderr_key] = open(stderr_fd_copy, "w")
    # Record the prior state so it can be restored at teardown — presumably
    # in pytest_unconfigure; confirm against the rest of this plugin.
    config._store[
        fault_handler_originally_enabled_key] = faulthandler.is_enabled()
    faulthandler.enable(file=config._store[fault_handler_stderr_key])
def register():
    """Add-on entry point: set up logging, faulthandler and all sub-modules.

    Registers a cleanup() callback via atexit so the setup is undone on exit.
    """
    from mixer import bl_panels
    from mixer import bl_operators
    from mixer import bl_properties, bl_preferences
    from mixer import blender_data
    from mixer.blender_data import debug_addon
    from mixer.log_utils import Formatter, get_log_file
    from mixer import icons
    from mixer import ui
    from mixer.utils import utils_ui_operators
    from mixer import vrtist

    print("\n ------ UAS: Loading Mixer Add-on ------- ")

    if len(logger.handlers) == 0:
        # Add the pid to the log. Just enough for the tests, that merge the logs and need to distinguish
        # two Blender on the same machine. Pids might collide during regular networked operation
        old_factory = logging.getLogRecordFactory()
        pid = str(os.getpid())

        # Wrap the current record factory so every record carries the pid.
        def record_factory(*args, **kwargs):
            record = old_factory(*args, **kwargs)
            record.custom_attribute = pid
            return record

        logging.setLogRecordFactory(record_factory)

        # Route log records both to the console and to the add-on log file.
        logger.setLevel(logging.WARNING)
        formatter = Formatter(
            "{asctime} {custom_attribute:<6} {levelname[0]} {name:<36} - {message:<80}", style="{")
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        handler = logging.FileHandler(get_log_file())
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    if not faulthandler.is_enabled():
        faulthandler.enable()
        # Remember that *we* enabled it, so cleanup can disable it again
        # without clobbering somebody else's faulthandler configuration.
        global _disable_fault_handler
        _disable_fault_handler = True

    debug_addon.register()

    icons.register()
    bl_preferences.register()
    bl_properties.register()
    bl_panels.register()
    bl_operators.register()
    utils_ui_operators.register()
    ui.register()
    blender_data.register()
    vrtist.register()

    atexit.register(cleanup)
def enable_faulthandler():
    """Context-manager generator: switch on faulthandler if nobody else did.

    A pre-existing faulthandler configuration is left untouched.
    """
    if faulthandler.is_enabled():
        # Somebody already configured faulthandler; don't clobber it.
        yield
        return
    # signal.SIGUSR2 does not exist on Windows (AttributeError), and the
    # attached stderr may not expose what faulthandler needs
    # (io.UnsupportedOperation) -- treat both as best-effort and skip.
    with contextlib.suppress(AttributeError, io.UnsupportedOperation):
        faulthandler.enable()
        faulthandler.register(signal.SIGUSR2, all_threads=True)
    yield
def pytest_configure(config): import faulthandler # avoid trying to dup sys.stderr if faulthandler is already enabled if faulthandler.is_enabled(): return stderr_fd_copy = os.dup(_get_stderr_fileno()) config.fault_handler_stderr = os.fdopen(stderr_fd_copy, "w") faulthandler.enable(file=config.fault_handler_stderr)
def test_is_enabled(self): orig_stderr = sys.stderr try: # regrtest may replace sys.stderr by io.StringIO object, but # faulthandler.enable() requires that sys.stderr has a fileno() # method sys.stderr = sys.__stderr__ was_enabled = faulthandler.is_enabled() try: faulthandler.enable() self.assertTrue(faulthandler.is_enabled()) faulthandler.disable() self.assertFalse(faulthandler.is_enabled()) finally: if was_enabled: faulthandler.enable() else: faulthandler.disable() finally: sys.stderr = orig_stderr
def reset_log_location(cls, new_log_location: str) -> None: """Re-acquire file handles to error logs based in the new location. Class state: - Overwrites `cls._log_dir`, `cls._pid_specific_error_fileobj`, and `cls._shared_error_fileobj`. OS state: - May create a new directory. - Overwrites signal handlers for many fatal and non-fatal signals (but not SIGUSR2). :raises: :class:`ExceptionSink.ExceptionSinkError` if the directory does not exist or is not writable. """ # We could no-op here if the log locations are the same, but there's no reason not to have the # additional safety of re-acquiring file descriptors each time (and erroring out early if the # location is no longer writable). try: safe_mkdir(new_log_location) except Exception as e: raise cls.ExceptionSinkError( "The provided log location path at '{}' is not writable or could not be created: {}.".format( new_log_location, str(e) ), e, ) pid = os.getpid() pid_specific_log_path = cls.exceptions_log_path(for_pid=pid, in_dir=new_log_location) shared_log_path = cls.exceptions_log_path(in_dir=new_log_location) assert pid_specific_log_path != shared_log_path try: pid_specific_error_stream = safe_open(pid_specific_log_path, mode="w") shared_error_stream = safe_open(shared_log_path, mode="a") except Exception as e: raise cls.ExceptionSinkError( "Error opening fatal error log streams for log location '{}': {}".format( new_log_location, str(e) ) ) # NB: mutate process-global state! if faulthandler.is_enabled(): logger.debug("re-enabling faulthandler") # Call Py_CLEAR() on the previous error stream: # https://github.com/vstinner/faulthandler/blob/master/faulthandler.c faulthandler.disable() # Send a stacktrace to this file if interrupted by a fatal error. faulthandler.enable(file=pid_specific_error_stream, all_threads=True) # NB: mutate the class variables! cls._log_dir = new_log_location cls._pid_specific_error_fileobj = pid_specific_error_stream cls._shared_error_fileobj = shared_error_stream
def is_enabled(sig=None):
    '''Check whether the C{faulthandler} is enabled.

       @keyword sig: Check whether C{faulthandler} includes
                     this signal (C{signal.SIG*}).

       @return: C{True}, C{faulhandler} is currently enabled,
                C{False} otherwise.
    '''
    if not _fh.is_enabled():
        return False
    return sig is None or sig in _SIGnals
def exit(self, signum, frame):
    """Signal handler: shut down all subsystems and terminate the process.

    :param signum: number of the received signal (unused here).
    :param frame: stack frame at delivery time (unused here).
    """
    self.reload_event.set()
    self.reload = False
    # Log messages below are intentionally in German (project convention).
    self._log.info("Zeit zum Begraben gehn...\n PluginManager wird heruntergefahren...")
    self.pm.shutdown()
    self._log.info("MQTT Thread wird gestoppt...")
    self.mqtt_client.loop_stop()
    self._log.info("Config wird entladen...")
    self.config.save()
    self.config.stop()
    self._log.info("Beende mich...")
    # Disable faulthandler before closing the file it may be writing to.
    if faulthandler.is_enabled():
        faulthandler.disable()
    if self.faultFile is not None and not self.faultFile.closed:
        self.faultFile.close()
    # NOTE(review): this calls the interactive builtin exit(); sys.exit(0)
    # would be the conventional choice -- confirm intent.
    exit(0)
def pytest_configure(config: Config) -> None:
    """Install the faulthandler hooks plugin, unless faulthandler is already on."""
    import faulthandler

    if not faulthandler.is_enabled():
        # faulthandler is not enabled, so install plugin that does the actual work
        # of enabling faulthandler before each test executes.
        config.pluginmanager.register(FaultHandlerHooks(), "faulthandler-hooks")
    else:
        # Do not handle dumping to stderr if faulthandler is already enabled, so warn
        # users that the option is being ignored.
        timeout = FaultHandlerHooks.get_timeout_config_value(config)
        if timeout > 0:
            config.issue_config_time_warning(
                pytest.PytestConfigWarning(
                    "faulthandler module enabled before pytest configuration step, "
                    "'faulthandler_timeout' option ignored"),
                stacklevel=2,
            )
def reset_log_location(cls, new_log_location): """Re-acquire file handles to error logs based in the new location. Class state: - Overwrites `cls._log_dir`, `cls._pid_specific_error_fileobj`, and `cls._shared_error_fileobj`. OS state: - May create a new directory. - Overwrites signal handlers for many fatal and non-fatal signals. :raises: :class:`ExceptionSink.ExceptionSinkError` if the directory does not exist or is not writable. """ # We could no-op here if the log locations are the same, but there's no reason not to have the # additional safety of re-acquiring file descriptors each time (and erroring out early if the # location is no longer writable). # Create the directory if possible, or raise if not writable. cls._check_or_create_new_destination(new_log_location) pid_specific_error_stream, shared_error_stream = cls._recapture_fatal_error_log_streams( new_log_location) # NB: mutate process-global state! if faulthandler.is_enabled(): logger.debug('re-enabling faulthandler') # Call Py_CLEAR() on the previous error stream: # https://github.com/vstinner/faulthandler/blob/master/faulthandler.c faulthandler.disable() # Send a stacktrace to this file if interrupted by a fatal error. faulthandler.enable(file=pid_specific_error_stream, all_threads=True) # Log a timestamped exception and exit gracefully on non-fatal signals. for signum in cls.all_gracefully_handled_signals: signal.signal(signum, cls.handle_signal_gracefully) signal.siginterrupt(signum, False) # NB: mutate the class variables! cls._log_dir = new_log_location cls._pid_specific_error_fileobj = pid_specific_error_stream cls._shared_error_fileobj = shared_error_stream
def test_open_excel(self): import pywintypes # not using context manager because we call .quit manually wb1 = open_excel(visible=False) app1 = wb1.app # close workbook but leave app instance open (anything using wb1 will fail now) wb1.close() # disable faulthandler to avoid annoying "Windows fatal exception" messages in the console # See https://github.com/pywinauto/pywinauto/issues/858 # and https://stackoverflow.com/questions/57523762/pytest-windows-fatal-exception-code-0x8001010d faulthandler_enabled = faulthandler.is_enabled() if faulthandler_enabled: faulthandler.disable() with pytest.raises(pywintypes.com_error): wb1.sheet_names() if faulthandler_enabled: faulthandler.enable() wb2 = open_excel(visible=False) app2 = wb2.app assert app1 == app2 == xw_excel.global_app # this effectively close all workbooks but leaves the instance intact (this is probably due to us keeping a # reference to it). app1.quit() # anything using wb2 will fail if faulthandler_enabled: faulthandler.disable() with pytest.raises(pywintypes.com_error): wb2.sheet_names() if faulthandler_enabled: faulthandler.enable() # in any case, this should work with open_excel(visible=False) as wb: wb['test'] = 'content'
def reset_log_location(cls, new_log_location): """Re-acquire file handles to error logs based in the new location. Class state: - Overwrites `cls._log_dir`, `cls._pid_specific_error_fileobj`, and `cls._shared_error_fileobj`. OS state: - May create a new directory. - Overwrites signal handlers for many fatal and non-fatal signals (but not SIGUSR2). :raises: :class:`ExceptionSink.ExceptionSinkError` if the directory does not exist or is not writable. """ # We could no-op here if the log locations are the same, but there's no reason not to have the # additional safety of re-acquiring file descriptors each time (and erroring out early if the # location is no longer writable). # Create the directory if possible, or raise if not writable. cls._check_or_create_new_destination(new_log_location) pid_specific_error_stream, shared_error_stream = cls._recapture_fatal_error_log_streams( new_log_location) # NB: mutate process-global state! if faulthandler.is_enabled(): logger.debug('re-enabling faulthandler') # Call Py_CLEAR() on the previous error stream: # https://github.com/vstinner/faulthandler/blob/master/faulthandler.c faulthandler.disable() # Send a stacktrace to this file if interrupted by a fatal error. faulthandler.enable(file=pid_specific_error_stream, all_threads=True) # NB: mutate the class variables! cls._log_dir = new_log_location cls._pid_specific_error_fileobj = pid_specific_error_stream cls._shared_error_fileobj = shared_error_stream
def testFaultHandler(pytestconfig):
    ''' Make sure that faulthandler library is enabled during tests '''
    assert faulthandler.is_enabled()
    # NOTE(review): fault_handler_file being None presumably means pytest's
    # built-in faulthandler plugin did not redirect the dump stream -- confirm
    # against the pytest version in use.
    assert pytestconfig.fault_handler_file is None
better_exchook.better_exchook(exc_type, exc_obj, exc_tb) sys.excepthook = excepthook def initFaulthandler(sigusr1_chain=False): """ :param bool sigusr1_chain: whether the default SIGUSR1 handler should also be called. """ try: import faulthandler except ImportError, e: print "faulthandler import error. %s" % e return # Only enable if not yet enabled -- otherwise, leave it in its current state. if not faulthandler.is_enabled(): faulthandler.enable() if os.name != 'nt': import signal faulthandler.register(signal.SIGUSR1, all_threads=True, chain=sigusr1_chain) @auto_exclude_all_new_threads def initIPythonKernel(): # You can remotely connect to this kernel. See the output on stdout. try: import IPython.kernel.zmq.ipkernel from IPython.kernel.zmq.ipkernel import Kernel from IPython.kernel.zmq.heartbeat import Heartbeat from IPython.kernel.zmq.session import Session from IPython.kernel import write_connection_file
import faulthandler

# Turn on faulthandler (C-level crash tracebacks on SIGSEGV etc.) unless
# some other component has enabled it already.
if not faulthandler.is_enabled():
    faulthandler.enable()
if stdlogfile != None: stdlog = open(stdlogfile, 'w') sys.stdout = stdlog sys.stderr = stdlog try: logging.config.fileConfig('logging.config') except Exception as e: logging.basicConfig(level=logging.WARN) logging.error('Error while reading logging config: ' + str(e)) try: faulthandler.enable() except Exception as e: logging.error('Error calling Faulthandle.enable(): ' + str(e)) if (faulthandler.is_enabled()): logging.warn('Faulthandler is enabled') #faulthandler.dump_traceback_later(5) else: logging.error('Faulthandler is NOT enabled') if 'nt' in os.name: from ctypes import windll windll.shell32.SetCurrentProcessExplicitAppUserModelID(u'matzman666.pypipboyapp') pipboyApp = PyPipboyApp(sys.argv, inifile) pipboyApp.run() except: traceback.print_exc(file=sys.stdout) time.sleep(1) # Just to make sure that the error is correctly written into the log file
if stdlogfile != None: stdlog = open(stdlogfile, 'w') sys.stdout = stdlog sys.stderr = stdlog try: logging.config.fileConfig('logging.config') except Exception as e: logging.basicConfig(level=logging.WARN) logging.error('Error while reading logging config: ' + str(e)) try: faulthandler.enable() except Exception as e: logging.error('Error calling Faulthandle.enable(): ' + str(e)) if (faulthandler.is_enabled()): logging.warn('Faulthandler is enabled') #faulthandler.dump_traceback_later(5) else: logging.error('Faulthandler is NOT enabled') if 'nt' in os.name: from ctypes import windll windll.shell32.SetCurrentProcessExplicitAppUserModelID( u'matzman666.pypipboyapp') pipboyApp = PyPipboyApp(sys.argv, inifile) pipboyApp.run() except: traceback.print_exc(file=sys.stdout) time.sleep( 1
def sigsegv_handler():
    """Context-manager generator that enables faulthandler around the with-body.

    If faulthandler was already enabled it is left enabled afterwards;
    otherwise it is switched off again when the body finishes.
    """
    was_enabled = faulthandler.is_enabled()
    faulthandler.enable()
    try:
        yield
    finally:
        # Bug fix: without try/finally, an exception raised in the with-body
        # propagated through the yield and the cleanup was skipped, leaving
        # faulthandler permanently enabled.
        if not was_enabled:
            faulthandler.disable()
def __init__(self, cores=None, version=None, port=None, host='localhost'):
    """Start (or connect to) a Comsol client session via JPype.

    :param cores:   limit on the number of processor cores Comsol may use
                    (falsy = leave unlimited).
    :param version: Comsol back-end version to discover (None = default).
    :param port:    server port to connect to; None starts a stand-alone client.
    :param host:    server host name, only used when a port is given.
    :raises NotImplementedError: if a JVM (and thus a client) already exists.
    """
    # Make sure this is the one and only client.
    if jpype.isJVMStarted():
        error = 'Only one client can be instantiated at a time.'
        logger.error(error)
        raise NotImplementedError(error)

    # Discover Comsol back-end.
    backend = discovery.backend(version)

    # Instruct Comsol to limit number of processor cores to use.
    if cores:
        os.environ['COMSOL_NUM_THREADS'] = str(cores)

    # On Windows, turn off fault handlers if enabled.
    # Without this, pyTest will crash when starting the Java VM.
    # See "Errors reported by Python fault handler" in JPype docs.
    # The problem may be the SIGSEGV signal, see JPype issue #886.
    if platform.system() == 'Windows' and faulthandler.is_enabled():
        logger.debug('Turning off Python fault handlers.')
        faulthandler.disable()

    # Start the Java virtual machine.
    logger.debug(f'JPype version is {jpype.__version__}.')
    logger.info('Starting Java virtual machine.')
    java_args = [str(backend['jvm'])]
    if option('classkit'):
        java_args += ['-Dcs.ckl']
    logger.debug(f'JVM arguments: {java_args}')
    jpype.startJVM(*java_args,
                   classpath=str(backend['root'] / 'plugins' / '*'),
                   convertStrings=False)
    logger.info('Java virtual machine has started.')

    # Initialize a stand-alone client if no server port given.
    from com.comsol.model.util import ModelUtil as java
    if port is None:
        logger.info('Initializing stand-alone client.')
        check_environment(backend)
        graphics = True
        java.initStandalone(graphics)
        logger.info('Stand-alone client initialized.')
    # Otherwise skip stand-alone initialization and connect to server.
    else:
        logger.info(f'Connecting to server "{host}" at port {port}.')
        java.connect(host, port)

    # Log number of used processor cores as reported by Comsol instance.
    cores = java.getPreference('cluster.processor.numberofprocessors')
    cores = int(str(cores))
    noun = 'core' if cores == 1 else 'cores'
    logger.info(f'Running on {cores} processor {noun}.')

    # Load Comsol settings from disk so as to not just use defaults.
    java.loadPreferences()

    # Override certain settings not useful in headless operation.
    java.setPreference('updates.update.check', 'off')
    java.setPreference('tempfiles.saving.warnifoverwriteolder', 'off')
    java.setPreference('tempfiles.recovery.autosave', 'off')
    try:
        # Preference not defined on certain systems, see issue #39.
        java.setPreference('tempfiles.recovery.checkforrecoveries', 'off')
    except Exception:
        logger.debug('Could not turn off check for recovery files.')
    java.setPreference('tempfiles.saving.optimize', 'filesize')

    # Save useful information in instance attributes.
    self.version = backend['name']
    self.cores = cores
    self.host = host
    self.port = port
    self.java = java