def start_gui_process(pipe=None):
	global config

	# HACK TO WORK AROUND CRASH ON CONSOLE OUTPUT WITH BBFREEZE GUI_ONLY
	sys.stdout = sys.stderr = utf8file.UTF8File('miaspatch.log', 'a')
	print(time.asctime())

	# Try to get some more useful info on crashes:
	import faulthandler
	faulthandler.enable()

	app = QtGui.QApplication(sys.argv)

	config = ConfigParser.RawConfigParser()
	config.read('miaspatch.cfg')
	try:
		language = config.get('DEFAULT', 'language')
	except ConfigParser.Error:
		pass
	else:
		translator = QtCore.QTranslator()
		translator.load('miaspatch_i18n/%s' % language)
		app.installTranslator(translator)

	window = MiasPatch()
	window.show()

	window.find_install_path()

	app.exec_()
	del window
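
Note: utf8file.UTF8File in the example above is a project helper that is not
shown here. Assuming it is just a UTF-8 text-file wrapper, a minimal,
hypothetical stand-in could look like this:

import io

def UTF8File(path, mode):
    # Hypothetical stand-in for the project's UTF8File helper: a UTF-8 text
    # file that never raises on unencodable output, so the redirected
    # print() calls above stay safe under a frozen GUI build.
    return io.open(path, mode, encoding='utf-8', errors='replace')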
Example #2
def main(args=None):
    faulthandler.enable()
    faulthandler.register(signal.SIGUSR1)

    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)

    backend_factory = get_backend_factory(options)
    backend_pool = BackendPool(backend_factory)
    atexit.register(backend_pool.flush)

    # Get paths
    cachepath = get_backend_cachedir(options.storage_url, options.cachedir)

    # Retrieve metadata
    with backend_pool() as backend:
        (param, db) = get_metadata(backend, cachepath)

    retrieve_objects(db, backend_factory, options.corrupted_file,
                     options.missing_file, thread_count=options.parallel,
                     full=options.data, offset=options.start_with)

    if options.corrupted_file.tell() or options.missing_file.tell():
        sys.exit(46)
    else:
        os.unlink(options.corrupted_file.name)
        os.unlink(options.missing_file.name)
        sys.exit(0)
Example #3
def start_gui_process(pipe=None):
	global app

	# HACK TO WORK AROUND CRASH ON CONSOLE OUTPUT WITH BBFREEZE GUI_ONLY
	sys.stdout = sys.stderr = utf8file.UTF8File('miasmod.log', 'a')
	print(time.asctime())

	# Try to get some more useful info on crashes:
	import faulthandler
	faulthandler.enable()

	app = QtGui.QApplication(sys.argv)

	window = MiasMod()

	window.show()
	# window.open_saves_dat()
	# window.open_active_environment()
	# window.refresh_mod_list()
	window.load_main_rs5()
	window.synchronise_alocalmod()

	# import trace
	# t = trace.Trace()
	# t.runctx('app.exec_()', globals=globals(), locals=locals())
	app.exec_()

	del window
Example #4
def init_faulthandler(sigusr1_chain=False):
  """
  Maybe installs signal handlers for SIGUSR1, SIGUSR2 and others.
  If no signal handlers are installed yet for SIGUSR1/2, we try to install our own Python handler.
  This also tries to install the handler from the faulthandler module,
  especially for SIGSEGV and others.

  :param bool sigusr1_chain: whether the default SIGUSR1 handler should also be called.
  """
  from Util import to_bool
  # Enable libSigSegfault first, so that we can have both,
  # because faulthandler will also call the original sig handler.
  if os.environ.get("DEBUG_SIGNAL_HANDLER") and to_bool(os.environ.get("DEBUG_SIGNAL_HANDLER")):
    install_lib_sig_segfault()
    install_native_signal_handler()
  if sys.platform != 'win32':
    # In case that sigusr1_chain, we expect that there is already some handler
    # for SIGUSR1, and then this will not overwrite this handler.
    if install_signal_handler_if_default(signal.SIGUSR1):
      # There is already some handler, or we installed our own handler now,
      # so in any case it's safe to chain the handler.
      sigusr1_chain = True
    # Why not also SIGUSR2... SGE can also send this signal.
    install_signal_handler_if_default(signal.SIGUSR2)
  try:
    import faulthandler
  except ImportError as e:
    print("faulthandler import error. %s" % e)
  else:
    # Only enable if not yet enabled -- otherwise, leave it in its current state.
    if not faulthandler.is_enabled():
      faulthandler.enable()
      if sys.platform != 'win32':
        faulthandler.register(signal.SIGUSR1, all_threads=True, chain=sigusr1_chain)
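
Note: install_signal_handler_if_default(), install_lib_sig_segfault() and
install_native_signal_handler() are project helpers that are not shown here.
As a rough, hypothetical sketch, the first one might install a stack-dumping
handler only when the signal is still at its default, and report whether a
Python-level handler is in place afterwards:

import signal
import traceback

def install_signal_handler_if_default(signum):
    # Hypothetical sketch, not the original implementation. Install our own
    # handler only if nothing else claimed the signal; return True when some
    # Python-level handler (pre-existing or ours) is installed, so that
    # chaining it is safe.
    handler = signal.getsignal(signum)
    if handler == signal.SIG_DFL:
        def _dump(sig, frame):
            traceback.print_stack(frame)
        signal.signal(signum, _dump)
        return True
    return callable(handler)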
Example #5
def test_enable(self):
    import faulthandler, sys
    faulthandler.enable()
    assert faulthandler.is_enabled() is True
    faulthandler.enable(file=sys.stderr, all_threads=True)
    faulthandler.disable()
    assert faulthandler.is_enabled() is False
Example #6
def _enable_faulthandler():
    #
    # In the event of a segfault, faulthandler will dump the currently
    # active stack so you can figure out what went wrong.
    #
    # Additionally, on non-Windows platforms we register a SIGUSR2
    # handler -- if you send the robot process a SIGUSR2, then
    # faulthandler will dump all of your current stacks. This can
    # be really useful for figuring out things like deadlocks.
    #

    import logging

    logger = logging.getLogger("faulthandler")

    try:
        # These should work on all platforms
        import faulthandler

        faulthandler.enable()
    except Exception as e:
        logger.warning("Could not enable faulthandler: %s", e)
        return

    try:
        import signal

        faulthandler.register(signal.SIGUSR2)
        logger.info("registered SIGUSR2 for PID %s", os.getpid())
    except Exception:
        return
Example #7
def InstallFaultHandler(config):
    """
    Install the fault handler. If we have a real sys.stderr, we install directly into it.
    Otherwise (for example a frozen executable without a console) we write into a file
    next to the executable.

    .. note:: this is a separate function because we want to test it explicitly.

    :param config: pytest config object
    """
    try:
        import faulthandler
    except ImportError:
        pass
    else:
        if hasattr(sys.stderr, 'fileno'):
            stderr_fd_copy = os.dup(sys.stderr.fileno())
            stderr_copy = os.fdopen(stderr_fd_copy, 'w')
            faulthandler.enable(stderr_copy)
            config.fault_handler_file = None
        else:
            # in frozen executables it might be that sys.stderr is actually a wrapper and not a
            # real object, then write the fault handler to a file
            filename = os.path.splitext(sys.executable)[0] + ('.faulthandler-%d.txt' % os.getpid())
            config.fault_handler_file = open(filename, 'w')
            faulthandler.enable(config.fault_handler_file)
Example #8
    def serve(self, *, sock, host, port, reloader_pid):
        faulthandler.enable()
        self.__finalize()

        loop = self.loop
        asyncio.set_event_loop(loop)

        server_coro = loop.create_server(
            lambda: self._protocol_factory(self), sock=sock)

        server = loop.run_until_complete(server_coro)

        loop.add_signal_handler(signal.SIGTERM, loop.stop)
        loop.add_signal_handler(signal.SIGINT, loop.stop)

        if reloader_pid:
            from japronto.reloader import ChangeDetector
            detector = ChangeDetector(loop)
            detector.start()

        print('Accepting connections on http://{}:{}'.format(host, port))

        try:
            loop.run_forever()
        finally:
            server.close()
            loop.run_until_complete(server.wait_closed())
            loop.run_until_complete(self.drain())
            self._reaper.stop()
            loop.close()

            # break reference and cleanup matcher buffer
            del self._matcher
Example #9
def run(self):
    faulthandler.enable()
    try:
        self.httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    self.httpd.server_close()
Example #10
def runtests(args=None):
    ''' Run the Bokeh tests under the bokeh python directory using pytest.

    Does not run tests from bokehjs or examples.

    Args:
        args(list, optional): command line arguments accepted by py.test

            e.g. args=['-s', '-k plotting'] prevents capture of standard out
            and only runs tests that match plotting. For more py.test options
            see http://pytest.org/latest/usage.html#usage.

    Returns:
        int: pytest exitcode

    '''

    import pytest
    import os

    try:
        import faulthandler
        faulthandler.enable()
    except ImportError:
        # We can live without in python 2.7
        pass

    # change to the bokeh python source directory, for test collection
    rootdir = os.path.join(os.path.dirname(__file__), os.pardir)
    os.chdir(rootdir)

    return pytest.main(args=args)
Example #11
def _setup_logging(verbose):
    log = logging.getLogger('forklift')

    log.logThreads = 0
    log.logProcesses = 0

    debug = 'DEBUG'
    info = 'INFO'

    if verbose:
        info = debug

    try:
        makedirs(dirname(log_location))
    except OSError:
        # the log directory may already exist
        pass

    file_handler = logging.handlers.RotatingFileHandler(log_location, backupCount=18)
    file_handler.doRollover()
    file_handler.setFormatter(detailed_formatter)
    file_handler.setLevel(debug)

    console_handler = logging.StreamHandler(stream=sys.stdout)
    console_handler.setFormatter(detailed_formatter)
    console_handler.setLevel(info)

    log.addHandler(file_handler)
    log.addHandler(console_handler)
    log.setLevel(debug)

    faulthandler.enable(file_handler.stream)

    return log
Example #12
    def __init__(self, config):
        faulthandler.enable()
        assert isinstance(config, CXXConfig)
        self.config = config

        self.index = cindex.Index.create()
        self._translation_units = {}
Example #13
def init_faulthandler(fileobj=sys.__stderr__):
    """Enable faulthandler module if available.

    This print a nice traceback on segfaults.

    We use sys.__stderr__ instead of sys.stderr here so this will still work
    when sys.stderr got replaced, e.g. by "Python Tools for Visual Studio".

    Args:
        fobj: An opened file object to write the traceback to.
    """
    if fileobj is None:
        # When run with pythonw.exe, sys.__stderr__ can be None:
        # https://docs.python.org/3/library/sys.html#sys.__stderr__
        # If we'd enable faulthandler in that case, we just get a weird
        # exception, so we don't enable faulthandler if we have no stderr.
        #
        # Later when we have our data dir available we re-enable faulthandler
        # to write to a file so we can display a crash to the user at the next
        # start.
        return
    faulthandler.enable(fileobj)
    if hasattr(faulthandler, "register") and hasattr(signal, "SIGUSR1"):
        # If available, we also want a traceback on SIGUSR1.
        faulthandler.register(signal.SIGUSR1)  # pylint: disable=no-member
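
Note: the comment above mentions re-enabling faulthandler against a file once
a data directory is available. A minimal, hypothetical sketch of that second
step: calling faulthandler.enable() again simply retargets the handler, and
the file must remain open (and referenced) while the handler is enabled.

import faulthandler
import os

_crash_log = None  # module-level reference keeps the file open

def init_log_faulthandler(data_dir):
    # Hypothetical helper name; data_dir is assumed to exist by now.
    global _crash_log
    _crash_log = open(os.path.join(data_dir, 'crash.log'), 'w')
    faulthandler.enable(_crash_log)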
Example #14
def init_test_environ():
    """This needs to be called before any test can be run.

    Before exiting the process call exit_test_environ() to clean up
    any resources created.
    """

    global _TEMP_DIR, _BUS_INFO, _VDISPLAY, _faulthandler_fobj

    # create a user dir in /tmp and set env vars
    _TEMP_DIR = tempfile.mkdtemp(prefix=fsnative(u"QL-TEST-"))

    # needed for dbus/dconf
    runtime_dir = tempfile.mkdtemp(prefix=fsnative(u"RUNTIME-"), dir=_TEMP_DIR)
    os.chmod(runtime_dir, 0o700)
    environ["XDG_RUNTIME_DIR"] = runtime_dir

    # force the old cache dir so that GStreamer can re-use the GstRegistry
    # cache file
    environ["XDG_CACHE_HOME"] = xdg_get_cache_home()
    # GStreamer will update the cache if the environment has changed
    # (in Gst.init()). Since it takes 0.5s here and doesn't add much,
    # disable it. If the registry cache is missing it will be created
    # despite this setting.
    environ["GST_REGISTRY_UPDATE"] = fsnative(u"no")

    # set HOME and remove all XDG vars that default to it if not set
    home_dir = tempfile.mkdtemp(prefix=fsnative(u"HOME-"), dir=_TEMP_DIR)
    environ["HOME"] = home_dir

    # set to new default
    environ.pop("XDG_DATA_HOME", None)

    if xvfbwrapper is not None:
        _VDISPLAY = xvfbwrapper.Xvfb()
        _VDISPLAY.start()

    _BUS_INFO = None
    if os.name != "nt" and "DBUS_SESSION_BUS_ADDRESS" in environ:
        _BUS_INFO = dbus_launch_user()
        environ.update(_BUS_INFO)

    quodlibet.init(no_translations=True, no_excepthook=True)
    quodlibet.app.name = "QL Tests"

    # to get around pytest silencing
    _faulthandler_fobj = os.fdopen(os.dup(sys.__stderr__.fileno()), "w")
    faulthandler.enable(_faulthandler_fobj)

    # try to make things the same in case a different locale is active.
    # LANG for gettext, setlocale for number formatting etc.
    # Note: setlocale has to be called after Gtk.init()
    try:
        if os.name != "nt":
            environ["LANG"] = locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
        else:
            environ["LANG"] = "en_US.utf8"
            locale.setlocale(locale.LC_ALL, "english")
    except locale.Error:
        pass
Example #15
def run_faulthandler():
    if 'faulthandler' in sys.builtin_module_names:
        import faulthandler
        try:
            faulthandler.enable(2)   # manually set to stderr
        except ValueError:
            pass      # ignore "2 is not a valid file descriptor"
Example #16
def command(self):
    faulthandler.enable()
    self.basic_setup()
    MailServer((tg.config.get('forgemail.host', '0.0.0.0'),
                asint(tg.config.get('forgemail.port', 8825))),
               None)
    asyncore.loop()
Example #17
def pytest_configure(config):
    def fake_initialize_db(path):
        ratesdb = RatesDB(':memory:', async_=False)
        ratesdb.register_rate_provider = lambda *a: None
        Currencies.set_rates_db(ratesdb)

    import faulthandler
    faulthandler.enable()
    global global_monkeypatch
    monkeypatch = config.pluginmanager.getplugin('monkeypatch')
    global_monkeypatch = monkeypatch.MonkeyPatch()
    # The vast majority of moneyGuru's tests require that ensure_rates is patched to nothing to
    # avoid hitting the currency server during tests. However, some tests still need it. This is
    # why we keep it around so that those tests can re-patch it.
    global_monkeypatch.setattr(currency_module, 'initialize_db', fake_initialize_db)
    # Avoid false test failures caused by timezones messing up our date fakeries.
    # See http://stackoverflow.com/questions/9915058/pythons-fromtimestamp-does-a-discrete-jump
    os.environ['TZ'] = 'UTC'
    try:
        time.tzset()
    except AttributeError:
        # We're on Windows. Oh, well...
        pass

    from . import base
    tmpdir = config.pluginmanager.getplugin('tmpdir')
    tmp_path_factory = tmpdir.TempPathFactory.from_config(config)
    base._global_tmpdir = tmp_path_factory.mktemp('mgtest')
Example #18
    def bind_signal_handlers(self):
        """Binds signals for graceful shutdown.

        - SIGTERM and SIGINT lead to a graceful shutdown.
        - SIGSEGV, SIGFPE, SIGABRT, SIGBUS and SIGILL displays a traceback
          using the faulthandler library.

        """
        def signal_graceful_shutdown(signum, frame):
            """
            Note: Function is nested to have a reference to *self*.

            """
            if not self.is_alive:
                return

            logger.info(
                'signal %d caught. Shutting down %s',
                signum,
                self.name,
            )
            self.is_alive = False
            self.stop(graceful=True)

        if 'faulthandler' in sys.modules:
            faulthandler.enable()
        signal.signal(signal.SIGTERM, signal_graceful_shutdown)
        signal.signal(signal.SIGINT, signal_graceful_shutdown)
Example #19
def run():
    """ Run BAG Explorer.  Handles all command-line arguments, etc."""

    try:
        import faulthandler
        faulthandler.enable()
    except ImportError:
        pass

    import sys
    import os.path as op

    app = BagExplorerApp(False)

    load_plugins()

    urls = sys.argv[1:]

    for url in urls:
        if "://" not in url:
            # assumed to be file path
            url = utils.path2url(op.abspath(url))
        if not open_store(url):
            log.warning('Failed to open "%s"; no handlers' % url)

    f = frame.InitFrame()

    if utils.is_darwin:
        wx.MenuBar.MacSetCommonMenuBar(f.GetMenuBar())
    else:
        f.Show()

    app.MainLoop()
Example #20
def setup_fault_handler(target=None):
	if target is None:
		faulthandler.enable()
		_pre_log_buffer.append('<faulthandler> enabled, target = [console]: %s' % faulthandler)
		return
	_pre_log_buffer.append('<faulthandler> enabled, target = [%s]: %s' % (target, faulthandler))
	faulthandler.enable(file = target)
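
Note: a plausible call pattern for the helper above (an assumption, not taken
from the project) is to enable on the console during early startup and then
re-target crash dumps once the real log file exists:

setup_fault_handler()                    # early startup: dump to the console
log_file = open('client.log', 'a')       # later, once logging is configured
setup_fault_handler(target=log_file)     # log_file must stay open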
Example #21
def template_error(request):
    import faulthandler
    faulthandler.enable()
    context = {
        'entries': [Entry('v1', 'v1'), Entry('v2', 'v2')]
    }
    ret = render(request, 'my_app/template_error.html', context)
    return ret
Example #22
def preflight():
    check_sudo()
    check_db()

    # Print a traceback when the process receives signal SIGSEGV, SIGFPE,
    # SIGABRT, SIGBUS or SIGILL
    import faulthandler
    faulthandler.enable()
Example #23
    def _install_faulthandler() -> None:
        """ Utility to help debug segfaults. """
        segfault_filename = Options.nxdrive_home / "logs" / "segfault.log"
        log.info(f"Enabling faulthandler in {segfault_filename!r}")

        # The log file must stay open for as long as the fault handler is
        # active, so it is deliberately not opened with a context manager
        # here (closing it would leave faulthandler with a dead descriptor).
        fh = segfault_filename.open(mode="a", encoding="utf-8")
        fh.write(f"\n\n\n>>> {datetime.now()}\n")
        fh.flush()
        faulthandler.enable(file=fh)
Example #24
File: orca.py Project: GNOME/orca
def main(cacheValues=True):
    """The main entry point for Orca.  The exit codes for Orca will
    loosely be based on signals, where the exit code will be the
    signal used to terminate Orca (if a signal was used).  Otherwise,
    an exit code of 0 means normal completion and an exit code of 50
    means Orca exited because of a hang."""

    if debug.debugFile and os.path.exists(debug.debugFile.name):
        faulthandler.enable(file=debug.debugFile, all_threads=False)
    else:
        faulthandler.enable(all_threads=False)

    # Method to call when we think something might be hung.
    #
    settings.timeoutCallback = timeout

    # Various signal handlers we want to listen for.
    #
    signal.signal(signal.SIGHUP, shutdownOnSignal)
    signal.signal(signal.SIGINT, shutdownOnSignal)
    signal.signal(signal.SIGTERM, shutdownOnSignal)
    signal.signal(signal.SIGQUIT, shutdownOnSignal)
    signal.signal(signal.SIGSEGV, crashOnSignal)

    if not _settingsManager.isAccessibilityEnabled():
        _settingsManager.setAccessibility(True)

    init(pyatspi.Registry)

    try:
        message = messages.START_ORCA
        script = _scriptManager.getDefaultScript()
        script.presentMessage(message)
    except Exception:
        debug.printException(debug.LEVEL_SEVERE)

    script = orca_state.activeScript
    if script:
        window = script.utilities.activeWindow()
        if window and not orca_state.locusOfFocus:
            try:
                app = window.getApplication()
            except Exception:
                msg = "ORCA: Exception getting app for %s" % window
                debug.println(debug.LEVEL_INFO, msg, True)
            else:
                script = _scriptManager.getScript(app, window)
                _scriptManager.setActiveScript(script, "Launching.")
            setLocusOfFocus(None, window)
            focusedObject = script.utilities.focusedObject(window)
            if focusedObject:
                setLocusOfFocus(None, focusedObject)

    try:
        start(pyatspi.Registry, cacheValues) # waits until we stop the registry
    except Exception:
        die(EXIT_CODE_HANG)
    return 0
Example #25
def main():
    faulthandler.enable()

    if len(sys.argv) != 3:
        print('usage: %s <disk> <mountpoint>' % sys.argv[0])
        sys.exit(1)

    op_mgr = manager.DiskManager(sys.argv[1])
    fuse_inst = fuse.FUSE(op_mgr, sys.argv[2], foreground=True)
Example #26
def _enable_faulthandler():
    try:
        # Enable full stack trace printout in case of a segfault
        # (Requires the faulthandler module from PyPI)
        import faulthandler
    except ImportError:
        return
    else:
        faulthandler.enable()
Example #27
def setup_faulthandler(m):
    # for reporting the Python line at which a segfault occurs
    try:
        import faulthandler
    except ImportError:
        import log
        log.logger.info('Could not import faulthandler')
    else:
        faulthandler.enable(os.fdopen(m.meta['fileno'], 'w'))
Example #28
def setup_faulthandler(args):
    try:
        import faulthandler
    except ImportError:
        sys.stderr.write('running without faulthandler\n')
        return
    else:
        faulthandler.enable()
        faulthandler.register(signal.SIGINT)
Example #29
def install_fault_handler(error_file='fault_error_log.txt'):
    """
    Enable the fault handler to find crashes where Python dies.
    Output is written to a file instead of sys.stderr because the
    latter does not work in IPython.
    """
    f = open(error_file, 'w')
    import faulthandler
    faulthandler.enable(file=f, all_threads=True)
Example #30
def _enable_faulthandler():
    """ Enable faulthandler (if we can), so that we get tracebacks
    on segfaults.
    """
    try:
        import faulthandler
        faulthandler.enable()
        print('Faulthandler enabled')
    except Exception:
        print('Could not enable faulthandler')
Example #31
        total_tags = len(combined_tag_list)

        # Keep tag if empty list or tag already in list
        if total_tags == 0 or tag in combined_tag_list:
            return tag

        # Get tag probs
        tag_probs = defaultdict(lambda: 0)
        for tag in combined_tag_list:
            tag_probs[tag] += 1 / total_tags
        return max(tag_probs, key=tag_probs.get)

    # Find fault
    segfault_file = 'error_log'
    f = open(segfault_file, 'a')
    faulthandler.enable(f)

    #sys.path.append('/home/liefe/py/we')
    
    # Fix random seed
    np.random.seed(42)

    # Parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--batch_size", default=50, type=int, help="Batch size.")
    parser.add_argument("--cle_dim", default=64, type=int, help="Character-level embedding dimension.")
    parser.add_argument("--cnne_filters", default=16, type=int, help="CNN embedding filters per length.")
    parser.add_argument("--optimizer", default="Adam", type=str, help="Optimizer.")    
    parser.add_argument("--cnne_max", default=4, type=int, help="Maximum CNN filter length.")
    parser.add_argument("--epochs", default=5, type=int, help="Number of epochs.")
    parser.add_argument("--recodex", default=False, action="store_true", help="ReCodEx mode.")
Example #32
# todo: soft-code colors and parameters
# todo: add type hints
# todo: DocString on functions/methods

# OPTIMIZATION
# todo: lazy import (do I really?)

# KNOWN BUGS
# todo: if markers are present when ROI is moved and they fall outside ROI, software behaviour is undefined
# todo: while _update_images() is running, moving the mouse wheel caused a marker to appear in the threshold area (???)

if __name__ == '__main__':
    import sys
    from PyQt5.QtWidgets import QApplication

    class TestApp(QApplication):
        def __init__(self, argv):
            super(TestApp, self).__init__(argv)

            self.setApplicationName(APP_NAME)
            self.setApplicationVersion(APP_VERSION)

    with open('traceback_dump.txt', 'w+') as dump_file:
        faulthandler.enable(file=dump_file)
        app = TestApp(sys.argv)

        gui = InvadopodiaGui()
        gui.show()

        sys.exit(app.exec_())
Example #33
from hashlib import sha256
from hmac import compare_digest
from functools import wraps
import re
import signal
import faulthandler
import sys, os
import binascii

from PyQt5.QtCore import QObject, QCoreApplication, QThreadPool

from debugger import *; dbg
import api
import settings

faulthandler.enable() #Print backtraces in case of crash. (sigsegv & co)
#sio = socketio.Server(threaded=True, async_handlers=True)
qtApp = QCoreApplication(sys.argv)
threadpool = QThreadPool()

indexHTML = open('app/index.html', 'rb')


availableCalls = re.findall(
	"""\s*?(\w*?)\(""",
	os.popen(
		"gdbus introspect --system --dest ca.krontech.chronos.control --object-path /ca/krontech/chronos/control"
	).read()
)
availableCalls.remove('notify') #Imperfect regex, fix by using dbus introspection or adding the availableMethods call + data to the API. (We need a bit more data to select the right API method, some just get information and can be cached as a GET.)
availableKeys = api.control.callSync('availableKeys')
Example #34
def setup_tests(ns):
    try:
        stderr_fd = sys.__stderr__.fileno()
    except (ValueError, AttributeError):
        # Catch ValueError to catch io.UnsupportedOperation on TextIOBase
        # and ValueError on a closed stream.
        #
        # Catch AttributeError for stderr being None.
        stderr_fd = None
    else:
        # Display the Python traceback on fatal errors (e.g. segfault)
        faulthandler.enable(all_threads=True, file=stderr_fd)

        # Display the Python traceback on SIGALRM or SIGUSR1 signal
        signals = []
        if hasattr(signal, 'SIGALRM'):
            signals.append(signal.SIGALRM)
        if hasattr(signal, 'SIGUSR1'):
            signals.append(signal.SIGUSR1)
        for signum in signals:
            faulthandler.register(signum, chain=True, file=stderr_fd)

    replace_stdout()
    support.record_original_stdout(sys.stdout)

    if ns.testdir:
        # Prepend test directory to sys.path, so runtest() will be able
        # to locate tests
        sys.path.insert(0, os.path.abspath(ns.testdir))

    # Sometimes __path__ and __file__ are not absolute (e.g. while running from
    # Lib/) and, if we change the CWD to run the tests in a temporary dir, some
    # imports might fail.  This affects only the modules imported before os.chdir().
    # These modules are searched first in sys.path[0] (so '' -- the CWD) and if
    # they are found in the CWD their __file__ and __path__ will be relative (this
    # happens before the chdir).  All the modules imported after the chdir, are
    # not found in the CWD, and since the other paths in sys.path[1:] are absolute
    # (site.py absolutize them), the __file__ and __path__ will be absolute too.
    # Therefore it is necessary to absolutize manually the __file__ and __path__ of
    # the packages to prevent later imports from failing when the CWD is different.
    for module in sys.modules.values():
        if hasattr(module, '__path__'):
            for index, path in enumerate(module.__path__):
                module.__path__[index] = os.path.abspath(path)
        if getattr(module, '__file__', None):
            module.__file__ = os.path.abspath(module.__file__)

    if ns.huntrleaks:
        unittest.BaseTestSuite._cleanup = False

    if ns.memlimit is not None:
        support.set_memlimit(ns.memlimit)

    if ns.threshold is not None:
        gc.set_threshold(ns.threshold)

    try:
        import msvcrt
    except ImportError:
        pass
    else:
        msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS
                            | msvcrt.SEM_NOALIGNMENTFAULTEXCEPT
                            | msvcrt.SEM_NOGPFAULTERRORBOX
                            | msvcrt.SEM_NOOPENFILEERRORBOX)
        try:
            msvcrt.CrtSetReportMode
        except AttributeError:
            # release build
            pass
        else:
            for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]:
                if ns.verbose and ns.verbose >= 2:
                    msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE)
                    msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR)
                else:
                    msvcrt.CrtSetReportMode(m, 0)

    support.use_resources = ns.use_resources
Example #35
def debug_faults():
    import faulthandler
    faulthandler.enable()
Example #36
def install_debugging_signal_handler():
    import faulthandler
    faulthandler.enable()
Example #37
def setup_tests(ns):
    try:
        stderr_fd = sys.__stderr__.fileno()
    except (ValueError, AttributeError):
        # Catch ValueError to catch io.UnsupportedOperation on TextIOBase
        # and ValueError on a closed stream.
        #
        # Catch AttributeError for stderr being None.
        stderr_fd = None
    else:
        # Display the Python traceback on fatal errors (e.g. segfault)
        faulthandler.enable(all_threads=True, file=stderr_fd)

        # Display the Python traceback on SIGALRM or SIGUSR1 signal
        signals = []
        if hasattr(signal, 'SIGALRM'):
            signals.append(signal.SIGALRM)
        if hasattr(signal, 'SIGUSR1'):
            signals.append(signal.SIGUSR1)
        for signum in signals:
            faulthandler.register(signum, chain=True, file=stderr_fd)

    replace_stdout()
    support.record_original_stdout(sys.stdout)

    if ns.testdir:
        # Prepend test directory to sys.path, so runtest() will be able
        # to locate tests
        sys.path.insert(0, os.path.abspath(ns.testdir))

    # Sometimes __path__ and __file__ are not absolute (e.g. while running from
    # Lib/) and, if we change the CWD to run the tests in a temporary dir, some
    # imports might fail.  This affects only the modules imported before os.chdir().
    # These modules are searched first in sys.path[0] (so '' -- the CWD) and if
    # they are found in the CWD their __file__ and __path__ will be relative (this
    # happens before the chdir).  All the modules imported after the chdir, are
    # not found in the CWD, and since the other paths in sys.path[1:] are absolute
    # (site.py absolutize them), the __file__ and __path__ will be absolute too.
    # Therefore it is necessary to absolutize manually the __file__ and __path__ of
    # the packages to prevent later imports from failing when the CWD is different.
    for module in sys.modules.values():
        if hasattr(module, '__path__'):
            for index, path in enumerate(module.__path__):
                module.__path__[index] = os.path.abspath(path)
        if hasattr(module, '__file__'):
            module.__file__ = os.path.abspath(module.__file__)

    # MacOSX (a.k.a. Darwin) has a default stack size that is too small
    # for deeply recursive regular expressions.  We see this as crashes in
    # the Python test suite when running test_re.py and test_sre.py.  The
    # fix is to set the stack limit to 2048.
    # This approach may also be useful for other Unixy platforms that
    # suffer from small default stack limits.
    if sys.platform == 'darwin':
        try:
            import resource
        except ImportError:
            pass
        else:
            soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
            newsoft = min(hard, max(soft, 1024 * 2048))
            resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))

    if ns.huntrleaks:
        unittest.BaseTestSuite._cleanup = False

        # Avoid false positives due to various caches
        # filling slowly with random data:
        warm_caches()

    if ns.memlimit is not None:
        support.set_memlimit(ns.memlimit)

    if ns.threshold is not None:
        gc.set_threshold(ns.threshold)

    try:
        import msvcrt
    except ImportError:
        pass
    else:
        msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS
                            | msvcrt.SEM_NOALIGNMENTFAULTEXCEPT
                            | msvcrt.SEM_NOGPFAULTERRORBOX
                            | msvcrt.SEM_NOOPENFILEERRORBOX)
        try:
            msvcrt.CrtSetReportMode
        except AttributeError:
            # release build
            pass
        else:
            for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]:
                if ns.verbose and ns.verbose >= 2:
                    msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE)
                    msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR)
                else:
                    msvcrt.CrtSetReportMode(m, 0)

    support.use_resources = ns.use_resources
Example #38
def run():
    """Run command."""
    from mne.commands.utils import get_optparser, _add_verbose_flag

    parser = get_optparser(__file__)

    parser.add_option("-d",
                      "--subjects-dir",
                      dest="subjects_dir",
                      default=None,
                      help="Subjects directory")
    parser.add_option("-s",
                      "--subject",
                      dest="subject",
                      default=None,
                      help="Subject name")
    parser.add_option("-f",
                      "--fiff",
                      dest="inst",
                      default=None,
                      help="FIFF file with digitizer data for coregistration")
    parser.add_option("-t",
                      "--tabbed",
                      dest="tabbed",
                      action="store_true",
                      default=False,
                      help="Option for small screens: Combine "
                      "the data source panel and the coregistration panel "
                      "into a single panel with tabs.")
    parser.add_option("--no-guess-mri",
                      dest="guess_mri_subject",
                      action='store_false',
                      default=None,
                      help="Prevent the GUI from automatically guessing and "
                      "changing the MRI subject when a new head shape source "
                      "file is selected.")
    parser.add_option("--head-opacity",
                      type=float,
                      default=None,
                      dest="head_opacity",
                      help="The opacity of the head surface, in the range "
                      "[0, 1].")
    parser.add_option("--high-res-head",
                      action='store_true',
                      default=False,
                      dest="high_res_head",
                      help="Use a high-resolution head surface.")
    parser.add_option("--low-res-head",
                      action='store_true',
                      default=False,
                      dest="low_res_head",
                      help="Use a low-resolution head surface.")
    parser.add_option('--trans',
                      dest='trans',
                      default=None,
                      help='Head<->MRI transform FIF file ("-trans.fif")')
    parser.add_option('--project-eeg',
                      dest='project_eeg',
                      action='store_true',
                      default=None,
                      help="Project EEG electrodes to the head surface ("
                      "for visualization purposes only)")
    parser.add_option('--orient-to-surface',
                      action='store_true',
                      default=None,
                      dest='orient_to_surface',
                      help='Orient points to the surface.')
    parser.add_option('--scale-by-distance',
                      action='store_true',
                      default=None,
                      dest='scale_by_distance',
                      help='Scale points by distance from the surface.')
    parser.add_option('--mark-inside',
                      action='store_true',
                      default=None,
                      dest='mark_inside',
                      help='Mark points inside the head using a different '
                      'color.')
    parser.add_option('--interaction',
                      type=str,
                      default=None,
                      dest='interaction',
                      help='Interaction style to use, can be "trackball" or '
                      '"terrain".')
    parser.add_option('--scale',
                      type=float,
                      default=None,
                      dest='scale',
                      help='Scale factor for the scene.')
    parser.add_option('--simple-rendering',
                      action='store_false',
                      dest='advanced_rendering',
                      help='Use simplified OpenGL rendering')
    _add_verbose_flag(parser)

    options, args = parser.parse_args()

    if options.low_res_head:
        if options.high_res_head:
            raise ValueError("Can't specify --high-res-head and "
                             "--low-res-head at the same time.")
        head_high_res = False
    elif options.high_res_head:
        head_high_res = True
    else:
        head_high_res = None

    # expanduser allows ~ for --subjects-dir
    subjects_dir = options.subjects_dir
    if subjects_dir is not None:
        subjects_dir = op.expanduser(subjects_dir)
    trans = options.trans
    if trans is not None:
        trans = op.expanduser(trans)
    try:
        import faulthandler
        faulthandler.enable()
    except ImportError:
        pass  # old Python2
    with ETSContext():
        mne.gui.coregistration(options.tabbed,
                               inst=options.inst,
                               subject=options.subject,
                               subjects_dir=subjects_dir,
                               guess_mri_subject=options.guess_mri_subject,
                               head_opacity=options.head_opacity,
                               head_high_res=head_high_res,
                               trans=trans,
                               scrollable=True,
                               project_eeg=options.project_eeg,
                               orient_to_surface=options.orient_to_surface,
                               scale_by_distance=options.scale_by_distance,
                               mark_inside=options.mark_inside,
                               interaction=options.interaction,
                               scale=options.scale,
                               advanced_rendering=options.advanced_rendering,
                               verbose=options.verbose)
Example #39
import faulthandler; faulthandler.enable()

#from dummy_depth_pid import *
from depth_pid import *
from Timer import *
from VideoStream import *
from Network import *
from Equation import *
from Observer_Pattern import *
from UDP import *
#from Sensor import *
from HAT import *
from DummySensor import *
#from DummyHat import *
import selectors
import select
import sys


class ROV_19:

    def __init__(self):
        # ================= ROV System =========================
        # For PI 19
        self.RaspberryPi_IP = '10.1.1.15'
        self.Laptop_IP = '10.1.1.14'
#        import os
#        os.kill(os.getpid(),11)
        # For Local
#        self.RaspberryPi_IP = '127.0.0.1'
Example #40
import os

os.environ["DUI_STYLE"] = "StyleWhite"
import sys

os.chdir(os.path.dirname(os.path.realpath(sys.argv[0])))
from PyQt5.QtCore import QCoreApplication, Qt, QThread
from PyQt5.QtQml import qmlRegisterType
if os.name == 'posix':
    QCoreApplication.setAttribute(Qt.AA_X11InitThreads, True)

from dwidgets import QSingleApplication
import config
import faulthandler
import threading
from config.constants import LogCachePath

crash_log_file = open(os.path.join(LogCachePath, 'crash.log'), 'w')
faulthandler.enable(crash_log_file, all_threads=True)

if __name__ == '__main__':
    app = QSingleApplication(config.applicationName, sys.argv)
    if app.isRunning():
        sys.exit(0)

    from app import DeepinPlayer
    deepinPlayer = DeepinPlayer.instance()
    deepinPlayer.show()
    exitCode = app.exec_()
    sys.exit(exitCode)
Example #41
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
Created on Feb 11, 2015

@author: brian
"""

try:
    import faulthandler  # @UnresolvedImport
    faulthandler.enable()  # @UndefinedVariable
except Exception:
    # if there's no console, this fails
    pass

import sys, multiprocessing, logging, traceback, threading

from traits.etsconfig.api import ETSConfig
ETSConfig.toolkit = 'qt4'


def log_notification_handler(_, trait_name, old, new):

    (exc_type, exc_value, tb) = sys.exc_info()
    logging.debug('Exception occurred in traits notification '
                  'handler for object: %s, trait: %s, old value: %s, '
Example #42
import faulthandler
faulthandler.enable()

import unittest
from qcor import *


class TestKernelBuilder(unittest.TestCase):
    def test_simple_bell(self):
        set_qpu('qpp', {'shots': 100})
        builder = KernelBuilder()
        builder.h(0)
        for i in range(5):
            builder.cnot(i, i + 1)
        builder.measure_all()
        ghz_6 = builder.create()
        q = qalloc(6)
        ghz_6(q)
        counts = q.counts()
        print(counts)
        self.assertTrue('111111' in counts)
        self.assertTrue('000000' in counts)
        self.assertTrue(len(counts) == 2)

    def test_variational_spec(self):
        set_qpu('qpp')
        builder = KernelBuilder()

        builder.x(0)
        builder.ry(1, 't')
        builder.cnot(1, 0)
Example #43
    def pytest_configure(self, config):
        import faulthandler

        stderr_fd_copy = os.dup(self._get_stderr_fileno())
        config._store[fault_handler_stderr_key] = open(stderr_fd_copy, "w")
        faulthandler.enable(file=config._store[fault_handler_stderr_key])
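
Note: _get_stderr_fileno() is a plugin helper that is not shown here. A
hypothetical sketch of what it likely does: prefer the current sys.stderr,
falling back to the original stream when stderr has been replaced by a
capture wrapper without a real file descriptor.

import sys

def _get_stderr_fileno():
    # Hypothetical sketch. io.UnsupportedOperation is a subclass of
    # ValueError, so this also covers in-memory capture buffers.
    try:
        return sys.stderr.fileno()
    except (AttributeError, ValueError):
        return sys.__stderr__.fileno()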
Example #44
import atexit
import copy
import datetime
import faulthandler
import logging
import os
import platform
import sys
import tempfile
import unittest

import numpy

faulthandler.enable()  # to debug seg faults and timeouts

import cfdm

n_tmpfiles = 1
tmpfiles = [
    tempfile.mkstemp("_test_functions.nc", dir=os.getcwd())[1]
    for i in range(n_tmpfiles)
]
(temp_file,) = tmpfiles


def _remove_tmpfiles():
    """Remove temporary files created during tests."""
    for f in tmpfiles:
        try:
            os.remove(f)
        except OSError:
Example #45
    def serve(self, *, sock, host, port, loop, run_async=False):
        faulthandler.enable()

        #pr = cProfile.Profile()
        #pr.enable()
        #cProfile.runctx('test(num)', globals(), locals(), 'prof%d.prof' %num)

        #sock.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 0) #TODO uvloop .9.1 sets this

        #profiler_start(b"mrhttp.log")

        if not loop:
            loop = self.loop
            asyncio.set_event_loop(loop)
        else:
            self._loop = loop

        self.session_backend_type = 1
        if self.session_backend == "mrworkserver":
            self.session_backend_type = 2
        elif self.session_backend == "mrcache":
            self.session_backend_type = 3

        self.requests = [Request() for x in range(128)]
        self.cinit()
        self.router.finalize_routes()
        self.router.setupRoutes()
        self._appStart()

        if self.uses_mrq:
            mrqconf = self.config.get("mrq", None)
            if not mrqconf:
                print("When using MrQ app.config['mrq'] must be set. Exiting")
                exit(1)
            srvs = self.config.get("mrq", None)
            if type(srvs) != list or len(srvs) == 0 or type(srvs[0]) != tuple:
                print(
                    "When using MrQ app.config['mrq'] must be set to a list of (host,port) tuple pairs. Exiting"
                )
                exit(1)
            self._mrq = MrqClient(srvs, self.loop)

        if self.uses_session:

            self.setupSessionClient()

        self.trigger_event("at_start")

        server_coro = loop.create_server(lambda: self._protocol_factory(self),
                                         sock=sock)
        if run_async:
            return server_coro

        # Try except here?
        server = loop.run_until_complete(server_coro)

        print('Accepting connections on http://{}:{}'.format(host, port))

        self.trigger_event("after_start")

        loop.add_signal_handler(signal.SIGTERM, loop.stop)
        #loop.add_signal_handler(signal.SIGINT,  loop.stop)

        try:
            loop.run_forever()
        except KeyboardInterrupt:
            pass
        finally:
            loop.run_until_complete(loop.shutdown_asyncgens())
            loop.run_until_complete(self.drain())
            self._connections.clear()
            server.close()
            loop = asyncio.get_event_loop()
            loop.run_until_complete(server.wait_closed())
            self.trigger_event("at_end")
            loop.close()
            for r in self.requests:
                r.cleanup()
            self.requests = None
Example #46
    else:
        application = QApplication(sys.argv)
        application.removePostedEvents(None)
        _crash_handler = CrashHandler(hook_type, value, traceback, has_started)
        # This means the QtApplication could be created and so the splash screen. Then Cura closes it
        if CuraApplication.splash is not None:
            CuraApplication.splash.close()
        _crash_handler.early_crash_dialog.show()
        sys.exit(application.exec_())


# Set exception hook to use the crash dialog handler
sys.excepthook = exceptHook
# Enable dumping traceback for all threads
if sys.stderr:
    faulthandler.enable(file = sys.stderr, all_threads = True)
else:
    faulthandler.enable(file = sys.stdout, all_threads = True)

# Workaround for a race condition on certain systems where there
# is a race condition between Arcus and PyQt. Importing Arcus
# first seems to prevent Sip from going into a state where it
# tries to create PyQt objects on a non-main thread.
import Arcus #@UnusedImport
import Savitar #@UnusedImport
from cura.CuraApplication import CuraApplication


# WORKAROUND: CURA-6739
# The CTM file loading module in Trimesh requires the OpenCTM library to be dynamically loaded. It uses
# ctypes.util.find_library() to find libopenctm.dylib, but this doesn't seem to look in the ".app" application folder
Example #47
async def main_async(options, stdout_log_handler):

    # Get paths
    cachepath = options.cachepath

    backend_factory = get_backend_factory(options)
    backend_pool = BackendPool(backend_factory)
    atexit.register(backend_pool.flush)

    # Retrieve metadata
    with backend_pool() as backend:
        (param, db) = get_metadata(backend, cachepath)

    #if param['max_obj_size'] < options.min_obj_size:
    #    raise QuietError('Maximum object size must be bigger than minimum object size.',
    #                     exitcode=2)

    # Handle --cachesize
    rec_cachesize = options.max_cache_entries * param['max_obj_size'] / 2
    avail_cache = shutil.disk_usage(os.path.dirname(cachepath))[2] / 1024
    if options.cachesize is None:
        options.cachesize = min(rec_cachesize, 0.8 * avail_cache)
        log.info('Setting cache size to %d MB', options.cachesize / 1024)
    elif options.cachesize > avail_cache:
        log.warning('Requested cache size %d MB, but only %d MB available',
                    options.cachesize / 1024, avail_cache / 1024)

    if options.nfs:
        # NFS may try to look up '..', so we have to speed up this kind of query
        log.info('Creating NFS indices...')
        db.execute(
            'CREATE INDEX IF NOT EXISTS ix_contents_inode ON contents(inode)')

    else:
        db.execute('DROP INDEX IF EXISTS ix_contents_inode')

    metadata_upload_task = MetadataUploadTask(backend_pool, param, db,
                                              options.metadata_upload_interval)
    block_cache = BlockCache(backend_pool, db, cachepath + '-cache',
                             options.cachesize * 1024,
                             options.max_cache_entries)
    commit_task = CommitTask(block_cache)
    operations = fs.Operations(block_cache,
                               db,
                               max_obj_size=param['max_obj_size'],
                               inode_cache=InodeCache(db, param['inode_gen']),
                               upload_task=metadata_upload_task)
    block_cache.fs = operations
    metadata_upload_task.fs = operations

    async with trio.open_nursery() as nursery:
        with ExitStack() as cm:
            log.info('Mounting %s at %s...', options.storage_url,
                     options.mountpoint)
            try:
                pyfuse3.init(operations, options.mountpoint,
                             get_fuse_opts(options))
            except RuntimeError as exc:
                raise QuietError(str(exc), exitcode=39)

            unmount_clean = False

            def unmount():
                log.info("Unmounting file system...")
                pyfuse3.close(unmount=unmount_clean)

            cm.callback(unmount)

            if options.fg or options.systemd:
                faulthandler.enable()
                faulthandler.register(signal.SIGUSR1)
            else:
                if stdout_log_handler:
                    logging.getLogger().removeHandler(stdout_log_handler)
                crit_log_fd = os.open(
                    os.path.join(options.cachedir, 'mount.s3ql_crit.log'),
                    flags=os.O_APPEND | os.O_CREAT | os.O_WRONLY,
                    mode=0o644)
                faulthandler.enable(crit_log_fd)
                faulthandler.register(signal.SIGUSR1, file=crit_log_fd)
                daemonize(options.cachedir)

            mark_metadata_dirty(backend, cachepath, param)

            block_cache.init(options.threads)

            nursery.start_soon(metadata_upload_task.run,
                               name='metadata-upload-task')
            cm.callback(metadata_upload_task.stop)

            nursery.start_soon(commit_task.run, name='commit-task')
            cm.callback(commit_task.stop)

            exc_info = setup_exchook()

            if options.systemd:
                import systemd.daemon
                systemd.daemon.notify('READY=1')

            ret = None
            try:
                toggle_int_signal_handling(True)
                ret = await pyfuse3.main()
            except KeyboardInterrupt:
                # re-block SIGINT before the log.info() call to reduce the possibility of a second KeyboardInterrupt
                toggle_int_signal_handling(False)
                log.info("Got CTRL-C. Exit gracefully.")
            finally:
                # For a clean unmount we need to ignore any repeated SIGINTs from here
                toggle_int_signal_handling(False)
                await operations.destroy()
                await block_cache.destroy(options.keep_cache)

            if ret is not None:
                raise RuntimeError('Received signal %d, terminating' % (ret, ))

            # Re-raise if main loop terminated due to exception in other thread
            if exc_info:
                (exc_inst, exc_tb) = exc_info
                raise exc_inst.with_traceback(exc_tb)

            log.info("FUSE main loop terminated.")

            unmount_clean = True

    # At this point, there should be no other threads left

    # Do not update .params yet, dump_metadata() may fail if the database is
    # corrupted, in which case we want to force an fsck.
    if operations.failsafe:
        log.warning('File system errors encountered, marking for fsck.')
        param['needs_fsck'] = True
    with backend_pool() as backend:
        seq_no = get_seq_no(backend)
        if metadata_upload_task.db_mtime == os.stat(cachepath +
                                                    '.db').st_mtime:
            log.info('File system unchanged, not uploading metadata.')
            del backend['s3ql_seq_no_%d' % param['seq_no']]
            param['seq_no'] -= 1
            save_params(cachepath, param)
        elif seq_no == param['seq_no']:
            param['last-modified'] = time.time()
            dump_and_upload_metadata(backend, db, param)
            save_params(cachepath, param)
        else:
            log.error(
                'Remote metadata is newer than local (%d vs %d), '
                'refusing to overwrite!', seq_no, param['seq_no'])
            log.error(
                'The locally cached metadata will be *lost* the next time the file system '
                'is mounted or checked and has therefore been backed up.')
            for name in (cachepath + '.params', cachepath + '.db'):
                for i in range(4)[::-1]:
                    if os.path.exists(name + '.%d' % i):
                        os.rename(name + '.%d' % i, name + '.%d' % (i + 1))
                os.rename(name, name + '.0')

    log.info('Cleaning up local metadata...')
    db.execute('ANALYZE')
    db.execute('VACUUM')
    db.close()

    log.info('All done.')
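
Note: toggle_int_signal_handling() is an s3ql helper that is not shown above.
A hypothetical sketch of the idea: let SIGINT through while the FUSE main
loop runs, and ignore it during cleanup so a second Ctrl-C cannot interrupt
the unmount.

import signal

def toggle_int_signal_handling(enable):
    # Hypothetical sketch, not the original implementation.
    if enable:
        signal.signal(signal.SIGINT, signal.default_int_handler)
    else:
        signal.signal(signal.SIGINT, signal.SIG_IGN)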
Example #48
def main():
    # Parse the command-line arguments
    parser = argparse.ArgumentParser(description='Linux Show Player')
    parser.add_argument('-f',
                        '--file',
                        default='',
                        nargs='?',
                        const='',
                        help='Session file path')
    parser.add_argument('-l',
                        '--log',
                        choices=['debug', 'info', 'warning'],
                        default='warning',
                        help='Log level')
    parser.add_argument('--locale', default='', help='Force specified locale')

    args = parser.parse_args()

    # Set the logging level
    if args.log == 'debug':
        log = logging.DEBUG

        # If something bad happens at low level (e.g. a segfault), print the stack
        import faulthandler
        faulthandler.enable()
    elif args.log == 'info':
        log = logging.INFO
    else:
        log = logging.WARNING

    logging.basicConfig(
        format='%(asctime)s.%(msecs)03d %(levelname)s:: %(message)s',
        datefmt='%H:%M:%S',
        level=log)

    # Create the QApplication
    qt_app = QApplication(sys.argv)
    qt_app.setApplicationName('Linux Show Player')
    qt_app.setQuitOnLastWindowClosed(True)

    # Force light font, for environment with "bad" QT support.
    appFont = qt_app.font()
    appFont.setWeight(QFont.Light)
    qt_app.setFont(appFont)
    # Set icons and theme from the application configuration
    QIcon.setThemeSearchPaths(styles.IconsThemePaths)
    QIcon.setThemeName(config['Theme']['icons'])
    styles.apply_style(config['Theme']['theme'])

    # Get/Set the locale
    locale = args.locale
    if locale:
        QLocale().setDefault(QLocale(locale))

    logging.info('Using {} locale'.format(QLocale().name()))

    # Main app translations
    translator = QTranslator()
    translator.load(QLocale(), 'lisp', '_',
                    path.join(path.dirname(path.realpath(__file__)), 'i18n'))

    qt_app.installTranslator(translator)
    ui_translators = [translator]

    # Qt platform translation
    translator = QTranslator()
    translator.load(QLocale(), 'qt', '_',
                    QLibraryInfo.location(QLibraryInfo.TranslationsPath))

    qt_app.installTranslator(translator)
    ui_translators.append(translator)

    # Modules and plugins translations
    for tr_file in chain(modules.translations(), plugins.translations()):
        translator = QTranslator()
        translator.load(QLocale(), tr_file, '_')

        qt_app.installTranslator(translator)
        ui_translators.append(translator)

    # Create the application
    lisp_app = Application()
    # Load modules and plugins
    modules.load_modules()
    plugins.load_plugins()

    # Start/Initialize LiSP Application
    lisp_app.start(session_file=args.file)
    # Start Qt Application (block until exit)
    exit_code = qt_app.exec_()

    # Finalize the application
    lisp_app.finalize()
    # Exit
    sys.exit(exit_code)
Example #49
def setup_tests(ns):
    try:
        stderr_fd = sys.__stderr__.fileno()
    except (ValueError, AttributeError):
        # Catch ValueError to catch io.UnsupportedOperation on TextIOBase
        # and ValueError on a closed stream.
        #
        # Catch AttributeError for stderr being None.
        stderr_fd = None
    else:
        # Display the Python traceback on fatal errors (e.g. segfault)
        faulthandler.enable(all_threads=True, file=stderr_fd)

        # Display the Python traceback on SIGALRM or SIGUSR1 signal
        signals = []
        if hasattr(signal, 'SIGALRM'):
            signals.append(signal.SIGALRM)
        if hasattr(signal, 'SIGUSR1'):
            signals.append(signal.SIGUSR1)
        for signum in signals:
            faulthandler.register(signum, chain=True, file=stderr_fd)

    replace_stdout()
    support.record_original_stdout(sys.stdout)

    if ns.testdir:
        # Prepend test directory to sys.path, so runtest() will be able
        # to locate tests
        sys.path.insert(0, os.path.abspath(ns.testdir))

    # Sometimes __path__ and __file__ are not absolute (e.g. while running from
    # Lib/) and, if we change the CWD to run the tests in a temporary dir, some
    # imports might fail.  This affects only the modules imported before os.chdir().
    # These modules are searched first in sys.path[0] (so '' -- the CWD) and if
    # they are found in the CWD their __file__ and __path__ will be relative (this
    # happens before the chdir).  All the modules imported after the chdir, are
    # not found in the CWD, and since the other paths in sys.path[1:] are absolute
    # (site.py absolutize them), the __file__ and __path__ will be absolute too.
    # Therefore it is necessary to manually absolutize the __file__ and __path__
    # of the packages, to prevent later imports from failing when the CWD differs.
    for module in sys.modules.values():
        if hasattr(module, '__path__'):
            for index, path in enumerate(module.__path__):
                module.__path__[index] = os.path.abspath(path)
        if getattr(module, '__file__', None):
            module.__file__ = os.path.abspath(module.__file__)

    if ns.huntrleaks:
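        # Keep test instances alive after they run; reference-leak hunting
        # re-runs suites and relies on stable object lifetimes.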
        unittest.BaseTestSuite._cleanup = False

    if ns.memlimit is not None:
        support.set_memlimit(ns.memlimit)

    if ns.threshold is not None:
        gc.set_threshold(ns.threshold)

    suppress_msvcrt_asserts(ns.verbose and ns.verbose >= 2)

    support.use_resources = ns.use_resources

    if hasattr(sys, 'addaudithook'):
        # Add an auditing hook for all tests to ensure PySys_Audit is tested
        def _test_audit_hook(name, args):
            pass

        sys.addaudithook(_test_audit_hook)
Example #50
        return

    ctx = await bot.get_context(message)
    if ctx.command is not None:  # builtins first
        await bot.invoke(ctx)
    elif ctx.invoked_with:  # then aliases if there is some word (and not just the prefix)
        await handle_aliases(ctx)


@bot.event
async def on_command(ctx):
    try:
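        # message.guild is None in DMs; the attribute access below then raises
        # AttributeError and the PM form is logged instead.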
        log.debug(
            "cmd: chan {0.message.channel} ({0.message.channel.id}), serv {0.message.guild} ({0.message.guild.id}), "
            "auth {0.message.author} ({0.message.author.id}): {0.message.content}"
            .format(ctx))
    except AttributeError:
        log.debug(
            "Command in PM with {0.message.author} ({0.message.author.id}): {0.message.content}"
            .format(ctx))


for cog in COGS:
    bot.load_extension(cog)

if __name__ == '__main__':
    faulthandler.enable()  # assumes we log errors to stderr, traces segfaults
    bot.state = "run"
    bot.loop.create_task(compendium.reload_task(bot.mdb))
    bot.run(config.TOKEN)
Example #51
 def _setup_faulthandler(self, trace_stream):
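   # Dump Python tracebacks to trace_stream on fatal signals (e.g. SIGSEGV).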
   faulthandler.enable(trace_stream)
   # This permits a non-fatal `kill -31 <pants pid>` for stacktrace retrieval.
   faulthandler.register(signal.SIGUSR2, trace_stream, chain=True)
Example #52
def main(infile, outfile):
    faulthandler_log_path = os.environ.get("PYTHON_FAULTHANDLER_DIR", None)
    try:
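        # Write each worker's fault log to <dir>/<pid> so tracebacks from
        # concurrently running workers don't interleave.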
        if faulthandler_log_path:
            faulthandler_log_path = os.path.join(faulthandler_log_path,
                                                 str(os.getpid()))
            faulthandler_log_file = open(faulthandler_log_path, "w")
            faulthandler.enable(file=faulthandler_log_file)

        boot_time = time.time()
        split_index = read_int(infile)
        if split_index == -1:  # for unit tests
            sys.exit(-1)

        version = utf8_deserializer.loads(infile)
        if version != "%d.%d" % sys.version_info[:2]:
            raise RuntimeError((
                "Python in worker has different version %s than that in " +
                "driver %s, PySpark cannot run with different minor versions. "
                + "Please check environment variables PYSPARK_PYTHON and " +
                "PYSPARK_DRIVER_PYTHON are correctly set.") %
                               ("%d.%d" % sys.version_info[:2], version))

        # read inputs only for a barrier task
        isBarrier = read_bool(infile)
        boundPort = read_int(infile)
        secret = UTF8Deserializer().loads(infile)

        # set up memory limits
        memory_limit_mb = int(
            os.environ.get("PYSPARK_EXECUTOR_MEMORY_MB", "-1"))
        if memory_limit_mb > 0 and has_resource_module:
            total_memory = resource.RLIMIT_AS
            try:
                (soft_limit, hard_limit) = resource.getrlimit(total_memory)
                msg = "Current mem limits: {0} of max {1}\n".format(
                    soft_limit, hard_limit)
                print(msg, file=sys.stderr)

                # convert to bytes
                new_limit = memory_limit_mb * 1024 * 1024

                if soft_limit == resource.RLIM_INFINITY or new_limit < soft_limit:
                    msg = "Setting mem limits to {0} of max {1}\n".format(
                        new_limit, new_limit)
                    print(msg, file=sys.stderr)
                    resource.setrlimit(total_memory, (new_limit, new_limit))

            except (resource.error, OSError, ValueError) as e:
                # not all systems support resource limits, so warn instead of failing
                lineno = (getframeinfo(currentframe()).lineno + 1
                          if currentframe() is not None else 0)
                print(
                    warnings.formatwarning(
                        "Failed to set memory limit: {0}".format(e),
                        ResourceWarning,
                        __file__,
                        lineno,
                    ),
                    file=sys.stderr,
                )

        # initialize global state
        taskContext = None
        if isBarrier:
            taskContext = BarrierTaskContext._getOrCreate()
            BarrierTaskContext._initialize(boundPort, secret)
            # Set the task context instance here, so we can get it by TaskContext.get for
            # both TaskContext and BarrierTaskContext
            TaskContext._setTaskContext(taskContext)
        else:
            taskContext = TaskContext._getOrCreate()
        # read inputs for TaskContext info
        taskContext._stageId = read_int(infile)
        taskContext._partitionId = read_int(infile)
        taskContext._attemptNumber = read_int(infile)
        taskContext._taskAttemptId = read_long(infile)
        taskContext._cpus = read_int(infile)
        taskContext._resources = {}
        for r in range(read_int(infile)):
            key = utf8_deserializer.loads(infile)
            name = utf8_deserializer.loads(infile)
            addresses = []
            for a in range(read_int(infile)):
                addresses.append(utf8_deserializer.loads(infile))
            taskContext._resources[key] = ResourceInformation(name, addresses)

        taskContext._localProperties = dict()
        for i in range(read_int(infile)):
            k = utf8_deserializer.loads(infile)
            v = utf8_deserializer.loads(infile)
            taskContext._localProperties[k] = v

        shuffle.MemoryBytesSpilled = 0
        shuffle.DiskBytesSpilled = 0
        _accumulatorRegistry.clear()

        # fetch name of workdir
        spark_files_dir = utf8_deserializer.loads(infile)
        SparkFiles._root_directory = spark_files_dir
        SparkFiles._is_running_on_worker = True

        # fetch names of includes (*.zip and *.egg files) and construct PYTHONPATH
        add_path(
            spark_files_dir)  # *.py files that were added will be copied here
        num_python_includes = read_int(infile)
        for _ in range(num_python_includes):
            filename = utf8_deserializer.loads(infile)
            add_path(os.path.join(spark_files_dir, filename))

        importlib.invalidate_caches()

        # fetch names and values of broadcast variables
        needs_broadcast_decryption_server = read_bool(infile)
        num_broadcast_variables = read_int(infile)
        if needs_broadcast_decryption_server:
            # read the decrypted data from a server in the jvm
            port = read_int(infile)
            auth_secret = utf8_deserializer.loads(infile)
            (broadcast_sock_file,
             _) = local_connect_and_auth(port, auth_secret)

        for _ in range(num_broadcast_variables):
            bid = read_long(infile)
            if bid >= 0:
                if needs_broadcast_decryption_server:
                    read_bid = read_long(broadcast_sock_file)
                    assert read_bid == bid
                    _broadcastRegistry[bid] = Broadcast(
                        sock_file=broadcast_sock_file)
                else:
                    path = utf8_deserializer.loads(infile)
                    _broadcastRegistry[bid] = Broadcast(path=path)

            else:
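                # A negative id is a removal request: -bid - 1 recovers the id
                # of the broadcast variable to drop from the registry.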
                bid = -bid - 1
                _broadcastRegistry.pop(bid)

        if needs_broadcast_decryption_server:
            broadcast_sock_file.write(b"1")
            broadcast_sock_file.close()

        _accumulatorRegistry.clear()
        eval_type = read_int(infile)
        if eval_type == PythonEvalType.NON_UDF:
            func, profiler, deserializer, serializer = read_command(
                pickleSer, infile)
        else:
            func, profiler, deserializer, serializer = read_udfs(
                pickleSer, infile, eval_type)

        init_time = time.time()

        def process():
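            # Pull rows from the JVM, run them through the task function, and
            # stream results back; always close the output iterator.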
            iterator = deserializer.load_stream(infile)
            out_iter = func(split_index, iterator)
            try:
                serializer.dump_stream(out_iter, outfile)
            finally:
                if hasattr(out_iter, "close"):
                    out_iter.close()

        if profiler:
            profiler.profile(process)
        else:
            process()

        # Reset task context to None. This is a guard code to avoid residual context when worker
        # reuse.
        TaskContext._setTaskContext(None)
        BarrierTaskContext._setTaskContext(None)
    except BaseException as e:
        try:
            exc_info = None
            if os.environ.get("SPARK_SIMPLIFIED_TRACEBACK", False):
                tb = try_simplify_traceback(sys.exc_info()[-1])
                if tb is not None:
                    e.__cause__ = None
                    exc_info = "".join(
                        traceback.format_exception(type(e), e, tb))
            if exc_info is None:
                exc_info = traceback.format_exc()

            write_int(SpecialLengths.PYTHON_EXCEPTION_THROWN, outfile)
            write_with_length(exc_info.encode("utf-8"), outfile)
        except IOError:
            # JVM close the socket
            pass
        except BaseException:
            # Write the error to stderr if it happened while serializing
            print("PySpark worker failed with exception:", file=sys.stderr)
            print(traceback.format_exc(), file=sys.stderr)
        sys.exit(-1)
    finally:
        if faulthandler_log_path:
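            # Remove the per-worker fault log on a normal exit; a leftover
            # file means the worker died hard enough to skip this cleanup.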
            faulthandler.disable()
            faulthandler_log_file.close()
            os.remove(faulthandler_log_path)
    finish_time = time.time()
    report_times(outfile, boot_time, init_time, finish_time)
    write_long(shuffle.MemoryBytesSpilled, outfile)
    write_long(shuffle.DiskBytesSpilled, outfile)

    # Mark the beginning of the accumulators section of the output
    write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
    write_int(len(_accumulatorRegistry), outfile)
    for (aid, accum) in _accumulatorRegistry.items():
        pickleSer._write_with_length((aid, accum._value), outfile)

    # check end of stream
    if read_int(infile) == SpecialLengths.END_OF_STREAM:
        write_int(SpecialLengths.END_OF_STREAM, outfile)
    else:
        # write a different value to tell JVM to not reuse this worker
        write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
        sys.exit(-1)
Example #53
# imports assumed from the usage in this snippet
import faulthandler
import tkinter as tk


def run():
    """Run app"""
    # Print a traceback if the GUI dies on a fatal signal (e.g. a segfault in Tk/Tcl).
    faulthandler.enable()
    root = tk.Tk()
    MainApplication(root).pack(side="top", fill="both", expand=True)
    root.mainloop()
Example #54
def pytest_configure(config):
    config.option.continue_on_collection_errors = True

    config.from_ya_test = "YA_TEST_RUNNER" in os.environ
    config.test_logs = collections.defaultdict(dict)
    config.test_metrics = {}
    config.suite_metrics = {}
    config.configure_timestamp = time.time()
    context = {
        "project_path": config.option.project_path,
        "test_stderr": config.option.test_stderr,
        "test_debug": config.option.test_debug,
        "build_type": config.option.build_type,
        "test_traceback": config.option.tbstyle,
        "flags": config.option.flags,
        "sanitize": config.option.sanitize,
    }
    config.ya = Ya(
        config.option.mode,
        config.option.source_root,
        config.option.build_root,
        config.option.dep_roots,
        config.option.output_dir,
        config.option.test_params,
        context,
        config.option.python_path,
        config.option.valgrind_path,
        config.option.gdb_path,
        config.option.data_root,
    )
    config.option.test_log_level = {
        "critical": logging.CRITICAL,
        "error": logging.ERROR,
        "warning": logging.WARN,
        "info": logging.INFO,
        "debug": logging.DEBUG,
    }[config.option.test_log_level]

    if not config.option.collectonly:
        setup_logging(os.path.join(config.ya.output_dir, "run.log"),
                      config.option.test_log_level)
    config.current_item_nodeid = None
    config.current_test_name = None
    config.test_cores_count = 0
    config.collect_cores = config.option.collect_cores
    config.sanitizer_extra_checks = config.option.sanitizer_extra_checks
    config.test_tool_bin = config.option.test_tool_bin

    if config.sanitizer_extra_checks:
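        # Drop the runner-injected LSAN/ASAN options and restore the user's
        # originals preserved in the *_ORIGINAL environment variables.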
        for envvar in ['LSAN_OPTIONS', 'ASAN_OPTIONS']:
            if envvar in os.environ:
                os.environ.pop(envvar)
            if envvar + '_ORIGINAL' in os.environ:
                os.environ[envvar] = os.environ[envvar + '_ORIGINAL']

    if config.option.root_dir:
        config.rootdir = config.invocation_dir = py.path.local(
            config.option.root_dir)

    # Arcadia paths from the test DEPENDS section of CMakeLists.txt
    sys.path.insert(
        0, os.path.join(config.option.source_root, config.option.project_path))
    sys.path.extend([
        os.path.join(config.option.source_root, d)
        for d in config.option.dep_roots
    ])
    sys.path.extend([
        os.path.join(config.option.build_root, d)
        for d in config.option.dep_roots
    ])

    # The build root is required for correct imports of protobufs, because imports
    # are resolved relative to the root (like import devtools.dummy_arcadia.protos.lib.my_proto_pb2)
    sys.path.append(config.option.build_root)
    os.environ["PYTHONPATH"] = os.pathsep.join(
        os.environ.get("PYTHONPATH", "").split(os.pathsep) + sys.path)

    if not config.option.collectonly:
        if config.option.ya_trace_path:
            config.ya_trace_reporter = TraceReportGenerator(
                config.option.ya_trace_path)
        else:
            config.ya_trace_reporter = DryTraceReportGenerator(
                config.option.ya_trace_path)
    config.ya_version = config.option.ya_version

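    # Let the custom importer resolve modules from the build root and the
    # declared dependency roots.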
    sys.meta_path.append(
        CustomImporter([config.option.build_root] + [
            os.path.join(config.option.build_root, dep)
            for dep in config.option.dep_roots
        ]))
    if config.option.pdb_on_sigusr1:
        configure_pdb_on_demand()

    # Dump python backtrace in case of any errors
    faulthandler.enable()
    if hasattr(signal, "SIGQUIT"):
        # SIGQUIT is used by test_tool to tear down tests that overrun their timeout
        faulthandler.register(signal.SIGQUIT, chain=True)

    if hasattr(signal, "SIGUSR2"):
        signal.signal(signal.SIGUSR2, _graceful_shutdown)
Example #55
    query_endpoint = endpoints.TCP4ServerEndpoint(reactor,
                                                  13000,
                                                  interface=interface_ip)
    query_endpoint.listen(BlockScraperFactory())
    stdio.StandardIO(ServerConsole())
    print("[ShipProxy] Loading plugins...")
    import glob

    for plug in glob.glob("plugins/*.py"):
        plug = plug[:-3]
        plug = plug.replace(os.sep, '.')
        print("[ShipProxy] Importing %s..." % plug)
        __import__(plug)
    for f in plugin_manager.onStart:
        f()
    reactor.suggestThreadPoolSize(30)
    reactor.run()
    data.clients.dbManager.close_db()
    for f in plugin_manager.onStop:
        f()


if __name__ == "__main__":
    if not os.path.exists("log/"):
        os.makedirs("log/")
    if useFaulthandler:
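        # The file handle stays open for the process lifetime: faulthandler
        # writes directly to its fd when a fatal signal arrives.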
        faulthandler.enable(file=open('log/tracestack.log', 'w+'),
                            all_threads=True)
        # faulthandler.dump_traceback_later()
    main()
Example #56
File: run.py Project: MBradbury/slp
def main(argv):
    # Print a traceback in the case of segfaults
    faulthandler.enable()

    if __debug__:
        if len(argv) <= 1:
            print(
                "Please provide the algorithm module as the first parameter. (e.g., algorithm.protectionless)",
                file=sys.stderr)
            return 1

    module = argv[1]

    if __debug__:
        if not (module.startswith('algorithm.')
                or module.startswith('cluster.')):
            print(
                "You can only run algorithms in the 'algorithm' or 'cluster' module.",
                file=sys.stderr)
            return 2

    algorithm_module = algorithm.import_algorithm(module, extras=["Arguments"])

    a = algorithm_module.Arguments.Arguments()
    a.parse(argv[2:])

    sim = submodule_loader.load(simulator.sim, a.args.sim)

    if a.args.mode in ("SINGLE", "GUI", "RAW", "PARALLEL"):
        sim.build(module, a)

    # Make the mode SINGLE, as PROFILE is SINGLE except for not building the code
    if a.args.mode == "PROFILE":
        a.args.mode = "SINGLE"

    # Set the thread count, but only for jobs that need it
    if hasattr(a.args, "thread_count") and a.args.thread_count is None:
        import psutil
        # Set the number of usable CPUs
        a.args.thread_count = len(psutil.Process().cpu_affinity())

    # When doing cluster array jobs only print out this header information on the first job
    if a.args.mode != "CLUSTER" or a.args.job_id is None or a.args.job_id == 1:
        from datetime import datetime

        metrics_class = MetricsCommon.import_algorithm_metrics(
            module, a.args.sim, a.args.extra_metrics)

        # Print out the versions of slp-algorithms-tinyos and tinyos being used
        print(f"@version:python={VersionDetection.python_version()}")
        print(f"@version:numpy={VersionDetection.numpy_version()}")

        print(
            f"@version:slp-algorithms={VersionDetection.slp_algorithms_version()}"
        )

        sim.print_version()

        # Print other potentially useful meta data
        print(f"@date:{str(datetime.now())}")
        print(f"@host:{os.uname()}")

        # Record what algorithm is being run and under what simulator
        print(f"@module:{module}")
        print(f"@sim:{a.args.sim}")

        # Print out the argument settings
        sim.print_arguments(module, a)

        # Print the header for the results
        metrics_class.print_header()

        # Make sure this header has been written
        sys.stdout.flush()

    # Because of the way TOSSIM is architectured each individual simulation
    # needs to be run in a separate process.
    if a.args.mode in ("GUI", "SINGLE", "RAW"):
        sim.run_simulation(module, a, print_warnings=True)
    else:
        _run_parallel(sim, module, a, argv)
                                                    "backend.log"),
                                       encoding="UTF-8",
                                       mode="w")
    file_handler.setFormatter(logFormatter)
    file_handler.setLevel(logging.INFO)
    logger.addHandler(file_handler)

    # Don't litter user stderr with thonny logging
    # TODO: Can I somehow send the log to front-end's stderr?
    """
    stream_handler = logging.StreamHandler(stream=sys.stderr)
    stream_handler.setLevel(logging.INFO);
    stream_handler.setFormatter(logFormatter)
    logger.addHandler(stream_handler)
    """

    logger.setLevel(logging.INFO)

    import faulthandler

    fault_out = open(os.path.join(THONNY_USER_DIR, "backend_faults.log"),
                     mode="w")
    faulthandler.enable(fault_out)

    # Disable blurry scaling in Windows
    thonny.set_dpi_aware()

    from thonny.backend import VM  # @UnresolvedImport

    VM().mainloop()
Example #58
from parameterized import parameterized, param
from .timeout import run_with_timeout
from .wifi_helpers import _connect_wifi

SPEED_NORMAL = 500
SPEED_GMLAN = 33.3
BUS_SPEEDS = [(0, SPEED_NORMAL), (1, SPEED_NORMAL), (2, SPEED_NORMAL),
              (3, SPEED_GMLAN)]
TIMEOUT = 45
GEN2_HW_TYPES = [Panda.HW_TYPE_BLACK_PANDA, Panda.HW_TYPE_UNO]
GPS_HW_TYPES = [
    Panda.HW_TYPE_GREY_PANDA, Panda.HW_TYPE_BLACK_PANDA, Panda.HW_TYPE_UNO
]

# Enable fault debug (all_threads=False dumps only the current thread on a crash)
faulthandler.enable(all_threads=False)

# Connect to Panda Jungle
panda_jungle = PandaJungle()

# Find all connected Pandas
_panda_serials = None


def init_panda_serials():
    global panda_jungle, _panda_serials
    _panda_serials = []
    panda_jungle.set_panda_power(True)
    time.sleep(5)
    for serial in Panda.list():
        p = Panda(serial=serial)
Example #59
 def setupCrashLog(self):
     p = os.path.join(self.pm.base, "crash.log")
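     # Unbuffered binary append, so traceback bytes reach the disk even if
     # the process dies mid-write.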
     self._crashLog = open(p, "ab", 0)
     faulthandler.enable(self._crashLog)
Example #60
 def setUp(self):
     super(GeoNodeBaseTestSupport, self).setUp()
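     # Dump a Python traceback if a test crashes in native code.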
     faulthandler.enable()
     logging.debug(" Test setUp. Creating models.")
     self.get_obj_ids = create_models(type=self.get_type,
                                      integration=self.get_integration)