Example #1
 def __init__(self):
     super().__init__()
     # allow us to see a thread-dump on SIGQUIT
     faulthandler.register(signal.SIGQUIT, file=sys.stderr)
     self.children = []
     self.received_responses = []
     self.status = None
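A minimal, self-contained sketch of the same pattern (a standalone script, not taken from the project above): register a SIGQUIT thread-dump handler, then trigger it from inside the process with os.kill, which has the same effect as running `kill -QUIT <pid>` from a shell. Note that faulthandler.register is not available on Windows.

import faulthandler
import os
import signal
import sys

# Dump the stack of every thread to stderr whenever SIGQUIT arrives;
# the process keeps running after the dump.
faulthandler.register(signal.SIGQUIT, file=sys.stderr)

if __name__ == "__main__":
    os.kill(os.getpid(), signal.SIGQUIT)  # equivalent to `kill -QUIT <pid>`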
Example #2
def init_faulthandler(fileobj=sys.__stderr__):
    """Enable faulthandler module if available.

    This prints a nice traceback on segfaults.

    We use sys.__stderr__ instead of sys.stderr here so this will still work
    when sys.stderr has been replaced, e.g. by "Python Tools for Visual Studio".

    Args:
        fileobj: An opened file object to write the traceback to.
    """
    if fileobj is None:
        # When run with pythonw.exe, sys.__stderr__ can be None:
        # https://docs.python.org/3/library/sys.html#sys.__stderr__
        # If we'd enable faulthandler in that case, we just get a weird
        # exception, so we don't enable faulthandler if we have no stderr.
        #
        # Later when we have our data dir available we re-enable faulthandler
        # to write to a file so we can display a crash to the user at the next
        # start.
        return
    faulthandler.enable(fileobj)
    if (hasattr(faulthandler, 'register') and hasattr(signal, 'SIGUSR1') and
            sys.stderr is not None):
        # If available, we also want a traceback on SIGUSR1.
        # pylint: disable=no-member,useless-suppression
        faulthandler.register(signal.SIGUSR1)
Example #3
  def reset_interactive_output_stream(
    cls,
    interactive_output_stream,
    override_faulthandler_destination=True
  ):
    """
    Class state:
    - Overwrites `cls._interactive_output_stream`.
    OS state:
    - Overwrites the SIGUSR2 handler.

    This method registers a SIGUSR2 handler, which permits a non-fatal `kill -31 <pants pid>` for
    stacktrace retrieval. This is also where the error message on fatal exit will be printed.
    """
    try:
      # NB: mutate process-global state!
      # This permits a non-fatal `kill -31 <pants pid>` for stacktrace retrieval.
      if override_faulthandler_destination:
        faulthandler.register(signal.SIGUSR2, interactive_output_stream,
                              all_threads=True, chain=False)
      # NB: mutate the class variables!
      cls._interactive_output_stream = interactive_output_stream
    except ValueError:
      # Warn about "ValueError: IO on closed file" when the stream is closed.
      cls.log_exception(
        "Cannot reset interactive_output_stream -- stream (probably stderr) is closed")
Example #4
def init_faulthandler(fileobj=sys.__stderr__):
    """Enable faulthandler module if available.

    This prints a nice traceback on segfaults.

    We use sys.__stderr__ instead of sys.stderr here so this will still work
    when sys.stderr has been replaced, e.g. by "Python Tools for Visual Studio".

    Args:
        fileobj: An opened file object to write the traceback to.
    """
    try:
        faulthandler.enable(fileobj)
    except (RuntimeError, AttributeError) as e:
        # When run with pythonw.exe, sys.__stderr__ can be None:
        # https://docs.python.org/3/library/sys.html#sys.__stderr__
        #
        # With PyInstaller, it can be a NullWriter raising AttributeError on
        # fileno: https://github.com/pyinstaller/pyinstaller/issues/4481
        #
        # Later when we have our data dir available we re-enable faulthandler
        # to write to a file so we can display a crash to the user at the next
        # start.
        log.debug(f"Failed to enable early faulthandler: {e}", exc_info=True)
        return

    if (hasattr(faulthandler, 'register') and hasattr(signal, 'SIGUSR1') and
            sys.stderr is not None):
        # If available, we also want a traceback on SIGUSR1.
        # pylint: disable=no-member,useless-suppression
        faulthandler.register(signal.SIGUSR1)
Example #5
def main(args=None):
    faulthandler.enable()
    faulthandler.register(signal.SIGUSR1)

    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)

    backend_factory = get_backend_factory(options)
    backend_pool = BackendPool(backend_factory)
    atexit.register(backend_pool.flush)

    # Get paths
    cachepath = get_backend_cachedir(options.storage_url, options.cachedir)

    # Retrieve metadata
    with backend_pool() as backend:
        (param, db) = get_metadata(backend, cachepath)

    retrieve_objects(db, backend_factory, options.corrupted_file,
                     options.missing_file, thread_count=options.parallel,
                     full=options.data, offset=options.start_with)

    if options.corrupted_file.tell() or options.missing_file.tell():
        sys.exit(46)
    else:
        os.unlink(options.corrupted_file.name)
        os.unlink(options.missing_file.name)
        sys.exit(0)
Example #6
@contextlib.contextmanager
def show_mail_on_error():
    if six.PY3:
        import faulthandler

        faulthandler.enable()
        with contextlib.suppress(AttributeError, ImportError):
            import signal

            faulthandler.register(signal.SIGUSR2, all_threads=True)
    try:
        yield
    except Exception as e:
        text = u"Please report this error to [email protected]:"
        if len(e.args) == 0:
            e.args = (text,)
        elif issubclass(e.__class__, Sorry):
            raise
        elif len(e.args) == 1:
            if isinstance(e.args[0], six.text_type):
                if six.PY2:
                    e.args = (
                        (text + u" " + e.args[0]).encode(
                            "ascii", errors="xmlcharrefreplace"
                        ),
                    )
                else:
                    e.args = (text + u" " + e.args[0],)
            else:
                e.args = (str(text) + " " + str(e.args[0]),)
        else:
            e.args = (text,) + e.args
        raise
Example #7
    def reset_interactive_output_stream(
            cls,
            interactive_output_stream,
            override_faulthandler_destination=True):
        """
    Class state:
    - Overwrites `cls._interactive_output_stream`.
    OS state:
    - Overwrites the SIGUSR2 handler.

    This method registers a SIGUSR2 handler, which permits a non-fatal `kill -31 <pants pid>` for
    stacktrace retrieval. This is also where the error message on fatal exit will be printed.
    """
        try:
            # NB: mutate process-global state!
            # This permits a non-fatal `kill -31 <pants pid>` for stacktrace retrieval.
            if override_faulthandler_destination:
                faulthandler.register(signal.SIGUSR2,
                                      interactive_output_stream,
                                      all_threads=True,
                                      chain=False)
            # NB: mutate the class variables!
            cls._interactive_output_stream = interactive_output_stream
        except ValueError:
            # Warn about "ValueError: IO on closed file" when the stream is closed.
            cls.log_exception(
                "Cannot reset interactive_output_stream -- stream (probably stderr) is closed"
            )
Example #8
File: cli.py Project: aflp91/badwolf
def manage():
    """badwolf - A continuous integration and code lint review system for BitBucket"""
    try:
        import faulthandler
        faulthandler.register(signal.SIGUSR1)
    except ImportError:
        pass
Example #9
 def __init__(self):
     super().__init__()
     # allow us to see a thread-dump on SIGQUIT
     faulthandler.register(signal.SIGQUIT, file=sys.stderr)
     self.children = []
     self.received_responses = []
     self.status = None
Example #10
def initFaulthandler(sigusr1_chain=False):
    """
  Maybe installs signal handlers for SIGUSR1, SIGUSR2 and others.
  If no signal handlers are installed yet for SIGUSR1/2, we try to install our own Python handler.
  This also tries to install the handler from the faulthandler module,
  especially for SIGSEGV and others.

  :param bool sigusr1_chain: whether the default SIGUSR1 handler should also be called.
  """
    # If sigusr1_chain is set, we expect that there is already some handler
    # for SIGUSR1, and this will then not overwrite that handler.
    if install_signal_handler_if_default(signal.SIGUSR1):
        # There is already some handler or we installed our own handler now,
        # so in any case, it's safe to chain the handler.
        sigusr1_chain = True
    # Why not also SIGUSR2... SGE can also send this signal.
    install_signal_handler_if_default(signal.SIGUSR2)
    try:
        import faulthandler
    except ImportError as e:
        print("faulthandler import error. %s" % e)
    else:
        # Only enable if not yet enabled -- otherwise, leave it in its current state.
        if not faulthandler.is_enabled():
            faulthandler.enable()
            if os.name != 'nt':
                faulthandler.register(signal.SIGUSR1,
                                      all_threads=True,
                                      chain=sigusr1_chain)
    from Util import to_bool
    if os.environ.get("DEBUG_SIGNAL_HANDLER") and to_bool(
            os.environ.get("DEBUG_SIGNAL_HANDLER")):
        installLibSigSegfault()
        installNativeSignalHandler()
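The helper install_signal_handler_if_default is referenced but not shown above. A plausible sketch, assuming its contract is "install a stack-dumping Python handler only if the signal is still at its default disposition, and report whether our handler was installed" (the body below is an assumption, not the project's actual code):

import signal
import sys
import traceback

def install_signal_handler_if_default(signum):
    # Hypothetical sketch: never clobber an existing handler; only install
    # ours while the signal is still at its default disposition.
    # Returns True iff our handler was installed.
    if signal.getsignal(signum) == signal.SIG_DFL:
        def _dump_stack(sig, frame):
            print("Signal %d received, current stack:" % sig, file=sys.stderr)
            traceback.print_stack(frame, file=sys.stderr)
        signal.signal(signum, _dump_stack)
        return True
    return False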
Example #11
def _enable_faulthandler():
    #
    # In the event of a segfault, faulthandler will dump the currently
    # active stack so you can figure out what went wrong.
    #
    # Additionally, on non-Windows platforms we register a SIGUSR2
    # handler -- if you send the robot process a SIGUSR2, then
    # faulthandler will dump all of your current stacks. This can
    # be really useful for figuring out things like deadlocks.
    #

    import logging

    logger = logging.getLogger("faulthandler")

    try:
        # These should work on all platforms
        import faulthandler

        faulthandler.enable()
    except Exception as e:
        logger.warning("Could not enable faulthandler: %s", e)
        return

    try:
        import signal

        faulthandler.register(signal.SIGUSR2)
        logger.info("registered SIGUSR2 for PID %s", os.getpid())
    except Exception:
        return
Example #12
def init_faulthandler(fileobj=sys.__stderr__):
    """Enable faulthandler module if available.

    This prints a nice traceback on segfaults.

    We use sys.__stderr__ instead of sys.stderr here so this will still work
    when sys.stderr has been replaced, e.g. by "Python Tools for Visual Studio".

    Args:
        fileobj: An opened file object to write the traceback to.
    """
    if fileobj is None:
        # When run with pythonw.exe, sys.__stderr__ can be None:
        # https://docs.python.org/3/library/sys.html#sys.__stderr__
        # If we'd enable faulthandler in that case, we just get a weird
        # exception, so we don't enable faulthandler if we have no stderr.
        #
        # Later when we have our data dir available we re-enable faulthandler
        # to write to a file so we can display a crash to the user at the next
        # start.
        return
    faulthandler.enable(fileobj)
    if hasattr(faulthandler, "register") and hasattr(signal, "SIGUSR1"):
        # If available, we also want a traceback on SIGUSR1.
        faulthandler.register(signal.SIGUSR1)  # pylint: disable=no-member
Example #13
    def _run_command_process(self, args):
        """Command (child) process entry point. args contains the function to execute and all arguments."""

        setup_logging(args._level)

        command = ' '.join(sys.argv[1:])
        setproctitle('oc/command/%s' % command)
        faulthandler.register(signal.SIGUSR2, all_threads=True, chain=False)  # pylint:disable=no-member
        self._setup_requests_audit_headers(command)

        ret = 1
        try:
            chain = [self._run]
            if args._profile:
                chain.append(profiling_wrapper)
            if args._pdb:
                chain.append(pdb_wrapper)
            ret = call_chain(chain, args)
        except SystemExit as exc:
            ret = exc.code
        except ParserError as pe:
            pe.report()
        except Exception:  # pylint:disable=broad-except
            logging.exception('Top level exception in command process')
        finally:
            sys.exit(ret)
Example #14
def main(args=None):
    faulthandler.enable()
    faulthandler.register(signal.SIGUSR1)

    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)

    backend_factory = get_backend_factory(options)

    # Retrieve metadata
    with backend_factory() as backend:
        (param, db) = get_metadata(backend, options.cachepath)

    retrieve_objects(db,
                     backend_factory,
                     options.corrupted_file,
                     options.missing_file,
                     thread_count=options.parallel,
                     full=options.data,
                     offset=options.start_with)

    if options.corrupted_file.tell() or options.missing_file.tell():
        sys.exit(46)
    else:
        os.unlink(options.corrupted_file.name)
        os.unlink(options.missing_file.name)
        sys.exit(0)
Example #15
File: cli.py Project: clsb/miles
def main():
    parser = argparse.ArgumentParser(description='Milestoning tool')
    parser.add_argument('--profile', required=False, metavar='FILE',
                        type=argparse.FileType('w', encoding='utf-8'),
                        help='run under profiler and save report to '
                        '%(metavar)s')
    parser.add_argument('--random-seed', required=False,
                        metavar='RANDOM-SEED', type=float,
                        default=time.time(), help='use prescribed '
                        'random seed')

    subparsers = parser.add_subparsers(help='sub-command help', dest='cmd')

    cmds = commands.Commands(subparsers, commands.command_list)

    if argcomplete:
        argcomplete.autocomplete(parser)

    args = parser.parse_args(sys.argv[1:])

    random.seed(args.random_seed)

    if args.cmd is None:
        parser.print_help()
        sys.exit(-1)

    faulthandler.register(signal.SIGUSR1)

    cmd = cmds[args.cmd]
    with Profiler(args.profile):
        cmd.do(args)

    if args.profile:
        args.profile.close()
Example #16
File: Debug.py Project: rwth-i6/returnn
def init_faulthandler(sigusr1_chain=False):
  """
  Maybe installs signal handlers for SIGUSR1, SIGUSR2 and others.
  If no signal handlers are installed yet for SIGUSR1/2, we try to install our own Python handler.
  This also tries to install the handler from the faulthandler module,
  especially for SIGSEGV and others.

  :param bool sigusr1_chain: whether the default SIGUSR1 handler should also be called.
  """
  from Util import to_bool
  # Enable libSigSegfault first, so that we can have both,
  # because faulthandler will also call the original sig handler.
  if os.environ.get("DEBUG_SIGNAL_HANDLER") and to_bool(os.environ.get("DEBUG_SIGNAL_HANDLER")):
    install_lib_sig_segfault()
    install_native_signal_handler()
  if sys.platform != 'win32':
    # If sigusr1_chain is set, we expect that there is already some handler
    # for SIGUSR1, and this will then not overwrite that handler.
    if install_signal_handler_if_default(signal.SIGUSR1):
      # There is already some handler or we installed our own handler now,
      # so in any case, it's safe to chain the handler.
      sigusr1_chain = True
    # Why not also SIGUSR2... SGE can also send this signal.
    install_signal_handler_if_default(signal.SIGUSR2)
  try:
    import faulthandler
  except ImportError as e:
    print("faulthandler import error. %s" % e)
  else:
    # Only enable if not yet enabled -- otherwise, leave it in its current state.
    if not faulthandler.is_enabled():
      faulthandler.enable()
      if sys.platform != 'win32':
        faulthandler.register(signal.SIGUSR1, all_threads=True, chain=sigusr1_chain)
Example #17
    def __call__(self, resources, test_env, proc_func, args, kwargs,
                 use_dill_for_args):
        """The wrapper function that actually gets run in child process(es)."""

        global _barrier

        self._resources = resources
        _barrier = self._resources.barrier
        proc_func = dill.loads(proc_func)
        if use_dill_for_args:
            args = dill.loads(args)
            kwargs = dill.loads(kwargs)

        if faulthandler is not None:
            faulthandler.enable()
            faulthandler.register(signal.SIGTERM, chain=True)

        # All logging should go to stderr to be streamed to the main process.
        logging.set_stderrthreshold(logging.DEBUG)

        # Assign sys.stdout and sys.stderr as duplicates of `streaming_pipe_w` so
        # print() and logging.*() write directly to `streaming_pipe_w`.
        # Unfortunately, since we cannot prepend task_type and task_id information to
        # the streamed logs, we need a thread per subprocess to distinguish
        # which subprocess each piece of output came from.
        os.dup2(resources.streaming_pipe_w.fileno(), sys.stdout.fileno())
        os.dup2(resources.streaming_pipe_w.fileno(), sys.stderr.fileno())

        pid = os.getpid()
        logging.info('Subprocess with PID %d (%s, %d) is now being started.',
                     pid, test_env.task_type, test_env.task_id)

        # The thread will be dedicated to checking messages from the parent process.
        threading.Thread(  # pylint: disable=unexpected-keyword-arg
            target=self._message_checking_func,
            args=(test_env.task_type, test_env.task_id),
            daemon=True).start()

        if test_env.v2_enabled:
            v2_compat.enable_v2_behavior()

        with self._runtime_mode(test_env.executing_eagerly):
            info = _run_contained(test_env.task_type, test_env.task_id,
                                  proc_func, args, kwargs)
            self._resources.process_status_queue.put(info)

            # Re-raise the exception in addition to reporting it to the parent
            # process, so that even if `--test_timeout` flag is set and the
            # error doesn't make it to be shown in parent process before bazel's
            # timeout, the log would still show what happens in this subprocess,
            # instead of silently suppressing the error due to early bazel
            # timeout. Raising an error in the subprocess produces a stack trace in
            # the log, but the program continues running.
            if not info.is_successful:
                six.reraise(*info.exc_info)

            self._close_streaming()

        # Exit with code 0 as it's considered successful exit at this point.
        sys.exit(0)
Example #18
def _enable_faulthandler():
    #
    # In the event of a segfault, faulthandler will dump the currently
    # active stack so you can figure out what went wrong.
    #
    # Additionally, on non-Windows platforms we register a SIGUSR2
    # handler -- if you send the robot process a SIGUSR2, then
    # faulthandler will dump all of your current stacks. This can
    # be really useful for figuring out things like deadlocks.
    #

    import logging

    logger = logging.getLogger("faulthandler")

    try:
        # These should work on all platforms
        import faulthandler

        faulthandler.enable()
    except Exception as e:
        logger.warning("Could not enable faulthandler: %s", e)
        return

    try:
        import signal

        faulthandler.register(signal.SIGUSR2)
        logger.info("registered SIGUSR2 for PID %s", os.getpid())
    except Exception:
        return
Example #19
def setup_tests(ns):
    try:
        stderr_fd = sys.__stderr__.fileno()
    except (ValueError, AttributeError):
        # Catch ValueError to catch io.UnsupportedOperation on TextIOBase
        # and ValueError on a closed stream.
        #
        # Catch AttributeError for stderr being None.
        stderr_fd = None
    else:
        # Display the Python traceback on fatal errors (e.g. segfault)
        faulthandler.enable(all_threads=True, file=stderr_fd)

        # Display the Python traceback on SIGALRM or SIGUSR1 signal
        signals = []
        if hasattr(signal, 'SIGALRM'):
            signals.append(signal.SIGALRM)
        if hasattr(signal, 'SIGUSR1'):
            signals.append(signal.SIGUSR1)
        for signum in signals:
            faulthandler.register(signum, chain=True, file=stderr_fd)

    replace_stdout()
    support.record_original_stdout(sys.stdout)

    if ns.testdir:
        # Prepend test directory to sys.path, so runtest() will be able
        # to locate tests
        sys.path.insert(0, os.path.abspath(ns.testdir))

    # Sometimes __path__ and __file__ are not absolute (e.g. while running from
    # Lib/) and, if we change the CWD to run the tests in a temporary dir, some
    # imports might fail.  This affects only the modules imported before os.chdir().
    # These modules are searched first in sys.path[0] (so '' -- the CWD) and if
    # they are found in the CWD their __file__ and __path__ will be relative (this
    # happens before the chdir).  All the modules imported after the chdir, are
    # not found in the CWD, and since the other paths in sys.path[1:] are absolute
    # (site.py absolutize them), the __file__ and __path__ will be absolute too.
    # Therefore it is necessary to manually absolutize the __file__ and __path__ of
    # the packages to prevent later imports from failing when the CWD is different.
    for module in sys.modules.values():
        if hasattr(module, '__path__'):
            for index, path in enumerate(module.__path__):
                module.__path__[index] = os.path.abspath(path)
        if getattr(module, '__file__', None):
            module.__file__ = os.path.abspath(module.__file__)

    if ns.huntrleaks:
        unittest.BaseTestSuite._cleanup = False

    if ns.memlimit is not None:
        support.set_memlimit(ns.memlimit)

    if ns.threshold is not None:
        gc.set_threshold(ns.threshold)

    suppress_msvcrt_asserts(ns.verbose and ns.verbose >= 2)

    support.use_resources = ns.use_resources
Example #20
def registerFaultHandler(signum=14):
    if _g_registered:
        return
    import faulthandler
    faulthandler.register(signum)
    _g_registered[1] = 1
    print(
        "[INFO] faulthandler registered for SIG={}, use `kill -{} {}` to view all stack traces"
        .format(signum, signum, os.getpid()))
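_g_registered is not shown in this snippet; for both the truthiness check and the `_g_registered[1] = 1` assignment to work, it would need to be a module-level mutable, e.g. this hypothetical guard:

_g_registered = {}  # hypothetical: empty (falsy) until a handler has been registered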
Example #21
def _install_signal_handlers():
    log.info('Installing exit handler for SIGTERM, SIGQUIT.')
    for s in (signal.SIGTERM, signal.SIGQUIT):
        signal.signal(s, _exit_signal_handler)
    log.info('Installing stacktrace dumper for SIGUSR2.')
    faulthandler.register(signal.SIGUSR2,
                          file=sys.stderr,
                          all_threads=True,
                          chain=False)
Example #22
@contextlib.contextmanager
def enable_faulthandler():
    # Don't clobber someone else's faulthandler settings
    if not faulthandler.is_enabled():
        # SIGUSR2 not available on windows
        # The attached STDERR might not support what faulthandler wants
        with contextlib.suppress(AttributeError, io.UnsupportedOperation):
            faulthandler.enable()
            faulthandler.register(signal.SIGUSR2, all_threads=True)
    yield
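Since this is a generator-based context manager (note the yield), callers would wrap their main work in it. A minimal usage sketch, assuming the decorated function above:

import faulthandler

if __name__ == "__main__":
    with enable_faulthandler():
        # Any segfault inside this block, or an external SIGUSR2,
        # dumps all thread stacks to stderr.
        print("faulthandler active:", faulthandler.is_enabled())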
Example #23
def setup_faulthandler(args):
    try:
        import faulthandler
    except ImportError:
        sys.stderr.write('running without faulthandler\n')
        return
    else:
        faulthandler.enable()
        faulthandler.register(signal.SIGINT)
Example #24
def setup_tests(ns):
    faulthandler.enable(all_threads=True)
    signals = []
    if hasattr(signal, 'SIGALRM'):
        signals.append(signal.SIGALRM)
    if hasattr(signal, 'SIGUSR1'):
        signals.append(signal.SIGUSR1)
    for signum in signals:
        faulthandler.register(signum, chain=True)
    replace_stdout()
    support.record_original_stdout(sys.stdout)
    if ns.testdir:
        sys.path.insert(0, os.path.abspath(ns.testdir))
    for module in sys.modules.values():
        if hasattr(module, '__path__'):
            for index, path in enumerate(module.__path__):
                module.__path__[index] = os.path.abspath(path)
        if hasattr(module, '__file__'):
            module.__file__ = os.path.abspath(module.__file__)
    if sys.platform == 'darwin':
        try:
            import resource
        except ImportError:
            pass
        else:
            soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
            newsoft = min(hard, max(soft, 1024 * 2048))
            resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))
    if ns.huntrleaks:
        unittest.BaseTestSuite._cleanup = False
        warm_caches()
    if ns.memlimit is not None:
        support.set_memlimit(ns.memlimit)
    if ns.threshold is not None:
        gc.set_threshold(ns.threshold)
    try:
        import msvcrt
    except ImportError:
        pass
    else:
        msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS
                            | msvcrt.SEM_NOALIGNMENTFAULTEXCEPT
                            | msvcrt.SEM_NOGPFAULTERRORBOX
                            | msvcrt.SEM_NOOPENFILEERRORBOX)
        try:
            msvcrt.CrtSetReportMode
        except AttributeError:
            pass
        else:
            for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]:
                if ns.verbose and ns.verbose >= 2:
                    msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE)
                    msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR)
                else:
                    msvcrt.CrtSetReportMode(m, 0)
    support.use_resources = ns.use_resources
Example #25
    def run(self):
        self.serverThread = threading.Thread(target=self.socketServer, args=(), name="GDB socket server")
        self.serverThread.start()

        # set up signal handler for manual debugging
        # we have to set it to output to a different file because
        #  stderr and stdout work differently inside of GDB
        recordErrorsFile = open('errors_gdbClient.log', 'a')
        faulthandler.enable(file=recordErrorsFile, all_threads=True)
        faulthandler.register(signal.SIGUSR1, file=recordErrorsFile, all_threads=True)
Example #26
File: conftest.py Project: quivalen/s3ql
def pytest_configure(config):
    # If we are running from the S3QL source directory, make sure that we
    # load modules from here
    basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
    if not config.getoption('installed'):
        if (os.path.exists(os.path.join(basedir, 'setup.py'))
                and os.path.exists(
                    os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
            sys.path = [os.path.join(basedir, 'src')] + sys.path

    # When running from HG repo, enable warnings
    if os.path.exists(os.path.join(basedir, 'MANIFEST.in')):
        import warnings
        warnings.resetwarnings()

        # Not sure what this is or what causes it, but the internet
        # is full of similar reports so probably a false positive.
        warnings.filterwarnings(
            action='ignore',
            category=ImportWarning,
            message=
            "can't resolve package from __spec__ or __package__, falling "
            "back on __name__ and __path__")

        for cat in (DeprecationWarning, PendingDeprecationWarning):
            warnings.filterwarnings(action='default',
                                    category=cat,
                                    module='s3ql',
                                    append=True)
            warnings.filterwarnings(action='ignore', category=cat, append=True)
        warnings.filterwarnings(action='default', append=True)
        os.environ['S3QL_ENABLE_WARNINGS'] = '1'

    # Enable faulthandler
    faultlog_fd = os.open(os.path.join(basedir, 'tests', 'test_crit.log'),
                          flags=os.O_APPEND | os.O_CREAT | os.O_WRONLY,
                          mode=0o644)
    faulthandler.enable(faultlog_fd)
    faulthandler.register(signal.SIGUSR1, file=faultlog_fd)

    # Configure logging. We don't set a default handler but rely on
    # the catchlog pytest plugin.
    logdebug = config.getoption('logdebug')
    root_logger = logging.getLogger()
    if logdebug is not None:
        logging.disable(logging.NOTSET)
        if 'all' in logdebug:
            root_logger.setLevel(logging.DEBUG)
        else:
            for module in logdebug:
                logging.getLogger(module).setLevel(logging.DEBUG)
    else:
        root_logger.setLevel(logging.INFO)
        logging.disable(logging.DEBUG)
    logging.captureWarnings(capture=True)
Example #27
def registerFaulthandler():
    try:
        import faulthandler
    except ImportError:
        # faulthandler is not available until python3
        return

    faulthandler.enable()
    # faulthandler.register is not available on Windows.
    if getattr(faulthandler, 'register', None):
        faulthandler.register(signal.SIGTERM, chain=True)
Example #28
File: app.py Project: har5ha/qutebrowser
 def _init_crashlogfile(self):
     """Start a new logfile and redirect faulthandler to it."""
     path = utils.get_standard_dir(QStandardPaths.DataLocation)
     logname = os.path.join(path, 'crash.log')
     self._crashlogfile = open(logname, 'w', encoding='ascii')
     faulthandler.enable(self._crashlogfile)
     if (hasattr(faulthandler, 'register') and
             hasattr(signal, 'SIGUSR1')):
         # If available, we also want a traceback on SIGUSR1.
         # pylint: disable=no-member
         faulthandler.register(signal.SIGUSR1)
Example #29
def _enable_faulthandler():
    """Display a traceback on crashing with non-Python errors, such as
    segmentation faults, and when the process is signalled with SIGUSR2
    (not available on Windows)"""
    # Ignore errors during setup; SIGUSR2 not available on Windows, and
    # the attached STDERR might not support what faulthandler wants
    try:
        faulthandler.enable()
        faulthandler.register(signal.SIGUSR2)
    except Exception:
        pass
Example #30
def monitor_call_stack():
    if in_ipython():
        # see this issue for why: https://github.com/ipython/ipykernel/issues/91
        f = sys.__stderr__
    else:
        f = sys.stderr

    faulthandler.register(signal.SIGUSR1, file=f)
    print 'To monitor call stack, type this at command line: kill -USR1 {}'.format(os.getpid())
    print 'Call stack will be printed to stderr ' \
          '(in IPython Notebook, this will show in the terminal where you launched the notebook.)'
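The snippet above is Python 2. A Python 3 rendering of the same helper, assuming the in_ipython() test from the original module, would look like:

import faulthandler
import os
import signal
import sys

def monitor_call_stack():
    if in_ipython():  # assumed helper from the original module
        # see this issue for why: https://github.com/ipython/ipykernel/issues/91
        f = sys.__stderr__
    else:
        f = sys.stderr

    faulthandler.register(signal.SIGUSR1, file=f)
    print('To monitor call stack, type this at command line: kill -USR1 {}'.format(os.getpid()))
    print('Call stack will be printed to stderr '
          '(in IPython Notebook, this will show in the terminal where you launched the notebook.)')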
Example #31
 def test_stderr_None(self):
     # Issue #21497: provide a helpful error if sys.stderr is None,
     # instead of just an attribute error: "None has no attribute fileno".
     with self.check_stderr_none():
         faulthandler.enable()
     with self.check_stderr_none():
         faulthandler.dump_traceback()
     with self.check_stderr_none():
         faulthandler.dump_traceback_later(1e-3)
     if hasattr(faulthandler, "register"):
         with self.check_stderr_none():
             faulthandler.register(signal.SIGUSR1)
Example #32
 def test_stderr_None(self):
     # Issue #21497: provide a helpful error if sys.stderr is None,
     # instead of just an attribute error: "None has no attribute fileno".
     with self.check_stderr_none():
         faulthandler.enable()
     with self.check_stderr_none():
         faulthandler.dump_traceback()
     if hasattr(faulthandler, 'dump_traceback_later'):
         with self.check_stderr_none():
             faulthandler.dump_traceback_later(1e-3)
     if hasattr(faulthandler, "register"):
         with self.check_stderr_none():
             faulthandler.register(signal.SIGUSR1)
Example #33
def main():
    faulthandler.register(signal.SIGUSR1, all_threads=True)
    print(
        "Installed SIGUSR1 handler to print stack traces: pkill -USR1 -f run-tests"
    )

    test_utils.prepare()
    test_utils.start_solr()

    try:
        unittest.main(module="tests", verbosity=1)
    finally:
        print("Tests complete; halting Solr servers…")
        test_utils.stop_solr()
Example #34
File: conftest.py Project: segator/s3ql
def pytest_configure(config):
    # If we are running from the S3QL source directory, make sure that we
    # load modules from here
    basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
    if not config.getoption('installed'):
        if (os.path.exists(os.path.join(basedir, 'setup.py')) and
            os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
            sys.path = [os.path.join(basedir, 'src')] + sys.path

    # When running from HG repo, enable warnings
    if os.path.exists(os.path.join(basedir, 'MANIFEST.in')):
        import warnings
        warnings.resetwarnings()

        # Not sure what this is or what causes it, but the internet
        # is full of similar reports so probably a false positive.
        warnings.filterwarnings(
            action='ignore', category=ImportWarning,
            message="can't resolve package from __spec__ or __package__, falling "
            "back on __name__ and __path__")

        for cat in (DeprecationWarning, PendingDeprecationWarning):
            warnings.filterwarnings(action='default', category=cat,
                                    module='s3ql', append=True)
            warnings.filterwarnings(action='ignore', category=cat, append=True)
        warnings.filterwarnings(action='default', append=True)
        os.environ['S3QL_ENABLE_WARNINGS'] = '1'

    # Enable faulthandler
    faultlog_fd = os.open(os.path.join(basedir, 'tests', 'test_crit.log'),
                          flags=os.O_APPEND|os.O_CREAT|os.O_WRONLY, mode=0o644)
    faulthandler.enable(faultlog_fd)
    faulthandler.register(signal.SIGUSR1, file=faultlog_fd)

    # Configure logging. We don't set a default handler but rely on
    # the catchlog pytest plugin.
    logdebug = config.getoption('logdebug')
    root_logger = logging.getLogger()
    if logdebug is not None:
        logging.disable(logging.NOTSET)
        if 'all' in logdebug:
            root_logger.setLevel(logging.DEBUG)
        else:
            for module in logdebug:
                logging.getLogger(module).setLevel(logging.DEBUG)
    else:
        root_logger.setLevel(logging.INFO)
        logging.disable(logging.DEBUG)
    logging.captureWarnings(capture=True)
Example #35
def enable_thread_dump_signal(signum=signal.SIGUSR1, dump_file=sys.stderr):
    """Turns on the ability to dump all of the threads to a file.
    Currently this is just a wrapper around the faulthandler module.
    :param signum: The OS signal to listen for and when signalled the thread dump should be outputted to `dump_file`.
        The default is the SIGUSR1 signal
    :type signum: int
    :param dump_file: The dump_file to output the threaddump to upon the signal being sent to the process.
    :type dump_file: file
    """
    # Utilities for debugging a python application/process.
    # This is not specifically related testing, but related more to
    # just debugging of code and process which could be in production.
    import faulthandler

    faulthandler.register(signum, file=dump_file, all_threads=True, chain=True)
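A quick way to exercise such a helper without an external kill is to signal the current process. A minimal sketch, assuming the function above:

import os
import signal

if __name__ == "__main__":
    enable_thread_dump_signal()           # defaults: SIGUSR1 -> sys.stderr
    os.kill(os.getpid(), signal.SIGUSR1)  # same effect as `kill -USR1 <pid>` from a shell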
Example #36
 def run(self):
     if HAS_FAULTHANDLER:
         faulthandler.enable()
         if not IS_WINDOWS:
             # windows does not have faulthandler.register
             faulthandler.register(signal.SIGUSR1, chain=True)
     if self.disable_stderr:
         # Disable polluting stderr with errors that are supposed to happen.
         sys.stderr = open(os.devnull, "w")
     try:
         super(ErrorTrackingProcess, self).run()
         self._cconn.send(None)
     except Exception:
         self._cconn.send(ExceptionWrapper(sys.exc_info()))
         raise
Example #37
def main():
    try:
        import faulthandler
        faulthandler.register(signal.SIGUSR1, all_threads=True)
        print('Installed SIGUSR1 handler to print stack traces: pkill -USR1 -f run-tests')
    except ImportError:
        pass

    test_utils.prepare()
    test_utils.start_solr()

    try:
        unittest.main(module='tests', verbosity=1)
    finally:
        print('Tests complete; halting Solr servers…')
        test_utils.stop_solr()
Example #38
    def before_command_start(self, args, recorder):
        """Called after the args have been parsed but before the command starts
        Reports command start using the configured reporter, sets the process
        title to ``oc/parent/join script args``, and registers a fault handler

        :param args: The parsed args the command was called with
        :type args: :class:`argparse.Namespace`
        :param recorder: The output recorder being used to capture command output
        :type recorder: :class:`capture.Recorder`
        :rtype: None
        """
        # reporting: we send a command execution report to a web service;
        #   unless the report.enabled conf key is false
        self.reporter.report_command_start(sys.argv)

        setproctitle('oc/parent/%s' % ' '.join(sys.argv[1:]))
        faulthandler.register(signal.SIGUSR2, all_threads=True, chain=False)  # pylint:disable=no-member
Example #39
def main():
    try:
        import faulthandler
        faulthandler.register(signal.SIGUSR1, all_threads=True)
        print(
            'Installed SIGUSR1 handler to print stack traces: pkill -USR1 -f run-tests'
        )
    except ImportError:
        pass

    test_utils.prepare()
    test_utils.start_solr()

    try:
        unittest.main(module='tests', verbosity=1)
    finally:
        print('Tests complete; halting Solr servers…')
        test_utils.stop_solr()
Example #40
def bind_signal_handlers(agentPid):
  global _handler
  if OSCheck.get_os_family() != OSConst.WINSRV_FAMILY:
    if os.getpid() == agentPid:
      signal.signal(signal.SIGINT, signal_handler)
      signal.signal(signal.SIGTERM, signal_handler)
      signal.signal(signal.SIGUSR2, remote_debug) # Interrupt running process, and provide a python prompt for it
      try:
        import faulthandler  # This is not a default module; it has to be installed separately
        faulthandler.enable(file=sys.stderr, all_threads=True)
        faulthandler.register(signal.SIGUSR1, file=sys.stderr, all_threads=True, chain=False)
        sys.stderr.write("Registered faulthandler\n")
      except ImportError:
        pass  # Module is not included into python distribution

    _handler = HeartbeatStopHandlersLinux()
  else:
    _handler = HeartbeatStopHandlersWindows()
  return _handler
Example #41
    def enable_faulthandler(cls, signum=signal.SIGUSR1):
        """
        Enable dumping thread stack traces when specified signals are received, similar to Java's handling of SIGQUIT

        Note: this must be called from the surviving process in case of daemonization.
        Note that SIGQUIT does not work in all environments with a python process.

        :param int|None signum: Signal number to register for full thread stack dump (use None to disable)
        """
        with cls._lock:
            if not signum:
                cls._disable_faulthandler()
                return
            if not cls.file_handler or faulthandler is None:
                return
            cls.faulthandler_signum = signum
            dump_file = cls.file_handler.stream
            faulthandler.enable(file=dump_file, all_threads=True)
            faulthandler.register(signum, file=dump_file, all_threads=True, chain=False)
Example #42
    def __init__(self, rank=0, world=1):
        faulthandler.enable(all_threads=True)
        faulthandler.register(signal.SIGUSR1, all_threads=True, chain=False)
        signal.signal(signal.SIGINT, lambda x, y: sys.exit(0))

        self.rank = rank
        self.world = world

        if not os.getenv("ADAPTDL_REPLICA_RANK"):
            os.environ["ADAPTDL_REPLICA_RANK"] = str(self.rank)
        if not os.getenv("ADAPTDL_MASTER_ADDR"):
            os.environ["ADAPTDL_MASTER_ADDR"] = "127.0.0.1"
        if not os.getenv("ADAPTDL_JOB_ID"):
            os.environ["ADAPTDL_JOB_ID"] = "transformer"

        if not os.getenv("ADAPTDL_PLACEMENT"):
            # localhost,localhost
            os.environ["ADAPTDL_PLACEMENT"] = (self.world * "localhost,")[:-1]

        self.model = TransformerModel(ntokens, emsize, nhead, nhid, nlayers,
                                      dropout).to(device)

        self.criterion = nn.CrossEntropyLoss()
        self.lr = args.lr
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.lr)
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer,
                                                         1.0,
                                                         gamma=0.95)

        self.best_val_loss = float("inf")
        self.best_model = None

        adaptdl.torch.init_process_group("gloo")
        self.model = adl.AdaptiveDataParallel(self.model, self.optimizer,
                                              self.scheduler)
        if args.autoscale_bsz:
            self.max_batch_size = 1024 * batch_size
            self.local_bsz_bounds = (batch_size, 128)
        else:
            self.max_batch_size = None
            self.local_bsz_bounds = None
Example #43
    def reset_interactive_output_stream(cls, interactive_output_stream):
        """
    Class state:
    - Overwrites `cls._interactive_output_stream`.
    OS state:
    - Overwrites the SIGUSR2 handler.

    This is where the error message on exit will be printed as well.
    """
        # NB: mutate process-global state!
        if faulthandler.unregister(signal.SIGUSR2):
            logger.debug('re-registering a SIGUSR2 handler')
        # This permits a non-fatal `kill -31 <pants pid>` for stacktrace retrieval.
        faulthandler.register(signal.SIGUSR2,
                              interactive_output_stream,
                              all_threads=True,
                              chain=False)

        # NB: mutate the class variables!
        # We don't *necessarily* need to keep a reference to this, but we do here for clarity.
        cls._interactive_output_stream = interactive_output_stream
Example #44
    def check_register(self, filename=False, all_threads=False,
                       unregister=False):
        """
        Register a handler displaying the traceback on a user signal. Raise the
        signal and check the written traceback.

        Raise an error if the output doesn't match the expected format.
        """
        signum = signal.SIGUSR1
        code = """
import faulthandler
import os
import signal

def func(signum):
    os.kill(os.getpid(), signum)

signum = {signum}
unregister = {unregister}
if {has_filename}:
    file = open({filename}, "wb")
else:
    file = None
faulthandler.register(signum, file=file, all_threads={all_threads})
if unregister:
    faulthandler.unregister(signum)
func(signum)
if file is not None:
    file.close()
""".strip()
        code = code.format(
            filename=repr(filename),
            has_filename=bool(filename),
            all_threads=all_threads,
            signum=signum,
            unregister=unregister,
        )
        trace, exitcode = self.get_output(code, filename)
        trace = '\n'.join(trace)
        if not unregister:
            if all_threads:
                regex = 'Current thread XXX:\n'
            else:
                regex = r'Traceback \(most recent call first\):\n'
            regex = expected_traceback(6, 17, regex)
            self.assertRegex(trace, regex)
        else:
            self.assertEqual(trace, '')
        if unregister:
            self.assertNotEqual(exitcode, 0)
        else:
            self.assertEqual(exitcode, 0)
Example #45
def pytest_configure(config):
    # If we are running from the S3QL source directory, make sure that we
    # load modules from here
    basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
    if not config.getoption('installed'):
        if (os.path.exists(os.path.join(basedir, 'setup.py')) and
            os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
            sys.path = [os.path.join(basedir, 'src')] + sys.path

    # When running from HG repo, enable all warnings
    if os.path.exists(os.path.join(basedir, 'MANIFEST.in')):
        import warnings
        warnings.resetwarnings()
        warnings.simplefilter('default')

    # Enable faulthandler
    global faultlog_fh
    faultlog_fh = open(os.path.join(basedir, 'tests', 'test_crit.log'), 'a')
    faulthandler.enable(faultlog_fh)
    faulthandler.register(signal.SIGUSR1, file=faultlog_fh)

    # Configure logging. We don't set a default handler but rely on
    # the catchlog pytest plugin.
    logdebug = config.getoption('logdebug')
    root_logger = logging.getLogger()
    if logdebug is not None:
        logging.disable(logging.NOTSET)
        if 'all' in logdebug:
            root_logger.setLevel(logging.DEBUG)
        else:
            for module in logdebug:
                logging.getLogger(module).setLevel(logging.DEBUG)
    else:
        root_logger.setLevel(logging.INFO)
        logging.disable(logging.DEBUG)
    logging.captureWarnings(capture=True)
Example #46
 def _setup_faulthandler(self, trace_stream):
   faulthandler.enable(trace_stream)
   # This permits a non-fatal `kill -31 <pants pid>` for stacktrace retrieval.
   faulthandler.register(signal.SIGUSR2, trace_stream, chain=True)
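Here chain=True means a previously installed SIGUSR2 handler still runs after faulthandler's dump. A small standalone demonstration of that behavior (hypothetical script, not pants code):

import faulthandler
import os
import signal
import sys

def existing_handler(sig, frame):
    print("existing handler still called", file=sys.stderr)

signal.signal(signal.SIGUSR2, existing_handler)
faulthandler.register(signal.SIGUSR2, file=sys.stderr, chain=True)
os.kill(os.getpid(), signal.SIGUSR2)  # dumps all stacks, then existing_handler runs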
Example #47
def main():
    check_python_version()

    start = time.time()

    # Early init of console output so we start to show everything consistently.
    console.init(quiet=False)
    # allow us to see a thread-dump on SIGQUIT
    faulthandler.register(signal.SIGQUIT, file=sys.stderr)

    pre_configure_logging()
    args = parse_args()

    console.init(quiet=args.quiet)
    console.println(BANNER)

    cfg = config.Config(config_name=args.configuration_name)
    sub_command = derive_sub_command(args, cfg)
    ensure_configuration_present(cfg, args, sub_command)

    if args.effective_start_date:
        cfg.add(config.Scope.application, "system", "time.start", args.effective_start_date)
        cfg.add(config.Scope.application, "system", "time.start.user_provided", True)
    else:
        cfg.add(config.Scope.application, "system", "time.start", datetime.datetime.utcnow())
        cfg.add(config.Scope.application, "system", "time.start.user_provided", False)

    cfg.add(config.Scope.applicationOverride, "system", "quiet.mode", args.quiet)

    # per node?
    cfg.add(config.Scope.applicationOverride, "system", "offline.mode", args.offline)
    cfg.add(config.Scope.applicationOverride, "system", "logging.output", args.logging)

    # Local config per node
    cfg.add(config.Scope.application, "node", "rally.root", paths.rally_root())
    cfg.add(config.Scope.application, "node", "rally.cwd", os.getcwd())

    cfg.add(config.Scope.applicationOverride, "mechanic", "source.revision", args.revision)
    #TODO dm: Consider renaming this one. It's used by different modules
    if args.distribution_version:
        cfg.add(config.Scope.applicationOverride, "mechanic", "distribution.version", args.distribution_version)
    cfg.add(config.Scope.applicationOverride, "mechanic", "distribution.repository", args.distribution_repository)
    cfg.add(config.Scope.applicationOverride, "mechanic", "repository.name", args.team_repository)
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.names", csv_to_list(args.car))
    cfg.add(config.Scope.applicationOverride, "mechanic", "car.plugins", csv_to_list(args.elasticsearch_plugins))
    cfg.add(config.Scope.applicationOverride, "mechanic", "node.datapaths", csv_to_list(args.data_paths))
    if args.keep_cluster_running:
        cfg.add(config.Scope.applicationOverride, "mechanic", "keep.running", True)
        # force-preserve the cluster nodes.
        cfg.add(config.Scope.applicationOverride, "mechanic", "preserve.install", True)
    else:
        cfg.add(config.Scope.applicationOverride, "mechanic", "keep.running", False)
        cfg.add(config.Scope.applicationOverride, "mechanic", "preserve.install", convert.to_bool(args.preserve_install))
    cfg.add(config.Scope.applicationOverride, "mechanic", "telemetry.devices", csv_to_list(args.telemetry))

    cfg.add(config.Scope.applicationOverride, "race", "pipeline", args.pipeline)
    cfg.add(config.Scope.applicationOverride, "race", "laps", args.laps)
    cfg.add(config.Scope.applicationOverride, "race", "user.tag", args.user_tag)

    cfg.add(config.Scope.applicationOverride, "track", "repository.name", args.track_repository)
    cfg.add(config.Scope.applicationOverride, "track", "track.name", args.track)
    cfg.add(config.Scope.applicationOverride, "track", "challenge.name", args.challenge)
    cfg.add(config.Scope.applicationOverride, "track", "include.tasks", csv_to_list(args.include_tasks))
    cfg.add(config.Scope.applicationOverride, "track", "test.mode.enabled", args.test_mode)
    cfg.add(config.Scope.applicationOverride, "track", "auto_manage_indices", to_bool(args.auto_manage_indices))

    cfg.add(config.Scope.applicationOverride, "reporting", "format", args.report_format)
    cfg.add(config.Scope.applicationOverride, "reporting", "output.path", args.report_file)
    if sub_command == "compare":
        cfg.add(config.Scope.applicationOverride, "reporting", "baseline.timestamp", args.baseline)
        cfg.add(config.Scope.applicationOverride, "reporting", "contender.timestamp", args.contender)

    ################################
    # new section name: driver
    ################################
    cfg.add(config.Scope.applicationOverride, "driver", "cluster.health", args.cluster_health)
    cfg.add(config.Scope.applicationOverride, "driver", "profiling", args.enable_driver_profiling)
    cfg.add(config.Scope.applicationOverride, "driver", "load_driver_hosts", csv_to_list(args.load_driver_hosts))
    if sub_command != "list":
        # Also needed by mechanic (-> telemetry) - duplicate by module?
        cfg.add(config.Scope.applicationOverride, "client", "hosts", _normalize_hosts(csv_to_list(args.target_hosts)))
        client_options = kv_to_map(csv_to_list(args.client_options))
        cfg.add(config.Scope.applicationOverride, "client", "options", client_options)
        if "timeout" not in client_options:
            console.info("You did not provide an explicit timeout in the client options. Assuming default of 10 seconds.")

    # split by component?
    if sub_command == "list":
        cfg.add(config.Scope.applicationOverride, "system", "list.config.option", args.configuration)
        cfg.add(config.Scope.applicationOverride, "system", "list.races.max_results", args.limit)

    configure_logging(cfg)
    logger.info("OS [%s]" % str(os.uname()))
    logger.info("Python [%s]" % str(sys.implementation))
    logger.info("Rally version [%s]" % version.version())
    logger.info("Command line arguments: %s" % args)
    # Configure networking
    net.init()
    if not args.offline:
        if not net.has_internet_connection():
            console.warn("No Internet connection detected. Automatic download of track data sets etc. is disabled.",
                         logger=logger)
            cfg.add(config.Scope.applicationOverride, "system", "offline.mode", True)
        else:
            logger.info("Detected a working Internet connection.")

    # Kill any lingering Rally processes before attempting to continue - the actor system needs to be a singleton on this machine
    # noinspection PyBroadException
    try:
        process.kill_running_rally_instances()
    except BaseException:
        logger.exception("Could not terminate potentially running Rally instances correctly. Attempting to go on anyway.")

    success = dispatch_sub_command(cfg, sub_command)

    end = time.time()
    if success:
        console.println("")
        console.info("SUCCESS (took %d seconds)" % (end - start), overline="-", underline="-")
    else:
        console.println("")
        console.info("FAILURE (took %d seconds)" % (end - start), overline="-", underline="-")
        sys.exit(64)
Example #48
def main():

    # On SIGUSR1, backtrace all threads; hopefully this is early
    # enough.
    faulthandler.register(signal.SIGUSR1)

    parser = argparse.ArgumentParser(description="Run tests",
                                     epilog="SIGUSR1 will dump all thread stacks")

    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument("directories", metavar="DIRECTORY", nargs="+",
                        help="a testsuite directory, a TESTLIST file, or a list of test directories")
    testsuite.add_arguments(parser)
    runner.add_arguments(parser)
    logutil.add_arguments(parser)
    skip.add_arguments(parser)
    ignore.add_arguments(parser)
    publish.add_arguments(parser)

    # These three calls go together
    args = parser.parse_args()
    logutil.config(args, sys.stdout)
    logger = logutil.getLogger("kvmrunner")

    logger.info("Options:")
    logger.info("  directories: %s", args.directories)
    logger.info("  verbose: %s", args.verbose)
    testsuite.log_arguments(logger, args)
    runner.log_arguments(logger, args)
    logutil.log_arguments(logger, args)
    skip.log_arguments(logger, args)
    ignore.log_arguments(logger, args)
    publish.log_arguments(logger, args)

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args,
                                              log_level=logutil.INFO)
    if not tests:
        logger.error("test or testsuite directory invalid: %s", args.directories)
        return 1

    test_stats = stats.Tests()
    result_stats = stats.Results()

    try:
        exit_code = 0
        logger.info("run started at %s", timing.START_TIME)
        runner.run_tests(logger, args, tests, test_stats, result_stats)
    except KeyboardInterrupt:
        logger.exception("**** interrupted ****")
        exit_code = 1

    test_stats.log_details(args.verbose and logger.info or logger.debug,
                           header="final stat details:", prefix="  ")
    result_stats.log_details(logger.info, header="final test details:", prefix="  ")

    test_stats.log_summary(logger.info, header="final test stats:", prefix="  ")
    result_stats.log_summary(logger.info, header="final test results:", prefix="  ")

    stop_time = datetime.now()
    logger.info("run finished at %s after %s", stop_time, stop_time - timing.START_TIME)

    return exit_code
Example #49
File: Debug.py Project: chagge/returnn
def initFaulthandler(sigusr1_chain=False):
  """
  :param bool sigusr1_chain: whether the default SIGUSR1 handler should also be called.
  """
  try:
    import faulthandler
  except ImportError, e:
    print "faulthandler import error. %s" % e
    return
  # Only enable if not yet enabled -- otherwise, leave it in its current state.
  if not faulthandler.is_enabled():
    faulthandler.enable()
    if os.name != 'nt':
      import signal
      faulthandler.register(signal.SIGUSR1, all_threads=True, chain=sigusr1_chain)


@auto_exclude_all_new_threads
def initIPythonKernel():
  # You can remotely connect to this kernel. See the output on stdout.
  try:
    import IPython.kernel.zmq.ipkernel
    from IPython.kernel.zmq.ipkernel import Kernel
    from IPython.kernel.zmq.heartbeat import Heartbeat
    from IPython.kernel.zmq.session import Session
    from IPython.kernel import write_connection_file
    import zmq
    from zmq.eventloop import ioloop
    from zmq.eventloop.zmqstream import ZMQStream
    IPython.kernel.zmq.ipkernel.signal = lambda sig, f: None  # Overwrite.
Example #50
File: mount.py Project: ambled/main
def main(args=None):
    '''Mount S3QL file system'''

    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)

    # Save handler so that we can remove it when daemonizing
    stdout_log_handler = setup_logging(options)

    if not os.path.exists(options.mountpoint):
        raise QuietError('Mountpoint does not exist.', exitcode=36)

    if options.threads is None:
        options.threads = determine_threads(options)

    avail_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if avail_fd == resource.RLIM_INFINITY:
        avail_fd = 4096
    resource.setrlimit(resource.RLIMIT_NOFILE, (avail_fd, avail_fd))

    # Subtract some fd's for random things we forgot, and a fixed number for
    # each upload thread (because each thread is using at least one socket and
    # at least one temporary file)
    avail_fd -= 32 + 3 * options.threads

    if options.max_cache_entries is None:
        if avail_fd <= 64:
            raise QuietError("Not enough available file descriptors.",
                             exitcode=37)
        log.info('Autodetected %d file descriptors available for cache entries',
                 avail_fd)
        options.max_cache_entries = avail_fd
    else:
        if options.max_cache_entries > avail_fd:
            log.warning("Up to %d cache entries requested, but detected only %d "
                        "available file descriptors.", options.max_cache_entries, avail_fd)
            options.max_cache_entries = avail_fd

    if options.profile:
        import cProfile
        import pstats
        prof = cProfile.Profile()

    backend_factory = get_backend_factory(options.storage_url, options.backend_options,
                                          options.authfile, options.compress)
    backend_pool = BackendPool(backend_factory)
    atexit.register(backend_pool.flush)

    # Get paths
    cachepath = get_backend_cachedir(options.storage_url, options.cachedir)

    # Retrieve metadata
    with backend_pool() as backend:
        (param, db) = get_metadata(backend, cachepath)

    #if param['max_obj_size'] < options.min_obj_size:
    #    raise QuietError('Maximum object size must be bigger than minimum object size.',
    #                     exitcode=2)

    # Handle --cachesize
    rec_cachesize = options.max_cache_entries * param['max_obj_size'] / 2
    avail_cache = shutil.disk_usage(os.path.dirname(cachepath))[2] / 1024
    if options.cachesize is None:
        options.cachesize = min(rec_cachesize, 0.8 * avail_cache)
        log.info('Setting cache size to %d MB', options.cachesize / 1024)
    elif options.cachesize > avail_cache:
        log.warning('Requested cache size %d MB, but only %d MB available',
                    options.cachesize / 1024, avail_cache / 1024)

    if options.nfs:
        # NFS may try to look up '..', so we have to speed up this kind of query
        log.info('Creating NFS indices...')
        db.execute('CREATE INDEX IF NOT EXISTS ix_contents_inode ON contents(inode)')

    else:
        db.execute('DROP INDEX IF EXISTS ix_contents_inode')

    metadata_upload_thread = MetadataUploadThread(backend_pool, param, db,
                                                  options.metadata_upload_interval)
    block_cache = BlockCache(backend_pool, db, cachepath + '-cache',
                             options.cachesize * 1024, options.max_cache_entries)
    commit_thread = CommitThread(block_cache)
    operations = fs.Operations(block_cache, db, max_obj_size=param['max_obj_size'],
                               inode_cache=InodeCache(db, param['inode_gen']),
                               upload_event=metadata_upload_thread.event)
    metadata_upload_thread.fs = operations

    with ExitStack() as cm:
        log.info('Mounting %s at %s...', options.storage_url, options.mountpoint)
        try:
            llfuse.init(operations, options.mountpoint, get_fuse_opts(options))
        except RuntimeError as exc:
            raise QuietError(str(exc), exitcode=39)

        unmount_clean = False
        def unmount():
            log.info("Unmounting file system...")
            # Acquire lock so that Operations.destroy() is called with the
            # global lock like all other handlers
            with llfuse.lock:
                llfuse.close(unmount=unmount_clean)
        cm.callback(unmount)

        if options.fg:
            faulthandler.enable()
            faulthandler.register(signal.SIGUSR1)
        else:
            if stdout_log_handler:
                logging.getLogger().removeHandler(stdout_log_handler)
            global crit_log_fh
            crit_log_fh = open(os.path.join(options.cachedir, 'mount.s3ql_crit.log'), 'a')
            faulthandler.enable(crit_log_fh)
            faulthandler.register(signal.SIGUSR1, file=crit_log_fh)
            daemonize(options.cachedir)

        mark_metadata_dirty(backend, cachepath, param)

        block_cache.init(options.threads)
        cm.callback(block_cache.destroy)

        metadata_upload_thread.start()
        cm.callback(metadata_upload_thread.join)
        cm.callback(metadata_upload_thread.stop)

        commit_thread.start()
        cm.callback(commit_thread.join)
        cm.callback(commit_thread.stop)

        if options.upstart:
            os.kill(os.getpid(), signal.SIGSTOP)
        if sd_notify is not None:
            sd_notify('READY=1')
            sd_notify('MAINPID=%d' % os.getpid())

        exc_info = setup_exchook()
        if options.profile:
            prof.runcall(llfuse.main, options.single)
        else:
            llfuse.main(options.single)

        # Allow operations to terminate while block_cache is still available
        # (destroy() will be called again from llfuse.close(), but at that
        # point the block cache is no longer available).
        with llfuse.lock:
            operations.destroy()

        # Re-raise if main loop terminated due to exception in other thread
        if exc_info:
            (exc_inst, exc_tb) = exc_info
            raise exc_inst.with_traceback(exc_tb)

        log.info("FUSE main loop terminated.")

        unmount_clean = True

    # At this point, there should be no other threads left

    # Do not update .params yet, dump_metadata() may fail if the database is
    # corrupted, in which case we want to force an fsck.
    param['max_inode'] = db.get_val('SELECT MAX(id) FROM inodes')
    if operations.failsafe:
        log.warning('File system errors encountered, marking for fsck.')
        param['needs_fsck'] = True
    with backend_pool() as backend:
        seq_no = get_seq_no(backend)
        if metadata_upload_thread.db_mtime == os.stat(cachepath + '.db').st_mtime:
            log.info('File system unchanged, not uploading metadata.')
            del backend['s3ql_seq_no_%d' % param['seq_no']]
            param['seq_no'] -= 1
            with open(cachepath + '.params', 'wb') as fh:
                fh.write(freeze_basic_mapping(param))
        elif seq_no == param['seq_no']:
            param['last-modified'] = time.time()
            dump_and_upload_metadata(backend, db, param)
            with open(cachepath + '.params', 'wb') as fh:
                fh.write(freeze_basic_mapping(param))
        else:
            log.error('Remote metadata is newer than local (%d vs %d), '
                      'refusing to overwrite!', seq_no, param['seq_no'])
            log.error('The locally cached metadata will be *lost* the next time the file system '
                      'is mounted or checked and has therefore been backed up.')
            for name in (cachepath + '.params', cachepath + '.db'):
                for i in range(4)[::-1]:
                    if os.path.exists(name + '.%d' % i):
                        os.rename(name + '.%d' % i, name + '.%d' % (i + 1))
                os.rename(name, name + '.0')

    log.info('Cleaning up local metadata...')
    db.execute('ANALYZE')
    db.execute('VACUUM')
    db.close()

    if options.profile:
        with tempfile.NamedTemporaryFile() as tmp, \
            open('s3ql_profile.txt', 'w') as fh:
            prof.dump_stats(tmp.name)
            p = pstats.Stats(tmp.name, stream=fh)
            p.strip_dirs()
            p.sort_stats('cumulative')
            p.print_stats(50)
            p.sort_stats('time')
            p.print_stats(50)

    log.info('All done.')
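
The foreground/daemon split above is the instructive part: once stderr is detached, faulthandler must write somewhere durable. A condensed sketch of that pattern (the log path is illustrative, not S3QL's):

import faulthandler
import os
import signal

# A daemon has no usable stderr, so direct crash tracebacks to a file.
crash_fh = open(os.path.join('/tmp', 'crash.log'), 'a')
faulthandler.enable(crash_fh)                         # fatal signals
faulthandler.register(signal.SIGUSR1, file=crash_fh)  # on-demand dumps
# Keep crash_fh open for the life of the process: faulthandler stores
# only the file descriptor, not the Python file object.
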
Example #51
0
File: conftest.py Project: rootfs/s3ql
def pytest_configure(config):

    # Enable stdout and stderr analysis, unless output capture is disabled
    if config.getoption('capture') != 'no':
        global check_test_output
        check_test_output = pytest.fixture(autouse=True)(check_test_output)

    logdebug = config.getoption('logdebug')

    # If we are running from the S3QL source directory, make sure that we
    # load modules from here
    basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
    if not config.getoption('installed'):
        if (os.path.exists(os.path.join(basedir, 'setup.py')) and
            os.path.exists(os.path.join(basedir, 'src', 's3ql', '__init__.py'))):
            sys.path = [os.path.join(basedir, 'src')] + sys.path

    # When running from HG repo, enable all warnings
    if os.path.exists(os.path.join(basedir, 'MANIFEST.in')):
        import warnings
        warnings.resetwarnings()
        warnings.simplefilter('error')

    # Enable faulthandler
    global faultlog_fh
    faultlog_fh = open(os.path.join(basedir, 'tests', 'test_crit.log'), 'a')
    faulthandler.enable(faultlog_fh)
    faulthandler.register(signal.SIGUSR1, file=faultlog_fh)

    # Enable logging
    import s3ql.logging
    root_logger = logging.getLogger()
    if root_logger.handlers:
        root_logger.warning("Logging already initialized.")
    else:
        handler = logging.handlers.RotatingFileHandler(
            os.path.join(basedir, 'tests', 'test.log'),
            maxBytes=10 * 1024 ** 2, backupCount=0)
        if logdebug is None:
            formatter = logging.Formatter(
                '%(asctime)s.%(msecs)03d [%(process)s] %(threadName)s: '
                '[%(name)s] %(message)s', datefmt="%Y-%m-%d %H:%M:%S")
        else:
            formatter = logging.Formatter(
                '%(asctime)s.%(msecs)03d [%(process)s] %(threadName)s: '
                '[%(name)s.%(funcName)s] %(message)s', datefmt="%Y-%m-%d %H:%M:%S")

        handler.setFormatter(formatter)
        root_logger.addHandler(handler)

        if logdebug is not None:
            if 'all' in logdebug:
                root_logger.setLevel(logging.DEBUG)
            else:
                for module in logdebug:
                    logging.getLogger(module).setLevel(logging.DEBUG)
            logging.disable(logging.NOTSET)
        else:
            root_logger.setLevel(logging.WARNING)

        logging.captureWarnings(capture=True)

    # Make errors and warnings fatal
    s3ql.logging.EXCEPTION_SEVERITY = logging.WARNING
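
pytest_configure above opens faultlog_fh but never closes it. A matching teardown, sketched on the assumption that no dump can arrive once pytest_unconfigure runs:

import signal

import faulthandler

def pytest_unconfigure(config):
    global faultlog_fh
    # Unhook the signal before closing the file; the reverse order would
    # leave faulthandler pointed at a closed descriptor.
    faulthandler.unregister(signal.SIGUSR1)
    faulthandler.disable()
    if faultlog_fh is not None:
        faultlog_fh.close()
        faultlog_fh = None
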
Example #52
0
File: setup.py Project: zbrad/cpython
def setup_tests(ns):
    # Display the Python traceback on fatal errors (e.g. segfault)
    faulthandler.enable(all_threads=True)

    # Display the Python traceback on SIGALRM or SIGUSR1 signal
    signals = []
    if hasattr(signal, 'SIGALRM'):
        signals.append(signal.SIGALRM)
    if hasattr(signal, 'SIGUSR1'):
        signals.append(signal.SIGUSR1)
    for signum in signals:
        faulthandler.register(signum, chain=True)

    replace_stdout()
    support.record_original_stdout(sys.stdout)

    # Sometimes __path__ and __file__ are not absolute (e.g. while running from
    # Lib/) and, if we change the CWD to run the tests in a temporary dir, some
    # imports might fail.  This affects only the modules imported before os.chdir().
    # These modules are searched first in sys.path[0] (so '' -- the CWD) and if
    # they are found in the CWD their __file__ and __path__ will be relative (this
    # happens before the chdir).  All the modules imported after the chdir, are
    # not found in the CWD, and since the other paths in sys.path[1:] are absolute
    # (site.py absolutizes them), their __file__ and __path__ will be absolute too.
    # Therefore it is necessary to manually absolutize the __file__ and __path__ of
    # the packages to prevent later imports from failing when the CWD is different.
    for module in sys.modules.values():
        if hasattr(module, '__path__'):
            module.__path__ = [os.path.abspath(path)
                               for path in module.__path__]
        if hasattr(module, '__file__'):
            module.__file__ = os.path.abspath(module.__file__)

    # MacOSX (a.k.a. Darwin) has a default stack size that is too small
    # for deeply recursive regular expressions.  We see this as crashes in
    # the Python test suite when running test_re.py and test_sre.py.  The
    # fix is to raise the stack limit to 2 MiB (1024*2048 bytes).
    # This approach may also be useful for other Unixy platforms that
    # suffer from small default stack limits.
    if sys.platform == 'darwin':
        try:
            import resource
        except ImportError:
            pass
        else:
            soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
            newsoft = min(hard, max(soft, 1024*2048))
            resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))

    if ns.huntrleaks:
        unittest.BaseTestSuite._cleanup = False

        # Avoid false positives due to various caches
        # filling slowly with random data:
        warm_caches()

    if ns.memlimit is not None:
        support.set_memlimit(ns.memlimit)

    if ns.threshold is not None:
        gc.set_threshold(ns.threshold)

    if ns.nowindows:
        import msvcrt
        msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS|
                            msvcrt.SEM_NOALIGNMENTFAULTEXCEPT|
                            msvcrt.SEM_NOGPFAULTERRORBOX|
                            msvcrt.SEM_NOOPENFILEERRORBOX)
        try:
            msvcrt.CrtSetReportMode
        except AttributeError:
            # release build
            pass
        else:
            for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]:
                msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE)
                msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR)

    support.use_resources = ns.use_resources
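
Beyond signal-triggered dumps, faulthandler also offers a timer API that suits harnesses like this; regrtest's --timeout option is built on it. A minimal sketch (the 60-second budget and the run_tests callable are illustrative):

import faulthandler

def run_with_watchdog(run_tests, timeout=60.0):
    # If run_tests() is still going after `timeout` seconds, dump every
    # thread's traceback and abort the process (exit=True).
    faulthandler.dump_traceback_later(timeout, exit=True)
    try:
        run_tests()
    finally:
        faulthandler.cancel_dump_traceback_later()
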
Example #53
0
                connection.tables()

    with pool.connection():
        # At this point the only connection is assigned to this thread,
        # so another thread cannot obtain a connection at this point.

        t = threading.Thread(target=run)
        t.start()
        t.join()


if __name__ == '__main__':
    import logging
    import sys

    # Dump stacktraces using 'kill -USR1', useful for debugging hanging
    # programs and multi threading issues.
    try:
        import faulthandler
    except ImportError:
        pass
    else:
        import signal
        faulthandler.register(signal.SIGUSR1)

    logging.basicConfig(level=logging.DEBUG)

    method_name = 'test_%s' % sys.argv[1]
    method = globals()[method_name]
    method()
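
The kill -USR1 trick above presumes POSIX signals. Where they are unavailable (e.g. on Windows), the same hang diagnosis can be done in-process; a sketch:

import faulthandler
import sys
import threading
import time

def watchdog(interval=30.0):
    # Periodically dump every thread's stack; useful for the kind of
    # multi-threading hang the example above hunts with SIGUSR1.
    while True:
        time.sleep(interval)
        faulthandler.dump_traceback(file=sys.stderr, all_threads=True)

threading.Thread(target=watchdog, daemon=True).start()
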
Example #54
0
            save_config(default_config, self._config_path)

            print(
                "Default config is set, please don't forget to update your github tokens, webhook tokens, and jenkins configurations appropiately! Location = {}".format(
                    self._config_path
                )
            )


if __name__ == "__main__":
    import signal

    if os.environ.get("SCARLETT_DEBUG_MODE"):
        import faulthandler

        faulthandler.register(signal.SIGUSR2, all_threads=True)

        from scarlett_os.internal.debugger import init_debugger

        init_debugger()

        from scarlett_os.internal.debugger import enable_remote_debugging

        enable_remote_debugging()

    from scarlett_os.logger import setup_logger

    setup_logger()

    import imp  # pylint: disable=W0611
    import os.path
Example #55
0
    def check_register(self, filename=False, all_threads=False,
                       unregister=False, chain=False):
        """
        Register a handler displaying the traceback on a user signal. Raise the
        signal and check the written traceback.

        If chain is True, check that the previous signal handler is called.

        Raise an error if the output doesn't match the expected format.
        """
        signum = signal.SIGUSR1
        code = """
import faulthandler
import os
import signal
import sys

def func(signum):
    os.kill(os.getpid(), signum)

def handler(signum, frame):
    handler.called = True
handler.called = False

exitcode = 0
signum = %s
filename = %s
unregister = %s
all_threads = %s
chain = %s
if bool(filename):
    file = open(filename, "wb")
else:
    file = None
if chain:
    signal.signal(signum, handler)
faulthandler.register(signum, file=file,
                      all_threads=all_threads, chain=chain)
if unregister:
    faulthandler.unregister(signum)
func(signum)
if chain and not handler.called:
    if file is not None:
        output = file
    else:
        output = sys.stderr
    output.write("Error: signal handler not called!\\n")
    exitcode = 1
if file is not None:
    file.close()
sys.exit(exitcode)
""".strip()
        code = code % (
            signum,
            repr(filename),
            unregister,
            all_threads,
            chain,
        )
        trace, exitcode = self.get_output(code, filename)
        trace = '\n'.join(trace)
        if not unregister:
            if all_threads:
                regex = 'Current thread XXX:\n'
            else:
                regex = r'Traceback \(most recent call first\):\n'
            regex = expected_traceback(7, 29, regex)
            self.assertRegex(trace, regex)
        else:
            self.assertEqual(trace, '')
        if unregister:
            self.assertNotEqual(exitcode, 0)
        else:
            self.assertEqual(exitcode, 0)
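
Run outside the test harness, a registered handler prints exactly the headers this test asserts on. A quick illustrative script:

import faulthandler
import os
import signal

faulthandler.register(signal.SIGUSR1)  # all_threads defaults to True
os.kill(os.getpid(), signal.SIGUSR1)
# stderr then shows something like:
#   Current thread 0x00007f... (most recent call first):
#     File "demo.py", line 6 in <module>
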
Example #56
0
def main() -> None:
    faulthandler.register(signal.SIGUSR1)
    app.run(host=HOST, port=PORT, debug=DEBUG)
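
app, HOST, PORT and DEBUG come from elsewhere in that module. A self-contained rendering of the same idea, assuming a Flask app and illustrative values:

import signal

import faulthandler
from flask import Flask

app = Flask(__name__)
HOST, PORT, DEBUG = "127.0.0.1", 8080, False  # illustrative values

def main() -> None:
    # Let `kill -USR1 <pid>` dump all handler stacks while the server
    # keeps running.
    faulthandler.register(signal.SIGUSR1)
    app.run(host=HOST, port=PORT, debug=DEBUG)

if __name__ == "__main__":
    main()
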
Example #57
0
def main():

    # If SIGUSR1, backtrace all threads; hopefully this is early
    # enough.
    faulthandler.register(signal.SIGUSR1)

    parser = argparse.ArgumentParser(description="list test results",
                                     epilog="By default this tool uses 'sanitizer.sh' and 'diff' to generate up-to-the-minuite test results (the previously generated files 'OUTPUT/*.console.txt' and 'OUTPUT/*.console.diff' are ignored).  While this makes things a little slower, it has the benefit of always providing the most up-to-date and correct results (for instance, changes to known-good files are reflected immediately).  SIGUSR1 will dump all thread stacks")
    parser.add_argument("--verbose", "-v", action="count", default=0)

    parser.add_argument("--quick", action="store_true",
                        help=("Use the previously generated '.console.txt' and '.console.diff' files"))

    parser.add_argument("--update", action="store_true",
                        help=("Update the '.console.txt' and '.console.diff' files"))

    parser.add_argument("--dump-args", action="store_true")

    # how to parse --print directory,saved-directory,...?
    parser.add_argument("--print", action="store",
                        default=printer.Print(printer.Print.path, printer.Print.result, printer.Print.issues),
                        type=printer.Print, metavar=str(printer.Print),
                        help="comman separate list of attributes to print for each test; default: '%(default)s'")

    parser.add_argument("--stats", action="store", default=Stats.summary, type=Stats,
                        choices=[c for c in Stats],
                        help="provide overview statistics; default: \"%(default)s\"");

    baseline_metavar = "BASELINE-DIRECTORY"
    baseline_help = "additional %(metavar)s containing results to compare against; any divergence between the test and baseline results are displayed"
    parser.add_argument("--baseline", "-b",
                        metavar=baseline_metavar, help=baseline_help)

    parser.add_argument("--json", action="store_true",
                        help="output each result as an individual json object (pipe the output through 'jq -s .' to convert it to a well formed json list")

    parser.add_argument("directories", metavar="DIRECTORY-OR-FILE", nargs="+",
                        help="a directory containing: a test, testsuite, test output, or testsuite output; or a file containing a 'TESTLIST'")

    # Note: this argument serves as documentation only.  The RESULT
    # argument should consume all remaining parameters.
    parser.add_argument("baseline_ignored", nargs="?",
                        metavar=baseline_metavar, help=baseline_help)

    testsuite.add_arguments(parser)
    logutil.add_arguments(parser)
    # XXX: while checking for an UNTESTED test should be very cheap
    # (does OUTPUT/ exist?) it isn't.  Currently it triggers a full
    # post-mortem analysis.
    skip.add_arguments(parser, skip.skip.untested)
    ignore.add_arguments(parser)
    publish.add_arguments(parser)

    # These three calls go together
    args = parser.parse_args()
    logutil.config(args, sys.stderr)
    logger = logutil.getLogger("kvmresults")

    # The option -vvvvvvv is a short-circuit for these; using "v" as a
    # counter makes re-ordering easy.
    v = 0

    if args.dump_args:
        logger.info("Arguments:")
        logger.info("  Stats: %s", args.stats)
        logger.info("  Print: %s", args.print)
        logger.info("  Baseline: %s", args.baseline)
        logger.info("  Json: %s", args.json)
        logger.info("  Quick: %s", args.quick)
        logger.info("  Update: %s", args.update)
        testsuite.log_arguments(logger, args)
        logutil.log_arguments(logger, args)
        skip.log_arguments(logger, args)
        ignore.log_arguments(logger, args)
        publish.log_arguments(logger, args)
        return 0

    # Try to find a baseline.  If present, pre-load it.
    baseline = None
    if args.baseline:
        # An explicit baseline testsuite can be more forgiving in how
        # it is loaded.
        baseline = testsuite.load(logger, logutil.DEBUG, args,
                                  testsuite_directory=args.baseline,
                                  error_level=logutil.DEBUG)
        if not baseline:
            # Perhaps the baseline just contains output, magic up the
            # corresponding testsuite directory.
            baseline_directory = os.path.join(args.testing_directory, "pluto")
            baseline = testsuite.load(logger, logutil.DEBUG, args,
                                      testsuite_directory=baseline_directory,
                                      testsuite_output_directory=args.baseline,
                                      error_level=logutil.DEBUG)
        if not baseline:
            logger.info("'%s' is not a baseline", args.baseline)
            return 1
    elif len(args.directories) > 1:
        # If there is more than one directory then, perhaps, the last
        # one is a baseline.  A baseline might be: a complete
        # testsuite snapshot; or just output saved as
        # testing/pluto/OUTPUT/TESTDIR.
        baseline = testsuite.load(logger, logutil.DEBUG, args,
                                  testsuite_directory=args.directories[-1])
        if baseline:
            # discard the last argument as consumed above.
            logger.debug("discarding baseline testsuite argument '%s'", args.directories[-1])
            args.directories.pop()

    tests = testsuite.load_testsuite_or_tests(logger, args.directories, args)
    # And check
    if not tests:
        logger.error("Invalid testsuite or test directories")
        return 1

    result_stats = stats.Results()
    try:
        results(logger, tests, baseline, args, result_stats)
    finally:
        if args.stats is Stats.details:
            result_stats.log_details(stderr_log, header="Details:", prefix="  ")
        if args.stats in [Stats.details, Stats.summary]:
            result_stats.log_summary(stderr_log, header="Summary:", prefix="  ")
        publish.json_results(logger, args)
        publish.json_summary(logger, args)

    return 0
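
The --stats option above parses straight into an enum via type=Stats. A minimal sketch of that argparse pattern with a stand-in enum (the real Stats class lives elsewhere in this project):

import argparse
import enum

class Stats(enum.Enum):
    none = "none"
    summary = "summary"
    details = "details"

    def __str__(self):
        return self.value  # so choices and %(default)s render cleanly

parser = argparse.ArgumentParser()
parser.add_argument("--stats", type=Stats, default=Stats.summary,
                    choices=list(Stats),
                    help="overview statistics; default: %(default)s")

args = parser.parse_args(["--stats", "details"])
assert args.stats is Stats.details
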
Example #58
0
def integrate_faulthandler():
    import faulthandler
    import signal
    fout = open('/var/canvas/website/run/faulthandler.log', 'a')
    faulthandler.register(signal.SIGUSR2, file=fout)
Example #59
0
def main(argv):
    if faulthandler is not None:
        faulthandler.register(signal.SIGUSR1, all_threads=True)

    opts = parse_args(argv)
    stages = []
    report_stages = []

    ctx = Context()
    ctx.results = {}
    ctx.sensors_data = SensorDatastore()

    if opts.subparser_name == 'test':
        cfg = load_config(opts.config_file)
        make_storage_dir_struct(cfg)
        cfg.comment = opts.comment
        save_run_params(cfg)

        with open(cfg.saved_config_file, 'w') as fd:
            fd.write(pretty_yaml.dumps(cfg.__dict__))

        stages = [
            run_test.discover_stage
        ]

        stages.extend([
            run_test.reuse_vms_stage,
            log_nodes_statistic_stage,
            run_test.save_nodes_stage,
            run_test.connect_stage])

        if cfg.settings.get('collect_info', True):
            stages.append(run_test.collect_hw_info_stage)

        stages.extend([
            # deploy_sensors_stage,
            run_test.run_tests_stage,
            run_test.store_raw_results_stage,
            # gather_sensors_stage
        ])

        cfg.keep_vm = opts.keep_vm
        cfg.no_tests = opts.no_tests
        cfg.dont_discover_nodes = opts.dont_discover_nodes

        ctx.build_meta['build_id'] = opts.build_id
        ctx.build_meta['build_descrption'] = opts.build_description
        ctx.build_meta['build_type'] = opts.build_type

    elif opts.subparser_name == 'ls':
        list_results(opts.result_storage)
        return 0

    elif opts.subparser_name == 'report':
        cfg = load_config(get_test_files(opts.data_dir)['saved_config_file'])
        stages.append(run_test.load_data_from(opts.data_dir))
        opts.no_report = False
        # load build meta

    elif opts.subparser_name == 'compare':
        x = run_test.load_data_from_path(opts.data_path1)
        y = run_test.load_data_from_path(opts.data_path2)
        print(run_test.IOPerfTest.format_diff_for_console(
            [x['io'][0], y['io'][0]]))
        return 0

    if not opts.no_report:
        report_stages.append(run_test.console_report_stage)
        if opts.load_report:
            report_stages.append(run_test.test_load_report_stage)
        report_stages.append(run_test.html_report_stage)

    if opts.log_level is not None:
        str_level = opts.log_level
    else:
        str_level = cfg.settings.get('log_level', 'INFO')

    setup_loggers(getattr(logging, str_level), cfg.log_file)
    logger.info("All info would be stored into " + cfg.results_dir)

    for stage in stages:
        ok = False
        with log_stage(stage):
            stage(cfg, ctx)
            ok = True
        if not ok:
            break

    cls, exc, tb = sys.exc_info()
    for stage in ctx.clear_calls_stack[::-1]:
        with log_stage(stage):
            stage(cfg, ctx)

    logger.debug("Start utils.cleanup")
    for clean_func, args, kwargs in utils.iter_clean_func():
        with log_stage(clean_func):
            clean_func(*args, **kwargs)

    if exc is None:
        for report_stage in report_stages:
            with log_stage(report_stage):
                report_stage(cfg, ctx)

    logger.info("All info stored into " + cfg.results_dir)

    if exc is None:
        logger.info("Tests finished successfully")
        return 0
    else:
        logger.error("Tests are failed. See detailed error above")
        return 1
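
The `faulthandler is not None` guard at the top of main implies a module-level optional import; the usual shape of that pattern, sketched:

import signal

try:
    import faulthandler
except ImportError:
    # Older interpreters without faulthandler (or a missing backport).
    faulthandler = None

if faulthandler is not None:
    faulthandler.register(signal.SIGUSR1, all_threads=True)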