Example #1
    def __init__(self, socket_filename, max_connections=64,
                 connection_timeout=10, poll_timeout=1, num_workers=1,
                 num_worker_processes=2, max_processed_jobs=1,
                 max_queued_jobs=8, logger=None):
        self.socket_address = socket_filename
        self.socket_family = 'AF_UNIX'
        self.socket_kwargs = {}
        self.socket_listener = None
        self.connection_semaphore = Semaphore(max_connections)
        self.connection_handlers = ThreadSet()
        self.connection_timeout = connection_timeout
        self.connection_poll_timeout = poll_timeout
        self.job_queue = Queue(max_queued_jobs)

        self.logger = logger or getLogger(LOGGER_NAME)

        # set up verbose multiprocessing debugging
        set_stream_handler(mp_util.get_logger())
        mp_util.get_logger().setLevel(mp_util.SUBWARNING)
        worker_semaphore = ProcessSemaphore(num_workers)

        self.worker_pool = ProcessPool(
            max(num_workers, num_worker_processes), init_worker,
            maxtasksperchild=max_processed_jobs,
        )
        self.worker_pool_manager = WorkerPoolManager(
            self.worker_pool, worker_semaphore, self.job_queue
        )
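A note on the verbose multiprocessing debugging block above: set_stream_handler is project-specific, but its effect can be reproduced with the standard library alone. A minimal sketch, assuming only multiprocessing.util:

import logging
from multiprocessing import util as mp_util

# Attach a stderr handler to the 'multiprocessing' logger and enable the
# extra-verbose SUBWARNING level (25) that multiprocessing.util defines.
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(mp_util.DEFAULT_LOGGING_FORMAT))
mp_util.get_logger().addHandler(handler)
mp_util.get_logger().setLevel(mp_util.SUBWARNING)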
Example #2
def teardown():
    # Don't want SUBDEBUG log messages at finalization.
    try:
        from multiprocessing.util import get_logger
    except ImportError:
        pass
    else:
        get_logger().setLevel(logging.WARNING)

    # Make sure test database is removed.
    import os
    if os.path.exists("test.db"):
        try:
            os.remove("test.db")
        except WindowsError:
            pass

    # Make sure there are no remaining threads at shutdown.
    import threading
    remaining_threads = [
        thread for thread in threading.enumerate()
        if thread.getName() != "MainThread"
    ]
    if remaining_threads:
        sys.stderr.write(
            "\n\n**WARNING**: Remaining threads at teardown: %r...\n" %
            (remaining_threads))
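The SUBDEBUG level named in the comment above is multiprocessing-specific (numeric value 5, below logging.DEBUG), which is why teardown raises the logger to WARNING to silence finalizer chatter. A minimal sketch of enabling that chatter during a run, using only documented stdlib helpers:

import multiprocessing
from multiprocessing import util

# log_to_stderr() attaches a stderr handler to the multiprocessing logger
# and returns it; SUBDEBUG shows finalizer and bookkeeping messages.
logger = multiprocessing.log_to_stderr()
logger.setLevel(util.SUBDEBUG)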
Example #3
def prepare(data):
    '''
    Try to get current process ready to unpickle process object
    '''
    if 'name' in data:
        process.current_process().name = data['name']

    if 'authkey' in data:
        process.current_process().authkey = data['authkey']

    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()

    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])

    if 'sys_path' in data:
        sys.path = data['sys_path']

    if 'sys_argv' in data:
        sys.argv = data['sys_argv']

    if 'dir' in data:
        os.chdir(data['dir'])

    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']

    if hasattr(mp, 'set_start_method'):
        mp.set_start_method('loky')

    if 'init_main_from_name' in data:
        _fixup_main_from_name(data['init_main_from_name'])
    elif 'init_main_from_path' in data:
        _fixup_main_from_path(data['init_main_from_path'])
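For orientation, the data dict consumed by prepare() is assembled by the parent before spawning. A hypothetical minimal payload (keys taken from the checks above, values purely illustrative):

import logging
import os
import sys

data = {
    'name': 'LokyProcess-1',      # becomes current_process().name
    'log_to_stderr': True,        # triggers util.log_to_stderr()
    'log_level': logging.DEBUG,   # applied via util.get_logger().setLevel()
    'sys_path': list(sys.path),   # replaces the child's sys.path
    'dir': os.getcwd(),           # the child chdirs here
}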
Example #4
def teardown():
    # Don't want SUBDEBUG log messages at finalization.
    try:
        from multiprocessing.util import get_logger
    except ImportError:
        pass
    else:
        get_logger().setLevel(logging.WARNING)

    # Make sure test database is removed.
    import os
    if os.path.exists('test.db'):
        try:
            os.remove('test.db')
        except WindowsError:
            pass

    # Make sure there are no remaining threads at shutdown.
    import threading
    remaining_threads = [thread for thread in threading.enumerate()
                         if thread.getName() != 'MainThread']
    if remaining_threads:
        sys.stderr.write(
            '\n\n**WARNING**: Remaining threads at teardown: %r...\n' % (
                remaining_threads))
Example #5
def start_command(config, args):
    initialize_logger(config)
    print('starting command: %s' % ' '.join(args))
    get_logger().info("Starting command: %s" % " ".join(args))
    try:
        subprocess.call(args, shell=True)
    except Exception:
        pass
Example #6
def start_command(config, args):
    initialize_logger(config)
    print('starting command: %s' % ' '.join(args))
    get_logger().info("Starting command: %s" % " ".join(args))
    try:
        subprocess.call(args, shell=True)
    except Exception:
        pass
Example #7
def prepare(data):
    """
    Try to get current process ready to unpickle process object
    """
    old_main_modules.append(sys.modules['__main__'])
    if 'name' in data:
        process.current_process().name = data['name']
    if 'authkey' in data:
        process.current_process()._authkey = data['authkey']
    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()
    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])
    if 'sys_path' in data:
        sys.path = data['sys_path']
    if 'sys_argv' in data:
        sys.argv = data['sys_argv']
    if 'dir' in data:
        os.chdir(data['dir'])
    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']
    if 'main_path' in data:
        main_path = data['main_path']
        main_name = os.path.splitext(os.path.basename(main_path))[0]
        if main_name == '__init__':
            main_name = os.path.basename(os.path.dirname(main_path))

        if main_name != 'ipython':
            import imp
            if main_path is None:
                dirs = None
            elif os.path.basename(main_path).startswith('__init__.py'):
                dirs = [os.path.dirname(os.path.dirname(main_path))]
            else:
                dirs = [os.path.dirname(main_path)]
            assert main_name not in sys.modules, main_name
            file, path_name, etc = imp.find_module(main_name, dirs)
            try:
                main_module = imp.load_module('__parents_main__', file,
                                              path_name, etc)
            finally:
                if file:
                    file.close()

            sys.modules['__main__'] = main_module
            main_module.__name__ = '__main__'
            for obj in main_module.__dict__.values():
                try:
                    if obj.__module__ == '__parents_main__':
                        obj.__module__ = '__main__'
                except Exception:
                    pass

    return
Example #8
def prepare(data):
    '''
    Try to get current process ready to unpickle process object
    '''
    if 'name' in data:
        process.current_process().name = data['name']

    if 'authkey' in data:
        process.current_process().authkey = data['authkey']

    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()

    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])

    if 'log_fmt' in data:
        import logging
        util.get_logger().handlers[0].setFormatter(
            logging.Formatter(data['log_fmt'])
        )

    if 'sys_path' in data:
        sys.path = data['sys_path']

    if 'sys_argv' in data:
        sys.argv = data['sys_argv']

    if 'dir' in data:
        os.chdir(data['dir'])

    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']

    if 'mp_tracker_args' in data:
        from multiprocessing.resource_tracker import (
            _resource_tracker as mp_resource_tracker
        )
        mp_resource_tracker._fd = data['mp_tracker_args']['fd']
        mp_resource_tracker._pid = data['mp_tracker_args']['pid']
    if 'tracker_args' in data:
        from .resource_tracker import _resource_tracker
        _resource_tracker._pid = data["tracker_args"]['pid']
        if sys.platform == 'win32':
            handle = data["tracker_args"]["fh"]
            _resource_tracker._fd = msvcrt.open_osfhandle(handle, 0)
        else:
            _resource_tracker._fd = data["tracker_args"]["fd"]

    if 'init_main_from_name' in data:
        _fixup_main_from_name(data['init_main_from_name'])
    elif 'init_main_from_path' in data:
        _fixup_main_from_path(data['init_main_from_path'])
Example #9
def prepare(data):
    """
    Try to get current process ready to unpickle process object
    """
    old_main_modules.append(sys.modules['__main__'])
    if 'name' in data:
        process.current_process().name = data['name']
    if 'authkey' in data:
        process.current_process()._authkey = data['authkey']
    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()
    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])
    if 'sys_path' in data:
        sys.path = data['sys_path']
    if 'sys_argv' in data:
        sys.argv = data['sys_argv']
    if 'dir' in data:
        os.chdir(data['dir'])
    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']
    if 'main_path' in data:
        main_path = data['main_path']
        main_name = os.path.splitext(os.path.basename(main_path))[0]
        if main_name == '__init__':
            main_name = os.path.basename(os.path.dirname(main_path))

        if main_name != 'ipython':
            import imp
            if main_path is None:
                dirs = None
            elif os.path.basename(main_path).startswith('__init__.py'):
                dirs = [os.path.dirname(os.path.dirname(main_path))]
            else:
                dirs = [os.path.dirname(main_path)]
            assert main_name not in sys.modules, main_name
            file, path_name, etc = imp.find_module(main_name, dirs)
            try:
                main_module = imp.load_module('__parents_main__', file, path_name, etc)
            finally:
                if file:
                    file.close()

            sys.modules['__main__'] = main_module
            main_module.__name__ = '__main__'
            for obj in main_module.__dict__.values():
                try:
                    if obj.__module__ == '__parents_main__':
                        obj.__module__ = '__main__'
                except Exception:
                    pass

    return
Example #10
def teardown():
    # Don't want SUBDEBUG log messages at finalization.
    from multiprocessing.util import get_logger
    get_logger().setLevel(logging.WARNING)
    import threading
    import os
    if os.path.exists("test.db"):
        os.remove("test.db")
    remaining_threads = [thread for thread in threading.enumerate()
                            if thread.name != "MainThread"]
    if remaining_threads:
        sys.stderr.write(
            "\n\n**WARNING**: Remaning threads at teardown: %r...\n" % (
                remaining_threads))
Example #11
def initialize_logger(config):
    class StdErrWrapper:
        """
            Call wrapper for stderr
        """
        def write(self, s):
            get_logger().info(s)
    import logging

    logger = get_logger()
    values = dict(
        format='[%(levelname)s/%(processName)s] %(message)s',
        filename=None,
        level='INFO',
    )
    if config and config.has_section('log'):
        for (name, value) in config.items('log'):
            values[name] = value

    if values['filename']:
        formatter = logging.Formatter(values['format'])
        handler = logging.FileHandler(values['filename'])
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.setLevel(
            getattr(logging, values['level'].upper(), logging.INFO)
        )
        sys.stderr = StdErrWrapper()
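The config object read above is presumably a ConfigParser-style instance. A minimal sketch of a [log] section that initialize_logger would pick up (the file path is illustrative):

from configparser import ConfigParser

# interpolation=None so the %(levelname)s tokens in the format value are
# not mistaken for ConfigParser interpolation references.
config = ConfigParser(interpolation=None)
config.read_string("""
[log]
filename = /tmp/command.log
level = debug
format = [%(levelname)s/%(processName)s] %(message)s
""")
assert config.has_section('log')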
Example #12
def setup_logging_subsystem(loglevel=conf.CELERYD_LOG_LEVEL, logfile=None,
        format=conf.CELERYD_LOG_FORMAT, colorize=conf.CELERYD_LOG_COLOR,
        **kwargs):
    global _setup
    if not _setup:
        try:
            mputil._logger = None
        except AttributeError:
            pass
        ensure_process_aware_logger()
        logging.Logger.manager.loggerDict.clear()
        receivers = signals.setup_logging.send(sender=None,
                                               loglevel=loglevel,
                                               logfile=logfile,
                                               format=format,
                                               colorize=colorize)
        if not receivers:
            root = logging.getLogger()

            if conf.CELERYD_HIJACK_ROOT_LOGGER:
                root.handlers = []

            mp = mputil.get_logger()
            for logger in (root, mp):
                _setup_logger(logger, logfile, format, colorize, **kwargs)
                logger.setLevel(loglevel)
        _setup = True
        return receivers
Example #13
    def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None, colorize=None, **kwargs):
        if Logging._setup:
            return
        loglevel = mlevel(loglevel or self.loglevel)
        format = format or self.format
        if colorize is None:
            colorize = self.supports_color(logfile)

        if mputil and hasattr(mputil, "_logger"):
            mputil._logger = None
        if not is_py3k:
            ensure_process_aware_logger()
        receivers = signals.setup_logging.send(
            sender=None, loglevel=loglevel, logfile=logfile, format=format, colorize=colorize
        )
        if not receivers:
            root = logging.getLogger()

            if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
                root.handlers = []

            mp = mputil.get_logger() if mputil else None
            for logger in filter(None, (root, mp)):
                self._setup_logger(logger, logfile, format, colorize, **kwargs)
                logger.setLevel(mlevel(loglevel))
                signals.after_setup_logger.send(
                    sender=None, logger=logger, loglevel=loglevel, logfile=logfile, format=format, colorize=colorize
                )

        # This is a hack for multiprocessing's fork+exec, so that
        # logging before Process.run works.
        os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel), _MP_FORK_LOGFILE_=logfile or "", _MP_FORK_LOGFORMAT_=format)
        Logging._setup = True

        return receivers
Example #14
def get_logger():
    """
    Return package logger -- if it does not already exist then it is created
    """
    from multiprocessing.util import get_logger

    return get_logger()
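Worth noting for this wrapper (and Examples #40 and #41 below): the logger returned by multiprocessing.util.get_logger() is named 'multiprocessing', has propagation to the root logger disabled, and starts with no handlers, so nothing is emitted until one is attached. A minimal sketch:

import logging
from multiprocessing.util import get_logger

logger = get_logger()                       # the 'multiprocessing' logger
logger.addHandler(logging.StreamHandler())  # none attached by default
logger.setLevel(logging.INFO)
logger.info('now visible on stderr')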
Example #15
def teardown():
    # Don't want SUBDEBUG log messages at finalization.
    from multiprocessing.util import get_logger
    get_logger().setLevel(logging.WARNING)
    import threading
    import os
    if os.path.exists("test.db"):
        os.remove("test.db")
    remaining_threads = [
        thread for thread in threading.enumerate()
        if thread.name != "MainThread"
    ]
    if remaining_threads:
        sys.stderr.write(
            "\n\n**WARNING**: Remaning threads at teardown: %r...\n" %
            (remaining_threads))
Example #16
    def setup_logging_subsystem(self, loglevel=None, logfile=None,
            format=None, colorize=None, **kwargs):
        if Logging._setup:
            return
        loglevel = loglevel or self.loglevel
        format = format or self.format
        if colorize is None:
            colorize = self.supports_color(logfile)

        if mputil:
            try:
                mputil._logger = None
            except AttributeError:
                pass
        ensure_process_aware_logger()
        receivers = signals.setup_logging.send(sender=None,
                                               loglevel=loglevel,
                                               logfile=logfile,
                                               format=format,
                                               colorize=colorize)
        if not receivers:
            root = logging.getLogger()

            if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
                root.handlers = []

            mp = mputil and mputil.get_logger() or None
            for logger in (root, mp):
                if logger:
                    self._setup_logger(logger, logfile, format,
                                       colorize, **kwargs)
                    logger.setLevel(loglevel)
        Logging._setup = True
        return receivers
Example #17
def setup_logging_subsystem(loglevel=conf.CELERYD_LOG_LEVEL,
                            logfile=None,
                            format=conf.CELERYD_LOG_FORMAT,
                            colorize=conf.CELERYD_LOG_COLOR,
                            **kwargs):
    global _setup
    if not _setup:
        try:
            mputil._logger = None
        except AttributeError:
            pass
        ensure_process_aware_logger()
        logging.Logger.manager.loggerDict.clear()
        receivers = signals.setup_logging.send(sender=None,
                                               loglevel=loglevel,
                                               logfile=logfile,
                                               format=format,
                                               colorize=colorize)
        if not receivers:
            root = logging.getLogger()

            if conf.CELERYD_HIJACK_ROOT_LOGGER:
                root.handlers = []

            mp = mputil.get_logger()
            for logger in (root, mp):
                _setup_logger(logger, logfile, format, colorize, **kwargs)
                logger.setLevel(loglevel)
        _setup = True
        return receivers
Example #18
    def main():
        assert is_forking(sys.argv)
        fd = int(sys.argv[-1])
        from_parent = os.fdopen(fd, 'rb')
        current_process()._inheriting = True
        preparation_data = load(from_parent)
        _forking.prepare(preparation_data)

        # Huge hack to make logging before Process.run work.
        loglevel = os.environ.get("_MP_FORK_LOGLEVEL_")
        logfile = os.environ.get("_MP_FORK_LOGFILE_") or None
        format = os.environ.get("_MP_FORK_LOGFORMAT_")
        if loglevel:
            from multiprocessing import util
            import logging
            logger = util.get_logger()
            logger.setLevel(int(loglevel))
            if not logger.handlers:
                logger._rudimentary_setup = True
                logfile = logfile or sys.__stderr__
                if hasattr(logfile, "write"):
                    handler = logging.StreamHandler(logfile)
                else:
                    handler = logging.FileHandler(logfile)
                formatter = logging.Formatter(format
                                              or util.DEFAULT_LOGGING_FORMAT)
                handler.setFormatter(formatter)
                logger.addHandler(handler)

        self = load(from_parent)
        current_process()._inheriting = False

        exitcode = self._bootstrap()
        exit(exitcode)
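The _MP_FORK_* environment variables read above are published by the parent process; Examples #13 and #36 show the producing side. A condensed sketch of that handshake, assuming the same variable names:

import logging
import os

# Parent side: publish logging settings for the child's bootstrap, which
# reads them back in main() before Process.run starts.
os.environ.update(
    _MP_FORK_LOGLEVEL_=str(logging.INFO),
    _MP_FORK_LOGFILE_='',   # empty means: child falls back to sys.__stderr__
    _MP_FORK_LOGFORMAT_='[%(levelname)s/%(processName)s] %(message)s',
)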
Example #19
def initialize_logger(config):
    class StdErrWrapper:
        """
            Call wrapper for stderr
        """
        def write(self, s):
            get_logger().info(s)

    import logging

    logger = get_logger()
    values = dict(
        format='[%(levelname)s/%(processName)s] %(message)s',
        filename=None,
        level='INFO',
    )
    if config and config.has_section('log'):
        for (name, value) in config.items('log'):
            values[name] = value

    if values['filename']:
        formatter = logging.Formatter(values['format'])
        handler = logging.FileHandler(values['filename'])
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.setLevel(getattr(logging, values['level'].upper(),
                                logging.INFO))
        sys.stderr = StdErrWrapper()
Example #20
    def setup_logging_subsystem(self, loglevel=None, logfile=None,
            format=None, colorize=None, **kwargs):
        loglevel = loglevel or self.loglevel
        format = format or self.format
        colorize = self.app.either("CELERYD_LOG_COLOR", colorize)

        if self.__class__._setup:
            return

        try:
            mputil._logger = None
        except AttributeError:
            pass
        ensure_process_aware_logger()
        logging.Logger.manager.loggerDict.clear()
        receivers = signals.setup_logging.send(sender=None,
                                               loglevel=loglevel,
                                               logfile=logfile,
                                               format=format,
                                               colorize=colorize)
        if not receivers:
            root = logging.getLogger()
            mp = mputil.get_logger()
            for logger in (root, mp):
                self._setup_logger(logger, logfile,
                                   format, colorize, **kwargs)
                logger.setLevel(loglevel)
        self.__class__._setup = True
        return receivers
Example #21
    def setup_logging_subsystem(self, loglevel=None, logfile=None,
            format=None, colorize=None, **kwargs):
        if Logging._setup:
            return
        loglevel = loglevel or self.loglevel
        format = format or self.format
        if colorize is None:
            colorize = self.supports_color(logfile)

        if mputil and hasattr(mputil, "_logger"):
            mputil._logger = None
        ensure_process_aware_logger()
        receivers = signals.setup_logging.send(sender=None,
                        loglevel=loglevel, logfile=logfile,
                        format=format, colorize=colorize)
        if not receivers:
            root = logging.getLogger()

            if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
                root.handlers = []

            mp = mputil.get_logger() if mputil else None
            for logger in filter(None, (root, mp)):
                self._setup_logger(logger, logfile, format, colorize, **kwargs)
                logger.setLevel(loglevel)
                signals.after_setup_logger.send(sender=None, logger=logger,
                                        loglevel=loglevel, logfile=logfile,
                                        format=format, colorize=colorize)
        Logging._setup = True

        return receivers
Example #22
    def setup_logging_subsystem(self, loglevel=None, logfile=None,
            format=None, colorize=None, **kwargs):
        if Logging._setup:
            return
        loglevel = loglevel or self.loglevel
        format = format or self.format
        if colorize is None:
            colorize = self.supports_color(logfile)

        if mputil and hasattr(mputil, "_logger"):
            mputil._logger = None
        ensure_process_aware_logger()
        receivers = signals.setup_logging.send(sender=None,
                        loglevel=loglevel, logfile=logfile,
                        format=format, colorize=colorize)
        if not receivers:
            root = logging.getLogger()

            if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
                root.handlers = []

            mp = mputil.get_logger() if mputil else None
            for logger in filter(None, (root, mp)):
                self._setup_logger(logger, logfile, format, colorize, **kwargs)
                logger.setLevel(loglevel)
        Logging._setup = True
        return receivers
Example #23
def setup(enable_debug: bool, no_colors: bool):
    global _no_colors, _init, _wrapper, _debug, _logger

    _init = True

    _wrapper = textwrap.TextWrapper()
    width = shutil.get_terminal_size().columns
    _wrapper.width = width if width > 0 else 80
    _wrapper.subsequent_indent = "\t\t\t\N{DOWNWARDS ARROW WITH TIP RIGHTWARDS} "
    _wrapper.tabsize = 4
    _wrapper.drop_whitespace = False

    # setup the root logger
    rt = logging.getLogger()
    rt.addHandler(_LogHandler())
    rt.setLevel(logging.DEBUG)

    # setup our logger
    _logger = logging.getLogger("yawast")
    _logger.setLevel(logging.DEBUG)
    _logger.addHandler(_LogHandler())
    _logger.propagate = False

    # setup the logger for multiprocessing
    lg = get_logger()
    lg.level = logging.DEBUG
    lg.addHandler(_LogHandler())

    if not no_colors:
        init()
    else:
        _no_colors = True

    if enable_debug:
        toggle_debug()
Example #24
    def main():
        assert is_forking(sys.argv)
        fd = int(sys.argv[-1])
        from_parent = os.fdopen(fd, 'rb')
        current_process()._inheriting = True
        preparation_data = load(from_parent)
        _forking.prepare(preparation_data)

        # Huge hack to make logging before Process.run work.
        loglevel = os.environ.get("_MP_FORK_LOGLEVEL_")
        logfile = os.environ.get("_MP_FORK_LOGFILE_") or None
        format = os.environ.get("_MP_FORK_LOGFORMAT_")
        if loglevel:
            from multiprocessing import util
            import logging
            logger = util.get_logger()
            logger.setLevel(int(loglevel))
            if not logger.handlers:
                logger._rudimentary_setup = True
                logfile = logfile or sys.__stderr__
                if hasattr(logfile, "write"):
                    handler = logging.StreamHandler(logfile)
                else:
                    handler = logging.FileHandler(logfile)
                formatter = logging.Formatter(
                        format or util.DEFAULT_LOGGING_FORMAT)
                handler.setFormatter(formatter)
                logger.addHandler(handler)

        self = load(from_parent)
        current_process()._inheriting = False

        exitcode = self._bootstrap()
        exit(exitcode)
Example #25
def start_django_command(config, args):
    '''
    Start a Django management command.
    
    This command is supposed to run in a spawned child process.
    It tries to import the settings of the project before handling the command.
    '''
    initialize_logger(config)

    log('Starting command : %s' % ' '.join(args))
    get_logger().info('Starting command : %s' % ' '.join(args))
    from django.core.management import execute_from_command_line

    try:
        execute_from_command_line(args)
    except Exception:
        error('Exception occurred: %s' % traceback.format_exc())
Example #26
def start_django_command(config, args):
    '''
    Start a Django management command.
    
    This command is supposed to run in a spawned child process.
    It tries to import the settings of the project before handling the command.
    '''    
    initialize_logger(config)

    log('Starting command : %s' % ' '.join(args))
    get_logger().info('Starting command : %s' % ' '.join(args))
    from django.core.management import execute_from_command_line
    
    try:
        execute_from_command_line(args)
    except Exception:
        error('Exception occurred: %s' % traceback.format_exc())
Example #27
def prepare(data):
    '''
    Try to get current process ready to unpickle process object
    '''
    if 'name' in data:
        process.current_process().name = data['name']

    if 'authkey' in data:
        process.current_process().authkey = data['authkey']

    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()

    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])

    if 'log_fmt' in data:
        import logging
        util.get_logger().handlers[0].setFormatter(
            logging.Formatter(data['log_fmt'])
        )

    if 'sys_path' in data:
        sys.path = data['sys_path']

    if 'sys_argv' in data:
        sys.argv = data['sys_argv']

    if 'dir' in data:
        os.chdir(data['dir'])

    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']

    if hasattr(mp, 'set_start_method'):
        mp.set_start_method('loky', force=True)

    if 'tracker_pid' in data:
        from . import semaphore_tracker
        semaphore_tracker._semaphore_tracker._pid = data["tracker_pid"]

    if 'init_main_from_name' in data:
        _fixup_main_from_name(data['init_main_from_name'])
    elif 'init_main_from_path' in data:
        _fixup_main_from_path(data['init_main_from_path'])
Example #28
File: spawn.py Project: rth/loky
def prepare(data):
    '''
    Try to get current process ready to unpickle process object
    '''
    if 'name' in data:
        process.current_process().name = data['name']

    if 'authkey' in data:
        process.current_process().authkey = data['authkey']

    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()

    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])

    if 'log_fmt' in data:
        import logging
        util.get_logger().handlers[0].setFormatter(
            logging.Formatter(data['log_fmt']))

    if 'sys_path' in data:
        sys.path = data['sys_path']

    if 'sys_argv' in data:
        sys.argv = data['sys_argv']

    if 'dir' in data:
        os.chdir(data['dir'])

    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']

    if hasattr(mp, 'set_start_method'):
        mp.set_start_method('loky', force=True)

    if 'tracker_pid' in data:
        from . import semaphore_tracker
        semaphore_tracker._semaphore_tracker._pid = data["tracker_pid"]

    if 'init_main_from_name' in data:
        _fixup_main_from_name(data['init_main_from_name'])
    elif 'init_main_from_path' in data:
        _fixup_main_from_path(data['init_main_from_path'])
Example #29
def get_default_logger(loglevel=None):
    """Get default logger instance.

    :keyword loglevel: Initial log level.

    """
    from multiprocessing.util import get_logger
    logger = get_logger()
    if loglevel is not None:
        logger.setLevel(loglevel)
    return logger
Example #30
def _hijack_multiprocessing_logger():
    from multiprocessing import util as mputil
    global _hijacked

    if _hijacked:
        return mputil.get_logger()

    ensure_process_aware_logger()

    logging.Logger.manager.loggerDict.clear()

    try:
        if mputil._logger is not None:
            mputil._logger = None
    except AttributeError:
        pass

    _hijacked = True
    return mputil.get_logger()
Example #31
def _hijack_multiprocessing_logger():
    from multiprocessing import util as mputil
    global _hijacked

    if _hijacked:
        return mputil.get_logger()

    ensure_process_aware_logger()

    logging.Logger.manager.loggerDict.clear()

    try:
        if mputil._logger is not None:
            mputil._logger = None
    except AttributeError:
        pass

    _hijacked = True
    return mputil.get_logger()
Example #32
def idle_watcher():
    return  # early return: the watcher loop below is disabled in this snippet
    logger = get_logger()
    prev = None
    while True:
        idle()
        current = format_run_info()
        if prev != current:
            prev = current
            logger.warning("\n".join(current))
            sleep(0.01)
Example #33
def teardown():
    # Don't want SUBDEBUG log messages at finalization.
    from multiprocessing.util import get_logger
    get_logger().setLevel(logging.WARNING)
    import threading
    import os
    if os.path.exists("test.db"):
        os.remove("test.db")
    remaining_threads = [thread for thread in threading.enumerate()
                            if thread.name != "MainThread"]
    if remaining_threads:
        sys.stderr.write(
            "\n\n**WARNING**: Remaning threads at teardown: %r...\n" % (
                remaining_threads))
        for thread in remaining_threads:
            try:
                started_by = thread._started_by[thread.ident]
            except (AttributeError, KeyError):
                pass
            else:
                sys.stderr.write("THREAD %r STARTED BY:\n%r\n" % (
                    thread, started_by))
Example #34
def _manager_process(addr):
    logger = get_logger()
    spawn(idle_watcher)
    try:
        listener: Listener = Listener(addr, "AF_UNIX")

        with listener:
            manager = SyncManager()
            manager.start(manager_init)
            try:

                def process_queue(q: Queue, idx: int):
                    for val_idx in range(0, QUEUE_DEPTH):
                        put_string = f"Request #{idx}, Value #{val_idx}"
                        logger.info(f"**** Sending {put_string} on {q._id}")
                        q.put(put_string)
                        logger.info(f"**** Sent {put_string} on {q._id}")
                        sleep(0.05)

                    logger.info(
                        f"**** Putting None in queue request #{idx} to empty on {q._id}"
                    )
                    q.put(None)
                    logger.info(
                        f"**** Waiting for queue request #{idx} to empty on {q._id}"
                    )
                    q.join()
                    logger.info(
                        f"**** All done with request #{idx} on {q._id}")

                def process_conn(conn: Connection, idx: int):
                    with conn:
                        logger.info(f"**** Accepted request #{idx}")
                        q: Queue = manager.Queue(QUEUE_SIZE)
                        logger.info(
                            f"**** Passing request #{idx} queue {q._id}")
                        conn.send(q)
                        logger.info(
                            f"**** Passed request #{idx} queue {q._id}")

                    spawn(process_queue, q, idx)

                for i in range(0, REQUEST_COUNT):
                    spawn(process_conn, listener.accept(), i)

                wait(timeout=300)
                # logger.warning("\n".join(format_run_info()))
            finally:
                manager.shutdown()
    finally:
        get_hub().destroy()
Example #35
def teardown():
    # Don't want SUBDEBUG log messages at finalization.
    from multiprocessing.util import get_logger
    get_logger().setLevel(logging.WARNING)
    import threading
    import os
    if os.path.exists("test.db"):
        os.remove("test.db")
    remaining_threads = [
        thread for thread in threading.enumerate()
        if thread.name != "MainThread"
    ]
    if remaining_threads:
        sys.stderr.write(
            "\n\n**WARNING**: Remaning threads at teardown: %r...\n" %
            (remaining_threads))
        for thread in remaining_threads:
            try:
                started_by = thread._started_by[thread.ident]
            except (AttributeError, KeyError):
                pass
            else:
                sys.stderr.write("THREAD %r STARTED BY:\n%r\n" %
                                 (thread, started_by))
Example #36
    def setup_logging_subsystem(self,
                                loglevel=None,
                                logfile=None,
                                format=None,
                                colorize=None,
                                **kwargs):
        if Logging._setup:
            return
        loglevel = mlevel(loglevel or self.loglevel)
        format = format or self.format
        if colorize is None:
            colorize = self.supports_color(logfile)

        if mputil and hasattr(mputil, "_logger"):
            mputil._logger = None
        if not is_py3k:
            ensure_process_aware_logger()
        receivers = signals.setup_logging.send(sender=None,
                                               loglevel=loglevel,
                                               logfile=logfile,
                                               format=format,
                                               colorize=colorize)
        if not receivers:
            root = logging.getLogger()

            if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
                root.handlers = []

            mp = mputil.get_logger() if mputil else None
            for logger in filter(None, (root, mp)):
                self._setup_logger(logger, logfile, format, colorize, **kwargs)
                logger.setLevel(mlevel(loglevel))
                signals.after_setup_logger.send(sender=None,
                                                logger=logger,
                                                loglevel=loglevel,
                                                logfile=logfile,
                                                format=format,
                                                colorize=colorize)

        # This is a hack for multiprocessing's fork+exec, so that
        # logging before Process.run works.
        os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel),
                          _MP_FORK_LOGFILE_=logfile or "",
                          _MP_FORK_LOGFORMAT_=format)
        Logging._setup = True

        return receivers
Example #37
def instrument_conn(conn):
    _old_send = conn.send
    _old_recv = conn.recv

    logger = get_logger()
    thread_name = threading.current_thread().name

    def new_send(obj):
        logger.info("%s: %s: sending %r", thread_name, conn.fileno(), obj)
        return _old_send(obj)

    def new_recv():
        obj = _old_recv()
        logger.info("%s: %s: received %r", thread_name, conn.fileno(), obj)
        return obj

    conn.send = new_send
    conn.recv = new_recv

    return conn
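A minimal usage sketch for the wrapper above (it assumes instrument_conn plus a configured multiprocessing logger, and a Python-level Connection as in CPython 3, so the send/recv attributes can be rebound):

from multiprocessing import Pipe

parent_conn, child_conn = Pipe()
parent_conn = instrument_conn(parent_conn)  # wrap one end with logging

parent_conn.send({'op': 'ping'})  # logged by new_send before transmission
child_conn.send('pong')
print(parent_conn.recv())         # logged by new_recv after receipt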
Example #38
def main():  #pragma no cover
    """
    Code which runs a host manager.
    Expects configuration data from parent on `stdin`.
    Replies with address and optionally public key.
    The environment variable ``OPENMDAO_KEEPDIRS`` can be used to avoid
    removal of the temporary directory used here.
    """
    sys.stdout = open('stdout', 'w')
    sys.stderr = open('stderr', 'w')

    #    util.log_to_stderr(logging.DEBUG)
    # Avoid root possibly masking us.
    logging.getLogger().setLevel(logging.DEBUG)

    import platform
    hostname = platform.node()
    pid = os.getpid()
    ident = '(%s:%d)' % (hostname, pid)
    print '%s main startup' % ident
    sys.stdout.flush()

    # Get data from parent over stdin.
    data = cPickle.load(sys.stdin)
    sys.stdin.close()
    print '%s data received' % ident

    authkey = data['authkey']
    allow_shell = data['allow_shell']
    allowed_users = data['allowed_users']
    print '%s using %s authentication' % (ident, keytype(authkey))
    if allowed_users is None:
        print '%s allowed_users: ANY' % ident
    else:
        print '%s allowed_users: %s' % (ident, sorted(allowed_users.keys()))
    if allow_shell:
        print '%s ALLOWING SHELL ACCESS' % ident
    sys.stdout.flush()
    log_level = data['dist_log_level']
    os.environ['OPENMDAO_KEEPDIRS'] = data['keep_dirs']

    exc = None
    server = None
    try:
        # Update HostManager registry.
        dct = data['registry']
        print '%s registry:' % ident
        for name in dct.keys():
            module = dct[name]
            print '    %s: %s' % (name, module)
            mod = __import__(module, fromlist=name)
            cls = getattr(mod, name)
            register(cls, HostManager)

        # Set some stuff.
        print '%s preparing to fork, log level %d' % (ident, log_level)
        sys.stdout.flush()
        util.get_logger().setLevel(log_level)
        forking.prepare(data)

        # Create Server for a HostManager object.
        name = '%d[%d]' % (data['index'], pid)
        logging.getLogger(name).setLevel(log_level)
        server = OpenMDAO_Server(HostManager._registry, (hostname, 0),
                                 authkey,
                                 'pickle',
                                 name=name,
                                 allowed_users=allowed_users,
                                 allowed_hosts=[data['parent_address'][0]])
    except Exception as exc:
        print '%s caught exception: %s' % (ident, exc)

    # Report server address and public key back to parent.
    print '%s connecting to parent at %s' % (ident, data['parent_address'])
    sys.stdout.flush()
    conn = connection.Client(data['parent_address'], authkey=authkey)
    if exc:
        conn.send((data['index'], None, str(exc)))
    else:
        conn.send((data['index'], server.address, server.public_key_text))
    conn.close()

    if exc:
        print '%s exiting' % ident
        sys.exit(1)

    # Set name etc.
    current_process()._server = server
    current_process()._name = 'Host-%s:%s' % server.address
    current_process().authkey = authkey
    logging.getLogger(current_process()._name).setLevel(log_level)
    util._run_after_forkers()

    # Register a cleanup function.
    def cleanup(directory):
        keep_dirs = int(os.environ.get('OPENMDAO_KEEPDIRS', '0'))
        if not keep_dirs and os.path.exists(directory):
            print '%s removing directory %s' % (ident, directory)
            shutil.rmtree(directory)
        print '%s shutting down host manager' % ident

    util.Finalize(None, cleanup, args=[data['dir']], exitpriority=0)

    # Start host manager.
    print '%s remote host manager starting in %s' % (ident, data['dir'])
    sys.stdout.flush()
    server.serve_forever()
Example #39
def prepare(data):
    '''
    Try to get current process ready to unpickle process object
    '''
    old_main_modules.append(sys.modules['__main__'])

    if 'name' in data:
        process.current_process().name = data['name']

    if 'authkey' in data:
        process.current_process()._authkey = data['authkey']

    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()

    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])

    if 'sys_path' in data:
        sys.path = data['sys_path']

    if 'sys_argv' in data:
        sys.argv = data['sys_argv']

    if 'dir' in data:
        os.chdir(data['dir'])

    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']

    if 'main_path' in data:
        # XXX (ncoghlan): The following code makes several bogus
        # assumptions regarding the relationship between __file__
        # and a module's real name. See PEP 302 and issue #10845
        # The problem is resolved properly in Python 3.4+, as
        # described in issue #19946

        main_path = data['main_path']
        main_name = os.path.splitext(os.path.basename(main_path))[0]
        if main_name == '__init__':
            main_name = os.path.basename(os.path.dirname(main_path))

        if main_name == '__main__':
            # For directory and zipfile execution, we assume an implicit
            # "if __name__ == '__main__':" around the module, and don't
            # rerun the main module code in spawned processes
            main_module = sys.modules['__main__']
            main_module.__file__ = main_path
        elif main_name != 'ipython':
            # Main modules not actually called __main__.py may
            # contain additional code that should still be executed
            import imp

            if main_path is None:
                dirs = None
            elif os.path.basename(main_path).startswith('__init__.py'):
                dirs = [os.path.dirname(os.path.dirname(main_path))]
            else:
                dirs = [os.path.dirname(main_path)]

            assert main_name not in sys.modules, main_name
            file, path_name, etc = imp.find_module(main_name, dirs)
            try:
                # We would like to do "imp.load_module('__main__', ...)"
                # here.  However, that would cause 'if __name__ ==
                # "__main__"' clauses to be executed.
                main_module = imp.load_module(
                    '__parents_main__', file, path_name, etc
                    )
            finally:
                if file:
                    file.close()

            sys.modules['__main__'] = main_module
            main_module.__name__ = '__main__'

            # Try to make the potentially picklable objects in
            # sys.modules['__main__'] realize they are in the main
            # module -- somewhat ugly.
            for obj in main_module.__dict__.values():
                try:
                    if obj.__module__ == '__parents_main__':
                        obj.__module__ = '__main__'
                except Exception:
                    pass
Example #40
def get_logger():
    """
    Return package logger -- if it does not already exist then it is created
    """
    from multiprocessing.util import get_logger
    return get_logger()
Example #41
def get_logger():
    from multiprocessing.util import get_logger
    return get_logger()
Example #42
def prepare(data):
    '''
    Try to get current process ready to unpickle process object
    '''
    old_main_modules.append(sys.modules['__main__'])

    if 'name' in data:
        process.current_process().name = data['name']

    if 'authkey' in data:
        process.current_process()._authkey = data['authkey']

    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()

    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])

    if 'sys_path' in data:
        sys.path = data['sys_path']

    if 'sys_argv' in data:
        sys.argv = data['sys_argv']

    if 'dir' in data:
        os.chdir(data['dir'])

    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']

    if 'main_path' in data:
        # XXX (ncoghlan): The following code makes several bogus
        # assumptions regarding the relationship between __file__
        # and a module's real name. See PEP 302 and issue #10845
        main_path = data['main_path']
        main_name = os.path.splitext(os.path.basename(main_path))[0]
        if main_name == '__init__':
            main_name = os.path.basename(os.path.dirname(main_path))

        if main_name == '__main__':
            main_module = sys.modules['__main__']
            main_module.__file__ = main_path
        elif main_name != 'ipython':
            # Main modules not actually called __main__.py may
            # contain additional code that should still be executed
            import imp

            if main_path is None:
                dirs = None
            elif os.path.basename(main_path).startswith('__init__.py'):
                dirs = [os.path.dirname(os.path.dirname(main_path))]
            else:
                dirs = [os.path.dirname(main_path)]

            assert main_name not in sys.modules, main_name
            sys.modules.pop('__mp_main__', None)
            file, path_name, etc = imp.find_module(main_name, dirs)
            try:
                # We should not do 'imp.load_module("__main__", ...)'
                # since that would execute 'if __name__ == "__main__"'
                # clauses, potentially causing a pseudo fork bomb.
                main_module = imp.load_module(
                    '__mp_main__', file, path_name, etc
                    )
            finally:
                if file:
                    file.close()

            sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
Example #43
def prepare(data):
    '''
    Try to get current process ready to unpickle process object
    '''
    old_main_modules.append(sys.modules['__main__'])

    if 'name' in data:
        process.current_process().name = data['name']

    if 'authkey' in data:
        process.current_process()._authkey = data['authkey']

    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()

    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])

    if 'sys_path' in data:
        sys.path = data['sys_path']

    if 'sys_argv' in data:
        sys.argv = data['sys_argv']

    if 'dir' in data:
        os.chdir(data['dir'])

    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']

    if 'main_path' in data:
        # XXX (ncoghlan): The following code makes several bogus
        # assumptions regarding the relationship between __file__
        # and a module's real name. See PEP 302 and issue #10845
        main_path = data['main_path']
        main_name = os.path.splitext(os.path.basename(main_path))[0]
        if main_name == '__init__':
            main_name = os.path.basename(os.path.dirname(main_path))

        if main_name == '__main__':
            main_module = sys.modules['__main__']
            main_module.__file__ = main_path
        elif main_name != 'ipython':
            # Main modules not actually called __main__.py may
            # contain additional code that should still be executed
            import imp

            if main_path is None:
                dirs = None
            elif os.path.basename(main_path).startswith('__init__.py'):
                dirs = [os.path.dirname(os.path.dirname(main_path))]
            else:
                dirs = [os.path.dirname(main_path)]

            assert main_name not in sys.modules, main_name
            file, path_name, etc = imp.find_module(main_name, dirs)
            try:
                # We would like to do "imp.load_module('__main__', ...)"
                # here.  However, that would cause 'if __name__ ==
                # "__main__"' clauses to be executed.
                main_module = imp.load_module(
                    '__parents_main__', file, path_name, etc
                    )
            finally:
                if file:
                    file.close()

            sys.modules['__main__'] = main_module
            main_module.__name__ = '__main__'

            # Try to make the potentially picklable objects in
            # sys.modules['__main__'] realize they are in the main
            # module -- somewhat ugly.
            for obj in list(main_module.__dict__.values()):
                try:
                    if obj.__module__ == '__parents_main__':
                        obj.__module__ = '__main__'
                except Exception:
                    pass
Example #44
def get_multiprocessing_logger():
    return mputil.get_logger() if mputil else None
Example #45
    def __init__(self, limit, logger=None):
        self.limit = limit
        self.logger = logger or get_logger()
        self._pool = None
Example #46
def main(args: Iterable[str]) -> int:
    """
    The main program loop

    :param args: Command line arguments
    :return: The program exit code
    """
    # Handle command line arguments
    args = handle_args(args)
    set_verbosity_logger(logger, args.verbosity)

    # Go to the working directory
    config_file = os.path.realpath(args.config)
    os.chdir(os.path.dirname(config_file))

    try:
        # Read the configuration
        config = config_parser.load_config(config_file)
    except (ConfigurationSyntaxError, DataConversionError) as e:
        # Make the config exceptions a bit more readable
        msg = e.message
        if e.lineno and e.lineno != -1:
            msg += ' on line {}'.format(e.lineno)
        if e.url:
            parts = urlparse(e.url)
            msg += ' in {}'.format(parts.path)
        logger.critical(msg)
        return 1
    except ValueError as e:
        logger.critical(e)
        return 1

    # Immediately drop privileges in a non-permanent way so we create logs with the correct owner
    drop_privileges(config.user, config.group, permanent=False)

    # Trigger the forkserver at this point, with dropped privileges, and ignoring KeyboardInterrupt
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    multiprocessing.set_start_method('forkserver')
    forkserver.ensure_running()

    # Initialise the logger
    config.logging.configure(logger, verbosity=args.verbosity)
    logger.info("Starting Python DHCPv6 server v{}".format(dhcpkit.__version__))

    # Create our selector
    sel = selectors.DefaultSelector()

    # Convert signals to messages on a pipe
    signal_r, signal_w = os.pipe()
    flags = fcntl.fcntl(signal_w, fcntl.F_GETFL, 0)
    flags = flags | os.O_NONBLOCK
    fcntl.fcntl(signal_w, fcntl.F_SETFL, flags)
    signal.set_wakeup_fd(signal_w)
    sel.register(signal_r, selectors.EVENT_READ)

    # Ignore normal signal handling by attaching dummy handlers (SIG_IGN will not put messages on the pipe)
    signal.signal(signal.SIGINT, lambda signum, frame: None)
    signal.signal(signal.SIGTERM, lambda signum, frame: None)
    signal.signal(signal.SIGHUP, lambda signum, frame: None)
    signal.signal(signal.SIGINFO, lambda signum, frame: None)

    # Excessive exception catcher
    exception_history = []

    # Some stats
    message_count = 0

    # Initialise the logger again
    config.logging.configure(logger, verbosity=args.verbosity)

    # Create a queue for our children to log to
    logging_queue = multiprocessing.Queue()

    global logging_thread
    logging_thread = queue_logger.QueueLevelListener(logging_queue, *logger.handlers)
    logging_thread.start()

    # Enable multiprocessing logging, mostly useful for development
    if config.logging.log_multiprocessing:
        mp_logger = get_logger()
        mp_logger.propagate = True

    # This will be where we store the new config after a reload
    listeners = []
    stopping = False
    while not stopping:
        # Safety first: assume we want to quit when we break the inner loop unless told otherwise
        stopping = True

        # Restore our privileges while we write the PID file and open network listeners
        restore_privileges()

        # Open the network listeners
        old_listeners = listeners
        listeners = []
        for listener_factory in config.listener_factories:
            # Create new listener while trying to re-use existing sockets
            listeners.append(listener_factory(old_listeners + listeners))

        # Write the PID file
        pid_filename = create_pidfile(args=args, config=config)

        # And Drop privileges again
        drop_privileges(config.user, config.group, permanent=False)

        # Remove any file descriptors from the previous config
        for fd, key in list(sel.get_map().items()):
            # Don't remove our signal handling pipe and still existing listeners
            if key.fileobj == signal_r or key.fileobj in listeners:
                continue

            # Seems we don't need this one anymore
            sel.unregister(key.fileobj)

        # Collect all the file descriptors we want to listen to
        existing_listeners = [key.fileobj for key in sel.get_map().values()]
        for listener in listeners:
            if listener not in existing_listeners:
                sel.register(listener, selectors.EVENT_READ)

        # Configuration tree
        message_handler = config.create_message_handler()

        # Start worker processes
        with multiprocessing.Pool(processes=config.workers,
                                  initializer=setup_worker, initargs=(message_handler, logging_queue)) as pool:

            logger.info("Python DHCPv6 server is ready to handle requests")

            running = True
            while running:
                # noinspection PyBroadException
                try:
                    events = sel.select()
                    for key, mask in events:
                        # Handle signal notifications
                        if key.fileobj == signal_r:
                            signal_nr = os.read(signal_r, 1)
                            if signal_nr[0] in (signal.SIGHUP,):
                                # SIGHUP tells the server to reload
                                try:
                                    # Read the new configuration
                                    config = config_parser.load_config(config_file)

                                    running = False
                                    stopping = False

                                    logger.info("DHCPv6 server restarting after configuration change")

                                    break

                                except (ConfigurationSyntaxError, DataConversionError) as e:
                                    # Make the config exceptions a bit more readable
                                    msg = "Not reloading: " + str(e.message)
                                    if e.lineno and e.lineno != -1:
                                        msg += ' on line {}'.format(e.lineno)
                                    if e.url:
                                        parts = urlparse(e.url)
                                        msg += ' in {}'.format(parts.path)
                                    logger.critical(msg)
                                    return 1
                                except ValueError as e:
                                    logger.critical("Not reloading: " + str(e))
                                    return 1

                            elif signal_nr[0] in (signal.SIGINT, signal.SIGTERM):
                                logger.debug("Received termination request")

                                running = False
                                stopping = True
                                break

                            elif signal_nr[0] in (signal.SIGINFO,):
                                logger.info("Server has processed {} messages".format(message_count))

                            # Unknown signal: ignore
                            continue

                        elif isinstance(key.fileobj, Listener):
                            packet = key.fileobj.recv_request()

                            # Update stats
                            message_count += 1

                            # Create the callback
                            callback, error_callback = create_handler_callbacks(key.fileobj, packet.message_id)

                            # Dispatch
                            pool.apply_async(handle_message, args=(packet,),
                                             callback=callback, error_callback=error_callback)

                except Exception as e:
                    # Catch-all exception handler
                    logger.exception("Caught unexpected exception {!r}".format(e))

                    now = time.monotonic()

                    # Add new exception time to the history
                    exception_history.append(now)

                    # Remove exceptions outside the window from the history
                    cutoff = now - config.exception_window
                    while exception_history and exception_history[0] < cutoff:
                        exception_history.pop(0)

                    # Did we receive too many exceptions in quick succession?
                    if len(exception_history) > config.max_exceptions:
                        logger.critical("Received more than {} exceptions in {} seconds, "
                                        "exiting".format(config.max_exceptions, config.exception_window))
                        running = False
                        stopping = True

            pool.close()
            pool.join()

        # Regain root so we can delete the PID file
        restore_privileges()
        try:
            if pid_filename:
                os.unlink(pid_filename)
                logger.info("Removing PID-file {}".format(pid_filename))
        except OSError:
            pass

    logger.info("Shutting down Python DHCPv6 server v{}".format(dhcpkit.__version__))

    return 0
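
The snippet above multiplexes signals and sockets through one selector by way
of the wakeup-fd trick. A minimal, self-contained sketch of that pattern,
assuming a Unix platform (the handler wiring is illustrative, not taken from
the snippet):

import os
import fcntl
import signal
import selectors

# Convert signals to bytes on a pipe; set_wakeup_fd requires the write end
# to be non-blocking
signal_r, signal_w = os.pipe()
flags = fcntl.fcntl(signal_w, fcntl.F_GETFL, 0)
fcntl.fcntl(signal_w, fcntl.F_SETFL, flags | os.O_NONBLOCK)
signal.set_wakeup_fd(signal_w)

# Dummy handlers: SIG_IGN would suppress the wakeup byte, a no-op does not
for signum in (signal.SIGINT, signal.SIGTERM, signal.SIGHUP):
    signal.signal(signum, lambda signum, frame: None)

sel = selectors.DefaultSelector()
sel.register(signal_r, selectors.EVENT_READ)

running = True
while running:  # blocks until SIGINT/SIGTERM arrives
    for key, mask in sel.select():
        if key.fileobj == signal_r:
            signal_nr = os.read(signal_r, 1)[0]
            if signal_nr in (signal.SIGINT, signal.SIGTERM):
                running = False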
Beispiel #47
0
################################################################################
#                                   IMPORTS                                    #
################################################################################

import packages.buskill
from packages.garden.navigationdrawer import NavigationDrawer
from packages.garden.progressspinner import ProgressSpinner
from buskill_version import BUSKILL_VERSION

import os, sys, re, webbrowser

import multiprocessing
from multiprocessing import util

import logging
logger = logging.getLogger(__name__)
util.get_logger().setLevel(util.DEBUG)
multiprocessing.log_to_stderr().setLevel(logging.DEBUG)
#from multiprocessing import get_context

import kivy
#kivy.require('1.0.6') # replace with your current kivy version !

from kivy.app import App
from kivy.properties import ObjectProperty, StringProperty
from kivy.clock import Clock

from kivy.core.window import Window
Window.size = (300, 500)

from kivy.uix.widget import Widget
from kivy.uix.label import Label
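
The two logging calls above turn on multiprocessing's internal diagnostics.
A minimal sketch of the same idea that runs on its own (the worker function
is an illustration, not part of the snippet):

import logging
import multiprocessing

def worker():
    # multiprocessing.get_logger() returns the library's internal logger;
    # with log_to_stderr() configured, worker messages appear on stderr
    multiprocessing.get_logger().info(
        "hello from %s", multiprocessing.current_process().name)

if __name__ == '__main__':
    # DEBUG shows process start/stop; util.SUBDEBUG (level 5) additionally
    # shows finalizer activity
    multiprocessing.log_to_stderr().setLevel(logging.DEBUG)
    p = multiprocessing.Process(target=worker)
    p.start()
    p.join()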
Beispiel #48
0
def main():  # pragma: no cover
    """
    Code which runs a host manager.
    Expects configuration data from parent on `stdin`.
    Replies with address and optionally public key.
    The environment variable ``OPENMDAO_KEEPDIRS`` can be used to avoid
    removal of the temporary directory used here.
    """
    sys.stdout = open('stdout', 'w')
    sys.stderr = open('stderr', 'w')

#    util.log_to_stderr(logging.DEBUG)
    # Avoid root possibly masking us.
    logging.getLogger().setLevel(logging.DEBUG)

    pid = os.getpid()
    ident = '(%s:%d)' % (socket.gethostname(), pid)
    print '%s main startup' % ident
    sys.stdout.flush()

    # Get data from parent over stdin.
    dump = sys.stdin.read()
    sys.stdin.close()
    print '%s data received (%s)' % (ident, len(dump))
    data = cPickle.loads(base64.b64decode(dump))

    hostname = data['hostname']
    print '%s using hostname %s' % (ident, hostname)

    authkey = data['authkey']
    print '%s using %s authentication' % (ident, keytype(authkey))

    allowed_users = data['allowed_users']
    if allowed_users is None:
        print '%s allowed_users: ANY' % ident
    else:
        print '%s allowed_users: %s' % (ident, sorted(allowed_users.keys()))

    allow_shell = data['allow_shell']
    if allow_shell:
        print '%s ALLOWING SHELL ACCESS' % ident

    allow_tunneling = data['allow_tunneling']
    print '%s allow_tunneling: %s' % (ident, allow_tunneling)
    if allow_tunneling:
        hostname = 'localhost'

    sys.stdout.flush()

    log_level = data['dist_log_level']
    os.environ['OPENMDAO_KEEPDIRS'] = data['keep_dirs']

    exc = None
    server = None
    try:
        # Update HostManager registry.
        dct = data['registry']
        print '%s registry:' % ident
        for name in dct.keys():
            module = dct[name]
            print '    %s: %s' % (name, module)
            mod = __import__(module, fromlist=name)
            cls = getattr(mod, name)
            register(cls, HostManager)

        # Set some stuff.
        print '%s preparing to fork, log level %d' % (ident, log_level)
        sys.stdout.flush()
        util.get_logger().setLevel(log_level)
        forking.prepare(data)

        # Create Server for a HostManager object.
        name = '%d[%d]' % (data['index'], pid)
        logging.getLogger(name).setLevel(log_level)
        server = OpenMDAO_Server(HostManager._registry, (hostname, 0),
                                 authkey, 'pickle', name=name,
                                 allowed_users=allowed_users,
                                 allowed_hosts=[data['parent_address'][0]],
                                 allow_tunneling=allow_tunneling)
        print '%s server listening at %s' % (ident, server.address)
    except Exception as exc:
        print '%s caught exception: %s' % (ident, exc)

    # Report server address and public key back to parent.
    print '%s connecting to parent at %s' % (ident, data['parent_address'])
    sys.stdout.flush()
    for retry in range(10):
        try:
            conn = connection.Client(data['parent_address'], authkey=authkey)
        except socket.error as sock_exc:
            print '%s %s' % (ident, sock_exc)
            if retry < 9 and (sock_exc.args[0] == errno.ECONNREFUSED or
                              sock_exc.args[0] == errno.ENOENT):
                print '%s retrying...' % ident
                time.sleep(1)
            else:
                print '%s exiting' % ident
                sys.exit(1)
        else:
            break
    if exc:
        conn.send((data['index'], None, str(exc)))
    else:
        conn.send((data['index'], server.address, server.public_key_text))
    conn.close()

    if exc:
        print '%s exiting' % ident
        sys.exit(1)

    # Set name etc.
    current_process()._server = server
    current_process()._name = 'Host-%s:%s' % server.address
    current_process().authkey = authkey
    logging.getLogger(current_process()._name).setLevel(log_level)
    util._run_after_forkers()

    # Register a cleanup function.
    def cleanup(directory):
        """ Removes our directory unless OPENMDAO_KEEPDIRS set. """
        keep_dirs = int(os.environ.get('OPENMDAO_KEEPDIRS', '0'))
        if not keep_dirs and os.path.exists(directory):
            print '%s removing directory %s' % (ident, directory)
            try:
                shutil.rmtree(directory, onerror=onerror)
            except WindowsError as exc:
                print '%s %s' % (ident, exc)
        print '%s shutting down host manager' % ident
    util.Finalize(None, cleanup, args=[data['dir']], exitpriority=0)

    # Start host manager.
    print '%s remote host manager starting in %s' % (ident, data['dir'])
    sys.stdout.flush()
    server.serve_forever()
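
The retry loop that connects back to the parent is a reusable pattern. A
Python 3 sketch under the assumption that the same errno semantics apply
(the function name and defaults are made up for illustration):

import errno
import time
from multiprocessing import connection

def connect_with_retry(address, authkey, retries=10, delay=1.0):
    """Retry connection.Client() while the listener may not be up yet."""
    for attempt in range(retries):
        try:
            return connection.Client(address, authkey=authkey)
        except OSError as exc:
            # Listener not ready: connection refused (TCP) or the socket
            # file does not exist yet (AF_UNIX)
            if (attempt < retries - 1 and exc.args
                    and exc.args[0] in (errno.ECONNREFUSED, errno.ENOENT)):
                time.sleep(delay)
            else:
                raise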
Beispiel #49
0
def get_logger():
    from multiprocessing.util import get_logger
    return get_logger()
Beispiel #50
0
def write(self, s):
    get_logger().info(s)
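
Method fragments like write() above typically belong to a small file-like
adapter that redirects a stream into the multiprocessing logger. A plausible
completed form (the class name and the flush() stub are assumptions):

from multiprocessing.util import get_logger

class LoggerWriter(object):
    """File-like object forwarding writes to the multiprocessing logger."""

    def write(self, s):
        s = s.rstrip()
        if s:  # skip the bare newlines that print() emits
            get_logger().info(s)

    def flush(self):
        pass  # nothing is buffered, but file-like objects need flush()

# Typical use: capture stray print() output inside worker processes
# import sys; sys.stdout = LoggerWriter()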
Beispiel #51
0
def main(args: Iterable[str]) -> int:
    """
    The main program loop

    :param args: Command line arguments
    :return: The program exit code
    """
    # Handle command line arguments
    args = handle_args(args)
    set_verbosity_logger(logger, args.verbosity)

    # Go to the working directory
    config_file = os.path.realpath(args.config)
    os.chdir(os.path.dirname(config_file))

    try:
        # Read the configuration
        config = config_parser.load_config(config_file)
    except (ConfigurationSyntaxError, DataConversionError) as e:
        # Make the config exceptions a bit more readable
        msg = e.message
        if e.lineno and e.lineno != -1:
            msg += ' on line {}'.format(e.lineno)
        if e.url:
            parts = urlparse(e.url)
            msg += ' in {}'.format(parts.path)
        logger.critical(msg)
        return 1
    except ValueError as e:
        logger.critical(e)
        return 1

    # Immediately drop privileges in a non-permanent way so we create logs with the correct owner
    drop_privileges(config.user, config.group, permanent=False)

    # Trigger the forkserver at this point, with dropped privileges, and ignoring KeyboardInterrupt
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    multiprocessing.set_start_method('forkserver')
    forkserver.ensure_running()

    # Initialise the logger
    config.logging.configure(logger, verbosity=args.verbosity)
    logger.info("Starting Python DHCPv6 server v{}".format(
        dhcpkit.__version__))

    # Create our selector
    sel = selectors.DefaultSelector()

    # Convert signals to messages on a pipe
    signal_r, signal_w = os.pipe()
    flags = fcntl.fcntl(signal_w, fcntl.F_GETFL, 0)
    flags = flags | os.O_NONBLOCK
    fcntl.fcntl(signal_w, fcntl.F_SETFL, flags)
    signal.set_wakeup_fd(signal_w)
    sel.register(signal_r, selectors.EVENT_READ)

    # Ignore normal signal handling by attaching dummy handlers (SIG_IGN will not put messages on the pipe)
    signal.signal(signal.SIGINT, lambda signum, frame: None)
    signal.signal(signal.SIGTERM, lambda signum, frame: None)
    signal.signal(signal.SIGHUP, lambda signum, frame: None)
    signal.signal(signal.SIGUSR1, lambda signum, frame: None)

    # Excessive exception catcher
    exception_history = []

    # Some stats
    message_count = 0

    # Create a queue for our children to log to
    logging_queue = multiprocessing.Queue()

    statistics = ServerStatistics()
    listeners = []
    control_socket = None
    stopping = False

    while not stopping:
        # Safety first: assume we want to quit when we break the inner loop unless told otherwise
        stopping = True

        # Initialise the logger again
        lowest_log_level = config.logging.configure(logger,
                                                    verbosity=args.verbosity)

        # Enable multiprocessing logging, mostly useful for development
        mp_logger = get_logger()
        mp_logger.propagate = config.logging.log_multiprocessing

        global logging_thread
        if logging_thread:
            logging_thread.stop()

        logging_thread = queue_logger.QueueLevelListener(
            logging_queue, *logger.handlers)
        logging_thread.start()

        # Use the logging queue in the main process as well so messages don't get out of order
        logging_handler = WorkerQueueHandler(logging_queue)
        logging_handler.setLevel(lowest_log_level)
        logger.handlers = [logging_handler]

        # Restore our privileges while we write the PID file and open network listeners
        restore_privileges()

        # Open the network listeners
        old_listeners = listeners
        listeners = []
        for listener_factory in config.listener_factories:
            # Create new listener while trying to re-use existing sockets
            listeners.append(listener_factory(old_listeners + listeners))

        # Forget old listeners
        del old_listeners

        # Write the PID file
        pid_filename = create_pidfile(args=args, config=config)

        # Create a control socket
        if control_socket:
            sel.unregister(control_socket)
            control_socket.close()

        control_socket = create_control_socket(args=args, config=config)
        if control_socket:
            sel.register(control_socket, selectors.EVENT_READ)

        # And drop privileges again
        drop_privileges(config.user, config.group, permanent=False)

        # Remove any file descriptors from the previous config
        for fd, key in list(sel.get_map().items()):
            # Don't remove our signal handling pipe, control socket, still-existing listeners, or control connections
            if key.fileobj is signal_r \
                    or (control_socket and key.fileobj is control_socket) \
                    or key.fileobj in listeners \
                    or isinstance(key.fileobj, ControlConnection):
                continue

            # Seems we don't need this one anymore
            sel.unregister(key.fileobj)

        # Collect all the file descriptors we want to listen to
        existing_listeners = [key.fileobj for key in sel.get_map().values()]
        for listener in listeners:
            if listener not in existing_listeners:
                sel.register(listener, selectors.EVENT_READ)

        # Configuration tree
        try:
            message_handler = config.create_message_handler()
        except Exception as e:
            if args.verbosity >= 3:
                logger.exception("Error initialising DHCPv6 server")
            else:
                logger.critical(
                    "Error initialising DHCPv6 server: {}".format(e))
            return 1

        # Make sure we have space to store all the interface statistics
        statistics.set_categories(config.statistics)

        # Start worker processes
        my_pid = os.getpid()
        with NonBlockingPool(processes=config.workers,
                             initializer=setup_worker,
                             initargs=(message_handler, logging_queue,
                                       lowest_log_level, statistics,
                                       my_pid)) as pool:

            logger.info("Python DHCPv6 server is ready to handle requests")

            running = True
            while running:
                count_exception = False

                # noinspection PyBroadException
                try:
                    events = sel.select()
                    for key, mask in events:
                        if isinstance(key.fileobj, Listener):
                            try:
                                packet, replier = key.fileobj.recv_request()

                                # Update stats
                                message_count += 1

                                # Dispatch
                                pool.apply_async(handle_message,
                                                 args=(packet, replier),
                                                 error_callback=error_callback)
                            except IgnoreMessage:
                                # Message isn't complete, leave it for now
                                pass
                            except ClosedListener:
                                # This listener is closed (at least TCP shutdown for incoming data), so forget about it
                                sel.unregister(key.fileobj)
                                listeners.remove(key.fileobj)

                        elif isinstance(key.fileobj, ListenerCreator):
                            # Activity on this object means we have a new listener
                            new_listener = key.fileobj.create_listener()
                            if new_listener:
                                sel.register(new_listener,
                                             selectors.EVENT_READ)
                                listeners.append(new_listener)

                        # Handle signal notifications
                        elif key.fileobj == signal_r:
                            signal_nr = os.read(signal_r, 1)
                            if signal_nr[0] in (signal.SIGHUP, ):
                                # SIGHUP tells the server to reload
                                try:
                                    # Read the new configuration
                                    config = config_parser.load_config(
                                        config_file)
                                except (ConfigurationSyntaxError,
                                        DataConversionError) as e:
                                    # Make the config exceptions a bit more readable
                                    msg = "Not reloading: " + str(e.message)
                                    if e.lineno and e.lineno != -1:
                                        msg += ' on line {}'.format(e.lineno)
                                    if e.url:
                                        parts = urlparse(e.url)
                                        msg += ' in {}'.format(parts.path)
                                    logger.critical(msg)
                                    continue

                                except ValueError as e:
                                    logger.critical("Not reloading: " + str(e))
                                    continue

                                logger.info(
                                    "DHCPv6 server restarting after configuration change"
                                )
                                running = False
                                stopping = False
                                continue

                            elif signal_nr[0] in (signal.SIGINT,
                                                  signal.SIGTERM):
                                logger.debug("Received termination request")

                                running = False
                                stopping = True
                                break

                            elif signal_nr[0] in (signal.SIGUSR1, ):
                                # The USR1 signal is used to indicate initialisation errors in worker processes
                                count_exception = True

                        elif isinstance(key.fileobj, ControlSocket):
                            # A new control connection request
                            control_connection = key.fileobj.accept()
                            if control_connection:
                                # We got a connection, listen to events
                                sel.register(control_connection,
                                             selectors.EVENT_READ)

                        elif isinstance(key.fileobj, ControlConnection):
                            # Let the connection handle received data
                            control_connection = key.fileobj
                            commands = control_connection.get_commands()
                            for command in commands:
                                if command:
                                    logger.debug(
                                        "Received control command '{}'".format(
                                            command))

                                if command == 'help':
                                    control_connection.send(
                                        "Recognised commands:")
                                    control_connection.send("  help")
                                    control_connection.send("  stats")
                                    control_connection.send("  stats-json")
                                    control_connection.send("  reload")
                                    control_connection.send("  shutdown")
                                    control_connection.send("  quit")
                                    control_connection.acknowledge()

                                elif command == 'stats':
                                    control_connection.send(str(statistics))
                                    control_connection.acknowledge()

                                elif command == 'stats-json':
                                    control_connection.send(
                                        json.dumps(statistics.export()))
                                    control_connection.acknowledge()

                                elif command == 'reload':
                                    # Simulate a SIGHUP to reload
                                    os.write(signal_w, bytes([signal.SIGHUP]))
                                    control_connection.acknowledge('Reloading')

                                elif command == 'shutdown':
                                    # Simulate a SIGTERM to shut down
                                    control_connection.acknowledge(
                                        'Shutting down')
                                    control_connection.close()
                                    sel.unregister(control_connection)

                                    os.write(signal_w, bytes([signal.SIGTERM]))
                                    break

                                elif command == 'quit' or command is None:
                                    if command == 'quit':
                                        # User nicely signing off
                                        control_connection.acknowledge()

                                    control_connection.close()
                                    sel.unregister(control_connection)
                                    break

                                else:
                                    logger.warning(
                                        "Rejecting unknown control command '{}'"
                                        .format(command))
                                    control_connection.reject()

                except Exception as e:
                    # Catch-all exception handler
                    logger.exception(
                        "Caught unexpected exception {!r}".format(e))
                    count_exception = True

                if count_exception:
                    now = time.monotonic()

                    # Add new exception time to the history
                    exception_history.append(now)

                    # Remove exceptions outside the window from the history
                    cutoff = now - config.exception_window
                    while exception_history and exception_history[0] < cutoff:
                        exception_history.pop(0)

                    # Did we receive too many exceptions in quick succession?
                    if len(exception_history) > config.max_exceptions:
                        logger.critical(
                            "Received more than {} exceptions in {} seconds, "
                            "exiting".format(config.max_exceptions,
                                             config.exception_window))
                        running = False
                        stopping = True

            pool.close()
            pool.join()

        # Regain root so we can delete the PID file and control socket
        restore_privileges()
        try:
            if pid_filename:
                os.unlink(pid_filename)
                logger.info("Removing PID-file {}".format(pid_filename))
        except OSError:
            pass

        try:
            if control_socket:
                os.unlink(control_socket.socket_path)
                logger.info("Removing control socket {}".format(
                    control_socket.socket_path))
        except OSError:
            pass

    logger.info("Shutting down Python DHCPv6 server v{}".format(
        dhcpkit.__version__))

    return 0
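
The exception_history bookkeeping above is a sliding-window rate limiter: the
server only gives up when more than max_exceptions errors occur within
exception_window seconds. The same logic, extracted as a self-contained
helper (class and method names are illustrative):

import time

class ExceptionWindow(object):
    """Counts events and reports when too many fall within a time window."""

    def __init__(self, max_exceptions, window):
        self.max_exceptions = max_exceptions
        self.window = window
        self.history = []

    def record(self):
        now = time.monotonic()
        self.history.append(now)
        # Forget events that have left the window
        cutoff = now - self.window
        while self.history and self.history[0] < cutoff:
            self.history.pop(0)

    def too_many(self):
        return len(self.history) > self.max_exceptions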
Beispiel #52
0
def prepare(data):
    '''
    Try to get current process ready to unpickle process object
    '''
    old_main_modules.append(sys.modules['__main__'])

    if 'name' in data:
        process.current_process().name = data['name']

    if 'authkey' in data:
        process.current_process()._authkey = data['authkey']

    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()

    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])

    if 'sys_path' in data:
        sys.path = data['sys_path']

    if 'sys_argv' in data:
        sys.argv = data['sys_argv']

    if 'dir' in data:
        os.chdir(data['dir'])

    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']

    if 'main_path' in data:
        main_path = data['main_path']
        main_name = os.path.splitext(os.path.basename(main_path))[0]
        if main_name == '__init__':
            main_name = os.path.basename(os.path.dirname(main_path))

        if main_name != 'ipython':
            import imp

            if main_path is None:
                dirs = None
            elif os.path.basename(main_path).startswith('__init__.py'):
                dirs = [os.path.dirname(os.path.dirname(main_path))]
            else:
                dirs = [os.path.dirname(main_path)]

            assert main_name not in sys.modules, main_name
            file, path_name, etc = imp.find_module(main_name, dirs)
            try:
                # We would like to do "imp.load_module('__main__', ...)"
                # here.  However, that would cause 'if __name__ ==
                # "__main__"' clauses to be executed.
                main_module = imp.load_module(
                    '__parents_main__', file, path_name, etc
                    )
            finally:
                if file:
                    file.close()

            sys.modules['__main__'] = main_module
            main_module.__name__ = '__main__'

            # Try to make the potentially picklable objects in
            # sys.modules['__main__'] realize they are in the main
            # module -- somewhat ugly.
            for obj in main_module.__dict__.values():
                try:
                    if obj.__module__ == '__parents_main__':
                        obj.__module__ = '__main__'
                except Exception:
                    pass
Beispiel #53
0
def prepare(data):
    '''
    Try to get current process ready to unpickle process object
    '''
    old_main_modules.append(sys.modules['__main__'])

    if 'name' in data:
        process.current_process().name = data['name']

    if 'authkey' in data:
        process.current_process()._authkey = data['authkey']

    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()

    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])

    if 'sys_path' in data:
        sys.path = data['sys_path']

    if 'sys_argv' in data:
        sys.argv = data['sys_argv']

    if 'dir' in data:
        os.chdir(data['dir'])

    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']

    if 'main_path' in data:
        main_path = data['main_path']
        main_name = os.path.splitext(os.path.basename(main_path))[0]
        if main_name == '__init__':
            main_name = os.path.basename(os.path.dirname(main_path))

        if main_name != 'ipython':
            import imp

            if main_path is None:
                dirs = None
            elif os.path.basename(main_path).startswith('__init__.py'):
                dirs = [os.path.dirname(os.path.dirname(main_path))]
            else:
                dirs = [os.path.dirname(main_path)]

            assert main_name not in sys.modules, main_name
            file, path_name, etc = imp.find_module(main_name, dirs)
            try:
                # We would like to do "imp.load_module('__main__', ...)"
                # here.  However, that would cause 'if __name__ ==
                # "__main__"' clauses to be executed.
                main_module = imp.load_module('__parents_main__', file,
                                              path_name, etc)
            finally:
                if file:
                    file.close()

            sys.modules['__main__'] = main_module
            main_module.__name__ = '__main__'

            # Try to make the potentially picklable objects in
            # sys.modules['__main__'] realize they are in the main
            # module -- somewhat ugly.
            for obj in main_module.__dict__.values():
                try:
                    if obj.__module__ == '__parents_main__':
                        obj.__module__ = '__main__'
                except Exception:
                    pass
Beispiel #54
0
import sys
from time import sleep

from multiprocessing.util import get_logger

logger = get_logger()


def test_no_args():
    logger.info(test_no_args.__name__)
    sleep(1)
    logger.info("exiting")
    sys.exit(10)


def test_queues(r_q, w_q):
    sleep(1)
    logger.info(r_q.get(timeout=5))
    sleep(1)
    w_q.put(test_queues.__name__, timeout=5)
    sleep(1)
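
These functions are written as child-process targets. A hedged sketch of how
test_queues() might be driven from a parent process (the queue wiring is an
assumption about the original test harness):

import logging
import multiprocessing

if __name__ == '__main__':
    multiprocessing.log_to_stderr().setLevel(logging.INFO)

    r_q = multiprocessing.Queue()  # parent -> child
    w_q = multiprocessing.Queue()  # child -> parent

    p = multiprocessing.Process(target=test_queues, args=(r_q, w_q))
    p.start()
    r_q.put("ping")             # the child logs this value
    print(w_q.get(timeout=10))  # prints 'test_queues'
    p.join()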
Beispiel #55
0
def write(self, s):
    get_logger().info(s)
Beispiel #56
0
def prepare(data):
    """
    Try to get current process ready to unpickle process object
    """
    old_main_modules.append(sys.modules["__main__"])

    if "name" in data:
        process.current_process().name = data["name"]

    if "authkey" in data:
        process.current_process()._authkey = data["authkey"]

    if "log_to_stderr" in data and data["log_to_stderr"]:
        util.log_to_stderr()

    if "log_level" in data:
        util.get_logger().setLevel(data["log_level"])

    if "sys_path" in data:
        sys.path = data["sys_path"]

    if "sys_argv" in data:
        sys.argv = data["sys_argv"]

    if "dir" in data:
        os.chdir(data["dir"])

    if "orig_dir" in data:
        process.ORIGINAL_DIR = data["orig_dir"]

    if "main_path" in data:
        # XXX (ncoghlan): The following code makes several bogus
        # assumptions regarding the relationship between __file__
        # and a module's real name. See PEP 302 and issue #10845
        # The problem is resolved properly in Python 3.4+, as
        # described in issue #19946

        main_path = data["main_path"]
        main_name = os.path.splitext(os.path.basename(main_path))[0]
        if main_name == "__init__":
            main_name = os.path.basename(os.path.dirname(main_path))

        if main_name == "__main__":
            # For directory and zipfile execution, we assume an implicit
            # "if __name__ == '__main__':" around the module, and don't
            # rerun the main module code in spawned processes
            main_module = sys.modules["__main__"]
            main_module.__file__ = main_path
        elif main_name != "ipython":
            # Main modules not actually called __main__.py may
            # contain additional code that should still be executed
            import imp

            if main_path is None:
                dirs = None
            elif os.path.basename(main_path).startswith("__init__.py"):
                dirs = [os.path.dirname(os.path.dirname(main_path))]
            else:
                dirs = [os.path.dirname(main_path)]

            assert main_name not in sys.modules, main_name
            file, path_name, etc = imp.find_module(main_name, dirs)
            try:
                # We would like to do "imp.load_module('__main__', ...)"
                # here.  However, that would cause 'if __name__ ==
                # "__main__"' clauses to be executed.
                main_module = imp.load_module("__parents_main__", file, path_name, etc)
            finally:
                if file:
                    file.close()

            sys.modules["__main__"] = main_module
            main_module.__name__ = "__main__"

            # Try to make the potentially picklable objects in
            # sys.modules['__main__'] realize they are in the main
            # module -- somewhat ugly.
            for obj in main_module.__dict__.values():
                try:
                    if obj.__module__ == "__parents_main__":
                        obj.__module__ = "__main__"
                except Exception:
                    pass
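
The imp module used by these prepare() variants was deprecated long ago and
removed in Python 3.12. A rough importlib-based equivalent of the
"__parents_main__" trick, sketched under the assumption of a plain .py
main_path (this is not the stdlib's exact code):

import importlib.util
import sys

def load_parents_main(main_path):
    # Load the file under a throwaway name so that its
    # 'if __name__ == "__main__":' block is not executed
    spec = importlib.util.spec_from_file_location('__parents_main__',
                                                  main_path)
    main_module = importlib.util.module_from_spec(spec)
    sys.modules['__parents_main__'] = main_module
    spec.loader.exec_module(main_module)

    # Install it as __main__ so pickled references resolve against it
    main_module.__name__ = '__main__'
    sys.modules['__main__'] = main_module
    return main_module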