Example #1
def main(get_handler):
    parser = argparse.ArgumentParser()
    parser.add_argument("--sockname")
    parser.add_argument("--numproc")
    parser.add_argument("--version-serial", type=int)
    args = parser.parse_args()

    ql_parser.preload(allow_rebuild=False)
    gc.freeze()

    if args.numproc is None:
        # Run a single worker process
        run_worker(args.sockname, args.version_serial, get_handler)
        return

    numproc = int(args.numproc)
    assert numproc >= 1

    # Abort the template process if more than `max_worker_spawns`
    # new workers are created continuously - it probably means the
    # worker cannot start correctly.
    max_worker_spawns = numproc * 2

    children = set()
    continuous_num_spawns = 0

    for _ in range(int(args.numproc)):
        # spawn initial workers
        if pid := os.fork():
            # main process
            children.add(pid)
            continuous_num_spawns += 1
        else:
            # child process
            break
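
A note on the pattern above: the template process preloads everything the workers need, freezes the GC heap, and only then forks. A minimal standalone sketch of that idiom (not the edgedb code; the dict stands in for ql_parser.preload()):

import gc
import os
import sys

shared_state = {"parser_tables": list(range(100_000))}  # stand-in for preloaded data

gc.collect()   # reclaim cyclic garbage first, so none of it gets frozen
gc.freeze()    # move all surviving tracked objects to the permanent generation

children = []
for _ in range(4):
    pid = os.fork()
    if pid == 0:
        # Child: the GC never scans frozen objects, so it never writes to
        # their headers, and their memory pages stay shared copy-on-write
        # with the template process.
        sys.exit(0)
    children.append(pid)

for pid in children:
    os.waitpid(pid, 0)
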
Example #2
def create_application():
    api.bootstrap(context="server", confdir=paths.ETC_IPA, log=None)

    try:
        api.finalize()
    except Exception as e:
        logger.error("Failed to start IPA: %s", e)
        raise

    # speed up first request to each worker by 200ms
    populate_schema_cache()

    # Collect garbage and freeze all objects that are currently tracked by
    # the cyclic garbage collector. We assume that the vast majority of
    # currently loaded objects won't be removed during requests. This speeds
    # up GC collections and improves CoW memory handling.
    gc.collect()
    if hasattr(gc, "freeze"):
        # Python 3.7+
        gc.freeze()

    # This is the WSGI callable:
    def application(environ, start_response):
        if not environ["wsgi.multithread"]:
            return api.Backend.wsgi_dispatch(environ, start_response)
        else:
            logger.error("IPA does not work with the threaded MPM, "
                         "use the pre-fork MPM")
            raise RuntimeError("threaded MPM detected")

    return application
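
Example #2 guards the call with hasattr(gc, "freeze") because gc.freeze() was only added in CPython 3.7 (and, as Example #15's comment notes, PyPy does not implement it). Several later examples repeat that guard; a small helper (the name freeze_if_supported is hypothetical) makes the intent explicit:

import gc

def freeze_if_supported() -> bool:
    """Collect and freeze the GC heap where the runtime supports it.

    gc.freeze()/gc.unfreeze() exist on CPython 3.7+; elsewhere this is
    a harmless no-op.
    """
    if hasattr(gc, "freeze"):
        gc.collect()
        gc.freeze()
        return True
    return False
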
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--sockname')
    parser.add_argument('--numproc')
    args = parser.parse_args()

    numproc = int(args.numproc)
    assert numproc >= 1

    # Abort the template process if more than `max_worker_spawns`
    # new workers are created continuously - it probably means the
    # worker cannot start correctly.
    max_worker_spawns = numproc * 2

    ql_parser.preload()
    gc.freeze()

    children = set()
    continuous_num_spawns = 0

    for _ in range(int(args.numproc)):
        # spawn initial workers
        if pid := os.fork():
            # main process
            children.add(pid)
            continuous_num_spawns += 1
        else:
            # child process
            break
Example #4
def main(config_file=None):
    parse_configs(config_files=config_file)

    if options.app is None:
        log.exception(
            'no frontik application present (`app` option is not specified)')
        sys.exit(1)

    log.info('starting application %s', options.app)

    try:
        if options.app_class is not None and re.match(r'^\w+\.',
                                                      options.app_class):
            app_module_name, app_class_name = options.app_class.rsplit('.', 1)
        else:
            app_module_name = options.app
            app_class_name = options.app_class

        module = importlib.import_module(app_module_name)
    except Exception as e:
        log.exception('failed to import application module "%s": %s',
                      options.app, e)

        sys.exit(1)

    if app_class_name is not None and not hasattr(module, app_class_name):
        log.exception('application class "%s" not found', options.app_class)
        sys.exit(1)

    application = getattr(
        module,
        app_class_name) if app_class_name is not None else FrontikApplication

    try:
        app = application(app_root=os.path.dirname(module.__file__),
                          app_module=app_module_name,
                          **options.as_dict())

        gc.disable()
        gc.collect()
        gc.freeze()
        if options.workers != 1:
            service_discovery_client = get_sync_service_discovery(options)
            fork_workers(
                partial(_run_worker, app),
                num_workers=options.workers,
                after_workers_up_action=service_discovery_client.register_service,
                before_workers_shutdown_action=service_discovery_client.deregister_service_and_close)
        else:
            # run in single process mode
            _run_worker(app, True)
    except Exception as e:
        log.exception('frontik application exited with exception: %s', e)
        sys.exit(1)
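
Example #4 chains gc.disable(), gc.collect() and gc.freeze() before forking, which is the recipe the CPython gc docs recommend for pre-fork servers: disable the collector in the parent, freeze right before fork(), and re-enable the collector in the children. A minimal sketch of the complete recipe (whether frontik's workers re-enable the GC is not shown above, so the gc.enable() call is an assumption):

import gc
import os
import sys

gc.disable()   # no automatic collections in the master from here on
gc.collect()   # one last sweep, so no garbage ends up frozen
gc.freeze()    # park every surviving object in the permanent generation

pid = os.fork()
if pid == 0:
    gc.enable()  # the worker churns through per-request objects and wants the GC back
    # ... run the worker loop ...
    sys.exit(0)

os.waitpid(pid, 0)
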
Example #5
    def __init__(self,
                 imgt_version: str = 'Latest',
                 data_dir: str = None,
                 config: dict = None):
        """
        ARD will load valid alleles, xx codes and MAC mappings for the given
        version of IMGT database, downloading and generating the database if
        not already present.

        :param imgt_version: IMGT HLA database version
        :param data_dir: directory path to store cached data
        :param config: dictionary of configuration options
        """
        self._data_dir = data_dir
        self._config = default_config.copy()
        if config:
            self._config.update(config)

        # Create a database connection for writing
        self.db_connection = db.create_db_connection(data_dir, imgt_version)

        # Load MAC codes
        generate_mac_codes(self.db_connection, False)
        # Load ARS mappings
        self.ars_mappings = generate_ars_mapping(self.db_connection,
                                                 imgt_version)
        # Load Alleles and XX Codes
        self.valid_alleles, self.who_alleles, self.xx_codes, self.who_group = generate_alleles_and_xx_codes_and_who(
            self.db_connection, imgt_version, self.ars_mappings)

        # Load Serology mappings
        generate_serology_mapping(self.db_connection, imgt_version)
        # Load V2 to V3 mappings
        generate_v2_to_v3_mapping(self.db_connection, imgt_version)

        # Close the current read-write db connection
        self.db_connection.close()

        # reference data is read-only and can be frozen
        # Works only for Python >= 3.9
        if sys.version_info.major == 3 and sys.version_info.minor >= 9:
            import gc
            gc.freeze()

        # Re-open the connection in read-only mode as we're not updating it anymore
        self.db_connection = db.create_db_connection(data_dir,
                                                     imgt_version,
                                                     ro=True)
Example #6
    def __init__(self,
                 imgt_version: str = 'Latest',
                 remove_invalid: bool = True,
                 data_dir: str = None,
                 refresh_mac: bool = False) -> None:
        """
        ARD will load valid alleles, xx codes and MAC mappings for the given
        version of IMGT database, downloading and generating the database if
        not already present.

        :param imgt_version: IMGT HLA database version
        :param remove_invalid: report only valid alleles for this version
        :param data_dir: directory path to store cached data
        """
        self._remove_invalid = remove_invalid

        # Create a database connection for writing
        self.db_connection = db.create_db_connection(data_dir, imgt_version)

        # Load MAC codes
        generate_mac_codes(self.db_connection, refresh_mac)
        # Load Alleles and XX Codes
        self.valid_alleles, self.xx_codes = generate_alleles_and_xx_codes(
            self.db_connection, imgt_version)
        # Load ARS mappings
        self.ars_mappings = generate_ars_mapping(self.db_connection,
                                                 imgt_version)
        # Load Serology mappings
        generate_serology_mapping(self.db_connection, imgt_version)
        # Load V2 to V3 mappings
        generate_v2_to_v3_mapping(self.db_connection, imgt_version)

        # Close the current read-write db connection
        self.db_connection.close()

        # reference data is read-only and can be frozen
        gc.freeze()

        # Re-open the connection in read-only mode as we're not updating it anymore
        self.db_connection = db.create_db_connection(data_dir,
                                                     imgt_version,
                                                     ro=True)
Example #7
    def launch_service(self, service, workers=1):
        """Launch a service with a given number of workers.

        :param service: a service to launch, must be an instance of
               :class:`oslo_service.service.ServiceBase`
        :param workers: a number of processes in which a service
               will be running
        """
        _check_service_base(service)
        wrap = ServiceWrapper(service, workers)

        # Hide existing objects from the garbage collector, so that most
        # existing pages will remain in shared memory rather than being
        # duplicated between subprocesses in the GC mark-and-sweep. (Requires
        # Python 3.7 or later.)
        if hasattr(gc, 'freeze'):
            gc.freeze()

        LOG.info('Starting %d workers', wrap.workers)
        while self.running and len(wrap.children) < wrap.workers:
            self._start_child(wrap)
Example #8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--sockname')
    parser.add_argument('--numproc')
    args = parser.parse_args()

    numproc = int(args.numproc)
    assert numproc > 1

    ql_parser.preload()
    gc.freeze()

    for _ in range(int(args.numproc) - 1):
        if not os.fork():
            # child process
            break

    try:
        run_worker(args.sockname)
    except (amsg.PoolClosedError, KeyboardInterrupt):
        exit(0)
Example #9
def start_http(app: web.Application,
               http_port: int = port,
               use_fork: bool = fork):
    """
    Create app instance(s) binding a port.

    :param app: the app to execute in server instances
    :param http_port: port to bind
    :param use_fork: fork or not to use more than one CPU (process)
    """
    http_socket = netutil.bind_sockets(http_port)  # HTTP socket
    if use_fork:
        try:  # try to fork worker processes
            if gc.isenabled():
                # freeze before a POSIX fork() so GC passes in the children
                # do not dirty the copy-on-write pages of pre-fork objects
                gc.freeze()
            process.fork_processes(0)  # fork one process per CPU
        except KeyboardInterrupt:  # except KeyboardInterrupt to "properly" exit
            ioloop.IOLoop.current().stop()
            gc.unfreeze()
            exit(0)
        except AttributeError:  # OS without fork() support ...
            logger.warning('Cannot fork, continuing with a single process ...')
            # do nothing and continue without multiple processes

    logger.info('Start an HTTP request handler on port : ' + str(http_port))
    server = httpserver.HTTPServer(app)
    server.add_sockets(http_socket)  # bind http port

    global servers
    servers.append((server, ioloop.IOLoop.current()))

    # try to stay forever alive to satisfy user's requests, except on KeyboardInterrupt to "properly" exit
    try:
        ioloop.IOLoop.current().start()
    except KeyboardInterrupt:
        server.close_all_connections()
        server.stop()
        ioloop.IOLoop.current().stop()
Example #10
    def start(number):
        freeze()
        pid = os.fork()
        unfreeze()

        if pid:
            children[pid] = number
            return None

        # child process
        seed = int(hexlify(os.urandom(16)), 16)
        random.seed(seed)

        global TASK_ID

        for sig in pass_signals:
            signal.signal(sig, lambda c, *_: exit(c))

        TASK_ID = number

        entrypoint()
        exit(0)
Example #11
    def run(self) -> NoReturn:
        with self.passenger.run_state("SUBPROCESS_LISTEN"):
            sock, path = self.create_and_advertise_server()
            self.server = sock

        gc.collect()
        gc.freeze()

        self.passenger.ready()
        while not self._is_child:
            r, _, _ = select.select([sys.stdin, sock], [], [])
            if sys.stdin in r:
                break

            client, _ = sock.accept()
            reader = client.makefile('r')
            data = reader.readline()
            command = json.loads(data)  # note: json.loads() no longer accepts an encoding argument
            if command['command'] == 'spawn':
                self.make_new_worker(command, client)
            else:
                name = command['command']
                client.sendall(
                    json.dumps(
                        {
                            'result': 'error',
                            'message': f"Unknown command {name}"
                        },
                        ensure_ascii=True))

        if not self._is_child:
            sock.shutdown(socket.SHUT_RDWR)
        sock.close()

        if self._is_child:
            self.pass_to_main()
Example #12
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--sockname')
    parser.add_argument('--numproc')
    args = parser.parse_args()

    numproc = int(args.numproc)
    assert numproc > 1

    ql_parser.preload()
    gc.freeze()

    children = set()
    continuous_num_spawns = 0

    for _ in range(int(args.numproc)):
        # spawn initial workers
        if pid := os.fork():
            # main process
            children.add(pid)
            continuous_num_spawns += 1
        else:
            # child process
            break
Example #13
    def test_get_all_referrers(self):
        obj = 'test_get_all_referrers'
        referrer = [obj]
        gc.freeze()
        self.assertIn(referrer, gc.get_all_referrers(obj))
        gc.unfreeze()
Example #14
    def test_get_frozen_heap(self):
        gc.freeze()
        self.assertEqual(gc.get_freeze_count(), len(gc.get_frozen_objects()))
        gc.unfreeze()
Example #15
async def start(hs: "HomeServer") -> None:
    """
    Start a Synapse server or worker.

    Should be called once the reactor is running.

    Will start the main HTTP listeners and do some other startup tasks, and then
    notify systemd.

    Args:
        hs: homeserver instance
    """
    reactor = hs.get_reactor()

    # We want to use a separate thread pool for the resolver so that large
    # numbers of DNS requests don't starve out other users of the threadpool.
    resolver_threadpool = ThreadPool(name="gai_resolver")
    resolver_threadpool.start()
    reactor.addSystemEventTrigger("during", "shutdown",
                                  resolver_threadpool.stop)
    reactor.installNameResolver(
        GAIResolver(reactor, getThreadPool=lambda: resolver_threadpool))

    # Register the threadpools with our metrics.
    register_threadpool("default", reactor.getThreadPool())
    register_threadpool("gai_resolver", resolver_threadpool)

    # Set up the SIGHUP machinery.
    if hasattr(signal, "SIGHUP"):

        @wrap_as_background_process("sighup")
        async def handle_sighup(*args: Any, **kwargs: Any) -> None:
            # Tell systemd our state, if we're using it. This will silently fail if
            # we're not using systemd.
            sdnotify(b"RELOADING=1")

            for i, args, kwargs in _sighup_callbacks:
                i(*args, **kwargs)

            sdnotify(b"READY=1")

        # We defer running the sighup handlers until next reactor tick. This
        # is so that we're in a sane state, e.g. flushing the logs may fail
        # if the sighup happens in the middle of writing a log entry.
        def run_sighup(*args: Any, **kwargs: Any) -> None:
            # `callFromThread` should be "signal safe" as well as thread
            # safe.
            reactor.callFromThread(handle_sighup, *args, **kwargs)

        signal.signal(signal.SIGHUP, run_sighup)

        register_sighup(refresh_certificate, hs)
        register_sighup(reload_cache_config, hs.config)

    # Apply the cache config.
    hs.config.caches.resize_all_caches()

    # Load the certificate from disk.
    refresh_certificate(hs)

    # Start the tracer
    init_tracer(hs)  # noqa

    # Instantiate the modules so they can register their web resources to the module API
    # before we start the listeners.
    module_api = hs.get_module_api()
    for module, config in hs.config.modules.loaded_modules:
        m = module(config=config, api=module_api)
        logger.info("Loaded module %s", m)

    load_legacy_spam_checkers(hs)
    load_legacy_third_party_event_rules(hs)
    load_legacy_presence_router(hs)
    load_legacy_password_auth_providers(hs)

    # If we've configured an expiry time for caches, start the background job now.
    setup_expire_lru_cache_entries(hs)

    # It is now safe to start your Synapse.
    hs.start_listening()
    hs.get_datastores().main.db_pool.start_profiling()
    hs.get_pusherpool().start()

    # Log when we start the shut down process.
    hs.get_reactor().addSystemEventTrigger("before", "shutdown", logger.info,
                                           "Shutting down...")

    setup_sentry(hs)
    setup_sdnotify(hs)

    # If background tasks are running on the main process, start collecting the
    # phone home stats.
    if hs.config.worker.run_background_tasks:
        start_phone_stats_home(hs)

    # We now freeze all allocated objects in the hopes that (almost)
    # everything currently allocated are things that will be used for the
    # rest of time. Doing so means less work each GC (hopefully).
    #
    # PyPy does not (yet?) implement gc.freeze()
    if hasattr(gc, "freeze"):
        gc.collect()
        gc.freeze()

        # Speed up shutdowns by freezing all allocated objects. This moves everything
        # into the permanent generation and excludes them from the final GC.
        atexit.register(gc.freeze)
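
The atexit.register(gc.freeze) call at the end of Example #15 is independent of the pre-fork uses above: freezing at interpreter exit moves every tracked object into the permanent generation, so the collection that runs during shutdown has essentially nothing left to scan. The standalone version is just:

import atexit
import gc

# A last-minute freeze: the shutdown-time collection skips the permanent
# generation, so exiting no longer pays for a full final GC pass.
atexit.register(gc.freeze)
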
Example #16
def start(hs: "synapse.server.HomeServer",
          listeners: Iterable[ListenerConfig]):
    """
    Start a Synapse server or worker.

    Should be called once the reactor is running and (if we're using ACME) the
    TLS certificates are in place.

    Will start the main HTTP listeners and do some other startup tasks, and then
    notify systemd.

    Args:
        hs: homeserver instance
        listeners: Listener configuration ('listeners' in homeserver.yaml)
    """
    try:
        # Set up the SIGHUP machinery.
        if hasattr(signal, "SIGHUP"):

            def handle_sighup(*args, **kwargs):
                # Tell systemd our state, if we're using it. This will silently fail if
                # we're not using systemd.
                sdnotify(b"RELOADING=1")

                for i, args, kwargs in _sighup_callbacks:
                    i(*args, **kwargs)

                sdnotify(b"READY=1")

            signal.signal(signal.SIGHUP, handle_sighup)

            register_sighup(refresh_certificate, hs)

        # Load the certificate from disk.
        refresh_certificate(hs)

        # Start the tracer
        synapse.logging.opentracing.init_tracer(  # type: ignore[attr-defined] # noqa
            hs)

        # It is now safe to start your Synapse.
        hs.start_listening(listeners)
        hs.get_datastore().db_pool.start_profiling()
        hs.get_pusherpool().start()

        # Log when we start the shut down process.
        hs.get_reactor().addSystemEventTrigger("before", "shutdown",
                                               logger.info, "Shutting down...")

        setup_sentry(hs)
        setup_sdnotify(hs)

        # If background tasks are running on the main process, start collecting the
        # phone home stats.
        if hs.config.run_background_tasks:
            start_phone_stats_home(hs)

        # We now freeze all allocated objects in the hopes that (almost)
        # everything currently allocated are things that will be used for the
        # rest of time. Doing so means less work each GC (hopefully).
        #
        # This only works on Python 3.7
        if sys.version_info >= (3, 7):
            gc.collect()
            gc.freeze()
    except Exception:
        traceback.print_exc(file=sys.stderr)
        reactor = hs.get_reactor()
        if reactor.running:
            reactor.stop()
        sys.exit(1)
Example #17
def when_ready(server):
    # mark preloaded app objects as uncollectable
    gc.freeze()
    # enable child memory watcher
    mw = MemoryWatch(server, restart_on_rss)
    mw.start()
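
Example #17 is a gunicorn server hook: when_ready(server) runs in the master process after startup. For the freeze to pay off, the application must already be imported in the master before the workers fork, which is what gunicorn's preload_app setting does. A minimal gunicorn.conf.py along those lines (MemoryWatch and restart_on_rss above are project-specific, not part of gunicorn):

import gc

preload_app = True  # import the application once, in the master, before forking

def when_ready(server):
    # The master is up and the preloaded app is in memory; freeze it so the
    # workers' GC passes never touch (and copy-on-write duplicate) those pages.
    gc.collect()
    gc.freeze()
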
Example #18
def fork_analysis(slices, concurrency, analysis_func, kw, preserve_result,
                  output_fds, q):
    from multiprocessing import Process
    import gc
    children = []
    t = monotonic()
    pid = os.getpid()
    if hasattr(gc, 'freeze'):
        # See https://bugs.python.org/issue31558
        # (Though we keep the gc disabled by default.)
        gc.freeze()
    delayed_start = False
    delayed_start_todo = 0
    for i in range(slices):
        if i == concurrency:
            assert concurrency != 0
            # The rest will wait on this queue
            delayed_start = os.pipe()
            delayed_start_todo = slices - i
        p = SimplifiedProcess(target=call_analysis,
                              args=(analysis_func, i, delayed_start, q,
                                    preserve_result, pid, output_fds),
                              kwargs=kw,
                              name='analysis-%d' % (i, ))
        children.append(p)
    for fd in output_fds:
        os.close(fd)
    if delayed_start:
        os.close(delayed_start[0])
    q.make_reader()
    per_slice = []
    temp_files = {}
    no_children_no_messages = False
    reap_time = monotonic() + 5
    exit_count = 0
    while len(per_slice) < slices:
        if exit_count > 0 or reap_time <= monotonic():
            still_alive = []
            for p in children:
                if p.is_alive():
                    still_alive.append(p)
                else:
                    exit_count -= 1
                    if p.exitcode:
                        raise AcceleratorError(
                            "%s terminated with exitcode %d" % (
                                p.name,
                                p.exitcode,
                            ))
            children = still_alive
            reap_time = monotonic() + 5
        # If a process dies badly we may never get a message here.
        # (iowrapper tries to tell us though.)
        # No need to handle that very quickly though, 10 seconds is fine.
        # (Typically this is caused by running out of memory.)
        try:
            msg = q.get(timeout=10)
            if not msg:
                # Notification from iowrapper, so we wake up (quickly) even if
                # the process died badly (e.g. from running out of memory).
                exit_count += 1
                continue
            s_no, s_t, s_temp_files, s_dw_lens, s_dw_minmax, s_dw_compressions, s_tb = msg
        except QueueEmpty:
            if not children:
                # No children left, so they must have all sent their messages.
                # Still, just to be sure there isn't a race, wait one iteration more.
                if no_children_no_messages:
                    raise AcceleratorError(
                        "All analysis processes exited cleanly, but not all returned a result."
                    )
                else:
                    no_children_no_messages = True
            continue
        if s_tb:
            data = [{'analysis(%d)' % (s_no, ): s_tb}, None]
            writeall(_prof_fd, json.dumps(data).encode('utf-8'))
            exitfunction()
        if delayed_start_todo:
            # Another analysis is allowed to run now
            os.write(delayed_start[1], b'a')
            delayed_start_todo -= 1
        per_slice.append((s_no, s_t))
        temp_files.update(s_temp_files)
        for name, lens in s_dw_lens.items():
            dataset._datasetwriters[name]._lens.update(lens)
        for name, minmax in s_dw_minmax.items():
            dataset._datasetwriters[name]._minmax.update(minmax)
        for name, compressions in s_dw_compressions.items():
            dataset._datasetwriters[name]._compressions.update(compressions)
    g.update_top_status("Waiting for all slices to finish cleanup")
    q.close()
    if delayed_start:
        os.close(delayed_start[1])
    for p in children:
        p.join()
    if preserve_result:
        res_seq = ResultIterMagic(
            slices, reuse_msg="analysis_res is an iterator, don't re-use it")
    else:
        res_seq = None
    return [v - t for k, v in sorted(per_slice)], temp_files, res_seq
Example #19
def fork_analysis(slices, concurrency, analysis_func, kw, preserve_result,
                  output_fds):
    from multiprocessing import Process, Queue
    import gc
    q = Queue()
    children = []
    t = monotonic()
    pid = os.getpid()
    if hasattr(gc, 'freeze'):
        # See https://bugs.python.org/issue31558
        # (Though we keep the gc disabled by default.)
        gc.freeze()
    delayed_start = False
    for i in range(slices):
        if i == concurrency:
            assert concurrency != 0
            # The rest will wait on this queue
            delayed_start = Queue()
        p = Process(target=call_analysis,
                    args=(analysis_func, i, delayed_start, q, preserve_result,
                          pid, output_fds),
                    kwargs=kw,
                    name='analysis-%d' % (i, ))
        p.start()
        children.append(p)
    for fd in output_fds:
        os.close(fd)
    per_slice = []
    temp_files = {}
    no_children_no_messages = False
    while len(per_slice) < slices:
        still_alive = []
        for p in children:
            if p.is_alive():
                still_alive.append(p)
            else:
                p.join()
                if p.exitcode:
                    raise Exception("%s terminated with exitcode %d" % (
                        p.name,
                        p.exitcode,
                    ))
        children = still_alive
        # If a process dies badly we may never get a message here.
        # No need to handle that very quickly though, 10 seconds is fine.
        # (Typically this is caused by running out of memory.)
        try:
            s_no, s_t, s_temp_files, s_dw_lens, s_dw_minmax, s_tb = q.get(
                timeout=10)
        except QueueEmpty:
            if not children:
                # No children left, so they must have all sent their messages.
                # Still, just to be sure there isn't a race, wait one iteration more.
                if no_children_no_messages:
                    raise Exception(
                        "All analysis processes exited cleanly, but not all returned a result."
                    )
                else:
                    no_children_no_messages = True
            continue
        if s_tb:
            data = [{'analysis(%d)' % (s_no, ): s_tb}, None]
            writeall(_prof_fd, json.dumps(data).encode('utf-8'))
            exitfunction()
        if delayed_start:
            # Another analysis is allowed to run now
            delayed_start.put(None)
        per_slice.append((s_no, s_t))
        temp_files.update(s_temp_files)
        for name, lens in s_dw_lens.items():
            dataset._datasetwriters[name]._lens.update(lens)
        for name, minmax in s_dw_minmax.items():
            dataset._datasetwriters[name]._minmax.update(minmax)
    g.update_top_status("Waiting for all slices to finish cleanup")
    for p in children:
        p.join()
    if preserve_result:
        res_seq = ResultIterMagic(
            slices, reuse_msg="analysis_res is an iterator, don't re-use it")
    else:
        res_seq = None
    return [v - t for k, v in sorted(per_slice)], temp_files, res_seq
Example #20
    def test_freeze(self):
        gc.freeze()
        self.assertGreater(gc.get_freeze_count(), 0)
        gc.unfreeze()
        self.assertEqual(gc.get_freeze_count(), 0)
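
Two caveats about the tests above: gc.get_all_referrers() (Example #13) and gc.get_frozen_objects() (Example #14) are not part of the standard CPython gc module and appear to come from a patched interpreter, while gc.get_freeze_count() in Example #20 is standard. What the freeze/unfreeze pair actually does is easiest to see with a reference cycle:

import gc

class Node:
    def __init__(self):
        self.ref = None

# Build an unreachable reference cycle that only the cyclic GC can reclaim.
a, b = Node(), Node()
a.ref, b.ref = b, a
del a, b

gc.freeze()          # the cycle moves into the permanent generation too
print(gc.collect())  # usually 0 -- collections ignore the permanent generation
gc.unfreeze()        # move everything back into the oldest generation
print(gc.collect())  # non-zero -- the cycle is found and reclaimed now
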
Example #21
async def start(hs: "synapse.server.HomeServer"):
    """
    Start a Synapse server or worker.

    Should be called once the reactor is running and (if we're using ACME) the
    TLS certificates are in place.

    Will start the main HTTP listeners and do some other startup tasks, and then
    notify systemd.

    Args:
        hs: homeserver instance
    """
    # Set up the SIGHUP machinery.
    if hasattr(signal, "SIGHUP"):
        reactor = hs.get_reactor()

        @wrap_as_background_process("sighup")
        def handle_sighup(*args, **kwargs):
            # Tell systemd our state, if we're using it. This will silently fail if
            # we're not using systemd.
            sdnotify(b"RELOADING=1")

            for i, args, kwargs in _sighup_callbacks:
                i(*args, **kwargs)

            sdnotify(b"READY=1")

        # We defer running the sighup handlers until next reactor tick. This
        # is so that we're in a sane state, e.g. flushing the logs may fail
        # if the sighup happens in the middle of writing a log entry.
        def run_sighup(*args, **kwargs):
            # `callFromThread` should be "signal safe" as well as thread
            # safe.
            reactor.callFromThread(handle_sighup, *args, **kwargs)

        signal.signal(signal.SIGHUP, run_sighup)

        register_sighup(refresh_certificate, hs)

    # Load the certificate from disk.
    refresh_certificate(hs)

    # Start the tracer
    synapse.logging.opentracing.init_tracer(hs)  # type: ignore[attr-defined] # noqa

    # It is now safe to start your Synapse.
    hs.start_listening()
    hs.get_datastore().db_pool.start_profiling()
    hs.get_pusherpool().start()

    # Log when we start the shut down process.
    hs.get_reactor().addSystemEventTrigger(
        "before", "shutdown", logger.info, "Shutting down..."
    )

    setup_sentry(hs)
    setup_sdnotify(hs)

    # If background tasks are running on the main process, start collecting the
    # phone home stats.
    if hs.config.run_background_tasks:
        start_phone_stats_home(hs)

    # We now freeze all allocated objects in the hopes that (almost)
    # everything currently allocated are things that will be used for the
    # rest of time. Doing so means less work each GC (hopefully).
    #
    # This only works on Python 3.7
    if platform.python_implementation() == "CPython" and sys.version_info >= (3, 7):
        gc.collect()
        gc.freeze()
Example #22
async def start(hs: "HomeServer"):
    """
    Start a Synapse server or worker.

    Should be called once the reactor is running.

    Will start the main HTTP listeners and do some other startup tasks, and then
    notify systemd.

    Args:
        hs: homeserver instance
    """
    # Set up the SIGHUP machinery.
    if hasattr(signal, "SIGHUP"):
        reactor = hs.get_reactor()

        @wrap_as_background_process("sighup")
        def handle_sighup(*args, **kwargs):
            # Tell systemd our state, if we're using it. This will silently fail if
            # we're not using systemd.
            sdnotify(b"RELOADING=1")

            for i, args, kwargs in _sighup_callbacks:
                i(*args, **kwargs)

            sdnotify(b"READY=1")

        # We defer running the sighup handlers until next reactor tick. This
        # is so that we're in a sane state, e.g. flushing the logs may fail
        # if the sighup happens in the middle of writing a log entry.
        def run_sighup(*args, **kwargs):
            # `callFromThread` should be "signal safe" as well as thread
            # safe.
            reactor.callFromThread(handle_sighup, *args, **kwargs)

        signal.signal(signal.SIGHUP, run_sighup)

        register_sighup(refresh_certificate, hs)

    # Load the certificate from disk.
    refresh_certificate(hs)

    # Start the tracer
    synapse.logging.opentracing.init_tracer(
        hs)  # type: ignore[attr-defined] # noqa

    # Instantiate the modules so they can register their web resources to the module API
    # before we start the listeners.
    module_api = hs.get_module_api()
    for module, config in hs.config.modules.loaded_modules:
        module(config=config, api=module_api)

    load_legacy_spam_checkers(hs)
    load_legacy_third_party_event_rules(hs)
    load_legacy_presence_router(hs)

    # If we've configured an expiry time for caches, start the background job now.
    setup_expire_lru_cache_entries(hs)

    # It is now safe to start your Synapse.
    hs.start_listening()
    hs.get_datastore().db_pool.start_profiling()
    hs.get_pusherpool().start()

    # Log when we start the shut down process.
    hs.get_reactor().addSystemEventTrigger("before", "shutdown", logger.info,
                                           "Shutting down...")

    setup_sentry(hs)
    setup_sdnotify(hs)

    # If background tasks are running on the main process, start collecting the
    # phone home stats.
    if hs.config.worker.run_background_tasks:
        start_phone_stats_home(hs)

    # We now freeze all allocated objects in the hopes that (almost)
    # everything currently allocated are things that will be used for the
    # rest of time. Doing so means less work each GC (hopefully).
    #
    # This only works on Python 3.7
    if platform.python_implementation() == "CPython" and sys.version_info >= (
            3, 7):
        gc.collect()
        gc.freeze()

    # Speed up shutdowns by freezing all allocated objects. This moves everything
    # into the permanent generation and excludes them from the final GC.
    # Unfortunately only works on Python 3.7
    if platform.python_implementation() == "CPython" and sys.version_info >= (
            3, 7):
        atexit.register(gc.freeze)
Example #23
    def test_freeze(self):
        gc.freeze()
        self.assertGreater(gc.get_freeze_count(), 0)
        gc.unfreeze()
        self.assertEqual(gc.get_freeze_count(), 0)
Example #24
def main():
    w = MyWindow(WIDTH * ZOOM, HEIGHT * ZOOM)
    import gc

    gc.freeze()
    pyglet.app.run()
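
Example #24 is the one single-process, non-forking use in this list: it freezes after the window is built but before pyglet.app.run() enters the event loop. A plausible reading (an assumption; the source gives no rationale) is that this targets GC pause time rather than memory sharing: once the long-lived startup objects sit in the permanent generation, later collections have far fewer objects to traverse. A sketch that measures that effect:

import gc
import time

def time_full_collect() -> float:
    t0 = time.perf_counter()
    gc.collect()
    return time.perf_counter() - t0

startup_state = [list(range(100)) for _ in range(100_000)]  # long-lived objects

before = time_full_collect()   # scans everything, startup_state included
gc.freeze()
after = time_full_collect()    # the permanent generation is skipped entirely
print(f"full GC before freeze: {before:.4f}s, after: {after:.4f}s")
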
Example #25
import gc

from everyclass.auth import create_app
from everyclass.common import trace

trace.patch()

app = create_app()
gc.set_threshold(0)
gc.freeze()

if __name__ == '__main__':
    print(
        "You should not run this file. "
        "Instead, run in Docker or `uwsgi --ini deploy/uwsgi-local.ini` for consistent behaviour."
    )
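
Example #25 pairs gc.freeze() with gc.set_threshold(0), which zeroes the allocation threshold that triggers automatic collections; under uWSGI's pre-forking model the frozen, GC-quiet heap then stays in pages shared across workers. gc.disable() reaches a similar end state through a different switch; a short sketch of the distinction:

import gc

gc.set_threshold(0)   # zero the gen-0 allocation threshold: nothing triggers
                      # automatic collections, but the collector stays "enabled"
# -- alternatively --
gc.disable()          # flip the collector's enabled flag instead

gc.freeze()           # either way, freeze the now-static startup heap

gc.collect()          # explicit collections still work in both cases
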
Example #26
def main():
    gc.freeze()
    args = parse_args()
    train(args)
    print("Exiting!")