Example #1
def install_dns_limiter(reactor, max_dns_requests_in_flight=100):
    """Replaces the resolver with one that limits the number of in flight DNS
    requests.

    This is to workaround https://twistedmatrix.com/trac/ticket/9620, where we
    can run out of file descriptors and infinite loop if we attempt to do too
    many DNS queries at once
    """
    new_resolver = _LimitedHostnameResolver(reactor.nameResolver,
                                            max_dns_requests_in_flight)

    reactor.installNameResolver(new_resolver)
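
The `_LimitedHostnameResolver` helper itself is not shown on this page. As a rough sketch only (the class and attribute names below are illustrative, not Synapse's actual implementation), a resolver with this behaviour can be built on Twisted's `IHostnameResolver` and `IResolutionReceiver` interfaces by queueing lookups past a concurrency cap:

from collections import deque

from zope.interface import implementer

from twisted.internet.interfaces import IHostnameResolver, IResolutionReceiver


@implementer(IHostnameResolver)
class QueueingHostnameResolver:
    """Sketch: wraps another IHostnameResolver, capping concurrent lookups.

    Lookups beyond `max_in_flight` wait in a FIFO queue and are started as
    earlier ones complete.
    """

    def __init__(self, sub_resolver, max_in_flight):
        self._sub_resolver = sub_resolver
        self._max_in_flight = max_in_flight
        self._in_flight = 0
        self._queue = deque()

    def resolveHostName(self, resolutionReceiver, hostName, portNumber=0,
                        addressTypes=None, transportSemantics="TCP"):
        self._queue.append((resolutionReceiver, hostName, portNumber,
                            addressTypes, transportSemantics))
        self._maybe_start_next()
        # The lookup may not have started yet, so there is no IHostResolution
        # to hand back; return the receiver, which callers typically ignore.
        return resolutionReceiver

    def _maybe_start_next(self):
        # Start queued lookups while we have spare capacity.
        while self._queue and self._in_flight < self._max_in_flight:
            self._in_flight += 1
            args = self._queue.popleft()
            self._start_lookup(*args)

    def _start_lookup(self, receiver, *args):
        limiter = self

        # Proxy the receiver so we notice when the lookup finishes and can
        # release the slot to the next queued request.
        @implementer(IResolutionReceiver)
        class _TrackingReceiver:
            def resolutionBegan(self, resolution):
                receiver.resolutionBegan(resolution)

            def addressResolved(self, address):
                receiver.addressResolved(address)

            def resolutionComplete(self):
                limiter._in_flight -= 1
                receiver.resolutionComplete()
                limiter._maybe_start_next()

        self._sub_resolver.resolveHostName(_TrackingReceiver(), *args)

Each completed lookup releases a slot and starts the next queued request, bounding the number of simultaneous lookups (and hence file descriptors) no matter how many resolutions are requested at once.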
Example #2
def install_dns_limiter(reactor, max_dns_requests_in_flight=100):
    """Replaces the resolver with one that limits the number of in flight DNS
    requests.

    This is to workaround https://twistedmatrix.com/trac/ticket/9620, where we
    can run out of file descriptors and infinite loop if we attempt to do too
    many DNS queries at once

    XXX: I'm confused by this. reactor.nameResolver does not use twisted.names unless
    you explicitly install twisted.names as the resolver; rather it uses a GAIResolver
    backed by the reactor's default threadpool (which is limited to 10 threads). So
    (a) I don't understand why twisted ticket 9620 is relevant, and (b) I don't
    understand why we would run out of FDs if we did too many lookups at once.
    -- richvdh 2020/08/29
    """
    new_resolver = _LimitedHostnameResolver(reactor.nameResolver,
                                            max_dns_requests_in_flight)

    reactor.installNameResolver(new_resolver)
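
The XXX note above can be checked directly: on a stock Twisted reactor, `nameResolver` is a `GAIResolver` that runs `socket.getaddrinfo()` in the reactor's thread pool. A quick way to confirm (the values in the comments are what one would typically see; exact types may vary across Twisted versions):

from twisted.internet import reactor

# The default resolver delegates to socket.getaddrinfo() on the reactor's
# thread pool, whose maximum size defaults to 10 threads.
print(type(reactor.nameResolver))   # twisted.internet._resolver.GAIResolver
print(reactor.getThreadPool().max)  # 10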
Example #3
async def start(hs: "HomeServer"):
    """
    Start a Synapse server or worker.

    Should be called once the reactor is running.

    Will start the main HTTP listeners and do some other startup tasks, and then
    notify systemd.

    Args:
        hs: homeserver instance
    """
    reactor = hs.get_reactor()

    # We want to use a separate thread pool for the resolver so that large
    # numbers of DNS requests don't starve out other users of the threadpool.
    resolver_threadpool = ThreadPool(name="gai_resolver")
    resolver_threadpool.start()
    reactor.addSystemEventTrigger("during", "shutdown",
                                  resolver_threadpool.stop)
    reactor.installNameResolver(
        GAIResolver(reactor, getThreadPool=lambda: resolver_threadpool))

    # Register the threadpools with our metrics.
    register_threadpool("default", reactor.getThreadPool())
    register_threadpool("gai_resolver", resolver_threadpool)

    # Set up the SIGHUP machinery.
    if hasattr(signal, "SIGHUP"):

        @wrap_as_background_process("sighup")
        def handle_sighup(*args, **kwargs):
            # Tell systemd our state, if we're using it. This will silently fail if
            # we're not using systemd.
            sdnotify(b"RELOADING=1")

            for func, args, kwargs in _sighup_callbacks:
                func(*args, **kwargs)

            sdnotify(b"READY=1")

        # We defer running the sighup handlers until next reactor tick. This
        # is so that we're in a sane state, e.g. flushing the logs may fail
        # if the sighup happens in the middle of writing a log entry.
        def run_sighup(*args, **kwargs):
            # `callFromThread` should be "signal safe" as well as thread
            # safe.
            reactor.callFromThread(handle_sighup, *args, **kwargs)

        signal.signal(signal.SIGHUP, run_sighup)

        register_sighup(refresh_certificate, hs)

    # Load the certificate from disk.
    refresh_certificate(hs)

    # Start the tracer
    synapse.logging.opentracing.init_tracer(hs)  # type: ignore[attr-defined] # noqa

    # Instantiate the modules so they can register their web resources with the
    # module API before we start the listeners.
    module_api = hs.get_module_api()
    for module, config in hs.config.modules.loaded_modules:
        module(config=config, api=module_api)

    load_legacy_spam_checkers(hs)
    load_legacy_third_party_event_rules(hs)
    load_legacy_presence_router(hs)
    load_legacy_password_auth_providers(hs)

    # If we've configured an expiry time for caches, start the background job now.
    setup_expire_lru_cache_entries(hs)

    # It is now safe to start your Synapse.
    hs.start_listening()
    hs.get_datastore().db_pool.start_profiling()
    hs.get_pusherpool().start()

    # Log when we start the shut down process.
    hs.get_reactor().addSystemEventTrigger("before", "shutdown", logger.info,
                                           "Shutting down...")

    setup_sentry(hs)
    setup_sdnotify(hs)

    # If background tasks are running on the main process, start collecting the
    # phone home stats.
    if hs.config.worker.run_background_tasks:
        start_phone_stats_home(hs)

    # We now freeze all allocated objects in the hope that (almost)
    # everything currently allocated will stay in use for the rest of the
    # process's lifetime. Doing so means less work on each GC run (hopefully).
    #
    # This only works on CPython 3.7+.
    if platform.python_implementation() == "CPython" and sys.version_info >= (3, 7):
        gc.collect()
        gc.freeze()

    # Speed up shutdowns by freezing all allocated objects just before exit.
    # This moves everything into the permanent generation and excludes it
    # from the final GC. Unfortunately this also only works on CPython 3.7+.
    if platform.python_implementation() == "CPython" and sys.version_info >= (3, 7):
        atexit.register(gc.freeze)
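
The `gc.freeze()` calls at the end rely on the object-freezing API added in CPython 3.7. A minimal standalone illustration of what these calls do:

import gc

gc.collect()                  # collect garbage first, so it is not frozen in place
gc.freeze()                   # move every surviving object to the permanent generation
print(gc.get_freeze_count())  # objects now exempt from further collections
gc.unfreeze()                 # move them back into the oldest generation

Frozen objects are simply skipped by the collector, which is what makes both the steady-state GC passes and the final collection at shutdown cheaper.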
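
Similarly, the SIGHUP handler in this example does no work in signal context; it only schedules `handle_sighup` via `reactor.callFromThread`, which (per the comment in the source) is signal-safe as well as thread-safe. A minimal sketch of the same pattern (the handler name here is illustrative; SIGHUP does not exist on Windows, hence the `hasattr` guard):

import signal

from twisted.internet import reactor

def reload_config():
    print("reloading, safely on the reactor thread")

# Do no real work in the signal handler itself: it may interrupt the process
# at any point (e.g. mid-way through writing a log line). callFromThread
# merely queues the callable for the next reactor iteration.
if hasattr(signal, "SIGHUP"):
    signal.signal(
        signal.SIGHUP,
        lambda signum, frame: reactor.callFromThread(reload_config),
    )

reactor.run()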