def _listener_http(self, config, listener_config):
    port = listener_config["port"]
    bind_addresses = listener_config["bind_addresses"]
    tls = listener_config.get("tls", False)
    site_tag = listener_config.get("tag", port)

    if tls and config.no_tls:
        return

    resources = {}
    for res in listener_config["resources"]:
        for name in res["names"]:
            resources.update(self._configure_named_resource(
                name, res.get("compress", False),
            ))

    additional_resources = listener_config.get("additional_resources", {})
    logger.debug("Configuring additional resources: %r", additional_resources)
    module_api = ModuleApi(self, self.get_auth_handler())
    for path, resmodule in additional_resources.items():
        handler_cls, config = load_module(resmodule)
        handler = handler_cls(config, module_api)
        resources[path] = AdditionalResource(self, handler.handle_request)

    root_resource = NoResource()

    root_resource = create_resource_tree(resources, root_resource)

    if tls:
        listen_ssl(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.https.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            ),
            self.tls_server_context_factory,
        )
    else:
        listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            )
        )
    logger.info("Synapse now listening on port %d", port)
def phone_stats_home():
    logger.info("Gathering stats for reporting")
    now = int(hs.get_clock().time())
    uptime = int(now - start_time)
    if uptime < 0:
        uptime = 0

    stats["homeserver"] = hs.config.server_name
    stats["timestamp"] = now
    stats["uptime_seconds"] = uptime
    stats["total_users"] = yield hs.get_datastore().count_all_users()

    total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
    stats["total_nonbridged_users"] = total_nonbridged_users

    room_count = yield hs.get_datastore().get_room_count()
    stats["total_room_count"] = room_count

    stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
    stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
    stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()

    daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
    stats["daily_sent_messages"] = daily_sent_messages

    logger.info("Reporting stats to matrix.org: %s" % (stats,))
    try:
        yield hs.get_simple_http_client().put_json(
            "https://matrix.org/report-usage-stats/push", stats
        )
    except Exception as e:
        logger.warn("Error reporting stats: %s", e)
def _listener_http(self, config, listener_config):
    port = listener_config["port"]
    bind_addresses = listener_config["bind_addresses"]
    tls = listener_config.get("tls", False)
    site_tag = listener_config.get("tag", port)

    if tls and config.no_tls:
        return

    resources = {}
    for res in listener_config["resources"]:
        for name in res["names"]:
            resources.update(self._configure_named_resource(
                name, res.get("compress", False),
            ))

    additional_resources = listener_config.get("additional_resources", {})
    logger.debug("Configuring additional resources: %r", additional_resources)
    module_api = ModuleApi(self, self.get_auth_handler())
    for path, resmodule in additional_resources.items():
        handler_cls, config = load_module(resmodule)
        handler = handler_cls(config, module_api)
        resources[path] = AdditionalResource(self, handler.handle_request)

    if WEB_CLIENT_PREFIX in resources:
        root_resource = RootRedirect(WEB_CLIENT_PREFIX)
    else:
        root_resource = NoResource()

    root_resource = create_resource_tree(resources, root_resource)

    if tls:
        listen_ssl(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.https.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            ),
            self.tls_server_context_factory,
        )
    else:
        listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            )
        )
    logger.info("Synapse now listening on port %d", port)
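# --- Example (not from the original source) ---------------------------------
# A minimal sketch of the listener_config mapping that the dict-based
# _listener_http variants above consume. The values are hypothetical; the
# keys mirror exactly what the code reads ("port", "bind_addresses", "tls",
# "tag", "resources", "additional_resources").
example_listener_config = {
    "port": 8008,                     # listener_config["port"]
    "bind_addresses": ["127.0.0.1"],  # listener_config["bind_addresses"]
    "tls": False,                     # listener_config.get("tls", False)
    "tag": "client",                  # listener_config.get("tag", port)
    "resources": [
        {"names": ["client", "federation"], "compress": True},
    ],
    "additional_resources": {},
}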
def setup(config_options):
    """
    Args:
        config_options: The options passed to Synapse. Usually `sys.argv[1:]`.

    Returns:
        HomeServer
    """
    try:
        config = HomeServerConfig.load_or_generate_config(
            "Synapse Homeserver", config_options)
    except ConfigError as e:
        sys.stderr.write("\n")
        for f in format_config_error(e):
            sys.stderr.write(f)
        sys.stderr.write("\n")
        sys.exit(1)

    if not config:
        # If a config isn't returned, and an exception isn't raised, we're just
        # generating config files and shouldn't try to continue.
        sys.exit(0)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts
    synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage

    if config.server.gc_seconds:
        synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds

    hs = SynapseHomeServer(
        config.server_name,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
    )

    synapse.config.logger.setup_logging(hs, config, use_worker_options=False)

    logger.info("Setting up server")

    try:
        hs.setup()
    except Exception as e:
        handle_startup_exception(e)

    async def start():
        # Load the OIDC provider metadatas, if OIDC is enabled.
        if hs.config.oidc_enabled:
            oidc = hs.get_oidc_handler()
            # Loading the provider metadata also ensures the provider config is valid.
            await oidc.load_metadata()

        await _base.start(hs)

        hs.get_datastore().db_pool.updates.start_doing_background_updates()

    register_start(start)

    return hs
def phone_stats_home():
    logger.info("Gathering stats for reporting")
    now = int(hs.get_clock().time())
    uptime = int(now - start_time)
    if uptime < 0:
        uptime = 0

    stats["homeserver"] = hs.config.server_name
    stats["timestamp"] = now
    stats["uptime_seconds"] = uptime
    version = sys.version_info
    stats["python_version"] = "{}.{}.{}".format(
        version.major, version.minor, version.micro
    )
    stats["total_users"] = yield hs.get_datastore().count_all_users()

    total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
    stats["total_nonbridged_users"] = total_nonbridged_users

    daily_user_type_results = yield hs.get_datastore().count_daily_user_type()
    for name, count in iteritems(daily_user_type_results):
        stats["daily_user_type_" + name] = count

    room_count = yield hs.get_datastore().get_room_count()
    stats["total_room_count"] = room_count

    stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
    stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
    stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()

    r30_results = yield hs.get_datastore().count_r30_users()
    for name, count in iteritems(r30_results):
        stats["r30_users_" + name] = count

    daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
    stats["daily_sent_messages"] = daily_sent_messages

    stats["cache_factor"] = CACHE_SIZE_FACTOR
    stats["event_cache_size"] = hs.config.event_cache_size

    if len(stats_process) > 0:
        stats["memory_rss"] = 0
        stats["cpu_average"] = 0
        for process in stats_process:
            stats["memory_rss"] += process.memory_info().rss
            stats["cpu_average"] += int(process.cpu_percent(interval=None))

    logger.info("Reporting stats to matrix.org: %s" % (stats,))
    try:
        yield hs.get_simple_http_client().put_json(
            "https://matrix.org/report-usage-stats/push", stats
        )
    except Exception as e:
        logger.warn("Error reporting stats: %s", e)
def phone_stats_home():
    logger.info("Gathering stats for reporting")
    now = int(hs.get_clock().time())
    uptime = int(now - start_time)
    if uptime < 0:
        uptime = 0

    stats["homeserver"] = hs.config.server_name
    stats["server_context"] = hs.config.server_context
    stats["timestamp"] = now
    stats["uptime_seconds"] = uptime
    version = sys.version_info
    stats["python_version"] = "{}.{}.{}".format(
        version.major, version.minor, version.micro
    )
    stats["total_users"] = yield hs.get_datastore().count_all_users()

    total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
    stats["total_nonbridged_users"] = total_nonbridged_users

    daily_user_type_results = yield hs.get_datastore().count_daily_user_type()
    for name, count in iteritems(daily_user_type_results):
        stats["daily_user_type_" + name] = count

    room_count = yield hs.get_datastore().get_room_count()
    stats["total_room_count"] = room_count

    stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
    stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
    stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()

    r30_results = yield hs.get_datastore().count_r30_users()
    for name, count in iteritems(r30_results):
        stats["r30_users_" + name] = count

    daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
    stats["daily_sent_messages"] = daily_sent_messages

    stats["cache_factor"] = CACHE_SIZE_FACTOR
    stats["event_cache_size"] = hs.config.event_cache_size

    if len(stats_process) > 0:
        stats["memory_rss"] = 0
        stats["cpu_average"] = 0
        for process in stats_process:
            stats["memory_rss"] += process.memory_info().rss
            stats["cpu_average"] += int(process.cpu_percent(interval=None))

    stats["database_engine"] = hs.get_datastore().database_engine_name
    stats["database_server_version"] = hs.get_datastore().get_server_version()

    logger.info("Reporting stats to matrix.org: %s" % (stats,))
    try:
        yield hs.get_simple_http_client().put_json(
            "https://matrix.org/report-usage-stats/push", stats
        )
    except Exception as e:
        logger.warn("Error reporting stats: %s", e)
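# --- Example (not from the original source) ---------------------------------
# Illustrative shape of the JSON body that put_json() sends to the
# report-usage-stats endpoint once phone_stats_home() has filled in the
# fields above. All values are made up.
example_stats = {
    "homeserver": "example.com",
    "timestamp": 1500000000,
    "uptime_seconds": 86400,
    "python_version": "3.8.10",
    "total_users": 42,
    "total_room_count": 17,
    "daily_active_users": 7,
    "daily_sent_messages": 123,
}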
def performance_stats_init():
    try:
        process = psutil.Process()
        # Ensure we can fetch both, and make the initial request for cpu_percent
        # so the next request will use this as the initial point.
        process.memory_info().rss
        process.cpu_percent(interval=None)
        logger.info("report_stats can use psutil")
        stats_process.append(process)
    except AttributeError:
        logger.warning("Unable to read memory/cpu stats. Disabling reporting.")
def phone_stats_home():
    logger.info("Gathering stats for reporting")
    now = int(hs.get_clock().time())
    uptime = int(now - start_time)
    if uptime < 0:
        uptime = 0

    # If the stats directory is empty then this is the first time we've
    # reported stats.
    first_time = not stats

    stats["homeserver"] = hs.config.server_name
    stats["timestamp"] = now
    stats["uptime_seconds"] = uptime
    stats["total_users"] = yield hs.get_datastore().count_all_users()

    room_count = yield hs.get_datastore().get_room_count()
    stats["total_room_count"] = room_count

    stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
    daily_messages = yield hs.get_datastore().count_daily_messages()
    if daily_messages is not None:
        stats["daily_messages"] = daily_messages
    else:
        stats.pop("daily_messages", None)

    if first_time:
        # Add callbacks to report the synapse stats as metrics whenever
        # prometheus requests them, typically every 30s.
        # As some of the stats are expensive to calculate we only update
        # them when synapse phones home to matrix.org every 24 hours.
        metrics = get_metrics_for("synapse.usage")
        metrics.add_callback("timestamp", lambda: stats["timestamp"])
        metrics.add_callback("uptime_seconds", lambda: stats["uptime_seconds"])
        metrics.add_callback("total_users", lambda: stats["total_users"])
        metrics.add_callback("total_room_count", lambda: stats["total_room_count"])
        metrics.add_callback("daily_active_users", lambda: stats["daily_active_users"])
        metrics.add_callback("daily_messages", lambda: stats.get("daily_messages", 0))

    logger.info("Reporting stats to matrix.org: %s" % (stats,))
    try:
        yield hs.get_simple_http_client().put_json(
            "https://matrix.org/report-usage-stats/push", stats
        )
    except Exception as e:
        logger.warn("Error reporting stats: %s", e)
def performance_stats_init():
    try:
        import psutil
        process = psutil.Process()
        # Ensure we can fetch both, and make the initial request for cpu_percent
        # so the next request will use this as the initial point.
        process.memory_info().rss
        process.cpu_percent(interval=None)
        logger.info("report_stats can use psutil")
        stats_process.append(process)
    except (ImportError, AttributeError):
        logger.warn(
            "report_stats enabled but psutil is not installed or incorrect version."
            " Disabling reporting of memory/cpu stats."
            " Ensuring psutil is available will help matrix.org track performance"
            " changes across releases."
        )
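# --- Example (not from the original source) ---------------------------------
# Why performance_stats_init() makes a "priming" call: psutil's
# Process.cpu_percent(interval=None) returns 0.0 the first time and then
# measures CPU usage since the previous call. A standalone sketch:
import psutil

proc = psutil.Process()                # the current process
proc.cpu_percent(interval=None)        # priming call; returns 0.0
# ... some time later, e.g. when phoning stats home ...
cpu = proc.cpu_percent(interval=None)  # % of one core since the priming call
rss = proc.memory_info().rss           # resident set size, in bytes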
def setup(config_options):
    """
    Args:
        config_options: The options passed to Synapse. Usually `sys.argv[1:]`.

    Returns:
        HomeServer
    """
    try:
        config = HomeServerConfig.load_or_generate_config(
            "Synapse Homeserver",
            config_options,
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    if not config:
        # If a config isn't returned, and an exception isn't raised, we're just
        # generating config files and shouldn't try to continue.
        sys.exit(0)

    synapse.config.logger.setup_logging(config, use_worker_options=False)

    # check any extra requirements we have now we have a config
    check_requirements(config)

    version_string = "Synapse/" + get_version_string(synapse)

    logger.info("Server hostname: %s", config.server_name)
    logger.info("Server version: %s", version_string)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    database_engine = create_engine(config.database_config)
    config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection

    hs = SynapseHomeServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string=version_string,
        database_engine=database_engine,
    )

    logger.info("Preparing database: %s...", config.database_config['name'])

    try:
        db_conn = hs.get_db_conn(run_new_connection=False)
        prepare_database(db_conn, database_engine, config=config)
        database_engine.on_new_connection(db_conn)

        hs.run_startup_checks(db_conn, database_engine)

        db_conn.commit()
    except UpgradeDatabaseException:
        sys.stderr.write(
            "\nFailed to upgrade database.\n"
            "Have you checked for version specific instructions in"
            " UPGRADES.rst?\n")
        sys.exit(1)

    logger.info("Database prepared in %s.", config.database_config['name'])

    hs.setup()
    hs.start_listening()

    def start():
        hs.get_pusherpool().start()
        hs.get_state_handler().start_caching()
        hs.get_datastore().start_profiling()
        hs.get_datastore().start_doing_background_updates()
        hs.get_replication_layer().start_get_pdu_cache()

        register_memory_metrics(hs)

    reactor.callWhenRunning(start)

    return hs
def run(hs):
    PROFILE_SYNAPSE = False
    if PROFILE_SYNAPSE:
        def profile(func):
            from cProfile import Profile
            from threading import current_thread

            def profiled(*args, **kargs):
                profile = Profile()
                profile.enable()
                func(*args, **kargs)
                profile.disable()
                ident = current_thread().ident
                profile.dump_stats(
                    "/tmp/%s.%s.%i.pstat" % (hs.hostname, func.__name__, ident)
                )

            return profiled

        from twisted.python.threadpool import ThreadPool

        ThreadPool._worker = profile(ThreadPool._worker)
        reactor.run = profile(reactor.run)

    clock = hs.get_clock()

    stats = {}

    def performance_stats_init():
        _stats_process.clear()
        _stats_process.append(
            (int(hs.get_clock().time()), resource.getrusage(resource.RUSAGE_SELF))
        )

    def start_phone_stats_home():
        return run_as_background_process(
            "phone_stats_home", phone_stats_home, hs, stats
        )

    def generate_user_daily_visit_stats():
        return run_as_background_process(
            "generate_user_daily_visits", hs.get_datastore().generate_user_daily_visits
        )

    # Rather than update on per session basis, batch up the requests.
    # If you increase the loop period, the accuracy of user_daily_visits
    # table will decrease
    clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000)

    # monthly active user limiting functionality
    def reap_monthly_active_users():
        return run_as_background_process(
            "reap_monthly_active_users", hs.get_datastore().reap_monthly_active_users
        )

    clock.looping_call(reap_monthly_active_users, 1000 * 60 * 60)
    reap_monthly_active_users()

    @defer.inlineCallbacks
    def generate_monthly_active_users():
        current_mau_count = 0
        reserved_users = ()
        store = hs.get_datastore()
        if hs.config.limit_usage_by_mau or hs.config.mau_stats_only:
            current_mau_count = yield store.get_monthly_active_count()
            reserved_users = yield store.get_registered_reserved_users()
        current_mau_gauge.set(float(current_mau_count))
        registered_reserved_users_mau_gauge.set(float(len(reserved_users)))
        max_mau_gauge.set(float(hs.config.max_mau_value))

    def start_generate_monthly_active_users():
        return run_as_background_process(
            "generate_monthly_active_users", generate_monthly_active_users
        )

    start_generate_monthly_active_users()
    if hs.config.limit_usage_by_mau or hs.config.mau_stats_only:
        clock.looping_call(start_generate_monthly_active_users, 5 * 60 * 1000)
    # End of monthly active user settings

    if hs.config.report_stats:
        logger.info("Scheduling stats reporting for 3 hour intervals")
        clock.looping_call(start_phone_stats_home, 3 * 60 * 60 * 1000)

        # We need to defer this init for the cases that we daemonize
        # otherwise the process ID we get is that of the non-daemon process
        clock.call_later(0, performance_stats_init)

        # We wait 5 minutes to send the first set of stats as the server can
        # be quite busy the first few minutes
        clock.call_later(5 * 60, start_phone_stats_home)

    _base.start_reactor(
        "synapse-homeserver",
        soft_file_limit=hs.config.soft_file_limit,
        gc_thresholds=hs.config.gc_thresholds,
        pid_file=hs.config.pid_file,
        daemonize=hs.config.daemonize,
        print_pidfile=hs.config.print_pidfile,
        logger=logger,
    )
def run(hs):
    PROFILE_SYNAPSE = False
    if PROFILE_SYNAPSE:
        def profile(func):
            from cProfile import Profile
            from threading import current_thread

            def profiled(*args, **kargs):
                profile = Profile()
                profile.enable()
                func(*args, **kargs)
                profile.disable()
                ident = current_thread().ident
                profile.dump_stats("/tmp/%s.%s.%i.pstat" % (
                    hs.hostname, func.__name__, ident
                ))

            return profiled

        from twisted.python.threadpool import ThreadPool
        ThreadPool._worker = profile(ThreadPool._worker)
        reactor.run = profile(reactor.run)

    clock = hs.get_clock()
    start_time = clock.time()

    stats = {}

    @defer.inlineCallbacks
    def phone_stats_home():
        logger.info("Gathering stats for reporting")
        now = int(hs.get_clock().time())
        uptime = int(now - start_time)
        if uptime < 0:
            uptime = 0

        stats["homeserver"] = hs.config.server_name
        stats["timestamp"] = now
        stats["uptime_seconds"] = uptime
        stats["total_users"] = yield hs.get_datastore().count_all_users()

        total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
        stats["total_nonbridged_users"] = total_nonbridged_users

        room_count = yield hs.get_datastore().get_room_count()
        stats["total_room_count"] = room_count

        stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
        stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
        stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()

        daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
        stats["daily_sent_messages"] = daily_sent_messages

        logger.info("Reporting stats to matrix.org: %s" % (stats,))
        try:
            yield hs.get_simple_http_client().put_json(
                "https://matrix.org/report-usage-stats/push", stats
            )
        except Exception as e:
            logger.warn("Error reporting stats: %s", e)

    if hs.config.report_stats:
        logger.info("Scheduling stats reporting for 3 hour intervals")
        clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000)

        # We wait 5 minutes to send the first set of stats as the server can
        # be quite busy the first few minutes
        clock.call_later(5 * 60, phone_stats_home)

    def in_thread():
        # Uncomment to enable tracing of log context changes.
        # sys.settrace(logcontext_tracer)

        # make sure that we run the reactor with the sentinel log context,
        # otherwise other PreserveLoggingContext instances will get confused
        # and complain when they see the logcontext arbitrarily swapping
        # between the sentinel and `run` logcontexts.
        with PreserveLoggingContext():
            change_resource_limit(hs.config.soft_file_limit)
            if hs.config.gc_thresholds:
                gc.set_threshold(*hs.config.gc_thresholds)
            reactor.run()

    if hs.config.daemonize:
        if hs.config.print_pidfile:
            print(hs.config.pid_file)

        daemon = Daemonize(
            app="synapse-homeserver",
            pid=hs.config.pid_file,
            action=lambda: in_thread(),
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )

        daemon.start()
    else:
        in_thread()
def setup(config_options):
    """
    Args:
        config_options: The options passed to Synapse. Usually `sys.argv[1:]`.

    Returns:
        HomeServer
    """
    try:
        config = HomeServerConfig.load_or_generate_config(
            "Synapse Homeserver", config_options)
    except ConfigError as e:
        sys.stderr.write("\nERROR: %s\n" % (e,))
        sys.exit(1)

    if not config:
        # If a config isn't returned, and an exception isn't raised, we're just
        # generating config files and shouldn't try to continue.
        sys.exit(0)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    hs = SynapseHomeServer(
        config.server_name,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
    )

    synapse.config.logger.setup_logging(hs, config, use_worker_options=False)

    logger.info("Setting up server")

    try:
        hs.setup()
    except IncorrectDatabaseSetup as e:
        quit_with_error(str(e))
    except UpgradeDatabaseException as e:
        quit_with_error("Failed to upgrade database: %s" % (e,))

    hs.setup_master()

    @defer.inlineCallbacks
    def do_acme():
        """
        Reprovision an ACME certificate, if it's required.

        Returns:
            Deferred[bool]: Whether the cert has been updated.
        """
        acme = hs.get_acme_handler()

        # Check how long the certificate is active for.
        cert_days_remaining = hs.config.is_disk_cert_valid(allow_self_signed=False)

        # We want to reprovision if cert_days_remaining is None (meaning no
        # certificate exists), or the days remaining number it returns
        # is less than our re-registration threshold.
        provision = False

        if (cert_days_remaining is None or
                cert_days_remaining < hs.config.acme_reprovision_threshold):
            provision = True

        if provision:
            yield acme.provision_certificate()

        return provision

    @defer.inlineCallbacks
    def reprovision_acme():
        """
        Provision a certificate from ACME, if required, and reload the TLS
        certificate if it's renewed.
        """
        reprovisioned = yield do_acme()
        if reprovisioned:
            _base.refresh_certificate(hs)

    @defer.inlineCallbacks
    def start():
        try:
            # Run the ACME provisioning code, if it's enabled.
            if hs.config.acme_enabled:
                acme = hs.get_acme_handler()
                # Start up the webservices which we will respond to ACME
                # challenges with, and then provision.
                yield acme.start_listening()
                yield do_acme()

                # Check if it needs to be reprovisioned every day.
                hs.get_clock().looping_call(reprovision_acme, 24 * 60 * 60 * 1000)

            _base.start(hs, config.listeners)

            hs.get_pusherpool().start()
            hs.get_datastore().db.updates.start_doing_background_updates()
        except Exception:
            # Print the exception and bail out.
            print("Error during startup:", file=sys.stderr)

            # this gives better tracebacks than traceback.print_exc()
            Failure().printTraceback(file=sys.stderr)

            if reactor.running:
                reactor.stop()
            sys.exit(1)

    reactor.callWhenRunning(start)

    return hs
def phone_stats_home(hs, stats, stats_process=_stats_process):
    logger.info("Gathering stats for reporting")
    now = int(hs.get_clock().time())
    uptime = int(now - hs.start_time)
    if uptime < 0:
        uptime = 0

    stats["homeserver"] = hs.config.server_name
    stats["server_context"] = hs.config.server_context
    stats["timestamp"] = now
    stats["uptime_seconds"] = uptime
    version = sys.version_info
    stats["python_version"] = "{}.{}.{}".format(
        version.major, version.minor, version.micro
    )
    stats["total_users"] = yield hs.get_datastore().count_all_users()

    total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
    stats["total_nonbridged_users"] = total_nonbridged_users

    daily_user_type_results = yield hs.get_datastore().count_daily_user_type()
    for name, count in iteritems(daily_user_type_results):
        stats["daily_user_type_" + name] = count

    room_count = yield hs.get_datastore().get_room_count()
    stats["total_room_count"] = room_count

    stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
    stats["monthly_active_users"] = yield hs.get_datastore().count_monthly_users()
    stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
    stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()

    r30_results = yield hs.get_datastore().count_r30_users()
    for name, count in iteritems(r30_results):
        stats["r30_users_" + name] = count

    daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
    stats["daily_sent_messages"] = daily_sent_messages

    stats["cache_factor"] = CACHE_SIZE_FACTOR
    stats["event_cache_size"] = hs.config.event_cache_size

    #
    # Performance statistics
    #
    old = stats_process[0]
    new = (now, resource.getrusage(resource.RUSAGE_SELF))

    stats_process[0] = new

    # Get RSS in bytes
    stats["memory_rss"] = new[1].ru_maxrss

    # Get CPU time in % of a single core, not % of all cores
    used_cpu_time = (new[1].ru_utime + new[1].ru_stime) - (
        old[1].ru_utime + old[1].ru_stime
    )
    if used_cpu_time == 0 or new[0] == old[0]:
        stats["cpu_average"] = 0
    else:
        stats["cpu_average"] = math.floor(used_cpu_time / (new[0] - old[0]) * 100)

    #
    # Database version
    #
    stats["database_engine"] = hs.database_engine.module.__name__
    stats["database_server_version"] = hs.database_engine.server_version

    logger.info("Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats))
    try:
        yield hs.get_proxied_http_client().put_json(
            hs.config.report_stats_endpoint, stats
        )
    except Exception as e:
        logger.warning("Error reporting stats: %s", e)
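# --- Example (not from the original source) ---------------------------------
# Worked sketch of the cpu_average computation above: ru_utime and ru_stime
# are cumulative user/system CPU seconds, so the delta between two snapshots
# divided by the wall-clock delta gives CPU usage as a percentage of one core.
import math
import resource
import time

old_t, old_ru = time.time(), resource.getrusage(resource.RUSAGE_SELF)
sum(i * i for i in range(10 ** 6))  # stand-in for real work
new_t, new_ru = time.time(), resource.getrusage(resource.RUSAGE_SELF)

used = (new_ru.ru_utime + new_ru.ru_stime) - (old_ru.ru_utime + old_ru.ru_stime)
cpu_average = 0 if used == 0 or new_t == old_t else math.floor(
    used / (new_t - old_t) * 100
)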
def setup(config_options):
    """
    Args:
        config_options: The options passed to Synapse. Usually `sys.argv[1:]`.

    Returns:
        HomeServer
    """
    try:
        config = HomeServerConfig.load_or_generate_config(
            "Synapse Homeserver",
            config_options,
        )
    except ConfigError as e:
        sys.stderr.write("\n" + str(e) + "\n")
        sys.exit(1)

    if not config:
        # If a config isn't returned, and an exception isn't raised, we're just
        # generating config files and shouldn't try to continue.
        sys.exit(0)

    sighup_callbacks = []
    synapse.config.logger.setup_logging(
        config,
        use_worker_options=False,
        register_sighup=sighup_callbacks.append,
    )

    def handle_sighup(*args, **kwargs):
        for i in sighup_callbacks:
            i(*args, **kwargs)

    if hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, handle_sighup)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)
    config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection

    hs = SynapseHomeServer(
        config.server_name,
        db_config=config.database_config,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    logger.info("Preparing database: %s...", config.database_config['name'])

    try:
        with hs.get_db_conn(run_new_connection=False) as db_conn:
            prepare_database(db_conn, database_engine, config=config)
            database_engine.on_new_connection(db_conn)

            hs.run_startup_checks(db_conn, database_engine)

            db_conn.commit()
    except UpgradeDatabaseException:
        sys.stderr.write(
            "\nFailed to upgrade database.\n"
            "Have you checked for version specific instructions in"
            " UPGRADES.rst?\n"
        )
        sys.exit(1)

    logger.info("Database prepared in %s.", config.database_config['name'])

    hs.setup()

    def refresh_certificate(*args):
        """
        Refresh the TLS certificates that Synapse is using by re-reading them
        from disk and updating the TLS context factories to use them.
        """
        logging.info("Reloading certificate from disk...")
        hs.config.read_certificate_from_disk()
        hs.tls_server_context_factory = context_factory.ServerContextFactory(config)
        hs.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
        logging.info("Certificate reloaded.")

        logging.info("Updating context factories...")
        for i in hs._listening_services:
            if isinstance(i.factory, TLSMemoryBIOFactory):
                i.factory = TLSMemoryBIOFactory(
                    hs.tls_server_context_factory,
                    False,
                    i.factory.wrappedFactory,
                )
        logging.info("Context factories updated.")

    sighup_callbacks.append(refresh_certificate)

    @defer.inlineCallbacks
    def start():
        try:
            # Check if the certificate is still valid.
            cert_days_remaining = hs.config.is_disk_cert_valid()

            if hs.config.acme_enabled:
                # If ACME is enabled, we might need to provision a certificate
                # before starting.
                acme = hs.get_acme_handler()

                # Start up the webservices which we will respond to ACME
                # challenges with.
                yield acme.start_listening()

                # We want to reprovision if cert_days_remaining is None (meaning no
                # certificate exists), or the days remaining number it returns
                # is less than our re-registration threshold.
                if (cert_days_remaining is None) or (
                    not cert_days_remaining > hs.config.acme_reprovision_threshold
                ):
                    yield acme.provision_certificate()

            # Read the certificate from disk and build the context factories for
            # TLS.
            hs.config.read_certificate_from_disk()
            hs.tls_server_context_factory = context_factory.ServerContextFactory(config)
            hs.tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

            # It is now safe to start your Synapse.
            hs.start_listening()
            hs.get_pusherpool().start()
            hs.get_datastore().start_profiling()
            hs.get_datastore().start_doing_background_updates()
        except Exception as e:
            # If a DeferredList failed (like in listening on the ACME listener),
            # we need to print the subfailure explicitly.
            if isinstance(e, defer.FirstError):
                e.subFailure.printTraceback(sys.stderr)
                sys.exit(1)

            # Something else went wrong when starting. Print it and bail out.
            traceback.print_exc(file=sys.stderr)
            sys.exit(1)

    reactor.callWhenRunning(start)

    return hs
def _listener_http(
    self, config: HomeServerConfig, listener_config: ListenerConfig
) -> Iterable[Port]:
    port = listener_config.port
    bind_addresses = listener_config.bind_addresses
    tls = listener_config.tls
    # Must exist since this is an HTTP listener.
    assert listener_config.http_options is not None
    site_tag = listener_config.http_options.tag
    if site_tag is None:
        site_tag = str(port)

    # We always include a health resource.
    resources: Dict[str, Resource] = {"/health": HealthResource()}

    for res in listener_config.http_options.resources:
        for name in res.names:
            if name == "openid" and "federation" in res.names:
                # Skip loading openid resource if federation is defined
                # since federation resource will include openid
                continue
            resources.update(self._configure_named_resource(name, res.compress))

    additional_resources = listener_config.http_options.additional_resources
    logger.debug("Configuring additional resources: %r", additional_resources)
    module_api = self.get_module_api()
    for path, resmodule in additional_resources.items():
        handler_cls, config = load_module(
            resmodule,
            ("listeners", site_tag, "additional_resources", "<%s>" % (path,)),
        )
        handler = handler_cls(config, module_api)
        if isinstance(handler, Resource):
            resource = handler
        elif hasattr(handler, "handle_request"):
            resource = AdditionalResource(self, handler.handle_request)
        else:
            raise ConfigError(
                "additional_resource %s does not implement a known interface"
                % (resmodule["module"],)
            )
        resources[path] = resource

    # Attach additional resources registered by modules.
    resources.update(self._module_web_resources)
    self._module_web_resources_consumed = True

    # Try to find something useful to serve at '/':
    #
    # 1. Redirect to the web client if it is an HTTP(S) URL.
    # 2. Redirect to the static "Synapse is running" page.
    # 3. Do not redirect and use a blank resource.
    if self.config.server.web_client_location:
        root_resource: Resource = RootOptionsRedirectResource(
            self.config.server.web_client_location
        )
    elif STATIC_PREFIX in resources:
        root_resource = RootOptionsRedirectResource(STATIC_PREFIX)
    else:
        root_resource = OptionsResource()

    site = SynapseSite(
        "synapse.access.%s.%s" % ("https" if tls else "http", site_tag),
        site_tag,
        listener_config,
        create_resource_tree(resources, root_resource),
        self.version_string,
        max_request_body_size=max_request_body_size(self.config),
        reactor=self.get_reactor(),
    )

    if tls:
        # refresh_certificate should have been called before this.
        assert self.tls_server_context_factory is not None
        ports = listen_ssl(
            bind_addresses,
            port,
            site,
            self.tls_server_context_factory,
            reactor=self.get_reactor(),
        )
        logger.info("Synapse now listening on TCP port %d (TLS)", port)
    else:
        ports = listen_tcp(
            bind_addresses,
            port,
            site,
            reactor=self.get_reactor(),
        )
        logger.info("Synapse now listening on TCP port %d", port)

    return ports
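# --- Example (not from the original source) ---------------------------------
# Hypothetical module satisfying the additional_resources contract checked
# above: load_module() returns a class that is constructed as
# handler_cls(config, module_api), and the instance must either be a
# twisted.web Resource or expose a handle_request() callable (which is then
# wrapped in AdditionalResource). The exact handle_request signature below is
# an assumption.
class ExampleHandler:
    def __init__(self, config, module_api):
        self._config = config
        self._api = module_api

    async def handle_request(self, request):
        # Write a trivial response; a real handler would inspect `request`.
        request.setHeader(b"Content-Type", b"text/plain")
        request.write(b"ok")
        request.finish()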
def run(hs):
    PROFILE_SYNAPSE = False
    if PROFILE_SYNAPSE:
        def profile(func):
            from cProfile import Profile
            from threading import current_thread

            def profiled(*args, **kargs):
                profile = Profile()
                profile.enable()
                func(*args, **kargs)
                profile.disable()
                ident = current_thread().ident
                profile.dump_stats("/tmp/%s.%s.%i.pstat" % (
                    hs.hostname, func.__name__, ident
                ))

            return profiled

        from twisted.python.threadpool import ThreadPool
        ThreadPool._worker = profile(ThreadPool._worker)
        reactor.run = profile(reactor.run)

    clock = hs.get_clock()
    start_time = clock.time()

    stats = {}

    @defer.inlineCallbacks
    def phone_stats_home():
        logger.info("Gathering stats for reporting")
        now = int(hs.get_clock().time())
        uptime = int(now - start_time)
        if uptime < 0:
            uptime = 0

        stats["homeserver"] = hs.config.server_name
        stats["timestamp"] = now
        stats["uptime_seconds"] = uptime
        stats["total_users"] = yield hs.get_datastore().count_all_users()

        total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
        stats["total_nonbridged_users"] = total_nonbridged_users

        room_count = yield hs.get_datastore().get_room_count()
        stats["total_room_count"] = room_count

        stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
        stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
        stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()

        daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
        stats["daily_sent_messages"] = daily_sent_messages

        logger.info("Reporting stats to matrix.org: %s" % (stats,))
        try:
            yield hs.get_simple_http_client().put_json(
                "https://matrix.org/report-usage-stats/push", stats
            )
        except Exception as e:
            logger.warn("Error reporting stats: %s", e)

    if hs.config.report_stats:
        logger.info("Scheduling stats reporting for 3 hour intervals")
        clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000)

        # We wait 5 minutes to send the first set of stats as the server can
        # be quite busy the first few minutes
        clock.call_later(5 * 60, phone_stats_home)

    if hs.config.daemonize and hs.config.print_pidfile:
        print(hs.config.pid_file)

    _base.start_reactor(
        "synapse-homeserver",
        hs.config.soft_file_limit,
        hs.config.gc_thresholds,
        hs.config.pid_file,
        hs.config.daemonize,
        hs.config.cpu_affinity,
        logger,
    )
def setup(config_options):
    """
    Args:
        config_options: The options passed to Synapse. Usually `sys.argv[1:]`.

    Returns:
        HomeServer
    """
    try:
        config = HomeServerConfig.load_or_generate_config(
            "Synapse Homeserver",
            config_options,
        )
    except ConfigError as e:
        sys.stderr.write("\n" + str(e) + "\n")
        sys.exit(1)

    if not config:
        # If a config isn't returned, and an exception isn't raised, we're just
        # generating config files and shouldn't try to continue.
        sys.exit(0)

    synapse.config.logger.setup_logging(config, use_worker_options=False)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)
    config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection

    hs = SynapseHomeServer(
        config.server_name,
        db_config=config.database_config,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    logger.info("Preparing database: %s...", config.database_config['name'])

    try:
        with hs.get_db_conn(run_new_connection=False) as db_conn:
            prepare_database(db_conn, database_engine, config=config)
            database_engine.on_new_connection(db_conn)

            hs.run_startup_checks(db_conn, database_engine)

            db_conn.commit()
    except UpgradeDatabaseException:
        sys.stderr.write(
            "\nFailed to upgrade database.\n"
            "Have you checked for version specific instructions in"
            " UPGRADES.rst?\n")
        sys.exit(1)

    logger.info("Database prepared in %s.", config.database_config['name'])

    hs.setup()

    @defer.inlineCallbacks
    def do_acme():
        """
        Reprovision an ACME certificate, if it's required.

        Returns:
            Deferred[bool]: Whether the cert has been updated.
        """
        acme = hs.get_acme_handler()

        # Check how long the certificate is active for.
        cert_days_remaining = hs.config.is_disk_cert_valid(allow_self_signed=False)

        # We want to reprovision if cert_days_remaining is None (meaning no
        # certificate exists), or the days remaining number it returns
        # is less than our re-registration threshold.
        provision = False

        if (cert_days_remaining is None or
                cert_days_remaining < hs.config.acme_reprovision_threshold):
            provision = True

        if provision:
            yield acme.provision_certificate()

        defer.returnValue(provision)

    @defer.inlineCallbacks
    def reprovision_acme():
        """
        Provision a certificate from ACME, if required, and reload the TLS
        certificate if it's renewed.
        """
        reprovisioned = yield do_acme()
        if reprovisioned:
            _base.refresh_certificate(hs)

    @defer.inlineCallbacks
    def start():
        try:
            # Run the ACME provisioning code, if it's enabled.
            if hs.config.acme_enabled:
                acme = hs.get_acme_handler()
                # Start up the webservices which we will respond to ACME
                # challenges with, and then provision.
                yield acme.start_listening()
                yield do_acme()

                # Check if it needs to be reprovisioned every day.
                hs.get_clock().looping_call(reprovision_acme, 24 * 60 * 60 * 1000)

            _base.start(hs, config.listeners)

            hs.get_pusherpool().start()
            hs.get_datastore().start_doing_background_updates()
        except Exception:
            # Print the exception and bail out.
            print("Error during startup:", file=sys.stderr)

            # this gives better tracebacks than traceback.print_exc()
            Failure().printTraceback(file=sys.stderr)

            if reactor.running:
                reactor.stop()
            sys.exit(1)

    reactor.callWhenRunning(start)

    return hs
def setup(config_options):
    """
    Args:
        config_options: The options passed to Synapse. Usually `sys.argv[1:]`.

    Returns:
        HomeServer
    """
    try:
        config = HomeServerConfig.load_or_generate_config(
            "Synapse Homeserver", config_options)
    except ConfigError as e:
        sys.stderr.write("\n")
        for f in format_config_error(e):
            sys.stderr.write(f)
        sys.stderr.write("\n")
        sys.exit(1)

    if not config:
        # If a config isn't returned, and an exception isn't raised, we're just
        # generating config files and shouldn't try to continue.
        sys.exit(0)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    hs = SynapseHomeServer(
        config.server_name,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
    )

    synapse.config.logger.setup_logging(hs, config, use_worker_options=False)

    logger.info("Setting up server")

    try:
        hs.setup()
    except IncorrectDatabaseSetup as e:
        quit_with_error(str(e))
    except UpgradeDatabaseException as e:
        quit_with_error("Failed to upgrade database: %s" % (e,))

    async def do_acme() -> bool:
        """
        Reprovision an ACME certificate, if it's required.

        Returns:
            Whether the cert has been updated.
        """
        acme = hs.get_acme_handler()

        # Check how long the certificate is active for.
        cert_days_remaining = hs.config.is_disk_cert_valid(allow_self_signed=False)

        # We want to reprovision if cert_days_remaining is None (meaning no
        # certificate exists), or the days remaining number it returns
        # is less than our re-registration threshold.
        provision = False

        if (cert_days_remaining is None or
                cert_days_remaining < hs.config.acme_reprovision_threshold):
            provision = True

        if provision:
            await acme.provision_certificate()

        return provision

    async def reprovision_acme():
        """
        Provision a certificate from ACME, if required, and reload the TLS
        certificate if it's renewed.
        """
        reprovisioned = await do_acme()
        if reprovisioned:
            _base.refresh_certificate(hs)

    async def start():
        # Run the ACME provisioning code, if it's enabled.
        if hs.config.acme_enabled:
            acme = hs.get_acme_handler()
            # Start up the webservices which we will respond to ACME
            # challenges with, and then provision.
            await acme.start_listening()
            await do_acme()

            # Check if it needs to be reprovisioned every day.
            hs.get_clock().looping_call(reprovision_acme, 24 * 60 * 60 * 1000)

        # Load the OIDC provider metadatas, if OIDC is enabled.
        if hs.config.oidc_enabled:
            oidc = hs.get_oidc_handler()
            # Loading the provider metadata also ensures the provider config is valid.
            await oidc.load_metadata()

        await _base.start(hs, config.listeners)

        hs.get_datastore().db_pool.updates.start_doing_background_updates()

    register_start(start)

    return hs
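# --- Example (not from the original source) ---------------------------------
# Pattern sketch: the newer setup() variants queue async startup work via
# register_start() instead of reactor.callWhenRunning(), so that exceptions
# raised during startup are routed through Synapse's startup error handling.
# The hook below is hypothetical.
async def log_started():
    logger.info("Startup hooks complete")

register_start(log_started)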
def run(hs):
    PROFILE_SYNAPSE = False
    if PROFILE_SYNAPSE:
        def profile(func):
            from cProfile import Profile
            from threading import current_thread

            def profiled(*args, **kargs):
                profile = Profile()
                profile.enable()
                func(*args, **kargs)
                profile.disable()
                ident = current_thread().ident
                profile.dump_stats("/tmp/%s.%s.%i.pstat" % (
                    hs.hostname, func.__name__, ident
                ))

            return profiled

        from twisted.python.threadpool import ThreadPool
        ThreadPool._worker = profile(ThreadPool._worker)
        reactor.run = profile(reactor.run)

    clock = hs.get_clock()
    start_time = clock.time()

    stats = {}

    # Contains the list of processes we will be monitoring
    # currently either 0 or 1
    stats_process = []

    def start_phone_stats_home():
        return run_as_background_process("phone_stats_home", phone_stats_home)

    @defer.inlineCallbacks
    def phone_stats_home():
        logger.info("Gathering stats for reporting")
        now = int(hs.get_clock().time())
        uptime = int(now - start_time)
        if uptime < 0:
            uptime = 0

        stats["homeserver"] = hs.config.server_name
        stats["timestamp"] = now
        stats["uptime_seconds"] = uptime
        version = sys.version_info
        stats["python_version"] = "{}.{}.{}".format(
            version.major, version.minor, version.micro
        )
        stats["total_users"] = yield hs.get_datastore().count_all_users()

        total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
        stats["total_nonbridged_users"] = total_nonbridged_users

        daily_user_type_results = yield hs.get_datastore().count_daily_user_type()
        for name, count in iteritems(daily_user_type_results):
            stats["daily_user_type_" + name] = count

        room_count = yield hs.get_datastore().get_room_count()
        stats["total_room_count"] = room_count

        stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
        stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
        stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()

        r30_results = yield hs.get_datastore().count_r30_users()
        for name, count in iteritems(r30_results):
            stats["r30_users_" + name] = count

        daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
        stats["daily_sent_messages"] = daily_sent_messages

        stats["cache_factor"] = CACHE_SIZE_FACTOR
        stats["event_cache_size"] = hs.config.event_cache_size

        if len(stats_process) > 0:
            stats["memory_rss"] = 0
            stats["cpu_average"] = 0
            for process in stats_process:
                stats["memory_rss"] += process.memory_info().rss
                stats["cpu_average"] += int(process.cpu_percent(interval=None))

        logger.info("Reporting stats to matrix.org: %s" % (stats,))
        try:
            yield hs.get_simple_http_client().put_json(
                "https://matrix.org/report-usage-stats/push", stats
            )
        except Exception as e:
            logger.warn("Error reporting stats: %s", e)

    def performance_stats_init():
        try:
            process = psutil.Process()
            # Ensure we can fetch both, and make the initial request for cpu_percent
            # so the next request will use this as the initial point.
            process.memory_info().rss
            process.cpu_percent(interval=None)
            logger.info("report_stats can use psutil")
            stats_process.append(process)
        except AttributeError:
            logger.warning("Unable to read memory/cpu stats. Disabling reporting.")

    def generate_user_daily_visit_stats():
        return run_as_background_process(
            "generate_user_daily_visits",
            hs.get_datastore().generate_user_daily_visits,
        )

    # Rather than update on per session basis, batch up the requests.
    # If you increase the loop period, the accuracy of user_daily_visits
    # table will decrease
    clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000)

    # monthly active user limiting functionality
    def reap_monthly_active_users():
        return run_as_background_process(
            "reap_monthly_active_users",
            hs.get_datastore().reap_monthly_active_users,
        )
    clock.looping_call(reap_monthly_active_users, 1000 * 60 * 60)
    reap_monthly_active_users()

    @defer.inlineCallbacks
    def generate_monthly_active_users():
        current_mau_count = 0
        reserved_count = 0
        store = hs.get_datastore()
        if hs.config.limit_usage_by_mau:
            current_mau_count = yield store.get_monthly_active_count()
            reserved_count = yield store.get_registered_reserved_users_count()
        current_mau_gauge.set(float(current_mau_count))
        registered_reserved_users_mau_gauge.set(float(reserved_count))
        max_mau_gauge.set(float(hs.config.max_mau_value))

    def start_generate_monthly_active_users():
        return run_as_background_process(
            "generate_monthly_active_users",
            generate_monthly_active_users,
        )

    start_generate_monthly_active_users()
    if hs.config.limit_usage_by_mau:
        clock.looping_call(start_generate_monthly_active_users, 5 * 60 * 1000)
    # End of monthly active user settings

    if hs.config.report_stats:
        logger.info("Scheduling stats reporting for 3 hour intervals")
        clock.looping_call(start_phone_stats_home, 3 * 60 * 60 * 1000)

        # We need to defer this init for the cases that we daemonize
        # otherwise the process ID we get is that of the non-daemon process
        clock.call_later(0, performance_stats_init)

        # We wait 5 minutes to send the first set of stats as the server can
        # be quite busy the first few minutes
        clock.call_later(5 * 60, start_phone_stats_home)

    if hs.config.daemonize and hs.config.print_pidfile:
        print(hs.config.pid_file)

    _base.start_reactor(
        "synapse-homeserver",
        hs.config.soft_file_limit,
        hs.config.gc_thresholds,
        hs.config.pid_file,
        hs.config.daemonize,
        hs.config.cpu_affinity,
        logger,
    )
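# --- Example (not from the original source) ---------------------------------
# Pattern sketch of the periodic-job scheduling used in run(): looping_call
# takes a plain function and an interval in *milliseconds*, so each job is
# wrapped in run_as_background_process to give it its own logcontext and
# metrics. nightly_job is a hypothetical coroutine.
from synapse.metrics.background_process_metrics import run_as_background_process

def start_nightly_job():
    return run_as_background_process("nightly_job", nightly_job)

clock.looping_call(start_nightly_job, 24 * 60 * 60 * 1000)  # every 24h, in ms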
def setup(config_options):
    """
    Args:
        config_options: The options passed to Synapse. Usually `sys.argv[1:]`.

    Returns:
        HomeServer
    """
    try:
        config = HomeServerConfig.load_or_generate_config(
            "Synapse Homeserver",
            config_options,
        )
    except ConfigError as e:
        sys.stderr.write("\n" + str(e) + "\n")
        sys.exit(1)

    if not config:
        # If a config isn't returned, and an exception isn't raised, we're just
        # generating config files and shouldn't try to continue.
        sys.exit(0)

    synapse.config.logger.setup_logging(config, use_worker_options=False)

    # check any extra requirements we have now we have a config
    check_requirements(config)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    tls_server_context_factory = context_factory.ServerContextFactory(config)
    tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)

    database_engine = create_engine(config.database_config)
    config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection

    hs = SynapseHomeServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        tls_client_options_factory=tls_client_options_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    logger.info("Preparing database: %s...", config.database_config['name'])

    try:
        with hs.get_db_conn(run_new_connection=False) as db_conn:
            prepare_database(db_conn, database_engine, config=config)
            database_engine.on_new_connection(db_conn)

            hs.run_startup_checks(db_conn, database_engine)

            db_conn.commit()
    except UpgradeDatabaseException:
        sys.stderr.write(
            "\nFailed to upgrade database.\n"
            "Have you checked for version specific instructions in"
            " UPGRADES.rst?\n"
        )
        sys.exit(1)

    logger.info("Database prepared in %s.", config.database_config['name'])

    hs.setup()
    hs.start_listening()

    def start():
        hs.get_pusherpool().start()
        hs.get_datastore().start_profiling()
        hs.get_datastore().start_doing_background_updates()

    reactor.callWhenRunning(start)

    return hs
def setup(config_options):
    """
    Args:
        config_options: The options passed to Synapse. Usually `sys.argv[1:]`.

    Returns:
        HomeServer
    """
    try:
        config = HomeServerConfig.load_or_generate_config(
            "Synapse Homeserver",
            config_options,
        )
    except ConfigError as e:
        sys.stderr.write("\n" + str(e) + "\n")
        sys.exit(1)

    if not config:
        # If a config isn't returned, and an exception isn't raised, we're just
        # generating config files and shouldn't try to continue.
        sys.exit(0)

    synapse.config.logger.setup_logging(
        config,
        use_worker_options=False
    )

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)
    config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection

    hs = SynapseHomeServer(
        config.server_name,
        db_config=config.database_config,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    logger.info("Preparing database: %s...", config.database_config['name'])

    try:
        with hs.get_db_conn(run_new_connection=False) as db_conn:
            prepare_database(db_conn, database_engine, config=config)
            database_engine.on_new_connection(db_conn)

            hs.run_startup_checks(db_conn, database_engine)

            db_conn.commit()
    except UpgradeDatabaseException:
        sys.stderr.write(
            "\nFailed to upgrade database.\n"
            "Have you checked for version specific instructions in"
            " UPGRADES.rst?\n"
        )
        sys.exit(1)

    logger.info("Database prepared in %s.", config.database_config['name'])

    hs.setup()
    hs.setup_master()

    @defer.inlineCallbacks
    def do_acme():
        """
        Reprovision an ACME certificate, if it's required.

        Returns:
            Deferred[bool]: Whether the cert has been updated.
        """
        acme = hs.get_acme_handler()

        # Check how long the certificate is active for.
        cert_days_remaining = hs.config.is_disk_cert_valid(
            allow_self_signed=False
        )

        # We want to reprovision if cert_days_remaining is None (meaning no
        # certificate exists), or the days remaining number it returns
        # is less than our re-registration threshold.
        provision = False

        if (
            cert_days_remaining is None
            or cert_days_remaining < hs.config.acme_reprovision_threshold
        ):
            provision = True

        if provision:
            yield acme.provision_certificate()

        defer.returnValue(provision)

    @defer.inlineCallbacks
    def reprovision_acme():
        """
        Provision a certificate from ACME, if required, and reload the TLS
        certificate if it's renewed.
        """
        reprovisioned = yield do_acme()
        if reprovisioned:
            _base.refresh_certificate(hs)

    @defer.inlineCallbacks
    def start():
        try:
            # Run the ACME provisioning code, if it's enabled.
            if hs.config.acme_enabled:
                acme = hs.get_acme_handler()
                # Start up the webservices which we will respond to ACME
                # challenges with, and then provision.
                yield acme.start_listening()
                yield do_acme()

                # Check if it needs to be reprovisioned every day.
                hs.get_clock().looping_call(
                    reprovision_acme, 24 * 60 * 60 * 1000
                )

            _base.start(hs, config.listeners)

            hs.get_pusherpool().start()
            hs.get_datastore().start_doing_background_updates()
        except Exception:
            # Print the exception and bail out.
            print("Error during startup:", file=sys.stderr)

            # this gives better tracebacks than traceback.print_exc()
            Failure().printTraceback(file=sys.stderr)

            if reactor.running:
                reactor.stop()
            sys.exit(1)

    reactor.callWhenRunning(start)

    return hs
def _listener_http(self, config, listener_config):
    port = listener_config["port"]
    bind_addresses = listener_config["bind_addresses"]
    tls = listener_config.get("tls", False)
    site_tag = listener_config.get("tag", port)

    resources = {}
    for res in listener_config["resources"]:
        for name in res["names"]:
            if name == "openid" and "federation" in res["names"]:
                # Skip loading openid resource if federation is defined
                # since federation resource will include openid
                continue
            resources.update(self._configure_named_resource(
                name, res.get("compress", False),
            ))

    additional_resources = listener_config.get("additional_resources", {})
    logger.debug("Configuring additional resources: %r", additional_resources)
    module_api = ModuleApi(self, self.get_auth_handler())
    for path, resmodule in additional_resources.items():
        handler_cls, config = load_module(resmodule)
        handler = handler_cls(config, module_api)
        resources[path] = AdditionalResource(self, handler.handle_request)

    # try to find something useful to redirect '/' to
    if WEB_CLIENT_PREFIX in resources:
        root_resource = RootRedirect(WEB_CLIENT_PREFIX)
    elif STATIC_PREFIX in resources:
        root_resource = RootRedirect(STATIC_PREFIX)
    else:
        root_resource = NoResource()

    root_resource = create_resource_tree(resources, root_resource)

    if tls:
        ports = listen_ssl(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.https.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            ),
            self.tls_server_context_factory,
            reactor=self.get_reactor(),
        )
        logger.info("Synapse now listening on TCP port %d (TLS)", port)
    else:
        ports = listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            ),
            reactor=self.get_reactor(),
        )
        logger.info("Synapse now listening on TCP port %d", port)

    return ports
def _listener_http(self, config, listener_config):
    port = listener_config["port"]
    bind_addresses = listener_config["bind_addresses"]
    tls = listener_config.get("tls", False)
    site_tag = listener_config.get("tag", port)

    if tls and config.no_tls:
        return

    resources = {}
    for res in listener_config["resources"]:
        for name in res["names"]:
            if name == "client":
                client_resource = ClientRestResource(self)
                if res["compress"]:
                    client_resource = gz_wrap(client_resource)

                resources.update({
                    "/_matrix/client/api/v1": client_resource,
                    "/_matrix/client/r0": client_resource,
                    "/_matrix/client/unstable": client_resource,
                    "/_matrix/client/v2_alpha": client_resource,
                    "/_matrix/client/versions": client_resource,
                })

            if name == "federation":
                resources.update({
                    FEDERATION_PREFIX: TransportLayerServer(self),
                })

            if name in ["static", "client"]:
                resources.update({
                    STATIC_PREFIX: File(
                        os.path.join(os.path.dirname(synapse.__file__), "static")
                    ),
                })

            if name in ["media", "federation", "client"]:
                media_repo = MediaRepositoryResource(self)
                resources.update({
                    MEDIA_PREFIX: media_repo,
                    LEGACY_MEDIA_PREFIX: media_repo,
                    CONTENT_REPO_PREFIX: ContentRepoResource(
                        self, self.config.uploads_path
                    ),
                })

            if name in ["keys", "federation"]:
                resources.update({
                    SERVER_KEY_PREFIX: LocalKey(self),
                    SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self),
                })

            if name == "webclient":
                resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self)

            if name == "metrics" and self.get_config().enable_metrics:
                resources[METRICS_PREFIX] = MetricsResource(self)

    if WEB_CLIENT_PREFIX in resources:
        root_resource = RootRedirect(WEB_CLIENT_PREFIX)
    else:
        root_resource = Resource()

    root_resource = create_resource_tree(resources, root_resource)

    if tls:
        for address in bind_addresses:
            reactor.listenSSL(
                port,
                SynapseSite(
                    "synapse.access.https.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                self.tls_server_context_factory,
                interface=address,
            )
    else:
        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address,
            )

    logger.info("Synapse now listening on port %d", port)
def run(hs):
    PROFILE_SYNAPSE = False
    if PROFILE_SYNAPSE:
        def profile(func):
            from cProfile import Profile
            from threading import current_thread

            def profiled(*args, **kargs):
                profile = Profile()
                profile.enable()
                func(*args, **kargs)
                profile.disable()
                ident = current_thread().ident
                profile.dump_stats("/tmp/%s.%s.%i.pstat" % (
                    hs.hostname, func.__name__, ident
                ))

            return profiled

        from twisted.python.threadpool import ThreadPool
        ThreadPool._worker = profile(ThreadPool._worker)
        reactor.run = profile(reactor.run)

    start_time = hs.get_clock().time()

    stats = {}

    @defer.inlineCallbacks
    def phone_stats_home():
        logger.info("Gathering stats for reporting")
        now = int(hs.get_clock().time())
        uptime = int(now - start_time)
        if uptime < 0:
            uptime = 0

        # If the stats directory is empty then this is the first time we've
        # reported stats.
        first_time = not stats

        stats["homeserver"] = hs.config.server_name
        stats["timestamp"] = now
        stats["uptime_seconds"] = uptime
        stats["total_users"] = yield hs.get_datastore().count_all_users()

        room_count = yield hs.get_datastore().get_room_count()
        stats["total_room_count"] = room_count

        stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
        daily_messages = yield hs.get_datastore().count_daily_messages()
        if daily_messages is not None:
            stats["daily_messages"] = daily_messages
        else:
            stats.pop("daily_messages", None)

        if first_time:
            # Add callbacks to report the synapse stats as metrics whenever
            # prometheus requests them, typically every 30s.
            # As some of the stats are expensive to calculate we only update
            # them when synapse phones home to matrix.org every 24 hours.
            metrics = get_metrics_for("synapse.usage")
            metrics.add_callback("timestamp", lambda: stats["timestamp"])
            metrics.add_callback("uptime_seconds", lambda: stats["uptime_seconds"])
            metrics.add_callback("total_users", lambda: stats["total_users"])
            metrics.add_callback("total_room_count", lambda: stats["total_room_count"])
            metrics.add_callback("daily_active_users", lambda: stats["daily_active_users"])
            metrics.add_callback("daily_messages", lambda: stats.get("daily_messages", 0))

        logger.info("Reporting stats to matrix.org: %s" % (stats,))
        try:
            yield hs.get_simple_http_client().put_json(
                "https://matrix.org/report-usage-stats/push", stats
            )
        except Exception as e:
            logger.warn("Error reporting stats: %s", e)

    if hs.config.report_stats:
        phone_home_task = task.LoopingCall(phone_stats_home)
        logger.info("Scheduling stats reporting for 24 hour intervals")
        phone_home_task.start(60 * 60 * 24, now=False)

    def in_thread():
        # Uncomment to enable tracing of log context changes.
        # sys.settrace(logcontext_tracer)

        # make sure that we run the reactor with the sentinel log context,
        # otherwise other PreserveLoggingContext instances will get confused
        # and complain when they see the logcontext arbitrarily swapping
        # between the sentinel and `run` logcontexts.
        with PreserveLoggingContext():
            change_resource_limit(hs.config.soft_file_limit)
            if hs.config.gc_thresholds:
                gc.set_threshold(*hs.config.gc_thresholds)
            reactor.run()

    if hs.config.daemonize:
        if hs.config.print_pidfile:
            print(hs.config.pid_file)

        daemon = Daemonize(
            app="synapse-homeserver",
            pid=hs.config.pid_file,
            action=lambda: in_thread(),
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )

        daemon.start()
    else:
        in_thread()
def run(hs):
    PROFILE_SYNAPSE = False
    if PROFILE_SYNAPSE:
        def profile(func):
            from cProfile import Profile
            from threading import current_thread

            def profiled(*args, **kargs):
                profile = Profile()
                profile.enable()
                func(*args, **kargs)
                profile.disable()

                ident = current_thread().ident
                profile.dump_stats("/tmp/%s.%s.%i.pstat" % (
                    hs.hostname, func.__name__, ident
                ))

            return profiled

        from twisted.python.threadpool import ThreadPool
        ThreadPool._worker = profile(ThreadPool._worker)
        reactor.run = profile(reactor.run)

    clock = hs.get_clock()
    start_time = clock.time()

    stats = {}

    # Contains the list of processes we will be monitoring
    # currently either 0 or 1
    stats_process = []

    def start_phone_stats_home():
        return run_as_background_process("phone_stats_home", phone_stats_home)

    @defer.inlineCallbacks
    def phone_stats_home():
        logger.info("Gathering stats for reporting")
        now = int(hs.get_clock().time())
        uptime = int(now - start_time)
        if uptime < 0:
            uptime = 0

        stats["homeserver"] = hs.config.server_name
        stats["timestamp"] = now
        stats["uptime_seconds"] = uptime
        version = sys.version_info
        stats["python_version"] = "{}.{}.{}".format(
            version.major, version.minor, version.micro
        )
        stats["total_users"] = yield hs.get_datastore().count_all_users()

        total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
        stats["total_nonbridged_users"] = total_nonbridged_users

        daily_user_type_results = yield hs.get_datastore().count_daily_user_type()
        for name, count in iteritems(daily_user_type_results):
            stats["daily_user_type_" + name] = count

        room_count = yield hs.get_datastore().get_room_count()
        stats["total_room_count"] = room_count

        stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
        stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
        stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()

        r30_results = yield hs.get_datastore().count_r30_users()
        for name, count in iteritems(r30_results):
            stats["r30_users_" + name] = count

        daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
        stats["daily_sent_messages"] = daily_sent_messages
        stats["cache_factor"] = CACHE_SIZE_FACTOR
        stats["event_cache_size"] = hs.config.event_cache_size

        if len(stats_process) > 0:
            stats["memory_rss"] = 0
            stats["cpu_average"] = 0
            for process in stats_process:
                stats["memory_rss"] += process.memory_info().rss
                stats["cpu_average"] += int(process.cpu_percent(interval=None))

        stats["database_engine"] = hs.get_datastore().database_engine_name
        stats["database_server_version"] = hs.get_datastore().get_server_version()
        logger.info("Reporting stats to matrix.org: %s" % (stats,))
        try:
            yield hs.get_simple_http_client().put_json(
                "https://matrix.org/report-usage-stats/push", stats
            )
        except Exception as e:
            logger.warn("Error reporting stats: %s", e)

    def performance_stats_init():
        try:
            process = psutil.Process()
            # Ensure we can fetch both, and make the initial request for cpu_percent
            # so the next request will use this as the initial point.
            process.memory_info().rss
            process.cpu_percent(interval=None)
            logger.info("report_stats can use psutil")
            stats_process.append(process)
        except AttributeError:
            logger.warning(
                "Unable to read memory/cpu stats. Disabling reporting."
            )

    def generate_user_daily_visit_stats():
        return run_as_background_process(
            "generate_user_daily_visits",
            hs.get_datastore().generate_user_daily_visits,
        )

    # Rather than update on per session basis, batch up the requests.
    # If you increase the loop period, the accuracy of user_daily_visits
    # table will decrease
    clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000)

    # monthly active user limiting functionality
    def reap_monthly_active_users():
        return run_as_background_process(
            "reap_monthly_active_users",
            hs.get_datastore().reap_monthly_active_users,
        )
    clock.looping_call(reap_monthly_active_users, 1000 * 60 * 60)
    reap_monthly_active_users()

    @defer.inlineCallbacks
    def generate_monthly_active_users():
        current_mau_count = 0
        reserved_count = 0
        store = hs.get_datastore()
        if hs.config.limit_usage_by_mau or hs.config.mau_stats_only:
            current_mau_count = yield store.get_monthly_active_count()
            reserved_count = yield store.get_registered_reserved_users_count()
        current_mau_gauge.set(float(current_mau_count))
        registered_reserved_users_mau_gauge.set(float(reserved_count))
        max_mau_gauge.set(float(hs.config.max_mau_value))

    def start_generate_monthly_active_users():
        return run_as_background_process(
            "generate_monthly_active_users",
            generate_monthly_active_users,
        )

    start_generate_monthly_active_users()
    if hs.config.limit_usage_by_mau or hs.config.mau_stats_only:
        clock.looping_call(start_generate_monthly_active_users, 5 * 60 * 1000)
    # End of monthly active user settings

    if hs.config.report_stats:
        logger.info("Scheduling stats reporting for 3 hour intervals")
        clock.looping_call(start_phone_stats_home, 3 * 60 * 60 * 1000)

        # We need to defer this init for the cases that we daemonize
        # otherwise the process ID we get is that of the non-daemon process
        clock.call_later(0, performance_stats_init)

        # We wait 5 minutes to send the first set of stats as the server can
        # be quite busy the first few minutes
        clock.call_later(5 * 60, start_phone_stats_home)

    if hs.config.daemonize and hs.config.print_pidfile:
        print(hs.config.pid_file)

    _base.start_reactor(
        "synapse-homeserver",
        hs.config.soft_file_limit,
        hs.config.gc_thresholds,
        hs.config.pid_file,
        hs.config.daemonize,
        hs.config.cpu_affinity,
        logger,
    )
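# Editorial aside: clock.looping_call above appears to take its interval in
# milliseconds, while the underlying twisted.internet.task.LoopingCall takes
# seconds, so the 5 * 60 * 1000 batching interval presumably reduces to the
# sketch below. `flush_user_daily_visits` is a hypothetical stand-in for the
# real background job; nothing fires until the reactor is running.
from twisted.internet import reactor, task


def flush_user_daily_visits():
    print("batched user_daily_visits update")


lc = task.LoopingCall(flush_user_daily_visits)
lc.start(300.0, now=False)  # 5 * 60 * 1000 ms == 300 s; now=False skips the
                            # immediate first run, mirroring the "wait before
                            # the first report" pattern above
# reactor.run()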
def run(hs):
    PROFILE_SYNAPSE = False
    if PROFILE_SYNAPSE:
        def profile(func):
            from cProfile import Profile
            from threading import current_thread

            def profiled(*args, **kargs):
                profile = Profile()
                profile.enable()
                func(*args, **kargs)
                profile.disable()

                ident = current_thread().ident
                profile.dump_stats("/tmp/%s.%s.%i.pstat" % (
                    hs.hostname, func.__name__, ident
                ))

            return profiled

        from twisted.python.threadpool import ThreadPool
        ThreadPool._worker = profile(ThreadPool._worker)
        reactor.run = profile(reactor.run)

    clock = hs.get_clock()
    start_time = clock.time()

    stats = {}

    # Contains the list of processes we will be monitoring
    # currently either 0 or 1
    stats_process = []

    @defer.inlineCallbacks
    def phone_stats_home():
        logger.info("Gathering stats for reporting")
        now = int(hs.get_clock().time())
        uptime = int(now - start_time)
        if uptime < 0:
            uptime = 0

        stats["homeserver"] = hs.config.server_name
        stats["timestamp"] = now
        stats["uptime_seconds"] = uptime
        stats["total_users"] = yield hs.get_datastore().count_all_users()

        total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
        stats["total_nonbridged_users"] = total_nonbridged_users

        room_count = yield hs.get_datastore().get_room_count()
        stats["total_room_count"] = room_count

        stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
        stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
        stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()

        r30_results = yield hs.get_datastore().count_r30_users()
        for name, count in r30_results.iteritems():
            stats["r30_users_" + name] = count

        daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
        stats["daily_sent_messages"] = daily_sent_messages
        stats["cache_factor"] = CACHE_SIZE_FACTOR
        stats["event_cache_size"] = hs.config.event_cache_size

        if len(stats_process) > 0:
            stats["memory_rss"] = 0
            stats["cpu_average"] = 0
            for process in stats_process:
                stats["memory_rss"] += process.memory_info().rss
                stats["cpu_average"] += int(process.cpu_percent(interval=None))

        logger.info("Reporting stats to matrix.org: %s" % (stats,))
        try:
            yield hs.get_simple_http_client().put_json(
                "https://matrix.org/report-usage-stats/push", stats
            )
        except Exception as e:
            logger.warn("Error reporting stats: %s", e)

    def performance_stats_init():
        try:
            import psutil
            process = psutil.Process()
            # Ensure we can fetch both, and make the initial request for cpu_percent
            # so the next request will use this as the initial point.
            process.memory_info().rss
            process.cpu_percent(interval=None)
            logger.info("report_stats can use psutil")
            stats_process.append(process)
        except (ImportError, AttributeError):
            logger.warn(
                "report_stats enabled but psutil is not installed or incorrect version."
                " Disabling reporting of memory/cpu stats."
                " Ensuring psutil is available will help matrix.org track performance"
                " changes across releases."
            )

    if hs.config.report_stats:
        logger.info("Scheduling stats reporting for 3 hour intervals")
        clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000)

        # We need to defer this init for the cases that we daemonize
        # otherwise the process ID we get is that of the non-daemon process
        clock.call_later(0, performance_stats_init)

        # We wait 5 minutes to send the first set of stats as the server can
        # be quite busy the first few minutes
        clock.call_later(5 * 60, phone_stats_home)

    if hs.config.daemonize and hs.config.print_pidfile:
        print(hs.config.pid_file)

    _base.start_reactor(
        "synapse-homeserver",
        hs.config.soft_file_limit,
        hs.config.gc_thresholds,
        hs.config.pid_file,
        hs.config.daemonize,
        hs.config.cpu_affinity,
        logger,
    )
def _listener_http(self, config, listener_config):
    port = listener_config["port"]
    bind_addresses = listener_config["bind_addresses"]
    tls = listener_config.get("tls", False)
    site_tag = listener_config.get("tag", port)

    resources = {}
    for res in listener_config["resources"]:
        for name in res["names"]:
            if name == "openid" and "federation" in res["names"]:
                # Skip loading openid resource if federation is defined
                # since federation resource will include openid
                continue
            resources.update(self._configure_named_resource(
                name, res.get("compress", False),
            ))

    additional_resources = listener_config.get("additional_resources", {})
    logger.debug("Configuring additional resources: %r", additional_resources)
    module_api = ModuleApi(self, self.get_auth_handler())
    for path, resmodule in additional_resources.items():
        handler_cls, config = load_module(resmodule)
        handler = handler_cls(config, module_api)
        resources[path] = AdditionalResource(self, handler.handle_request)

    # try to find something useful to redirect '/' to
    if WEB_CLIENT_PREFIX in resources:
        root_resource = RootRedirect(WEB_CLIENT_PREFIX)
    elif STATIC_PREFIX in resources:
        root_resource = RootRedirect(STATIC_PREFIX)
    else:
        root_resource = NoResource()

    root_resource = create_resource_tree(resources, root_resource)

    if tls:
        ports = listen_ssl(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.https.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            ),
            self.tls_server_context_factory,
            reactor=self.get_reactor(),
        )
        logger.info("Synapse now listening on TCP port %d (TLS)", port)
    else:
        ports = listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            ),
            reactor=self.get_reactor(),
        )
        logger.info("Synapse now listening on TCP port %d", port)

    return ports
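# Editorial aside on why this version returns `ports`: Twisted's listen
# calls hand back IListeningPort objects, whose stopListening() allows a
# clean shutdown or reload later. A minimal sketch with a trivial echo
# factory (Echo and the port choice are illustrative only):
from twisted.internet import reactor
from twisted.internet.protocol import Factory, Protocol


class Echo(Protocol):
    def dataReceived(self, data):
        self.transport.write(data)


factory = Factory.forProtocol(Echo)
listening_port = reactor.listenTCP(0, factory, interface="127.0.0.1")
print("listening on", listening_port.getHost().port)  # 0 = any free port

d = listening_port.stopListening()  # Deferred fires once the socket closes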
def setup(config_options: List[str]) -> SynapseHomeServer:
    """
    Args:
        config_options: The options passed to Synapse. Usually `sys.argv[1:]`.

    Returns:
        A homeserver instance.
    """
    try:
        config = HomeServerConfig.load_or_generate_config(
            "Synapse Homeserver", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n")
        for f in format_config_error(e):
            sys.stderr.write(f)
        sys.stderr.write("\n")
        sys.exit(1)

    if not config:
        # If a config isn't returned, and an exception isn't raised, we're just
        # generating config files and shouldn't try to continue.
        sys.exit(0)

    if config.worker.worker_app:
        raise ConfigError(
            "You have specified `worker_app` in the config but are attempting to start a non-worker "
            "instance. Please use `python -m synapse.app.generic_worker` instead (or remove the option if this is the main process)."
        )

    events.USE_FROZEN_DICTS = config.server.use_frozen_dicts
    synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage

    if config.server.gc_seconds:
        synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds

    if (
        config.registration.enable_registration
        and not config.registration.enable_registration_without_verification
    ):
        if (
            not config.captcha.enable_registration_captcha
            and not config.registration.registrations_require_3pid
            and not config.registration.registration_requires_token
        ):
            raise ConfigError(
                "You have enabled open registration without any verification. This is a known vector for "
                "spam and abuse. If you would like to allow public registration, please consider adding email, "
                "captcha, or token-based verification. Otherwise this check can be removed by setting the "
                "`enable_registration_without_verification` config option to `true`."
            )

    hs = SynapseHomeServer(
        config.server.server_name,
        config=config,
        version_string="Synapse/" + get_distribution_version_string("matrix-synapse"),
    )

    synapse.config.logger.setup_logging(hs, config, use_worker_options=False)

    logger.info("Setting up server")

    try:
        hs.setup()
    except Exception as e:
        handle_startup_exception(e)

    async def start() -> None:
        # Load the OIDC provider metadatas, if OIDC is enabled.
        if hs.config.oidc.oidc_enabled:
            oidc = hs.get_oidc_handler()
            # Loading the provider metadata also ensures the provider config is valid.
            await oidc.load_metadata()

        await _base.start(hs)

        hs.get_datastores().main.db_pool.updates.start_doing_background_updates()

    register_start(start)

    return hs
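# Editorial aside: the open-registration guard above, restated as a pure
# predicate for clarity. An illustrative sketch against the same config
# attributes, not a Synapse API.
def open_registration_is_unverified(config) -> bool:
    """True when registration is open but no verification method is set."""
    return (
        config.registration.enable_registration
        and not config.registration.enable_registration_without_verification
        and not config.captcha.enable_registration_captcha
        and not config.registration.registrations_require_3pid
        and not config.registration.registration_requires_token
    )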
def _listener_http(
    self, config: HomeServerConfig, listener_config: ListenerConfig
):
    port = listener_config.port
    bind_addresses = listener_config.bind_addresses
    tls = listener_config.tls
    site_tag = listener_config.http_options.tag
    if site_tag is None:
        site_tag = str(port)

    # We always include a health resource.
    resources = {"/health": HealthResource()}

    for res in listener_config.http_options.resources:
        for name in res.names:
            if name == "openid" and "federation" in res.names:
                # Skip loading openid resource if federation is defined
                # since federation resource will include openid
                continue
            resources.update(self._configure_named_resource(name, res.compress))

    additional_resources = listener_config.http_options.additional_resources
    logger.debug("Configuring additional resources: %r", additional_resources)
    module_api = self.get_module_api()
    for path, resmodule in additional_resources.items():
        handler_cls, config = load_module(
            resmodule,
            ("listeners", site_tag, "additional_resources", "<%s>" % (path,)),
        )
        handler = handler_cls(config, module_api)
        if IResource.providedBy(handler):
            resource = handler
        elif hasattr(handler, "handle_request"):
            resource = AdditionalResource(self, handler.handle_request)
        else:
            raise ConfigError(
                "additional_resource %s does not implement a known interface"
                % (resmodule["module"],)
            )
        resources[path] = resource

    # try to find something useful to redirect '/' to
    if WEB_CLIENT_PREFIX in resources:
        root_resource = RootOptionsRedirectResource(WEB_CLIENT_PREFIX)
    elif STATIC_PREFIX in resources:
        root_resource = RootOptionsRedirectResource(STATIC_PREFIX)
    else:
        root_resource = OptionsResource()

    root_resource = create_resource_tree(resources, root_resource)

    if tls:
        ports = listen_ssl(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.https.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            ),
            self.tls_server_context_factory,
            reactor=self.get_reactor(),
        )
        logger.info("Synapse now listening on TCP port %d (TLS)", port)
    else:
        ports = listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
            ),
            reactor=self.get_reactor(),
        )
        logger.info("Synapse now listening on TCP port %d", port)

    return ports
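# Editorial aside: the additional_resources duck-typing above, as a
# standalone sketch. A module may provide a full twisted.web IResource or
# merely an object with a handle_request method; HypotheticalHandler and the
# bare Resource() stand-in (for AdditionalResource) are illustrative only.
from twisted.web.resource import IResource, Resource


class HypotheticalHandler:
    def handle_request(self, request):
        return b"handled"


handler = HypotheticalHandler()
if IResource.providedBy(handler):
    resource = handler  # already a resource; mount it directly
elif hasattr(handler, "handle_request"):
    resource = Resource()  # the real code wraps handler.handle_request instead
else:
    raise TypeError("handler implements no known interface")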