def read_config(self, config: JsonDict, **kwargs: Any) -> None:
    """Parse the metrics-related options of the homeserver config.

    Populates metrics/reporting attributes on this config object.

    Raises:
        ConfigError: if sentry integration is enabled but the sentry
            dependency is not installed, or no DSN is configured.
    """
    self.enable_metrics = config.get("enable_metrics", False)

    # May be True, False or None; None means the admin has not answered
    # the report-stats question yet.
    self.report_stats = config.get("report_stats", None)
    self.report_stats_endpoint = config.get(
        "report_stats_endpoint", "https://matrix.org/report-usage-stats/push"
    )
    self.metrics_port = config.get("metrics_port")
    self.metrics_bind_host = config.get("metrics_bind_host", "127.0.0.1")

    if self.enable_metrics:
        _metrics_config = config.get("metrics_flags") or {}
        self.metrics_flags = MetricsFlags(**_metrics_config)
    else:
        self.metrics_flags = MetricsFlags.all_off()

    # Sentry integration is considered enabled merely by the presence of
    # the "sentry" section.
    self.sentry_enabled = "sentry" in config
    if self.sentry_enabled:
        try:
            check_requirements("sentry")
        except DependencyException as e:
            # Chain the dependency error so the missing-package details
            # are preserved in tracebacks.
            raise ConfigError(
                e.message  # noqa: B306, DependencyException.message is a property
            ) from e

        self.sentry_dsn = config["sentry"].get("dsn")
        if not self.sentry_dsn:
            raise ConfigError(
                "sentry.dsn field is required when sentry integration is enabled"
            )
def main() -> None:
    """Entry point: verify dependencies, set up the homeserver, and run it."""
    with LoggingContext("main"):
        # Fail fast if any mandatory dependency is missing or too old.
        check_requirements()

        homeserver = setup(sys.argv[1:])

        # Unless the logging config opts out, route stdout/stderr through
        # the logging framework.
        if not homeserver.config.logging.no_redirect_stdio:
            redirect_stdio_to_logs()

        run(homeserver)
def read_config(self, config: "JsonDict", **kwargs: Any) -> None:
    """Read the `redis` section of the config, if present."""
    section = config.get("redis") or {}
    self.redis_enabled = section.get("enabled", False)

    # Nothing further to parse when redis support is switched off.
    if not self.redis_enabled:
        return

    # Redis support requires the optional `redis` dependency group.
    check_requirements("redis")

    self.redis_host = section.get("host", "localhost")
    self.redis_port = section.get("port", 6379)
    self.redis_password = section.get("password")
def test_mandatory_dependency(self) -> None:
    """A missing or outdated mandatory package must raise DependencyException."""
    requires_patch = patch(
        "synapse.util.check_dependencies.metadata.requires",
        return_value=["dummypkg >= 1"],
    )
    with requires_patch:
        # Package absent entirely -> error.
        with self.mock_installed_package(None):
            self.assertRaises(DependencyException, check_requirements)
        # Present but too old -> error.
        with self.mock_installed_package(old):
            self.assertRaises(DependencyException, check_requirements)
        # New enough -> no exception expected.
        with self.mock_installed_package(new):
            check_requirements()
def read_config(self, config: "JsonDict", **kwargs: Any) -> None:
    """Parse the `opentracing` section of the homeserver config.

    Raises:
        ConfigError: if tracing is enabled but the opentracing dependency
            is missing, or if `homeserver_whitelist` /
            `force_tracing_for_users` are malformed.
    """
    opentracing_config = config.get("opentracing")
    if opentracing_config is None:
        opentracing_config = {}

    self.opentracer_enabled = opentracing_config.get("enabled", False)

    self.jaeger_config = opentracing_config.get(
        "jaeger_config",
        {
            "sampler": {"type": "const", "param": 1},
            "logging": False,
        },
    )

    self.force_tracing_for_users: Set[str] = set()

    if not self.opentracer_enabled:
        return

    try:
        check_requirements("opentracing")
    except DependencyException as e:
        # Chain the dependency error so the missing-package details are
        # preserved in tracebacks.
        raise ConfigError(
            e.message  # noqa: B306, DependencyException.message is a property
        ) from e

    # The tracer is enabled so sanitize the config
    self.opentracer_whitelist: List[str] = opentracing_config.get(
        "homeserver_whitelist", []
    )
    if not isinstance(self.opentracer_whitelist, list):
        raise ConfigError("Tracer homeserver_whitelist config is malformed")

    force_tracing_for_users = opentracing_config.get("force_tracing_for_users", [])
    if not isinstance(force_tracing_for_users, list):
        raise ConfigError(
            "Expected a list", ("opentracing", "force_tracing_for_users")
        )
    for i, u in enumerate(force_tracing_for_users):
        if not isinstance(u, str):
            raise ConfigError(
                "Expected a string",
                ("opentracing", "force_tracing_for_users", f"index {i}"),
            )
        self.force_tracing_for_users.add(u)
def test_generic_check_of_optional_dependency(self) -> None:
    """An optional package may be absent, but if installed must not be old."""
    with patch(
        "synapse.util.check_dependencies.metadata.requires",
        return_value=["dummypkg >= 1; extra == 'cool-extra'"],
    ):
        # Missing optional package: acceptable, nothing should raise.
        with self.mock_installed_package(None):
            check_requirements()
        # Installed but stale: the generic check must complain.
        with self.mock_installed_package(old):
            self.assertRaises(DependencyException, check_requirements)
        # Installed and up to date: nothing should raise.
        with self.mock_installed_package(new):
            check_requirements()
def test_release_candidates_satisfy_dependency(self) -> None:
    """Release candidates should count towards satisfying a dependency.

    Regression test, see #12176.
    """
    with patch(
        "synapse.util.check_dependencies.metadata.requires",
        return_value=["dummypkg >= 1"],
    ):
        # An RC of a too-old version still fails the constraint...
        with self.mock_installed_package(old_release_candidate):
            self.assertRaises(DependencyException, check_requirements)
        # ...but an RC of a new-enough version passes without raising.
        with self.mock_installed_package(new_release_candidate):
            check_requirements()
def test_check_for_extra_dependencies(self) -> None:
    """A package required by an activated extra must be present and new enough."""
    requires_patch = patch(
        "synapse.util.check_dependencies.metadata.requires",
        return_value=["dummypkg >= 1; extra == 'cool-extra'"],
    )
    extras_patch = patch(
        "synapse.util.check_dependencies.RUNTIME_EXTRAS", {"cool-extra"}
    )
    with requires_patch, extras_patch:
        # Missing -> error when checking the extra.
        with self.mock_installed_package(None):
            self.assertRaises(DependencyException, check_requirements, "cool-extra")
        # Too old -> error when checking the extra.
        with self.mock_installed_package(old):
            self.assertRaises(DependencyException, check_requirements, "cool-extra")
        # New enough -> nothing should raise.
        with self.mock_installed_package(new):
            check_requirements("cool-extra")
def test_checks_ignore_dev_dependencies(self) -> None:
    """Both generic and per-extra checks should ignore dev dependencies."""
    with patch(
        "synapse.util.check_dependencies.metadata.requires",
        return_value=["dummypkg >= 1; extra == 'mypy'"],
    ), patch("synapse.util.check_dependencies.RUNTIME_EXTRAS", {"cool-extra"}):
        # The 'mypy' extra is a dev-only dependency group, so none of
        # these calls should raise, whatever version is installed.
        for installed in (None, old, new):
            with self.mock_installed_package(installed):
                check_requirements()
                check_requirements("cool-extra")
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
    """Parse the media-repository options of the homeserver config.

    Sets upload/thumbnail/URL-preview/media-retention attributes, and
    builds the list of storage-provider definitions (the providers
    themselves are instantiated later, by whichever worker needs them).

    Raises:
        ConfigError: on conflicting or insecure media settings.
    """
    # Only enable the media repo if either the media repo is enabled or the
    # current worker app is the media repo.
    if (self.root.server.enable_media_repo is False
            and config.get("worker_app") != "synapse.app.media_repository"):
        self.can_load_media_repo = False
        return
    else:
        self.can_load_media_repo = True

    # Whether this instance should be the one to run the background jobs to
    # e.g clean up old URL previews.
    self.media_instance_running_background_jobs = config.get(
        "media_instance_running_background_jobs",
    )

    # Size/pixel limits are human-readable strings (e.g. "50M") parsed to bytes.
    self.max_upload_size = self.parse_size(
        config.get("max_upload_size", "50M"))
    self.max_image_pixels = self.parse_size(
        config.get("max_image_pixels", "32M"))
    self.max_spider_size = self.parse_size(
        config.get("max_spider_size", "10M"))

    self.media_store_path = self.ensure_directory(
        config.get("media_store_path", "media_store"))

    backup_media_store_path = config.get("backup_media_store_path")

    synchronous_backup_media_store = config.get(
        "synchronous_backup_media_store", False)

    storage_providers = config.get("media_storage_providers", [])

    if backup_media_store_path:
        # The legacy backup option is expressed as an implicit file_system
        # storage provider; it cannot be combined with explicit providers.
        if storage_providers:
            raise ConfigError(
                "Cannot use both 'backup_media_store_path' and 'storage_providers'"
            )

        storage_providers = [{
            "module": "file_system",
            "store_local": True,
            "store_synchronous": synchronous_backup_media_store,
            "store_remote": True,
            "config": {
                "directory": backup_media_store_path
            },
        }]

    # This is a list of config that can be used to create the storage
    # providers. The entries are tuples of (Class, class_config,
    # MediaStorageProviderConfig), where Class is the class of the provider,
    # the class_config the config to pass to it, and
    # MediaStorageProviderConfig are options for StorageProviderWrapper.
    #
    # We don't create the storage providers here as not all workers need
    # them to be started.
    self.media_storage_providers: List[tuple] = []

    for i, provider_config in enumerate(storage_providers):
        # We special case the module "file_system" so as not to need to
        # expose FileStorageProviderBackend
        if provider_config["module"] == "file_system":
            provider_config["module"] = (
                "synapse.rest.media.v1.storage_provider"
                ".FileStorageProviderBackend")

        provider_class, parsed_config = load_module(
            provider_config, ("media_storage_providers", "<item %i>" % i))

        wrapper_config = MediaStorageProviderConfig(
            provider_config.get("store_local", False),
            provider_config.get("store_remote", False),
            provider_config.get("store_synchronous", False),
        )

        self.media_storage_providers.append(
            (provider_class, parsed_config, wrapper_config))

    self.dynamic_thumbnails = config.get("dynamic_thumbnails", False)
    self.thumbnail_requirements = parse_thumbnail_requirements(
        config.get("thumbnail_sizes", DEFAULT_THUMBNAIL_SIZES))

    self.url_preview_enabled = config.get("url_preview_enabled", False)
    if self.url_preview_enabled:
        # URL previews fetch remote content, so they need both the optional
        # dependency group and an explicit IP blacklist (unless a proxy
        # handles outbound traffic).
        try:
            check_requirements("url_preview")
        except DependencyException as e:
            raise ConfigError(
                e.message  # noqa: B306, DependencyException.message is a property
            )

        proxy_env = getproxies_environment()
        if "url_preview_ip_range_blacklist" not in config:
            if "http" not in proxy_env or "https" not in proxy_env:
                raise ConfigError(
                    "For security, you must specify an explicit target IP address "
                    "blacklist in url_preview_ip_range_blacklist for url previewing "
                    "to work")
        else:
            # A blacklist is configured AND a proxy is set: the blacklist is
            # ignored in that case, so warn the admin.
            if "http" in proxy_env or "https" in proxy_env:
                logger.warning("".join(HTTP_PROXY_SET_WARNING))

        # we always blacklist '0.0.0.0' and '::', which are supposed to be
        # unroutable addresses.
        self.url_preview_ip_range_blacklist = generate_ip_set(
            config["url_preview_ip_range_blacklist"],
            ["0.0.0.0", "::"],
            config_path=("url_preview_ip_range_blacklist", ),
        )

        self.url_preview_ip_range_whitelist = generate_ip_set(
            config.get("url_preview_ip_range_whitelist", ()),
            config_path=("url_preview_ip_range_whitelist", ),
        )

        self.url_preview_url_blacklist = config.get(
            "url_preview_url_blacklist", ())

        self.url_preview_accept_language = config.get(
            "url_preview_accept_language") or ["en"]

    media_retention = config.get("media_retention") or {}

    # Retention lifetimes default to None, meaning "keep forever".
    self.media_retention_local_media_lifetime_ms = None
    local_media_lifetime = media_retention.get("local_media_lifetime")
    if local_media_lifetime is not None:
        self.media_retention_local_media_lifetime_ms = self.parse_duration(
            local_media_lifetime)

    self.media_retention_remote_media_lifetime_ms = None
    remote_media_lifetime = media_retention.get("remote_media_lifetime")
    if remote_media_lifetime is not None:
        self.media_retention_remote_media_lifetime_ms = self.parse_duration(
            remote_media_lifetime)
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
    """Populate this config object with values from `config`.

    This method does NOT resize existing or future caches: use `resize_all_caches`.
    We use two separate methods so that we can reject bad config before applying it.
    """
    self.event_cache_size = self.parse_size(
        config.get("event_cache_size", _DEFAULT_EVENT_CACHE_SIZE)
    )
    self.cache_factors = {}

    cache_config = config.get("caches") or {}
    self.global_factor = cache_config.get("global_factor", _DEFAULT_FACTOR_SIZE)
    if not isinstance(self.global_factor, (int, float)):
        raise ConfigError("caches.global_factor must be a number.")

    # Load cache factors from the config
    individual_factors = cache_config.get("per_cache_factors") or {}
    if not isinstance(individual_factors, dict):
        raise ConfigError("caches.per_cache_factors must be a dictionary")

    # Canonicalise the cache names *before* updating with the environment
    # variables.
    individual_factors = {
        _canonicalise_cache_name(key): val
        for key, val in individual_factors.items()
    }

    # Override factors from environment if necessary
    # (variables of the form <_CACHE_PREFIX>_<cache_name>=<factor>).
    individual_factors.update(
        {
            _canonicalise_cache_name(key[len(_CACHE_PREFIX) + 1 :]): float(val)
            for key, val in self._environ.items()
            if key.startswith(_CACHE_PREFIX + "_")
        }
    )

    for cache, factor in individual_factors.items():
        if not isinstance(factor, (int, float)):
            raise ConfigError(
                "caches.per_cache_factors.%s must be a number" % (cache,)
            )
        self.cache_factors[cache] = factor

    self.track_memory_usage = cache_config.get("track_memory_usage", False)
    if self.track_memory_usage:
        # Memory tracking needs the optional `cache_memory` dependency group.
        try:
            check_requirements("cache_memory")
        except DependencyException as e:
            raise ConfigError(
                e.message  # noqa: B306, DependencyException.message is a property
            )

    expire_caches = cache_config.get("expire_caches", True)
    cache_entry_ttl = cache_config.get("cache_entry_ttl", "30m")

    if expire_caches:
        self.expiry_time_msec = self.parse_duration(cache_entry_ttl)
    else:
        # None disables time-based cache expiry entirely.
        self.expiry_time_msec = None

    # Backwards compatibility support for the now-removed "expiry_time"
    # config flag. If set, it overrides expiry_time_msec computed above.
    expiry_time = cache_config.get("expiry_time")
    if expiry_time and expire_caches:
        logger.warning(
            "You have set two incompatible options, expiry_time and expire_caches. Please only use the "
            "expire_caches and cache_entry_ttl options and delete the expiry_time option as it is "
            "deprecated."
        )
    if expiry_time:
        logger.warning(
            "Expiry_time is a deprecated option, please use the expire_caches and cache_entry_ttl options "
            "instead."
        )
        self.expiry_time_msec = self.parse_duration(expiry_time)

    self.cache_autotuning = cache_config.get("cache_autotuning")
    if self.cache_autotuning:
        # NOTE(review): these sub-keys look mandatory once cache_autotuning
        # is set — parse_size(None)/parse_duration(None) would fail if a key
        # is omitted; confirm validation happens elsewhere (e.g. schema).
        max_memory_usage = self.cache_autotuning.get("max_cache_memory_usage")
        self.cache_autotuning["max_cache_memory_usage"] = self.parse_size(
            max_memory_usage
        )

        target_mem_size = self.cache_autotuning.get("target_cache_memory_usage")
        self.cache_autotuning["target_cache_memory_usage"] = self.parse_size(
            target_mem_size
        )

        min_cache_ttl = self.cache_autotuning.get("min_cache_ttl")
        self.cache_autotuning["min_cache_ttl"] = self.parse_duration(min_cache_ttl)

    self.sync_response_cache_duration = self.parse_duration(
        cache_config.get("sync_response_cache_duration", "2m")
    )
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
    """Parse the `saml2_config` section of the homeserver config.

    Loads the SAML2 service-provider configuration and the user mapping
    provider module, validating that the module implements the required
    interface.

    Raises:
        ConfigError: if the saml2 dependency is missing, the mapping
            provider is incomplete, or config_path lacks a CONFIG property.
    """
    self.saml2_enabled = False

    saml2_config = config.get("saml2_config")

    # The section defaults to enabled when present; bail out early if it
    # is absent or explicitly disabled.
    if not saml2_config or not saml2_config.get("enabled", True):
        return

    # Without either an inline sp_config or an external config_path there
    # is nothing to configure.
    if not saml2_config.get("sp_config") and not saml2_config.get(
            "config_path"):
        return

    try:
        check_requirements("saml2")
    except DependencyException as e:
        raise ConfigError(
            e.message  # noqa: B306, DependencyException.message is a property
        )

    self.saml2_enabled = True

    attribute_requirements = saml2_config.get(
        "attribute_requirements") or []
    self.attribute_requirements = _parse_attribute_requirements_def(
        attribute_requirements)

    self.saml2_grandfathered_mxid_source_attribute = saml2_config.get(
        "grandfathered_mxid_source_attribute", "uid")

    self.saml2_idp_entityid = saml2_config.get("idp_entityid", None)

    # user_mapping_provider may be None if the key is present but has no value
    ump_dict = saml2_config.get("user_mapping_provider") or {}

    # Use the default user mapping provider if not set
    ump_dict.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER)
    if ump_dict.get("module") == LEGACY_USER_MAPPING_PROVIDER:
        ump_dict["module"] = DEFAULT_USER_MAPPING_PROVIDER

    # Ensure a config is present
    ump_dict["config"] = ump_dict.get("config") or {}

    if ump_dict["module"] == DEFAULT_USER_MAPPING_PROVIDER:
        # Load deprecated options for use by the default module
        old_mxid_source_attribute = saml2_config.get(
            "mxid_source_attribute")
        if old_mxid_source_attribute:
            logger.warning(
                "The config option saml2_config.mxid_source_attribute is deprecated. "
                "Please use saml2_config.user_mapping_provider.config"
                ".mxid_source_attribute instead.")
            ump_dict["config"][
                "mxid_source_attribute"] = old_mxid_source_attribute

        old_mxid_mapping = saml2_config.get("mxid_mapping")
        if old_mxid_mapping:
            logger.warning(
                "The config option saml2_config.mxid_mapping is deprecated. Please "
                "use saml2_config.user_mapping_provider.config.mxid_mapping instead."
            )
            ump_dict["config"]["mxid_mapping"] = old_mxid_mapping

    # Retrieve an instance of the module's class
    # Pass the config dictionary to the module for processing
    (
        self.saml2_user_mapping_provider_class,
        self.saml2_user_mapping_provider_config,
    ) = load_module(ump_dict, ("saml2_config", "user_mapping_provider"))

    # Ensure loaded user mapping module has defined all necessary methods
    # Note parse_config() is already checked during the call to load_module
    required_methods = [
        "get_saml_attributes",
        "saml_response_to_user_attributes",
        "get_remote_user_id",
    ]
    missing_methods = [
        method for method in required_methods
        if not hasattr(self.saml2_user_mapping_provider_class, method)
    ]
    if missing_methods:
        raise ConfigError(
            "Class specified by saml2_config."
            "user_mapping_provider.module is missing required "
            "methods: %s" % (", ".join(missing_methods), ))

    # Get the desired saml auth response attributes from the module
    saml2_config_dict = self._default_saml_config_dict(
        *self.saml2_user_mapping_provider_class.get_saml_attributes(
            self.saml2_user_mapping_provider_config))
    # Inline sp_config overrides the defaults derived above.
    _dict_merge(merge_dict=saml2_config.get("sp_config", {}),
                into_dict=saml2_config_dict)

    config_path = saml2_config.get("config_path", None)
    if config_path is not None:
        # An external pysaml2 config module wins over the inline config.
        mod = load_python_module(config_path)
        config_dict_from_file = getattr(mod, "CONFIG", None)
        if config_dict_from_file is None:
            raise ConfigError(
                "Config path specified by saml2_config.config_path does not "
                "have a CONFIG property.")
        _dict_merge(merge_dict=config_dict_from_file,
                    into_dict=saml2_config_dict)

    # Imported lazily: pysaml2 is an optional dependency, only needed once
    # we know SAML is enabled.
    import saml2.config

    self.saml2_sp_config = saml2.config.SPConfig()
    self.saml2_sp_config.load(saml2_config_dict)

    # session lifetime: in milliseconds
    self.saml2_session_lifetime = self.parse_duration(
        saml2_config.get("saml_session_lifetime", "15m"))
# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import sys from typing import Container from synapse.util import check_dependencies logger = logging.getLogger(__name__) try: check_dependencies.check_requirements() except check_dependencies.DependencyException as e: sys.stderr.writelines( e.message # noqa: B306, DependencyException.message is a property ) sys.exit(1) def check_bind_error( e: Exception, address: str, bind_addresses: Container[str] ) -> None: """ This method checks an exception occurred while binding on 0.0.0.0. If :: is specified in the bind addresses a warning is shown. The exception is still raised otherwise.