Example #1
def test_ipset_clear():
    ipset = IPSet(['10.0.0.0/16'])
    ipset.update(IPRange('10.1.0.0', '10.1.255.255'))
    assert ipset == IPSet(['10.0.0.0/15'])

    ipset.clear()
    assert ipset == IPSet([])
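A note on why the comparison before clear() holds: IPSet always compacts its members into the minimal list of CIDRs, so the added range (equivalent to 10.1.0.0/16) merges with the adjacent /16 into one /15. A minimal sketch of that normalization, assuming only netaddr:

from netaddr import IPRange, IPSet

s = IPSet(['10.0.0.0/16'])
s.update(IPRange('10.1.0.0', '10.1.255.255'))  # same span as 10.1.0.0/16

# The two adjacent /16s are stored as one merged /15.
print(s.iter_cidrs())  # [IPNetwork('10.0.0.0/15')]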
Example #2
class FederationConfig(Config):
    section = "federation"

    def read_config(self, config, **kwargs):
        # FIXME: federation_domain_whitelist needs sytests
        self.federation_domain_whitelist = None  # type: Optional[dict]
        federation_domain_whitelist = config.get("federation_domain_whitelist",
                                                 None)

        if federation_domain_whitelist is not None:
            # turn the whitelist into a hash for speed of lookup
            self.federation_domain_whitelist = {}

            for domain in federation_domain_whitelist:
                self.federation_domain_whitelist[domain] = True

        self.federation_ip_range_blacklist = config.get(
            "federation_ip_range_blacklist", [])

        # Attempt to create an IPSet from the given ranges
        try:
            self.federation_ip_range_blacklist = IPSet(
                self.federation_ip_range_blacklist)

            # Always blacklist 0.0.0.0, ::
            self.federation_ip_range_blacklist.update(["0.0.0.0", "::"])
        except Exception as e:
            raise ConfigError(
                "Invalid range(s) provided in federation_ip_range_blacklist: %s"
                % e)

    def generate_config_section(self, config_dir_path, server_name, **kwargs):
        return """\
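The try/except above is where the validation actually happens: netaddr raises AddrFormatError for unparseable entries, and read_config converts that into a ConfigError. A hedged sketch of the same pattern in isolation; ConfigError here is a local stand-in for Synapse's class:

from netaddr import AddrFormatError, IPSet

class ConfigError(Exception):
    """Stand-in for synapse.config._base.ConfigError."""

def parse_ip_blacklist(ranges):
    try:
        blacklist = IPSet(ranges)
        blacklist.update(["0.0.0.0", "::"])  # always-blacklisted unroutables
        return blacklist
    except (AddrFormatError, ValueError) as e:
        raise ConfigError(
            "Invalid range(s) provided in federation_ip_range_blacklist: %s" % e)

parse_ip_blacklist(["10.0.0.0/8"])   # returns an IPSet
# parse_ip_blacklist(["not-an-ip"])  # raises ConfigError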
Example #3
def test_ipset_updates():
    s1 = IPSet(['192.0.2.0/25'])
    s2 = IPSet(['192.0.2.128/25'])

    s1.update(s2)
    assert s1 == IPSet(['192.0.2.0/24'])

    s1.update(['192.0.0.0/24', '192.0.1.0/24', '192.0.3.0/24'])
    assert s1 == IPSet(['192.0.0.0/22'])
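The final assertion holds because the three added /24s plus the existing 192.0.2.0/24 are exactly the four subnets of one /22. netaddr's cidr_merge shows the same compaction directly:

from netaddr import cidr_merge

print(cidr_merge(['192.0.0.0/24', '192.0.1.0/24',
                  '192.0.2.0/24', '192.0.3.0/24']))
# [IPNetwork('192.0.0.0/22')]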
Example #4
def test_ipset_exceptions():
    s1 = IPSet(['10.0.0.1'])

    #   IPSet objects are not hashable.
    with pytest.raises(TypeError):
        hash(s1)

    #   Bad update argument type.
    with pytest.raises(TypeError):
        s1.update(42)
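IPSet is mutable, which is why hash() raises TypeError above. When hashable keys are needed, the immutable IPNetwork and IPAddress objects serve instead; a small sketch:

from netaddr import IPAddress, IPNetwork

# IPNetwork and IPAddress are immutable value objects, so they hash fine.
seen = {IPNetwork('10.0.0.0/24'), IPNetwork('10.0.1.0/24')}
labels = {IPAddress('10.0.0.1'): 'gateway'}

print(IPNetwork('10.0.0.0/24') in seen)   # True
print(labels[IPAddress('10.0.0.1')])      # 'gateway'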
Example #5
def parse_and_group_target_specs(target_specs, nocidr):
    str_targets = set()
    ipset_targets = IPSet()
    for target_spec in target_specs:
        if (target_spec.startswith(".") or
            (target_spec[0].isalpha() or target_spec[-1].isalpha())
                or (nocidr and "/" in target_spec)):
            str_targets.add(target_spec)
        else:
            if "-" in target_spec:
                start_ip, post_dash_segment = target_spec.split("-")
                end_ip = start_ip.rsplit(".", maxsplit=1)[0] + "." + \
                    post_dash_segment
                target_spec = IPRange(start_ip, end_ip)
            elif "*" in target_spec:
                target_spec = glob_to_iprange(target_spec)
            else:  # str IP addresses and str CIDR notations
                target_spec = (target_spec, )
            ipset_targets.update(IPSet(target_spec))
    return (str_targets, ipset_targets)
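Two branches above lean on netaddr conversions: glob_to_iprange turns a wildcard spec into an IPRange, and the dash branch rebuilds the end address from the start address's leading octets. Both in isolation:

from netaddr import IPRange, glob_to_iprange

print(glob_to_iprange('192.0.2.*'))  # IPRange('192.0.2.0', '192.0.2.255')

start_ip, tail = '10.0.0.1-50'.split('-')
end_ip = start_ip.rsplit('.', maxsplit=1)[0] + '.' + tail
print(IPRange(start_ip, end_ip))     # IPRange('10.0.0.1', '10.0.0.50')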
Example #6
def test_ipset_adding_and_removing_members_ip_addresses_as_ints():
    s1 = IPSet(['10.0.0.0/25'])

    s1.add('10.0.0.0/24')
    assert s1 == IPSet(['10.0.0.0/24'])

    integer1 = int(IPAddress('10.0.0.1'))
    integer2 = int(IPAddress('fe80::'))
    integer3 = int(IPAddress('10.0.0.2'))

    s2 = IPSet([integer1, integer2])
    assert s2 == IPSet(['10.0.0.1/32', 'fe80::/128'])

    s2.add(integer3)
    assert s2 == IPSet(['10.0.0.1/32', '10.0.0.2/32', 'fe80::/128'])

    s2.remove(integer2)
    assert s2 == IPSet(['10.0.0.1/32', '10.0.0.2/32'])

    s2.update([integer2])
    assert s2 == IPSet(['10.0.0.1/32', '10.0.0.2/32', 'fe80::/128'])
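The integer round-trips work because netaddr accepts plain ints wherever it accepts addresses, inferring the IP version from the magnitude of the value, as the assertions above show. A quick check:

from netaddr import IPAddress, IPSet

n4 = int(IPAddress('10.0.0.1'))   # 167772161, fits in 32 bits -> IPv4
n6 = int(IPAddress('fe80::'))     # larger than 2**32 - 1      -> IPv6

print(IPAddress(n4))              # IPAddress('10.0.0.1')
print(IPSet([n4, n6]) == IPSet(['10.0.0.1/32', 'fe80::/128']))  # True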
Example #7
    def parse_ip_ranges(data, key):
        '''
        Parses IP range or CIDR mask.
        :param data: Dictionary where to look for the value.
        :param key:  Key for the value to be parsed.
        :return: Set of IP ranges and networks.
        '''

        if key not in data:
            return None

        ranges = [[s.strip() for s in r.split('-')] for r in data[key]]
        result = IPSet()
        for r in ranges:
            resolved_set = resolve_service_tag_alias(r[0])
            if resolved_set is not None:
                result.update(resolved_set)
            else:
                if len(r) > 2:
                    raise Exception('Invalid range. Use x.x.x.x-y.y.y.y or x.x.x.x or x.x.x.x/y.')
                result.add(IPRange(*r) if len(r) == 2 else IPNetwork(r[0]))
        return result
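Hypothetical usage of parse_ip_ranges, assuming it is in scope as a plain function and stubbing resolve_service_tag_alias (the real helper, defined elsewhere in this source, returns an IPSet for known service-tag names and None otherwise):

from netaddr import IPNetwork, IPRange, IPSet

def resolve_service_tag_alias(value):
    return None  # stub: treat every entry as a literal range/network

data = {'allowed': ['10.0.0.1 - 10.0.0.5', '192.168.0.0/24']}
result = parse_ip_ranges(data, 'allowed')
print(result.issuperset(IPSet(['10.0.0.3/32'])))  # True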
Example #8
def test_ipset_updates():
    s1 = IPSet(['192.0.2.0/25'])
    s2 = IPSet(['192.0.2.128/25'])

    s1.update(s2)
    assert s1 == IPSet(['192.0.2.0/24'])

    s1.update(['192.0.0.0/24', '192.0.1.0/24', '192.0.3.0/24'])
    assert s1 == IPSet(['192.0.0.0/22'])

    expected = IPSet(['192.0.1.0/24', '192.0.2.0/24'])

    s3 = IPSet(['192.0.1.0/24'])
    s3.update(IPRange('192.0.2.0', '192.0.2.255'))
    assert s3 == expected

    s4 = IPSet(['192.0.1.0/24'])
    s4.update([
        IPRange('192.0.2.0', '192.0.2.100'),
        IPRange('192.0.2.50', '192.0.2.255')
    ])
    assert s4 == expected
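update() mutates the set in place; the same compaction is available non-destructively through the union operator. A short sketch of the equivalent immutable form:

from netaddr import IPRange, IPSet

s = IPSet(['192.0.1.0/24']) | IPSet(IPRange('192.0.2.0', '192.0.2.255').cidrs())
print(s == IPSet(['192.0.1.0/24', '192.0.2.0/24']))  # True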
Example #9
class ContentRepositoryConfig(Config):
    def read_config(self, config, **kwargs):
        self.max_upload_size = self.parse_size(
            config.get("max_upload_size", "10M"))
        self.max_image_pixels = self.parse_size(
            config.get("max_image_pixels", "32M"))
        self.max_spider_size = self.parse_size(
            config.get("max_spider_size", "10M"))

        self.media_store_path = self.ensure_directory(
            config.get("media_store_path", "media_store"))

        backup_media_store_path = config.get("backup_media_store_path")

        synchronous_backup_media_store = config.get(
            "synchronous_backup_media_store", False)

        storage_providers = config.get("media_storage_providers", [])

        if backup_media_store_path:
            if storage_providers:
                raise ConfigError(
                    "Cannot use both 'backup_media_store_path' and 'storage_providers'"
                )

            storage_providers = [{
                "module": "file_system",
                "store_local": True,
                "store_synchronous": synchronous_backup_media_store,
                "store_remote": True,
                "config": {
                    "directory": backup_media_store_path
                },
            }]

        # This is a list of config that can be used to create the storage
        # providers. The entries are tuples of (Class, class_config,
        # MediaStorageProviderConfig), where Class is the class of the provider,
        # class_config is the config to pass to it, and
        # MediaStorageProviderConfig are options for StorageProviderWrapper.
        #
        # We don't create the storage providers here as not all workers need
        # them to be started.
        self.media_storage_providers = []

        for provider_config in storage_providers:
            # We special case the module "file_system" so as not to need to
            # expose FileStorageProviderBackend
            if provider_config["module"] == "file_system":
                provider_config["module"] = (
                    "synapse.rest.media.v1.storage_provider"
                    ".FileStorageProviderBackend")

            provider_class, parsed_config = load_module(provider_config)

            wrapper_config = MediaStorageProviderConfig(
                provider_config.get("store_local", False),
                provider_config.get("store_remote", False),
                provider_config.get("store_synchronous", False),
            )

            self.media_storage_providers.append(
                (provider_class, parsed_config, wrapper_config))

        self.uploads_path = self.ensure_directory(
            config.get("uploads_path", "uploads"))
        self.dynamic_thumbnails = config.get("dynamic_thumbnails", False)
        self.thumbnail_requirements = parse_thumbnail_requirements(
            config.get("thumbnail_sizes", DEFAULT_THUMBNAIL_SIZES))
        self.url_preview_enabled = config.get("url_preview_enabled", False)
        if self.url_preview_enabled:
            try:
                import lxml

                lxml  # To stop unused lint.
            except ImportError:
                raise ConfigError(MISSING_LXML)

            try:
                from netaddr import IPSet
            except ImportError:
                raise ConfigError(MISSING_NETADDR)

            if "url_preview_ip_range_blacklist" not in config:
                raise ConfigError(
                    "For security, you must specify an explicit target IP address "
                    "blacklist in url_preview_ip_range_blacklist for url previewing "
                    "to work")

            self.url_preview_ip_range_blacklist = IPSet(
                config["url_preview_ip_range_blacklist"])

            # we always blacklist '0.0.0.0' and '::', which are supposed to be
            # unroutable addresses.
            self.url_preview_ip_range_blacklist.update(["0.0.0.0", "::"])

            self.url_preview_ip_range_whitelist = IPSet(
                config.get("url_preview_ip_range_whitelist", ()))

            self.url_preview_url_blacklist = config.get(
                "url_preview_url_blacklist", ())

    def generate_config_section(self, data_dir_path, **kwargs):
        media_store = os.path.join(data_dir_path, "media_store")
        uploads_path = os.path.join(data_dir_path, "uploads")

        formatted_thumbnail_sizes = "".join(THUMBNAIL_SIZE_YAML % s
                                            for s in DEFAULT_THUMBNAIL_SIZES)
        # strip final NL
        formatted_thumbnail_sizes = formatted_thumbnail_sizes[:-1]

        return (r"""
        # Directory where uploaded images and attachments are stored.
        #
        media_store_path: "%(media_store)s"

        # Media storage providers allow media to be stored in different
        # locations.
        #
        #media_storage_providers:
        #  - module: file_system
        #    # Whether to write new local files.
        #    store_local: false
        #    # Whether to write new remote media
        #    store_remote: false
        #    # Whether to block upload requests waiting for write to this
        #    # provider to complete
        #    store_synchronous: false
        #    config:
        #       directory: /mnt/some/other/directory

        # Directory where in-progress uploads are stored.
        #
        uploads_path: "%(uploads_path)s"

        # The largest allowed upload size in bytes
        #
        #max_upload_size: 10M

        # Maximum number of pixels that will be thumbnailed
        #
        #max_image_pixels: 32M

        # Whether to generate new thumbnails on the fly to precisely match
        # the resolution requested by the client. If true then whenever
        # a new resolution is requested by the client the server will
        # generate a new thumbnail. If false the server will pick a thumbnail
        # from a precalculated list.
        #
        #dynamic_thumbnails: false

        # List of thumbnails to precalculate when an image is uploaded.
        #
        #thumbnail_sizes:
%(formatted_thumbnail_sizes)s

        # Is the preview URL API enabled?
        #
        # 'false' by default: uncomment the following to enable it (and specify a
        # url_preview_ip_range_blacklist blacklist).
        #
        #url_preview_enabled: true

        # List of IP address CIDR ranges that the URL preview spider is denied
        # from accessing.  There are no defaults: you must explicitly
        # specify a list for URL previewing to work.  You should specify any
        # internal services in your network that you do not want synapse to try
        # to connect to, otherwise anyone in any Matrix room could cause your
        # synapse to issue arbitrary GET requests to your internal services,
        # causing serious security issues.
        #
        # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
        # listed here, since they correspond to unroutable addresses.)
        #
        # This must be specified if url_preview_enabled is set. It is recommended that
        # you uncomment the following list as a starting point.
        #
        #url_preview_ip_range_blacklist:
        #  - '127.0.0.0/8'
        #  - '10.0.0.0/8'
        #  - '172.16.0.0/12'
        #  - '192.168.0.0/16'
        #  - '100.64.0.0/10'
        #  - '169.254.0.0/16'
        #  - '::1/128'
        #  - 'fe80::/64'
        #  - 'fc00::/7'

        # List of IP address CIDR ranges that the URL preview spider is allowed
        # to access even if they are specified in url_preview_ip_range_blacklist.
        # This is useful for specifying exceptions to wide-ranging blacklisted
        # target IP ranges - e.g. for enabling URL previews for a specific private
        # website only visible in your network.
        #
        #url_preview_ip_range_whitelist:
        #   - '192.168.1.1'

        # Optional list of URL matches that the URL preview spider is
        # denied from accessing.  You should use url_preview_ip_range_blacklist
        # in preference to this, otherwise someone could define a public DNS
        # entry that points to a private IP address and circumvent the blacklist.
        # This is more useful if you know there is an entire shape of URL that
        # you know you will never want synapse to try to spider.
        #
        # Each list entry is a dictionary of url component attributes as returned
        # by urlparse.urlsplit as applied to the absolute form of the URL.  See
        # https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit
        # The values of the dictionary are treated as a filename match pattern
        # applied to that component of URLs, unless they start with a ^ in which
        # case they are treated as a regular expression match.  If all the
        # specified component matches for a given list item succeed, the URL is
        # blacklisted.
        #
        #url_preview_url_blacklist:
        #  # blacklist any URL with a username in its URI
        #  - username: '******'
        #
        #  # blacklist all *.google.com URLs
        #  - netloc: 'google.com'
        #  - netloc: '*.google.com'
        #
        #  # blacklist all plain HTTP URLs
        #  - scheme: 'http'
        #
        #  # blacklist http(s)://www.acme.com/foo
        #  - netloc: 'www.acme.com'
        #    path: '/foo'
        #
        #  # blacklist any URL with a literal IPv4 address
        #  - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'

        # The largest allowed URL preview spidering size in bytes
        #
        #max_spider_size: 10M
        """ % locals())
Example #10
def run():
    #Check old processes are not running
    check_old_processes_running()

    #Load config
    config_data = ConfigParser.RawConfigParser()
    config_data.read('chain_config.cfg')
    EXT_TX_PER_LOOP = config_data.getint(
        'Transaction generation and processing', 'ext_tx_per_loop')
    USER_TX_PER_LOOP = config_data.getint(
        'Transaction generation and processing', 'user_tx_per_loop')
    LOOPS_PER_TX = config_data.getint('Transaction generation and processing',
                                      'loops_per_tx')
    START_TIME = config_data.getint('Transaction generation and processing',
                                    'start_time')
    DKG_RENEWAL_INTERVAL = config_data.getint('Consensus',
                                              'dkg_renewal_interval')
    BLOCK_TIME = config_data.getint('General', 'block_time')
    TIMEOUT = config_data.getint('General', 'timeout')
    DKG_TIMEOUT = config_data.getint('Consensus', 'dkg_timeout')

    #Telemetry initialization
    start_time = time.time()

    init_logger()

    delays_blocks = open_log_block_process_delay()
    delays_txs = open_log_delay_create_txs()

    #Modules initialization
    mainLog.info("Initializing Chain")
    chain = init_chain()
    last_block = chain.get_head_block().header.number
    mainLog.debug("Last block: %s", last_block)

    mainLog.info("Initializing Keystore")
    keys, addresses = init_keystore()
    mainLog.info("Loaded %s keys", len(keys))
    mainLog.info("----------------LOADED ADDRESSES---------------------")
    mainLog.info([add.encode("HEX") for add in addresses])
    mainLog.info("----------------END ADDRESS LIST---------------------")

    mainLog.info("Initializing P2P")
    p2p = init_p2p(chain.get_head_block().header.number)

    mainLog.info("Initializing Parser")
    user = init_user()
    try:
        user.read_transactions("./transactions.txt")
    except Exception as e:
        mainLog.critical("Exception while reading user transactions")
        mainLog.exception(e)
        p2p.stop()
        sys.exit(0)

    mainLog.info("Initializing OOR")
    oor = init_oor()

    #Variables initialization
    end = 0
    count = 0
    dkg_on = False
    exit_from_dkg = False
    processed_user = 0
    user_tx_count = 0

    block_num = chain.get_head_block().header.number
    timestamp = chain.get_head_block().header.timestamp
    last_random_no = chain.get_head_block().header.random_number.encode('hex')
    current_group_sig = chain.get_head_block().header.group_sig
    current_group_key = chain.get_current_group_key()

    my_dkgIDs = []

    myIPs = IPSet()
    for i in range(len(keys)):
        myIPs.update(chain.get_own_ips(keys[i].address))
    mainLog.info("Own IPs at startup are: %s", myIPs)

    dkg_group = chain.get_current_dkg_group()
    in_dkg_group, my_dkgIDs = find_me_in_dkg_group(dkg_group, addresses)

    mainLog.info("Initializing Consensus")
    consensus = cons.Consensus(dkg_group, my_dkgIDs, last_random_no,
                               current_group_key, block_num, current_group_sig,
                               current_group_key)

    isMaster = load_master_private_keys(consensus, my_dkgIDs)
    if not in_dkg_group:
        consensus.store_ids(dkg_group)
    else:
        mainLog.warning(
            "TODO: nodes that belong to the DKG group and connect after the DKG do not have private keys, so they shouldn't create shares. Needs to be disabled!"
        )
        if not isMaster:
            create_shares = False
    if isMaster:
        consensus.create_shares(last_random_no, block_num, count)
        create_shares = True
    cache = Share_Cache()

    before = time.time()
    last_random_no, block_num, count = perform_bootstrap(
        chain, p2p, consensus, delays_blocks, delays_txs, DKG_RENEWAL_INTERVAL,
        last_random_no, block_num, count)
    after = time.time()
    elapsed = after - before
    mainLog.info("Bootstrap finished. Elapsed time: %s", elapsed)
    timestamp = chain.get_head_block().header.timestamp
    current_group_sig = chain.get_head_block().header.group_sig
    current_group_key = chain.get_current_group_key()
    last_random_no = chain.get_head_block().header.random_number.encode('hex')

    from_bootstrap = True

    while (not end):

        #Process new blocks. DOES NOT support bootstrap
        try:
            block = p2p.get_block()
            while block is not None:
                #FALSE: Only nodes that do NOT belong to the DKG get stuck here until they receive the block with the new group key
                mainLog.info("Received new block no. %s", block.number)
                mainLog.info("Block Data: Group Signature: %s --Random number: %s --Group Key: %s", block.header.group_sig, \
                             block.header.random_number.encode('hex'),  block.header.group_pubkey)
                res = False
                try:
                    signer = consensus.get_next_signer(block.count)
                    expected_message = str(last_random_no) + str(
                        block_num) + str(count)
                    #Use in case the OR in the next line does not work
                    #                    if exit_from_dkg or dkg_on:
                    #                        usePrevGroupKey = True
                    #                    else:
                    #                        usePrevGroupKey = False
                    if consensus.verify_group_sig(expected_message,
                                                  block.header.group_sig,
                                                  exit_from_dkg or dkg_on):
                        mainLog.debug("Verify Group Signature OK")
                    else:
                        raise BlsInvalidGroupSignature()
                    if in_dkg_group and exit_from_dkg:
                        # We ONLY enter here if the node belongs to the DKG group and just finished a new DKG
                        exit_from_dkg = False
                        if block.header.group_pubkey != consensus.get_current_group_key(
                        ):
                            mainLog.error(
                                "FATAL ERROR. A node in the DKG group received a block with a Group Public Key not matching the one generated by the DKG."
                            )
                            raise Exception(
                                "Unexpected group key in block header. Stopping"
                            )
                        signer = chain.extract_first_ip_from_address(
                            signing_addr)
                    elif dkg_on:
                        # We ONLY enter here if the nodes DOES NOT belong to the DKG group and is waiting for a current DKG to finish
                        signer = chain.extract_first_ip_from_address(
                            signing_addr)
                        consensus.set_current_group_key(
                            block.header.group_pubkey)
                        dkg_on = False
                    mainLog.debug(
                        "Verifying new block signature, signer should be %s",
                        signer)
                    mainLog.debug("Owner of the previous IP is address %s",
                                  chain.get_addr_from_ip(signer).encode("HEX"))
                    mainLog.debug("Coinbase in the block is: %s",
                                  block.header.coinbase.encode("HEX"))
                    res = chain.verify_block_signature(block, signer)
                except UnsignedBlock as e:
                    mainLog.exception(e)
                    mainLog.error("Unsigned block. Skipping")
                    res = False
                except InvalidBlockSigner as e:
                    mainLog.exception(e)
                    mainLog.error(
                        "Block no. %s signature is invalid! Ignoring.",
                        block.number)
                    res = False
                except BlsInvalidGroupSignature as e:
                    mainLog.exception(e)
                    mainLog.error(
                        "Block no. %s: unexpected or invalid BLS group signature! Ignoring.",
                        block.number)
                    res = False
                except Exception as e:
                    mainLog.error(
                        "Unrecoverable error when checking signature of block no. %s. Exiting.",
                        block.number)
                    mainLog.exception(e)
                    raise e
                if res:
                    # correct block
                    before = time.time()
                    chain.add_block(block)
                    after = time.time()
                    delay = after - before
                    delays_blocks.write(
                        str(block.number) + ',' + str(delay) + '\n')
                    delays_txs.write("Added new block no." +
                                     str(block.number) + '\n')
                    timestamp = chain.get_head_block().header.timestamp
                    block_num = chain.get_head_block().header.number
                    last_random_no = block.header.random_number.encode('hex')
                    if from_bootstrap:
                        from_bootstrap = False
                        consensus.bootstrap_only_set_random_no_manual(
                            last_random_no)
                        consensus.bootstrap_only_set_group_sig_manual(
                            block.header.group_sig)
                    #after a correct block: reset BLS and create and broadcast new shares (like receiving a new block)
                    consensus.calculate_next_signer(block_num)
                    consensus.reset_bls()
                    if in_dkg_group and create_shares:
                        count = 0
                        new_shares = consensus.create_shares(
                            last_random_no, block_num, count)
                        for share in new_shares:
                            p2p.broadcast_share(share)
                            cache.store_bls(share)
                            mainLog.info("Sent a new share to the network")
                else:
                    mainLog.error(
                        "Received an erroneous block. Ignoring block...")

                block = p2p.get_block()
        except Exception as e:
            mainLog.critical("Exception while processing a received block")
            mainLog.exception(e)
            p2p.stop()
            sys.exit(0)

        #Process transactions from the network
        processed = 0
        try:
            tx_ext = p2p.get_tx()
            while tx_ext is not None:
                #Check that the transaction has not been sent from this node or already processed
                processed = processed + 1
                if not (chain.in_chain(tx_ext) or chain.in_pool(tx_ext)):
                    mainLog.info("Received external transaction: to: %s hash %s", \
                    tx_ext.to.encode('HEX'), tx_ext.hash.encode('HEX'))
                    try:
                        chain.add_pending_transaction(tx_ext)
                        # Correct tx
                        p2p.broadcast_tx(tx_ext)
                    except Exception as e:
                        mainLog.info("Discarded invalid external transaction: to: %s", \
                        tx_ext.to.encode("HEX"))
                        mainLog.exception(e)
                if processed < EXT_TX_PER_LOOP:
                    tx_ext = p2p.get_tx()
                else:
                    tx_ext = None
        except Exception as e:
            mainLog.critical(
                "Exception while processing a received transaction")
            mainLog.exception(e)
            p2p.stop()
            sys.exit(0)

        #Check if the node has to sign the next block. Control also timeouts
        #Before we wait for the block time
        try:
            timestamp = chain.get_head_block().header.timestamp
            block_num = chain.get_head_block().header.number
            if ((time.time() - timestamp) >= BLOCK_TIME):
                #Time to create a new block
                if (time.time() - timestamp) >= TIMEOUT:
                    #The expected signer didn't create a block. Trigger a recalculation of the random number to select a new signer
                    #TODO: does NOT work because it will enter all the time when the timeout expires
                    #                    count = count + 1
                    #                    timeout_expired =  True
                    #                    if count == 0:
                    #                        consensus.reset_bls()
                    #                    consensus.create_share(count)
                    #                    p2p.broadcast_share(new_share)
                    #                    mainLog.info("Timeout expired. Recalculated random no and sent a new share to the network")
                    mainLog.info("Contextual information: Current time: %s --Last block timestamp: %s --Last random number: %s --Last block number: %s", \
                                 time.time(), timestamp, consensus.get_current_random_no(), block_num)
                    raise Exception(
                        "FATAL ERROR, block timeout expired. The feature to re-calculate the random number after a block timeout expiry is not implemented. Stopping..."
                    )
                if (consensus.shares_ready() or exit_from_dkg) and not dkg_on:
                    if not exit_from_dkg:
                        #Normal operation
                        signer = consensus.get_next_signer(count)
                        signing_addr = chain.get_addr_from_ip(signer)
                        #When we exit a new DKG round, the variable signing_addr stores the next signer (we are temporarily overriding the BLS RN generation)
                    if signing_addr in addresses:
                        exit_from_dkg = False
                        mainLog.info(
                            "This node has to sign a block, selected IP: %s",
                            signer)
                        mainLog.info("Associated address: %s",
                                     signing_addr.encode("HEX"))
                        new_block = chain.create_block(signing_addr, consensus.get_current_random_no(), \
                                    consensus.get_current_group_key(), consensus.get_current_group_sig(), count)
                        try:
                            key_pos = addresses.index(signing_addr)
                        except:
                            raise Exception(
                                "FATAL ERROR: This node does not own the indicated key to sign the block (not present in the keystore)"
                            )
                        sig_key = keys[key_pos]
                        new_block.sign(sig_key.privkey)
                        mainLog.info("Created new block no. %s, timestamp %s, coinbase %s", \
                            new_block.header.number, new_block.header.timestamp, new_block.header.coinbase.encode("HEX"))
                        mainLog.info(
                            "New block signature data: v %s -- r %s -- s %s",
                            new_block.v, new_block.r, new_block.s)
                        mainLog.info("Block Group Signature: %s --Random number: %s --Group Key: %s", new_block.header.group_sig, \
                             new_block.header.random_number.encode('hex'),  new_block.header.group_pubkey)
                        mainLog.info("This block contains %s transactions",
                                     new_block.transaction_count)
                        #                        mainLog.info("Sleeping 2s to give way to clock drift...")
                        #                        time.sleep(2)
                        #Like receiving a new block
                        before = time.time()
                        chain.add_block(new_block)
                        after = time.time()
                        delay = after - before
                        delays_blocks.write(
                            str(new_block.number) + ',' + str(delay) + '\n')
                        delays_txs.write("Added new block no." +
                                         str(new_block.number) + '\n')
                        p2p.broadcast_block(new_block)
                        #after a correct block, create and broadcast new share
                        count = 0
                        #timeout_expired = False
                        block_num = new_block.number
                        consensus.calculate_next_signer(block_num)
                        last_random_no = consensus.get_current_random_no()
                        consensus.reset_bls()
                        time.sleep(10)
                        if in_dkg_group and create_shares:
                            count = 0
                            new_shares = consensus.create_shares(
                                last_random_no, block_num, count)
                            for share in new_shares:
                                p2p.broadcast_share(share)
                                cache.store_bls(share)
                                mainLog.info("Sent a new share to the network")

        except Exception as e:
            mainLog.critical(
                "Exception while checking if the node has to sign the next block"
            )
            mainLog.exception(e)
            p2p.stop()
            sys.exit(0)

        # Process transactions from the user
        if ((time.time() - start_time) > START_TIME
                or isMaster) and not dkg_on:
            if user_tx_count == LOOPS_PER_TX - 1:
                try:
                    tx_int = user.get_tx()
                    while tx_int is not None:
                        before = time.time()
                        processed_user = processed_user + 1
                        user_tx_count = 0
                        try:
                            try:
                                key_pos = addresses.index(tx_int["from"])
                                #mainLog.debug("Found key in %s", key_pos)
                            except:
                                raise Exception(
                                    "Key indicated in the from field is not present in the keystore"
                                )
                            key = keys[key_pos]
                            tx = chain.parse_transaction(tx_int)
                            tx.sign(key.privkey)
                            mainLog.info("Processing user transaction, from: %s --  to: %s -- hash %s -- value %s", \
                            tx_int["from"].encode("HEX"), tx_int["to"].encode("HEX"), tx.hash.encode("HEX"), tx_int["value"])
                            #mainLog.debug("TX signed. Info: v %s -- r %s -- s %s -- NONCE %s", tx.v, \
                            #tx.r, str(tx.s), tx.nonce)
                            # correct tx
                            try:
                                chain.add_pending_transaction(tx)
                            except Exception as e:
                                raise e
                            p2p.broadcast_tx(tx)
                            after = time.time()
                            delay = after - before
                            delays_txs.write(
                                str(tx.hash.encode("HEX")) + ',' + str(delay) +
                                '\n')
                            #mainLog.info("Sent transaction to the network, from: %s --  to: %s --  value: %s", \
                            #tx_int["from"].encode("HEX"), tx.to.encode("HEX"), tx.ip_network)
    #                        seen_tx.append(tx.hash)
                        except Exception as e:
                            mainLog.error(
                                "Error when creating user transaction, ignoring transaction."
                            )
                            mainLog.exception(e.message)
#                        Temporarily disabled because we want 1 tx per 2 loops
#                        if processed < USER_TX_PER_LOOP:
#                            tx_int = user.get_tx()
#                        else:
#                            tx_int = None
                        tx_int = None
                except Exception as e:
                    mainLog.exception(e)
                    p2p.stop()
                    sys.exit(0)
            else:
                user_tx_count = user_tx_count + 1

        #answer queries from OOR
        try:
            nonce, afi, address = oor.get_query()
            if nonce is not None and afi is not None and address is not None:
                info = chain.query_eid(ipaddr=address, nonce=nonce)
                oor.send(info)
        except Exception as e:
            mainLog.critical("Exception while answering queries from OOR")
            mainLog.exception(e)
            p2p.stop()
            sys.exit(0)

#########Answer queries from the network
#blocks
        try:
            block_numbers = p2p.get_block_queries()
            if block_numbers is not None:
                mainLog.info("Answering query for block nos. %s",
                             block_numbers)
                response = []
                for number in block_numbers:
                    response.append(chain.get_block_by_number(number))
                p2p.answer_block_queries(response)
        except Exception as e:
            mainLog.critical(
                "Exception while answering queries from the network")
            mainLog.exception(e)
            p2p.stop()
            sys.exit(0)

        #transaction pool
        try:
            if p2p.tx_pool_query():
                mainLog.info("Answering tx pool query")
                pool = chain.get_pending_transactions()
                p2p.answer_tx_pool_query(pool)
        except Exception as e:
            mainLog.critical("Exception while answering the transaction pool")
            mainLog.exception(e)
            # Stop P2P
            p2p.stop()
            sys.exit(0)


########Consensus
#Get shares from the network
        try:
            share = p2p.get_share()
            while share is not None and not dkg_on:
                mainLog.info("Received new BLS share from P2P.")
                if not cache.in_bls_cache(share):
                    mainLog.info("Share not in cache, processing")
                    if share.block_number == block_num:
                        msg = str(last_random_no) + str(block_num) + str(count)
                        res = consensus.store_share(share, msg, block_num)
                    elif share.block_number > block_num:
                        mainLog.debug(
                            "Received a share for a future block number. Saving for later..."
                        )
                        mainLog.debug(
                            "Current block no. %s, block no. in share: %s",
                            block_num, share.block_number)
                        cache.store_future_bls(share)
                    else:
                        mainLog.debug(
                            "Received a share for a past block number. VERY STRANGE!!! Discarding..."
                        )
                    cache.store_bls(share)
                    p2p.broadcast_share(share)
                share = p2p.get_share()
            while cache.pending_future_bls(block_num):
                share = cache.get_future_bls(block_num)
                msg = str(last_random_no) + str(block_num) + str(count)
                res = consensus.store_share(share, msg, block_num)
        except Exception as e:
            mainLog.critical("Exception while processing received shares")
            mainLog.exception(e)
            # Stop P2P
            p2p.stop()
            sys.exit(0)

        if (time.time() - timestamp) > (BLOCK_TIME *
                                        0.75) and not consensus.shares_ready():
            mainLog.warning(
                "This node has not yet computed the Group Signature...")
            if (time.time() - timestamp) > (BLOCK_TIME *
                                            0.99) and not isMaster:
                mainLog.warning(
                    "It is nearly block time and we don't have the Group Signature. Activating bootstrap mode."
                )
                from_bootstrap = True

        #DKG management

        #Trigger new DKG
        try:
            if (
                (block_num + 1) % DKG_RENEWAL_INTERVAL == 0
            ) and not dkg_on and not exit_from_dkg and consensus.shares_ready(
            ):
                mainLog.info(
                    "Next block needs new Group Key. Triggering DKG renewal.")
                dkg_on = True
                create_shares = True
                dkg_group = chain.get_current_dkg_group()
                in_dkg_group, my_dkgIDs = find_me_in_dkg_group(
                    dkg_group, addresses)
                if in_dkg_group:
                    to_send = consensus.new_dkg(dkg_group, my_dkgIDs)
                    for dkg_share in to_send:
                        cache.store_dkg(dkg_share)
                        p2p.send_dkg_share(dkg_share)
                    #For cases when one node does ALL the DKG
                    if consensus.all_node_dkgs_finished():
                        dkg_on = False
                        exit_from_dkg = True
                        mainLog.info(
                            "DKG finished successfully for all node IDs. Exiting loop and resuming normal operation."
                        )
                else:
                    # Configure nodes that do not participate in the DKG so they can verify BLS shares later
                    consensus.store_ids(dkg_group)
                #Define new signer that has to be in the dkg_group. Selected randomly from the people in the group (temporal override of the BLS RN generation)
                random_no = chain.get_block_by_number(
                    block_num).header.random_number.encode('hex')
                random_pos = compress_random_no_to_int(random_no,
                                                       16) % len(dkg_group)
                # signing_addr will be used in block RX and block creation code in the beginning of the loop
                signing_addr = dkg_group[random_pos]
        except Exception as e:
            mainLog.critical("Exception while creating DKG shares")
            mainLog.exception(e)
            # Stop P2P
            p2p.stop()
            sys.exit(0)

        #Collect DKG shares for the new DKG
        try:
            #During DKG, the prototype only works on DKG
            if in_dkg_group:
                #WE STAY HERE FOR THE WHOLE DKG
                dkg_share = p2p.get_dkg_share()
                while dkg_on and dkg_share is not None:
                    mainLog.info("Received new DKG share from P2P")
                    if not cache.in_dkg_cache(dkg_share):
                        if dkg_share.to in my_dkgIDs:
                            #Next fix if current fix does not work
                            #if dkg_share.to in my_dkgIDs and consensus.allSharesReceived(dkg_share.to):
                            consensus.verify_dkg_contribution(dkg_share)
                            if consensus.all_node_dkgs_finished():
                                dkg_on = False
                                exit_from_dkg = True
                                mainLog.info(
                                    "DKG finished successfully for all node IDs. Exiting loop and resuming normal operation."
                                )
                                #                                if not isMaster:
                                #                                    mainLog.info("Sleeping for 1hmin to give time to master for its keys")
                                #                                    time.sleep(30*88)
                                time.sleep(120)
                            elif (time.time() - timestamp) >= DKG_TIMEOUT:
                                mainLog.critical(
                                    "Fatal Error. DKG renewal timeout expired. Stopping..."
                                )
                                raise Exception
                        # Send shares that are NOT for me
                        else:
                            p2p.send_dkg_share(dkg_share)
                        cache.store_dkg(dkg_share)
                    dkg_share = p2p.get_dkg_share()
            elif dkg_on:
                mainLog.info(
                    "This node is not participating in the DKG. Will sleep for one block time and wait for a block with the new public key"
                )
                time.sleep(BLOCK_TIME)
                if (time.time() - timestamp) >= DKG_TIMEOUT:
                    mainLog.critical(
                        "Fatal Error. DKG renewal timeout expired. Stopping..."
                    )
                    raise Exception
        except Exception as e:
            mainLog.critical("Exception while processing received DKG shares")
            mainLog.exception(e)
            # Stop P2P
            p2p.stop()
            sys.exit(0)
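One IPSet detail from the startup code above: myIPs is built by calling update() once per key, so the result is the compacted union of the ranges owned by all addresses. The pattern in isolation; get_own_ips is assumed, per the code above, to return an iterable of addresses or CIDRs:

from netaddr import IPSet

def collect_own_ips(chain, keys):
    """Union of the IP ranges owned by every key in the keystore."""
    my_ips = IPSet()
    for key in keys:
        # get_own_ips is part of the chain API used by run() above
        my_ips.update(chain.get_own_ips(key.address))
    return my_ips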
Example #11
class ServerConfig(Config):

    def read_config(self, config):
        self.server_name = config["server_name"]
        self.server_context = config.get("server_context", None)

        try:
            parse_and_validate_server_name(self.server_name)
        except ValueError as e:
            raise ConfigError(str(e))

        self.pid_file = self.abspath(config.get("pid_file"))
        self.web_client_location = config.get("web_client_location", None)
        self.soft_file_limit = config.get("soft_file_limit", 0)
        self.daemonize = config.get("daemonize")
        self.print_pidfile = config.get("print_pidfile")
        self.user_agent_suffix = config.get("user_agent_suffix")
        self.use_frozen_dicts = config.get("use_frozen_dicts", False)
        self.public_baseurl = config.get("public_baseurl")
        self.cpu_affinity = config.get("cpu_affinity")

        # Whether to send federation traffic out in this process. This only
        # applies to some federation traffic, and so shouldn't be used to
        # "disable" federation
        self.send_federation = config.get("send_federation", True)

        # Whether to enable user presence.
        self.use_presence = config.get("use_presence", True)

        # Whether to update the user directory or not. This should be set to
        # false only if we are updating the user directory in a worker
        self.update_user_directory = config.get("update_user_directory", True)

        # whether to enable the media repository endpoints. This should be set
        # to false if the media repository is running as a separate endpoint;
        # doing so ensures that we will not run cache cleanup jobs on the
        # master, potentially causing inconsistency.
        self.enable_media_repo = config.get("enable_media_repo", True)

        # Whether to require authentication to retrieve profile data (avatars,
        # display names) of other users through the client API.
        self.require_auth_for_profile_requests = config.get(
            "require_auth_for_profile_requests", False,
        )

        # If set to 'True', requires authentication to access the server's
        # public rooms directory through the client API, and forbids any other
        # homeserver to fetch it via federation.
        self.restrict_public_rooms_to_local_users = config.get(
            "restrict_public_rooms_to_local_users", False,
        )

        # whether to enable search. If disabled, new entries will not be inserted
        # into the search tables and they will not be indexed. Users will receive
        # errors when attempting to search for messages.
        self.enable_search = config.get("enable_search", True)

        self.filter_timeline_limit = config.get("filter_timeline_limit", -1)

        # Whether we should block invites sent to users on this server
        # (other than those sent by local server admins)
        self.block_non_admin_invites = config.get(
            "block_non_admin_invites", False,
        )

        # Whether to enable experimental MSC1849 (aka relations) support
        self.experimental_msc1849_support_enabled = config.get(
            "experimental_msc1849_support_enabled", False,
        )

        # Options to control access by tracking MAU
        self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
        self.max_mau_value = 0
        if self.limit_usage_by_mau:
            self.max_mau_value = config.get(
                "max_mau_value", 0,
            )
        self.mau_stats_only = config.get("mau_stats_only", False)

        self.mau_limits_reserved_threepids = config.get(
            "mau_limit_reserved_threepids", []
        )

        self.mau_trial_days = config.get(
            "mau_trial_days", 0,
        )

        # Options to disable HS
        self.hs_disabled = config.get("hs_disabled", False)
        self.hs_disabled_message = config.get("hs_disabled_message", "")
        self.hs_disabled_limit_type = config.get("hs_disabled_limit_type", "")

        # Admin uri to direct users at should their instance become blocked
        # due to resource constraints
        self.admin_contact = config.get("admin_contact", None)

        # FIXME: federation_domain_whitelist needs sytests
        self.federation_domain_whitelist = None
        federation_domain_whitelist = config.get(
            "federation_domain_whitelist", None,
        )

        if federation_domain_whitelist is not None:
            # turn the whitelist into a hash for speed of lookup
            self.federation_domain_whitelist = {}

            for domain in federation_domain_whitelist:
                self.federation_domain_whitelist[domain] = True

        self.federation_ip_range_blacklist = config.get(
            "federation_ip_range_blacklist", [],
        )

        # Attempt to create an IPSet from the given ranges
        try:
            self.federation_ip_range_blacklist = IPSet(
                self.federation_ip_range_blacklist
            )

            # Always blacklist 0.0.0.0, ::
            self.federation_ip_range_blacklist.update(["0.0.0.0", "::"])
        except Exception as e:
            raise ConfigError(
                "Invalid range(s) provided in "
                "federation_ip_range_blacklist: %s" % e
            )

        if self.public_baseurl is not None:
            if self.public_baseurl[-1] != '/':
                self.public_baseurl += '/'
        self.start_pushers = config.get("start_pushers", True)

        # (undocumented) option for torturing the worker-mode replication a bit,
        # for testing. The value defines the number of milliseconds to pause before
        # sending out any replication updates.
        self.replication_torture_level = config.get("replication_torture_level")

        # Whether to require a user to be in the room to add an alias to it.
        # Defaults to True.
        self.require_membership_for_aliases = config.get(
            "require_membership_for_aliases", True,
        )

        # Whether to allow per-room membership profiles through the send of membership
        # events with profile information that differ from the target's global profile.
        self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)

        self.listeners = []
        for listener in config.get("listeners", []):
            if not isinstance(listener.get("port", None), int):
                raise ConfigError(
                    "Listener configuration is lacking a valid 'port' option"
                )

            if listener.setdefault("tls", False):
                # no_tls is not really supported any more, but let's grandfather it in
                # here.
                if config.get("no_tls", False):
                    logger.info(
                        "Ignoring TLS-enabled listener on port %i due to no_tls",
                        listener.get("port"),
                    )
                    continue

            bind_address = listener.pop("bind_address", None)
            bind_addresses = listener.setdefault("bind_addresses", [])

            # if bind_address was specified, add it to the list of addresses
            if bind_address:
                bind_addresses.append(bind_address)

            # if we still have an empty list of addresses, use the default list
            if not bind_addresses:
                if listener['type'] == 'metrics':
                    # the metrics listener doesn't support IPv6
                    bind_addresses.append('0.0.0.0')
                else:
                    bind_addresses.extend(DEFAULT_BIND_ADDRESSES)

            self.listeners.append(listener)

        if not self.web_client_location:
            _warn_if_webclient_configured(self.listeners)

        self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))

        bind_port = config.get("bind_port")
        if bind_port:
            if config.get("no_tls", False):
                raise ConfigError("no_tls is incompatible with bind_port")

            self.listeners = []
            bind_host = config.get("bind_host", "")
            gzip_responses = config.get("gzip_responses", True)

            self.listeners.append({
                "port": bind_port,
                "bind_addresses": [bind_host],
                "tls": True,
                "type": "http",
                "resources": [
                    {
                        "names": ["client"],
                        "compress": gzip_responses,
                    },
                    {
                        "names": ["federation"],
                        "compress": False,
                    }
                ]
            })

            unsecure_port = config.get("unsecure_port", bind_port - 400)
            if unsecure_port:
                self.listeners.append({
                    "port": unsecure_port,
                    "bind_addresses": [bind_host],
                    "tls": False,
                    "type": "http",
                    "resources": [
                        {
                            "names": ["client"],
                            "compress": gzip_responses,
                        },
                        {
                            "names": ["federation"],
                            "compress": False,
                        }
                    ]
                })

        manhole = config.get("manhole")
        if manhole:
            self.listeners.append({
                "port": manhole,
                "bind_addresses": ["127.0.0.1"],
                "type": "manhole",
                "tls": False,
            })

        metrics_port = config.get("metrics_port")
        if metrics_port:
            logger.warn(
                ("The metrics_port configuration option is deprecated in Synapse 0.31 "
                 "in favour of a listener. Please see "
                 "http://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.rst"
                 " on how to configure the new listener."))

            self.listeners.append({
                "port": metrics_port,
                "bind_addresses": [config.get("metrics_bind_host", "127.0.0.1")],
                "tls": False,
                "type": "http",
                "resources": [
                    {
                        "names": ["metrics"],
                        "compress": False,
                    },
                ]
            })

        _check_resource_config(self.listeners)

    def has_tls_listener(self):
        return any(l["tls"] for l in self.listeners)

    def default_config(self, server_name, data_dir_path, **kwargs):
        _, bind_port = parse_and_validate_server_name(server_name)
        if bind_port is not None:
            unsecure_port = bind_port - 400
        else:
            bind_port = 8448
            unsecure_port = 8008

        pid_file = os.path.join(data_dir_path, "homeserver.pid")
        return """\
        ## Server ##

        # The domain name of the server, with optional explicit port.
        # This is used by remote servers to connect to this server,
        # e.g. matrix.org, localhost:8080, etc.
        # This is also the last part of your UserID.
        #
        server_name: "%(server_name)s"

        # When running as a daemon, the file to store the pid in
        #
        pid_file: %(pid_file)s

        # CPU affinity mask. Setting this restricts the CPUs on which the
        # process will be scheduled. It is represented as a bitmask, with the
        # lowest order bit corresponding to the first logical CPU and the
        # highest order bit corresponding to the last logical CPU. Not all CPUs
        # may exist on a given system but a mask may specify more CPUs than are
        # present.
        #
        # For example:
        #    0x00000001  is processor #0,
        #    0x00000003  is processors #0 and #1,
        #    0xFFFFFFFF  is all processors (#0 through #31).
        #
        # Pinning a Python process to a single CPU is desirable, because Python
        # is inherently single-threaded due to the GIL, and can suffer a
        # 30-40%% slowdown due to cache blow-out and thread context switching
        # if the scheduler happens to schedule the underlying threads across
        # different cores. See
        # https://www.mirantis.com/blog/improve-performance-python-programs-restricting-single-cpu/.
        #
        # This setting requires the affinity package to be installed!
        #
        #cpu_affinity: 0xFFFFFFFF

        # The path to the web client which will be served at /_matrix/client/
        # if 'webclient' is configured under the 'listeners' configuration.
        #
        #web_client_location: "/path/to/web/root"

        # The public-facing base URL that clients use to access this HS
        # (not including _matrix/...). This is the same URL a user would
        # enter into the 'custom HS URL' field on their client. If you
        # use synapse with a reverse proxy, this should be the URL to reach
        # synapse via the proxy.
        #
        #public_baseurl: https://example.com/

        # Set the soft limit on the number of file descriptors synapse can use.
        # Zero is used to indicate synapse should set the soft limit to the
        # hard limit.
        #
        #soft_file_limit: 0

        # Set to false to disable presence tracking on this homeserver.
        #
        #use_presence: false

        # Whether to require authentication to retrieve profile data (avatars,
        # display names) of other users through the client API. Defaults to
        # 'false'. Note that profile data is also available via the federation
        # API, so this setting is of limited value if federation is enabled on
        # the server.
        #
        #require_auth_for_profile_requests: true

        # If set to 'true', requires authentication to access the server's
        # public rooms directory through the client API, and forbids any other
        # homeserver to fetch it via federation. Defaults to 'false'.
        #
        #restrict_public_rooms_to_local_users: true

        # The GC threshold parameters to pass to `gc.set_threshold`, if defined
        #
        #gc_thresholds: [700, 10, 10]

        # Set the limit on the returned events in the timeline in the get
        # and sync operations. The default value is -1, which means no upper limit.
        #
        #filter_timeline_limit: 5000

        # Whether room invites to users on this server should be blocked
        # (except those sent by local server admins). The default is False.
        #
        #block_non_admin_invites: True

        # Room searching
        #
        # If disabled, new messages will not be indexed for searching and users
        # will receive errors when searching for messages. Defaults to enabled.
        #
        #enable_search: false

        # Restrict federation to the following whitelist of domains.
        # N.B. we recommend also firewalling your federation listener to limit
        # inbound federation traffic as early as possible, rather than relying
        # purely on this application-layer restriction.  If not specified, the
        # default is to whitelist everything.
        #
        #federation_domain_whitelist:
        #  - lon.example.com
        #  - nyc.example.com
        #  - syd.example.com

        # Prevent federation requests from being sent to the following
        # blacklist IP address CIDR ranges. If this option is not specified, or
        # specified with an empty list, no ip range blacklist will be enforced.
        #
        # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
        # listed here, since they correspond to unroutable addresses.)
        #
        federation_ip_range_blacklist:
          - '127.0.0.0/8'
          - '10.0.0.0/8'
          - '172.16.0.0/12'
          - '192.168.0.0/16'
          - '100.64.0.0/10'
          - '169.254.0.0/16'
          - '::1/128'
          - 'fe80::/64'
          - 'fc00::/7'

        # List of ports that Synapse should listen on, their purpose and their
        # configuration.
        #
        # Options for each listener include:
        #
        #   port: the TCP port to bind to
        #
        #   bind_addresses: a list of local addresses to listen on. The default is
        #       'all local interfaces'.
        #
        #   type: the type of listener. Normally 'http', but other valid options are:
        #       'manhole' (see docs/manhole.md),
        #       'metrics' (see docs/metrics-howto.rst),
        #       'replication' (see docs/workers.rst).
        #
        #   tls: set to true to enable TLS for this listener. Will use the TLS
        #       key/cert specified in tls_private_key_path / tls_certificate_path.
        #
        #   x_forwarded: Only valid for an 'http' listener. Set to true to use the
        #       X-Forwarded-For header as the client IP. Useful when Synapse is
        #       behind a reverse-proxy.
        #
        #   resources: Only valid for an 'http' listener. A list of resources to host
        #       on this port. Options for each resource are:
        #
        #       names: a list of names of HTTP resources. See below for a list of
        #           valid resource names.
        #
        #       compress: set to true to enable HTTP compression for this resource.
        #
        #   additional_resources: Only valid for an 'http' listener. A map of
        #        additional endpoints which should be loaded via dynamic modules.
        #
        # Valid resource names are:
        #
        #   client: the client-server API (/_matrix/client), and the synapse admin
        #       API (/_synapse/admin). Also implies 'media' and 'static'.
        #
        #   consent: user consent forms (/_matrix/consent). See
        #       docs/consent_tracking.md.
        #
        #   federation: the server-server API (/_matrix/federation). Also implies
        #       'media', 'keys', 'openid'
        #
        #   keys: the key discovery API (/_matrix/keys).
        #
        #   media: the media API (/_matrix/media).
        #
        #   metrics: the metrics interface. See docs/metrics-howto.rst.
        #
        #   openid: OpenID authentication.
        #
        #   replication: the HTTP replication API (/_synapse/replication). See
        #       docs/workers.rst.
        #
        #   static: static resources under synapse/static (/_matrix/static). (Mostly
        #       useful for 'fallback authentication'.)
        #
        #   webclient: A web client. Requires web_client_location to be set.
        #
        listeners:
          # TLS-enabled listener: for when matrix traffic is sent directly to synapse.
          #
          # Disabled by default. To enable it, uncomment the following. (Note that you
          # will also need to give Synapse a TLS key and certificate: see the TLS section
          # below.)
          #
          #- port: %(bind_port)s
          #  type: http
          #  tls: true
          #  resources:
          #    - names: [client, federation]

          # Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy
          # that unwraps TLS.
          #
          # If you plan to use a reverse proxy, please see
          # https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.rst.
          #
          - port: %(unsecure_port)s
            tls: false
            bind_addresses: ['::1', '127.0.0.1']
            type: http
            x_forwarded: true

            resources:
              - names: [client, federation]
                compress: false

            # example additional_resources:
            #
            #additional_resources:
            #  "/_matrix/my/custom/endpoint":
            #    module: my_module.CustomRequestHandler
            #    config: {}

          # Turn on the twisted ssh manhole service on localhost on the given
          # port.
          #
          #- port: 9000
          #  bind_addresses: ['::1', '127.0.0.1']
          #  type: manhole


        ## Homeserver blocking ##

        # How to reach the server admin, used in ResourceLimitError
        #
        #admin_contact: 'mailto:[email protected]'

        # Global blocking
        #
        #hs_disabled: False
        #hs_disabled_message: 'Human readable reason for why the HS is blocked'
        #hs_disabled_limit_type: 'error code(str), to help clients decode reason'

        # Monthly Active User Blocking
        #
        #limit_usage_by_mau: False
        #max_mau_value: 50
        #mau_trial_days: 2

        # If enabled, the metrics for the number of monthly active users will
        # be populated, however no one will be limited. If limit_usage_by_mau
        # is true, this is implied to be true.
        #
        #mau_stats_only: False

        # Sometimes the server admin will want to ensure certain accounts are
        # never blocked by mau checking. These accounts are specified here.
        #
        #mau_limit_reserved_threepids:
        #  - medium: 'email'
        #    address: '*****@*****.**'

        # Used by phonehome stats to group together related servers.
        #server_context: context

        # Whether to require a user to be in the room to add an alias to it.
        # Defaults to 'true'.
        #
        #require_membership_for_aliases: false

        # Whether to allow per-room membership profiles through the sending of
        # membership events with profile information that differs from the
        # target's global profile.
        # Defaults to 'true'.
        #
        #allow_per_room_profiles: false
        """ % locals()

    def read_arguments(self, args):
        if args.manhole is not None:
            self.manhole = args.manhole
        if args.daemonize is not None:
            self.daemonize = args.daemonize
        if args.print_pidfile is not None:
            self.print_pidfile = args.print_pidfile

    def add_arguments(self, parser):
        server_group = parser.add_argument_group("server")
        server_group.add_argument("-D", "--daemonize", action='store_true',
                                  default=None,
                                  help="Daemonize the home server")
        server_group.add_argument("--print-pidfile", action='store_true',
                                  default=None,
                                  help="Print the path to the pidfile just"
                                  " before daemonizing")
        server_group.add_argument("--manhole", metavar="PORT", dest="manhole",
                                  type=int,
                                  help="Turn on the twisted telnet manhole"
                                  " service on the given port.")
Example #16
class ContentRepositoryConfig(Config):
    section = "media"

    def read_config(self, config, **kwargs):

        # Only load the media repo in this process if the media repo is
        # enabled in the config, or this worker is the media repository worker.
        if (self.enable_media_repo is False and
                config.get("worker_app") != "synapse.app.media_repository"):
            self.can_load_media_repo = False
            return

        self.can_load_media_repo = True

        # Whether this instance should be the one to run the background jobs to
        # e.g clean up old URL previews.
        self.media_instance_running_background_jobs = config.get(
            "media_instance_running_background_jobs")

        self.max_upload_size = self.parse_size(
            config.get("max_upload_size", "50M"))
        self.max_image_pixels = self.parse_size(
            config.get("max_image_pixels", "32M"))
        self.max_spider_size = self.parse_size(
            config.get("max_spider_size", "10M"))

        self.media_store_path = self.ensure_directory(
            config.get("media_store_path", "media_store"))

        backup_media_store_path = config.get("backup_media_store_path")

        synchronous_backup_media_store = config.get(
            "synchronous_backup_media_store", False)

        storage_providers = config.get("media_storage_providers", [])

        if backup_media_store_path:
            if storage_providers:
                raise ConfigError(
                    "Cannot use both 'backup_media_store_path' and 'storage_providers'"
                )

            storage_providers = [{
                "module": "file_system",
                "store_local": True,
                "store_synchronous": synchronous_backup_media_store,
                "store_remote": True,
                "config": {
                    "directory": backup_media_store_path
                },
            }]

        # This is a list of config that can be used to create the storage
        # providers. The entries are tuples of (Class, class_config,
        # MediaStorageProviderConfig), where Class is the class of the provider,
        # the class_config the config to pass to it, and
        # MediaStorageProviderConfig are options for StorageProviderWrapper.
        #
        # We don't create the storage providers here as not all workers need
        # them to be started.
        self.media_storage_providers = []  # type: List[tuple]

        for i, provider_config in enumerate(storage_providers):
            # We special case the module "file_system" so as not to need to
            # expose FileStorageProviderBackend
            if provider_config["module"] == "file_system":
                provider_config["module"] = (
                    "synapse.rest.media.v1.storage_provider"
                    ".FileStorageProviderBackend")

            provider_class, parsed_config = load_module(
                provider_config, ("media_storage_providers", "<item %i>" % i))

            wrapper_config = MediaStorageProviderConfig(
                provider_config.get("store_local", False),
                provider_config.get("store_remote", False),
                provider_config.get("store_synchronous", False),
            )

            self.media_storage_providers.append(
                (provider_class, parsed_config, wrapper_config))

        self.dynamic_thumbnails = config.get("dynamic_thumbnails", False)
        self.thumbnail_requirements = parse_thumbnail_requirements(
            config.get("thumbnail_sizes", DEFAULT_THUMBNAIL_SIZES))
        self.url_preview_enabled = config.get("url_preview_enabled", False)
        if self.url_preview_enabled:
            try:
                check_requirements("url_preview")

            except DependencyException as e:
                raise ConfigError(e.message)

            if "url_preview_ip_range_blacklist" not in config:
                raise ConfigError(
                    "For security, you must specify an explicit target IP address "
                    "blacklist in url_preview_ip_range_blacklist for url previewing "
                    "to work")

            self.url_preview_ip_range_blacklist = IPSet(
                config["url_preview_ip_range_blacklist"])

            # we always blacklist '0.0.0.0' and '::', which are supposed to be
            # unroutable addresses.
            self.url_preview_ip_range_blacklist.update(["0.0.0.0", "::"])

            self.url_preview_ip_range_whitelist = IPSet(
                config.get("url_preview_ip_range_whitelist", ()))

            self.url_preview_url_blacklist = config.get(
                "url_preview_url_blacklist", ())

            self.url_preview_accept_language = config.get(
                "url_preview_accept_language") or ["en"]

    def generate_config_section(self, data_dir_path, **kwargs):
        media_store = os.path.join(data_dir_path, "media_store")
        uploads_path = os.path.join(data_dir_path, "uploads")

        formatted_thumbnail_sizes = "".join(THUMBNAIL_SIZE_YAML % s
                                            for s in DEFAULT_THUMBNAIL_SIZES)
        # strip final NL
        formatted_thumbnail_sizes = formatted_thumbnail_sizes[:-1]

        ip_range_blacklist = "\n".join("        #  - '%s'" % ip
                                       for ip in DEFAULT_IP_RANGE_BLACKLIST)

        return (r"""
        ## Media Store ##

        # Enable the media store service in the Synapse master. Uncomment the
        # following if you are using a separate media store worker.
        #
        #enable_media_repo: false

        # Directory where uploaded images and attachments are stored.
        #
        media_store_path: "%(media_store)s"

        # Media storage providers allow media to be stored in different
        # locations.
        #
        #media_storage_providers:
        #  - module: file_system
        #    # Whether to store newly uploaded local files
        #    store_local: false
        #    # Whether to store newly downloaded remote files
        #    store_remote: false
        #    # Whether to wait for successful storage for local uploads
        #    store_synchronous: false
        #    config:
        #       directory: /mnt/some/other/directory

        # The largest allowed upload size in bytes
        #
        #max_upload_size: 50M

        # Maximum number of pixels that will be thumbnailed
        #
        #max_image_pixels: 32M

        # Whether to generate new thumbnails on the fly to precisely match
        # the resolution requested by the client. If true then whenever
        # a new resolution is requested by the client the server will
        # generate a new thumbnail. If false the server will pick a thumbnail
        # from a precalculated list.
        #
        #dynamic_thumbnails: false

        # List of thumbnails to precalculate when an image is uploaded.
        #
        #thumbnail_sizes:
%(formatted_thumbnail_sizes)s

        # Is the preview URL API enabled?
        #
        # 'false' by default: uncomment the following to enable it (and specify
        # a url_preview_ip_range_blacklist).
        #
        #url_preview_enabled: true

        # List of IP address CIDR ranges that the URL preview spider is denied
        # from accessing.  There are no defaults: you must explicitly
        # specify a list for URL previewing to work.  You should specify any
        # internal services in your network that you do not want synapse to try
        # to connect to, otherwise anyone in any Matrix room could cause your
        # synapse to issue arbitrary GET requests to your internal services,
        # causing serious security issues.
        #
        # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
        # listed here, since they correspond to unroutable addresses.)
        #
        # This must be specified if url_preview_enabled is set. It is recommended that
        # you uncomment the following list as a starting point.
        #
        #url_preview_ip_range_blacklist:
%(ip_range_blacklist)s

        # List of IP address CIDR ranges that the URL preview spider is allowed
        # to access even if they are specified in url_preview_ip_range_blacklist.
        # This is useful for specifying exceptions to wide-ranging blacklisted
        # target IP ranges - e.g. for enabling URL previews for a specific private
        # website only visible in your network.
        #
        #url_preview_ip_range_whitelist:
        #   - '192.168.1.1'

        # Optional list of URL matches that the URL preview spider is
        # denied from accessing.  You should use url_preview_ip_range_blacklist
        # in preference to this, otherwise someone could define a public DNS
        # entry that points to a private IP address and circumvent the blacklist.
        # This is more useful if you know there is an entire shape of URL that
        # you will never want synapse to try to spider.
        #
        # Each list entry is a dictionary of url component attributes as returned
        # by urlparse.urlsplit as applied to the absolute form of the URL.  See
        # https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit
        # The values of the dictionary are treated as a filename match pattern
        # applied to that component of URLs, unless they start with a ^ in which
        # case they are treated as a regular expression match.  If all the
        # specified component matches for a given list item succeed, the URL is
        # blacklisted.
        #
        #url_preview_url_blacklist:
        #  # blacklist any URL with a username in its URI
        #  - username: '******'
        #
        #  # blacklist all *.google.com URLs
        #  - netloc: 'google.com'
        #  - netloc: '*.google.com'
        #
        #  # blacklist all plain HTTP URLs
        #  - scheme: 'http'
        #
        #  # blacklist http(s)://www.acme.com/foo
        #  - netloc: 'www.acme.com'
        #    path: '/foo'
        #
        #  # blacklist any URL with a literal IPv4 address
        #  - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'

        # The largest allowed URL preview spidering size in bytes
        #
        #max_spider_size: 10M

        # A list of values for the Accept-Language HTTP header used when
        # downloading webpages during URL preview generation. This allows
        # Synapse to specify the preferred languages that URL previews should
        # be in when communicating with remote servers.
        #
        # Each value is an IETF language tag; a 2-3 letter identifier for a
        # language, optionally followed by subtags separated by '-', specifying
        # a country or region variant.
        #
        # Multiple values can be provided, and a weight can be added to each by
        # using quality value syntax (;q=). '*' translates to any language.
        #
        # Defaults to "en".
        #
        # Example:
        #
        # url_preview_accept_language:
        #   - en-UK
        #   - en-US;q=0.9
        #   - fr;q=0.8
        #   - *;q=0.7
        #
        #url_preview_accept_language:
        #   - en
        """ % locals())
Example #17
class ServerConfig(Config):
    section = "server"

    def read_config(self, config, **kwargs):
        self.server_name = config["server_name"]
        self.server_context = config.get("server_context", None)

        try:
            parse_and_validate_server_name(self.server_name)
        except ValueError as e:
            raise ConfigError(str(e))

        self.pid_file = self.abspath(config.get("pid_file"))
        self.web_client_location = config.get("web_client_location", None)
        self.soft_file_limit = config.get("soft_file_limit", 0)
        self.daemonize = config.get("daemonize")
        self.print_pidfile = config.get("print_pidfile")
        self.user_agent_suffix = config.get("user_agent_suffix")
        self.use_frozen_dicts = config.get("use_frozen_dicts", False)
        self.public_baseurl = config.get("public_baseurl")

        # Whether to send federation traffic out in this process. This only
        # applies to some federation traffic, and so shouldn't be used to
        # "disable" federation
        self.send_federation = config.get("send_federation", True)

        # Whether to enable user presence.
        self.use_presence = config.get("use_presence", True)

        # Whether to update the user directory or not. This should be set to
        # false only if we are updating the user directory in a worker
        self.update_user_directory = config.get("update_user_directory", True)

        # whether to enable the media repository endpoints. This should be set
        # to false if the media repository is running as a separate endpoint;
        # doing so ensures that we will not run cache cleanup jobs on the
        # master, potentially causing inconsistency.
        self.enable_media_repo = config.get("enable_media_repo", True)

        # Whether to require authentication to retrieve profile data (avatars,
        # display names) of other users through the client API.
        self.require_auth_for_profile_requests = config.get(
            "require_auth_for_profile_requests", False
        )

        # Whether to require sharing a room with a user to retrieve their
        # profile data
        self.limit_profile_requests_to_users_who_share_rooms = config.get(
            "limit_profile_requests_to_users_who_share_rooms", False,
        )

        if "restrict_public_rooms_to_local_users" in config and (
            "allow_public_rooms_without_auth" in config
            or "allow_public_rooms_over_federation" in config
        ):
            raise ConfigError(
                "Can't use 'restrict_public_rooms_to_local_users' if"
                " 'allow_public_rooms_without_auth' and/or"
                " 'allow_public_rooms_over_federation' is set."
            )

        # Check if the legacy "restrict_public_rooms_to_local_users" flag is set. This
        # flag is now obsolete but we need to check it for backward-compatibility.
        if config.get("restrict_public_rooms_to_local_users", False):
            self.allow_public_rooms_without_auth = False
            self.allow_public_rooms_over_federation = False
        else:
            # If set to 'true', removes the need for authentication to access the server's
            # public rooms directory through the client API, meaning that anyone can
            # query the room directory. Defaults to 'false'.
            self.allow_public_rooms_without_auth = config.get(
                "allow_public_rooms_without_auth", False
            )
            # If set to 'true', allows any other homeserver to fetch the server's public
            # rooms directory via federation. Defaults to 'false'.
            self.allow_public_rooms_over_federation = config.get(
                "allow_public_rooms_over_federation", False
            )

        default_room_version = config.get("default_room_version", DEFAULT_ROOM_VERSION)

        # Ensure room version is a str
        default_room_version = str(default_room_version)

        if default_room_version not in KNOWN_ROOM_VERSIONS:
            raise ConfigError(
                "Unknown default_room_version: %s, known room versions: %s"
                % (default_room_version, list(KNOWN_ROOM_VERSIONS.keys()))
            )

        # Get the actual room version object rather than just the identifier
        self.default_room_version = KNOWN_ROOM_VERSIONS[default_room_version]

        # whether to enable search. If disabled, new entries will not be inserted
        # into the search tables and they will not be indexed. Users will receive
        # errors when attempting to search for messages.
        self.enable_search = config.get("enable_search", True)

        self.filter_timeline_limit = config.get("filter_timeline_limit", -1)

        # Whether we should block invites sent to users on this server
        # (other than those sent by local server admins)
        self.block_non_admin_invites = config.get("block_non_admin_invites", False)

        # Whether to enable experimental MSC1849 (aka relations) support
        self.experimental_msc1849_support_enabled = config.get(
            "experimental_msc1849_support_enabled", True
        )

        # Options to control access by tracking MAU
        self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
        self.max_mau_value = 0
        if self.limit_usage_by_mau:
            self.max_mau_value = config.get("max_mau_value", 0)
        self.mau_stats_only = config.get("mau_stats_only", False)

        self.mau_limits_reserved_threepids = config.get(
            "mau_limit_reserved_threepids", []
        )

        self.mau_trial_days = config.get("mau_trial_days", 0)
        self.mau_limit_alerting = config.get("mau_limit_alerting", True)

        # How long to keep redacted events in the database in unredacted form
        # before redacting them.
        redaction_retention_period = config.get("redaction_retention_period", "7d")
        if redaction_retention_period is not None:
            self.redaction_retention_period = self.parse_duration(
                redaction_retention_period
            )
        else:
            self.redaction_retention_period = None

        # How long to keep entries in the `users_ips` table.
        user_ips_max_age = config.get("user_ips_max_age", "28d")
        if user_ips_max_age is not None:
            self.user_ips_max_age = self.parse_duration(user_ips_max_age)
        else:
            self.user_ips_max_age = None

        # Options to disable HS
        self.hs_disabled = config.get("hs_disabled", False)
        self.hs_disabled_message = config.get("hs_disabled_message", "")

        # Admin URI to direct users to, should their instance become blocked
        # due to resource constraints
        self.admin_contact = config.get("admin_contact", None)

        # FIXME: federation_domain_whitelist needs sytests
        self.federation_domain_whitelist = None  # type: Optional[dict]
        federation_domain_whitelist = config.get("federation_domain_whitelist", None)

        if federation_domain_whitelist is not None:
            # turn the whitelist into a hash for speed of lookup
            self.federation_domain_whitelist = {}

            for domain in federation_domain_whitelist:
                self.federation_domain_whitelist[domain] = True

        self.federation_ip_range_blacklist = config.get(
            "federation_ip_range_blacklist", []
        )

        # Attempt to create an IPSet from the given ranges
        try:
            self.federation_ip_range_blacklist = IPSet(
                self.federation_ip_range_blacklist
            )

            # Always blacklist 0.0.0.0, ::
            self.federation_ip_range_blacklist.update(["0.0.0.0", "::"])
        except Exception as e:
            raise ConfigError(
                "Invalid range(s) provided in federation_ip_range_blacklist: %s" % e
            )

        if self.public_baseurl is not None:
            if self.public_baseurl[-1] != "/":
                self.public_baseurl += "/"
        self.start_pushers = config.get("start_pushers", True)

        # (undocumented) option for torturing the worker-mode replication a bit,
        # for testing. The value defines the number of milliseconds to pause before
        # sending out any replication updates.
        self.replication_torture_level = config.get("replication_torture_level")

        # Whether to require a user to be in the room to add an alias to it.
        # Defaults to True.
        self.require_membership_for_aliases = config.get(
            "require_membership_for_aliases", True
        )

        # Whether to allow per-room membership profiles through the sending of
        # membership events with profile information that differs from the
        # target's global profile.
        self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)

        retention_config = config.get("retention")
        if retention_config is None:
            retention_config = {}

        self.retention_enabled = retention_config.get("enabled", False)

        retention_default_policy = retention_config.get("default_policy")

        if retention_default_policy is not None:
            self.retention_default_min_lifetime = retention_default_policy.get(
                "min_lifetime"
            )
            if self.retention_default_min_lifetime is not None:
                self.retention_default_min_lifetime = self.parse_duration(
                    self.retention_default_min_lifetime
                )

            self.retention_default_max_lifetime = retention_default_policy.get(
                "max_lifetime"
            )
            if self.retention_default_max_lifetime is not None:
                self.retention_default_max_lifetime = self.parse_duration(
                    self.retention_default_max_lifetime
                )

            if (
                self.retention_default_min_lifetime is not None
                and self.retention_default_max_lifetime is not None
                and (
                    self.retention_default_min_lifetime
                    > self.retention_default_max_lifetime
                )
            ):
                raise ConfigError(
                    "The default retention policy's 'min_lifetime' can not be greater"
                    " than its 'max_lifetime'"
                )
        else:
            self.retention_default_min_lifetime = None
            self.retention_default_max_lifetime = None

        if self.retention_enabled:
            logger.info(
                "Message retention policies support enabled with the following default"
                " policy: min_lifetime = %s ; max_lifetime = %s",
                self.retention_default_min_lifetime,
                self.retention_default_max_lifetime,
            )

        self.retention_allowed_lifetime_min = retention_config.get(
            "allowed_lifetime_min"
        )
        if self.retention_allowed_lifetime_min is not None:
            self.retention_allowed_lifetime_min = self.parse_duration(
                self.retention_allowed_lifetime_min
            )

        self.retention_allowed_lifetime_max = retention_config.get(
            "allowed_lifetime_max"
        )
        if self.retention_allowed_lifetime_max is not None:
            self.retention_allowed_lifetime_max = self.parse_duration(
                self.retention_allowed_lifetime_max
            )

        if (
            self.retention_allowed_lifetime_min is not None
            and self.retention_allowed_lifetime_max is not None
            and self.retention_allowed_lifetime_min
            > self.retention_allowed_lifetime_max
        ):
            raise ConfigError(
                "Invalid retention policy limits: 'allowed_lifetime_min' can not be"
                " greater than 'allowed_lifetime_max'"
            )

        self.retention_purge_jobs = []  # type: List[Dict[str, Optional[int]]]
        for purge_job_config in retention_config.get("purge_jobs", []):
            interval_config = purge_job_config.get("interval")

            if interval_config is None:
                raise ConfigError(
                    "A retention policy's purge jobs configuration must have the"
                    " 'interval' key set."
                )

            interval = self.parse_duration(interval_config)

            shortest_max_lifetime = purge_job_config.get("shortest_max_lifetime")

            if shortest_max_lifetime is not None:
                shortest_max_lifetime = self.parse_duration(shortest_max_lifetime)

            longest_max_lifetime = purge_job_config.get("longest_max_lifetime")

            if longest_max_lifetime is not None:
                longest_max_lifetime = self.parse_duration(longest_max_lifetime)

            if (
                shortest_max_lifetime is not None
                and longest_max_lifetime is not None
                and shortest_max_lifetime > longest_max_lifetime
            ):
                raise ConfigError(
                    "A retention policy's purge jobs configuration's"
                    " 'shortest_max_lifetime' value can not be greater than its"
                    " 'longest_max_lifetime' value."
                )

            self.retention_purge_jobs.append(
                {
                    "interval": interval,
                    "shortest_max_lifetime": shortest_max_lifetime,
                    "longest_max_lifetime": longest_max_lifetime,
                }
            )

        if not self.retention_purge_jobs:
            self.retention_purge_jobs = [
                {
                    "interval": self.parse_duration("1d"),
                    "shortest_max_lifetime": None,
                    "longest_max_lifetime": None,
                }
            ]

        self.listeners = []  # type: List[dict]
        for listener in config.get("listeners", []):
            if not isinstance(listener.get("port", None), int):
                raise ConfigError(
                    "Listener configuration is lacking a valid 'port' option"
                )

            if listener.setdefault("tls", False):
                # no_tls is not really supported any more, but let's grandfather it in
                # here.
                if config.get("no_tls", False):
                    logger.info(
                        "Ignoring TLS-enabled listener on port %i due to no_tls",
                        listener.get("port"),
                    )
                    continue

            bind_address = listener.pop("bind_address", None)
            bind_addresses = listener.setdefault("bind_addresses", [])

            # if bind_address was specified, add it to the list of addresses
            if bind_address:
                bind_addresses.append(bind_address)

            # if we still have an empty list of addresses, use the default list
            if not bind_addresses:
                if listener["type"] == "metrics":
                    # the metrics listener doesn't support IPv6
                    bind_addresses.append("0.0.0.0")
                else:
                    bind_addresses.extend(DEFAULT_BIND_ADDRESSES)

            self.listeners.append(listener)

        if not self.web_client_location:
            _warn_if_webclient_configured(self.listeners)

        self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))

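        # Settings for refusing to join (or promptly leaving) remote rooms
        # whose complexity exceeds a threshold; see the 'limit_remote_rooms'
        # section of the generated config below.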
        @attr.s
        class LimitRemoteRoomsConfig(object):
            enabled = attr.ib(
                validator=attr.validators.instance_of(bool), default=False
            )
            complexity = attr.ib(
                validator=attr.validators.instance_of(
                    (float, int)  # type: ignore[arg-type] # noqa
                ),
                default=1.0,
            )
            complexity_error = attr.ib(
                validator=attr.validators.instance_of(str),
                default=ROOM_COMPLEXITY_TOO_GREAT,
            )

        self.limit_remote_rooms = LimitRemoteRoomsConfig(
            **config.get("limit_remote_rooms", {})
        )

        bind_port = config.get("bind_port")
        if bind_port:
            if config.get("no_tls", False):
                raise ConfigError("no_tls is incompatible with bind_port")

            self.listeners = []
            bind_host = config.get("bind_host", "")
            gzip_responses = config.get("gzip_responses", True)

            self.listeners.append(
                {
                    "port": bind_port,
                    "bind_addresses": [bind_host],
                    "tls": True,
                    "type": "http",
                    "resources": [
                        {"names": ["client"], "compress": gzip_responses},
                        {"names": ["federation"], "compress": False},
                    ],
                }
            )

            unsecure_port = config.get("unsecure_port", bind_port - 400)
            if unsecure_port:
                self.listeners.append(
                    {
                        "port": unsecure_port,
                        "bind_addresses": [bind_host],
                        "tls": False,
                        "type": "http",
                        "resources": [
                            {"names": ["client"], "compress": gzip_responses},
                            {"names": ["federation"], "compress": False},
                        ],
                    }
                )

        manhole = config.get("manhole")
        if manhole:
            self.listeners.append(
                {
                    "port": manhole,
                    "bind_addresses": ["127.0.0.1"],
                    "type": "manhole",
                    "tls": False,
                }
            )

        metrics_port = config.get("metrics_port")
        if metrics_port:
            logger.warning(METRICS_PORT_WARNING)

            self.listeners.append(
                {
                    "port": metrics_port,
                    "bind_addresses": [config.get("metrics_bind_host", "127.0.0.1")],
                    "tls": False,
                    "type": "http",
                    "resources": [{"names": ["metrics"], "compress": False}],
                }
            )

        _check_resource_config(self.listeners)

        self.cleanup_extremities_with_dummy_events = config.get(
            "cleanup_extremities_with_dummy_events", True
        )

        # The number of forward extremities in a room needed to send a dummy event.
        self.dummy_events_threshold = config.get("dummy_events_threshold", 10)

        self.enable_ephemeral_messages = config.get("enable_ephemeral_messages", False)

        # Inhibits the /requestToken endpoints from returning an error that might leak
        # information about whether an e-mail address is in use or not on this
        # homeserver, and instead return a 200 with a fake sid if this kind of error is
        # met, without sending anything.
        # This is a compromise between sending an email, which could be a spam vector,
        # and letting the client know which email address is bound to an account and
        # which one isn't.
        self.request_token_inhibit_3pid_errors = config.get(
            "request_token_inhibit_3pid_errors", False,
        )

    def has_tls_listener(self) -> bool:
        return any(l["tls"] for l in self.listeners)

    def generate_config_section(
        self, server_name, data_dir_path, open_private_ports, listeners, **kwargs
    ):
        _, bind_port = parse_and_validate_server_name(server_name)
        if bind_port is not None:
            unsecure_port = bind_port - 400
        else:
            bind_port = 8448
            unsecure_port = 8008

        pid_file = os.path.join(data_dir_path, "homeserver.pid")

        # Bring DEFAULT_ROOM_VERSION into the local-scope for use in the
        # default config string
        default_room_version = DEFAULT_ROOM_VERSION
        secure_listeners = []
        unsecure_listeners = []
        private_addresses = ["::1", "127.0.0.1"]
        if listeners:
            for listener in listeners:
                if listener["tls"]:
                    secure_listeners.append(listener)
                else:
                    # If we don't want open ports we need to bind the listeners
                    # to some address other than 0.0.0.0. Here we chose to use
                    # localhost.
                    # If the addresses are already bound we won't overwrite them
                    # however.
                    if not open_private_ports:
                        listener.setdefault("bind_addresses", private_addresses)

                    unsecure_listeners.append(listener)

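            # Re-serialise the supplied listener blocks back into YAML,
            # indented so they line up inside the generated config template.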
            secure_http_bindings = indent(
                yaml.dump(secure_listeners), " " * 10
            ).lstrip()

            unsecure_http_bindings = indent(
                yaml.dump(unsecure_listeners), " " * 10
            ).lstrip()

        if not unsecure_listeners:
            unsecure_http_bindings = (
                """- port: %(unsecure_port)s
            tls: false
            type: http
            x_forwarded: true"""
                % locals()
            )

            if not open_private_ports:
                unsecure_http_bindings += (
                    "\n            bind_addresses: ['::1', '127.0.0.1']"
                )

            unsecure_http_bindings += """

            resources:
              - names: [client, federation]
                compress: false"""

            if listeners:
                # comment out this block
                unsecure_http_bindings = "#" + re.sub(
                    "\n {10}",
                    lambda match: match.group(0) + "#",
                    unsecure_http_bindings,
                )

        if not secure_listeners:
            secure_http_bindings = (
                """#- port: %(bind_port)s
          #  type: http
          #  tls: true
          #  resources:
          #    - names: [client, federation]"""
                % locals()
            )

        return (
            """\
        ## Server ##

        # The domain name of the server, with optional explicit port.
        # This is used by remote servers to connect to this server,
        # e.g. matrix.org, localhost:8080, etc.
        # This is also the last part of your UserID.
        #
        server_name: "%(server_name)s"

        # When running as a daemon, the file to store the pid in
        #
        pid_file: %(pid_file)s

        # The absolute URL to the web client which /_matrix/client will redirect
        # to if 'webclient' is configured under the 'listeners' configuration.
        #
        # This option can be also set to the filesystem path to the web client
        # which will be served at /_matrix/client/ if 'webclient' is configured
        # under the 'listeners' configuration, however this is a security risk:
        # https://github.com/matrix-org/synapse#security-note
        #
        #web_client_location: https://riot.example.com/

        # The public-facing base URL that clients use to access this HS
        # (not including _matrix/...). This is the same URL a user would
        # enter into the 'custom HS URL' field on their client. If you
        # use synapse with a reverse proxy, this should be the URL to reach
        # synapse via the proxy.
        #
        #public_baseurl: https://example.com/

        # Set the soft limit on the number of file descriptors synapse can use.
        # Zero is used to indicate synapse should set the soft limit to the
        # hard limit.
        #
        #soft_file_limit: 0

        # Set to false to disable presence tracking on this homeserver.
        #
        #use_presence: false

        # Whether to require authentication to retrieve profile data (avatars,
        # display names) of other users through the client API. Defaults to
        # 'false'. Note that profile data is also available via the federation
        # API, so this setting is of limited value if federation is enabled on
        # the server.
        #
        #require_auth_for_profile_requests: true

        # Uncomment to require a user to share a room with another user in order
        # to retrieve their profile information. Only checked on Client-Server
        # requests. Profile requests from other servers should be checked by the
        # requesting server. Defaults to 'false'.
        #
        #limit_profile_requests_to_users_who_share_rooms: true

        # If set to 'true', removes the need for authentication to access the server's
        # public rooms directory through the client API, meaning that anyone can
        # query the room directory. Defaults to 'false'.
        #
        #allow_public_rooms_without_auth: true

        # If set to 'true', allows any other homeserver to fetch the server's public
        # rooms directory via federation. Defaults to 'false'.
        #
        #allow_public_rooms_over_federation: true

        # The default room version for newly created rooms.
        #
        # Known room versions are listed here:
        # https://matrix.org/docs/spec/#complete-list-of-room-versions
        #
        # For example, for room version 1, default_room_version should be set
        # to "1".
        #
        #default_room_version: "%(default_room_version)s"

        # The GC threshold parameters to pass to `gc.set_threshold`, if defined
        #
        #gc_thresholds: [700, 10, 10]

        # Set the limit on the returned events in the timeline in the get
        # and sync operations. The default value is -1, which means no upper limit.
        #
        #filter_timeline_limit: 5000

        # Whether room invites to users on this server should be blocked
        # (except those sent by local server admins). The default is False.
        #
        #block_non_admin_invites: true

        # Room searching
        #
        # If disabled, new messages will not be indexed for searching and users
        # will receive errors when searching for messages. Defaults to enabled.
        #
        #enable_search: false

        # Restrict federation to the following whitelist of domains.
        # N.B. we recommend also firewalling your federation listener to limit
        # inbound federation traffic as early as possible, rather than relying
        # purely on this application-layer restriction.  If not specified, the
        # default is to whitelist everything.
        #
        #federation_domain_whitelist:
        #  - lon.example.com
        #  - nyc.example.com
        #  - syd.example.com

        # Prevent federation requests from being sent to the following
        # blacklist IP address CIDR ranges. If this option is not specified, or
        # specified with an empty list, no ip range blacklist will be enforced.
        #
        # As of Synapse v1.4.0 this option also affects any outbound requests to identity
        # servers provided by user input.
        #
        # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
        # listed here, since they correspond to unroutable addresses.)
        #
        federation_ip_range_blacklist:
          - '127.0.0.0/8'
          - '10.0.0.0/8'
          - '172.16.0.0/12'
          - '192.168.0.0/16'
          - '100.64.0.0/10'
          - '169.254.0.0/16'
          - '::1/128'
          - 'fe80::/64'
          - 'fc00::/7'

        # List of ports that Synapse should listen on, their purpose and their
        # configuration.
        #
        # Options for each listener include:
        #
        #   port: the TCP port to bind to
        #
        #   bind_addresses: a list of local addresses to listen on. The default is
        #       'all local interfaces'.
        #
        #   type: the type of listener. Normally 'http', but other valid options are:
        #       'manhole' (see docs/manhole.md),
        #       'metrics' (see docs/metrics-howto.md),
        #       'replication' (see docs/workers.md).
        #
        #   tls: set to true to enable TLS for this listener. Will use the TLS
        #       key/cert specified in tls_private_key_path / tls_certificate_path.
        #
        #   x_forwarded: Only valid for an 'http' listener. Set to true to use the
        #       X-Forwarded-For header as the client IP. Useful when Synapse is
        #       behind a reverse-proxy.
        #
        #   resources: Only valid for an 'http' listener. A list of resources to host
        #       on this port. Options for each resource are:
        #
        #       names: a list of names of HTTP resources. See below for a list of
        #           valid resource names.
        #
        #       compress: set to true to enable HTTP compression for this resource.
        #
        #   additional_resources: Only valid for an 'http' listener. A map of
        #        additional endpoints which should be loaded via dynamic modules.
        #
        # Valid resource names are:
        #
        #   client: the client-server API (/_matrix/client), and the synapse admin
        #       API (/_synapse/admin). Also implies 'media' and 'static'.
        #
        #   consent: user consent forms (/_matrix/consent). See
        #       docs/consent_tracking.md.
        #
        #   federation: the server-server API (/_matrix/federation). Also implies
        #       'media', 'keys', 'openid'
        #
        #   keys: the key discovery API (/_matrix/keys).
        #
        #   media: the media API (/_matrix/media).
        #
        #   metrics: the metrics interface. See docs/metrics-howto.md.
        #
        #   openid: OpenID authentication.
        #
        #   replication: the HTTP replication API (/_synapse/replication). See
        #       docs/workers.md.
        #
        #   static: static resources under synapse/static (/_matrix/static). (Mostly
        #       useful for 'fallback authentication'.)
        #
        #   webclient: A web client. Requires web_client_location to be set.
        #
        listeners:
          # TLS-enabled listener: for when matrix traffic is sent directly to synapse.
          #
          # Disabled by default. To enable it, uncomment the following. (Note that you
          # will also need to give Synapse a TLS key and certificate: see the TLS section
          # below.)
          #
          %(secure_http_bindings)s

          # Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy
          # that unwraps TLS.
          #
          # If you plan to use a reverse proxy, please see
          # https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md.
          #
          %(unsecure_http_bindings)s

            # example additional_resources:
            #
            #additional_resources:
            #  "/_matrix/my/custom/endpoint":
            #    module: my_module.CustomRequestHandler
            #    config: {}

          # Turn on the twisted ssh manhole service on localhost on the given
          # port.
          #
          #- port: 9000
          #  bind_addresses: ['::1', '127.0.0.1']
          #  type: manhole

        # Forward extremities can build up in a room due to networking delays between
        # homeservers. Once this happens in a large room, calculation of the state of
        # that room can become quite expensive. To mitigate this, once the number of
        # forward extremities reaches a given threshold, Synapse will send an
        # org.matrix.dummy_event event, which will reduce the forward extremities
        # in the room.
        #
        # This setting defines the threshold (i.e. number of forward extremities in the
        # room) at which dummy events are sent. The default value is 10.
        #
        #dummy_events_threshold: 5


        ## Homeserver blocking ##

        # How to reach the server admin, used in ResourceLimitError
        #
        #admin_contact: 'mailto:[email protected]'

        # Global blocking
        #
        #hs_disabled: false
        #hs_disabled_message: 'Human readable reason for why the HS is blocked'

        # Monthly Active User Blocking
        #
        # Used in cases where the admin or server owner wants to limit the
        # number of monthly active users.
        #
        # 'limit_usage_by_mau' disables/enables monthly active user blocking. When
        # enabled and a limit is reached the server returns a 'ResourceLimitError'
        # with error type Codes.RESOURCE_LIMIT_EXCEEDED
        #
        # 'max_mau_value' is the hard limit of monthly active users above which
        # the server will start blocking user actions.
        #
        # 'mau_trial_days' is a means to add a grace period for active users. It
        # means that users must be active for this number of days before they
        # can be considered active and guards against the case where lots of users
        # sign up in a short space of time never to return after their initial
        # session.
        #
        # 'mau_limit_alerting' is a means of limiting client side alerting
        # should the mau limit be reached. This is useful for small instances
        # where the admin has 5 mau seats (say) for 5 specific people and no
        # interest in increasing the mau limit further. Defaults to true, which
        # means that alerting is enabled.
        #
        #limit_usage_by_mau: false
        #max_mau_value: 50
        #mau_trial_days: 2
        #mau_limit_alerting: false

        # If enabled, the metrics for the number of monthly active users will
        # be populated, however no one will be limited. If limit_usage_by_mau
        # is true, this is implied to be true.
        #
        #mau_stats_only: false

        # Sometimes the server admin will want to ensure certain accounts are
        # never blocked by mau checking. These accounts are specified here.
        #
        #mau_limit_reserved_threepids:
        #  - medium: 'email'
        #    address: 'reserved_user@example.com'

        # Used by phonehome stats to group together related servers.
        #server_context: context

        # Resource-constrained homeserver settings
        #
        # If limit_remote_rooms.enabled is True, the room complexity will be
        # checked before a user joins a new remote room. If it is above
        # limit_remote_rooms.complexity, the join will be disallowed, or the
        # user will instantly leave the room.
        #
        # limit_remote_rooms.complexity_error can be set to customise the text
        # displayed to the user when a room above the complexity threshold has
        # its join cancelled.
        #
        # Uncomment the below lines to enable:
        #limit_remote_rooms:
        #  enabled: true
        #  complexity: 1.0
        #  complexity_error: "This room is too complex."

        # Whether to require a user to be in the room to add an alias to it.
        # Defaults to 'true'.
        #
        #require_membership_for_aliases: false

        # Whether to allow per-room membership profiles by sending membership
        # events with profile information that differs from the target's global profile.
        # Defaults to 'true'.
        #
        #allow_per_room_profiles: false

        # How long to keep redacted events in unredacted form in the database. After
        # this period redacted events get replaced with their redacted form in the DB.
        #
        # Defaults to `7d`. Set to `null` to disable.
        #
        #redaction_retention_period: 28d

        # How long to track users' last seen time and IPs in the database.
        #
        # Defaults to `28d`. Set to `null` to disable clearing out of old rows.
        #
        #user_ips_max_age: 14d

        # Message retention policy at the server level.
        #
        # Room admins and mods can define a retention period for their rooms using the
        # 'm.room.retention' state event, and server admins can cap this period by setting
        # the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
        #
        # If this feature is enabled, Synapse will regularly look for and purge events
        # which are older than the room's maximum retention period. Synapse will also
        # filter events received over federation so that events that should have been
        # purged are ignored and not stored again.
        #
        retention:
          # The message retention policies feature is disabled by default. Uncomment the
          # following line to enable it.
          #
          #enabled: true

          # Default retention policy. If set, Synapse will apply it to rooms that lack the
          # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
          # matter much because Synapse doesn't take it into account yet.
          #
          #default_policy:
          #  min_lifetime: 1d
          #  max_lifetime: 1y

          # Retention policy limits. If set, a user won't be able to send a
          # 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime'
          # that's not within this range. This is especially useful in closed federations,
          # in which server admins can make sure every federating server applies the same
          # rules.
          #
          #allowed_lifetime_min: 1d
          #allowed_lifetime_max: 1y

        # Server admins can define the settings of the background jobs purging the
        # events whose lifetime has expired under the 'purge_jobs' section.
          #
          # If no configuration is provided, a single job will be set up to delete expired
          # events in every room daily.
          #
          # Each job's configuration defines which range of message lifetimes the job
          # takes care of. For example, if 'shortest_max_lifetime' is '2d' and
          # 'longest_max_lifetime' is '3d', the job will handle purging expired events in
          # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
          # lower than or equal to 3 days. Both the minimum and the maximum value of a
          # range are optional, e.g. a job with no 'shortest_max_lifetime' and a
          # 'longest_max_lifetime' of '3d' will handle every room with a retention policy
        # whose 'max_lifetime' is lower than or equal to three days.
          #
          # The rationale for this per-job configuration is that some rooms might have a
          # retention policy with a low 'max_lifetime', where history needs to be purged
          # of outdated messages on a more frequent basis than for the rest of the rooms
        # (e.g. every 12h), without that purge being performed by a job that
        # iterates over every room it knows of, which could be heavy on the server.
          #
          #purge_jobs:
          #  - shortest_max_lifetime: 1d
          #    longest_max_lifetime: 3d
          #    interval: 12h
          #  - shortest_max_lifetime: 3d
          #    longest_max_lifetime: 1y
          #    interval: 1d

        # Inhibits the /requestToken endpoints from returning an error that might leak
        # information about whether an e-mail address is in use or not on this
        # homeserver.
        # Note that for some endpoints the error is that the e-mail address is
        # already in use, and for others that it is not in use.
        # If this option is enabled, instead of returning an error, these endpoints will
        # act as if no error happened and return a fake session ID ('sid') to clients.
        #
        #request_token_inhibit_3pid_errors: true
        """
            % locals()
        )

    def read_arguments(self, args):
        if args.manhole is not None:
            self.manhole = args.manhole
        if args.daemonize is not None:
            self.daemonize = args.daemonize
        if args.print_pidfile is not None:
            self.print_pidfile = args.print_pidfile

    @staticmethod
    def add_arguments(parser):
        server_group = parser.add_argument_group("server")
        server_group.add_argument(
            "-D",
            "--daemonize",
            action="store_true",
            default=None,
            help="Daemonize the homeserver",
        )
        server_group.add_argument(
            "--print-pidfile",
            action="store_true",
            default=None,
            help="Print the path to the pidfile just before daemonizing",
        )
        server_group.add_argument(
            "--manhole",
            metavar="PORT",
            dest="manhole",
            type=int,
            help="Turn on the twisted telnet manhole service on the given port.",
        )
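
A minimal standalone sketch of the %(...)s / locals() templating used by the generate_config_section methods above; the build_section helper name is hypothetical, introduced only for illustration.

import os

def build_section(server_name, data_dir_path):
    pid_file = os.path.join(data_dir_path, "homeserver.pid")
    # Every local name (server_name, data_dir_path, pid_file) becomes a
    # %(name)s substitution target in the template string, exactly as in the
    # generate_config_section methods above.
    return (
        'server_name: "%(server_name)s"\n'
        "pid_file: %(pid_file)s\n"
    ) % locals()

print(build_section("example.com", "/var/lib/synapse"))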
Example #18
class ServerConfig(Config):
    def read_config(self, config, **kwargs):
        self.server_name = config["server_name"]
        self.server_context = config.get("server_context", None)

        try:
            parse_and_validate_server_name(self.server_name)
        except ValueError as e:
            raise ConfigError(str(e))

        self.pid_file = self.abspath(config.get("pid_file"))
        self.web_client_location = config.get("web_client_location", None)
        self.soft_file_limit = config.get("soft_file_limit", 0)
        self.daemonize = config.get("daemonize")
        self.print_pidfile = config.get("print_pidfile")
        self.user_agent_suffix = config.get("user_agent_suffix")
        self.use_frozen_dicts = config.get("use_frozen_dicts", False)
        self.public_baseurl = config.get("public_baseurl")

        # Whether to send federation traffic out in this process. This only
        # applies to some federation traffic, and so shouldn't be used to
        # "disable" federation
        self.send_federation = config.get("send_federation", True)

        # Whether to enable user presence.
        self.use_presence = config.get("use_presence", True)

        # Whether to update the user directory or not. This should be set to
        # false only if we are updating the user directory in a worker
        self.update_user_directory = config.get("update_user_directory", True)

        # whether to enable the media repository endpoints. This should be set
        # to false if the media repository is running as a separate endpoint;
        # doing so ensures that we will not run cache cleanup jobs on the
        # master, potentially causing inconsistency.
        self.enable_media_repo = config.get("enable_media_repo", True)

        # Whether to require authentication to retrieve profile data (avatars,
        # display names) of other users through the client API.
        self.require_auth_for_profile_requests = config.get(
            "require_auth_for_profile_requests", False)

        if "restrict_public_rooms_to_local_users" in config and (
                "allow_public_rooms_without_auth" in config
                or "allow_public_rooms_over_federation" in config):
            raise ConfigError(
                "Can't use 'restrict_public_rooms_to_local_users' if"
                " 'allow_public_rooms_without_auth' and/or"
                " 'allow_public_rooms_over_federation' is set.")

        # Check if the legacy "restrict_public_rooms_to_local_users" flag is set. This
        # flag is now obsolete but we need to check it for backward-compatibility.
        if config.get("restrict_public_rooms_to_local_users", False):
            self.allow_public_rooms_without_auth = False
            self.allow_public_rooms_over_federation = False
        else:
            # If set to 'False', requires authentication to access the server's public
            # rooms directory through the client API. Defaults to 'True'.
            self.allow_public_rooms_without_auth = config.get(
                "allow_public_rooms_without_auth", True)
            # If set to 'False', forbids any other homeserver to fetch the server's public
            # rooms directory via federation. Defaults to 'True'.
            self.allow_public_rooms_over_federation = config.get(
                "allow_public_rooms_over_federation", True)

        default_room_version = config.get("default_room_version",
                                          DEFAULT_ROOM_VERSION)

        # Ensure room version is a str
        default_room_version = str(default_room_version)

        if default_room_version not in KNOWN_ROOM_VERSIONS:
            raise ConfigError(
                "Unknown default_room_version: %s, known room versions: %s" %
                (default_room_version, list(KNOWN_ROOM_VERSIONS.keys())))

        # Get the actual room version object rather than just the identifier
        self.default_room_version = KNOWN_ROOM_VERSIONS[default_room_version]

        # whether to enable search. If disabled, new entries will not be inserted
        # into the search tables and they will not be indexed. Users will receive
        # errors when attempting to search for messages.
        self.enable_search = config.get("enable_search", True)

        self.filter_timeline_limit = config.get("filter_timeline_limit", -1)

        # Whether we should block invites sent to users on this server
        # (other than those sent by local server admins)
        self.block_non_admin_invites = config.get("block_non_admin_invites",
                                                  False)

        # Whether to enable experimental MSC1849 (aka relations) support
        self.experimental_msc1849_support_enabled = config.get(
            "experimental_msc1849_support_enabled", True)

        # Options to control access by tracking MAU
        self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
        self.max_mau_value = 0
        if self.limit_usage_by_mau:
            self.max_mau_value = config.get("max_mau_value", 0)
        self.mau_stats_only = config.get("mau_stats_only", False)

        self.mau_limits_reserved_threepids = config.get(
            "mau_limit_reserved_threepids", [])

        self.mau_trial_days = config.get("mau_trial_days", 0)

        # Options to disable HS
        self.hs_disabled = config.get("hs_disabled", False)
        self.hs_disabled_message = config.get("hs_disabled_message", "")
        self.hs_disabled_limit_type = config.get("hs_disabled_limit_type", "")

        # Admin uri to direct users at should their instance become blocked
        # due to resource constraints
        self.admin_contact = config.get("admin_contact", None)

        # FIXME: federation_domain_whitelist needs sytests
        self.federation_domain_whitelist = None
        federation_domain_whitelist = config.get("federation_domain_whitelist",
                                                 None)

        if federation_domain_whitelist is not None:
            # turn the whitelist into a hash for speed of lookup
            self.federation_domain_whitelist = {}

            for domain in federation_domain_whitelist:
                self.federation_domain_whitelist[domain] = True

        self.federation_ip_range_blacklist = config.get(
            "federation_ip_range_blacklist", [])

        # Attempt to create an IPSet from the given ranges
        try:
            self.federation_ip_range_blacklist = IPSet(
                self.federation_ip_range_blacklist)

            # Always blacklist 0.0.0.0, ::
            self.federation_ip_range_blacklist.update(["0.0.0.0", "::"])
        except Exception as e:
            raise ConfigError("Invalid range(s) provided in "
                              "federation_ip_range_blacklist: %s" % e)

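        # Normalise public_baseurl, if set, to always end with a trailing slash.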
        if self.public_baseurl is not None:
            if self.public_baseurl[-1] != "/":
                self.public_baseurl += "/"
        self.start_pushers = config.get("start_pushers", True)

        # (undocumented) option for torturing the worker-mode replication a bit,
        # for testing. The value defines the number of milliseconds to pause before
        # sending out any replication updates.
        self.replication_torture_level = config.get(
            "replication_torture_level")

        # Whether to require a user to be in the room to add an alias to it.
        # Defaults to True.
        self.require_membership_for_aliases = config.get(
            "require_membership_for_aliases", True)

        # Whether to allow per-room membership profiles by sending membership
        # events with profile information that differs from the target's global profile.
        self.allow_per_room_profiles = config.get("allow_per_room_profiles",
                                                  True)

        self.listeners = []
        for listener in config.get("listeners", []):
            if not isinstance(listener.get("port", None), int):
                raise ConfigError(
                    "Listener configuration is lacking a valid 'port' option")

            if listener.setdefault("tls", False):
                # no_tls is not really supported any more, but let's grandfather it in
                # here.
                if config.get("no_tls", False):
                    logger.info(
                        "Ignoring TLS-enabled listener on port %i due to no_tls",
                        listener.get("port"),
                    )
                    continue

            bind_address = listener.pop("bind_address", None)
            bind_addresses = listener.setdefault("bind_addresses", [])

            # if bind_address was specified, add it to the list of addresses
            if bind_address:
                bind_addresses.append(bind_address)

            # if we still have an empty list of addresses, use the default list
            if not bind_addresses:
                if listener["type"] == "metrics":
                    # the metrics listener doesn't support IPv6
                    bind_addresses.append("0.0.0.0")
                else:
                    bind_addresses.extend(DEFAULT_BIND_ADDRESSES)

            self.listeners.append(listener)

        if not self.web_client_location:
            _warn_if_webclient_configured(self.listeners)

        self.gc_thresholds = read_gc_thresholds(
            config.get("gc_thresholds", None))

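        # Legacy (pre-'listeners') port options: if 'bind_port' is set, replace
        # the listener list with entries synthesised from bind_port/unsecure_port.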
        bind_port = config.get("bind_port")
        if bind_port:
            if config.get("no_tls", False):
                raise ConfigError("no_tls is incompatible with bind_port")

            self.listeners = []
            bind_host = config.get("bind_host", "")
            gzip_responses = config.get("gzip_responses", True)

            self.listeners.append({
                "port": bind_port,
                "bind_addresses": [bind_host],
                "tls": True,
                "type": "http",
                "resources": [
                    {"names": ["client"], "compress": gzip_responses},
                    {"names": ["federation"], "compress": False},
                ],
            })

            unsecure_port = config.get("unsecure_port", bind_port - 400)
            if unsecure_port:
                self.listeners.append({
                    "port": unsecure_port,
                    "bind_addresses": [bind_host],
                    "tls": False,
                    "type": "http",
                    "resources": [
                        {"names": ["client"], "compress": gzip_responses},
                        {"names": ["federation"], "compress": False},
                    ],
                })

        manhole = config.get("manhole")
        if manhole:
            self.listeners.append({
                "port": manhole,
                "bind_addresses": ["127.0.0.1"],
                "type": "manhole",
                "tls": False,
            })

        metrics_port = config.get("metrics_port")
        if metrics_port:
            logger.warning((
                "The metrics_port configuration option is deprecated in Synapse 0.31 "
                "in favour of a listener. Please see "
                "http://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.rst"
                " on how to configure the new listener."))

            self.listeners.append({
                "port": metrics_port,
                "bind_addresses": [config.get("metrics_bind_host", "127.0.0.1")],
                "tls": False,
                "type": "http",
                "resources": [{"names": ["metrics"], "compress": False}],
            })

        _check_resource_config(self.listeners)

        # An experimental option to try and periodically clean up extremities
        # by sending dummy events.
        self.cleanup_extremities_with_dummy_events = config.get(
            "cleanup_extremities_with_dummy_events", False)

    def has_tls_listener(self):
        return any(listener["tls"] for listener in self.listeners)

    def generate_config_section(self, server_name, data_dir_path,
                                open_private_ports, **kwargs):
        _, bind_port = parse_and_validate_server_name(server_name)
        if bind_port is not None:
            unsecure_port = bind_port - 400
        else:
            bind_port = 8448
            unsecure_port = 8008

        pid_file = os.path.join(data_dir_path, "homeserver.pid")

        # Bring DEFAULT_ROOM_VERSION into the local-scope for use in the
        # default config string
        default_room_version = DEFAULT_ROOM_VERSION

        unsecure_http_binding = "port: %i\n            tls: false" % (
            unsecure_port, )
        if not open_private_ports:
            unsecure_http_binding += (
                "\n            bind_addresses: ['::1', '127.0.0.1']")

        return ("""\
        ## Server ##

        # The domain name of the server, with optional explicit port.
        # This is used by remote servers to connect to this server,
        # e.g. matrix.org, localhost:8080, etc.
        # This is also the last part of your UserID.
        #
        server_name: "%(server_name)s"

        # When running as a daemon, the file to store the pid in
        #
        pid_file: %(pid_file)s

        # The path to the web client which will be served at /_matrix/client/
        # if 'webclient' is configured under the 'listeners' configuration.
        #
        #web_client_location: "/path/to/web/root"

        # The public-facing base URL that clients use to access this HS
        # (not including _matrix/...). This is the same URL a user would
        # enter into the 'custom HS URL' field on their client. If you
        # use synapse with a reverse proxy, this should be the URL to reach
        # synapse via the proxy.
        #
        #public_baseurl: https://example.com/

        # Set the soft limit on the number of file descriptors synapse can use
        # Zero is used to indicate synapse should set the soft limit to the
        # hard limit.
        #
        #soft_file_limit: 0

        # Set to false to disable presence tracking on this homeserver.
        #
        #use_presence: false

        # Whether to require authentication to retrieve profile data (avatars,
        # display names) of other users through the client API. Defaults to
        # 'false'. Note that profile data is also available via the federation
        # API, so this setting is of limited value if federation is enabled on
        # the server.
        #
        #require_auth_for_profile_requests: true

        # If set to 'false', requires authentication to access the server's public rooms
        # directory through the client API. Defaults to 'true'.
        #
        #allow_public_rooms_without_auth: false

        # If set to 'false', forbids any other homeserver to fetch the server's public
        # rooms directory via federation. Defaults to 'true'.
        #
        #allow_public_rooms_over_federation: false

        # The default room version for newly created rooms.
        #
        # Known room versions are listed here:
        # https://matrix.org/docs/spec/#complete-list-of-room-versions
        #
        # For example, for room version 1, default_room_version should be set
        # to "1".
        #
        #default_room_version: "%(default_room_version)s"

        # The GC threshold parameters to pass to `gc.set_threshold`, if defined
        #
        #gc_thresholds: [700, 10, 10]

        # Set the limit on the number of events returned in the timeline by the
        # get and sync operations. The default value is -1, meaning no upper limit.
        #
        #filter_timeline_limit: 5000

        # Whether room invites to users on this server should be blocked
        # (except those sent by local server admins). The default is False.
        #
        #block_non_admin_invites: True

        # Room searching
        #
        # If disabled, new messages will not be indexed for searching and users
        # will receive errors when searching for messages. Defaults to enabled.
        #
        #enable_search: false

        # Restrict federation to the following whitelist of domains.
        # N.B. we recommend also firewalling your federation listener to limit
        # inbound federation traffic as early as possible, rather than relying
        # purely on this application-layer restriction.  If not specified, the
        # default is to whitelist everything.
        #
        #federation_domain_whitelist:
        #  - lon.example.com
        #  - nyc.example.com
        #  - syd.example.com

        # Prevent federation requests from being sent to the following blacklisted
        # IP address CIDR ranges. If this option is not specified, or specified
        # with an empty list, no IP range blacklist will be enforced.
        #
        # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
        # listed here, since they correspond to unroutable addresses.)
        #
        federation_ip_range_blacklist:
          - '127.0.0.0/8'
          - '10.0.0.0/8'
          - '172.16.0.0/12'
          - '192.168.0.0/16'
          - '100.64.0.0/10'
          - '169.254.0.0/16'
          - '::1/128'
          - 'fe80::/64'
          - 'fc00::/7'

        # List of ports that Synapse should listen on, their purpose and their
        # configuration.
        #
        # Options for each listener include:
        #
        #   port: the TCP port to bind to
        #
        #   bind_addresses: a list of local addresses to listen on. The default is
        #       'all local interfaces'.
        #
        #   type: the type of listener. Normally 'http', but other valid options are:
        #       'manhole' (see docs/manhole.md),
        #       'metrics' (see docs/metrics-howto.rst),
        #       'replication' (see docs/workers.rst).
        #
        #   tls: set to true to enable TLS for this listener. Will use the TLS
        #       key/cert specified in tls_private_key_path / tls_certificate_path.
        #
        #   x_forwarded: Only valid for an 'http' listener. Set to true to use the
        #       X-Forwarded-For header as the client IP. Useful when Synapse is
        #       behind a reverse-proxy.
        #
        #   resources: Only valid for an 'http' listener. A list of resources to host
        #       on this port. Options for each resource are:
        #
        #       names: a list of names of HTTP resources. See below for a list of
        #           valid resource names.
        #
        #       compress: set to true to enable HTTP compression for this resource.
        #
        #   additional_resources: Only valid for an 'http' listener. A map of
        #        additional endpoints which should be loaded via dynamic modules.
        #
        # Valid resource names are:
        #
        #   client: the client-server API (/_matrix/client), and the synapse admin
        #       API (/_synapse/admin). Also implies 'media' and 'static'.
        #
        #   consent: user consent forms (/_matrix/consent). See
        #       docs/consent_tracking.md.
        #
        #   federation: the server-server API (/_matrix/federation). Also implies
        #       'media', 'keys', 'openid'
        #
        #   keys: the key discovery API (/_matrix/keys).
        #
        #   media: the media API (/_matrix/media).
        #
        #   metrics: the metrics interface. See docs/metrics-howto.rst.
        #
        #   openid: OpenID authentication.
        #
        #   replication: the HTTP replication API (/_synapse/replication). See
        #       docs/workers.rst.
        #
        #   static: static resources under synapse/static (/_matrix/static). (Mostly
        #       useful for 'fallback authentication'.)
        #
        #   webclient: A web client. Requires web_client_location to be set.
        #
        listeners:
          # TLS-enabled listener: for when matrix traffic is sent directly to synapse.
          #
          # Disabled by default. To enable it, uncomment the following. (Note that you
          # will also need to give Synapse a TLS key and certificate: see the TLS section
          # below.)
          #
          #- port: %(bind_port)s
          #  type: http
          #  tls: true
          #  resources:
          #    - names: [client, federation]

          # Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy
          # that unwraps TLS.
          #
          # If you plan to use a reverse proxy, please see
          # https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.rst.
          #
          - %(unsecure_http_binding)s
            type: http
            x_forwarded: true

            resources:
              - names: [client, federation]
                compress: false

            # example additional_resources:
            #
            #additional_resources:
            #  "/_matrix/my/custom/endpoint":
            #    module: my_module.CustomRequestHandler
            #    config: {}

          # Turn on the twisted ssh manhole service on localhost on the given
          # port.
          #
          #- port: 9000
          #  bind_addresses: ['::1', '127.0.0.1']
          #  type: manhole


        ## Homeserver blocking ##

        # How to reach the server admin, used in ResourceLimitError
        #
        #admin_contact: 'mailto:admin@server.com'

        # Global blocking
        #
        #hs_disabled: False
        #hs_disabled_message: 'Human readable reason for why the HS is blocked'
        #hs_disabled_limit_type: 'error code(str), to help clients decode reason'

        # Monthly Active User Blocking
        #
        # Used in cases where the admin or server owner wants to limit the
        # number of monthly active users.
        #
        # 'limit_usage_by_mau' disables/enables monthly active user blocking. When
        # enabled and a limit is reached the server returns a 'ResourceLimitError'
        # with error type Codes.RESOURCE_LIMIT_EXCEEDED.
        #
        # 'max_mau_value' is the hard limit of monthly active users above which
        # the server will start blocking user actions.
        #
        # 'mau_trial_days' is a means to add a grace period for active users. It
        # means that users must be active for this number of days before they
        # can be considered active and guards against the case where lots of users
        # sign up in a short space of time never to return after their initial
        # session.
        #
        #limit_usage_by_mau: False
        #max_mau_value: 50
        #mau_trial_days: 2

        # If enabled, the metrics for the number of monthly active users will
        # be populated, however no one will be limited. If limit_usage_by_mau
        # is true, this is implied to be true.
        #
        #mau_stats_only: False

        # Sometimes the server admin will want to ensure certain accounts are
        # never blocked by mau checking. These accounts are specified here.
        #
        #mau_limit_reserved_threepids:
        #  - medium: 'email'
        #    address: 'reserved_user@example.com'

        # Used by phonehome stats to group together related servers.
        #server_context: context

        # Whether to require a user to be in the room to add an alias to it.
        # Defaults to 'true'.
        #
        #require_membership_for_aliases: false

        # Whether to allow per-room membership profiles by sending membership
        # events with profile information that differs from the target's global profile.
        # Defaults to 'true'.
        #
        #allow_per_room_profiles: false
        """ % locals())

    def read_arguments(self, args):
        if args.manhole is not None:
            self.manhole = args.manhole
        if args.daemonize is not None:
            self.daemonize = args.daemonize
        if args.print_pidfile is not None:
            self.print_pidfile = args.print_pidfile

    @staticmethod
    def add_arguments(parser):
        server_group = parser.add_argument_group("server")
        server_group.add_argument(
            "-D",
            "--daemonize",
            action="store_true",
            default=None,
            help="Daemonize the home server",
        )
        server_group.add_argument(
            "--print-pidfile",
            action="store_true",
            default=None,
            help="Print the path to the pidfile just"
            " before daemonizing",
        )
        server_group.add_argument(
            "--manhole",
            metavar="PORT",
            dest="manhole",
            type=int,
            help="Turn on the twisted telnet manhole"
            " service on the given port.",
        )
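
A minimal sketch of the netaddr behaviour that read_config above relies on for federation_ip_range_blacklist: IPSet folds CIDR ranges together, update() accepts single addresses, and malformed input raises, which the code converts into a ConfigError.

from netaddr import AddrFormatError, IPAddress, IPSet

blacklist = IPSet(["127.0.0.0/8", "10.0.0.0/8", "192.168.0.0/16"])
blacklist.update(["0.0.0.0", "::"])  # the two always-blacklisted addresses

assert IPAddress("10.1.2.3") in blacklist      # inside 10.0.0.0/8
assert IPAddress("0.0.0.0") in blacklist       # explicitly added above
assert IPAddress("8.8.8.8") not in blacklist   # routable and unlisted

# Malformed ranges raise AddrFormatError, hence the try/except in read_config.
try:
    IPSet(["not-a-range"])
except AddrFormatError as e:
    print("rejected:", e)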
Example #19
class ContentRepositoryConfig(Config):
    def read_config(self, config):
        self.max_upload_size = self.parse_size(config.get("max_upload_size", "10M"))
        self.max_image_pixels = self.parse_size(config.get("max_image_pixels", "32M"))
        self.max_spider_size = self.parse_size(config.get("max_spider_size", "10M"))

        self.media_store_path = self.ensure_directory(config["media_store_path"])

        backup_media_store_path = config.get("backup_media_store_path")

        synchronous_backup_media_store = config.get(
            "synchronous_backup_media_store", False
        )

        storage_providers = config.get("media_storage_providers", [])

        if backup_media_store_path:
            if storage_providers:
                raise ConfigError(
                    "Cannot use both 'backup_media_store_path' and 'storage_providers'"
                )

            storage_providers = [{
                "module": "file_system",
                "store_local": True,
                "store_synchronous": synchronous_backup_media_store,
                "store_remote": True,
                "config": {
                    "directory": backup_media_store_path,
                }
            }]

        # This is a list of config that can be used to create the storage
        # providers. The entries are tuples of (Class, class_config,
        # MediaStorageProviderConfig), where Class is the class of the provider,
        # the class_config the config to pass to it, and
        # MediaStorageProviderConfig are options for StorageProviderWrapper.
        #
        # We don't create the storage providers here as not all workers need
        # them to be started.
        self.media_storage_providers = []

        for provider_config in storage_providers:
            # We special case the module "file_system" so as not to need to
            # expose FileStorageProviderBackend
            if provider_config["module"] == "file_system":
                provider_config["module"] = (
                    "synapse.rest.media.v1.storage_provider"
                    ".FileStorageProviderBackend"
                )

            provider_class, parsed_config = load_module(provider_config)

            wrapper_config = MediaStorageProviderConfig(
                provider_config.get("store_local", False),
                provider_config.get("store_remote", False),
                provider_config.get("store_synchronous", False),
            )

            self.media_storage_providers.append(
                (provider_class, parsed_config, wrapper_config,)
            )

        self.uploads_path = self.ensure_directory(config["uploads_path"])
        self.dynamic_thumbnails = config.get("dynamic_thumbnails", False)
        self.thumbnail_requirements = parse_thumbnail_requirements(
            config.get("thumbnail_sizes", DEFAULT_THUMBNAIL_SIZES),
        )
        self.url_preview_enabled = config.get("url_preview_enabled", False)
        if self.url_preview_enabled:
            try:
                import lxml
                lxml  # To stop unused lint.
            except ImportError:
                raise ConfigError(MISSING_LXML)

            try:
                from netaddr import IPSet
            except ImportError:
                raise ConfigError(MISSING_NETADDR)

            if "url_preview_ip_range_blacklist" not in config:
                raise ConfigError(
                    "For security, you must specify an explicit target IP address "
                    "blacklist in url_preview_ip_range_blacklist for url previewing "
                    "to work"
                )

            self.url_preview_ip_range_blacklist = IPSet(
                config["url_preview_ip_range_blacklist"]
            )

            # we always blacklist '0.0.0.0' and '::', which are supposed to be
            # unroutable addresses.
            self.url_preview_ip_range_blacklist.update(['0.0.0.0', '::'])

            self.url_preview_ip_range_whitelist = IPSet(
                config.get("url_preview_ip_range_whitelist", ())
            )

            self.url_preview_url_blacklist = config.get(
                "url_preview_url_blacklist", ()
            )

    def default_config(self, data_dir_path, **kwargs):
        media_store = os.path.join(data_dir_path, "media_store")
        uploads_path = os.path.join(data_dir_path, "uploads")

        formatted_thumbnail_sizes = "".join(
            THUMBNAIL_SIZE_YAML % s for s in DEFAULT_THUMBNAIL_SIZES
        )
        # strip final NL
        formatted_thumbnail_sizes = formatted_thumbnail_sizes[:-1]

        return r"""
        # Directory where uploaded images and attachments are stored.
        #
        media_store_path: "%(media_store)s"

        # Media storage providers allow media to be stored in different
        # locations.
        #
        #media_storage_providers:
        #  - module: file_system
        #    # Whether to write new local files.
        #    store_local: false
        #    # Whether to write new remote media
        #    store_remote: false
        #    # Whether to block upload requests waiting for write to this
        #    # provider to complete
        #    store_synchronous: false
        #    config:
        #       directory: /mnt/some/other/directory

        # Directory where in-progress uploads are stored.
        #
        uploads_path: "%(uploads_path)s"

        # The largest allowed upload size in bytes
        #
        #max_upload_size: 10M

        # Maximum number of pixels that will be thumbnailed
        #
        #max_image_pixels: 32M

        # Whether to generate new thumbnails on the fly to precisely match
        # the resolution requested by the client. If true then whenever
        # a new resolution is requested by the client the server will
        # generate a new thumbnail. If false the server will pick a thumbnail
        # from a precalculated list.
        #
        #dynamic_thumbnails: false

        # List of thumbnails to precalculate when an image is uploaded.
        #
        #thumbnail_sizes:
%(formatted_thumbnail_sizes)s

        # Is the preview URL API enabled?
        #
        # 'false' by default: uncomment the following to enable it (and specify a
        # url_preview_ip_range_blacklist blacklist).
        #
        #url_preview_enabled: true

        # List of IP address CIDR ranges that the URL preview spider is denied
        # from accessing.  There are no defaults: you must explicitly
        # specify a list for URL previewing to work.  You should specify any
        # internal services in your network that you do not want synapse to try
        # to connect to, otherwise anyone in any Matrix room could cause your
        # synapse to issue arbitrary GET requests to your internal services,
        # causing serious security issues.
        #
        # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
        # listed here, since they correspond to unroutable addresses.)
        #
        # This must be specified if url_preview_enabled is set. It is recommended that
        # you uncomment the following list as a starting point.
        #
        #url_preview_ip_range_blacklist:
        #  - '127.0.0.0/8'
        #  - '10.0.0.0/8'
        #  - '172.16.0.0/12'
        #  - '192.168.0.0/16'
        #  - '100.64.0.0/10'
        #  - '169.254.0.0/16'
        #  - '::1/128'
        #  - 'fe80::/64'
        #  - 'fc00::/7'

        # List of IP address CIDR ranges that the URL preview spider is allowed
        # to access even if they are specified in url_preview_ip_range_blacklist.
        # This is useful for specifying exceptions to wide-ranging blacklisted
        # target IP ranges - e.g. for enabling URL previews for a specific private
        # website only visible in your network.
        #
        #url_preview_ip_range_whitelist:
        #   - '192.168.1.1'

        # Optional list of URL matches that the URL preview spider is
        # denied from accessing.  You should use url_preview_ip_range_blacklist
        # in preference to this, otherwise someone could define a public DNS
        # entry that points to a private IP address and circumvent the blacklist.
        # This is more useful if there is an entire shape of URL that you know
        # you will never want synapse to try to spider.
        #
        # Each list entry is a dictionary of url component attributes as returned
        # by urlparse.urlsplit as applied to the absolute form of the URL.  See
        # https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit
        # The values of the dictionary are treated as a filename match pattern
        # applied to that component of URLs, unless they start with a ^ in which
        # case they are treated as a regular expression match.  If all the
        # specified component matches for a given list item succeed, the URL is
        # blacklisted.
        #
        #url_preview_url_blacklist:
        #  # blacklist any URL with a username in its URI
        #  - username: '*'
        #
        #  # blacklist all *.google.com URLs
        #  - netloc: 'google.com'
        #  - netloc: '*.google.com'
        #
        #  # blacklist all plain HTTP URLs
        #  - scheme: 'http'
        #
        #  # blacklist http(s)://www.acme.com/foo
        #  - netloc: 'www.acme.com'
        #    path: '/foo'
        #
        #  # blacklist any URL with a literal IPv4 address
        #  - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'

        # The largest allowed URL preview spidering size in bytes
        #
        #max_spider_size: 10M
        """ % locals()