Example #1
    def test_swift_info(self):
        # the deprecated 'three' should not exist in expect
        expect = [{'default': True, 'name': 'zero'},
                  {'name': 'two'},
                  {'name': 'one'}]
        swift_info = POLICIES.get_policy_info()
        self.assertEqual(sorted(expect, key=lambda k: k['name']),
                         sorted(swift_info, key=lambda k: k['name']))
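The assertions above imply two things about POLICIES.get_policy_info(): deprecated policies (such as 'three') are omitted from the list, and only the default policy carries a 'default' key. A minimal standalone sketch of that behaviour, not Swift's actual implementation and with illustrative attribute names:

def get_policy_info_sketch(policies):
    # hypothetical stand-in: skip deprecated policies, flag only the
    # default policy, and report each remaining policy by name
    info = []
    for policy in policies:
        if getattr(policy, 'is_deprecated', False):
            continue
        entry = {'name': policy.name}
        if getattr(policy, 'is_default', False):
            entry['default'] = True
        info.append(entry)
    return info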
Example #2
    def test_swift_info(self):
        # the deprecated 'three' should not exist in expect
        expect = [{'aliases': 'zero', 'default': True, 'name': 'zero'},
                  {'aliases': 'two', 'name': 'two'},
                  {'aliases': 'one', 'name': 'one'},
                  {'aliases': 'ten', 'name': 'ten'}]
        swift_info = POLICIES.get_policy_info()
        self.assertEqual(sorted(expect, key=lambda k: k['name']),
                         sorted(swift_info, key=lambda k: k['name']))
Example #3
    def __init__(self,
                 conf,
                 memcache=None,
                 logger=None,
                 account_ring=None,
                 container_ring=None):
        if conf is None:
            conf = {}
        if logger is None:
            self.logger = get_logger(conf, log_route='proxy-server')
        else:
            self.logger = logger

        self._error_limiting = {}

        swift_dir = conf.get('swift_dir', '/etc/swift')
        self.swift_dir = swift_dir
        self.node_timeout = float(conf.get('node_timeout', 10))
        self.recoverable_node_timeout = float(
            conf.get('recoverable_node_timeout', self.node_timeout))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.client_timeout = int(conf.get('client_timeout', 60))
        self.put_queue_depth = int(conf.get('put_queue_depth', 10))
        self.object_chunk_size = int(conf.get('object_chunk_size', 65536))
        self.client_chunk_size = int(conf.get('client_chunk_size', 65536))
        self.trans_id_suffix = conf.get('trans_id_suffix', '')
        self.post_quorum_timeout = float(conf.get('post_quorum_timeout', 0.5))
        self.error_suppression_interval = \
            int(conf.get('error_suppression_interval', 60))
        self.error_suppression_limit = \
            int(conf.get('error_suppression_limit', 10))
        self.recheck_container_existence = \
            int(conf.get('recheck_container_existence', 60))
        self.recheck_account_existence = \
            int(conf.get('recheck_account_existence', 60))
        self.allow_account_management = \
            config_true_value(conf.get('allow_account_management', 'no'))
        self.object_post_as_copy = \
            config_true_value(conf.get('object_post_as_copy', 'true'))
        self.container_ring = container_ring or Ring(swift_dir,
                                                     ring_name='container')
        self.account_ring = account_ring or Ring(swift_dir,
                                                 ring_name='account')
        # ensure rings are loaded for all configured storage policies
        for policy in POLICIES:
            policy.load_ring(swift_dir)
        self.obj_controller_router = ObjectControllerRouter()
        self.memcache = memcache
        mimetypes.init(mimetypes.knownfiles +
                       [os.path.join(swift_dir, 'mime.types')])
        self.account_autocreate = \
            config_true_value(conf.get('account_autocreate', 'no'))
        self.auto_create_account_prefix = (
            conf.get('auto_create_account_prefix') or '.')
        self.expiring_objects_account = self.auto_create_account_prefix + \
            (conf.get('expiring_objects_account_name') or 'expiring_objects')
        self.expiring_objects_container_divisor = \
            int(conf.get('expiring_objects_container_divisor') or 86400)
        self.max_containers_per_account = \
            int(conf.get('max_containers_per_account') or 0)
        self.max_containers_whitelist = [
            a.strip()
            for a in conf.get('max_containers_whitelist', '').split(',')
            if a.strip()
        ]
        self.deny_host_headers = [
            host.strip()
            for host in conf.get('deny_host_headers', '').split(',')
            if host.strip()
        ]
        self.log_handoffs = config_true_value(conf.get('log_handoffs', 'true'))
        self.cors_allow_origin = [
            a.strip() for a in conf.get('cors_allow_origin', '').split(',')
            if a.strip()
        ]
        self.strict_cors_mode = config_true_value(
            conf.get('strict_cors_mode', 't'))
        self.node_timings = {}
        self.timing_expiry = int(conf.get('timing_expiry', 300))
        self.sorting_method = conf.get('sorting_method', 'shuffle').lower()
        self.concurrent_gets = \
            config_true_value(conf.get('concurrent_gets'))
        self.concurrency_timeout = float(
            conf.get('concurrency_timeout', self.conn_timeout))
        value = conf.get('request_node_count', '2 * replicas').lower().split()
        if len(value) == 1:
            rnc_value = int(value[0])
            self.request_node_count = lambda replicas: rnc_value
        elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
            rnc_value = int(value[0])
            self.request_node_count = lambda replicas: rnc_value * replicas
        else:
            raise ValueError('Invalid request_node_count value: %r' %
                             ''.join(value))
        try:
            self._read_affinity = read_affinity = conf.get('read_affinity', '')
            self.read_affinity_sort_key = affinity_key_function(read_affinity)
        except ValueError as err:
            # make the message a little more useful
            raise ValueError("Invalid read_affinity value: %r (%s)" %
                             (read_affinity, err.message))
        try:
            write_affinity = conf.get('write_affinity', '')
            self.write_affinity_is_local_fn \
                = affinity_locality_predicate(write_affinity)
        except ValueError as err:
            # make the message a little more useful
            raise ValueError("Invalid write_affinity value: %r (%s)" %
                             (write_affinity, err.message))
        value = conf.get('write_affinity_node_count',
                         '2 * replicas').lower().split()
        if len(value) == 1:
            wanc_value = int(value[0])
            self.write_affinity_node_count = lambda replicas: wanc_value
        elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
            wanc_value = int(value[0])
            self.write_affinity_node_count = \
                lambda replicas: wanc_value * replicas
        else:
            raise ValueError('Invalid write_affinity_node_count value: %r' %
                             ''.join(value))
        # swift_owner_headers are stripped by the account and container
        # controllers; we should extend header stripping to object controller
        # when a privileged object header is implemented.
        swift_owner_headers = conf.get(
            'swift_owner_headers', 'x-container-read, x-container-write, '
            'x-container-sync-key, x-container-sync-to, '
            'x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, '
            'x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, '
            'x-account-access-control')
        self.swift_owner_headers = [
            name.strip().title() for name in swift_owner_headers.split(',')
            if name.strip()
        ]
        # Initialization was successful, so now apply the client chunk size
        # parameter as the default read / write buffer size for the network
        # sockets.
        #
        # NOTE WELL: This is a class setting, so until we can set this on a
        # per-connection basis, this affects reading and writing on ALL
        # sockets, those between the proxy servers and external clients, and
        # those between the proxy servers and the other internal servers.
        #
        # ** Because it affects the client as well, currently, we use the
        # client chunk size as the governor and not the object chunk size.
        socket._fileobject.default_bufsize = self.client_chunk_size
        self.expose_info = config_true_value(conf.get('expose_info', 'yes'))
        self.disallowed_sections = list_from_csv(
            conf.get('disallowed_sections', 'swift.valid_api_versions'))
        self.admin_key = conf.get('admin_key', None)
        register_swift_info(
            version=swift_version,
            strict_cors_mode=self.strict_cors_mode,
            policies=POLICIES.get_policy_info(),
            allow_account_management=self.allow_account_management,
            account_autocreate=self.account_autocreate,
            **constraints.EFFECTIVE_CONSTRAINTS)
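The request_node_count parsing above (and the matching write_affinity_node_count branch) accepts either a bare integer or the literal form 'N * replicas'. Extracted into a standalone helper with the same logic (the helper name is illustrative only):

def parse_node_count(value):
    # mirrors the constructor's parsing: '3' or '2 * replicas'
    parts = value.lower().split()
    if len(parts) == 1:
        count = int(parts[0])
        return lambda replicas: count
    if len(parts) == 3 and parts[1] == '*' and parts[2] == 'replicas':
        count = int(parts[0])
        return lambda replicas: count * replicas
    raise ValueError('Invalid node count value: %r' % value)

# parse_node_count('2 * replicas')(3) -> 6
# parse_node_count('5')(3)            -> 5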
Example #4
    def __init__(self,
                 conf,
                 logger=None,
                 account_ring=None,
                 container_ring=None):
        if conf is None:
            conf = {}
        if logger is None:
            self.logger = get_logger(conf, log_route='proxy-server')
        else:
            self.logger = logger
        self._error_limiting = {}

        swift_dir = conf.get('swift_dir', '/etc/swift')
        self.swift_dir = swift_dir
        self.node_timeout = float(conf.get('node_timeout', 10))
        self.recoverable_node_timeout = float(
            conf.get('recoverable_node_timeout', self.node_timeout))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.client_timeout = float(conf.get('client_timeout', 60))
        self.object_chunk_size = int(conf.get('object_chunk_size', 65536))
        self.client_chunk_size = int(conf.get('client_chunk_size', 65536))
        self.trans_id_suffix = conf.get('trans_id_suffix', '')
        self.post_quorum_timeout = float(conf.get('post_quorum_timeout', 0.5))
        self.error_suppression_interval = \
            int(conf.get('error_suppression_interval', 60))
        self.error_suppression_limit = \
            int(conf.get('error_suppression_limit', 10))
        self.recheck_container_existence = \
            int(conf.get('recheck_container_existence',
                         DEFAULT_RECHECK_CONTAINER_EXISTENCE))
        self.recheck_updating_shard_ranges = \
            int(conf.get('recheck_updating_shard_ranges',
                         DEFAULT_RECHECK_UPDATING_SHARD_RANGES))
        self.recheck_account_existence = \
            int(conf.get('recheck_account_existence',
                         DEFAULT_RECHECK_ACCOUNT_EXISTENCE))
        self.allow_account_management = \
            config_true_value(conf.get('allow_account_management', 'no'))
        self.container_ring = container_ring or Ring(swift_dir,
                                                     ring_name='container')
        self.account_ring = account_ring or Ring(swift_dir,
                                                 ring_name='account')
        # ensure rings are loaded for all configured storage policies
        for policy in POLICIES:
            policy.load_ring(swift_dir)
        self.obj_controller_router = ObjectControllerRouter()
        mimetypes.init(mimetypes.knownfiles +
                       [os.path.join(swift_dir, 'mime.types')])
        self.account_autocreate = \
            config_true_value(conf.get('account_autocreate', 'no'))
        if conf.get('auto_create_account_prefix'):
            self.logger.warning('Option auto_create_account_prefix is '
                                'deprecated. Configure '
                                'auto_create_account_prefix under the '
                                'swift-constraints section of '
                                'swift.conf. This option will '
                                'be ignored in a future release.')
            self.auto_create_account_prefix = \
                conf['auto_create_account_prefix']
        else:
            self.auto_create_account_prefix = \
                constraints.AUTO_CREATE_ACCOUNT_PREFIX
        self.expiring_objects_account = self.auto_create_account_prefix + \
            (conf.get('expiring_objects_account_name') or 'expiring_objects')
        self.expiring_objects_container_divisor = \
            int(conf.get('expiring_objects_container_divisor') or 86400)
        self.max_containers_per_account = \
            int(conf.get('max_containers_per_account') or 0)
        self.max_containers_whitelist = [
            a.strip()
            for a in conf.get('max_containers_whitelist', '').split(',')
            if a.strip()
        ]
        self.deny_host_headers = [
            host.strip()
            for host in conf.get('deny_host_headers', '').split(',')
            if host.strip()
        ]
        self.log_handoffs = config_true_value(conf.get('log_handoffs', 'true'))
        self.cors_allow_origin = [
            a.strip() for a in conf.get('cors_allow_origin', '').split(',')
            if a.strip()
        ]
        self.cors_expose_headers = [
            a.strip() for a in conf.get('cors_expose_headers', '').split(',')
            if a.strip()
        ]
        self.strict_cors_mode = config_true_value(
            conf.get('strict_cors_mode', 't'))
        self.node_timings = {}
        self.timing_expiry = int(conf.get('timing_expiry', 300))
        value = conf.get('request_node_count', '2 * replicas').lower().split()
        if len(value) == 1:
            rnc_value = int(value[0])
            self.request_node_count = lambda replicas: rnc_value
        elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
            rnc_value = int(value[0])
            self.request_node_count = lambda replicas: rnc_value * replicas
        else:
            raise ValueError('Invalid request_node_count value: %r' %
                             ''.join(value))
        # swift_owner_headers are stripped by the account and container
        # controllers; we should extend header stripping to object controller
        # when a privileged object header is implemented.
        swift_owner_headers = conf.get(
            'swift_owner_headers', 'x-container-read, x-container-write, '
            'x-container-sync-key, x-container-sync-to, '
            'x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, '
            'x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, '
            'x-account-access-control')
        self.swift_owner_headers = [
            name.strip().title() for name in swift_owner_headers.split(',')
            if name.strip()
        ]

        # When upgrading from liberasurecode<=1.5.0, you may want to continue
        # writing legacy CRCs until all nodes are upgraded and capable of
        # reading fragments with zlib CRCs.
        # See https://bugs.launchpad.net/liberasurecode/+bug/1886088 for more
        # information.
        if 'write_legacy_ec_crc' in conf:
            os.environ['LIBERASURECODE_WRITE_LEGACY_CRC'] = \
                '1' if config_true_value(conf['write_legacy_ec_crc']) else '0'
        # else, assume operators know what they're doing and leave env alone

        # Initialization was successful, so now apply the client chunk size
        # parameter as the default read / write buffer size for the network
        # sockets.
        #
        # NOTE WELL: This is a class setting, so until we can set this on a
        # per-connection basis, this affects reading and writing on ALL
        # sockets, those between the proxy servers and external clients, and
        # those between the proxy servers and the other internal servers.
        #
        # ** Because it affects the client as well, currently, we use the
        # client chunk size as the governor and not the object chunk size.
        if sys.version_info < (3, ):
            socket._fileobject.default_bufsize = self.client_chunk_size
        # TODO: find a way to enable similar functionality in py3

        self.expose_info = config_true_value(conf.get('expose_info', 'yes'))
        self.disallowed_sections = list_from_csv(
            conf.get(
                'disallowed_sections', ', '.join([
                    'swift.auto_create_account_prefix',
                    'swift.valid_api_versions',
                ])))
        self.admin_key = conf.get('admin_key', None)
        self._override_options = self._load_per_policy_config(conf)
        self.sorts_by_timing = any(pc.sorting_method == 'timing'
                                   for pc in self._override_options.values())

        register_swift_info(
            version=swift_version,
            strict_cors_mode=self.strict_cors_mode,
            policies=POLICIES.get_policy_info(),
            allow_account_management=self.allow_account_management,
            account_autocreate=self.account_autocreate,
            **constraints.EFFECTIVE_CONSTRAINTS)
        self.watchdog = Watchdog()
        self.watchdog.spawn()
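Many of the boolean options above ('allow_account_management', 'account_autocreate', 'write_legacy_ec_crc', 'strict_cors_mode', ...) pass through config_true_value. A rough stand-in for readers without the Swift source at hand; the accepted truthy strings are an assumption based on Swift's documented behaviour:

TRUE_VALUES = {'true', '1', 'yes', 'on', 't', 'y'}

def config_true_value_sketch(value):
    # approximation of swift.common.utils.config_true_value
    return value is True or (
        isinstance(value, str) and value.lower() in TRUE_VALUES)

# config_true_value_sketch('no') -> False
# config_true_value_sketch('t')  -> True  (the strict_cors_mode default above)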
Example #5
    def __init__(self, conf, memcache=None, logger=None, account_ring=None,
                 container_ring=None):
        if conf is None:
            conf = {}
        if logger is None:
            self.logger = get_logger(conf, log_route='proxy-server')
        else:
            self.logger = logger

        self._error_limiting = {}

        swift_dir = conf.get('swift_dir', '/etc/swift')
        self.swift_dir = swift_dir
        self.node_timeout = int(conf.get('node_timeout', 10))
        self.recoverable_node_timeout = int(
            conf.get('recoverable_node_timeout', self.node_timeout))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.client_timeout = int(conf.get('client_timeout', 60))
        self.put_queue_depth = int(conf.get('put_queue_depth', 10))
        self.object_chunk_size = int(conf.get('object_chunk_size', 65536))
        self.client_chunk_size = int(conf.get('client_chunk_size', 65536))
        self.trans_id_suffix = conf.get('trans_id_suffix', '')
        self.post_quorum_timeout = float(conf.get('post_quorum_timeout', 0.5))
        self.error_suppression_interval = \
            int(conf.get('error_suppression_interval', 60))
        self.error_suppression_limit = \
            int(conf.get('error_suppression_limit', 10))
        self.recheck_container_existence = \
            int(conf.get('recheck_container_existence', 60))
        self.recheck_account_existence = \
            int(conf.get('recheck_account_existence', 60))
        self.allow_account_management = \
            config_true_value(conf.get('allow_account_management', 'no'))
        self.object_post_as_copy = \
            config_true_value(conf.get('object_post_as_copy', 'true'))
        self.container_ring = container_ring or Ring(swift_dir,
                                                     ring_name='container')
        self.account_ring = account_ring or Ring(swift_dir,
                                                 ring_name='account')
        # ensure rings are loaded for all configured storage policies
        for policy in POLICIES:
            policy.load_ring(swift_dir)
        self.obj_controller_router = ObjectControllerRouter()
        self.memcache = memcache
        mimetypes.init(mimetypes.knownfiles +
                       [os.path.join(swift_dir, 'mime.types')])
        self.account_autocreate = \
            config_true_value(conf.get('account_autocreate', 'no'))
        self.auto_create_account_prefix = (
            conf.get('auto_create_account_prefix') or '.')
        self.expiring_objects_account = self.auto_create_account_prefix + \
            (conf.get('expiring_objects_account_name') or 'expiring_objects')
        self.expiring_objects_container_divisor = \
            int(conf.get('expiring_objects_container_divisor') or 86400)
        self.max_containers_per_account = \
            int(conf.get('max_containers_per_account') or 0)
        self.max_containers_whitelist = [
            a.strip()
            for a in conf.get('max_containers_whitelist', '').split(',')
            if a.strip()]
        self.deny_host_headers = [
            host.strip() for host in
            conf.get('deny_host_headers', '').split(',') if host.strip()]
        self.log_handoffs = config_true_value(conf.get('log_handoffs', 'true'))
        self.cors_allow_origin = [
            a.strip()
            for a in conf.get('cors_allow_origin', '').split(',')
            if a.strip()]
        self.strict_cors_mode = config_true_value(
            conf.get('strict_cors_mode', 't'))
        self.node_timings = {}
        self.timing_expiry = int(conf.get('timing_expiry', 300))
        self.sorting_method = conf.get('sorting_method', 'shuffle').lower()
        self.max_large_object_get_time = float(
            conf.get('max_large_object_get_time', '86400'))
        value = conf.get('request_node_count', '2 * replicas').lower().split()
        if len(value) == 1:
            rnc_value = int(value[0])
            self.request_node_count = lambda replicas: rnc_value
        elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
            rnc_value = int(value[0])
            self.request_node_count = lambda replicas: rnc_value * replicas
        else:
            raise ValueError(
                'Invalid request_node_count value: %r' % ''.join(value))
        try:
            self._read_affinity = read_affinity = conf.get('read_affinity', '')
            self.read_affinity_sort_key = affinity_key_function(read_affinity)
        except ValueError as err:
            # make the message a little more useful
            raise ValueError("Invalid read_affinity value: %r (%s)" %
                             (read_affinity, err.message))
        try:
            write_affinity = conf.get('write_affinity', '')
            self.write_affinity_is_local_fn \
                = affinity_locality_predicate(write_affinity)
        except ValueError as err:
            # make the message a little more useful
            raise ValueError("Invalid write_affinity value: %r (%s)" %
                             (write_affinity, err.message))
        value = conf.get('write_affinity_node_count',
                         '2 * replicas').lower().split()
        if len(value) == 1:
            wanc_value = int(value[0])
            self.write_affinity_node_count = lambda replicas: wanc_value
        elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
            wanc_value = int(value[0])
            self.write_affinity_node_count = \
                lambda replicas: wanc_value * replicas
        else:
            raise ValueError(
                'Invalid write_affinity_node_count value: %r' % ''.join(value))
        # swift_owner_headers are stripped by the account and container
        # controllers; we should extend header stripping to object controller
        # when a privileged object header is implemented.
        swift_owner_headers = conf.get(
            'swift_owner_headers',
            'x-container-read, x-container-write, '
            'x-container-sync-key, x-container-sync-to, '
            'x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, '
            'x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, '
            'x-account-access-control')
        self.swift_owner_headers = [
            name.strip().title()
            for name in swift_owner_headers.split(',') if name.strip()]
        # Initialization was successful, so now apply the client chunk size
        # parameter as the default read / write buffer size for the network
        # sockets.
        #
        # NOTE WELL: This is a class setting, so until we can set this on a
        # per-connection basis, this affects reading and writing on ALL
        # sockets, those between the proxy servers and external clients, and
        # those between the proxy servers and the other internal servers.
        #
        # ** Because it affects the client as well, currently, we use the
        # client chunk size as the governor and not the object chunk size.
        socket._fileobject.default_bufsize = self.client_chunk_size
        self.expose_info = config_true_value(
            conf.get('expose_info', 'yes'))
        self.disallowed_sections = list_from_csv(
            conf.get('disallowed_sections', 'swift.valid_api_versions'))
        self.admin_key = conf.get('admin_key', None)
        register_swift_info(
            version=swift_version,
            strict_cors_mode=self.strict_cors_mode,
            policies=POLICIES.get_policy_info(),
            allow_account_management=self.allow_account_management,
            account_autocreate=self.account_autocreate,
            **constraints.EFFECTIVE_CONSTRAINTS)
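max_containers_whitelist, deny_host_headers, and cors_allow_origin all use the same comma-separated parsing pattern shown above; pulled out on its own it is simply:

def csv_list(value):
    # split on commas, strip whitespace, drop empty entries
    return [item.strip() for item in value.split(',') if item.strip()]

# csv_list('origin1.example.com, origin2.example.com, ')
#   -> ['origin1.example.com', 'origin2.example.com']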
Example #6
    def __init__(self,
                 conf,
                 memcache=None,
                 logger=None,
                 account_ring=None,
                 container_ring=None):
        if conf is None:
            conf = {}
        if logger is None:
            self.logger = get_logger(conf, log_route='proxy-server')
        else:
            self.logger = logger

        swift_dir = conf.get('swift_dir', '/etc/swift')
        self.swift_dir = swift_dir
        self.node_timeout = float(conf.get('node_timeout', 10))
        self.recoverable_node_timeout = float(
            conf.get('recoverable_node_timeout', self.node_timeout))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.client_timeout = int(conf.get('client_timeout', 60))
        self.put_queue_depth = int(conf.get('put_queue_depth', 10))
        self.object_chunk_size = int(conf.get('object_chunk_size', 65536))
        self.client_chunk_size = int(conf.get('client_chunk_size', 65536))
        self.trans_id_suffix = conf.get('trans_id_suffix', '')
        self.allow_account_management = \
            config_true_value(conf.get('allow_account_management', 'no'))
        self.memcache = memcache
        mimetypes.init(mimetypes.knownfiles +
                       [os.path.join(swift_dir, 'mime.types')])
        self.account_autocreate = \
            config_true_value(conf.get('account_autocreate', 'no'))
        self.deny_host_headers = [
            host.strip()
            for host in conf.get('deny_host_headers', '').split(',')
            if host.strip()
        ]
        self.strict_cors_mode = config_true_value(
            conf.get('strict_cors_mode', 't'))
        value = conf.get('request_node_count', '2 * replicas').lower().split()
        if len(value) == 1:
            rnc_value = int(value[0])
            self.request_node_count = lambda replicas: rnc_value
        elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
            rnc_value = int(value[0])
            self.request_node_count = lambda replicas: rnc_value * replicas
        else:
            raise ValueError('Invalid request_node_count value: %r' %
                             ''.join(value))
        swift_owner_headers = conf.get(
            'swift_owner_headers', 'x-container-read, x-container-write, '
            'x-container-sync-key, x-container-sync-to, '
            'x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, '
            'x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, '
            'x-account-access-control')
        self.swift_owner_headers = [
            name.strip().title() for name in swift_owner_headers.split(',')
            if name.strip()
        ]
        socket._fileobject.default_bufsize = self.client_chunk_size
        self.expose_info = config_true_value(conf.get('expose_info', 'yes'))
        self.disallowed_sections = list_from_csv(
            conf.get('disallowed_sections', 'swift.valid_api_versions'))
        self.admin_key = conf.get('admin_key', None)
        register_swift_info(
            version=swift_version,
            strict_cors_mode=self.strict_cors_mode,
            policies=POLICIES.get_policy_info(),
            allow_account_management=self.allow_account_management,
            account_autocreate=self.account_autocreate,
            **constraints.EFFECTIVE_CONSTRAINTS)
        self.swift_baseurl = conf.get('swift_baseurl')
Example #7
    def __init__(self, conf, memcache=None, logger=None, account_ring=None, container_ring=None):
        if conf is None:
            conf = {}
        if logger is None:
            self.logger = get_logger(conf, log_route="proxy-server")
        else:
            self.logger = logger

        self._error_limiting = {}

        swift_dir = conf.get("swift_dir", "/etc/swift")
        self.swift_dir = swift_dir
        self.node_timeout = float(conf.get("node_timeout", 10))
        self.recoverable_node_timeout = float(conf.get("recoverable_node_timeout", self.node_timeout))
        self.conn_timeout = float(conf.get("conn_timeout", 0.5))
        self.client_timeout = int(conf.get("client_timeout", 60))
        self.put_queue_depth = int(conf.get("put_queue_depth", 10))
        self.object_chunk_size = int(conf.get("object_chunk_size", 65536))
        self.client_chunk_size = int(conf.get("client_chunk_size", 65536))
        self.trans_id_suffix = conf.get("trans_id_suffix", "")
        self.post_quorum_timeout = float(conf.get("post_quorum_timeout", 0.5))
        self.error_suppression_interval = int(conf.get("error_suppression_interval", 60))
        self.error_suppression_limit = int(conf.get("error_suppression_limit", 10))
        self.recheck_container_existence = int(conf.get("recheck_container_existence", 60))
        self.recheck_account_existence = int(conf.get("recheck_account_existence", 60))
        self.allow_account_management = config_true_value(conf.get("allow_account_management", "no"))
        self.object_post_as_copy = config_true_value(conf.get("object_post_as_copy", "true"))
        self.container_ring = container_ring or Ring(swift_dir, ring_name="container")
        self.account_ring = account_ring or Ring(swift_dir, ring_name="account")
        # ensure rings are loaded for all configured storage policies
        for policy in POLICIES:
            policy.load_ring(swift_dir)
        self.obj_controller_router = ObjectControllerRouter()
        self.memcache = memcache
        mimetypes.init(mimetypes.knownfiles + [os.path.join(swift_dir, "mime.types")])
        self.account_autocreate = config_true_value(conf.get("account_autocreate", "no"))
        self.auto_create_account_prefix = conf.get("auto_create_account_prefix") or "."
        self.expiring_objects_account = self.auto_create_account_prefix + (
            conf.get("expiring_objects_account_name") or "expiring_objects"
        )
        self.expiring_objects_container_divisor = int(conf.get("expiring_objects_container_divisor") or 86400)
        self.max_containers_per_account = int(conf.get("max_containers_per_account") or 0)
        self.max_containers_whitelist = [
            a.strip() for a in conf.get("max_containers_whitelist", "").split(",") if a.strip()
        ]
        self.deny_host_headers = [host.strip() for host in conf.get("deny_host_headers", "").split(",") if host.strip()]
        self.log_handoffs = config_true_value(conf.get("log_handoffs", "true"))
        self.cors_allow_origin = [a.strip() for a in conf.get("cors_allow_origin", "").split(",") if a.strip()]
        self.strict_cors_mode = config_true_value(conf.get("strict_cors_mode", "t"))
        self.node_timings = {}
        self.timing_expiry = int(conf.get("timing_expiry", 300))
        self.sorting_method = conf.get("sorting_method", "shuffle").lower()
        self.concurrent_gets = config_true_value(conf.get("concurrent_gets"))
        self.concurrency_timeout = float(conf.get("concurrency_timeout", self.conn_timeout))
        value = conf.get("request_node_count", "2 * replicas").lower().split()
        if len(value) == 1:
            rnc_value = int(value[0])
            self.request_node_count = lambda replicas: rnc_value
        elif len(value) == 3 and value[1] == "*" and value[2] == "replicas":
            rnc_value = int(value[0])
            self.request_node_count = lambda replicas: rnc_value * replicas
        else:
            raise ValueError("Invalid request_node_count value: %r" % "".join(value))
        try:
            self._read_affinity = read_affinity = conf.get("read_affinity", "")
            self.read_affinity_sort_key = affinity_key_function(read_affinity)
        except ValueError as err:
            # make the message a little more useful
            raise ValueError("Invalid read_affinity value: %r (%s)" % (read_affinity, err.message))
        try:
            write_affinity = conf.get("write_affinity", "")
            self.write_affinity_is_local_fn = affinity_locality_predicate(write_affinity)
        except ValueError as err:
            # make the message a little more useful
            raise ValueError("Invalid write_affinity value: %r (%s)" % (write_affinity, err.message))
        value = conf.get("write_affinity_node_count", "2 * replicas").lower().split()
        if len(value) == 1:
            wanc_value = int(value[0])
            self.write_affinity_node_count = lambda replicas: wanc_value
        elif len(value) == 3 and value[1] == "*" and value[2] == "replicas":
            wanc_value = int(value[0])
            self.write_affinity_node_count = lambda replicas: wanc_value * replicas
        else:
            raise ValueError("Invalid write_affinity_node_count value: %r" % "".join(value))
        # swift_owner_headers are stripped by the account and container
        # controllers; we should extend header stripping to object controller
        # when a privileged object header is implemented.
        swift_owner_headers = conf.get(
            "swift_owner_headers",
            "x-container-read, x-container-write, "
            "x-container-sync-key, x-container-sync-to, "
            "x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, "
            "x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, "
            "x-account-access-control",
        )
        self.swift_owner_headers = [name.strip().title() for name in swift_owner_headers.split(",") if name.strip()]
        # Initialization was successful, so now apply the client chunk size
        # parameter as the default read / write buffer size for the network
        # sockets.
        #
        # NOTE WELL: This is a class setting, so until we can set this on a
        # per-connection basis, this affects reading and writing on ALL
        # sockets, those between the proxy servers and external clients, and
        # those between the proxy servers and the other internal servers.
        #
        # ** Because it affects the client as well, currently, we use the
        # client chunk size as the governor and not the object chunk size.
        socket._fileobject.default_bufsize = self.client_chunk_size
        self.expose_info = config_true_value(conf.get("expose_info", "yes"))
        self.disallowed_sections = list_from_csv(conf.get("disallowed_sections", "swift.valid_api_versions"))
        self.admin_key = conf.get("admin_key", None)
        register_swift_info(
            version=swift_version,
            strict_cors_mode=self.strict_cors_mode,
            policies=POLICIES.get_policy_info(),
            allow_account_management=self.allow_account_management,
            account_autocreate=self.account_autocreate,
            **constraints.EFFECTIVE_CONSTRAINTS
        )
Example #8
    def test_swift_info(self):
        # the deprecated 'three' should not exist in expect
        expect = [{"default": True, "name": "zero"}, {"name": "two"},
                  {"name": "one"}, {"name": "ten"}]
        swift_info = POLICIES.get_policy_info()
        self.assertEqual(sorted(expect, key=lambda k: k["name"]),
                         sorted(swift_info, key=lambda k: k["name"]))