Example #1
    def __init__(self, app, conf, *args, **kwargs):
        self.app = app
        self.conf = Config()

        # Set default values if they are not configured
        self.conf.allow_no_owner = config_true_value(
            conf.get('allow_no_owner', False))
        self.conf.location = conf.get('location', 'US')
        self.conf.dns_compliant_bucket_names = config_true_value(
            conf.get('dns_compliant_bucket_names', True))
        self.conf.max_bucket_listing = config_positive_int_value(
            conf.get('max_bucket_listing', 1000))
        self.conf.max_parts_listing = config_positive_int_value(
            conf.get('max_parts_listing', 1000))
        self.conf.max_multi_delete_objects = config_positive_int_value(
            conf.get('max_multi_delete_objects', 1000))
        self.conf.s3_acl = config_true_value(conf.get('s3_acl', False))
        self.conf.storage_domain = conf.get('storage_domain', '')
        self.conf.auth_pipeline_check = config_true_value(
            conf.get('auth_pipeline_check', True))
        self.conf.max_upload_part_num = config_positive_int_value(
            conf.get('max_upload_part_num', 1000))
        self.conf.check_bucket_owner = config_true_value(
            conf.get('check_bucket_owner', False))
        self.conf.force_swift_request_proxy_log = config_true_value(
            conf.get('force_swift_request_proxy_log', False))
        self.conf.allow_multipart_uploads = config_true_value(
            conf.get('allow_multipart_uploads', True))
        self.conf.min_segment_size = config_positive_int_value(
            conf.get('min_segment_size', 5242880))

        self.logger = get_logger(conf, log_route=conf.get('log_name', 's3api'))
        self.slo_enabled = self.conf.allow_multipart_uploads
        self.check_pipeline(self.conf)
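
Both helpers above come from swift.common.utils. A minimal sketch of their behavior, assuming the usual truthy spellings (the real implementations in Swift are the authority here):

# Rough sketch of the swift.common.utils helpers used above; the exact
# TRUE_VALUES set is an assumption, not copied from Swift.
TRUE_VALUES = {'true', '1', 'yes', 'on', 't', 'y'}

def config_true_value(value):
    # True for a literal True, or for any accepted truthy string.
    return value is True or \
        (isinstance(value, str) and value.lower() in TRUE_VALUES)

def config_positive_int_value(value):
    # Coerce to int and insist on a strictly positive result.
    try:
        result = int(value)
        if result < 1:
            raise ValueError()
    except (TypeError, ValueError):
        raise ValueError('not a positive int: %r' % (value,))
    return result

For example, config_true_value('Yes') is True and config_positive_int_value('1000') is 1000, so string-valued options read from a paste config parse cleanly.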
Example #2
    def __init__(self, app, conf):
        self.app = app
        self.conf = conf
        self.logger = get_logger(conf, log_route='swift_zipkin')
        self.enabled = config_true_value(conf.get('zipkin_enable'))
        self.zipkin_v2_host = self.conf.get('zipkin_v2_host') or '127.0.0.1'
        self.zipkin_v2_port = config_positive_int_value(
            self.conf.get('zipkin_v2_port') or 9411)
        raw_sample_rate = self.conf.get('zipkin_sample_rate')
        if raw_sample_rate:
            self.zipkin_sample_rate = config_float_value(raw_sample_rate,
                                                         minimum=0.0,
                                                         maximum=1.0)
        else:
            self.zipkin_sample_rate = 1.0
        self.zipkin_flush_threshold_size = config_positive_int_value(
            self.conf.get('zipkin_flush_threshold_size', 2**20))
        self.zipkin_flush_threshold_sec = config_float_value(
            self.conf.get('zipkin_flush_threshold_sec', 2.0))

        if not self.enabled:
            # It's not like we're going to get enabled between the first and
            # second invocations of this constructor...
            return

        # Use our class to store a count of instantiations; we'll get
        # instantiated once before the workers are forked off, and again
        # inside each worker.  We're only interested in doing our business
        # post-fork, during the SECOND instantiation.
        setattr(self.__class__, '_instantiation_count',
                1 + getattr(self.__class__, '_instantiation_count', 0))

        if self.__class__._instantiation_count < 2:
            self.logger.debug(
                'ZipkinMiddleware() count=%d PID=%d; '
                'deferring work to 2nd instantiation.',
                self.__class__._instantiation_count, os.getpid())
            return

        self.logger.debug(
            'ZipkinMiddleware() count=%d PID=%d; '
            'tracing %.0f%% of reqs to Zipkin at '
            '%s:%s', self.__class__._instantiation_count, os.getpid(),
            100.0 * self.zipkin_sample_rate, self.zipkin_v2_host,
            self.zipkin_v2_port)
        patch_eventlet_and_swift(
            self.logger,
            self.zipkin_v2_host,
            self.zipkin_v2_port,
            self.zipkin_sample_rate,
            self.zipkin_flush_threshold_size,
            self.zipkin_flush_threshold_sec,
        )
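
The instantiation counting above works because the WSGI pipeline is built once in the master process and then rebuilt inside each forked worker, so this constructor runs twice per worker lifecycle. A standalone sketch of the same pattern, with illustrative names that are not from Swift:

import os

class PostForkMiddleware(object):
    # The master's construction bumps this to 1 before the fork; each
    # worker inherits that value and reaches 2 in its own construction.
    _instantiation_count = 0

    def __init__(self, app):
        self.app = app
        type(self)._instantiation_count += 1
        if type(self)._instantiation_count < 2:
            # First construction happens pre-fork; defer the real work.
            return
        # Second construction happens post-fork, inside the worker.
        print('post-fork setup in PID %d' % os.getpid())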
Example #3
File: s3api.py  Project: realitix/swift
    def __init__(self, app, wsgi_conf, *args, **kwargs):
        self.app = app
        self.conf = Config()

        # Set default values if they are not configured
        self.conf.allow_no_owner = config_true_value(
            wsgi_conf.get('allow_no_owner', False))
        self.conf.location = wsgi_conf.get('location', 'us-east-1')
        self.conf.dns_compliant_bucket_names = config_true_value(
            wsgi_conf.get('dns_compliant_bucket_names', True))
        self.conf.max_bucket_listing = config_positive_int_value(
            wsgi_conf.get('max_bucket_listing', 1000))
        self.conf.max_parts_listing = config_positive_int_value(
            wsgi_conf.get('max_parts_listing', 1000))
        self.conf.max_multi_delete_objects = config_positive_int_value(
            wsgi_conf.get('max_multi_delete_objects', 1000))
        self.conf.multi_delete_concurrency = config_positive_int_value(
            wsgi_conf.get('multi_delete_concurrency', 2))
        self.conf.s3_acl = config_true_value(wsgi_conf.get('s3_acl', False))
        self.conf.storage_domain = wsgi_conf.get('storage_domain', '')
        self.conf.auth_pipeline_check = config_true_value(
            wsgi_conf.get('auth_pipeline_check', True))
        self.conf.max_upload_part_num = config_positive_int_value(
            wsgi_conf.get('max_upload_part_num', 1000))
        self.conf.check_bucket_owner = config_true_value(
            wsgi_conf.get('check_bucket_owner', False))
        self.conf.force_swift_request_proxy_log = config_true_value(
            wsgi_conf.get('force_swift_request_proxy_log', False))
        self.conf.allow_multipart_uploads = config_true_value(
            wsgi_conf.get('allow_multipart_uploads', True))
        self.conf.min_segment_size = config_positive_int_value(
            wsgi_conf.get('min_segment_size', 5242880))
        self.conf.allowable_clock_skew = config_positive_int_value(
            wsgi_conf.get('allowable_clock_skew', 15 * 60))
        self.conf.cors_preflight_allow_origin = list_from_csv(
            wsgi_conf.get('cors_preflight_allow_origin', ''))
        if '*' in self.conf.cors_preflight_allow_origin and \
                len(self.conf.cors_preflight_allow_origin) > 1:
            raise ValueError('if cors_preflight_allow_origin should include '
                             'all domains, * must be the only entry')
        self.conf.ratelimit_as_client_error = config_true_value(
            wsgi_conf.get('ratelimit_as_client_error', False))

        self.logger = get_logger(wsgi_conf,
                                 log_route=wsgi_conf.get('log_name', 's3api'))
        self.check_pipeline(wsgi_conf)
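
The wildcard validation above depends on list_from_csv turning the raw option string into a clean list. A minimal sketch of that helper, matching its behavior in swift.common.utils (split on commas, strip whitespace, drop blanks):

def list_from_csv(comma_separated_str):
    # Split on commas, strip whitespace, and drop empty entries.
    if comma_separated_str:
        return [v.strip() for v in comma_separated_str.split(',')
                if v.strip()]
    return []

With that, list_from_csv('*') yields ['*'] and passes the check, while list_from_csv('*, example.com') yields a two-entry list and trips the ValueError above.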
Example #4
def main(args=None):
    parser = _make_parser()
    args = parser.parse_args(args)
    if not args.subcommand:
        # On py2, subparsers are required; on py3 they are not; see
        # https://bugs.python.org/issue9253. py37 added a `required` kwarg
        # to let you control it, but prior to that, there was no choice in
        # the matter. So, check whether the destination was set and bomb
        # out if not.
        parser.print_help()
        print('\nA sub-command is required.')
        return 1

    conf = {}
    rows_per_shard = DEFAULT_ROWS_PER_SHARD
    shrink_threshold = DEFAULT_SHRINK_THRESHOLD
    expansion_limit = DEFAULT_ROWS_PER_SHARD
    if args.conf_file:
        try:
            conf = readconf(args.conf_file, 'container-sharder')
            shard_container_threshold = config_positive_int_value(conf.get(
                'shard_container_threshold',
                DEFAULT_SHARD_CONTAINER_THRESHOLD))
            if shard_container_threshold:
                rows_per_shard = shard_container_threshold // 2
                shrink_threshold = int(
                    shard_container_threshold * config_percent_value(
                        conf.get('shard_shrink_point',
                                 DEFAULT_SHARD_SHRINK_POINT)))
                expansion_limit = int(
                    shard_container_threshold * config_percent_value(
                        conf.get('shard_shrink_merge_point',
                                 DEFAULT_SHARD_MERGE_POINT)))
        except Exception as exc:
            print('Error opening config file %s: %s' % (args.conf_file, exc),
                  file=sys.stderr)
            return 2

    # It seems that having sub-parsers means an arg sometimes won't exist in
    # the args namespace, but we can check for it with the 'in' operator.
    if "max_shrinking" in args and args.max_shrinking is None:
        args.max_shrinking = int(conf.get(
            "max_shrinking", DEFAULT_MAX_SHRINKING))
    if "max_expanding" in args and args.max_expanding is None:
        args.max_expanding = int(conf.get(
            "max_expanding", DEFAULT_MAX_EXPANDING))
    if "shrink_threshold" in args and args.shrink_threshold is None:
        args.shrink_threshold = shrink_threshold
    if "expansion_limit" in args and args.expansion_limit is None:
        args.expansion_limit = expansion_limit
    if "rows_per_shard" in args and args.rows_per_shard is None:
        args.rows_per_shard = rows_per_shard

    if args.func in (analyze_shard_ranges,):
        args.input = args.path_to_file
        return args.func(args) or 0

    logger = get_logger({}, name='ContainerBroker', log_to_console=True)
    broker = ContainerBroker(os.path.realpath(args.path_to_file),
                             logger=logger,
                             skip_commits=not args.force_commits)
    try:
        broker.get_info()
    except Exception as exc:
        print('Error opening container DB %s: %s' % (args.path_to_file, exc),
              file=sys.stderr)
        return 2
    print('Loaded db broker for %s' % broker.path, file=sys.stderr)
    return args.func(broker, args)
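
The derived sharding values follow directly from shard_container_threshold: rows_per_shard is half of it, and the shrink and merge thresholds are percentages of it via config_percent_value. A worked sketch with illustrative numbers (a 1,000,000-row threshold with 10% shrink and 75% merge points; the real defaults are whatever Swift defines):

# Illustrative arithmetic only; the actual default constants live in Swift.
shard_container_threshold = 1000000
rows_per_shard = shard_container_threshold // 2           # 500000
shrink_threshold = int(shard_container_threshold * 0.10)  # 100000
expansion_limit = int(shard_container_threshold * 0.75)   # 750000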
Example #5
    def __init__(self,
                 idx,
                 name='',
                 aliases='',
                 is_default=False,
                 is_deprecated=False,
                 object_ring=None,
                 ec_segment_size=DEFAULT_EC_OBJECT_SEGMENT_SIZE,
                 ec_type=None,
                 ec_ndata=None,
                 ec_nparity=None,
                 ec_duplication_factor=1):

        super(ECStoragePolicy, self).__init__(idx=idx,
                                              name=name,
                                              aliases=aliases,
                                              is_default=is_default,
                                              is_deprecated=is_deprecated,
                                              object_ring=object_ring)

        # Validate erasure_coding policy specific members
        # ec_type is one of the EC implementations supported by PyEClib
        if ec_type is None:
            raise PolicyError('Missing ec_type')
        if ec_type not in VALID_EC_TYPES:
            raise PolicyError('Wrong ec_type %s for policy %s, should be one'
                              ' of "%s"' %
                              (ec_type, self.name, ', '.join(VALID_EC_TYPES)))
        self._ec_type = ec_type

        # Define _ec_ndata as the number of EC data fragments
        # Accessible as the property "ec_ndata"
        try:
            value = int(ec_ndata)
            if value <= 0:
                raise ValueError
            self._ec_ndata = value
        except (TypeError, ValueError):
            raise PolicyError('Invalid ec_num_data_fragments %r' % ec_ndata,
                              index=self.idx)

        # Define _ec_nparity as the number of EC parity fragments
        # Accessible as the property "ec_nparity"
        try:
            value = int(ec_nparity)
            if value <= 0:
                raise ValueError
            self._ec_nparity = value
        except (TypeError, ValueError):
            raise PolicyError('Invalid ec_num_parity_fragments %r' %
                              ec_nparity,
                              index=self.idx)

        # Define _ec_segment_size as the encode segment unit size
        # Accessible as the property "ec_segment_size"
        try:
            value = int(ec_segment_size)
            if value <= 0:
                raise ValueError
            self._ec_segment_size = value
        except (TypeError, ValueError):
            raise PolicyError('Invalid ec_object_segment_size %r' %
                              ec_segment_size,
                              index=self.idx)

        if self._ec_type == 'isa_l_rs_vand' and self._ec_nparity >= 5:
            logger = logging.getLogger("swift.common.storage_policy")
            if not logger.handlers:
                # If nothing else, log to stderr
                logger.addHandler(logging.StreamHandler(sys.__stderr__))
            logger.warning(
                'Storage policy %s uses an EC configuration known to harm '
                'data durability. Any data in this policy should be migrated. '
                'See https://bugs.launchpad.net/swift/+bug/1639691 for '
                'more information.' % self.name)
            if not is_deprecated:
                raise PolicyError(
                    'Storage policy %s uses an EC configuration known to harm '
                    'data durability. This policy MUST be deprecated.' %
                    self.name)

        # Initialize PyECLib EC backend
        try:
            self.pyeclib_driver = \
                ECDriver(k=self._ec_ndata, m=self._ec_nparity,
                         ec_type=self._ec_type)
        except ECDriverError as e:
            raise PolicyError("Error creating EC policy (%s)" % e,
                              index=self.idx)

        # quorum size in the EC case depends on the choice of EC scheme.
        self._ec_quorum_size = \
            self._ec_ndata + self.pyeclib_driver.min_parity_fragments_needed()
        self._fragment_size = None

        self._ec_duplication_factor = \
            config_positive_int_value(ec_duplication_factor)
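
The three try/except blocks above validate ec_ndata, ec_nparity and ec_segment_size in exactly the same way. A hedged sketch of that pattern factored into one helper (purely illustrative; Swift does not structure it this way):

def _validate_positive_int(value, option_name, index):
    # Shared 'must be a positive int' check for the three EC options;
    # PolicyError is the exception class used in the example above.
    try:
        result = int(value)
        if result <= 0:
            raise ValueError
    except (TypeError, ValueError):
        raise PolicyError('Invalid %s %r' % (option_name, value),
                          index=index)
    return result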
Example #6
    def __init__(self, idx, name='', aliases='', is_default=False,
                 is_deprecated=False, object_ring=None,
                 diskfile_module='egg:swift#erasure_coding.fs',
                 ec_segment_size=DEFAULT_EC_OBJECT_SEGMENT_SIZE,
                 ec_type=None, ec_ndata=None, ec_nparity=None,
                 ec_duplication_factor=1):

        super(ECStoragePolicy, self).__init__(
            idx=idx, name=name, aliases=aliases, is_default=is_default,
            is_deprecated=is_deprecated, object_ring=object_ring,
            diskfile_module=diskfile_module)

        # Validate erasure_coding policy specific members
        # ec_type is one of the EC implementations supported by PyEClib
        if ec_type is None:
            raise PolicyError('Missing ec_type')
        if ec_type not in VALID_EC_TYPES:
            raise PolicyError('Wrong ec_type %s for policy %s, should be one'
                              ' of "%s"' % (ec_type, self.name,
                                            ', '.join(VALID_EC_TYPES)))
        self._ec_type = ec_type

        # Define _ec_ndata as the number of EC data fragments
        # Accessible as the property "ec_ndata"
        try:
            value = int(ec_ndata)
            if value <= 0:
                raise ValueError
            self._ec_ndata = value
        except (TypeError, ValueError):
            raise PolicyError('Invalid ec_num_data_fragments %r' %
                              ec_ndata, index=self.idx)

        # Define _ec_nparity as the number of EC parity fragments
        # Accessible as the property "ec_nparity"
        try:
            value = int(ec_nparity)
            if value <= 0:
                raise ValueError
            self._ec_nparity = value
        except (TypeError, ValueError):
            raise PolicyError('Invalid ec_num_parity_fragments %r'
                              % ec_nparity, index=self.idx)

        # Define _ec_segment_size as the encode segment unit size
        # Accessible as the property "ec_segment_size"
        try:
            value = int(ec_segment_size)
            if value <= 0:
                raise ValueError
            self._ec_segment_size = value
        except (TypeError, ValueError):
            raise PolicyError('Invalid ec_object_segment_size %r' %
                              ec_segment_size, index=self.idx)

        if self._ec_type == 'isa_l_rs_vand' and self._ec_nparity >= 5:
            logger = logging.getLogger("swift.common.storage_policy")
            if not logger.handlers:
                # If nothing else, log to stderr
                logger.addHandler(logging.StreamHandler(sys.__stderr__))
            logger.warning(
                'Storage policy %s uses an EC configuration known to harm '
                'data durability. Any data in this policy should be migrated. '
                'See https://bugs.launchpad.net/swift/+bug/1639691 for '
                'more information.' % self.name)
            if not is_deprecated:
                raise PolicyError(
                    'Storage policy %s uses an EC configuration known to harm '
                    'data durability. This policy MUST be deprecated.'
                    % self.name)

        # Initialize PyECLib EC backend
        try:
            self.pyeclib_driver = \
                ECDriver(k=self._ec_ndata, m=self._ec_nparity,
                         ec_type=self._ec_type)
        except ECDriverError as e:
            raise PolicyError("Error creating EC policy (%s)" % e,
                              index=self.idx)

        # quorum size in the EC case depends on the choice of EC scheme.
        self._ec_quorum_size = \
            self._ec_ndata + self.pyeclib_driver.min_parity_fragments_needed()
        self._fragment_size = None

        self._ec_duplication_factor = \
            config_positive_int_value(ec_duplication_factor)
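
To make the quorum computation at the end of both EC examples concrete: the quorum is ec_ndata plus however many parity fragments PyECLib reports as necessary for a durable write. A worked sketch with assumed numbers, taking a 10+4 scheme where min_parity_fragments_needed() returns 1, as is typical for Reed-Solomon backends:

# Assumed numbers for illustration; the real value comes from PyECLib
# and varies with ec_type.
ec_ndata = 10                      # data fragments
min_parity_needed = 1              # pyeclib_driver.min_parity_fragments_needed()
ec_quorum_size = ec_ndata + min_parity_needed  # 11 of 14 total fragments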