Example #1
 def __init__(self, idx, name='', is_default=False, is_deprecated=False,
              object_ring=None):
     try:
         self.idx = int(idx)
     except ValueError:
         raise PolicyError('Invalid index', idx)
     if self.idx < 0:
         raise PolicyError('Invalid index', idx)
     if not name:
         raise PolicyError('Invalid name %r' % name, idx)
     # this is defensively restrictive, but could be expanded in the future
     if not all(c in VALID_CHARS for c in name):
         raise PolicyError('Names are used as HTTP headers, and can not '
                           'reliably contain any characters not in %r. '
                           'Invalid name %r' % (VALID_CHARS, name))
     if name.upper() == LEGACY_POLICY_NAME.upper() and self.idx != 0:
         msg = 'The name %s is reserved for policy index 0. ' \
             'Invalid name %r' % (LEGACY_POLICY_NAME, name)
         raise PolicyError(msg, idx)
     self.name = name
     self.is_deprecated = config_true_value(is_deprecated)
     self.is_default = config_true_value(is_default)
     if self.is_deprecated and self.is_default:
         raise PolicyError('Deprecated policy can not be default.  '
                           'Invalid config', self.idx)
     self.ring_name = _get_policy_string('object', self.idx)
     self.object_ring = object_ring
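A common thread through all of these examples is Swift's config_true_value helper, which turns loosely-typed config strings ('yes', 't', '1', ...) into booleans. The sketch below approximates its documented behavior; the exact truthy set is an assumption here, not quoted from the implementation.

    # Approximation of swift.common.utils.config_true_value; the truthy
    # set below is assumed, not copied from the source.
    TRUE_VALUES = set(('true', '1', 'yes', 'on', 't', 'y'))

    def config_true_value(value):
        # real booleans pass straight through; strings are matched
        # case-insensitively against the truthy set
        return value is True or \
            (isinstance(value, str) and value.lower() in TRUE_VALUES)

    assert config_true_value('Yes')
    assert not config_true_value('f')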
Example #2
 def __init__(self, conf, logger=None):
     super(ContainerController, self).__init__(conf)
     self.logger = logger or get_logger(conf, log_route='container-server')
     self.log_requests = config_true_value(conf.get('log_requests', 'true'))
     self.root = conf.get('devices', '/srv/node')
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
     self.node_timeout = int(conf.get('node_timeout', 3))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     #: ContainerSyncCluster instance for validating sync-to values.
     self.realms_conf = ContainerSyncRealms(
         os.path.join(
             conf.get('swift_dir', '/etc/swift'),
             'container-sync-realms.conf'),
         self.logger)
     #: The list of hosts we're allowed to send syncs to. This can be
     #: overridden by data in self.realms_conf
     self.allowed_sync_hosts = [
         h.strip()
         for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
         if h.strip()]
     self.replicator_rpc = ContainerReplicatorRpc(
         self.root, DATADIR, ContainerBroker, self.mount_check,
         logger=self.logger)
     self.auto_create_account_prefix = \
         conf.get('auto_create_account_prefix') or '.'
     if config_true_value(conf.get('allow_versions', 'f')):
         self.save_headers.append('x-versions-location')
     swift.common.db.DB_PREALLOCATION = \
         config_true_value(conf.get('db_preallocation', 'f'))
Example #3
    def __init__(self, conf, logger=None):
        """
        Creates a new WSGI application for the Swift Object Server. An
        example configuration is given at
        <source-dir>/etc/object-server.conf-sample or
        /etc/swift/object-server.conf-sample.
        """
        super(ObjectController, self).__init__(conf)
        self.logger = logger or get_logger(conf, log_route='object-server')
        self.node_timeout = int(conf.get('node_timeout', 3))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.client_timeout = int(conf.get('client_timeout', 60))
        self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
        self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
        self.log_requests = config_true_value(conf.get('log_requests', 'true'))
        self.max_upload_time = int(conf.get('max_upload_time', 86400))
        self.slow = int(conf.get('slow', 0))
        self.keep_cache_private = \
            config_true_value(conf.get('keep_cache_private', 'false'))

        default_allowed_headers = '''
            content-disposition,
            content-encoding,
            x-delete-at,
            x-object-manifest,
            x-static-large-object,
        '''
        extra_allowed_headers = [
            header.strip().lower() for header in conf.get(
                'allowed_headers', default_allowed_headers).split(',')
            if header.strip()
        ]
        self.allowed_headers = set()
        for header in extra_allowed_headers:
            if header not in DATAFILE_SYSTEM_META:
                self.allowed_headers.add(header)
        self.auto_create_account_prefix = \
            conf.get('auto_create_account_prefix') or '.'
        self.expiring_objects_account = self.auto_create_account_prefix + \
            (conf.get('expiring_objects_account_name') or 'expiring_objects')
        self.expiring_objects_container_divisor = \
            int(conf.get('expiring_objects_container_divisor') or 86400)
        # Initialization was successful, so now apply the network chunk size
        # parameter as the default read / write buffer size for the network
        # sockets.
        #
        # NOTE WELL: This is a class setting, so until we can set this on a
        # per-connection basis, this affects reading and writing on ALL
        # sockets, those between the proxy servers and external clients, and
        # those between the proxy servers and the other internal servers.
        #
        # ** Because the primary motivation for this is to optimize how data
        # is written back to the proxy server, we could use the value from the
        # disk_chunk_size parameter. However, it affects all created sockets
        # using this class so we have chosen to tie it to the
        # network_chunk_size parameter value instead.
        socket._fileobject.default_bufsize = self.network_chunk_size

        # Provide further setup specific to an object server implementation.
        self.setup(conf)
Example #4
 def __init__(self, idx, name="", is_default=False, is_deprecated=False, object_ring=None):
     # do not allow BaseStoragePolicy class to be instantiated directly
     if type(self) == BaseStoragePolicy:
         raise TypeError("Can't instantiate BaseStoragePolicy directly")
     # policy parameter validation
     try:
         self.idx = int(idx)
     except ValueError:
         raise PolicyError("Invalid index", idx)
     if self.idx < 0:
         raise PolicyError("Invalid index", idx)
     if not name:
         raise PolicyError("Invalid name %r" % name, idx)
     # this is defensively restrictive, but could be expanded in the future
     if not all(c in VALID_CHARS for c in name):
         raise PolicyError(
             "Names are used as HTTP headers, and can not "
             "reliably contain any characters not in %r. "
             "Invalid name %r" % (VALID_CHARS, name)
         )
     if name.upper() == LEGACY_POLICY_NAME.upper() and self.idx != 0:
         msg = "The name %s is reserved for policy index 0. " "Invalid name %r" % (LEGACY_POLICY_NAME, name)
         raise PolicyError(msg, idx)
     self.name = name
     self.is_deprecated = config_true_value(is_deprecated)
     self.is_default = config_true_value(is_default)
     if self.policy_type not in BaseStoragePolicy.policy_type_to_policy_cls:
         raise PolicyError("Invalid type", self.policy_type)
     if self.is_deprecated and self.is_default:
         raise PolicyError("Deprecated policy can not be default.  " "Invalid config", self.idx)
     self.ring_name = _get_policy_string("object", self.idx)
     self.object_ring = object_ring
Example #5
def filter_factory(global_conf, **local_conf):
    """
    Returns the WSGI filter for use with paste.deploy.

    Parameters in config:

    # value to prepend to the account in order to compute the trash location
    trash_prefix = ".trash-"
    # how long, in seconds, trash objects should live before expiring. Set to 0
    # to keep trash objects forever.
    trash_lifetime = 7776000  # 90 days
    # whether to block trash objects from being deleted
    block_trash_deletes = no
    # whether to enable undelete functionality by default. Administrators may
    # explicitly enable or disable per account or container via the
    # X-Undelete-Enabled header. Set this header to 'default' to resume default
    # behavior.
    enable_by_default = yes
    """
    conf = global_conf.copy()
    conf.update(local_conf)

    trash_prefix = conf.get("trash_prefix", DEFAULT_TRASH_PREFIX)
    trash_lifetime = int(conf.get("trash_lifetime", DEFAULT_TRASH_LIFETIME))
    block_trash_deletes = utils.config_true_value(
        conf.get('block_trash_deletes', 'no'))
    enable_by_default = utils.config_true_value(
        conf.get('enable_by_default', 'yes'))

    def filt(app):
        return UndeleteMiddleware(app, trash_prefix=trash_prefix,
                                  trash_lifetime=trash_lifetime,
                                  block_trash_deletes=block_trash_deletes,
                                  enable_by_default=enable_by_default)
    return filt
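For reference, paste.deploy invokes a filter factory like this with the merged config and expects a one-argument callable back. A hedged usage sketch, where some_wsgi_app is a stand-in for the next app in the pipeline:

    # Hypothetical wiring outside of paste.deploy:
    make_filter = filter_factory({}, trash_lifetime='3600',
                                 block_trash_deletes='yes')
    app = make_filter(some_wsgi_app)  # -> UndeleteMiddleware instance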
Example #6
    def __init__(self, idx, name='', is_default=False, is_deprecated=False,
                 object_ring=None, aliases=''):
        # do not allow BaseStoragePolicy class to be instantiated directly
        if type(self) == BaseStoragePolicy:
            raise TypeError("Can't instantiate BaseStoragePolicy directly")
        # policy parameter validation
        try:
            self.idx = int(idx)
        except ValueError:
            raise PolicyError('Invalid index', idx)
        if self.idx < 0:
            raise PolicyError('Invalid index', idx)
        self.alias_list = []
        self.add_name(name)
        if aliases:
            names_list = list_from_csv(aliases)
            for alias in names_list:
                if alias == name:
                    continue
                self.add_name(alias)
        self.is_deprecated = config_true_value(is_deprecated)
        self.is_default = config_true_value(is_default)
        if self.policy_type not in BaseStoragePolicy.policy_type_to_policy_cls:
            raise PolicyError('Invalid type', self.policy_type)
        if self.is_deprecated and self.is_default:
            raise PolicyError('Deprecated policy can not be default.  '
                              'Invalid config', self.idx)

        self.ring_name = _get_policy_string('object', self.idx)
        self.object_ring = object_ring
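The aliases branch above depends on list_from_csv. Roughly, and this is an approximation rather than the verbatim helper, it splits a comma-separated string and discards blank entries:

    # Approximate behavior of swift.common.utils.list_from_csv:
    def list_from_csv(value):
        if value:
            return [v.strip() for v in value.split(',') if v.strip()]
        return []

    assert list_from_csv('gold, archive, ') == ['gold', 'archive']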
Example #7
 def __init__(self, app, conf, logger=None):
     self.app = app
     self.logger = logger or get_logger(conf, log_route='read_only')
     self.read_only = config_true_value(conf.get('read_only'))
     self.write_methods = {'COPY', 'POST', 'PUT'}
     if not config_true_value(conf.get('allow_deletes')):
         self.write_methods.add('DELETE')
Example #8
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='container-updater')
     self.devices = conf.get('devices', '/srv/node')
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
     self.swift_dir = conf.get('swift_dir', '/etc/swift')
     self.interval = int(conf.get('interval', 300))
     self.account_ring = None
     self.concurrency = int(conf.get('concurrency', 4))
     self.slowdown = float(conf.get('slowdown', 0.01))
     self.node_timeout = float(conf.get('node_timeout', 3))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.no_changes = 0
     self.successes = 0
     self.failures = 0
     self.account_suppressions = {}
     self.account_suppression_time = \
         float(conf.get('account_suppression_time', 60))
     self.new_account_suppressions = None
     swift.common.db.DB_PREALLOCATION = \
         config_true_value(conf.get('db_preallocation', 'f'))
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.rcache = os.path.join(self.recon_cache_path, "container.recon")
     self.user_agent = 'container-updater %s' % os.getpid()
Example #9
 def __init__(self, conf, logger=None):
     self.conf = conf
     self.logger = logger or get_logger(conf, log_route='replicator')
     self.root = conf.get('devices', '/srv/node')
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
     self.port = int(conf.get('bind_port', self.default_port))
     concurrency = int(conf.get('concurrency', 8))
     self.cpool = GreenPool(size=concurrency)
     swift_dir = conf.get('swift_dir', '/etc/swift')
     self.ring = ring.Ring(swift_dir, ring_name=self.server_type)
     self._local_device_ids = set()
     self.per_diff = int(conf.get('per_diff', 1000))
     self.max_diffs = int(conf.get('max_diffs') or 100)
     self.interval = int(conf.get('interval') or
                         conf.get('run_pause') or 30)
     self.vm_test_mode = config_true_value(conf.get('vm_test_mode', 'no'))
     self.node_timeout = int(conf.get('node_timeout', 10))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
     swift.common.db.DB_PREALLOCATION = \
         config_true_value(conf.get('db_preallocation', 'f'))
     self._zero_stats()
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.recon_replicator = '%s.recon' % self.server_type
     self.rcache = os.path.join(self.recon_cache_path,
                                self.recon_replicator)
     self.extract_device_re = re.compile('%s%s([^%s]+)' % (
         self.root, os.path.sep, os.path.sep))
Example #10
 def __init__(self, app, conf):
     self.app = app
     self.conf = conf
     self.logger = get_logger(conf, log_route='kerbauth')
     self.log_headers = config_true_value(conf.get('log_headers', 'f'))
     self.reseller_prefix = conf.get('reseller_prefix', 'AUTH').strip()
     if self.reseller_prefix and self.reseller_prefix[-1] != '_':
         self.reseller_prefix += '_'
     self.logger.set_statsd_prefix('kerbauth.%s' % (
         self.reseller_prefix if self.reseller_prefix else 'NONE',))
     self.auth_prefix = conf.get('auth_prefix', '/auth/')
     if not self.auth_prefix or not self.auth_prefix.strip('/'):
         self.logger.warning('Rewriting invalid auth prefix "%s" to '
                             '"/auth/" (Non-empty auth prefix path '
                             'is required)' % self.auth_prefix)
         self.auth_prefix = '/auth/'
     if self.auth_prefix[0] != '/':
         self.auth_prefix = '/' + self.auth_prefix
     if self.auth_prefix[-1] != '/':
         self.auth_prefix += '/'
     self.token_life = int(conf.get('token_life', 86400))
     self.allow_overrides = config_true_value(
         conf.get('allow_overrides', 't'))
     self.storage_url_scheme = conf.get('storage_url_scheme', 'default')
     self.ext_authentication_url = conf.get('ext_authentication_url')
     if not self.ext_authentication_url:
         raise RuntimeError("Missing filter parameter ext_authentication_"
                            "url in /etc/swift/proxy-server.conf")
Example #11
def filter_factory(global_conf, **local_config):
    conf = global_conf.copy()
    conf.update(local_config)

    ns = conf.get('sds_namespace')
    acct = conf.get('sds_default_account')
    proxy = conf.get('sds_proxy_url')

    if ns is None:
        raise ConfigurationException('No OIO-SDS namespace configured')
    if acct is None:
        raise ConfigurationException('No OIO-SDS account configured')
    if proxy is None:
        raise ConfigurationException('No OIO-SDS proxy URL configured')

    strip_v1 = config_true_value(local_config.pop('strip_v1', False))
    account_first = config_true_value(local_config.pop('account_first', False))
    skip_metadata = config_true_value(local_config.pop('skip_metadata', False))

    def factory(app):
        return HashedContainerMiddleware(app, ns, acct, proxy,
                                         strip_v1=strip_v1,
                                         account_first=account_first,
                                         skip_metadata=skip_metadata,
                                         **local_config)
    return factory
Example #12
def filter_factory(global_conf, **local_config):
    conf = global_conf.copy()
    conf.update(local_config)

    acct = conf.get('sds_default_account')

    if acct is None:
        raise ConfigurationException('No OIO-SDS account configured')

    account_first = config_true_value(local_config.get('account_first'))
    failsafe = config_true_value(local_config.get('failsafe'))
    swift3_compat = config_true_value(local_config.get('swift3_compat'))
    strip_v1 = config_true_value(local_config.get('strip_v1'))
    # By default this is enabled, to be compatible with openio-sds < 4.2.
    stop_at_first_match = config_true_value(
        local_config.get('stop_at_first_match', True))
    pattern_dict = {k: v for k, v in local_config.items()
                    if k.startswith("pattern")}

    def factory(app):
        patterns = [pattern_dict[k] for k in sorted(pattern_dict.keys())]
        logger = get_logger(conf)
        logger.info("Using patterns %s", patterns)
        return RegexContainerMiddleware(
            app, acct, patterns,
            strip_v1=strip_v1, account_first=account_first,
            swift3_compat=swift3_compat,
            stop_at_first_match=stop_at_first_match,
            failsafe=failsafe)
    return factory
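Since sorted() orders the pattern keys lexicographically, pattern10 sorts between pattern1 and pattern2, which matters when stop_at_first_match is enabled. A quick illustration with hypothetical keys:

    # hypothetical pattern_* keys from local_config
    pattern_dict = {'pattern1': r'^s3/', 'pattern2': r'^v1/',
                    'pattern10': r'^other/'}
    patterns = [pattern_dict[k] for k in sorted(pattern_dict.keys())]
    # lexicographic, not numeric: pattern1, pattern10, pattern2
    assert patterns == [r'^s3/', r'^other/', r'^v1/']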
Example #13
 def __init__(self, app, conf):
     self.app = app
     self.logger = get_logger(conf, log_route='profile')
     self.log_filename_prefix = conf.get('log_filename_prefix',
                                         DEFAULT_PROFILE_PREFIX)
     dirname = os.path.dirname(self.log_filename_prefix)
      # Note: creating this directory may fail with a permission error;
      # it is better to create it, with the proper ownership for the
      # current user, ahead of time.
     if not os.path.exists(dirname):
         os.makedirs(dirname)
     self.dump_interval = float(conf.get('dump_interval', 5.0))
     self.dump_timestamp = config_true_value(conf.get(
         'dump_timestamp', 'no'))
     self.flush_at_shutdown = config_true_value(conf.get(
         'flush_at_shutdown', 'no'))
     self.path = conf.get('path', '__profile__').replace('/', '')
     self.unwind = config_true_value(conf.get('unwind', 'no'))
     self.profile_module = conf.get('profile_module',
                                    'eventlet.green.profile')
     self.profiler = get_profiler(self.profile_module)
     self.profile_log = ProfileLog(self.log_filename_prefix,
                                   self.dump_timestamp)
     self.viewer = HTMLViewer(self.path, self.profile_module,
                              self.profile_log)
     self.dump_pool = GreenPool(1000)
     self.last_dump_at = None
Example #14
 def __init__(self, conf, logger=None):
     self.conf = conf
     self.logger = logger or get_logger(conf, log_route='account-reaper')
     self.devices = conf.get('devices', '/srv/node')
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
     self.interval = int(conf.get('interval', 3600))
     self.swift_dir = conf.get('swift_dir', '/etc/swift')
     self.account_ring = None
     self.container_ring = None
     self.object_ring = None
     self.node_timeout = float(conf.get('node_timeout', 10))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.myips = whataremyips(conf.get('bind_ip', '0.0.0.0'))
     self.bind_port = int(conf.get('bind_port', 6202))
     self.concurrency = int(conf.get('concurrency', 25))
     self.container_concurrency = self.object_concurrency = \
         sqrt(self.concurrency)
     self.container_pool = GreenPool(size=self.container_concurrency)
     swift.common.db.DB_PREALLOCATION = \
         config_true_value(conf.get('db_preallocation', 'f'))
     self.delay_reaping = int(conf.get('delay_reaping') or 0)
     reap_warn_after = float(conf.get('reap_warn_after') or 86400 * 30)
     self.reap_not_done_after = reap_warn_after + self.delay_reaping
     self.start_time = time()
     self.reset_stats()
Example #15
 def __init__(self, conf):
     self.logger = get_logger(conf, log_route='container-server')
     self.root = conf.get('devices', '/srv/node/')
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
     self.node_timeout = int(conf.get('node_timeout', 3))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     replication_server = conf.get('replication_server', None)
     if replication_server is None:
         allowed_methods = ['DELETE', 'PUT', 'HEAD', 'GET', 'REPLICATE',
                            'POST']
     else:
         replication_server = config_true_value(replication_server)
         if replication_server:
             allowed_methods = ['REPLICATE']
         else:
             allowed_methods = ['DELETE', 'PUT', 'HEAD', 'GET', 'POST']
     self.replication_server = replication_server
     self.allowed_methods = allowed_methods
     self.allowed_sync_hosts = [
         h.strip()
         for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
         if h.strip()]
     self.replicator_rpc = ReplicatorRpc(
         self.root, DATADIR, ContainerBroker, self.mount_check,
         logger=self.logger)
     self.auto_create_account_prefix = \
         conf.get('auto_create_account_prefix') or '.'
     if config_true_value(conf.get('allow_versions', 'f')):
         self.save_headers.append('x-versions-location')
     swift.common.db.DB_PREALLOCATION = \
         config_true_value(conf.get('db_preallocation', 'f'))
Example #16
 def __init__(self, conf, logger=None):
     """
     :param conf: configuration object obtained from ConfigParser
     :param logger: logging object
     """
     self.conf = conf
     self.logger = logger or get_logger(conf, log_route="object-reconstructor")
     self.devices_dir = conf.get("devices", "/srv/node")
     self.mount_check = config_true_value(conf.get("mount_check", "true"))
     self.swift_dir = conf.get("swift_dir", "/etc/swift")
     self.bind_ip = conf.get("bind_ip", "0.0.0.0")
     self.servers_per_port = int(conf.get("servers_per_port", "0") or 0)
     self.port = None if self.servers_per_port else int(conf.get("bind_port", 6000))
     self.concurrency = int(conf.get("concurrency", 1))
     self.stats_interval = int(conf.get("stats_interval", "300"))
     self.ring_check_interval = int(conf.get("ring_check_interval", 15))
     self.next_check = time.time() + self.ring_check_interval
     self.reclaim_age = int(conf.get("reclaim_age", 86400 * 7))
     self.partition_times = []
     self.interval = int(conf.get("interval") or conf.get("run_pause") or 30)
     self.http_timeout = int(conf.get("http_timeout", 60))
     self.lockup_timeout = int(conf.get("lockup_timeout", 1800))
     self.recon_cache_path = conf.get("recon_cache_path", "/var/cache/swift")
     self.rcache = os.path.join(self.recon_cache_path, "object.recon")
     # defaults subject to change after beta
     self.conn_timeout = float(conf.get("conn_timeout", 0.5))
     self.node_timeout = float(conf.get("node_timeout", 10))
     self.network_chunk_size = int(conf.get("network_chunk_size", 65536))
     self.disk_chunk_size = int(conf.get("disk_chunk_size", 65536))
     self.headers = {"Content-Length": "0", "user-agent": "obj-reconstructor %s" % os.getpid()}
     self.handoffs_first = config_true_value(conf.get("handoffs_first", False))
     self._df_router = DiskFileRouter(conf, self.logger)
Example #17
 def __init__(self, conf, logger=None):
     """
     :param conf: configuration object obtained from ConfigParser
     :param logger: logging object
     """
     self.conf = conf
     self.logger = logger or get_logger(conf, log_route='object-replicator')
     self.devices_dir = conf.get('devices', '/srv/node')
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
     self.swift_dir = conf.get('swift_dir', '/etc/swift')
     self.bind_ip = conf.get('bind_ip', '0.0.0.0')
     self.servers_per_port = int(conf.get('servers_per_port', '0') or 0)
     self.port = None if self.servers_per_port else \
         int(conf.get('bind_port', 6000))
     self.concurrency = int(conf.get('concurrency', 1))
     self.stats_interval = int(conf.get('stats_interval', '300'))
     self.ring_check_interval = int(conf.get('ring_check_interval', 15))
     self.next_check = time.time() + self.ring_check_interval
     self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
     self.partition_times = []
     self.interval = int(conf.get('interval') or
                         conf.get('run_pause') or 30)
     self.rsync_timeout = int(conf.get('rsync_timeout', 900))
     self.rsync_io_timeout = conf.get('rsync_io_timeout', '30')
     self.rsync_bwlimit = conf.get('rsync_bwlimit', '0')
     self.rsync_compress = config_true_value(
         conf.get('rsync_compress', 'no'))
     self.rsync_module = conf.get('rsync_module', '').rstrip('/')
     if not self.rsync_module:
         self.rsync_module = '{replication_ip}::object'
         if config_true_value(conf.get('vm_test_mode', 'no')):
             self.logger.warn('Option object-replicator/vm_test_mode is '
                              'deprecated and will be removed in a future '
                              'version. Update your configuration to use '
                              'option object-replicator/rsync_module.')
             self.rsync_module += '{replication_port}'
     self.http_timeout = int(conf.get('http_timeout', 60))
     self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.rcache = os.path.join(self.recon_cache_path, "object.recon")
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.node_timeout = float(conf.get('node_timeout', 10))
     self.sync_method = getattr(self, conf.get('sync_method') or 'rsync')
     self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
     self.default_headers = {
         'Content-Length': '0',
         'user-agent': 'object-replicator %s' % os.getpid()}
     self.rsync_error_log_line_length = \
         int(conf.get('rsync_error_log_line_length', 0))
     self.handoffs_first = config_true_value(conf.get('handoffs_first',
                                                      False))
     self.handoff_delete = config_auto_int_value(
         conf.get('handoff_delete', 'auto'), 0)
     if any((self.handoff_delete, self.handoffs_first)):
         self.logger.warn('Handoff only mode is not intended for normal '
                          'operation, please disable handoffs_first and '
                          'handoff_delete before the next '
                          'normal rebalance')
     self._diskfile_mgr = DiskFileManager(conf, self.logger)
Example #18
 def __init__(self, conf):
     """
     :param conf: configuration object obtained from ConfigParser
     :param logger: logging object
     """
     self.conf = conf
     self.logger = get_logger(conf, log_route="object-replicator")
     self.devices_dir = conf.get("devices", "/srv/node")
     self.mount_check = config_true_value(conf.get("mount_check", "true"))
     self.vm_test_mode = config_true_value(conf.get("vm_test_mode", "no"))
     self.swift_dir = conf.get("swift_dir", "/etc/swift")
     self.port = int(conf.get("bind_port", 6000))
     self.concurrency = int(conf.get("concurrency", 1))
     self.stats_interval = int(conf.get("stats_interval", "300"))
     self.object_ring = Ring(self.swift_dir, ring_name="object")
     self.ring_check_interval = int(conf.get("ring_check_interval", 15))
     self.next_check = time.time() + self.ring_check_interval
     self.reclaim_age = int(conf.get("reclaim_age", 86400 * 7))
     self.partition_times = []
     self.run_pause = int(conf.get("run_pause", 30))
     self.rsync_timeout = int(conf.get("rsync_timeout", 900))
     self.rsync_io_timeout = conf.get("rsync_io_timeout", "30")
     self.http_timeout = int(conf.get("http_timeout", 60))
     self.lockup_timeout = int(conf.get("lockup_timeout", 1800))
     self.recon_cache_path = conf.get("recon_cache_path", "/var/cache/swift")
     self.rcache = os.path.join(self.recon_cache_path, "object.recon")
Example #19
    def __init__(self, conf):
        """
        :param conf: configuration object obtained from ConfigParser
        :param logger: logging object
        """

        self.conf = conf
        self.logger = get_logger(conf, log_route='object-mover')
        self.devices_dir = conf.get('devices', '/srv/node')
        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
        self.vm_test_mode = config_true_value(conf.get('vm_test_mode', 'no'))
        self.swift_dir = conf.get('swift_dir', '/etc/swift')
        self.bind_ip = conf.get('bind_ip', '0.0.0.0')
        self.servers_per_port = int(conf.get('servers_per_port', '0') or 0)
        self.port = None if self.servers_per_port else \
            int(conf.get('bind_port', 6000))
        self.concurrency = int(conf.get('concurrency', 1))
        self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))

        self.handoffs_first = config_true_value(conf.get('handoffs_first',
                                                         False))

        self.data_moving_map_dump = (conf.get('data_moving_map_dump')
                                     or DEFAULT_DUMP_FILE)

        self._diskfile_mgr = DiskFileManager(conf, self.logger)

        self.mover_tmp_dir = (conf.get('mover_tmp_dir') or 'data_mover')
        self.retries = int(conf.get('retries', 3))
        # bool() would treat any non-empty string as true, so parse the
        # config value properly
        self.test = config_true_value(conf.get('test', 'false'))

        self.retrie_list = []
Example #20
    def __init__(self, app, conf):
        """
        This function is called when the Swift proxy initializes.
        """
        self.app = app
        self.conf = conf
        self.logger = get_logger(conf, log_route='customauth')
        self.log_headers = config_true_value(conf.get('log_headers', 'f'))
        self.reseller_prefix = conf.get('reseller_prefix', 'AUTH').strip()
        if self.reseller_prefix and self.reseller_prefix[-1] != '_':
            self.reseller_prefix += '_'
        self.logger.set_statsd_prefix('customauth.%s' % (
            self.reseller_prefix if self.reseller_prefix else 'NONE',))
        self.auth_prefix = conf.get('auth_prefix', '/auth/')
        # Organization
        self.organization_id = conf.get('organization_id', '57b69c457792482c8d817c4945c6c8a8')


        # Keystone
        self.keystone_auth_endpoint = conf.get('keystone_auth_endpoint', 'http://cloud.lab.fiware.org:4730/v2.0/tokens')
        self.keystone_tenant_endpoint = conf.get('keystone_tenant_endpoint', 'http://cloud.lab.fiware.org:4730/v2.0/tenants')
        if not self.auth_prefix or not self.auth_prefix.strip('/'):
            self.logger.warning('Rewriting invalid auth prefix "%s" to '
                                '"/auth/" (Non-empty auth prefix path '
                                'is required)' % self.auth_prefix)
            self.auth_prefix = '/auth/'
        if self.auth_prefix[0] != '/':
            self.auth_prefix = '/' + self.auth_prefix
        if self.auth_prefix[-1] != '/':
            self.auth_prefix += '/'
        self.token_life = int(conf.get('token_life', 86400))
        self.allow_overrides = config_true_value(
            conf.get('allow_overrides', 't'))
        self.storage_url_scheme = conf.get('storage_url_scheme', 'default')
        self.logger.info('CustomAuth v1.3 loaded successfully')
Example #21
    def __init__(self, conf, logger=None):
        # location/directory of the metadata database (meta.db)
        self.location = conf.get('location', '/srv/node/sdb1/metadata/')
        # path to the actual file
        self.db_file = os.path.join(self.location, 'meta.db')
        self.logger = logger or get_logger(conf, log_route='metadata-server')
        self.root = conf.get('devices', '/srv/node')
        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
        self.node_timeout = int(conf.get('node_timeout', 3))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        replication_server = conf.get('replication_server', None)
        if replication_server is not None:
            replication_server = config_true_value(replication_server)
        self.replication_server = replication_server
        self.allowed_sync_hosts = [
            h.strip()
            for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
            if h.strip()
        ]
        self.replicator_rpc = ReplicatorRpc(
            self.root,
            DATADIR,
            MetadataBroker,
            self.mount_check,
            logger=self.logger
        )

        if config_true_value(conf.get('allow_versions', 'f')):
            self.save_headers.append('x-versions-location')

        swift.common.db.DB_PREALLOCATION = config_true_value(
            conf.get('db_preallocation', 'f'))
Example #22
 def __init__(self, conf, logger=None):
     """
     :param conf: configuration object obtained from ConfigParser
     :param logger: logging object
     """
     self.conf = conf
     self.logger = logger or get_logger(
         conf, log_route='object-reconstructor')
     self.devices_dir = conf.get('devices', '/srv/node')
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
     self.swift_dir = conf.get('swift_dir', '/etc/swift')
     self.port = int(conf.get('bind_port', 6000))
     self.concurrency = int(conf.get('concurrency', 1))
     self.stats_interval = int(conf.get('stats_interval', '300'))
     self.ring_check_interval = int(conf.get('ring_check_interval', 15))
     self.next_check = time.time() + self.ring_check_interval
     self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
     self.partition_times = []
     self.run_pause = int(conf.get('run_pause', 30))
     self.http_timeout = int(conf.get('http_timeout', 60))
     self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.rcache = os.path.join(self.recon_cache_path, "object.recon")
     # defaults subject to change after beta
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.node_timeout = float(conf.get('node_timeout', 10))
     self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
     self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
     self.headers = {
         'Content-Length': '0',
         'user-agent': 'obj-reconstructor %s' % os.getpid()}
     self.handoffs_first = config_true_value(conf.get('handoffs_first',
                                                      False))
     self._df_router = DiskFileRouter(conf, self.logger)
Example #23
def filter_factory(global_conf, **local_config):
    conf = global_conf.copy()
    conf.update(local_config)
    global LOG
    LOG = get_logger(conf)

    acct = conf.get('sds_default_account')

    if acct is None:
        raise ConfigurationException('No OIO-SDS account configured')

    account_first = config_true_value(local_config.get('account_first'))
    swift3_compat = config_true_value(local_config.get('swift3_compat'))
    strip_v1 = config_true_value(local_config.get('strip_v1'))
    redis_keys_format = local_config.get('redis_keys_format',
                                         REDIS_KEYS_FORMAT_V1)
    redis_host = local_config.get('redis_host')
    sentinel_hosts = local_config.get('sentinel_hosts')
    sentinel_name = local_config.get('sentinel_name')

    def factory(app):
        return ContainerHierarchyMiddleware(
            app, global_conf, acct,
            strip_v1=strip_v1,
            account_first=account_first,
            swift3_compat=swift3_compat,
            redis_keys_format=redis_keys_format,
            redis_host=redis_host,
            sentinel_hosts=sentinel_hosts,
            sentinel_name=sentinel_name)
    return factory
Example #24
 def __init__(self, conf):
     """
     :param conf: configuration object obtained from ConfigParser
     :param logger: logging object
     """
     self.conf = conf
     self.logger = get_logger(conf, log_route='object-replicator')
     self.devices_dir = conf.get('devices', '/srv/node')
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
     self.vm_test_mode = config_true_value(conf.get('vm_test_mode', 'no'))
     self.swift_dir = conf.get('swift_dir', '/etc/swift')
     self.port = int(conf.get('bind_port', 6000))
     self.concurrency = int(conf.get('concurrency', 1))
     self.stats_interval = int(conf.get('stats_interval', '300'))
     self.object_ring = Ring(self.swift_dir, ring_name='object')
     self.ring_check_interval = int(conf.get('ring_check_interval', 15))
     self.next_check = time.time() + self.ring_check_interval
     self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
     self.partition_times = []
     self.run_pause = int(conf.get('run_pause', 30))
     self.rsync_timeout = int(conf.get('rsync_timeout', 900))
     self.rsync_io_timeout = conf.get('rsync_io_timeout', '30')
     self.rsync_bwlimit = conf.get('rsync_bwlimit', '0')
     self.http_timeout = int(conf.get('http_timeout', 60))
     self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.rcache = os.path.join(self.recon_cache_path, "object.recon")
     self.headers = {
         'Content-Length': '0',
         'user-agent': 'obj-replicator %s' % os.getpid()}
     self.rsync_error_log_line_length = \
         int(conf.get('rsync_error_log_line_length', 0))
Example #25
 def __init__(self, conf):
     """
     :param conf: configuration object obtained from ConfigParser
     :param logger: logging object
     """
     self.conf = conf
     self.logger = get_logger(conf, log_route="object-replicator")
     self.devices_dir = conf.get("devices", "/srv/node")
     self.mount_check = config_true_value(conf.get("mount_check", "true"))
     self.vm_test_mode = config_true_value(conf.get("vm_test_mode", "no"))
     self.swift_dir = conf.get("swift_dir", "/etc/swift")
     self.port = int(conf.get("bind_port", 6000))
     self.concurrency = int(conf.get("concurrency", 1))
     self.stats_interval = int(conf.get("stats_interval", "300"))
     self.ring_check_interval = int(conf.get("ring_check_interval", 15))
     self.next_check = time.time() + self.ring_check_interval
     self.reclaim_age = int(conf.get("reclaim_age", 86400 * 7))
     self.partition_times = []
     self.run_pause = int(conf.get("run_pause", 30))
     self.rsync_timeout = int(conf.get("rsync_timeout", 900))
     self.rsync_io_timeout = conf.get("rsync_io_timeout", "30")
     self.rsync_bwlimit = conf.get("rsync_bwlimit", "0")
     self.http_timeout = int(conf.get("http_timeout", 60))
     self.lockup_timeout = int(conf.get("lockup_timeout", 1800))
     self.recon_cache_path = conf.get("recon_cache_path", "/var/cache/swift")
     self.rcache = os.path.join(self.recon_cache_path, "object.recon")
     self.conn_timeout = float(conf.get("conn_timeout", 0.5))
     self.node_timeout = float(conf.get("node_timeout", 10))
     self.sync_method = getattr(self, conf.get("sync_method") or "rsync")
     self.network_chunk_size = int(conf.get("network_chunk_size", 65536))
     self.headers = {"Content-Length": "0", "user-agent": "object-replicator %s" % os.getpid()}
     self.rsync_error_log_line_length = int(conf.get("rsync_error_log_line_length", 0))
     self.handoffs_first = config_true_value(conf.get("handoffs_first", False))
     self.handoff_delete = config_auto_int_value(conf.get("handoff_delete", "auto"), 0)
     self._diskfile_mgr = DiskFileManager(conf, self.logger)
Example #26
 def __init__(self, conf):
     """
     Creates a new WSGI application for the Swift Object Server. An
     example configuration is given at
     <source-dir>/etc/object-server.conf-sample or
     /etc/swift/object-server.conf-sample.
     """
     self.logger = get_logger(conf, log_route="object-server")
     self.devices = conf.get("devices", "/srv/node/")
     self.mount_check = config_true_value(conf.get("mount_check", "true"))
     self.node_timeout = int(conf.get("node_timeout", 3))
     self.conn_timeout = float(conf.get("conn_timeout", 0.5))
     self.disk_chunk_size = int(conf.get("disk_chunk_size", 65536))
     self.network_chunk_size = int(conf.get("network_chunk_size", 65536))
     self.keep_cache_size = int(conf.get("keep_cache_size", 5242880))
     self.keep_cache_private = config_true_value(conf.get("keep_cache_private", "false"))
     self.log_requests = config_true_value(conf.get("log_requests", "true"))
     self.max_upload_time = int(conf.get("max_upload_time", 86400))
     self.slow = int(conf.get("slow", 0))
     self.bytes_per_sync = int(conf.get("mb_per_sync", 512)) * 1024 * 1024
     default_allowed_headers = """
         content-disposition,
         content-encoding,
         x-delete-at,
         x-object-manifest,
     """
     self.allowed_headers = set(
         i.strip().lower()
         for i in conf.get("allowed_headers", default_allowed_headers).split(",")
         if i.strip() and i.strip().lower() not in DISALLOWED_HEADERS
     )
     self.expiring_objects_account = (conf.get("auto_create_account_prefix") or ".") + "expiring_objects"
     self.expiring_objects_container_divisor = int(conf.get("expiring_objects_container_divisor") or 86400)
Example #27
    def container_request(self, req, start_response, enabled):
        # set version location header as sysmeta
        if VERSIONS_LOC_CLIENT in req.headers:
            val = req.headers.get(VERSIONS_LOC_CLIENT)
            if val:
                # differently from previous version, we are actually
                # returning an error if user tries to set versions location
                # while feature is explicitly disabled.
                if not config_true_value(enabled) and req.method in ("PUT", "POST"):
                    raise HTTPPreconditionFailed(
                        request=req, content_type="text/plain", body="Versioned Writes is disabled"
                    )

                location = check_container_format(req, val)
                req.headers[VERSIONS_LOC_SYSMETA] = location

                # reset original header to maintain sanity
                # now only sysmeta is source of Versions Location
                req.headers[VERSIONS_LOC_CLIENT] = ""

                # if both headers are in the same request
                # adding location takes precedence over removing
                if "X-Remove-Versions-Location" in req.headers:
                    del req.headers["X-Remove-Versions-Location"]
            else:
                # empty value is the same as X-Remove-Versions-Location
                req.headers["X-Remove-Versions-Location"] = "x"

        # handle removing versions container
        val = req.headers.get("X-Remove-Versions-Location")
        if val:
            req.headers.update({VERSIONS_LOC_SYSMETA: "", VERSIONS_LOC_CLIENT: ""})
            del req.headers["X-Remove-Versions-Location"]

        # handle versioning mode
        if VERSIONS_MODE_CLIENT in req.headers:
            val = req.headers.pop(VERSIONS_MODE_CLIENT)
            if val:
                if not config_true_value(enabled) and req.method in ("PUT", "POST"):
                    raise HTTPPreconditionFailed(
                        request=req, content_type="text/plain", body="Versioned Writes is disabled"
                    )
                if val not in VERSIONING_MODES:
                    raise HTTPBadRequest(
                        request=req,
                        content_type="text/plain",
                        body="X-Versions-Mode must be one of %s" % ", ".join(VERSIONING_MODES),
                    )
                req.headers[VERSIONS_MODE_SYSMETA] = val
            else:
                req.headers["X-Remove-Versions-Mode"] = "x"

        if req.headers.pop("X-Remove-Versions-Mode", None):
            req.headers.update({VERSIONS_MODE_SYSMETA: ""})

        # send request and translate sysmeta headers from response
        vw_ctx = VersionedWritesContext(self.app, self.logger)
        return vw_ctx.handle_container_request(req.environ, start_response)
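The upshot of the translation above is that only the sysmeta copy of the location survives into the backend request. A toy trace with plain dicts; the header names are assumptions based on Swift's container sysmeta convention, not taken from the module:

    # assumed values for the module-level constants used above
    VERSIONS_LOC_CLIENT = 'X-Versions-Location'
    VERSIONS_LOC_SYSMETA = 'X-Container-Sysmeta-Versions-Location'

    headers = {VERSIONS_LOC_CLIENT: 'backups',
               'X-Remove-Versions-Location': 'x'}
    headers[VERSIONS_LOC_SYSMETA] = headers[VERSIONS_LOC_CLIENT]
    headers[VERSIONS_LOC_CLIENT] = ''      # blank the client header
    del headers['X-Remove-Versions-Location']  # add wins over remove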
Example #28
    def __init__(self, conf, logger=None):
        """
        Creates a new WSGI application for the Swift Object Server. An
        example configuration is given at
        <source-dir>/etc/object-server.conf-sample or
        /etc/swift/object-server.conf-sample.
        """
        self.logger = logger or get_logger(conf, log_route="object-server")
        self.node_timeout = int(conf.get("node_timeout", 3))
        self.conn_timeout = float(conf.get("conn_timeout", 0.5))
        self.client_timeout = int(conf.get("client_timeout", 60))
        self.disk_chunk_size = int(conf.get("disk_chunk_size", 65536))
        self.network_chunk_size = int(conf.get("network_chunk_size", 65536))
        self.log_requests = config_true_value(conf.get("log_requests", "true"))
        self.max_upload_time = int(conf.get("max_upload_time", 86400))
        self.slow = int(conf.get("slow", 0))
        self.keep_cache_private = config_true_value(conf.get("keep_cache_private", "false"))
        replication_server = conf.get("replication_server", None)
        if replication_server is not None:
            replication_server = config_true_value(replication_server)
        self.replication_server = replication_server

        default_allowed_headers = """
            content-disposition,
            content-encoding,
            x-delete-at,
            x-object-manifest,
            x-static-large-object,
        """
        extra_allowed_headers = [
            header.strip().lower()
            for header in conf.get("allowed_headers", default_allowed_headers).split(",")
            if header.strip()
        ]
        self.allowed_headers = set()
        for header in extra_allowed_headers:
            if header not in DATAFILE_SYSTEM_META:
                self.allowed_headers.add(header)
        self.expiring_objects_account = (conf.get("auto_create_account_prefix") or ".") + "expiring_objects"
        self.expiring_objects_container_divisor = int(conf.get("expiring_objects_container_divisor") or 86400)
        # Initialization was successful, so now apply the network chunk size
        # parameter as the default read / write buffer size for the network
        # sockets.
        #
        # NOTE WELL: This is a class setting, so until we can set this on a
        # per-connection basis, this affects reading and writing on ALL
        # sockets, those between the proxy servers and external clients, and
        # those between the proxy servers and the other internal servers.
        #
        # ** Because the primary motivation for this is to optimize how data
        # is written back to the proxy server, we could use the value from the
        # disk_chunk_size parameter. However, it affects all created sockets
        # using this class so we have chosen to tie it to the
        # network_chunk_size parameter value instead.
        socket._fileobject.default_bufsize = self.network_chunk_size

        # Provide further setup specific to an object server implementation.
        self.setup(conf)
Example #29
 def __init__(self, conf, container_ring=None, object_ring=None):
     #: The dict of configuration values from the [container-sync] section
     #: of the container-server.conf.
     self.conf = conf
     #: Logger to use for container-sync log lines.
     self.logger = get_logger(conf, log_route='container-sync')
     #: Path to the local device mount points.
     self.devices = conf.get('devices', '/srv/node')
     #: Indicates whether mount points should be verified as actual mount
     #: points (normally true, false for tests and SAIO).
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
     #: Minimum time between full scans. This is to keep the daemon from
     #: running wild on near empty systems.
     self.interval = int(conf.get('interval', 300))
     #: Maximum amount of time to spend syncing a container before moving on
      #: to the next one. If a container sync hasn't finished in this time,
     #: it'll just be resumed next scan.
     self.container_time = int(conf.get('container_time', 60))
     #: ContainerSyncCluster instance for validating sync-to values.
     self.realms_conf = ContainerSyncRealms(
         os.path.join(
             conf.get('swift_dir', '/etc/swift'),
             'container-sync-realms.conf'),
         self.logger)
     #: The list of hosts we're allowed to send syncs to. This can be
     #: overridden by data in self.realms_conf
     self.allowed_sync_hosts = [
         h.strip()
         for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
         if h.strip()]
     self.http_proxies = [
         a.strip()
         for a in conf.get('sync_proxy', '').split(',')
         if a.strip()]
     #: Number of containers with sync turned on that were successfully
     #: synced.
     self.container_syncs = 0
     #: Number of successful DELETEs triggered.
     self.container_deletes = 0
     #: Number of successful PUTs triggered.
     self.container_puts = 0
     #: Number of containers that didn't have sync turned on.
     self.container_skips = 0
     #: Number of containers that had a failure of some type.
     self.container_failures = 0
     #: Time of last stats report.
     self.reported = time()
     swift_dir = conf.get('swift_dir', '/etc/swift')
     #: swift.common.ring.Ring for locating containers.
     self.container_ring = container_ring or Ring(swift_dir,
                                                  ring_name='container')
     #: swift.common.ring.Ring for locating objects.
     self.object_ring = object_ring or Ring(swift_dir, ring_name='object')
     self._myips = whataremyips()
     self._myport = int(conf.get('bind_port', 6001))
     swift.common.db.DB_PREALLOCATION = \
         config_true_value(conf.get('db_preallocation', 'f'))
Example #30
 def __init__(self, conf, logger=None):
     super(AccountController, self).__init__(conf)
     self.logger = logger or get_logger(conf, log_route="account-server")
     self.log_requests = config_true_value(conf.get("log_requests", "true"))
     self.root = conf.get("devices", "/srv/node")
     self.mount_check = config_true_value(conf.get("mount_check", "true"))
     self.replicator_rpc = ReplicatorRpc(self.root, DATADIR, AccountBroker, self.mount_check, logger=self.logger)
     self.auto_create_account_prefix = conf.get("auto_create_account_prefix") or "."
     swift.common.db.DB_PREALLOCATION = config_true_value(conf.get("db_preallocation", "f"))
Example #31
    def container_request(self, req, start_response, enabled):
        sysmeta_version_hdr = get_sys_meta_prefix('container') + \
            'versions-location'

        # set version location header as sysmeta
        if 'X-Versions-Location' in req.headers:
            val = req.headers.get('X-Versions-Location')
            if val:
                # differently from previous version, we are actually
                # returning an error if user tries to set versions location
                # while feature is explicitly disabled.
                if not config_true_value(enabled) and \
                        req.method in ('PUT', 'POST'):
                    raise HTTPPreconditionFailed(
                        request=req,
                        content_type='text/plain',
                        body='Versioned Writes is disabled')

                location = check_container_format(req, val)
                req.headers[sysmeta_version_hdr] = location

                # reset original header to maintain sanity
                # now only sysmeta is source of Versions Location
                req.headers['X-Versions-Location'] = ''

                # if both headers are in the same request
                # adding location takes precedence over removing
                if 'X-Remove-Versions-Location' in req.headers:
                    del req.headers['X-Remove-Versions-Location']
            else:
                # empty value is the same as X-Remove-Versions-Location
                req.headers['X-Remove-Versions-Location'] = 'x'

        # handle removing versions container
        val = req.headers.get('X-Remove-Versions-Location')
        if val:
            req.headers.update({sysmeta_version_hdr: ''})
            req.headers.update({'X-Versions-Location': ''})
            del req.headers['X-Remove-Versions-Location']

        # send request and translate sysmeta headers from response
        vw_ctx = VersionedWritesContext(self.app, self.logger)
        return vw_ctx.handle_container_request(req.environ, start_response)
Example #32
    def __init__(self, config):
        for key in 'auth_uri username password'.split():
            if key not in config:
                raise SkipTest(
                    "Missing required configuration parameter: %s" % key)

        self.auth_url = config['auth_uri']
        self.insecure = config_true_value(config.get('insecure', 'false'))
        self.auth_version = str(config.get('auth_version', '1'))

        self.domain = config.get('domain')
        self.account = config.get('account')
        self.username = config['username']
        self.password = config['password']

        self.storage_netloc = None
        self.storage_path = None
        self.conn_class = None
        self.connection = None  # until you call .http_connect()
Example #33
 def connect(self):
     """
     Establishes a connection and starts an SSYNC request
     with the object server.
     """
     connection = response = None
     node_addr = '%s:%s' % (self.node['replication_ip'],
                            self.node['replication_port'])
     with exceptions.MessageTimeout(self.daemon.conn_timeout,
                                    'connect send'):
         connection = SsyncBufferedHTTPConnection(node_addr)
         connection.putrequest(
             'SSYNC',
             '/%s/%s' % (self.node['device'], self.job['partition']))
         connection.putheader('Transfer-Encoding', 'chunked')
         connection.putheader('X-Backend-Storage-Policy-Index',
                              int(self.job['policy']))
         # a sync job must use the node's backend_index for the frag_index
         # of the rebuilt fragments instead of the frag_index from the job
         # which will be rebuilding them
         frag_index = self.node.get('backend_index')
         if frag_index is not None:
             connection.putheader('X-Backend-Ssync-Frag-Index', frag_index)
             # Node-Index header is for backwards compat 2.4.0-2.20.0
             connection.putheader('X-Backend-Ssync-Node-Index', frag_index)
         connection.endheaders()
     with exceptions.MessageTimeout(self.daemon.node_timeout,
                                    'connect receive'):
         response = connection.getresponse()
         if response.status != http.HTTP_OK:
             err_msg = response.read()[:1024]
             raise exceptions.ReplicationException(
                 'Expected status %s; got %s (%s)' %
                 (http.HTTP_OK, response.status, err_msg))
         if self.include_non_durable and not config_true_value(
                 response.getheader('x-backend-accept-no-commit', False)):
             # fall back to legacy behaviour if receiver does not understand
             # X-Backend-Commit
             self.daemon.logger.warning(
                 'ssync receiver %s does not accept non-durable fragments' %
                 node_addr)
             self.include_non_durable = False
     return connection, response
Example #34
 def __init__(self, app, conf):
     self.app = app
     storage_domain = conf.get('storage_domain', 'example.com')
     self.storage_domain = [
         '.' + s for s in list_from_csv(storage_domain)
         if not s.startswith('.')
     ]
     self.storage_domain += [
         s for s in list_from_csv(storage_domain) if s.startswith('.')
     ]
     self.path_root = conf.get('path_root', 'v1').strip('/') + '/'
     prefixes = conf.get('reseller_prefixes', 'AUTH')
     self.reseller_prefixes = list_from_csv(prefixes)
     self.reseller_prefixes_lower = [
         x.lower() for x in self.reseller_prefixes
     ]
     self.default_reseller_prefix = conf.get('default_reseller_prefix')
     self.mangle_client_paths = config_true_value(
         conf.get('mangle_client_paths'))
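The two comprehensions above guarantee that every configured storage domain carries a leading dot, without double-dotting entries that already have one; for example:

    # assuming list_from_csv splits on commas and strips whitespace
    parts = ['example.com', '.internal.example.org']
    storage_domain = ['.' + s for s in parts if not s.startswith('.')]
    storage_domain += [s for s in parts if s.startswith('.')]
    assert storage_domain == ['.example.com', '.internal.example.org']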
Example #35
0
    def __init__(self, *args, **kwargs):
        swob.Response.__init__(self, *args, **kwargs)

        if self.etag:
            # re-assigning runs the value through swob's etag property
            # setter, which adds the double quotes to the etag header
            self.etag = self.etag

        sw_sysmeta_headers = swob.HeaderKeyDict()
        sw_headers = swob.HeaderKeyDict()
        headers = HeaderKeyDict()
        self.is_slo = False

        for key, val in self.headers.items():
            _key = key.lower()
            if _key.startswith(sysmeta_prefix('object')) or \
                    _key.startswith(sysmeta_prefix('container')):
                sw_sysmeta_headers[key] = val
            else:
                sw_headers[key] = val

        # Handle swift headers
        for key, val in sw_headers.items():
            _key = key.lower()

            if _key.startswith('x-object-meta-'):
                # Note that AWS ignores user-defined headers with '=' in the
                # header name. We translated underscores to '=5F' on the way
                # in, though.
                headers['x-amz-meta-' + _key[14:].replace('=5f', '_')] = val
            elif _key in ('content-length', 'content-type', 'content-range',
                          'content-encoding', 'content-disposition',
                          'content-language', 'etag', 'last-modified',
                          'x-robots-tag', 'cache-control', 'expires'):
                headers[key] = val
            elif _key == 'x-static-large-object':
                # for delete slo
                self.is_slo = config_true_value(val)

        self.headers = headers
        # Used for pure swift header handling at the request layer
        self.sw_headers = sw_headers
        self.sysmeta_headers = sw_sysmeta_headers
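The user-metadata translation above can be sketched in isolation; the
header name is hypothetical:

_key = 'x-object-meta-my=5fkey'  # lowercased Swift header
amz_key = 'x-amz-meta-' + _key[len('x-object-meta-'):].replace('=5f', '_')
# amz_key == 'x-amz-meta-my_key'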
Example #36
0
    def test_put_bucket_error_key2(self):
        if config_true_value(tf.cluster_info['s3api'].get('s3_acl')):
            if 's3_access_key2' not in tf.config or \
                    's3_secret_key2' not in tf.config:
                raise tf.SkipTest(
                    'Cannot test for BucketAlreadyExists with second user; '
                    'need s3_access_key2 and s3_secret_key2 configured')

            self.conn.create_bucket(Bucket='bucket')

            # Other users of the same account get the same 409 error
            conn2 = get_boto3_conn(tf.config['s3_access_key2'],
                                   tf.config['s3_secret_key2'])
            with self.assertRaises(botocore.exceptions.ClientError) as ctx:
                conn2.create_bucket(Bucket='bucket')
            self.assertEqual(
                ctx.exception.response['ResponseMetadata']['HTTPStatusCode'],
                409)
            self.assertEqual(ctx.exception.response['Error']['Code'],
                             'BucketAlreadyExists')
Example #37
0
    def __init__(self, config):
        for key in 'auth_host auth_port auth_ssl username password'.split():
            if key not in config:
                raise SkipTest("Missing required configuration parameter: %s" %
                               key)

        self.auth_host = config['auth_host']
        self.auth_port = int(config['auth_port'])
        self.auth_ssl = config['auth_ssl'] in ('on', 'true', 'yes', '1')
        self.insecure = config_true_value(config.get('insecure', 'false'))
        self.auth_prefix = config.get('auth_prefix', '/')
        self.auth_version = str(config.get('auth_version', '1'))

        self.account = config.get('account')
        self.username = config['username']
        self.password = config['password']

        self.storage_netloc = None
        self.storage_path = None
        self.conn_class = None
Example #38
0
    def __init__(self, app, wsgi_conf, *args, **kwargs):
        self.app = app
        self.conf = Config()

        # Set default values if they are not configured
        self.conf.allow_no_owner = config_true_value(
            wsgi_conf.get('allow_no_owner', False))
        self.conf.location = wsgi_conf.get('location', 'us-east-1')
        self.conf.dns_compliant_bucket_names = config_true_value(
            wsgi_conf.get('dns_compliant_bucket_names', True))
        self.conf.max_bucket_listing = config_positive_int_value(
            wsgi_conf.get('max_bucket_listing', 1000))
        self.conf.max_parts_listing = config_positive_int_value(
            wsgi_conf.get('max_parts_listing', 1000))
        self.conf.max_multi_delete_objects = config_positive_int_value(
            wsgi_conf.get('max_multi_delete_objects', 1000))
        self.conf.multi_delete_concurrency = config_positive_int_value(
            wsgi_conf.get('multi_delete_concurrency', 2))
        self.conf.s3_acl = config_true_value(wsgi_conf.get('s3_acl', False))
        self.conf.storage_domain = wsgi_conf.get('storage_domain', '')
        self.conf.auth_pipeline_check = config_true_value(
            wsgi_conf.get('auth_pipeline_check', True))
        self.conf.max_upload_part_num = config_positive_int_value(
            wsgi_conf.get('max_upload_part_num', 1000))
        self.conf.check_bucket_owner = config_true_value(
            wsgi_conf.get('check_bucket_owner', False))
        self.conf.force_swift_request_proxy_log = config_true_value(
            wsgi_conf.get('force_swift_request_proxy_log', False))
        self.conf.allow_multipart_uploads = config_true_value(
            wsgi_conf.get('allow_multipart_uploads', True))
        self.conf.min_segment_size = config_positive_int_value(
            wsgi_conf.get('min_segment_size', 5242880))
        self.conf.allowable_clock_skew = config_positive_int_value(
            wsgi_conf.get('allowable_clock_skew', 15 * 60))
        self.conf.cors_preflight_allow_origin = list_from_csv(
            wsgi_conf.get('cors_preflight_allow_origin', ''))
        if '*' in self.conf.cors_preflight_allow_origin and \
                len(self.conf.cors_preflight_allow_origin) > 1:
            raise ValueError('if cors_preflight_allow_origin should include '
                             'all domains, * must be the only entry')

        self.logger = get_logger(wsgi_conf,
                                 log_route=wsgi_conf.get('log_name', 's3api'))
        self.check_pipeline(wsgi_conf)
Example #39
0
def get_bucket_db(conf):
    """
    If `bucket_db_enabled` is set in `conf`, get the bucket database,
    otherwise return `None`.

    If `bucket_db_host` or `bucket_db_sentinel_hosts` are also set in `conf`,
    return an instance of `RedisBucketDb`, otherwise return an instance of
    `DummyBucketDb`.
    """
    db_kwargs = {
        k[10:]: v
        for k, v in conf.items() if k.startswith('bucket_db_')
    }
    if config_true_value(db_kwargs.get('enabled', 'false')):
        if 'host' in db_kwargs or 'sentinel_hosts' in db_kwargs:
            return RedisBucketDb(**db_kwargs)
        else:
            return DummyBucketDb(**db_kwargs)

    return None
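A sketch of the prefix-stripping dict comprehension above, with
hypothetical conf keys:

conf = {'bucket_db_enabled': 'true', 'bucket_db_host': 'localhost:6379'}
db_kwargs = {k[len('bucket_db_'):]: v
             for k, v in conf.items() if k.startswith('bucket_db_')}
# db_kwargs == {'enabled': 'true', 'host': 'localhost:6379'}
# -> RedisBucketDb is used because 'host' is present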
Example #40
0
 def __init__(self, app, conf, logger=None):
     self.app = app
     self.conf = conf
     self.logger = logger or get_logger(conf, log_route='container_sync')
     self.realms_conf = ContainerSyncRealms(
         os.path.join(conf.get('swift_dir', '/etc/swift'),
                      'container-sync-realms.conf'), self.logger)
     self.allow_full_urls = config_true_value(
         conf.get('allow_full_urls', 'true'))
     # configure current realm/cluster for /info
     self.realm = self.cluster = None
     current = conf.get('current', None)
     if current:
         try:
             self.realm, self.cluster = (
                 p.upper() for p in current.strip('/').split('/'))
         except ValueError:
             self.logger.error('Invalid current //REALM/CLUSTER (%s)',
                               current)
     self.register_info()
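The //REALM/CLUSTER parsing above can be sketched with a hypothetical
'current' value:

current = '//realm1/cluster1'
realm, cluster = (p.upper() for p in current.strip('/').split('/'))
# realm == 'REALM1', cluster == 'CLUSTER1'; any other shape raises
# ValueError and is logged as invalid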
Example #41
0
    def handle_container(self, env, start_response):
        """
        Handles a possible static web request for a container.

        :param env: The original WSGI environment dict.
        :param start_response: The original WSGI start_response hook.
        """
        container_info = self._get_container_info(env)
        req = Request(env)
        req.acl = container_info['read_acl']
        # we checked earlier that swift.authorize is set in env
        aresp = env['swift.authorize'](req)
        if aresp:
            resp = aresp(env, self._start_response)
            return self._error_response(resp, env, start_response)

        if not self._listings and not self._index:
            if config_true_value(env.get('HTTP_X_WEB_MODE', 'f')):
                return HTTPNotFound()(env, start_response)
            return self.app(env, start_response)
        if not env['PATH_INFO'].endswith('/'):
            resp = HTTPMovedPermanently(
                location=(env['PATH_INFO'] + '/'))
            return resp(env, start_response)
        if not self._index:
            return self._listing(env, start_response)
        tmp_env = dict(env)
        tmp_env['HTTP_USER_AGENT'] = \
            '%s StaticWeb' % env.get('HTTP_USER_AGENT')
        tmp_env['swift.source'] = 'SW'
        tmp_env['PATH_INFO'] += self._index
        resp = self._app_call(tmp_env)
        status_int = self._get_status_int()
        if status_int == HTTP_NOT_FOUND:
            return self._listing(env, start_response)
        elif not is_success(self._get_status_int()) and \
                not is_redirection(self._get_status_int()):
            return self._error_response(resp, env, start_response)
        start_response(self._response_status, self._response_headers,
                       self._response_exc_info)
        return resp
Example #42
0
    def object_request(self, req, api_version, account, container, obj,
                       allow_versioned_writes):
        account_name = unquote(account)
        container_name = unquote(container)
        object_name = unquote(obj)
        resp = None
        is_enabled = config_true_value(allow_versioned_writes)
        container_info = get_container_info(
            req.environ, self.app)

        # To maintain backwards compatibility, container version
        # location could be stored as sysmeta or not, need to check both.
        # If stored as sysmeta, check if middleware is enabled. If sysmeta
        # is not set, but versions property is set in container_info, then
        # for backwards compatibility feature is enabled.
        versions_cont = container_info.get(
            'sysmeta', {}).get('versions-location')
        if not versions_cont:
            versions_cont = container_info.get('versions')
            # if allow_versioned_writes is not set in the configuration files
            # but 'versions' is configured, enable feature to maintain
            # backwards compatibility
            if not allow_versioned_writes and versions_cont:
                is_enabled = True

        if is_enabled and versions_cont:
            versions_cont = unquote(versions_cont).split('/')[0]
            vw_ctx = VersionedWritesContext(self.app, self.logger)
            if req.method == 'PUT':
                resp = vw_ctx.handle_obj_versions_put(
                    req, versions_cont, api_version, account_name,
                    object_name)
            else:  # handle DELETE
                resp = vw_ctx.handle_obj_versions_delete(
                    req, versions_cont, api_version, account_name,
                    container_name, object_name)

        if resp:
            return resp
        else:
            return self.app
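A sketch of the versions-container lookup above, using hypothetical
container_info dicts for the sysmeta and legacy cases:

info_sysmeta = {'sysmeta': {'versions-location': 'ver_cont'}}
info_legacy = {'sysmeta': {}, 'versions': 'ver_cont'}
for info in (info_sysmeta, info_legacy):
    versions_cont = (info.get('sysmeta', {}).get('versions-location')
                     or info.get('versions'))
    assert versions_cont == 'ver_cont'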
Example #43
0
    def _get_storlet_invocation_options(self, req):
        options = dict()

        filtered_key = ['X-Storlet-Range', 'X-Storlet-Generate-Log']

        for key in req.headers:
            prefix = 'X-Storlet-'
            if key.startswith(prefix) and key not in filtered_key:
                new_key = 'storlet_' + \
                    key[len(prefix):].lower().replace('-', '_')
                options[new_key] = req.headers.get(key)

        generate_log = req.headers.get('X-Storlet-Generate-Log')
        options['generate_log'] = config_true_value(generate_log)
        options['scope'] = self.scope
        options['file_manager'] = \
            SwiftFileManager(self.account, self.storlet_container,
                             self.storlet_dependency, self.log_container,
                             self.client_conf_file, self.logger)

        return options
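The header-to-option mapping above, sketched with a hypothetical
X-Storlet header:

prefix = 'X-Storlet-'
key = 'X-Storlet-Run-On-Proxy'  # hypothetical request header
option = 'storlet_' + key[len(prefix):].lower().replace('-', '_')
# option == 'storlet_run_on_proxy'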
Example #44
0
    def run_server():
        wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
        # Turn off logging requests by the underlying WSGI software.
        wsgi.HttpProtocol.log_request = lambda *a: None
        # Redirect logging other messages by the underlying WSGI software.
        wsgi.HttpProtocol.log_message = \
            lambda s, f, *a: logger.error('ERROR WSGI: ' + f % a)
        wsgi.WRITE_TIMEOUT = int(conf.get('client_timeout') or 60)

        eventlet.hubs.use_hub(get_hub())
        eventlet.patcher.monkey_patch(all=False, socket=True)
        eventlet_debug = config_true_value(conf.get('eventlet_debug', 'no'))
        eventlet.debug.hub_exceptions(eventlet_debug)
        app = loadapp('config:%s' % conf_file,
                      global_conf={'log_name': log_name})
        pool = GreenPool(size=1024)
        try:
            wsgi.server(sock, app, NullLogger(), custom_pool=pool)
        except socket.error as err:
            if err.errno != errno.EINVAL:
                raise
Example #45
0
 def __init__(self, app, conf, *args, **kwargs):
     self.app = app
     self.devices = conf.get('devices', '/srv/node')
     swift_dir = conf.get('swift_dir', '/etc/swift')
     self.logger = get_logger(conf, log_route='recon')
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.object_recon_cache = os.path.join(self.recon_cache_path,
                                            'object.recon')
     self.container_recon_cache = os.path.join(self.recon_cache_path,
                                               'container.recon')
     self.account_recon_cache = os.path.join(self.recon_cache_path,
                                             'account.recon')
     self.account_ring_path = os.path.join(swift_dir, 'account.ring.gz')
     self.container_ring_path = os.path.join(swift_dir, 'container.ring.gz')
     self.rings = [self.account_ring_path, self.container_ring_path]
     # include all object ring files (for all policies)
     for f in os.listdir(swift_dir):
         if f.startswith('object') and f.endswith('ring.gz'):
             self.rings.append(os.path.join(swift_dir, f))
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
Example #46
0
 def __init__(self, app, conf, *args, **kwargs):
     self.app = app
     self.devices = conf.get('devices', '/srv/node/')
     swift_dir = conf.get('swift_dir', '/etc/swift')
     self.logger = get_logger(conf, log_route='recon')
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.object_recon_cache = os.path.join(self.recon_cache_path,
                                            'object.recon')
     self.container_recon_cache = os.path.join(self.recon_cache_path,
                                               'container.recon')
     self.account_recon_cache = os.path.join(self.recon_cache_path,
                                             'account.recon')
     self.account_ring_path = os.path.join(swift_dir, 'account.ring.gz')
     self.container_ring_path = os.path.join(swift_dir, 'container.ring.gz')
     self.object_ring_path = os.path.join(swift_dir, 'object.ring.gz')
     self.rings = [
         self.account_ring_path, self.container_ring_path,
         self.object_ring_path
     ]
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
Example #47
0
    def make_object_response(self, req, metadata, stream=None):
        conditional_etag = None
        if 'X-Backend-Etag-Is-At' in req.headers:
            conditional_etag = metadata.get(
                req.headers['X-Backend-Etag-Is-At'])

        resp = Response(request=req,
                        conditional_response=True,
                        conditional_etag=conditional_etag)

        if config_true_value(metadata['deleted']):
            resp.headers['Content-Type'] = DELETE_MARKER_CONTENT_TYPE
        else:
            resp.headers['Content-Type'] = metadata.get(
                'mime_type', 'application/octet-stream')
        properties = metadata.get('properties')
        if properties:
            for k, v in properties.items():
                if is_sys_or_user_meta('object', k) or \
                        is_object_transient_sysmeta(k) or \
                        k.lower() in self.allowed_headers:
                    resp.headers[str(k)] = v
        hash_ = metadata.get('hash')
        if hash_ is not None:
            hash_ = hash_.lower()
        resp.headers['etag'] = hash_
        resp.headers['x-object-sysmeta-version-id'] = metadata['version']
        resp.last_modified = int(metadata['mtime'])
        if stream:
            # Whether we are bothered with ranges or not, we wrap the
            # stream in order to handle exceptions.
            resp.app_iter = StreamRangeIterator(req, stream)

        length_ = metadata.get('length')
        if length_ is not None:
            length_ = int(length_)
        resp.content_length = length_
        resp.content_encoding = metadata.get('encoding')
        resp.accept_ranges = 'bytes'
        return resp
Example #48
0
    def __init__(self, app, req, server_type, ring, partition, path,
                 backend_headers):
        self.app = app
        self.ring = ring
        self.server_type = server_type
        self.partition = partition
        self.path = path
        self.backend_headers = backend_headers
        self.used_nodes = []
        self.used_source_etag = ''

        # stuff from request
        self.req_method = req.method
        self.req_path = req.path
        self.req_query_string = req.query_string
        self.newest = config_true_value(req.headers.get('x-newest', 'f'))

        # populated when finding source
        self.statuses = []
        self.reasons = []
        self.bodies = []
        self.source_headers = []
Example #49
0
    def make_object_response(self, req, metadata, stream=None, ranges=None):
        conditional_etag = None
        if 'X-Backend-Etag-Is-At' in req.headers:
            conditional_etag = metadata.get(
                req.headers['X-Backend-Etag-Is-At'])

        resp = Response(request=req,
                        conditional_response=True,
                        conditional_etag=conditional_etag)

        if config_true_value(metadata['deleted']):
            resp.headers['Content-Type'] = DELETE_MARKER_CONTENT_TYPE
        else:
            resp.headers['Content-Type'] = metadata.get(
                'mime_type', 'application/octet-stream')
        properties = metadata.get('properties')
        if properties:
            for k, v in properties.items():
                if is_sys_or_user_meta('object', k) or \
                        is_object_transient_sysmeta(k) or \
                        k.lower() in self.allowed_headers:
                    resp.headers[str(k)] = v
        resp.headers['etag'] = metadata['hash'].lower()
        resp.headers['x-object-sysmeta-version-id'] = metadata['version']
        ts = Timestamp(metadata['ctime'])
        resp.last_modified = math.ceil(float(ts))
        if stream:
            if ranges:
                resp.app_iter = StreamRangeIterator(stream)
            else:
                resp.app_iter = stream

        resp.content_length = int(metadata['length'])
        try:
            resp.content_encoding = metadata['encoding']
        except KeyError:
            pass
        resp.accept_ranges = 'bytes'
        return resp
Example #50
0
    def get_or_head_response(self, req, resp_headers, resp_iter):
        segments = self._get_manifest_read(resp_iter)

        slo_etag = None
        content_length = None
        response_headers = []
        for header, value in resp_headers:
            lheader = header.lower()
            if lheader == SYSMETA_SLO_ETAG:
                slo_etag = value
            elif lheader == SYSMETA_SLO_SIZE:
                content_length = value
            elif lheader not in ('etag', 'content-length'):
                response_headers.append((header, value))

        if slo_etag is None or content_length is None:
            etag = md5()
            content_length = 0
            for seg_dict in segments:
                if seg_dict.get('range'):
                    etag.update('%s:%s;' %
                                (seg_dict['hash'], seg_dict['range']))
                else:
                    etag.update(seg_dict['hash'])

                if config_true_value(seg_dict.get('sub_slo')):
                    override_bytes_from_content_type(seg_dict,
                                                     logger=self.slo.logger)
                content_length += self._segment_length(seg_dict)
            slo_etag = etag.hexdigest()

        response_headers.append(('Content-Length', str(content_length)))
        response_headers.append(('Etag', '"%s"' % slo_etag))

        if req.method == 'HEAD':
            return self._manifest_head_response(req, response_headers)
        else:
            return self._manifest_get_response(req, content_length,
                                               response_headers, segments)
Example #51
0
    def get_slo_segments(self, obj_name, req):
        """
        Performs a swob.Request and returns the SLO manifest's segments.

        :raises HTTPServerError: if obj_name or the SLO manifest data
                                 cannot be loaded
        :raises HTTPBadRequest: if the object is not an SLO manifest
        :raises HTTPNotFound: if the SLO manifest is not found
        :returns: SLO manifest's segments
        """
        vrs, account, _junk = req.split_path(2, 3, True)
        new_env = req.environ.copy()
        new_env['REQUEST_METHOD'] = 'GET'
        del(new_env['wsgi.input'])
        new_env['QUERY_STRING'] = 'multipart-manifest=get'
        new_env['CONTENT_LENGTH'] = 0
        new_env['HTTP_USER_AGENT'] = \
            '%s MultipartDELETE' % new_env.get('HTTP_USER_AGENT')
        new_env['swift.source'] = 'SLO'
        new_env['PATH_INFO'] = (
            '/%s/%s/%s' % (
            vrs, account,
            obj_name.lstrip('/'))).encode('utf-8')
        resp = Request.blank('', new_env).get_response(self.app)

        if resp.is_success:
            if config_true_value(resp.headers.get('X-Static-Large-Object')):
                try:
                    return json.loads(resp.body)
                except ValueError:
                    raise HTTPServerError('Unable to load SLO manifest')
            else:
                raise HTTPBadRequest('Not an SLO manifest')
        elif resp.status_int == HTTP_NOT_FOUND:
            raise HTTPNotFound('SLO manifest not found')
        elif resp.status_int == HTTP_UNAUTHORIZED:
            raise HTTPUnauthorized('401 Unauthorized')
        else:
            raise HTTPServerError('Unable to load SLO manifest or segment.')
Example #52
0
def check_delete_headers(request):
    """
    Validate if 'x-delete' headers are have correct values
    values should be positive integers and correspond to
    a time in the future.

    :param request: the swob request object

    :returns: HTTPBadRequest in case of invalid values
              or None if values are ok
    """
    if 'x-delete-after' in request.headers:
        try:
            x_delete_after = int(request.headers['x-delete-after'])
        except ValueError:
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body='Non-integer X-Delete-After')
        actual_del_time = time.time() + x_delete_after
        if actual_del_time < time.time():
            raise HTTPBadRequest(request=request,
                                 content_type='text/plain',
                                 body='X-Delete-After in past')
        request.headers['x-delete-at'] = utils.normalize_delete_at_timestamp(
            actual_del_time)

    if 'x-delete-at' in request.headers:
        try:
            x_delete_at = int(utils.normalize_delete_at_timestamp(
                int(request.headers['x-delete-at'])))
        except ValueError:
            raise HTTPBadRequest(request=request, content_type='text/plain',
                                 body='Non-integer X-Delete-At')

        if x_delete_at < time.time() and not utils.config_true_value(
                request.headers.get('x-backend-replication', 'f')):
            raise HTTPBadRequest(request=request, content_type='text/plain',
                                 body='X-Delete-At in past')
    return request
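A minimal usage sketch, assuming swift.common.swob is importable; the
path and header value are hypothetical:

from swift.common import swob

req = swob.Request.blank('/v1/AUTH_test/c/o',
                         headers={'X-Delete-After': '3600'})
check_delete_headers(req)
# req.headers['x-delete-at'] now holds a normalized future timestamp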
Example #53
0
def filter_factory(global_conf, **local_conf):

    conf = global_conf.copy()
    conf.update(local_conf)
    storlet_conf = dict()
    storlet_conf['storlet_timeout'] = conf.get('storlet_timeout', 40)
    storlet_conf['storlet_container'] = conf.get('storlet_container',
                                                 'storlet')
    storlet_conf['storlet_dependency'] = conf.get('storlet_dependency',
                                                  'dependency')
    storlet_conf['execution_server'] = conf.get('execution_server', '')
    storlet_conf['storlet_execute_on_proxy_only'] = config_true_value(
        conf.get('storlet_execute_on_proxy_only', 'false'))
    storlet_conf['gateway_conf'] = {}

    module_name = conf.get('storlet_gateway_module', '')
    mo = module_name[:module_name.rfind(':')]
    cl = module_name[module_name.rfind(':') + 1:]
    module = __import__(mo, fromlist=[cl])
    the_class = getattr(module, cl)

    configParser = ConfigParser.RawConfigParser()
    configParser.read(
        conf.get('storlet_gateway_conf',
                 '/etc/swift/storlet_stub_gateway.conf'))

    additional_items = configParser.items("DEFAULT")
    for key, val in additional_items:
        storlet_conf[key] = val

    swift_info = {}
    storlet_conf["gateway_module"] = the_class
    register_swift_info('storlet_handler', False, **swift_info)

    def storlet_handler_filter(app):
        return StorletHandlerMiddleware(app, conf, storlet_conf)

    return storlet_handler_filter
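The 'module:class' parsing above can be sketched with a hypothetical
storlet_gateway_module value:

module_name = 'storlets.gateway.stub:StorletGatewayStub'  # hypothetical
mo = module_name[:module_name.rfind(':')]
cl = module_name[module_name.rfind(':') + 1:]
# mo == 'storlets.gateway.stub', cl == 'StorletGatewayStub'
# __import__(mo, fromlist=[cl]) then getattr(module, cl) yields the class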
Example #54
0
    def _segment_listing_iterator(self, req, version, account, segments,
                                  byteranges):
        for seg_dict in segments:
            if config_true_value(seg_dict.get('sub_slo')):
                override_bytes_from_content_type(seg_dict,
                                                 logger=self.slo.logger)

        # We handle the range stuff here so that we can be smart about
        # skipping unused submanifests. For example, if our first segment is a
        # submanifest referencing 50 MiB total, but start_byte falls in
        # the 51st MiB, then we can avoid fetching the first submanifest.
        #
        # If we were to make SegmentedIterable handle all the range
        # calculations, we would be unable to make this optimization.
        total_length = sum(self._segment_length(seg) for seg in segments)
        if not byteranges:
            byteranges = [(0, total_length - 1)]

        # Cache segments from sub-SLOs in case more than one byterange
        # includes data from a particular sub-SLO. We only cache a few sets
        # of segments so that a malicious user cannot build a giant SLO tree
        # and then GET it to run the proxy out of memory.
        #
        # LRUCache is a little awkward to use this way, but it beats doing
        # things manually.
        #
        # 20 is sort of an arbitrary choice; it's twice our max recursion
        # depth, so we know this won't expand memory requirements by too
        # much.
        cached_fetch_sub_slo_segments = \
            LRUCache(maxsize=20)(self._fetch_sub_slo_segments)

        for first_byte, last_byte in byteranges:
            byterange_listing_iter = self._byterange_listing_iterator(
                req, version, account, segments, first_byte, last_byte,
                cached_fetch_sub_slo_segments)
            for seg_info in byterange_listing_iter:
                yield seg_info
Example #55
0
    def GET(self, req):
        """Handle HTTP GET request."""
        drive, part, account = split_and_validate_path(req, 3)
        prefix = get_param(req, 'prefix')
        delimiter = get_param(req, 'delimiter')
        if delimiter and (len(delimiter) > 1 or ord(delimiter) > 254):
            # delimiters can be made more flexible later
            return HTTPPreconditionFailed(body='Bad delimiter')
        limit = constraints.ACCOUNT_LISTING_LIMIT
        given_limit = get_param(req, 'limit')
        reverse = config_true_value(get_param(req, 'reverse'))
        if given_limit and given_limit.isdigit():
            limit = int(given_limit)
            if limit > constraints.ACCOUNT_LISTING_LIMIT:
                return HTTPPreconditionFailed(
                    request=req,
                    body='Maximum limit is %d' %
                    constraints.ACCOUNT_LISTING_LIMIT)
        marker = get_param(req, 'marker', '')
        end_marker = get_param(req, 'end_marker')
        out_content_type = get_listing_content_type(req)

        if self.mount_check and not check_mount(self.root, drive):
            return HTTPInsufficientStorage(drive=drive, request=req)
        broker = self._get_account_broker(drive,
                                          part,
                                          account,
                                          pending_timeout=0.1,
                                          stale_reads_ok=True)
        if broker.is_deleted():
            return self._deleted_response(broker, req, HTTPNotFound)
        return account_listing_response(account, req, out_content_type, broker,
                                        limit, marker, end_marker, prefix,
                                        delimiter, reverse)
Example #56
0
def get_bucket_db(conf):
    """
    If `bucket_db_enabled` is set in `conf`, get the bucket database,
    otherwise return `None`.

    If `bucket_db_host` or `bucket_db_sentinel_hosts` are also set in `conf`,
    return an instance of `RedisBucketDb`, otherwise return an instance of
    `DummyBucketDb`.
    """
    db_kwargs = {
        k[10:]: v
        for k, v in conf.items() if k.startswith('bucket_db_')
    }
    if config_true_value(db_kwargs.get('enabled', 'false')):
        if 'host' in db_kwargs or 'sentinel_hosts' in db_kwargs:
            if db_kwargs.get('sentinel_name') is None:
                # TODO(adu): Delete when it will no longer be used
                db_kwargs['sentinel_name'] = db_kwargs.pop('master_name', None)
            return RedisBucketDb(**db_kwargs)
        else:
            return DummyBucketDb(**db_kwargs)

    return None
Example #57
0
def _initrp(conf_path, app_section, *args, **kwargs):
    try:
        conf = appconfig(conf_path, name=app_section)
    except Exception as e:
        raise ConfigFileError("Error trying to load config from %s: %s" %
                              (conf_path, e))

    validate_configuration()

    # pre-configure logger
    log_name = conf.get('log_name', app_section)
    if 'logger' in kwargs:
        logger = kwargs.pop('logger')
    else:
        logger = get_logger(conf, log_name,
                            log_to_console=kwargs.pop('verbose', False),
                            log_route='wsgi')

    # disable fallocate if desired
    if config_true_value(conf.get('disable_fallocate', 'no')):
        disable_fallocate()

    return (conf, logger, log_name)
Example #58
0
 def __init__(self, request, conf, gateway_conf, app, logger):
     """
     :param request: swob.Request instance
     :param conf: middleware conf dict
     :param gateway_conf: gateway conf dict
     :param app: wsgi Application
     :param logger: logger instance
     """
     self.reseller_prefix = conf.get('reseller_prefix', 'AUTH')
     self.request = request
     self.app = app
     self.logger = logger
     self.conf = conf
     self.gateway_conf = gateway_conf
     self.gateway_class = self.conf['gateway_module']
     self.sreq_class = self.gateway_class.request_class
     self.storlet_execute_on_proxy = \
         config_true_value(conf.get('storlet_execute_on_proxy', 'false'))
     containers = get_container_names(conf)
     self.storlet_container = containers['storlet']
     self.storlet_dependency = containers['dependency']
     self.log_container = containers['log']
     self.client_conf_file = '/etc/swift/storlet-proxy-server.conf'
Example #59
0
    def properties_from_headers(self, headers):
        metadata = {
            k: v
            for k, v in headers.items()
            if k.lower() in self.pass_through_headers
            or is_sys_or_user_meta('container', k)
        }

        system = dict()
        # These headers enable versioning.
        # First the legacy one.
        ver_loc = headers.get('X-Container-Sysmeta-Versions-Location')
        if ver_loc is not None:
            # When suspending versioning, header has empty string value
            ver_val = "-1" if ver_loc else "1"
            system['sys.m2.policy.version'] = ver_val
        # Then the new one.
        vers_enabled = headers.get(CLIENT_VERSIONS_ENABLED)
        if vers_enabled is not None:
            ver_val = "-1" if config_true_value(vers_enabled) else "1"
            system['sys.m2.policy.version'] = ver_val

        return metadata, system
Example #60
0
 def GET(self, req):
     """Handle HTTP GET request."""
     drive, part, account, container, obj = split_and_validate_path(
         req, 4, 5, True)
     path = get_param(req, 'path')
     prefix = get_param(req, 'prefix')
     delimiter = get_param(req, 'delimiter')
     if delimiter and (len(delimiter) > 1 or ord(delimiter) > 254):
         # delimiters can be made more flexible later
         return HTTPPreconditionFailed(body='Bad delimiter')
     marker = get_param(req, 'marker', '')
     end_marker = get_param(req, 'end_marker')
     limit = constraints.CONTAINER_LISTING_LIMIT
     given_limit = get_param(req, 'limit')
     reverse = config_true_value(get_param(req, 'reverse'))
     if given_limit and given_limit.isdigit():
         limit = int(given_limit)
         if limit > constraints.CONTAINER_LISTING_LIMIT:
             return HTTPPreconditionFailed(
                 request=req,
                 body='Maximum limit is %d'
                 % constraints.CONTAINER_LISTING_LIMIT)
     out_content_type = get_listing_content_type(req)
     if self.mount_check and not check_mount(self.root, drive):
         return HTTPInsufficientStorage(drive=drive, request=req)
     broker = self._get_container_broker(drive, part, account, container,
                                         pending_timeout=0.1,
                                         stale_reads_ok=True)
     info, is_deleted = broker.get_info_is_deleted()
     resp_headers = gen_resp_headers(info, is_deleted=is_deleted)
     if is_deleted:
         return HTTPNotFound(request=req, headers=resp_headers)
     container_list = broker.list_objects_iter(
         limit, marker, end_marker, prefix, delimiter, path,
         storage_policy_index=info['storage_policy_index'], reverse=reverse)
     return self.create_listing(req, out_content_type, info, resp_headers,
                                broker.metadata, container_list, container)