Example #1
File: test_daemon.py Project: clayg/swift
    def test_run_daemon(self):
        sample_conf = "[my-daemon]\nuser = %s\n" % getuser()
        with tmpfile(sample_conf) as conf_file:
            with mock.patch.dict('os.environ', {'TZ': ''}):
                with mock.patch('time.tzset') as mock_tzset:
                    daemon.run_daemon(MyDaemon, conf_file)
                    self.assertTrue(MyDaemon.forever_called)
                    self.assertEqual(os.environ['TZ'], 'UTC+0')
                    self.assertEqual(mock_tzset.mock_calls, [mock.call()])
            daemon.run_daemon(MyDaemon, conf_file, once=True)
            self.assertEqual(MyDaemon.once_called, True)

            # test raise in daemon code
            with mock.patch.object(MyDaemon, 'run_once', MyDaemon.run_raise):
                self.assertRaises(OSError, daemon.run_daemon, MyDaemon,
                                  conf_file, once=True)

            # test user quit
            sio = StringIO()
            logger = logging.getLogger('server')
            logger.addHandler(logging.StreamHandler(sio))
            logger = utils.get_logger(None, 'server', log_route='server')
            with mock.patch.object(MyDaemon, 'run_forever', MyDaemon.run_quit):
                daemon.run_daemon(MyDaemon, conf_file, logger=logger)
            self.assertTrue('user quit' in sio.getvalue().lower())

            # test missing section
            sample_conf = "[default]\nuser = %s\n" % getuser()
            with tmpfile(sample_conf) as conf_file:
                self.assertRaisesRegexp(SystemExit,
                                        'Unable to find my-daemon '
                                        'config section in.*',
                                        daemon.run_daemon, MyDaemon,
                                        conf_file, once=True)
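The assertions above depend on a MyDaemon test double defined elsewhere in test_daemon.py. A minimal sketch of such a fixture, inferred from the assertions (the actual class in clayg/swift may differ):

    class MyDaemon(daemon.Daemon):
        forever_called = False
        once_called = False

        def run_forever(self, *args, **kwargs):
            MyDaemon.forever_called = True

        def run_once(self, *args, **kwargs):
            MyDaemon.once_called = True

        def run_raise(self, *args, **kwargs):
            raise OSError  # exercises the 'raise in daemon code' branch

        def run_quit(self, *args, **kwargs):
            raise KeyboardInterrupt  # run_daemon logs this as 'user quit'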
Example #2
 def __init__(self, app, conf):
     #: The next WSGI application/filter in the paste.deploy pipeline.
     self.app = app
     #: The filter configuration dict.
     self.conf = conf
     #: The number of seconds to cache the x-container-meta-web-* headers.
     self.cache_timeout = int(conf.get('cache_timeout', 300))
     #: Logger for this filter.
     self.logger = get_logger(conf, log_route='staticweb')
     access_log_conf = {}
     for key in ('log_facility', 'log_name', 'log_level'):
         value = conf.get('access_' + key, conf.get(key, None))
         if value:
             access_log_conf[key] = value
     #: Web access logger for this filter.
     self.access_logger = get_logger(access_log_conf,
                                     log_route='staticweb-access')
     #: Indicates whether full HTTP headers should be logged or not.
     self.log_headers = conf.get('log_headers') == 'True'
     # Results from the last call to self._start_response.
     self._response_status = None
     self._response_headers = None
     self._response_exc_info = None
     # Results from the last call to self._get_container_info.
     self._index = self._error = self._listings = self._listings_css = None
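The access_log_conf loop above lets access_-prefixed settings shadow the base log settings before building the second logger. A standalone illustration with hypothetical values:

    conf = {'log_name': 'staticweb', 'log_level': 'INFO',
            'access_log_level': 'DEBUG'}
    access_log_conf = {}
    for key in ('log_facility', 'log_name', 'log_level'):
        value = conf.get('access_' + key, conf.get(key, None))
        if value:
            access_log_conf[key] = value
    # access_log_conf == {'log_name': 'staticweb', 'log_level': 'DEBUG'}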
Example #3
 def test_get_logger(self):
     sio = StringIO()
     logger = logging.getLogger('server')
     logger.addHandler(logging.StreamHandler(sio))
     logger = utils.get_logger(None, 'server', log_route='server')
     logger.warn('test1')
     self.assertEquals(sio.getvalue(), 'test1\n')
     logger.debug('test2')
     self.assertEquals(sio.getvalue(), 'test1\n')
     logger = utils.get_logger({'log_level': 'DEBUG'}, 'server',
                               log_route='server')
     logger.debug('test3')
     self.assertEquals(sio.getvalue(), 'test1\ntest3\n')
     # Doesn't really test that the log facility is truly being used all the
     # way to syslog; but exercises the code.
     logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server',
                               log_route='server')
     logger.warn('test4')
     self.assertEquals(sio.getvalue(),
                       'test1\ntest3\ntest4\n')
     # make sure debug doesn't log by default
     logger.debug('test5')
     self.assertEquals(sio.getvalue(),
                       'test1\ntest3\ntest4\n')
     # make sure notice lvl logs by default
     logger.notice('test6')
     self.assertEquals(sio.getvalue(),
                       'test1\ntest3\ntest4\ntest6\n')
Example #4
File: wsgi.py Project: mahak/swift
def get_socket(conf):
    """Bind socket to bind ip:port in conf

    :param conf: Configuration dict to read settings from

    :returns: a socket object as returned from socket.listen or
              ssl.wrap_socket if conf specifies cert_file
    """
    try:
        bind_port = int(conf['bind_port'])
    except (ValueError, KeyError, TypeError):
        raise ConfigFilePortError()
    bind_addr = (conf.get('bind_ip', '0.0.0.0'), bind_port)
    address_family = [addr[0] for addr in socket.getaddrinfo(
        bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
        if addr[0] in (socket.AF_INET, socket.AF_INET6)][0]
    sock = None
    bind_timeout = int(conf.get('bind_timeout', 30))
    retry_until = time.time() + bind_timeout
    warn_ssl = False

    try:
        keepidle = int(conf.get('keep_idle', 600))
        if keepidle <= 0 or keepidle >= 2 ** 15 - 1:
            raise ValueError()
    except (ValueError, KeyError, TypeError):
        raise ConfigFileError()

    while not sock and time.time() < retry_until:
        try:
            sock = listen(bind_addr, backlog=int(conf.get('backlog', 4096)),
                          family=address_family)
            if 'cert_file' in conf:
                warn_ssl = True
                sock = ssl.wrap_socket(sock, certfile=conf['cert_file'],
                                       keyfile=conf['key_file'])
        except socket.error as err:
            if err.args[0] != errno.EADDRINUSE:
                raise
            sleep(0.1)
    if not sock:
        raise Exception(_('Could not bind to %(addr)s:%(port)s '
                          'after trying for %(timeout)s seconds') % {
                              'addr': bind_addr[0], 'port': bind_addr[1],
                              'timeout': bind_timeout})
    # in my experience, sockets can hang around forever without keepalive
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    if hasattr(socket, 'TCP_KEEPIDLE'):
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, keepidle)
    if warn_ssl:
        ssl_warning_message = _('WARNING: SSL should only be enabled for '
                                'testing purposes. Use external SSL '
                                'termination for a production deployment.')
        get_logger(conf).warning(ssl_warning_message)
        print(ssl_warning_message)
    return sock
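A minimal caller sketch for get_socket() with hypothetical values; only bind_port is required, and the comments reflect the fallbacks and validation in the code above:

    conf = {
        'bind_ip': '127.0.0.1',   # default '0.0.0.0'
        'bind_port': '6200',      # required; missing/non-integer raises ConfigFilePortError
        'bind_timeout': '30',     # seconds to keep retrying while the address is in use
        'backlog': '4096',
        'keep_idle': '600',       # must satisfy 0 < keep_idle < 2 ** 15 - 1
        # 'cert_file'/'key_file' would enable the (test-only) SSL wrapping path
    }
    sock = get_socket(conf)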
Example #5
File: test_utils.py Project: edwardt/swift
 def test_get_logger_console(self):
     reload(utils)  # reset get_logger attrs
     logger = utils.get_logger(None)
     self.assertFalse(hasattr(utils.get_logger, 'console'))
     logger = utils.get_logger(None, log_to_console=True)
     self.assert_(hasattr(utils.get_logger, 'console'))
     self.assert_(isinstance(utils.get_logger.console,
                             logging.StreamHandler))
     # make sure you can't have two console handlers
     old_handler = utils.get_logger.console
     logger = utils.get_logger(None, log_to_console=True)
     self.assertNotEquals(utils.get_logger.console, old_handler)
     logger.logger.removeHandler(utils.get_logger.console)
Example #6
    def test_capture_stdio(self):
        # stubs
        logger = utils.get_logger(None, 'dummy')

        # mock utils system modules
        _orig_sys = utils.sys
        _orig_os = utils.os
        try:
            utils.sys = MockSys()
            utils.os = MockOs()

            # basic test
            utils.capture_stdio(logger)
            self.assert_(utils.sys.excepthook is not None)
            self.assertEquals(utils.os.closed_fds, utils.sys.stdio_fds)
            self.assert_(isinstance(utils.sys.stdout, utils.LoggerFileObject))
            self.assert_(isinstance(utils.sys.stderr, utils.LoggerFileObject))

            # reset; test same args, but exc when trying to close stdio
            utils.os = MockOs(raise_funcs=('dup2',))
            utils.sys = MockSys()

            # test unable to close stdio
            utils.capture_stdio(logger)
            self.assert_(utils.sys.excepthook is not None)
            self.assertEquals(utils.os.closed_fds, [])
            self.assert_(isinstance(utils.sys.stdout, utils.LoggerFileObject))
            self.assert_(isinstance(utils.sys.stderr, utils.LoggerFileObject))

            # reset; test some other args
            utils.os = MockOs()
            utils.sys = MockSys()
            logger = utils.get_logger(None, log_to_console=True)

            # test console log
            utils.capture_stdio(logger, capture_stdout=False,
                                capture_stderr=False)
            self.assert_(utils.sys.excepthook is not None)
            # when logging to console, stderr remains open
            self.assertEquals(utils.os.closed_fds, utils.sys.stdio_fds[:2])
            reset_loggers()

            # stdio not captured
            self.assertFalse(isinstance(utils.sys.stdout,
                                        utils.LoggerFileObject))
            self.assertFalse(isinstance(utils.sys.stderr,
                                        utils.LoggerFileObject))
            reset_loggers()
        finally:
            utils.sys = _orig_sys
            utils.os = _orig_os
Example #7
File: wsgi.py Project: vandanashah/swift
def get_socket(conf):
    """Bind socket to bind ip:port in conf

    :param conf: Configuration dict to read settings from

    :returns: a socket object as returned from socket.listen or
              ssl.wrap_socket if conf specifies cert_file
    """
    try:
        bind_port = int(conf["bind_port"])
    except (ValueError, KeyError, TypeError):
        raise ConfigFilePortError()
    bind_addr = (conf.get("bind_ip", "0.0.0.0"), bind_port)
    address_family = [
        addr[0]
        for addr in socket.getaddrinfo(bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
        if addr[0] in (socket.AF_INET, socket.AF_INET6)
    ][0]
    sock = None
    bind_timeout = int(conf.get("bind_timeout", 30))
    retry_until = time.time() + bind_timeout
    warn_ssl = False
    while not sock and time.time() < retry_until:
        try:
            sock = listen(bind_addr, backlog=int(conf.get("backlog", 4096)), family=address_family)
            if "cert_file" in conf:
                warn_ssl = True
                sock = ssl.wrap_socket(sock, certfile=conf["cert_file"], keyfile=conf["key_file"])
        except socket.error as err:
            if err.args[0] != errno.EADDRINUSE:
                raise
            sleep(0.1)
    if not sock:
        raise Exception(
            _("Could not bind to %s:%s " "after trying for %s seconds") % (bind_addr[0], bind_addr[1], bind_timeout)
        )
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # in my experience, sockets can hang around forever without keepalive
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    if hasattr(socket, "TCP_KEEPIDLE"):
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 600)
    if warn_ssl:
        ssl_warning_message = _(
            "WARNING: SSL should only be enabled for "
            "testing purposes. Use external SSL "
            "termination for a production deployment."
        )
        get_logger(conf).warning(ssl_warning_message)
        print(ssl_warning_message)
    return sock
Example #8
    def __init__(self, conf, logger=None):
        """
        Creates a new WSGI application for the Swift Object Server. An
        example configuration is given at
        <source-dir>/etc/object-server.conf-sample or
        /etc/swift/object-server.conf-sample.
        """
        super(ObjectController, self).__init__(conf)
        self.logger = logger or get_logger(conf, log_route='object-server')
        self.node_timeout = int(conf.get('node_timeout', 3))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.client_timeout = int(conf.get('client_timeout', 60))
        self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
        self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
        self.log_requests = config_true_value(conf.get('log_requests', 'true'))
        self.max_upload_time = int(conf.get('max_upload_time', 86400))
        self.slow = int(conf.get('slow', 0))
        self.keep_cache_private = \
            config_true_value(conf.get('keep_cache_private', 'false'))

        default_allowed_headers = '''
            content-disposition,
            content-encoding,
            x-delete-at,
            x-object-manifest,
            x-static-large-object,
        '''
        extra_allowed_headers = [
            header.strip().lower() for header in conf.get(
                'allowed_headers', default_allowed_headers).split(',')
            if header.strip()
        ]
        self.allowed_headers = set()
        for header in extra_allowed_headers:
            if header not in DATAFILE_SYSTEM_META:
                self.allowed_headers.add(header)
        self.auto_create_account_prefix = \
            conf.get('auto_create_account_prefix') or '.'
        self.expiring_objects_account = self.auto_create_account_prefix + \
            (conf.get('expiring_objects_account_name') or 'expiring_objects')
        self.expiring_objects_container_divisor = \
            int(conf.get('expiring_objects_container_divisor') or 86400)
        # Initialization was successful, so now apply the network chunk size
        # parameter as the default read / write buffer size for the network
        # sockets.
        #
        # NOTE WELL: This is a class setting, so until we can set this on a
        # per-connection basis, this affects reading and writing on ALL
        # sockets, those between the proxy servers and external clients, and
        # those between the proxy servers and the other internal servers.
        #
        # ** Because the primary motivation for this is to optimize how data
        # is written back to the proxy server, we could use the value from the
        # disk_chunk_size parameter. However, it affects all created sockets
        # using this class so we have chosen to tie it to the
        # network_chunk_size parameter value instead.
        socket._fileobject.default_bufsize = self.network_chunk_size

        # Provide further setup specific to an object server implementation.
        self.setup(conf)
Example #9
File: server.py Project: edwardt/swift
 def __init__(self, conf):
     self.logger = get_logger(conf)
     self.root = conf.get('devices', '/srv/node')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     self.replicator_rpc = \
         ReplicatorRpc(self.root, DATADIR, AccountBroker, self.mount_check)
Example #10
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='replicator')
     self.root = conf.get('devices', '/srv/node')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     self.port = int(conf.get('bind_port', self.default_port))
     concurrency = int(conf.get('concurrency', 8))
     self.cpool = GreenPool(size=concurrency)
     swift_dir = conf.get('swift_dir', '/etc/swift')
     self.ring = ring.Ring(swift_dir, ring_name=self.server_type)
     self.per_diff = int(conf.get('per_diff', 1000))
     self.max_diffs = int(conf.get('max_diffs') or 100)
     self.interval = int(conf.get('interval') or
                         conf.get('run_pause') or 30)
     self.vm_test_mode = conf.get(
         'vm_test_mode', 'no').lower() in ('yes', 'true', 'on', '1')
     self.node_timeout = int(conf.get('node_timeout', 10))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
     swift.common.db.DB_PREALLOCATION = \
         conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
     self._zero_stats()
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.recon_replicator = '%s.recon' % self.server_type
     self.rcache = os.path.join(self.recon_cache_path,
                                self.recon_replicator)
     self.extract_device_re = re.compile('%s%s([^%s]+)' % (
         self.root, os.path.sep, os.path.sep))
Example #11
 def __init__(self, root, datadir, broker_class, mount_check=True,
              logger=None):
     self.root = root
     self.datadir = datadir
     self.broker_class = broker_class
     self.mount_check = mount_check
     self.logger = logger or get_logger({}, log_route='replicator-rpc')
Example #12
 def __init__(self, app, conf):
     self.app = app
     self.conf = conf
     self.logger = get_logger(conf, log_route='catch-errors')
     self.image_handlers = {
         'ImageUploadHandler': ImageUploadHandler,
     }
Example #13
    def __init__(self, app, conf):
        self.app = app
        self.logger = get_logger(conf)

        # TODO Check pipeline

        self.verb_acl = {}
        verb_acl = conf.get('verb_acl', None)
        if verb_acl is None:
            raise ValueError(
                'verb_acl: missing "verb_acl" configuration entry')
        for acl in verb_acl.split(';'):
            if not acl:
                continue
            if acl.count(':') != 1:
                raise ValueError('verb_acl: bad format: "%s"' % acl)
            methods, blocks = acl.split(':', 1)
            for method in methods.split(','):
                if not method:
                    raise ValueError(
                        'verb_acl: bad format: missing method: "%s"' % acl)
                for block in blocks.split(','):
                    if not block:
                        raise ValueError(
                            'verb_acl: bad format: empty address block: "%s"' %
                            acl)
                    self.verb_acl.setdefault(method.upper(), []).append(block)
        self.logger.debug("Verb ACL: " + str(self.verb_acl))
Example #14
 def __init__(self, app, conf):
     self.app = app
     self.conf = conf
     self.logger = get_logger(conf, log_route='lite-swauth')
     self.profile_path = 'profile'
     self.super_admin_key = conf.get('super_admin_key')
     if not self.super_admin_key:
         msg = 'No super_admin_key set in conf file; ' \
               'Swauth administration features will be disabled.'
         try:
             self.logger.warn(msg)
         except Exception:
             pass
     self.auth_prefix = conf.get('auth_prefix', '/auth/')
     if not self.auth_prefix:
         self.auth_prefix = '/auth/'
     if self.auth_prefix[0] != '/':
         self.auth_prefix = '/' + self.auth_prefix
     if self.auth_prefix[-1] != '/':
         self.auth_prefix += '/'
     self.version = 'v2'
     # url for whitelist objects
     # Example: /v1/liteauth/whitelist
     self.whitelist_url = conf.get('whitelist_url', '').lower().rstrip('/')
     if not self.whitelist_url:
         raise ValueError('whitelist_url not set in config file')
     # url for invite objects
     # Example: /v1/liteauth/invites
     self.invite_url = conf.get('invite_url', '').lower().rstrip('/')
     if not self.invite_url:
         raise ValueError('invite_url not set in config file')
Example #15
    def test_delegate_methods_with_metric_prefix(self):
        self.logger = utils.get_logger({
            'log_statsd_host': 'localhost',
            'log_statsd_port': str(self.port),
            'log_statsd_metric_prefix': 'alpha.beta',
        }, 'pfx')
        self.assertStat('alpha.beta.pfx.some.counter:1|c',
                        self.logger.increment, 'some.counter')
        self.assertStat('alpha.beta.pfx.some.counter:-1|c',
                        self.logger.decrement, 'some.counter')
        self.assertStat('alpha.beta.pfx.some.operation:4760.0|ms',
                        self.logger.timing, 'some.operation', 4.76 * 1000)
        self.assertStatMatches(
            'alpha\.beta\.pfx\.another\.op:\d+\.\d+\|ms',
            self.logger.timing_since, 'another.op', time.time())
        self.assertStat('alpha.beta.pfx.another.counter:3|c',
                        self.logger.update_stats, 'another.counter', 3)

        self.logger.set_statsd_prefix('')
        self.assertStat('alpha.beta.some.counter:1|c|@0.9912',
                        self.logger.increment, 'some.counter',
                        sample_rate=0.9912)
        self.assertStat('alpha.beta.some.counter:-1|c|@0.9912',
                        self.logger.decrement, 'some.counter', 0.9912)
        self.assertStat('alpha.beta.some.operation:4900.0|ms|@0.9912',
                        self.logger.timing, 'some.operation', 4.9 * 1000,
                        sample_rate=0.9912)
        self.assertStatMatches('alpha\.beta\.another\.op:\d+\.\d+\|ms\|@0\.9912',
                               self.logger.timing_since, 'another.op',
                               time.time(), sample_rate=0.9912)
        self.assertStat('alpha.beta.another.counter:3|c|@0.9912',
                        self.logger.update_stats, 'another.counter', 3,
                        sample_rate=0.9912)
Example #16
File: proxy_logging.py Project: 701/swift
    def __init__(self, app, conf, logger=None):
        self.app = app
        self.log_hdrs = config_true_value(conf.get(
            'access_log_headers',
            conf.get('log_headers', 'no')))
        log_hdrs_only = list_from_csv(conf.get(
            'access_log_headers_only', ''))
        self.log_hdrs_only = [x.title() for x in log_hdrs_only]

        # The leading access_* check is in case someone assumes that
        # log_statsd_valid_http_methods behaves like the other log_statsd_*
        # settings.
        self.valid_methods = conf.get(
            'access_log_statsd_valid_http_methods',
            conf.get('log_statsd_valid_http_methods',
                     'GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS'))
        self.valid_methods = [m.strip().upper() for m in
                              self.valid_methods.split(',') if m.strip()]
        access_log_conf = {}
        for key in ('log_facility', 'log_name', 'log_level', 'log_udp_host',
                    'log_udp_port', 'log_statsd_host', 'log_statsd_port',
                    'log_statsd_default_sample_rate',
                    'log_statsd_sample_rate_factor',
                    'log_statsd_metric_prefix'):
            value = conf.get('access_' + key, conf.get(key, None))
            if value:
                access_log_conf[key] = value
        self.access_logger = logger or get_logger(access_log_conf,
                                                  log_route='proxy-access')
        self.access_logger.set_statsd_prefix('proxy-server')
        self.reveal_sensitive_prefix = int(
            conf.get('reveal_sensitive_prefix', 16))
Example #17
 def __init__(self, conf, logger=None):
     super(ContainerController, self).__init__(conf)
     self.logger = logger or get_logger(conf, log_route='container-server')
     self.log_requests = config_true_value(conf.get('log_requests', 'true'))
     self.root = conf.get('devices', '/srv/node')
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
     self.node_timeout = int(conf.get('node_timeout', 3))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     #: ContainerSyncCluster instance for validating sync-to values.
     self.realms_conf = ContainerSyncRealms(
         os.path.join(
             conf.get('swift_dir', '/etc/swift'),
             'container-sync-realms.conf'),
         self.logger)
     #: The list of hosts we're allowed to send syncs to. This can be
     #: overridden by data in self.realms_conf
     self.allowed_sync_hosts = [
         h.strip()
         for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
         if h.strip()]
     self.replicator_rpc = ContainerReplicatorRpc(
         self.root, DATADIR, ContainerBroker, self.mount_check,
         logger=self.logger)
     self.auto_create_account_prefix = \
         conf.get('auto_create_account_prefix') or '.'
     if config_true_value(conf.get('allow_versions', 'f')):
         self.save_headers.append('x-versions-location')
     swift.common.db.DB_PREALLOCATION = \
         config_true_value(conf.get('db_preallocation', 'f'))
Example #18
File: server.py Project: Nupta/swift
 def __init__(self, conf):
     """
     Creates a new WSGI application for the Swift Object Server. An
     example configuration is given at
     <source-dir>/etc/object-server.conf-sample or
     /etc/swift/object-server.conf-sample.
     """
     self.logger = get_logger(conf, log_route='object-server')
     self.devices = conf.get('devices', '/srv/node/')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     self.node_timeout = int(conf.get('node_timeout', 3))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
     self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
     self.log_requests = conf.get('log_requests', 't')[:1].lower() == 't'
     self.max_upload_time = int(conf.get('max_upload_time', 86400))
     self.slow = int(conf.get('slow', 0))
     self.bytes_per_sync = int(conf.get('mb_per_sync', 512)) * 1024 * 1024
     default_allowed_headers = '''
         content-disposition,
         content-encoding,
         x-delete-at,
         x-object-manifest,
     '''
     self.allowed_headers = set(i.strip().lower() for i in
             conf.get('allowed_headers',
             default_allowed_headers).split(',') if i.strip() and
             i.strip().lower() not in DISALLOWED_HEADERS)
     self.expiring_objects_account = \
         (conf.get('auto_create_account_prefix') or '.') + \
         'expiring_objects'
     self.expiring_objects_container_divisor = \
         int(conf.get('expiring_objects_container_divisor') or 86400)
Example #19
 def __init__(self, conf, logger=None):
     """
     :param conf: configuration object obtained from ConfigParser
     :param logger: logging object
     """
     self.conf = conf
     self.logger = logger or get_logger(conf, log_route='object-replicator')
     self.devices_dir = conf.get('devices', '/srv/node')
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
     self.swift_dir = conf.get('swift_dir', '/etc/swift')
     self.bind_ip = conf.get('bind_ip', '0.0.0.0')
     self.servers_per_port = int(conf.get('servers_per_port', '0') or 0)
     self.port = None if self.servers_per_port else \
         int(conf.get('bind_port', 6000))
     self.concurrency = int(conf.get('concurrency', 1))
     self.stats_interval = int(conf.get('stats_interval', '300'))
     self.ring_check_interval = int(conf.get('ring_check_interval', 15))
     self.next_check = time.time() + self.ring_check_interval
     self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
     self.partition_times = []
     self.interval = int(conf.get('interval') or
                         conf.get('run_pause') or 30)
     self.rsync_timeout = int(conf.get('rsync_timeout', 900))
     self.rsync_io_timeout = conf.get('rsync_io_timeout', '30')
     self.rsync_bwlimit = conf.get('rsync_bwlimit', '0')
     self.rsync_compress = config_true_value(
         conf.get('rsync_compress', 'no'))
     self.rsync_module = conf.get('rsync_module', '').rstrip('/')
     if not self.rsync_module:
         self.rsync_module = '{replication_ip}::object'
         if config_true_value(conf.get('vm_test_mode', 'no')):
             self.logger.warn('Option object-replicator/vm_test_mode is '
                              'deprecated and will be removed in a future '
                              'version. Update your configuration to use '
                              'option object-replicator/rsync_module.')
             self.rsync_module += '{replication_port}'
     self.http_timeout = int(conf.get('http_timeout', 60))
     self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.rcache = os.path.join(self.recon_cache_path, "object.recon")
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.node_timeout = float(conf.get('node_timeout', 10))
     self.sync_method = getattr(self, conf.get('sync_method') or 'rsync')
     self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
     self.default_headers = {
         'Content-Length': '0',
         'user-agent': 'object-replicator %s' % os.getpid()}
     self.rsync_error_log_line_length = \
         int(conf.get('rsync_error_log_line_length', 0))
     self.handoffs_first = config_true_value(conf.get('handoffs_first',
                                                      False))
     self.handoff_delete = config_auto_int_value(
         conf.get('handoff_delete', 'auto'), 0)
     if any((self.handoff_delete, self.handoffs_first)):
         self.logger.warn('Handoff only mode is not intended for normal '
                          'operation, please disable handoffs_first and '
                          'handoff_delete before the next '
                          'normal rebalance')
     self._diskfile_mgr = DiskFileManager(conf, self.logger)
Example #20
 def __init__(self, app, conf):
     self.app = app
     self.conf = conf
     self.logger = get_logger(conf, log_route='liteauth')
     self.provider = load_oauth_provider(
         conf.get('oauth_provider', 'google_oauth'))
     self.auth_endpoint = conf.get('auth_endpoint', '')
     if not self.auth_endpoint:
         raise ValueError('auth_endpoint not set in config file')
     if isinstance(self.auth_endpoint, unicode):
         self.auth_endpoint = self.auth_endpoint.encode('utf-8')
     parsed_path = urlparse(self.auth_endpoint)
     if not parsed_path.netloc:
         raise ValueError('auth_endpoint is invalid in config file')
     self.auth_domain = parsed_path.netloc
     self.login_path = parsed_path.path
     self.scheme = parsed_path.scheme
     if self.scheme != 'https':
         raise ValueError('auth_endpoint must have https:// scheme')
      # By default, service_domain can be extracted from the endpoint.
      # When the auth domain differs from the service domain, you need
      # to set the service domain separately.
     # Example:
     # auth_endpoint = https://auth.example.com/login
     # service_domain = https://www.example.com
     self.service_domain = conf.get('service_domain',
                                    '%s://%s'
                                    % (self.scheme, self.auth_domain))
     self.storage_driver = None
     self.oauth_login_timeout = 3600
Example #21
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='container-updater')
     self.devices = conf.get('devices', '/srv/node')
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
     self.swift_dir = conf.get('swift_dir', '/etc/swift')
     self.interval = int(conf.get('interval', 300))
     self.account_ring = None
     self.concurrency = int(conf.get('concurrency', 4))
     self.slowdown = float(conf.get('slowdown', 0.01))
     self.node_timeout = float(conf.get('node_timeout', 3))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.no_changes = 0
     self.successes = 0
     self.failures = 0
     self.account_suppressions = {}
     self.account_suppression_time = \
         float(conf.get('account_suppression_time', 60))
     self.new_account_suppressions = None
     swift.common.db.DB_PREALLOCATION = \
         config_true_value(conf.get('db_preallocation', 'f'))
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.rcache = os.path.join(self.recon_cache_path, "container.recon")
     self.user_agent = 'container-updater %s' % os.getpid()
Example #22
 def __init__(self, app, conf):
     self.app = app
     self.conf = conf
     self.logger = get_logger(conf, log_route='kerbauth')
     self.log_headers = config_true_value(conf.get('log_headers', 'f'))
     self.reseller_prefix = conf.get('reseller_prefix', 'AUTH').strip()
     if self.reseller_prefix and self.reseller_prefix[-1] != '_':
         self.reseller_prefix += '_'
     self.logger.set_statsd_prefix('kerbauth.%s' % (
         self.reseller_prefix if self.reseller_prefix else 'NONE',))
     self.auth_prefix = conf.get('auth_prefix', '/auth/')
     if not self.auth_prefix or not self.auth_prefix.strip('/'):
         self.logger.warning('Rewriting invalid auth prefix "%s" to '
                             '"/auth/" (Non-empty auth prefix path '
                             'is required)' % self.auth_prefix)
         self.auth_prefix = '/auth/'
     if self.auth_prefix[0] != '/':
         self.auth_prefix = '/' + self.auth_prefix
     if self.auth_prefix[-1] != '/':
         self.auth_prefix += '/'
     self.token_life = int(conf.get('token_life', 86400))
     self.allow_overrides = config_true_value(
         conf.get('allow_overrides', 't'))
     self.storage_url_scheme = conf.get('storage_url_scheme', 'default')
     self.ext_authentication_url = conf.get('ext_authentication_url')
     if not self.ext_authentication_url:
         raise RuntimeError("Missing filter parameter ext_authentication_"
                            "url in /etc/swift/proxy-server.conf")
Example #23
File: reaper.py Project: jgmerritt/swift
 def __init__(self, conf, logger=None):
     self.conf = conf
     self.logger = logger or get_logger(conf, log_route='account-reaper')
     self.devices = conf.get('devices', '/srv/node')
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
     self.interval = int(conf.get('interval', 3600))
     self.swift_dir = conf.get('swift_dir', '/etc/swift')
     self.account_ring = None
     self.container_ring = None
     self.object_ring = None
     self.node_timeout = float(conf.get('node_timeout', 10))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.myips = whataremyips(conf.get('bind_ip', '0.0.0.0'))
     self.bind_port = int(conf.get('bind_port', 6202))
     self.concurrency = int(conf.get('concurrency', 25))
     self.container_concurrency = self.object_concurrency = \
         sqrt(self.concurrency)
     self.container_pool = GreenPool(size=self.container_concurrency)
     swift.common.db.DB_PREALLOCATION = \
         config_true_value(conf.get('db_preallocation', 'f'))
     self.delay_reaping = int(conf.get('delay_reaping') or 0)
     reap_warn_after = float(conf.get('reap_warn_after') or 86400 * 30)
     self.reap_not_done_after = reap_warn_after + self.delay_reaping
     self.start_time = time()
     self.reset_stats()
Example #24
    def __init__(self, app, conf):
        """
        This function is called when the Swift proxy initializes.
        """
        self.app = app
        self.conf = conf
        self.logger = get_logger(conf, log_route='customauth')
        self.log_headers = config_true_value(conf.get('log_headers', 'f'))
        self.reseller_prefix = conf.get('reseller_prefix', 'AUTH').strip()
        if self.reseller_prefix and self.reseller_prefix[-1] != '_':
            self.reseller_prefix += '_'
        self.logger.set_statsd_prefix('customauth.%s' % (
            self.reseller_prefix if self.reseller_prefix else 'NONE',))
        self.auth_prefix = conf.get('auth_prefix', '/auth/')
        # Organization
        self.organization_id = conf.get('organization_id', '57b69c457792482c8d817c4945c6c8a8')

        # Keystone
        self.keystone_auth_endpoint = conf.get('keystone_auth_endpoint', 'http://cloud.lab.fiware.org:4730/v2.0/tokens')
        self.keystone_tenant_endpoint = conf.get('keystone_tenant_endpoint', 'http://cloud.lab.fiware.org:4730/v2.0/tenants')
        if not self.auth_prefix or not self.auth_prefix.strip('/'):
            self.logger.warning('Rewriting invalid auth prefix "%s" to '
                                '"/auth/" (Non-empty auth prefix path '
                                'is required)' % self.auth_prefix)
            self.auth_prefix = '/auth/'
        if self.auth_prefix[0] != '/':
            self.auth_prefix = '/' + self.auth_prefix
        if self.auth_prefix[-1] != '/':
            self.auth_prefix += '/'
        self.token_life = int(conf.get('token_life', 86400))
        self.allow_overrides = config_true_value(
            conf.get('allow_overrides', 't'))
        self.storage_url_scheme = conf.get('storage_url_scheme', 'default')
        self.logger.info('CustomAuth v1.3 loaded successfully')
Example #25
def filter_factory(global_conf, **local_conf):
    """Returns the WSGI filter for use with paste.deploy."""
    conf = global_conf.copy()
    conf.update(local_conf)

    defaults = {
        'methods': 'GET HEAD PUT POST DELETE',
        'incoming_remove_headers': DEFAULT_INCOMING_REMOVE_HEADERS,
        'incoming_allow_headers': DEFAULT_INCOMING_ALLOW_HEADERS,
        'outgoing_remove_headers': DEFAULT_OUTGOING_REMOVE_HEADERS,
        'outgoing_allow_headers': DEFAULT_OUTGOING_ALLOW_HEADERS,
        'allowed_digests': DEFAULT_ALLOWED_DIGESTS,
    }
    info_conf = {k: conf.get(k, v).split() for k, v in defaults.items()}

    allowed_digests = set(digest.lower()
                          for digest in info_conf['allowed_digests'])
    not_supported = allowed_digests - SUPPORTED_DIGESTS
    if not_supported:
        logger = get_logger(conf, log_route='tempurl')
        logger.warning('The following digest algorithms are configured but '
                       'not supported: %s', ', '.join(not_supported))
        allowed_digests -= not_supported
    if not allowed_digests:
        raise ValueError('No valid digest algorithms are configured '
                         'for tempurls')
    info_conf['allowed_digests'] = sorted(allowed_digests)

    register_swift_info('tempurl', **info_conf)
    conf.update(info_conf)

    return lambda app: TempURL(app, conf)
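A sketch of how a caller such as paste.deploy might consume this factory; downstream_app is a placeholder for the next WSGI application in the pipeline:

    global_conf = {'swift_dir': '/etc/swift'}
    make_filter = filter_factory(global_conf, allowed_digests='sha256 sha512')
    tempurl = make_filter(downstream_app)  # wraps downstream_app in TempURL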
Example #26
File: recon.py Project: bkolli/swift
    def __init__(self, app, conf, *args, **kwargs):
        self.app = app
        self.devices = conf.get('devices', '/srv/node')
        swift_dir = conf.get('swift_dir', '/etc/swift')
        self.logger = get_logger(conf, log_route='recon')
        self.recon_cache_path = conf.get('recon_cache_path',
                                         '/var/cache/swift')
        self.object_recon_cache = os.path.join(self.recon_cache_path,
                                               'object.recon')
        self.container_recon_cache = os.path.join(self.recon_cache_path,
                                                  'container.recon')
        self.account_recon_cache = os.path.join(self.recon_cache_path,
                                                'account.recon')
        self.drive_recon_cache = os.path.join(self.recon_cache_path,
                                              'drive.recon')
        self.account_ring_path = os.path.join(swift_dir, 'account.ring.gz')
        self.container_ring_path = os.path.join(swift_dir, 'container.ring.gz')

        self.rings = [self.account_ring_path, self.container_ring_path]
        # include all object ring files (for all policies)
        for policy in POLICIES:
            self.rings.append(os.path.join(swift_dir,
                                           policy.ring_name + '.ring.gz'))

        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
Example #27
 def __init__(self, conf, logger=None):
     self.conf = conf
     self.logger = logger or get_logger(conf, log_route='object-updater')
     self.devices = conf.get('devices', '/srv/node')
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
     self.swift_dir = conf.get('swift_dir', '/etc/swift')
     self.interval = int(conf.get('interval', 300))
     self.container_ring = None
     self.concurrency = int(conf.get('concurrency', 1))
     if 'slowdown' in conf:
         self.logger.warning(
             'The slowdown option is deprecated in favor of '
             'objects_per_second. This option may be ignored in a '
             'future release.')
         objects_per_second = 1 / (
             float(conf.get('slowdown', '0.01')) + 0.01)
     else:
         objects_per_second = 50
     self.objects_running_time = 0
     self.max_objects_per_second = \
         float(conf.get('objects_per_second',
                        objects_per_second))
     self.node_timeout = float(conf.get('node_timeout', 10))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.report_interval = float(conf.get('report_interval', 300))
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.rcache = os.path.join(self.recon_cache_path, 'object.recon')
     self.stats = SweepStats()
Example #28
 def __init__(self, conf):
     """
     :param conf: configuration object obtained from ConfigParser
     """
     self.conf = conf
     self.logger = get_logger(conf, log_route='object-replicator')
     self.devices_dir = conf.get('devices', '/srv/node')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     self.vm_test_mode = conf.get(
             'vm_test_mode', 'no').lower() in ('yes', 'true', 'on', '1')
     self.swift_dir = conf.get('swift_dir', '/etc/swift')
     self.port = int(conf.get('bind_port', 6000))
     self.concurrency = int(conf.get('concurrency', 1))
     self.stats_interval = int(conf.get('stats_interval', '300'))
     self.object_ring = Ring(self.swift_dir, ring_name='object')
     self.ring_check_interval = int(conf.get('ring_check_interval', 15))
     self.next_check = time.time() + self.ring_check_interval
     self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
     self.partition_times = []
     self.run_pause = int(conf.get('run_pause', 30))
     self.rsync_timeout = int(conf.get('rsync_timeout', 900))
     self.rsync_io_timeout = conf.get('rsync_io_timeout', '30')
     self.http_timeout = int(conf.get('http_timeout', 60))
     self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
     self.recon_enable = conf.get(
             'recon_enable', 'no').lower() in TRUE_VALUES
     self.recon_cache_path = conf.get(
             'recon_cache_path', '/var/cache/swift')
     self.recon_object = os.path.join(self.recon_cache_path, "object.recon")
Example #29
 def __init__(self, app, conf):
     self.app = app
     self.conf = conf
     self.logger = get_logger(conf, log_route='lite-swauth')
     self.profile_path = 'profile'
     self.provider = load_provider('swauth',
                                   'SwauthClient',
                                   'could not load SwauthClient')(conf)
     # url for whitelist objects
     # Example: /v1/liteauth/whitelist
     self.whitelist_url = conf.get('whitelist_url', '').rstrip('/')
     if not self.whitelist_url:
         raise ValueError('whitelist_url not set in config file')
     # url for invite objects
     # Example: /v1/liteauth/invites
     self.invite_url = conf.get('invite_url', '').rstrip('/')
     if not self.invite_url:
         raise ValueError('invite_url not set in config file')
     self.cors_allow_origin = [
         a.strip()
         for a in conf.get('cors_allow_origin', '').split(',')
         if a.strip()]
     self.payload_limit = 65535
     self.allow_passthrough = conf.get('allow_passthrough', 'f').lower() \
         in TRUE_VALUES
Example #30
 def __init__(self, app, conf):
     self.app = app
     swift_dir = conf.get('swift_dir', '/etc/swift')
     self.account_ring_path = os.path.join(swift_dir, 'account.ring.gz')
     self.container_ring_path = os.path.join(swift_dir, 'container.ring.gz')
     self.object_ring_path = os.path.join(swift_dir, 'object.ring.gz')
     self.logger = get_logger(conf, log_route='lxc-swift')
Example #31
File: wsgi.py Project: spil-jasper/swift
        except socket.error, err:
            if err.args[0] != errno.EADDRINUSE:
                raise
            sleep(0.1)
    if not sock:
        raise Exception('Could not bind to %s:%s after trying for 30 seconds' %
                        bind_addr)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # in my experience, sockets can hang around forever without keepalive
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 600)
    if warn_ssl:
        ssl_warning_message = 'WARNING: SSL should only be enabled for ' \
                              'testing purposes. Use external SSL ' \
                              'termination for a production deployment.'
        get_logger(conf).warning(ssl_warning_message)
        print _(ssl_warning_message)
    return sock


# TODO: pull pieces of this out to test
def run_wsgi(conf_file, app_section, *args, **kwargs):
    """
    Loads common settings from conf, then instantiates app and runs
    the server using the specified number of workers.

    :param conf_file: Path to paste.deploy style configuration file
    :param app_section: App name from conf file to load config from
    """

    try:
Example #32
 def setUp(self):
     self.logger = get_logger({})
     self.logger.txn_id = None
Example #33
    def __init__(self,
                 conf,
                 logger=None,
                 account_ring=None,
                 container_ring=None):
        if conf is None:
            conf = {}
        if logger is None:
            self.logger = get_logger(conf, log_route='proxy-server')
        else:
            self.logger = logger
        self._error_limiting = {}

        swift_dir = conf.get('swift_dir', '/etc/swift')
        self.swift_dir = swift_dir
        self.node_timeout = float(conf.get('node_timeout', 10))
        self.recoverable_node_timeout = float(
            conf.get('recoverable_node_timeout', self.node_timeout))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.client_timeout = float(conf.get('client_timeout', 60))
        self.object_chunk_size = int(conf.get('object_chunk_size', 65536))
        self.client_chunk_size = int(conf.get('client_chunk_size', 65536))
        self.trans_id_suffix = conf.get('trans_id_suffix', '')
        self.post_quorum_timeout = float(conf.get('post_quorum_timeout', 0.5))
        self.error_suppression_interval = \
            int(conf.get('error_suppression_interval', 60))
        self.error_suppression_limit = \
            int(conf.get('error_suppression_limit', 10))
        self.recheck_container_existence = \
            int(conf.get('recheck_container_existence',
                         DEFAULT_RECHECK_CONTAINER_EXISTENCE))
        self.recheck_updating_shard_ranges = \
            int(conf.get('recheck_updating_shard_ranges',
                         DEFAULT_RECHECK_UPDATING_SHARD_RANGES))
        self.recheck_listing_shard_ranges = \
            int(conf.get('recheck_listing_shard_ranges',
                         DEFAULT_RECHECK_LISTING_SHARD_RANGES))
        self.recheck_account_existence = \
            int(conf.get('recheck_account_existence',
                         DEFAULT_RECHECK_ACCOUNT_EXISTENCE))
        self.allow_account_management = \
            config_true_value(conf.get('allow_account_management', 'no'))
        self.container_ring = container_ring or Ring(swift_dir,
                                                     ring_name='container')
        self.account_ring = account_ring or Ring(swift_dir,
                                                 ring_name='account')
        # ensure rings are loaded for all configured storage policies
        for policy in POLICIES:
            policy.load_ring(swift_dir)
        self.obj_controller_router = ObjectControllerRouter()
        mimetypes.init(mimetypes.knownfiles +
                       [os.path.join(swift_dir, 'mime.types')])
        self.account_autocreate = \
            config_true_value(conf.get('account_autocreate', 'no'))
        if conf.get('auto_create_account_prefix'):
            self.logger.warning('Option auto_create_account_prefix is '
                                'deprecated. Configure '
                                'auto_create_account_prefix under the '
                                'swift-constraints section of '
                                'swift.conf. This option will '
                                'be ignored in a future release.')
            self.auto_create_account_prefix = \
                conf['auto_create_account_prefix']
        else:
            self.auto_create_account_prefix = \
                constraints.AUTO_CREATE_ACCOUNT_PREFIX
        self.expiring_objects_account = self.auto_create_account_prefix + \
            (conf.get('expiring_objects_account_name') or 'expiring_objects')
        self.expiring_objects_container_divisor = \
            int(conf.get('expiring_objects_container_divisor') or 86400)
        self.max_containers_per_account = \
            int(conf.get('max_containers_per_account') or 0)
        self.max_containers_whitelist = [
            a.strip()
            for a in conf.get('max_containers_whitelist', '').split(',')
            if a.strip()
        ]
        self.deny_host_headers = [
            host.strip()
            for host in conf.get('deny_host_headers', '').split(',')
            if host.strip()
        ]
        self.log_handoffs = config_true_value(conf.get('log_handoffs', 'true'))
        self.cors_allow_origin = [
            a.strip() for a in conf.get('cors_allow_origin', '').split(',')
            if a.strip()
        ]
        self.cors_expose_headers = [
            a.strip() for a in conf.get('cors_expose_headers', '').split(',')
            if a.strip()
        ]
        self.strict_cors_mode = config_true_value(
            conf.get('strict_cors_mode', 't'))
        self.node_timings = {}
        self.timing_expiry = int(conf.get('timing_expiry', 300))
        value = conf.get('request_node_count', '2 * replicas')
        self.request_node_count = config_request_node_count_value(value)
        # swift_owner_headers are stripped by the account and container
        # controllers; we should extend header stripping to object controller
        # when a privileged object header is implemented.
        swift_owner_headers = conf.get(
            'swift_owner_headers', 'x-container-read, x-container-write, '
            'x-container-sync-key, x-container-sync-to, '
            'x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, '
            'x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, '
            'x-account-access-control')
        self.swift_owner_headers = [
            name.strip().title() for name in swift_owner_headers.split(',')
            if name.strip()
        ]

        # When upgrading from liberasurecode<=1.5.0, you may want to continue
        # writing legacy CRCs until all nodes are upgraded and capable of
        # reading fragments with zlib CRCs.
        # See https://bugs.launchpad.net/liberasurecode/+bug/1886088 for more
        # information.
        if 'write_legacy_ec_crc' in conf:
            os.environ['LIBERASURECODE_WRITE_LEGACY_CRC'] = \
                '1' if config_true_value(conf['write_legacy_ec_crc']) else '0'
        # else, assume operators know what they're doing and leave env alone

        # Initialization was successful, so now apply the client chunk size
        # parameter as the default read / write buffer size for the network
        # sockets.
        #
        # NOTE WELL: This is a class setting, so until we can set this on a
        # per-connection basis, this affects reading and writing on ALL
        # sockets, those between the proxy servers and external clients, and
        # those between the proxy servers and the other internal servers.
        #
        # ** Because it affects the client as well, currently, we use the
        # client chunk size as the governor and not the object chunk size.
        if sys.version_info < (3, ):
            socket._fileobject.default_bufsize = self.client_chunk_size
        # TODO: find a way to enable similar functionality in py3

        self.expose_info = config_true_value(conf.get('expose_info', 'yes'))
        self.disallowed_sections = list_from_csv(
            conf.get(
                'disallowed_sections', ', '.join([
                    'swift.auto_create_account_prefix',
                    'swift.valid_api_versions',
                ])))
        self.admin_key = conf.get('admin_key', None)
        self._override_options = self._load_per_policy_config(conf)
        self.sorts_by_timing = any(pc.sorting_method == 'timing'
                                   for pc in self._override_options.values())

        register_swift_info(
            version=swift_version,
            strict_cors_mode=self.strict_cors_mode,
            policies=POLICIES.get_policy_info(),
            allow_account_management=self.allow_account_management,
            account_autocreate=self.account_autocreate,
            **constraints.EFFECTIVE_CONSTRAINTS)
        self.watchdog = Watchdog()
        self.watchdog.spawn()
Example #34
 def __init__(self, app, conf):
     self.app = app
     self.logger = get_logger(conf, log_route="copy")
Example #35
 def __init__(self, app, conf):
     self.app = app
     self.logger = get_logger(conf, log_route="encrypter")
     self.crypto = Crypto(conf)
     self.disable_encryption = config_true_value(
         conf.get('disable_encryption', 'false'))
Example #36
File: sync.py Project: timolow/swift
    def __init__(self, conf, container_ring=None, logger=None):
        #: The dict of configuration values from the [container-sync] section
        #: of the container-server.conf.
        self.conf = conf
        #: Logger to use for container-sync log lines.
        self.logger = logger or get_logger(conf, log_route='container-sync')
        #: Path to the local device mount points.
        self.devices = conf.get('devices', '/srv/node')
        #: Indicates whether mount points should be verified as actual mount
        #: points (normally true, false for tests and SAIO).
        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
        #: Minimum time between full scans. This is to keep the daemon from
        #: running wild on near empty systems.
        self.interval = int(conf.get('interval', 300))
        #: Maximum amount of time to spend syncing a container before moving on
        #: to the next one. If a container sync hasn't finished in this time,
        #: it'll just be resumed next scan.
        self.container_time = int(conf.get('container_time', 60))
        #: ContainerSyncCluster instance for validating sync-to values.
        self.realms_conf = ContainerSyncRealms(
            os.path.join(conf.get('swift_dir', '/etc/swift'),
                         'container-sync-realms.conf'), self.logger)
        #: The list of hosts we're allowed to send syncs to. This can be
        #: overridden by data in self.realms_conf
        self.allowed_sync_hosts = [
            h.strip()
            for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
            if h.strip()
        ]
        self.http_proxies = [
            a.strip() for a in conf.get('sync_proxy', '').split(',')
            if a.strip()
        ]
        #: ContainerSyncStore instance for iterating over synced containers
        self.sync_store = ContainerSyncStore(self.devices, self.logger,
                                             self.mount_check)
        #: Number of containers with sync turned on that were successfully
        #: synced.
        self.container_syncs = 0
        #: Number of successful DELETEs triggered.
        self.container_deletes = 0
        #: Number of successful PUTs triggered.
        self.container_puts = 0
        #: Number of containers whose sync has been turned off, but
        #: are not yet cleared from the sync store.
        self.container_skips = 0
        #: Number of containers that had a failure of some type.
        self.container_failures = 0

        #: Per-container stats, collected for each synced container:
        #: puts - the number of puts that were done for the container
        #: deletes - the number of deletes that were done for the container
        #: bytes - the total number of bytes transferred for the container
        self.container_stats = collections.defaultdict(int)

        #: Time of last stats report.
        self.reported = time()
        self.swift_dir = conf.get('swift_dir', '/etc/swift')
        #: swift.common.ring.Ring for locating containers.
        self.container_ring = container_ring or Ring(self.swift_dir,
                                                     ring_name='container')
        bind_ip = conf.get('bind_ip', '0.0.0.0')
        self._myips = whataremyips(bind_ip)
        self._myport = int(conf.get('bind_port', 6001))
        swift.common.db.DB_PREALLOCATION = \
            config_true_value(conf.get('db_preallocation', 'f'))
        self.conn_timeout = float(conf.get('conn_timeout', 5))
        request_tries = int(conf.get('request_tries') or 3)

        internal_client_conf_path = conf.get('internal_client_conf_path')
        if not internal_client_conf_path:
            self.logger.warning(
                _('Configuration option internal_client_conf_path not '
                  'defined. Using default configuration; see '
                  'internal-client.conf-sample for options'))
            internal_client_conf = ConfigString(ic_conf_body)
        else:
            internal_client_conf = internal_client_conf_path
        try:
            self.swift = InternalClient(internal_client_conf,
                                        'Swift Container Sync', request_tries)
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            raise SystemExit(
                _('Unable to load internal client from config: %r (%s)') %
                (internal_client_conf_path, err))
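
The allowed_sync_hosts and sync_proxy options above both use the same strip-split-filter idiom for comma-separated config values. A small self-contained demonstration (the addresses are made up):

    raw = ' 10.0.0.1 , 10.0.0.2 ,, '
    hosts = [h.strip() for h in raw.split(',') if h.strip()]
    # Surrounding whitespace and empty entries are discarded.
    assert hosts == ['10.0.0.1', '10.0.0.2']
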
Example #37
 def __init__(self, app, conf):
     self.app = app
     self.conf = conf
     self.logger = get_logger(conf)
Example #38
    def __init__(self, app, conf):
        """Common initialization code."""
        self._app = app
        self._logger = get_logger(
            conf, log_route=conf.get('log_name', 's3token'))
        self._logger.debug('Starting the %s component', PROTOCOL_NAME)
        self._timeout = float(conf.get('http_timeout', '10.0'))
        if not (0 < self._timeout <= 60):
            raise ValueError('http_timeout must be between 0 and 60 seconds')
        self._reseller_prefix = append_underscore(
            conf.get('reseller_prefix', 'AUTH'))
        self._delay_auth_decision = config_true_value(
            conf.get('delay_auth_decision'))

        # where to find the auth service (we use this to validate tokens)
        self._request_uri = conf.get('auth_uri', '').rstrip('/') + '/s3tokens'
        parsed = urllib.parse.urlsplit(self._request_uri)
        if not parsed.scheme or not parsed.hostname:
            raise ConfigFileError(
                'Invalid auth_uri; must include scheme and host')
        if parsed.scheme not in ('http', 'https'):
            raise ConfigFileError(
                'Invalid auth_uri; scheme must be http or https')
        if parsed.query or parsed.fragment or '@' in parsed.netloc:
            raise ConfigFileError('Invalid auth_uri; must not include '
                                  'username, query, or fragment')

        # SSL
        insecure = config_true_value(conf.get('insecure'))
        cert_file = conf.get('certfile')
        key_file = conf.get('keyfile')

        if insecure:
            self._verify = False
        elif cert_file and key_file:
            self._verify = (cert_file, key_file)
        elif cert_file:
            self._verify = cert_file
        else:
            self._verify = None

        self._secret_cache_duration = int(conf.get('secret_cache_duration', 0))
        if self._secret_cache_duration < 0:
            raise ValueError('secret_cache_duration must be non-negative')
        if self._secret_cache_duration:
            try:
                auth_plugin = keystone_loading.get_plugin_loader(
                    conf.get('auth_type', 'password'))
                available_auth_options = auth_plugin.get_options()
                auth_options = {}
                for option in available_auth_options:
                    name = option.name.replace('-', '_')
                    value = conf.get(name)
                    if value:
                        auth_options[name] = value

                auth = auth_plugin.load_from_options(**auth_options)
                session = keystone_session.Session(auth=auth)
                self.keystoneclient = keystone_client.Client(
                    session=session,
                    region_name=conf.get('region_name'))
                self._logger.info("Caching s3tokens for %s seconds",
                                  self._secret_cache_duration)
            except Exception:
                self._logger.warning("Unable to load keystone auth_plugin. "
                                     "Secret caching will be unavailable.",
                                     exc_info=True)
                self.keystoneclient = None
                self._secret_cache_duration = 0
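
The auth_uri validation above can be exercised in isolation with the standard library; this sketch uses a hypothetical Keystone endpoint:

    import urllib.parse

    request_uri = 'https://keystone.example.com:5000'.rstrip('/') + '/s3tokens'
    parsed = urllib.parse.urlsplit(request_uri)
    # Mirrors the constructor's checks: scheme, host, and nothing extra.
    assert parsed.scheme in ('http', 'https') and parsed.hostname
    assert not (parsed.query or parsed.fragment or '@' in parsed.netloc)
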
Example #39
 def __init__(self, app, conf):
     self.app = app
     self.logger = get_logger(conf, log_route='newdatatrigger')
Example #40
    def __init__(self,
                 conf,
                 memcache=None,
                 logger=None,
                 account_ring=None,
                 container_ring=None):
        if conf is None:
            conf = {}
        if logger is None:
            self.logger = get_logger(conf, log_route='proxy-server')
        else:
            self.logger = logger
        self._override_options = self._load_per_policy_config(conf)
        self.sorts_by_timing = any(pc.sorting_method == 'timing'
                                   for pc in self._override_options.values())

        self._error_limiting = {}

        swift_dir = conf.get('swift_dir', '/etc/swift')
        self.swift_dir = swift_dir
        self.node_timeout = float(conf.get('node_timeout', 10))
        self.recoverable_node_timeout = float(
            conf.get('recoverable_node_timeout', self.node_timeout))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.client_timeout = int(conf.get('client_timeout', 60))
        self.put_queue_depth = int(conf.get('put_queue_depth', 10))
        self.object_chunk_size = int(conf.get('object_chunk_size', 65536))
        self.client_chunk_size = int(conf.get('client_chunk_size', 65536))
        self.trans_id_suffix = conf.get('trans_id_suffix', '')
        self.post_quorum_timeout = float(conf.get('post_quorum_timeout', 0.5))
        self.error_suppression_interval = \
            int(conf.get('error_suppression_interval', 60))
        self.error_suppression_limit = \
            int(conf.get('error_suppression_limit', 10))
        self.recheck_container_existence = \
            int(conf.get('recheck_container_existence',
                         DEFAULT_RECHECK_CONTAINER_EXISTENCE))
        self.recheck_account_existence = \
            int(conf.get('recheck_account_existence',
                         DEFAULT_RECHECK_ACCOUNT_EXISTENCE))
        self.allow_account_management = \
            config_true_value(conf.get('allow_account_management', 'no'))
        self.container_ring = container_ring or Ring(swift_dir,
                                                     ring_name='container')
        self.account_ring = account_ring or Ring(swift_dir,
                                                 ring_name='account')
        # ensure rings are loaded for all configured storage policies
        for policy in POLICIES:
            policy.load_ring(swift_dir)
        self.obj_controller_router = ObjectControllerRouter()
        self.memcache = memcache
        mimetypes.init(mimetypes.knownfiles +
                       [os.path.join(swift_dir, 'mime.types')])
        self.account_autocreate = \
            config_true_value(conf.get('account_autocreate', 'no'))
        self.auto_create_account_prefix = (
            conf.get('auto_create_account_prefix') or '.')
        self.expiring_objects_account = self.auto_create_account_prefix + \
            (conf.get('expiring_objects_account_name') or 'expiring_objects')
        self.expiring_objects_container_divisor = \
            int(conf.get('expiring_objects_container_divisor') or 86400)
        self.max_containers_per_account = \
            int(conf.get('max_containers_per_account') or 0)
        self.max_containers_whitelist = [
            a.strip()
            for a in conf.get('max_containers_whitelist', '').split(',')
            if a.strip()
        ]
        self.deny_host_headers = [
            host.strip()
            for host in conf.get('deny_host_headers', '').split(',')
            if host.strip()
        ]
        self.log_handoffs = config_true_value(conf.get('log_handoffs', 'true'))
        self.cors_allow_origin = [
            a.strip() for a in conf.get('cors_allow_origin', '').split(',')
            if a.strip()
        ]
        self.cors_expose_headers = [
            a.strip() for a in conf.get('cors_expose_headers', '').split(',')
            if a.strip()
        ]
        self.strict_cors_mode = config_true_value(
            conf.get('strict_cors_mode', 't'))
        self.node_timings = {}
        self.timing_expiry = int(conf.get('timing_expiry', 300))
        self.concurrent_gets = config_true_value(conf.get('concurrent_gets'))
        self.concurrency_timeout = float(
            conf.get('concurrency_timeout', self.conn_timeout))
        value = conf.get('request_node_count', '2 * replicas').lower().split()
        if len(value) == 1:
            rnc_value = int(value[0])
            self.request_node_count = lambda replicas: rnc_value
        elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
            rnc_value = int(value[0])
            self.request_node_count = lambda replicas: rnc_value * replicas
        else:
            raise ValueError('Invalid request_node_count value: %r' %
                             ''.join(value))
        # swift_owner_headers are stripped by the account and container
        # controllers; we should extend header stripping to object controller
        # when a privileged object header is implemented.
        swift_owner_headers = conf.get(
            'swift_owner_headers', 'x-container-read, x-container-write, '
            'x-container-sync-key, x-container-sync-to, '
            'x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, '
            'x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, '
            'x-account-access-control')
        self.swift_owner_headers = [
            name.strip().title() for name in swift_owner_headers.split(',')
            if name.strip()
        ]
        # Initialization was successful, so now apply the client chunk size
        # parameter as the default read / write buffer size for the network
        # sockets.
        #
        # NOTE WELL: This is a class setting, so until we can set this on a
        # per-connection basis, this affects reading and writing on ALL
        # sockets, those between the proxy servers and external clients, and
        # those between the proxy servers and the other internal servers.
        #
        # ** Because it affects the client as well, we currently use the
        # client chunk size as the governor rather than the object chunk size.
        if sys.version_info < (3, ):
            socket._fileobject.default_bufsize = self.client_chunk_size
        # TODO: find a way to enable similar functionality in py3

        self.expose_info = config_true_value(conf.get('expose_info', 'yes'))
        self.disallowed_sections = list_from_csv(
            conf.get('disallowed_sections', 'swift.valid_api_versions'))
        self.admin_key = conf.get('admin_key', None)
        register_swift_info(
            version=swift_version,
            strict_cors_mode=self.strict_cors_mode,
            policies=POLICIES.get_policy_info(),
            allow_account_management=self.allow_account_management,
            account_autocreate=self.account_autocreate,
            **constraints.EFFECTIVE_CONSTRAINTS)
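
The request_node_count parsing above accepts either a bare integer or the form 'N * replicas'. A standalone sketch of the same logic, exercising both accepted shapes:

    def parse_request_node_count(value):
        parts = value.lower().split()
        if len(parts) == 1:
            n = int(parts[0])
            return lambda replicas: n
        if len(parts) == 3 and parts[1] == '*' and parts[2] == 'replicas':
            n = int(parts[0])
            return lambda replicas: n * replicas
        raise ValueError('Invalid request_node_count value: %r' % value)

    assert parse_request_node_count('2 * replicas')(3) == 6  # the default form
    assert parse_request_node_count('5')(3) == 5             # fixed count
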
Example #41
 def __init__(self, conf):
     self.logger = get_logger(conf)
Example #42
 def __init__(self, app, conf):
     self._app = app
     self.logger = get_logger(conf, log_route='webhook')
     self._notifier = LoggingNotifier(self.logger, conf)
Example #43
 def __init__(self, conf, logger=None):
     super(FakeOPTIONS, self).__init__(conf)
     self.logger = logger or get_logger(conf, log_route='test-server')
Example #44
def in_process_setup(the_object_server=object_server):
    _info('IN-PROCESS SERVERS IN USE FOR FUNCTIONAL TESTS')
    _info('Using object_server class: %s' % the_object_server.__name__)
    conf_src_dir = os.environ.get('SWIFT_TEST_IN_PROCESS_CONF_DIR')
    show_debug_logs = os.environ.get('SWIFT_TEST_DEBUG_LOGS')

    if conf_src_dir is not None:
        if not os.path.isdir(conf_src_dir):
            msg = 'Config source %s is not a dir' % conf_src_dir
            raise InProcessException(msg)
        _info('Using config source dir: %s' % conf_src_dir)

    # If SWIFT_TEST_IN_PROCESS_CONF_DIR specifies a config source dir then
    # prefer config files from there, otherwise read config from source tree
    # sample files. A mixture of files from the two sources is allowed.
    proxy_conf = _in_process_find_conf_file(conf_src_dir, 'proxy-server.conf')
    _info('Using proxy config from %s' % proxy_conf)
    swift_conf_src = _in_process_find_conf_file(conf_src_dir, 'swift.conf')
    _info('Using swift config from %s' % swift_conf_src)

    monkey_patch_mimetools()

    global _testdir
    _testdir = os.path.join(mkdtemp(), 'tmp_functional')
    utils.mkdirs(_testdir)
    rmtree(_testdir)
    utils.mkdirs(os.path.join(_testdir, 'sda1'))
    utils.mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
    utils.mkdirs(os.path.join(_testdir, 'sdb1'))
    utils.mkdirs(os.path.join(_testdir, 'sdb1', 'tmp'))

    swift_conf = _in_process_setup_swift_conf(swift_conf_src, _testdir)
    obj_sockets = _in_process_setup_ring(swift_conf, conf_src_dir, _testdir)

    global orig_swift_conf_name
    orig_swift_conf_name = utils.SWIFT_CONF_FILE
    utils.SWIFT_CONF_FILE = swift_conf
    constraints.reload_constraints()
    storage_policy.SWIFT_CONF_FILE = swift_conf
    storage_policy.reload_storage_policies()
    global config
    if constraints.SWIFT_CONSTRAINTS_LOADED:
        # Use the swift constraints that are loaded for the test framework
        # configuration
        _c = dict(
            (k, str(v)) for k, v in constraints.EFFECTIVE_CONSTRAINTS.items())
        config.update(_c)
    else:
        # In-process swift constraints were not loaded; something's wrong
        raise SkipTest
    global orig_hash_path_suff_pref
    orig_hash_path_suff_pref = utils.HASH_PATH_PREFIX, utils.HASH_PATH_SUFFIX
    utils.validate_hash_conf()

    global _test_socks
    _test_socks = []
    # We create the proxy server listening socket to get its port number so
    # that we can add it as the "auth_port" value for the functional test
    # clients.
    prolis = eventlet.listen(('localhost', 0))
    _test_socks.append(prolis)

    # The following set of configuration values is used both for the
    # functional test framework and for the various proxy, account, container
    # and object servers.
    config.update({
        # Values needed by the various in-process swift servers
        'devices': _testdir,
        'swift_dir': _testdir,
        'mount_check': 'false',
        'client_timeout': '4',
        'allow_account_management': 'true',
        'account_autocreate': 'true',
        'allow_versions': 'True',
        # Below are values used by the functional test framework, as well as
        # by the various in-process swift servers
        'auth_host': '127.0.0.1',
        'auth_port': str(prolis.getsockname()[1]),
        'auth_ssl': 'no',
        'auth_prefix': '/auth/',
        # Primary functional test account (needs admin access to the
        # account)
        'account': 'test',
        'username': '******',
        'password': '******',
        # User on a second account (needs admin access to the account)
        'account2': 'test2',
        'username2': 'tester2',
        'password2': 'testing2',
        # User on same account as first, but without admin access
        'username3': 'tester3',
        'password3': 'testing3',
        # Service user and prefix (emulates glance, cinder, etc. user)
        'account5': 'test5',
        'username5': 'tester5',
        'password5': 'testing5',
        'service_prefix': 'SERVICE',
        # For tempauth middleware. Update reseller_prefix
        'reseller_prefix': 'AUTH, SERVICE',
        'SERVICE_require_group': 'service',
        # Reseller admin user (needs reseller_admin_role)
        'account6': 'test6',
        'username6': 'tester6',
        'password6': 'testing6'
    })

    # If an env var explicitly specifies the proxy-server object_post_as_copy
    # option then use its value, otherwise leave default config unchanged.
    object_post_as_copy = os.environ.get(
        'SWIFT_TEST_IN_PROCESS_OBJECT_POST_AS_COPY')
    if object_post_as_copy is not None:
        object_post_as_copy = config_true_value(object_post_as_copy)
        config['object_post_as_copy'] = str(object_post_as_copy)
        _debug('Setting object_post_as_copy to %r' % object_post_as_copy)

    acc1lis = eventlet.listen(('localhost', 0))
    acc2lis = eventlet.listen(('localhost', 0))
    con1lis = eventlet.listen(('localhost', 0))
    con2lis = eventlet.listen(('localhost', 0))
    _test_socks += [acc1lis, acc2lis, con1lis, con2lis] + obj_sockets

    account_ring_path = os.path.join(_testdir, 'account.ring.gz')
    with closing(GzipFile(account_ring_path, 'wb')) as f:
        pickle.dump(
            ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
                          [{
                              'id': 0,
                              'zone': 0,
                              'device': 'sda1',
                              'ip': '127.0.0.1',
                              'port': acc1lis.getsockname()[1]
                          }, {
                              'id': 1,
                              'zone': 1,
                              'device': 'sdb1',
                              'ip': '127.0.0.1',
                              'port': acc2lis.getsockname()[1]
                          }], 30), f)
    container_ring_path = os.path.join(_testdir, 'container.ring.gz')
    with closing(GzipFile(container_ring_path, 'wb')) as f:
        pickle.dump(
            ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
                          [{
                              'id': 0,
                              'zone': 0,
                              'device': 'sda1',
                              'ip': '127.0.0.1',
                              'port': con1lis.getsockname()[1]
                          }, {
                              'id': 1,
                              'zone': 1,
                              'device': 'sdb1',
                              'ip': '127.0.0.1',
                              'port': con2lis.getsockname()[1]
                          }], 30), f)

    eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
    # Turn off logging requests by the underlying WSGI software.
    eventlet.wsgi.HttpProtocol.log_request = lambda *a: None
    logger = utils.get_logger(config, 'wsgi-server', log_route='wsgi')
    # Redirect other log messages from the underlying WSGI software.
    eventlet.wsgi.HttpProtocol.log_message = \
        lambda s, f, *a: logger.error('ERROR WSGI: ' + f % a)
    # Default to only 4 seconds for in-process functional test runs
    eventlet.wsgi.WRITE_TIMEOUT = 4

    def get_logger_name(name):
        if show_debug_logs:
            return debug_logger(name)
        else:
            return None

    acc1srv = account_server.AccountController(config,
                                               logger=get_logger_name('acct1'))
    acc2srv = account_server.AccountController(config,
                                               logger=get_logger_name('acct2'))
    con1srv = container_server.ContainerController(
        config, logger=get_logger_name('cont1'))
    con2srv = container_server.ContainerController(
        config, logger=get_logger_name('cont2'))

    objsrvs = [(obj_sockets[index],
                the_object_server.ObjectController(config,
                                                   logger=get_logger_name(
                                                       'obj%d' % (index + 1))))
               for index in range(len(obj_sockets))]

    if show_debug_logs:
        logger = debug_logger('proxy')

    def get_logger(name, *args, **kwargs):
        return logger

    with mock.patch('swift.common.utils.get_logger', get_logger):
        with mock.patch('swift.common.middleware.memcache.MemcacheMiddleware',
                        FakeMemcacheMiddleware):
            try:
                app = loadapp(proxy_conf, global_conf=config)
            except Exception as e:
                raise InProcessException(e)

    nl = utils.NullLogger()
    global proxy_srv
    proxy_srv = prolis
    prospa = eventlet.spawn(eventlet.wsgi.server, prolis, app, nl)
    acc1spa = eventlet.spawn(eventlet.wsgi.server, acc1lis, acc1srv, nl)
    acc2spa = eventlet.spawn(eventlet.wsgi.server, acc2lis, acc2srv, nl)
    con1spa = eventlet.spawn(eventlet.wsgi.server, con1lis, con1srv, nl)
    con2spa = eventlet.spawn(eventlet.wsgi.server, con2lis, con2srv, nl)

    objspa = [
        eventlet.spawn(eventlet.wsgi.server, objsrv[0], objsrv[1], nl)
        for objsrv in objsrvs
    ]

    global _test_coros
    _test_coros = \
        (prospa, acc1spa, acc2spa, con1spa, con2spa) + tuple(objspa)

    # Create accounts "test" and "test2"
    def create_account(act):
        ts = utils.normalize_timestamp(time())
        account_ring = Ring(_testdir, ring_name='account')
        partition, nodes = account_ring.get_nodes(act)
        for node in nodes:
            # Note: we are just using the http_connect method in the object
            # controller here to talk to the account server nodes.
            conn = swift.proxy.controllers.obj.http_connect(
                node['ip'], node['port'], node['device'], partition, 'PUT',
                '/' + act, {
                    'X-Timestamp': ts,
                    'x-trans-id': act
                })
            resp = conn.getresponse()
            assert (resp.status == 201)

    create_account('AUTH_test')
    create_account('AUTH_test2')
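
eventlet.listen(('localhost', 0)) above asks the kernel for an ephemeral port, which is then read back with getsockname() to build the auth_port value. The same trick with only the standard library:

    import socket

    s = socket.socket()
    s.bind(('127.0.0.1', 0))   # port 0: let the kernel choose
    s.listen(1)
    port = s.getsockname()[1]  # the assigned ephemeral port
    assert port > 0
    s.close()
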
Example #45
 def __init__(self, app, conf):
     self.app = app
     self.logger = get_logger(conf, log_route='catch-errors')
     self.trans_id_suffix = conf.get('trans_id_suffix', '')
Example #46
 def __init__(self, conf):
     self.conf = conf
     self.logger = utils.get_logger(None, 'server', log_route='server')
     MyDaemon.forever_called = False
     MyDaemon.once_called = False
Example #47
 def __init__(self, app, conf):
     self.app = app
     self.logger = get_logger(conf, log_route='catch-errors')
Example #48
    def __init__(self, conf, logger=None):
        """
        Creates a new WSGI application for the Swift Object Server. An
        example configuration is given at
        <source-dir>/etc/object-server.conf-sample or
        /etc/swift/object-server.conf-sample.
        """
        self.logger = logger or get_logger(conf, log_route='object-server')
        self.node_timeout = int(conf.get('node_timeout', 3))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.client_timeout = int(conf.get('client_timeout', 60))
        self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
        self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
        self.log_requests = config_true_value(conf.get('log_requests', 'true'))
        self.max_upload_time = int(conf.get('max_upload_time', 86400))
        self.slow = int(conf.get('slow', 0))
        self.keep_cache_private = \
            config_true_value(conf.get('keep_cache_private', 'false'))
        replication_server = conf.get('replication_server', None)
        if replication_server is not None:
            replication_server = config_true_value(replication_server)
        self.replication_server = replication_server

        default_allowed_headers = '''
            content-disposition,
            content-encoding,
            x-delete-at,
            x-object-manifest,
            x-static-large-object,
        '''
        extra_allowed_headers = [
            header.strip().lower() for header in conf.get(
                'allowed_headers', default_allowed_headers).split(',')
            if header.strip()
        ]
        self.allowed_headers = set()
        for header in extra_allowed_headers:
            if header not in DATAFILE_SYSTEM_META:
                self.allowed_headers.add(header)
        self.expiring_objects_account = \
            (conf.get('auto_create_account_prefix') or '.') + \
            'expiring_objects'
        self.expiring_objects_container_divisor = \
            int(conf.get('expiring_objects_container_divisor') or 86400)
        # Initialization was successful, so now apply the network chunk size
        # parameter as the default read / write buffer size for the network
        # sockets.
        #
        # NOTE WELL: This is a class setting, so until we can set this on a
        # per-connection basis, this affects reading and writing on ALL
        # sockets, those between the proxy servers and external clients, and
        # those between the proxy servers and the other internal servers.
        #
        # ** Because the primary motivation for this is to optimize how data
        # is written back to the proxy server, we could use the value from the
        # disk_chunk_size parameter. However, it affects all created sockets
        # using this class, so we have chosen to tie it to the
        # network_chunk_size parameter value instead.
        socket._fileobject.default_bufsize = self.network_chunk_size

        # Provide further setup specific to an object server implementation.
        self.setup(conf)
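
The default_allowed_headers string above parses cleanly despite its embedded newlines and trailing comma, because the strip-split-filter idiom drops the empty entries. A reduced demonstration:

    default_allowed_headers = '''
        content-disposition,
        x-delete-at,
    '''
    headers = [h.strip().lower() for h in default_allowed_headers.split(',')
               if h.strip()]
    assert headers == ['content-disposition', 'x-delete-at']
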
Example #49
 def __init__(self, app, conf):
     self.app = app
     self.logger = get_logger(conf, log_route="decrypter")
     self.crypto = Crypto(conf)
Example #50
 def __init__(self, app, conf):
     self.app = app
     self.conf = conf
     self.logger = get_logger(conf, log_route='versioned_writes')
Example #51
File: daemon.py Project: yantingHu91/swift
 def __init__(self, conf):
     self.conf = conf
     self.logger = utils.get_logger(conf, log_route='daemon')
Example #52
File: server.py Project: zhoubing00/swift
    def __init__(self,
                 conf,
                 memcache=None,
                 logger=None,
                 account_ring=None,
                 container_ring=None,
                 object_ring=None):
        if conf is None:
            conf = {}
        if logger is None:
            self.logger = get_logger(conf, log_route='proxy-server')
        else:
            self.logger = logger

        swift_dir = conf.get('swift_dir', '/etc/swift')
        self.node_timeout = int(conf.get('node_timeout', 10))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.client_timeout = int(conf.get('client_timeout', 60))
        self.put_queue_depth = int(conf.get('put_queue_depth', 10))
        self.object_chunk_size = int(conf.get('object_chunk_size', 65536))
        self.client_chunk_size = int(conf.get('client_chunk_size', 65536))
        self.error_suppression_interval = \
            int(conf.get('error_suppression_interval', 60))
        self.error_suppression_limit = \
            int(conf.get('error_suppression_limit', 10))
        self.recheck_container_existence = \
            int(conf.get('recheck_container_existence', 60))
        self.recheck_account_existence = \
            int(conf.get('recheck_account_existence', 60))
        self.allow_account_management = \
            conf.get('allow_account_management', 'no').lower() in TRUE_VALUES
        self.object_post_as_copy = \
            conf.get('object_post_as_copy', 'true').lower() in TRUE_VALUES
        self.resellers_conf = ConfigParser()
        self.resellers_conf.read(os.path.join(swift_dir, 'resellers.conf'))
        self.object_ring = object_ring or Ring(swift_dir, ring_name='object')
        self.container_ring = container_ring or Ring(swift_dir,
                                                     ring_name='container')
        self.account_ring = account_ring or Ring(swift_dir,
                                                 ring_name='account')
        self.memcache = memcache
        mimetypes.init(mimetypes.knownfiles +
                       [os.path.join(swift_dir, 'mime.types')])
        self.account_autocreate = \
            conf.get('account_autocreate', 'no').lower() in TRUE_VALUES
        self.expiring_objects_account = \
            (conf.get('auto_create_account_prefix') or '.') + \
            'expiring_objects'
        self.expiring_objects_container_divisor = \
            int(conf.get('expiring_objects_container_divisor') or 86400)
        self.max_containers_per_account = \
            int(conf.get('max_containers_per_account') or 0)
        self.max_containers_whitelist = [
            a.strip()
            for a in conf.get('max_containers_whitelist', '').split(',')
            if a.strip()
        ]
        self.deny_host_headers = [
            host.strip()
            for host in conf.get('deny_host_headers', '').split(',')
            if host.strip()
        ]
        self.rate_limit_after_segment = \
            int(conf.get('rate_limit_after_segment', 10))
        self.rate_limit_segments_per_sec = \
            int(conf.get('rate_limit_segments_per_sec', 1))
        self.log_handoffs = \
            conf.get('log_handoffs', 'true').lower() in TRUE_VALUES
Example #53
 def __init__(self, app, conf):
     self.app = app
     self.conf = conf
     self.logger = get_logger(conf, log_route='object_versioning')
Example #54
 def __init__(self, conf, logger=None):
     """
     :param conf: configuration object obtained from ConfigParser
     :param logger: logging object
     """
     self.conf = conf
     self.logger = logger or get_logger(conf, log_route='object-replicator')
     self.devices_dir = conf.get('devices', '/srv/node')
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
     self.swift_dir = conf.get('swift_dir', '/etc/swift')
     self.bind_ip = conf.get('bind_ip', '0.0.0.0')
     self.servers_per_port = int(conf.get('servers_per_port', '0') or 0)
     self.port = None if self.servers_per_port else \
         int(conf.get('bind_port', 6000))
     self.concurrency = int(conf.get('concurrency', 1))
     self.stats_interval = int(conf.get('stats_interval', '300'))
     self.ring_check_interval = int(conf.get('ring_check_interval', 15))
     self.next_check = time.time() + self.ring_check_interval
     self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
     self.partition_times = []
     self.interval = int(
         conf.get('interval') or conf.get('run_pause') or 30)
     self.rsync_timeout = int(conf.get('rsync_timeout', 900))
     self.rsync_io_timeout = conf.get('rsync_io_timeout', '30')
     self.rsync_bwlimit = conf.get('rsync_bwlimit', '0')
     self.rsync_compress = config_true_value(
         conf.get('rsync_compress', 'no'))
     self.rsync_module = conf.get('rsync_module', '').rstrip('/')
     if not self.rsync_module:
         self.rsync_module = '{replication_ip}::object'
         if config_true_value(conf.get('vm_test_mode', 'no')):
             self.logger.warn('Option object-replicator/vm_test_mode is '
                              'deprecated and will be removed in a future '
                              'version. Update your configuration to use '
                              'option object-replicator/rsync_module.')
             self.rsync_module += '{replication_port}'
     self.http_timeout = int(conf.get('http_timeout', 60))
     self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.rcache = os.path.join(self.recon_cache_path, "object.recon")
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.node_timeout = float(conf.get('node_timeout', 10))
     self.sync_method = getattr(self, conf.get('sync_method') or 'rsync')
     self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
     self.default_headers = {
         'Content-Length': '0',
         'user-agent': 'object-replicator %s' % os.getpid()
     }
     self.rsync_error_log_line_length = \
         int(conf.get('rsync_error_log_line_length', 0))
     self.handoffs_first = config_true_value(
         conf.get('handoffs_first', False))
     self.handoff_delete = config_auto_int_value(
         conf.get('handoff_delete', 'auto'), 0)
     if any((self.handoff_delete, self.handoffs_first)):
         self.logger.warn('Handoff only mode is not intended for normal '
                          'operation, please disable handoffs_first and '
                          'handoff_delete before the next '
                          'normal rebalance')
     self._diskfile_mgr = DiskFileManager(conf, self.logger)
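
handoff_delete above goes through config_auto_int_value, which lets 'auto' (or a missing value) fall back to a default while anything else must parse as an integer. A hedged sketch of those semantics, written from the behavior described here rather than quoting the library:

    def config_auto_int_value(value, default):
        # 'auto' or None means "use the default"; otherwise require an int.
        if value is None or str(value).lower() == 'auto':
            return default
        return int(value)

    assert config_auto_int_value('auto', 0) == 0
    assert config_auto_int_value(None, 0) == 0
    assert config_auto_int_value('2', 0) == 2
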
Example #55
File: wsgi.py Project: harkhuang/swift
def get_socket(conf):
    """Bind socket to bind ip:port in conf

    :param conf: Configuration dict to read settings from

    :returns: a socket object as returned from socket.listen or
              ssl.wrap_socket if conf specifies cert_file
    """
    try:
        bind_port = int(conf['bind_port'])
    except (ValueError, KeyError, TypeError):
        raise ConfigFilePortError()
    bind_addr = (conf.get('bind_ip', '0.0.0.0'), bind_port)
    address_family = [
        addr[0] for addr in socket.getaddrinfo(
            bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
        if addr[0] in (socket.AF_INET, socket.AF_INET6)
    ][0]
    sock = None
    bind_timeout = int(conf.get('bind_timeout', 30))
    retry_until = time.time() + bind_timeout
    warn_ssl = False

    try:
        keepidle = int(conf.get('keep_idle', 600))
        if keepidle <= 0 or keepidle >= 2**15 - 1:
            raise ValueError()
    except (ValueError, KeyError, TypeError):
        raise ConfigFileError()

    while not sock and time.time() < retry_until:
        try:
            sock = listen(bind_addr,
                          backlog=int(conf.get('backlog', 4096)),
                          family=address_family)
            if 'cert_file' in conf:
                warn_ssl = True
                sock = ssl.wrap_socket(sock,
                                       certfile=conf['cert_file'],
                                       keyfile=conf['key_file'])
        except socket.error as err:
            if err.args[0] != errno.EADDRINUSE:
                raise
            sleep(0.1)
    if not sock:
        raise Exception(
            _('Could not bind to %(addr)s:%(port)s '
              'after trying for %(timeout)s seconds') % {
                  'addr': bind_addr[0],
                  'port': bind_addr[1],
                  'timeout': bind_timeout
              })
    # in my experience, sockets can hang around forever without keepalive
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    if hasattr(socket, 'TCP_KEEPIDLE'):
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, keepidle)
    if warn_ssl:
        ssl_warning_message = _('WARNING: SSL should only be enabled for '
                                'testing purposes. Use external SSL '
                                'termination for a production deployment.')
        get_logger(conf).warning(ssl_warning_message)
        print(ssl_warning_message)
    return sock
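
The address_family selection in get_socket probes getaddrinfo and keeps the first IPv4/IPv6 result. The same probe standalone, with a loopback address:

    import socket

    families = [
        addr[0] for addr in socket.getaddrinfo(
            '127.0.0.1', 8080, socket.AF_UNSPEC, socket.SOCK_STREAM)
        if addr[0] in (socket.AF_INET, socket.AF_INET6)
    ]
    assert families[0] == socket.AF_INET  # '::1' would yield AF_INET6
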
Example #56
 def __init__(self, app, conf):
     self.app = app
     self.logger = get_logger(conf, log_route='constraints')
     self.policies = conf.get('policies', '')
Example #57
def in_process_setup(the_object_server=object_server):
    _info('IN-PROCESS SERVERS IN USE FOR FUNCTIONAL TESTS')
    _info('Using object_server class: %s' % the_object_server.__name__)
    conf_src_dir = os.environ.get('SWIFT_TEST_IN_PROCESS_CONF_DIR')
    show_debug_logs = os.environ.get('SWIFT_TEST_DEBUG_LOGS')

    if conf_src_dir is not None:
        if not os.path.isdir(conf_src_dir):
            msg = 'Config source %s is not a dir' % conf_src_dir
            raise InProcessException(msg)
        _info('Using config source dir: %s' % conf_src_dir)

    # If SWIFT_TEST_IN_PROCESS_CONF_DIR specifies a config source dir then
    # prefer config files from there, otherwise read config from source tree
    # sample files. A mixture of files from the two sources is allowed.
    proxy_conf = _in_process_find_conf_file(conf_src_dir, 'proxy-server.conf')
    _info('Using proxy config from %s' % proxy_conf)
    swift_conf_src = _in_process_find_conf_file(conf_src_dir, 'swift.conf')
    _info('Using swift config from %s' % swift_conf_src)

    monkey_patch_mimetools()

    global _testdir
    _testdir = os.path.join(mkdtemp(), 'tmp_functional')
    utils.mkdirs(_testdir)
    rmtree(_testdir)
    utils.mkdirs(os.path.join(_testdir, 'sda1'))
    utils.mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
    utils.mkdirs(os.path.join(_testdir, 'sdb1'))
    utils.mkdirs(os.path.join(_testdir, 'sdb1', 'tmp'))
    utils.mkdirs(os.path.join(_testdir, 'sdc1'))
    utils.mkdirs(os.path.join(_testdir, 'sdc1', 'tmp'))

    swift_conf = _in_process_setup_swift_conf(swift_conf_src, _testdir)
    _info('prepared swift.conf: %s' % swift_conf)

    # Call the associated method for the value of
    # 'SWIFT_TEST_IN_PROCESS_CONF_LOADER', if one exists
    conf_loader_label = os.environ.get('SWIFT_TEST_IN_PROCESS_CONF_LOADER')
    if conf_loader_label is not None:
        try:
            conf_loader = conf_loaders[conf_loader_label]
            _debug('Calling method %s mapped to conf loader %s' %
                   (conf_loader.__name__, conf_loader_label))
        except KeyError as missing_key:
            raise InProcessException('No function mapped for conf loader %s' %
                                     missing_key)

        try:
            # Pass-in proxy_conf, swift_conf files
            proxy_conf, swift_conf = conf_loader(proxy_conf, swift_conf)
            _debug('Now using proxy conf %s' % proxy_conf)
            _debug('Now using swift conf %s' % swift_conf)
        except Exception as err:  # noqa
            raise InProcessException(err)

    obj_sockets = _in_process_setup_ring(swift_conf, conf_src_dir, _testdir)

    # load new swift.conf file
    if set_swift_dir(os.path.dirname(swift_conf)):
        constraints.reload_constraints()
        storage_policy.reload_storage_policies()

    global config
    config['__file__'] = 'in_process_setup()'
    if constraints.SWIFT_CONSTRAINTS_LOADED:
        # Use the swift constraints that are loaded for the test framework
        # configuration
        _c = dict(
            (k, str(v)) for k, v in constraints.EFFECTIVE_CONSTRAINTS.items())
        config.update(_c)
    else:
        # In-process swift constraints were not loaded; something's wrong
        raise SkipTest

    global _test_socks
    _test_socks = []
    # We create the proxy server listening socket to get its port number so
    # that we can add it as the "auth_port" value for the functional test
    # clients.
    prolis = listen_zero()
    _test_socks.append(prolis)

    # The following set of configuration values is used both for the
    # functional test framework and for the various proxy, account, container
    # and object servers.
    config.update({
        # Values needed by the various in-process swift servers
        'devices': _testdir,
        'swift_dir': _testdir,
        'mount_check': 'false',
        'client_timeout': '4',
        'allow_account_management': 'true',
        'account_autocreate': 'true',
        'allow_versions': 'True',
        'allow_versioned_writes': 'True',
        # TODO: move this into s3api config loader because they are
        #       required only by s3api
        'allowed_headers':
            "Content-Disposition, Content-Encoding, X-Delete-At, "
            "X-Object-Manifest, X-Static-Large-Object, Cache-Control, "
            "Content-Language, Expires, X-Robots-Tag",
        # Below are values used by the functional test framework, as well as
        # by the various in-process swift servers
        'auth_host': '127.0.0.1',
        'auth_port': str(prolis.getsockname()[1]),
        'auth_ssl': 'no',
        'auth_prefix': '/auth/',
        # Primary functional test account (needs admin access to the
        # account)
        'account': 'test',
        'username': '******',
        'password': '******',
        's3_access_key': 'test:tester',
        's3_secret_key': 'testing',
        # Secondary user of the primary test account (needs admin access
        # to the account) for s3api
        's3_access_key2': 'test:tester2',
        's3_secret_key2': 'testing2',
        # User on a second account (needs admin access to the account)
        'account2': 'test2',
        'username2': 'tester2',
        'password2': 'testing2',
        # User on same account as first, but without admin access
        'username3': 'tester3',
        'password3': 'testing3',
        's3_access_key3': 'test:tester3',
        's3_secret_key3': 'testing3',
        # Service user and prefix (emulates glance, cinder, etc. user)
        'account5': 'test5',
        'username5': 'tester5',
        'password5': 'testing5',
        'service_prefix': 'SERVICE',
        # For tempauth middleware. Update reseller_prefix
        'reseller_prefix': 'AUTH, SERVICE',
        'SERVICE_require_group': 'service',
        # Reseller admin user (needs reseller_admin_role)
        'account6': 'test6',
        'username6': 'tester6',
        'password6': 'testing6'
    })

    acc1lis = listen_zero()
    acc2lis = listen_zero()
    con1lis = listen_zero()
    con2lis = listen_zero()
    _test_socks += [acc1lis, acc2lis, con1lis, con2lis] + obj_sockets

    account_ring_path = os.path.join(_testdir, 'account.ring.gz')
    with closing(GzipFile(account_ring_path, 'wb')) as f:
        pickle.dump(
            ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
                          [{
                              'id': 0,
                              'zone': 0,
                              'device': 'sda1',
                              'ip': '127.0.0.1',
                              'port': acc1lis.getsockname()[1]
                          }, {
                              'id': 1,
                              'zone': 1,
                              'device': 'sdb1',
                              'ip': '127.0.0.1',
                              'port': acc2lis.getsockname()[1]
                          }], 30), f)
    container_ring_path = os.path.join(_testdir, 'container.ring.gz')
    with closing(GzipFile(container_ring_path, 'wb')) as f:
        pickle.dump(
            ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
                          [{
                              'id': 0,
                              'zone': 0,
                              'device': 'sda1',
                              'ip': '127.0.0.1',
                              'port': con1lis.getsockname()[1]
                          }, {
                              'id': 1,
                              'zone': 1,
                              'device': 'sdb1',
                              'ip': '127.0.0.1',
                              'port': con2lis.getsockname()[1]
                          }], 30), f)

    # Default to only 4 seconds for in-process functional test runs
    eventlet.wsgi.WRITE_TIMEOUT = 4

    def get_logger_name(name):
        if show_debug_logs:
            return debug_logger(name)
        else:
            return None

    acc1srv = account_server.AccountController(config,
                                               logger=get_logger_name('acct1'))
    acc2srv = account_server.AccountController(config,
                                               logger=get_logger_name('acct2'))
    con1srv = container_server.ContainerController(
        config, logger=get_logger_name('cont1'))
    con2srv = container_server.ContainerController(
        config, logger=get_logger_name('cont2'))

    objsrvs = [(obj_sockets[index],
                the_object_server.ObjectController(config,
                                                   logger=get_logger_name(
                                                       'obj%d' % (index + 1))))
               for index in range(len(obj_sockets))]

    if show_debug_logs:
        logger = get_logger_name('proxy')
    else:
        logger = utils.get_logger(config, 'wsgi-server', log_route='wsgi')

    def get_logger(name, *args, **kwargs):
        return logger

    with mock.patch('swift.common.utils.get_logger', get_logger):
        with mock.patch('swift.common.middleware.memcache.MemcacheMiddleware',
                        FakeMemcacheMiddleware):
            try:
                app = loadapp(proxy_conf, global_conf=config)
            except Exception as e:
                raise InProcessException(e)

    nl = utils.NullLogger()
    global proxy_srv
    proxy_srv = prolis
    prospa = eventlet.spawn(eventlet.wsgi.server, prolis, app, nl,
                            protocol=SwiftHttpProtocol)
    acc1spa = eventlet.spawn(eventlet.wsgi.server, acc1lis, acc1srv, nl,
                             protocol=SwiftHttpProtocol)
    acc2spa = eventlet.spawn(eventlet.wsgi.server, acc2lis, acc2srv, nl,
                             protocol=SwiftHttpProtocol)
    con1spa = eventlet.spawn(eventlet.wsgi.server, con1lis, con1srv, nl,
                             protocol=SwiftHttpProtocol)
    con2spa = eventlet.spawn(eventlet.wsgi.server, con2lis, con2srv, nl,
                             protocol=SwiftHttpProtocol)

    objspa = [
        eventlet.spawn(eventlet.wsgi.server,
                       objsrv[0],
                       objsrv[1],
                       nl,
                       protocol=SwiftHttpProtocol) for objsrv in objsrvs
    ]

    global _test_coros
    _test_coros = \
        (prospa, acc1spa, acc2spa, con1spa, con2spa) + tuple(objspa)

    # Create accounts "test" and "test2"
    def create_account(act):
        ts = utils.normalize_timestamp(time())
        account_ring = Ring(_testdir, ring_name='account')
        partition, nodes = account_ring.get_nodes(act)
        for node in nodes:
            # Note: we are just using the http_connect method in the object
            # controller here to talk to the account server nodes.
            conn = swift.proxy.controllers.obj.http_connect(
                node['ip'], node['port'], node['device'], partition, 'PUT',
                '/' + act, {
                    'X-Timestamp': ts,
                    'x-trans-id': act
                })
            resp = conn.getresponse()
            assert resp.status == 201, 'Unable to create account: %s\n%s' % (
                resp.status, resp.body)

    create_account('AUTH_test')
    create_account('AUTH_test2')
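
create_account stamps its PUTs with utils.normalize_timestamp. A hedged sketch of the format it produces, assuming Swift's fixed-width X-Timestamp convention (zero-padded seconds with five decimal places):

    def normalize_timestamp(ts):
        # Fixed width so timestamps sort lexicographically.
        return '%016.5f' % float(ts)

    assert normalize_timestamp(1234) == '0000001234.00000'
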
Example #58
 def __init__(self, app, conf, symloop_max):
     self.app = app
     self.conf = conf
     self.logger = get_logger(self.conf, log_route='symlink')
     self.symloop_max = symloop_max
Example #59
 def __init__(self, conf, **options):
     self.conf = conf
     self.logger = get_logger(conf, log_route='object-auditor')
     self.conf_zero_byte_fps = int(
         conf.get('zero_byte_files_per_second', 50))
Example #60
 def __init__(self, conf=None):
     self.logger = get_logger(conf, log_route="crypto")
     # memoize backend to avoid repeated iteration over entry points
     self.backend = default_backend()
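
The memoization noted in the comment above matters because default_backend() (from the external cryptography package) resolves its backend through entry points; holding the result on the instance does that work once per object. A minimal usage sketch, assuming the cryptography package is installed:

    from cryptography.hazmat.backends import default_backend

    # Resolve once and keep a reference, mirroring self.backend above.
    backend = default_backend()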