Example #1
0
 def test_get_logger_console(self):
     reset_loggers()
     logger = utils.get_logger(None)
     console_handlers = [
         h for h in logger.logger.handlers
         if isinstance(h, logging.StreamHandler)
     ]
     self.assertFalse(console_handlers)
     logger = utils.get_logger(None, log_to_console=True)
     console_handlers = [
         h for h in logger.logger.handlers
         if isinstance(h, logging.StreamHandler)
     ]
     self.assert_(console_handlers)
     # make sure you can't have two console handlers
     self.assertEquals(len(console_handlers), 1)
     old_handler = console_handlers[0]
     logger = utils.get_logger(None, log_to_console=True)
     console_handlers = [
         h for h in logger.logger.handlers
         if isinstance(h, logging.StreamHandler)
     ]
     self.assertEquals(len(console_handlers), 1)
     new_handler = console_handlers[0]
     self.assertNotEquals(new_handler, old_handler)
     reset_loggers()
Example #2
0
 def __init__(self, app, conf):
     #: The next WSGI application/filter in the paste.deploy pipeline.
     self.app = app
     #: The filter configuration dict.
     self.conf = conf
     #: The seconds to cache the x-container-meta-web-* headers.
     self.cache_timeout = int(conf.get('cache_timeout', 300))
     #: Logger for this filter.
     self.logger = get_logger(conf, log_route='staticweb')
     access_log_conf = {}
     for key in ('log_facility', 'log_name', 'log_level'):
         value = conf.get('access_' + key, conf.get(key, None))
         if value:
             access_log_conf[key] = value
     #: Web access logger for this filter.
     self.access_logger = get_logger(access_log_conf,
                                     log_route='staticweb-access')
     #: Indicates whether full HTTP headers should be logged or not.
     self.log_headers = conf.get('log_headers') == 'True'
     # Results from the last call to self._start_response.
     self._response_status = None
     self._response_headers = None
     self._response_exc_info = None
     # Results from the last call to self._get_container_info.
     self._index = self._error = self._listings = self._listings_css = None
Example #3
0
 def __init__(self, app, conf):
     #: The next WSGI application/filter in the paste.deploy pipeline.
     self.app = app
     #: The filter configuration dict.
     self.conf = conf
     #: The seconds to cache the x-container-meta-web-* headers.
     self.cache_timeout = int(conf.get('cache_timeout', 300))
     #: Logger for this filter.
     self.logger = get_logger(conf, log_route='staticweb')
     access_log_conf = {}
     for key in ('log_facility', 'log_name', 'log_level'):
         value = conf.get('access_' + key, conf.get(key, None))
         if value:
             access_log_conf[key] = value
     #: Web access logger for this filter.
     self.access_logger = get_logger(access_log_conf,
                                     log_route='staticweb-access')
     #: Indicates whether full HTTP headers should be logged or not.
     self.log_headers = conf.get('log_headers') == 'True'
     # Results from the last call to self._start_response.
     self._response_status = None
     self._response_headers = None
     self._response_exc_info = None
     # Results from the last call to self._get_container_info.
     self._index = self._error = self._listings = self._listings_css = None
Example #4
0
 def test_get_logger(self):
     sio = StringIO()
     logger = logging.getLogger('server')
     logger.addHandler(logging.StreamHandler(sio))
     logger = utils.get_logger(None, 'server', log_route='server')
     logger.warn('test1')
     self.assertEquals(sio.getvalue(), 'test1\n')
     logger.debug('test2')
     self.assertEquals(sio.getvalue(), 'test1\n')
     logger = utils.get_logger({'log_level': 'DEBUG'},
                               'server',
                               log_route='server')
     logger.debug('test3')
     self.assertEquals(sio.getvalue(), 'test1\ntest3\n')
     # Doesn't really test that the log facility is truly being used all the
     # way to syslog; but exercises the code.
     logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'},
                               'server',
                               log_route='server')
     logger.warn('test4')
     self.assertEquals(sio.getvalue(), 'test1\ntest3\ntest4\n')
     # make sure debug doesn't log by default
     logger.debug('test5')
     self.assertEquals(sio.getvalue(), 'test1\ntest3\ntest4\n')
     # make sure notice lvl logs by default
     logger.notice('test6')
Example #5
0
 def test_get_logger(self):
     sio = StringIO()
     logger = logging.getLogger('server')
     logger.addHandler(logging.StreamHandler(sio))
     logger = utils.get_logger(None, 'server', log_route='server')
     logger.warn('test1')
     self.assertEquals(sio.getvalue(), 'test1\n')
     logger.debug('test2')
     self.assertEquals(sio.getvalue(), 'test1\n')
     logger = utils.get_logger({'log_level': 'DEBUG'}, 'server',
                               log_route='server')
     logger.debug('test3')
     self.assertEquals(sio.getvalue(), 'test1\ntest3\n')
     # Doesn't really test that the log facility is truly being used all the
     # way to syslog; but exercises the code.
     logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server',
                               log_route='server')
     logger.warn('test4')
     self.assertEquals(sio.getvalue(),
                       'test1\ntest3\ntest4\n')
     # make sure debug doesn't log by default
     logger.debug('test5')
     self.assertEquals(sio.getvalue(),
                       'test1\ntest3\ntest4\n')
     # make sure notice lvl logs by default
     logger.notice('test6')
Example #6
0
    def test_capture_stdio(self):
        # stubs
        logger = utils.get_logger(None, 'dummy')

        # mock utils system modules
        _orig_sys = utils.sys
        _orig_os = utils.os
        try:
            utils.sys = MockSys()
            utils.os = MockOs()

            # basic test
            utils.capture_stdio(logger)
            self.assert_(utils.sys.excepthook is not None)
            self.assertEquals(utils.os.closed_fds, utils.sys.stdio_fds)
            self.assert_(isinstance(utils.sys.stdout, utils.LoggerFileObject))
            self.assert_(isinstance(utils.sys.stderr, utils.LoggerFileObject))

            # reset; test same args, but exc when trying to close stdio
            utils.os = MockOs(raise_funcs=('dup2', ))
            utils.sys = MockSys()

            # test unable to close stdio
            utils.capture_stdio(logger)
            self.assert_(utils.sys.excepthook is not None)
            self.assertEquals(utils.os.closed_fds, [])
            self.assert_(isinstance(utils.sys.stdout, utils.LoggerFileObject))
            self.assert_(isinstance(utils.sys.stderr, utils.LoggerFileObject))

            # reset; test some other args
            utils.os = MockOs()
            utils.sys = MockSys()
            logger = utils.get_logger(None, log_to_console=True)

            # test console log
            utils.capture_stdio(logger,
                                capture_stdout=False,
                                capture_stderr=False)
            self.assert_(utils.sys.excepthook is not None)
            # when logging to console, stderr remains open
            self.assertEquals(utils.os.closed_fds, utils.sys.stdio_fds[:2])
            reset_loggers()

            # stdio not captured
            self.assertFalse(
                isinstance(utils.sys.stdout, utils.LoggerFileObject))
            self.assertFalse(
                isinstance(utils.sys.stderr, utils.LoggerFileObject))
            reset_loggers()
        finally:
            utils.sys = _orig_sys
            utils.os = _orig_os
Example #7
0
    def test_capture_stdio(self):
        # stubs
        logger = utils.get_logger(None, 'dummy')

        # mock utils system modules
        _orig_sys = utils.sys
        _orig_os = utils.os
        try:
            utils.sys = MockSys()
            utils.os = MockOs()

            # basic test
            utils.capture_stdio(logger)
            self.assert_(utils.sys.excepthook is not None)
            self.assertEquals(utils.os.closed_fds, utils.sys.stdio_fds)
            self.assert_(isinstance(utils.sys.stdout, utils.LoggerFileObject))
            self.assert_(isinstance(utils.sys.stderr, utils.LoggerFileObject))

            # reset; test same args, but exc when trying to close stdio
            utils.os = MockOs(raise_funcs=('dup2',))
            utils.sys = MockSys()

            # test unable to close stdio
            utils.capture_stdio(logger)
            self.assert_(utils.sys.excepthook is not None)
            self.assertEquals(utils.os.closed_fds, [])
            self.assert_(isinstance(utils.sys.stdout, utils.LoggerFileObject))
            self.assert_(isinstance(utils.sys.stderr, utils.LoggerFileObject))

            # reset; test some other args
            utils.os = MockOs()
            utils.sys = MockSys()
            logger = utils.get_logger(None, log_to_console=True)

            # test console log
            utils.capture_stdio(logger, capture_stdout=False,
                                capture_stderr=False)
            self.assert_(utils.sys.excepthook is not None)
            # when logging to console, stderr remains open
            self.assertEquals(utils.os.closed_fds, utils.sys.stdio_fds[:2])
            reset_loggers()

            # stdio not captured
            self.assertFalse(isinstance(utils.sys.stdout,
                                        utils.LoggerFileObject))
            self.assertFalse(isinstance(utils.sys.stderr,
                                        utils.LoggerFileObject))
            reset_loggers()
        finally:
            utils.sys = _orig_sys
            utils.os = _orig_os
Example #8
0
 def __init__(self, conf):
     """
     Creates a new WSGI application for the Chase Object Server. An
     example configuration is given at
     <source-dir>/etc/object-server.conf-sample or
     /etc/chase/object-server.conf-sample.
     """
     self.logger = get_logger(conf, log_route='object-server')
     self.devices = conf.get('devices', '/srv/node/')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     self.node_timeout = int(conf.get('node_timeout', 3))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
     self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
     self.log_requests = conf.get('log_requests', 't')[:1].lower() == 't'
     self.max_upload_time = int(conf.get('max_upload_time', 86400))
     self.slow = int(conf.get('slow', 0))
     self.bytes_per_sync = int(conf.get('mb_per_sync', 512)) * 1024 * 1024
     default_allowed_headers = '''
         content-disposition,
         content-encoding,
         x-delete-at,
         x-object-manifest,
     '''
     self.allowed_headers = set(i.strip().lower() for i in \
             conf.get('allowed_headers', \
             default_allowed_headers).split(',') if i.strip() and \
             i.strip().lower() not in DISALLOWED_HEADERS)
     self.expiring_objects_account = \
         (conf.get('auto_create_account_prefix') or '.') + \
         'expiring_objects'
     self.expiring_objects_container_divisor = \
         int(conf.get('expiring_objects_container_divisor') or 86400)
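The allowed_headers parsing above is dense, so here is a standalone sketch of what it produces; the DISALLOWED_HEADERS value below is an assumed stand-in for illustration, not necessarily the server's real constant:

    # Standalone sketch of the allowed_headers parsing above; the
    # DISALLOWED_HEADERS set is an assumed stand-in for illustration.
    DISALLOWED_HEADERS = set(['content-length', 'content-type', 'deleted',
                              'etag'])
    default_allowed_headers = '''
        content-disposition,
        content-encoding,
        x-delete-at,
        x-object-manifest,
    '''
    conf = {}  # no 'allowed_headers' override, so the default string is used
    allowed_headers = set(i.strip().lower() for i in
                          conf.get('allowed_headers',
                                   default_allowed_headers).split(',')
                          if i.strip() and
                          i.strip().lower() not in DISALLOWED_HEADERS)
    print(allowed_headers)
    # -> the four default header names, stripped and lower-cased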
Example #9
0
 def __init__(self, conf):
     """
     :param conf: configuration object obtained from ConfigParser
     """
     self.conf = conf
     self.logger = get_logger(conf, log_route='object-replicator')
     self.devices_dir = conf.get('devices', '/srv/node')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     self.vm_test_mode = conf.get('vm_test_mode',
                                  'no').lower() in ('yes', 'true', 'on',
                                                    '1')
     self.chase_dir = conf.get('chase_dir', '/etc/chase')
     self.port = int(conf.get('bind_port', 6000))
     self.concurrency = int(conf.get('concurrency', 1))
     self.stats_interval = int(conf.get('stats_interval', '300'))
     self.object_ring = Ring(join(self.chase_dir, 'object.ring.gz'))
     self.ring_check_interval = int(conf.get('ring_check_interval', 15))
     self.next_check = time.time() + self.ring_check_interval
     self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
     self.partition_times = []
     self.run_pause = int(conf.get('run_pause', 30))
     self.rsync_timeout = int(conf.get('rsync_timeout', 900))
     self.rsync_io_timeout = conf.get('rsync_io_timeout', '30')
     self.http_timeout = int(conf.get('http_timeout', 60))
     self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
     self.recon_enable = conf.get('recon_enable',
                                  'no').lower() in TRUE_VALUES
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/chase')
     self.recon_object = os.path.join(self.recon_cache_path, "object.recon")
Example #10
0
 def __init__(self, root, datadir, broker_class, mount_check=True,
              logger=None):
     self.root = root
     self.datadir = datadir
     self.broker_class = broker_class
     self.mount_check = mount_check
     self.logger = logger or get_logger({}, log_route='replicator-rpc')
Example #11
0
def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
    """
    Loads settings from conf, then instantiates daemon "klass" and runs the
    daemon with the specified once kwarg.  The section_name will be derived
    from the daemon "klass" if not provided (e.g. ObjectReplicator =>
    object-replicator).

    :param klass: Class to instantiate, subclass of common.daemon.Daemon
    :param conf_file: Path to configuration file
    :param section_name: Section name from conf file to load config from
    :param once: Passed to daemon run method
    """
    # very often the config section_name is based on the class name
    # the None singleton will be passed through to readconf as is
    if section_name == '':
        section_name = sub(r'([a-z])([A-Z])', r'\1-\2',
                           klass.__name__).lower()
    conf = utils.readconf(conf_file, section_name,
                          log_name=kwargs.get('log_name'))

    # once on the command line (i.e. daemonize=false) will override the config
    once = once or \
            conf.get('daemonize', 'true').lower() not in utils.TRUE_VALUES

    # pre-configure logger
    if 'logger' in kwargs:
        logger = kwargs.pop('logger')
    else:
        logger = utils.get_logger(conf, conf.get('log_name', section_name),
           log_to_console=kwargs.pop('verbose', False), log_route=section_name)
    try:
        klass(conf).run(once=once, **kwargs)
    except KeyboardInterrupt:
        logger.info('User quit')
    logger.info('Exited')
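The section-name derivation in run_daemon is just the CamelCase-to-hyphen regex shown above; a minimal, self-contained illustration (the class names here are arbitrary examples):

    from re import sub

    # Same regex as in run_daemon: split CamelCase on lower/upper boundaries,
    # join with '-', and lower-case the result.
    for name in ('ObjectReplicator', 'AccountAuditor', 'ContainerUpdater'):
        print(sub(r'([a-z])([A-Z])', r'\1-\2', name).lower())
    # object-replicator
    # account-auditor
    # container-updater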
Example #12
0
 def __init__(self, conf):
     """
     :param conf: configuration object obtained from ConfigParser
     """
     self.conf = conf
     self.logger = get_logger(conf, log_route='object-replicator')
     self.devices_dir = conf.get('devices', '/srv/node')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     self.vm_test_mode = conf.get(
             'vm_test_mode', 'no').lower() in ('yes', 'true', 'on', '1')
     self.chase_dir = conf.get('chase_dir', '/etc/chase')
     self.port = int(conf.get('bind_port', 6000))
     self.concurrency = int(conf.get('concurrency', 1))
     self.stats_interval = int(conf.get('stats_interval', '300'))
     self.object_ring = Ring(join(self.chase_dir, 'object.ring.gz'))
     self.ring_check_interval = int(conf.get('ring_check_interval', 15))
     self.next_check = time.time() + self.ring_check_interval
     self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
     self.partition_times = []
     self.run_pause = int(conf.get('run_pause', 30))
     self.rsync_timeout = int(conf.get('rsync_timeout', 900))
     self.rsync_io_timeout = conf.get('rsync_io_timeout', '30')
     self.http_timeout = int(conf.get('http_timeout', 60))
     self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
     self.recon_enable = conf.get(
             'recon_enable', 'no').lower() in TRUE_VALUES
     self.recon_cache_path = conf.get(
             'recon_cache_path', '/var/cache/chase')
     self.recon_object = os.path.join(self.recon_cache_path, "object.recon")
Example #13
0
 def __init__(self, conf):
     """
     Creates a new WSGI application for the Chase Object Server. An
     example configuration is given at
     <source-dir>/etc/object-server.conf-sample or
     /etc/chase/object-server.conf-sample.
     """
     self.logger = get_logger(conf, log_route='object-server')
     self.devices = conf.get('devices', '/srv/node/')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     self.node_timeout = int(conf.get('node_timeout', 3))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
     self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
     self.log_requests = conf.get('log_requests', 't')[:1].lower() == 't'
     self.max_upload_time = int(conf.get('max_upload_time', 86400))
     self.slow = int(conf.get('slow', 0))
     self.bytes_per_sync = int(conf.get('mb_per_sync', 512)) * 1024 * 1024
     default_allowed_headers = '''
         content-disposition,
         content-encoding,
         x-delete-at,
         x-object-manifest,
     '''
     self.allowed_headers = set(i.strip().lower() for i in \
             conf.get('allowed_headers', \
             default_allowed_headers).split(',') if i.strip() and \
             i.strip().lower() not in DISALLOWED_HEADERS)
     self.expiring_objects_account = \
         (conf.get('auto_create_account_prefix') or '.') + \
         'expiring_objects'
     self.expiring_objects_container_divisor = \
         int(conf.get('expiring_objects_container_divisor') or 86400)
Example #14
0
    def test_run_daemon(self):
        sample_conf = """[my-daemon]
user = %s
""" % getuser()
        with tmpfile(sample_conf) as conf_file:
            daemon.run_daemon(MyDaemon, conf_file)
            self.assertEquals(MyDaemon.forever_called, True)
            daemon.run_daemon(MyDaemon, conf_file, once=True)
            self.assertEquals(MyDaemon.once_called, True)

            # test raise in daemon code
            MyDaemon.run_once = MyDaemon.run_raise
            self.assertRaises(OSError,
                              daemon.run_daemon,
                              MyDaemon,
                              conf_file,
                              once=True)

            # test user quit
            MyDaemon.run_forever = MyDaemon.run_quit
            sio = StringIO()
            logger = logging.getLogger('server')
            logger.addHandler(logging.StreamHandler(sio))
            logger = utils.get_logger(None, 'server', log_route='server')
            daemon.run_daemon(MyDaemon, conf_file, logger=logger)
            self.assert_('user quit' in sio.getvalue().lower())
Example #15
0
    def test_chase_log_formatter(self):
        # setup stream logging
        sio = StringIO()
        logger = utils.get_logger(None)
        handler = logging.StreamHandler(sio)
        handler.setFormatter(utils.ChaseLogFormatter())
        logger.logger.addHandler(handler)

        def strip_value(sio):
            v = sio.getvalue()
            sio.truncate(0)
            return v

        try:
            self.assertFalse(logger.txn_id)
            logger.error('my error message')
            log_msg = strip_value(sio)
            self.assert_('my error message' in log_msg)
            self.assert_('txn' not in log_msg)
            logger.txn_id = '12345'
            logger.error('test')
            log_msg = strip_value(sio)
            self.assert_('txn' in log_msg)
            self.assert_('12345' in log_msg)
            # test no txn on info message
            self.assertEquals(logger.txn_id, '12345')
            logger.info('test')
            log_msg = strip_value(sio)
            self.assert_('txn' not in log_msg)
            self.assert_('12345' not in log_msg)
            # test txn already in message
            self.assertEquals(logger.txn_id, '12345')
            logger.warn('test 12345 test')
            self.assertEquals(strip_value(sio), 'test 12345 test\n')

            # test client_ip
            self.assertFalse(logger.client_ip)
            logger.error('my error message')
            log_msg = strip_value(sio)
            self.assert_('my error message' in log_msg)
            self.assert_('client_ip' not in log_msg)
            logger.client_ip = '1.2.3.4'
            logger.error('test')
            log_msg = strip_value(sio)
            self.assert_('client_ip' in log_msg)
            self.assert_('1.2.3.4' in log_msg)
            # test no client_ip on info message
            self.assertEquals(logger.client_ip, '1.2.3.4')
            logger.info('test')
            log_msg = strip_value(sio)
            self.assert_('client_ip' not in log_msg)
            self.assert_('1.2.3.4' not in log_msg)
            # test client_ip (and txn) already in message
            self.assertEquals(logger.client_ip, '1.2.3.4')
            logger.warn('test 1.2.3.4 test 12345')
            self.assertEquals(strip_value(sio), 'test 1.2.3.4 test 12345\n')
        finally:
            logger.logger.removeHandler(handler)
            reset_loggers()
Example #16
0
    def test_chase_log_formatter(self):
        # setup stream logging
        sio = StringIO()
        logger = utils.get_logger(None)
        handler = logging.StreamHandler(sio)
        handler.setFormatter(utils.ChaseLogFormatter())
        logger.logger.addHandler(handler)

        def strip_value(sio):
            v = sio.getvalue()
            sio.truncate(0)
            return v

        try:
            self.assertFalse(logger.txn_id)
            logger.error('my error message')
            log_msg = strip_value(sio)
            self.assert_('my error message' in log_msg)
            self.assert_('txn' not in log_msg)
            logger.txn_id = '12345'
            logger.error('test')
            log_msg = strip_value(sio)
            self.assert_('txn' in log_msg)
            self.assert_('12345' in log_msg)
            # test no txn on info message
            self.assertEquals(logger.txn_id, '12345')
            logger.info('test')
            log_msg = strip_value(sio)
            self.assert_('txn' not in log_msg)
            self.assert_('12345' not in log_msg)
            # test txn already in message
            self.assertEquals(logger.txn_id, '12345')
            logger.warn('test 12345 test')
            self.assertEquals(strip_value(sio), 'test 12345 test\n')

            # test client_ip
            self.assertFalse(logger.client_ip)
            logger.error('my error message')
            log_msg = strip_value(sio)
            self.assert_('my error message' in log_msg)
            self.assert_('client_ip' not in log_msg)
            logger.client_ip = '1.2.3.4'
            logger.error('test')
            log_msg = strip_value(sio)
            self.assert_('client_ip' in log_msg)
            self.assert_('1.2.3.4' in log_msg)
            # test no client_ip on info message
            self.assertEquals(logger.client_ip, '1.2.3.4')
            logger.info('test')
            log_msg = strip_value(sio)
            self.assert_('client_ip' not in log_msg)
            self.assert_('1.2.3.4' not in log_msg)
            # test client_ip (and txn) already in message
            self.assertEquals(logger.client_ip, '1.2.3.4')
            logger.warn('test 1.2.3.4 test 12345')
            self.assertEquals(strip_value(sio), 'test 1.2.3.4 test 12345\n')
        finally:
            logger.logger.removeHandler(handler)
            reset_loggers()
Example #17
0
 def __init__(self, app, conf):
     self.app = app
     self.storage_domain = conf.get('storage_domain', 'example.com')
     if self.storage_domain and self.storage_domain[0] != '.':
         self.storage_domain = '.' + self.storage_domain
     self.lookup_depth = int(conf.get('lookup_depth', '1'))
     self.memcache = None
     self.logger = get_logger(conf, log_route='cname-lookup')
Example #18
0
 def __init__(self, app, conf):
     """Store valuable bits from the conf and set up logging."""
     self.app = app
     self.keystone_url = urlparse(conf.get("keystone_url"))
     self.admin_token = conf.get("keystone_admin_token")
     self.reseller_prefix = conf.get("reseller_prefix", "AUTH")
     self.log = get_logger(conf, log_route="keystone")
     self.log.info("Keystone middleware started")
Example #19
0
 def __init__(self, app, conf):
     """Store valuable bits from the conf and set up logging."""
     self.app = app
     self.keystone_url = urlparse(conf.get('keystone_url'))
     self.admin_token = conf.get('keystone_admin_token')
     self.reseller_prefix = conf.get('reseller_prefix', 'AUTH')
     self.log = get_logger(conf, log_route='keystone')
     self.log.info('Keystone middleware started')
Example #20
0
 def __init__(self, app, conf):
     self.app = app
     self.storage_domain = conf.get('storage_domain', 'example.com')
     if self.storage_domain and self.storage_domain[0] != '.':
         self.storage_domain = '.' + self.storage_domain
     self.lookup_depth = int(conf.get('lookup_depth', '1'))
     self.memcache = None
     self.logger = get_logger(conf, log_route='cname-lookup')
Example #21
0
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='account-auditor')
     self.devices = conf.get('devices', '/srv/node')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     self.interval = int(conf.get('interval', 1800))
     self.account_passes = 0
     self.account_failures = 0
Example #22
0
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='account-auditor')
     self.devices = conf.get('devices', '/srv/node')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     self.interval = int(conf.get('interval', 1800))
     self.account_passes = 0
     self.account_failures = 0
Example #23
0
 def __init__(self, conf):
     self.logger = get_logger(conf, log_route='account-server')
     self.root = conf.get('devices', '/srv/node')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     self.replicator_rpc = ReplicatorRpc(self.root, DATADIR, AccountBroker,
         self.mount_check, logger=self.logger)
     self.auto_create_account_prefix = \
         conf.get('auto_create_account_prefix') or '.'
Example #24
0
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='container-auditor')
     self.devices = conf.get('devices', '/srv/node')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     self.interval = int(conf.get('interval', 1800))
     chase_dir = conf.get('chase_dir', '/etc/chase')
     self.container_passes = 0
     self.container_failures = 0
Example #25
0
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='container-auditor')
     self.devices = conf.get('devices', '/srv/node')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     self.interval = int(conf.get('interval', 1800))
     chase_dir = conf.get('chase_dir', '/etc/chase')
     self.container_passes = 0
     self.container_failures = 0
Example #26
0
 def test_get_logger_console(self):
     reset_loggers()
     logger = utils.get_logger(None)
     console_handlers = [h for h in logger.logger.handlers if
                         isinstance(h, logging.StreamHandler)]
     self.assertFalse(console_handlers)
     logger = utils.get_logger(None, log_to_console=True)
     console_handlers = [h for h in logger.logger.handlers if
                         isinstance(h, logging.StreamHandler)]
     self.assert_(console_handlers)
     # make sure you can't have two console handlers
     self.assertEquals(len(console_handlers), 1)
     old_handler = console_handlers[0]
     logger = utils.get_logger(None, log_to_console=True)
     console_handlers = [h for h in logger.logger.handlers if
                         isinstance(h, logging.StreamHandler)]
     self.assertEquals(len(console_handlers), 1)
     new_handler = console_handlers[0]
     self.assertNotEquals(new_handler, old_handler)
     reset_loggers()
Example #27
0
 def __init__(self,
              root,
              datadir,
              broker_class,
              mount_check=True,
              logger=None):
     self.root = root
     self.datadir = datadir
     self.broker_class = broker_class
     self.mount_check = mount_check
     self.logger = logger or get_logger({}, log_route='replicator-rpc')
Example #28
0
 def __init__(self, conf):
     self.logger = get_logger(conf, log_route='account-server')
     self.root = conf.get('devices', '/srv/node')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     self.replicator_rpc = ReplicatorRpc(self.root,
                                         DATADIR,
                                         AccountBroker,
                                         self.mount_check,
                                         logger=self.logger)
     self.auto_create_account_prefix = \
         conf.get('auto_create_account_prefix') or '.'
Example #29
0
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='object-expirer')
     self.interval = int(conf.get('interval') or 300)
     self.expiring_objects_account = \
         (conf.get('auto_create_account_prefix') or '.') + \
         'expiring_objects'
     self.retries = int(conf.get('retries') or 3)
     self.app = loadapp('config:' + (
         conf.get('__file__') or '/etc/chase/object-expirer.conf'))
     self.report_interval = int(conf.get('report_interval') or 300)
     self.report_first_time = self.report_last_time = time()
     self.report_objects = 0
Example #30
0
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='object-expirer')
     self.interval = int(conf.get('interval') or 300)
     self.expiring_objects_account = \
         (conf.get('auto_create_account_prefix') or '.') + \
         'expiring_objects'
     self.retries = int(conf.get('retries') or 3)
     self.app = loadapp('config:' + (conf.get('__file__') or
                        '/etc/chase/object-expirer.conf'))
     self.report_interval = int(conf.get('report_interval') or 300)
     self.report_first_time = self.report_last_time = time()
     self.report_objects = 0
Example #31
0
 def __init__(self, conf):
     self.logger = get_logger(conf, log_route='container-server')
     self.root = conf.get('devices', '/srv/node/')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     self.node_timeout = int(conf.get('node_timeout', 3))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.allowed_sync_hosts = [h.strip()
         for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
         if h.strip()]
     self.replicator_rpc = ReplicatorRpc(self.root, DATADIR,
         ContainerBroker, self.mount_check, logger=self.logger)
     self.auto_create_account_prefix = \
         conf.get('auto_create_account_prefix') or '.'
Example #32
0
 def __init__(self, app, conf, *args, **kwargs):
     self.app = app
     self.devices = conf.get('devices', '/srv/node/')
     chase_dir = conf.get('chase_dir', '/etc/chase')
     self.logger = get_logger(conf, log_route='recon')
     self.recon_cache_path = conf.get('recon_cache_path', \
         '/var/cache/chase')
     self.object_recon_cache = "%s/object.recon" % self.recon_cache_path
     self.account_ring_path = os.path.join(chase_dir, 'account.ring.gz')
     self.container_ring_path = os.path.join(chase_dir, 'container.ring.gz')
     self.object_ring_path = os.path.join(chase_dir, 'object.ring.gz')
     self.rings = [self.account_ring_path, self.container_ring_path, \
         self.object_ring_path]
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
Example #33
0
 def __init__(self, app, conf, *args, **kwargs):
     self.app = app
     self.devices = conf.get('devices', '/srv/node/')
     chase_dir = conf.get('chase_dir', '/etc/chase')
     self.logger = get_logger(conf, log_route='recon')
     self.recon_cache_path = conf.get('recon_cache_path', \
         '/var/cache/chase')
     self.object_recon_cache = "%s/object.recon" % self.recon_cache_path
     self.account_ring_path = os.path.join(chase_dir, 'account.ring.gz')
     self.container_ring_path = os.path.join(chase_dir, 'container.ring.gz')
     self.object_ring_path = os.path.join(chase_dir, 'object.ring.gz')
     self.rings = [self.account_ring_path, self.container_ring_path, \
         self.object_ring_path]
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
Example #34
0
 def __init__(self, conf, container_ring=None, object_ring=None):
     #: The dict of configuration values from the [container-sync] section
     #: of the container-server.conf.
     self.conf = conf
     #: Logger to use for container-sync log lines.
     self.logger = get_logger(conf, log_route='container-sync')
     #: Path to the local device mount points.
     self.devices = conf.get('devices', '/srv/node')
     #: Indicates whether mount points should be verified as actual mount
     #: points (normally true, false for tests and SAIO).
     self.mount_check = \
         conf.get('mount_check', 'true').lower() in TRUE_VALUES
     #: Minimum time between full scans. This is to keep the daemon from
     #: running wild on near empty systems.
     self.interval = int(conf.get('interval', 300))
     #: Maximum amount of time to spend syncing a container before moving on
     #: to the next one. If a container sync hasn't finished in this time,
     #: it'll just be resumed next scan.
     self.container_time = int(conf.get('container_time', 60))
     #: The list of hosts we're allowed to send syncs to.
     self.allowed_sync_hosts = [
         h.strip()
         for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
         if h.strip()
     ]
     self.proxy = conf.get('sync_proxy')
     #: Number of containers with sync turned on that were successfully
     #: synced.
     self.container_syncs = 0
     #: Number of successful DELETEs triggered.
     self.container_deletes = 0
     #: Number of successful PUTs triggered.
     self.container_puts = 0
     #: Number of containers that didn't have sync turned on.
     self.container_skips = 0
     #: Number of containers that had a failure of some type.
     self.container_failures = 0
     #: Time of last stats report.
     self.reported = time()
     chase_dir = conf.get('chase_dir', '/etc/chase')
     #: chase.common.ring.Ring for locating containers.
     self.container_ring = container_ring or \
         Ring(os.path.join(chase_dir, 'container.ring.gz'))
     #: chase.common.ring.Ring for locating objects.
     self.object_ring = object_ring or \
         Ring(os.path.join(chase_dir, 'object.ring.gz'))
     self._myips = whataremyips()
     self._myport = int(conf.get('bind_port', 6001))
Example #35
0
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='object-updater')
     self.devices = conf.get('devices', '/srv/node')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     chase_dir = conf.get('chase_dir', '/etc/chase')
     self.interval = int(conf.get('interval', 300))
     self.container_ring_path = os.path.join(chase_dir, 'container.ring.gz')
     self.container_ring = None
     self.concurrency = int(conf.get('concurrency', 1))
     self.slowdown = float(conf.get('slowdown', 0.01))
     self.node_timeout = int(conf.get('node_timeout', 10))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.successes = 0
     self.failures = 0
Example #36
0
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='object-updater')
     self.devices = conf.get('devices', '/srv/node')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     chase_dir = conf.get('chase_dir', '/etc/chase')
     self.interval = int(conf.get('interval', 300))
     self.container_ring_path = os.path.join(chase_dir, 'container.ring.gz')
     self.container_ring = None
     self.concurrency = int(conf.get('concurrency', 1))
     self.slowdown = float(conf.get('slowdown', 0.01))
     self.node_timeout = int(conf.get('node_timeout', 10))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.successes = 0
     self.failures = 0
Example #37
0
    def __init__(self, app, conf, logger=None):
        self.app = app
        if logger:
            self.logger = logger
        else:
            self.logger = get_logger(conf, log_route='ratelimit')
        self.account_ratelimit = float(conf.get('account_ratelimit', 0))
        self.max_sleep_time_seconds = \
            float(conf.get('max_sleep_time_seconds', 60))
        self.log_sleep_time_seconds = \
            float(conf.get('log_sleep_time_seconds', 0))
        self.clock_accuracy = int(conf.get('clock_accuracy', 1000))
        self.rate_buffer_seconds = int(conf.get('rate_buffer_seconds', 5))
        self.ratelimit_whitelist = [
            acc.strip() for acc in conf.get('account_whitelist', '').split(',')
            if acc.strip()
        ]
        self.ratelimit_blacklist = [
            acc.strip() for acc in conf.get('account_blacklist', '').split(',')
            if acc.strip()
        ]
        self.memcache_client = None
        conf_limits = []
        for conf_key in conf.keys():
            if conf_key.startswith('container_ratelimit_'):
                cont_size = int(conf_key[len('container_ratelimit_'):])
                rate = float(conf[conf_key])
                conf_limits.append((cont_size, rate))

        conf_limits.sort()
        self.container_ratelimits = []
        while conf_limits:
            cur_size, cur_rate = conf_limits.pop(0)
            if conf_limits:
                next_size, next_rate = conf_limits[0]
                slope = (float(next_rate) - float(cur_rate)) \
                      / (next_size - cur_size)

                def new_scope(cur_size, slope, cur_rate):
                    # making new scope for variables
                    return lambda x: (x - cur_size) * slope + cur_rate

                line_func = new_scope(cur_size, slope, cur_rate)
            else:
                line_func = lambda x: cur_rate

            self.container_ratelimits.append((cur_size, cur_rate, line_func))
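The container_ratelimit_* handling above builds a piecewise-linear function between configured (size, rate) points; a self-contained sketch with hypothetical config values makes the interpolation concrete:

    # Hypothetical settings: 100 req/s at 100 objects, 10 req/s at 1000
    # objects; these numbers are illustrative, not defaults.
    conf = {'container_ratelimit_100': '100.0',
            'container_ratelimit_1000': '10.0'}
    conf_limits = sorted(
        (int(k[len('container_ratelimit_'):]), float(v))
        for k, v in conf.items() if k.startswith('container_ratelimit_'))

    container_ratelimits = []
    while conf_limits:
        cur_size, cur_rate = conf_limits.pop(0)
        if conf_limits:
            next_size, next_rate = conf_limits[0]
            slope = (next_rate - cur_rate) / (next_size - cur_size)

            def new_scope(cur_size, slope, cur_rate):
                # bind the current values so the lambda keeps this segment
                return lambda x: (x - cur_size) * slope + cur_rate

            line_func = new_scope(cur_size, slope, cur_rate)
        else:
            line_func = lambda x, rate=cur_rate: rate
        container_ratelimits.append((cur_size, cur_rate, line_func))

    # Rate for a container holding 550 objects, interpolated between the
    # two configured points: (550 - 100) * -0.1 + 100.0 == 55.0
    print(container_ratelimits[0][2](550))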
Example #38
0
 def __init__(self, conf, container_ring=None, object_ring=None):
     #: The dict of configuration values from the [container-sync] section
     #: of the container-server.conf.
     self.conf = conf
     #: Logger to use for container-sync log lines.
     self.logger = get_logger(conf, log_route='container-sync')
     #: Path to the local device mount points.
     self.devices = conf.get('devices', '/srv/node')
     #: Indicates whether mount points should be verified as actual mount
     #: points (normally true, false for tests and SAIO).
     self.mount_check = \
         conf.get('mount_check', 'true').lower() in TRUE_VALUES
     #: Minimum time between full scans. This is to keep the daemon from
     #: running wild on near empty systems.
     self.interval = int(conf.get('interval', 300))
     #: Maximum amount of time to spend syncing a container before moving on
     #: to the next one. If a container sync hasn't finished in this time,
     #: it'll just be resumed next scan.
     self.container_time = int(conf.get('container_time', 60))
     #: The list of hosts we're allowed to send syncs to.
     self.allowed_sync_hosts = [h.strip()
         for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
         if h.strip()]
     self.proxy = conf.get('sync_proxy')
     #: Number of containers with sync turned on that were successfully
     #: synced.
     self.container_syncs = 0
     #: Number of successful DELETEs triggered.
     self.container_deletes = 0
     #: Number of successful PUTs triggered.
     self.container_puts = 0
     #: Number of containers that didn't have sync turned on.
     self.container_skips = 0
     #: Number of containers that had a failure of some type.
     self.container_failures = 0
     #: Time of last stats report.
     self.reported = time()
     chase_dir = conf.get('chase_dir', '/etc/chase')
     #: chase.common.ring.Ring for locating containers.
     self.container_ring = container_ring or \
         Ring(os.path.join(chase_dir, 'container.ring.gz'))
     #: chase.common.ring.Ring for locating objects.
     self.object_ring = object_ring or \
         Ring(os.path.join(chase_dir, 'object.ring.gz'))
     self._myips = whataremyips()
     self._myport = int(conf.get('bind_port', 6001))
Example #39
0
 def __init__(self, app, conf):
     self.app = app
     self.conf = conf
     self.logger = get_logger(conf, log_route='tempauth')
     self.log_headers = conf.get('log_headers') == 'True'
     self.reseller_prefix = conf.get('reseller_prefix', 'AUTH').strip()
     if self.reseller_prefix and self.reseller_prefix[-1] != '_':
         self.reseller_prefix += '_'
     self.auth_prefix = conf.get('auth_prefix', '/auth/')
     if not self.auth_prefix:
         self.auth_prefix = '/auth/'
     if self.auth_prefix[0] != '/':
         self.auth_prefix = '/' + self.auth_prefix
     if self.auth_prefix[-1] != '/':
         self.auth_prefix += '/'
     self.token_life = int(conf.get('token_life', 86400))
     self.allowed_sync_hosts = [
         h.strip()
         for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
         if h.strip()
     ]
     self.users = {}
     for conf_key in conf:
         if conf_key.startswith('user_'):
             values = conf[conf_key].split()
             if not values:
                 raise ValueError('%s has no key set' % conf_key)
             key = values.pop(0)
             if values and '://' in values[-1]:
                 url = values.pop()
             else:
                 url = 'https://' if 'cert_file' in conf else 'http://'
                 ip = conf.get('bind_ip', '127.0.0.1')
                 if ip == '0.0.0.0':
                     ip = '127.0.0.1'
                 url += ip
                 url += ':' + conf.get('bind_port', '8080') + '/v1/' + \
                        self.reseller_prefix + conf_key.split('_')[1]
             groups = values
             self.users[conf_key.split('_', 1)[1].replace('_', ':')] = {
                 'key': key,
                 'url': url,
                 'groups': values
             }
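The user_ parsing above turns entries of the form user_<account>_<user> into the self.users mapping; the walk-through below uses a hypothetical conf entry and the same defaults the code falls back to (reseller prefix 'AUTH_', bind address 127.0.0.1:8080):

    # Hypothetical conf entry: account 'test', user 'tester', key 'testing',
    # group '.admin'. Mirrors the parsing above using the default URL branch.
    conf = {'user_test_tester': 'testing .admin'}
    reseller_prefix = 'AUTH_'
    users = {}
    for conf_key in conf:
        if conf_key.startswith('user_'):
            values = conf[conf_key].split()
            key = values.pop(0)                       # 'testing'
            url = 'http://' + conf.get('bind_ip', '127.0.0.1')
            url += ':' + conf.get('bind_port', '8080') + '/v1/' + \
                   reseller_prefix + conf_key.split('_')[1]
            users[conf_key.split('_', 1)[1].replace('_', ':')] = {
                'key': key, 'url': url, 'groups': values}
    print(users)
    # {'test:tester': {'key': 'testing',
    #                  'url': 'http://127.0.0.1:8080/v1/AUTH_test',
    #                  'groups': ['.admin']}}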
Example #40
0
 def __init__(self, conf):
     self.logger = get_logger(conf, log_route='container-server')
     self.root = conf.get('devices', '/srv/node/')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     self.node_timeout = int(conf.get('node_timeout', 3))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.allowed_sync_hosts = [
         h.strip()
         for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
         if h.strip()
     ]
     self.replicator_rpc = ReplicatorRpc(self.root,
                                         DATADIR,
                                         ContainerBroker,
                                         self.mount_check,
                                         logger=self.logger)
     self.auto_create_account_prefix = \
         conf.get('auto_create_account_prefix') or '.'
Example #41
0
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='replicator')
     self.root = conf.get('devices', '/srv/node')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     self.port = int(conf.get('bind_port', self.default_port))
     concurrency = int(conf.get('concurrency', 8))
     self.cpool = GreenPool(size=concurrency)
     chase_dir = conf.get('chase_dir', '/etc/chase')
     self.ring = ring.Ring(os.path.join(chase_dir, self.ring_file))
     self.per_diff = int(conf.get('per_diff', 1000))
     self.run_pause = int(conf.get('run_pause', 30))
     self.vm_test_mode = conf.get(
         'vm_test_mode', 'no').lower() in ('yes', 'true', 'on', '1')
     self.node_timeout = int(conf.get('node_timeout', 10))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
     self._zero_stats()
Example #42
0
    def __init__(self, app, conf, logger=None):
        self.app = app
        if logger:
            self.logger = logger
        else:
            self.logger = get_logger(conf, log_route='ratelimit')
        self.account_ratelimit = float(conf.get('account_ratelimit', 0))
        self.max_sleep_time_seconds = \
            float(conf.get('max_sleep_time_seconds', 60))
        self.log_sleep_time_seconds = \
            float(conf.get('log_sleep_time_seconds', 0))
        self.clock_accuracy = int(conf.get('clock_accuracy', 1000))
        self.rate_buffer_seconds = int(conf.get('rate_buffer_seconds', 5))
        self.ratelimit_whitelist = [acc.strip() for acc in
            conf.get('account_whitelist', '').split(',') if acc.strip()]
        self.ratelimit_blacklist = [acc.strip() for acc in
            conf.get('account_blacklist', '').split(',') if acc.strip()]
        self.memcache_client = None
        conf_limits = []
        for conf_key in conf.keys():
            if conf_key.startswith('container_ratelimit_'):
                cont_size = int(conf_key[len('container_ratelimit_'):])
                rate = float(conf[conf_key])
                conf_limits.append((cont_size, rate))

        conf_limits.sort()
        self.container_ratelimits = []
        while conf_limits:
            cur_size, cur_rate = conf_limits.pop(0)
            if conf_limits:
                next_size, next_rate = conf_limits[0]
                slope = (float(next_rate) - float(cur_rate)) \
                      / (next_size - cur_size)

                def new_scope(cur_size, slope, cur_rate):
                    # making new scope for variables
                    return lambda x: (x - cur_size) * slope + cur_rate
                line_func = new_scope(cur_size, slope, cur_rate)
            else:
                line_func = lambda x: cur_rate

            self.container_ratelimits.append((cur_size, cur_rate, line_func))
Example #43
0
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='replicator')
     self.root = conf.get('devices', '/srv/node')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     self.port = int(conf.get('bind_port', self.default_port))
     concurrency = int(conf.get('concurrency', 8))
     self.cpool = GreenPool(size=concurrency)
     chase_dir = conf.get('chase_dir', '/etc/chase')
     self.ring = ring.Ring(os.path.join(chase_dir, self.ring_file))
     self.per_diff = int(conf.get('per_diff', 1000))
     self.run_pause = int(conf.get('run_pause', 30))
     self.vm_test_mode = conf.get('vm_test_mode',
                                  'no').lower() in ('yes', 'true', 'on',
                                                    '1')
     self.node_timeout = int(conf.get('node_timeout', 10))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
     self._zero_stats()
Example #44
0
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='account-reaper')
     self.devices = conf.get('devices', '/srv/node')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     self.interval = int(conf.get('interval', 3600))
     chase_dir = conf.get('chase_dir', '/etc/chase')
     self.account_ring_path = os.path.join(chase_dir, 'account.ring.gz')
     self.container_ring_path = os.path.join(chase_dir, 'container.ring.gz')
     self.object_ring_path = os.path.join(chase_dir, 'object.ring.gz')
     self.account_ring = None
     self.container_ring = None
     self.object_ring = None
     self.node_timeout = int(conf.get('node_timeout', 10))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.myips = whataremyips()
     self.concurrency = int(conf.get('concurrency', 25))
     self.container_concurrency = self.object_concurrency = \
         sqrt(self.concurrency)
     self.container_pool = GreenPool(size=self.container_concurrency)
Example #45
0
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='account-reaper')
     self.devices = conf.get('devices', '/srv/node')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     self.interval = int(conf.get('interval', 3600))
     chase_dir = conf.get('chase_dir', '/etc/chase')
     self.account_ring_path = os.path.join(chase_dir, 'account.ring.gz')
     self.container_ring_path = os.path.join(chase_dir, 'container.ring.gz')
     self.object_ring_path = os.path.join(chase_dir, 'object.ring.gz')
     self.account_ring = None
     self.container_ring = None
     self.object_ring = None
     self.node_timeout = int(conf.get('node_timeout', 10))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.myips = whataremyips()
     self.concurrency = int(conf.get('concurrency', 25))
     self.container_concurrency = self.object_concurrency = \
         sqrt(self.concurrency)
     self.container_pool = GreenPool(size=self.container_concurrency)
Example #46
0
 def __init__(self, app, conf):
     self.app = app
     self.conf = conf
     self.logger = get_logger(conf, log_route='tempauth')
     self.log_headers = conf.get('log_headers') == 'True'
     self.reseller_prefix = conf.get('reseller_prefix', 'AUTH').strip()
     if self.reseller_prefix and self.reseller_prefix[-1] != '_':
         self.reseller_prefix += '_'
     self.auth_prefix = conf.get('auth_prefix', '/auth/')
     if not self.auth_prefix:
         self.auth_prefix = '/auth/'
     if self.auth_prefix[0] != '/':
         self.auth_prefix = '/' + self.auth_prefix
     if self.auth_prefix[-1] != '/':
         self.auth_prefix += '/'
     self.token_life = int(conf.get('token_life', 86400))
     self.allowed_sync_hosts = [h.strip()
         for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
         if h.strip()]
     self.users = {}
     for conf_key in conf:
         if conf_key.startswith('user_'):
             values = conf[conf_key].split()
             if not values:
                 raise ValueError('%s has no key set' % conf_key)
             key = values.pop(0)
             if values and '://' in values[-1]:
                 url = values.pop()
             else:
                 url = 'https://' if 'cert_file' in conf else 'http://'
                 ip = conf.get('bind_ip', '127.0.0.1')
                 if ip == '0.0.0.0':
                     ip = '127.0.0.1'
                 url += ip
                 url += ':' + conf.get('bind_port', '8080') + '/v1/' + \
                        self.reseller_prefix + conf_key.split('_')[1]
             groups = values
             self.users[conf_key.split('_', 1)[1].replace('_', ':')] = {
                 'key': key, 'url': url, 'groups': values}
Example #47
0
    def test_run_daemon(self):
        sample_conf = """[my-daemon]
user = %s
""" % getuser()
        with tmpfile(sample_conf) as conf_file:
            daemon.run_daemon(MyDaemon, conf_file)
            self.assertEquals(MyDaemon.forever_called, True)
            daemon.run_daemon(MyDaemon, conf_file, once=True)
            self.assertEquals(MyDaemon.once_called, True)

            # test raise in daemon code
            MyDaemon.run_once = MyDaemon.run_raise
            self.assertRaises(OSError, daemon.run_daemon, MyDaemon,
                              conf_file, once=True)

            # test user quit
            MyDaemon.run_forever = MyDaemon.run_quit
            sio = StringIO()
            logger = logging.getLogger('server')
            logger.addHandler(logging.StreamHandler(sio))
            logger = utils.get_logger(None, 'server', log_route='server')
            daemon.run_daemon(MyDaemon, conf_file, logger=logger)
            self.assert_('user quit' in sio.getvalue().lower())
Example #48
0
 def __init__(self, conf, zero_byte_only_at_fps=0):
     self.conf = conf
     self.logger = get_logger(conf, log_route='object-auditor')
     self.devices = conf.get('devices', '/srv/node')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
         TRUE_VALUES
     self.max_files_per_second = float(conf.get('files_per_second', 20))
     self.max_bytes_per_second = float(
         conf.get('bytes_per_second', 10000000))
     self.auditor_type = 'ALL'
     self.zero_byte_only_at_fps = zero_byte_only_at_fps
     if self.zero_byte_only_at_fps:
         self.max_files_per_second = float(self.zero_byte_only_at_fps)
         self.auditor_type = 'ZBF'
     self.log_time = int(conf.get('log_time', 3600))
     self.files_running_time = 0
     self.bytes_running_time = 0
     self.bytes_processed = 0
     self.total_bytes_processed = 0
     self.total_files_processed = 0
     self.passes = 0
     self.quarantines = 0
     self.errors = 0
Example #49
0
 def __init__(self, conf, zero_byte_only_at_fps=0):
     self.conf = conf
     self.logger = get_logger(conf, log_route='object-auditor')
     self.devices = conf.get('devices', '/srv/node')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
         TRUE_VALUES
     self.max_files_per_second = float(conf.get('files_per_second', 20))
     self.max_bytes_per_second = float(conf.get('bytes_per_second',
                                                10000000))
     self.auditor_type = 'ALL'
     self.zero_byte_only_at_fps = zero_byte_only_at_fps
     if self.zero_byte_only_at_fps:
         self.max_files_per_second = float(self.zero_byte_only_at_fps)
         self.auditor_type = 'ZBF'
     self.log_time = int(conf.get('log_time', 3600))
     self.files_running_time = 0
     self.bytes_running_time = 0
     self.bytes_processed = 0
     self.total_bytes_processed = 0
     self.total_files_processed = 0
     self.passes = 0
     self.quarantines = 0
     self.errors = 0
Example #50
0
 def __init__(self, conf):
     self.conf = conf
     self.logger = utils.get_logger(None, 'server', log_route='server')
     MyDaemon.forever_called = False
     MyDaemon.once_called = False
Example #51
0
    """

    try:
        conf = appconfig('config:%s' % conf_file, name=app_section)
    except Exception, e:
        print "Error trying to load config %s: %s" % (conf_file, e)
        return
    validate_configuration()

    # pre-configure logger
    log_name = conf.get('log_name', app_section)
    if 'logger' in kwargs:
        logger = kwargs.pop('logger')
    else:
        logger = get_logger(conf,
                            log_name,
                            log_to_console=kwargs.pop('verbose', False),
                            log_route='wsgi')

    # bind to address and port
    sock = get_socket(conf, default_port=kwargs.get('default_port', 8080))
    # remaining tasks should not require elevated privileges
    drop_privileges(conf.get('user', 'chase'))

    # Ensure the application can be loaded before proceeding.
    loadapp('config:%s' % conf_file, global_conf={'log_name': log_name})

    # redirect errors to logger and close stdio
    capture_stdio(logger)

    def run_server():
        wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
Example #52
0
 def __init__(self, conf, **options):
     self.conf = conf
     self.logger = get_logger(conf, log_route='object-auditor')
     self.conf_zero_byte_fps = int(conf.get(
             'zero_byte_files_per_second', 50))
Example #53
0
    :param app_section: App name from conf file to load config from
    """

    try:
        conf = appconfig("config:%s" % conf_file, name=app_section)
    except Exception, e:
        print "Error trying to load config %s: %s" % (conf_file, e)
        return
    validate_configuration()

    # pre-configure logger
    log_name = conf.get("log_name", app_section)
    if "logger" in kwargs:
        logger = kwargs.pop("logger")
    else:
        logger = get_logger(conf, log_name, log_to_console=kwargs.pop("verbose", False), log_route="wsgi")

    # bind to address and port
    sock = get_socket(conf, default_port=kwargs.get("default_port", 8080))
    # remaining tasks should not require elevated privileges
    drop_privileges(conf.get("user", "chase"))

    # Ensure the application can be loaded before proceeding.
    loadapp("config:%s" % conf_file, global_conf={"log_name": log_name})

    # redirect errors to logger and close stdio
    capture_stdio(logger)

    def run_server():
        wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
        # Turn off logging requests by the underlying WSGI software.
Example #54
0
 def __init__(self, conf):
     self.conf = conf
     self.logger = utils.get_logger(None, 'server', log_route='server')
     MyDaemon.forever_called = False
     MyDaemon.once_called = False
Example #55
0
 def __init__(self, conf):
     self.conf = conf
     self.logger = utils.get_logger(conf, log_route='daemon')
Example #56
0
 def __init__(self, conf, **options):
     self.conf = conf
     self.logger = get_logger(conf, log_route='object-auditor')
     self.conf_zero_byte_fps = int(
         conf.get('zero_byte_files_per_second', 50))
Example #57
0
 def setUp(self):
     self.logger = get_logger({})
     self.logger.txn_id = None