Example #1
 def has_expirer_section(conf_path):
     try:
         readconf(conf_path, section_name="object-expirer")
     except ValueError:
         return False
     else:
         return True
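A minimal usage sketch for the helper above, assuming readconf raises ValueError for a missing section as in this example (some releases call sys.exit instead); the config path is illustrative:

    from swift.common.utils import readconf

    conf_path = '/etc/swift/object-expirer.conf'  # illustrative path
    if has_expirer_section(conf_path):
        expirer_conf = readconf(conf_path, section_name='object-expirer')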
Example #2
    def test_readconf(self):
        conf = """[section1]
foo = bar

[section2]
log_name = yarr"""
        # setup a real file
        with open("/tmp/test", "wb") as f:
            f.write(conf)
        make_filename = lambda: "/tmp/test"
        # setup a file stream
        make_fp = lambda: StringIO(conf)
        for conf_object_maker in (make_filename, make_fp):
            result = utils.readconf(conf_object_maker())
            expected = {"log_name": None, "section1": {"foo": "bar"}, "section2": {"log_name": "yarr"}}
            self.assertEquals(result, expected)
            result = utils.readconf(conf_object_maker(), "section1")
            expected = {"log_name": "section1", "foo": "bar"}
            self.assertEquals(result, expected)
            result = utils.readconf(conf_object_maker(), "section2").get("log_name")
            expected = "yarr"
            self.assertEquals(result, expected)
            result = utils.readconf(conf_object_maker(), "section1", log_name="foo").get("log_name")
            expected = "foo"
            self.assertEquals(result, expected)
            result = utils.readconf(conf_object_maker(), "section1", defaults={"bar": "baz"})
            expected = {"log_name": "section1", "foo": "bar", "bar": "baz"}
            self.assertEquals(result, expected)
        self.assertRaises(SystemExit, utils.readconf, "/tmp/test", "section3")
        os.unlink("/tmp/test")
        self.assertRaises(SystemExit, utils.readconf, "/tmp/test")
Example #3
    def test_readconf(self):
        conf = '''[section1]
foo = bar

[section2]
log_name = yarr'''
        f = open('/tmp/test', 'wb')
        f.write(conf)
        f.close()
        result = utils.readconf('/tmp/test')
        expected = {'log_name': None,
                    'section1': {'foo': 'bar'},
                    'section2': {'log_name': 'yarr'}}
        self.assertEquals(result, expected)
        result = utils.readconf('/tmp/test', 'section1')
        expected = {'log_name': 'section1', 'foo': 'bar'}
        self.assertEquals(result, expected)
        result = utils.readconf('/tmp/test', 'section2').get('log_name')
        expected = 'yarr'
        self.assertEquals(result, expected)
        result = utils.readconf('/tmp/test', 'section1',
                                log_name='foo').get('log_name')
        expected = 'foo'
        self.assertEquals(result, expected)
        result = utils.readconf('/tmp/test', 'section1',
                                defaults={'bar': 'baz'})
        expected = {'log_name': 'section1', 'foo': 'bar', 'bar': 'baz'}
        self.assertEquals(result, expected)
        os.unlink('/tmp/test')
Example #4
 def test_rsync_tempfile_timeout_auto_option(self):
     # if we don't have access to the replicator config section we'll use
     # our default
     auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
                                            self.rcache, self.devices)
     self.assertEqual(auditor_worker.rsync_tempfile_timeout, 86400)
     # if the rsync_tempfile_timeout option is set explicitly we use that
     self.conf['rsync_tempfile_timeout'] = '1800'
     auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
                                            self.rcache, self.devices)
     self.assertEqual(auditor_worker.rsync_tempfile_timeout, 1800)
     # if we have a real config we can be a little smarter
     config_path = os.path.join(self.testdir, 'objserver.conf')
     stub_config = """
     [object-auditor]
     rsync_tempfile_timeout = auto
     """
     with open(config_path, 'w') as f:
         f.write(textwrap.dedent(stub_config))
     # the Daemon loader will hand the object-auditor config to the
     # auditor who will build the workers from it
     conf = readconf(config_path, 'object-auditor')
     auditor_worker = auditor.AuditorWorker(conf, self.logger,
                                            self.rcache, self.devices)
     # if there is no object-replicator section we still have to fall back
     # to default because we can't parse the config for that section!
     self.assertEqual(auditor_worker.rsync_tempfile_timeout, 86400)
     stub_config = """
     [object-replicator]
     [object-auditor]
     rsync_tempfile_timeout = auto
     """
     with open(os.path.join(self.testdir, 'objserver.conf'), 'w') as f:
         f.write(textwrap.dedent(stub_config))
     conf = readconf(config_path, 'object-auditor')
     auditor_worker = auditor.AuditorWorker(conf, self.logger,
                                            self.rcache, self.devices)
     # if the object-replicator section will parse but does not override
     # the default rsync_timeout we assume the default rsync_timeout value
     # and add 15mins
     self.assertEqual(auditor_worker.rsync_tempfile_timeout,
                      replicator.DEFAULT_RSYNC_TIMEOUT + 900)
     stub_config = """
     [DEFAULT]
     reclaim_age = 1209600
     [object-replicator]
     rsync_timeout = 3600
     [object-auditor]
     rsync_tempfile_timeout = auto
     """
     with open(os.path.join(self.testdir, 'objserver.conf'), 'w') as f:
         f.write(textwrap.dedent(stub_config))
     conf = readconf(config_path, 'object-auditor')
     auditor_worker = auditor.AuditorWorker(conf, self.logger,
                                            self.rcache, self.devices)
     # if there is an object-replicator section with a rsync_timeout
     # configured we'll use that value (3600) + 900
     self.assertEqual(auditor_worker.rsync_tempfile_timeout, 3600 + 900)
Example #5
def get_config():
    """
    Attempt to get a functional config dictionary.
    """
    config_file = 'test/dispatcher_test.conf'
    config = {}
    try:
        try:
            config = readconf(config_file, 'app:dispatcher')
        except MissingSectionHeaderError:
            config_fp = StringIO('[func_test]\n' + open(config_file).read())
            config = readconf(config_fp, 'func_test')
    except SystemExit:
        print >>sys.stderr, 'UNABLE TO READ FUNCTIONAL TESTS CONFIG FILE'
    return config
Example #6
    def __init__(self, app, conf):
        self.app = app

        keymaster_config_path = conf.get('keymaster_config_path')
        if keymaster_config_path:
            if any(opt in conf for opt in ('encryption_root_secret',)):
                raise ValueError('keymaster_config_path is set, but there '
                                 'are other config options specified!')
            conf = readconf(keymaster_config_path, 'keymaster')

        self.root_secret = conf.get('encryption_root_secret')
        try:
            # b64decode will silently discard bad characters, but we should
            # treat them as an error
            if not isinstance(self.root_secret, six.string_types) or any(
                    c not in string.digits + string.ascii_letters + '/+\r\n'
                    for c in self.root_secret.strip('\r\n=')):
                raise ValueError
            self.root_secret = base64.b64decode(self.root_secret)
            if len(self.root_secret) < 32:
                raise ValueError
        except (TypeError, ValueError):
            raise ValueError(
                'encryption_root_secret option in %s must be a base64 '
                'encoding of at least 32 raw bytes' % (
                    keymaster_config_path or 'proxy-server.conf'))
Example #7
def get_config():
    """
    Attempt to get a functional config dictionary.
    """
    config_file = os.environ.get('SWIFT_TEST_CONFIG_FILE',
                                 '/etc/gluster-object/func_test.conf')
    config = {}
    try:
        try:
            config = readconf(config_file, 'func_test')
        except MissingSectionHeaderError:
            config_fp = StringIO('[func_test]\n' + open(config_file).read())
            config = readconf(config_fp, 'func_test')
    except SystemExit:
        print >>sys.stderr, 'UNABLE TO READ FUNCTIONAL TESTS CONFIG FILE'
    return config
Example #8
 def _get_objects_dir(self, onode):
     device = onode["device"]
     node_id = (onode["port"] - 6000) / 10
     obj_server_conf = readconf(self.configs["object-server"][node_id])
     devices = obj_server_conf["app:object-server"]["devices"]
     obj_dir = "%s/%s" % (devices, device)
     return obj_dir
Example #9
 def setUp(self):
     super(TestDbRsyncReplicator, self).setUp()
     cont_configs = [utils.readconf(p, 'container-replicator')
                     for p in self.configs['container-replicator'].values()]
     # Do more than per_diff object PUTs, to force rsync instead of usync
     self.object_puts = 1 + max(int(c.get('per_diff', '1000'))
                                for c in cont_configs)
Example #10
 def _get_objects_dir(self, onode):
     device = onode['device']
     node_id = (onode['port'] - 6000) / 10
     obj_server_conf = readconf(self.configs['object'] % node_id)
     devices = obj_server_conf['app:object-server']['devices']
     obj_dir = '%s/%s' % (devices, device)
     return obj_dir
Example #11
def get_lfs(conf, ring, datadir, default_port, logger):
    """
    Returns LFS for current node

    :param conf: server configuration
    :param ring: server ring file
    :param datadir: server data directory
    :param default_port: default server port
    :param logger: server logger
    :returns : LFS storage class
    :raises SwiftConfigurationError: if fs is invalid
    """
    fs = conf.get('fs', 'xfs')
    try:
        module_name = 'swift_lfs.fs.%s' % fs
        cls_name = 'LFS%s' % fs.upper()
        module = __import__(module_name, fromlist=[cls_name])
        cls = getattr(module, cls_name)
        if '__file__' in conf and fs in conf:
            fs_conf = readconf(conf['__file__'], fs)
            conf = dict(conf, **fs_conf)
        return cls(conf, ring, datadir, default_port, logger)
    except ImportError, e:
        raise SwiftConfigurationError(
            _('Cannot load LFS. Invalid FS: %s. %s') % (fs, e))
Example #12
def get_config(section_name=None, defaults=None):
    """
    Attempt to get a test config dictionary.

    :param section_name: the section to read (all sections if not defined)
    :param defaults: an optional dictionary namespace of defaults
    """
    config = {}
    if defaults is not None:
        config.update(defaults)

    config_file = os.environ.get('SWIFT_TEST_CONFIG_FILE',
                                 '/etc/swift/test.conf')
    try:
        config = readconf(config_file, section_name)
    except IOError:
        if not os.path.exists(config_file):
            print('Unable to read test config %s - file not found'
                  % config_file, file=sys.stderr)
        elif not os.access(config_file, os.R_OK):
            print('Unable to read test config %s - permission denied'
                  % config_file, file=sys.stderr)
    except ValueError as e:
        print(e)
    return config
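A short usage sketch for get_config above; the section name and default value are illustrative:

    import os

    os.environ.setdefault('SWIFT_TEST_CONFIG_FILE', '/etc/swift/test.conf')
    config = get_config('func_test', defaults={'auth_host': '127.0.0.1'})
    # note: if readconf succeeds, its result replaces the defaults entirely;
    # the defaults only survive when the file cannot be read
    auth_host = config.get('auth_host')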
Example #13
 def collect_tasks(self):
     tasks = {"account": {}, "container": {}, "object": {}}
     for t in tasks:
         for level in ("partition", "suffix", "hsh", "item"):
             tasks[t][level] = []
     for section in self.conf:
         try:
             if type(self.conf[section]) is not dict:
                 continue
             if "data_type" in self.conf[section] and self.conf[section]:
                 datatype = self.conf[section]["data_type"]
                 level = self.conf[section]["level"]
                 task_module = self.conf[section]["task_module"]
                 task_class = self.conf[section]["task_class"]
                 task_conf = self.conf[section]["task_conf"]
                 task_method = self.conf[section].get("task_method", "process_%s" % level)
                 _module = __import__(task_module)
                 splits = task_module.split(".")
                 for split in splits[1:]:
                     _module = getattr(_module, split)
                 _class = getattr(_module, task_class)
                 conf = utils.readconf(task_conf, section)
                 _object = _class(conf)
                 self.conf[section]["func"] = getattr(_object, task_method)
                 self.conf[section]["cycles"] = int(self.conf[section].get("cycles", 1))
                 tasks[datatype][level].append(section)
         except:
             print "Failed to parse config file section %s - %s" % (section, sys.exc_info())
             traceback.print_exc()
     return tasks
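A sketch of the kind of conf section collect_tasks above would accept; every name and path here is illustrative:

    example_section = {
        'data_type': 'object',          # account, container or object
        'level': 'partition',           # partition, suffix, hsh or item
        'task_module': 'mypkg.tasks',   # hypothetical dotted module path
        'task_class': 'PartitionTask',  # hypothetical class in that module
        'task_conf': '/etc/swift/mytask.conf',  # handed to utils.readconf
        'cycles': '2',                  # optional, defaults to 1
    }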
Example #14
 def _get_objects_dir(self, onode):
     device = onode["device"]
     _, node_id = get_server_number((onode["ip"], onode["port"]), self.ipport2server)
     obj_server_conf = readconf(self.configs["object-server"][node_id])
     devices = obj_server_conf["app:object-server"]["devices"]
     obj_dir = "%s/%s" % (devices, device)
     return obj_dir
Example #15
def load_server_conf(conf, sections):
    server_conf_file = conf.get('__file__', None)
    if server_conf_file:
        server_conf = readconf(server_conf_file)
        for sect in sections:
            if server_conf.get(sect, None):
                conf.update(server_conf[sect])
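A sketch of calling load_server_conf above from a paste.deploy filter factory; readconf injects the '__file__' key (see Example #35), and the section name and middleware class are illustrative:

    def filter_factory(global_conf, **local_conf):
        conf = dict(global_conf, **local_conf)
        # merge e.g. bind_ip/bind_port from the server's own app section
        load_server_conf(conf, ['app:proxy-server'])

        def factory(app):
            return MyMiddleware(app, conf)  # hypothetical middleware class
        return factory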
Example #16
    def _get_root_secret(self, conf):
        """
        This keymaster requires its ``encryption_root_secret`` option to be
        set. This must be set before first use to a value that is a base64
        encoding of at least 32 bytes. The encryption root secret is stored
        in either proxy-server.conf, or in an external file referenced from
        proxy-server.conf using ``keymaster_config_path``.

        :param conf: the keymaster config section from proxy-server.conf
        :type conf: dict

        :return: the encryption root secret binary bytes
        :rtype: bytearray
        """
        if self.keymaster_config_path:
            keymaster_opts = ['encryption_root_secret']
            if any(opt in conf for opt in keymaster_opts):
                raise ValueError('keymaster_config_path is set, but there '
                                 'are other config options specified: %s' %
                                 ", ".join(list(
                                     set(keymaster_opts).intersection(conf))))
            conf = readconf(self.keymaster_config_path, 'keymaster')
        b64_root_secret = conf.get('encryption_root_secret')
        try:
            binary_root_secret = strict_b64decode(b64_root_secret,
                                                  allow_line_breaks=True)
            if len(binary_root_secret) < 32:
                raise ValueError
            return binary_root_secret
        except ValueError:
            raise ValueError(
                'encryption_root_secret option in %s must be a base64 '
                'encoding of at least 32 raw bytes' % (
                    self.keymaster_config_path or 'proxy-server.conf'))
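The docstring above requires a base64 encoding of at least 32 raw bytes; a minimal sketch of producing such a value with only the standard library:

    import base64
    import os

    # 32 random bytes encode to a 44-character base64 string
    root_secret = base64.b64encode(os.urandom(32)).decode('ascii')
    print('encryption_root_secret = %s' % root_secret)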
Example #17
    def setUp(self):
        resetswift()
        try:
            self.ipport2server = {}
            self.configs = defaultdict(dict)
            self.account_ring = get_ring(
                'account',
                self.acct_cont_required_replicas,
                self.acct_cont_required_devices,
                ipport2server=self.ipport2server,
                config_paths=self.configs)
            self.container_ring = get_ring(
                'container',
                self.acct_cont_required_replicas,
                self.acct_cont_required_devices,
                ipport2server=self.ipport2server,
                config_paths=self.configs)
            self.policy = get_policy(**self.policy_requirements)
            self.object_ring = get_ring(
                self.policy.ring_name,
                self.obj_required_replicas,
                self.obj_required_devices,
                server='object',
                ipport2server=self.ipport2server,
                config_paths=self.configs)

            self.servers_per_port = any(
                int(readconf(c, section_name='object-replicator').get(
                    'servers_per_port', '0'))
                for c in self.configs['object-replicator'].values())

            Manager(['main']).start(wait=False)
            for ipport in self.ipport2server:
                check_server(ipport, self.ipport2server)
            proxy_ipport = ('127.0.0.1', 8080)
            self.ipport2server[proxy_ipport] = 'proxy'
            self.url, self.token, self.account = check_server(
                proxy_ipport, self.ipport2server)
            self.account_1 = {
                'url': self.url, 'token': self.token, 'account': self.account}

            url2, token2 = get_auth(
                'http://%s:%d/auth/v1.0' % proxy_ipport,
                'test2:tester2', 'testing2')
            self.account_2 = {
                'url': url2, 'token': token2, 'account': url2.split('/')[-1]}
            head_account(url2, token2)  # sanity check

            self.replicators = Manager(
                ['account-replicator', 'container-replicator',
                 'object-replicator'])
            self.updaters = Manager(['container-updater', 'object-updater'])
        except BaseException:
            try:
                raise
            finally:
                try:
                    Manager(['all']).kill()
                except Exception:
                    pass
Example #18
    def test_locked_container_dbs(self):

        def run_test(num_locks, catch_503):
            container = 'container-%s' % uuid4()
            client.put_container(self.url, self.token, container)
            db_files = self._get_container_db_files(container)
            db_conns = []
            for i in range(num_locks):
                db_conn = connect(db_files[i])
                db_conn.execute('begin exclusive transaction')
                db_conns.append(db_conn)
            if catch_503:
                try:
                    client.delete_container(self.url, self.token, container)
                except client.ClientException as err:
                    self.assertEqual(err.http_status, 503)
                else:
                    self.fail("Expected ClientException but didn't get it")
            else:
                client.delete_container(self.url, self.token, container)

        proxy_conf = readconf(self.configs['proxy-server'],
                              section_name='app:proxy-server')
        node_timeout = int(proxy_conf.get('node_timeout', 10))
        pool = GreenPool()
        try:
            with Timeout(node_timeout + 5):
                pool.spawn(run_test, 1, False)
                pool.spawn(run_test, 2, True)
                pool.spawn(run_test, 3, True)
                pool.waitall()
        except Timeout as err:
            raise Exception(
                "The server did not return a 503 on container db locks, "
                "it just hangs: %s" % err)
Example #19
 def device_dir(self, server, node):
     server_type, config_number = get_server_number(
         (node['ip'], node['port']), self.ipport2server)
     repl_server = '%s-replicator' % server_type
     conf = readconf(self.configs[repl_server][config_number],
                     section_name=repl_server)
     return os.path.join(conf['devices'], node['device'])
Example #20
def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
    """
    Loads settings from conf, then instantiates daemon "klass" and runs the
    daemon with the specified once kwarg.  The section_name will be derived
    from the daemon "klass" if not provided (e.g. ObjectReplicator =>
    object-replicator).

    :param klass: Class to instantiate, subclass of common.daemon.Daemon
    :param conf_file: Path to configuration file
    :param section_name: Section name from conf file to load config from
    :param once: Passed to daemon run method
    """
    # very often the config section_name is based on the class name
    # the None singleton will be passed through to readconf as is
    if section_name == '':
        section_name = sub(r'([a-z])([A-Z])', r'\1-\2',
                           klass.__name__).lower()
    conf = utils.readconf(conf_file, section_name,
                          log_name=kwargs.get('log_name'))

    # once on command line (i.e. daemonize=false) will over-ride config
    once = once or \
            conf.get('daemonize', 'true').lower() not in utils.TRUE_VALUES

    # pre-configure logger
    if 'logger' in kwargs:
        logger = kwargs.pop('logger')
    else:
        logger = utils.get_logger(conf, conf.get('log_name', section_name),
           log_to_console=kwargs.pop('verbose', False), log_route=section_name)
    try:
        klass(conf).run(once=once, **kwargs)
    except KeyboardInterrupt:
        logger.info('User quit')
    logger.info('Exited')
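The section-name derivation described in the docstring is a plain regex; a standalone illustration:

    from re import sub

    # CamelCase daemon class name -> hyphenated config section name
    assert sub(r'([a-z])([A-Z])', r'\1-\2',
               'ObjectReplicator').lower() == 'object-replicator'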
Example #21
def run_server():
    usage = '''
    %prog start|stop|restart|pause|unpause [--conf=/path/to/some.conf] [-f]
    '''
    args = optparse.OptionParser(usage)
    args.add_option('--foreground', '-f', action="store_true",
                    help="Run in foreground, in debug mode")
    args.add_option('--conf', default="/etc/swift/ring-master.conf",
                    help="path to config. default /etc/swift/ring-master.conf")
    args.add_option('--pid', default="/var/run/swift-ring-master.pid",
                    help="default: /var/run/swift-ring-master.pid")
    options, arguments = args.parse_args()

    if len(sys.argv) <= 1:
        args.print_help()

    if options.foreground:
        conf = readconf(options.conf)
        tap = RingMasterServer(conf)
        tap.start()
        sys.exit(0)

    if len(sys.argv) >= 2:
        conf = readconf(options.conf)
        user = conf['ringmasterd'].get('user', 'swift')
        pfile = conf['ringmasterd'].get('pause_file_path', '/tmp/.srm-pause')
        daemon = RingMasterd(options.pid, user=user)
        if 'start' == sys.argv[1]:
            daemon.start(conf)
        elif 'stop' == sys.argv[1]:
            daemon.stop()
        elif 'restart' == sys.argv[1]:
            daemon.restart(conf)
        elif 'pause' == sys.argv[1]:
            print "Writing pause file"
            with open(pfile, 'w') as f:
                f.write("")
        elif 'unpause' == sys.argv[1]:
            print "Removing pause file"
            unlink(pfile)
        else:
            args.print_help()
            sys.exit(2)
        sys.exit(0)
    else:
        args.print_help()
        sys.exit(2)
Example #22
 def run_custom_daemon(self, klass, conf_section, conf_index,
                       custom_conf, **kwargs):
     conf_file = self.configs[conf_section][conf_index]
     conf = utils.readconf(conf_file, conf_section)
     conf.update(custom_conf)
     daemon = klass(conf, debug_logger('probe'))
     daemon.run_once(**kwargs)
     return daemon
Example #23
 def _get_objects_dir(self, onode):
     device = onode['device']
     _, node_id = get_server_number((onode['ip'], onode['port']),
                                    self.ipport2server)
     obj_server_conf = readconf(self.configs['object-server'][node_id])
     devices = obj_server_conf['app:object-server']['devices']
     obj_dir = '%s/%s' % (devices, device)
     return obj_dir
Example #24
 def _get_objects_dir(self, onode):
     device = onode['device']
     _, node_id = get_server_number((onode['ip'], onode['port']),
                                    self.ipport2server)
     obj_server_conf = readconf(self.configs['object-server'][node_id])
     devices = obj_server_conf['app:object-server']['devices']
     obj_dir = '%s/%s' % (devices, device)
     return obj_dir
Example #25
 def _get_objects_dir(self, onode):
     device = onode['device']
     node_id = (onode['port'] - 6000) / 10
     obj_server_conf = readconf('/etc/swift/object-server/%s.conf' %
                                node_id)
     devices = obj_server_conf['app:object-server']['devices']
     obj_dir = '%s/%s' % (devices, device)
     return obj_dir
Example #26
    def setUp(self):
        resetswift()
        try:
            self.ipport2server = {}
            self.configs = defaultdict(dict)
            self.account_ring = get_ring(
                'account',
                self.acct_cont_required_replicas,
                self.acct_cont_required_devices,
                ipport2server=self.ipport2server,
                config_paths=self.configs)
            self.container_ring = get_ring(
                'container',
                self.acct_cont_required_replicas,
                self.acct_cont_required_devices,
                ipport2server=self.ipport2server,
                config_paths=self.configs)
            self.policy = get_policy(**self.policy_requirements)
            self.object_ring = get_ring(
                self.policy.ring_name,
                self.obj_required_replicas,
                self.obj_required_devices,
                server='object',
                ipport2server=self.ipport2server,
                config_paths=self.configs)

            self.servers_per_port = any(
                int(readconf(c, section_name='object-replicator').get(
                    'servers_per_port', '0'))
                for c in self.configs['object-replicator'].values())

            Manager(['main']).start(wait=True)
            for ipport in self.ipport2server:
                check_server(ipport, self.ipport2server)
            proxy_ipport = ('127.0.0.1', 8080)
            self.ipport2server[proxy_ipport] = 'proxy'
            self.url, self.token, self.account = check_server(
                proxy_ipport, self.ipport2server)
            self.account_1 = {
                'url': self.url, 'token': self.token, 'account': self.account}

            rv = _retry_timeout(_check_proxy, args=(
                proxy_ipport, 'test2:tester2', 'testing2'))
            self.account_2 = {
                k: v for (k, v) in zip(('url', 'token', 'account'), rv)}

            self.replicators = Manager(
                ['account-replicator', 'container-replicator',
                 'object-replicator'])
            self.updaters = Manager(['container-updater', 'object-updater'])
        except BaseException:
            try:
                raise
            finally:
                try:
                    Manager(['all']).kill()
                except Exception:
                    pass
Example #27
    def setUp(self):
        resetswift()
        try:
            self.ipport2server = {}
            self.configs = defaultdict(dict)
            self.account_ring = get_ring(
                'account',
                self.acct_cont_required_replicas,
                self.acct_cont_required_devices,
                ipport2server=self.ipport2server,
                config_paths=self.configs)
            self.container_ring = get_ring(
                'container',
                self.acct_cont_required_replicas,
                self.acct_cont_required_devices,
                ipport2server=self.ipport2server,
                config_paths=self.configs)
            self.policy = get_policy(**self.policy_requirements)
            self.object_ring = get_ring(
                self.policy.ring_name,
                self.obj_required_replicas,
                self.obj_required_devices,
                server='object',
                ipport2server=self.ipport2server,
                config_paths=self.configs)

            self.servers_per_port = any(
                int(readconf(c, section_name='object-replicator').get(
                    'servers_per_port', '0'))
                for c in self.configs['object-replicator'].values())

            Manager(['main']).start(wait=True)
            for ipport in self.ipport2server:
                check_server(ipport, self.ipport2server)
            proxy_ipport = ('127.0.0.1', 8080)
            self.ipport2server[proxy_ipport] = 'proxy'
            self.url, self.token, self.account = check_server(
                proxy_ipport, self.ipport2server)
            self.account_1 = {
                'url': self.url, 'token': self.token, 'account': self.account}

            rv = _retry_timeout(_check_proxy, args=(
                proxy_ipport, 'test2:tester2', 'testing2'))
            self.account_2 = {
                k: v for (k, v) in zip(('url', 'token', 'account'), rv)}

            self.replicators = Manager(
                ['account-replicator', 'container-replicator',
                 'object-replicator'])
            self.updaters = Manager(['container-updater', 'object-updater'])
        except BaseException:
            try:
                raise
            finally:
                try:
                    Manager(['all']).kill()
                except Exception:
                    pass
Example #28
    def __init__(self, conf, logger, rcache, devices, zero_byte_only_at_fps=0):
        self.conf = conf
        self.logger = logger
        self.devices = devices
        self.max_files_per_second = float(conf.get('files_per_second', 20))
        self.max_bytes_per_second = float(
            conf.get('bytes_per_second', 10000000))
        try:
            # ideally unless ops overrides the rsync_tempfile_timeout in the
            # auditor section we can base our behavior on whatever they
            # configure for their replicator
            replicator_config = readconf(self.conf['__file__'],
                                         'object-replicator')
        except (KeyError, ValueError, IOError):
            # if we can't parse the real config (generally a KeyError on
            # __file__, or ValueError on no object-replicator section, or
            # IOError if reading the file failed) we use
            # a very conservative default for rsync_timeout
            default_rsync_timeout = 86400
        else:
            replicator_rsync_timeout = int(
                replicator_config.get('rsync_timeout',
                                      replicator.DEFAULT_RSYNC_TIMEOUT))
            # Here we can do some light math for ops and use the *replicator's*
            # rsync_timeout (plus 15 mins to avoid deleting local tempfiles
            # before the remote replicator kills its rsync)
            default_rsync_timeout = replicator_rsync_timeout + 900
            # there's not really a good reason to assume the replicator
            # section's reclaim_age is more appropriate than the reconstructor
            # reclaim_age - but we're already parsing the config so we can set
            # the default value in our config if it's not already set
            if 'reclaim_age' in replicator_config:
                conf.setdefault('reclaim_age',
                                replicator_config['reclaim_age'])
        self.rsync_tempfile_timeout = config_auto_int_value(
            self.conf.get('rsync_tempfile_timeout'), default_rsync_timeout)
        self.diskfile_router = diskfile.DiskFileRouter(conf, self.logger)

        self.auditor_type = 'ALL'
        self.zero_byte_only_at_fps = zero_byte_only_at_fps
        if self.zero_byte_only_at_fps:
            self.max_files_per_second = float(self.zero_byte_only_at_fps)
            self.auditor_type = 'ZBF'
        self.log_time = int(conf.get('log_time', 3600))
        self.last_logged = 0
        self.files_running_time = 0
        self.bytes_running_time = 0
        self.bytes_processed = 0
        self.total_bytes_processed = 0
        self.total_files_processed = 0
        self.passes = 0
        self.quarantines = 0
        self.errors = 0
        self.rcache = rcache
        self.stats_sizes = sorted(
            [int(s) for s in list_from_csv(conf.get('object_size_stats'))])
        self.stats_buckets = dict([(s, 0)
                                   for s in self.stats_sizes + ['OVER']])
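A quick illustration of the 'auto' resolution used above: config_auto_int_value returns the default for 'auto' or a missing value, and otherwise coerces to int:

    from swift.common.utils import config_auto_int_value

    assert config_auto_int_value('auto', 86400) == 86400
    assert config_auto_int_value(None, 86400) == 86400
    assert config_auto_int_value('1800', 86400) == 1800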
Example #29
def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
    """
    Loads settings from conf, then instantiates daemon "klass" and runs the
    daemon with the specified once kwarg.  The section_name will be derived
    from the daemon "klass" if not provided (e.g. ObjectReplicator =>
    object-replicator).

    :param klass: Class to instantiate, subclass of common.daemon.Daemon
    :param conf_file: Path to configuration file
    :param section_name: Section name from conf file to load config from
    :param once: Passed to daemon run method
    """
    # very often the config section_name is based on the class name
    # the None singleton will be passed through to readconf as is
    if section_name == '':
        section_name = sub(r'([a-z])([A-Z])', r'\1-\2', klass.__name__).lower()
    conf = utils.readconf(conf_file,
                          section_name,
                          log_name=kwargs.get('log_name'))

    # once on command line (i.e. daemonize=false) will over-ride config
    once = once or not utils.config_true_value(conf.get('daemonize', 'true'))

    # pre-configure logger
    if 'logger' in kwargs:
        logger = kwargs.pop('logger')
    else:
        logger = utils.get_logger(conf,
                                  conf.get('log_name', section_name),
                                  log_to_console=kwargs.pop('verbose', False),
                                  log_route=section_name)

    # optional nice/ionice priority scheduling
    utils.modify_priority(conf, logger)

    # disable fallocate if desired
    if utils.config_true_value(conf.get('disable_fallocate', 'no')):
        utils.disable_fallocate()
    # set utils.FALLOCATE_RESERVE if desired
    utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
        utils.config_fallocate_value(conf.get('fallocate_reserve', '1%'))

    # By default, disable eventlet printing stacktraces
    eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no'))
    eventlet.debug.hub_exceptions(eventlet_debug)

    # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on
    # some platforms. This locks in reported times to the timezone in which
    # the server first starts running in locations that periodically change
    # timezones.
    os.environ['TZ'] = time.strftime("%z", time.gmtime())

    try:
        klass(conf).run(once=once, **kwargs)
    except KeyboardInterrupt:
        logger.info('User quit')
    logger.info('Exited')
Example #30
    def __init__(self, conf, logger, rcache, devices, zero_byte_only_at_fps=0):
        self.conf = conf
        self.logger = logger
        self.devices = devices
        self.max_files_per_second = float(conf.get('files_per_second', 20))
        self.max_bytes_per_second = float(conf.get('bytes_per_second',
                                                   10000000))
        try:
            # ideally unless ops overrides the rsync_tempfile_timeout in the
            # auditor section we can base our behavior on whatever they
            # configure for their replicator
            replicator_config = readconf(self.conf['__file__'],
                                         'object-replicator')
        except (KeyError, ValueError, IOError):
            # if we can't parse the real config (generally a KeyError on
            # __file__, or ValueError on no object-replicator section, or
            # IOError if reading the file failed) we use
            # a very conservative default for rsync_timeout
            default_rsync_timeout = 86400
        else:
            replicator_rsync_timeout = int(replicator_config.get(
                'rsync_timeout', replicator.DEFAULT_RSYNC_TIMEOUT))
            # Here we can do some light math for ops and use the *replicator's*
            # rsync_timeout (plus 15 mins to avoid deleting local tempfiles
            # before the remote replicator kills its rsync)
            default_rsync_timeout = replicator_rsync_timeout + 900
            # there's not really a good reason to assume the replicator
            # section's reclaim_age is more appropriate than the reconstructor
            # reclaim_age - but we're already parsing the config so we can set
            # the default value in our config if it's not already set
            if 'reclaim_age' in replicator_config:
                conf.setdefault('reclaim_age',
                                replicator_config['reclaim_age'])
        self.rsync_tempfile_timeout = config_auto_int_value(
            self.conf.get('rsync_tempfile_timeout'), default_rsync_timeout)
        self.diskfile_router = diskfile.DiskFileRouter(conf, self.logger)

        self.auditor_type = 'ALL'
        self.zero_byte_only_at_fps = zero_byte_only_at_fps
        if self.zero_byte_only_at_fps:
            self.max_files_per_second = float(self.zero_byte_only_at_fps)
            self.auditor_type = 'ZBF'
        self.log_time = int(conf.get('log_time', 3600))
        self.last_logged = 0
        self.files_running_time = 0
        self.bytes_running_time = 0
        self.bytes_processed = 0
        self.total_bytes_processed = 0
        self.total_files_processed = 0
        self.passes = 0
        self.quarantines = 0
        self.errors = 0
        self.rcache = rcache
        self.stats_sizes = sorted(
            [int(s) for s in list_from_csv(conf.get('object_size_stats'))])
        self.stats_buckets = dict(
            [(s, 0) for s in self.stats_sizes + ['OVER']])
Example #31
def build_port_to_conf(server):
    # map server to config by port
    port_to_config = {}
    for server_ in Manager([server]):
        for config_path in server_.conf_files():
            conf = readconf(config_path,
                            section_name='%s-replicator' % server_.type)
            port_to_config[int(conf['bind_port'])] = conf
    return port_to_config
Example #32
 def setUp(self):
     super(TestDbRsyncReplicator, self).setUp()
     cont_configs = [
         utils.readconf(p, 'container-replicator')
         for p in self.configs['container-replicator'].values()
     ]
     # Do more than per_diff object PUTs, to force rsync instead of usync
     self.object_puts = 1 + max(
         int(c.get('per_diff', '1000')) for c in cont_configs)
Example #33
def build_port_to_conf(server):
    # map server to config by port
    port_to_config = {}
    for server_ in Manager([server]):
        for config_path in server_.conf_files():
            conf = readconf(config_path,
                            section_name='%s-replicator' % server_.type)
            port_to_config[int(conf['bind_port'])] = conf
    return port_to_config
Example #34
    def setUp(self):
        resetswift()
        kill_orphans()
        self._load_rings_and_configs()
        try:
            self.servers_per_port = any(
                int(readconf(c, section_name='object-replicator').get(
                    'servers_per_port', '0'))
                for c in self.configs['object-replicator'].values())

            Manager(['main']).start(wait=True)
            for ipport in self.ipport2server:
                check_server(ipport, self.ipport2server)
            proxy_conf = readconf(self.configs['proxy-server'],
                                  section_name='app:proxy-server')
            proxy_ipport = (proxy_conf.get('bind_ip', '127.0.0.1'),
                            int(proxy_conf.get('bind_port', 8080)))
            self.ipport2server[proxy_ipport] = 'proxy'
            self.url, self.token, self.account = check_server(
                proxy_ipport, self.ipport2server)
            self.account_1 = {
                'url': self.url, 'token': self.token, 'account': self.account}

            rv = _retry_timeout(_check_proxy, args=(
                proxy_ipport, 'test2:tester2', 'testing2'))
            self.account_2 = {
                k: v for (k, v) in zip(('url', 'token', 'account'), rv)}

            self.replicators = Manager(
                ['account-replicator', 'container-replicator',
                 'object-replicator'])
            self.updaters = Manager(['container-updater', 'object-updater'])
        except BaseException:
            try:
                raise
            finally:
                try:
                    Manager(['all']).kill()
                except Exception:
                    pass
        info_url = "%s://%s/info" % (urlparse(self.url).scheme,
                                     urlparse(self.url).netloc)
        proxy_conn = client.http_connection(info_url)
        self.cluster_info = client.get_capabilities(proxy_conn)
Example #35
    def test_readconf(self):
        conf = '''[section1]
foo = bar

[section2]
log_name = yarr'''
        # setup a real file
        with open('/tmp/test', 'wb') as f:
            f.write(conf)
        make_filename = lambda: '/tmp/test'
        # setup a file stream
        make_fp = lambda: StringIO(conf)
        for conf_object_maker in (make_filename, make_fp):
            conffile = conf_object_maker()
            result = utils.readconf(conffile)
            expected = {'__file__': conffile,
                        'log_name': None,
                        'section1': {'foo': 'bar'},
                        'section2': {'log_name': 'yarr'}}
            self.assertEquals(result, expected)
            conffile = conf_object_maker()
            result = utils.readconf(conffile, 'section1')
            expected = {'__file__': conffile, 'log_name': 'section1',
                        'foo': 'bar'}
            self.assertEquals(result, expected)
            conffile = conf_object_maker()
            result = utils.readconf(conffile,
                                    'section2').get('log_name')
            expected = 'yarr'
            self.assertEquals(result, expected)
            conffile = conf_object_maker()
            result = utils.readconf(conffile, 'section1',
                                    log_name='foo').get('log_name')
            expected = 'foo'
            self.assertEquals(result, expected)
            conffile = conf_object_maker()
            result = utils.readconf(conffile, 'section1',
                                    defaults={'bar': 'baz'})
            expected = {'__file__': conffile, 'log_name': 'section1',
                        'foo': 'bar', 'bar': 'baz'}
            self.assertEquals(result, expected)
        self.assertRaises(SystemExit, utils.readconf, '/tmp/test', 'section3')
        os.unlink('/tmp/test')
        self.assertRaises(SystemExit, utils.readconf, '/tmp/test')
Example #36
    def test_readconf(self):
        conf = '''[section1]
foo = bar

[section2]
log_name = yarr'''
        # setup a real file
        with open('/tmp/test', 'wb') as f:
            f.write(conf)
        make_filename = lambda: '/tmp/test'
        # setup a file stream
        make_fp = lambda: StringIO(conf)
        for conf_object_maker in (make_filename, make_fp):
            conffile = conf_object_maker()
            result = utils.readconf(conffile)
            expected = {'__file__': conffile,
                        'log_name': None,
                        'section1': {'foo': 'bar'},
                        'section2': {'log_name': 'yarr'}}
            self.assertEquals(result, expected)
            conffile = conf_object_maker()
            result = utils.readconf(conffile, 'section1')
            expected = {'__file__': conffile, 'log_name': 'section1',
                        'foo': 'bar'}
            self.assertEquals(result, expected)
            conffile = conf_object_maker()
            result = utils.readconf(conffile,
                                    'section2').get('log_name')
            expected = 'yarr'
            self.assertEquals(result, expected)
            conffile = conf_object_maker()
            result = utils.readconf(conffile, 'section1',
                                    log_name='foo').get('log_name')
            expected = 'foo'
            self.assertEquals(result, expected)
            conffile = conf_object_maker()
            result = utils.readconf(conffile, 'section1',
                                    defaults={'bar': 'baz'})
            expected = {'__file__': conffile, 'log_name': 'section1',
                        'foo': 'bar', 'bar': 'baz'}
            self.assertEquals(result, expected)
        self.assertRaises(SystemExit, utils.readconf, '/tmp/test', 'section3')
        os.unlink('/tmp/test')
        self.assertRaises(SystemExit, utils.readconf, '/tmp/test')
Example #37
def get_ring(ring_name, required_replicas, required_devices,
             server=None, force_validate=None):
    if not server:
        server = ring_name
    ring = Ring('/etc/swift', ring_name=ring_name)
    if not VALIDATE_RSYNC and not force_validate:
        return ring
    # easy sanity checks
    if ring.replica_count != required_replicas:
        raise SkipTest('%s has %s replicas instead of %s' % (
            ring.serialized_path, ring.replica_count, required_replicas))
    if len(ring.devs) != required_devices:
        raise SkipTest('%s has %s devices instead of %s' % (
            ring.serialized_path, len(ring.devs), required_devices))
    # map server to config by port
    port_to_config = {}
    for server_ in Manager([server]):
        for config_path in server_.conf_files():
            conf = readconf(config_path,
                            section_name='%s-replicator' % server_.type)
            port_to_config[int(conf['bind_port'])] = conf
    for dev in ring.devs:
        # verify server is exposing mounted device
        conf = port_to_config[dev['port']]
        for device in os.listdir(conf['devices']):
            if device == dev['device']:
                dev_path = os.path.join(conf['devices'], device)
                full_path = os.path.realpath(dev_path)
                if not os.path.exists(full_path):
                    raise SkipTest(
                        'device %s in %s was not found (%s)' %
                        (device, conf['devices'], full_path))
                break
        else:
            raise SkipTest(
                "unable to find ring device %s under %s's devices (%s)" % (
                    dev['device'], server, conf['devices']))
        # verify server is exposing rsync device
        if port_to_config[dev['port']].get('vm_test_mode', False):
            rsync_export = '%s%s' % (server, dev['replication_port'])
        else:
            rsync_export = server
        cmd = "rsync rsync://localhost/%s" % rsync_export
        p = Popen(cmd, shell=True, stdout=PIPE)
        stdout, _stderr = p.communicate()
        if p.returncode:
            raise SkipTest('unable to connect to rsync '
                           'export %s (%s)' % (rsync_export, cmd))
        for line in stdout.splitlines():
            if line.rsplit(None, 1)[-1] == dev['device']:
                break
        else:
            raise SkipTest("unable to find ring device %s under rsync's "
                           "exported devices for %s (%s)" %
                           (dev['device'], rsync_export, cmd))
    return ring
Example #38
def main(cli_args=None):
    parser = _make_parser()
    args = parser.parse_args(cli_args)
    if not args.subcommand:
        # On py2, subparsers are required; on py3 they are not; see
        # https://bugs.python.org/issue9253. py37 added a `required` kwarg
        # to let you control it, but prior to that, there was no choice in
        # the matter. So, check whether the destination was set and bomb
        # out if not.
        parser.print_help()
        print('\nA sub-command is required.', file=sys.stderr)
        return EXIT_INVALID_ARGS

    try:
        conf = {}
        if args.conf_file:
            conf = readconf(args.conf_file, 'container-sharder')
        conf.update(
            dict((k, v) for k, v in vars(args).items()
                 if v != USE_SHARDER_DEFAULT))
        conf_args = ContainerSharderConf(conf)
    except (OSError, IOError) as exc:
        print('Error opening config file %s: %s' % (args.conf_file, exc),
              file=sys.stderr)
        return EXIT_ERROR
    except (TypeError, ValueError) as exc:
        print('Error loading config: %s' % exc, file=sys.stderr)
        return EXIT_INVALID_ARGS

    for k, v in vars(args).items():
        # set any un-set cli args from conf_args
        if v is USE_SHARDER_DEFAULT:
            setattr(args, k, getattr(conf_args, k))

    try:
        ContainerSharderConf.validate_conf(args)
    except ValueError as err:
        print('Invalid config: %s' % err, file=sys.stderr)
        return EXIT_INVALID_ARGS

    if args.func in (analyze_shard_ranges, ):
        args.input = args.path_to_file
        return args.func(args) or 0

    logger = get_logger({}, name='ContainerBroker', log_to_console=True)
    broker = ContainerBroker(os.path.realpath(args.path_to_file),
                             logger=logger,
                             skip_commits=not args.force_commits)
    try:
        broker.get_info()
    except Exception as exc:
        print('Error opening container DB %s: %s' % (args.path_to_file, exc),
              file=sys.stderr)
        return EXIT_ERROR
    print('Loaded db broker for %s' % broker.path, file=sys.stderr)
    return args.func(broker, args)
Example #39
def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
    """
    Loads settings from conf, then instantiates daemon "klass" and runs the
    daemon with the specified once kwarg.  The section_name will be derived
    from the daemon "klass" if not provided (e.g. ObjectReplicator =>
    object-replicator).

    :param klass: Class to instantiate, subclass of common.daemon.Daemon
    :param conf_file: Path to configuration file
    :param section_name: Section name from conf file to load config from
    :param once: Passed to daemon run method
    """
    # very often the config section_name is based on the class name
    # the None singleton will be passed through to readconf as is
    if section_name == '':
        section_name = sub(r'([a-z])([A-Z])', r'\1-\2',
                           klass.__name__).lower()
    conf = utils.readconf(conf_file, section_name,
                          log_name=kwargs.get('log_name'))

    # once on command line (i.e. daemonize=false) will over-ride config
    once = once or not utils.config_true_value(conf.get('daemonize', 'true'))

    # pre-configure logger
    if 'logger' in kwargs:
        logger = kwargs.pop('logger')
    else:
        logger = utils.get_logger(conf, conf.get('log_name', section_name),
                                  log_to_console=kwargs.pop('verbose', False),
                                  log_route=section_name)

    # optional nice/ionice priority scheduling
    utils.modify_priority(conf, logger)

    # disable fallocate if desired
    if utils.config_true_value(conf.get('disable_fallocate', 'no')):
        utils.disable_fallocate()
    # set utils.FALLOCATE_RESERVE if desired
    utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
        utils.config_fallocate_value(conf.get('fallocate_reserve', '1%'))

    # By default, disable eventlet printing stacktraces
    eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no'))
    eventlet.debug.hub_exceptions(eventlet_debug)

    # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on
    # some platforms. This locks in reported times to the timezone in which
    # the server first starts running in locations that periodically change
    # timezones.
    os.environ['TZ'] = time.strftime("%z", time.gmtime())

    try:
        klass(conf).run(once=once, **kwargs)
    except KeyboardInterrupt:
        logger.info('User quit')
    logger.info('Exited')
Example #40
def _sync_methods(object_server_config_paths):
    """
    Get the set of all configured sync_methods for the object-replicator
    sections in the list of config paths.
    """
    sync_methods = set()
    for config_path in object_server_config_paths:
        options = utils.readconf(config_path, 'object-replicator')
        sync_methods.add(options.get('sync_method', 'rsync'))
    return sync_methods
Example #41
def run_server():
    usage = '''
    %prog start|stop|restart [--conf=/path/to/some.conf] [--foreground|-f]
    '''
    args = optparse.OptionParser(usage)
    args.add_option('--foreground',
                    '-f',
                    action="store_true",
                    help="Run in foreground, in debug mode")
    args.add_option('--conf',
                    default="/etc/swift/ring-master.conf",
                    help="path to config. default /etc/swift/ring-master.conf")
    args.add_option('--pid',
                    default="/var/run/swift-ring-master-wsgi.pid",
                    help="default: /var/run/swift-ring-master-wsgi.pid")
    options, arguments = args.parse_args()

    if len(sys.argv) <= 1:
        args.print_help()

    if options.foreground:
        conf = readconf(options.conf)
        rma = RingMasterApp(conf['ringmaster_wsgi'])
        rma.start()
        sys.exit(0)

    if len(sys.argv) >= 2:
        conf = readconf(options.conf)
        user = conf['ringmaster_wsgi'].get('user', 'swift')
        daemon = RingMasterAppd(options.pid, user=user)
        if 'start' == sys.argv[1]:
            daemon.start(conf['ringmaster_wsgi'])
        elif 'stop' == sys.argv[1]:
            daemon.stop()
        elif 'restart' == sys.argv[1]:
            daemon.restart(conf['ringmaster_wsgi'])
        else:
            args.print_help()
            sys.exit(2)
        sys.exit(0)
    else:
        args.print_help()
        sys.exit(2)
Example #42
def run_server():
    usage = '''
    %prog start|stop|restart [--conf=/path/to/some.conf] [--foreground|-f]
    '''
    args = optparse.OptionParser(usage)
    args.add_option('--foreground', '-f', action="store_true",
                    help="Run in foreground, in debug mode")
    args.add_option('--once', '-o', action="store_true", help="Run once")
    args.add_option('--conf', default="/etc/swift/ring-minion.conf",
                    help="path to config. default /etc/swift/ring-minion.conf")
    args.add_option('--pid', default='/var/run/swift/ring-minion-server.pid',
                    help="default: /var/run/swift-ring-minion-server.pid")
    options, arguments = args.parse_args()

    if len(sys.argv) <= 1:
        args.print_help()

    if options.foreground:
        conf = readconf(options.conf)
        minion = RingMinion(conf['minion'])
        if options.once:
            minion.once()
        else:
            minion.watch_loop()
        sys.exit(0)

    if len(sys.argv) >= 2:
        conf = readconf(options.conf)
        user = conf['minion'].get('user', 'swift')
        daemon = RingMiniond(options.pid, user=user)
        if 'start' == sys.argv[1]:
            daemon.start(conf['minion'])
        elif 'stop' == sys.argv[1]:
            daemon.stop()
        elif 'restart' == sys.argv[1]:
            daemon.restart(conf['minion'])
        else:
            args.print_help()
            sys.exit(2)
        sys.exit(0)
    else:
        args.print_help()
        sys.exit(2)
Example #43
def _sync_methods(object_server_config_paths):
    """
    Get the set of all configured sync_methods for the object-replicator
    sections in the list of config paths.
    """
    sync_methods = set()
    for config_path in object_server_config_paths:
        options = utils.readconf(config_path, 'object-replicator')
        sync_methods.add(options.get('sync_method', 'rsync'))
    return sync_methods
Example #44
    def setUp(self):
        p = Popen("resetswift 2>&1", shell=True, stdout=PIPE)
        stdout, _stderr = p.communicate()
        print stdout
        Manager(['all']).stop()
        self.pids = {}
        try:
            self.ipport2server = {}
            self.configs = defaultdict(dict)
            self.account_ring = get_ring('account',
                                         self.acct_cont_required_replicas,
                                         self.acct_cont_required_devices,
                                         ipport2server=self.ipport2server,
                                         config_paths=self.configs)
            self.container_ring = get_ring('container',
                                           self.acct_cont_required_replicas,
                                           self.acct_cont_required_devices,
                                           ipport2server=self.ipport2server,
                                           config_paths=self.configs)
            self.policy = get_policy(**self.policy_requirements)
            self.object_ring = get_ring(self.policy.ring_name,
                                        self.obj_required_replicas,
                                        self.obj_required_devices,
                                        server='object',
                                        ipport2server=self.ipport2server,
                                        config_paths=self.configs)

            # per-port object servers are in play if any object-replicator
            # config sets servers_per_port to a non-zero value
            self.servers_per_port = any(
                int(
                    readconf(c, section_name='object-replicator').get(
                        'servers_per_port', '0'))
                for c in self.configs['object-replicator'].values())

            Manager(['main']).start(wait=False)
            for ipport in self.ipport2server:
                check_server(ipport, self.ipport2server, self.pids)
            proxy_ipport = ('127.0.0.1', 8080)
            self.ipport2server[proxy_ipport] = 'proxy'
            self.url, self.token, self.account = check_server(
                proxy_ipport, self.ipport2server, self.pids)
            self.replicators = Manager([
                'account-replicator', 'container-replicator',
                'object-replicator'
            ])
            self.updaters = Manager(['container-updater', 'object-updater'])
        except BaseException:
            # best-effort cleanup, then re-raise the original exception
            try:
                Manager(['all']).kill()
            except Exception:
                pass
            raise
Exemple #45
0
 def _translate_conf(cls, conf):
     origin_conf = conf['sos_conf']
     conf = readconf(origin_conf, raw=True)
     xconf = conf['sos']
     for format_section in [
             'outgoing_url_format', 'outgoing_url_format_head',
             'outgoing_url_format_get', 'outgoing_url_format_get_xml',
             'outgoing_url_format_get_json', 'incoming_url_regex'
     ]:
         if conf.get(format_section, None):
             xconf[format_section] = conf[format_section]
     return xconf
Exemple #46
0
 def run_custom_daemon(self, klass, conf_section, conf_index,
                       custom_conf, **kwargs):
     conf_file = self.configs[conf_section][conf_index]
     conf = utils.readconf(conf_file, conf_section)
     conf.update(custom_conf)
     # Use a CaptureLogAdapter in order to preserve the pattern of tests
     # calling the log accessor methods (e.g. get_lines_for_level) directly
     # on the logger instance
     with capture_logger(conf, conf.get('log_name', conf_section),
                         log_to_console=kwargs.pop('verbose', False),
                         log_route=conf_section) as log_adapter:
         daemon = klass(conf, log_adapter)
         daemon.run_once(**kwargs)
     return daemon
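Assuming the probe-test context above (the daemon class and the override are illustrative), a usage sketch might look like this; per the comment above, log accessor methods can be called directly on the returned daemon's logger.

    daemon = self.run_custom_daemon(ObjectAuditor, 'object-auditor', 1,
                                    {'interval': '1'})
    errors = daemon.logger.get_lines_for_level('error')
    self.assertFalse(errors)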
Exemple #47
0
def get_ring(server, force_validate=None):
    ring = Ring('/etc/swift/%s.ring.gz' % server)
    if not VALIDATE_RSYNC and not force_validate:
        return ring
    # easy sanity checks
    assert 3 == ring.replica_count, '%s has %s replicas instead of 3' % (
        ring.serialized_path, ring.replica_count)
    assert 4 == len(ring.devs), '%s has %s devices instead of 4' % (
        ring.serialized_path, len(ring.devs))
    # map server to config by port
    port_to_config = {}
    for node_id in range(1, 5):
        conf = readconf('/etc/swift/%s-server/%d.conf' % (server, node_id),
                        section_name='%s-replicator' % server)
        port_to_config[int(conf['bind_port'])] = conf
    for dev in ring.devs:
        # verify server is exposing mounted device
        conf = port_to_config[dev['port']]
        for device in os.listdir(conf['devices']):
            if device == dev['device']:
                dev_path = path.join(conf['devices'], device)
                full_path = path.realpath(dev_path)
                assert path.exists(full_path), \
                    'device %s in %s was not found (%s)' % (
                        device, conf['devices'], full_path)
                break
        else:
            raise AssertionError(
                "unable to find ring device %s under %s's devices (%s)" %
                (dev['device'], server, conf['devices']))
        # verify server is exposing rsync device (note: vm_test_mode is read
        # as a raw string here, so any non-empty value counts as true)
        if port_to_config[dev['port']].get('vm_test_mode', False):
            rsync_export = '%s%s' % (server, dev['replication_port'])
        else:
            rsync_export = server
        cmd = "rsync rsync://localhost/%s" % rsync_export
        p = Popen(cmd, shell=True, stdout=PIPE)
        stdout, _stderr = p.communicate()
        if p.returncode:
            raise AssertionError('unable to connect to rsync '
                                 'export %s (%s)' % (rsync_export, cmd))
        for line in stdout.splitlines():
            if line.rsplit(None, 1)[-1] == dev['device']:
                break
        else:
            raise AssertionError("unable to find ring device %s under rsync's "
                                 "exported devices for %s (%s)" %
                                 (dev['device'], rsync_export, cmd))
    return ring
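For reference, a sketch of the parse in the rsync check above (the sample listing is made up): the last whitespace-separated token on each line is the exported entry name, which is compared against the ring device.

    sample = ('drwxr-xr-x          4,096 2017/01/01 00:00:00 sdb1\n'
              'drwxr-xr-x          4,096 2017/01/01 00:00:00 sdb5\n')
    names = [line.rsplit(None, 1)[-1] for line in sample.splitlines()]
    assert names == ['sdb1', 'sdb5']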
Exemple #48
0
    def get_container_db_files(self, container):
        opart, onodes = self.container_ring.get_nodes(self.account, container)
        db_files = []
        for onode in onodes:
            node_id = self.config_number(onode)
            device = onode['device']
            hash_str = hash_path(self.account, container)
            server_conf = readconf(self.configs['container-server'][node_id])
            devices = server_conf['app:container-server']['devices']
            obj_dir = '%s/%s/containers/%s/%s/%s/' % (devices, device, opart,
                                                      hash_str[-3:], hash_str)
            db_files.append(_get_db_file_path(obj_dir))

        return db_files
    def _get_container_db_files(self, container):
        opart, onodes = self.container_ring.get_nodes(self.account, container)
        db_files = []
        for onode in onodes:
            node_id = (onode['port'] - 6000) // 10
            device = onode['device']
            hash_str = hash_path(self.account, container)
            server_conf = readconf(self.configs['container'] % node_id)
            devices = server_conf['app:container-server']['devices']
            obj_dir = '%s/%s/containers/%s/%s/%s/' % (devices, device, opart,
                                                      hash_str[-3:], hash_str)
            db_files.append(get_db_file_path(obj_dir))

        return db_files
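Both helpers assume the classic SAIO port layout, where node N listens on 60N0 plus a per-service offset (object +0, container +1, account +2), so the node number can be recovered from the port alone:

    assert [(p - 6000) // 10 for p in (6011, 6021, 6031, 6041)] == [1, 2, 3, 4]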
Exemple #50
0
    def get_user_root_secret_by_id(self, account, user_token, key_id):
        """
        Retrieve the user's root encryption secret with the specified ID from
        an external key management system using Castellan.

        :param account: the name of the account
        :type account: string

        :param user_token: the keystone token of the user from the request
        :type user_token: string

        :param key_id: the ID of the user's root encryption secret to retrieve
        :type key_id: string

        :return: the binary bytes of the user's root encryption secret with the
                 specified ID
        :rtype: bytearray
        """
        user_root_secrets = self._user_root_secrets.get(account)
        if user_root_secrets is None:
            user_root_secrets = dict()
        else:
            encoded_key = user_root_secrets.get(key_id)
            if ROOT_SECRET_CACHE_TIME > 0:
                if encoded_key is not None:
                    return encoded_key
        conf = self.conf
        if self.keymaster_config_path is not None:
            if any(opt in conf for opt in ('key_id', )):
                raise ValueError('keymaster_config_path is set, but there '
                                 'are other config options specified!')
            conf = readconf(self.keymaster_config_path, 'rotating_keymaster')
        user_ctxt = keystone_token.KeystoneToken(token=user_token)
        oslo_conf = cfg.ConfigOpts()
        options.set_defaults(oslo_conf,
                             auth_endpoint=conf.get('auth_endpoint'),
                             api_class=conf.get('api_class'))
        options.enable_logging()
        manager = key_manager.API(oslo_conf)
        # Get the key with the specified ID from Barbican. If no keymanager
        # class has been specified (using 'api_class'), an exception will be
        # raised.
        key = manager.get(user_ctxt, key_id)
        if key is None:
            raise ValueError("Could not find user '%s' with key_id '%s'" %
                             (account, key_id))
        user_root_secrets[key_id] = key.get_encoded()
        self._user_root_secrets[account] = user_root_secrets
        return key.get_encoded()
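For reference, a sketch of the cache structure consulted above (the IDs and bytes are made up): secrets are kept per account, keyed by secret ID.

    _user_root_secrets = {
        'AUTH_test': {
            'key-id-1': b'\x00' * 32,
            'key-id-2': b'\x01' * 32,
        },
    }
    cached = _user_root_secrets.get('AUTH_test', {}).get('key-id-2')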
Exemple #51
0
    def setUp(self):
        """
        Reset all environment and start all servers.
        """
        super(TestDarkDataDeletion, self).setUp()

        self.conf_dest = \
            os.path.join('/tmp/',
                         datetime.now().strftime('swift-%Y-%m-%d_%H-%M-%S-%f'))
        os.mkdir(self.conf_dest)

        object_server_dir = os.path.join(self.conf_dest, 'object-server')
        os.mkdir(object_server_dir)

        for conf_file in Server('object-auditor').conf_files():
            config = readconf(conf_file)
            if 'object-auditor' not in config:
                continue  # *somebody* should be set up to run the auditor
            config['object-auditor'].update({'watchers': 'swift#dark_data'})
            # Note that this setdefault business may mean the watcher doesn't
            # pick up DEFAULT values, but that (probably?) won't matter.
            # We set grace_age to 0 so that tests don't have to deal with time.
            config.setdefault(CONF_SECTION, {}).update({
                'action': self.action,
                'grace_age': "0"
            })

            parser = ConfigParser()
            for section in ('object-auditor', CONF_SECTION):
                parser.add_section(section)
                for option, value in config[section].items():
                    parser.set(section, option, value)

            file_name = os.path.basename(conf_file)
            if file_name.endswith('.d'):
                # Work around conf.d setups (like you might see with VSAIO)
                file_name = file_name[:-2]
            with open(os.path.join(object_server_dir, file_name), 'w') as fp:
                parser.write(fp)

        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url,
                                   self.token,
                                   self.container_name,
                                   self.object_name,
                                   'object',
                                   policy=self.policy)
Exemple #52
0
def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
    """
    Loads settings from conf, then instantiates daemon "klass" and runs the
    daemon with the specified once kwarg.  The section_name will be derived
    from the daemon "klass" if not provided (e.g. ObjectReplicator =>
    object-replicator).

    :param klass: Class to instantiate, subclass of common.daemon.Daemon
    :param conf_file: Path to configuration file
    :param section_name: Section name from conf file to load config from
    :param once: Passed to daemon run method
    """
    # very often the config section_name is based on the class name;
    # the None singleton will be passed through to readconf as-is
    if section_name == '':
        section_name = sub(r'([a-z])([A-Z])', r'\1-\2',
                           klass.__name__).lower()
    conf = utils.readconf(conf_file, section_name,
                          log_name=kwargs.get('log_name'))

    # once on command line (i.e. daemonize=false) will over-ride config
    once = once or not utils.config_true_value(conf.get('daemonize', 'true'))

    # pre-configure logger
    if 'logger' in kwargs:
        logger = kwargs.pop('logger')
    else:
        logger = utils.get_logger(conf, conf.get('log_name', section_name),
                                  log_to_console=kwargs.pop('verbose', False),
                                  log_route=section_name)

    # disable fallocate if desired
    if utils.config_true_value(conf.get('disable_fallocate', 'no')):
        utils.disable_fallocate()
    # set utils.FALLOCATE_RESERVE if desired
    reserve = int(conf.get('fallocate_reserve', 0))
    if reserve > 0:
        utils.FALLOCATE_RESERVE = reserve

    # By default, disable eventlet printing stacktraces
    eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no'))
    eventlet.debug.hub_exceptions(eventlet_debug)

    try:
        klass(conf).run(once=once, **kwargs)
    except KeyboardInterrupt:
        logger.info('User quit')
    logger.info('Exited')
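The CamelCase-to-section-name derivation above is easy to check in isolation:

    from re import sub

    def derive_section_name(klass_name):
        # same transform as run_daemon: insert a hyphen at each
        # lower-to-upper boundary, then lowercase
        return sub(r'([a-z])([A-Z])', r'\1-\2', klass_name).lower()

    assert derive_section_name('ObjectReplicator') == 'object-replicator'
    assert derive_section_name('ContainerUpdater') == 'container-updater'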
Exemple #53
0
    def __init__(self, app, conf, *args, **kwargs):
        self.logger = swift_utils.get_logger(conf, log_route='swearch')

        self.app = app
        # path used to activate search. ex: /[path]/AUTH_12345
        self.path = conf.get('path', 'search')

        config_file = conf.get('config_file', '/etc/swift/swearch.conf')
        config = swift_utils.readconf(config_file)

        index_config = config.get('index', {})
        self.search_index_name = index_config.get('search_index_name',
                                                  'os_default')

        hosts_str = index_config.get('elastic_hosts', '127.0.0.1:9200')
        self.elastic_hosts = [x.strip() for x in hosts_str.split(',')]
Exemple #54
0
 def _load_keymaster_config_file(self, conf):
     # Keymaster options specified in the filter section would be ignored if
     # a separate keymaster config file is specified. To avoid confusion,
     # prohibit them existing in the filter section.
     bad_opts = []
     for opt in conf:
         for km_opt in self.keymaster_opts:
             if ((km_opt.endswith('*') and opt.startswith(km_opt[:-1]))
                     or opt == km_opt):
                 bad_opts.append(opt)
     if bad_opts:
         raise ValueError('keymaster_config_path is set, but there '
                          'are other config options specified: %s' %
                          ", ".join(bad_opts))
     return readconf(self.keymaster_config_path,
                     self.keymaster_conf_section)
 def _setup_data_file(self, container, obj, data):
     client.put_container(self.url, self.token, container)
     client.put_object(self.url, self.token, container, obj, data)
     odata = client.get_object(self.url, self.token, container, obj)[-1]
     self.assertEquals(odata, data)
     opart, onodes = self.object_ring.get_nodes(self.account, container,
                                                obj)
     onode = onodes[0]
     node_id = (onode['port'] - 6000) // 10
     device = onode['device']
     hash_str = hash_path(self.account, container, obj)
     obj_server_conf = readconf(self.configs['object'] % node_id)
     devices = obj_server_conf['app:object-server']['devices']
     obj_dir = '%s/%s/objects/%s/%s/%s/' % (devices, device, opart,
                                            hash_str[-3:], hash_str)
     data_file = get_data_file_path(obj_dir)
     return onode, opart, data_file
    def test_delayed_reap(self):
        # define reapers which are supposed to operate 3 seconds later
        account_reapers = []
        for conf_file in self.configs['account-server'].values():
            conf = utils.readconf(conf_file, 'account-reaper')
            conf['delay_reaping'] = '3'
            account_reapers.append(reaper.AccountReaper(conf))

        self.assertTrue(account_reapers)

        # run reaper, and make sure that nothing is reaped
        for account_reaper in account_reapers:
            account_reaper.run_once()

        for policy, container, obj in self.all_objects:
            cpart, cnodes = self.container_ring.get_nodes(
                self.account, container)
            for cnode in cnodes:
                try:
                    direct_head_container(cnode, cpart, self.account,
                                          container)
                except ClientException:
                    self.fail(
                        "Nothing should be reaped. Container should exist")

            part, nodes = policy.object_ring.get_nodes(self.account, container,
                                                       obj)
            headers = {'X-Backend-Storage-Policy-Index': int(policy)}
            for node in nodes:
                try:
                    direct_get_object(node,
                                      part,
                                      self.account,
                                      container,
                                      obj,
                                      headers=headers)
                except ClientException:
                    self.fail("Nothing should be reaped. Object should exist")

        # wait 3 seconds, run reaper, and make sure that all is reaped
        sleep(3)
        for account_reaper in account_reapers:
            account_reaper.run_once()

        self._verify_account_reaped()
Exemple #57
0
    def get_latest_user_root_secret_and_id(self, account, user_token):
        """
        Retrieve the user's latest root encryption secret from an external key
        management system using Castellan.

        :param account: the name of the account
        :type account: string

        :param user_token: the keystone token of the user from the request
        :type user_token: string

        :return: a tuple containing the binary bytes of the latest encryption
                 root secret, and the id of the latest root encryption secret
        :rtype: (bytearray, string)
        """
        conf = self.conf
        if self.keymaster_config_path is not None:
            if any(opt in conf for opt in ('key_id', )):
                raise ValueError('keymaster_config_path is set, but there '
                                 'are other config options specified!')
            conf = readconf(self.keymaster_config_path, 'rotating_keymaster')
        user_ctxt = keystone_token.KeystoneToken(token=user_token)
        oslo_conf = cfg.ConfigOpts()
        options.set_defaults(oslo_conf,
                             auth_endpoint=conf.get('auth_endpoint'),
                             api_class=conf.get('api_class'))
        options.enable_logging()
        manager = key_manager.API(oslo_conf)
        # Get the latest key from Barbican. If no keymanager class has been
        # specified (using 'api_class'), or the keymaster does not have a
        # 'get_latest_key()' method, an exception will be raised.
        latest_user_root_secret_id, key = manager.get_latest_key(
            user_ctxt, bits=256, algorithm='aes', name='swift_root_secret')
        self.logger.log(
            SECDEL_LOG_LEVEL_DEBUG,
            "ID of latest user root secret is %s" % latest_user_root_secret_id)
        if latest_user_root_secret_id is None or key is None:
            return None, None
        user_root_secrets = self._user_root_secrets.get(account)
        if user_root_secrets is None:
            user_root_secrets = dict()
        user_root_secrets[latest_user_root_secret_id] = key.get_encoded()
        self._user_root_secrets[account] = user_root_secrets
        return key.get_encoded(), latest_user_root_secret_id
Exemple #58
0
def parse_per_policy_config(conf):
    """
    Search the config file for any per-policy config sections and load those
    sections to a dict mapping policy reference (name or index) to policy
    options.

    :param conf: the proxy server conf dict
    :return: a dict mapping policy reference -> dict of policy options
    :raises ValueError: if a policy config section has an invalid name
    """
    policy_config = {}
    all_conf = readconf(conf['__file__'])
    policy_section_prefix = conf['__name__'] + ':policy:'
    for section, options in all_conf.items():
        if not section.startswith(policy_section_prefix):
            continue
        policy_ref = section[len(policy_section_prefix):]
        policy_config[policy_ref] = options
    return policy_config
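A minimal sketch of the mapping performed above, assuming conf['__name__'] is 'proxy-server' (the section and option names are illustrative):

    all_conf = {
        'app:proxy-server': {'use': 'egg:swift#proxy'},
        'proxy-server:policy:0': {'sorting_method': 'affinity'},
        'proxy-server:policy:silver': {'read_affinity': 'r1=100'},
    }
    prefix = 'proxy-server:policy:'
    policy_config = dict(
        (section[len(prefix):], options)
        for section, options in all_conf.items()
        if section.startswith(prefix))
    assert sorted(policy_config) == ['0', 'silver']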
Exemple #59
0
 def _get_object_info(self, account, container, obj, number):
     obj_conf = self.configs['object-server']
     config_path = obj_conf[number]
     options = utils.readconf(config_path, 'app:object-server')
     swift_dir = options.get('swift_dir', '/etc/swift')
     ring = POLICIES.get_object_ring(int(self.policy), swift_dir)
     part, nodes = ring.get_nodes(account, container, obj)
     for node in nodes:
         # assumes one to one mapping
         if node['port'] == int(options.get('bind_port')):
             device = node['device']
             break
     else:
         return None
     mgr = DiskFileManager(options, get_logger(options))
     disk_file = mgr.get_diskfile(device, part, account, container, obj,
                                  self.policy)
     info = disk_file.read_metadata()
     return info
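A hedged usage sketch (the container, object, and node numbers are illustrative): read the on-disk metadata from two replicas and confirm they agree on the ETag.

    infos = [self._get_object_info(self.account, 'c', 'o', n)
             for n in (1, 2)]
    etags = set(info.get('ETag') for info in infos if info)
    assert len(etags) <= 1, 'replicas disagree: %r' % sorted(etags)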