def test_reload(self):
    """Ring should lazily reload from disk once the ring file's mtime
    changes and reload_time has elapsed, whichever accessor is used.

    Fixes: deprecated assertEquals/assertNotEquals aliases replaced
    with assertEqual/assertNotEqual, and the Python-2-only
    generator.next() call replaced with the next() builtin.
    """

    def reset_ring():
        # Age the ring file so a fresh Ring accepts it, then build a
        # Ring with a tiny reload_time so any access re-checks mtime.
        os.utime(self.testgz, (time() - 300, time() - 300))
        self.ring = ring.Ring(self.testdir, reload_time=0.001,
                              ring_name='whatever')
        return self.ring._mtime

    def save_ring():
        # Persist the updated device list and let the clock move past
        # reload_time so the next access notices the change.
        ring.RingData(
            self.intended_replica2part2dev_id,
            self.intended_devs,
            self.intended_part_shift).save(self.testgz)
        sleep(0.1)

    # get_nodes() triggers a reload
    orig_mtime = reset_ring()
    self.assertEqual(len(self.ring.devs), 5)
    self.intended_devs.append(
        {'id': 3, 'region': 0, 'zone': 3, 'weight': 1.0})
    save_ring()
    self.ring.get_nodes('a')
    self.assertEqual(len(self.ring.devs), 6)
    self.assertNotEqual(self.ring._mtime, orig_mtime)

    # get_part_nodes() triggers a reload
    orig_mtime = reset_ring()
    self.assertEqual(len(self.ring.devs), 6)
    self.intended_devs.append(
        {'id': 5, 'region': 0, 'zone': 4, 'weight': 1.0})
    save_ring()
    self.ring.get_part_nodes(0)
    self.assertEqual(len(self.ring.devs), 7)
    self.assertNotEqual(self.ring._mtime, orig_mtime)

    # get_more_nodes() triggers a reload
    orig_mtime = reset_ring()
    part, nodes = self.ring.get_nodes('a')
    self.assertEqual(len(self.ring.devs), 7)
    self.intended_devs.append(
        {'id': 6, 'region': 0, 'zone': 5, 'weight': 1.0})
    save_ring()
    next(self.ring.get_more_nodes(part))
    self.assertEqual(len(self.ring.devs), 8)
    self.assertNotEqual(self.ring._mtime, orig_mtime)

    # merely reading .devs triggers a reload
    orig_mtime = reset_ring()
    self.assertEqual(len(self.ring.devs), 8)
    self.intended_devs.append(
        {'id': 5, 'region': 0, 'zone': 4, 'weight': 1.0})
    save_ring()
    self.assertEqual(len(self.ring.devs), 9)
    self.assertNotEqual(self.ring._mtime, orig_mtime)
def __init__(self, url, token, container_name='test', object_name='test',
             server_type='container'):
    """Work out which node numbers are primaries vs. handoffs for the
    test target so failures can be injected deterministically."""
    self.url = url
    self.token = token
    self.account = utils.split_path(urlparse(url).path, 2, 2)[1]
    self.container_name = container_name
    self.object_name = object_name
    if server_type:
        managed = ['%s-server' % server_type]
    else:
        managed = ['all']
    self.servers = Manager(managed)
    shuffled = list(POLICIES)
    random.shuffle(shuffled)
    self.policies = itertools.cycle(shuffled)
    # only pass the names that are meaningful for this server type
    obj = None
    cont = None
    if server_type == 'object':
        obj = object_name
    if server_type in ('object', 'container'):
        cont = container_name
    target_ring = ring.Ring('/etc/swift/%s.ring.gz' % server_type)
    part, nodes = target_ring.get_nodes(self.account, cont, obj)
    found_ids = set(node['id'] for node in nodes)
    if 0 in found_ids and 1 in found_ids:
        self.primary_numbers = (1, 2)
        self.handoff_numbers = (3, 4)
    else:
        self.primary_numbers = (3, 4)
        self.handoff_numbers = (1, 2)
def test_print_item_locations_invalid_args(self):
    """print_item_locations must raise InfoSystemExit for every
    invalid argument combination."""
    # no target at all
    self.assertRaises(InfoSystemExit, print_item_locations, None)
    # account + object but neither a ring nor a policy to resolve them
    self.assertRaises(InfoSystemExit, print_item_locations, None,
                      account='account', obj='object')
    # container given without an account
    self.assertRaises(InfoSystemExit, print_item_locations, None,
                      container='con')
    # unrecognized policy name
    self.assertRaises(InfoSystemExit, print_item_locations, None,
                      obj='object', policy_name='xyz')
    # object requested through an object ring, but container missing
    ring_for_objects = ring.Ring(self.testdir, ring_name='object')
    self.assertRaises(InfoSystemExit, print_item_locations,
                      ring_for_objects, account='account', obj='object')
def setUp(self):
    """Create a scratch 'ring' directory holding one saved test ring."""
    utils.HASH_PATH_SUFFIX = 'endcap'
    self.testdir = os.path.join(os.path.dirname(__file__), 'ring')
    rmtree(self.testdir, ignore_errors=1)
    os.mkdir(self.testdir)
    self.testgz = os.path.join(self.testdir, 'whatever.ring.gz')
    self.intended_replica2part2dev_id = [
        array.array('H', [0, 1, 0, 1]),
        array.array('H', [0, 1, 0, 1]),
        array.array('H', [3, 4, 3, 4])]

    def dev(dev_id, zone, ip):
        # all devices share the same weight and port
        return {'id': dev_id, 'zone': zone, 'weight': 1.0,
                'ip': ip, 'port': 6000}

    self.intended_devs = [dev(0, 0, '10.1.1.1'),
                          dev(1, 0, '10.1.1.1'),
                          None,  # hole left by a removed device (id 2)
                          dev(3, 2, '10.1.2.1'),
                          dev(4, 2, '10.1.2.2')]
    self.intended_part_shift = 30
    self.intended_reload_time = 15
    ring.RingData(self.intended_replica2part2dev_id,
                  self.intended_devs,
                  self.intended_part_shift).save(self.testgz)
    self.ring = ring.Ring(self.testdir,
                          reload_time=self.intended_reload_time,
                          ring_name='whatever')
def setUp(self):
    """Build a temp dir with a saved ring that includes replication
    ips/ports distinct from the plain ips/ports."""
    super(TestRing, self).setUp()
    self.testdir = mkdtemp()
    self.testgz = os.path.join(self.testdir, 'whatever.ring.gz')
    self.intended_replica2part2dev_id = [
        array.array('H', [0, 1, 0, 1]),
        array.array('H', [0, 1, 0, 1]),
        array.array('H', [3, 4, 3, 4])]

    def dev(dev_id, zone, ip, repl_ip):
        # weight/ports are constant; only id, zone and addresses vary
        return {'id': dev_id, 'region': 0, 'zone': zone, 'weight': 1.0,
                'ip': ip, 'port': 6200,
                'replication_ip': repl_ip, 'replication_port': 6066}

    self.intended_devs = [dev(0, 0, '10.1.1.1', '10.1.0.1'),
                          dev(1, 0, '10.1.1.1', '10.1.0.2'),
                          None,  # hole left by a removed device (id 2)
                          dev(3, 2, '10.1.2.1', '10.2.0.1'),
                          dev(4, 2, '10.1.2.2', '10.2.0.1')]
    self.intended_part_shift = 30
    self.intended_reload_time = 15
    ring.RingData(
        self.intended_replica2part2dev_id, self.intended_devs,
        self.intended_part_shift).save(self.testgz)
    self.ring = ring.Ring(
        self.testdir, reload_time=self.intended_reload_time,
        ring_name='whatever')
def __init__(self, conf):
    """Configure the replicator: ring, pools, timeouts and recon paths.

    All values come from *conf* with conservative defaults.
    """
    self.conf = conf
    self.logger = get_logger(conf, log_route='replicator')
    # local device layout
    self.root = conf.get('devices', '/srv/node')
    truthy = ('true', 't', '1', 'on', 'yes', 'y')
    self.mount_check = conf.get('mount_check', 'true').lower() in truthy
    # networking / concurrency
    self.port = int(conf.get('bind_port', self.default_port))
    self.cpool = GreenPool(size=int(conf.get('concurrency', 8)))
    self.ring = ring.Ring(conf.get('swift_dir', '/etc/swift'),
                          ring_name=self.server_type)
    # replication tunables
    self.per_diff = int(conf.get('per_diff', 1000))
    self.max_diffs = int(conf.get('max_diffs') or 100)
    self.interval = int(
        conf.get('interval') or conf.get('run_pause') or 30)
    self.vm_test_mode = conf.get('vm_test_mode', 'no').lower() in (
        'yes', 'true', 'on', '1')
    self.node_timeout = int(conf.get('node_timeout', 10))
    self.conn_timeout = float(conf.get('conn_timeout', 0.5))
    self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
    swift.common.db.DB_PREALLOCATION = \
        conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
    self._zero_stats()
    # recon cache location for stats reporting
    self.recon_cache_path = conf.get('recon_cache_path',
                                     '/var/cache/swift')
    self.recon_replicator = '%s.recon' % self.server_type
    self.rcache = os.path.join(self.recon_cache_path,
                               self.recon_replicator)
def _create_test_ring(path):
    """Write object.ring.gz into *path* (two IPv6 devices included)
    and return a Ring loaded from it."""
    testgz = os.path.join(path, 'object.ring.gz')
    replica_maps = [
        [0, 1, 2, 3, 4, 5, 6],
        [1, 2, 3, 0, 5, 6, 4],
        [2, 3, 0, 1, 6, 4, 5],
    ]
    # devices 5 and 6 deliberately use IPv6 addresses
    addresses = ['127.0.0.0', '127.0.0.1', '127.0.0.2', '127.0.0.3',
                 '127.0.0.4', 'fe80::202:b3ff:fe1e:8329',
                 '2001:0db8:85a3:0000:0000:8a2e:0370:7334']
    zones = [0, 1, 2, 4, 5, 6, 7]
    devices = [{'id': idx, 'device': 'sda', 'zone': zone,
                'ip': ip, 'port': 6000}
               for idx, (zone, ip) in enumerate(zip(zones, addresses))]
    part_shift = 30
    reload_time = 15
    with closing(GzipFile(testgz, 'wb')) as fp:
        pickle.dump(
            ring.RingData(replica_maps, devices, part_shift), fp)
    return ring.Ring(path, ring_name='object',
                     reload_time=reload_time)
def __init__(self, conf, logger=None):
    """Configure the db replicator from *conf*.

    :param conf: configuration mapping
    :param logger: optional pre-built logger; one is created otherwise
    """
    self.conf = conf
    self.logger = logger or get_logger(conf, log_route='replicator')
    # device layout and sanity checks
    self.root = conf.get('devices', '/srv/node')
    self.mount_check = config_true_value(conf.get('mount_check', 'true'))
    # network endpoints and concurrency
    self.bind_ip = conf.get('bind_ip', '0.0.0.0')
    self.port = int(conf.get('bind_port', self.default_port))
    self.cpool = GreenPool(size=int(conf.get('concurrency', 8)))
    self.ring = ring.Ring(conf.get('swift_dir', '/etc/swift'),
                          ring_name=self.server_type)
    self._local_device_ids = set()
    # replication tunables
    self.per_diff = int(conf.get('per_diff', 1000))
    self.max_diffs = int(conf.get('max_diffs') or 100)
    self.interval = int(
        conf.get('interval') or conf.get('run_pause') or 30)
    self.node_timeout = float(conf.get('node_timeout', 10))
    self.conn_timeout = float(conf.get('conn_timeout', 0.5))
    self.rsync_compress = config_true_value(
        conf.get('rsync_compress', 'no'))
    self.rsync_module = conf.get('rsync_module', '').rstrip('/')
    if not self.rsync_module:
        self.rsync_module = '{replication_ip}::%s' % self.server_type
    self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
    swift.common.db.DB_PREALLOCATION = \
        config_true_value(conf.get('db_preallocation', 'f'))
    self._zero_stats()
    # recon cache location for stats reporting
    self.recon_cache_path = conf.get('recon_cache_path',
                                     '/var/cache/swift')
    self.recon_replicator = '%s.recon' % self.server_type
    self.rcache = os.path.join(self.recon_cache_path,
                               self.recon_replicator)
    # extracts the device name component from an object path
    sep = os.path.sep
    self.extract_device_re = re.compile(
        '%s%s([^%s]+)' % (self.root, sep, sep))
def test_print_ring_locations_container(self):
    """print_ring_locations should emit the container db path.

    Fix: assertTrue(x in y) replaced by assertIn, which reports the
    actual output on failure instead of just 'False is not true'.
    """
    out = StringIO()
    with mock.patch('sys.stdout', out):
        contring = ring.Ring(self.testdir, ring_name='container')
        print_ring_locations(contring, 'dir', 'acct', 'con')
    exp_db = os.path.join('${DEVICE:-/srv/node*}', 'sdb1', 'dir', '1',
                          'fe6', '63e70955d78dfc62821edc07d6ec1fe6')
    self.assertIn(exp_db, out.getvalue())
def test_print_ring_locations_obj(self):
    """print_ring_locations should emit the object hash-dir path.

    Fix: assertTrue(x in y) replaced by assertIn for a useful
    failure message.
    """
    out = StringIO()
    with mock.patch('sys.stdout', out):
        objring = ring.Ring(self.testdir, ring_name='object')
        print_ring_locations(objring, 'dir', 'acct', 'con', 'obj')
    exp_obj = os.path.join('${DEVICE:-/srv/node*}', 'sda1', 'dir', '1',
                           '117', '4a16154fc15c75e26ba6afadf5b1c117')
    self.assertIn(exp_obj, out.getvalue())
def test_reload(self):
    """Each Ring accessor should trigger a reload once the ring file
    changes on disk and reload_time has elapsed.

    Fixes: deprecated assertEquals/assertNotEquals replaced with
    assertEqual/assertNotEqual; Python-2-only generator.next()
    replaced with next(); the GzipFile handles passed straight to
    pickle.dump were never closed (fd leak) — now closed explicitly.
    """

    def write_ring():
        # dump the updated RingData and close the gzip handle promptly
        gz = GzipFile(self.testgz, 'wb')
        pickle.dump(
            ring.RingData(self.intended_replica2part2dev_id,
                          self.intended_devs,
                          self.intended_part_shift),
            gz)
        gz.close()
        sleep(0.1)  # let reload_time elapse

    # get_nodes() triggers a reload
    os.utime(self.testgz, (time() - 300, time() - 300))
    self.ring = ring.Ring(self.testgz, reload_time=0.001)
    orig_mtime = self.ring._mtime
    self.assertEqual(len(self.ring.devs), 3)
    self.intended_devs.append({'id': 3, 'zone': 3})
    write_ring()
    self.ring.get_nodes('a')
    self.assertEqual(len(self.ring.devs), 4)
    self.assertNotEqual(self.ring._mtime, orig_mtime)

    # get_part_nodes() triggers a reload
    os.utime(self.testgz, (time() - 300, time() - 300))
    self.ring = ring.Ring(self.testgz, reload_time=0.001)
    orig_mtime = self.ring._mtime
    self.assertEqual(len(self.ring.devs), 4)
    self.intended_devs.append({'id': 4, 'zone': 4})
    write_ring()
    self.ring.get_part_nodes(0)
    self.assertEqual(len(self.ring.devs), 5)
    self.assertNotEqual(self.ring._mtime, orig_mtime)

    # get_more_nodes() triggers a reload
    os.utime(self.testgz, (time() - 300, time() - 300))
    self.ring = ring.Ring(self.testgz, reload_time=0.001)
    orig_mtime = self.ring._mtime
    part, nodes = self.ring.get_nodes('a')
    self.assertEqual(len(self.ring.devs), 5)
    self.intended_devs.append({'id': 5, 'zone': 5})
    write_ring()
    next(self.ring.get_more_nodes(part))
    self.assertEqual(len(self.ring.devs), 6)
    self.assertNotEqual(self.ring._mtime, orig_mtime)
def test_print_item_locations_ring_policy_mismatch_no_target(self):
    """A valid policy name that conflicts with the ring's name should
    warn about the mismatch and then exit for lack of a target."""
    captured = StringIO()
    with mock.patch('sys.stdout', captured):
        objring = ring.Ring(self.testdir, ring_name='object')
        # 'zero' is a valid policy, but the ring is named 'object'
        self.assertRaises(InfoSystemExit, print_item_locations,
                          objring, policy_name='zero')
    output = captured.getvalue()
    self.assertIn('Warning: mismatch between ring and policy name!',
                  output)
    self.assertIn('No target specified', output)
def test_print_item_locations_invalid_policy_no_target(self):
    """An unknown policy name should produce a warning and then exit
    because no target was specified."""
    captured = StringIO()
    bogus_policy = 'nineteen'
    with mock.patch('sys.stdout', captured):
        objring = ring.Ring(self.testdir, ring_name='object')
        self.assertRaises(InfoSystemExit, print_item_locations,
                          objring, policy_name=bogus_policy)
    output = captured.getvalue()
    self.assertIn('Warning: Policy %s is not valid' % bogus_policy,
                  output)
    self.assertIn('No target specified', output)
def test_print_ring_locations_account(self):
    """print_ring_locations should emit the account db path and the
    ips of the nodes holding it.

    Fix: assertTrue(x in y) replaced by assertIn for informative
    failure output.
    """
    out = StringIO()
    with mock.patch('sys.stdout', out):
        acctring = ring.Ring(self.testdir, ring_name='account')
        print_ring_locations(acctring, 'dir', 'acct')
    exp_db = os.path.join('${DEVICE:-/srv/node*}', 'sdb1', 'dir', '3',
                          'b47', 'dc5be2aa4347a22a0fee6bc7de505b47')
    self.assertIn(exp_db, out.getvalue())
    self.assertIn('127.0.0.1', out.getvalue())
    self.assertIn('127.0.0.2', out.getvalue())
def test_print_ring_locations_partition_number(self):
    """Passing tpart should print the partition dirs on every device.

    Fix: assertTrue(x in y) replaced by assertIn for informative
    failure output.
    """
    out = StringIO()
    with mock.patch('sys.stdout', out):
        objring = ring.Ring(self.testdir, ring_name='object')
        print_ring_locations(objring, 'objects', None, tpart='1')
    exp_obj1 = os.path.join('${DEVICE:-/srv/node*}',
                            'sda1', 'objects', '1')
    exp_obj2 = os.path.join('${DEVICE:-/srv/node*}',
                            'sdb1', 'objects', '1')
    self.assertIn(exp_obj1, out.getvalue())
    self.assertIn(exp_obj2, out.getvalue())
def __init__(self, conf, logger=None):
    """Configure the db replicator from *conf*; warns when the
    deprecated run_pause option is still present.

    :param conf: configuration mapping
    :param logger: optional pre-built logger; one is created otherwise
    """
    self.conf = conf
    self.logger = logger or get_logger(conf, log_route='replicator')
    # device layout and sanity checks
    self.root = conf.get('devices', '/srv/node')
    self.mount_check = config_true_value(conf.get('mount_check', 'true'))
    # network endpoints and concurrency
    self.bind_ip = conf.get('bind_ip', '0.0.0.0')
    self.port = int(conf.get('bind_port', self.default_port))
    self.cpool = GreenPool(size=int(conf.get('concurrency', 8)))
    self.ring = ring.Ring(conf.get('swift_dir', '/etc/swift'),
                          ring_name=self.server_type)
    self._local_device_ids = set()
    # replication tunables
    self.per_diff = int(conf.get('per_diff', 1000))
    self.max_diffs = int(conf.get('max_diffs') or 100)
    self.interval = int(
        conf.get('interval') or conf.get('run_pause') or 30)
    if 'run_pause' in conf:
        if 'interval' in conf:
            self.logger.warning(
                'Option %(type)s-replicator/run_pause is deprecated '
                'and %(type)s-replicator/interval is already configured. '
                'You can safely remove run_pause; it is now ignored and '
                'will be removed in a future version.'
                % {'type': self.server_type})
        else:
            self.logger.warning(
                'Option %(type)s-replicator/run_pause is deprecated '
                'and will be removed in a future version. '
                'Update your configuration to use option '
                '%(type)s-replicator/interval.'
                % {'type': self.server_type})
    self.databases_per_second = float(
        conf.get('databases_per_second', 50))
    self.node_timeout = float(conf.get('node_timeout', 10))
    self.conn_timeout = float(conf.get('conn_timeout', 0.5))
    self.rsync_compress = config_true_value(
        conf.get('rsync_compress', 'no'))
    self.rsync_module = conf.get('rsync_module', '').rstrip('/')
    if not self.rsync_module:
        self.rsync_module = '{replication_ip}::%s' % self.server_type
    self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
    swift.common.db.DB_PREALLOCATION = \
        config_true_value(conf.get('db_preallocation', 'f'))
    swift.common.db.QUERY_LOGGING = \
        config_true_value(conf.get('db_query_logging', 'f'))
    self._zero_stats()
    # recon cache location for stats reporting
    self.recon_cache_path = conf.get('recon_cache_path',
                                     '/var/cache/swift')
    self.recon_replicator = '%s.recon' % self.server_type
    self.rcache = os.path.join(self.recon_cache_path,
                               self.recon_replicator)
    # extracts the device name component from an object path
    sep = os.path.sep
    self.extract_device_re = re.compile(
        '%s%s([^%s]+)' % (self.root, sep, sep))
    self.handoffs_only = config_true_value(
        conf.get('handoffs_only', 'no'))
def test_reload_old_style_pickled_ring(self):
    """Loading an old-style pickled ring (no region, no replication
    info) should upgrade each device: region defaults to 1 and the
    replication ip/port mirror the plain ip/port."""

    def old_dev(dev_id, zone, ip):
        return {'id': dev_id, 'zone': zone, 'weight': 1.0,
                'ip': ip, 'port': 6200}

    def upgraded_dev(dev_id, zone, ip):
        # expected shape after loading: region + replication backfill
        dev = old_dev(dev_id, zone, ip)
        dev.update({'region': 1,
                    'replication_ip': ip, 'replication_port': 6200})
        return dev

    placement = [(0, 0, '10.1.1.1'), (1, 0, '10.1.1.1'), None,
                 (3, 2, '10.1.2.1'), (4, 2, '10.1.2.2')]
    devs = [old_dev(*spec) if spec else None for spec in placement]
    intended_devs = [
        upgraded_dev(*spec) if spec else None for spec in placement]
    # simulate an old-style pickled ring
    testgz = os.path.join(self.testdir,
                          'without_replication_or_region.ring.gz')
    ring_data = ring.RingData(self.intended_replica2part2dev_id,
                              devs, self.intended_part_shift)
    # an old-style pickled ring won't have region data, so strip the
    # region that was filled in at construction time
    for dev in ring_data.devs:
        if dev:
            del dev["region"]
    gz_file = GzipFile(testgz, 'wb')
    pickle.dump(ring_data, gz_file, protocol=2)
    gz_file.close()
    self.ring = ring.Ring(
        self.testdir, reload_time=self.intended_reload_time,
        ring_name='without_replication_or_region')
    self.assertEqual(self.ring.devs, intended_devs)
def test_print_item_locations_account_container_ring(self):
    """Account + container against a container ring should print both
    names and 'None' for the object."""
    captured = StringIO()
    acct = 'account'
    cont = 'container'
    with mock.patch('sys.stdout', captured):
        container_ring = ring.Ring(self.testdir, ring_name='container')
        print_item_locations(container_ring, account=acct,
                             container=cont)
    output = captured.getvalue()
    self.assertIn('Account \t%s' % acct, output)
    self.assertIn('Container\t%s' % cont, output)
    self.assertIn('Object  \tNone', output)
def setUp(self):
    """Build a scratch directory containing one pickled test ring.

    Fix: the GzipFile handed to pickle.dump was never closed, leaking
    the file descriptor; it is now closed explicitly.
    """
    utils.HASH_PATH_SUFFIX = 'endcap'
    self.testdir = os.path.join(os.path.dirname(__file__), 'ring')
    rmtree(self.testdir, ignore_errors=1)
    os.mkdir(self.testdir)
    self.testgz = os.path.join(self.testdir, 'ring.gz')
    self.intended_replica2part2dev_id = [[0, 2, 0, 2], [2, 0, 2, 0]]
    # the None is the hole left by a removed device (id 1)
    self.intended_devs = [{'id': 0, 'zone': 0}, None,
                          {'id': 2, 'zone': 2}]
    self.intended_part_shift = 30
    self.intended_reload_time = 15
    gz = GzipFile(self.testgz, 'wb')
    pickle.dump(
        ring.RingData(self.intended_replica2part2dev_id,
                      self.intended_devs, self.intended_part_shift),
        gz)
    gz.close()
    self.ring = ring.Ring(self.testgz,
                          reload_time=self.intended_reload_time)
def test_print_item_locations_account_container_object_dashed_ring(self):
    """A dashed ring name ('object-1') should still resolve an
    account/container/object target and print all three names."""
    captured = StringIO()
    acct = 'account'
    cont = 'container'
    target_obj = 'object'
    with mock.patch('sys.stdout', captured):
        object_ring = ring.Ring(self.testdir, ring_name='object-1')
        print_item_locations(object_ring, ring_name='object-1',
                             account=acct, container=cont,
                             obj=target_obj)
    output = captured.getvalue()
    self.assertIn('Account \t%s' % acct, output)
    self.assertIn('Container\t%s' % cont, output)
    self.assertIn('Object  \t%s' % target_obj, output)
def __init__(self, url, token, container_name='test', object_name='test',
             server_type='container', policy=None):
    """Work out which node numbers are primaries vs. handoffs for the
    test target.

    :param url: storage URL; the account is parsed from its path
    :param token: auth token (stored, not used here)
    :param container_name: container to target
    :param object_name: object to target (object server_type only)
    :param server_type: 'account', 'container' or 'object'
    :param policy: required iff server_type == 'object'
    :raises TypeError: policy given for metadata servers, or missing
        for object servers
    :raises ValueError: unrecognized server_type

    Fix: corrected the 'Unkonwn' typo in the ValueError message (the
    sibling _setup implementation already spells it 'Unknown').
    """
    self.url = url
    self.token = token
    self.account = utils.split_path(urlparse(url).path, 2, 2)[1]
    self.container_name = container_name
    self.object_name = object_name
    server_list = ['%s-server' % server_type] if server_type else ['all']
    self.servers = Manager(server_list)
    policies = list(ENABLED_POLICIES)
    random.shuffle(policies)
    self.policies = itertools.cycle(policies)
    # only pass the names that are meaningful for this server type
    o = object_name if server_type == 'object' else None
    c = container_name if server_type in ('object', 'container') else None
    if server_type in ('container', 'account'):
        if policy:
            raise TypeError('Metadata server brains do not '
                            'support specific storage policies')
        self.policy = None
        self.ring = ring.Ring('/etc/swift/%s.ring.gz' % server_type)
    elif server_type == 'object':
        if not policy:
            raise TypeError('Object BrainSplitters need to '
                            'specify the storage policy')
        self.policy = policy
        policy.load_ring('/etc/swift')
        self.ring = policy.object_ring
    else:
        raise ValueError('Unknown server_type: %r' % server_type)
    self.server_type = server_type
    part, nodes = self.ring.get_nodes(self.account, c, o)
    node_ids = [n['id'] for n in nodes]
    if all(n_id in node_ids for n_id in (0, 1)):
        self.primary_numbers = (1, 2)
        self.handoff_numbers = (3, 4)
    else:
        self.primary_numbers = (3, 4)
        self.handoff_numbers = (1, 2)
def test_print_item_locations_account_with_ring(self):
    """An account target with a ring not named 'account' should warn
    but still print the account line (container/object are None)."""
    captured = StringIO()
    acct = 'account'
    with mock.patch('sys.stdout', captured):
        account_ring = ring.Ring(self.testdir, ring_name=acct)
        print_item_locations(account_ring, account=acct)
    output = captured.getvalue()
    self.assertIn('Account \t%s' % acct, output)
    self.assertIn(
        'Warning: account specified but ring not named "account"',
        output)
    self.assertIn('Account \t%s' % acct, output)
    self.assertIn('Container\tNone', output)
    self.assertIn('Object  \tNone', output)
def __init__(self, conf):
    """Configure the replicator: ring, pool, timeouts, stats."""
    self.conf = conf
    self.logger = get_logger(conf)
    # device layout and sanity checks
    self.root = conf.get('devices', '/srv/node')
    truthy = ('true', 't', '1', 'on', 'yes', 'y')
    self.mount_check = conf.get('mount_check', 'true').lower() in truthy
    # networking / concurrency
    self.port = int(conf.get('bind_port', self.default_port))
    self.cpool = GreenPool(size=int(conf.get('concurrency', 8)))
    ring_path = os.path.join(conf.get('swift_dir', '/etc/swift'),
                             self.ring_file)
    self.ring = ring.Ring(ring_path)
    # replication tunables
    self.per_diff = int(conf.get('per_diff', 1000))
    self.run_pause = int(conf.get('run_pause', 30))
    self.vm_test_mode = conf.get('vm_test_mode', 'no').lower() in (
        'yes', 'true', 'on', '1')
    self.node_timeout = int(conf.get('node_timeout', 10))
    self.conn_timeout = float(conf.get('conn_timeout', 0.5))
    self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
    self._zero_stats()
def __init__(self, url, token, container_name='test', object_name='test'):
    """Record which container node numbers are primaries vs. handoffs
    for the test account/container."""
    self.url = url
    self.token = token
    self.account = utils.split_path(urlparse(url).path, 2, 2)[1]
    self.container_name = container_name
    self.object_name = object_name
    self.servers = Manager(['container-server'])
    shuffled = list(POLICIES)
    random.shuffle(shuffled)
    self.policies = itertools.cycle(shuffled)
    container_ring = ring.Ring('/etc/swift/container.ring.gz')
    container_part, container_nodes = container_ring.get_nodes(
        self.account, self.container_name)
    ids_found = set(node['id'] for node in container_nodes)
    if 0 in ids_found and 1 in ids_found:
        self.primary_numbers = (1, 2)
        self.handoff_numbers = (3, 4)
    else:
        self.primary_numbers = (3, 4)
        self.handoff_numbers = (1, 2)
def _setup(self, account, container_name, object_name, server_type,
           policy):
    """Resolve the target's ring/nodes and classify node numbers as
    primaries vs. handoffs.

    :raises TypeError: policy given for metadata servers, or missing
        for object servers
    :raises ValueError: unrecognized server_type
    """
    self.account = account
    self.container_name = container_name
    self.object_name = object_name
    if server_type:
        managed = ['%s-server' % server_type]
    else:
        managed = ['all']
    self.servers = Manager(managed)
    shuffled = list(ENABLED_POLICIES)
    random.shuffle(shuffled)
    self.policies = itertools.cycle(shuffled)
    # only pass the names that are meaningful for this server type
    obj = object_name if server_type == 'object' else None
    cont = (container_name
            if server_type in ('object', 'container') else None)
    if server_type in ('container', 'account'):
        if policy:
            raise TypeError('Metadata server brains do not '
                            'support specific storage policies')
        self.policy = None
        self.ring = ring.Ring(
            '/etc/swift/%s.ring.gz' % server_type)
    elif server_type == 'object':
        if not policy:
            raise TypeError('Object BrainSplitters need to '
                            'specify the storage policy')
        self.policy = policy
        policy.load_ring('/etc/swift')
        self.ring = policy.object_ring
    else:
        raise ValueError('Unknown server_type: %r' % server_type)
    self.server_type = server_type
    self.part, self.nodes = self.ring.get_nodes(self.account, cont, obj)
    # node numbers are 1-based device ids
    self.node_numbers = [node['id'] + 1 for node in self.nodes]
    if 1 in self.node_numbers and 2 in self.node_numbers:
        self.primary_numbers = (1, 2)
        self.handoff_numbers = (3, 4)
    else:
        self.primary_numbers = (3, 4)
        self.handoff_numbers = (1, 2)
def test_reload_without_replication(self):
    """Loading a ring whose devices lack replication fields should
    backfill replication_ip/replication_port from ip/port."""

    def bare_dev(dev_id, zone, ip):
        return {'id': dev_id, 'region': 0, 'zone': zone, 'weight': 1.0,
                'ip': ip, 'port': 6200}

    def filled_dev(dev_id, zone, ip):
        # expected post-load shape: replication fields mirror ip/port
        dev = bare_dev(dev_id, zone, ip)
        dev.update({'replication_ip': ip, 'replication_port': 6200})
        return dev

    placement = [(0, 0, '10.1.1.1'), (1, 0, '10.1.1.1'), None,
                 (3, 2, '10.1.2.1'), (4, 2, '10.1.2.2')]
    replication_less_devs = [
        bare_dev(*spec) if spec else None for spec in placement]
    intended_devs = [
        filled_dev(*spec) if spec else None for spec in placement]
    testgz = os.path.join(self.testdir, 'without_replication.ring.gz')
    ring.RingData(
        self.intended_replica2part2dev_id, replication_less_devs,
        self.intended_part_shift).save(testgz)
    self.ring = ring.Ring(
        self.testdir, reload_time=self.intended_reload_time,
        ring_name='without_replication')
    self.assertEqual(self.ring.devs, intended_devs)
def test_print_obj_curl_command_ipv6(self):
    """print_obj should emit a curl command with the IPv6 host
    wrapped in brackets."""
    # Note: policy 3 has IPv6 addresses in its ring
    datafile3 = os.path.join(
        self.testdir, 'sda', 'objects-3', '1', 'ea8',
        'db4449e025aca992307c7c804a67eea8', '1402017884.18202.data')
    utils.mkdirs(os.path.dirname(datafile3))
    md = {'name': '/AUTH_admin/c/obj',
          'Content-Type': 'application/octet-stream',
          'ETag': 'd41d8cd98f00b204e9800998ecf8427e',
          'Content-Length': 0}
    with open(datafile3, 'wb') as fp:
        write_metadata(fp, md)
    object_ring = ring.Ring(self.testdir, ring_name='object-3')
    part, nodes = object_ring.get_nodes('AUTH_admin', 'c', 'obj')
    node = nodes[0]
    out = StringIO()
    hash_dir = os.path.dirname(datafile3)
    file_name = os.path.basename(datafile3)
    # print_obj resolves a bare file name relative to the hash dir,
    # so chdir there (and always restore the old cwd)
    cwd = os.getcwd()
    try:
        os.chdir(hash_dir)
        with mock.patch('sys.stdout', out):
            print_obj(file_name, swift_dir=self.testdir)
    finally:
        os.chdir(cwd)
    exp_curl = (
        'curl -g -I -XHEAD '
        '"http://[{host}]:{port}'
        '/{device}/{part}/AUTH_admin/c/obj" ').format(
            host=node['ip'], port=node['port'],
            device=node['device'], part=part)
    self.assertIn(exp_curl, out.getvalue())
def _create_test_ring(path):
    """Write a pickled object ring into *path* and return a Ring
    loaded from it.

    Fix: the GzipFile handed to pickle.dump was never closed (fd
    leak); it is now closed explicitly.
    """
    testgz = os.path.join(path, 'object.ring.gz')
    intended_replica2part2dev_id = [
        [0, 1, 2, 3, 4, 5, 6],
        [1, 2, 3, 0, 5, 6, 4],
        [2, 3, 0, 1, 6, 4, 5],
    ]
    # device i lives at 127.0.0.i; zones are 0,1,2 then 4..7
    intended_devs = [
        {'id': i, 'device': 'sda', 'zone': z,
         'ip': '127.0.0.%d' % i, 'port': 6000}
        for i, z in enumerate([0, 1, 2, 4, 5, 6, 7])
    ]
    intended_part_shift = 30
    intended_reload_time = 15
    gz = GzipFile(testgz, 'wb')
    pickle.dump(ring.RingData(intended_replica2part2dev_id,
                              intended_devs, intended_part_shift),
                gz)
    gz.close()
    return ring.Ring(testgz, reload_time=intended_reload_time)
def test_get_more_nodes(self):
    """Exercise Ring.get_more_nodes across a series of ring changes.

    The literal device-id lists below pin the exact handoff order the
    ring produces for fixed rebalance seeds; any change to the handoff
    algorithm shows up as a diff against them.
    """
    # Yes, these tests are deliberately very fragile. We want to make sure
    # that if someone changes the results the ring produces, they know it.
    exp_part = 6
    exp_devs = [71, 77, 30]
    exp_zones = set([6, 3, 7])
    # Expected full handoff sequence for exp_part with seed=2.
    exp_handoffs = [
        99, 43, 94, 13, 1, 49, 60, 72, 27, 68, 78, 26, 21, 9, 51,
        105, 47, 89, 65, 82, 34, 98, 38, 85, 16, 4, 59, 102, 40, 90,
        20, 8, 54, 66, 80, 25, 14, 2, 50, 12, 0, 48, 70, 76, 32,
        107, 45, 87, 101, 44, 93, 100, 42, 95, 106, 46, 88, 97, 37,
        86, 96, 36, 84, 17, 5, 57, 63, 81, 33, 67, 79, 24, 15, 3,
        58, 69, 75, 31, 61, 74, 29, 23, 10, 52, 22, 11, 53, 64, 83,
        35, 62, 73, 28, 18, 6, 56, 104, 39, 91, 103, 41, 92, 19, 7,
        55
    ]
    # Expected first handoff device id for every partition in the ring.
    exp_first_handoffs = [
        23, 64, 105, 102, 67, 17, 99, 65, 69, 97, 15, 17, 24, 98,
        66, 65, 69, 18, 104, 105, 16, 107, 100, 15, 14, 19, 102,
        105, 63, 104, 99, 12, 107, 99, 16, 105, 71, 15, 15, 63, 63,
        99, 21, 68, 20, 64, 96, 21, 98, 19, 68, 99, 15, 69, 62,
        100, 96, 102, 17, 62, 13, 61, 102, 105, 22, 16, 21, 18, 21,
        100, 20, 16, 21, 106, 66, 106, 16, 99, 16, 22, 62, 60, 99,
        69, 18, 23, 104, 98, 106, 61, 21, 23, 23, 16, 67, 71, 101,
        16, 64, 66, 70, 15, 102, 63, 19, 98, 18, 106, 101, 100, 62,
        63, 98, 18, 13, 97, 23, 22, 100, 13, 14, 67, 96, 14, 105,
        97, 71, 64, 96, 22, 65, 66, 98, 19, 105, 98, 97, 21, 15,
        69, 100, 98, 106, 65, 66, 97, 62, 22, 68, 63, 61, 67, 67,
        20, 105, 106, 105, 18, 71, 100, 17, 62, 60, 13, 103, 99,
        101, 96, 97, 16, 60, 21, 14, 20, 12, 60, 69, 104, 65, 65,
        17, 16, 67, 13, 64, 15, 16, 68, 96, 21, 104, 66, 96, 105,
        58, 105, 103, 21, 96, 60, 16, 96, 21, 71, 16, 99, 101, 63,
        62, 103, 18, 102, 60, 17, 19, 106, 97, 14, 99, 68, 102, 13,
        70, 103, 21, 22, 19, 61, 103, 23, 104, 65, 62, 68, 16, 65,
        15, 102, 102, 71, 99, 63, 67, 19, 23, 15, 69, 107, 14, 13,
        64, 13, 105, 15, 98, 69
    ]
    # Build a 9-zone ring: 4 servers per zone, 3 devices per server.
    rb = ring.RingBuilder(8, 3, 1)
    next_dev_id = 0
    for zone in range(1, 10):
        for server in range(1, 5):
            for device in range(1, 4):
                rb.add_dev({
                    'id': next_dev_id,
                    'ip': '1.2.%d.%d' % (zone, server),
                    'port': 1234 + device,
                    'zone': zone, 'region': 0,
                    'weight': 1.0,
                    'device': "d%s" % device
                })
                next_dev_id += 1
    rb.rebalance(seed=2)
    rb.get_ring().save(self.testgz)
    r = ring.Ring(self.testdir, ring_name='whatever')

    # every part has the same number of handoffs
    part_handoff_counts = set()
    for part in range(r.partition_count):
        part_handoff_counts.add(len(list(r.get_more_nodes(part))))
    self.assertEqual(part_handoff_counts, {105})
    # which less the primaries - is every device in the ring
    self.assertEqual(len(list(rb._iter_devs())) - rb.replicas, 105)

    part, devs = r.get_nodes('a', 'c', 'o')
    primary_zones = set([d['zone'] for d in devs])
    self.assertEqual(part, exp_part)
    self.assertEqual([d['id'] for d in devs], exp_devs)
    self.assertEqual(primary_zones, exp_zones)
    devs = list(r.get_more_nodes(part))
    self.assertEqual(len(devs), len(exp_handoffs))
    dev_ids = [d['id'] for d in devs]
    self.assertEqual(dev_ids, exp_handoffs)

    # We mark handoffs so code consuming extra nodes can reason about how
    # far they've gone
    for i, d in enumerate(devs):
        self.assertEqual(d['handoff_index'], i)

    # The first 6 replicas plus the 3 primary nodes should cover all 9
    # zones in this test
    seen_zones = set(primary_zones)
    seen_zones.update([d['zone'] for d in devs[:6]])
    self.assertEqual(seen_zones, set(range(1, 10)))

    # The first handoff nodes for each partition in the ring
    devs = []
    for part in range(r.partition_count):
        devs.append(next(r.get_more_nodes(part))['id'])
    self.assertEqual(devs, exp_first_handoffs)

    # Add a new device we can handoff to.
    zone = 5
    server = 0
    rb.add_dev({
        'id': next_dev_id,
        'ip': '1.2.%d.%d' % (zone, server),
        'port': 1234, 'zone': zone, 'region': 0, 'weight': 1.0,
        'device': 'xd0'
    })
    next_dev_id += 1
    rb.pretend_min_part_hours_passed()
    num_parts_changed, _balance, _removed_dev = rb.rebalance(seed=2)
    rb.get_ring().save(self.testgz)
    r = ring.Ring(self.testdir, ring_name='whatever')

    # so now we expect the device list to be longer by one device
    part_handoff_counts = set()
    for part in range(r.partition_count):
        part_handoff_counts.add(len(list(r.get_more_nodes(part))))
    self.assertEqual(part_handoff_counts, {106})
    self.assertEqual(len(list(rb._iter_devs())) - rb.replicas, 106)
    # I don't think there's any special reason this dev goes at this index
    exp_handoffs.insert(27, rb.devs[-1]['id'])

    # We would change expectations here, but in this part only the added
    # device changed at all.
    part, devs = r.get_nodes('a', 'c', 'o')
    primary_zones = set([d['zone'] for d in devs])
    self.assertEqual(part, exp_part)
    self.assertEqual([d['id'] for d in devs], exp_devs)
    self.assertEqual(primary_zones, exp_zones)
    devs = list(r.get_more_nodes(part))
    dev_ids = [d['id'] for d in devs]
    self.assertEqual(len(dev_ids), len(exp_handoffs))
    for index, dev in enumerate(dev_ids):
        self.assertEqual(
            dev, exp_handoffs[index],
            'handoff differs at position %d\n%s\n%s' % (
                index, dev_ids[index:], exp_handoffs[index:]))

    # The handoffs still cover all the non-primary zones first
    seen_zones = set(primary_zones)
    seen_zones.update([d['zone'] for d in devs[:6]])
    self.assertEqual(seen_zones, set(range(1, 10)))

    # Change expectations for the rest of the parts
    devs = []
    for part in range(r.partition_count):
        devs.append(next(r.get_more_nodes(part))['id'])
    changed_first_handoff = 0
    for part in range(r.partition_count):
        if devs[part] != exp_first_handoffs[part]:
            changed_first_handoff += 1
            exp_first_handoffs[part] = devs[part]
    self.assertEqual(devs, exp_first_handoffs)
    self.assertEqual(changed_first_handoff, num_parts_changed)

    # Remove a device - no need to fluff min_part_hours.
    rb.remove_dev(0)
    num_parts_changed, _balance, _removed_dev = rb.rebalance(seed=1)
    rb.get_ring().save(self.testgz)
    r = ring.Ring(self.testdir, ring_name='whatever')

    # so now we expect the device list to be shorter by one device
    part_handoff_counts = set()
    for part in range(r.partition_count):
        part_handoff_counts.add(len(list(r.get_more_nodes(part))))
    self.assertEqual(part_handoff_counts, {105})
    self.assertEqual(len(list(rb._iter_devs())) - rb.replicas, 105)

    # Change expectations for our part
    exp_handoffs.remove(0)
    first_matches = 0
    total_changed = 0
    devs = list(d['id'] for d in r.get_more_nodes(exp_part))
    for i, part in enumerate(devs):
        if exp_handoffs[i] != devs[i]:
            total_changed += 1
            exp_handoffs[i] = devs[i]
        if not total_changed:
            first_matches += 1
    self.assertEqual(devs, exp_handoffs)
    # the first 21 handoffs were the same across the rebalance
    self.assertEqual(first_matches, 21)
    # but as you dig deeper some of the differences show up
    self.assertEqual(total_changed, 41)

    # Change expectations for the rest of the parts
    devs = []
    for part in range(r.partition_count):
        devs.append(next(r.get_more_nodes(part))['id'])
    changed_first_handoff = 0
    for part in range(r.partition_count):
        if devs[part] != exp_first_handoffs[part]:
            changed_first_handoff += 1
            exp_first_handoffs[part] = devs[part]
    self.assertEqual(devs, exp_first_handoffs)
    self.assertEqual(changed_first_handoff, num_parts_changed)

    # Test
    part, devs = r.get_nodes('a', 'c', 'o')
    primary_zones = set([d['zone'] for d in devs])
    self.assertEqual(part, exp_part)
    self.assertEqual([d['id'] for d in devs], exp_devs)
    self.assertEqual(primary_zones, exp_zones)
    devs = list(r.get_more_nodes(part))
    dev_ids = [d['id'] for d in devs]
    self.assertEqual(len(dev_ids), len(exp_handoffs))
    for index, dev in enumerate(dev_ids):
        self.assertEqual(
            dev, exp_handoffs[index],
            'handoff differs at position %d\n%s\n%s' % (
                index, dev_ids[index:], exp_handoffs[index:]))

    seen_zones = set(primary_zones)
    seen_zones.update([d['zone'] for d in devs[:6]])
    self.assertEqual(seen_zones, set(range(1, 10)))

    devs = []
    for part in range(r.partition_count):
        devs.append(next(r.get_more_nodes(part))['id'])
    for part in range(r.partition_count):
        self.assertEqual(
            devs[part], exp_first_handoffs[part],
            'handoff for partitition %d is now device id %d' % (
                part, devs[part]))

    # Add a partial replica
    rb.set_replicas(3.5)
    num_parts_changed, _balance, _removed_dev = rb.rebalance(seed=164)
    rb.get_ring().save(self.testgz)
    r = ring.Ring(self.testdir, ring_name='whatever')

    # Change expectations
    # We have another replica now
    exp_devs.append(90)
    exp_zones.add(8)
    # and therefore one less handoff
    exp_handoffs = exp_handoffs[:-1]
    # Caused some major changes in the sequence of handoffs for our test
    # partition, but at least the first stayed the same.
    devs = list(d['id'] for d in r.get_more_nodes(exp_part))
    first_matches = 0
    total_changed = 0
    for i, part in enumerate(devs):
        if exp_handoffs[i] != devs[i]:
            total_changed += 1
            exp_handoffs[i] = devs[i]
        if not total_changed:
            first_matches += 1
    # most seeds seem to throw out first handoff stabilization with
    # replica_count change
    self.assertEqual(first_matches, 2)
    # and lots of other handoff changes...
    self.assertEqual(total_changed, 95)
    self.assertEqual(devs, exp_handoffs)

    # Change expectations for the rest of the parts
    devs = []
    for part in range(r.partition_count):
        devs.append(next(r.get_more_nodes(part))['id'])
    changed_first_handoff = 0
    for part in range(r.partition_count):
        if devs[part] != exp_first_handoffs[part]:
            changed_first_handoff += 1
            exp_first_handoffs[part] = devs[part]
    self.assertEqual(devs, exp_first_handoffs)
    self.assertLessEqual(changed_first_handoff, num_parts_changed)

    # Test
    part, devs = r.get_nodes('a', 'c', 'o')
    primary_zones = set([d['zone'] for d in devs])
    self.assertEqual(part, exp_part)
    self.assertEqual([d['id'] for d in devs], exp_devs)
    self.assertEqual(primary_zones, exp_zones)
    devs = list(r.get_more_nodes(part))
    dev_ids = [d['id'] for d in devs]
    self.assertEqual(len(dev_ids), len(exp_handoffs))
    for index, dev in enumerate(dev_ids):
        self.assertEqual(
            dev, exp_handoffs[index],
            'handoff differs at position %d\n%s\n%s' % (
                index, dev_ids[index:], exp_handoffs[index:]))

    seen_zones = set(primary_zones)
    seen_zones.update([d['zone'] for d in devs[:6]])
    self.assertEqual(seen_zones, set(range(1, 10)))

    devs = []
    for part in range(r.partition_count):
        devs.append(next(r.get_more_nodes(part))['id'])
    for part in range(r.partition_count):
        self.assertEqual(
            devs[part], exp_first_handoffs[part],
            'handoff for partitition %d is now device id %d' % (
                part, devs[part]))

    # One last test of a partial replica partition
    exp_part2 = 136
    exp_devs2 = [70, 76, 32]
    exp_zones2 = set([3, 6, 7])
    exp_handoffs2 = [
        89, 97, 37, 53, 20, 1, 86, 64, 102, 40, 90, 60, 72, 27, 99,
        68, 78, 26, 105, 45, 42, 95, 22, 13, 49, 55, 11, 8, 83, 16,
        4, 59, 33, 108, 61, 74, 29, 88, 66, 80, 25, 100, 39, 67, 79,
        24, 65, 96, 36, 84, 54, 21, 63, 81, 56, 71, 77, 30, 48, 23,
        10, 52, 82, 34, 17, 107, 87, 104, 5, 35, 2, 50, 43, 62, 73,
        28, 18, 14, 98, 38, 85, 15, 57, 9, 51, 12, 6, 91, 3, 103,
        41, 92, 47, 75, 44, 69, 101, 93, 106, 46, 94, 31, 19, 7, 58
    ]

    part2, devs2 = r.get_nodes('a', 'c', 'o2')
    primary_zones2 = set([d['zone'] for d in devs2])
    self.assertEqual(part2, exp_part2)
    self.assertEqual([d['id'] for d in devs2], exp_devs2)
    self.assertEqual(primary_zones2, exp_zones2)
    devs2 = list(r.get_more_nodes(part2))
    dev_ids2 = [d['id'] for d in devs2]
    self.assertEqual(len(dev_ids2), len(exp_handoffs2))
    for index, dev in enumerate(dev_ids2):
        self.assertEqual(
            dev, exp_handoffs2[index],
            'handoff differs at position %d\n%s\n%s' % (
                index, dev_ids2[index:], exp_handoffs2[index:]))

    seen_zones = set(primary_zones2)
    seen_zones.update([d['zone'] for d in devs2[:6]])
    self.assertEqual(seen_zones, set(range(1, 10)))

    # Test distribution across regions
    rb.set_replicas(3)
    for region in range(1, 5):
        rb.add_dev({
            'id': next_dev_id,
            'ip': '1.%d.1.%d' % (region, server),
            'port': 1234,
            # 108.0 is the weight of all devices created prior to
            # this test in region 0; this way all regions have
            # equal combined weight
            'zone': 1, 'region': region, 'weight': 108.0,
            'device': 'sdx'
        })
        next_dev_id += 1
    rb.pretend_min_part_hours_passed()
    rb.rebalance(seed=1)
    rb.pretend_min_part_hours_passed()
    rb.rebalance(seed=1)
    rb.get_ring().save(self.testgz)
    r = ring.Ring(self.testdir, ring_name='whatever')

    # There's 5 regions now, so the primary nodes + first 2 handoffs
    # should span all 5 regions
    part, devs = r.get_nodes('a1', 'c1', 'o1')
    primary_regions = set([d['region'] for d in devs])
    primary_zones = set([(d['region'], d['zone']) for d in devs])
    more_devs = list(r.get_more_nodes(part))

    seen_regions = set(primary_regions)
    seen_regions.update([d['region'] for d in more_devs[:2]])
    self.assertEqual(seen_regions, set(range(0, 5)))

    # There are 13 zones now, so the first 13 nodes should all have
    # distinct zones (that's r0z0, r0z1, ..., r0z8, r1z1, r2z1, r3z1, and
    # r4z1).
    seen_zones = set(primary_zones)
    seen_zones.update([(d['region'], d['zone']) for d in more_devs[:10]])
    self.assertEqual(13, len(seen_zones))

    # Here's a brittle canary-in-the-coalmine test to make sure the region
    # handoff computation didn't change accidentally
    exp_handoffs = [
        111, 112, 35, 58, 62, 74, 20, 105, 41, 90, 53, 6, 3, 67,
        55, 76, 108, 32, 12, 80, 38, 85, 94, 42, 27, 99, 50, 47,
        70, 87, 26, 9, 15, 97, 102, 81, 23, 65, 33, 77, 34, 4, 75,
        8, 5, 30, 13, 73, 36, 92, 54, 51, 72, 78, 66, 1, 48, 14,
        93, 95, 88, 86, 84, 106, 60, 101, 57, 43, 89, 59, 79, 46,
        61, 52, 44, 45, 37, 68, 25, 100, 49, 24, 16, 71, 96, 21,
        107, 98, 64, 39, 18, 29, 103, 91, 22, 63, 69, 28, 56, 11,
        82, 10, 17, 19, 7, 40, 83, 104, 31
    ]
    dev_ids = [d['id'] for d in more_devs]
    self.assertEqual(len(dev_ids), len(exp_handoffs))
    for index, dev_id in enumerate(dev_ids):
        self.assertEqual(
            dev_id, exp_handoffs[index],
            'handoff differs at position %d\n%s\n%s' % (
                index, dev_ids[index:], exp_handoffs[index:]))
def test_get_more_nodes_with_zero_weight_region(self):
    """Handoff lookup should stay cheap when an entire region holds
    only zero-weight devices: count how many table reads each
    get_more_nodes() first-handoff fetch performs and bound them."""
    builder = ring.RingBuilder(8, 3, 1)
    add_values = [
        'r1z1-127.0.0.1:6200/d1',
        'r1z1-127.0.0.1:6201/d2',
        'r1z1-127.0.0.1:6202/d3',
        'r1z1-127.0.0.1:6203/d4',
        'r1z2-127.0.0.2:6200/d1',
        'r1z2-127.0.0.2:6201/d2',
        'r1z2-127.0.0.2:6202/d3',
        'r1z2-127.0.0.2:6203/d4',
        'r2z1-127.0.1.1:6200/d1',
        'r2z1-127.0.1.1:6201/d2',
        'r2z1-127.0.1.1:6202/d3',
        'r2z1-127.0.1.1:6203/d4',
        'r2z2-127.0.1.2:6200/d1',
        'r2z2-127.0.1.2:6201/d2',
        'r2z2-127.0.1.2:6202/d3',
        'r2z2-127.0.1.2:6203/d4',
    ]
    parsed_devs = [ring_utils.parse_add_value(v) for v in add_values]
    for dev in parsed_devs:
        # Region 2 carries no weight at all; region 1 gets everything.
        dev['weight'] = 0.0 if dev['region'] == 2 else 1.0
        builder.add_dev(dev)
    builder.rebalance()
    builder.get_ring().save(self.testgz)
    r = ring.Ring(self.testdir, ring_name='whatever')
    self.assertEqual(r.version, builder.version)

    class SpyRingTable(object):
        """Wraps a replica2part2dev table, counting next() calls."""

        def __init__(self, table):
            self.table = table
            self.count = 0

        def __iter__(self):
            self._iter = iter(self.table)
            return self

        def __next__(self):
            self.count += 1
            return next(self._iter)

        # complete the api
        next = __next__

        def __getitem__(self, key):
            return self.table[key]

    histogram = collections.defaultdict(int)
    for part in range(r.partition_count):
        spy_table = SpyRingTable(r._replica2part2dev_id)
        with mock.patch.object(r, '_replica2part2dev_id', spy_table):
            next(r.get_more_nodes(part))
        histogram[spy_table.count] += 1
    # Don't let our summing muddy our histogram
    histogram = dict(histogram)

    # sanity
    self.assertEqual(1, r._num_regions)
    self.assertEqual(2, r._num_zones)
    self.assertEqual(256, r.partition_count)

    # We always do one loop (including the StopIteration) while getting
    # primaries, so every part should hit next() at least 5 times
    self.assertEqual(sum(histogram.get(x, 0) for x in range(5)), 0,
                     histogram)

    # Most of the parts should find a handoff device in the next partition,
    # but because some of the primary devices may *also* be used for that
    # partition, that means 5, 6, or 7 calls to next().
    self.assertGreater(sum(histogram.get(x, 0) for x in range(8)), 160,
                       histogram)

    # Want 90% confidence that it'll happen within two partitions
    self.assertGreater(sum(histogram.get(x, 0) for x in range(12)), 230,
                       histogram)

    # Tail should fall off fairly quickly
    self.assertLess(sum(histogram.get(x, 0) for x in range(20, 100)), 5,
                    histogram)

    # Hard limit at 50 (we've seen as bad as 41, 45)
    self.assertEqual(sum(histogram.get(x, 0) for x in range(50, 100)), 0,
                     histogram)