def _replace_shard_ranges(broker, args, shard_data, timeout=0):
    """Replace the broker's existing shard ranges with new ones.

    Builds shard ranges from ``shard_data``, sanity-checks them against the
    broker's own shard range, deletes the old ranges and merges the new ones,
    then optionally enables sharding.

    :param broker: a container broker.
    :param args: CLI namespace; ``shards_account_prefix``, ``verbose``,
        ``replace_timeout`` and ``enable`` are read.
    :param shard_data: list of dicts describing the new shard ranges.
    :param timeout: minimum db timeout to use while replacing ranges.
    :return: 0 on success, otherwise a non-zero status.
    """
    own_sr = _check_own_shard_range(broker, args)
    new_ranges = make_shard_ranges(
        broker, shard_data, args.shards_account_prefix)
    _check_shard_ranges(own_sr, new_ranges)
    if args.verbose > 0:
        print('New shard ranges to be injected:')
        print(json.dumps([dict(sr) for sr in new_ranges],
                         sort_keys=True, indent=2))
    # Crank up the timeout in an effort to *make sure* this succeeds
    with broker.updated_timeout(max(timeout, args.replace_timeout)):
        status = delete_shard_ranges(broker, args)
        if status != 0:
            return status
        broker.merge_shard_ranges(new_ranges)
    print('Injected %d shard ranges.' % len(new_ranges))
    print('Run container-replicator to replicate them to other nodes.')
    if not args.enable:
        print('Use the enable sub-command to enable sharding.')
        return 0
    return enable_sharding(broker, args)
def test_compact_max_shrinking_default(self):
    # verify default limit on number of shrinking shards per acceptor
    broker = self._make_broker()
    shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
    for i, sr in enumerate(shard_ranges):
        sr.update_state(ShardRange.ACTIVE)
    broker.merge_shard_ranges(shard_ranges)
    self._move_broker_to_sharded_state(broker)

    def do_compact(expect_msg):
        # run 'compact --yes' (default limits) and assert the first line of
        # stdout matches expect_msg; returns the broker's shard ranges
        out = StringIO()
        err = StringIO()
        with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
            ret = main([broker.db_file, 'compact', '--yes'])
        self.assertEqual(0, ret)
        err_lines = err.getvalue().split('\n')
        self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
        out_lines = out.getvalue().split('\n')
        self.assertEqual([expect_msg], out_lines[:1])
        return broker.get_shard_ranges()

    updated_ranges = do_compact(
        'Updated 5 shard sequences for compaction.')
    # with the default limit each acceptor absorbs exactly one donor, so the
    # ten ranges compact pairwise: odd-indexed acceptors take the preceding
    # even-indexed donor's namespace
    for acceptor in (1, 3, 5, 7, 9):
        shard_ranges[acceptor].lower = shard_ranges[acceptor - 1].lower
    self.assertEqual(shard_ranges, updated_ranges)
    self.assertEqual([ShardRange.SHRINKING, ShardRange.ACTIVE] * 5,
                     [sr.state for sr in updated_ranges])
    # check idempotency
    updated_ranges = do_compact('No shards identified for compaction.')
    self.assertEqual(shard_ranges, updated_ranges)
    self.assertEqual([ShardRange.SHRINKING, ShardRange.ACTIVE] * 5,
                     [sr.state for sr in updated_ranges])
def test_compact_expansion_limit(self):
    # verify option to limit the size of each acceptor after compaction
    broker = self._make_broker()
    shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
    for sr in shard_ranges:
        sr.update_state(ShardRange.ACTIVE)
    broker.merge_shard_ranges(shard_ranges)
    self._move_broker_to_sharded_state(broker)
    out = StringIO()
    err = StringIO()
    with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
        ret = main([broker.db_file, 'compact', '--yes',
                    '--expansion-limit', '20'])
    self.assertEqual(0, ret, out.getvalue())
    err_lines = err.getvalue().split('\n')
    self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
    out_lines = out.getvalue().split('\n')
    self.assertEqual(
        ['Updated 5 shard sequences for compaction.'], out_lines[:1])
    updated_ranges = broker.get_shard_ranges()
    # with the expansion limit each acceptor can absorb only one donor, so
    # ranges compact pairwise: each odd-indexed acceptor takes over the
    # namespace of the even-indexed donor immediately before it
    for acceptor in (1, 3, 5, 7, 9):
        shard_ranges[acceptor].lower = shard_ranges[acceptor - 1].lower
    self.assertEqual(shard_ranges, updated_ranges)
    self.assertEqual([ShardRange.SHRINKING, ShardRange.ACTIVE] * 5,
                     [sr.state for sr in updated_ranges])
def test_compact_overlapping_shard_ranges(self):
    # verify that containers with overlaps will not be compacted
    broker = self._make_broker()
    shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
    for sr in shard_ranges:
        sr.update_state(ShardRange.ACTIVE)
    # create an overlap between adjacent ranges
    shard_ranges[3].upper = shard_ranges[4].upper
    broker.merge_shard_ranges(shard_ranges)
    self._move_broker_to_sharded_state(broker)
    stdout_buf = StringIO()
    stderr_buf = StringIO()
    with mock.patch('sys.stdout', stdout_buf), \
            mock.patch('sys.stderr', stderr_buf):
        ret = main([broker.db_file, 'compact', '--yes',
                    '--max-expanding', '10'])
    # exit code 2 signals the compaction was refused
    self.assertEqual(2, ret)
    self.assert_starts_with(stderr_buf.getvalue().split('\n')[0],
                            'Loaded db broker for ')
    self.assertEqual(
        ['WARNING: Container has overlapping shard ranges so cannot be '
         'compacted.'],
        stdout_buf.getvalue().split('\n')[:1])
    # nothing should have changed
    updated_ranges = broker.get_shard_ranges()
    self.assertEqual(shard_ranges, updated_ranges)
    self.assertEqual([ShardRange.ACTIVE] * 10,
                     [sr.state for sr in updated_ranges])
def test_compact_no_gaps(self):
    # verify that compactible sequences do not include gaps
    broker = self._make_broker()
    shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
    for sr in shard_ranges:
        sr.update_state(ShardRange.ACTIVE)
    # drop the range at index 3 to leave a namespace gap
    gapped_ranges = shard_ranges[:3] + shard_ranges[4:]
    broker.merge_shard_ranges(gapped_ranges)
    self._move_broker_to_sharded_state(broker)
    stdout_buf = StringIO()
    stderr_buf = StringIO()
    with mock.patch('sys.stdout', stdout_buf), \
            mock.patch('sys.stderr', stderr_buf):
        ret = main([broker.db_file, 'compact', '--yes',
                    '--max-shrinking', '99'])
    self.assertEqual(0, ret)
    self.assert_starts_with(stderr_buf.getvalue().split('\n')[0],
                            'Loaded db broker for ')
    self.assertEqual(
        ['Updated 2 shard sequences for compaction.'],
        stdout_buf.getvalue().split('\n')[:1])
    updated_ranges = broker.get_shard_ranges()
    # the gap splits compaction into two separate sequences: one acceptor
    # on either side of the gap
    gapped_ranges[2].lower = gapped_ranges[0].lower
    gapped_ranges[8].lower = gapped_ranges[3].lower
    self.assertEqual(gapped_ranges, updated_ranges)
    expected_states = ([ShardRange.SHRINKING] * 2 + [ShardRange.ACTIVE] +
                       [ShardRange.SHRINKING] * 5 + [ShardRange.ACTIVE])
    self.assertEqual(expected_states,
                     [sr.state for sr in updated_ranges])
def test_compact_all_donors_shrink_to_root(self):
    # by default all shard ranges are small enough to shrink so the root
    # becomes the acceptor
    broker = self._make_broker()
    shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
    for i, sr in enumerate(shard_ranges):
        sr.update_state(ShardRange.ACTIVE)
    broker.merge_shard_ranges(shard_ranges)
    epoch = self._move_broker_to_sharded_state(broker)
    own_sr = broker.get_own_shard_range(no_default=True)
    self.assertEqual(epoch, own_sr.state_timestamp)  # sanity check
    self.assertEqual(ShardRange.SHARDED, own_sr.state)  # sanity check
    out = StringIO()
    err = StringIO()
    with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
        ret = main(
            [broker.db_file, 'compact', '--yes', '--max-shrinking', '99'])
    self.assertEqual(
        0, ret, 'stdout:\n%s\nstderr\n%s' % (out.getvalue(),
                                             err.getvalue()))
    err_lines = err.getvalue().split('\n')
    self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
    out_lines = out.getvalue().split('\n')
    self.assertEqual(['Updated 1 shard sequences for compaction.'],
                     out_lines[:1])
    updated_ranges = broker.get_shard_ranges()
    self.assertEqual(shard_ranges, updated_ranges)
    # every shard range becomes a donor shrinking into the root
    self.assertEqual([ShardRange.SHRINKING] * 10,
                     [sr.state for sr in updated_ranges])
    updated_own_sr = broker.get_own_shard_range(no_default=True)
    # the root's own shard range keeps its timestamp and epoch but its
    # state moves back to ACTIVE with a newer state_timestamp
    self.assertEqual(own_sr.timestamp, updated_own_sr.timestamp)
    self.assertEqual(own_sr.epoch, updated_own_sr.epoch)
    self.assertLess(own_sr.state_timestamp,
                    updated_own_sr.state_timestamp)
    self.assertEqual(ShardRange.ACTIVE, updated_own_sr.state)
def test_compact_nothing_to_do(self):
    # compaction makes no changes when thresholds rule out all candidates
    broker = self._make_broker()
    shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
    for sr in shard_ranges:
        sr.update_state(ShardRange.ACTIVE)
    broker.merge_shard_ranges(shard_ranges)
    self._move_broker_to_sharded_state(broker)
    stdout_buf = StringIO()
    stderr_buf = StringIO()
    # two option sets that preclude compaction:
    #   ('5', '8')   - all shards are too big to shrink
    #   ('11', '12') - shards could shrink but acceptors would be too large
    for threshold, limit in (('5', '8'), ('11', '12')):
        with mock.patch('sys.stdout', stdout_buf), \
                mock.patch('sys.stderr', stderr_buf):
            ret = main([
                broker.db_file, 'compact', '--yes',
                '--shrink-threshold', threshold,
                '--expansion-limit', limit
            ])
        self.assertEqual(0, ret)
        lines = stdout_buf.getvalue().split('\n')
        self.assertEqual(['No shards identified for compaction.'],
                         lines[:1])
def test_compact_user_input(self):
    # verify user input 'y' or 'n' is respected
    small_ranges = (3, 4, 7)
    broker = self._make_broker()
    shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
    for i, sr in enumerate(shard_ranges):
        # make all but the small ranges too large to shrink
        if i not in small_ranges:
            sr.object_count = 100001
        sr.update_state(ShardRange.ACTIVE)
    broker.merge_shard_ranges(shard_ranges)
    self._move_broker_to_sharded_state(broker)

    def do_compact(user_input):
        # run compact WITHOUT --yes so the user is prompted; the patched
        # input() supplies user_input as the answer
        out = StringIO()
        err = StringIO()
        with mock.patch('sys.stdout', out),\
                mock.patch('sys.stderr', err), \
                mock.patch('swift.cli.manage_shard_ranges.input',
                           return_value=user_input):
            ret = main([broker.db_file, 'compact',
                        '--max-shrinking', '99'])
        self.assertEqual(0, ret)
        err_lines = err.getvalue().split('\n')
        self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
        out_lines = out.getvalue().split('\n')
        # the prompt describes the proposed donors and their acceptor
        self.assertIn('total of 20 objects', out_lines[0])
        self.assertIn('objects: 10', out_lines[1])
        self.assertIn('state: active', out_lines[2])
        self.assertIn('objects: 10', out_lines[3])
        self.assertIn('state: active', out_lines[4])
        self.assertIn('can be compacted into', out_lines[5])
        self.assertIn('objects: 10', out_lines[6])
        self.assertIn('state: active', out_lines[7])
        broker_ranges = broker.get_shard_ranges()
        return broker_ranges

    broker_ranges = do_compact('n')
    # expect no changes to shard ranges
    self.assertEqual(shard_ranges, broker_ranges)
    for i, sr in enumerate(broker_ranges):
        self.assertEqual(ShardRange.ACTIVE, sr.state)

    broker_ranges = do_compact('y')
    # expect updated shard ranges: acceptors 5 and 8 expand to cover the
    # namespaces of the shrinking small ranges
    shard_ranges[5].lower = shard_ranges[3].lower
    shard_ranges[8].lower = shard_ranges[7].lower
    self.assertEqual(shard_ranges, broker_ranges)
    for i, sr in enumerate(broker_ranges):
        if i in small_ranges:
            self.assertEqual(ShardRange.SHRINKING, sr.state)
        else:
            self.assertEqual(ShardRange.ACTIVE, sr.state)
def test_compact_shrink_threshold(self):
    # verify option to set the shrink threshold for compaction;
    broker = self._make_broker()
    shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
    for i, sr in enumerate(shard_ranges):
        sr.update_state(ShardRange.ACTIVE)
    # (n-2)th shard range has one extra object
    shard_ranges[-2].object_count = 11
    broker.merge_shard_ranges(shard_ranges)
    self._move_broker_to_sharded_state(broker)
    # with threshold set to 10 no shard ranges can be shrunk
    out = StringIO()
    err = StringIO()
    with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
        ret = main([broker.db_file, 'compact', '--yes',
                    '--max-shrinking', '99', '--shrink-threshold', '10'])
    self.assertEqual(0, ret)
    err_lines = err.getvalue().split('\n')
    self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
    out_lines = out.getvalue().split('\n')
    self.assertEqual(
        ['No shards identified for compaction.'], out_lines[:1])
    updated_ranges = broker.get_shard_ranges()
    self.assertEqual(shard_ranges, updated_ranges)
    self.assertEqual([ShardRange.ACTIVE] * 10,
                     [sr.state for sr in updated_ranges])
    # with threshold == 11 all but the final 2 shard ranges can be shrunk;
    # note: the (n-1)th shard range is NOT shrunk to root
    out = StringIO()
    err = StringIO()
    with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
        ret = main([broker.db_file, 'compact', '--yes',
                    '--max-shrinking', '99', '--shrink-threshold', '11'])
    self.assertEqual(0, ret)
    err_lines = err.getvalue().split('\n')
    self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
    out_lines = out.getvalue().split('\n')
    self.assertEqual(
        ['Updated 1 shard sequences for compaction.'], out_lines[:1])
    updated_ranges = broker.get_shard_ranges()
    # the single acceptor (index 8) expands over the first eight donors
    shard_ranges[8].lower = shard_ranges[0].lower
    self.assertEqual(shard_ranges, updated_ranges)
    self.assertEqual([ShardRange.SHRINKING] * 8 + [ShardRange.ACTIVE] * 2,
                     [sr.state for sr in updated_ranges])
def test_compact_four_donors_two_acceptors(self):
    # ranges 2,3,4 shrink into acceptor 5; range 7 shrinks into acceptor 8
    small_ranges = (2, 3, 4, 7)
    broker = self._make_broker()
    shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
    for i, sr in enumerate(shard_ranges):
        # make all but the small ranges too large to shrink
        if i not in small_ranges:
            sr.object_count = 100001
        sr.update_state(ShardRange.ACTIVE)
    broker.merge_shard_ranges(shard_ranges)
    self._move_broker_to_sharded_state(broker)
    out = StringIO()
    err = StringIO()
    with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
        ret = main([broker.db_file, 'compact', '--yes',
                    '--max-shrinking', '99'])
    self.assertEqual(0, ret)
    err_lines = err.getvalue().split('\n')
    self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
    out_lines = out.getvalue().split('\n')
    self.assertEqual(
        ['Updated 2 shard sequences for compaction.'], out_lines[:1])
    updated_ranges = broker.get_shard_ranges()
    for i, sr in enumerate(updated_ranges):
        if i in small_ranges:
            self.assertEqual(ShardRange.SHRINKING, sr.state)
        else:
            self.assertEqual(ShardRange.ACTIVE, sr.state)
    # acceptors expand to cover their donors' namespaces
    shard_ranges[5].lower = shard_ranges[2].lower
    shard_ranges[8].lower = shard_ranges[7].lower
    self.assertEqual(shard_ranges, updated_ranges)
    for i in (5, 8):
        # acceptors should have updated timestamp
        self.assertLess(shard_ranges[i].timestamp,
                        updated_ranges[i].timestamp)
    # check idempotency
    with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
        ret = main([broker.db_file, 'compact', '--yes',
                    '--max-shrinking', '99'])
    self.assertEqual(0, ret)
    updated_ranges = broker.get_shard_ranges()
    self.assertEqual(shard_ranges, updated_ranges)
    for i, sr in enumerate(updated_ranges):
        if i in small_ranges:
            self.assertEqual(ShardRange.SHRINKING, sr.state)
        else:
            self.assertEqual(ShardRange.ACTIVE, sr.state)
def _find_ranges(broker, args, status_file=None):
    """Find shard range boundaries for the broker's objects.

    :param broker: a container broker.
    :param args: CLI namespace; ``rows_per_shard`` is read.
    :param status_file: optional file-like object to which progress is
        reported; when given, ranges are found in batches of 5 so that
        progress can be printed between batches, otherwise all ranges are
        found in a single pass (limit=-1).
    :return: a tuple of (list of shard data dicts, elapsed seconds).
    """
    start = last_report = time.time()
    limit = 5 if status_file else -1
    shard_data, last_found = broker.find_shard_ranges(
        args.rows_per_shard, limit=limit)
    if shard_data:
        while not last_found:
            # report progress at most every 10 seconds
            if last_report + 10 < time.time():
                print('Found %d ranges in %gs; looking for more...' % (
                    len(shard_data), time.time() - start), file=status_file)
                last_report = time.time()
            # prefix doesn't matter since we aren't persisting it
            found_ranges = make_shard_ranges(broker, shard_data, '.shards_')
            # use the same batch limit as the initial call rather than a
            # hard-coded 5 (identical value whenever this loop is reached)
            more_shard_data, last_found = broker.find_shard_ranges(
                args.rows_per_shard, existing_ranges=found_ranges,
                limit=limit)
            shard_data.extend(more_shard_data)
    return shard_data, time.time() - start
def test_compact_shard_ranges_in_found_state(self):
    # shard ranges still in FOUND state are not candidates for compaction
    broker = self._make_broker()
    broker.merge_shard_ranges(
        make_shard_ranges(broker, self.shard_data, '.shards_'))
    self._move_broker_to_sharded_state(broker)
    stdout_buf = StringIO()
    stderr_buf = StringIO()
    with mock.patch('sys.stdout', stdout_buf), \
            mock.patch('sys.stderr', stderr_buf):
        ret = main([broker.db_file, 'compact'])
    self.assertEqual(0, ret)
    self.assert_starts_with(stderr_buf.getvalue().split('\n')[0],
                            'Loaded db broker for ')
    self.assertEqual(['No shards identified for compaction.'],
                     stdout_buf.getvalue().split('\n')[:1])
    # all ranges remain in FOUND state
    self.assertEqual([ShardRange.FOUND] * 10,
                     [sr.state for sr in broker.get_shard_ranges()])
def test_compact_max_expanding(self):
    # verify option to limit the number of expanding shards per acceptor
    broker = self._make_broker()
    shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
    for i, sr in enumerate(shard_ranges):
        sr.update_state(ShardRange.ACTIVE)
    broker.merge_shard_ranges(shard_ranges)
    self._move_broker_to_sharded_state(broker)

    def do_compact(expect_msg):
        # run compact with at most 2 acceptors, each taking up to 3
        # donors; assert the first stdout line matches expect_msg and
        # return the broker's shard ranges
        out = StringIO()
        err = StringIO()
        # note: max_shrinking is set to 3 so that there is opportunity for
        # more than 2 acceptors
        with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
            ret = main([
                broker.db_file, 'compact', '--yes',
                '--max-shrinking', '3', '--max-expanding', '2'
            ])
        self.assertEqual(0, ret)
        err_lines = err.getvalue().split('\n')
        self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
        out_lines = out.getvalue().split('\n')
        self.assertEqual([expect_msg], out_lines[:1])
        return broker.get_shard_ranges()

    updated_ranges = do_compact(
        'Updated 2 shard sequences for compaction.')
    # only 2 acceptors (indexes 3 and 7), each expanding over 3 donors
    shard_ranges[3].lower = shard_ranges[0].lower
    shard_ranges[7].lower = shard_ranges[4].lower
    self.assertEqual(shard_ranges, updated_ranges)
    self.assertEqual([ShardRange.SHRINKING] * 3 + [ShardRange.ACTIVE] +
                     [ShardRange.SHRINKING] * 3 + [ShardRange.ACTIVE] * 3,
                     [sr.state for sr in updated_ranges])
    # check idempotency - no more sequences found while existing sequences
    # are shrinking
    updated_ranges = do_compact('No shards identified for compaction.')
    self.assertEqual(shard_ranges, updated_ranges)
    self.assertEqual([ShardRange.SHRINKING] * 3 + [ShardRange.ACTIVE] +
                     [ShardRange.SHRINKING] * 3 + [ShardRange.ACTIVE] * 3,
                     [sr.state for sr in updated_ranges])
def test_compact_donors_but_no_suitable_acceptor(self):
    # if shard ranges are already shrinking, check that the final one is
    # not made into an acceptor if a suitable adjacent acceptor is not
    # found (unexpected scenario but possible in an overlap situation)
    broker = self._make_broker()
    shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
    for i, state in enumerate([ShardRange.SHRINKING] * 3 +
                              [ShardRange.SHARDING] +
                              [ShardRange.ACTIVE] * 6):
        shard_ranges[i].update_state(state)
    broker.merge_shard_ranges(shard_ranges)
    epoch = self._move_broker_to_sharded_state(broker)
    # pin timestamps to the epoch so own_sr can be compared later
    with mock_timestamp_now(epoch):
        own_sr = broker.get_own_shard_range(no_default=True)
    self.assertEqual(epoch, own_sr.state_timestamp)  # sanity check
    self.assertEqual(ShardRange.SHARDED, own_sr.state)  # sanity check
    out = StringIO()
    err = StringIO()
    with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
        ret = main(
            [broker.db_file, 'compact', '--yes', '--max-shrinking', '99'])
    self.assertEqual(
        0, ret, 'stdout:\n%s\nstderr\n%s' % (out.getvalue(),
                                             err.getvalue()))
    err_lines = err.getvalue().split('\n')
    self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
    out_lines = out.getvalue().split('\n')
    self.assertEqual(['Updated 1 shard sequences for compaction.'],
                     out_lines[:1])
    updated_ranges = broker.get_shard_ranges()
    shard_ranges[9].lower = shard_ranges[4].lower  # expanded acceptor
    self.assertEqual(shard_ranges, updated_ranges)
    self.assertEqual(
        [ShardRange.SHRINKING] * 3 +  # unchanged
        [ShardRange.SHARDING] +  # unchanged
        [ShardRange.SHRINKING] * 5 +  # moved to shrinking
        [ShardRange.ACTIVE],  # unchanged
        [sr.state for sr in updated_ranges])
    with mock_timestamp_now(epoch):  # force equal meta-timestamp
        updated_own_sr = broker.get_own_shard_range(no_default=True)
    # the root's own shard range should be unchanged
    self.assertEqual(dict(own_sr), dict(updated_own_sr))
def test_compact_not_sharded(self):
    # compaction is refused when the root container is not yet sharded
    broker = self._make_broker()
    shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
    broker.merge_shard_ranges(shard_ranges)
    # make broker appear to be a root container but it isn't sharded
    broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
    self.assertTrue(broker.is_root_container())
    stdout_buf = StringIO()
    stderr_buf = StringIO()
    with mock.patch('sys.stdout', stdout_buf), \
            mock.patch('sys.stderr', stderr_buf):
        ret = main([broker.db_file, 'compact'])
    # exit code 2 signals the compaction was refused
    self.assertEqual(2, ret)
    self.assert_starts_with(stderr_buf.getvalue().split('\n')[0],
                            'Loaded db broker for ')
    self.assertEqual(
        ['WARNING: Container is not yet sharded so cannot be compacted.'],
        stdout_buf.getvalue().split('\n')[:1])
    # nothing should have changed
    updated_ranges = broker.get_shard_ranges()
    self.assertEqual(shard_ranges, updated_ranges)
    self.assertEqual([ShardRange.FOUND] * 10,
                     [sr.state for sr in updated_ranges])
def test_compact_expansion_limit_less_than_shrink_threshold(self):
    # when the expansion limit is below the shrink threshold no shard can
    # both qualify as a donor and fit inside an acceptor
    broker = self._make_broker()
    shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
    for index, sr in enumerate(shard_ranges):
        # alternate large (25) and small (3) object counts
        sr.object_count = 25 if index % 2 else 3
        sr.update_state(ShardRange.ACTIVE)
    broker.merge_shard_ranges(shard_ranges)
    self._move_broker_to_sharded_state(broker)
    stdout_buf = StringIO()
    stderr_buf = StringIO()
    with mock.patch('sys.stdout', stdout_buf), \
            mock.patch('sys.stderr', stderr_buf):
        ret = main([
            broker.db_file, 'compact', '--yes',
            '--shrink-threshold', '10',
            '--expansion-limit', '5'
        ])
    self.assertEqual(0, ret)
    self.assertEqual(['No shards identified for compaction.'],
                     stdout_buf.getvalue().split('\n')[:1])