def test_query(self):
    """_query_shards_collection filters by realm and (optionally) shard key.

    `assertEquals` is a deprecated alias removed in Python 3.12; use
    `assertEqual` instead.
    """
    api.create_realm(
        'dummy-realm', 'some_field', 'dummy_collection',
        'cluster-1/%s' % test_settings.CONN1['db_name'])
    api.set_shard_at_rest('dummy-realm', 1, 'dest1/some_db')
    expected_metadata = {
        'shard_key': 1,
        'location': 'dest1/some_db',
        'realm': 'dummy-realm',
    }

    def _trim_results(docs):
        # Reduce each document to the keys under test, dropping any
        # store-generated fields (e.g. _id) so equality checks are stable.
        return [{
            'shard_key': doc['shard_key'],
            'location': doc['location'],
            'realm': doc['realm'],
        } for doc in docs]

    store = metadata.ShardMetadataStore({'name': 'dummy-realm'})

    # No key: all shards for the realm. Key 1: the shard we created.
    # Key 2: no such shard.
    results = _trim_results(store._query_shards_collection())
    self.assertEqual([expected_metadata], results)
    results = _trim_results(store._query_shards_collection(1))
    self.assertEqual([expected_metadata], results)
    results = _trim_results(store._query_shards_collection(2))
    self.assertEqual([], results)

    # A different realm must not see 'dummy-realm' shards.
    store = metadata.ShardMetadataStore({'name': 'some-other-realm'})
    results = _trim_results(store._query_shards_collection())
    self.assertEqual([], results)
    results = _trim_results(store._query_shards_collection(1))
    self.assertEqual([], results)
def test_empty_list_when_shard_is_missing(self):
    """Querying a shard key that was never set yields an empty result.

    Uses `assertEqual` (the `assertEquals` alias is deprecated and removed
    in Python 3.12).
    """
    api.create_realm(
        'dummy-realm', 'some_field', 'dummy_collection',
        'cluster-1/%s' % test_settings.CONN1['db_name'])
    # Shard key 1 has no metadata entry for this realm.
    entries = list(
        metadata.ShardMetadataStore('dummy-realm')
        ._query_shards_collection(1))
    self.assertEqual(entries, [])
def test_shard_location_does_not_change_even_when_forced(self):
    """set_shard_at_rest with force=True overrides an existing location.

    NOTE(review): the method name says the location does NOT change, but
    the assertion checks that the forced call DID move the shard from
    'dest2/some_db' to 'dest1/some_db' — the name looks inverted; confirm
    against the api.set_shard_at_rest contract before renaming.

    Also switches the deprecated `assertEquals` alias (removed in
    Python 3.12) to `assertEqual`.
    """
    default_dest = 'cluster-2/%s' % test_settings.CONN2['db_name']
    api.create_realm(
        'dummy-realm', 'some_field', 'dummy_collection', default_dest)
    api.set_shard_at_rest('dummy-realm', 1, 'dest2/some_db')
    # Without force this would presumably be rejected; force=True relocates.
    api.set_shard_at_rest('dummy-realm', 1, 'dest1/some_db', force=True)
    entries = metadata.ShardMetadataStore('dummy-realm') \
        ._query_shards_collection()
    self.assertEqual(entries[0]['location'], 'dest1/some_db')
def test_fetch_all_shards_from_metadata(self):
    """A shard set at rest is returned with key, location and realm.

    Uses `assertEqual`; the `assertEquals` alias is deprecated and removed
    in Python 3.12.
    """
    api.create_realm(
        'dummy-realm', 'some_field', 'dummy_collection',
        'cluster-1/%s' % test_settings.CONN1['db_name'])
    api.set_shard_at_rest('dummy-realm', 1, 'dest1/some_db')
    entries = metadata.ShardMetadataStore('dummy-realm') \
        ._query_shards_collection()
    self.assertEqual(entries[0]['shard_key'], 1)
    self.assertEqual(entries[0]['location'], 'dest1/some_db')
    self.assertEqual(entries[0]['realm'], 'dummy-realm')
def test_cache_single_shard_in_flux(self, mock_query):
    """Metadata for a migrating (in-flux) shard is never served from cache.

    Each get_single_shard_metadata call must hit the store again, so
    mock_query.call_count increments on every lookup.

    Uses `assertEqual` (the `assertEquals` alias is deprecated and removed
    in Python 3.12).
    """
    expected_shard_metadata = {
        'status': metadata.ShardStatus.MIGRATING_SYNC,
        'shard_key': 1,
    }
    # copy() so the store cannot mutate the dict we assert against.
    mock_query.return_value = [expected_shard_metadata.copy()]
    store = metadata.ShardMetadataStore({'realm': 'dummy-realm'})
    actual_shard_metadata = store.get_single_shard_metadata(1)
    self.assertEqual(expected_shard_metadata, actual_shard_metadata)
    self.assertEqual(1, mock_query.call_count)

    # Do another query and ensure we have NOT used a cached value
    actual_shard_metadata = store.get_single_shard_metadata(1)
    self.assertEqual(expected_shard_metadata, actual_shard_metadata)
    self.assertEqual(2, mock_query.call_count)
def test_cache_all_shards(self, mock_query):
    """At-rest shard metadata is cached, expires, and refreshes per shard.

    Covers: a full fetch populates the cache; repeat full and single-shard
    reads hit the cache; after the cache timeout a single-shard read
    re-queries just that shard; a subsequent full read re-queries again
    because only one shard was refreshed.

    Uses `assertEqual` (the `assertEquals` alias is deprecated and removed
    in Python 3.12).
    """
    expected_metadata_1 = {
        'status': metadata.ShardStatus.AT_REST,
        'shard_key': 1,
    }
    expected_metadata_2 = {
        'status': metadata.ShardStatus.AT_REST,
        'shard_key': 2,
    }
    # copy() so the store cannot mutate the dicts we assert against.
    mock_query.return_value = [
        expected_metadata_1.copy(), expected_metadata_2.copy(),
    ]
    store = metadata.ShardMetadataStore({'realm': 'dummy-realm'})
    actual_metadata = store.get_all_shard_metadata()
    self.assertEqual({
        1: expected_metadata_1, 2: expected_metadata_2,
    }, actual_metadata)
    self.assertEqual(1, mock_query.call_count)

    # Do another query and ensure we have used a cached value
    actual_metadata = store.get_all_shard_metadata()
    self.assertEqual({
        1: expected_metadata_1, 2: expected_metadata_2,
    }, actual_metadata)
    self.assertEqual(1, mock_query.call_count)

    # Do a query for a single shard and ensure we get the cache value
    actual_metadata = store.get_single_shard_metadata(1)
    self.assertEqual(expected_metadata_1, actual_metadata)

    # Now sleep for longer than the cache timeout and ensure we do
    # another call
    time.sleep(self._cache_length * 2)
    mock_query.return_value = [expected_metadata_1.copy()]
    actual_metadata = store.get_single_shard_metadata(1)
    self.assertEqual(expected_metadata_1, actual_metadata)
    self.assertEqual(2, mock_query.call_count)

    # The previous call will have only refreshed a single shard. Therefore,
    # a global call will result in another query
    actual_metadata = store.get_all_shard_metadata()
    self.assertEqual({
        1: expected_metadata_1, 2: expected_metadata_2,
    }, actual_metadata)
    self.assertEqual(3, mock_query.call_count)
def test_default_location(self, mock_query):
    """An unknown shard key falls back to the realm's default destination.

    With no stored metadata (query returns []), get_single_shard_metadata
    synthesises an AT_REST entry pointing at the realm's default_dest.

    Uses `assertEqual` (the `assertEquals` alias is deprecated and removed
    in Python 3.12).
    """
    default_dest = 'cluster-1/%s' % test_settings.CONN1['db_name']
    expected_shard_metadata = {
        'status': metadata.ShardStatus.AT_REST,
        'location': default_dest,
        'realm': 'dummy-realm',
    }
    mock_query.return_value = []
    store = metadata.ShardMetadataStore({
        'name': 'dummy-realm',
        'default_dest': default_dest,
    })
    actual_shard_metadata = store.get_single_shard_metadata(1)
    self.assertEqual(expected_shard_metadata, actual_shard_metadata)
    self.assertEqual(1, mock_query.call_count)
def test_cache_all_shards_in_flux(self, mock_query):
    """With one shard migrating, only that shard bypasses the cache.

    The AT_REST shard (key 2) is served from cache; the MIGRATING_SYNC
    shard (key 1) is re-queried individually on every full fetch.

    Uses `assertEqual` (the `assertEquals` alias is deprecated and removed
    in Python 3.12); also fixes the "refrshing" typo in the comments.
    """
    expected_metadata_1 = {
        'status': metadata.ShardStatus.MIGRATING_SYNC,
        'shard_key': 1,
    }
    expected_metadata_2 = {
        'status': metadata.ShardStatus.AT_REST,
        'shard_key': 2,
    }
    # copy() so the store cannot mutate the dicts we assert against.
    mock_query.return_value = [
        expected_metadata_1.copy(), expected_metadata_2.copy(),
    ]
    store = metadata.ShardMetadataStore({'realm': 'dummy-realm'})
    actual_metadata = store.get_all_shard_metadata()
    self.assertEqual({
        1: expected_metadata_1, 2: expected_metadata_2,
    }, actual_metadata)
    self.assertEqual(1, mock_query.call_count)

    # A query of the second shard should hit the cache
    actual_metadata = store.get_single_shard_metadata(2)
    self.assertEqual(expected_metadata_2, actual_metadata)
    self.assertEqual(1, mock_query.call_count)

    # Do another query and we should end up refreshing just a single shard
    mock_query.return_value = [expected_metadata_1.copy()]
    actual_metadata = store.get_all_shard_metadata()
    self.assertEqual({
        1: expected_metadata_1, 2: expected_metadata_2,
    }, actual_metadata)
    self.assertEqual(2, mock_query.call_count)
    mock_query.assert_called_with(1)

    # Another full query should skip the cache
    actual_metadata = store.get_all_shard_metadata()
    self.assertEqual({
        1: expected_metadata_1, 2: expected_metadata_2,
    }, actual_metadata)
    self.assertEqual(3, mock_query.call_count)
def test_cache_single_shard(self, mock_query):
    """At-rest single-shard metadata is cached until the timeout elapses.

    The second read must not re-query; after sleeping past the cache
    window a third read must re-query.

    Uses `assertEqual` (the `assertEquals` alias is deprecated and removed
    in Python 3.12).
    """
    expected_shard_metadata = {
        'status': metadata.ShardStatus.AT_REST,
        'shard_key': 1,
    }
    # copy() so the store cannot mutate the dict we assert against.
    mock_query.return_value = [expected_shard_metadata.copy()]
    store = metadata.ShardMetadataStore({'realm': 'dummy-realm'})
    actual_shard_metadata = store.get_single_shard_metadata(1)
    self.assertEqual(expected_shard_metadata, actual_shard_metadata)
    self.assertEqual(1, mock_query.call_count)

    # Do another query and ensure we have used a cached value
    actual_shard_metadata = store.get_single_shard_metadata(1)
    self.assertEqual(expected_shard_metadata, actual_shard_metadata)
    self.assertEqual(1, mock_query.call_count)

    # Now sleep for longer than the cache timeout and ensure we do
    # another call
    time.sleep(self._cache_length * 2)
    actual_shard_metadata = store.get_single_shard_metadata(1)
    self.assertEqual(expected_shard_metadata, actual_shard_metadata)
    self.assertEqual(2, mock_query.call_count)
def test_raise_when_collection_absent(self):
    """Querying a realm that was never created raises.

    NOTE(review): asserting on bare Exception is broad; narrowing to the
    store's specific exception type would make the test stricter, but that
    type is not visible from here.
    """
    store = metadata.ShardMetadataStore('missing')
    with self.assertRaises(Exception):
        store._query_shards_collection()