def testShardedOp(self):
  """Basic test for _ShardedOp: keys are split by vbucket across shards and
  the per-shard results are recombined in the original key order.
  """
  kv_tuples = [('k1', 'v1'), ('k2', 'v2'), ('k3', None)]
  returns = [[True, True], [True]]
  callbacks = []

  def _callback(shard, keys, vkeys, values):
    # Record each per-shard invocation so the sharding can be asserted below.
    callbacks.append((shard, keys, vkeys, values))
    return Operation(True, returns.pop(0))

  self.mox.ReplayAll()

  # Consistency fix: use a local engine like every sibling test, instead of
  # assigning to self.engine (no other test relies on that attribute).
  engine = redis_engine.RedisEngine(self.endpoints, self.vbuckets)
  op = engine._ShardedOp(kv_tuples, _callback)

  self.assertTrue(op.success)
  self.assertEqual(op.response_value, [True, True, True])

  # shard1 owns 'k1' (vbucket 2); shard0 owns 'k2' and 'k3' (vbuckets 1, 0).
  self.assertTrue(callbacks[1][0] is self.shard1)
  self.assertEqual(callbacks[1][1], ['k1'])
  self.assertEqual(callbacks[1][2], ['2|k1'])
  self.assertEqual(callbacks[1][3], ['v1'])
  self.assertTrue(callbacks[0][0] is self.shard0)
  self.assertEqual(callbacks[0][1], ['k2', 'k3'])
  self.assertEqual(callbacks[0][2], ['1|k2', '0|k3'])
  self.assertEqual(callbacks[0][3], ['v2', None])
  self.mox.VerifyAll()
def testBatchCheckAndMultiSet(self):
  """Test BatchCheckAndMultiSet() under normal conditions: each shard is
  watched, existing values are read, and the new values are written inside a
  MULTI transaction.

  Bug fix: the original body had a stray `return op` immediately after the
  call under test, which made every assertion (and mox.VerifyAll) unreachable,
  so the test always passed without verifying anything. The early return has
  been removed.
  """
  kv_dict = {'k1': 'v11', 'k2': 'v21', 'k3': 'v31'}

  # Shard 0 handles 'k2' and 'k3': WATCH, read old values, MULTI, SET, EXEC.
  mock_pipe0 = self.mox.CreateMockAnything()
  self.shard0.pipeline(True).AndReturn(mock_pipe0)
  mock_pipe0.watch('1|k2', '0|k3')
  mock_sub_pipe0 = self.mox.CreateMockAnything()
  mock_pipe0.pipeline(False).AndReturn(mock_sub_pipe0)
  mock_sub_pipe0.get('1|k2')
  mock_sub_pipe0.get('0|k3')
  mock_sub_pipe0.execute().AndReturn(['v20', 'v30'])
  mock_pipe0.multi()
  mock_pipe0.set('1|k2', 'v21')
  mock_pipe0.set('0|k3', 'v31')
  mock_pipe0.execute().AndReturn([True, True])
  mock_pipe0.reset()

  # Shard 1 handles 'k1'.
  mock_pipe1 = self.mox.CreateMockAnything()
  self.shard1.pipeline(True).AndReturn(mock_pipe1)
  mock_pipe1.watch('2|k1')
  mock_sub_pipe1 = self.mox.CreateMockAnything()
  mock_pipe1.pipeline(False).AndReturn(mock_sub_pipe1)
  mock_sub_pipe1.get('2|k1')
  mock_sub_pipe1.execute().AndReturn(['v10'])
  mock_pipe1.multi()
  mock_pipe1.set('2|k1', 'v11')
  mock_pipe1.execute().AndReturn([True])
  mock_pipe1.reset()

  self.mox.ReplayAll()

  old_vals = []
  def callback(key, old_value):
    # The engine passes the current stored value for each key; return the
    # (key, new_value) pairs to write.
    old_vals.append(old_value)
    return [(key, kv_dict[key])]

  engine = redis_engine.RedisEngine(self.endpoints, self.vbuckets)
  op = engine.BatchCheckAndMultiSet(sorted(kv_dict.keys()), callback)

  self.assertTrue(op.success)
  self.assertEqual(op.response_value, [True, True, True])
  # The callback must have been invoked once per key.
  self.assertEqual(len(old_vals), 3)
  self.mox.VerifyAll()
def testHashDelete(self):
  """Verify HashDelete() issues an HDEL with the vbucket-prefixed key."""
  self.shard1.hdel('3|h1', 'f1').AndReturn(True)
  self.mox.ReplayAll()

  engine = redis_engine.RedisEngine(self.endpoints, self.vbuckets)
  result_op = engine.HashDelete('h1', 'f1')

  self.assertTrue(result_op.success)
  self.mox.VerifyAll()
def testSetMembers(self):
  """Verify SetMembers() returns the shard's SMEMBERS result as a set."""
  members = ['v1', 'v2', 'v3']
  self.shard1.smembers('2|s1').AndReturn(members)
  self.mox.ReplayAll()

  engine = redis_engine.RedisEngine(self.endpoints, self.vbuckets)
  result_op = engine.SetMembers('s1')

  self.assertTrue(result_op.success)
  self.assertEqual(result_op.response_value, set(members))
  self.mox.VerifyAll()
def testHashMultiPut(self):
  """Verify HashMultiPut() forwards the field mapping via HMSET."""
  fields = {'f1': 'v1', 'f2': 'v2'}
  self.shard1.hmset('3|h1', {'f1': 'v1', 'f2': 'v2'}).AndReturn(3)
  self.mox.ReplayAll()

  engine = redis_engine.RedisEngine(self.endpoints, self.vbuckets)
  result_op = engine.HashMultiPut('h1', fields)

  self.assertTrue(result_op.success)
  self.mox.VerifyAll()
def testHashGetAll(self):
  """Verify HashGetAll() returns the full HGETALL mapping."""
  expected = {'f1': 'v1'}
  self.shard1.hgetall('3|h1').AndReturn({'f1': 'v1'})
  self.mox.ReplayAll()

  engine = redis_engine.RedisEngine(self.endpoints, self.vbuckets)
  result_op = engine.HashGetAll('h1')

  self.assertTrue(result_op.success)
  self.assertEqual(result_op.response_value, expected)
  self.mox.VerifyAll()
def testHashPut(self):
  """Verify HashPut() issues an HSET with the vbucket-prefixed key."""
  self.shard1.hset('3|h1', 'f1', 'v1').AndReturn(1)
  self.mox.ReplayAll()

  engine = redis_engine.RedisEngine(self.endpoints, self.vbuckets)
  result_op = engine.HashPut('h1', 'f1', 'v1')

  self.assertTrue(result_op.success)
  self.mox.VerifyAll()
def testDelete(self):
  """Verify Delete() pipelines a single DELETE on the owning shard."""
  pipe = self.mox.CreateMockAnything()
  self.shard1.pipeline().AndReturn(pipe)
  pipe.delete('2|k1')
  pipe.execute().AndReturn([True])
  self.mox.ReplayAll()

  engine = redis_engine.RedisEngine(self.endpoints, self.vbuckets)
  result_op = engine.Delete('k1')

  self.assertTrue(result_op.success)
  self.assertEqual(result_op.response_value, True)
  self.mox.VerifyAll()
def testSetRemove(self):
  """Verify SetRemove() pipelines one SREM per value on the owning shard."""
  pipe = self.mox.CreateMockAnything()
  self.shard1.pipeline().AndReturn(pipe)
  pipe.srem('2|s1', 'v1')
  pipe.srem('2|s1', 'v2')
  pipe.execute().AndReturn([1, 1])
  self.mox.ReplayAll()

  engine = redis_engine.RedisEngine(self.endpoints, self.vbuckets)
  result_op = engine.SetRemove('s1', 'v1', 'v2')

  self.assertTrue(result_op.success)
  self.mox.VerifyAll()
def testShardedOpSingleFailure(self):
  """Test for _ShardedOp where a single operation is not successful.

  (Docstring typo fixed: "operatin" -> "operation".) When one per-shard
  Operation reports failure, the overall Operation must report failure, and
  only the successful shards' results are aggregated.
  """
  kv_tuples = [('k1', 'v1'), ('k2', 'v2'), ('k3', None)]
  results = [True, False]
  returns = [[True, True], None]
  callbacks = []

  def _callback(shard, keys, vkeys, values):
    # First shard succeeds, second fails (per the queues above).
    callbacks.append((shard, keys, vkeys, values))
    return Operation(results.pop(0), returns.pop(0))

  self.mox.ReplayAll()

  engine = redis_engine.RedisEngine(self.endpoints, self.vbuckets)
  op = engine._ShardedOp(kv_tuples, _callback)

  self.assertFalse(op.success)
  self.assertEqual(op.response_value, [True, True])
  self.mox.VerifyAll()
def testSetIsMember(self):
  """Test SetIsMember().

  (Docstring fixed: it previously said "Test SetMembers()", which is a
  different method covered by its own test.) The integer SISMEMBER reply
  must be converted to a bool in the Operation response.
  """
  key = 's1'
  val1 = 'v1'
  val2 = 'v2'
  self.shard1.sismember('2|s1', val1).AndReturn(0)
  self.shard1.sismember('2|s1', val2).AndReturn(1)
  self.mox.ReplayAll()

  engine = redis_engine.RedisEngine(self.endpoints, self.vbuckets)

  # Redis returns 0 -> False.
  op = engine.SetIsMember(key, val1)
  self.assertTrue(op.success)
  self.assertEqual(op.response_value, False)

  # Redis returns 1 -> True.
  op = engine.SetIsMember(key, val2)
  self.assertTrue(op.success)
  self.assertEqual(op.response_value, True)
  self.mox.VerifyAll()
def testHashBatchGet(self):
  """Test HashBatchGet(): one pipelined HGET per key, sharded by vbucket,
  with results recombined in input-key order.

  Consistency fix: replaced deprecated `assertEquals` with `assertEqual`,
  matching every other test in this file.
  """
  keys = ['h1', 'h2']
  field = 'f1'

  # 'h1' hashes to shard 1.
  mock_pipe1 = self.mox.CreateMockAnything()
  self.shard1.pipeline().AndReturn(mock_pipe1)
  mock_pipe1.hget('3|h1', 'f1')
  mock_pipe1.execute().AndReturn(['v1'])

  # 'h2' hashes to shard 0.
  mock_pipe0 = self.mox.CreateMockAnything()
  self.shard0.pipeline().AndReturn(mock_pipe0)
  mock_pipe0.hget('0|h2', 'f1')
  mock_pipe0.execute().AndReturn(['v2'])

  self.mox.ReplayAll()

  engine = redis_engine.RedisEngine(self.endpoints, self.vbuckets)
  op = engine.HashBatchGet(keys, field)

  self.assertTrue(op.success)
  self.assertEqual(op.response_value, ['v1', 'v2'])
  self.mox.VerifyAll()
def testBatchDelete(self):
  """Verify BatchDelete() pipelines per-shard DELETEs and merges results in
  the original key order.
  """
  shard0_pipe = self.mox.CreateMockAnything()
  self.shard0.pipeline().AndReturn(shard0_pipe)
  shard0_pipe.delete('1|k2')
  shard0_pipe.delete('0|k3')
  shard0_pipe.execute().AndReturn([True, True])

  shard1_pipe = self.mox.CreateMockAnything()
  self.shard1.pipeline().AndReturn(shard1_pipe)
  shard1_pipe.delete('2|k1')
  shard1_pipe.execute().AndReturn([True])

  self.mox.ReplayAll()

  engine = redis_engine.RedisEngine(self.endpoints, self.vbuckets)
  result_op = engine.BatchDelete(['k1', 'k2', 'k3'])

  self.assertTrue(result_op.success)
  self.assertEqual(result_op.response_value, [True, True, True])
  self.mox.VerifyAll()
def StartTabaServer(primary_port, secondary_port, db_endpoints, db_vbuckets,
    server_endpoints, pruning_enabled=False, use_memory_engine=False):
  """Initialize the Taba Server, and setup Juno request handlers for the
  Taba Client and monitoring interface.

  This call blocks for the lifetime of the process: it monkey-patches gevent,
  builds the storage engine, registers all HTTP handlers, starts two WSGI
  servers (primary and secondary), and then waits on their stop events.

  Args:
    primary_port - Port to listen on for primary requests.
    secondary_port - Port to listen on for secondary requests.
    db_endpoints - List of database end-point information dictionaries. Each
        entry should contain 'host', 'port', 'vbuckets' (where 'vbuckets' is
        a 2 element list specifying the start and end vbucket for that
        end-point). Ignored when use_memory_engine is True.
    db_vbuckets - Total number of vbuckets in the database.
    server_endpoints - List of Taba Server end-points information
        dictionaries. Each entry should contain 'host' and 'port' fields.
        Forced to None when use_memory_engine is True.
    pruning_enabled - If True, enable the periodic pruning service for this
        process.
    use_memory_engine - If True, use an in-memory database instead of the
        usual Redis one. Useful for testing.
  """
  # Monkey patch gevent before doing anything, so that all subsequent imports
  # pick up the cooperative socket/thread implementations.
  from gevent import monkey
  monkey.patch_all()

  # Attempt to turn heapy remote monitoring on. Best-effort: guppy may not
  # be installed.
  try:
    import guppy.heapy.RM #@UnusedImport
  except ImportError:
    pass

  # Attempt to attach a stack-trace dump to the SIGQUIT (kill -3) signal.
  # NOTE(review): only ImportError is caught here; on platforms without
  # SIGQUIT (e.g. Windows) this would raise AttributeError instead — confirm
  # the intended deployment targets.
  try:
    import signal
    signal.signal(signal.SIGQUIT, _PrintTrace)
  except ImportError:
    pass

  # Compile the Cython components. Done after monkey-patching and keyed off
  # the primary port (presumably to isolate per-process build artifacts —
  # verify in BootstrapCython).
  from tellapart.taba.util.misc_util import BootstrapCython
  BootstrapCython(primary_port)

  # Imports are deferred until after monkey.patch_all() above.
  from tellapart.frontend import juno_patches
  from tellapart.third_party import juno
  from tellapart.storage.engine import redis_engine
  from tellapart.taba import taba_client
  from tellapart.taba.server import taba_server
  from tellapart.taba.server import taba_server_storage_manager
  from tellapart.taba.server import taba_server_handlers
  from tellapart.taba.util.misc_util import HandlerSpec

  # Initialize the Taba Server, storage manager, and Redis engine.
  if use_memory_engine:
    # Testing mode: in-process storage, no peer servers, post to ourselves
    # on the secondary port.
    from tellapart.storage.engine import memory_redis_engine
    engine = memory_redis_engine.MemoryRedisEngine(None, None)
    server_endpoints = None
    taba_url = 'http://localhost:%d/post' % secondary_port
  else:
    # Production mode: build one RedisServerEndpoint per configured DB
    # end-point, covering its [start, end] vbucket range.
    redis_endpoints = []
    for endpoint in db_endpoints:
      redis_endpoints.append(
          redis_engine.RedisServerEndpoint(
              host=endpoint['host'],
              port=endpoint['port'],
              vbucket_start=endpoint['vbuckets'][0],
              vbucket_end=endpoint['vbuckets'][1]))
    engine = redis_engine.RedisEngine(redis_endpoints, db_vbuckets)
    # Local client posts to the first configured Taba Server end-point.
    taba_url = 'http://localhost:%d/post' % server_endpoints[0]['port']

  dao = taba_server_storage_manager.TabaServerStorageManager(engine)
  # The handlers module holds the server instance as a module-level global.
  taba_server_handlers.global_taba_server = \
      taba_server.TabaServer(dao, server_endpoints, prune=pruning_enabled)

  # Setup the local Taba Client (client name, post URL, flush interval).
  taba_client.Initialize('taba_server', taba_url, 60)

  # Setup all necessary handlers.
  juno_patches.InitializeJuno('wsgi')
  _handlers = [
      # Event posting handlers.
      HandlerSpec('/post_zip', taba_server_handlers.HandlePostCompressed,
          methods=('POST', )),
      HandlerSpec('/post', taba_server_handlers.HandlePostDirect,
          methods=('POST', )),

      # Single Get handlers.
      HandlerSpec('/raw', taba_server_handlers.HandleGetRaw),
      HandlerSpec('/projection', taba_server_handlers.HandleGetProjection),
      HandlerSpec('/aggregate', taba_server_handlers.HandleGetAggregate),
      HandlerSpec('/taba', taba_server_handlers.HandleGetTaba),

      # Batch Get handlers.
      HandlerSpec('/raw_batch', taba_server_handlers.HandleGetRawBatch,
          methods=('POST', )),
      HandlerSpec('/projection_batch',
          taba_server_handlers.HandleGetProjectionBatch,
          methods=('POST', )),
      # NOTE(review): 'Aggretate' is misspelled but must match the handler's
      # actual name in taba_server_handlers; rename in both places or not
      # at all.
      HandlerSpec('/aggregate_batch',
          taba_server_handlers.HandleGetAggretateBatch,
          methods=('POST', )),

      # Meta-data handlers.
      HandlerSpec('/clients', taba_server_handlers.HandleGetClients),
      HandlerSpec('/names', taba_server_handlers.HandleGetTabaNames),
      HandlerSpec('/type', taba_server_handlers.HandleGetType),

      # Administrative handlers.
      HandlerSpec('/delete', taba_server_handlers.HandleDeleteName),
      HandlerSpec('/prune', taba_server_handlers.HandlePrune),
      HandlerSpec('/upgrade', taba_server_handlers.HandleUpgrade),
      HandlerSpec('/status', taba_server_handlers.HandleStatus), ]

  # Register every (route, method) pair with Juno.
  for h in _handlers:
    for method in h.methods:
      juno.route(h.route_url, method)(h.handler_func)

  # Move 'wsgi.input' to 'request_body_bytes' so that we can read it directly
  # instead of having Juno parse it into a dictionary.
  juno.config('middleware', [
      ('tellapart.taba.util.middleware.CircumventJunoInputParsing', {}), ])

  # Create and start the Primary and Secondary Server objects. Both serve
  # the same WSGI application on different ports.
  from tellapart.taba.util import wsgi_server
  application = juno.run()
  primary_server = wsgi_server.launch_gevent_wsgi_server(
      application,
      port=primary_port,
      max_concurrent_requests=8,
      server_name='Taba Server - Primary',
      should_run_forever=False,
      use_clean_shutdown=True)
  secondary_server = wsgi_server.launch_gevent_wsgi_server(
      application,
      port=secondary_port,
      max_concurrent_requests=8,
      server_name='Taba Server - Secondary',
      should_run_forever=False,
      use_clean_shutdown=True)

  primary_server.start()
  secondary_server.start()

  # Block until both servers have been stopped (private API of the wsgi
  # server wrapper).
  primary_server._stopped_event.wait()
  secondary_server._stopped_event.wait()
def testBatchCheckAndMultiSetLockError(self):
  """Test BatchCheckAndMultiSet() when a WatchError (optimistic-lock
  failure) occurs on one shard: the operation on that shard is retried from
  the WATCH/read step, and ultimately succeeds.
  """
  kv_dict = {'k1': 'v11', 'k2': 'v21', 'k3': 'v31'}

  # Shard 0, first attempt: WATCH, read old values, MULTI, SET — but EXEC
  # raises WatchError (a watched key changed), forcing a retry.
  mock_pipe00 = self.mox.CreateMockAnything('pipe00')
  self.shard0.pipeline(True).AndReturn(mock_pipe00)
  mock_pipe00.watch('1|k2', '0|k3')
  mock_sub_pipe00 = self.mox.CreateMockAnything('sub-pipe00')
  mock_pipe00.pipeline(False).AndReturn(mock_sub_pipe00)
  mock_sub_pipe00.get('1|k2')
  mock_sub_pipe00.get('0|k3')
  mock_sub_pipe00.execute().AndReturn(['v20', 'v30'])
  mock_pipe00.multi()
  mock_pipe00.set('1|k2', 'v21')
  mock_pipe00.set('0|k3', 'v31')
  mock_pipe00.execute().AndRaise(redis_engine.redis.WatchError)
  mock_pipe00.reset()

  # Shard 0, second attempt: the full WATCH/read/MULTI/SET/EXEC sequence is
  # repeated on a fresh pipeline and succeeds this time.
  mock_pipe01 = self.mox.CreateMockAnything('pipe01')
  self.shard0.pipeline(True).AndReturn(mock_pipe01)
  mock_pipe01.watch('1|k2', '0|k3')
  mock_sub_pipe01 = self.mox.CreateMockAnything('sub-pipe01')
  mock_pipe01.pipeline(False).AndReturn(mock_sub_pipe01)
  mock_sub_pipe01.get('1|k2')
  mock_sub_pipe01.get('0|k3')
  mock_sub_pipe01.execute().AndReturn(['v20', 'v30'])
  mock_pipe01.multi()
  mock_pipe01.set('1|k2', 'v21')
  mock_pipe01.set('0|k3', 'v31')
  mock_pipe01.execute().AndReturn([True, True])
  mock_pipe01.reset()

  # Shard 1: succeeds on the first attempt.
  mock_pipe1 = self.mox.CreateMockAnything('pipe1')
  self.shard1.pipeline(True).AndReturn(mock_pipe1)
  mock_pipe1.watch('2|k1')
  mock_sub_pipe1 = self.mox.CreateMockAnything('sub-pipe1')
  mock_pipe1.pipeline(False).AndReturn(mock_sub_pipe1)
  mock_sub_pipe1.get('2|k1')
  mock_sub_pipe1.execute().AndReturn(['v10'])
  mock_pipe1.multi()
  mock_pipe1.set('2|k1', 'v11')
  mock_pipe1.execute().AndReturn([True])
  mock_pipe1.reset()

  self.mox.ReplayAll()

  old_vals = []
  def callback(key, old_value):
    # Record each old value handed to the callback; return the new pair.
    old_vals.append(old_value)
    return [(key, kv_dict[key])]

  engine = redis_engine.RedisEngine(self.endpoints, self.vbuckets)
  op = engine.BatchCheckAndMultiSet(sorted(kv_dict.keys()), callback)

  self.assertTrue(op.success)
  self.assertEqual(op.response_value, [True, True, True])
  # 5 callback invocations: 3 keys on the first pass, plus 2 re-reads of
  # shard 0's keys on the retry.
  self.assertEqual(len(old_vals), 5)
  self.mox.VerifyAll()