def test_zmq_ipc():
    """End-to-end test of the multiprocess pipeline over the ZMQ 'ipc' transport.

    Spins up a dispatcher environment backed by Redis db 1, runs simple,
    parameterised and nested ('test_deep') procedures through it, then
    verifies clean shutdown and that no Redis keys are left behind.

    Fix over original: cleanup (Redis flush + ipc socket directory removal)
    now runs in a ``finally`` block, so a failing assertion no longer leaks
    state into subsequent tests.
    """
    io_threads = 1
    zmq_transport = 'ipc'
    tornado_sockets = False
    num_ventilators = 2
    num_workers = 4
    num_sinks = 2
    is_dispatcher = True
    db_num = 1
    connection_info = {}
    r = connect_redis(db_num, connection_info)
    r.flushdb()
    try:
        with MultiprocessEnvironment(zmq_transport, tornado_sockets, io_threads,
                                     num_ventilators, num_workers, num_sinks,
                                     is_dispatcher, db_num) as g:
            # Repeated runs of the same procedure must all yield the same result.
            procedure, data, result = 'sleep', [-1], 1
            for i in range(10):
                assert g.run(procedure, data) == result
            # Parameterised runs: the result echoes the second data element.
            for i in range(3):
                procedure, data, result = 'test', [-1, i], i
                assert g.run(procedure, data) == result
            # Nested dispatch: expected call count is derived from list_iter
            # by folding from the innermost level outwards, plus the root call.
            list_iter = [10, 10]
            num_iter = reduce(lambda x, y: (x + 1) * y, reversed(list_iter)) + 1
            procedure, data, result = 'test_deep', [-1, list_iter], num_iter
            assert g.run(procedure, data) == result
            g.wait_finish()
            assert g.is_clean()
        # The context manager has exited; sockets should be fully closed.
        assert g.is_closed(linger=0.1)
        assert not r.keys()
    finally:
        # Always restore a clean slate, even when an assertion above fails.
        r.flushdb()
        import shutil
        from settings.settings import DIR_IPC
        shutil.rmtree(DIR_IPC, ignore_errors=True)
def _get_group_id(self):
    """Get the name of the group for this instance. Group must be unique
    between different instances of the class.

    Uses Redis optimistic locking (WATCH/MULTI/EXEC) on the 'group' key to
    allocate a new numeric group code without races between concurrent
    instances.  Dispatchers always take the reserved code -1; other nodes
    take (highest existing code + 1), or 0 when no group exists yet.
    """
    from pipeline.models import connect_redis, code_done
    self.redis = connect_redis(self.db_num, self.connection_info)
    with self.redis.pipeline() as pipe:
        while True:
            try:
                # Watch 'group' so that any concurrent modification between
                # here and execute() raises WatchError and forces a retry.
                pipe.watch('group')
                pipe.multi()
                pipe.exists('group')
                new = pipe.execute()[0]
                if self.is_dispatcher:
                    # Reserved code for the dispatcher instance.
                    new_code = -1
                else:
                    if not new:
                        # No 'group' key yet: this is the first node.
                        new_code = 0
                    else:
                        # Fetch the highest existing code (descending sort,
                        # first element) under a fresh watch/transaction.
                        pipe.watch('group')
                        pipe.multi()
                        pipe.sort('group', start=0, num=1, desc=1)
                        sorted_keys = pipe.execute()[0]
                        if sorted_keys:
                            new_code = int(sorted_keys[0]) + 1
                        else:
                            new_code = 0
                saved = self.db_add_group_id(new_code)
                # NOTE(review): retrying when saved == str(new_code) looks
                # inverted — normally one retries when the save did NOT take.
                # Confirm db_add_group_id's return contract before changing.
                if saved == str(new_code):
                    continue
                return code_done(str(new_code))
            except WatchError:
                # Another client touched 'group' mid-transaction: retry.
                continue
            finally:
                # Drop any watches/queued commands before the next attempt.
                pipe.reset()
def test_interrupt_zmq_tcp():
    """Test that interrupting a worker node mid-task over 'tcp' transport
    leaves the dispatcher able to drain results and shut down cleanly.

    A dispatcher and a separate (non-dispatcher) node environment share the
    same Redis db; a long-running 'test' task is pushed directly onto the
    dispatcher's outgoing socket, then the node is force-exited while the
    task is in flight.
    """
    io_threads = 1
    zmq_transport = 'tcp'
    tornado_sockets = False
    num_ventilators = 2
    num_workers = 4
    num_sinks = 2
    is_dispatcher = True
    db_num = 1
    connection_info = {}
    r = connect_redis(db_num, connection_info)
    r.flushdb()
    with MultiprocessEnvironment(zmq_transport, tornado_sockets, io_threads,
                                 num_ventilators, num_workers, num_sinks,
                                 is_dispatcher, db_num,
                                 connection_info) as g_dispatcher:
        is_dispatcher = False
        with MultiprocessEnvironment(zmq_transport, tornado_sockets, io_threads,
                                     num_ventilators, num_workers, num_sinks,
                                     is_dispatcher, db_num,
                                     connection_info) as g_node:
            import time
            # 'test' task: 0.01s per step, 1000 steps — long enough that the
            # node can be interrupted while the task is still running.
            procedure, data = 'test', [0.01, 1000, []]
            # Multipart send: procedure name first, then each data element as
            # JSON; SNDMORE on all frames except the last.
            g_dispatcher.server.to_socket.send(procedure, SNDMORE)
            for d in data[:-1]:
                g_dispatcher.server.to_socket.send_json(d, SNDMORE)
            g_dispatcher.server.to_socket.send_json(data[-1])
            # Let the task start before interrupting the node.
            time.sleep(4)
            g_node.clean_exit()
            # Block until the (possibly partial) reply is available, then
            # drain every remaining frame of the multipart message.
            g_dispatcher.server.to_socket.poll()
            res = [g_dispatcher.server.to_socket.recv_json(RCVMORE)]
            while g_dispatcher.server.to_socket.RCVMORE:
                res.append(g_dispatcher.server.to_socket.recv_json(RCVMORE))
            print res
        g_dispatcher.wait_finish()
        assert g_dispatcher.is_clean()
    # Both environments must be fully closed and Redis left empty.
    assert g_dispatcher.is_closed(linger=0.1)
    assert g_node.is_closed(linger=0.1)
    assert not r.keys()
    r.flushdb()
def test_connection_redis():
    """Smoke test: connecting to Redis with default arguments yields a
    working client.

    Fix over original: the original only checked that connect_redis() did
    not raise; a PING round trip proves the connection actually reaches a
    live server.
    """
    from pipeline.models import connect_redis
    r = connect_redis()
    assert r.ping()