def __init__(self, processor, lsocket, inputProtocolFactory=None,
             outputProtocolFactory=None, threads=10, readTimeout=None,
             maxQueueSize=0):
    self.processor = self._getProcessor(processor)
    self.socket = lsocket
    self.in_protocol = inputProtocolFactory or TBinaryProtocolFactory()
    self.out_protocol = outputProtocolFactory or self.in_protocol
    self.threads = int(threads)
    self.clients = {}
    # Do not treat this as a hard size maximum - the queue may need
    # extra space for close().
    self.max_queue_size = maxQueueSize
    self.tasks = Queue.Queue()
    self._read, self._write = _create_socketpair()
    self.prepared = False
    self._stop = False
    self.serverEventHandler = TServer.TServerEventHandler()
    self.select_timeout = DEFAULT_SELECT_TIMEOUT
    self.poller = TSocket.ConnectionEpoll() if hasattr(select, "epoll") \
        else TSocket.ConnectionSelect()
    self.last_logged_error = 0
    timeouts = [x for x in [self.select_timeout, readTimeout]
                if x is not None]
    if timeouts:
        self.select_timeout = min(timeouts)
    self._readTimeout = readTimeout
def start_server_process(self):
    """
    Starts a test server in a forked child process and records its pid.

    The server needs to be in a subprocess because we need to run a
    TThreadedServer for the concurrency tests, and the only way to stop
    a TThreadedServer is to kill it, so we can't just use a thread.
    """
    self.pid = os.fork()
    if self.pid != 0:
        logging.info("Started SimpleThriftServer (pid %s) on port %s"
                     % (self.pid, self.port))
        self._ensure_online()
        return
    # Child process runs the thrift server loop
    try:
        processor = TestService.Processor(self)
        transport = TSocket.TServerSocket(self.port)
        server = TServer.TThreadedServer(processor, transport,
                                         TBufferedTransportFactory(),
                                         TBinaryProtocolFactory())
        server.serve()
    except Exception:
        sys.exit(1)
def encode(n, proto_factory=TBinaryProtocolFactory()):
    ab = make_addressbook()
    start = time.time()
    for i in range(n):
        serialize(ab, proto_factory)
    end = time.time()
    print("encode\t-> {}".format(end - start))
def decode(n, proto_factory=TBinaryProtocolFactory()):
    ab = ttypes.AddressBook()
    ab_encoded = serialize(make_addressbook())
    start = time.time()
    for i in range(n):
        deserialize(ab, ab_encoded, proto_factory)
    end = time.time()
    print("decode\t-> {}".format(end - start))
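# A minimal sketch of a driver for the two benchmark helpers above,
# assuming the thriftpy-style serialize/deserialize helpers and the
# make_addressbook() fixture they use; the iteration count is arbitrary.
if __name__ == "__main__":
    n = 100000
    encode(n)  # times n serializations with TBinaryProtocolFactory
    decode(n)  # times n deserializations of the same payload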
def get_client(self, host, port, max_connections=0):
    key = "%s:%s" % (host, port)
    if key not in self.client_pools:
        self.__class__.client_pools[key] = PoolClient(
            Client,
            TStreamPool(host, port, max_stream=max_connections),
            TBinaryProtocolFactory())
    elif max_connections:
        self.client_pools[key]._itrans_pool._max_stream = max_connections
    return self.client_pools[key]
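# Hypothetical usage sketch for the pooled accessor above: the pool is
# keyed by "host:port", so repeated calls for the same endpoint reuse
# one PoolClient instead of opening new streams. `registry` stands in
# for whatever object carries client_pools (the name is an assumption).
client_a = registry.get_client("127.0.0.1", 9095, max_connections=10)
client_b = registry.get_client("127.0.0.1", 9095)
assert client_a is client_b  # same cached PoolClient instance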
def serve(self):
    handler = Handler(self.translator, self.bpe)
    processor = Processor(handler)
    tfactory = TIOStreamTransportFactory()
    protocol = TBinaryProtocolFactory()
    server = TTornadoServer(processor, tfactory, protocol)
    server.bind(9095)
    server.start(1)  # MXNet requires a single thread, single process.
    IOLoop.instance().start()
def main():
    """
    main() loop, called from the jobsubd management command.
    """
    LOG.info("Starting daemon on port %s" % PORT)
    sock = TServerSocket(PORT)
    sock.host = HOST
    TThreadedServer(JobSubmissionService.Processor(JobSubmissionServiceImpl()),
                    sock,
                    TBufferedTransportFactory(),
                    TBinaryProtocolFactory()).serve()
def test_process_missing_function(self):
    processor = FBaseProcessor()
    frame = bytearray(
        b'\x00\x00\x00\x004\x00\x00\x00\x04_cid\x00\x00\x00\x06someid'
        b'\x00\x00\x00\x05_opid\x00\x00\x00\x011\x00\x00\x00\x08_timeout'
        b'\x00\x00\x00\x045000'  # End of context
        b'\x80\x01\x00\x02\x00\x00\x00\x08basePing\x00\x00\x00\x00\x00')
    itrans = TMemoryBuffer(value=frame)
    iprot = FProtocolFactory(TBinaryProtocolFactory()).get_protocol(itrans)
    otrans = TMemoryOutputBuffer(1000)
    oprot = FProtocolFactory(TBinaryProtocolFactory()).get_protocol(otrans)
    yield processor.process(iprot, oprot)
    expected_response = bytearray(
        b'\x80\x01\x00\x03\x00\x00\x00\x08basePing\x00\x00'
        b'\x00\x00\x0b\x00\x01\x00\x00\x00\x1aUnknown function: basePing'
        b'\x08\x00\x02\x00\x00\x00\x01\x00')
    self.assertEqual(otrans.getvalue()[41:], expected_response)
def __init__(self, processor, lsocket, inputProtocolFactory=None,
             outputProtocolFactory=None, threads=10):
    self.processor = processor
    self.socket = lsocket
    self.in_protocol = inputProtocolFactory or TBinaryProtocolFactory()
    self.out_protocol = outputProtocolFactory or self.in_protocol
    self.threads = int(threads)
    self.clients = {}
    self.tasks = Queue.Queue()
    self._read, self._write = socket.socketpair()
    self.prepared = False
def runManageHandler(accounts, accounts_lock, port):
    handler = ManageHandler(accounts, accounts_lock)
    processor = ManageService.Processor(handler)
    transport = TSocket.TServerSocket(host='127.0.0.1', port=port)
    tfactory = TTransport.TBufferedTransportFactory()
    pfactory = TBinaryProtocolFactory()
    server = TServer.TThreadedServer(processor, transport, tfactory, pfactory)
    print('Starting manage server.')
    server.serve()
    print('Manage server done.')
def test_process_processor_exception(self):
    processor = FBaseProcessor()
    proc = Mock()
    e = TException(message='foo bar exception')
    proc.process.side_effect = e
    processor.add_to_processor_map("basePing", proc)
    frame = bytearray(
        b'\x00\x00\x00\x00\x0e\x00\x00\x00\x05_opid\x00\x00\x00\x011'
        b'\x80\x01\x00\x02\x00\x00\x00\x08basePing\x00\x00\x00\x00\x00')
    itrans = TMemoryBuffer(value=frame)
    iprot = FProtocolFactory(TBinaryProtocolFactory()).get_protocol(itrans)
    oprot = Mock()
    processor.process(iprot, oprot)
def do_run(self):
    if settings.USE_SSL:
        socket = TLateInitSSLServerSocket(
            port=settings.THRIFT_PORT,
            certfile=settings.SSL_CERTIFICATE)
    else:
        socket = TServerSocket(port=settings.THRIFT_PORT)
    # server = TThreadedServer(ThriftProcessor(), socket,
    #                          TFramedTransportFactory(),
    #                          TBinaryProtocolFactory())
    self.serverpid = os.getpid()
    self.server = TForkingServer(ThriftProcessor(), socket,
                                 TFramedTransportFactory(),
                                 TBinaryProtocolFactory())
    self.server.serve()
def runStandardHandler(currency_rates, currency_rates_lock, accounts,
                       accounts_lock, port):
    handler = StandardHandler(accounts, accounts_lock,
                              currency_rates, currency_rates_lock)
    processor = PremiumService.Processor(handler)
    transport = TSocket.TServerSocket(host='127.0.0.1', port=port)
    tfactory = TTransport.TBufferedTransportFactory()
    pfactory = TBinaryProtocolFactory()
    server = TServer.TThreadedServer(processor, transport, tfactory, pfactory)
    print('Starting standard server.')
    server.serve()
    print('Standard server done.')
def test_process(self):
    processor = FBaseProcessor()
    proc = Mock()
    processor.add_to_processor_map("basePing", proc)
    frame = bytearray(
        b'\x00\x00\x00\x00\x0e\x00\x00\x00\x05_opid\x00\x00\x00\x011'
        b'\x80\x01\x00\x02\x00\x00\x00\x08basePing\x00\x00\x00\x00\x00')
    itrans = TMemoryBuffer(value=frame)
    iprot = FProtocolFactory(TBinaryProtocolFactory()).get_protocol(itrans)
    oprot = Mock()
    processor.process(iprot, oprot)
    assert proc.process.call_args
    args, _ = proc.process.call_args
    self.assertEqual(args[0].get_response_header(_OPID_HEADER), '1')
    assert args[1] == iprot
    assert args[2] == oprot
def __init__(self, processor, lsocket, inputProtocolFactory=None,
             outputProtocolFactory=None, threads=10):
    self.processor = processor
    self.socket = lsocket
    self.in_protocol = inputProtocolFactory or TBinaryProtocolFactory()
    self.out_protocol = outputProtocolFactory or self.in_protocol
    self.threads = int(threads)
    self.clients = {}
    self.tasks = Queue.Queue()
    self._read, self._write = socket.socketpair()
    self.prepared = False
    self._stop = False
    self.select_timeout = DEFAULT_SELECT_TIMEOUT
def __init__(self, processor, lsocket, inputProtocolFactory=None,
             outputProtocolFactory=None, threads=10):
    self.processor = processor
    self.socket = lsocket
    self.in_protocol = inputProtocolFactory or TBinaryProtocolFactory()
    self.out_protocol = outputProtocolFactory or self.in_protocol
    self.threads = int(threads)
    self.clients = {}
    self.tasks = queue.Queue()
    self._read, self._write = socket.socketpair()
    self.prepared = False
    self._stop = False
    self.poll = select.poll() if hasattr(select, 'poll') else None
def __init__(self, processor, lsocket, inputProtocolFactory=None,
             outputProtocolFactory=None, threads=0):
    assert threads == 0  # Modified thrift server implementation
    self.processor = processor
    self.socket = lsocket
    self.in_protocol = inputProtocolFactory or TBinaryProtocolFactory()
    self.out_protocol = outputProtocolFactory or self.in_protocol
    self.clients = {}
    self.tasks = Queue.Queue()
    self._read, self._write = socket.socketpair()
    self._accept_socket = socket.socket()
    self.prepared = False
    self._stop = False
def get_protocol_factory(protocol):
    """
    Returns the protocol factory associated with the protocol name passed
    in as a command-line argument to the cross runner.

    :param protocol: protocol name string ("binary", "compact" or "json")
    :return: protocol factory
    """
    if protocol == "binary":
        return FProtocolFactory(TBinaryProtocolFactory())
    elif protocol == "compact":
        return FProtocolFactory(TCompactProtocolFactory())
    elif protocol == "json":
        return FProtocolFactory(TJSONProtocolFactory())
    else:
        logging.error("Unknown protocol type: %s", protocol)
        sys.exit(1)
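# Hypothetical call site for get_protocol_factory(): only the factory
# lookup itself comes from the function above; the transport wiring is
# an assumption for illustration.
protocol_factory = get_protocol_factory("binary")
# The returned FProtocolFactory wraps each transport in an FProtocol:
# fprot = protocol_factory.get_protocol(transport)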
def test():
    try:
        transport = TStreamPool('127.0.0.1', 9095, max_stream=10)
        client = PoolClient(Client, transport, TBinaryProtocolFactory())
        for i in range(0, 20):
            res = yield client.translate(
                'Die USA und Großbritannien berichten von einer '
                'mutmaßlichen weltweiten Cyberattacke.')
            res = res.replace('@@ ', '')
            print(res)
            res = yield client.translate(
                'Von der Regierung in Moskau unterstützte Hacker-Gruppen '
                'hätten Router, Switches und Firewalls infiziert, '
                'so Behörden beider Länder.')
            res = res.replace('@@ ', '')
            print(res)
    except Thrift.TException as ex:
        print("%s" % ex.message)
    ioloop.stop()
def test_process(self):
    processor = FBaseProcessor()
    proc = Mock()
    future = Future()
    future.set_result(None)
    proc.process.return_value = future
    processor.add_to_processor_map("basePing", proc)
    frame = bytearray(
        b'\x00\x00\x00\x00\x0e\x00\x00\x00\x05_opid\x00\x00\x00\x011'
        b'\x80\x01\x00\x02\x00\x00\x00\x08basePing\x00\x00\x00\x00\x00')
    itrans = TMemoryBuffer(value=frame)
    iprot = FProtocolFactory(TBinaryProtocolFactory()).get_protocol(itrans)
    oprot = Mock()
    yield processor.process(iprot, oprot)
    assert proc.process.call_args
    args, _ = proc.process.call_args
    assert args[0]._get_op_id() == 1
    assert args[1] == iprot
    assert args[2] == oprot
def __init__(self, host=None, port=10000, authMechanism=None, user=None,
             password=None, configuration=None):
    super(TornadoConnection, self).__init__(authMechanism)
    # Must set a password for thrift, even if it doesn't need one.
    # Open issue with python-sasl.
    password = self._check_password(authMechanism, password)
    if authMechanism == "NOSASL":
        self.transport = TTornadoStreamTransport(host, port)
    else:
        saslc, sasl_mech = self._get_sasl_client(host, authMechanism, user,
                                                 password, configuration)
        self.transport = TSaslClientTransportTornado(saslc, sasl_mech,
                                                     host, port)
    pfactory = TBinaryProtocolFactory()
    self.client = TCLIServiceTornado.Client(self.transport, pfactory)
def run(self):
    processor = ThriftProcessor(self._interface)
    server = self._server_type(processor, self._transport,
                               TFramedTransportFactory(),
                               TBinaryProtocolFactory())
    return server.serve()
def init_pool(self):
    return PoolClient(Client,
                      TStreamPool(self.host, self.port,
                                  max_stream=self.max_stream),
                      TBinaryProtocolFactory())
def setUp(self):
    self.fac = TBinaryProtocolFactory()
def test_TBinaryProtocol(self):
    buf = TTransport.TMemoryBuffer()
    transport = TTransport.TBufferedTransportFactory().getTransport(buf)
    factory = TBinaryProtocolFactory(transport)
    self.verify(self.binary_serialized, factory)
class Server(object):
    def __init__(self, processor, zmq_socket_type=zmq.DEALER, context=None,
                 pool_size=5, mode_ppworker=False, service=None):
        # 1. Get the zeromq context and events
        self.context = context or Context.get_instance()
        self.events = Events(zmq_socket_type, self.context,
                             mode_ppworker=mode_ppworker, service=service)

        # 2. Configure mode_ppworker
        if self.events.mode_ppworker:
            self.liveness = HEARTBEAT_LIVENESS
            self.interval = INTERVAL_INIT
            self.heartbeat_at = time.time() + HEARTBEAT_INTERVAL

        self.processor = processor  # thrift processor
        self.proto_factory_input = TBinaryProtocolFactory()
        self.proto_factory_output = TUtf8StrBinaryProtocolFactory()

        # 4. gevent
        self.task_pool = gevent.pool.Pool(size=pool_size)
        self.acceptor_task = None
        self.events.create_worker_socket()
        self.endpoint = None

        # 5. Shutdown control
        self.alive = True

    def get_heartbeat_msg(self):
        # Protocol: byte0 (action) + byte1 (version) + byte2 (concurrency).
        # One message is sent at startup, then one at each interval.
        return PPP_HEARTBEAT + chr(0) + chr(self.task_pool.free_count())

    def get_ready_msg(self):
        # Protocol: byte0 (action) + byte1 (version) + byte2 (concurrency).
        # One message is sent at startup, then one at each interval.
        return PPP_READY + chr(0) + chr(self.task_pool.free_count())

    # connect/bind the service through the events object
    def connect(self, endpoint, resolve=True):
        self.endpoint = endpoint
        return self.events.connect(endpoint, resolve)

    def bind(self, endpoint, resolve=True):
        self.endpoint = endpoint
        return self.events.bind(endpoint, resolve)

    def close(self):
        self.events.close()
        self.stop()

    def handle_request(self, event):
        # 1. Convert the zeromq message into thrift protocols
        trans_input = TMemoryBuffer(event.msg)
        trans_output = TMemoryBuffer()
        proto_input = self.proto_factory_input.getProtocol(trans_input)
        proto_output = self.proto_factory_output.getProtocol(trans_output)

        # 2. Hand it to the processor
        try:
            self.processor.process(proto_input, proto_output)
            # 3. Convert the thrift result into zeromq-format data
            msg = trans_output.getvalue()
            # print "Return Msg: ", msg, event.id
            self.events.emit(msg, event.id)
        except Exception as e:
            # How should exceptions be handled here?
            # Whatever happens, the process must not die.
            print "Exception: ", e

    def _acceptor(self):
        # run
        #   ---> _acceptor
        #     ---> _handle_request
        #
        # How the server is deployed:
        # 1. A demo server can simply start a ZeroRpcServer without
        #    worrying about the tiny network io overhead.
        # 2. In production a queue or load balancer sits in front of the
        #    ZeroRpcServer, so network io time can likewise be ignored.
        last_queue_time = time.time()
        last_event_time = time.time()
        start = True
        while True:
            if self.events.mode_ppworker:
                # Note: events only reads from input here; if
                # handle_request fails to return data for some reason,
                # the code would still block here.
                # poll_event's argument must be at least 1: 0 would mean
                # wait forever, and None is not allowed either.
                event = self.events.poll_event(1)  # 1 ms (why?)
                now = time.time()
                if event:
                    last_queue_time = now
                    last_event_time = now
                    if self.liveness == 0 or start:
                        start = False
                        print Fore.GREEN, "Queue back to life", Fore.RESET
                    # print "Event: ", event
                    if len(event.msg) == 1 and event.msg == PPP_HEARTBEAT:
                        self.liveness = HEARTBEAT_LIVENESS
                    else:
                        # Normal RPC data
                        self.liveness = HEARTBEAT_LIVENESS
                        self.task_pool.spawn(self.handle_request, event)
                    self.interval = INTERVAL_INIT
                else:
                    # Timeout: no response for too long
                    if self.alive and (now - last_queue_time >= HEARTBEAT_INTERVAL):
                        last_queue_time = time.time()
                        now = time.time()
                        self.liveness -= 1
                        if self.liveness == 0:
                            print Fore.RED, "ID: ", self.events.identity, \
                                "Queue died, Begin to sleep for: ", \
                                self.interval, Fore.RESET
                            # Nothing else to do anyway, so just wait.
                            gevent.sleep(self.interval)
                            if self.interval < INTERVAL_MAX:
                                self.interval *= 2
                            self.liveness = HEARTBEAT_LIVENESS
                            start = True
                            # Re-register (restore the normal state)
                            assert self.endpoint
                            print Fore.RED, "ID: ", self.events.identity, \
                                "Reconnection Queue", Fore.RESET
                            self.events.reconnect(self.get_ready_msg())
                    # If shutting down, no new task arrived within 1s, and
                    # the task_pool is empty, then exit.
                    if not self.alive and (now - last_event_time) > 1.0 \
                            and self.task_pool.free_count() == self.task_pool.size:
                        print Fore.CYAN, "<<<< Suicide Worker Gracefully", Fore.RESET
                        break
                    # We also need to send heartbeats to the queue
                    # (only while still alive).
                    elif now >= self.heartbeat_at and self.alive:
                        print "Send Hb Msg...", self.events.identity
                        self.events.emit(self.get_heartbeat_msg(), None)
                        self.heartbeat_at = time.time() + HEARTBEAT_INTERVAL

                if not self.alive:
                    self.events.emit(PPP_STOP, None)
            else:
                event = self.events.poll_event(1)
                if event:
                    self.task_pool.spawn(self.handle_request, event)

    def run(self):
        import gevent.monkey
        gevent.monkey.patch_socket()

        # 0. Register signal handlers (for ops control)
        self.init_signal()

        # 1. At startup a ppworker tells the lb it is ready,
        #    along with its concurrency.
        if self.events.mode_ppworker:
            print "Send Hb Msg..."
            self.events.emit(self.get_ready_msg(), None)

        # 2. Listen for data
        self.acceptor_task = gevent.spawn(self._acceptor)

        # 3. Wait for completion
        try:
            self.acceptor_task.get()
        finally:
            self.stop()
            self.task_pool.join(raise_error=True)

    def stop(self):
        if self.acceptor_task is not None:
            self.acceptor_task.kill()
            self.acceptor_task = None

    def init_signal(self):
        def handle_int(*_):
            self.alive = False

        def handle_term(*_):
            # Voluntary exit
            self.alive = False

        # 2/15
        signal.signal(signal.SIGINT, handle_int)
        signal.signal(signal.SIGTERM, handle_term)
        print Fore.RED, "To gracefully stop the current worker, use:", Fore.RESET
        print Fore.GREEN, ("kill -15 %s" % os.getpid()), Fore.RESET
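# For reference, a minimal sketch (an assumption, not part of the class
# above) of how the queue side could unpack the 3-byte control frames
# built by get_ready_msg()/get_heartbeat_msg(), assuming PPP_READY and
# PPP_HEARTBEAT are single-byte markers as used above.
def parse_control_msg(msg):
    action = msg[0]           # PPP_READY or PPP_HEARTBEAT
    version = ord(msg[1])     # protocol version, currently chr(0)
    free_slots = ord(msg[2])  # worker concurrency: free greenlets in the pool
    return action, version, free_slots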
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import unittest

from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol.TBinaryProtocol import TBinaryProtocolFactory
from thrift.util.Serializer import serialize, deserialize
from Recursive.ttypes import *

fac = TBinaryProtocolFactory()


class TestRecursivePythonStructs(unittest.TestCase):
    def test_tree(self):
        tree = RecTree()
        child = RecTree()
        tree.children = [child]
        ser = serialize(fac, tree)
        result = RecTree()
        result = deserialize(fac, ser, result)
        self.assertEqual(result, tree)

    def test_list(self):
        l = RecList()
        l2 = RecList()
        l.next = l2
        ser = serialize(fac, l)
        # Round trip completed here to mirror test_tree above
        result = RecList()
        result = deserialize(fac, ser, result)
        self.assertEqual(result, l)
def main():
    parser = argparse.ArgumentParser(description="Run a tornado python server")
    parser.add_argument('--port', dest='port', default='9090')
    parser.add_argument('--protocol', dest='protocol_type', default="binary",
                        choices=["binary", "compact", "json"])
    parser.add_argument('--transport', dest="transport_type",
                        default="stateless", choices=["stateless", "http"])
    args = parser.parse_args()

    if args.protocol_type == "binary":
        protocol_factory = FProtocolFactory(TBinaryProtocolFactory())
    elif args.protocol_type == "compact":
        protocol_factory = FProtocolFactory(TCompactProtocolFactory())
    elif args.protocol_type == "json":
        protocol_factory = FProtocolFactory(TJSONProtocolFactory())
    else:
        logging.error("Unknown protocol type: %s", args.protocol_type)
        sys.exit(1)

    nats_client = NATS()
    options = {"verbose": True, "servers": ["nats://127.0.0.1:4222"]}
    yield nats_client.connect(**options)

    global port
    port = args.port
    handler = FrugalTestHandler()
    subject = "frugal.*.*.rpc.{}".format(args.port)
    processor = Processor(handler)

    if args.transport_type == "stateless":
        server = FNatsServer(nats_client, [subject], processor,
                             protocol_factory)
        # Start the healthcheck so the test runner knows the server is running.
        thread.start_new_thread(healthcheck, (port, ))
        print("Starting {} server...".format(args.transport_type))
        yield server.serve()
    elif args.transport_type == "http":
        factories = {
            'processor': processor,
            'protocol_factory': protocol_factory
        }
        server = Application([(r'/', FHttpHandler, factories)])
        print("Starting {} server...".format(args.transport_type))
        server.listen(port)
    else:
        logging.error("Unknown transport type: %s", args.transport_type)
        sys.exit(1)

    # Set up the subscriber; send a response upon receipt.
    pub_transport_factory = FNatsPublisherTransportFactory(nats_client)
    sub_transport_factory = FNatsSubscriberTransportFactory(nats_client)
    provider = FScopeProvider(pub_transport_factory, sub_transport_factory,
                              protocol_factory)
    global publisher
    publisher = EventsPublisher(provider)
    yield publisher.open()

    @gen.coroutine
    def response_handler(context, event):
        print("received {} : {}".format(context, event))
        preamble = context.get_request_header(PREAMBLE_HEADER)
        if preamble is None or preamble == "":
            logging.error("Client did not provide preamble header")
            return
        ramble = context.get_request_header(RAMBLE_HEADER)
        if ramble is None or ramble == "":
            logging.error("Client did not provide ramble header")
            return
        response_event = Event(Message="Sending Response")
        response_context = FContext("Call")
        global publisher
        global port
        yield publisher.publish_EventCreated(
            response_context, preamble, ramble, "response",
            "{}".format(port), response_event)
        print("Published event={}".format(response_event))

    subscriber = EventsSubscriber(provider)
    yield subscriber.subscribe_EventCreated(
        "*", "*", "call", "{}".format(args.port), response_handler)
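# Hypothetical entry point, not shown in the example above: main() uses
# `yield`, so it is presumably decorated with @gen.coroutine and driven
# by the tornado IOLoop, e.g.:
if __name__ == '__main__':
    main()                     # schedules the coroutine
    IOLoop.instance().start()  # runs it (and the NATS/HTTP handlers)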