def __init__(self, port=5000, host='127.0.0.1', max_workers=10):
    self._port = port
    self._host = host
    # The gRPC server runs on a thread pool with at most max_workers threads;
    # every RPC is handled on one of these worker threads.
    self._server = grpc.server(futures.ThreadPoolExecutor(max_workers=max_workers))
    self.chat_grpc_server = ChatGrpcServer()
    # Register our servicer so this server implements the chat service described in the proto.
    rpc.add_ChatServerServicer_to_server(self.chat_grpc_server, self._server)
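# The constructor above only builds the server; a minimal sketch of a companion
# start method, assuming only the self._server, self._host and self._port
# attributes set in __init__ (the method name start is an assumption):
def start(self):
    # Bind the configured host/port without TLS and begin serving.
    self._server.add_insecure_port('{}:{}'.format(self._host, self._port))
    self._server.start()
    # Block the caller until the server is stopped or the process exits.
    self._server.wait_for_termination()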
def serve(block=False, max_numerical_error_global=10, max_order_error_global=5,
          max_staleness_global=10, max_numerical_error_other=2, max_order_error_other=1,
          max_staleness_other=10, load_check_interval=5, load_threshold=1):
    global pid
    pid = str(os.getpid())
    print('PID:' + pid)
    create_initial_logs()

    global server
    # Start the gRPC server on a thread pool with up to 10 workers.
    server = grpc.server(concurrent.futures.ThreadPoolExecutor(max_workers=10))

    normal_thread_configuration = ThreadConfiguration(
        max_numerical_error_other, max_order_error_other, max_staleness_other)
    global_thread_configuration = ThreadConfiguration(
        max_numerical_error_global, max_order_error_global, max_staleness_global)
    normal_policy = SimpleLoadBasedThreadConfigurationPolicy(
        load_check_interval, load_threshold, normal_thread_configuration)
    global_policy = SimpleLoadBasedThreadConfigurationPolicy(
        load_check_interval, load_threshold, global_thread_configuration)

    servicer = ChatServicer(normal_policy, global_policy)
    chat_pb2_grpc.add_ChatServerServicer_to_server(servicer, server)

    # The port can be passed on the command line; otherwise pick one in 5000-5009.
    args = sys.argv
    if len(args) > 1:
        port = args[1]
    else:
        port = 5000 + random.randint(0, 9)
    server.add_insecure_port('[::]:' + str(port))
    server.start()

    # Register this server instance with the load balancer from a daemon thread.
    ip = 'localhost'
    load_port = 50050
    load_balancer_channel = grpc.insecure_channel(ip + ':' + str(load_port))
    load_balancer_connection = load_balancer_pb2_grpc.LoadBalancerServerStub(
        load_balancer_channel)
    info = load_balancer_pb2.ConnectionInfo(ip='localhost', port=str(port))
    threading.Thread(target=_load_balancer_listener,
                     args=(load_balancer_connection, info, pid),
                     daemon=True).start()

    if block:
        server.wait_for_termination()
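# A minimal sketch of running this entry point and shutting it down cleanly.
# The signal-based shutdown is an assumption for illustration, not part of the
# original module; it relies on serve() setting the module-level `server`.
import signal

if __name__ == '__main__':
    serve(block=False)

    def _shutdown(signum, frame):
        # Let in-flight RPCs finish for up to 5 seconds, then stop the server,
        # which also unblocks wait_for_termination() below.
        server.stop(grace=5)

    signal.signal(signal.SIGTERM, _shutdown)
    signal.signal(signal.SIGINT, _shutdown)
    server.wait_for_termination()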
def openserver(chater):
    port = 11912  # an arbitrary port for the server to listen on
    # max_workers is the number of threads that can handle clients at the same
    # time: once all workers are busy, additional clients cannot be served.
    server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=chater))  # create a gRPC server
    rpc.add_ChatServerServicer_to_server(ChatServer(), server)  # register the servicer with gRPC
    # gRPC manages all the threading and request-handling logic for us.
    print('Starting server. Listening...')
    server.add_insecure_port('[::]:' + str(port))
    server.start()
    # The server runs in background threads, so keep the main thread alive.
    # If we did not wait here, the main thread would end, taking the server's
    # threads with it and stopping the server.
    while True:
        time.sleep(64 * 64 * 100)
    :param context:
    :return:
    """
    print("[{}] {}".format(request.name, request.message))
    # Add it to the chat history
    self.chats.append(request)
    # Remember this client's context so the main loop knows who is connected.
    gConn[request.name] = context
    return chat.Empty()


if __name__ == '__main__':
    port = 11912
    # create a gRPC server
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    servicer = ChatServer()
    rpc.add_ChatServerServicer_to_server(servicer, server)
    print('Starting server. Listening...')
    server.add_insecure_port('[::]:' + str(port))
    server.start()
    # The server runs in background threads, so keep the main thread alive and
    # broadcast a system message every 10 seconds.
    while True:
        if gConn:
            note = chat.Note()
            note.name = "system"
            note.message = "Hello everybody."
            # Appending once to the shared history is enough: each connected
            # client's ChatStream handler picks new entries up from servicer.chats.
            servicer.chats.append(note)
        time.sleep(10)
    :param request:
    :param context:
    :return:
    """
    # Log the message on the server console.
    print("[{}] {}".format(request.name, request.message))
    # Add it to the chat history
    self.chats.append(request)
    # Protobuf requires every RPC to return a message, so return an empty one.
    return chat.Empty()


if __name__ == '__main__':
    port = 11912  # an arbitrary port for the server to listen on
    # max_workers is the number of threads that can handle clients at the same
    # time: once all 10 workers are busy, additional clients cannot be served.
    server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=10))  # create a gRPC server
    rpc.add_ChatServerServicer_to_server(ChatServer(), server)  # register the servicer with gRPC
    # gRPC manages all the threading and request-handling logic for us.
    print('Starting server. Listening...')
    server.add_insecure_port('[::]:' + str(port))
    server.start()
    # The server runs in background threads, so keep the main thread alive.
    # If we did not wait here, the main thread would end, taking the server's
    # threads with it and stopping the server.
    while True:
        time.sleep(64 * 64 * 100)
return "Hello World!" class ChatServer(rpc.ChatServerServicer): def __init__(self): self.chat_messages = [] def ChatStream(self, request, context): last_index = 0 while True: while len(self.chat_messages) > last_index: n = self.chat_messages[last_index] last_index += 1 yield n def SendText(self, request: chat.Text, context): print("SendText: {} {}".format(request.name, request.msg)) self.chat_messages.append(request) return chat.Empty() if __name__ == '__main__': port = 11912 server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) rpc.add_ChatServerServicer_to_server(ChatServer(), server) print("chat server listening... {}".format(port)) server.add_insecure_port('[::]:' + str(port)) server.start() while True: time.sleep(64 * 64 * 100)
class ChatServer(chat_pb2_grpc.ChatServerServicer):

    def __init__(self):
        self.chat_history = []

    # Newly connected clients read the message history from this list, and
    # already connected clients pick up new messages from it as well.
    def ChatStream(self, request_iterator, context):
        lastindex = 0
        while True:
            while len(self.chat_history) > lastindex:
                n = self.chat_history[lastindex]
                lastindex += 1
                yield n

    def SendNote(self, request, context):
        self.chat_history.append(request)
        return chat_pb2.Empty()


if __name__ == "__main__":
    # Handle up to 10 clients at the same time.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    chat_pb2_grpc.add_ChatServerServicer_to_server(ChatServer(), server)
    print("Starting server. Listening...")
    server.add_insecure_port("[::]:11912")
    # start() serves from background daemon threads, so the main thread needs
    # the while True loop below to stay alive.
    server.start()
    while True:
        time.sleep(64 * 64 * 100)
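# Design note: the ChatStream loop above busy-waits whenever the history has no
# new entries, spinning a worker thread at full CPU. A minimal sketch of an
# alternative (not part of the original code; the class name is hypothetical)
# blocks on a threading.Condition that SendNote notifies:
import threading


class ChatServerWithCondition(chat_pb2_grpc.ChatServerServicer):
    """Variant of the servicer above that sleeps instead of busy-waiting."""

    def __init__(self):
        self.chat_history = []
        self._new_message = threading.Condition()

    def ChatStream(self, request_iterator, context):
        lastindex = 0
        while context.is_active():
            # Yield anything this client has not seen yet.
            while len(self.chat_history) > lastindex:
                yield self.chat_history[lastindex]
                lastindex += 1
            with self._new_message:
                # Sleep until SendNote notifies, or for at most a second so we
                # periodically re-check whether the client is still connected.
                self._new_message.wait(timeout=1.0)

    def SendNote(self, request, context):
        self.chat_history.append(request)
        with self._new_message:
            self._new_message.notify_all()
        return chat_pb2.Empty()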