class Transporter(object):
    """
    UDP-based message transport pipeline.

    Outbound: sendMessage() -> serial queue (messages -> datagrams) ->
    package queue (datagrams -> packets <= max_datagram_size) ->
    SendTaskManager (retransmit bookkeeping) -> PacketHandler.
    Inbound: PacketHandler -> unpackage queue (packets -> commands, ACKs
    sent back) -> process queue -> handle*() dispatch.

    Public entry points: isRunning(), bind(), start(), stop(), connect(),
    disconnect(), sendMessage().

    NOTE(review): notifyProcess(), timeoutCheckProcess() and the handle*()
    command handlers are referenced but not defined in this chunk --
    presumably supplied elsewhere (e.g. a subclass); confirm before use.
    """
    ##seconds between retransmit-timeout scans (consumed by the unseen
    ##timeoutCheckProcess -- TODO confirm)
    timeout_check_interval = 1
    ##per-task retransmit timeout (seconds) and retry count
    max_timeout = 5
    max_retry = 2
    ##capacity of the send-task table
    max_task = 10000
    ##soft cap for every internal queue
    max_queue = 50000

    def __init__(self, name, logger_name, handler=None, socket_count=1,
                 process_channel=1, notify_channel=1, max_datagram_size=548):
        """
        @name: local node name, carried in connect/disconnect requests
        @logger_name: logging channel name
        @handler: optional upper-layer handler (stored only)
        @socket_count: number of UDP sockets/channels to bind
        @process_channel: worker threads per pipeline stage
        @notify_channel: notify worker threads
        @max_datagram_size: max packet payload; message fragments get
            20 bytes less to leave room for per-datagram framing
        """
        self.handler = handler
        self.name = name
        self.bound = False
        self.ip = ""
        self.port = 0
        ##server_key: random sha1 hex digest identifying this instance
        int_value = random.random() * 10000000
        sha = hashlib.sha1()
        sha.update(str(int_value))
        self.server_key = sha.hexdigest()
        self.channel_lock = threading.RLock()
        self.channel_index = 0
        self.channels = []
        self.socket_count = socket_count
        self.process_channel = process_channel
        self.notify_channel = notify_channel
        self.logger = logging.getLogger(logger_name)
        self.endpoint_manager = EndpointManager()
        self.max_datagram_size = max_datagram_size
        self.max_message_size = max_datagram_size - 20
        self.status = StatusEnum.stopped
        self.status_mutex = threading.RLock()
        ##each stage has: an availability event, a request queue, worker
        ##threads and a lock guarding the queue
        self.serial_request_available = ResetEvent()
        ##list of tuple (session_id, list of AppMessage)
        self.serial_request = []
        self.serial_thread = []
        self.serial_lock = threading.Lock()
        self.package_request_available = ResetEvent()
        ##list of tuple (remote_ip, remote_port, datagram_list); a datagram
        ##is the stringified form of ConnectRequest()/MessageData()/...
        self.package_request = []
        self.package_thread = []
        self.package_lock = threading.Lock()
        self.unpackage_request_available = ResetEvent()
        ##list of tuple (packet, remote_ip, remote_port)
        self.unpackage_request = []
        self.unpackage_thread = []
        self.unpackage_lock = threading.Lock()
        self.process_request_available = ResetEvent()
        ##list of tuple (address, TransportCommand subclass instance)
        self.process_request = []
        self.process_thread = []
        self.process_lock = threading.Lock()
        self.notify_request_available = ResetEvent()
        self.notify_request = []
        self.notify_thread = []
        self.notify_lock = threading.Lock()
        for i in range(process_channel):
            self.serial_thread.append(
                threading.Thread(target=self.serialProcess))
            self.package_thread.append(
                threading.Thread(target=self.packageProcess))
            self.unpackage_thread.append(
                threading.Thread(target=self.unpackageProcess))
            self.process_thread.append(
                threading.Thread(target=self.processProcess))
        for i in range(notify_channel):
            self.notify_thread.append(
                threading.Thread(target=self.notifyProcess))
        self.packet_handler = None
        self.task_manager = SendTaskManager(self.max_task, self.max_timeout,
                                            self.max_retry)
        ##timeout check
        self.timeout_check_event = threading.Event()
        self.timeout_check_thread = threading.Thread(
            target=self.timeoutCheckProcess)

    def isRunning(self):
        """True while the transporter is in running state."""
        return (StatusEnum.running == self.status)

    def bind(self, address, start_port=5600, port_range=200):
        """
        Bind socket_count UDP sockets starting at start_port.
        @return: True on success; False when the PacketHandler cannot bind.
        NOTE(review): port_range is accepted but unused here -- the
        PacketHandler probes ports itself; confirm intended.
        """
        buf_size = 2 * 1024 * 1024
        self.packet_handler = PacketHandler(address, start_port,
                                            self.socket_count,
                                            self.onPacketReceived, buf_size)
        if not self.packet_handler.initial():
            return False
        self.bound = True
        self.ip = address
        self.port = self.packet_handler.getDefaultPort()
        ports = self.packet_handler.getLocalPorts()
        self.logger.info("<Transporter>bind to '%s:%d' success, port list:%s"
                         % (self.ip, self.port, ports))
        ##one ChannelInfo per bound socket
        for index in range(len(ports)):
            port = ports[index]
            info = ChannelInfo(index)
            info.ip = address
            info.port = port
            self.channels.append(info)
        return True

    def getListenPort(self):
        """Default local port, or -1 when not bound."""
        if self.bound:
            return self.port
        else:
            return -1

    def start(self):
        """Start all worker threads; False if not currently stopped."""
        with self.status_mutex:
            if StatusEnum.stopped != self.status:
                self.logger.error(
                    "<Transporter>start transporter fail, not in stop status")
                return False
            self.status = StatusEnum.running
        self.packet_handler.start()
        for i in range(self.process_channel):
            self.serial_thread[i].start()
            self.package_thread[i].start()
            self.unpackage_thread[i].start()
            self.process_thread[i].start()
        for i in range(self.notify_channel):
            self.notify_thread[i].start()
        self.timeout_check_event.clear()
        self.timeout_check_thread.start()
        self.logger.info("<Transporter>start transporter success")
        return True

    def stop(self):
        """Disconnect all endpoints, wake every worker and join them."""
        with self.status_mutex:
            if StatusEnum.stopped == self.status:
                return
            if StatusEnum.running == self.status:
                self.status = StatusEnum.stopping
        self.logger.info("<Transporter>stopping transporter...")
        self.disconnectAll()
        ##wake every blocked worker so it can observe the status change
        self.timeout_check_event.set()
        self.serial_request_available.set()
        self.package_request_available.set()
        self.unpackage_request_available.set()
        self.process_request_available.set()
        self.notify_request_available.set()
        self.packet_handler.stop()
        self.timeout_check_thread.join()
        for i in range(self.process_channel):
            self.serial_thread[i].join()
            self.package_thread[i].join()
            self.unpackage_thread[i].join()
            self.process_thread[i].join()
        for i in range(self.notify_channel):
            self.notify_thread[i].join()
        with self.status_mutex:
            self.status = StatusEnum.stopped
        self.logger.info("<Transporter>transporter stopped")

    def connect(self, remote_name, remote_ip, remote_port):
        """
        Allocate a local session for remote_name and send a ConnectRequest.
        @return: True when the request was dispatched; the actual handshake
            completes asynchronously (handled by the unseen handle* methods).
        """
        if self.endpoint_manager.isExists(remote_name):
            self.logger.error(
                "<Transporter>connect fail, remote endpoint '%s' already exists"
                % (remote_name))
            return False
        session_id = self.endpoint_manager.allocate(remote_name)
        if -1 == session_id:
            self.logger.error(
                "<Transporter>connect fail, can't allocate endpoint for endpoint '%s'"
                % (remote_name))
            return False
        ##new session: spread sessions across channels round-robin by id
        channel_id = session_id % (self.socket_count)
        endpoint = self.endpoint_manager.getSession(session_id)
        if not endpoint.initial(channel_id):
            self.logger.error(
                "<Transporter>connect fail, can't initial endpoint")
            return False
        info = self.channels[channel_id]
        request = ConnectRequest()
        request.sender = session_id
        request.ip = info.ip
        request.port = info.port
        request.name = self.name
        ##client_key: random sha1 hex digest for this connect attempt
        int_value = random.random() * 10000000
        sha = hashlib.sha1()
        sha.update(str(int_value))
        request.client_key = sha.hexdigest()
        datagram = request.toString()
        self.sendDatagram(remote_ip, remote_port, [datagram])
        return True

    def disconnect(self, session_id):
        """Send a DisconnectRequest for the given local session."""
        endpoint = self.endpoint_manager.getSession(session_id)
        if not endpoint:
            self.logger.error(
                "<Transporter>disconnect fail, invalid session %d"
                % (session_id))
            return
        request = DisconnectRequest()
        request.name = self.name
        request.session = endpoint.remote_session
        self.sendDatagramToSession(session_id, [request.toString()])

    def disconnectAll(self):
        """Send a disconnect request to every connected endpoint."""
        session_list = self.endpoint_manager.getConnectedEndpoint()
        if 0 == len(session_list):
            return
        for session_id in session_list:
            self.disconnect(session_id)

    def sendMessage(self, session_id, message_list):
        """
        Queue messages for serialization/sending on session_id.
        @message_list: list of AppMessage-like objects with toString()
        @return: False when the serial queue is full, True otherwise.
        """
        with self.serial_lock:
            if self.max_queue < len(self.serial_request):
                self.logger.error(
                    "<Transporter>send message fail, send queue is full")
                return False
            self.serial_request.append((session_id, message_list))
            self.serial_request_available.set()
        return True

    def serialProcess(self):
        """
        Worker: convert queued (session_id, message_list) entries into
        MessageData datagrams, splitting payloads larger than
        max_message_size into indexed fragments, then hand them to the
        package stage.
        """
        max_fetch = 100
        while self.isRunning():
            ##wait for signal
            self.serial_request_available.wait()
            if not self.isRunning():
                ##double protect: pass the notify on to sibling workers
                self.serial_request_available.set()
                break
            with self.serial_lock:
                request_count = len(self.serial_request)
                if 0 == request_count:
                    ##empty (spurious wakeup)
                    continue
                ##FIFO: pop a batch off the front
                fetch_count = min(request_count, max_fetch)
                if fetch_count < request_count:
                    ##more remains: re-signal ourselves
                    self.serial_request_available.set()
                request_list = self.serial_request[:fetch_count]
                del self.serial_request[:fetch_count]
            ##process outside the lock
            try:
                ##list of (ip, port, datagram_list)
                package_request = []
                for request in request_list:
                    session_id = request[0]
                    message_list = request[1]
                    endpoint = self.endpoint_manager.getSession(session_id)
                    if not endpoint:
                        self.logger.error(
                            "<Transporter>send message fail, invalid session %d"
                            % session_id)
                        continue
                    if not endpoint.isConnected():
                        self.logger.error(
                            "[%08X]send message fail, session disconnected"
                            % session_id)
                        continue
                    begin_serial, end_serial = endpoint.allocateSerial(
                        len(message_list))
                    if 0 == begin_serial:
                        self.logger.error(
                            "[%08X]send message fail, allocate serial fail"
                            % session_id)
                        continue
                    message_serial = begin_serial
                    datagram_list = []
                    for message in message_list:
                        content = message.toString()
                        if len(content) > self.max_message_size:
                            ##split into fragments of max_message_size
                            length = len(content)
                            ##fix: ceil(length / max_message_size).  The old
                            ##formula ((length - length %% m) / m + 1) counted
                            ##one fragment too many when length was an exact
                            ##multiple of m, so receivers waited forever for a
                            ##fragment that was never produced; '//' also keeps
                            ##this an integer under py3 semantics.
                            total = (length + self.max_message_size - 1) \
                                // self.max_message_size
                            begin = 0
                            end = begin + self.max_message_size
                            index = 1
                            while begin != len(content):
                                message_data = MessageData()
                                message_data.serial = message_serial
                                message_data.index = index
                                message_data.total = total
                                message_data.data = content[begin:end]
                                message_data.session = endpoint.remote_session
                                datagram_list.append(message_data.toString())
                                ##next fragment
                                index += 1
                                begin = end
                                end = begin + self.max_message_size
                                if end > len(content):
                                    end = len(content)
                        else:
                            ##fits in a single datagram
                            message_data = MessageData()
                            message_data.serial = message_serial
                            message_data.index = 1
                            message_data.total = 1
                            message_data.data = content
                            message_data.session = endpoint.remote_session
                            datagram_list.append(message_data.toString())
                        ##advance serial, wrapping past max_serial
                        if message_serial > EndpointSession.max_serial:
                            message_serial = 1
                        else:
                            message_serial += 1
                    package_request.append(
                        (endpoint.nat_ip, endpoint.nat_port, datagram_list))
                ##end of for request in request_list:
                self.putToPackage(package_request)
            except Exception as e:
                self.logger.exception(
                    "<Transporter>exception when serial message, message:%s"
                    % (e.args))

    def putToPackage(self, request_list):
        """
        Queue datagrams for packing into packets.
        @request_list: list of tuple (remote_ip, remote_port, datagram_list);
            a datagram is the stringified form of ConnectRequest(),
            MessageData(), ...
        @return: False when the package queue is full.
        """
        with self.package_lock:
            if self.max_queue < len(self.package_request):
                self.logger.error(
                    "<Transporter>put package fail, package queue is full")
                return False
            self.package_request.extend(request_list)
            self.package_request_available.set()
        return True

    def packageProcess(self):
        """
        Worker: group queued datagrams by destination address, concatenate
        them into packets no larger than max_datagram_size (each datagram
        already carries its own framing header), register the packets with
        the task manager for retransmission and push them to the
        PacketHandler.
        """
        max_fetch = 100
        while self.isRunning():
            ##wait for signal
            self.package_request_available.wait()
            if not self.isRunning():
                ##double protect: pass the notify on
                self.package_request_available.set()
                break
            with self.package_lock:
                request_count = len(self.package_request)
                if 0 == request_count:
                    ##empty
                    continue
                ##FIFO: pop a batch off the front
                fetch_count = min(request_count, max_fetch)
                if fetch_count < request_count:
                    ##more remains: re-signal ourselves
                    self.package_request_available.set()
                request_list = self.package_request[:fetch_count]
                del self.package_request[:fetch_count]
            ##package outside the lock
            send_list = []  ##list of (packet, ip, port)
            ##key = (ip, port), value = datagram_list
            rawdata = {}
            ##regroup requests by destination address
            for request in request_list:
                address = (request[0], request[1])
                datagram_list = request[2]
                if address not in rawdata:
                    ##fix: copy instead of aliasing -- extending the caller's
                    ##list in the other branch would otherwise mutate it
                    rawdata[address] = list(datagram_list)
                else:
                    rawdata[address].extend(datagram_list)
            ##concatenate datagrams into packets per address
            packets = {}
            for address in rawdata.keys():
                packets[address] = []
                cache = ""
                length = 0
                for data in rawdata[address]:
                    data_length = len(data)
                    if (data_length + length) > self.max_datagram_size:
                        ##would overflow: flush current packet, start anew
                        if 0 != length:
                            packets[address].append(cache)
                        cache = data
                        length = data_length
                    else:
                        cache += data
                        length += data_length
                if 0 != len(cache):
                    ##flush last packet
                    packets[address].append(cache)
            ##flatten into the send list
            for address in packets.keys():
                ip = address[0]
                port = address[1]
                for packet in packets[address]:
                    send_list.append((packet, ip, port))
            packet_count = len(send_list)
            if 0 == packet_count:
                ##nothing to send (e.g. only empty datagram lists queued);
                ##skip instead of asking the task manager for 0 tasks
                continue
            id_list = self.task_manager.allocate(packet_count)
            if 0 == len(id_list):
                self.logger.error(
                    "<Transporter>package fail, allocate %d task fail"
                    % (packet_count))
                continue
            updated = self.task_manager.update(id_list, send_list)
            if updated != packet_count:
                self.logger.error(
                    "<Transporter>package fail, not all task updated (%d / %d)"
                    % (updated, packet_count))
                self.task_manager.deallocate(id_list)
                continue
            ##refetch the packed data (task manager adds its own framing --
            ##presumably the 3-byte header seen in unpackageProcess; confirm)
            send_list = self.task_manager.fetch(id_list)
            if not self.packet_handler.sendPacketList(send_list):
                self.logger.error(
                    "<Transporter>package fail, send packet to handler fail")
                self.task_manager.deallocate(id_list)

    def sendDatagramToSession(self, session_id, datagram_list):
        """Send raw datagrams to the NAT address recorded for a session."""
        endpoint = self.endpoint_manager.getSession(session_id)
        if not endpoint:
            self.logger.error(
                "<Transporter>send fail, invalid endpoint id %d"
                % (session_id))
            return False
        return self.sendDatagram(endpoint.nat_ip, endpoint.nat_port,
                                 datagram_list)

    def sendDatagram(self, remote_ip, remote_port, datagram_list):
        """Queue raw datagrams for a specific remote address."""
        return self.putToPackage([(remote_ip, remote_port, datagram_list)])

    def onPacketReceived(self, message_list):
        """
        PacketHandler callback: queue received packets for unpacking.
        @message_list: list of (packet, remote_ip, remote_port)
        """
        with self.unpackage_lock:
            if self.max_queue < len(self.unpackage_request):
                self.logger.error(
                    "<Transporter>put unpackage fail, unpackage queue is full")
                return False
            self.unpackage_request.extend(message_list)
            self.unpackage_request_available.set()
        return True

    def unpackageProcess(self):
        """
        Worker: parse received packets into transport commands.

        Wire format per datagram (big-endian): 1 byte header (high nibble =
        Datagram.header_mask, 2 bits version, 2 bits type) + 2 bytes
        sequence.  Type 1 is a 3-byte ACK; any other type is data with a
        further 2-byte payload length and 4-byte CRC32 before the payload.
        ACKed sequences release their send tasks; data payloads are
        unserialized, acknowledged and forwarded to the process stage.
        """
        max_fetch = 100
        while self.isRunning():
            ##wait for signal
            self.unpackage_request_available.wait()
            if not self.isRunning():
                ##double protect: pass the notify on
                self.unpackage_request_available.set()
                break
            with self.unpackage_lock:
                request_count = len(self.unpackage_request)
                if 0 == request_count:
                    ##empty
                    continue
                ##FIFO: pop a batch off the front
                fetch_count = min(request_count, max_fetch)
                if fetch_count < request_count:
                    ##more remains: re-signal ourselves
                    self.unpackage_request_available.set()
                request_list = self.unpackage_request[:fetch_count]
                del self.unpackage_request[:fetch_count]
            ##list of (address, TransportCommand subclass instance)
            received_datagram = []
            ##sequence numbers acknowledged by the peer
            finished = []
            ##key = address, value = list of serialized DatagramACK
            ack_packets = {}
            for request in request_list:
                remote_ip = request[1]
                remote_port = request[2]
                packet = request[0]
                address = (remote_ip, remote_port)
                length = len(packet)
                begin = 0
                ##a datagram needs at least the 3-byte header
                while (length - begin) >= 3:
                    header, seq = struct.unpack(
                        ">BH", packet[begin:(begin + 3)])
                    if Datagram.header_mask != ((header & 0xF0) >> 4):
                        ##not our protocol: drop the rest of the packet
                        break
                    version = (header & 0x0C) >> 2
                    data_type = header & 0x03
                    if 1 == data_type:
                        ##ack: release the matching send task
                        finished.append(seq)
                        begin += 3
                    else:
                        ##data
                        if (length - begin) < 9:
                            ##incomplete header
                            break
                        data_length, crc = struct.unpack(
                            ">HI", packet[(begin + 3):(begin + 9)])
                        content_offset = begin + 9
                        data_content = packet[content_offset:(
                            content_offset + data_length)]
                        ##crc check (mask to unsigned 32-bit)
                        computed_crc = zlib.crc32(data_content) & 0xFFFFFFFF
                        if computed_crc != crc:
                            ##data damaged: drop the rest of the packet
                            break
                        ##unserialize into command objects
                        command_list = unpackageFromRawdata(data_content)
                        for command in command_list:
                            received_datagram.append((address, command))
                        ##queue an ACK for this sequence
                        ack = DatagramACK(seq)
                        if address not in ack_packets:
                            ack_packets[address] = [ack.toString()]
                        else:
                            ack_packets[address].append(ack.toString())
                        begin = content_offset + data_length
                ##end while (length - begin) >= 3:
            ##end for request in request_list:
            if 0 != len(ack_packets):
                send_list = []  ##list of (packet, ip, port)
                ##send ACKs directly, bypassing the retransmit machinery
                for address in ack_packets.keys():
                    ack_list = ack_packets[address]
                    for packet in ack_list:
                        send_list.append((packet, address[0], address[1]))
                if not self.packet_handler.sendPacketList(send_list):
                    self.logger.warn("<Transporter>try send %d ack fail!"
                                     % (len(send_list)))
            self.task_manager.deallocate(finished)
            self.putToProcess(received_datagram)

    def putToProcess(self, request_list):
        """
        Queue parsed commands for dispatch.
        @request_list: list of tuple (address, TransportCommand subclass)
        @return: False when the process queue is full.
        """
        with self.process_lock:
            if self.max_queue < len(self.process_request):
                self.logger.error(
                    "<Transporter>put to process fail, process queue is full")
                return False
            self.process_request.extend(request_list)
            self.process_request_available.set()
        return True

    def processProcess(self):
        """
        Worker: dispatch each parsed command to its handle* method by
        command type.  The handlers themselves are defined outside this
        chunk (presumably a subclass) -- confirm.
        """
        max_fetch = 100
        while self.isRunning():
            ##wait for signal
            self.process_request_available.wait()
            if not self.isRunning():
                ##double protect: pass the notify on
                self.process_request_available.set()
                break
            with self.process_lock:
                request_count = len(self.process_request)
                if 0 == request_count:
                    ##empty
                    continue
                ##FIFO: pop a batch off the front
                fetch_count = min(request_count, max_fetch)
                if fetch_count < request_count:
                    ##more remains: re-signal ourselves
                    self.process_request_available.set()
                request_list = self.process_request[:fetch_count]
                del self.process_request[:fetch_count]
            for request in request_list:
                address = request[0]
                command = request[1]
                try:
                    if command.type == TransportCommand.type_keep_alive:
                        self.handleKeepAlive(command, command.session)
                    elif command.type == TransportCommand.type_connect_request:
                        self.handleConnectRequest(command, address,
                                                  command.session)
                    elif command.type == TransportCommand.type_connect_response:
                        self.handleConnectResponse(command, address,
                                                   command.session)
                    elif command.type == TransportCommand.type_connect_acknowledge:
                        self.handleConnectACK(command, command.session)
                    elif command.type == TransportCommand.type_disconnect_request:
                        self.handleDisconnectRequest(command, command.session)
                    elif command.type == TransportCommand.type_disconnect_response:
                        self.handleDisconnectResponse(command, command.session)
                    elif command.type == TransportCommand.type_message_data:
                        self.handleMessageData(command, command.session)
                except Exception as ex:
                    ##one bad command must not kill the worker thread
                    self.logger.error(
                        "<Transporter>handle received datagram exception:%s"
                        % (ex))
                    continue
class BaseService(LoggerHelper):
    """
    Generic single-worker request-processing service.

    Requests queued via putRequest()/putRequestList()/insertRequest() are
    consumed in FIFO batches by one worker thread.  Subclasses override
    onStart()/onStop() for lifecycle hooks and OnRequestReceived() to
    handle each request.
    """

    def __init__(self, logger_name, max_request=10000):
        """
        @logger_name: name forwarded to the LoggerHelper base
        @max_request: queue capacity; put/insert fail beyond this
        """
        LoggerHelper.__init__(self, logger_name)
        ##fix: honor the caller-supplied limit (was hard-coded to 10000,
        ##silently ignoring the max_request argument)
        self.__max_request = max_request
        self.__status = StatusEnum.stopped
        self.__status_mutex = threading.RLock()
        ##event is blocked right after creation; set() on new requests/stop
        self.__request_available = ResetEvent()
        self.__request_list = []
        self.__request_lock = threading.Lock()
        self.__main_thread = threading.Thread(target=self.__mainProcess)

    def start(self):
        """
        Start the service.
        @return: False when already started or when onStart() vetoes
            startup; True otherwise.
        """
        with self.__status_mutex:
            if StatusEnum.stopped != self.__status:
                return False
            if not self.onStart():
                return False
            self.__status = StatusEnum.running
            self.__main_thread.start()
        return True

    def stop(self):
        """Stop the service, join the worker thread and call onStop()."""
        with self.__status_mutex:
            if StatusEnum.stopped == self.__status:
                return
            if StatusEnum.running == self.__status:
                self.__status = StatusEnum.stopping
        ##wake the worker so it can observe the status change
        self.__request_available.set()
        self.__main_thread.join()
        with self.__status_mutex:
            self.__status = StatusEnum.stopped
        self.onStop()

    def __mainProcess(self):
        """Worker loop: fetch up to max_batch requests and dispatch each."""
        max_batch = 100
        while StatusEnum.running == self.__status:
            ##wait for signal
            self.__request_available.wait()
            if StatusEnum.running != self.__status:
                ##double protect: pass the notify on to any other waiter
                self.__request_available.set()
                break
            with self.__request_lock:
                request_count = len(self.__request_list)
                if 0 == request_count:
                    ##empty (spurious wakeup)
                    continue
                ##FIFO: pop a batch off the front
                fetch_count = min(request_count, max_batch)
                if fetch_count < request_count:
                    ##more remains: re-signal ourselves
                    self.__request_available.set()
                request_list = self.__request_list[:fetch_count]
                del self.__request_list[:fetch_count]
            try:
                for request in request_list:
                    self.OnRequestReceived(request)
            except Exception as e:
                ##str(e) instead of e.args[0]: exceptions raised without
                ##arguments would otherwise trigger an IndexError here and
                ##mask the original error
                self.console("<BaseService>OnRequestReceived exception:%s"
                             % str(e))
                self.exception("<BaseService>OnRequestReceived exception:%s"
                               % str(e))
                traceback.print_exc()

    def putRequest(self, request):
        """
        Append one request at the queue tail.
        @return: False when the queue is full.
        """
        with self.__request_lock:
            length = len(self.__request_list)
            if length >= self.__max_request:
                self.console(
                    "<BaseService> put request fail, request queue is full")
                self.error(
                    "<BaseService> put request fail, request queue is full")
                return False
            self.__request_list.append(request)
            self.__request_available.set()
        return True

    def putRequestList(self, request_list):
        """
        Append a list of requests at the queue tail.
        @return: False when the queue is full.
        """
        with self.__request_lock:
            length = len(self.__request_list)
            if length >= self.__max_request:
                self.console(
                    "<BaseService> put request list fail, request queue is full"
                )
                self.error(
                    "<BaseService> put request list fail, request queue is full"
                )
                return False
            self.__request_list.extend(request_list)
            self.__request_available.set()
        return True

    def insertRequest(self, request):
        """
        Insert one request at the queue head (processed before queued ones).
        @return: False when the queue is full.
        """
        with self.__request_lock:
            length = len(self.__request_list)
            if length >= self.__max_request:
                self.console(
                    "<BaseService> insert request fail, request queue is full")
                self.error(
                    "<BaseService> insert request fail, request queue is full")
                return False
            self.__request_list.insert(0, request)
            self.__request_available.set()
        return True

    ##-- hooks intended to be overridden by subclasses --

    def onStart(self):
        """
        Initialization hook.
        @return: False = initial fail, stop service;
                 True = initial success, start main service.
        NOTE(review): the default returns None (falsy), so start() fails
        unless a subclass overrides this -- apparently intentional.
        """
        pass

    def onStop(self):
        """Cleanup hook, called after the worker thread has stopped."""
        pass

    def OnRequestReceived(self, request):
        """Handle one queued request; runs on the worker thread."""
        pass
class PacketHandler(object):
    """
    Non-blocking UDP socket pool with epoll-based readiness dispatch.

    Binds port_count sockets starting at start_port, monitors them with
    edge-triggered epoll and hands readable/writable sockets to dedicated
    receive/send worker threads.  Received packets are delivered to the
    callback as a list of (packet, remote_ip, remote_port).

    NOTE(review): sendProcess(), receiveProcess() and notifyProcess() are
    referenced but not defined in this chunk -- presumably provided
    elsewhere (e.g. a subclass); confirm before use.

    Public surface: initial(), start(), stop(), getLocalIP(),
    getDefaultPort(), getLocalPorts(), sendPacket(), sendPacketList(),
    onPacketReceived().
    """
    ##send queue capacity
    max_queue = 50000
    threhold = 5
    max_batch = 20
    ##epoll timeout in seconds
    slow_interval = 1
    ##20 ms
    normal_interval = 0.02
    ##5 ms
    fast_interval = 0.005

    def __init__(self, listen_ip, start_port, port_count, callback=None,
                 bufsize=2 * 1024 * 1024, send_thread=1, receive_thread=1):
        """
        @listen_ip: local address to bind
        @start_port: first port probed by initial()
        @port_count: number of sockets to bind
        @callback: callable taking a list of (packet, remote_ip, remote_port)
        @bufsize: kernel send/receive buffer size per socket
        @send_thread / @receive_thread: worker thread counts
        """
        self.local_ip = listen_ip
        self.local_ports = []
        self.start_port = start_port
        self.port_count = port_count
        self.callback = callback
        self.bufsize = bufsize
        self.sender_count = send_thread
        self.receiver_count = receive_thread
        self.status = StatusEnum.stopped
        self.status_mutex = threading.RLock()
        self.sockets = []
        ##monitor watches readiness on every socket and dispatches to the
        ##corresponding send/receive threads
        self.monitor_thread = threading.Thread(target=self.monitorProcess)
        self.notify_thread = threading.Thread(target=self.notifyProcess)
        ##list of tuple (data, remote_ip, remote_port)
        self.notify_queue = []
        self.notify_lock = threading.RLock()
        self.notify_available = ResetEvent()
        ##list of (consolidated_packet, remote_ip, remote_port)
        self.send_packet_queue = []
        self.send_packet_lock = threading.RLock()
        self.sendable_socket = []
        self.sendable_socket_available = ResetEvent()
        self.sendable_lock = threading.RLock()
        self.send_packet_available = ResetEvent()
        self.send_packet_thread = []  ##runs self.sendProcess
        self.receivable_socket = []
        self.receivable_lock = threading.RLock()
        self.receivable_socket_available = ResetEvent()
        self.receive_packet_thread = []  ##runs self.receiveProcess

    def getLocalIP(self):
        """Bound local address."""
        return self.local_ip

    def getDefaultPort(self):
        """First bound port, or -1 when nothing is bound yet."""
        if 0 == len(self.local_ports):
            return -1
        else:
            return self.local_ports[0]

    def getLocalPorts(self):
        """All bound local ports, in bind order."""
        return self.local_ports

    def sendPacket(self, packet, remote_ip, remote_port):
        """
        Queue one packet for sending.
        @return: False when the send queue is full.
        """
        with self.send_packet_lock:
            current_length = len(self.send_packet_queue)
            if current_length < self.max_queue:
                self.send_packet_queue.append((packet, remote_ip, remote_port))
                self.send_packet_available.set()
                return True
            else:
                logging.warn("<PacketHandler>send queue is full, %d / %d"
                             % (current_length, self.max_queue))
                return False

    def sendPacketList(self, request_list):
        """
        Queue a batch of packets for sending.
        @request_list: list of (packet, remote_ip, remote_port)
        @return: False when the send queue is full.
        """
        with self.send_packet_lock:
            current_length = len(self.send_packet_queue)
            if current_length < self.max_queue:
                self.send_packet_queue.extend(request_list)
                self.send_packet_available.set()
                return True
            else:
                logging.warn("<PacketHandler>send queue is full, %d / %d"
                             % (current_length, self.max_queue))
                return False

    def onPacketReceived(self, message_list):
        """
        Forward received packets to the registered callback.
        @message_list: list of (packet, remote_ip, remote_port)
        """
        if self.callback is not None:
            self.callback(message_list)

    def initial(self):
        """
        Bind port_count non-blocking UDP sockets, probing up to max_try
        consecutive ports from start_port, then create the worker threads.
        @return: True on success, False when not enough ports are free.
        """
        max_try = 1000
        count = 0
        ip = self.local_ip
        for port in range(self.start_port, self.start_port + max_try):
            try:
                new_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                new_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF,
                                      self.bufsize)
                new_socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF,
                                      self.bufsize)
                new_socket.setblocking(0)
                new_socket.bind((ip, port))
            except socket.error:
                ##port occupied: try the next one
                continue
            ##bind success
            self.local_ports.append(port)
            self.sockets.append(new_socket)
            count += 1
            if count >= self.port_count:
                logging.info("<PacketHandler> %d socket(s) established"
                             % (self.port_count))
                ##initial threads
                for i in range(self.sender_count):
                    self.send_packet_thread.append(
                        threading.Thread(target=self.sendProcess))
                for i in range(self.receiver_count):
                    self.receive_packet_thread.append(
                        threading.Thread(target=self.receiveProcess))
                logging.info(
                    "<PacketHandler> %d send thread, %d receive thread ready"
                    % (self.sender_count, self.receiver_count))
                return True
        ##probed every candidate port without binding enough sockets
        logging.error(
            "<PacketHandler> not enough available port(%d required) in %s:%d~%d"
            % (self.port_count, self.local_ip, self.start_port,
               self.start_port + max_try))
        return False

    def start(self):
        """Start monitor, sender, receiver and notify threads."""
        with self.status_mutex:
            if StatusEnum.stopped != self.status:
                return False
            self.status = StatusEnum.running
            self.monitor_thread.start()
            for i in range(self.sender_count):
                self.send_packet_thread[i].start()
            for i in range(self.receiver_count):
                self.receive_packet_thread[i].start()
            self.notify_thread.start()
        return True

    def stop(self):
        """Close all sockets, wake every worker and join all threads."""
        with self.status_mutex:
            if StatusEnum.stopped == self.status:
                return
            if StatusEnum.running == self.status:
                self.status = StatusEnum.stopping
        ##wake blocked workers so they observe the status change
        self.send_packet_available.set()
        self.receivable_socket_available.set()
        self.notify_available.set()
        ##'sock', not 'socket': the old loop variable shadowed the socket
        ##module inside this method
        for sock in self.sockets:
            sock.close()
        self.notify_thread.join()
        for i in range(self.sender_count):
            self.send_packet_thread[i].join()
        for i in range(self.receiver_count):
            self.receive_packet_thread[i].join()
        self.monitor_thread.join()
        with self.status_mutex:
            self.status = StatusEnum.stopped

    def monitorProcess(self):
        """
        Watch all sockets with edge-triggered epoll; enqueue sockets that
        become readable/writable (deduplicated by fd) and signal the
        receive/send workers.  Linux-only (select.epoll).
        """
        monitor = select.epoll()
        for _socket in self.sockets:
            monitor.register(
                _socket.fileno(),
                select.EPOLLIN | select.EPOLLOUT | select.EPOLLET
                | select.EPOLLERR | select.EPOLLHUP)
        while StatusEnum.running == self.status:
            try:
                epoll_list = monitor.poll(self.slow_interval)
                if StatusEnum.running != self.status:
                    ##double protect
                    break
                receivable = []
                sendable = []
                result_count = len(epoll_list)
                if 0 == result_count:
                    ##timeout, nothing ready
                    continue
                for result in epoll_list:
                    socket_fd = result[0]
                    event = result[1]
                    if (event & select.EPOLLIN):
                        ##available for read: map fd back to its socket
                        for target_socket in self.sockets:
                            if socket_fd == target_socket.fileno():
                                receivable.append(target_socket)
                                break
                    if (event & select.EPOLLOUT):
                        ##available for write
                        for target_socket in self.sockets:
                            if socket_fd == target_socket.fileno():
                                sendable.append(target_socket)
                                break
                if 0 != len(receivable):
                    with self.receivable_lock:
                        for recv_socket in receivable:
                            socket_fd = recv_socket.fileno()
                            for exist in self.receivable_socket:
                                ##avoid duplicate entries for the same fd
                                if socket_fd == exist.fileno():
                                    break
                            else:
                                ##new readable socket
                                self.receivable_socket.append(recv_socket)
                        self.receivable_socket_available.set()
                if 0 != len(sendable):
                    with self.sendable_lock:
                        for target in sendable:
                            socket_fd = target.fileno()
                            for exist in self.sendable_socket:
                                ##avoid duplicate entries for the same fd
                                if socket_fd == exist.fileno():
                                    break
                            else:
                                ##new writable socket
                                self.sendable_socket.append(target)
                        self.sendable_socket_available.set()
            except socket.error as e:
                logging.exception(
                    "<PacketHandler>exception when monitor network, message:%s"
                    % (e.strerror))
class MessageQueue(object):
    """
    Threaded FIFO message dispatcher.

    Messages queued through putMessage()/insertMessage()/batchPut() are
    delivered to the callback supplied at construction on a dedicated
    thread -- one message per call, or one list per call when
    batch_call=True.  Lifecycle: start() / stop().
    """

    class StatusEnum:
        stopped = 0
        running = 1
        stopping = 2

    min_threhold = 5
    max_threhold = 200
    ##messages handed over per worker iteration
    max_batch = 200
    ##worker poll interval: 20 ms
    check_interval = 0.02
    ##queue capacity; puts fail beyond this
    max_message = 10000

    def __init__(self, callback, batch_call=False):
        """
        @callback: callable invoked with one message, or with a list of
            messages when batch_call is True
        @batch_call: deliver each fetched batch as a single list
        """
        self.max_message = 10000
        self.status = MessageQueue.StatusEnum.stopped
        self.status_lock = threading.RLock()
        ##event stays blocked until the first put (or stop)
        self.message_available = ResetEvent()
        self.message_queue = []
        self.message_lock = threading.RLock()
        ##historical method name ("dispath") kept as-is
        self.main_thread = threading.Thread(target=self.dispathProcess)
        self.callback = callback
        self.batch_call = batch_call

    def start(self):
        """Launch the dispatch thread; False unless currently stopped."""
        with self.status_lock:
            if MessageQueue.StatusEnum.stopped != self.status:
                return False
            self.status = MessageQueue.StatusEnum.running
            self.main_thread.start()
        return True

    def stop(self):
        """Stop dispatching and join the worker thread."""
        with self.status_lock:
            if MessageQueue.StatusEnum.stopped == self.status:
                return
            if MessageQueue.StatusEnum.running == self.status:
                self.status = MessageQueue.StatusEnum.stopping
        ##wake the worker so it notices the state change
        self.message_available.set()
        self.main_thread.join()
        with self.status_lock:
            self.status = MessageQueue.StatusEnum.stopped

    def dispathProcess(self):
        """Worker loop: drain the queue in FIFO batches, invoke callback."""
        running = MessageQueue.StatusEnum.running
        while running == self.status:
            self.message_available.wait(self.check_interval)
            if running != self.status:
                break
            if not self.message_queue:
                ##cheap unlocked peek; re-checked under the lock below
                continue
            with self.message_lock:
                backlog = len(self.message_queue)
                if 0 == backlog:
                    continue
                batch_size = min(backlog, self.max_batch)
                if batch_size < backlog:
                    ##leftovers remain: re-signal ourselves
                    self.message_available.set()
                batch = self.message_queue[:batch_size]
                del self.message_queue[:batch_size]
            if not self.callback:
                continue
            if self.batch_call:
                self.callback(batch)
            else:
                for item in batch:
                    self.callback(item)

    def putMessage(self, message):
        """Append one message at the tail; False when the queue is full."""
        with self.message_lock:
            if len(self.message_queue) >= self.max_message:
                return False
            self.message_queue.append(message)
            self.message_available.set()
        return True

    def insertMessage(self, message):
        """Insert one message at the head; False when the queue is full."""
        with self.message_lock:
            if len(self.message_queue) >= self.max_message:
                return False
            self.message_queue.insert(0, message)
            self.message_available.set()
        return True

    def batchPut(self, message_list):
        """Append a list of messages at the tail; False when full."""
        with self.message_lock:
            if len(self.message_queue) >= self.max_message:
                return False
            self.message_queue.extend(message_list)
            self.message_available.set()
        return True