def start_server(addr):
    # addr is the listen address; Handler is the class that handles each established connection
    server = ThreadingTCPServer(addr, Handler, bind_and_activate=False)
    server.allow_reuse_address = True
    server.server_bind()
    server.server_activate()
    # Listen; for every accepted TCP connection a new socket and thread are created,
    # and the connection is processed by the handler class's handle() method
    server.serve_forever()
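The `Handler` class referenced above is not part of this snippet; a minimal sketch (an assumption, not the original handler) that simply echoes incoming lines could look like this:

import socket
from socketserver import StreamRequestHandler, ThreadingTCPServer

class Handler(StreamRequestHandler):
    """Hypothetical echo handler; the real handler is not shown in the snippet above."""
    def handle(self):
        for line in self.rfile:      # read until the client closes the connection
            self.wfile.write(line)   # echo each line back unchanged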
class WebUI(object):
    version = "Bitcoin WebUI v0.0.1"

    def __init__(self, config):
        self.config = config
        credentials = config.rpcusername + ":" + config.rpcpassword
        self.rpcauth = "Basic " + base64.b64encode(credentials.encode("utf_8")).decode("ascii")
        self.httpd = None

    def start(self):
        self.httpd = ThreadingTCPServer((self.config.bindip, self.config.bindport), RequestHandler, False)
        self.httpd.webui = self
        self.httpd.allow_reuse_address = True
        self.httpd.daemon_threads = True
        tcp_socket = socket.socket(self.httpd.address_family, self.httpd.socket_type)
        self.httpd.socket = ssl.wrap_socket(tcp_socket, self.config.privkeyfile, self.config.pubkeyfile, True)
        self.httpd.server_bind()
        self.httpd.server_activate()
        self.serverthread = Thread(None, self.httpd.serve_forever, "httpd")
        self.serverthread.start()

    def stop(self):
        self.httpd.shutdown()
        self.serverthread.join(5)
        self.httpd.server_close()
def server_bind(self):
    ThreadingTCPServer.server_bind(self)
    # wrap the socket early
    self.socket = ssl.wrap_socket(self.socket,
                                  server_side=True,
                                  certfile=CERT_PATH,
                                  keyfile="server.key",
                                  do_handshake_on_connect=False)
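A modern equivalent of the override above, sketched under the assumption that `ssl.SSLContext` is available (`ssl.wrap_socket` was removed in Python 3.12); the certificate paths and class name are illustrative, not from the original:

import ssl
from socketserver import ThreadingTCPServer

class TLSServer(ThreadingTCPServer):
    def server_bind(self):
        ThreadingTCPServer.server_bind(self)
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        ctx.load_cert_chain(certfile="server.crt", keyfile="server.key")  # assumed paths
        # the TLS handshake is deferred until the first read/write on each accepted socket
        self.socket = ctx.wrap_socket(self.socket, server_side=True,
                                      do_handshake_on_connect=False)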
def get_server(addr, handler, debug=False):
    if debug:
        serv = ThreadingTCPServer(addr, handler, bind_and_activate=False)
        serv.allow_reuse_address = True
        serv.daemon_threads = True
        serv.server_bind()
        serv.server_activate()
    else:
        serv = ThreadingTCPServer(addr, handler)
    return serv
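Hypothetical usage of `get_server()` above; `EchoHandler` and the port are assumptions added for illustration:

import threading
from socketserver import StreamRequestHandler

class EchoHandler(StreamRequestHandler):
    def handle(self):
        self.wfile.write(self.rfile.readline())  # echo one line back

server = get_server(("127.0.0.1", 9000), EchoHandler, debug=True)
threading.Thread(target=server.serve_forever, daemon=True).start()
# ... later ...
server.shutdown()
server.server_close()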
class tcp_server(QThread):
    # Use QThread so the handler can emit Qt signals
    dataChanged = pyqtSignal(dict)  # custom signal to emit

    def __init__(self, port, mode):
        # mode = 'grc' or 'hcr'
        super().__init__()
        self.serv = None
        self.port = port
        self.mode = mode

    def run(self):
        if self.mode == 'grc':
            self.serv = ThreadingTCPServer(('', self.port), GRC_Handler, bind_and_activate=False)
            self.serv.socket.settimeout(1)  # set a timeout so the thread can exit
            self.serv.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)  # allow address reuse so the socket can be rebuilt after an abnormal disconnect
            self.serv.qthread = self  # give the handler access to the signal emitter
        elif self.mode == 'hcr':
            self.serv = ThreadingTCPServer(('', self.port), HCR_Handler, bind_and_activate=False)
            self.serv.socket.settimeout(1)  # set a timeout so the thread can exit
            self.serv.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)  # allow address reuse so the socket can be rebuilt after an abnormal disconnect
        else:
            print("error")
            return
        # Bind and activate
        # self.setName("tcp_server " + self.mode)
        self.serv.server_bind()
        self.serv.server_activate()
        self.serv.serve_forever()

    def send_data(self, data):
        '''
        Args:
            data: bytes data
        '''
        # QThread exposes isRunning(); is_alive() belongs to threading.Thread
        if self.isRunning():
            # client_socket is presumably a module-level list of connected sockets
            for client in client_socket:
                client.sendall(data)

    def shutdown(self):
        # client_socket and shutdown_flag are presumably module-level state shared with the handlers
        for client in client_socket:
            client.shutdown(1)
        shutdown_flag[0] = True
        self.serv.socket.close()
        self.serv.shutdown()
        self.serv.server_close()
def __init__(self, dbadapter):
    CoMgrObj = self

    class CoHandler(StreamRequestHandler):
        """Class will handle a connection, request, authenticate it
        One object per client connection request"""
        disable_nagle_algorithm = True

        def handle(self):
            "Called by TCPServer for each client connection request"
            try:
                (dbname, username, password, jobName) = custompickle.load(self.rfile)
                #logging.debug("connection request received {}".format((dbname, username, jobName)))
                #dbc = dborm.dbAdapter.DBCMonetDB(dbname, username, password, jobName, CoMgrObj)
                dbc = dbadapter(dbname, username, password, jobName, CoMgrObj,
                                self.request.getsockname()[0])
                #logging.debug("created dbc for {}, type {}".format((dbname, username, jobName), type(dbc)))
                custompickle.dump(dbc, self.wfile)
                self.wfile.flush()
                #Handshake to wait for the other side to establish the stubs.
                custompickle.load(self.rfile)
            except Exception as e:
                logging.error("error {} occured while creating dbc for {}".format(
                    e, (dbname, username, jobName)))
                logging.exception(e)

        def finish(self):
            self.wfile.close()
            self.rfile.close()

    #Setup a TCP Server to listen to client stub connection requests.
    __srvr = ThreadingTCPServer(('', AConfig.CONNECTIONMANAGERPORT), CoHandler, False)
    __srvr.allow_reuse_address = True
    __srvr.server_bind()
    __srvr.server_activate()
    self.__srvr = __srvr
    #Handle signals to exit gracefully.
    if (threading.current_thread() == threading.main_thread()):
        signal.signal(signal.SIGINT, self.terminate)
        signal.signal(signal.SIGTERM, self.terminate)
    #Start the server polling as a daemon thread.
    self.__srvrThread = threading.Thread(target=self.__srvr.serve_forever)
    self.__srvrThread.daemon = True
    self.__srvrThread.start()
def protocol_server_main():
    print("thread id={}".format(threading.current_thread()))
    serv = ThreadingTCPServer(("", 5198), HwProtocolHandler, bind_and_activate=False)
    # serv = ThreadingTCPServer(("", 5198), HwProtocolHandler)
    serv.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
    serv.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 256 * 1024)
    # SO_RCVTIMEO/SO_SNDTIMEO take a struct timeval (seconds, microseconds): a 5 s timeout here
    serv.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO, struct.pack("ll", 5, 0))
    serv.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDTIMEO, struct.pack("ll", 5, 0))
    serv.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    serv.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    serv.server_bind()
    serv.server_activate()
    serv.serve_forever()
def start_server():
    # socketserver servers gained context-manager support in Python 3.6, hence the version check
    if platform.python_version_tuple()[0] == '3' and int(
            platform.python_version_tuple()[1]) >= 6:
        with ThreadingTCPServer(('127.0.0.1', LOCAL_PORT), Handler,
                                bind_and_activate=False) as server:
            server.allow_reuse_address = True
            server.server_bind()
            server.server_activate()
            server.serve_forever()
    else:
        server = ThreadingTCPServer(('127.0.0.1', LOCAL_PORT), Handler,
                                    bind_and_activate=False)
        server.allow_reuse_address = True
        server.server_bind()
        server.server_activate()
        server.serve_forever()
        server.server_close()
class Server(Thread):
    def __init__(self, host, port, dispatcher):
        Thread.__init__(self)
        self.daemon = True
        self.server = ThreadingTCPServer((host, port), RequestHandler, bind_and_activate=False)
        self.server.allow_reuse_address = True
        self.server.server_bind()
        self.server.server_activate()
        self.server.dispatcher = dispatcher

    def run(self):
        logging.debug('server thread is running...')
        self.server.serve_forever()

    def stop(self):
        logging.debug('server thread will be stopped')
        self.server.shutdown()
        self.server.server_close()
        logging.debug('server thread is killed')
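Hypothetical usage of the `Server` thread above; the dispatcher object and port are assumptions, and `RequestHandler` is expected to read `server.dispatcher` while handling requests:

my_dispatcher = object()  # placeholder for whatever dispatcher the handlers expect

server = Server("127.0.0.1", 9100, dispatcher=my_dispatcher)
server.start()    # runs serve_forever() in the background thread
try:
    pass          # do other work here
finally:
    server.stop() # shutdown() unblocks serve_forever(), then the listening socket is closed
    server.join()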
class ThreadingServerInThread(object):
    """
    Context manager for running a threading http server in a thread.

    Since the Thread is not using "daemon=True", it will keep Python running
    until the context manager exits, which means until request completion.
    """

    def __init__(self, port=8000):
        self._server_address = ("127.0.0.1", port)
        self._handler = SimpleHTTPRequestHandlerHere
        self.httpd = ThreadingTCPServer(
            self._server_address, self._handler, bind_and_activate=False
        )

    def _bind_and_activate(self):
        try:
            self.httpd.server_bind()
            self.httpd.server_activate()
        except Exception as e:
            self.httpd.server_close()
            raise e

    def start(self):
        self._bind_and_activate()
        thread = threading.Thread(target=self.httpd.serve_forever)
        thread.start()

    def stop(self):
        self.httpd.shutdown()
        self.httpd.server_close()

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()
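Hypothetical usage of the context manager above; the port and URL are assumptions, and `SimpleHTTPRequestHandlerHere` is expected to serve HTTP from the current directory:

import urllib.request

with ThreadingServerInThread(port=8000):
    with urllib.request.urlopen("http://127.0.0.1:8000/") as resp:
        print(resp.status)
# leaving the block calls stop(), which shuts the server down and closes its socket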
class WebUI(BaseFrontend):

    version = "theseven.webui v0.1.0beta"
    default_name = "WebUI"
    can_log = True
    can_configure = True
    can_autodetect = True
    settings = dict(BaseFrontend.settings, **{
        "port": {"title": "HTTP port", "type": "int", "position": 1000},
        "users": {
            "title": "Users",
            "type": "dict",
            "key": {"title": "User:Password", "type": "string"},
            "value": {
                "title": "Privilege level",
                "type": "enum",
                "values": [
                    {"value": "readonly", "title": "Read only access"},
                    {"value": "admin", "title": "Full access"},
                ],
            },
            "position": 2000,
        },
        "log_buffer_max_length": {"title": "Maximum log buffer length", "type": "int", "position": 3000},
        "log_buffer_purge_size": {"title": "Log buffer purge size", "type": "int", "position": 3010},
    })

    @classmethod
    def autodetect(self, core):
        core.add_frontend(self(core))

    def __init__(self, core, state=None):
        super(WebUI, self).__init__(core, state)
        self.log_lock = RLock()

    def apply_settings(self):
        super(WebUI, self).apply_settings()
        if not "port" in self.settings: self.settings.port = 8832
        if not "users" in self.settings: self.settings.users = {"admin:mpbm": "admin"}
        if not "uiconfig" in self.settings:
            self.settings.uiconfig = {"loggadget": {"loglevel": self.core.default_loglevel}}
        if not "log_buffer_max_length" in self.settings: self.settings.log_buffer_max_length = 1000
        if not "log_buffer_purge_size" in self.settings: self.settings.log_buffer_purge_size = 100
        if self.started and self.settings.port != self.port: self.async_restart(3)

    def _reset(self):
        self.log_buffer = []
        self.log_listeners = []

    def _start(self):
        super(WebUI, self)._start()
        self.httpd = ThreadingTCPServer(("", self.settings.port), RequestHandler, False)
        self.httpd.webui = self
        self.httpd.allow_reuse_address = 1
        self.httpd.server_bind()
        self.httpd.server_activate()
        self.serverthread = Thread(None, self.httpd.serve_forever, self.settings.name + "_httpd")
        self.serverthread.daemon = True
        self.serverthread.start()
        self.port = self.settings.port

    def _stop(self):
        self.httpd.shutdown()
        self.serverthread.join(10)
        self.httpd.server_close()
        super(WebUI, self)._stop()

    def write_log_message(self, timestamp, loglevel, messages):
        if not self.started: return
        data = {
            "timestamp": time.mktime(timestamp.timetuple()) * 1000 + timestamp.microsecond / 1000,
            "loglevel": loglevel,
            "message": [{"data": data, "format": format} for data, format in messages],
        }
        with self.log_lock:
            for queue in self.log_listeners: queue.put(data)
            self.log_buffer.append(data)
            if len(self.log_buffer) > self.settings.log_buffer_max_length:
                self.log_buffer = self.log_buffer[self.settings.log_buffer_purge_size:]

    def register_log_listener(self, listener):
        with self.log_lock:
            if not listener in self.log_listeners:
                self.log_listeners.append(listener)
                for data in self.log_buffer: listener.put(data)

    def unregister_log_listener(self, listener):
        with self.log_lock:
            while listener in self.log_listeners: self.log_listeners.remove(listener)
def __init__(self, host, port):
    ROMgrObj = self

    class ROProxy(StreamRequestHandler):
        """This class provides the proxy support for the objects. Client stubs will
        invoke the proxy, which in turn will invoke the local object."""
        #TODO .. we need to do this only once ? may be move it elsewhere.
        disable_nagle_algorithm = True

        def handle(self):
            "Called by TCPServer for each client connection request"
            try:
                while True:
                    msg = custompickle.load(self.rfile)
                    #logging.debug("ROProxy {} {:0.20f}".format(msg, time.time()))
                    #First message from client stub, check if object exists or not.
                    if(msg == ROMessages._INIT_):
                        robjName = custompickle.load(self.rfile)
                        #logging.debug("_INIT_ message to look for object {}".format(robjName))
                        if(ROMgrObj.has(robjName)):
                            self.obj = ROMgrObj.get(robjName, self)
                            #On success, send the id of the proxy.
                            custompickle.dump(id(self), self.wfile)
                            self.wfile.flush()
                            self._robjName = robjName
                        else:
                            logging.warning("_INIT_ message object {} not found".format(robjName))
                            custompickle.dump(ROMessages._NOT_FOUND_, self.wfile)
                            self.wfile.flush()
                    #Check if the return should be compressed or not.
                    elif(msg != ROMessages._COMPRESS_):
                        #logging.debug("RemoteMethod: {} is not a compress directive.".format(msg))
                        #Request for an attribute
                        if(msg == ROMessages._GET_ATTRIBUTE_):
                            item = custompickle.load(self.rfile)
                            try:
                                val = self.obj.__getattribute__(item)
                                custompickle.dump(None, self.wfile)
                                custompickle.dump(val, self.wfile)
                                self.wfile.flush()
                            except Exception as e:
                                #An exception occurred. Send traceback info to the client stub.
                                custompickle.dump(sys.exc_info(), self.wfile)
                                self.wfile.flush()
                        #Regular client stub messages contain the name of the function to be invoked and any arguments.
                        else:
                            #logging.debug("ROProxy {} reading args time {:0.20f}".format(msg, time.time()))
                            args = custompickle.load(self.rfile)
                            kwargs = custompickle.load(self.rfile)
                            #logging.debug("ROProxy {} read args time {:0.20f}".format(msg, time.time()))
                            #Execute the function locally and send back any results/exceptions.
                            try:
                                #Execute the local function, store the results.
                                func = self.obj.__getattribute__(msg)
                                if(inspect.ismethod(func)):
                                    result = func(*args, **kwargs)
                                    args = kwargs = None
                                else:
                                    #This is probably a property, in which case we already have the value, return it.
                                    result = func
                                #logging.debug("ROProxy {} local result time {:0.20f}".format(msg, time.time()))
                                #No exception to report.
                                custompickle.dump(None, self.wfile)  #self.wfile.flush()
                                #logging.debug("ROProxy {} exception send time {:0.20f}".format(msg, time.time()))
                                #Return the results.
                                custompickle.dump(result, self.wfile)
                                self.wfile.flush()
                                #logging.debug("ROProxy {} result send time {:0.20f}".format(msg, time.time()))
                                #Hand shake to make sure this function scope is active till the other side has setup remote object stubs if any;
                                #the contents of this message is irrelevant to us.
                                #NOT REQUIRED: this object reference (result) is alive in this space till next remote function call reaches it.
                                #custompickle.load(self.rfile)
                            except Exception as e:
                                #An exception occurred. Send traceback info to the client stub.
                                custompickle.dump(sys.exc_info(), self.wfile)
                                self.wfile.flush()
                    else:
                        msg = custompickle.load(self.rfile)
                        #logging.debug("RemoteMethod : request for compressing {}".format(msg))
                        #Request for an attribute
                        if(msg == ROMessages._GET_ATTRIBUTE_):
                            item = custompickle.load(self.rfile)
                            try:
                                val = self.obj.__getattribute__(item)
                                custompickle.dump(None, self.wfile)
                                self.wfile.flush()
                                AConfig.NTWKCHANNEL.transmit(val, self.wfile)
                            except Exception as e:
                                #An exception occurred. Send traceback info to the client stub.
                                custompickle.dump(sys.exc_info(), self.wfile)
                                self.wfile.flush()
                        #Regular client stub messages contain the name of the function to be invoked and any arguments.
                        else:
                            #logging.debug("ROProxy {} reading args time {:0.20f}".format(msg, time.time()))
                            args = custompickle.load(self.rfile)
                            kwargs = custompickle.load(self.rfile)
                            #logging.debug("ROProxy {} read args time {:0.20f}".format(msg, time.time()))
                            #Execute the function locally and send back any results/exceptions.
                            try:
                                #Execute the local function, store the results.
                                func = self.obj.__getattribute__(msg)
                                if(inspect.ismethod(func)):
                                    result = func(*args, **kwargs)
                                    args = kwargs = None
                                else:
                                    #This is probably a property, in which case we already have the value, return it.
                                    result = func
                                #logging.debug("ROProxy {} local result time {:0.20f}".format(msg, time.time()))
                                #No exception to report.
                                custompickle.dump(None, self.wfile)
                                self.wfile.flush()
                                #logging.debug("ROProxy {} exception send time {:0.20f}".format(msg, time.time()))
                                #Return the results.
                                AConfig.NTWKCHANNEL.transmit(result, self.wfile)
                                #logging.debug("ROProxy {} result send time {:0.20f}".format(msg, time.time()))
                                #Hand shake to make sure this function scope is active till the other side has setup remote object stubs if any;
                                #the contents of this message is irrelevant to us.
                                #NOT REQUIRED: this object reference (result) is alive in this space till next remote function call reaches it.
                                #custompickle.load(self.rfile)
                            except Exception as e:
                                #An exception occurred. Send traceback info to the client stub.
                                custompickle.dump(sys.exc_info(), self.wfile)
                                self.wfile.flush()
                    #logging.debug("ROProxy {} exit time {:0.20f}".format(msg, time.time()))
            except EOFError:
                pass
            #if(hasattr(self, 'obj')):
            #    gc.collect()
            #    logging.debug('ROProxy {} terminating ... object {} has currently {} references'.format(id(self), robjName, sys.getrefcount(self.obj)))
            #    for obj in gc.get_referrers(self.obj):
            #        logging.debug("Referred by {}-{}".format(type(obj), id(obj)))
            #        if(hasattr(obj, 'f_code')):
            #            logging.debug("Frame info {}-{}-{}".format(obj.f_code.co_filename, obj.f_code.co_name, obj.f_lineno))
            #        if(hasattr(obj, '__func__')):
            #            logging.debug("Function info {}".format(obj.__func__.__qualname__))

        #TODO, we may need some locking to synchronize with the handle function.
        def _swapObj(self, obj):
            """Called by the remote object manager when it wants the proxy to start
            serving a different object"""
            #logging.debug("Proxy _swapObj : swapping object {} with {}".format(self.obj, obj))
            #logging.debug("Proxy _swapObj : old content {}".format(self.obj.rows))
            #logging.info("Proxy _swapObj : new content {}".format(obj.rows))
            self.obj = obj

        def finish(self):
            self.wfile.close()
            self.rfile.close()

    #self.__host = socket.gethostname()
    self.__host = host
    self.__port = port
    self.__RObjRepos = dict()  #Keep track of regular objects
    self.__RObjRepos_tmp = weakref.WeakValueDictionary()  #Keep track of temporary objects created as part of return values to remote calls.
    #self.__RObjReposIds = weakref.WeakValueDictionary()  #We will use the system ids to ensure that we keep an object just once.
    self.__proxyObjectRepo__ = weakref.WeakValueDictionary()  #Keep track of proxy objects for workspaces.
    #self.__RObjNames__ = weakref.WeakKeyDictionary()  #For reverse mapping of objects to names.

    #Setup a TCP Server to listen to client stub connection requests.
    #self.__srvr = ThreadingTCPServer((host, port), ROProxy, True)
    __srvr = ThreadingTCPServer(("", port), ROProxy, False)
    __srvr.allow_reuse_address = True
    __srvr.server_bind()
    __srvr.server_activate()
    self.__srvr = __srvr
    #Handle signals to exit gracefully.
    if(threading.current_thread() == threading.main_thread()):
        signal.signal(signal.SIGINT, self.terminate)
        signal.signal(signal.SIGTERM, self.terminate)
    #Start the server polling as a daemon thread.
    self.__srvrThread = threading.Thread(target=self.__srvr.serve_forever)
    self.__srvrThread.daemon = True
    self.__srvrThread.start()
class WebUI(BaseFrontend):

    version = "theseven.webui v0.1.0"
    default_name = "WebUI"
    can_log = True
    can_configure = True
    can_autodetect = True
    settings = dict(BaseFrontend.settings, **{
        "port": {"title": "HTTP port", "type": "int", "position": 1000},
        "users": {
            "title": "Users",
            "type": "dict",
            "key": {"title": "User:Password", "type": "string"},
            "value": {
                "title": "Privilege level",
                "type": "enum",
                "values": [
                    {"value": "readonly", "title": "Read only access"},
                    {"value": "admin", "title": "Full access"},
                ],
            },
            "position": 2000,
        },
        "log_buffer_max_length": {"title": "Maximum log buffer length", "type": "int", "position": 3000},
        "log_buffer_purge_size": {"title": "Log buffer purge size", "type": "int", "position": 3010},
    })

    @classmethod
    def autodetect(self, core):
        core.add_frontend(self(core))

    def __init__(self, core, state=None):
        super(WebUI, self).__init__(core, state)
        self.log_lock = RLock()

    def apply_settings(self):
        super(WebUI, self).apply_settings()
        if not "port" in self.settings: self.settings.port = 8832
        if not "users" in self.settings: self.settings.users = {"admin:mpbm": "admin"}
        if not "uiconfig" in self.settings:
            self.settings.uiconfig = {"loggadget": {"loglevel": self.core.default_loglevel}}
        if not "log_buffer_max_length" in self.settings: self.settings.log_buffer_max_length = 1000
        if not "log_buffer_purge_size" in self.settings: self.settings.log_buffer_purge_size = 100
        if self.started and self.settings.port != self.port: self.async_restart(3)

    def _reset(self):
        self.log_buffer = []
        self.log_listeners = []

    def _start(self):
        super(WebUI, self)._start()
        self.httpd = ThreadingTCPServer(("", self.settings.port), RequestHandler, False)
        self.httpd.webui = self
        self.httpd.allow_reuse_address = True
        self.httpd.daemon_threads = True
        self.httpd.server_bind()
        self.httpd.server_activate()
        self.serverthread = Thread(None, self.httpd.serve_forever, self.settings.name + "_httpd")
        self.serverthread.daemon = True
        self.serverthread.start()
        self.port = self.settings.port

    def _stop(self):
        self.httpd.shutdown()
        self.serverthread.join(10)
        self.httpd.server_close()
        super(WebUI, self)._stop()

    def write_log_message(self, source, timestamp, loglevel, messages):
        if not self.started: return
        data = {
            "timestamp": time.mktime(timestamp.timetuple()) * 1000 + timestamp.microsecond / 1000.0,
            "loglevel": loglevel,
            "source": source.settings.name,
            "message": [{"data": data, "format": format} for data, format in messages],
        }
        with self.log_lock:
            for queue in self.log_listeners: queue.put(data)
            self.log_buffer.append(data)
            if len(self.log_buffer) > self.settings.log_buffer_max_length:
                self.log_buffer = self.log_buffer[self.settings.log_buffer_purge_size:]

    def register_log_listener(self, listener):
        with self.log_lock:
            if not listener in self.log_listeners:
                self.log_listeners.append(listener)
                for data in self.log_buffer: listener.put(data)

    def unregister_log_listener(self, listener):
        with self.log_lock:
            while listener in self.log_listeners: self.log_listeners.remove(listener)
        # self.log_file.write("test\n")
        self.log_file.flush()

    def handle_run(self, data):
        self.log_file.write(f"running: {data}\n")
        self.log_file.flush()
        run(data.split())

    def handle_cd(self, loc):
        with (self.data_dir / "active_nodes" / "active_directory").open() as f:
            chdir(f.read())
        self.log_write(loc)
        self.log_file.flush()


if __name__ == "__main__":
    print("Starting client...")
    hostname = argv[-1]
    here = Path(__file__).parent.resolve()
    nodes = here / "active_nodes"
    nodes.mkdir(parents=True, exist_ok=True)
    server = ThreadingTCPServer(('', 0), Handler, False)
    server.server_bind()
    # binding to port 0 lets the OS pick an ephemeral port; getsockname() retrieves it
    port = server.socket.getsockname()[1]
    with (nodes / f"{hostname}.node").open('w') as f:
        f.write(str(port))
    Handler.data_dir = here
    Handler.log_file = (nodes / f"{hostname}.node.log").open('w')
    server.server_activate()
    server.serve_forever()
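A hypothetical companion sketch (not from the original) showing how a peer process could discover the ephemeral port written above and connect to it; the paths mirror the snippet's layout and the loopback address is an assumption:

import socket
from pathlib import Path

def connect_to_node(hostname, base_dir="."):
    # read the port number that the node wrote into active_nodes/<hostname>.node
    port = int((Path(base_dir) / "active_nodes" / f"{hostname}.node").read_text())
    return socket.create_connection(("127.0.0.1", port))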
class ModbusServer(object):
    """Modbus TCP server"""

    class ModbusService(BaseRequestHandler):

        def handle(self):
            while True:
                rx_head = self.request.recv(7)
                # close connection if no standard 7 bytes header
                if not (rx_head and len(rx_head) == 7):
                    break
                # decode header
                (rx_hd_tr_id, rx_hd_pr_id, rx_hd_length, rx_hd_unit_id) = struct.unpack('>HHHB', rx_head)
                # close connection if frame header content inconsistency
                if not ((rx_hd_pr_id == 0) and (2 < rx_hd_length < 256)):
                    break
                # receive body
                rx_body = self.request.recv(rx_hd_length - 1)
                # close connection if lack of bytes in frame body
                if not (rx_body and (len(rx_body) == rx_hd_length - 1)):
                    break
                # body decode: function code
                rx_bd_fc = struct.unpack('B', rx_body[0:1])[0]
                # close connection if function code is inconsistent
                if rx_bd_fc > 0x7F:
                    break
                # default except status
                exp_status = const.EXP_NONE
                # functions Read Coils (0x01) or Read Discrete Inputs (0x02)
                if rx_bd_fc in (const.READ_COILS, const.READ_DISCRETE_INPUTS):
                    (b_address, b_count) = struct.unpack('>HH', rx_body[1:])
                    # check quantity of requested bits
                    if 0x0001 <= b_count <= 0x07D0:
                        bits_l = DataBank.get_bits(b_address, b_count)
                        if bits_l:
                            # allocate bytes list
                            b_size = int(b_count / 8)
                            b_size += 1 if (b_count % 8) else 0
                            bytes_l = [0] * b_size
                            # populate bytes list with data bank bits
                            for i, item in enumerate(bits_l):
                                if item:
                                    byte_i = int(i / 8)
                                    bytes_l[byte_i] = set_bit(bytes_l[byte_i], i % 8)
                            # format body of frame with bits
                            tx_body = struct.pack('BB', rx_bd_fc, len(bytes_l))
                            # add bytes with bits
                            for byte in bytes_l:
                                tx_body += struct.pack('B', byte)
                        else:
                            exp_status = const.EXP_DATA_ADDRESS
                    else:
                        exp_status = const.EXP_DATA_VALUE
                # functions Read Holding Registers (0x03) or Read Input Registers (0x04)
                elif rx_bd_fc in (const.READ_HOLDING_REGISTERS, const.READ_INPUT_REGISTERS):
                    (w_address, w_count) = struct.unpack('>HH', rx_body[1:])
                    # check quantity of requested words
                    if 0x0001 <= w_count <= 0x007D:
                        words_l = DataBank.get_words(w_address, w_count)
                        if words_l:
                            # format body of frame with words
                            tx_body = struct.pack('BB', rx_bd_fc, w_count * 2)
                            for word in words_l:
                                tx_body += struct.pack('>H', word)
                        else:
                            exp_status = const.EXP_DATA_ADDRESS
                    else:
                        exp_status = const.EXP_DATA_VALUE
                # function Write Single Coil (0x05)
                elif rx_bd_fc is const.WRITE_SINGLE_COIL:
                    (b_address, b_value) = struct.unpack('>HH', rx_body[1:])
                    f_b_value = bool(b_value == 0xFF00)
                    if DataBank.set_bits(b_address, [f_b_value]):
                        # send write ok frame
                        tx_body = struct.pack('>BHH', rx_bd_fc, b_address, b_value)
                    else:
                        exp_status = const.EXP_DATA_ADDRESS
                # function Write Single Register (0x06)
                elif rx_bd_fc is const.WRITE_SINGLE_REGISTER:
                    (w_address, w_value) = struct.unpack('>HH', rx_body[1:])
                    if DataBank.set_words(w_address, [w_value]):
                        # send write ok frame
                        tx_body = struct.pack('>BHH', rx_bd_fc, w_address, w_value)
                    else:
                        exp_status = const.EXP_DATA_ADDRESS
                # function Write Multiple Coils (0x0F)
                elif rx_bd_fc is const.WRITE_MULTIPLE_COILS:
                    (b_address, b_count, byte_count) = struct.unpack('>HHB', rx_body[1:6])
                    # check quantity of updated coils
                    if (0x0001 <= b_count <= 0x07B0) and (byte_count >= (b_count / 8)):
                        # allocate bits list
                        bits_l = [False] * b_count
                        # populate bits list with bits from rx frame
                        for i, item in enumerate(bits_l):
                            b_bit_pos = int(i / 8) + 6
                            b_bit_val = struct.unpack('B', rx_body[b_bit_pos:b_bit_pos + 1])[0]
                            bits_l[i] = test_bit(b_bit_val, i % 8)
                        # write words to data bank
                        if DataBank.set_bits(b_address, bits_l):
                            # send write ok frame
                            tx_body = struct.pack('>BHH', rx_bd_fc, b_address, b_count)
                        else:
                            exp_status = const.EXP_DATA_ADDRESS
                    else:
                        exp_status = const.EXP_DATA_VALUE
                # function Write Multiple Registers (0x10)
                elif rx_bd_fc is const.WRITE_MULTIPLE_REGISTERS:
                    (w_address, w_count, byte_count) = struct.unpack('>HHB', rx_body[1:6])
                    # check quantity of updated words
                    if (0x0001 <= w_count <= 0x007B) and (byte_count == w_count * 2):
                        # allocate words list
                        words_l = [0] * w_count
                        # populate words list with words from rx frame
                        for i, item in enumerate(words_l):
                            w_offset = i * 2 + 6
                            words_l[i] = struct.unpack('>H', rx_body[w_offset:w_offset + 2])[0]
                        # write words to data bank
                        if DataBank.set_words(w_address, words_l):
                            # send write ok frame
                            tx_body = struct.pack('>BHH', rx_bd_fc, w_address, w_count)
                        else:
                            exp_status = const.EXP_DATA_ADDRESS
                    else:
                        exp_status = const.EXP_DATA_VALUE
                else:
                    exp_status = const.EXP_ILLEGAL_FUNCTION
                # check exception
                if exp_status != const.EXP_NONE:
                    # format body of frame with exception status
                    tx_body = struct.pack('BB', rx_bd_fc + 0x80, exp_status)
                # build frame header
                tx_head = struct.pack('>HHHB', rx_hd_tr_id, rx_hd_pr_id, len(tx_body) + 1, rx_hd_unit_id)
                # send frame
                self.request.send(tx_head + tx_body)
            self.request.close()

    def __init__(self, host='localhost', port=502, no_block=False, ipv6=False):
        """Constructor

        Modbus server constructor.

        :param host: hostname or IPv4/IPv6 address server address (optional)
        :type host: str
        :param port: TCP port number (optional)
        :type port: int
        :param no_block: set no block mode, in this mode start() return (optional)
        :type no_block: bool
        :param ipv6: use ipv6 stack
        :type ipv6: bool
        """
        self.host = host
        self.port = port
        self.no_block = no_block
        self.ipv6 = ipv6
        # set class attribute
        ThreadingTCPServer.address_family = socket.AF_INET6 if self.ipv6 else socket.AF_INET
        ThreadingTCPServer.daemon_threads = True
        # init server
        self._service = ThreadingTCPServer((self.host, self.port), self.ModbusService,
                                           bind_and_activate=False)
        # set socket options
        self._service.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._service.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        # TODO test no_delay with bench
        self._service.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        # add thread for no block mode
        if self.no_block:
            self._serve_th = Thread(target=self._service.serve_forever)
            self._serve_th.daemon = True

    def start(self):
        # bind and activate
        self._service.server_bind()
        self._service.server_activate()
        # serve request
        if self.no_block:
            self._serve_th.start()
        else:
            self._service.serve_forever()
class ModbusServer(object):
    """Modbus TCP server"""

    class ModbusService(BaseRequestHandler):

        def recv_all(self, size):
            if hasattr(socket, "MSG_WAITALL"):
                data = self.request.recv(size, socket.MSG_WAITALL)
            else:
                # Windows lacks MSG_WAITALL
                data = b''
                while len(data) < size:
                    data += self.request.recv(size - len(data))
            return data

        def handle(self):
            while True:
                rx_head = self.recv_all(7)
                # close connection if no standard 7 bytes header
                if not (rx_head and len(rx_head) == 7):
                    break
                # decode header
                (rx_hd_tr_id, rx_hd_pr_id, rx_hd_length, rx_hd_unit_id) = struct.unpack('>HHHB', rx_head)
                # close connection if frame header content inconsistency
                if not ((rx_hd_pr_id == 0) and (2 < rx_hd_length < 256)):
                    break
                # receive body
                rx_body = self.recv_all(rx_hd_length - 1)
                # close connection if lack of bytes in frame body
                if not (rx_body and (len(rx_body) == rx_hd_length - 1)):
                    break
                # body decode: function code
                rx_bd_fc = struct.unpack('B', rx_body[0:1])[0]
                # close connection if function code is inconsistent
                if rx_bd_fc > 0x7F:
                    break
                # default except status
                exp_status = const.EXP_NONE
                # functions Read Coils (0x01) or Read Discrete Inputs (0x02)
                if rx_bd_fc in (const.READ_COILS, const.READ_DISCRETE_INPUTS):
                    (b_address, b_count) = struct.unpack('>HH', rx_body[1:])
                    # check quantity of requested bits
                    if 0x0001 <= b_count <= 0x07D0:
                        bits_l = DataBank.get_bits(b_address, b_count)
                        if bits_l:
                            # allocate bytes list
                            b_size = int(b_count / 8)
                            b_size += 1 if (b_count % 8) else 0
                            bytes_l = [0] * b_size
                            # populate bytes list with data bank bits
                            for i, item in enumerate(bits_l):
                                if item:
                                    byte_i = int(i / 8)
                                    bytes_l[byte_i] = set_bit(bytes_l[byte_i], i % 8)
                            # format body of frame with bits
                            tx_body = struct.pack('BB', rx_bd_fc, len(bytes_l))
                            # add bytes with bits
                            for byte in bytes_l:
                                tx_body += struct.pack('B', byte)
                        else:
                            exp_status = const.EXP_DATA_ADDRESS
                    else:
                        exp_status = const.EXP_DATA_VALUE
                # functions Read Holding Registers (0x03) or Read Input Registers (0x04)
                elif rx_bd_fc in (const.READ_HOLDING_REGISTERS, const.READ_INPUT_REGISTERS):
                    (w_address, w_count) = struct.unpack('>HH', rx_body[1:])
                    # check quantity of requested words
                    if 0x0001 <= w_count <= 0x007D:
                        words_l = DataBank.get_words(w_address, w_count)
                        if words_l:
                            # format body of frame with words
                            tx_body = struct.pack('BB', rx_bd_fc, w_count * 2)
                            for word in words_l:
                                tx_body += struct.pack('>H', word)
                        else:
                            exp_status = const.EXP_DATA_ADDRESS
                    else:
                        exp_status = const.EXP_DATA_VALUE
                # function Write Single Coil (0x05)
                elif rx_bd_fc is const.WRITE_SINGLE_COIL:
                    (b_address, b_value) = struct.unpack('>HH', rx_body[1:])
                    f_b_value = bool(b_value == 0xFF00)
                    if DataBank.set_bits(b_address, [f_b_value]):
                        # send write ok frame
                        tx_body = struct.pack('>BHH', rx_bd_fc, b_address, b_value)
                    else:
                        exp_status = const.EXP_DATA_ADDRESS
                # function Write Single Register (0x06)
                elif rx_bd_fc is const.WRITE_SINGLE_REGISTER:
                    (w_address, w_value) = struct.unpack('>HH', rx_body[1:])
                    if DataBank.set_words(w_address, [w_value]):
                        # send write ok frame
                        tx_body = struct.pack('>BHH', rx_bd_fc, w_address, w_value)
                    else:
                        exp_status = const.EXP_DATA_ADDRESS
                # function Write Multiple Coils (0x0F)
                elif rx_bd_fc is const.WRITE_MULTIPLE_COILS:
                    (b_address, b_count, byte_count) = struct.unpack('>HHB', rx_body[1:6])
                    # check quantity of updated coils
                    if (0x0001 <= b_count <= 0x07B0) and (byte_count >= (b_count / 8)):
                        # allocate bits list
                        bits_l = [False] * b_count
                        # populate bits list with bits from rx frame
                        for i, item in enumerate(bits_l):
                            b_bit_pos = int(i / 8) + 6
                            b_bit_val = struct.unpack('B', rx_body[b_bit_pos:b_bit_pos + 1])[0]
                            bits_l[i] = test_bit(b_bit_val, i % 8)
                        # write words to data bank
                        if DataBank.set_bits(b_address, bits_l):
                            # send write ok frame
                            tx_body = struct.pack('>BHH', rx_bd_fc, b_address, b_count)
                        else:
                            exp_status = const.EXP_DATA_ADDRESS
                    else:
                        exp_status = const.EXP_DATA_VALUE
                # function Write Multiple Registers (0x10)
                elif rx_bd_fc is const.WRITE_MULTIPLE_REGISTERS:
                    (w_address, w_count, byte_count) = struct.unpack('>HHB', rx_body[1:6])
                    # check quantity of updated words
                    if (0x0001 <= w_count <= 0x007B) and (byte_count == w_count * 2):
                        # allocate words list
                        words_l = [0] * w_count
                        # populate words list with words from rx frame
                        for i, item in enumerate(words_l):
                            w_offset = i * 2 + 6
                            words_l[i] = struct.unpack('>H', rx_body[w_offset:w_offset + 2])[0]
                        # write words to data bank
                        if DataBank.set_words(w_address, words_l):
                            # send write ok frame
                            tx_body = struct.pack('>BHH', rx_bd_fc, w_address, w_count)
                        else:
                            exp_status = const.EXP_DATA_ADDRESS
                    else:
                        exp_status = const.EXP_DATA_VALUE
                else:
                    exp_status = const.EXP_ILLEGAL_FUNCTION
                # check exception
                if exp_status != const.EXP_NONE:
                    # format body of frame with exception status
                    tx_body = struct.pack('BB', rx_bd_fc + 0x80, exp_status)
                # build frame header
                tx_head = struct.pack('>HHHB', rx_hd_tr_id, rx_hd_pr_id, len(tx_body) + 1, rx_hd_unit_id)
                # send frame
                self.request.send(tx_head + tx_body)
            self.request.close()

    def __init__(self, host='localhost', port=const.MODBUS_PORT, no_block=False, ipv6=False):
        """Constructor

        Modbus server constructor.

        :param host: hostname or IPv4/IPv6 address server address (optional)
        :type host: str
        :param port: TCP port number (optional)
        :type port: int
        :param no_block: set no block mode, in this mode start() return (optional)
        :type no_block: bool
        :param ipv6: use ipv6 stack
        :type ipv6: bool
        """
        # public
        self.host = host
        self.port = port
        self.no_block = no_block
        self.ipv6 = ipv6
        # private
        self._running = False
        self._service = None
        self._serve_th = None

    def start(self):
        """Start the server.

        Do nothing if server is already running.
        This function will block if no_block is not set to True.
        """
        if not self.is_run:
            # set class attribute
            ThreadingTCPServer.address_family = socket.AF_INET6 if self.ipv6 else socket.AF_INET
            ThreadingTCPServer.daemon_threads = True
            # init server
            self._service = ThreadingTCPServer((self.host, self.port), self.ModbusService,
                                               bind_and_activate=False)
            # set socket options
            self._service.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self._service.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            # TODO test no_delay with bench
            self._service.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
            # bind and activate
            self._service.server_bind()
            self._service.server_activate()
            # serve request
            if self.no_block:
                self._serve_th = Thread(target=self._serve)
                self._serve_th.daemon = True
                self._serve_th.start()
            else:
                self._serve()

    def stop(self):
        """Stop the server.

        Do nothing if server is already not running.
        """
        if self.is_run:
            self._service.shutdown()
            self._service.server_close()

    @property
    def is_run(self):
        """Return True if server running.

        """
        return self._running

    def _serve(self):
        try:
            self._running = True
            self._service.serve_forever()
        except:
            self._service.server_close()
            raise
        finally:
            self._running = False
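Hypothetical usage of the `ModbusServer` class above in non-blocking mode; the bind address and port are assumptions:

srv = ModbusServer(host='0.0.0.0', port=5020, no_block=True)
srv.start()        # binds, activates, and serves from a daemon thread
print(srv.is_run)  # True once the serving thread has entered serve_forever()
srv.stop()         # shutdown() unblocks serve_forever(), then the socket is closed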
def server_bind(self):
    """Override server_bind to store the server name."""
    ThreadingTCPServer.server_bind(self)
    host, port = self.server_address[:2]
    self.server_name = socket.getfqdn(host)
    self.server_port = port
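A minimal sketch (class and handler names are assumptions) of how the override above is typically used: the handler can then report which name and port the server is bound to.

import socket
from socketserver import StreamRequestHandler, ThreadingTCPServer

class NamedTCPServer(ThreadingTCPServer):
    allow_reuse_address = True

    def server_bind(self):
        ThreadingTCPServer.server_bind(self)
        host, port = self.server_address[:2]
        self.server_name = socket.getfqdn(host)
        self.server_port = port

class InfoHandler(StreamRequestHandler):
    def handle(self):
        # self.server is the NamedTCPServer instance that accepted this connection
        banner = f"{self.server.server_name}:{self.server.server_port}\n"
        self.wfile.write(banner.encode("ascii"))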