class Bus(common.AutoClose):
    """
    An Autobus bus. Busses manage a set of published services, and allow
    connecting to other services. A single bus listens on a single TCP port
    and multiplexes all published services over it.

    Bus is a subclass of ServiceProvider; the service it provides is a
    service exposing information about what other services, events,
    functions, and objects are present. (This service is more commonly known
    as the introspection service.) You normally won't have to know this;
    instances of Bus register themselves as services with themselves, so you
    don't need to do anything to make the introspection service work.

    NOTE(review): this class uses Python 2 idioms (sys.exc_type,
    dict.keys()[0] subscripting, __builtin__, bare except) and will not run
    unmodified on Python 3.
    """

    def __init__(self, default_discoverers=True, default_publishers=True,
                 port=None):
        """
        Creates a new bus. The bus will listen on the specified port; if none
        is specified (which is the usual case), a port will be chosen from
        the ports not currently in use on this computer.

        If default_discoverers is True (the default), a default set of
        discoverers will be installed, and likewise for default_publishers.
        Right now, this simply installs a autobus2.discovery.BroadcastPublisher
        and autobus2.discovery.BroadcastDiscoverer. Others might be added in
        the future.
        """
        # Number of times this bus has been __enter__'d. Allows it to be used
        # as a re-entrant context manager.
        self.context_enters = 0
        if port is None:
            port = 0  # Port 0 asks the OS to choose a free port for us
        # True once close() has been called
        self.closed = False
        # The TCP server that will listen for connections
        self.server = Socket()
        self.server.bind(("", port))
        # TODO: make the backlog configurable
        self.server.listen(100)
        # Remember the actual port (relevant when the OS chose one for us)
        self.port = self.server.getsockname()[1]
        # Lock that nearly everything bus-related locks on
        self.lock = RLock()
        # PropertyTable whose keys are service ids and whose values are
        # instances of autobus2.local.LocalService
        self.local_services = PropertyTable()
        self.local_services.global_watch(self.local_service_changed)
        # Map of ids of discovered services to DiscoveredService instances
        self.discovered_services = {}
        self.discovery_listeners = []
        # List of (filter, function) tuples, where filter is an info object
        # filter and function is a function to be notified when a matching
        # service is created or deleted
        self.service_listeners = []
        # Set of RemoteConnection instances that have been bound to a service
        self.bound_connections = set()
        # Set of discoverers registered on this bus
        self.discoverers = set()
        # Set of publishers registered on this bus
        self.publishers = set()
        if default_discoverers:
            self.install_discoverer(discovery.BroadcastDiscoverer())
        if default_publishers:
            self.install_publisher(discovery.BroadcastPublisher())
        # Accept inbound connections on a dedicated daemonless thread
        Thread(name="autobus2.Bus.accept_loop", target=self.accept_loop).start()
        # Disable the introspection service for now. I'm seeing what would
        # happen if I have per-service introspection functions and objects, so
        # I'm disabling the bus-wide introspection service.
        # self._create_introspection_service()
        #
        # Register the bus as a service on itself.
        self.create_service({"type": "autobus.details", "pid": os.getpid()},
                            _IntrospectionService(self))

    def accept_loop(self):
        """
        Called on a new thread to accept socket connections to this bus.
        """
        self.server.settimeout(1)
        while not self.closed:
            try:
                socket = None
                socket = self.server.accept()[0]
                self.setup_inbound_socket(socket)
            except SocketTimeout:
                # This happens when we time out, which is normal. The
                # 1-second timeout is to fix what appears to be a bug with
                # Windows not properly throwing an exception from accept
                # when another thread closes the socket.
                pass
            except:
                # This happens when the server socket is closed
                if socket:
                    # Make sure it's /really/ closed on the off chance that
                    # something else caused the exception
                    socket.close()
                if not issubclass(sys.exc_type, SocketError):
                    # Something else happened
                    print_exc()
                # print "Bus server died"
                return

    @synchronized_on("lock")
    def create_service(self, info, provider):
        """
        Creates a new service on this bus. info is the info object to use for
        this service. provider is the instance of
        autobus2.service.ServiceProvider to publish; an instance of
        autobus2.providers.PyServiceProvider can be used to publish a simple
        Python object as a service. (This is how I expect most services to be
        published; writing a custom ServiceProvider subclass should rarely be
        needed.)

        The return value is an instance of local.LocalService. You can safely
        ignore it if you don't need it and don't plan on deleting the service
        before you close the bus itself.
        """
        # Create a new id for the service
        service_id = messaging.create_service_id()
        self.set_remote_info_builtins(service_id, info)
        # Create the actual service object
        service = local.LocalService(self, service_id, info, provider)
        # Then store the service in our services map, which will cause the
        # service to be published through the introspection service and through
        # the bus's publishers (see self.local_service_changed).
        self.local_services[service_id] = service
        return service

    def _close_service(self, service):
        # This is called from LocalService.close, which will take care of
        # shutting down the service's connections and such. So the only thing
        # we really need to do here is delete the service from the
        # local_service map, which will cause self.local_service_changed to
        # unpublish the service and remove it from the introspection service.
        del self.local_services[service.id]

    @synchronized_on("lock")
    def setup_inbound_socket(self, socket):
        # Wrap a newly-accepted socket in a RemoteConnection and track it.
        # Create a connection and then add it to our list of connections
        connection = local.RemoteConnection(self, socket)
        self.bound_connections.add(connection)

    def connect(self, host, port, service_id, timeout=10, open_listener=None,
                close_listener=None, fail_listener=None, lock=None):
        """
        Opens a connection to the specified service on the specified
        host/port. The connection will be returned immediately. The actual
        connection to the server will be made as soon as possible in the
        future. If you need to block until the connection actually connects,
        call wait_for_connect on the returned Connection object.

        The connection will attempt to reconnect indefinitely whenever it is
        disconnected. If you don't want this behavior, specify a
        close_listener that calls the connection's close method.

        Timeout is the TCP timeout to use when connecting. The default is 10;
        this is usually a suitable default. You'll probably only want to
        increase this if you're working on a particularly latent network.

        open_listener and close_listener are functions accepting one
        argument. They will be called when the connection successfully
        connects and when the connection disconnects, respectively, and the
        connection itself will be passed in. They are both run synchronously
        on the connection's input thread, so it's guaranteed that, for
        example, the connection will not attempt to reconnect until
        close_listener has returned. Thus close_listener could be set to a
        function that just closes the specified connection in order to
        effectively disable the auto-reconnect feature of connections.
        """
        return remote.Connection(self, host, port, service_id, timeout,
                                 open_listener, close_listener, fail_listener,
                                 lock)

    def connect_to(self, info_filter, timeout=10, open_listener=None,
                   close_listener=None, fail_listener=None, lock=None):
        """
        Locates the first service in the list of discovered services and uses
        self.connect to connect to it. The connection is then returned.

        This function will be going away soon. Service proxies (which can be
        obtained using self.get_service_proxy) are the replacement; a single
        service proxy is quite similar to this method, but it can follow the
        service across restarts of the underlying process publishing the
        service, which this method can't.
        """
        with self.lock:
            for service_id, d in self.discovered_services.items():
                if filter_matches(d.info, info_filter):
                    # Use the first (default) known location for the service
                    host, port = d.locations.keys()[0]
                    return self.connect(host, port, service_id, timeout,
                                        open_listener, close_listener,
                                        fail_listener, lock)
            raise exceptions.NoMatchingServiceException()

    def get_service_proxy(self, info_filter, bind_function=None,
                          unbind_function=None, multiple=False):
        """
        Returns a service proxy that will connect to services matching the
        specified info object filter.

        If multiple is False (the default), a single service proxy will be
        returned. If multiple is True, a multiple service proxy will be
        returned. See proxy.SingleServiceProxy and proxy.MultipleServiceProxy
        for the differences between the two.

        bind_function and unbind_function are optional functions that will be
        called when the proxy binds to and unbinds from a service,
        respectively. Binding is where a proxy discovers a new service
        matching its info filter and establishes a connection to it.
        Unbinding is where the proxy disconnects from said connection,
        usually because the service went away.
        """
        with self.lock:
            if multiple:
                return proxy.MultipleServiceProxy(self, info_filter,
                                                  bind_function,
                                                  unbind_function)
            else:
                # NOTE(review): bind_function/unbind_function are not passed
                # to SingleServiceProxy here -- confirm whether that is
                # intentional or an oversight.
                return proxy.SingleServiceProxy(self, info_filter)

    @synchronized_on("lock")
    def close(self):
        """
        Closes this bus and all services registered on it.
        """
        if self.closed:  # Already closed
            return
        self.closed = True
        # First we shut down all of our discoverers
        for discoverer in self.discoverers:
            discoverer.shutdown()
        # Then we need to close all of our services. Closing a service causes
        # self._close_service to be called, which removes the service from the
        # list of services, which causes self.local_service_changed to be
        # called, which unpublishes the service. So we don't need to worry
        # about unpublishing services aside from this.
        for service_id in list(self.local_services):
            self.local_services[service_id].close()
        # Then we shut down all of the publishers
        for publisher in self.publishers:
            publisher.shutdown()
        # Then we shut down the server socket
        net.shutdown(self.server)
        # Then we close all of the connections currently connected to us
        for c in self.bound_connections:
            with no_exceptions:
                c.close()
        # And that's it!

    @synchronized_on("lock")
    def install_publisher(self, publisher):
        # Add the publisher to our list and start it up
        self.publishers.add(publisher)
        publisher.startup(self)
        # Then register all of our local services with the publisher
        for service in self.local_services.values():
            publisher.add(service)

    @synchronized_on("lock")
    def remove_publisher(self, publisher):
        # Check to make sure that the publisher is already installed
        if publisher not in self.publishers:
            # TODO: Not sure why we're using __builtin__ here...
            raise __builtin__.ValueError("The specified publisher is not currently installed on this bus.")
        # Remove the publisher from our list of publishers
        self.publishers.remove(publisher)
        # Unpublish all of our services from the publisher
        for service in self.local_services.values():
            if service.active:
                publisher.remove(service)
        # Then we shut down the publisher
        publisher.shutdown()

    @synchronized_on("lock")
    def install_discoverer(self, discoverer):
        # Add the discoverer to our list of discoverers, then start it up
        self.discoverers.add(discoverer)
        discoverer.startup(self)

    @synchronized_on("lock")
    def remove_discoverer(self, discoverer):
        # Check to make sure that the discoverer has already been installed
        if discoverer not in self.discoverers:
            # TODO: Ditto from remove_publisher
            raise __builtin__.ValueError("The specified discoverer is not currently installed on this bus.")
        # Remove the discoverer from our list of discoverers, then shut it
        # down
        self.discoverers.remove(discoverer)
        discoverer.shutdown()

    def set_local_info_builtins(self, host, port, service_id, info):
        # Returns a copy of info with the location of a discovered service
        # added under the built-in "host"/"port"/"service" keys. The original
        # info dict is not modified.
        new_info = info.copy()
        new_info["host"] = host
        new_info["port"] = port
        new_info["service"] = service_id
        return new_info

    def set_remote_info_builtins(self, service_id, info):
        """
        Adds some values to the specified info object (in place). The only
        one added right now is hostname, which is the value of
        socket.gethostname(). I haven't really standardized the list of
        values added here; I hope to at some point, though, and have all
        Autobus client libraries add the same ones.
        """
        info["hostname"] = gethostname()

    @synchronized_on("lock")
    def discover(self, discoverer, host, port, service_id, info):
        # Called by discoverers when they find a service at host/port.
        # print "Discovered:", (host, port, service_id, info)
        # Add the relevant local builtins
        info = self.set_local_info_builtins(host, port, service_id, info)
        # Check to see if the specified service has been discovered yet, and
        # if it hasn't, create an entry for it
        is_new_service = False
        if service_id not in self.discovered_services:
            self.discovered_services[service_id] = DiscoveredService(info)
            is_new_service = True
        discovered_service = self.discovered_services[service_id]
        # Check to see if the specified host/port combination is already
        # present, and if it isn't, add it.
        if (host, port) not in discovered_service.locations:
            discovered_service.locations[(host, port)] = []
        discoverer_list = discovered_service.locations[(host, port)]
        # Check to see if this discoverer has already discovered that
        # host/port
        if discoverer in discoverer_list:
            print ("Warning: discoverer " + str(discoverer) +
                   " tried to rediscover " + str((host, port, service_id)) +
                   " with info " + str(info))
            return
        # It hasn't, so add it.
        discoverer_list.append(discoverer)
        # Then check to see if we need to notify listeners, and do so if we
        # need to
        if is_new_service:
            self.notify_service_listeners(service_id, host, port, info,
                                          DISCOVERED)

    @synchronized_on("lock")
    def undiscover(self, discoverer, host, port, service_id):
        # Called by discoverers when a previously-discovered service goes
        # away (or one of its locations does).
        # print "Undiscovered:", (host, port, service_id)
        # Check to see if the specified service has been discovered.
        if service_id not in self.discovered_services:
            print ("Warning: discoverer " + str(discoverer) + " tried to "
                   "undiscover " + str((host, port, service_id)) + " when "
                   "such a service does not exist.")
            return
        discovered_service = self.discovered_services[service_id]
        if (host, port) not in discovered_service.locations:
            print ("Warning: discoverer " + str(discoverer) + " tried to "
                   "undiscover " + str((host, port, service_id)) + " when "
                   "that host/port has not yet been discovered.")
            return
        discoverer_list = discovered_service.locations[(host, port)]
        if discoverer not in discoverer_list:
            print ("Warning: discoverer " + str(discoverer) + " tried to "
                   "undiscover " + str((host, port, service_id)) + " when "
                   "this discoverer hasn't discovered that host/port yet.")
            return
        discoverer_list.remove(discoverer)
        if not discoverer_list:
            # No discoverer can see this host/port any more, so drop it.
            if discovered_service.locations.keys()[0] == (host, port):
                # We're removing the first (and therefore default) location,
                # so if there's another location, we need to let the service
                # listeners know that there's a new default location
                if len(discovered_service.locations) > 1:
                    # There will be another location even after we delete
                    # this one
                    new_host, new_port = discovered_service.locations.keys()[1]
                    if not self.closed:
                        # Don't issue changes if we're shutting down
                        self.notify_service_listeners(service_id, new_host,
                                                      new_port,
                                                      discovered_service.info,
                                                      CHANGED)
            del discovered_service.locations[(host, port)]
            if not discovered_service.locations:
                # That was the last location available for this service, so
                # we delete the service itself, and notify listeners that it
                # was deleted
                del self.discovered_services[service_id]
                self.notify_service_listeners(service_id, host, port,
                                              discovered_service.info,
                                              UNDISCOVERED)

    @synchronized_on("lock")
    def add_service_listener(self, listener, info_filter=None, initial=False):
        """
        Listens for changes in services that are available.

        listener is a function listener(service_id, host, port, info, event)
        which will be called whenever a service becomes available, a service
        disappears, or the host/port that should be used to access a
        particular service changes. service_id is the id of the service;
        host/port is the host/port at which the service can be found, info is
        the service's info object, and event is one of DISCOVERED,
        UNDISCOVERED, or CHANGED.

        If info_filter is a dictionary, only services with info objects
        matching that particular filter (as per the filter_matches function)
        will cause the listener to be called. If info_filter is None (the
        default), or the empty dictionary (since all info objects match the
        empty dictionary), the listener will be called for all services.

        If initial is True, the listener will be immediately (and
        synchronously) called once for each service that already exists,
        passing in DISCOVERED as the event. Otherwise, the listener will only
        be called once the next discovery event for a matching service
        occurs.
        """
        # Add the listener to our list of listeners
        self.service_listeners.append((info_filter, listener))
        # Check to see if we're supposed to notify the listener about all
        # matching services that already exist
        if initial:
            # Scan all of the services
            for service_id, discovered_service in self.discovered_services.items():
                if filter_matches(discovered_service.info, info_filter):
                    # If this service matches, notify the listener about it
                    host, port = discovered_service.locations.keys()[0]
                    with print_exceptions:
                        listener(service_id, host, port,
                                 discovered_service.info, DISCOVERED)

    @synchronized_on("lock")
    def remove_service_listener(self, listener, initial=False):
        # Scan the list of listeners and remove this one. Inefficient, it's
        # true, and I hope to make it more efficient later on.
        for index, (info_filter, l) in enumerate(self.service_listeners[:]):
            # See if we've hit the right listener
            if l == listener:
                # If we have, remove the listener
                del self.service_listeners[index]
                if initial:
                    # Scan through the list of services
                    for service_id, discovered_service in self.discovered_services.items():
                        if filter_matches(discovered_service.info, info_filter):
                            # This service matched, so we notify this
                            # listener that the service was removed
                            with print_exceptions:
                                listener(service_id, None, None, None,
                                         UNDISCOVERED)
                # We've found our listener and deleted it, so we return now
                return

    def notify_service_listeners(self, service_id, host, port, info, event):
        # Invoke every registered listener whose filter matches this
        # service's info object; exceptions are printed, not propagated.
        for filter, listener in self.service_listeners:
            if filter_matches(info, filter):
                with print_exceptions:
                    listener(service_id, host, port, info, event)

    def local_service_changed(self, service_id, old, new):
        """
        Called (by the local_services property table) when services come and
        go. All we really need to do is publish/unpublish the service.
        """
        if old:
            for publisher in self.publishers:
                publisher.remove(old)
        if new:
            for publisher in self.publishers:
                publisher.add(new)
def start_server(sock: socket.socket):
    """Accept connections on *sock* forever, spawning a ClientListener per client."""
    print("Started the server")
    while True:
        conn, addr = sock.accept()
        print(f"Accepted new connection from {addr}")
        listener = ClientListener(conn, addr)
        listener.start()
def __accept_client(self, role: _RoleT, server_sock: socket.socket, sock_attr: _SockAttrT) -> None:
    """Accept a pending *role* client from *server_sock* and attach its socket
    to the per-client record under the attribute named by *sock_attr*."""
    try:
        (sock, peer) = server_sock.accept()
        sock.setblocking(True)
    except Exception:
        get_logger(0).exception("Can't accept %s client", role)
    else:
        if peer[0] not in self.__clients:
            # First connection from this peer address: enforce the client
            # limit before creating a new record.
            if len(self.__clients) >= self.__max_clients:
                self.__close_sock(sock)
                get_logger(0).info(
                    "Refused %s client: %s: max clients reached", role, peer[0])
                return
            self.__clients[peer[0]] = _BtClient(peer[0])
        client = self.__clients[peer[0]]
        # The client record is expected to expose one socket slot per role.
        assert hasattr(client, sock_attr)
        setattr(client, sock_attr, sock)
        self.__to_read.add(sock)
        get_logger(0).info("Accepted %s client: %s", role, peer[0])
        self.__state_flags.update(online=True)
        # Stay publicly discoverable only while there is room for more clients.
        self.__set_public(len(self.__clients) < self.__max_clients)
def accept(self, sock_file: socket.socket):
    """Accept an IRC client connection, register it with the selector, and
    create a User after attempting a reverse hostname lookup."""
    connection, address = sock_file.accept()
    connection.setblocking(False)
    # Per-connection state tracked by the selector
    data = types.SimpleNamespace(address=address, in_data=b'', out_data=b'')
    event = selectors.EVENT_READ | selectors.EVENT_WRITE
    sel.register(connection, event, data=data)
    connection.send(
        bytes("NOTICE AUTH :*** Processing connection to " + HOSTNAME + "\r\n",
              encoding="UTF-8"))
    connection.send(
        bytes("NOTICE AUTH :*** Looking up your hostname...\r\n",
              encoding="UTF-8"))
    try:
        # gethostbyaddr returns (hostname, aliaslist, ipaddrlist)
        hname = socket.gethostbyaddr(address[0])
        print(hname)
        user = User(connection_info=(connection, address), hostname=hname)
        connection.send(
            bytes("NOTICE AUTH :*** Found your hostname\r\n",
                  encoding="UTF-8"))
    except Exception:
        # Reverse lookup failed; fall back to the raw (host, port) address.
        connection.send(
            bytes(
                "NOTICE AUTH :*** Couldn't find your hostname. Reverting to naive.\r\n",
                encoding="UTF-8"))
        user = User(connection_info=(connection, address), hostname=address)
    self.users.append(user)
def accept_handler(serversocket: socket.socket) -> None:
    """Accept one client, register it for reads, and re-arm the accept waiter."""
    sock, (addr, port) = serversocket.accept()
    sock.setblocking(False)
    logging.debug(f"New client: {addr}:{port}")
    fd = sock.fileno()
    connections[fd] = (sock, addr, port)
    read_waiters[fd] = (recv_handler, (fd,))
    read_waiters[serversocket.fileno()] = (accept_handler, (serversocket,))
def request_handler(listener: socket.socket) -> None:
    """Test server: complete a scripted SOCKS5 handshake, upgrade the socket
    to TLS, then answer a single HTTP GET with an empty 200 response."""
    sock = listener.accept()[0]
    # Drive the generator-based SOCKS5 negotiation (no auth sub-negotiation).
    handler = handle_socks5_negotiation(sock, negotiate=False)
    addr, port = next(handler)
    assert addr == b"localhost"
    assert port == 443
    # Sending True completes the handshake; the generator then finishes.
    with pytest.raises(StopIteration):
        handler.send(True)

    # Wrap in TLS
    context = better_ssl.SSLContext(
        ssl.PROTOCOL_SSLv23)  # type: ignore[misc]
    context.load_cert_chain(DEFAULT_CERTS["certfile"],
                            DEFAULT_CERTS["keyfile"])
    tls = context.wrap_socket(sock, server_side=True)

    # Read until the blank line terminating the request headers.
    buf = b""
    while True:
        buf += tls.recv(65535)
        if buf.endswith(b"\r\n\r\n"):
            break
    assert buf.startswith(b"GET / HTTP/1.1\r\n")

    tls.sendall(b"HTTP/1.1 200 OK\r\n"
                b"Server: SocksTestServer\r\n"
                b"Content-Length: 0\r\n"
                b"\r\n")
    tls.close()
    sock.close()
def request_handler(listener: socket.socket) -> None:
    """Test server: complete a scripted SOCKS4 handshake, then answer a single
    HTTP GET with an empty 200 response."""
    sock = listener.accept()[0]
    # Drive the generator-based SOCKS4 negotiation.
    handler = handle_socks4_negotiation(sock)
    addr, port = next(handler)
    assert addr == b"example.com"
    assert port == 80
    # Sending True completes the handshake; the generator then finishes.
    with pytest.raises(StopIteration):
        handler.send(True)
    # Read until the blank line terminating the request headers.
    buf = b""
    while True:
        buf += sock.recv(65535)
        if buf.endswith(b"\r\n\r\n"):
            break
    assert buf.startswith(b"GET / HTTP/1.1")
    assert b"Host: example.com" in buf
    sock.sendall(b"HTTP/1.1 200 OK\r\n"
                 b"Server: SocksTestServer\r\n"
                 b"Content-Length: 0\r\n"
                 b"\r\n")
    sock.close()
def __accept(self, sock: socket):
    """Accept a pending connection on *sock* and start tracking it."""
    client, peer = sock.accept()
    logger.debug('accepted: {0}, from: {1}'.format(client, peer))
    client.setblocking(False)
    self.__selector.register(client, EVENT_READ)
    self.__connections[client] = self.Connection(time.time())
    self.__connected(client)
def _accept_endless(sock: socket.socket):
    """Loop forever: greet each client with 'hello'; accept timeouts are ignored."""
    while True:
        with contextlib.suppress(socket.timeout):
            conn, peer = sock.accept()
            with conn:
                print(f'Connect From {peer}')
                conn.sendall('hello'.encode('utf-8'))
def Livestream(self, hostSocket: socket.socket):
    """Stream screenshots to one client at a target frame rate until
    livestreamEvent is set or a send fails."""
    # accept the coming connection from client
    liveSocket, address = hostSocket.accept()
    TARGET_FPS = 30
    TIME_FRAME = 1 / TARGET_FPS  # seconds per frame
    frame = 0
    start = time.perf_counter()
    while not self.livestreamEvent.is_set():
        w, h, data = self.screenHandler.TakeScreenshotAsBytes(960, 540)
        # Message header carries "<width> <height> <frame-number>" ahead of
        # the raw screenshot bytes.
        state = SendMessage(liveSocket, str(w) + " " + str(h) + " " + str(frame + 1), data)
        if not state:
            self.HandleMessageFault()
            break
        frame += 1
        # Pace the loop: frame N should complete no earlier than
        # N * TIME_FRAME after the start time; sleep the remainder.
        targetTime = frame * TIME_FRAME
        end = time.perf_counter()
        elapsed = (end - start)
        waitTime = targetTime - elapsed if targetTime >= elapsed else 0.0
        time.sleep(waitTime)
    liveSocket.close()
def accept_connection(server_socket: socket.socket):
    """Accept a client and register it with the selector for read events."""
    conn, peer = server_socket.accept()
    print(f"Connection from {peer}")
    SELECTOR.register(fileobj=conn,
                      events=selectors.EVENT_READ,
                      data=send_message)
def main(): parser = argparse.ArgumentParser() parser.add_argument('--port', '-p', default=2000, type=int, help='port to use') args = parser.parse_args() server_socket = Socket(socket.AF_INET, socket.SOCK_STREAM) server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server_socket.bind(('', args.port)) server_socket.listen(1) print "server running" while True: connection_socket = server_socket.accept()[0] request = connection_socket.recv(1024) reply = http_handler(request) connection_socket.send("HTTP/1.1 200 OK\n") connection_socket.send("\n") connection_socket.send(reply) connection_socket.close() print "received request" print "reply sent" return 0
class Server:
    """Thin wrapper around a listening TCP socket, usable as a context manager."""

    def __init__(self, address, port, max_connections):
        # Socket is presumably an alias for socket.socket defined elsewhere
        # in this module -- TODO confirm.
        self.socket = Socket()
        self.address = address
        self.port = port
        # Backlog value passed to listen() in start()
        self.connections = max_connections

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Delegate cleanup to the socket's own context-manager exit.
        self.socket.__exit__(exc_type, exc_val, exc_tb)
        logging.info('Server closed')

    def start(self):
        """Bind and begin listening; exits the process if the address is taken."""
        logging.info('Starting server')
        try:
            self.socket.bind((self.address, self.port))
        except OSError:
            logging.debug("Address already taken - {address}:{port}".format(
                address=self.address, port=self.port))
            exit(1)
        self.socket.listen(self.connections)
        logging.info('Server has been started. Waiting for connections.')

    def close(self):
        # Close the listening socket directly (without the context manager).
        self.socket.close()

    def accept(self):
        """Block until a client connects; return (client_sock, client_addr)."""
        client_sock, client_addr = self.socket.accept()
        return client_sock, client_addr
def main_process(listen_fd: socket.socket):
    """select()-based chat hub: accept clients, relay operator stdin input of
    the form "<uid>: message" to the chosen client, and print client data."""
    contact = {}  # uid -> client socket
    uid = 1
    # Watch the listener, stdin, and every connected client for readability.
    rlist = [listen_fd, sys.stdin]
    while True:
        res_read, _, _ = select.select(rlist, [], [])
        for x in res_read:
            if x is listen_fd:
                # New incoming connection: track it and assign the next uid.
                new_fd, _ = listen_fd.accept()
                print("新链接到来", new_fd)
                rlist.append(new_fd)
                contact[uid] = new_fd
                uid += 1
            elif x is sys.stdin:
                # Operator typed "<uid>: text" -- send the whole line to the
                # client registered under that uid.
                buf = input('')
                k = buf.split(':')[0].strip()
                sock = contact[int(k)]
                sock.send(buf.encode('utf-8'))
            else:
                # Data (or EOF) from a connected client.
                buf = x.recv(1024)
                if buf:
                    print("收到:{},信息是 {}".format(x.fileno(), buf.decode('utf-8')))
                else:
                    # Empty read: the client closed the connection.
                    x.close()
                    print("关闭连接")
                    rlist.remove(x)
def run_server(self: "BrowserAzureCredentialsProvider", listen_socket: socket.socket,
               idp_response_timeout: int, state: str) -> str:
    """Accept the IdP browser redirect on *listen_socket* and extract the
    OAuth authorization code from its query string, validating the returned
    state value.

    Raises InterfaceError on a state mismatch or a missing/empty code.
    """
    conn, addr = listen_socket.accept()
    conn.settimeout(float(idp_response_timeout))
    size: int = 102400
    with conn:
        while True:
            part: bytes = conn.recv(size)
            decoded_part = part.decode()
            # The redirect carries "state=<...>&code=<...>" in its request.
            state_idx: int = decoded_part.find("state=")
            if state_idx > -1:
                # Slice from after "state=" up to the next "&".
                received_state: str = decoded_part[state_idx + 6:decoded_part.
                                                   find("&", state_idx)]
                # The state must round-trip unchanged (CSRF protection).
                if received_state != state:
                    raise InterfaceError(
                        "Incoming state {received} does not match the outgoing state {expected}"
                        .format(received=received_state, expected=state))
                code_idx: int = decoded_part.find("code=")
                if code_idx < 0:
                    raise InterfaceError("No code found")
                # Slice from after "code=" up to the next "&".
                received_code: str = decoded_part[code_idx + 5:decoded_part.
                                                  find("&", code_idx)]
                if received_code == "":
                    raise InterfaceError("No valid code found")
                # Tell the browser it can close, then hand back the code.
                conn.send(self.close_window_http_resp())
                return received_code
def accept(self, sock: socket.socket):
    """Accept a client on *sock* and register an HTTP handler for it."""
    client, peer = sock.accept()
    client.setblocking(False)
    self.log.log('accept connection to' + repr(peer))
    new_handler = HTTPHandler.Handler(self.selector, client, peer,
                                      self.log, self.db_handler)
    self.selector.register(client, selectors.EVENT_READ, data=new_handler)
def accept(self, sock: socket.socket):  # Runs on the select/selector thread
    """Accept a new client socket and register it with the selector for reads."""
    new_sock, r_address = sock.accept()
    new_sock.setblocking(False)
    print('~' * 30)
    # Register the client socket for read events with self.rec as its
    # callback; there is one such registration per connected client.
    key = self.selector.register(new_sock, selectors.EVENT_READ, self.rec)
    logging.info(key)
def _onUnknownReadable(self, conn : socket):
    """Handle readability on an untracked socket: accept a new user
    connection on the listener, or forward user data/disconnects to the
    source tunnel endpoint as Packets."""
    if conn is self.__target:
        # The listening socket is readable: accept the new user connection
        # and start tracking it.
        try:
            nconn, addr = conn.accept()
            self.__clients[str(addr)] = nconn
            self._addSocket(nconn)
        except Exception as ex:
            self._err(conn, ex)
            return
        # Announce the new user connection to the source side.
        packet = Packet()
        packet.op = Packet.OP_USER_CONN
        packet.host, packet.port = addr
        packet.send(self.__source)
    else:
        # Data (or EOF) from an existing user connection.
        data = conn.recv(TCP_RECV_BUFF)
        packet = Packet()
        packet.host, packet.port = conn.getpeername()
        if data:
            packet.op = Packet.OP_USER_MSG
            packet.data = data
        else:
            # Empty read means the user disconnected; drop our bookkeeping
            # before announcing the disconnect.
            packet.op = Packet.OP_USER_DISCONN
            del self.__clients[str(conn.getpeername())]
            self._removeSocket(conn)
        packet.send(self.__source)
def _accept_process(self, serv_sock: socket.socket):
    """Accept clients until should_stop(), queueing each (sock, address) pair."""
    while True:
        if self.should_stop():
            break
        try:
            accepted = serv_sock.accept()
        except socket.timeout:
            continue
        self._accept_queue.put(accepted)
def start_listen(server_sock: socket.socket):
    """Accept players in a loop, sending each newcomer the current queue and
    an assigned name. Exits when the listening socket fails."""
    player_list = PlayerList()
    logging.info("开始监听")
    while True:
        try:
            sock, addr = server_sock.accept()
            # Send the current queue contents to the new player.
            data = {
                "message": "player_list",
                "data": [p.name for p in player_list.get_player_in_queue()]
            }
            Player.send_obj(sock, data)
            logging.info("当前玩家: %s" % str([p.name for p in player_list.get_players()]))
            logging.info(
                "当前队列中玩家: %s" % str([p.name for p in player_list.get_player_in_queue()]))
            # Derive the new player's name from the current player count.
            name = "玩家 {}".format(len(player_list.get_players()))
            player = Player(sock, name)
            player_list.add(player)
            # Tell the client which name it was assigned.
            data = {"message": "get_name", "data": name}
            Player.send_obj(sock, data)
        except OSError:
            logging.exception("监听失败, socket失效")
            break
def main_process(listen_fd: socket.socket):
    """Accept clients forever, handing each socket to a child process."""
    while True:
        conn, _ = listen_fd.accept()
        print("新链接到来", conn)
        worker = Process(target=new_client_handler, args=(conn,))
        worker.start()
        # The parent's copy of the socket is no longer needed; the child
        # process holds its own reference.
        conn.close()
def main():
    """Entry point for a simple caching HTTP proxy (Python 2).

    Listens on --port (default 8080) and hands each connection to
    handle_http on a daemon thread, sharing one cache dict across handlers.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--port', '-p', default=8080, type=int, help='Port to use')
    args = parser.parse_args()
    try:
        server_socket = Socket(socket.AF_INET, socket.SOCK_STREAM)
        server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, BUFFER_SIZE)
        server_socket.bind(('', args.port))
        server_socket.listen(1)
        cache_dict = {}  # response cache shared by every handler thread
        print "Proxy server ready..."
        while True:
            try:
                connection_socket = server_socket.accept()[0]
                t = Thread(target=handle_http, args=[cache_dict, connection_socket])
                t.setDaemon(1)
                t.start()
                # NOTE(review): join() immediately after start() serializes
                # the handlers, defeating the purpose of threading -- confirm
                # whether this is intentional.
                t.join()
            except socket.error, e:
                print e
    finally:
        # NOTE(review): this only runs when the loop exits abnormally, and
        # raises NameError if the very first accept() failed before
        # connection_socket was bound -- verify.
        connection_socket.close()
def redirector():
    """
    Redirects all incoming traffic through the proxy.

    Reads PROXY_HOST/PROXY_PORT from the environment, installs the default
    SOCKS5 proxy, then accepts local connections on 127.0.0.1:42000 and
    pipes each one to its original destination through the proxy.
    """
    PROXY_HOST = environ.get("PROXY_HOST")
    # environ.get returns a *string* when the variable is set, but the int
    # default 1080 otherwise; normalize to int so setdefaultproxy always
    # receives a numeric port.
    PROXY_PORT = int(environ.get("PROXY_PORT", 1080))
    setdefaultproxy(PROXY_TYPE_SOCKS5, PROXY_HOST, PROXY_PORT)
    server = Socket(AF_INET, SOCK_STREAM)
    server.bind(("127.0.0.1", 42000))
    server.listen(5)
    while True:
        client_socket, (src_host, src_port) = server.accept()
        (dst_host, dst_port) = get_original_destination(client_socket)
        logger.info(
            f"Intercepted connection from {src_host}:{src_port} to {dst_host}:{dst_port}"
        )
        proxy_socket = SocksSocket()
        proxy_socket.connect((dst_host, dst_port))
        bidirectional_copy(client_socket, proxy_socket)
def worker_process(serversocket: socket.socket) -> None:
    """Echo-server worker: accept clients sequentially and echo their data.

    Runs forever; each client is serviced until it closes the connection or
    a socket error occurs, then the next client is accepted.
    """
    while True:
        clientsocket, (client_address, client_port) = serversocket.accept()
        logging.debug(f"New client: {client_address}:{client_port}")
        while True:
            try:
                message = clientsocket.recv(1024)
                logging.debug(
                    f"Recv: {message} from {client_address}:{client_port}")
            except OSError:
                break
            if len(message) == 0:
                # Empty read: the peer performed an orderly shutdown.
                break
            # sendall() retries internally until the whole buffer is
            # transmitted, replacing the manual partial-send loop.
            clientsocket.sendall(message)
            logging.debug(f"Send: {message} to {client_address}:{client_port}")
        clientsocket.close()
        logging.debug(f"Bye-bye: {client_address}:{client_port}")
def send_time(self, socket_: socket.socket) -> None:
    """Serve the current time to each client while _is_sending_time is true."""
    while self._is_sending_time:
        conn, peer = socket_.accept()
        print('Connected client from {}'.format(peer))
        message = time.ctime(time.time()) + '\n'
        conn.send(message.encode('ascii'))
        conn.close()
def accept_wrapper(sock: socket.socket):
    """Accept a client and register it with the selector for read/write events."""
    client, address = sock.accept()
    print('Accepted Connection: ', address)
    client.setblocking(False)
    state = types.SimpleNamespace(addr=address, inb=b'', outb=b'')
    sel.register(client, selectors.EVENT_READ | selectors.EVENT_WRITE,
                 data=state)
def main():
    """Autobus server entry point (Python 2): listen for client connections
    and feed registration events into the shared event queue until
    interrupted."""
    register_builtin_interface()
    server = Socket()
    # Optional port override from the command line
    if len(sys.argv) > 1:
        server_port = int(sys.argv[1])
    else:
        server_port = DEFAULT_PORT
    print "Listening on port " + str(server_port)
    server.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    server.bind(("", server_port))
    server.listen(50)
    # The event queue is drained on its own thread.
    Thread(target=process_event_queue).start()
    print "\nAutobus has successfully started up."
    try:
        while True:
            socket, address = server.accept()
            connection = Connection(socket, address)
            # Queue the connection's registration before starting its I/O.
            event_queue.put((connection.id, discard_args(connection.register)),
                            block=True)
            connection.start()
    except KeyboardInterrupt:
        print "KeyboardInterrupt received, shutting down"
        # (None, None) is the sentinel that stops the event-queue thread.
        event_queue.put((None, None), block=True)
        print "Event queue has been notified to shut down"
    except:
        print "Unexpected exception occurred in the main loop, shutting down. Stack trace:"
        print_exc()
        event_queue.put((None, None), block=True)
        print "Event queue has been notified to shut down"
    server.close()
def recv_client(sock: socket.socket, save: Path) -> threading.Thread:
    """Accept one client on *sock*, start a file-receiver thread for it,
    and return the (already started) thread."""
    connection, address = sock.accept()
    Main.logger.info(f"connection established from {address}.")
    # Destination file name combines the current timestamp with the peer
    # address to keep transfers from colliding.
    receiver = ThreadingFileReceiver(connection, save / f'{time()}{address}', Main.logger)
    receiver.start()
    return receiver
def worker_thread(serversocket: socket.socket, shutdown_event: threading.Event) -> None: while not shutdown_event.isSet(): try: clientsock, (client_address, client_port) = serversocket.accept() logging.debug(f"New client: {client_address}:{client_port}") except (OSError, ConnectionAbortedError): continue while True: try: message = clientsock.recv(1024) logging.debug( f"Recv: {message} from {client_address}:{client_port}") except OSError: break if len(message) == 0: break sent_message = message while True: sent_len = clientsock.send(sent_message) if sent_len == len(sent_message): break sent_message = sent_message[sent_len:] logging.debug(f"Send: {message} to {client_address}:{client_port}") clientsock.close() logging.debug(f"Bye-bye: {client_address}:{client_port}") logging.debug("Shutting down thread")
def _await_tcp_connection(
    name: str,
    tcp_port: int,
    listener_socket: socket.socket,
    subprocess_starter: Callable[[], subprocess.Popen]
) -> Tuple[subprocess.Popen, socket.socket, IO[bytes], IO[bytes]]:
    """Start a subprocess and block until it connects back over TCP.

    Returns the subprocess handle, the accepted socket, and a buffered
    read/write file object wrapping it (returned twice, as both reader and
    writer, since it was opened 'rwb').

    NOTE(review): the ``name`` and ``tcp_port`` parameters are unused in
    this body — presumably kept for logging or interface symmetry upstream;
    confirm.
    """
    # After we have accepted one client connection, we can close the listener socket.
    with closing(listener_socket):
        # We need to be able to start the process while also awaiting a client connection.
        def start_in_background(d: _SubprocessData) -> None:
            # Sleep for one second, because the listener socket needs to be in the "accept" state before starting the
            # subprocess. This is hacky, and will get better when we can use asyncio.
            time.sleep(1)
            process = subprocess_starter()
            # The Popen handle is smuggled out via the mutable holder object.
            d.process = process
        data = _SubprocessData()
        thread = threading.Thread(target=lambda: start_in_background(data))
        thread.start()
        # Await one client connection (blocking!)
        sock, _ = listener_socket.accept()
        # The starter thread has necessarily finished by now (the subprocess
        # it launched is what connected), so join() is quick.
        thread.join()
        reader = sock.makefile('rwb')  # type: IO[bytes]
        writer = reader
        assert data.process
        return data.process, sock, reader, writer
class PyCashServer: HOST = '' # Symbolic name meaning all available interfaces PORT = 50007 def __init__(self): self.s = Socket(socket.AF_INET, socket.SOCK_STREAM) self.s.bind((self.HOST, self.PORT)) self.s.listen(3) def receveObject(self): conn, addr = self.s.accept() conn.settimeout(5.0) data = conn.recv(16) objString = '' while(data != None): objString += (data) conn.send(data) try: data = conn.recv(16) except: data = None x = json.loads(objString) return obj(x)
def socket_handler(listener: socket.socket) -> None:
    """Accept a single TLS client, validate its request, and answer it
    with the canned sample response."""
    raw_sock, _ = listener.accept()
    tls_sock = cls.server_context.wrap_socket(raw_sock, server_side=True)
    with tls_sock as ssock:
        request = consume_socket(ssock)
        validate_request(request)
        ssock.send(sample_response())
    # Close the underlying socket as well; unwrapping the TLS layer does
    # not necessarily close it.
    raw_sock.close()
def _accept(self, sock: socket.socket, mask):
    """Accept a client on *sock* and wire up a bidirectional relay between
    it and the destination configured for this listening socket."""
    client, client_addr = sock.accept()
    # create relay socket to destination
    remote = socket.create_connection(self.__sock_addr_map[sock][4])
    remote.setblocking(False)
    # add socket into relay table
    self.__relay_table[client] = remote
    self.__relay_table[remote] = client
    _logger.info("create relay: [%s]:%d <=> [%s]:%d"
                 % (_format_addr(client_addr) + _format_addr(remote.getpeername())))
    # Each side gets its own (initially empty) outgoing buffer and is
    # registered with the selector for read events.
    for endpoint in (client, remote):
        self.__send_buffer[endpoint] = {'buffer': None, 'send_pos': 0}
        self.__selector.register(endpoint, selectors.EVENT_READ, self._relay_handle)
def accept_new_connection(self, server_sock: socket.socket) -> None:
    """Accept a pending client and assemble its message-processing pipeline,
    registering the new socket with the selector."""
    sock, _ = server_sock.accept()
    with bind_remote_address_to_logger(sock):
        sock.setblocking(False)
        # Outgoing path: client -> sender -> serializer -> framer -> buffer.
        disconnector = Disconnector()
        send_buffer = SendBuffer()
        msg_sender = MessageSender(Serializer(MessageFramer(send_buffer)))
        client = Client(msg_sender, disconnector)
        # Incoming path: splitter -> deserializer -> parsed handler -> router.
        msg_router = MessageRouter(self._server, client)
        msg_splitter = MessageSplitter(Deserializer(ParsedMessageHandler(msg_router)))
        self._clients[sock] = ClientConnection(sock, send_buffer, msg_splitter, client)
        self._server.on_client_connected(client)
        self._sel.register(
            sock,
            selectors.EVENT_READ | selectors.EVENT_WRITE,
            self._process_sock_event  # type: ignore
        )
        _logger.debug("New client connected")
def __init__(self):
    """Create the interrupt socket pair used to wake blocking waits.

    A throwaway TCP listener on an ephemeral localhost port is used to
    obtain a connected reader/writer socket pair; the listener itself is
    shut down and discarded once the pair exists.  Both ends are left in
    non-blocking mode.
    """
    # Set up the interrupt socket
    interrupt_server = Socket()
    interrupt_server.bind(("localhost", 0))
    interrupt_server.listen(1)
    self.interrupt_writer = Socket()
    # BUG FIX: connect() takes a single (host, port) tuple, not two
    # arguments — and connecting a non-blocking socket raises
    # BlockingIOError, so connect while still blocking and switch to
    # non-blocking only afterwards (the original called
    # setblocking(False) before connect()).
    self.interrupt_writer.connect(("localhost", interrupt_server.getsockname()[1]))
    # BUG FIX: accept() returns a (socket, address) pair; keep the socket.
    self.interrupt_reader = interrupt_server.accept()[0]
    interrupt_server.shutdown(SHUT_RDWR)
    interrupt_server.close()
    self.interrupt_reader.setblocking(False)
    self.interrupt_writer.setblocking(False)
def listen_for_data(sock: socket.socket) -> None:
    """Make the socket listen for data forever.

    Binds *sock* to localhost:41401 and serves one connection at a time:
    only loopback peers are processed; everything else is dropped.
    Runs forever; never returns.
    """
    host = 'localhost'
    port = 41401
    sock.bind((host, port))
    sock.listen(1)
    while True:
        print('Waiting...')
        conn, addr = sock.accept()
        print(f'Connection from {addr}')
        with conn:
            # BUG FIX: the rejection check now lives inside the with-block,
            # so rejected non-loopback connections are closed instead of
            # leaking a file descriptor per rejected peer.
            if addr[0] != '127.0.0.1':
                continue
            data = receive_all(conn)
            parse_data(data)
class SocketStream(Stream):
    """A Stream backed by a TCP listening socket that serves one client."""

    def __init__(self, port):
        # Bind immediately; listening is deferred until open().
        self.listener = Socket(AF_INET, SOCK_STREAM)
        self.listener.bind(('', port))

    def open(self):
        """Block until a single client connects; use that connection for I/O."""
        self.listener.listen(1)
        self.socket, _address = self.listener.accept()

    def read(self, size=1024):
        """Return up to *size* bytes received from the peer."""
        return self.socket.recv(size)

    def write(self, data):
        """Send all of *data* to the peer."""
        self.socket.sendall(data)

    def close(self):
        """Close the client connection, then the listener."""
        self.socket.close()
        self.listener.close()
def main():
    # Minimal single-threaded HTTP server (Python 2 code: print statements).
    # Parses --port, then serves one request per connection forever: reads up
    # to 1 KB of request, delegates to http_handler(), and writes a fixed
    # "200 OK" status line followed by the handler's reply.
    parser = argparse.ArgumentParser()
    parser.add_argument('--port', '-p', default=2000, type=int, help='port to use')
    args = parser.parse_args()
    server_socket = Socket(socket.AF_INET, socket.SOCK_STREAM)
    # SO_REUSEADDR lets the server restart without waiting out TIME_WAIT.
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_socket.bind(('', args.port))
    server_socket.listen(1)
    print "server running"
    while True:
        connection_socket = server_socket.accept()[0]
        # NOTE(review): a single 1 KB recv may truncate larger requests —
        # confirm that is acceptable for this server's use.
        request = connection_socket.recv(1024)
        reply = http_handler(request)
        # NOTE(review): HTTP/1.1 specifies CRLF ("\r\n") line endings; these
        # writes use bare "\n" — confirm clients tolerate it.
        connection_socket.send("HTTP/1.1 200 OK\n")
        connection_socket.send("\n")
        connection_socket.send(reply)
        connection_socket.close()
        print "received request"
        print "reply sent"
    # NOTE(review): unreachable — the while True above never exits normally.
    return 0
class SixjetServer(Thread):
    """
    A sixjet server. Server instances listen on both a specified port for
    native sixjet commands and on an Autobus 2 service. They are created with
    a function that will be used to write bytes to the parallel port; you'll
    typically pass an instance of parallel.Parallel's setData method, but any
    function accepting an integer will do.
    """
    # NOTE(review): the print statements below are Python 2 syntax; this
    # class is Python 2 code.

    def __init__(self, write_function, port, bus, service_extra={}):
        """
        Creates a new sixjet server. write_function is the function to use to
        write data to the parallel port. port is the port on which the native
        protocol listener should listen. service_extra is an optionally-empty
        set of values that will be added to the Autobus 2's service info
        dictionary. (Keys such as type will be added automatically, but such
        keys present in service_extra will override the ones added
        automatically.)

        bus is the Autobus bus to use. You can usually just use:

        from autobus2 import Bus
        with Bus() as bus:
            server = SixjetServer(..., bus, ...)
            ...

        and things will work.

        If write_function is None, a new parallel.Parallel instance will be
        created, and its setData method used.
        """
        # NOTE(review): mutable default argument service_extra={} is shared
        # across calls; it is not mutated in the visible code, but verify.
        Thread.__init__(self)
        if write_function is None:
            # Deferred import: the pyparallel dependency is only needed when
            # no write_function is supplied.
            import parallel
            self._parallel = parallel.Parallel()
            write_function = self._parallel.setData
        else:
            self._parallel = None
        # NOTE(review): write_function is never stored on self, yet
        # set_parallel_data calls self.write_function — suspected bug;
        # confirm.
        # The event queue. Events can be pushed from any thread, but can only
        # be read and processed from the SixjetServer's run method.
        self.queue = Queue()
        # The list of remote native-protocol connections. This must only be
        # read and modified from the event thread.
        self.connections = []
        # The current states of all of the jets. This must only be read and
        # modified from the event thread.
        self.jet_states = [False] * 16
        # True to shut down the server. This shouldn't be modified; instead,
        # stop() should be called, which will post an event that sets this to
        # True.
        self.shut_down = False
        # The server socket listening on the native protocol port
        self.socket = Socket()
        self.socket.bind(("", port))
        # NOTE(review): listen() is never called on this socket, but
        # listen_for_connections() calls accept() — suspected bug; confirm.
        # The Autobus service we're publishing
        # NOTE(review): self.bus is read here but never assigned in this
        # class; the bus parameter was presumably meant to be stored first —
        # confirm.
        self.service = self.bus.create_service(
            {"type": "sixjet", "sixjet.native_port": port},
            from_py_object=AutobusService(self))

    def listen_for_connections(self):
        # Accepts native-protocol connections forever on its own thread.
        # We can just infinitely loop here as run() closes the socket after
        # the event loop quits, which will cause an exception to be thrown here
        while True:
            # NOTE(review): accept() returns a (socket, address) pair and the
            # whole pair is handed to Connection — verify that is what
            # Connection expects.
            s = self.socket.accept()
            # The connection is just a dictionary for now. We're creating it
            # before so that we can partial it into the input thread.
            connection = Connection(self, s)
            self.post_event({"event": "connected", "connection": connection})
            connection.start()

    def remote_message_received(self, connection, message):
        # Forward a message from a remote connection onto the event queue so
        # it is processed on the event thread.
        self.post_event({"event": "message", "message": message})

    def run(self):
        # Event-thread main loop: processes queued events until shut_down.
        # Start a socket listening for connections
        Thread(target=self.listen_for_connections).start()
        # Read events and process them in a loop.
        while not self.shut_down:
            # TODO: add support for scheduled tasks here, or use separate
            # threads to post events when they happen. The latter would offer
            # a better guarantee that events will be processed when they happen
            # due to Python's sleep-waiting whenever using get with a timeout
            # (which is due to attempting to wait on a condition with a timeout
            # doing the aforementioned).
            event = self.queue.get()
            try:
                self.handle_event(event)
            except:
                # Keep the event loop alive on handler errors; just log them.
                traceback.print_exc()
            finally:
                self.queue.task_done()
        # Closing the socket makes the accept() in listen_for_connections
        # raise, stopping that thread as well.
        with print_exceptions:
            self.socket.close()

    def stop(self):
        # Request shutdown by posting the stop event; actual teardown happens
        # on the event thread.
        self.post_event({"event": "stop"})

    def post_event(self, event):
        # Thread-safe: Queue.put may be called from any thread.
        self.queue.put(event)

    def handle_event(self, event):
        # Dispatch a single event dict by its "event" key. Event thread only.
        if event["event"] == "message":
            self.handle_message(event["message"])
        elif event["event"] == "stop":
            self.shut_down = True
        elif event["event"] == "connected":
            self.connections.append(event["connection"])
        elif event["event"] == "disconnected":
            self.connections.remove(event["connection"])
        else:
            print "Warning: unrecognized event type: %r" % event

    def handle_message(self, message):
        # Apply a native-protocol message. Event thread only.
        if message["command"] == "set":
            # Turn the listed jets on/off, then push the new states out.
            for n in message["on"]:
                self.jet_states[n] = True
            for n in message["off"]:
                self.jet_states[n] = False
            self.write_jets()
        elif message["command"] == "clear":
            # Turn every jet off.
            for n in range(len(self.jet_states)):
                self.jet_states[n] = False
            self.write_jets()
        else:
            print "Invalid message: %r" % message

    def set_parallel_data(self, data):
        """
        Sets the parallel port's data pins to the specified state, which
        should be a number from 0 to 255, then waits a bit.
        """
        # NOTE(review): relies on self.write_function, which __init__ never
        # assigns — confirm (see note in __init__).
        self.write_function(data)
        sleep(0.0032)  # 3.2 milliseconds; increase if needed

    def write_jets(self):
        """
        Writes the jet states stored in jet_states to the parallel port.
        """
        # The sixjet board is basically made up of two 74HC595 8-bit shift
        # registers. For those not familiar with shift registers, they're basically
        # a queue of bits; new bits can be pushed in at one end, and when the queue
        # is full, old bits will be dropped from the other end. They can then be
        # instructed to take the bits currently in the queue and set 8 of their
        # pins to those values. They're perfect for controlling things like banks
        # of relays from the parallel port.
        # To push a bit into the queue, you set the shift register's DATA pin to
        # 0 if you want to push a 0 and 1 if you want to push a 1. Then you set the
        # CLOCK pin high and then back low.
        # To have the shift register take the bits in the queue and set its eight
        # output pins to their values, you set the shift register's STROBE pin high
        # and then low.
        # On the 74HC595 shift register, pin 11 is CLOCK, pin 12 is STORBE, and pin
        # 15 is DATA. Googling "74HC595 datasheet" will pull up a map of which pin
        # numbers are which physical pins on the 74HC595.
        # The sixjet board has two shift registers that each control a bank of 8
        # relays. The module constants DATA_A and DATA_B correspond to the data
        # pins of each of these shift registers. CLOCK and STROBE are connected to
        # both shift registers' clock and strobe pins.
        # So, to write out the data...
        # Clear the parallel port
        self.set_parallel_data(0)
        # Iterate over all the jets in reverse order. We reverse the ordering here
        # since the first bit written out will end up shifted to the last position
        # in the shift register, so we want to write the last bit first.
        # NOTE(review): reversed(zip(...)) requires zip to return a list, i.e.
        # Python 2 — consistent with the print statements above.
        for a, b in reversed(zip(self.jet_states[0:8], self.jet_states[8:16])):
            # Set lines A and B to the jets we're writing
            values = (DATA_A if a else 0) | (DATA_B if b else 0)
            self.set_parallel_data(values)
            # Do it an extra time just to see if it helps some issues I've been
            # seeing with data occasionally getting clocked in wrong
            self.set_parallel_data(values)
            # Set clock high
            self.set_parallel_data(values | CLOCK)
            # Set clock low
            self.set_parallel_data(values)
        # Set strobe high
        self.set_parallel_data(STROBE)
        # Set strobe low
        self.set_parallel_data(0)
def acceptUser(server: socket.socket):
    """Forever accept user connections, publishing the most recent client
    socket through the module-global ``userSock`` and handing each one to
    ``workWithUser``."""
    global userSock
    while True:
        userSock, addressUser = server.accept()
        workWithUser(userSock)