def __init__(self):
    self._define_responsibility_chain()
    self.pipe = None
    self.federates: Dict[str, Federate] = {}  # maps federate name to Federate
    self.sel = selectors.DefaultSelector()
def __init__(self, host, port):
    self.logger = self.setup_logger()
    self.lsock = self.setup_socket(host, port)
    self.sel = selectors.DefaultSelector()
    self.sel.register(self.lsock, selectors.EVENT_READ, data=None)
def read_device2(conn, mask):  # header inferred from the registration below
    msg = conn.recv(BUF_SIZE)
    heartbeat, steps, cal = msg.decode().split('/')
    now = time.strftime('%c', time.localtime(time.time()))
    string = now + f': Device2: Heartbeat={heartbeat}, Steps={steps}, Cal={cal}\n'
    print(string)
    with open('data.txt', 'a') as f:
        f.write(string)

# Device 1 socket
device1_socket = socket(AF_INET, SOCK_STREAM)
device1_socket.connect(('localhost', 7777))

# Device 2 socket
device2_socket = socket(AF_INET, SOCK_STREAM)
device2_socket.connect(('localhost', 9999))

device1_socket.send(b'Register')
device2_socket.send(b'Register')

# Selectors
sel = selectors.DefaultSelector()
sel.register(device1_socket, selectors.EVENT_READ, read_device1)
sel.register(device2_socket, selectors.EVENT_READ, read_device2)

while True:
    events = sel.select()
    for key, mask in events:
        callback = key.data
        callback(key.fileobj, mask)
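The loop dispatches on `key.data`, so every registered callback must accept `(sock, mask)`. The snippet registers a `read_device1` callback that is not shown; a minimal sketch of what it might look like, assuming Device 1 reports in the same 'heartbeat/steps/cal' wire format (that format is an assumption):

def read_device1(conn, mask):
    # hypothetical counterpart to read_device2; the wire format is assumed
    msg = conn.recv(BUF_SIZE)
    heartbeat, steps, cal = msg.decode().split('/')
    now = time.strftime('%c', time.localtime(time.time()))
    string = now + f': Device1: Heartbeat={heartbeat}, Steps={steps}, Cal={cal}\n'
    print(string)
    with open('data.txt', 'a') as f:
        f.write(string)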
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

__author__ = "Sigai"

import selectors
import socket

'''
This part was not covered in much detail; it needs revisiting.
'''

sel = selectors.DefaultSelector()  # automatically picks the best multiplexing implementation for the platform


# Handle an incoming client connection request
# (first argument is the listening socket, second is the read mask)
def accept(sock, mask):
    conn, addr = sock.accept()  # establish the connection
    # print("accepted", conn, "from", addr, "mask", mask)
    conn.setblocking(False)  # make the connection non-blocking
    # Register the connection with the selector: when it becomes readable,
    # the client has sent a message, and read() is called to handle it.
    sel.register(conn, selectors.EVENT_READ, read)


# Handle a new message from a connected client
# (first argument is the server's socket for that client, second is the read mask)
def read(conn, mask):
    data = conn.recv(1024)  # receive the new message
    if data:  # message is non-empty
        # print("echoing", repr(data))
        conn.send(data)  # echo it back
    else:
        # an empty read means the client closed the connection
        print("closing", conn)
        sel.unregister(conn)  # stop monitoring this client
        conn.close()  # close the socket
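The snippet defines `accept` and `read` but never shows the listening socket or the event loop. A minimal sketch of the missing pieces, following the same callback-in-`data` pattern (the address 127.0.0.1:8000 is an arbitrary placeholder):

# Listening socket (address and port are placeholders)
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(('127.0.0.1', 8000))
server.listen(100)
server.setblocking(False)
sel.register(server, selectors.EVENT_READ, accept)

# Event loop: dispatch to the callback stored as key.data
while True:
    events = sel.select()
    for key, mask in events:
        callback = key.data
        callback(key.fileobj, mask)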
def _run_in_child(
    self,
    *,
    chroot_dir: Path,
    network_config: Optional[pyspawner.NetworkConfig],
    compiled_module: CompiledModule,
    timeout: float,
    result: Any,
    function: str,
    args: List[Any],
) -> Any:
    """
    Fork a child process to run `function` with `args`.

    `args` must be Thrift data types. `result` must also be a Thrift type --
    its `.read()` function will be called, which may produce an error if the
    child process has a bug. (EOFError is very likely.)

    Raise ModuleExitedError if the child process did not behave as expected.

    Raise ModuleTimeoutError if it did not exit after a delay -- or if it
    closed its file descriptors long before it exited.
    """
    limit_time = time.time() + timeout

    module_process = self._pyspawner.spawn_child(
        args=[compiled_module, function, args],
        process_name=compiled_module.module_slug,
        sandbox_config=pyspawner.SandboxConfig(
            chroot_dir=chroot_dir, network=network_config
        ),
    )

    # stdout is Thrift package; stderr is logs
    output_reader = ChildReader(
        module_process.stdout.fileno(), OUTPUT_BUFFER_MAX_BYTES
    )
    log_reader = ChildReader(module_process.stderr.fileno(), LOG_BUFFER_MAX_BYTES)

    # Read until the child closes its stdout and stderr
    with selectors.DefaultSelector() as selector:
        selector.register(output_reader.fileno, selectors.EVENT_READ)
        selector.register(log_reader.fileno, selectors.EVENT_READ)

        timed_out = False
        while selector.get_map():
            remaining = limit_time - time.time()
            if remaining <= 0:
                if not timed_out:
                    timed_out = True
                    module_process.kill()  # untrusted code could ignore SIGTERM
                timeout = None  # wait as long as it takes for everything to die
                # Fall through. After SIGKILL the child will close each fd,
                # sending EOF to us. That means the selector _must_ return.
            else:
                timeout = remaining  # wait until we reach our timeout

            events = selector.select(timeout=timeout)
            ready = frozenset(key.fd for key, _ in events)
            for reader in (output_reader, log_reader):
                if reader.fileno in ready:
                    reader.ingest()
                    if reader.eof:
                        selector.unregister(reader.fileno)

    # The child closed its fds, so it should die soon. If it doesn't, that's
    # a bug -- so kill -9 it!
    #
    # os.wait() has no timeout option, and asyncio messes with signals so
    # we won't use those. Spin until the process dies, and force-kill if we
    # spin too long.
    for _ in range(DEAD_PROCESS_N_WAITS):
        pid, exit_status = module_process.wait(os.WNOHANG)
        if pid != 0:  # pid == 0 means the process is still running
            break
        time.sleep(DEAD_PROCESS_WAIT_POLL_INTERVAL)
    else:
        # we waited and waited. No luck. Dead module. Kill it.
        timed_out = True
        module_process.kill()
        _, exit_status = module_process.wait(0)

    if os.WIFEXITED(exit_status):
        exit_code = os.WEXITSTATUS(exit_status)
    elif os.WIFSIGNALED(exit_status):
        exit_code = -os.WTERMSIG(exit_status)
    else:
        raise RuntimeError("Unhandled wait() status: %r" % exit_status)

    if timed_out:
        raise ModuleTimeoutError

    if exit_code != 0:
        raise ModuleExitedError(exit_code, log_reader.to_str())

    transport = thrift.transport.TTransport.TMemoryBuffer(output_reader.buffer)
    protocol = thrift.protocol.TBinaryProtocol.TBinaryProtocol(transport)
    try:
        result.read(protocol)
    except EOFError:  # TODO handle other errors Thrift may throw
        raise ModuleExitedError(exit_code, log_reader.to_str()) from None

    # We should be at the end of the output now. If we aren't, that means
    # the child wrote too much.
    if transport.read(1) != b"":
        raise ModuleExitedError(exit_code, log_reader.to_str())

    if log_reader.buffer:
        logger.info("Output from module process: %s", log_reader.to_str())

    return result
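`ChildReader` is defined elsewhere in the project; the function above relies on a small interface (`fileno`, `ingest()`, `eof`, `buffer`, `to_str()`). A minimal sketch of such a helper, under the assumption that it simply buffers a pipe up to a byte limit (this is an illustration, not the project's real class):

import os

class ChildReader:
    """Sketch: read a child's pipe fd, keeping at most `limit` bytes."""

    def __init__(self, fileno: int, limit: int):
        self.fileno = fileno
        self.limit = limit
        self.buffer = b""
        self.eof = False

    def ingest(self) -> None:
        chunk = os.read(self.fileno, 65536)
        if not chunk:
            self.eof = True  # empty read means the child closed the pipe
            return
        room = self.limit - len(self.buffer)
        if room > 0:
            self.buffer += chunk[:room]  # silently drop bytes past the limit

    def to_str(self) -> str:
        return self.buffer.decode("utf-8", errors="replace")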
def __init__(self):
    assert sys.version_info >= (3, 5)
    import selectors  # inline import: Python 3 only!
    self._sel = selectors.DefaultSelector()
def __init__(self):
    self.sel = selectors.DefaultSelector()
    self._HOST = '127.0.0.1'
    self._PORT = 60001
    self._database_received = False
    self.database = None
def run_predator(timeout, clang, infile):
    """
    @timeout: int, number of seconds to wait until Predator produces a result
    @infile: string, path to the LLVM bitcode file to be processed by Predator
    """
    start = nstime()
    print('|> slllvm {} {}'.format(' '.join(clang), infile))
    with subprocess.Popen(['slllvm'] + clang + [infile],
                          stdout=PIPE, stderr=PIPE) as proc:
        compilation_result = str(proc.stderr.readline().strip())
        if 'Trying to compile' in compilation_result and 'OK' in compilation_result:
            log('Predator successfully compiled the input file')
        else:
            log('Predator failed to compile the input file')
            log('STDERR')
            print('\n'.join(map(lambda l: str(l).strip(), proc.stderr.readlines())))
            log('STDOUT')
            print('\n'.join(map(lambda l: str(l).strip(), proc.stdout.readlines())))
            return ('error', set())

        error_locations = set()
        sel = selectors.DefaultSelector()
        sel.register(proc.stderr, selectors.EVENT_READ)
        sel.register(proc.stdout, selectors.EVENT_READ)
        while True:
            for key, mask in sel.select(timeout):
                fd = key.fileobj
                line = fd.readline().strip().decode('utf-8')
                try:
                    line_errors = parse_errors(line)
                except UnsupportedErrorLocation:
                    log('Predator reported an error in an unsupported way')
                    log('The report is based on this line:')
                    log(line)
                    proc.kill()
                    return ('unknown', set())

                error_locations.update(line_errors)
                if len(line_errors) == 0:
                    has_warning_class = (re.search(r'\[([^]])+\]', line) is not None)
                    if line_reports_ignorable_warning(line):
                        log('Ignore {}'.format(line))
                    elif line_reports_unsupported_feature(line):
                        err('Predator encountered an unsupported feature, so the result is unknown')
                        log('The report is based on this line:')
                        log(line)
                        proc.kill()
                        return ('unknown', set())
                    elif not has_warning_class and re.search(r': error:', line):
                        err('Predator encountered an unknown error, so the result is error')
                        log('That is based on this line:')
                        log(line)
                        proc.kill()
                        return ('error', set())
                    elif not has_warning_class and re.search(r': warning:', line):
                        err('Predator encountered an unknown warning, so the result is error')
                        log('That is based on this line:')
                        log(line)
                        proc.kill()
                        return ('error', set())
                else:
                    log('line reports errors: {}'.format(line))

                if re.search(r'clEasyRun\(\) took', line):
                    err('Predator finished')
                    return ('ok', error_locations)

            if proc.poll() is not None:
                break

            now = nstime()
            elapsed = now - start
            if elapsed > timeout:
                log('timeout, killing Predator')
                proc.kill()
                return ('timeout', set())

    log('Predator has not finished gracefully')
    return ('error', set())
        # don't send to sender
        # (tail of a broadcast loop; the enclosing function is not shown in full)
        if client != client_socket:
            client.send(message['header'] + message['payload'])


if __name__ == "__main__":
    # create socket
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    # reuse addresses for server socket
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

    # tell OS we want to bind our server socket
    server_socket.bind((HOST, PORT))

    # tell server socket to start listening
    server_socket.listen()
    print(f'Server now listening for connections on {HOST}:{PORT}')

    socket_manager = selectors.DefaultSelector()
    socket_manager.register(server_socket, selectors.EVENT_READ, accept)

    while True:
        # the socket manager knows there's stuff to be dealt with
        events = socket_manager.select()
        for unread_socket, _ in events:
            callback = unread_socket.data
            callback(unread_socket.fileobj)
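The loop dispatches to whatever callback was stored in `data`, and the listening socket is registered with an `accept` callback that the excerpt does not show. A minimal sketch of such a callback, assuming a `receive_message` handler for client sockets (both names are illustrative, not the original code):

def accept(sock):
    # hypothetical: accept the new client and register a read callback for it
    client_socket, client_address = sock.accept()
    client_socket.setblocking(False)
    socket_manager.register(client_socket, selectors.EVENT_READ, receive_message)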
def main(args: Iterable[str]) -> int:
    """
    The main program loop

    :param args: Command line arguments
    :return: The program exit code
    """
    # Handle command line arguments
    args = handle_args(args)
    set_verbosity_logger(logger, args.verbosity)

    # Go to the working directory
    config_file = os.path.realpath(args.config)
    os.chdir(os.path.dirname(config_file))

    try:
        # Read the configuration
        config = config_parser.load_config(config_file)
    except (ConfigurationSyntaxError, DataConversionError) as e:
        # Make the config exceptions a bit more readable
        msg = e.message
        if e.lineno and e.lineno != -1:
            msg += ' on line {}'.format(e.lineno)
        if e.url:
            parts = urlparse(e.url)
            msg += ' in {}'.format(parts.path)
        logger.critical(msg)
        return 1
    except ValueError as e:
        logger.critical(e)
        return 1

    # Immediately drop privileges in a non-permanent way so we create logs with the correct owner
    drop_privileges(config.user, config.group, permanent=False)

    # Trigger the forkserver at this point, with dropped privileges, and ignoring KeyboardInterrupt
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    multiprocessing.set_start_method('forkserver')
    forkserver.ensure_running()

    # Initialise the logger
    config.logging.configure(logger, verbosity=args.verbosity)
    logger.info("Starting Python DHCPv6 server v{}".format(dhcpkit.__version__))

    # Create our selector
    sel = selectors.DefaultSelector()

    # Convert signals to messages on a pipe
    signal_r, signal_w = os.pipe()
    flags = fcntl.fcntl(signal_w, fcntl.F_GETFL, 0)
    flags = flags | os.O_NONBLOCK
    fcntl.fcntl(signal_w, fcntl.F_SETFL, flags)
    signal.set_wakeup_fd(signal_w)
    sel.register(signal_r, selectors.EVENT_READ)

    # Ignore normal signal handling by attaching dummy handlers (SIG_IGN will not put messages on the pipe)
    signal.signal(signal.SIGINT, lambda signum, frame: None)
    signal.signal(signal.SIGTERM, lambda signum, frame: None)
    signal.signal(signal.SIGHUP, lambda signum, frame: None)

    # Excessive exception catcher
    exception_history = []

    # Some stats
    message_count = 0

    # Create a queue for our children to log to
    logging_queue = multiprocessing.Queue()

    statistics = ServerStatistics()
    listeners = []
    control_socket = None
    stopping = False

    while not stopping:
        # Safety first: assume we want to quit when we break the inner loop unless told otherwise
        stopping = True

        # Initialise the logger again
        lowest_log_level = config.logging.configure(logger, verbosity=args.verbosity)

        # Enable multiprocessing logging, mostly useful for development
        mp_logger = get_logger()
        mp_logger.propagate = config.logging.log_multiprocessing

        global logging_thread
        if logging_thread:
            logging_thread.stop()
        logging_thread = queue_logger.QueueLevelListener(logging_queue, *logger.handlers)
        logging_thread.start()

        # Use the logging queue in the main process as well so messages don't get out of order
        logging_handler = WorkerQueueHandler(logging_queue)
        logging_handler.setLevel(lowest_log_level)
        logger.handlers = [logging_handler]

        # Restore our privileges while we write the PID file and open network listeners
        restore_privileges()

        # Open the network listeners
        old_listeners = listeners
        listeners = []
        for listener_factory in config.listener_factories:
            # Create new listener while trying to re-use existing sockets
            listeners.append(listener_factory(old_listeners + listeners))

        # Write the PID file
        pid_filename = create_pidfile(args=args, config=config)

        # Create a control socket
        if control_socket:
            sel.unregister(control_socket)
            control_socket.close()
        control_socket = create_control_socket(config=config)
        if control_socket:
            sel.register(control_socket, selectors.EVENT_READ)

        # And drop privileges again
        drop_privileges(config.user, config.group, permanent=False)

        # Remove any file descriptors from the previous config
        for fd, key in list(sel.get_map().items()):
            # Don't remove our signal handling pipe, control socket,
            # still existing listeners and control connections
            if key.fileobj is signal_r \
                    or (control_socket and key.fileobj is control_socket) \
                    or key.fileobj in listeners \
                    or isinstance(key.fileobj, ControlConnection):
                continue

            # Seems we don't need this one anymore
            sel.unregister(key.fileobj)

        # Collect all the file descriptors we want to listen to
        existing_listeners = [key.fileobj for key in sel.get_map().values()]
        for listener in listeners:
            if listener not in existing_listeners:
                sel.register(listener, selectors.EVENT_READ)

        # Configuration tree
        message_handler = config.create_message_handler()

        # Make sure we have space to store all the interface statistics
        statistics.set_categories(config.statistics)

        # Start worker processes
        with NonBlockingPool(processes=config.workers,
                             initializer=setup_worker,
                             initargs=(message_handler, logging_queue,
                                       lowest_log_level, statistics)) as pool:
            logger.info("Python DHCPv6 server is ready to handle requests")

            running = True
            while running:
                # noinspection PyBroadException
                try:
                    events = sel.select()
                    for key, mask in events:
                        # Handle signal notifications
                        if key.fileobj == signal_r:
                            signal_nr = os.read(signal_r, 1)
                            if signal_nr[0] in (signal.SIGHUP,):
                                # SIGHUP tells the server to reload
                                try:
                                    # Read the new configuration
                                    config = config_parser.load_config(config_file)
                                except (ConfigurationSyntaxError, DataConversionError) as e:
                                    # Make the config exceptions a bit more readable
                                    msg = "Not reloading: " + str(e.message)
                                    if e.lineno and e.lineno != -1:
                                        msg += ' on line {}'.format(e.lineno)
                                    if e.url:
                                        parts = urlparse(e.url)
                                        msg += ' in {}'.format(parts.path)
                                    logger.critical(msg)
                                    continue
                                except ValueError as e:
                                    logger.critical("Not reloading: " + str(e))
                                    continue

                                logger.info("DHCPv6 server restarting after configuration change")
                                running = False
                                stopping = False
                                continue

                            elif signal_nr[0] in (signal.SIGINT, signal.SIGTERM):
                                logger.debug("Received termination request")
                                running = False
                                stopping = True
                                break

                        elif isinstance(key.fileobj, ControlSocket):
                            # A new control connection request
                            control_connection = key.fileobj.accept()
                            if control_connection:
                                # We got a connection, listen to events
                                sel.register(control_connection, selectors.EVENT_READ)

                        elif isinstance(key.fileobj, ControlConnection):
                            # Let the connection handle received data
                            control_connection = key.fileobj
                            commands = control_connection.get_commands()
                            for command in commands:
                                if command:
                                    logger.debug("Received control command '{}'".format(command))

                                if command == 'help':
                                    control_connection.send("Recognised commands:")
                                    control_connection.send("  help")
                                    control_connection.send("  stats")
                                    control_connection.send("  stats-json")
                                    control_connection.send("  reload")
                                    control_connection.send("  shutdown")
                                    control_connection.send("  quit")
                                    control_connection.acknowledge()
                                elif command == 'stats':
                                    control_connection.send(str(statistics))
                                    control_connection.acknowledge()
                                elif command == 'stats-json':
                                    control_connection.send(json.dumps(statistics.export()))
                                    control_connection.acknowledge()
                                elif command == 'reload':
                                    # Simulate a SIGHUP to reload
                                    os.write(signal_w, bytes([signal.SIGHUP]))
                                    control_connection.acknowledge('Reloading')
                                elif command == 'shutdown':
                                    # Simulate a SIGTERM to shut down
                                    os.write(signal_w, bytes([signal.SIGTERM]))
                                    control_connection.acknowledge('Shutting down')
                                elif command == 'quit' or command is None:
                                    if command == 'quit':
                                        # User nicely signing off
                                        control_connection.acknowledge()
                                    control_connection.close()
                                    sel.unregister(control_connection)
                                    break
                                else:
                                    logger.warning("Rejecting unknown control command '{}'".format(command))
                                    control_connection.reject()

                        elif isinstance(key.fileobj, Listener):
                            packet = key.fileobj.recv_request()

                            # Update stats
                            message_count += 1

                            # Create the callback
                            callback, error_callback = create_handler_callbacks(key.fileobj, packet.message_id)

                            # Dispatch
                            pool.apply_async(handle_message, args=(packet,),
                                             callback=callback, error_callback=error_callback)

                except Exception as e:
                    # Catch-all exception handler
                    logger.exception("Caught unexpected exception {!r}".format(e))

                    now = time.monotonic()

                    # Add new exception time to the history
                    exception_history.append(now)

                    # Remove exceptions outside the window from the history
                    cutoff = now - config.exception_window
                    while exception_history and exception_history[0] < cutoff:
                        exception_history.pop(0)

                    # Did we receive too many exceptions shortly after each other?
                    if len(exception_history) > config.max_exceptions:
                        logger.critical("Received more than {} exceptions in {} seconds, "
                                        "exiting".format(config.max_exceptions, config.exception_window))
                        running = False
                        stopping = True

            pool.close()
            pool.join()

    # Regain root so we can delete the PID file and control socket
    restore_privileges()

    try:
        if pid_filename:
            os.unlink(pid_filename)
            logger.info("Removing PID-file {}".format(pid_filename))
    except OSError:
        pass

    try:
        if control_socket:
            os.unlink(control_socket.socket_path)
            logger.info("Removing control socket {}".format(control_socket.socket_path))
    except OSError:
        pass

    logger.info("Shutting down Python DHCPv6 server v{}".format(dhcpkit.__version__))
    return 0
class SocketServer:
    """Socket server."""

    # NOTE: class-level attributes, shared by all instances
    selector = selectors.DefaultSelector()
    clients = {}

    def __init__(self, host='0.0.0.0', port=3004, **kwargs):
        """Constructor."""
        self.host = host
        self.port = port

        lsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Avoid bind() exception: OSError: [Errno 48] Address already in use
        lsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        lsock.bind((self.host, self.port))
        lsock.listen()
        # TODO : use another logging method
        print("listening on", (self.host, self.port))
        lsock.setblocking(False)
        self.selector.register(lsock, selectors.EVENT_READ | selectors.EVENT_WRITE, data=None)

    def run(self):
        """Run socket server."""
        try:
            while True:
                events = self.selector.select(timeout=None)
                for key, mask in events:
                    if key.data is None:
                        self.accept_wrapper(key.fileobj)
                    else:
                        message = key.data
                        try:
                            message.process_events(mask)
                        except Exception:
                            print(
                                "main: error: exception for",
                                f"{message.addr}:\n{traceback.format_exc()}",
                            )
                            message.close()
        except KeyboardInterrupt:
            # TODO : use another logging method
            print("caught keyboard interrupt, exiting")
        finally:
            self.selector.close()

    def accept_wrapper(self, sock):
        """Accept connection wrapper."""
        connection, address = sock.accept()  # Should be ready to read
        # TODO : use another logging method
        print("accepted connection from", address)
        connection.setblocking(False)
        message = SocketEventListener(self.selector, connection, address)
        self.clients[address[1]] = {'is_authenticated': False}
        self.selector.register(connection, selectors.EVENT_READ, data=message)

    @classmethod
    def get_clients(cls):
        """Retrieve all connected clients."""
        # TODO : use another logging method
        print('nb clients:', len(cls.clients))
        return cls.clients

    @classmethod
    def remove_client(cls, client):
        """Remove client for connected client."""
        # TODO : use another logging method
        print('remove client', client)
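A usage sketch, assuming a `SocketEventListener` class that implements `process_events(mask)` and `close()` (its definition is not shown in this excerpt):

if __name__ == '__main__':
    server = SocketServer(host='0.0.0.0', port=3004)
    server.run()  # blocks; Ctrl-C closes the selector cleanly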
def __init__(self, client_sock, outside_sock):
    self.client_sock = client_sock
    self.outside_sock = outside_sock
    self.stopped = False
    self.selector = selectors.DefaultSelector()
def main():
    if len(sys.argv) < 4:
        print('Usage: {} inputfile outputfile cmd ...'.format(sys.argv[0]), file=sys.stderr)
        sys.exit(1)
    inputfile, outputfile = sys.argv[1], sys.argv[2]
    cmd = sys.argv[3:]

    # read all of the input
    with open(inputfile, 'rb') as fp:
        inputData = fp.read()

    # read all of the expected output
    with open(outputfile, 'rb') as fp:
        outputData = fp.read()

    # launch the process
    proc = subprocess.Popen(cmd, bufsize=0,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdin = proc.stdin.fileno()
    stdout = proc.stdout.fileno()
    stderr = proc.stderr.fileno()

    # start monitoring the output channels
    sel = selectors.DefaultSelector()
    sel.register(stdout, selectors.EVENT_READ, 'out')
    sel.register(stderr, selectors.EVENT_READ, 'err')

    # the next chunk of input to feed to the process;
    # this gets filled when we have a timeout while waiting for output
    nextInput = b''
    inputClosed = False
    keepGoing = True
    warmup = True
    error = False

    while keepGoing:
        # wait for some output, and if we have input ready
        # check if we can send it
        if len(nextInput) > 0:
            sel.register(stdin, selectors.EVENT_WRITE, 'in')
        events = sel.select(timeout=(warmupdelay if warmup else delay))
        if len(nextInput) > 0:
            sel.unregister(stdin)
        warmup = False

        # timeout? prepare some input to feed to the process
        if len(events) == 0 and len(nextInput) == 0:
            if len(inputData) > 0:
                # grab one line, or everything if there are no newlines
                newline = inputData.find(b'\n')
                if newline == -1:
                    nextInput = inputData
                else:
                    nextInput = inputData[:newline + 1]
            elif not inputClosed:
                os.close(stdin)
                inputClosed = True

        # handle each of the input/output channels that are ready
        for (key, mask) in events:
            if key.data == 'out':
                # there is stdout output ready
                data = os.read(stdout, bufsize)
                if len(data) == 0:
                    keepGoing = False
                    break

                # compare it to the expected output one line at a time
                while len(data) > 0:
                    newline = data.find(b'\n')
                    if newline < 0:
                        chunk = data
                        data = b''
                    else:
                        chunk = data[:newline + 1]
                        data = data[len(chunk):]

                    # does it match what we expected?
                    if outputData.startswith(chunk):
                        print(chunk.decode('utf-8'), end='')
                        outputData = outputData[len(chunk):]
                    else:
                        print('\n!!INCORRECT OUTPUT!! Your next line of output was:')
                        print(repr(chunk.decode('utf-8')))
                        print('but the next line of output expected was:')
                        newline = outputData.find(b'\n')
                        if newline < 0:
                            print(repr(outputData.decode('utf-8')))
                        else:
                            print(repr(outputData[:newline + 1].decode('utf-8')))
                        keepGoing = False
                        error = True
                        break

            if key.data == 'err':
                # there is stderr output ready
                data = os.read(stderr, bufsize)
                if len(data) == 0:
                    keepGoing = False
                    break
                print('\n!!ERROR OUTPUT!!')
                print(data.decode('utf-8'), end='')
                keepGoing = False
                error = True

            if key.data == 'in':
                # the stdin pipe is ready to receive data
                count = os.write(stdin, nextInput)
                if count == 0:
                    keepGoing = False
                    break
                print(nextInput[:count].decode('utf-8'), end='')
                inputData = inputData[count:]
                nextInput = b''

    # wait for the child process to end
    proc.kill()
    proc.wait()
    os.close(stdout)
    os.close(stderr)
    if not inputClosed:
        os.close(stdin)

    # report an error if we noticed error output, wrong regular output, or
    # any input/output leftover that should have been consumed
    if error or len(inputData) > 0 or len(outputData) > 0:
        sys.exit(1)
def __init__(self) -> None:
    self._selector = selectors.DefaultSelector()
def init_process(self):
    self.tpool = self.get_thread_pool()
    self.poller = selectors.DefaultSelector()
    self._lock = RLock()
    super().init_process()
def __init__(self, ip='127.0.0.1', port=9999):
    self.sock = socket.socket()
    self.address = ip, port
    self.event = threading.Event()
    self.selector = selectors.DefaultSelector()
def __init__(self, package_queue):
    self._selector = selectors.DefaultSelector()
    self._package_factory = PackageFactory()
    self._package_queue = package_queue
    self._running = False
def __init__(self, **configs):
    """Initialize an asynchronous kafka client

    Keyword Arguments:
        bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
            strings) that the consumer should contact to bootstrap initial
            cluster metadata. This does not have to be the full node list.
            It just needs to have at least one broker that will respond to a
            Metadata API Request. Default port is 9092. If no servers are
            specified, will default to localhost:9092.
        client_id (str): a name for this client. This string is passed in
            each request to servers and can be used to identify specific
            server-side log entries that correspond to this client. Also
            submitted to GroupCoordinator for logging with respect to
            consumer group administration. Default: 'kafka-python-{version}'
        request_timeout_ms (int): Client request timeout in milliseconds.
            Default: 40000.
        reconnect_backoff_ms (int): The amount of time in milliseconds to
            wait before attempting to reconnect to a given host.
            Default: 50.
        max_in_flight_requests_per_connection (int): Requests are pipelined
            to kafka brokers up to this number of maximum requests per
            broker connection. Default: 5.
        send_buffer_bytes (int): The size of the TCP send buffer (SO_SNDBUF)
            to use when sending data. Default: None (relies on system
            defaults). Java client defaults to 131072.
        receive_buffer_bytes (int): The size of the TCP receive buffer
            (SO_RCVBUF) to use when reading data. Default: None (relies on
            system defaults). Java client defaults to 32768.
        metadata_max_age_ms (int): The period of time in milliseconds after
            which we force a refresh of metadata even if we haven't seen any
            partition leadership changes to proactively discover any new
            brokers or partitions. Default: 300000
        retry_backoff_ms (int): Milliseconds to backoff when retrying on
            errors. Default: 100.
        security_protocol (str): Protocol used to communicate with brokers.
            Valid values are: PLAINTEXT, SSL. Default: PLAINTEXT.
        ssl_context (ssl.SSLContext): pre-configured SSLContext for wrapping
            socket connections. If provided, all other ssl_* configurations
            will be ignored. Default: None.
        ssl_check_hostname (bool): flag to configure whether ssl handshake
            should verify that the certificate matches the broker's hostname.
            Default: True.
        ssl_cafile (str): optional filename of CA file to use in certificate
            verification. Default: None.
        ssl_certfile (str): optional filename of file in PEM format
            containing the client certificate, as well as any CA certificates
            needed to establish the certificate's authenticity.
            Default: None.
        ssl_keyfile (str): optional filename containing the client private
            key. Default: None.
        ssl_crlfile (str): optional filename containing the CRL to check for
            certificate expiration. By default, no CRL check is done. When
            providing a file, only the leaf certificate will be checked
            against this CRL. The CRL can only be checked with Python 3.4+
            or 2.7.9+. Default: None.
    """
    self.config = copy.copy(self.DEFAULT_CONFIG)
    for key in self.config:
        if key in configs:
            self.config[key] = configs[key]

    self.cluster = ClusterMetadata(**self.config)
    self._topics = set()  # empty set will fetch all topic metadata
    self._metadata_refresh_in_progress = False
    self._selector = selectors.DefaultSelector()
    self._conns = {}
    self._connecting = set()
    self._refresh_on_disconnects = True
    self._delayed_tasks = DelayedTaskQueue()
    self._last_bootstrap = 0
    self._bootstrap_fails = 0
    self._wake_r, self._wake_w = socket.socketpair()
    self._wake_r.setblocking(False)
    self._selector.register(self._wake_r, selectors.EVENT_READ)
    self._closed = False
    self._bootstrap(collect_hosts(self.config['bootstrap_servers']))
def __init__(self, path):
    self._path = path
    self.fd = None
    self.sel = selectors.DefaultSelector()
def multiplex(self):
    """
    IO multiplex constructor.

    :return: a selector instance.
    """
    return selectors.DefaultSelector()
def __init__(self, transaction):
    self.transaction = transaction
    self.sel = selectors.DefaultSelector()
    self._HOST = '127.0.0.1'
    self._PORT = 60001
    self._broadcasting_done = False
def main(filename, *addresses):
    server_addresses = [parse_address(addr) for addr in addresses]
    servers = itertools.cycle(server_addresses)

    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect(next(servers))
        expected_file_digest, sections, total_size = list_sections(s)

    file_contents = bytearray(total_size)
    numOfSections = 0
    with selectors.DefaultSelector() as sel:
        for section in sections:
            numOfSections += 1
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.setblocking(False)
            err = s.connect_ex(next(servers))
            if err != IN_PROGRESS:
                print(f'connect_ex returned {err}: {os.strerror(err)}')
                return err
            sel.register(s, selectors.EVENT_READ | selectors.EVENT_WRITE, section)

        completed_sections = 0  # the original mixed 'Compsections' and 'CompSections'; one name is used here
        while completed_sections != len(sections):
            for key, mask in sel.select(timeout=1):
                connection = key.fileobj
                current_section = key.data
                if mask & selectors.EVENT_READ:
                    print('ready to read section ', key.data.num)
                    try:
                        data = connection.recv(SIZE_1_KiB)
                        while data:
                            current_section.data.extend(data)
                            data = connection.recv(SIZE_1_KiB)
                    except BlockingIOError:
                        # non-blocking socket: no more data available right now
                        continue
                    if current_section.check_integrity():
                        print(f"Current section: {current_section.num} is integral")
                        file_contents[current_section.from_byte:current_section.to_byte] = current_section.data
                    connection.close()
                    sel.unregister(connection)
                    completed_sections += 1
                if mask & selectors.EVENT_WRITE:
                    if not current_section.request_sent:
                        current_section.request_sent = True
                        print("sending request: ", current_section.num)
                        connection.send(f'SECTION {current_section.num}'.encode())

        print('Shutting down')
        sel.close()

    file_digest = md5(file_contents)
    if file_digest != expected_file_digest:
        print(f'{filename}: digest {file_digest}, expected {expected_file_digest}')
    else:
        with open(filename, 'wb') as f:
            f.write(file_contents)
                    action='store', type=int, default=0)
option.add_argument('-n', '--NAME',
                    help="group name",
                    action='store', type=str, required=True)

# Members join immediately.
gb_initial_group = set()
# Maps connections to members.
gb_connpair = dict()
gb_selector = selectors.DefaultSelector()


class MetaStorage(metaclass=ABCMeta):
    """ Interface for a database which stores content. """

    @abstractmethod
    def read_list(self, name):
        """
        Return the list of conversations received by name.

        :param name:
        :return:
        """
        pass
def __init__(self, host='127.0.0.1', port=5000):
    self.host = host
    self.port = port
    self.selector = selectors.DefaultSelector()
def start(self, hostname, port):
    """
    Connect to a Rivendell system and begin processing PAD events.
    Once started, a pypad object can be interacted with
    only within its callback method.

    Takes the following arguments:

    hostname - The hostname or IP address of the Rivendell system.

    port - The TCP port to connect to. For most cases, just use
           'pypad.PAD_TCP_PORT'.
    """
    # So we exit cleanly when shut down by rdpadengined(8)
    signal.signal(signal.SIGTERM, SigHandler)

    # Open rd.conf(5)
    rd_config = configparser.ConfigParser(interpolation=None)
    rd_config.read_file(open('/etc/rd.conf'))

    # Open the syslog
    pypad_name = sys.argv[0].split('/')[-1]
    syslog.openlog(pypad_name, logoption=syslog.LOG_PID,
                   facility=int(rd_config.get('Identity', 'SyslogFacility',
                                              fallback=syslog.LOG_USER)))

    # Connect to the PAD feed
    sock = socket.socket(socket.AF_INET)
    sock.connect((hostname, port))

    timeout = None
    deadline = None
    if self.__timer_interval is not None:
        timeout = self.__timer_interval
        deadline = datetime.datetime.now() + datetime.timedelta(seconds=timeout)

    sel = selectors.DefaultSelector()
    sel.register(sock, selectors.EVENT_READ)
    c = bytes()
    line = bytes()
    msg = ""

    while True:
        if len(sel.select(timeout)) == 0:
            # Timer event
            now = datetime.datetime.now()
            if now >= deadline:
                timeout = self.__timer_interval
                deadline = now + datetime.timedelta(seconds=timeout)
                self.__pypad_TimerProcess(self.__config_parser)
            else:
                timeout = (deadline - now).total_seconds()
        else:
            # Read one byte of PAD data
            c = sock.recv(1)
            line += c
            if c[0] == 10:  # newline: we have a complete line
                linebytes = line.decode('utf-8', 'replace')
                msg += linebytes
                if linebytes == '\r\n':
                    # A blank line terminates one JSON update
                    self.__pypad_Process(Update(json.loads(msg),
                                                self.__config_parser,
                                                rd_config))
                    msg = ""
                line = bytes()
            if self.__timer_interval is not None:
                timeout = (deadline - datetime.datetime.now()).total_seconds()
def __init__(self, ip, port):
    self.port = port
    self.ip = ip
    self.selector = selectors.DefaultSelector()  # initialize the selector
def main(listener_fd, alive_r, preload, main_path=None, sys_path=None):
    '''Run forkserver.'''
    if preload:
        if '__main__' in preload and main_path is not None:
            process.current_process()._inheriting = True
            try:
                spawn.import_main_path(main_path)
            finally:
                del process.current_process()._inheriting
        for modname in preload:
            try:
                __import__(modname)
            except ImportError:
                pass

    util._close_stdin()

    handlers = {
        # no need to reap zombie processes;
        signal.SIGCHLD: signal.SIG_IGN,
        # protect the process from ^C
        signal.SIGINT: signal.SIG_IGN,
    }
    old_handlers = {sig: signal.signal(sig, val)
                    for (sig, val) in handlers.items()}

    with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \
            selectors.DefaultSelector() as selector:
        _forkserver._forkserver_address = listener.getsockname()

        selector.register(listener, selectors.EVENT_READ)
        selector.register(alive_r, selectors.EVENT_READ)

        while True:
            try:
                while True:
                    rfds = [key.fileobj for (key, events) in selector.select()]
                    if rfds:
                        break

                if alive_r in rfds:
                    # EOF because no more client processes left
                    assert os.read(alive_r, 1) == b''
                    raise SystemExit

                assert listener in rfds
                with listener.accept()[0] as s:
                    code = 1
                    if os.fork() == 0:
                        try:
                            _serve_one(s, listener, alive_r, old_handlers)
                        except Exception:
                            sys.excepthook(*sys.exc_info())
                            sys.stderr.flush()
                        finally:
                            os._exit(code)

            except OSError as e:
                if e.errno != errno.ECONNABORTED:
                    raise
def capture_script_output(proc, combined_path, stdout_path, stderr_path,
                          timeout_seconds=None):
    """Capture stdout and stderr from `proc`.

    Standard output is written to a file named by `stdout_path`, and standard
    error is written to a file named by `stderr_path`. Both are also written
    to a file named by `combined_path`.

    If the given subprocess forks additional processes, and these write to
    the same stdout and stderr, their output will be captured only as long
    as `proc` is running.

    Optionally a timeout can be given in seconds. This time is padded by
    5 minutes to allow for script cleanup. If the script runs past the
    timeout the process is killed and an exception is raised. Forked
    processes are not subject to the timeout.

    :return: The exit code of `proc`.
    """
    if timeout_seconds in (None, 0):
        timeout = None
    else:
        # Pad the timeout by 5 minutes to allow for cleanup.
        timeout = time.monotonic() + timeout_seconds + (60 * 5)

    # Create the files and then open them in read/write mode for terminal
    # emulation.
    for path in (stdout_path, stderr_path, combined_path):
        open(path, 'w').close()

    with open(stdout_path, 'r+b') as out, open(stderr_path, 'r+b') as err:
        with open(combined_path, 'r+b') as combined:
            with selectors.DefaultSelector() as selector:
                selector.register(proc.stdout, selectors.EVENT_READ, out)
                selector.register(proc.stderr, selectors.EVENT_READ, err)
                while selector.get_map() and proc.poll() is None:
                    # Select with a short timeout so that we don't tight loop.
                    _select_script_output(selector, combined, 0.1, proc)
                    if timeout is not None and time.monotonic() > timeout:
                        break
                # Process has finished or has closed stdout and stderr.
                # Process anything still sitting in the latter's buffers.
                _select_script_output(selector, combined, 0.0, proc)

    now = time.monotonic()
    # Wait for the process to finish.
    if timeout is None:
        # No timeout; just wait until the process finishes.
        return proc.wait()
    elif now >= timeout:
        # The loop above detected that the timeout was exceeded; kill the
        # process.
        proc.kill()
        raise TimeoutExpired(proc.args, timeout_seconds)
    else:
        # stdout and stderr have been closed but the timeout has not been
        # exceeded. Wait with the remaining amount of time.
        try:
            return proc.wait(timeout=(timeout - now))
        except TimeoutExpired:
            # Make sure the process was killed
            proc.kill()
            raise
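The helper `_select_script_output` is not shown in this excerpt. A plausible sketch of what it might do, assuming it drains each ready pipe and tees the bytes to both the per-stream file (stored as the key's `data`) and the combined file (this is a reconstruction, not the project's real helper):

def _select_script_output(selector, combined, timeout, proc):
    # Hypothetical reconstruction: read every pipe that select() reports as
    # ready, writing to its own capture file and to the combined file.
    # (`proc` is unused in this sketch; the real helper may consult it.)
    for key, _ in selector.select(timeout):
        chunk = key.fileobj.read1(65536)  # one read per ready pipe
        if not chunk:
            selector.unregister(key.fileobj)  # pipe closed by the child
            continue
        key.data.write(chunk)  # per-stream file (out or err)
        combined.write(chunk)  # combined capture file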
def main(listener_fd, alive_r, preload, main_path=None, sys_path=None):
    '''Run forkserver.'''
    if preload:
        if '__main__' in preload and main_path is not None:
            process.current_process()._inheriting = True
            try:
                spawn.import_main_path(main_path)
            finally:
                del process.current_process()._inheriting
        for modname in preload:
            try:
                __import__(modname)
            except ImportError:
                pass

    util._close_stdin()

    sig_r, sig_w = os.pipe()
    os.set_blocking(sig_r, False)
    os.set_blocking(sig_w, False)

    def sigchld_handler(*_unused):
        # Dummy signal handler, doesn't do anything
        pass

    handlers = {
        # unblocking SIGCHLD allows the wakeup fd to notify our event loop
        signal.SIGCHLD: sigchld_handler,
        # protect the process from ^C
        signal.SIGINT: signal.SIG_IGN,
    }
    old_handlers = {sig: signal.signal(sig, val)
                    for (sig, val) in handlers.items()}

    # calling os.write() in the Python signal handler is racy
    signal.set_wakeup_fd(sig_w)

    # map child pids to client fds
    pid_to_fd = {}

    with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \
            selectors.DefaultSelector() as selector:
        _forkserver._forkserver_address = listener.getsockname()

        selector.register(listener, selectors.EVENT_READ)
        selector.register(alive_r, selectors.EVENT_READ)
        selector.register(sig_r, selectors.EVENT_READ)

        while True:
            try:
                while True:
                    rfds = [key.fileobj for (key, events) in selector.select()]
                    if rfds:
                        break

                if alive_r in rfds:
                    # EOF because no more client processes left
                    assert os.read(alive_r, 1) == b'', "Not at EOF?"
                    raise SystemExit

                if sig_r in rfds:
                    # Got SIGCHLD
                    os.read(sig_r, 65536)  # exhaust
                    while True:
                        # Scan for child processes
                        try:
                            pid, sts = os.waitpid(-1, os.WNOHANG)
                        except ChildProcessError:
                            break
                        if pid == 0:
                            break
                        child_w = pid_to_fd.pop(pid, None)
                        if child_w is not None:
                            if os.WIFSIGNALED(sts):
                                returncode = -os.WTERMSIG(sts)
                            else:
                                if not os.WIFEXITED(sts):
                                    raise AssertionError(
                                        "Child {0:n} status is {1:n}".format(
                                            pid, sts))
                                returncode = os.WEXITSTATUS(sts)
                            # Send exit code to client process
                            try:
                                write_signed(child_w, returncode)
                            except BrokenPipeError:
                                # client vanished
                                pass
                            os.close(child_w)
                        else:
                            # This shouldn't happen really
                            warnings.warn('forkserver: waitpid returned '
                                          'unexpected pid %d' % pid)

                if listener in rfds:
                    # Incoming fork request
                    with listener.accept()[0] as s:
                        # Receive fds from client
                        fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1)
                        if len(fds) > MAXFDS_TO_SEND:
                            raise RuntimeError(
                                "Too many ({0:n}) fds to send".format(
                                    len(fds)))
                        child_r, child_w, *fds = fds
                        s.close()
                        pid = os.fork()
                        if pid == 0:
                            # Child
                            code = 1
                            try:
                                listener.close()
                                selector.close()
                                unused_fds = [alive_r, child_w, sig_r, sig_w]
                                unused_fds.extend(pid_to_fd.values())
                                code = _serve_one(child_r, fds,
                                                  unused_fds,
                                                  old_handlers)
                            except Exception:
                                sys.excepthook(*sys.exc_info())
                                sys.stderr.flush()
                            finally:
                                os._exit(code)
                        else:
                            # Send pid to client process
                            try:
                                write_signed(child_w, pid)
                            except BrokenPipeError:
                                # client vanished
                                pass
                            pid_to_fd[pid] = child_w
                            os.close(child_r)
                            for fd in fds:
                                os.close(fd)

            except OSError as e:
                if e.errno != errno.ECONNABORTED:
                    raise
from typing import Callable
import types
import socket
import selectors
from aioconsole import aprint
import os
import time

# ----- GLOBAL CONSTANTS ----------
DEFAULT_SELECTOR = selectors.DefaultSelector()  # the default selector to use for socket IO
PORT_NUMBER = 5000  # the port to run the server on; used as the default if no port number gets set at start
GLOBAL_CONNECTIONS = {}  # a global dictionary to store our connections
LINE_SEP = '-----------------------------------'  # line separator constant
EVENTS = selectors.EVENT_READ | selectors.EVENT_WRITE  # the events to check for in our selector


# ------ Server functions --------------
def set_port_number(port_num: int) -> None:
    global PORT_NUMBER
    PORT_NUMBER = port_num


def accept_wrapper(sock: socket.socket) -> None:
    '''
    Wrapper function to accept a socket connection and register it
    with the default selector.
    '''
    conn, addr = sock.accept()  # should be ready to read
    print("accepted connection from", addr)
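    # --- The excerpt ends before the registration step its docstring
    # --- promises. A plausible completion (an assumption, not the original
    # --- code), keeping per-connection state in a SimpleNamespace:
    conn.setblocking(False)
    data = types.SimpleNamespace(addr=addr, inb=b'', outb=b'')  # assumed per-connection state
    DEFAULT_SELECTOR.register(conn, EVENTS, data=data)
    GLOBAL_CONNECTIONS[addr] = conn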