def run(self):
    with ThreadPool(thread_name_prefix="HandlePool-%d" % self._counter()) as handle_pool:
        with multiprocessing_connection.Listener(
                self.address, authkey=self.authkey) as listener:
            c_recv, c_send = multiprocessing_connection.Pipe(False)

            def after_delete_handle(
                    t: Tuple[multiprocessing_connection.Connection, bool]):
                k, v = t
                self.logger.debug("close timeout conn %s" % k)
                c_send.send_bytes(b'ok')
                k.close()

            conn_lru_dict = LRUCache(
                size=1024, timeout=CONN_LRU_TIMEOUT,
                after_delete_handle=after_delete_handle)
            handle_pool.spawn(self._process, conn_lru_dict, handle_pool,
                              c_recv, c_send)
            while True:
                try:
                    conn = listener.accept()
                    self.logger.debug("get a new conn %s" % conn)
                    conn_lru_dict[conn] = True
                    c_send.send_bytes(b'ok')
                except:
                    logging.exception("error")

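# The _process worker that the accept loop above spawns is not shown. Below is
# a purely hypothetical sketch of such a loop, assuming it multiplexes the
# cached connections together with the c_recv wake-up pipe via
# multiprocessing.connection.wait; conn_lru_dict, handle_pool and the b'ok'
# wake-up bytes come from the snippet above, while the handler name
# self.handle and the dict-like LRUCache.keys() API are assumptions.
def _process(self, conn_lru_dict, handle_pool, c_recv, c_send):
    while True:
        # Block until any cached connection (or the wake-up pipe) is readable.
        wait_list = list(conn_lru_dict.keys()) + [c_recv]
        for ready in multiprocessing_connection.wait(wait_list):
            if ready is c_recv:
                # Drain the b'ok' marker so the wait list is rebuilt with any
                # newly accepted (or evicted) connections.
                c_recv.recv_bytes()
                continue
            # Refresh the LRU entry and hand the request to a worker thread.
            conn_lru_dict[ready] = True
            handle_pool.spawn(self.handle, ready)
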
def start(self, initializer=None, initargs=()):
    """Spawn a server process for this manager object."""
    if initializer is not None and not hasattr(initializer, '__call__'):
        raise TypeError('initializer must be a callable')

    # pipe over which we will retrieve address of server
    reader, writer = connection.Pipe(duplex=False)

    # spawn process which runs a server
    self._process = Process(
        target=type(self)._run_server,
        args=(self._registry, self._address, self._authkey,
              self._serializer, writer, initializer, initargs))
    ident = ':'.join(str(i) for i in self._process._identity)
    self._process.name = type(self).__name__ + '-' + ident
    self._process.start()

    # get address of server
    writer.close()
    self._address = reader.recv()
    reader.close()

    # register a finalizer
    self._state.value = State.STARTED
    self.shutdown = util.Finalize(
        self, type(self)._finalize_manager,
        args=(self._process, self._address, self._authkey,
              self._state, self._Client),
        exitpriority=0)
    return

def __init__(self, maxsize=0, reducers=None, ctx=None):
    if sys.version_info[:2] >= (3, 4):
        super().__init__(maxsize=maxsize, ctx=ctx)
    else:
        if maxsize <= 0:
            # Can raise ImportError (see issues #3770 and #23400)
            maxsize = SEM_VALUE_MAX
        if ctx is None:
            ctx = get_context()

        self._maxsize = maxsize
        self._reader, self._writer = connection.Pipe(duplex=False)
        self._rlock = ctx.Lock()
        self._opid = os.getpid()
        if sys.platform == 'win32':
            self._wlock = None
        else:
            self._wlock = ctx.Lock()
        self._sem = ctx.BoundedSemaphore(maxsize)
        # For use by concurrent.futures
        self._ignore_epipe = False

        self._after_fork()

        if sys.platform != 'win32':
            util.register_after_fork(self, Queue._after_fork)

    self._reducers = reducers

def start(self):
    '''
    Spawn a server process for this manager object
    '''
    assert self._state.value == State.INITIAL

    # pipe over which we will retrieve address of server
    reader, writer = connection.Pipe(duplex=False)

    # spawn process which runs a server
    self._process = Process(
        target=type(self)._run_server,
        args=(self._registry, self._address, self._authkey,
              self._serializer, writer),
    )
    ident = ':'.join(str(i) for i in self._process._identity)
    self._process.name = type(self).__name__ + '-' + ident
    self._process.start()

    # get address of server
    writer.close()
    self._address = reader.recv()
    reader.close()

    # register a finalizer
    self._state.value = State.STARTED
    self.shutdown = util.Finalize(
        self, type(self)._finalize_manager,
        args=(self._process, self._address, self._authkey,
              self._state, self._Client),
        exitpriority=0
    )

def __init__(self, max_workers=None):

    self._conn, child_conn = connection.Pipe()

    # Run the host in a brand-new bash. This gives the shell RC files
    # the chance to clean out whatever garbage the calling application
    # (e.g. Maya) has put into the environment. Ideally this would be
    # cleaned up by sitetools.environ, but I haven't had much success
    # with doing that at Mark Media.
    # TODO: Go back to calling python directly when that is fixed.
    cmd = ['bash', '-lc', 'python -m uifutures.host %s' % child_conn.fileno()]
    env = None

    self.proc = subprocess.Popen(cmd, env=env)
    child_conn.close()

    # Later, we may need to wait on the handshake to make sure that the
    # process has started. But since we know that the socket is open
    # (it is an OS pipe), we don't have to wait.

    # Send some configuration over.
    if max_workers:
        self._conn.send(dict(
            type='config',
            max_workers=max_workers,
        ))

    self._futures = {}
    self._host_alive = True
    self._host_listener_thread = threading.Thread(target=self._host_listener)
    self._host_listener_thread.daemon = True
    self._host_listener_thread.start()

def __init__(self, max_workers=None):

    self._conn, child_conn = connection.Pipe()

    cmd = ['python', '-m', 'uifutures.host', str(child_conn.fileno())]
    env = None

    self.proc = subprocess.Popen(cmd, env=env)
    child_conn.close()

    # Later, we may need to wait on the handshake to make sure that the
    # process has started. But since we know that the socket is open
    # (it is an OS pipe), we don't have to wait.

    # Send some configuration over.
    if max_workers:
        self._conn.send(dict(
            type='config',
            max_workers=max_workers,
        ))

    self._futures = {}
    self._host_alive = True
    self._host_listener_thread = threading.Thread(
        target=self._host_listener)
    self._host_listener_thread.daemon = True
    self._host_listener_thread.start()

def poke(self, allow_start):

    if self.state not in waiting_states:
        return

    if any(self.host.workers[x].state in failed_states
           for x in self.depends_on):
        self.state = DEPENDENCY_FAILED
        return

    if any(self.host.workers[x].state not in finished_states
           for x in self.depends_on):
        self.state = BLOCKED
        return

    if not allow_start:
        self.state = QUEUED
        return

    # Running! Finally...
    self.state = ACTIVE

    # Launch a worker, and tell it to connect to us.
    self.conn, child_conn = connection.Pipe()
    cmd = [
        'python', '-m', 'uifutures.sandbox.the_corner',
        str(child_conn.fileno()),
    ]
    self.proc = subprocess.Popen(cmd)
    child_conn.close()

    # Forward the submission.
    self.conn.send(self.submit_msg)

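# The three snippets above all pass the raw file descriptor of one Pipe() end
# to a child process on its command line. A minimal sketch of the child side
# on POSIX, assuming the descriptor is actually inherited (with the
# close_fds=True default of Python 3.2+, the parent would also have to list
# it in pass_fds); the module layout here is illustrative, not uifutures'
# actual host code.
import sys
from multiprocessing.connection import Connection


def main():
    # The parent passed the fd of its end of the pipe as argv[1].
    fd = int(sys.argv[1])
    # Wrap the inherited descriptor back into a Connection object
    # (POSIX only; on Windows the pipe end is a handle, not an fd).
    conn = Connection(fd)
    msg = conn.recv()  # e.g. the {'type': 'config', ...} dict sent above
    print("received:", msg)


if __name__ == '__main__':
    main()
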
def __init__(self, *, ctx):
    # The bare * makes ctx a keyword-only argument.
    # multiprocessing.Pipe(): the pipe ends can be passed to other processes,
    # but simultaneous reads or simultaneous writes may go wrong, hence the
    # locks below (to be verified).
    self._reader, self._writer = connection.Pipe(duplex=False)
    self._poll = self._reader.poll
    self._rlock = ctx.Lock()  # multiprocessing.Lock()
    self._wlock = None if sys.platform == 'win32' else ctx.Lock()

def __init__(self):
    self._reader, self._writer = connection.Pipe(duplex=False)
    self._rlock = Lock()
    self._poll = self._reader.poll
    if sys.platform == 'win32':
        self._wlock = None
    else:
        self._wlock = Lock()

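# For context, the reader/writer pair and the two locks above are all that is
# needed for SimpleQueue-style get/put. A simplified sketch of how such
# methods are typically layered on top of this constructor, using plain
# pickle rather than whatever reduction the surrounding class really uses:
import pickle


def get(self):
    # Only one reader at a time may pull a message off the pipe.
    with self._rlock:
        data = self._reader.recv_bytes()
    return pickle.loads(data)


def put(self, obj):
    data = pickle.dumps(obj)
    if self._wlock is None:
        # On win32 writes to the pipe are serialized by the OS,
        # so no writer lock is kept.
        self._writer.send_bytes(data)
    else:
        with self._wlock:
            self._writer.send_bytes(data)
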
def __init__(self):
    self._master, self._slave = connection.Pipe(duplex=True)
    self._master_pid = os.getpid()
    self._my_pipe = None
    self.__set_resources()
    multiprocessing.util.register_after_fork(self, GeneDQueue.__set_resources)

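# __set_resources is not shown above. A purely hypothetical sketch of what
# such an after-fork hook usually does with a duplex pipe, namely picking the
# end that belongs to the current process; the attribute names come from the
# constructor above, the logic itself is an assumption.
def __set_resources(self):
    # Hypothetical: the process that created the pipe keeps the master end,
    # every forked child talks back over the slave end.
    if os.getpid() == self._master_pid:
        self._my_pipe = self._master
    else:
        self._my_pipe = self._slave
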
def __init__(self, maxsize=0, *, ctx):
    if maxsize <= 0:
        maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
    self._maxsize = maxsize
    self._reader, self._writer = connection.Pipe(duplex=False)
    self._rlock = ctx.Lock()
    self._wlock = None if sys.platform == 'win32' else ctx.Lock()
    self._opid = os.getpid()
    self._sem = ctx.BoundedSemaphore(maxsize)
    # For use by concurrent.futures
    self._ignore_epipe = False

    self._after_fork()

    if sys.platform != 'win32':
        register_after_fork(self, Queue._after_fork)

def __init__(self, *, copy_tensor=False):
    """
    Args:
        copy_tensor: Set the queue to send a fully serialized tensor
            if ``True``, and only a stub of reference if ``False``.

    See Also:
        :func:`.dump_tensor`
    """
    self._reader, self._writer = connection.Pipe(duplex=False)
    self._reader = ConnectionWrapper(self._reader)
    self._writer = ConnectionWrapper(self._writer)
    self._copy_tensor = copy_tensor

def start_gui():
    '''
    Spawns a new process that runs the pygame gui.

    Parameters
    ----------
    None

    Returns
    ----------
    multiprocessing.connection.PipeConnection
        a Pipe object for the pygame process to send user input to the
        LF program
    multiprocessing.connection.PipeConnection
        a Pipe object for the LF program to send graphics updates to the
        pygame process
    '''
    user_input_pout, user_input_pin = connection.Pipe(duplex=False)
    update_graphics_pout, update_graphics_pin = connection.Pipe(duplex=False)
    multiprocessing.set_start_method("spawn")
    p = multiprocessing.Process(target=gui,
                                args=(user_input_pin, update_graphics_pout))
    p.start()
    return user_input_pout, update_graphics_pin

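# A hypothetical caller-side sketch of how the two pipe ends returned by
# start_gui() could be used; the message shapes (a "kind" key, printing the
# received input) are illustrative and not part of the snippet above.
import time

user_input_pout, update_graphics_pin = start_gui()

while True:
    # Drain any pending user input sent by the pygame process.
    while user_input_pout.poll():
        event = user_input_pout.recv()
        print("user input:", event)
    # Push a (hypothetical) graphics update back to the gui process.
    update_graphics_pin.send({"kind": "redraw"})
    time.sleep(1 / 30)  # roughly one frame at 30 fps
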
def start_gui(piano_keys):
    '''
    Spawns a process to run the pygame piano.

    Parameters
    ----------
    piano_keys: dict[str] -> tuple(str, int)
        a mapping of keyboard characters to piano notes.

    Returns
    ----------
    multiprocessing.connection.PipeConnection
        a pipe object for the LF program to receive key presses from the
        pygame process
    multiprocessing.connection.PipeConnection
        a pipe object for the LF program to send graphics updates to the
        pygame process
    '''
    multiprocessing.set_start_method("spawn")
    user_input_pout, user_input_pin = connection.Pipe(duplex=False)
    update_graphics_pout, update_graphics_pin = connection.Pipe(duplex=False)
    p = Process(target=gui,
                args=(user_input_pin, update_graphics_pout, piano_keys))
    p.start()
    return user_input_pout, update_graphics_pin

def __init__(self, reducers=None, ctx=None):
    if sys.version_info[:2] >= (3, 4):
        super().__init__(ctx=ctx)
    else:
        # Use the context to create the sync objects for python2.7/3.3
        if ctx is None:
            ctx = get_context()
        self._reader, self._writer = connection.Pipe(duplex=False)
        self._rlock = ctx.Lock()
        self._poll = self._reader.poll
        if sys.platform == 'win32':
            self._wlock = None
        else:
            self._wlock = ctx.Lock()

    # Add possibility to use custom reducers
    self._reducers = reducers

def start(self, initializer=None, initargs=()):
    '''
    Spawn a server process for this manager object
    '''
    if self._state.value != State.INITIAL:
        if self._state.value == State.STARTED:
            raise ProcessError("Already started server")
        elif self._state.value == State.SHUTDOWN:
            raise ProcessError("Manager has shut down")
        else:
            raise ProcessError(
                "Unknown state {!r}".format(self._state.value))

    if initializer is not None and not callable(initializer):
        raise TypeError('initializer must be a callable')

    # pipe over which we will retrieve address of server
    reader, writer = connection.Pipe(duplex=False)

    # spawn process which runs a server
    self._process = self._ctx.Process(
        target=type(self)._run_server,
        args=(self._registry, self._address, self._authkey,
              self._serializer, writer, initializer, initargs),
    )
    ident = ':'.join(str(i) for i in self._process._identity)
    self._process.name = type(self).__name__ + '-' + ident
    self._process.start()

    # get address of server
    writer.close()
    self._address = reader.recv()
    reader.close()

    # register a finalizer
    self._state.value = State.STARTED
    self.shutdown = util.Finalize(
        self, type(self)._finalize_manager,
        args=(self._process, self._address, self._authkey,
              self._state, self._Client),
        exitpriority=0
    )

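# The writer end handed to _run_server above carries the other half of the
# handshake. A trimmed-down sketch of the server side, closely following the
# stdlib BaseManager._run_server (error handling and logging omitted):
@classmethod
def _run_server(cls, registry, address, authkey, serializer, writer,
                initializer=None, initargs=()):
    # Runs inside the freshly spawned process.
    if initializer is not None:
        initializer(*initargs)

    # Create the server; it may end up bound to an ephemeral address.
    server = cls._Server(registry, address, authkey, serializer)

    # Report the actual address back to the parent, then drop our copy of
    # the write end so the parent's reader sees EOF if this process dies.
    writer.send(server.address)
    writer.close()

    # Serve requests until the manager is shut down.
    server.serve_forever()
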
def __init__(self, maxsize=0):
    if maxsize <= 0:
        # Can raise ImportError (see issues #3770 and #23400)
        from multiprocessing.synchronize import SEM_VALUE_MAX as maxsize
    self._maxsize = maxsize
    self._reader, self._writer = connection.Pipe(duplex=False)
    self._rlock = Lock()
    self._opid = os.getpid()
    if sys.platform == 'win32':
        self._wlock = None
    else:
        self._wlock = Lock()
    self._sem = BoundedSemaphore(maxsize)
    # For use by concurrent.futures
    self._ignore_epipe = False

    self._after_fork()

    if sys.platform != 'win32':
        register_after_fork(self, Queue._after_fork)

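# Compared with the SimpleQueue variants above, this constructor adds a
# bounded semaphore for maxsize. A deliberately simplified sketch of blocking
# put/get on top of these attributes; the real multiprocessing.Queue pushes
# writes through a background feeder thread, which is omitted here.
from queue import Empty, Full
import pickle


def put(self, obj, block=True, timeout=None):
    # Every buffered item holds one slot of the bounded semaphore.
    if not self._sem.acquire(block, timeout):
        raise Full
    data = pickle.dumps(obj)
    if self._wlock is None:
        self._writer.send_bytes(data)  # no writer lock on win32
    else:
        with self._wlock:
            self._writer.send_bytes(data)


def get(self, block=True, timeout=None):
    with self._rlock:
        if block and timeout is None:
            data = self._reader.recv_bytes()
        else:
            if not self._reader.poll(timeout if block else 0):
                raise Empty
            data = self._reader.recv_bytes()
    self._sem.release()  # free a slot for waiting producers
    return pickle.loads(data)
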
def __init__(self, *, ctx=None, copy_tensor=False):
    """
    Args:
        ctx: Multiprocessing context, you can get this using ``get_context``
        copy_tensor: Set the queue to send a fully serialized tensor
            if ``True``, and only a stub of reference if ``False``.

    See Also:
        :func:`.dump_tensor`
    """
    if ctx is None:
        # get default context
        ctx = get_context()
    self._reader, self._writer = connection.Pipe(duplex=False)
    self._reader = ConnectionWrapper(self._reader)
    self._writer = ConnectionWrapper(self._writer)
    # _rlock will be used by _help_stuff_finish() of multiprocessing.Pool
    self._rlock = ctx.Lock()
    self._copy_tensor = copy_tensor
    if sys.platform == "win32":
        self._wlock = None
    else:
        self._wlock = ctx.Lock()

        self._abort = Event()
    #end def

    def run(self):
        try:
            sig = self._con.recv()
            print(sig)
            if isinstance(sig, AbortEvent):
                self._event.set()
        except (KeyboardInterrupt, EOFError, IOError):
            return
        except Exception:
            tb.print_exc()
    #end def

    def stop(self):
        self._abort.set()
#end class


if __name__ == '__main__':
    a, b = mpc.Pipe()
    e = Event()
    sl = SignalListener(a, e)
    sl.start()
    sleep(1)
    b.send(AbortEvent())
    sleep(1)
    b.close()
    # sl.stop()
    sl.join(1)
    print('Event is set: %s' % e.is_set())
    print('Thread is alive: %s' % sl.is_alive())