Example #1
def SocketClient(address):
    '''
    Return a connection object connected to the socket given by `address`
    '''
    family = address_type(address)
    with socket.socket(getattr(socket, family)) as s:
        s.setblocking(True)
        t = _init_timeout()

        while 1:
            try:
                s.connect(address)
            except socket.error as e:
                if e.args[0] != errno.ECONNREFUSED or _check_timeout(t):
                    debug('failed to connect to address %s', address)
                    raise
                time.sleep(0.01)
            else:
                break
        else:
            raise

        fd = duplicate(s.fileno())
    conn = _multiprocessing.Connection(fd)
    return conn
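The function above is the connect half that multiprocessing.connection.Client uses for TCP addresses in this version of the module. A rough, self-contained sketch of exercising that public Listener/Client API (the authkey is made up for illustration; port 0 lets the OS pick a free port):

import threading
from multiprocessing.connection import Listener, Client

AUTHKEY = b'secret'                                   # illustrative only
listener = Listener(('127.0.0.1', 0), authkey=AUTHKEY)  # bind + listen happens here

def serve_once():
    with listener.accept() as conn:                   # blocks until the client connects
        conn.send({'echo': conn.recv()})

t = threading.Thread(target=serve_once)
t.start()

with Client(listener.address, authkey=AUTHKEY) as conn:
    conn.send('ping')
    print(conn.recv())                                # {'echo': 'ping'}
t.join()
listener.close()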
Example #2
 def _help_stuff_finish(inqueue, task_handler, size):
     # task_handler may be blocked trying to put items on inqueue
     debug('removing tasks from inqueue until task handler finished')
     inqueue._rlock.acquire()
     while task_handler.is_alive() and inqueue._reader.poll():
         inqueue._reader.recv()
         time.sleep(0)
Example #3
 def cancel_join_thread(self):
     debug('Queue.cancel_join_thread()')
     self._joincancelled = True
     try:
         self._jointhread.cancel()
     except AttributeError:
         pass
Example #4
 def join(self):
     debug('joining pool')
     assert self._state in (CLOSE, TERMINATE)
     self._task_handler.join()
     self._result_handler.join()
     for p in self._pool:
         p.join()
Example #5
 def cancel_join_thread(self):
     debug('Queue.cancel_join_thread()')
     self._joincancelled = True
     try:
         self._jointhread.cancel()
     except AttributeError:
         pass
Example #6
def SocketClient(address):
    '''
    Return a connection object connected to the socket given by `address`
    '''
    family = address_type(address)
    with socket.socket(getattr(socket, family)) as s:
        s.setblocking(True)
        t = _init_timeout()

        while 1:
            try:
                s.connect(address)
            except socket.error as e:
                if e.args[0] != errno.ECONNREFUSED or _check_timeout(t):
                    debug('failed to connect to address %s', address)
                    raise
                time.sleep(0.01)
            else:
                break
        else:
            raise

        fd = duplicate(s.fileno())
    conn = _multiprocessing.Connection(fd)
    return conn
Example #7
def worker(inqueue, outqueue, initializer=None, initargs=()):
    put = outqueue.put
    get = inqueue.get
    if hasattr(inqueue, '_writer'):
        inqueue._writer.close()
        outqueue._reader.close()

    if initializer is not None:
        initializer(*initargs)

    while 1:
        try:
            task = get()
        except (EOFError, IOError):
            debug('worker got EOFError or IOError -- exiting')
            break

        if task is None:
            debug('worker got sentinel -- exiting')
            break

        job, i, func, args, kwds = task
        try:
            result = (True, func(*args, **kwds))
        except Exception as e:
            result = (False, e)
        put((job, i, result))
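The task/result wire format the worker loop expects is just tuples on two queues. A minimal in-process sketch (assumptions: the worker function above is in the same module, debug is stubbed out, and plain queue.Queue stands in for the pipes a real Pool wires up):

import queue

debug = print                        # stand-in for the module-level debug() the worker calls

inq, outq = queue.Queue(), queue.Queue()
inq.put((0, 0, pow, (2, 10), {}))    # task: job 0, item 0, call pow(2, 10)
inq.put(None)                        # sentinel tells the worker loop to exit

worker(inq, outq)                    # runs inline until it sees the sentinel
print(outq.get())                    # (0, 0, (True, 1024))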
Example #8
    def _callmethod(self, methodname, args=(), kwds={}):
        '''
        Try to call a method of the referent and return a copy of the result
        '''
        try:
            conn = self._tls.connection
        except AttributeError:
            util.debug('thread %r does not own a connection',
                       threading.current_thread().name)
            self._connect()
            conn = self._tls.connection

        conn.send((self._id, methodname, args, kwds))
        kind, result = conn.recv()

        if kind == '#RETURN':
            return result
        elif kind == '#PROXY':
            exposed, token = result
            proxytype = self._manager._registry[token.typeid][-1]
            proxy = proxytype(
                token, self._serializer, manager=self._manager,
                authkey=self._authkey, exposed=exposed
                )
            conn = self._Client(token.address, authkey=self._authkey)
            dispatch(conn, None, 'decref', (token.id,))
            return proxy
        raise convert_to_error(kind, result)
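Every method call on a manager proxy funnels through _callmethod. A hedged illustration with a standard SyncManager dict proxy, where the explicit call and the ordinary proxy call produce the same request:

from multiprocessing import Manager

if __name__ == '__main__':
    with Manager() as manager:
        d = manager.dict()
        d['direct'] = 1                                      # ordinary proxy call
        d._callmethod('__setitem__', ('explicit', 2))        # the same request, spelled out
        print(d._callmethod('__getitem__', ('direct',)))     # 1, same as d['direct']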
Example #9
    def _callmethod(self, methodname, args=(), kwds={}):
        '''
        Try to call a method of the referent and return a copy of the result
        '''
        try:
            conn = self._tls.connection
        except AttributeError:
            util.debug('thread %r does not own a connection',
                       threading.current_thread().name)
            self._connect()
            conn = self._tls.connection

        conn.send((self._id, methodname, args, kwds))
        kind, result = conn.recv()

        if kind == '#RETURN':
            return result
        elif kind == '#PROXY':
            exposed, token = result
            proxytype = self._manager._registry[token.typeid][-1]
            token.address = self._token.address
            proxy = proxytype(
                token, self._serializer, manager=self._manager,
                authkey=self._authkey, exposed=exposed
                )
            conn = self._Client(token.address, authkey=self._authkey)
            dispatch(conn, None, 'decref', (token.id,))
            return proxy
        raise convert_to_error(kind, result)
Example #10
 def _help_stuff_finish(inqueue, task_handler, size):
     # task_handler may be blocked trying to put items on inqueue
     debug('removing tasks from inqueue until task handler finished')
     inqueue._rlock.acquire()
     while task_handler.is_alive() and inqueue._reader.poll():
         inqueue._reader.recv()
         time.sleep(0)
Example #11
 def _finalize_close(buffer, notempty):
     debug('telling queue thread to quit')
     notempty.acquire()
     try:
         buffer.append(_sentinel)
         notempty.notify()
     finally:
         notempty.release()
Example #12
 def _finalize_close(buffer, notempty):
     debug('telling queue thread to quit')
     notempty.acquire()
     try:
         buffer.append(_sentinel)
         notempty.notify()
     finally:
         notempty.release()
Example #13
 def _connect(self):
     util.debug('making connection to manager')
     name = current_process().name
     if threading.current_thread().name != 'MainThread':
         name += '|' + threading.current_thread().name
     conn = self._Client(self._token.address, authkey=self._authkey)
     dispatch(conn, None, 'accept_connection', (name,))
     self._tls.connection = conn
Example #14
 def _connect(self):
     util.debug('making connection to manager')
     name = current_process().name
     if threading.current_thread().name != 'MainThread':
         name += '|' + threading.current_thread().name
     conn = self._Client(self._token.address, authkey=self._authkey)
     dispatch(conn, None, 'accept_connection', (name,))
     self._tls.connection = conn
Example #15
    def __init__(self, kind, value, maxvalue):
        sl = self._semlock = _multiprocessing.SemLock(kind, value, maxvalue)
        debug('created semlock with handle %s' % sl.handle)
        self._make_methods()

        if sys.platform != 'win32':
            def _after_fork(obj):
                obj._semlock._after_fork()
            register_after_fork(self, _after_fork)
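The constructor registers an after-fork hook so a forked child re-initialises the underlying semaphore handle. A minimal sketch of the call shape it relies on; register_after_fork is an internal, undocumented helper in multiprocessing.util, and the class below is made up purely to show the hook:

import os
from multiprocessing import util

class PerProcessState:
    def __init__(self):
        self.pid = os.getpid()
        # hook is invoked with the object in any forked child (fork start method only)
        util.register_after_fork(self, PerProcessState._after_fork)

    @staticmethod
    def _after_fork(obj):
        obj.pid = os.getpid()   # refresh state that is not valid across fork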
Example #16
    def __init__(self, kind, value, maxvalue):
        sl = self._semlock = _multiprocessing.SemLock(kind, value, maxvalue)
        debug('created semlock with handle %s' % sl.handle)
        self._make_methods()

        if sys.platform != 'win32':
            def _after_fork(obj):
                obj._semlock._after_fork()
            register_after_fork(self, _after_fork)
Example #17
    def _feed(buffer, notempty, send, writelock, close, ignore_epipe):
        debug('starting thread to feed data to pipe')
        from .util import is_exiting

        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            wacquire = None

        try:
            while 1:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()
                try:
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            debug('feeder thread got sentinel -- exiting')
                            close()
                            return

                        if wacquire is None:
                            send(obj)
                        else:
                            wacquire()
                            try:
                                send(obj)
                            finally:
                                wrelease()
                except IndexError:
                    pass
        except Exception as e:
            if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
                return
            # Since this runs in a daemon thread the resources it uses
            # may become unusable while the process is cleaning up.
            # We ignore errors which happen after the process has
            # started to clean up.
            try:
                if is_exiting():
                    info('error in queue thread: %s', e)
                else:
                    import traceback
                    traceback.print_exc()
            except Exception:
                pass
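The feeder thread's structure is: block on the notempty condition, then drain the deque until IndexError, stopping at the sentinel. A simplified standalone sketch of the same pattern (no write lock or pipe; send is just a callback, so this is not the real _feed):

import collections
import threading

_sentinel = object()

def feeder(buffer, notempty, send):
    while True:
        with notempty:
            if not buffer:
                notempty.wait()          # sleep until the producer notifies
        try:
            while True:
                obj = buffer.popleft()
                if obj is _sentinel:
                    return               # producer asked us to quit
                send(obj)
        except IndexError:
            pass                         # buffer drained; wait again

buf = collections.deque()
cond = threading.Condition()
t = threading.Thread(target=feeder, args=(buf, cond, print), daemon=True)
t.start()

for item in ('a', 'b', _sentinel):
    with cond:
        buf.append(item)
        cond.notify()
t.join()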
Example #18
    def _feed(buffer, notempty, send, writelock, close, ignore_epipe):
        debug('starting thread to feed data to pipe')
        from .util import is_exiting

        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            wacquire = None

        try:
            while 1:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()
                try:
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            debug('feeder thread got sentinel -- exiting')
                            close()
                            return

                        if wacquire is None:
                            send(obj)
                        else:
                            wacquire()
                            try:
                                send(obj)
                            finally:
                                wrelease()
                except IndexError:
                    pass
        except Exception as e:
            if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
                return
            # Since this runs in a daemon thread the resources it uses
            # may become unusable while the process is cleaning up.
            # We ignore errors which happen after the process has
            # started to clean up.
            try:
                if is_exiting():
                    info('error in queue thread: %s', e)
                else:
                    import traceback
                    traceback.print_exc()
            except Exception:
                pass
Example #19
 def _start(self):
     from .connection import Listener
     assert self._listener is None
     debug('starting listener and thread for sending handles')
     self._listener = Listener(authkey=current_process().authkey)
     self._address = self._listener.address
     t = threading.Thread(target=self._serve)
     t.daemon = True
     t.start()
     self._thread = t
Example #20
 def _start(self):
     from .connection import Listener
     assert self._listener is None
     debug('starting listener and thread for sending handles')
     self._listener = Listener(authkey=current_process().authkey)
     self._address = self._listener.address
     t = threading.Thread(target=self._serve)
     t.daemon = True
     t.start()
     self._thread = t
Example #21
 def temp(self, *args, **kwds):
     util.debug('requesting creation of a shared %r object', typeid)
     token, exp = self._create(typeid, *args, **kwds)
     proxy = proxytype(
         token, self._serializer, manager=self,
         authkey=self._authkey, exposed=exp
         )
     conn = self._Client(token.address, authkey=self._authkey)
     dispatch(conn, None, 'decref', (token.id,))
     return proxy
Example #22
 def decref(self, c, ident):
     self.mutex.acquire()
     try:
         assert self.id_to_refcount[ident] >= 1
         self.id_to_refcount[ident] -= 1
         if self.id_to_refcount[ident] == 0:
             del self.id_to_obj[ident], self.id_to_refcount[ident]
             util.debug('disposing of obj with id %r', ident)
     finally:
         self.mutex.release()
Example #23
 def decref(self, c, ident):
     self.mutex.acquire()
     try:
         assert self.id_to_refcount[ident] >= 1
         self.id_to_refcount[ident] -= 1
         if self.id_to_refcount[ident] == 0:
             del self.id_to_obj[ident], self.id_to_refcount[ident]
             util.debug('disposing of obj with id %r', ident)
     finally:
         self.mutex.release()
Example #24
 def temp(self, *args, **kwds):
     util.debug('requesting creation of a shared %r object', typeid)
     token, exp = self._create(typeid, *args, **kwds)
     proxy = proxytype(
         token, self._serializer, manager=self,
         authkey=self._authkey, exposed=exp
         )
     conn = self._Client(token.address, authkey=self._authkey)
     dispatch(conn, None, 'decref', (token.id,))
     return proxy
Example #25
    def _handle_workers(pool):
        thread = threading.current_thread()

        # Keep maintaining workers until the cache gets drained, unless the pool
        # is terminated.
        while thread._state == RUN or (pool._cache and thread._state != TERMINATE):
            pool._maintain_pool()
            time.sleep(0.1)
        # send sentinel to stop workers
        pool._taskqueue.put(None)
        debug('worker handler exiting')
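The handler thread keeps the pool populated until the result cache drains after close(), or until terminate() flips its state. A small sketch of the two ways a Pool is wound down from user code:

from multiprocessing import Pool

if __name__ == '__main__':
    # Graceful path: close() stops new tasks, join() waits for the cache to drain,
    # which is what lets the handler loop above exit.
    pool = Pool(processes=2)
    result = pool.map_async(abs, [-1, 2, -3])
    pool.close()
    pool.join()
    print(result.get())                 # [1, 2, 3]

    # Context-manager path: __exit__ calls terminate(), so the handler thread
    # sees the TERMINATE state instead.
    with Pool(processes=2) as pool:
        print(pool.map(abs, [-4, 5]))   # [4, 5]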
Example #26
    def _handle_workers(pool):
        thread = threading.current_thread()

        # Keep maintaining workers until the cache gets drained, unless the pool
        # is terminated.
        while thread._state == RUN or (pool._cache and thread._state != TERMINATE):
            pool._maintain_pool()
            time.sleep(0.1)
        # send sentinel to stop workers
        pool._taskqueue.put(None)
        debug('worker handler exiting')
Example #27
 def shutdown(self, c):
     '''
     Shutdown this process
     '''
     try:
         util.debug('manager received shutdown message')
         c.send(('#RETURN', None))
     except:
         import traceback
         traceback.print_exc()
     finally:
         self.stop_event.set()
Example #28
 def _after_fork(self):
     debug('Queue._after_fork()')
     self._notempty = threading.Condition(threading.Lock())
     self._buffer = collections.deque()
     self._thread = None
     self._jointhread = None
     self._joincancelled = False
     self._closed = False
     self._close = None
     self._send = self._writer.send
     self._recv = self._reader.recv
     self._poll = self._reader.poll
Example #29
    def _decref(token, authkey, state, tls, idset, _Client):
        idset.discard(token.id)

        # check whether manager is still alive
        if state is None or state.value == State.STARTED:
            # tell manager this process no longer cares about referent
            try:
                util.debug('DECREF %r', token.id)
                conn = _Client(token.address, authkey=authkey)
                dispatch(conn, None, 'decref', (token.id,))
            except Exception as e:
                util.debug('... decref failed %s', e)
Example #30
 def _after_fork(self):
     debug('Queue._after_fork()')
     self._notempty = threading.Condition(threading.Lock())
     self._buffer = collections.deque()
     self._thread = None
     self._jointhread = None
     self._joincancelled = False
     self._closed = False
     self._close = None
     self._send = self._writer.send
     self._recv = self._reader.recv
     self._poll = self._reader.poll
Example #31
 def shutdown(self, c):
     '''
     Shutdown this process
     '''
     try:
         util.debug('manager received shutdown message')
         c.send(('#RETURN', None))
     except:
         import traceback
         traceback.print_exc()
     finally:
         self.stop_event.set()
Example #32
    def _decref(token, authkey, state, tls, idset, _Client):
        idset.discard(token.id)

        # check whether manager is still alive
        if state is None or state.value == State.STARTED:
            # tell manager this process no longer cares about referent
            try:
                util.debug('DECREF %r', token.id)
                conn = _Client(token.address, authkey=authkey)
                dispatch(conn, None, 'decref', (token.id,))
            except Exception as e:
                util.debug('... decref failed %s', e)
Example #33
 def _join_exited_workers(self):
     """Cleanup after any worker processes which have exited due to reaching
     their specified lifetime.  Returns True if any workers were cleaned up.
     """
     cleaned = False
     for i in reversed(range(len(self._pool))):
         worker = self._pool[i]
         if worker.exitcode is not None:
             # worker exited
             debug('cleaning up worker %d' % i)
             worker.join()
             cleaned = True
             del self._pool[i]
     return cleaned
Example #34
    def shutdown(self, c):
        '''
        Shutdown this process
        '''
        try:
            try:
                util.debug('manager received shutdown message')
                c.send(('#RETURN', None))

                if sys.stdout != sys.__stdout__:
                    util.debug('resetting stdout, stderr')
                    sys.stdout = sys.__stdout__
                    sys.stderr = sys.__stderr__

                util._run_finalizers(0)

                for p in active_children():
                    util.debug('terminating a child process of manager')
                    p.terminate()

                for p in active_children():
                    util.debug('terminating a child process of manager')
                    p.join()

                util._run_finalizers()
                util.info('manager exiting with exitcode 0')
            except:
                import traceback
                traceback.print_exc()
        finally:
            exit(0)
Example #35
 def _repopulate_pool(self):
     """Bring the number of pool processes up to the specified number,
     for use after reaping workers which have exited.
     """
     for i in range(self._processes - len(self._pool)):
         w = self.Process(target=worker,
                          args=(self._inqueue, self._outqueue,
                                self._initializer, self._initargs,
                                self._maxtasksperchild))
         self._pool.append(w)
         w.name = w.name.replace('Process', 'PoolWorker')
         w.daemon = True
         w.start()
         debug('added worker')
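Together with _join_exited_workers() (Example #33), this is the machinery behind maxtasksperchild: workers that hit their task limit exit and are replaced. A hedged sketch of the observable effect, namely that worker PIDs change over the run:

import os
from multiprocessing import Pool

def get_pid(_):
    return os.getpid()

if __name__ == '__main__':
    with Pool(processes=2, maxtasksperchild=5) as pool:
        pids = pool.map(get_pid, range(40), chunksize=1)
    # Workers are retired every 5 tasks and replaced, so usually more than the
    # original 2 distinct PIDs show up.
    print(len(set(pids)))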
Example #36
    def shutdown(self, c):
        '''
        Shutdown this process
        '''
        try:
            try:
                util.debug('manager received shutdown message')
                c.send(('#RETURN', None))

                if sys.stdout != sys.__stdout__:
                    util.debug('resetting stdout, stderr')
                    sys.stdout = sys.__stdout__
                    sys.stderr = sys.__stderr__

                util._run_finalizers(0)

                for p in active_children():
                    util.debug('terminating a child process of manager')
                    p.terminate()

                for p in active_children():
                    util.debug('terminating a child process of manager')
                    p.join()

                util._run_finalizers()
                util.info('manager exiting with exitcode 0')
            except:
                import traceback
                traceback.print_exc()
        finally:
            exit(0)
Example #37
 def _join_exited_workers(self):
     """Cleanup after any worker processes which have exited due to reaching
     their specified lifetime.  Returns True if any workers were cleaned up.
     """
     cleaned = False
     for i in reversed(range(len(self._pool))):
         worker = self._pool[i]
         if worker.exitcode is not None:
             # worker exited
             debug('cleaning up worker %d' % i)
             worker.join()
             cleaned = True
             del self._pool[i]
     return cleaned
Example #38
 def _repopulate_pool(self):
     """Bring the number of pool processes up to the specified number,
     for use after reaping workers which have exited.
     """
     for i in range(self._processes - len(self._pool)):
         w = self.Process(target=worker,
                          args=(self._inqueue, self._outqueue,
                                self._initializer,
                                self._initargs, self._maxtasksperchild)
                         )
         self._pool.append(w)
         w.name = w.name.replace('Process', 'PoolWorker')
         w.daemon = True
         w.start()
         debug('added worker')
Example #39
    def _incref(self):
        conn = self._Client(self._token.address, authkey=self._authkey)
        dispatch(conn, None, 'incref', (self._id,))
        util.debug('INCREF %r', self._token.id)

        self._idset.add(self._id)

        state = self._manager and self._manager._state

        self._close = util.Finalize(
            self, BaseProxy._decref,
            args=(self._token, self._authkey, state,
                  self._tls, self._idset, self._Client),
            exitpriority=10
            )
Example #40
    def _incref(self):
        conn = self._Client(self._token.address, authkey=self._authkey)
        dispatch(conn, None, 'incref', (self._id,))
        util.debug('INCREF %r', self._token.id)

        self._idset.add(self._id)

        state = self._manager and self._manager._state

        self._close = util.Finalize(
            self, BaseProxy._decref,
            args=(self._token, self._authkey, state,
                  self._tls, self._idset, self._Client),
            exitpriority=10
            )
Example #41
def _get_listener():
    global _listener

    if _listener is None:
        _lock.acquire()
        try:
            if _listener is None:
                debug('starting listener and thread for sending handles')
                _listener = Listener(authkey=current_process().authkey)
                t = threading.Thread(target=_serve)
                t.daemon = True
                t.start()
        finally:
            _lock.release()

    return _listener
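The function lazily creates a module-global listener with double-checked locking: a cheap unlocked check, then a second check under the lock. A generic sketch of the same pattern (the names here are illustrative only):

import threading

_resource = None
_lock = threading.Lock()

def get_resource():
    global _resource
    if _resource is None:               # fast path, no lock taken
        with _lock:
            if _resource is None:       # re-check: another thread may have won the race
                _resource = object()    # expensive one-time setup goes here
    return _resource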
Example #42
def _get_listener():
    global _listener

    if _listener is None:
        _lock.acquire()
        try:
            if _listener is None:
                debug('starting listener and thread for sending handles')
                _listener = Listener(authkey=current_process().authkey)
                t = threading.Thread(target=_serve)
                t.daemon = True
                t.start()
        finally:
            _lock.release()

    return _listener
Example #43
def SocketClient(address):
    '''
    Return a connection object connected to the socket given by `address`
    '''
    family = address_type(address)
    s = socket.socket(getattr(socket, family))
    t = _init_timeout()

    while 1:
        try:
            s.connect(address)
        except socket.error as e:
            if e.args[0] != errno.ECONNREFUSED or _check_timeout(t):
                debug('failed to connect to address %s', address)
                raise
            time.sleep(0.01)
        else:
            break
Example #44
def SocketClient(address):
    '''
    Return a connection object connected to the socket given by `address`
    '''
    family = address_type(address)
    s = socket.socket(getattr(socket, family))
    t = _init_timeout()

    while 1:
        try:
            s.connect(address)
        except socket.error as e:
            if e.args[0] != errno.ECONNREFUSED or _check_timeout(t):
                debug('failed to connect to address %s', address)
                raise
            time.sleep(0.01)
        else:
            break
Example #45
 def serve_forever(self):
     '''
     Run the server forever
     '''
     self.stop_event = threading.Event()
     current_process()._manager_server = self
     try:
         accepter = threading.Thread(target=self.accepter)
         accepter.daemon = True
         accepter.start()
         try:
             while not self.stop_event.is_set():
                 self.stop_event.wait(1)
         except (KeyboardInterrupt, SystemExit):
             pass
     finally:
         if sys.stdout != sys.__stdout__:
             util.debug('resetting stdout, stderr')
             sys.stdout = sys.__stdout__
             sys.stderr = sys.__stderr__
         sys.exit(0)
Example #46
 def serve_forever(self):
     '''
     Run the server forever
     '''
     self.stop_event = threading.Event()
     current_process()._manager_server = self
     try:
         accepter = threading.Thread(target=self.accepter)
         accepter.daemon = True
         accepter.start()
         try:
             while not self.stop_event.is_set():
                 self.stop_event.wait(1)
         except (KeyboardInterrupt, SystemExit):
             pass
     finally:
         if sys.stdout != sys.__stdout__:
             util.debug('resetting stdout, stderr')
             sys.stdout = sys.__stdout__
             sys.stderr = sys.__stderr__
         sys.exit(0)
Example #47
    def _start_thread(self):
        debug('Queue._start_thread()')

        # Start thread which transfers data from buffer to pipe
        self._buffer.clear()
        self._thread = threading.Thread(
            target=Queue._feed,
            args=(self._buffer, self._notempty, self._send, self._wlock,
                  self._writer.close, self._ignore_epipe),
            name='QueueFeederThread')
        self._thread.daemon = True

        debug('doing self._thread.start()')
        self._thread.start()
        debug('... done self._thread.start()')

        # On process exit we will wait for data to be flushed to pipe.
        #
        # However, if this process created the queue then all
        # processes which use the queue will be descendants of this
        # process.  Therefore waiting for the queue to be flushed
        # is pointless once all the child processes have been joined.
        created_by_this_process = (self._opid == os.getpid())
        if not self._joincancelled and not created_by_this_process:
            self._jointhread = Finalize(self._thread,
                                        Queue._finalize_join,
                                        [weakref.ref(self._thread)],
                                        exitpriority=-5)

        # Send sentinel to the thread queue object when garbage collected
        self._close = Finalize(self,
                               Queue._finalize_close,
                               [self._buffer, self._notempty],
                               exitpriority=10)
Example #48
    def serve_client(self, conn):
        '''
        Handle requests from the proxies in a particular process/thread
        '''
        util.debug('starting server thread to service %r',
                   threading.current_thread().name)

        recv = conn.recv
        send = conn.send
        id_to_obj = self.id_to_obj

        while not self.stop:

            try:
                methodname = obj = None
                request = recv()
                ident, methodname, args, kwds = request
                obj, exposed, gettypeid = id_to_obj[ident]

                if methodname not in exposed:
                    raise AttributeError(
                        'method %r of %r object is not in exposed=%r' %
                        (methodname, type(obj), exposed)
                        )

                function = getattr(obj, methodname)

                try:
                    res = function(*args, **kwds)
                except Exception as e:
                    msg = ('#ERROR', e)
                else:
                    typeid = gettypeid and gettypeid.get(methodname, None)
                    if typeid:
                        rident, rexposed = self.create(conn, typeid, res)
                        token = Token(typeid, self.address, rident)
                        msg = ('#PROXY', (rexposed, token))
                    else:
                        msg = ('#RETURN', res)
Example #49
    def serve_client(self, conn):
        '''
        Handle requests from the proxies in a particular process/thread
        '''
        util.debug('starting server thread to service %r',
                   threading.current_thread().name)

        recv = conn.recv
        send = conn.send
        id_to_obj = self.id_to_obj

        while not self.stop:

            try:
                methodname = obj = None
                request = recv()
                ident, methodname, args, kwds = request
                obj, exposed, gettypeid = id_to_obj[ident]

                if methodname not in exposed:
                    raise AttributeError(
                        'method %r of %r object is not in exposed=%r' %
                        (methodname, type(obj), exposed)
                        )

                function = getattr(obj, methodname)

                try:
                    res = function(*args, **kwds)
                except Exception as e:
                    msg = ('#ERROR', e)
                else:
                    typeid = gettypeid and gettypeid.get(methodname, None)
                    if typeid:
                        rident, rexposed = self.create(conn, typeid, res)
                        token = Token(typeid, self.address, rident)
                        msg = ('#PROXY', (rexposed, token))
                    else:
                        msg = ('#RETURN', res)
Example #50
 def _finalize_join(twr):
     debug('joining queue thread')
     thread = twr()
     if thread is not None:
         thread.join()
         debug('... queue thread joined')
     else:
         debug('... queue thread already dead')
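The finalizer holds only a weak reference to the feeder thread, so the Finalize record never keeps the thread object alive by itself. A tiny sketch of calling the helper directly, assuming the function above is available in the same module and debug is stubbed out:

import threading
import weakref

debug = print                             # stand-in for multiprocessing.util.debug

t = threading.Thread(target=lambda: None)
t.start()
_finalize_join(weakref.ref(t))            # resolves the weak ref, then joins the thread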
Example #51
 def _finalize_join(twr):
     debug('joining queue thread')
     thread = twr()
     if thread is not None:
         thread.join()
         debug('... queue thread joined')
     else:
         debug('... queue thread already dead')
Example #52
    def create(self, c, typeid, *args, **kwds):
        '''
        Create a new shared object and return its id
        '''
        self.mutex.acquire()
        try:
            callable, exposed, method_to_typeid, proxytype = \
                      self.registry[typeid]

            if callable is None:
                assert len(args) == 1 and not kwds
                obj = args[0]
            else:
                obj = callable(*args, **kwds)

            if exposed is None:
                exposed = public_methods(obj)
            if method_to_typeid is not None:
                assert type(method_to_typeid) is dict
                exposed = list(exposed) + list(method_to_typeid)

            ident = '%x' % id(obj)  # convert to string because xmlrpclib
                                    # only has 32 bit signed integers
            util.debug('%r callable returned object with id %r', typeid, ident)

            self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
            if ident not in self.id_to_refcount:
                self.id_to_refcount[ident] = 0
            # increment the reference count immediately, to avoid
            # this object being garbage collected before a Proxy
            # object for it can be created.  The caller of create()
            # is responsible for doing a decref once the Proxy object
            # has been created.
            self.incref(c, ident)
            return ident, tuple(exposed)
        finally:
            self.mutex.release()
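create() runs in the manager's server process when a client asks for a registered typeid. A hedged end-to-end sketch using a custom BaseManager registration; the Counter class and the 'Counter' typeid are made up for illustration:

from multiprocessing.managers import BaseManager

class Counter:
    def __init__(self):
        self.value = 0
    def increment(self):
        self.value += 1
        return self.value

class MyManager(BaseManager):
    pass

MyManager.register('Counter', Counter)       # typeid 'Counter' -> callable Counter

if __name__ == '__main__':
    with MyManager() as manager:
        c = manager.Counter()                # server side ends up in create(conn, 'Counter')
        print(c.increment(), c.increment())  # 1 2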
Example #53
    def create(self, c, typeid, *args, **kwds):
        '''
        Create a new shared object and return its id
        '''
        self.mutex.acquire()
        try:
            callable, exposed, method_to_typeid, proxytype = \
                      self.registry[typeid]

            if callable is None:
                assert len(args) == 1 and not kwds
                obj = args[0]
            else:
                obj = callable(*args, **kwds)

            if exposed is None:
                exposed = public_methods(obj)
            if method_to_typeid is not None:
                assert type(method_to_typeid) is dict
                exposed = list(exposed) + list(method_to_typeid)

            ident = '%x' % id(obj)  # convert to string because xmlrpclib
                                    # only has 32 bit signed integers
            util.debug('%r callable returned object with id %r', typeid, ident)

            self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
            if ident not in self.id_to_refcount:
                self.id_to_refcount[ident] = 0
            # increment the reference count immediately, to avoid
            # this object being garbage collected before a Proxy
            # object for it can be created.  The caller of create()
            # is responsible for doing a decref once the Proxy object
            # has been created.
            self.incref(c, ident)
            return ident, tuple(exposed)
        finally:
            self.mutex.release()
Example #54
    def _decref(token, authkey, state, tls, idset, _Client):
        idset.discard(token.id)

        # check whether manager is still alive
        if state is None or state.value == State.STARTED:
            # tell manager this process no longer cares about referent
            try:
                util.debug('DECREF %r', token.id)
                conn = _Client(token.address, authkey=authkey)
                dispatch(conn, None, 'decref', (token.id, ))
            except Exception as e:
                util.debug('... decref failed %s', e)

        else:
            util.debug('DECREF %r -- manager already shutdown', token.id)

        # check whether we can close this thread's connection because
        # the process owns no more references to objects for this manager
        if not idset and hasattr(tls, 'connection'):
            util.debug('thread %r has no more proxies so closing conn',
                       threading.current_thread().name)
            tls.connection.close()
            del tls.connection
Example #55
    def _decref(token, authkey, state, tls, idset, _Client):
        idset.discard(token.id)

        # check whether manager is still alive
        if state is None or state.value == State.STARTED:
            # tell manager this process no longer cares about referent
            try:
                util.debug('DECREF %r', token.id)
                conn = _Client(token.address, authkey=authkey)
                dispatch(conn, None, 'decref', (token.id,))
            except Exception as e:
                util.debug('... decref failed %s', e)

        else:
            util.debug('DECREF %r -- manager already shutdown', token.id)

        # check whether we can close this thread's connection because
        # the process owns no more references to objects for this manager
        if not idset and hasattr(tls, 'connection'):
            util.debug('thread %r has no more proxies so closing conn',
                       threading.current_thread().name)
            tls.connection.close()
            del tls.connection
Example #56
def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
    assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
    put = outqueue.put
    get = inqueue.get
    if hasattr(inqueue, '_writer'):
        inqueue._writer.close()
        outqueue._reader.close()

    if initializer is not None:
        initializer(*initargs)

    completed = 0
    while maxtasks is None or (maxtasks and completed < maxtasks):
        try:
            task = get()
        except (EOFError, IOError):
            debug('worker got EOFError or IOError -- exiting')
            break

        if task is None:
            debug('worker got sentinel -- exiting')
            break

        job, i, func, args, kwds = task
        try:
            result = (True, func(*args, **kwds))
        except Exception as e:
            result = (False, e)
        try:
            put((job, i, result))
        except Exception as e:
            wrapped = MaybeEncodingError(e, result[1])
            debug("Possible encoding error while sending result: %s" %
                  (wrapped))
            put((job, i, (False, wrapped)))
        completed += 1
    debug('worker exiting after %d tasks' % completed)
Example #57
def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
    assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
    put = outqueue.put
    get = inqueue.get
    if hasattr(inqueue, '_writer'):
        inqueue._writer.close()
        outqueue._reader.close()

    if initializer is not None:
        initializer(*initargs)

    completed = 0
    while maxtasks is None or (maxtasks and completed < maxtasks):
        try:
            task = get()
        except (EOFError, IOError):
            debug('worker got EOFError or IOError -- exiting')
            break

        if task is None:
            debug('worker got sentinel -- exiting')
            break

        job, i, func, args, kwds = task
        try:
            result = (True, func(*args, **kwds))
        except Exception as e:
            result = (False, e)
        try:
            put((job, i, result))
        except Exception as e:
            wrapped = MaybeEncodingError(e, result[1])
            debug("Possible encoding error while sending result: %s" % (
                wrapped))
            put((job, i, (False, wrapped)))
        completed += 1
    debug('worker exiting after %d tasks' % completed)
Example #58
    def _start_thread(self):
        debug('Queue._start_thread()')

        # Start thread which transfers data from buffer to pipe
        self._buffer.clear()
        self._thread = threading.Thread(
            target=Queue._feed,
            args=(self._buffer, self._notempty, self._send,
                  self._wlock, self._writer.close),
            name='QueueFeederThread'
            )
        self._thread.daemon = True

        debug('doing self._thread.start()')
        self._thread.start()
        debug('... done self._thread.start()')

        # On process exit we will wait for data to be flushed to pipe.
        #
        # However, if this process created the queue then all
        # processes which use the queue will be descendants of this
        # process.  Therefore waiting for the queue to be flushed
        # is pointless once all the child processes have been joined.
        created_by_this_process = (self._opid == os.getpid())
        if not self._joincancelled and not created_by_this_process:
            self._jointhread = Finalize(
                self._thread, Queue._finalize_join,
                [weakref.ref(self._thread)],
                exitpriority=-5
                )

        # Send sentinel to the thread queue object when garbage collected
        self._close = Finalize(
            self, Queue._finalize_close,
            [self._buffer, self._notempty],
            exitpriority=10
            )