Example #1
class TestQueueTimeouts(object):
    def setup(self):
        self.result = Event()
        self.queue = Queue()
        self.timeouts = 0
        diesel.fork(self.consumer, 0.01)
        diesel.fork(self.producer, 0.05)
        diesel.fork(self.consumer, 0.10)
        ev, val = diesel.first(sleep=TIMEOUT, waits=[self.result])
        if ev == 'sleep':
            assert 0, 'timed out'

    def consumer(self, timeout):
        try:
            self.queue.get(timeout=timeout)
            self.result.set()
        except QueueTimeout:
            self.timeouts += 1

    def producer(self, delay):
        diesel.sleep(delay)
        self.queue.put('test')

    def test_a_consumer_timed_out(self):
        assert self.timeouts == 1

    def test_a_consumer_got_a_value(self):
        assert self.result.is_set
Example #2
    def websocket_protocol(self, req):
        """Runs the WebSocket protocol after the handshake is complete.

        Creates two `Queue` instances for incoming and outgoing messages and
        passes them to the `web_socket_handler` that was supplied to the
        `WebSocketServer` constructor.

        """
        inq = Queue()
        outq = Queue()

        def wrap(req, inq, outq):
            self.web_socket_handler(req, inq, outq)
            outq.put(WebSocketDisconnect())

        handler_loop = fork(wrap, req, inq, outq)

        if req.rfc_handshake:
            handle_frames = self.handle_rfc_6455_frames
        else:
            handle_frames = self.handle_non_rfc_frames

        try:
            handle_frames(inq, outq)
        except ConnectionClosed:
            if handler_loop.running:
                inq.put(WebSocketDisconnect())
            raise
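
For context, a hypothetical `web_socket_handler` of the kind the docstring refers to might look like the sketch below. The handler name and echo behaviour are invented; only the queue protocol shown above (messages arriving on `inq`, replies going out on `outq`, `WebSocketDisconnect` as the shutdown sentinel) comes from the code itself.

def echo_socket_handler(req, inq, outq):
    # Read messages from the client until the frame loop signals a disconnect,
    # echoing each one back through the outgoing queue.
    while True:
        msg = inq.get()
        if isinstance(msg, WebSocketDisconnect):
            break  # wrap() above enqueues the closing WebSocketDisconnect
        outq.put(msg)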
Example #3
    def __call__(self):
        self.q = Queue()
        self.trigger = Event()
        self.finished = Event()
        self.waiting = 0
        self.running = 0
        try:
            while True:
                for x in xrange(self.concurrency - self.running):
                    self.running += 1
                    fork(self.handler_wrap)

                if self.waiting == 0:
                    self.trigger.wait()
                    self.trigger.clear()

                try:
                    n = self.generator()
                except StopIteration:
                    break

                self.q.put(n)
                sleep()
        finally:
            for x in xrange(self.concurrency):
                self.q.put(ThreadPoolDie)
            if self.finalizer:
                self.finished.wait()
                fork(self.finalizer)
Example #4
    def __init__(self, uri, logger=None, log_level=None):
        self.uri = uri
        self.zmq_socket = None
        self.log = logger or None
        self.selected_log_level = log_level
        self.clients = {}
        self.outgoing = Queue()
        self.incoming = Queue()
        self.name = self.name or self.__class__.__name__
        self._incoming_loop = None

        # Allow for custom `should_run` properties in subclasses.
        try:
            self.should_run = True
        except AttributeError:
            # A custom `should_run` property exists.
            pass

        if self.log and self.selected_log_level is not None:
            self.selected_log_level = None
            warnings.warn(
                "ignored `log_level` argument since `logger` was provided.",
                RuntimeWarning,
                stacklevel=2,
            )
Example #5
class ChatClient(Client):
    def __init__(self, *args, **kw):
        Client.__init__(self, *args, **kw)
        self.input = Queue()

    def read_chat_message(self, prompt):
        msg = raw_input(prompt)
        return msg

    def input_handler(self):
        nick = thread(self.read_chat_message, "nick: ").strip()
        self.nick = nick
        self.input.put(nick)
        while True:
            msg = thread(self.read_chat_message, "").strip()
            self.input.put(msg)

    @call
    def chat(self):
        fork(self.input_handler)
        nick = self.input.get()
        send("%s\r\n" % nick)
        while True:
            evt, data = first(until_eol=True, waits=[self.input])
            if evt == "until_eol":
                print data.strip()
            else:
                send("%s\r\n" % data)
Example #6
class ChatClient(Client):
    def __init__(self, *args, **kw):
        Client.__init__(self, *args, **kw)
        self.input = Queue()

    def read_chat_message(self, prompt):
        msg = raw_input(prompt)
        return msg

    def input_handler(self):
        nick = thread(self.read_chat_message, "nick: ").strip()
        self.nick = nick
        self.input.put(nick)
        while True:
            msg = thread(self.read_chat_message, "").strip()
            self.input.put(msg)

    @call
    def chat(self):
        fork(self.input_handler)
        nick = self.input.get()
        send("%s\r\n" % nick)
        while True:
            evt, data = first(until_eol=True, waits=[self.input])
            if evt == "until_eol":
                print data.strip()
            else:
                send("%s\r\n" % data)
Example #7
class TestQueueTimeouts(object):
    def setup(self):
        self.result = Event()
        self.queue = Queue()
        self.timeouts = 0
        diesel.fork(self.consumer, 0.01)
        diesel.fork(self.producer, 0.05)
        diesel.fork(self.consumer, 0.10)
        ev, val = diesel.first(sleep=TIMEOUT, waits=[self.result])
        if ev == 'sleep':
            assert 0, 'timed out'

    def consumer(self, timeout):
        try:
            self.queue.get(timeout=timeout)
            self.result.set()
        except QueueTimeout:
            self.timeouts += 1

    def producer(self, delay):
        diesel.sleep(delay)
        self.queue.put('test')

    def test_a_consumer_timed_out(self):
        assert self.timeouts == 1

    def test_a_consumer_got_a_value(self):
        assert self.result.is_set
Example #8
    def __init__(self, identity, zmq_return):

        # The identity is some information sent along with packets from the
        # remote client that uniquely identifies it.

        self.identity = identity

        # The zmq_return is from the envelope and tells the ZeroMQ ROUTER
        # socket where to route outbound packets.

        self.zmq_return = zmq_return

        # The incoming queue is typically populated by the DieselZMQService
        # and represents a queue of messages sent from the remote client.

        self.incoming = Queue()

        # The outgoing queue is where return values from the
        # DieselZMQService.handle_client_packet method are placed. Those values
        # are sent on to the remote client.
        #
        # Other diesel threads can stick values directly into the outgoing
        # queue and the service will send them on as well. This allows for
        # asynchronous sending of messages to remote clients. That's why it's
        # called 'async' in the context.

        self.outgoing = Queue()

        # The context in general is a place where you can put data that is
        # related specifically to the remote client; it will exist as long as
        # the remote client doesn't time out.

        self.context = {'async': self.outgoing}
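
As a hypothetical illustration of the 'async' mechanism described in the comments above, a method on the surrounding service could push a message to a connected client outside the normal request/response cycle. `notify_client` and the `self.clients` mapping are assumptions about the enclosing DieselZMQService, not code from the source.

def notify_client(self, identity, text):
    # Look up the RemoteClient the service registered for this identity;
    # a KeyError here means the client has already timed out.
    remote_client = self.clients[identity]
    # Anything placed on the 'async' queue (the client's outgoing queue) is
    # picked up by the service loop and sent on to the remote client.
    remote_client.context['async'].put(text)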
Example #9
    def __init__(self, uri, logger=None, log_level=None):
        self.uri = uri
        self.zmq_socket = None
        self.log = logger or None
        self.selected_log_level = log_level
        self.clients = {}
        self.outgoing = Queue()
        self.incoming = Queue()
        self.name = self.name or self.__class__.__name__
        self._incoming_loop = None

        # Allow for custom `should_run` properties in subclasses.
        try:
            self.should_run = True
        except AttributeError:
            # A custom `should_run` property exists.
            pass

        if self.log and self.selected_log_level is not None:
            self.selected_log_level = None
            warnings.warn(
                "ignored `log_level` argument since `logger` was provided.",
                RuntimeWarning,
                stacklevel=2,
            )
Example #10
 def setup(self):
     self.queue = Queue()
     self.done = Countdown(N)
     self.results = []
     self.handled = defaultdict(int)
     self.populate()
     self.consume()
     self.trigger()
Example #11
 def __call__(self):
     for h, p in self.hosts:
         pq = Queue()
         sq = Queue()
         gq = Queue()
         fork(manage_peer_connection, h, p, pq, sq, gq)
         self.proposal_qs.append(pq)
         self.save_qs.append(sq)
         self.get_qs.append(gq)
Example #12
 def setup(self):
     self.result = Event()
     self.queue = Queue()
     self.timeouts = 0
     diesel.fork(self.consumer, 0.01)
     diesel.fork(self.producer, 0.05)
     diesel.fork(self.consumer, 0.10)
     ev, val = diesel.first(sleep=TIMEOUT, waits=[self.result])
     if ev == 'sleep':
         assert 0, 'timed out'
Example #13
class ThreadPool(object):
    def __init__(self, concurrency, handler, generator, finalizer=None):
        self.concurrency = concurrency
        self.handler = handler
        self.generator = generator
        self.finalizer = finalizer

    def handler_wrap(self):
        try:
            label("thread-pool-%s" % self.handler)
            while True:
                self.waiting += 1
                if self.waiting == 1:
                    self.trigger.set()
                i = self.q.get()
                self.waiting -= 1
                if i == ThreadPoolDie:
                    return
                self.handler(i)
        finally:
            self.running -= 1
            if self.waiting == 0:
                self.trigger.set()
            if self.running == 0:
                self.finished.set()

    def __call__(self):
        self.q = Queue()
        self.trigger = Event()
        self.finished = Event()
        self.waiting = 0
        self.running = 0
        try:
            while True:
                for x in xrange(self.concurrency - self.running):
                    self.running += 1
                    fork(self.handler_wrap)

                if self.waiting == 0:
                    self.trigger.wait()
                    self.trigger.clear()

                try:
                    n = self.generator()
                except StopIteration:
                    break

                self.q.put(n)
                sleep()
        finally:
            for x in xrange(self.concurrency):
                self.q.put(ThreadPoolDie)
            if self.finalizer:
                self.finished.wait()
                fork(self.finalizer)
Example #14
class ThreadPool(object):
    def __init__(self, concurrency, handler, generator, finalizer=None):
        self.concurrency = concurrency
        self.handler = handler
        self.generator = generator
        self.finalizer = finalizer

    def handler_wrap(self):
        try:
            label("thread-pool-%s" % self.handler)
            while True:
                self.waiting += 1
                if self.waiting == 1:
                    self.trigger.set()
                i = self.q.get()
                self.waiting -= 1
                if i == ThreadPoolDie:
                    return
                self.handler(i)
        finally:
            self.running -= 1
            if self.waiting == 0:
                self.trigger.set()
            if self.running == 0:
                self.finished.set()

    def __call__(self):
        self.q = Queue()
        self.trigger = Event()
        self.finished = Event()
        self.waiting = 0
        self.running = 0
        try:
            while True:
                for x in xrange(self.concurrency - self.running):
                    self.running += 1
                    fork(self.handler_wrap)

                if self.waiting == 0:
                    self.trigger.wait()
                    self.trigger.clear()

                try:
                    n = self.generator()
                except StopIteration:
                    break

                self.q.put(n)
                sleep()
        finally:
            for x in xrange(self.concurrency):
                self.q.put(ThreadPoolDie)
            if self.finalizer:
                self.finished.wait()
                fork(self.finalizer)
Example #15
    def websocket_protocol(self, req):
        """Runs the WebSocket protocol after the handshake is complete.

        Creates two `Queue` instances for incoming and outgoing messages and
        passes them to the `web_socket_handler` that was supplied to the
        `WebSocketServer` constructor.

        """
        inq = Queue()
        outq = Queue()

        if req.rfc_handshake:
            handle_frames = self.handle_rfc_6455_frames
        else:
            # Finish the non-RFC handshake
            key1 = req.headers.get('Sec-WebSocket-Key1')
            key2 = req.headers.get('Sec-WebSocket-Key2')

            # The final key can be in two places. The first is in the
            # `Request.data` attribute if diesel is *not* being proxied
            # to by a smart proxy that parsed HTTP requests. If it is being
            # proxied to, that data will not have been sent until after our
            # initial 101 Switching Protocols response, so we will need to
            # receive it here.

            if req.data:
                key3 = req.data
            else:
                evt, key3 = first(receive=8, sleep=5)
                assert evt == "receive", "timed out while finishing handshake"

            num1 = int(''.join(c for c in key1 if c in '0123456789'))
            num2 = int(''.join(c for c in key2 if c in '0123456789'))
            assert num1 % key1.count(' ') == 0
            assert num2 % key2.count(' ') == 0
            final = pack('!II8s', num1 / key1.count(' '),
                         num2 / key2.count(' '), key3)
            handshake_finish = hashlib.md5(final).digest()
            send(handshake_finish)

            handle_frames = self.handle_non_rfc_frames

        def wrap(req, inq, outq):
            self.web_socket_handler(req, inq, outq)
            outq.put(WebSocketDisconnect())

        handler_loop = fork(wrap, req, inq, outq)

        try:
            handle_frames(inq, outq)
        except ConnectionClosed:
            if handler_loop.running:
                inq.put(WebSocketDisconnect())
            raise
Example #16
    def __init__(self, concurrency, handler):
        """Creates a new ProcessPool with subprocesses that run the handler.

        Args:
            concurrency (int): The number of subprocesses to spawn.
            handler (callable): A callable that the subprocesses will execute.

        """
        self.concurrency = concurrency
        self.handler = handler
        self.available_procs = Queue()
        self.all_procs = []
Example #17
    def websocket_protocol(self, req):
        """Runs the WebSocket protocol after the handshake is complete.

        Creates two `Queue` instances for incoming and outgoing messages and
        passes them to the `web_socket_handler` that was supplied to the
        `WebSocketServer` constructor.

        """
        inq = Queue()
        outq = Queue()

        if req.rfc_handshake:
            handle_frames = self.handle_rfc_6455_frames
        else:
            # Finish the non-RFC handshake
            key1 = req.headers.get('Sec-WebSocket-Key1')
            key2 = req.headers.get('Sec-WebSocket-Key2')

            # The final key can be in two places. The first is in the
            # `Request.data` attribute if diesel is *not* being proxied
            # to by a smart proxy that parsed HTTP requests. If it is being
            # proxied to, that data will not have been sent until after our
            # initial 101 Switching Protocols response, so we will need to
            # receive it here.

            if req.data:
                key3 = req.data
            else:
                evt, key3 = first(receive=8, sleep=5)
                assert evt == "receive", "timed out while finishing handshake"

            num1 = int(''.join(c for c in key1 if c in '0123456789'))
            num2 = int(''.join(c for c in key2 if c in '0123456789'))
            assert num1 % key1.count(' ') == 0
            assert num2 % key2.count(' ') == 0
            final = pack('!II8s', num1 / key1.count(' '), num2 / key2.count(' '), key3)
            handshake_finish = hashlib.md5(final).digest()
            send(handshake_finish)

            handle_frames = self.handle_non_rfc_frames

        def wrap(req, inq, outq):
            self.web_socket_handler(req, inq, outq)
            outq.put(WebSocketDisconnect())

        handler_loop = fork(wrap, req, inq, outq)

        try:
            handle_frames(inq, outq)
        except ConnectionClosed:
            if handler_loop.running:
                inq.put(WebSocketDisconnect())
            raise
Example #18
def test_pending_events_dont_break_ordering_when_handling_early_values():

    # This test confirms that "early values" returned from a Waiter do
    # not give other pending event sources the chance to switch their
    # values into the greenlet while it context switches to give other
    # greenlets a chance to run.

    # First we set up a fake connection. It mimics a connection that does
    # not have data waiting in the buffer, and has to wait for the system
    # to call it back when data is ready on the socket. The delay argument
    # specifies how long the test should wait before simulating that data
    # is ready.

    conn1 = FakeConnection(1, delay=[None, 0.1])

    # Next we set up a Queue instance and prime it with a value, so it will
    # be ready early and return an EarlyValue.

    q = Queue()
    q.put(1)

    # Force our fake connection into the connection stack for the current
    # loop so we can make network calls (like until_eol).

    loop = core.current_loop
    loop.connection_stack.append(conn1)

    try:

        # OK, this first() call does two things.
        # 1) It calls until_eol, finds that no data is ready, and sets up a
        #    callback to be triggered when data is ready (which our
        #    FakeConnection will simulate).
        # 2) Fetches from the 'q' which will result in an EarlyValue.

        source, value = diesel.first(until_eol=True, waits=[q])
        assert source == q, source

        # What must happen is that the callback registered to handle data
        # from the FakeConnection when it arrives MUST BE CANCELED/DISCARDED/
        # FORGOTTEN/NEVER CALLED. If it gets called, it will muck with
        # internal state, and possibly switch back into the running greenlet
        # with an unexpected value, which will throw off the ordering of
        # internal state and basically break everything.

        v = diesel.until_eol()
        assert v == 'expected value 1\r\n', 'actual value == %r !!!' % (v, )

    finally:
        loop.connection_stack = []
Example #19
def test_pending_events_dont_break_ordering_when_handling_early_values():

    # This test confirms that "early values" returned from a Waiter do
    # not give other pending event sources the chance to switch their
    # values into the greenlet while it context switches to give other
    # greenlets a chance to run.

    # First we set up a fake connection. It mimics a connection that does
    # not have data waiting in the buffer, and has to wait for the system
    # to call it back when data is ready on the socket. The delay argument
    # specifies how long the test should wait before simulating that data
    # is ready.

    conn1 = FakeConnection(1, delay=[None, 0.1])

    # Next we set up a Queue instance and prime it with a value, so it will
    # be ready early and return an EarlyValue.

    q = Queue()
    q.put(1)

    # Force our fake connection into the connection stack for the current
    # loop so we can make network calls (like until_eol).

    loop = core.current_loop
    loop.connection_stack.append(conn1)

    try:

        # OK, this first() call does two things.
        # 1) It calls until_eol, finds that no data is ready, and sets up a
        #    callback to be triggered when data is ready (which our
        #    FakeConnection will simulate).
        # 2) Fetches from the 'q' which will result in an EarlyValue.

        source, value = diesel.first(until_eol=True, waits=[q])
        assert source == q, source

        # What must happen is that the callback registered to handle data
        # from the FakeConnection when it arrives MUST BE CANCELED/DISCARDED/
        # FORGOTTEN/NEVER CALLED. If it gets called, it will muck with
        # internal state, and possibly switch back into the running greenlet
        # with an unexpected value, which will throw off the ordering of
        # internal state and basically break everything.

        v = diesel.until_eol()
        assert v == 'expected value 1\r\n', 'actual value == %r !!!' % (v,)

    finally:
        loop.connection_stack = []
Example #20
    def do_upgrade(self, req):
        if req.headers.get_one('Upgrade') != 'WebSocket':
            return self.web_handler(req)

        # do upgrade response
        org = req.headers.get_one('Origin')

        send(
'''HTTP/1.1 101 Web Socket Protocol Handshake\r
Upgrade: WebSocket\r
Connection: Upgrade\r
WebSocket-Origin: %s\r
WebSocket-Location: %s\r
WebSocket-Protocol: diesel-generic\r
\r
''' % (org, self.ws_location))
        
        inq = Queue()
        outq = Queue()

        def wrap(inq, outq):
            self.web_socket_handler(inq, outq)
            outq.put(WebSocketDisconnect())

        fork(wrap, inq, outq)
                                    
        while True:
            try:
                typ, val = first(receive=1, waits=[outq.wait_id])
                if typ == 'receive':
                    assert val == '\x00'
                    val = until('\xff')[:-1]
                    if val == '':
                        inq.put(WebSocketDisconnect())
                    else:
                        data = dict((k, v[0]) if len(v) == 1 else (k, v) for k, v in cgi.parse_qs(val).iteritems())
                        inq.put(WebSocketData(data))
                else:
                    try:
                        v = outq.get(waiting=False)
                    except QueueEmpty:
                        pass
                    else:
                        if type(v) is WebSocketDisconnect:
                            send('\x00\xff')
                            break
                        else:
                            data = dumps(dict(v))
                            send('\x00%s\xff' % data)

            except ConnectionClosed:
                inq.put(WebSocketDisconnect())
                raise ConnectionClosed("remote disconnected")
Example #21
    def sub(self, classes):
        if type(classes) not in (set, list, tuple):
            classes = [classes]

        hb = self
        q = Queue()

        class Poller(object):
            def __init__(self):
                for cls in classes:
                    hb.sub_adds.append((cls, q))

                fire(hb.sub_wake_signal)

            def fetch(self, timeout=None):
                try:
                    qn, msg = q.get(timeout=timeout)
                except QueueTimeout:
                    return (None, None)
                else:
                    return (qn, msg)

            def close(self):
                for cls in classes:
                    hb.sub_rms.append((cls, q))

        pl = Poller()
        yield pl
        pl.close()
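
Since `sub` yields a Poller and then closes it, it is presumably wrapped with `contextlib.contextmanager` in the surrounding class. A hypothetical subscription loop (the names `hub`, `ChatMessage`, and `handle` are invented for illustration) could then look like this:

with hub.sub(ChatMessage) as poller:
    while True:
        cls, msg = poller.fetch(timeout=5.0)
        if cls is None:
            break            # QueueTimeout: nothing arrived within five seconds
        handle(cls, msg)     # hypothetical application callback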
Example #22
 def __init__(self, uri, logger=None, log_level=None):
     self.uri = uri
     self.zmq_socket = None
     self.log = logger or None
     self.selected_log_level = log_level
     self.clients = {}
     self.outgoing = Queue()
     self.incoming = Queue()
     self.name = self.name or self.__class__.__name__
     if self.log and self.selected_log_level is not None:
         self.selected_log_level = None
         warnings.warn(
             "ignored `log_level` argument since `logger` was provided.",
             RuntimeWarning,
             stacklevel=2,
         )
Example #23
 def __init__(self, uri, logger=None, log_level=None):
     self.uri = uri
     self.zmq_socket = None
     self.log = logger or None
     self.selected_log_level = log_level
     self.clients = {}
     self.outgoing = Queue()
     self.incoming = Queue()
     self.name = self.name or self.__class__.__name__
     if self.log and self.selected_log_level is not None:
         self.selected_log_level = None
         warnings.warn(
             "ignored `log_level` argument since `logger` was provided.",
             RuntimeWarning,
             stacklevel=2,
         )
Example #24
def test_queue_multi_consumer():
    q = Queue()
    c = 10000
    s1 = [0]
    s2 = [0]

    def g(seen):
        def run():
            for x in xrange(c):
                v = q.get()
                seen[0] += 1
                sleep()

        return run

    def p():
        for x in xrange(c):
            q.put(x)
        sleep(1)
        print s1, s2
        WVPASS(s1[0] < c)
        WVPASS(s2[0] < c)
        WVPASS(s1[0] + s2[0] == c)

        quickstop()

    quickstart(p, g(s1), g(s2))
Example #25
    def __call__(self):
        self.q = Queue()
        self.trigger = Event()
        self.finished = Event()
        self.waiting = 0
        self.running = 0
        try:
            while True:
                for x in xrange(self.concurrency - self.running):
                    self.running += 1
                    fork(self.handler_wrap)

                if self.waiting == 0:
                    self.trigger.wait()
                    self.trigger.clear()

                try:
                    n = self.generator()
                except StopIteration:
                    break

                self.q.put(n)
                sleep()
        finally:
            for x in xrange(self.concurrency):
                self.q.put(ThreadPoolDie)
            if self.finalizer:
                self.finished.wait()
                fork(self.finalizer)
Example #26
    def host_specific_send(self, host, msg, typ, transport_cb):
        if host not in self.host_queues:
            q = Queue()
            fork(host_loop, host, q)
            self.host_queues[host] = q

        self.host_queues[host].put((msg, typ, transport_cb))
Example #27
class ProcessPool(object):
    """A bounded pool of subprocesses.

    An instance is callable, just like a Process, and will return the result
    of executing the function in a subprocess. If all subprocesses are busy,
    the caller will wait in a queue.

    """
    def __init__(self, concurrency, handler):
        """Creates a new ProcessPool with subprocesses that run the handler.

        Args:
            concurrency (int): The number of subprocesses to spawn.
            handler (callable): A callable that the subprocesses will execute.

        """
        self.concurrency = concurrency
        self.handler = handler
        self.available_procs = Queue()
        self.all_procs = []

    def __call__(self, *args, **params):
        """Gets a process from the pool, executes it, and returns the results.

        This call will block until there is a process available to handle it.

        """
        if not self.all_procs:
            raise NoSubProcesses("Did you forget to start the pool?")
        try:
            p = self.available_procs.get()
            result = p(*args, **params)
            return result
        finally:
            self.available_procs.put(p)

    def pool(self):
        """A callable that starts the processes in the pool.

        This is useful as the callable to pass to a diesel.Loop when adding a
        ProcessPool to your application.

        """
        for i in xrange(self.concurrency):
            proc = spawn(self.handler)
            self.available_procs.put(proc)
            self.all_procs.append(proc)
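
A minimal usage sketch, assuming diesel's quickstart/quickstop helpers as used in other examples on this page; the `square` handler and `use_pool` loop are invented, and the pool must have been started (via `pool.pool`) before it is called:

def square(x):
    return x * x

pool = ProcessPool(4, square)

def use_pool():
    # Blocks until one of the four subprocesses is free, then runs square(7).
    print pool(7)
    quickstop()

# pool.pool starts the subprocesses; use_pool then borrows one of them.
quickstart(pool.pool, use_pool)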
Example #28
class ProcessPool(object):
    """A bounded pool of subprocesses.

    An instance is callable, just like a Process, and will return the result
    of executing the function in a subprocess. If all subprocesses are busy,
    the caller will wait in a queue.

    """
    def __init__(self, concurrency, handler):
        """Creates a new ProcessPool with subprocesses that run the handler.

        Args:
            concurrency (int): The number of subprocesses to spawn.
            handler (callable): A callable that the subprocesses will execute.

        """
        self.concurrency = concurrency
        self.handler = handler
        self.available_procs = Queue()
        self.all_procs = []

    def __call__(self, *args, **params):
        """Gets a process from the pool, executes it, and returns the results.

        This call will block until there is a process available to handle it.

        """
        if not self.all_procs:
            raise NoSubProcesses("Did you forget to start the pool?")
        try:
            p = self.available_procs.get()
            result = p(*args, **params)
            return result
        finally:
            self.available_procs.put(p)

    def pool(self):
        """A callable that starts the processes in the pool.

        This is useful as the callable to pass to a diesel.Loop when adding a
        ProcessPool to your application.

        """
        for i in xrange(self.concurrency):
            proc = spawn(self.handler)
            self.available_procs.put(proc)
            self.all_procs.append(proc)
Example #29
 def setup(self):
     self.queue = Queue()
     self.done = Countdown(N)
     self.results = []
     self.handled = defaultdict(int)
     self.populate()
     self.consume()
     self.trigger()
Example #30
 def __init__(self):
     self.routes = defaultdict(set)  # message name to host
     self.local_handlers = {}
     self.enabled_handlers = {}
     self.classes = {}
     self.host_queues = {}
     self.run_nameserver = None
     self.role_messages = defaultdict(list)
     self.roles = set()
     self.roles_wanted = set()
     self.roles_owned = set()
     self.role_clocks = {}
     self.role_by_name = {}
     self.incoming = Queue()
     self.pending = {}
     self.rpc_waits = {}
     self.table_changes = Queue()
Example #31
 def setup(self):
     self.result = Event()
     self.queue = Queue()
     self.timeouts = 0
     diesel.fork(self.consumer, 0.01)
     diesel.fork(self.producer, 0.05)
     diesel.fork(self.consumer, 0.10)
     ev, val = diesel.first(sleep=TIMEOUT, waits=[self.result])
     if ev == 'sleep':
         assert 0, 'timed out'
Example #32
    def __init__(self, concurrency, handler):
        """Creates a new ProcessPool with subprocesses that run the handler.

        Args:
            concurrency (int): The number of subprocesses to spawn.
            handler (callable): A callable that the subprocesses will execute.

        """
        self.concurrency = concurrency
        self.handler = handler
        self.available_procs = Queue()
        self.all_procs = []
Example #33
    def network_set(self, client, key, value, new):
        proposal_id = idgen.next()
        resq = Queue()
        if new:
            rollback = '|' + new + ':' + proposal_id
            value += rollback
        else:
            rollback = None

        for q in self.proposal_qs:
            q.put((proposal_id, key, resq))

        success = 0
        while True:  # XXX timeout etc
            v = resq.get()
            if v == PROPOSE_SUCCESS:
                success += 1
                if success == self.quorum_size:
                    break
            elif v == PROPOSE_FAIL:
                return None
            else:
                assert 0

        for q in self.save_qs:
            q.put((proposal_id, key, value, client, rollback, resq))

        success = 0
        while True:  # XXX timeout etc
            v = resq.get()
            if v == PROPOSE_SUCCESS:
                pass  # don't care
            elif v == PROPOSE_FAIL:
                pass  # don't care
            elif v == SAVE_SUCCESS:
                success += 1
                if success == self.quorum_size:
                    return proposal_id
            else:
                assert 0
Example #34
    def network_set(self, client, key, value, new):
        proposal_id = idgen.next()
        resq = Queue()
        if new:
            rollback = '|' + new + ':' + proposal_id
            value += rollback
        else:
            rollback = None

        for q in self.proposal_qs:
            q.put((proposal_id, key, resq))

        success = 0
        while True: # XXX timeout etc
            v = resq.get()
            if v == PROPOSE_SUCCESS:
                success += 1
                if success == self.quorum_size:
                    break
            elif v == PROPOSE_FAIL:
                return None
            else:
                assert 0

        for q in self.save_qs:
            q.put((proposal_id, key, value, client, rollback, resq))

        success = 0
        while True: # XXX timeout etc
            v = resq.get()
            if v == PROPOSE_SUCCESS:
                pass # don't care
            elif v == PROPOSE_FAIL:
                pass # don't care
            elif v == SAVE_SUCCESS:
                success += 1
                if success == self.quorum_size:
                    return proposal_id
            else:
                assert 0
Example #35
    def network_get(self, key):
        answers = defaultdict(int)
        resq = Queue()

        for gq in self.get_qs:
            gq.put((key, resq))

        ans = None

        # XXX - timeout
        for x in xrange(self.num_hosts):
            value = resq.get()
            answers[value] += 1
            if answers[value] == self.quorum_size:
                ans = value
                break

        if ans is not None and (key not in store or store[key].proposal_id < ans.proposal_id):
            clog.error("read-repair %s" % ans)
            store.set(key, ans)

        return ans
Example #36
    def get_many(self, keys, concurrency_limit=100, no_failures=False):
        assert self.used_client_context, "Cannot fetch in parallel without a pooled make_client_context!"
        inq = Queue()
        outq = Queue()
        for k in keys:
            inq.put(k)

        for x in xrange(min(len(keys), concurrency_limit)):
            diesel.fork(self._subrequest, inq, outq)

        failure = False
        okay, err = [], []
        for k in keys:
            (key, success, val) = outq.get()
            if success:
                okay.append((key, val))
            else:
                err.append((key, val))

        if no_failures and err:
            # Only raise when some of the parallel subrequests actually failed.
            raise BucketSubrequestException("Error in parallel subrequests", err)
        return okay, err
Example #37
 def rpc(self, m, timeout=10):
     q = Queue()
     self.incoming.put(Delivery(m, timeout, rqueue=q))
     ev, res = first(sleep=timeout, waits=[q])
     if ev == q:
         if res == ConvoyRemoteException:
             raise ConvoyRemoteError(res.exc_desc)
         if res == ConvoyRemoteNull:
             return None
         return res
     else:
         raise ConvoyTimeoutError("No response from a " + (
             "consensus remote within %ss timeout period" % timeout))
Example #38
def handle(request):
    """Handle a request for a websocket.
    """
    if request.transport != 'xhr-polling':
        raise Response(404)

    org = request.headers.one('Origin')
    inq = Queue()
    outq = Queue()

    def wrap(request, inq, outq):
        handler(request, inq, outq)
        outq.put(WebSocketDisconnect())
    fork(wrap, request, inq, outq)

    while True:
        try:
            log.debug("trying websocket thing")
            typ, val = first(receive=1, waits=[outq.wait_id])
            log.debug(typ)
            log.debug(val)
            if typ == 'receive':
                assert val == '\x00'
                val = until('\xff')[:-1]
                if val == '':
                    inq.put(WebSocketDisconnect())
                else:
                    inq.put(request)
            else:
                try:
                    v = outq.get(waiting=False)
                except QueueEmpty:
                    pass
                else:
                    if type(v) is WebSocketDisconnect:
                        send('\x00\xff')
                        break
                    else:
                        send('\x00%s\xff' % response.to_http(request.version))

        except ConnectionClosed:
            inq.put(WebSocketDisconnect())
            raise ConnectionClosed("remote disconnected")
Example #39
    def network_get(self, key):
        answers = defaultdict(int)
        resq = Queue()

        for gq in self.get_qs:
            gq.put((key, resq))

        ans = None

        # XXX - timeout
        for x in xrange(self.num_hosts):
            value = resq.get()
            answers[value] += 1
            if answers[value] == self.quorum_size:
                ans = value
                break

        if ans is not None and (key not in store
                                or store[key].proposal_id < ans.proposal_id):
            clog.error("read-repair %s" % ans)
            store.set(key, ans)

        return ans
Example #40
    def subq(self, classes):
        if type(classes) not in (set, list, tuple):
            classes = [classes]

        q = Queue()

        for cls in classes:
            self.sub_adds.append((cls, q))

        fire(self.sub_wake_signal)

        try:
            yield q
        finally:
            for cls in classes:
                self.sub_rms.append((cls, q))
Example #41
 def __init__(self):
     self.routes = defaultdict(set) # message name to host
     self.local_handlers = {}
     self.enabled_handlers = {}
     self.classes = {}
     self.host_queues = {}
     self.run_nameserver = None
     self.role_messages = defaultdict(list)
     self.roles = set()
     self.roles_wanted = set()
     self.roles_owned = set()
     self.role_clocks = {}
     self.role_by_name = {}
     self.incoming = Queue()
     self.pending = {}
     self.rpc_waits = {}
     self.table_changes = Queue()
Example #42
 def __init__(self,
              init_callable,
              close_callable,
              pool_size=5,
              pool_max=None,
              poll_max_timeout=5):
     self.init_callable = init_callable
     self.close_callable = close_callable
     self.pool_size = pool_size
     self.poll_max_timeout = poll_max_timeout
     if pool_max:
         self.remaining_conns = Queue()
         for _ in xrange(pool_max):
             self.remaining_conns.inp.append(None)
     else:
         self.remaining_conns = InfiniteQueue()
     self.connections = deque()
Example #43
    def __init__(self, identity):

        # The identity is some information sent along with packets from the
        # remote client that uniquely identifies it.

        self.identity = identity

        # The incoming queue is typically populated by the DieselNitroService
        # and represents a queue of messages sent from the remote client.

        self.incoming = Queue()

        # The context in general is a place where you can put data that is
        # related specifically to the remote client; it will exist as long as
        # the remote client doesn't time out.

        self.context = {}

        # A skeleton frame to hang onto for async sending back
        self.async_frame = None
Example #44
def test_queue_put_noloss():
    q = Queue()
    c = 10000
    done = [0]

    def g():
        for x in xrange(c):
            v = q.get()
            WVPASS(v == x)

        done[0] = 1

    def p():
        for x in xrange(c):
            q.put(x)
        sleep(1)
        WVPASS(done[0] == 1)

        quickstop()

    quickstart(p, g)
Example #45
class QueueHarness(object):
    def setup(self):
        self.queue = Queue()
        self.done = Countdown(N)
        self.results = []
        self.handled = defaultdict(int)
        self.populate()
        self.consume()
        self.trigger()

    def consume(self):
        def worker(myid):
            while True:
                # Test both queue.get and wait() on queue (both are valid
                # APIs for getting items from the queue). The results should
                # be the same.
                if random.random() > 0.5:
                    v = self.queue.get()
                else:
                    v = diesel.wait(self.queue)
                self.results.append(v)
                self.handled[myid] += 1
                self.done.tick()

        for i in xrange(W):
            diesel.fork(worker, i)

    def trigger(self):
        ev, val = diesel.first(sleep=TIMEOUT, waits=[self.done])
        if ev == 'sleep':
            assert 0, "timed out"

    def test_results_are_ordered_as_expected(self):
        assert self.results == range(N), self.results

    def test_results_are_balanced(self):
        for wid, count in self.handled.iteritems():
            assert count == N / W, count
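
QueueHarness leaves populate() to its subclasses. A hypothetical concrete test case (the class name is invented; N, W and TIMEOUT are module-level constants in the surrounding tests) might simply pre-fill the queue before the workers start:

class TestPrefilledQueue(QueueHarness):
    def populate(self):
        # Enqueue N integers in order before the workers begin consuming.
        for i in xrange(N):
            self.queue.put(i)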
Example #46
class QueueHarness(object):
    def setup(self):
        self.queue = Queue()
        self.done = Countdown(N)
        self.results = []
        self.handled = defaultdict(int)
        self.populate()
        self.consume()
        self.trigger()

    def consume(self):
        def worker(myid):
            while True:
                # Test both queue.get and wait() on queue (both are valid
                # APIs for getting items from the queue). The results should
                # be the same.
                if random.random() > 0.5:
                    v = self.queue.get()
                else:
                    v = diesel.wait(self.queue)
                self.results.append(v)
                self.handled[myid] += 1
                self.done.tick()
        for i in xrange(W):
            diesel.fork(worker, i)

    def trigger(self):
        ev, val = diesel.first(sleep=TIMEOUT, waits=[self.done])
        if ev == 'sleep':
            assert 0, "timed out"

    def test_results_are_ordered_as_expected(self):
        assert self.results == range(N), self.results

    def test_results_are_balanced(self):
        for wid, count in self.handled.iteritems():
            assert count == N/W, count
Example #47
    def get_many(self, keys, concurrency_limit=100, no_failures=False):
        assert self.used_client_context,\
        "Cannot fetch in parallel without a pooled make_client_context!"
        inq = Queue()
        outq = Queue()
        for k in keys:
            inq.put(k)

        for x in xrange(min(len(keys), concurrency_limit)):
            diesel.fork(self._subrequest, inq, outq)

        failure = False
        okay, err = [], []
        for k in keys:
            (key, success, val) = outq.get()
            if success:
                okay.append((key, val))
            else:
                err.append((key, val))

        if no_failures and err:
            # Only raise when some of the parallel subrequests actually failed.
            raise BucketSubrequestException("Error in parallel subrequests",
                                            err)
        return okay, err
Example #48
 def clear(self, key):
     rq = Queue()
     self.request_queue.put((ConvoySetRequest(key, None, 0, 5, 0), rq))
     return rq.get()
Example #49
def pair(done):
    q = Queue()
    diesel.fork(producer, q)
    diesel.fork(consumer, q, done)
Example #50
 def __init__(self, *args, **kw):
     Client.__init__(self, *args, **kw)
     self.input = Queue()
Example #51
 def alive(self):
     rq = Queue()
     self.request_queue.put((ConvoyAliveRequest(), rq))
     return rq.get()
Example #52
 def wait(self, timeout, clocks):
     rq = Queue()
     self.request_queue.put((ConvoyWaitRequest(timeout, clocks), rq))
     return rq.get()
Example #53
 def add(self, key, value, cap, to=0):
     rq = Queue()
     self.request_queue.put((ConvoySetRequest(key, value, cap, to, 1), rq))
     return rq.get()
Example #54
class ConvoyNameService(object):
    def __init__(self, servers):
        self.servers = servers
        self.request_queue = Queue()
        self.pool_locks = {}

    def __call__(self):
        while True:
            server = random.choice(self.servers)
            with ConvoyConsensusClient(*server) as client:
                while True:
                    req, rq = self.request_queue.get()
                    if type(req) is ConvoyGetRequest:
                        resp = client.get(req.key)
                    elif type(req) is ConvoySetRequest:
                        resp = client.add_to_set(req.key, req.value, req.cap, req.timeout, req.lock)
                    elif type(req) is ConvoyWaitRequest:
                        resp = client.wait(req.timeout, req.clocks)
                    elif type(req) is ConvoyAliveRequest:
                        resp = client.keep_alive()
                    else:
                        assert 0
                    rq.put(resp)

    def lookup(self, key):
        rq = Queue()
        self.request_queue.put((ConvoyGetRequest(key), rq))
        return rq.get()

    def clear(self, key):
        rq = Queue()
        self.request_queue.put((ConvoySetRequest(key, None, 0, 5, 0), rq))
        return rq.get()

    def set(self, key, value):
        rq = Queue()
        self.request_queue.put((ConvoySetRequest(key, value, 0, 5, 0), rq))
        return rq.get()

    def add(self, key, value, cap, to=0):
        rq = Queue()
        self.request_queue.put((ConvoySetRequest(key, value, cap, to, 1), rq))
        return rq.get()

    def wait(self, timeout, clocks):
        rq = Queue()
        self.request_queue.put((ConvoyWaitRequest(timeout, clocks), rq))
        return rq.get()

    def alive(self):
        rq = Queue()
        self.request_queue.put((ConvoyAliveRequest(), rq))
        return rq.get()
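
A hypothetical usage sketch: the ConvoyNameService instance itself is the long-running loop (its __call__ drains request_queue), so it has to be running as a green thread before the blocking helpers are used. The server addresses and key/value names are invented; quickstart is the diesel helper used in other examples here.

ns = ConvoyNameService([('127.0.0.1', 4001), ('127.0.0.1', 4002)])

def use_nameservice():
    ns.set('leader', 'node-a')      # blocks until the service loop replies
    print ns.lookup('leader')

# Run the request-draining loop and a client of it side by side.
quickstart(ns, use_nameservice)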
Example #55
class DieselNitroService(object):
    """A Nitro service that can handle multiple clients.

    Clients must maintain a steady flow of messages (a heartbeat of some
    sort) in order to maintain state in the service. Alternatively, the
    timeout can be set to a sufficiently large value, with the understanding
    that it will cause more resource consumption.

    """
    name = ''
    default_log_level = loglevels.DEBUG
    timeout = 10

    def __init__(self, uri, logger=None, log_level=None):
        self.uri = uri
        self.nitro_socket = None
        self.log = logger or None
        self.selected_log_level = log_level
        self.clients = {}
        self.outgoing = Queue()
        self.incoming = Queue()
        self.name = self.name or self.__class__.__name__
        self._incoming_loop = None

        # Allow for custom `should_run` properties in subclasses.
        try:
            self.should_run = True
        except AttributeError:
            # A custom `should_run` property exists.
            pass

        if self.log and self.selected_log_level is not None:
            self.selected_log_level = None
            warnings.warn(
                "ignored `log_level` argument since `logger` was provided.",
                RuntimeWarning,
                stacklevel=2,
            )

    def _create_server_socket(self):
        self.nitro_socket = DieselNitroSocket(bind=self.uri)

    def _setup_the_logging_system(self):
        if not self.log:
            if self.selected_log_level is not None:
                log_level = self.selected_log_level
            else:
                log_level = self.default_log_level
            log_name = self.name or self.__class__.__name__
            self.log = log.name(log_name)
            self.log.min_level = log_level

    def _handle_client_requests_and_responses(self, remote_client):
        assert self.nitro_socket
        queues = [remote_client.incoming]
        try:
            while True:
                (evt, value) = diesel.first(waits=queues, sleep=self.timeout)
                if evt is remote_client.incoming:
                    assert isinstance(value, Message)
                    remote_client.async_frame = value.orig_frame
                    resp = self.handle_client_packet(value.data, remote_client.context)
                    if resp:
                        if isinstance(resp, basestring):
                            output = [resp]
                        else:
                            output = iter(resp)
                        for part in output:
                            msg = Message(
                                value.orig_frame,
                                remote_client.identity,
                                self.serialize_message(remote_client.identity, part),
                            )
                            self.outgoing.put(msg)
                elif evt == 'sleep':
                    break
        finally:
            self._cleanup_client(remote_client)

    def _cleanup_client(self, remote_client):
        del self.clients[remote_client.identity]
        self.cleanup_client(remote_client)
        self.log.debug("cleaned up client %r" % remote_client.identity)

    def _handle_all_inbound_and_outbound_traffic(self):
        assert self.nitro_socket
        queues = [self.nitro_socket, self.outgoing]
        socket = self.nitro_socket
        make_frame = pynitro.NitroFrame
        while self.should_run:
            (queue, msg) = diesel.first(waits=queues)

            if queue is self.outgoing:
                socket.reply(msg.orig_frame, make_frame(msg.data))
            else:
                id, obj = self.parse_message(msg.data)
                msg.clear_data()
                msg = Message(msg, id, obj)
                if msg.identity not in self.clients:
                    self._register_client(msg)
                self.clients[msg.identity].incoming.put(msg)


    def _register_client(self, msg):
        remote = RemoteClient.from_message(msg)
        self.clients[msg.identity] = remote
        self.register_client(remote, msg)
        diesel.fork_child(self._handle_client_requests_and_responses, remote)

    # Public API
    # ==========

    def __call__(self):
        return self.run()

    def run(self):
        self._create_server_socket()
        self._setup_the_logging_system()
        self._handle_all_inbound_and_outbound_traffic()

    def handle_client_packet(self, packet, context):
        """Called with a bytestring packet and dictionary context.

        Return an iterable of bytestrings.

        """
        raise NotImplementedError()

    def cleanup_client(self, remote_client):
        """Called with a RemoteClient instance. Do any cleanup you need to."""
        pass

    def register_client(self, remote_client, msg):
        """Called with a RemoteClient instance. Do any registration here."""
        pass

    def parse_message(self, raw_data):
        """Subclasses can override to alter the handling of inbound data.

        Transform an incoming bytestring into a structure (aka, json.loads)
        """
        return None, raw_data

    def serialize_message(self, identity, raw_data):
        """Subclasses can override to alter the handling of outbound data.

        Turn some structure into a bytestring (aka, json.dumps)
        """
        return raw_data

    def async_send(self, identity, msg):
        """Raises KeyError if client is no longer connected.
        """
        remote_client = self.clients[identity]
        out = self.serialize_message(identity, msg)
        self.outgoing.put(
            Message(
                remote_client.async_frame,
                identity,
                out))
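
As a hypothetical subclass of the service above, an echo service only needs to override handle_client_packet. The class name, URI, and port are invented; quickstart is the diesel helper used in other examples on this page.

class EchoNitroService(DieselNitroService):
    def handle_client_packet(self, packet, context):
        # Return an iterable of bytestrings; each one becomes a reply message.
        return [packet]

# quickstart(EchoNitroService('tcp://127.0.0.1:4321'))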
Example #56
 def lookup(self, key):
     rq = Queue()
     self.request_queue.put((ConvoyGetRequest(key), rq))
     return rq.get()
Example #57
 def __init__(self, servers):
     self.servers = servers
     self.request_queue = Queue()
     self.pool_locks = {}
Example #58
 def set(self, key, value):
     rq = Queue()
     self.request_queue.put((ConvoySetRequest(key, value, 0, 5, 0), rq))
     return rq.get()