def __init__(self, uri, logger=None, log_level=None):
    """Set up the service's socket, queues and logging.

    :param uri: URI the service's ZeroMQ socket will use.
    :param logger: Optional pre-built logger; when given, `log_level`
        is ignored (a RuntimeWarning is emitted if both are passed).
    :param log_level: Log level used only when no `logger` is supplied.
    """
    self.uri = uri
    self.zmq_socket = None
    self.log = logger or None
    self.selected_log_level = log_level
    self.clients = {}
    self.outgoing = Queue()
    self.incoming = Queue()
    # `name` may already be set as a class attribute by a subclass;
    # fall back to the class name otherwise.
    self.name = self.name or self.__class__.__name__
    self._incoming_loop = None

    # Allow for custom `should_run` properties in subclasses.  A plain
    # attribute assignment succeeds; a read-only property raises.
    try:
        self.should_run = True
    except AttributeError:
        # A custom `should_run` property exists.
        pass

    # A supplied logger wins over a bare log level.
    if self.log and self.selected_log_level is not None:
        self.selected_log_level = None
        warnings.warn(
            "ignored `log_level` argument since `logger` was provided.",
            RuntimeWarning,
            stacklevel=2,
        )
def __init__(self, identity, zmq_return):
    """Track the state of one remote client connected over ZeroMQ.

    :param identity: Information sent along with packets from the
        remote client that uniquely identifies it.
    :param zmq_return: Envelope data telling the ZeroMQ ROUTER socket
        where to route outbound packets.
    """
    self.identity = identity
    self.zmq_return = zmq_return
    # The incoming queue is typically populated by the DieselZMQService
    # and represents a queue of messages sent from the remote client.
    self.incoming = Queue()
    # The outgoing queue is where return values from the
    # DieselZMQService.handle_client_packet method are placed.  Those
    # values are sent on to the remote client.
    #
    # Other diesel threads can stick values directly into the outgoing
    # queue and the service will send them on as well.  This allows for
    # asynchronous sending of messages to remote clients.  That's why
    # it's called 'async' in the context.
    self.outgoing = Queue()
    # The context in general is a place where you can put data that is
    # related specifically to the remote client and it will exist as
    # long as the remote client doesn't timeout.
    self.context = {'async': self.outgoing}
def __call__(self):
    """Spawn one peer-connection manager per configured host.

    For every (host, port) pair, three queues are created -- one each
    for proposals, saves and gets -- handed to a forked
    `manage_peer_connection` loop, and recorded on the instance so
    requests can later be fanned out to every peer.
    """
    for host, port in self.hosts:
        proposal_q, save_q, get_q = Queue(), Queue(), Queue()
        fork(manage_peer_connection, host, port, proposal_q, save_q, get_q)
        self.proposal_qs.append(proposal_q)
        self.save_qs.append(save_q)
        self.get_qs.append(get_q)
def websocket_protocol(self, req):
    """Runs the WebSocket protocol after the handshake is complete.

    Creates two `Queue` instances for incoming and outgoing messages
    and passes them to the `web_socket_handler` that was supplied to
    the `WebSocketServer` constructor.

    :param req: The HTTP upgrade request.  `req.rfc_handshake` selects
        between RFC 6455 framing and the legacy hixie-76 protocol.
    :raises ConnectionClosed: re-raised after notifying the handler
        loop of the disconnect.
    """
    inq = Queue()
    outq = Queue()

    if req.rfc_handshake:
        handle_frames = self.handle_rfc_6455_frames
    else:
        # Finish the non-RFC (hixie-76) handshake.
        key1 = req.headers.get('Sec-WebSocket-Key1')
        key2 = req.headers.get('Sec-WebSocket-Key2')
        # The final key can be in two places. The first is in the
        # `Request.data` attribute if diesel is *not* being proxied
        # to by a smart proxy that parsed HTTP requests. If it is being
        # proxied to, that data will not have been sent until after our
        # initial 101 Switching Protocols response, so we will need to
        # receive it here.
        if req.data:
            key3 = req.data
        else:
            evt, key3 = first(receive=8, sleep=5)
            assert evt == "receive", "timed out while finishing handshake"
        num1 = int(''.join(c for c in key1 if c in '0123456789'))
        num2 = int(''.join(c for c in key2 if c in '0123456789'))
        assert num1 % key1.count(' ') == 0
        assert num2 % key2.count(' ') == 0
        # Bug fix: use floor division.  pack('!II8s', ...) requires
        # integers; plain `/` would yield a float under Python 3's
        # true division, while `//` behaves identically on Python 2.
        final = pack('!II8s',
                     num1 // key1.count(' '),
                     num2 // key2.count(' '),
                     key3)
        handshake_finish = hashlib.md5(final).digest()
        send(handshake_finish)
        handle_frames = self.handle_non_rfc_frames

    def wrap(req, inq, outq):
        # Run the user handler, then signal disconnect downstream.
        self.web_socket_handler(req, inq, outq)
        outq.put(WebSocketDisconnect())

    handler_loop = fork(wrap, req, inq, outq)

    try:
        handle_frames(inq, outq)
    except ConnectionClosed:
        if handler_loop.running:
            inq.put(WebSocketDisconnect())
        raise
def test_queue_multi_consumer():
    # Two consumers race on one queue; together they must see every
    # item exactly once, and (probabilistically) neither should get
    # them all.
    q = Queue()
    c = 10000
    s1 = [0]
    s2 = [0]

    def g(seen):
        # Consumer factory: `seen` is a one-element list used as a
        # mutable counter shared with the assertions below.
        def run():
            for x in xrange(c):
                v = q.get()
                seen[0] += 1
            sleep()
        return run

    def p():
        # Producer: push `c` items, then give the consumers a second
        # to drain before asserting on the split.
        for x in xrange(c):
            q.put(x)
        sleep(1)
        print s1, s2
        WVPASS(s1[0] < c)
        WVPASS(s2[0] < c)
        WVPASS(s1[0] + s2[0] == c)
        quickstop()

    quickstart(p, g(s1), g(s2))
def host_specific_send(self, host, msg, typ, transport_cb):
    """Queue `msg` for delivery to `host`, lazily starting its loop.

    The first message destined for a given host creates that host's
    outbound queue and forks a `host_loop` to drain it; subsequent
    messages reuse the same queue.
    """
    try:
        outbound = self.host_queues[host]
    except KeyError:
        outbound = Queue()
        fork(host_loop, host, outbound)
        self.host_queues[host] = outbound
    outbound.put((msg, typ, transport_cb))
def sub(self, classes):
    """Context-manager generator yielding a poller for `classes`.

    `classes` may be a single message class or a set/list/tuple of
    them.  The yielded poller exposes `fetch(timeout)` -- returning
    `(queue_name, message)`, or `(None, None)` on timeout -- and is
    unsubscribed via `close()` once the with-block exits normally.
    """
    if type(classes) not in (set, list, tuple):
        classes = [classes]

    hub = self
    sub_queue = Queue()

    class _Subscription(object):
        def __init__(self):
            # Register our queue for every requested class, then wake
            # the hub so it notices the new subscription.
            for message_cls in classes:
                hub.sub_adds.append((message_cls, sub_queue))
            fire(hub.sub_wake_signal)

        def fetch(self, timeout=None):
            try:
                queue_name, message = sub_queue.get(timeout=timeout)
            except QueueTimeout:
                return (None, None)
            return (queue_name, message)

        def close(self):
            for message_cls in classes:
                hub.sub_rms.append((message_cls, sub_queue))

    subscription = _Subscription()
    yield subscription
    subscription.close()
def __init__(self, uri, logger=None, log_level=None):
    """Set up the service's socket, queues and logging.

    :param uri: URI the service's socket will use.
    :param logger: Optional pre-built logger; when given, `log_level`
        is ignored (a RuntimeWarning is emitted if both are passed).
    :param log_level: Log level used only when no `logger` is supplied.
    """
    self.uri = uri
    self.zmq_socket = None
    self.log = logger or None
    self.selected_log_level = log_level
    self.clients = {}
    self.outgoing = Queue()
    self.incoming = Queue()
    # `name` may already be set as a class attribute by a subclass;
    # fall back to the class name otherwise.
    self.name = self.name or self.__class__.__name__

    # A supplied logger wins over a bare log level.
    if self.log and self.selected_log_level is not None:
        self.selected_log_level = None
        warnings.warn(
            "ignored `log_level` argument since `logger` was provided.",
            RuntimeWarning,
            stacklevel=2,
        )
def __call__(self):
    # Main scheduler loop for the pool: keeps `concurrency` handler
    # loops alive and feeds them items pulled from `self.generator`.
    self.q = Queue()
    self.trigger = Event()
    self.finished = Event()
    self.waiting = 0
    self.running = 0
    try:
        while True:
            # Top up the pool: fork replacements for any handler
            # loops that have exited since the last iteration.
            for x in xrange(self.concurrency - self.running):
                self.running += 1
                fork(self.handler_wrap)

            if self.waiting == 0:
                # No handler is idle; block until one asks for work.
                self.trigger.wait()
                self.trigger.clear()

            try:
                n = self.generator()
            except StopIteration:
                # Work source exhausted; fall through to shutdown.
                break

            self.q.put(n)
            sleep()
    finally:
        # Tell every handler loop to exit, then optionally run the
        # finalizer once they have all finished.
        for x in xrange(self.concurrency):
            self.q.put(ThreadPoolDie)
        if self.finalizer:
            self.finished.wait()
            fork(self.finalizer)
def setup(self):
    # Build the shared fixture: a work queue, a countdown latch that
    # fires after N results, plus accumulators for results and for
    # per-consumer handling counts.
    self.queue = Queue()
    self.done = Countdown(N)
    self.results = []
    self.handled = defaultdict(int)
    self.populate()
    self.consume()
    self.trigger()
def __init__(self): self.routes = defaultdict(set) # message name to host self.local_handlers = {} self.enabled_handlers = {} self.classes = {} self.host_queues = {} self.run_nameserver = None self.role_messages = defaultdict(list) self.roles = set() self.roles_wanted = set() self.roles_owned = set() self.role_clocks = {} self.role_by_name = {} self.incoming = Queue() self.pending = {} self.rpc_waits = {} self.table_changes = Queue()
def handle(request):
    """Handle a request for a websocket.

    Only the 'xhr-polling' transport is served; anything else gets a
    404.  Frames use the legacy 0x00 ... 0xff delimited format.
    """
    if request.transport != 'xhr-polling':
        raise Response(404)
    org = request.headers.one('Origin')  # NOTE(review): unused below -- confirm intent
    inq = Queue()
    outq = Queue()

    def wrap(request, inq, outq):
        # Run the user handler, then signal disconnect downstream.
        handler(request, inq, outq)
        outq.put(WebSocketDisconnect())
    fork(wrap, request, inq, outq)

    while True:
        try:
            log.debug("trying websocket thing")
            # Wait on either inbound bytes or an outbound message.
            typ, val = first(receive=1, waits=[outq.wait_id])
            log.debug(typ)
            log.debug(val)
            if typ == 'receive':
                # A frame starts with 0x00 and runs until 0xff.
                assert val == '\x00'
                val = until('\xff')[:-1]
                if val == '':
                    inq.put(WebSocketDisconnect())
                else:
                    # NOTE(review): this pushes `request`, not the
                    # received frame `val` -- looks suspicious; verify
                    # against consumers of `inq`.
                    inq.put(request)
            else:
                try:
                    v = outq.get(waiting=False)
                except QueueEmpty:
                    pass
                else:
                    if type(v) is WebSocketDisconnect:
                        send('\x00\xff')
                        break
                    else:
                        # NOTE(review): `response` is not defined in
                        # this function -- presumably should involve
                        # `v`; confirm against module globals before
                        # touching.
                        send('\x00%s\xff' % response.to_http(request.version))
        except ConnectionClosed:
            inq.put(WebSocketDisconnect())
            raise ConnectionClosed("remote disconnected")
def setup(self):
    # Fixture: one producer racing two consumers with different
    # timeouts; `result` fires once the interesting case is observed.
    self.result = Event()
    self.queue = Queue()
    self.timeouts = 0
    diesel.fork(self.consumer, 0.01)
    diesel.fork(self.producer, 0.05)
    diesel.fork(self.consumer, 0.10)
    # Guard the whole test with an outer timeout.
    ev, val = diesel.first(sleep=TIMEOUT, waits=[self.result])
    if ev == 'sleep':
        assert 0, 'timed out'
def __init__(self, concurrency, handler):
    """Creates a new ProcessPool with subprocesses that run the handler.

    Args:
        concurrency (int): The number of subprocesses to spawn.
        handler (callable): A callable that the subprocesses will execute.
    """
    self.concurrency = concurrency
    self.handler = handler
    # Queue of idle subprocess handles; workers are checked back in
    # here when they finish a task.
    self.available_procs = Queue()
    # Every subprocess ever created, regardless of availability.
    self.all_procs = []
def rpc(self, m, timeout=10):
    """Send message `m` to a consensus remote and wait for the reply.

    Returns the remote's response value, or None when the remote
    answers with the null sentinel.

    :raises ConvoyRemoteError: the remote reported an exception.
    :raises ConvoyTimeoutError: no reply within `timeout` seconds.
    """
    q = Queue()
    self.incoming.put(Delivery(m, timeout, rqueue=q))
    ev, res = first(sleep=timeout, waits=[q])
    if ev == q:
        # NOTE(review): these compare with `==` against what look like
        # sentinel types -- presumably their equality handles instances
        # too; confirm before changing to isinstance().
        if res == ConvoyRemoteException:
            raise ConvoyRemoteError(res.exc_desc)
        if res == ConvoyRemoteNull:
            return None
        return res
    else:
        raise ConvoyTimeoutError("No response from a " + (
            "consensus remote within %ss timeout period" % timeout))
def test_pending_events_dont_break_ordering_when_handling_early_values():
    """Early values from a Waiter must cancel competing pending waits."""
    # This test confirms that "early values" returned from a Waiter do
    # not give other pending event sources the chance to switch their
    # values into the greenlet while it context switches to give other
    # greenlets a chance to run.

    # First we setup a fake connection. It mimics a connection that does
    # not have data waiting in the buffer, and has to wait for the system
    # to call it back when data is ready on the socket. The delay argument
    # specifies how long the test should wait before simulating that data
    # is ready.
    conn1 = FakeConnection(1, delay=[None, 0.1])

    # Next we setup a Queue instance and prime it with a value, so it will
    # be ready early and return an EarlyValue.
    q = Queue()
    q.put(1)

    # Force our fake connection into the connection stack for the current
    # loop so we can make network calls (like until_eol).
    loop = core.current_loop
    loop.connection_stack.append(conn1)

    try:
        # OK, this first() call does two things.
        # 1) It calls until_eol, finds that no data is ready, and sets up a
        #    callback to be triggered when data is ready (which our
        #    FakeConnection will simulate).
        # 2) Fetches from the 'q' which will result in an EarlyValue.
        source, value = diesel.first(until_eol=True, waits=[q])
        assert source == q, source

        # What must happen is that the callback registered to handle data
        # from the FakeConnection when it arrives MUST BE CANCELED/DISCARDED/
        # FORGOTTEN/NEVER CALLED. If it gets called, it will muck with
        # internal state, and possibly switch back into the running greenlet
        # with an unexpected value, which will throw off the ordering of
        # internal state and basically break everything.
        v = diesel.until_eol()
        assert v == 'expected value 1\r\n', 'actual value == %r !!!' % (v, )
    finally:
        # Always restore the loop's connection stack for other tests.
        loop.connection_stack = []
def get_many(self, keys, concurrency_limit=100, no_failures=False):
    """Fetch many keys in parallel, at most `concurrency_limit` at once.

    Returns an ``(okay, err)`` pair of lists of ``(key, value)``
    tuples for the successful and failed subrequests respectively.

    :param keys: Iterable of keys to fetch.
    :param concurrency_limit: Maximum number of concurrent subrequests.
    :param no_failures: When True, raise BucketSubrequestException if
        any subrequest failed, instead of returning the failures.
    :raises BucketSubrequestException: only when `no_failures` is True
        and at least one subrequest failed.
    """
    assert self.used_client_context,\
        "Cannot fetch in parallel without a pooled make_client_context!"
    inq = Queue()
    outq = Queue()
    for k in keys:
        inq.put(k)
    # Cap the number of workers at the number of keys.
    for x in xrange(min(len(keys), concurrency_limit)):
        diesel.fork(self._subrequest, inq, outq)
    okay, err = [], []
    # One result arrives on outq per key, in completion order.
    for k in keys:
        (key, success, val) = outq.get()
        if success:
            okay.append((key, val))
        else:
            err.append((key, val))
    # Bug fix: previously `no_failures=True` raised unconditionally,
    # even when every subrequest succeeded (and a dead `failure` flag
    # was never consulted).  Raise only when failures actually exist.
    if no_failures and err:
        raise BucketSubrequestException(
            "Error in parallel subrequests", err)
    return okay, err
def subq(self, classes):
    """Context-manager generator yielding a Queue subscribed to `classes`.

    Accepts a single message class or a set/list/tuple of classes; the
    subscription is removed when the with-block exits, even on error.
    """
    if type(classes) not in (set, list, tuple):
        classes = [classes]
    subscription_q = Queue()
    for message_cls in classes:
        self.sub_adds.append((message_cls, subscription_q))
    fire(self.sub_wake_signal)
    try:
        yield subscription_q
    finally:
        # Always tear the subscription down again.
        for message_cls in classes:
            self.sub_rms.append((message_cls, subscription_q))
def __init__(self, init_callable, close_callable, pool_size=5,
             pool_max=None, poll_max_timeout=5):
    """Connection pool.

    :param init_callable: Factory invoked to open a new connection.
    :param close_callable: Invoked to dispose of a connection.
    :param pool_size: Number of idle connections kept around.
    :param pool_max: Optional hard cap on total connections; when
        None, the pool may grow without bound.
    :param poll_max_timeout: Seconds to wait when acquiring a slot.
    """
    self.init_callable = init_callable
    self.close_callable = close_callable
    self.pool_size = pool_size
    self.poll_max_timeout = poll_max_timeout
    if pool_max:
        # Pre-load one token per allowed connection.  NOTE(review):
        # appends to the queue's internal `inp` deque directly rather
        # than calling put() -- presumably to avoid waking waiters
        # during __init__; confirm against diesel's Queue internals.
        self.remaining_conns = Queue()
        for _ in xrange(pool_max):
            self.remaining_conns.inp.append(None)
    else:
        # No cap: a queue whose get() always yields a token.
        self.remaining_conns = InfiniteQueue()
    self.connections = deque()
def __init__(self, identity):
    """Track the state of one remote client of a DieselNitroService.

    :param identity: Information sent along with packets from the
        remote client that uniquely identifies it.
    """
    self.identity = identity
    # The incoming queue is typically populated by the DieselNitroService
    # and represents a queue of messages sent from the remote client.
    self.incoming = Queue()
    # The context in general is a place where you can put data that is
    # related specifically to the remote client and it will exist as
    # long as the remote client doesn't timeout.
    self.context = {}
    # A skeleton frame to hang onto for async sending back.
    self.async_frame = None
def test_queue_put_noloss():
    """Every item put on the queue arrives, in order, with none lost."""
    q = Queue()
    total = 10000
    finished = [0]

    def consumer():
        # Drain the queue and verify strict FIFO ordering.
        for expected in xrange(total):
            v = q.get()
            WVPASS(v == expected)
        finished[0] = 1

    def producer():
        for item in xrange(total):
            q.put(item)
        # Give the consumer a second to finish, then check it did.
        sleep(1)
        WVPASS(finished[0] == 1)
        quickstop()

    quickstart(producer, consumer)
def network_set(self, client, key, value, new):
    # Two-phase quorum write: first propose to every peer, then save
    # once `quorum_size` peers have accepted the proposal.  Returns
    # the proposal id on success, or None if any peer rejected.
    proposal_id = idgen.next()
    resq = Queue()
    if new:
        # NOTE(review): string-concatenates `proposal_id` -- assumes
        # idgen yields strings; confirm.
        rollback = '|' + new + ':' + proposal_id
        value += rollback
    else:
        rollback = None
    # Phase 1: broadcast the proposal; all peers reply on `resq`.
    for q in self.proposal_qs:
        q.put((proposal_id, key, resq))
    success = 0
    while True:
        # XXX timeout etc
        v = resq.get()
        if v == PROPOSE_SUCCESS:
            success += 1
            if success == self.quorum_size:
                break
        elif v == PROPOSE_FAIL:
            # A single rejection aborts the write.
            return None
        else:
            assert 0
    # Phase 2: broadcast the save; late phase-1 replies may still be
    # in flight on `resq` and are deliberately ignored below.
    for q in self.save_qs:
        q.put((proposal_id, key, value, client, rollback, resq))
    success = 0
    while True:
        # XXX timeout etc
        v = resq.get()
        if v == PROPOSE_SUCCESS:
            pass # don't care
        elif v == PROPOSE_FAIL:
            pass # don't care
        elif v == SAVE_SUCCESS:
            success += 1
            if success == self.quorum_size:
                return proposal_id
        else:
            assert 0
def network_get(self, key):
    """Quorum read of `key` from all peers, with read-repair.

    Broadcasts the key to every peer's get-queue, tallies the replies,
    and accepts a value once `quorum_size` peers agree on it.  When the
    agreed value is newer than the local copy, the local store is
    repaired before returning.  Returns None if no quorum is reached.
    """
    tally = defaultdict(int)
    reply_q = Queue()
    for get_q in self.get_qs:
        get_q.put((key, reply_q))

    agreed = None
    # XXX - timeout
    for _ in xrange(self.num_hosts):
        reply = reply_q.get()
        tally[reply] += 1
        if tally[reply] == self.quorum_size:
            agreed = reply
            break

    if agreed is not None and (key not in store or
                               store[key].proposal_id < agreed.proposal_id):
        clog.error("read-repair %s" % agreed)
        store.set(key, agreed)
    return agreed
def pair(done):
    """Wire a producer and a consumer together over a fresh queue."""
    shared_q = Queue()
    diesel.fork(producer, shared_q)
    diesel.fork(consumer, shared_q, done)
def alive(self):
    """Ask the convoy whether it is alive; blocks for the reply."""
    reply_q = Queue()
    self.request_queue.put((ConvoyAliveRequest(), reply_q))
    return reply_q.get()
def wait(self, timeout, clocks):
    """Block until `clocks` change or `timeout` elapses; return the reply."""
    reply_q = Queue()
    request = ConvoyWaitRequest(timeout, clocks)
    self.request_queue.put((request, reply_q))
    return reply_q.get()
def add(self, key, value, cap, to=0):
    """Add `value` under `key` (capped at `cap`); blocks for the reply."""
    reply_q = Queue()
    request = ConvoySetRequest(key, value, cap, to, 1)
    self.request_queue.put((request, reply_q))
    return reply_q.get()
def set(self, key, value):
    """Set `key` to `value`; blocks for the reply."""
    reply_q = Queue()
    request = ConvoySetRequest(key, value, 0, 5, 0)
    self.request_queue.put((request, reply_q))
    return reply_q.get()
def clear(self, key):
    """Clear `key` (a set request with a None value); blocks for the reply."""
    reply_q = Queue()
    request = ConvoySetRequest(key, None, 0, 5, 0)
    self.request_queue.put((request, reply_q))
    return reply_q.get()
def lookup(self, key):
    """Fetch the current value stored under `key`; blocks for the reply."""
    reply_q = Queue()
    self.request_queue.put((ConvoyGetRequest(key), reply_q))
    return reply_q.get()