Code example #1
    def __init__(self) -> None:
        # Tanner (6/15/21): black does not treat all these type definitions and comments kindly, so turning off formatting
        # fmt: off
        self._instrument_communication_error_queue: Queue[Tuple[
            Exception, str]] = Queue()  # pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/1498
        self._instrument_comm_board_queues: Tuple[Tuple[Queue[Dict[str, Any]],
                                                        Queue[Dict[str, Any]],
                                                        Queue[Any]], ...,  # pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/1498
                                                  ] = tuple((Queue(), Queue(),
                                                             Queue())
                                                            for _ in range(1))

        self._from_main_to_file_writer_queue: Queue[Dict[str, Any]] = Queue()  # pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/1498
        self._from_file_writer_to_main_queue: Queue[Dict[str, Any]] = Queue()  # pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/1498
        self._file_writer_error_queue: Queue[Tuple[Exception, str]] = Queue()  # pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/1498
        self._file_writer_board_queues: Tuple[
            Tuple[Queue[Any], Queue[Any]], ...  # pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/1498
        ] = tuple((self._instrument_comm_board_queues[i][2], Queue())
                  for i in range(1))

        self._data_analyzer_board_queues: Tuple[
            Tuple[Queue[Any], Queue[Any]], ...  # pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/1498,  # noqa: E231 # flake8 doesn't understand the 3 dots for type definition
        ] = tuple(
            (self._file_writer_board_queues[i][1], Queue()) for i in range(1))
        self._from_main_to_data_analyzer_queue: Queue[Dict[str, Any]] = Queue()  # pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/1498
        self._from_data_analyzer_to_main_queue: Queue[Dict[str, Any]] = Queue()  # pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/1498
        self._data_analyzer_error_queue: Queue[Tuple[Exception, str]] = Queue()  # pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/1498

        self._from_server_to_main_queue: queue.Queue[Dict[
            str, Any]] = queue.Queue()  # pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/1498
        self._data_to_server_queue: LightQueue = LightQueue()
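
The constructor above chains the per-board queues into a pipeline: the third queue of each instrument_comm tuple feeds the file writer, whose second queue feeds the data analyzer. A condensed sketch of that wiring, assuming a single board as in the range(1) calls (the queue roles in the comments are inferred, not stated in the source):

from queue import Queue  # stand-in; the original mixes multiprocessing and threading queues

# instrument_comm -> file_writer -> data_analyzer
instrument = (Queue(), Queue(), Queue())    # (to board, from board, data out) -- assumed roles
file_writer = (instrument[2], Queue())      # reads instrument data, emits its own output
data_analyzer = (file_writer[1], Queue())   # reads file_writer output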
Code example #2
    def get(self, block=True, timeout=None):
        # One-slot queue acting as a mailbox: the Socket.IO ack callback
        # delivers the client's reply, and event.get() blocks until then.
        event = LightQueue(1)
        if flask.has_request_context():
            # Inside a request: emit on the current client's connection.
            emit('{}#get'.format(self._uuid), callback=lambda x: event.put(x))
        else:
            # Outside a request: go through the app's Socket.IO extension.
            sio = flask.current_app.extensions['socketio']
            sio.emit('{}#get'.format(self._uuid),
                     callback=lambda x: event.put(x))
        # Note: the block/timeout parameters are accepted but ignored;
        # the wait is hard-coded to one second.
        return event.get(timeout=1)
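
The one-slot LightQueue here turns an asynchronous Socket.IO acknowledgement callback into a synchronous return value. A minimal standalone sketch of the pattern (the request_value helper and the sio server object are illustrative, not from the original project):

from eventlet.queue import LightQueue

def request_value(sio, event_name, timeout=1):
    # Emit an event and block until the client's ack callback delivers
    # a value; raises eventlet.queue.Empty if nothing arrives in time.
    mailbox = LightQueue(1)
    sio.emit(event_name, callback=mailbox.put)
    return mailbox.get(timeout=timeout)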
Code example #3
    def get(self, timeout=10):
        # ``getter`` and ``unpack`` come from the enclosing decorator scope;
        # this is the wrapper it builds around a component's property getter.
        name = getter.__name__
        # pylint: disable=protected-access
        signal = '{uuid}#{event}'.format(uuid=self._uuid, event=name)
        event = LightQueue(1)
        if flask.has_request_context():
            emit(signal, callback=lambda x: event.put(unpack(x)))
        else:
            sio = flask.current_app.extensions['socketio']
            sio.emit(signal, callback=lambda x: event.put(unpack(x)))
        data = event.get(timeout=timeout)
        return getter(self, data)
Code example #4
import sys

import eventlet
from eventlet.queue import LightQueue


def fail_fast_imap(pool, call, items):
    """ Run a function against each item in a given list, yielding each
    function result in turn, where the function call is handled in a
    :class:`~eventlet.greenthread.GreenThread` spawned by the provided pool.

    If any function raises an exception, all other ongoing threads are killed,
    and the exception is raised to the caller.

    This function is similar to :meth:`~eventlet.greenpool.GreenPool.imap`.

    :param pool: Pool to spawn function threads from
    :type pool: eventlet.greenpool.GreenPool
    :param call: Function call to make, expecting to receive an item from the
        given list
    :param items: Items to pass to ``call``, one per spawned green thread
    """
    result_queue = LightQueue(maxsize=len(items))
    spawned_threads = set()

    def handle_result(finished_thread):
        try:
            thread_result = finished_thread.wait()
            spawned_threads.remove(finished_thread)
            result_queue.put((thread_result, None))
        except Exception:
            spawned_threads.remove(finished_thread)
            result_queue.put((None, sys.exc_info()))

    for item in items:
        gt = pool.spawn(call, item)
        spawned_threads.add(gt)
        gt.link(handle_result)

    while spawned_threads:
        result, exc_info = result_queue.get()
        if exc_info is not None:
            # Kill all other ongoing threads
            for ongoing_thread in spawned_threads:
                ongoing_thread.kill()
            # simply raising here (even raising a full exc_info) isn't
            # sufficient to preserve the original stack trace.
            # greenlet.throw() achieves this.
            eventlet.getcurrent().throw(*exc_info)
        yield result
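
A hedged usage sketch for fail_fast_imap (the square worker and its inputs are illustrative):

import eventlet
from eventlet.greenpool import GreenPool

def square(n):
    eventlet.sleep(0)  # yield so other green threads get scheduled
    return n * n

pool = GreenPool(4)
for value in fail_fast_imap(pool, square, [1, 2, 3, 4]):
    print(value)  # results arrive in completion order, not input order

If any square() call raised, the remaining green threads would be killed and the exception would propagate to the caller with its original traceback, as described in the docstring above.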
Code example #5
import time

from eventlet import GreenPool
from eventlet.queue import LightQueue, Empty
from oio.account.client import AccountClient  # assumed oio-sds import path


def main(myid, queue, concurrency, delay=5.0, duration=DURATION):
    # NS, ACCOUNT, DURATION and create_loop() are module-level names
    # defined elsewhere in the benchmark script this snippet comes from.
    counter = 0
    created = list()
    results = LightQueue(concurrency * 10)
    pool = GreenPool(concurrency)
    api = AccountClient({'namespace': NS}, pool_maxsize=concurrency+1)
    now = start = checkpoint = time.time()
    pool.starmap(create_loop, [(api, 'buck-%d-%d' % (myid, n), results)
                               for n in range(concurrency)])
    while now - start < duration:
        try:
            res = results.get(timeout=delay)
            created.append(res)
            counter += 1
        except Empty:
            pass
        if now - checkpoint > delay:
            print("Proc %d: %d updates in %fs, %f updates per second." % (
                  myid, counter, now - checkpoint,
                  counter / (now - checkpoint)))
            counter = 0
            checkpoint = now
        now = time.time()
    for coro in pool.coroutines_running:
        coro.kill()
    while not results.empty():
        created.append(results.get(block=False))
    end = time.time()
    rate = len(created) / (end - start)
    print("Proc %d: end. %d updates in %fs, %f updates per second." % (
          myid, len(created), end - start, rate))
    time.sleep(2)
    print("Proc %d: cleaning..." % myid)
    del_req = {'dtime': time.time()}
    # Do not delete twice (or an exception is raised)
    uniq_ct = set(created)
    for _ in pool.starmap(api.container_update,
                          [(ACCOUNT, n, del_req) for n in uniq_ct]):
        pass
    pool.waitall()
    queue.put(rate)
    return 0
Code example #6
File: cache.py  Project: tomymacmillan/bowtie
import flask
import msgpack
from eventlet.queue import LightQueue
from flask_socketio import emit  # assumed: bowtie builds on Flask-SocketIO

# ``pack`` (bowtie's msgpack serializer) is defined elsewhere in the project.


def load(key):
    """Load the value stored with the key.

    Parameters
    ----------
    key : object
        The key to lookup the value stored.

    Returns
    -------
    object
        The value if the key exists in the cache, otherwise None.

    """
    signal = 'cache_load'
    event = LightQueue(1)
    if flask.has_request_context():
        emit(signal, {'data': pack(key)}, callback=event.put)
    else:
        sio = flask.current_app.extensions['socketio']
        sio.emit(signal, {'data': pack(key)}, callback=event.put)
    # Note: msgpack >= 1.0 removed the ``encoding`` kwarg; the modern
    # equivalent is msgpack.unpackb(..., raw=False).
    return msgpack.unpackb(bytes(event.get(timeout=10)), encoding='utf8')
Code example #7
    def __init__(self, app, size=20):
        self.app = app
        self.size = size
        self.inqueue = LightQueue()
        self._running = None
        self._producers = None
Code example #8
    def __init__(self, logger: Logger):
        self._logger = logger
        self._q = LightQueue(maxsize=1000)

        # ensure no one is writing to the same broker connection concurrently
        self._sio = storage.utils.SIOManager.create(write_only=True)
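
The maxsize=1000 bound gives natural backpressure: once the queue is full, put() blocks the producing green thread until the consumer catches up. A minimal self-contained illustration of that behavior (not from the original project):

import eventlet
from eventlet.queue import LightQueue

q = LightQueue(maxsize=2)

def producer():
    for i in range(5):
        q.put(i)          # blocks whenever the queue already holds 2 items
        print('put', i)

def consumer():
    for _ in range(5):
        eventlet.sleep(0.1)   # simulate slow consumption
        print('got', q.get())

eventlet.spawn(producer)
consumer()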
Code example #9
            print "%d containers in %fs, %f containers per second." % (
                counter, now - checkpoint, counter / (now - checkpoint))
            counter = 0
            checkpoint = now
        created.append(res)
        now = time.time()
    for coro in POOL.coroutines_running:
        coro.kill()
    while not RESULTS.empty():
        created.append(RESULTS.get(block=False))
    end = time.time()
    rate = len(created) / (end - start)
    print "End. %d containers created in %fs, %f containers per second." % (
        len(created), end - start, rate)
    print "Cleaning..."
    for _ in POOL.starmap(API.container_delete,
                          [('benchmark', n) for n in created]):
        pass
    POOL.waitall()
    return rate


if __name__ == '__main__':
    import os
    import sys
    from eventlet import GreenPool
    from eventlet.queue import LightQueue
    from oio import ObjectStorageApi  # assumed oio-sds import path

    THREADS = int(sys.argv[1]) if len(sys.argv) > 1 else 1
    API = ObjectStorageApi(os.getenv('OIO_NS', 'OPENIO'))
    RESULTS = LightQueue(THREADS * 10)
    POOL = GreenPool(THREADS)
    main(THREADS)
Code example #10
    def get(self, request):
        """Provide a streaming interface for the event bus."""
        from eventlet.queue import LightQueue, Empty
        import eventlet

        cur_hub = eventlet.hubs.get_hub()
        request.environ['eventlet.minimum_write_chunk_size'] = 0
        to_write = LightQueue()
        stop_obj = object()

        restrict = request.args.get('restrict')
        if restrict:
            restrict = restrict.split(',')

        def thread_forward_events(event):
            """Forward events to the open request."""
            if event.event_type == EVENT_TIME_CHANGED:
                return

            if restrict and event.event_type not in restrict:
                return

            _LOGGER.debug('STREAM %s FORWARDING %s', id(stop_obj), event)

            if event.event_type == EVENT_BLUMATE_STOP:
                data = stop_obj
            else:
                data = json.dumps(event, cls=rem.JSONEncoder)

            cur_hub.schedule_call_global(0, lambda: to_write.put(data))

        def stream():
            """Stream events to response."""
            self.hass.bus.listen(MATCH_ALL, thread_forward_events)

            _LOGGER.debug('STREAM %s ATTACHED', id(stop_obj))

            last_msg = time()
            # Fire off one message right away to have browsers fire open event
            to_write.put(STREAM_PING_PAYLOAD)

            while True:
                try:
                    # Somehow our queue.get sometimes takes too long to
                    # be notified of arrival of data. Probably
                    # because of our spawning on hub in other thread
                    # hack. Because current goal is to get this out,
                    # We just timeout every second because it will
                    # return right away if qsize() > 0.
                    # So yes, we're basically polling :(
                    payload = to_write.get(timeout=1)

                    if payload is stop_obj:
                        break

                    msg = "data: {}\n\n".format(payload)
                    _LOGGER.debug('STREAM %s WRITING %s', id(stop_obj),
                                  msg.strip())
                    yield msg.encode("UTF-8")
                    last_msg = time()
                except Empty:
                    if time() - last_msg > 50:
                        to_write.put(STREAM_PING_PAYLOAD)
                except GeneratorExit:
                    _LOGGER.debug('STREAM %s RESPONSE CLOSED', id(stop_obj))
                    break

            self.hass.bus.remove_listener(MATCH_ALL, thread_forward_events)

        return self.Response(stream(), mimetype='text/event-stream')
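
Stripped of the framework specifics, the streaming loop above reduces to a small reusable pattern: poll a LightQueue with a short timeout, forward payloads as server-sent-event frames, and emit a keep-alive ping when the stream has been idle too long. A hedged sketch under those assumptions (the function name, ping payload, and idle limit are illustrative):

import time
from eventlet.queue import LightQueue, Empty

def sse_stream(to_write, stop_obj, ping=b': ping\n\n', idle_limit=50):
    # Drain a LightQueue into an SSE byte stream.
    last_msg = time.time()
    while True:
        try:
            payload = to_write.get(timeout=1)  # short poll keeps pings timely
            if payload is stop_obj:
                break
            yield 'data: {}\n\n'.format(payload).encode('utf-8')
            last_msg = time.time()
        except Empty:
            if time.time() - last_msg > idle_limit:
                yield ping
                last_msg = time.time()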