Example #1
class BufferedReader(Listener):
    """
    A BufferedReader is a subclass of :class:`~can.Listener` which implements a
    **message buffer**: that is, when the :class:`can.BufferedReader` instance is
    notified of a new message it pushes it into a queue of messages waiting to
    be serviced. The messages can then be fetched with
    :meth:`~can.BufferedReader.get_message`.

    Putting in messages after :meth:`~can.BufferedReader.stop` has been called will raise
    an exception, see :meth:`~can.BufferedReader.on_message_received`.

    :attr bool is_stopped: ``True`` iff the reader has been stopped
    """

    def __init__(self):
        # set to "infinite" size
        self.buffer = SimpleQueue()
        self.is_stopped = False

    def on_message_received(self, msg):
        """Append a message to the buffer.

        :raises: RuntimeError
            if the reader has already been stopped
        """
        if self.is_stopped:
            raise RuntimeError("reader has already been stopped")
        else:
            self.buffer.put(msg)

    def get_message(self, timeout=0.5):
        """
        Attempts to retrieve the next message from the buffer (oldest first). If no message is
        available, it blocks for the given timeout or until a message is received, whichever
        comes first, and returns None if nothing arrived. This method does not block after
        :meth:`can.BufferedReader.stop` has been called.

        :param float timeout: The number of seconds to wait for a new message.
        :rtype: can.Message or None
        :return: the message if there is one, or None if there is not.
        """
        try:
            return self.buffer.get(block=not self.is_stopped, timeout=timeout)
        except Empty:
            return None

    def stop(self):
        """Prohibits any more additions to this reader.
        """
        self.is_stopped = True
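
A minimal usage sketch of the reader above (assuming python-can's Listener base class is importable); the strings stand in for the can.Message objects a Notifier would normally deliver.

reader = BufferedReader()
reader.on_message_received("msg-1")
reader.on_message_received("msg-2")

print(reader.get_message())          # "msg-1" (FIFO order)
reader.stop()
print(reader.get_message())          # "msg-2", returned without blocking
print(reader.get_message())          # None: nothing left, and no waiting after stop()
reader.on_message_received("msg-3")  # raises RuntimeError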
Example #2
    def __init__(self, pool: Executor) -> None:
        self._pool = pool
        self._handlers: MutableMapping[str, RpcCallable] = {}
        self._event_queue: SimpleQueue = SimpleQueue()
        self._stack: Optional[Stack] = None

        _set_debug()
Example #3
    def subtree_levelorder_traversal(self):
        from queue import SimpleQueue
        q = SimpleQueue()
        q.put(self)

        keys = []
        while not q.empty():
            curr = q.get()
            keys.append(curr.key)

            if curr.left:
                q.put(curr.left)
            if curr.right:
                q.put(curr.right)

        return keys
Example #4
def robot_painter(opcodes):
    """
    Create a processor for the robot and run the opcodes.

    There's no indication of where the robot starts, so let's assume (0, 0) for now. We can record
    painted locations in a dictionary, and record output values etc.
    """
    painting = defaultdict(lambda: 0)
    current_location = (0, 0)

    movement = deque([
        (1, 0),  # up
        (0, 1),  # right
        (-1, 0),  # down
        (0, -1),  # left
    ])

    input_queue = SimpleQueue()
    output_queue = SimpleQueue()
    processor = Processor(input_queue, output_queue)

    # let the robot run in a separate thread
    thread = threading.Thread(target=processor, name='Robot', args=(opcodes, ))
    thread.start()

    while True:
        # not sure where the robot will break, so
        if not thread.is_alive():
            break

        # first, give the system the colour of the current location
        input_queue.put(painting[current_location])

        # not sure where the robot will break, so
        if not thread.is_alive():
            break

        # get the new colour
        painting[current_location] = output_queue.get()

        # not sure where the robot will break, so
        if not thread.is_alive():
            break

        # move the robot
        new_direction = output_queue.get()
        if new_direction == 0:
            movement.rotate(1)
        elif new_direction == 1:
            movement.rotate(-1)
        else:
            raise ValueError('Movement direction not modified')

        current_location = (current_location[0] + movement[0][0],
                            current_location[1] + movement[0][1])

    return len(painting)
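
A small standalone sketch of the turning trick used above: the deque keeps the current heading at index 0, so rotating it turns the robot before it steps forward.

from collections import deque

movement = deque([(1, 0), (0, 1), (-1, 0), (0, -1)])  # up, right, down, left
x, y = 0, 0

movement.rotate(-1)                  # a "1" output: turn right, heading becomes (0, 1)
x, y = x + movement[0][0], y + movement[0][1]
print((x, y))                        # (0, 1)

movement.rotate(1)                   # a "0" output: turn left, heading back to (1, 0)
x, y = x + movement[0][0], y + movement[0][1]
print((x, y))                        # (1, 1)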
Example #5
    def __init__(self, weights: Sequence[float], keys: Sequence[Any] = None):
        """
        Initialize a coder on a set of alphabets with weights (frequencies)
        :param weights: weights or frequencies of the alphabets
        :param keys: the alphabets. Defaults to a range with the length of the weights.
        """
        # a minimum of 2 alphabets are required
        if len(weights) < 2:
            raise ValueError
        # validate alphabets or set the default
        if keys:
            assert len(keys) == len(weights)
        else:
            keys = range(len(weights))

        # use a heap to grow a binary tree bottom up in increasing order of the weights
        # the valued heap will keep track of both the weight for sorting, and the branches as values
        heap = ValuedMinHeap()
        for key, weight in zip(keys, weights):
            heap.put((weight, BinaryTree(key)))

        while len(heap) >= 2:
            weight1, tree1 = heap.get()
            weight2, tree2 = heap.get()
            heap.put((weight1 + weight2, BinaryTree(None, tree2, tree1)))

        # retrieve the final tree
        _, self.tree = heap.get()

        # search the tree to retrieve the key mapping
        # recursion could be used, but a queue is used here to avoid hitting the recursion limit
        queue = SimpleQueue()
        queue.put(
            (self.tree, '')
        )  # for simplicity, use a string to simulate the binary compressed data
        self.key_map = {}
        while not queue.empty():
            tree, prefix = queue.get()
            if tree.leave():
                self.key_map[tree.value] = prefix
                continue
            if tree.left_child:
                queue.put(
                    (tree.left_child, prefix + '0'))  # code left edges as 0
            if tree.right_child:
                queue.put(
                    (tree.right_child, prefix + '1'))  # code right edges as 1
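
The constructor above leans on the custom ValuedMinHeap and BinaryTree types; a hedged standard-library sketch of the same bottom-up construction, using heapq and plain tuples as a stand-in tree (keys are assumed not to be tuples themselves):

import heapq
from queue import SimpleQueue

def huffman_codes(weights, keys):
    # (weight, tie-breaker, tree) triples; the counter keeps tuples comparable
    heap = [(w, i, k) for i, (w, k) in enumerate(zip(weights, keys))]
    heapq.heapify(heap)
    counter = len(heap)
    while len(heap) >= 2:
        w1, _, t1 = heapq.heappop(heap)
        w2, _, t2 = heapq.heappop(heap)
        heapq.heappush(heap, (w1 + w2, counter, (t2, t1)))
        counter += 1
    _, _, tree = heap[0]

    codes, queue = {}, SimpleQueue()
    queue.put((tree, ''))
    while not queue.empty():
        node, prefix = queue.get()
        if not isinstance(node, tuple):       # a leaf holds an original key
            codes[node] = prefix
            continue
        queue.put((node[0], prefix + '0'))    # code left edges as 0
        queue.put((node[1], prefix + '1'))    # code right edges as 1
    return codes

print(huffman_codes([5, 1, 2, 4], 'abcd'))    # {'a': '1', 'd': '00', 'c': '010', 'b': '011'}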
Example #6
    def levelOrderBottom(self, root: TreeNode) -> List[List[int]]:
       
        val_list = []
        q = SimpleQueue()
        q.put((root,0))

        while not q.empty():
            cur_node, pos = q.get()
            if cur_node:
                try:
                    val_list[pos].append(cur_node.val)
                except IndexError:
                    val_list.append([cur_node.val])
                q.put((cur_node.left,pos+1))
                q.put((cur_node.right, pos+1))

        return val_list[::-1]
Example #7
    def __init__(self, server_url, keys_to_filter=[]):
        """
        :param keys_to_filter: Iterable of keys in the synchronized dictionary.
            In order to not overwrite the values which are produced locally and
            thus more accurate locally than on the server, provide the keys to
            those values here.  Values from the remote server for those keys will
            be ignored.
        """
        self.data = {}
        self.inbox = SimpleQueue()

        self.server_url = server_url
        self.keys_to_filter = keys_to_filter

        self.thread = Thread(target=self._thread_function)
        self.thread.daemon = True
        self.is_thread_running = False
Example #8
  def breadth_first_for_each(self, cb):
    # Create a queue.
    breadth_queue = SimpleQueue()
    # Add root to queue.
    breadth_queue.put(self)

    # Check that queue isn't empty.
    while not breadth_queue.empty():
      # Pop first node from queue.
      curr_node = breadth_queue.get()
      # Perform anonymous function on value of node.
      cb(curr_node.value)
      # Add any existing child nodes to queue from left to right.
      if curr_node.left:
        breadth_queue.put(curr_node.left)
      if curr_node.right:
        breadth_queue.put(curr_node.right)
Example #9
    def __init__(self, config: Config) -> None:
        self.config = config
        self.announcer = create_announcer(config.irc)
        self.enabled_channel_names: set[str] = set()
        self.message_queue: SimpleQueue = SimpleQueue()

        # Up to this point, no signals must have been sent.
        self.connect_to_signals()
Example #10
    def __init__(self):
        self.ip = ''
        self.port = 0

        self.updates = 0

        self.forStr = 0
        self.StrOutQ = SimpleQueue()

        self.forJson = 0
        self.JsonOutQ = SimpleQueue()

        self.startTime = 0
        self.serverTime = 0
        self.endTime = 0
        self.retSize = 0
        self.gotSize = 0
Example #11
    def create_pair(self) -> tuple[Callable, Callable]:
        self._queue = SimpleQueue()  # maxsize=1)

        def putter(*args):  # callback
            self._loop.call_soon_threadsafe(self._queue.put_nowait, args)

        async def getter(timeout=self.DEFAULT_TIMEOUT) -> tuple:
            timeout = self.DEFAULT_TIMEOUT if timeout is None else timeout
            dt_expired = dt.now() + td(seconds=timeout)
            while dt.now() < dt_expired:
                try:
                    return self._queue.get_nowait()
                except Empty:
                    await asyncio.sleep(0.005)
            raise TimeoutError

        return getter, putter  # awaitable, callback
Example #12
def main() -> None:
    t0 = perf_counter()
    results = SimpleQueue()  # type: ignore
    workers: List[Thread] = []  # <2>

    for n in NUMBERS:
        worker = Thread(target=job, args=(n, results))  # <3>
        worker.start()  # <4>
        workers.append(worker)  # <5>

    for _ in workers:  # <6>
        n, (prime, elapsed) = results.get()  # <7>
        label = 'P' if prime else ' '
        print(f'{n:16}  {label} {elapsed:9.6f}s')

    time = perf_counter() - t0
    print('Total time:', f'{time:0.2f}s')
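
NUMBERS and the job worker aren't shown above; a hypothetical stand-in that matches the unpacking n, (prime, elapsed) = results.get() could look like this (not the original code):

from time import perf_counter

NUMBERS = [2, 97, 7919, 104729, 104730]       # made-up sample inputs

def is_prime(n: int) -> bool:
    if n < 2:
        return False
    for i in range(2, int(n ** 0.5) + 1):
        if n % i == 0:
            return False
    return True

def job(n: int, results) -> None:
    # each worker thread computes one result and reports it through the shared queue
    t0 = perf_counter()
    results.put((n, (is_prime(n), perf_counter() - t0)))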
Example #13
def new(pool: Executor, root: PurePath, index: Index) -> Node:
    acc: SimpleQueue = SimpleQueue()
    bfs_q: SimpleQueue = SimpleQueue()

    def drain() -> Iterator[PurePath]:
        while not bfs_q.empty():
            yield bfs_q.get()

    bfs_q.put(root)
    while not bfs_q.empty():
        tasks = tuple(
            pool.submit(_new, roots=paths, index=index, acc=acc, bfs_q=bfs_q)
            for paths in chunk(drain(), n=WALK_PARALLELISM_FACTOR)
        )
        wait(tasks)

    return _join(acc)
Example #14
class JobObjectsManager(BaseManager):
    """Shared object manager for a scanchanges cluster job"""

    work_queue = SimpleQueue()
    result_queue = SimpleQueue()

    @classmethod
    def new_server_manager(cls, *args, **kwds):
        cls.register('get_work_queue', callable=lambda: cls.work_queue)
        cls.register('get_result_queue', callable=lambda: cls.result_queue)
        return cls(*args, **kwds)

    @classmethod
    def new_client_stub(cls, *args, **kwds):
        cls.register('get_work_queue')
        cls.register('get_result_queue')
        return cls(*args, **kwds)
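
A hedged usage sketch of the manager above, roughly following the remote-manager pattern from the multiprocessing docs; the address and authkey are made-up values, and the two halves would normally run in different processes.

# in the process hosting the queues
manager = JobObjectsManager.new_server_manager(address=("", 50000), authkey=b"scanchanges")
manager.start()                               # serve work_queue/result_queue to other processes

# in a worker process
client = JobObjectsManager.new_client_stub(address=("localhost", 50000), authkey=b"scanchanges")
client.connect()
client.get_work_queue().put("scan chunk 1")   # proxy call forwarded to the shared SimpleQueue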
Example #15
 def __init__(self, maxtasks=5, eco=False):
     self.queue = SimpleQueue()
     self.__status = False
     self.__task = []  # running task ( as threads instance )
     self.__left = []  # list of tasks in queue (tskname,func,args,kwargs)
     self.__done = []  # list of tasks done (tskname,*perf[todo])
     self.__aborted = [
     ]  # list of tasks aborted when stop() issued and left items still in queue (tskname,func,args,kwargs)
     self.__stoppending = False  # True when we empty the remaining queue after a task.stop(), doing nothing.
     self.__reports = {}
     self.__polls = []
     self.__pollslatency = 1.0
     self.__pollslast = 0
     self.__eco = eco
     self.__verbose = False
     self.maxtasks = maxtasks
     if not (self.__eco): self.start()
Example #16
    def __init__(self, url, callback=None):
        self.engine = detect_image.Engine(MODEL_FILE, LABELS_FILE)
        self.rtsp_url = url
        self.frames = SimpleQueue()
        self.callback = callback
        self._event = False
        self.event_detected = False

        self.configuration = {
            CONF_ARMED: True,
            CONF_MJPEG_FPS: FPS,
            CONF_PF: PF,
            CONF_THRESHOLD: detect_image.TF_THRESHOLD,
            CONF_EVENT_BUFFER: BUFFER_SIZE,
        }

        self.set_buffer()
Example #17
def part1(passcode):
    start = 0, 0
    queue = SimpleQueue()
    queue.put((start, ''))
    while not queue.empty():
        location, path = queue.get()
        if location == GOAL:
            return path
        for x in get_neighbours(location, path, passcode):
            queue.put(x)
Example #18
def intcode_helper(code, input_val=None):
    in_msg = SimpleQueue()
    if input_val is not None:
        in_msg.put(input_val)
    out_msg = SimpleQueue()
    intcode(code, in_msg, out_msg)
    output = []
    while not out_msg.empty():
        output.append(out_msg.get())
    return output
Example #19
class MessageBuffer:
    def __init__(self, batch_size):
        self._buffer = SimpleQueue()
        self.set_batch_size(batch_size)

    def put(self, mesg):
        self._buffer.put(mesg)
        return self.batch_size - self._buffer.qsize()

    def get(self):
        if not self._buffer.empty():
            return self._buffer.get()
        return None

    def is_batch_ready(self):
        return self.batch_size <= self._buffer.qsize()

    def empty(self):
        return self._buffer.empty()

    def set_batch_size(self, batch_size):
        if not isinstance(batch_size, (int, float)) or batch_size < 1:
            batch_size = 1
        self.batch_size = int(batch_size)

    def get_batch_size(self):
        return self.batch_size
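
A small usage sketch of the buffer above: put() reports how many more messages are needed before a batch of the configured size is ready.

buf = MessageBuffer(batch_size=3)
print(buf.put("a"))                   # 2 more needed
print(buf.put("b"))                   # 1 more needed
print(buf.is_batch_ready())           # False
print(buf.put("c"))                   # 0 -> a full batch is buffered
print(buf.is_batch_ready())           # True
print([buf.get() for _ in range(3)])  # ['a', 'b', 'c']
print(buf.get())                      # None once empty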
Example #20
    def __init__(self, initial_memory, inputs=None):
        self.opcodes = {}
        self.memory = [int(i) for i in initial_memory]
        self.instruction_pointer = 0
        if isinstance(inputs, int):
            inputs = [inputs]
        self.inputs = SimpleQueue()
        if inputs is not None:
            for value in inputs:
                self.inputs.put(value)
        self.output = LifoQueue()
        self.relative_base = 0
        self.status = Status.IDLE

        self.addOpCode(
            1, 'add', lambda memory, params:
            (params[2], memory[params[0]] + memory[params[1]], None), 3)
        self.addOpCode(
            2, 'mul', lambda memory, params:
            (params[2], memory[params[0]] * memory[params[1]], None), 3)
        self.addOpCode(3, 'input', self.get_input, 1)  # SimpleQueue.get blocks
        self.addOpCode(
            4, 'output', lambda memory, params:
            (None, self.output.put(memory[params[0]]), None), 1)
        self.addOpCode(
            5, 'jump-if-true', lambda memory, params:
            (None, None, None
             if memory[params[0]] == 0 else memory[params[1]]), 2, 0)
        self.addOpCode(
            6, 'jump-if-false', lambda memory, params:
            (None, None, None
             if memory[params[0]] != 0 else memory[params[1]]), 2, 0)
        self.addOpCode(
            7, 'less-than', lambda memory, params:
            (params[2], 1
             if memory[params[0]] < memory[params[1]] else 0, None), 3)
        self.addOpCode(
            8, 'equals', lambda memory, params:
            (params[2], 1
             if memory[params[0]] == memory[params[1]] else 0, None), 3)
        self.addOpCode(9, 'rel_base', self.update_rel_base, 1)
        self.addOpCode(98, 'seti', lambda memory, params:
                       (params[1], params[0], None), 2)
        self.addOpCode(99, 'halt', lambda memory, params: (None, None, None),
                       0)
Example #21
class BoundedBlockingQueue(object):
    def __init__(self, capacity: int):
        self.enque_lock = Lock()
        self.deque_lock = Lock()
        self.cap = capacity
        self.q = SimpleQueue()
        self.deque_lock.acquire()

    def enqueue(self, element: int) -> None:
        self.enque_lock.acquire()
        self.q.put(element)

        if self.q.qsize() < self.cap:
            self.enque_lock.release()

        if self.deque_lock.locked():
            self.deque_lock.release()

    def dequeue(self) -> int:
        self.deque_lock.acquire()
        val = None

        if self.q.qsize() > 0:
            val = self.q.get()

        if self.q.qsize():
            self.deque_lock.release()

        if val is not None and self.enque_lock.locked():
            self.enque_lock.release()
        return val

    def size(self) -> int:
        return self.q.qsize()
Example #22
 def __init__(self,
              broker,
              schema_registry,
              topic,
              logging_enabled=False,
              groupId="asgardConsumerGroup",
              autocommit=True):
     """
     Initialiser for Confluent Consumer using AvroConsumer. 
     Each consumer can only be subscribed to one topic 
     Parameters
     ----------
     broker: str
         The URL of the broker (example: 'localhost:9092')
     schema_registry: str
         The URL of the confluent Schema Registry endpoint (example: 'http://localhost:8081')
     topic: str
         The topic to subscribe to
     logging_enabled: bool, Optional
         If True, a logger will be created to log messages
     groupId: str, Optional
         An optional groupId which can be used to load balance consumers; default is "asgardConsumerGroup"
     """
     """self.__consumer = AvroConsumer(
         {
             "bootstrap.servers": broker,
             "group.id": groupId,
             "schema.registry.url": schema_registry,
             "enable.auto.commit": autocommit 
         }
     )"""
     self.__consumer = KafkaConsumer({
         "bootstrap.servers": broker,
         "group.id": groupId,
         "enable.auto.commit": autocommit,
         "auto.offset.reset": "latest"
     })
     self.autocommit = autocommit
     if not autocommit:
         self.consumed_messages = SimpleQueue()
     self.__consumer.subscribe([topic])
     if logging_enabled:
         self.logger = logging.getLogger(__name__)
     else:
         self.logger = None
Example #23
    def __init__(self,
                 max_workers: int = max((cpu_count(), 4)),
                 loop: AbstractEventLoop = None) -> None:
        if loop:
            warnings.warn(DeprecationWarning("loop argument is obsolete"))

        self.__futures = set()  # type: typing.Set[Future[Any]]

        self.__thread_events = set()  # type: typing.Set[threading.Event]
        self.__tasks = SimpleQueue()  # type: SimpleQueue[Optional[WorkItem]]
        self.__write_lock = threading.RLock()

        pools = set()
        for idx in range(max_workers):
            pools.add(self._start_thread(idx))

        self.__pool = frozenset(
            pools)  # type: typing.FrozenSet[threading.Thread]
Example #24
    def __init__(self, message_bus, activatable, forbidden, optional):
        """Create a new task.

        Anaconda modules are specified by their full DBus name or a prefix
        of their DBus name that ends with '*'.

        :param message_bus: a message bus
        :param activatable: a list of modules that can be activated.
        :param forbidden: a list of modules that are not allowed to run
        :param optional: a list of modules that are optional
        """
        super().__init__()
        self._message_bus = message_bus
        self._activatable = activatable
        self._forbidden = forbidden
        self._optional = optional
        self._module_observers = []
        self._callbacks = SimpleQueue()
Example #25
class MovingAverage:
    def __init__(self, size: int):
        """
        Initialize your data structure here.
        """
        self.size = size
        self.queue = SimpleQueue()
        self.total = 0

    def next(self, val: int) -> float:
        # self.size tracks the remaining capacity of the window; when it reaches 0
        # the window is full, so evict the oldest value before adding the new one
        if self.size == 0:
            self.total -= self.queue.get()
            self.size += 1

        self.total += val
        self.queue.put(val)
        self.size -= 1
        return self.total / self.queue.qsize()
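
A quick trace of the class above with a window of size 3:

m = MovingAverage(3)
print(m.next(1))    # 1.0    window: [1]
print(m.next(10))   # 5.5    window: [1, 10]
print(m.next(3))    # 4.666666666666667    window: [1, 10, 3]
print(m.next(5))    # 6.0    window: [10, 3, 5], the 1 was evicted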
Example #26
    def _ensure_loop(self, loop):
        if loop is None:
            loop = asyncio.get_event_loop()

        if loop not in self.conn_map:
            _wrap_close(loop, self)
            self.conn_map[loop] = SimpleQueue()

        return self.conn_map[loop], loop
Example #27
class ClientProtocol(Protocol):
    def __init__(self, on_con_lost):
        self.on_con_lost = on_con_lost
        self.connected = False
        self.response_data_queue = SimpleQueue()
        self.transport = None

    def connection_made(self, transport: Transport) -> None:
        self.connected = True
        self.transport = transport

    def data_received(self, data: bytes) -> None:
        self.response_data_queue.put(data)

    def connection_lost(self, exc):
        print('Server connection lost')
        self.connected = False
        self.on_con_lost.set_result(True)
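
A hedged sketch of wiring the protocol above into an asyncio client, modeled on the standard create_connection pattern; the host and port are placeholders, and Protocol is assumed to be asyncio.Protocol.

import asyncio

async def main() -> None:
    loop = asyncio.get_running_loop()
    on_con_lost = loop.create_future()
    transport, protocol = await loop.create_connection(
        lambda: ClientProtocol(on_con_lost), "127.0.0.1", 8888)
    transport.write(b"ping")
    await on_con_lost                         # resolved in connection_lost()
    while not protocol.response_data_queue.empty():
        print(protocol.response_data_queue.get())

asyncio.run(main())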
Example #28
    def estimate_score():
        start = time.time()
        paths = request.json.get('paths')
        task_id = uuid.uuid4()

        assert len(paths)

        result_queue = SimpleQueue()
        app.config["PERCENTILE_QUEUES"][task_id] = result_queue
        img_paths_queue = app.config["IMG_PATHS_QUEUE"]
        img_paths_queue.put(dict(paths=paths, id=task_id))
        percentile = result_queue.get()
        if percentile is None:
            raise JsonError(500)
        data = dict(percentile=percentile)
        logging.info("Percentile: {:.0f}. Estimation took {:.3f}s".format(
            percentile * 100, time.time() - start))
        return json_response(data=data)
Example #29
class FilteringEventStream(IEventStream):
    def __init__(self, stream, filter_fn):
        self.filter_fn = filter_fn
        self.event_queue = SimpleQueue()
        self.stream = stream

        self.stream.register_callback(
            self.__event_callback,
            lambda packet: self.filter_fn(packet) is not None)

    def __event_callback(self, event):
        self.event_queue.put(self.filter_fn(event))

    def get_event_queue(self):
        return self.event_queue

    def unregister(self):
        self.stream.unregister(self.__event_callback)
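
A hedged sketch with a made-up stream object, just to show the callback wiring; the real stream and IEventStream types are not shown above.

class FakeStream:
    def __init__(self):
        self.callbacks = []

    def register_callback(self, callback, predicate):
        self.callbacks.append((callback, predicate))

    def unregister(self, callback):
        self.callbacks = [(c, p) for c, p in self.callbacks if c is not callback]

    def push(self, packet):
        for callback, predicate in self.callbacks:
            if predicate(packet):
                callback(packet)

stream = FakeStream()
events = FilteringEventStream(stream, lambda pkt: pkt if pkt % 2 == 0 else None)
for packet in range(5):
    stream.push(packet)
print(events.get_event_queue().qsize())   # 3 -> the even packets 0, 2 and 4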
Example #30
def _new(
    roots: Iterable[PurePath], index: Index, acc: SimpleQueue, bfs_q: SimpleQueue
) -> None:
    for root in roots:
        with suppress(PermissionError):
            mode = _fs_stat(root)
            _ancestors = ancestors(root)
            node = Node(
                path=root,
                mode=mode,
                ancestors=_ancestors,
            )
            acc.put(node)

            if root in index:
                for item in _listdir(root):
                    path = root / item
                    bfs_q.put(path)
Example #31
 def __init__(self, network: Network):
     # Get a reference to the network, with its histories.
     self.network = network
     # Define the connection parameters (location of the RabbitMQ broker)
     self.parameters = pika.ConnectionParameters(host="localhost")
     # Create the exchange and the cycle-observation queue
     self.init_cycle_connection()
     # Create the thread that listens for cycles
     self.cycle_thread = threading.Thread(target=self.cycle_listening,
                                          daemon=True)
     # Create the thread that performs the optimization
     self.optimization_thread = threading.Thread(target=self.optimizing,
                                                 daemon=True)
     # Create the queue that receives the optimizations to be performed
     self.optimization_queue = SimpleQueue()
     # Create the queue that receives the new setpoints to be
     # fetched by the controller
     self.setpoint_queue = SimpleQueue()
Example #32
    def __init__(self, name):
        # Don't need this in python, queue already has one
        # self.m_mutex = threading.Lock()

        self.m_listeners = UidVector()
        self.m_queue = Queue()
        self.m_pollers = UidVector()

        self.m_active = False
        self.name = name
Example #33
    def __init__(self, uid, stream, notifier, handshake, get_entry_type, verbose=False):

        # logging debugging
        self.m_verbose = verbose

        self.m_uid = uid
        self.m_stream = stream
        self.m_notifier = notifier
        self.m_handshake = handshake
        self.m_get_entry_type = get_entry_type

        self.m_active = False
        self.m_proto_rev = 0x0300
        self.state = self.State.kCreated
        self.m_state_mutex = threading.Lock()
        self.m_last_update = 0

        self.m_outgoing = Queue()

        self.m_process_incoming = None
        self.m_read_thread = None
        self.m_write_thread = None

        self.m_remote_id_mutex = threading.Lock()
        self.m_remote_id = None
        self.m_last_post = 0

        self.m_pending_mutex = threading.Lock()
        self.m_pending_outgoing = []
        self.m_pending_update = {}

        # Condition variables for shutdown
        self.m_shutdown_mutex = threading.Lock()
        # Not needed in python
        # self.m_read_shutdown_cv = threading.Condition()
        # self.m_write_shutdown_cv = threading.Condition()
        self.m_read_shutdown = False
        self.m_write_shutdown = False

        # turn off Nagle algorithm; we bundle packets for transmission
        try:
            self.m_stream.setNoDelay()
        except IOError as e:
            logger.warning("Setting TCP_NODELAY: %s", e)
Example #34
class NetworkConnection(object):
    class State(object):
        kCreated = 0
        kInit = 1
        kHandshake = 2
        kSynchronized = 3
        kActive = 4
        kDead = 5

    def __init__(self, uid, stream, notifier, handshake, get_entry_type, verbose=False):

        # logging debugging
        self.m_verbose = verbose

        self.m_uid = uid
        self.m_stream = stream
        self.m_notifier = notifier
        self.m_handshake = handshake
        self.m_get_entry_type = get_entry_type

        self.m_active = False
        self.m_proto_rev = 0x0300
        self.state = self.State.kCreated
        self.m_state_mutex = threading.Lock()
        self.m_last_update = 0

        self.m_outgoing = Queue()

        self.m_process_incoming = None
        self.m_read_thread = None
        self.m_write_thread = None

        self.m_remote_id_mutex = threading.Lock()
        self.m_remote_id = None
        self.m_last_post = 0

        self.m_pending_mutex = threading.Lock()
        self.m_pending_outgoing = []
        self.m_pending_update = {}

        # Condition variables for shutdown
        self.m_shutdown_mutex = threading.Lock()
        # Not needed in python
        # self.m_read_shutdown_cv = threading.Condition()
        # self.m_write_shutdown_cv = threading.Condition()
        self.m_read_shutdown = False
        self.m_write_shutdown = False

        # turn off Nagle algorithm; we bundle packets for transmission
        try:
            self.m_stream.setNoDelay()
        except IOError as e:
            logger.warning("Setting TCP_NODELAY: %s", e)

    def start(self):
        if self.m_active:
            return

        self.m_active = True
        self.set_state(self.State.kInit)

        # clear queue
        try:
            while True:
                self.m_outgoing.get_nowait()
        except Empty:
            pass

        # reset shutdown flags
        with self.m_shutdown_mutex:
            self.m_read_shutdown = False
            self.m_write_shutdown = False

        # start threads
        self.m_write_thread = SafeThread(
            target=self._writeThreadMain, name="nt-net-write"
        )
        self.m_read_thread = SafeThread(target=self._readThreadMain, name="nt-net-read")

    def __repr__(self):
        try:
            return "<NetworkConnection 0x%x %s>" % (id(self), self.info())
        except Exception:
            return "<NetworkConnection 0x%x ???>" % id(self)

    def stop(self):
        logger.debug("NetworkConnection stopping (%s)", self)

        if not self.m_active:
            return

        self.set_state(self.State.kDead)
        self.m_active = False
        # closing the stream so the read thread terminates
        self.m_stream.close()

        # send an empty outgoing message set so the write thread terminates
        self.m_outgoing.put([])

        # wait for threads to terminate, timeout
        self.m_write_thread.join(1)
        if self.m_write_thread.is_alive():
            logger.warning("%s did not die", self.m_write_thread.name)

        self.m_read_thread.join(1)
        if self.m_read_thread.is_alive():
            logger.warning("%s did not die", self.m_write_thread.name)

        # clear queue
        try:
            while True:
                self.m_outgoing.get_nowait()
        except Empty:
            pass

    def get_proto_rev(self):
        return self.m_proto_rev

    def get_stream(self):
        return self.m_stream

    def info(self):
        return ConnectionInfo(
            self.remote_id(),
            self.m_stream.getPeerIP(),
            self.m_stream.getPeerPort(),
            self.m_last_update,
            self.m_proto_rev,
        )

    def is_connected(self):
        return self.state == self.State.kActive

    def last_update(self):
        return self.m_last_update

    def set_process_incoming(self, func):
        self.m_process_incoming = func

    def set_proto_rev(self, proto_rev):
        self.m_proto_rev = proto_rev

    def set_state(self, state):
        with self.m_state_mutex:
            State = self.State

            # Don't update state any more once we've died
            if self.state == State.kDead:
                return

            # One-shot notify state changes
            if self.state != State.kActive and state == State.kActive:
                info = self.info()
                self.m_notifier.notifyConnection(True, info)
                logger.info(
                    "CONNECTED %s port %s (%s)",
                    info.remote_ip,
                    info.remote_port,
                    info.remote_id,
                )
            elif self.state != State.kDead and state == State.kDead:
                info = self.info()
                self.m_notifier.notifyConnection(False, info)
                logger.info(
                    "DISCONNECTED %s port %s (%s)",
                    info.remote_ip,
                    info.remote_port,
                    info.remote_id,
                )

            if self.m_verbose:
                logger.debug(
                    "%s: %s -> %s", self, _state_map[self.state], _state_map[state]
                )

            self.state = state

    # python optimization: don't use getter here
    # def state(self):
    #     return self.m_state

    def remote_id(self):
        with self.m_remote_id_mutex:
            return self.m_remote_id

    def set_remote_id(self, remote_id):
        with self.m_remote_id_mutex:
            self.m_remote_id = remote_id

    def uid(self):
        return self.m_uid

    def _sendMessages(self, msgs):
        self.m_outgoing.put(msgs)

    def _readThreadMain(self):
        decoder = WireCodec(self.m_proto_rev)

        verbose = self.m_verbose

        def _getMessage():
            decoder.set_proto_rev(self.m_proto_rev)
            try:
                return Message.read(self.m_stream, decoder, self.m_get_entry_type)
            except IOError as e:
                logger.warning("read error in handshake: %s", e)

                # terminate connection on bad message
                self.m_stream.close()

                return None

        self.set_state(self.State.kHandshake)

        try:
            handshake_success = self.m_handshake(self, _getMessage, self._sendMessages)
        except Exception:
            logger.exception("Unhandled exception during handshake")
            handshake_success = False

        if not handshake_success:
            self.set_state(self.State.kDead)
            self.m_active = False
        else:
            self.set_state(self.State.kActive)

            try:
                while self.m_active:
                    if not self.m_stream:
                        break

                    decoder.set_proto_rev(self.m_proto_rev)

                    try:
                        msg = Message.read(
                            self.m_stream, decoder, self.m_get_entry_type
                        )
                    except Exception as e:
                        if not isinstance(e, StreamEOF):
                            if verbose:
                                logger.exception("read error")
                            else:
                                logger.warning("read error: %s", e)

                        # terminate connection on bad message
                        self.m_stream.close()

                        break

                    if verbose:
                        logger.debug(
                            "%s received type=%s with str=%s id=%s seq_num=%s value=%s",
                            self.m_stream.sock_type,
                            msgtype_str(msg.type),
                            msg.str,
                            msg.id,
                            msg.seq_num_uid,
                            msg.value,
                        )

                    self.m_last_update = monotonic()
                    self.m_process_incoming(msg, self)
            except IOError as e:
                # connection died probably
                logger.debug("IOError in read thread: %s", e)
            except Exception:
                logger.warning("Unhandled exception in read thread", exc_info=True)

            self.set_state(self.State.kDead)
            self.m_active = False

        # also kill write thread
        self.m_outgoing.put([])

        with self.m_shutdown_mutex:
            self.m_read_shutdown = True

    def _writeThreadMain(self):
        encoder = WireCodec(self.m_proto_rev)

        verbose = self.m_verbose
        out = []

        try:
            while self.m_active:
                msgs = self.m_outgoing.get()

                if verbose:
                    logger.debug("write thread woke up")
                    if msgs:
                        logger.debug(
                            "%s sending %s messages", self.m_stream.sock_type, len(msgs)
                        )

                if not msgs:
                    continue

                encoder.set_proto_rev(self.m_proto_rev)

                # python-optimization: checking verbose causes extra overhead
                if verbose:
                    for msg in msgs:
                        if msg:
                            logger.debug(
                                "%s sending type=%s with str=%s id=%s seq_num=%s value=%s",
                                self.m_stream.sock_type,
                                msgtype_str(msg.type),
                                msg.str,
                                msg.id,
                                msg.seq_num_uid,
                                msg.value,
                            )
                            Message.write(msg, out, encoder)
                else:
                    for msg in msgs:
                        if msg:
                            Message.write(msg, out, encoder)

                if not self.m_stream:
                    break

                if not out:
                    continue

                self.m_stream.send(b"".join(out))

                del out[:]

                # if verbose:
                #    logger.debug('send %s bytes', encoder.size())
        except IOError as e:
            # connection died probably
            if not isinstance(e, StreamEOF):
                logger.debug("IOError in write thread: %s", e)
        except Exception:
            logger.warning("Unhandled exception in write thread", exc_info=True)

        self.set_state(self.State.kDead)
        self.m_active = False
        self.m_stream.close()  # also kill read thread

        with self.m_shutdown_mutex:
            self.m_write_shutdown = True

    def queueOutgoing(self, msg):
        with self.m_pending_mutex:

            # Merge with previous.  One case we don't combine: delete/assign loop.
            msgtype = msg.type
            if msgtype in [kEntryAssign, kEntryUpdate]:

                # don't do this for unassigned id's
                msg_id = msg.id
                if msg_id == 0xFFFF:
                    self.m_pending_outgoing.append(msg)
                    return

                mpend = self.m_pending_update.get(msg_id)
                if mpend is not None and mpend.first != 0:
                    # overwrite the previous one for this id
                    oldidx = mpend.first - 1
                    oldmsg = self.m_pending_outgoing[oldidx]
                    if (
                        oldmsg
                        and oldmsg.type == kEntryAssign
                        and msgtype == kEntryUpdate
                    ):
                        # need to update assignment with seq_num and value
                        oldmsg = Message.entryAssign(
                            oldmsg.str, msg_id, msg.seq_num_uid, msg.value, oldmsg.flags
                        )

                    else:
                        oldmsg = msg  # easy update

                    self.m_pending_outgoing[oldidx] = oldmsg

                else:
                    # new, remember it
                    pos = len(self.m_pending_outgoing)
                    self.m_pending_outgoing.append(msg)
                    self.m_pending_update[msg_id] = Pair(pos + 1, 0)

            elif msgtype == kEntryDelete:
                # don't do this for unassigned id's
                msg_id = msg.id
                if msg_id == 0xFFFF:
                    self.m_pending_outgoing.append(msg)
                    return

                # clear previous updates
                mpend = self.m_pending_update.get(msg_id)
                if mpend is not None:
                    if mpend.first != 0:
                        self.m_pending_outgoing[mpend.first - 1] = None

                    if mpend.second != 0:
                        self.m_pending_outgoing[mpend.second - 1] = None

                    self.m_pending_update[msg_id] = _empty_pair

                # add deletion
                self.m_pending_outgoing.append(msg)

            elif msgtype == kFlagsUpdate:
                # don't do this for unassigned id's
                msg_id = msg.id
                if msg_id == 0xFFFF:
                    self.m_pending_outgoing.append(msg)
                    return

                mpend = self.m_pending_update.get(msg_id)
                if mpend is not None and mpend.second != 0:
                    # overwrite the previous one for this id
                    self.m_pending_outgoing[mpend.second - 1] = msg

                else:
                    # new, remember it
                    pos = len(self.m_pending_outgoing)
                    self.m_pending_outgoing.append(msg)
                    self.m_pending_update[msg_id] = Pair(0, pos + 1)

            elif msgtype == kClearEntries:
                # knock out all previous assigns/updates!
                for i, m in enumerate(self.m_pending_outgoing):
                    if not m:
                        continue

                    t = m.type
                    if t in [
                        kEntryAssign,
                        kEntryUpdate,
                        kFlagsUpdate,
                        kEntryDelete,
                        kClearEntries,
                    ]:
                        self.m_pending_outgoing[i] = None

                self.m_pending_update.clear()
                self.m_pending_outgoing.append(msg)

            else:
                self.m_pending_outgoing.append(msg)

    def postOutgoing(self, keep_alive):
        with self.m_pending_mutex:
            # optimization: don't call monotonic unless needed
            # now = monotonic()
            if not self.m_pending_outgoing:
                if not keep_alive:
                    return

                # send keep-alives once a second (if no other messages have been sent)
                now = monotonic()
                if (now - self.m_last_post) < 1.0:
                    return

                self.m_outgoing.put((Message.keepAlive(),))

            else:
                now = monotonic()
                self.m_outgoing.put(self.m_pending_outgoing)

                self.m_pending_outgoing = []
                self.m_pending_update.clear()

            self.m_last_post = now
Example #35
class CallbackThread(object):
    def __init__(self, name):
        # Don't need this in python, queue already has one
        # self.m_mutex = threading.Lock()

        self.m_listeners = UidVector()
        self.m_queue = Queue()
        self.m_pollers = UidVector()

        self.m_active = False
        self.name = name

    #
    # derived must implement the following
    #

    def matches(self, listener, data):
        raise NotImplementedError

    def setListener(self, data, listener_uid):
        raise NotImplementedError

    def doCallback(self, callback, data):
        raise NotImplementedError

    #
    # Impl
    #

    def start(self):
        self.m_active = True
        self._thread = SafeThread(target=self.main, name=self.name)

    def stop(self):
        self.m_active = False
        self.m_queue.put(None)

    def sendPoller(self, poller_uid, *args):
        # args are (listener_uid, item)
        poller = self.m_pollers.get(poller_uid)
        if poller:
            with poller.poll_cond:
                poller.poll_queue.append(args)
                poller.poll_cond.notify()

    def main(self):
        # micro-optimization: lift these out of the loop
        doCallback = self.doCallback
        matches = self.matches
        queue_get = self.m_queue.get
        setListener = self.setListener
        listeners_get = self.m_listeners.get
        listeners_items = self.m_listeners.items

        while True:
            item = queue_get()
            if not item:
                logger.debug("%s thread no longer active", self.name)
                break

            listener_uid, item = item
            if listener_uid is not None:
                listener = listeners_get(listener_uid)
                if listener and matches(listener, item):
                    setListener(item, listener_uid)
                    cb = listener.callback
                    if cb:
                        try:
                            doCallback(cb, item)
                        except Exception:
                            logger.warning(
                                "Unhandled exception processing %s callback",
                                self.name,
                                exc_info=True,
                            )
                    elif listener.poller_uid is not None:
                        self.sendPoller(listener.poller_uid, listener_uid, item)
            else:
                # Use copy because iterator might get invalidated
                for listener_uid, listener in list(listeners_items()):
                    if matches(listener, item):
                        setListener(item, listener_uid)
                        cb = listener.callback
                        if cb:
                            try:
                                doCallback(cb, item)
                            except Exception:
                                logger.warning(
                                    "Unhandled exception processing %s callback",
                                    self.name,
                                    exc_info=True,
                                )
                        elif listener.poller_uid is not None:
                            self.sendPoller(listener.poller_uid, listener_uid, item)

        # Wake any blocked pollers
        for poller in self.m_pollers.values():
            poller.terminate()
Example #36
 def __init__(self):
     # set to "infinite" size
     self.buffer = SimpleQueue()
     self.is_stopped = False