Example #1
File: community.py Project: devos50/noodle
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # self.transfer_queue = Queue()
        # self.transfer_queue_task = ensure_future(self.evaluate_transfer_queue())

        # Add state db
        if not kwargs.get("settings"):
            self._settings = PaymentSettings()
        self.state_db = PaymentState(self._settings.asset_precision)

        self.context = self.state_db.context

        self.reachability_cache = defaultdict(lambda: cachetools.LRUCache(100))

        # Dictionary chain_id: block_dot -> block
        self.tracked_blocks = defaultdict(lambda: {})
        self.peer_conf = defaultdict(lambda: defaultdict(int))
        self.should_witness_subcom = {}

        self.counter_signing_block_queue = PriorityQueue()
        self.block_sign_queue_task = ensure_future(
            self.evaluate_counter_signing_blocks()
        )

        self.witness_delta = kwargs.get("witness_delta")
        if not self.witness_delta:
            self.witness_delta = self.settings.witness_block_delta
Example #2
File: queues.py Project: hyq-python/hoopa
 async def init(self, setting):
     """
     初始化
     """
     self.setting = setting
     self.waiting = PriorityQueue()
     self.module = importlib.import_module(setting.SERIALIZATION)
Example #3
 def __init__(self, *params):
     self.domain, self.options, self.process_num, self.dns_servers, self.next_subs, \
         self.scan_count, self.found_count, self.queue_size_array, tmp_dir = params
     self.dns_count = len(self.dns_servers)
     self.scan_count_local = 0
     self.found_count_local = 0
     self.resolvers = [
         dns.asyncresolver.Resolver(configure=False)
         for _ in range(self.options.threads)
     ]
     for r in self.resolvers:
         r.lifetime = 6.0
         r.timeout = 10.0
     self.queue = PriorityQueue()
     self.ip_dict = {}
     self.found_subs = set()
     self.cert_subs = set()
     self.timeout_subs = {}
     self.no_server_subs = {}
     self.count_time = time.time()
     self.outfile = open(
         '%s/%s_part_%s.txt' % (tmp_dir, self.domain, self.process_num),
         'w')
     self.normal_names_set = set()
     self.lock = asyncio.Lock()
     self.threads_status = ['1'] * self.options.threads
Example #4
    def __init__(self,
                 check_url,
                 allowed_anonymity_levels=None,
                 qps_per_proxy=1,
                 max_consecutive_failures=5,
                 providers=PROVIDERS,
                 timeout=5):
        self._proxies = Queue()
        self._pending_providers = Queue()
        self._providers = providers

        self._verified_proxies = {}
        self._throttled_proxies = PriorityQueue()
        self._errors = {}

        self._check_url = check_url
        self._qps_per_proxy = qps_per_proxy
        self._max_consecutive_failures = max_consecutive_failures
        self._timeout = timeout

        self._ip = None
        self._ip_lock = Lock()

        if not allowed_anonymity_levels:
            self._allowed_anonymity_levels = ['Anonymous', 'Elite']
        else:
            self._allowed_anonymity_levels = allowed_anonymity_levels
Example #5
class EventQueue:
    __queue = None

    def __init__(self):
        self.__locker = {}
        self.__queue = PriorityQueue()

    async def consume(self):
        while True:
            _, caller, data = await self.__queue.get()
            await wait_for(caller.sendData(data), MAX_TIMEOUT)
            if caller in self.__locker:
                self.__locker[caller].release()
                if not isRealtime():
                    # Wait for the lock to be locked
                    while not self.__locker[caller].locked():
                        await sleep(0.001)
                        if caller not in self.__locker:
                            break

    async def put(self, caller, data):
        if caller not in self.__locker:
            self.__locker[caller] = Lock()
        await self.__locker[caller]
        self.__queue.put_nowait((data[0], caller, data))

    def remove(self, caller):
        self.__locker.pop(caller)

    @staticmethod
    def getInstance():
        if not EventQueue.__queue:
            EventQueue.__queue = EventQueue()
        return EventQueue.__queue
Example #6
File: peers.py Project: pamir-s/trinity
class WaitingPeers(Generic[TChainPeer]):
    """
    Peers waiting to perform some action. When getting a peer from this queue,
    prefer the peer with the best throughput for the given command.
    """
    _waiting_peers: 'PriorityQueue[SortableTask[TChainPeer]]'
    _response_command_type: Tuple[Type[CommandAPI[Any]], ...]

    def __init__(
        self,
        response_command_type: Union[Type[CommandAPI[Any]],
                                     Sequence[Type[CommandAPI[Any]]]],
        sort_key: Callable[[PerformanceAPI],
                           float] = _items_per_second) -> None:
        """
        :param sort_key: how should we sort the peers to get the fastest? low score means top-ranked
        """
        self._waiting_peers = PriorityQueue()

        if isinstance(response_command_type, type):
            self._response_command_type = (response_command_type, )
        elif isinstance(response_command_type, collections.abc.Sequence):
            self._response_command_type = tuple(response_command_type)
        else:
            raise TypeError(f"Unsupported value: {response_command_type}")

        self._peer_wrapper = SortableTask.orderable_by_func(
            self._get_peer_rank)
        self._sort_key = sort_key

    def _get_peer_rank(self, peer: TChainPeer) -> float:
        scores = [
            self._sort_key(exchange.tracker)
            for exchange in peer.chain_api.exchanges if issubclass(
                exchange.get_response_cmd_type(), self._response_command_type)
        ]

        if len(scores) == 0:
            raise ValidationError(
                f"Could not find any exchanges on {peer} "
                f"with response {self._response_command_type!r}")

        # Typically there will only be one score, but we might want to match multiple commands.
        # To handle that case, we take the average of the scores:
        return sum(scores) / len(scores)

    def put_nowait(self, peer: TChainPeer) -> None:
        self._waiting_peers.put_nowait(self._peer_wrapper(peer))

    async def get_fastest(self) -> TChainPeer:
        wrapped_peer = await self._waiting_peers.get()
        peer = wrapped_peer.original

        # make sure the peer has not gone offline while waiting in the queue
        while not peer.manager.is_running:
            # if so, look for the next best peer
            wrapped_peer = await self._waiting_peers.get()
            peer = wrapped_peer.original

        return peer
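
SortableTask.orderable_by_func is a trinity helper, but the underlying technique matters for any PriorityQueue: the queue compares the queued items themselves, so each peer is wrapped in an object that orders by a rank function. A rough, hypothetical sketch of that idea (not trinity's actual implementation):

import functools

def orderable_by_func(rank_fn):
    # Build a wrapper class whose instances compare by rank_fn(original),
    # so arbitrary objects can live in a PriorityQueue.
    @functools.total_ordering
    class Wrapped:
        def __init__(self, original):
            self.original = original
            self._rank = rank_fn(original)

        def __eq__(self, other):
            return self._rank == other._rank

        def __lt__(self, other):
            return self._rank < other._rank

    return Wrapped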
Example #7
 def __init__(self):
     """
     Initialize and run an asyncio event loop forever.
     """
     self.loop = asyncio.get_event_loop()
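     # Note: the explicit loop argument was removed from asyncio queues in Python 3.10.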
     self.queue = PriorityQueue(loop=self.loop)
     self.loop.create_task(self.json_server(('', 25000)))
     self.loop.create_task(self.queue_dumper())
     self.loop.run_forever()
Example #8
 def __init__(
         self,
         response_command_type: Type[Command],
         sort_key: Callable[[BasePerformance], float]=_items_per_second) -> None:
     """
     :param sort_key: how should we sort the peers to get the fastest? low score means top-ranked
     """
     self._waiting_peers = PriorityQueue()
     self._response_command_type = response_command_type
     self._peer_wrapper = SortableTask.orderable_by_func(self._get_peer_rank)
     self._sort_key = sort_key
Example #9
 def __init__(self,
              maxsize: int = 0,
              order_fn: Callable[[TTask], Any] = identity,
              *,
              loop: AbstractEventLoop = None) -> None:
     self._maxsize = maxsize
     self._full_lock = Lock(loop=loop)
     self._open_queue = PriorityQueue(maxsize, loop=loop)
     self._order_fn = order_fn
     self._id_generator = count()
     self._tasks = set()
     self._in_progress = {}
Example #10
def djikstra(grid, start, end):
    distance = defaultdict(lambda: float("inf"))
    distance[start] = 0
    frontier = PriorityQueue()
    frontier.put_nowait((0, start))
    visited = set([])
    retrace = {}

    while not frontier.empty():
        (d, p) = frontier.get_nowait()
        if p in visited:
            continue

        visited.add(p)

        if p == end:
            break

        for n in reachable(grid, p):
            if distance[n] > d + 1:
                distance[n] = d + 1
                retrace[n] = p
            frontier.put_nowait((distance[n], n))

    # Backtrack
    current = end
    forward = {}
    while current != start:
        forward[retrace[current]] = current
        current = retrace[current]

    return ({p: distance[p] for p in visited}, forward)
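
The function above depends on a reachable(grid, p) helper that is not shown. A minimal sketch that makes the snippet runnable, assuming grid is a list of strings where '#' marks a wall and positions are (row, col) tuples:

from collections import defaultdict
from queue import PriorityQueue

def reachable(grid, p):
    # Yield the orthogonal neighbours of p that stay on the grid and are not walls.
    row, col = p
    for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
        r, c = row + dr, col + dc
        if 0 <= r < len(grid) and 0 <= c < len(grid[r]) and grid[r][c] != '#':
            yield (r, c)

# e.g. distances, path = djikstra(["....", ".##.", "...."], (0, 0), (2, 3))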
Example #11
async def main():
    priority_queue = PriorityQueue()

    work_items = [
        WorkItem(3, 'Lowest priority'),
        WorkItem(2, 'Medium priority'),
        WorkItem(1, 'High priority')
    ]

    worker_task = asyncio.create_task(worker(priority_queue))

    for work in work_items:
        priority_queue.put_nowait(work)

    await asyncio.gather(priority_queue.join(), worker_task)
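
The coroutine above refers to a WorkItem type and a worker coroutine that are not part of the snippet. A minimal sketch under the assumption that WorkItem sorts by its priority field and the worker marks every item as done:

import asyncio
from asyncio import PriorityQueue
from dataclasses import dataclass, field

@dataclass(order=True)
class WorkItem:
    priority: int
    description: str = field(compare=False)  # excluded from ordering

async def worker(queue: PriorityQueue):
    # Drain the queue in priority order, marking each item as processed.
    while not queue.empty():
        item = await queue.get()
        print(f'Processing: {item.description}')
        queue.task_done()

# With these assumed definitions, the main() coroutine above can be run directly:
asyncio.run(main())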
Example #12
    def __init__(self, bot: IBot, name: str):
        super().__init__(name)
        self.bot = bot

        self.disconnected = False

        self.throttle = Throttler(rate_limit=100, period=1)

        self.sasl_state = SASLResult.NONE
        self.last_read = monotonic()

        self._sent_count: int = 0
        self._send_queue: PriorityQueue[SentLine] = PriorityQueue()
        self.desired_caps: Set[ICapability] = set([])

        self._read_queue: Deque[Line] = deque()
        self._process_queue: Deque[Tuple[Line, Optional[Emit]]] = deque()

        self._ping_sent = False
        self._read_lguard = RLock()
        self.read_lock = self._read_lguard
        self._read_lwork = asyncio.Lock()
        self._wait_for = asyncio.Event()

        self._pending_who: Deque[str] = deque()
        self._alt_nicks: List[str] = []
Example #13
 async def queue_dumper(self):
     """
     Dump the status of the queue to the terminal (eventually to a file) every second.
     """
     # TODO : Ensure this also prints to a file.
     while True:
         if not self.queue.qsize():
             await asyncio.sleep(1)
         else:
             _copy = PriorityQueue()
             while not self.queue.empty():
                 await _copy.put(await self.queue.get())
             print(chr(27) + "[2J")  # ANSI clear-screen escape, a bit of Ctrl+L magic
             while not _copy.empty():
                 element = await _copy.get()
                 print(element)
                 await self.queue.put(element)
         await asyncio.sleep(1)
Example #14
    def __init__(
            self,
            response_command_type: Union[Type[CommandAPI[Any]], Sequence[Type[CommandAPI[Any]]]],
            sort_key: Callable[[PerformanceAPI], float] = _items_per_second) -> None:
        """
        :param sort_key: how should we sort the peers to get the fastest? low score means top-ranked
        """
        self._waiting_peers = PriorityQueue()

        if isinstance(response_command_type, type):
            self._response_command_type = (response_command_type,)
        elif isinstance(response_command_type, collections.abc.Sequence):
            self._response_command_type = tuple(response_command_type)
        else:
            raise TypeError(f"Unsupported value: {response_command_type}")

        self._peer_wrapper = SortableTask.orderable_by_func(self._get_peer_rank)
        self._sort_key = sort_key
Example #15
File: chain.py Project: nsabharwal/py-evm
class WaitingPeers:
    """
    Peers waiting to perform some action. When getting a peer from this queue,
    prefer the peer with the best throughput for the given command.
    """
    _waiting_peers: 'PriorityQueue[SortableTask[ETHPeer]]'

    def __init__(self, response_command_type: Type[Command]) -> None:
        self._waiting_peers = PriorityQueue()
        self._response_command_type = response_command_type
        self._peer_wrapper = SortableTask.orderable_by_func(self._ranked_peer)

    def _ranked_peer(self, peer: ETHPeer) -> float:
        relevant_throughputs = [
            exchange.tracker.items_per_second_ema.value
            for exchange in peer.requests
            if exchange.response_cmd_type == self._response_command_type
        ]

        if len(relevant_throughputs) == 0:
            raise ValidationError(
                f"Could not find any exchanges on {peer} "
                f"with response {self._response_command_type!r}"
            )

        avg_throughput = sum(relevant_throughputs) / len(relevant_throughputs)

        # high throughput peers should pop out of the queue first, so ranked as negative
        return -1 * avg_throughput

    def put_nowait(self, peer: ETHPeer) -> None:
        self._waiting_peers.put_nowait(self._peer_wrapper(peer))

    async def get_fastest(self) -> ETHPeer:
        wrapped_peer = await self._waiting_peers.get()
        peer = wrapped_peer.original

        # make sure the peer has not gone offline while waiting in the queue
        while not peer.is_operational:
            # if so, look for the next best peer
            wrapped_peer = await self._waiting_peers.get()
            peer = wrapped_peer.original

        return peer
Example #16
    def __init__(
        self,
        records: int,
        handlers: dict[str, Optional[Callable]],
        retry: int,
        publisher: BrokerPublisher,
        consumer_concurrency: int = 15,
        **kwargs: Any,
    ):
        super().__init__(**kwargs)
        self._handlers = handlers
        self._records = records
        self._retry = retry

        self._queue = PriorityQueue(maxsize=self._records)
        self._consumers: list[Task] = list()
        self._consumer_concurrency = consumer_concurrency

        self._publisher = publisher
Example #17
 def __init__(self, *params):
     self.domain, self.options, self.process_num, self.dns_servers, self.next_subs, \
         self.scan_count, self.found_count, self.queue_size_array, tmp_dir = params
     self.dns_count = len(self.dns_servers)
     self.scan_count_local = 0
     self.found_count_local = 0
     self.resolvers = [
         aiodns.DNSResolver(tries=1) for _ in range(self.options.threads)
     ]
     self.queue = PriorityQueue()
     self.ip_dict = {}
     self.found_subs = set()
     self.timeout_subs = {}
     self.count_time = time.time()
     self.outfile = open(
         '%s/%s_part_%s.txt' % (tmp_dir, self.domain, self.process_num),
         'w')
     self.normal_names_set = set()
     self.lock = asyncio.Lock()
     self.loop = None
     self.threads_status = ['1'] * self.options.threads
Example #18
class EventQueue:
    __queue = None

    def __init__(self):
        self.__locker = {}
        self.__queue = PriorityQueue()

    async def consume(self):
        while True:
            _, caller, evt = await self.__queue.get()
            await wait_for(caller.sendData(evt), MAX_TIMEOUT)
            if caller in self.__locker:
                self.__locker[caller].release()
                if not isRealtime():
                    # Wait for the lock to be locked
                    while not self.__locker[caller].locked():
                        await sleep(0.001)
                        if caller not in self.__locker:
                            break

    async def put(self, caller, evt):
        if not isinstance(evt, Event):
            LOG.critical("Expect class event.Event but got %s" %
                         (str(evt.__class__)))
            return

        if caller not in self.__locker:
            self.__locker[caller] = Lock()
        await self.__locker[caller]
        self.__queue.put_nowait((evt.date, caller, evt))

    def remove(self, caller):
        self.__locker.pop(caller)

    @staticmethod
    def getInstance():
        if not EventQueue.__queue:
            EventQueue.__queue = EventQueue()
        return EventQueue.__queue
Example #19
class KthLargest:
    def __init__(self, k, nums):
        self.k = k
        self.pq = PriorityQueue()
        for i in nums:
            self.pq.put(i)

    def add(self, n):
        if self.pq.qsize() < self.k:
            self.pq.put(n)

        return self.pq.get_nowait()
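
Note that add() above pops the smallest element without putting it back and only inserts when fewer than k items are queued, so the result is not generally the kth largest. A common min-heap formulation of the same idea, offered as a hedged sketch rather than the original project's code:

from queue import PriorityQueue

class KthLargestSketch:
    def __init__(self, k, nums):
        self.k = k
        self.pq = PriorityQueue()
        for n in nums:
            self.add(n)

    def add(self, n):
        self.pq.put(n)
        if self.pq.qsize() > self.k:
            self.pq.get()        # keep only the k largest values
        kth = self.pq.get()      # smallest of those = kth largest
        self.pq.put(kth)         # put it back so the queue is unchanged
        return kth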
Example #20
    async def _dequeue_entry(self,
                             queue: asyncio.PriorityQueue,
                             timeout: int = 5):
        while not self.is_shutting_down:
            wrapper: DateTimePriorityWrapper = await asyncio.wait_for(
                queue.get(), timeout=timeout)
            if not wrapper.can_process_now():
                await queue.put(wrapper)
                wait_for = max(wrapper.scheduled_in_seconds() * .5, 1)
                log.debug(
                    "it's not yet time to use this request, waiting for %d",
                    wait_for)
                await asyncio.sleep(wait_for)
                continue

            return wrapper.item

        raise ShuttingDown()
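
_dequeue_entry above assumes a DateTimePriorityWrapper that is not shown. A hypothetical sketch of its shape, assuming entries order by their scheduled timestamp and the payload is excluded from comparison:

import time
from dataclasses import dataclass, field
from typing import Any

@dataclass(order=True)
class DateTimePriorityWrapper:
    scheduled_at: float                  # unix timestamp used for ordering
    item: Any = field(compare=False)     # payload, never compared

    def can_process_now(self) -> bool:
        return time.time() >= self.scheduled_at

    def scheduled_in_seconds(self) -> float:
        return self.scheduled_at - time.time()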
Example #21
class EventManager:
    queue = PriorityQueue()
    
    def __init__(self):
        pass

    def addevent(self, tick, function, data=None):
        if data is None:
            self.queue.put_nowait(TupleSortingOn0((tick, function)))
        else:
            self.queue.put_nowait(TupleSortingOn0((tick, function, data)))
            
    def getevent(self):
        if self.queue.empty():
            return None

        return self.queue.get_nowait()
Example #22
def reverse_djikstra(grid, end):
    distance = defaultdict(lambda: float("inf"))
    distance[end] = 0
    frontier = PriorityQueue()
    frontier.put_nowait((0, end))
    visited = set([])

    while not frontier.empty():
        (d, p) = frontier.get_nowait()

        if p in visited:
            continue
        else:
            visited.add(p)

        for n in reachable(grid, p):
            if distance[n] > d + 1:
                distance[n] = d + 1
            frontier.put_nowait((distance[n], n))
    return {p: distance[p] for p in visited}
Example #23
class SubNameBrute(object):
    def __init__(self, *params):
        self.domain, self.options, self.process_num, self.dns_servers, self.next_subs, \
            self.scan_count, self.found_count, self.queue_size_array, tmp_dir = params
        self.dns_count = len(self.dns_servers)
        self.scan_count_local = 0
        self.found_count_local = 0
        self.resolvers = [
            aiodns.DNSResolver(tries=1) for _ in range(self.options.threads)
        ]
        self.queue = PriorityQueue()
        self.ip_dict = {}
        self.found_subs = set()
        self.timeout_subs = {}
        self.count_time = time.time()
        self.outfile = open(
            '%s/%s_part_%s.txt' % (tmp_dir, self.domain, self.process_num),
            'w')
        self.normal_names_set = set()
        self.lock = asyncio.Lock()
        self.loop = None
        self.threads_status = ['1'] * self.options.threads

    async def load_sub_names(self):
        normal_lines = []
        wildcard_lines = []
        wildcard_set = set()
        regex_list = []
        lines = set()
        with open(self.options.file) as inFile:
            for line in inFile.readlines():
                sub = line.strip()
                if not sub or sub in lines:
                    continue
                lines.add(sub)

                brace_count = sub.count('{')
                if brace_count > 0:
                    wildcard_lines.append((brace_count, sub))
                    sub = sub.replace('{alphnum}', '[a-z0-9]')
                    sub = sub.replace('{alpha}', '[a-z]')
                    sub = sub.replace('{num}', '[0-9]')
                    if sub not in wildcard_set:
                        wildcard_set.add(sub)
                        regex_list.append('^' + sub + '$')
                else:
                    normal_lines.append(sub)
                    self.normal_names_set.add(sub)

        if regex_list:
            pattern = '|'.join(regex_list)
            _regex = re.compile(pattern)
            for line in normal_lines:
                if _regex.search(line):
                    normal_lines.remove(line)

        for _ in normal_lines[self.process_num::self.options.process]:
            await self.queue.put((0, _))  # priority set to 0
        for _ in wildcard_lines[self.process_num::self.options.process]:
            await self.queue.put(_)

    async def scan(self, j):
        self.resolvers[j].nameservers = [self.dns_servers[j % self.dns_count]]
        if self.dns_count > 1:
            while True:
                s = random.choice(self.resolvers)
                if s != self.dns_servers[j % self.dns_count]:
                    self.resolvers[j].nameservers.append(s)
                    break
        while True:
            try:
                if time.time() - self.count_time > 1.0:
                    async with self.lock:
                        self.scan_count.value += self.scan_count_local
                        self.scan_count_local = 0
                        self.queue_size_array[
                            self.process_num] = self.queue.qsize()
                        if self.found_count_local:
                            self.found_count.value += self.found_count_local
                            self.found_count_local = 0
                        self.count_time = time.time()

                try:
                    brace_count, sub = self.queue.get_nowait()
                    self.threads_status[j] = '1'
                except asyncio.queues.QueueEmpty as e:
                    self.threads_status[j] = '0'
                    await asyncio.sleep(0.5)
                    if '1' not in self.threads_status:
                        break
                    else:
                        continue

                if brace_count > 0:
                    brace_count -= 1
                    if sub.find('{next_sub}') >= 0:
                        for _ in self.next_subs:
                            await self.queue.put(
                                (0, sub.replace('{next_sub}', _)))
                    if sub.find('{alphnum}') >= 0:
                        for _ in 'abcdefghijklmnopqrstuvwxyz0123456789':
                            await self.queue.put(
                                (brace_count, sub.replace('{alphnum}', _, 1)))
                    elif sub.find('{alpha}') >= 0:
                        for _ in 'abcdefghijklmnopqrstuvwxyz':
                            await self.queue.put(
                                (brace_count, sub.replace('{alpha}', _, 1)))
                    elif sub.find('{num}') >= 0:
                        for _ in '0123456789':
                            await self.queue.put(
                                (brace_count, sub.replace('{num}', _, 1)))
                    continue
            except Exception as e:
                import traceback
                print(traceback.format_exc())
                break

            try:

                if sub in self.found_subs:
                    continue

                self.scan_count_local += 1
                cur_domain = sub + '.' + self.domain
                # print('Query %s' % cur_domain)
                answers = await self.resolvers[j].query(cur_domain, 'A')

                if answers:
                    self.found_subs.add(sub)
                    ips = ', '.join(sorted([answer.host
                                            for answer in answers]))
                    if ips in ['1.1.1.1', '127.0.0.1', '0.0.0.0', '0.0.0.1']:
                        continue
                    if self.options.i and is_intranet(answers[0].host):
                        continue

                    try:
                        self.scan_count_local += 1
                        answers = await self.resolvers[j].query(
                            cur_domain, 'CNAME')
                        cname = answers[0].target.to_unicode().rstrip('.')
                        if cname.endswith(
                                self.domain) and cname not in self.found_subs:
                            cname_sub = cname[:len(cname) - len(self.domain) -
                                              1]  # new sub
                            if cname_sub not in self.normal_names_set:
                                self.found_subs.add(cname)
                                await self.queue.put((0, cname_sub))
                    except Exception as e:
                        pass

                    first_level_sub = sub.split('.')[-1]
                    if (first_level_sub, ips) not in self.ip_dict:
                        self.ip_dict[(first_level_sub, ips)] = 1
                    else:
                        self.ip_dict[(first_level_sub, ips)] += 1
                        if self.ip_dict[(first_level_sub, ips)] > 30:
                            continue

                    self.found_count_local += 1

                    self.outfile.write(
                        cur_domain.ljust(30) + '\t' + ips + '\n')
                    self.outfile.flush()
                    try:
                        self.scan_count_local += 1
                        await self.resolvers[j].query(
                            'lijiejie-test-not-existed.' + cur_domain, 'A')
                    except aiodns.error.DNSError as e:
                        if e.args[0] in [4]:
                            if self.queue.qsize() < 50000:
                                for _ in self.next_subs:
                                    await self.queue.put((0, _ + '.' + sub))
                            else:
                                await self.queue.put((1, '{next_sub}.' + sub))
                    except Exception as e:
                        pass

            except aiodns.error.DNSError as e:
                if e.args[0] in [1, 4]:
                    pass
                elif e.args[0] in [
                        11, 12
                ]:  # 12 timeout   # (11, 'Could not contact DNS servers')
                    # print('timed out sub %s' % sub)
                    self.timeout_subs[sub] = self.timeout_subs.get(sub, 0) + 1
                    if self.timeout_subs[sub] <= 1:
                        await self.queue.put((0, sub))  # Retry
                else:
                    print(e)
            except asyncio.TimeoutError as e:
                pass
            except Exception as e:
                import traceback
                traceback.print_exc()
                with open('errors.log', 'a') as errFile:
                    errFile.write('[%s] %s\n' % (type(e), str(e)))

    async def async_run(self):
        await self.load_sub_names()
        tasks = [self.scan(i) for i in range(self.options.threads)]
        await asyncio.gather(*tasks)

    def run(self):
        self.loop = asyncio.get_event_loop()
        asyncio.set_event_loop(self.loop)
        self.loop.run_until_complete(self.async_run())
Example #24
 def __init__(self, maxsize):
     PriorityQueue.__init__(self, maxsize=maxsize)
     self.counter = 0
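
Only the constructor is shown above; a common reason to pair a PriorityQueue subclass with a counter is to break ties between equal priorities so the payload itself is never compared. A sketch of that pattern, stated as an assumption about the intent rather than the original project's code:

from asyncio import PriorityQueue

class CountingPriorityQueue(PriorityQueue):
    def __init__(self, maxsize=0):
        super().__init__(maxsize=maxsize)
        self.counter = 0

    def put_item_nowait(self, priority, item):
        # (priority, counter, item): the counter keeps insertion order among
        # equal priorities and prevents Python from comparing the payloads.
        super().put_nowait((priority, self.counter, item))
        self.counter += 1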
Example #25
 def __init__(self):
     self.__locker = {}
     self.__queue = PriorityQueue()
Example #26
class TaskQueue(Generic[TTask]):
    """
    TaskQueue keeps priority-order track of pending tasks, with a limit on the number pending.

    A producer of tasks will insert pending tasks with await add(), which will not return until
    all tasks have been added to the queue.

    A task consumer calls await get() to retrieve tasks for processing. Tasks will be returned in
    priority order. If no tasks are pending, get()
    will pause until at least one is available. Only one consumer will have a task "checked out"
    from get() at a time.

    After tasks are successfully completed, the consumer will call complete() to remove them from
    the queue. The consumer doesn't need to complete all tasks, but any uncompleted tasks will be
    considered abandoned. Another consumer can pick them up at the next get() call.
    """

    # a function that determines the priority order (lower int is higher priority)
    _order_fn: FunctionProperty[Callable[[TTask], Any]]

    # batches of tasks that have been started but not completed
    _in_progress: Dict[int, Tuple[TTask, ...]]

    # all tasks that have been placed in the queue and have not been started
    _open_queue: 'PriorityQueue[Tuple[Any, TTask]]'

    # all tasks that have been placed in the queue and have not been completed
    _tasks: Set[TTask]

    def __init__(self,
                 maxsize: int = 0,
                 order_fn: Callable[[TTask], Any] = identity,
                 *,
                 loop: AbstractEventLoop = None) -> None:
        self._maxsize = maxsize
        self._full_lock = Lock(loop=loop)
        self._open_queue = PriorityQueue(maxsize, loop=loop)
        self._order_fn = order_fn
        self._id_generator = count()
        self._tasks = set()
        self._in_progress = {}

    async def add(self, tasks: Tuple[TTask, ...]) -> None:
        """
        add() will insert as many tasks as can be inserted until the queue fills up.
        Then it will pause until the queue is no longer full, and continue adding tasks.
        It will finally return when all tasks have been inserted.
        """
        if not isinstance(tasks, tuple):
            raise ValidationError(
                f"must pass a tuple of tasks to add(), but got {tasks!r}")

        already_pending = self._tasks.intersection(tasks)
        if already_pending:
            raise ValidationError(
                f"Duplicate tasks detected: {already_pending!r} are already present in the queue"
            )

        # make sure to insert the highest-priority items first, in case queue fills up
        remaining = tuple(
            sorted((self._order_fn(task), task) for task in tasks))

        while remaining:
            num_tasks = len(self._tasks)

            if self._maxsize <= 0:
                # no cap at all, immediately insert all tasks
                open_slots = len(remaining)
            elif num_tasks < self._maxsize:
                # there is room to add at least one more task
                open_slots = self._maxsize - num_tasks
            else:
                # wait until there is room in the queue
                await self._full_lock.acquire()

                # the current number of tasks has changed, can't reuse num_tasks
                num_tasks = len(self._tasks)
                open_slots = self._maxsize - num_tasks

            queueing, remaining = remaining[:open_slots], remaining[
                open_slots:]

            for task in queueing:
                # There will always be room in _open_queue until _maxsize is reached
                try:
                    self._open_queue.put_nowait(task)
                except QueueFull as exc:
                    task_idx = queueing.index(task)
                    qsize = self._open_queue.qsize()
                    raise QueueFull(
                        f'TaskQueue unsuccessful in adding task {task[1]!r} because qsize={qsize}, '
                        f'num_tasks={num_tasks}, maxsize={self._maxsize}, open_slots={open_slots}, '
                        f'num queueing={len(queueing)}, len(_tasks)={len(self._tasks)}, task_idx='
                        f'{task_idx}, queuing={queueing}, original msg: {exc}',
                    )

            unranked_queued = tuple(task for _rank, task in queueing)
            self._tasks.update(unranked_queued)

            if self._full_lock.locked() and len(self._tasks) < self._maxsize:
                self._full_lock.release()

    def get_nowait(self,
                   max_results: int = None) -> Tuple[int, Tuple[TTask, ...]]:
        """
        Get pending tasks. If no tasks are pending, raise an exception.

        :param max_results: return up to this many pending tasks. If None, return all pending tasks.
        :return: (batch_id, tasks to attempt)
        :raise ~asyncio.QueueFull: if no tasks are available
        """
        if self._open_queue.empty():
            raise QueueFull("No tasks are available to get")
        else:
            pending_tasks = self._get_nowait(max_results)

            # Generate a pending batch of tasks, so uncompleted tasks can be inferred
            next_id = next(self._id_generator)
            self._in_progress[next_id] = pending_tasks

            return (next_id, pending_tasks)

    async def get(self,
                  max_results: int = None) -> Tuple[int, Tuple[TTask, ...]]:
        """
        Get pending tasks. If no tasks are pending, wait until a task is added.

        :param max_results: return up to this many pending tasks. If None, return all pending tasks.
        :return: (batch_id, tasks to attempt)
        """
        if max_results is not None and max_results < 1:
            raise ValidationError(
                "Must request at least one task to process, not {max_results!r}"
            )

        # if the queue is empty, wait until at least one item is available
        queue = self._open_queue
        if queue.empty():
            _rank, first_task = await queue.get()
        else:
            _rank, first_task = queue.get_nowait()

        # In order to return from get() as soon as possible, never await again.
        # Instead, take only the tasks that are already available.
        if max_results is None:
            remaining_count = None
        else:
            remaining_count = max_results - 1
        remaining_tasks = self._get_nowait(remaining_count)

        # Combine the first and remaining tasks
        all_tasks = (first_task, ) + remaining_tasks

        # Generate a pending batch of tasks, so uncompleted tasks can be inferred
        next_id = next(self._id_generator)
        self._in_progress[next_id] = all_tasks

        return (next_id, all_tasks)

    def _get_nowait(self, max_results: int = None) -> Tuple[TTask, ...]:
        queue = self._open_queue

        # How many results do we want?
        available = queue.qsize()
        if max_results is None:
            num_tasks = available
        else:
            num_tasks = min((available, max_results))

        # Combine the remaining tasks with the first task we already pulled.
        ranked_tasks = tuple(queue.get_nowait() for _ in range(num_tasks))

        # strip out the rank value used internally for sorting in the priority queue
        return tuple(task for _rank, task in ranked_tasks)

    def complete(self, batch_id: int, completed: Tuple[TTask, ...]) -> None:
        if batch_id not in self._in_progress:
            raise ValidationError(
                f"batch id {batch_id} not recognized, with tasks {completed!r}"
            )

        attempted = self._in_progress.pop(batch_id)

        unrecognized_tasks = set(completed).difference(attempted)
        if unrecognized_tasks:
            self._in_progress[batch_id] = attempted
            raise ValidationError(
                f"cannot complete tasks {unrecognized_tasks!r} in this batch, only {attempted!r}"
            )

        incomplete = set(attempted).difference(completed)

        for task in incomplete:
            # These tasks are already counted in the total task count, so there will be room
            self._open_queue.put_nowait((self._order_fn(task), task))

        self._tasks.difference_update(completed)

        if self._full_lock.locked() and len(self._tasks) < self._maxsize:
            self._full_lock.release()

    def __contains__(self, task: TTask) -> bool:
        """Determine if a task has been added and not yet completed"""
        return task in self._tasks
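
A minimal usage sketch of the add/get/complete cycle described in the docstring above (note that the loop= arguments in __init__ require Python versions before 3.10):

import asyncio

async def example():
    queue = TaskQueue(maxsize=10)        # tasks here are plain ints, ordered by identity
    await queue.add((3, 1, 2))           # blocks only if the queue is full
    batch_id, tasks = await queue.get()  # tasks come back in priority order
    # ... process the batch ...
    queue.complete(batch_id, tasks)      # anything left out would be re-queued

asyncio.run(example())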
Example #27
 def __init__(self, k, nums):
     self.k = k
     self.pq = PriorityQueue()
     for i in nums:
         self.pq.put(i)
Example #28
File: community.py Project: devos50/noodle
class PaymentCommunity(BamiCommunity, metaclass=ABCMeta):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # self.transfer_queue = Queue()
        # self.transfer_queue_task = ensure_future(self.evaluate_transfer_queue())

        # Add state db
        if not kwargs.get("settings"):
            self._settings = PaymentSettings()
        self.state_db = PaymentState(self._settings.asset_precision)

        self.context = self.state_db.context

        self.reachability_cache = defaultdict(lambda: cachetools.LRUCache(100))

        # Dictionary chain_id: block_dot -> block
        self.tracked_blocks = defaultdict(lambda: {})
        self.peer_conf = defaultdict(lambda: defaultdict(int))
        self.should_witness_subcom = {}

        self.counter_signing_block_queue = PriorityQueue()
        self.block_sign_queue_task = ensure_future(
            self.evaluate_counter_signing_blocks()
        )

        self.witness_delta = kwargs.get("witness_delta")
        if not self.witness_delta:
            self.witness_delta = self.settings.witness_block_delta

    @property
    def settings(self) -> PaymentSettings:
        return super().settings

    def join_subcommunity_gossip(self, sub_com_id: bytes) -> None:
        # 0. Add master peer to the known minter group
        self.state_db.add_known_minters(sub_com_id, {sub_com_id})

        # 1. Main payment chain: spends and their confirmations
        # - Start gossip sync task periodically on the chain updates
        self.start_gossip_sync(sub_com_id)
        # - Process incoming blocks on the chain in order for payments
        self.subscribe_in_order_block(sub_com_id, self.receive_block_in_order)

        # 2. Witness chain:
        # - Gossip witness updates on the sub-chain
        self.start_gossip_sync(sub_com_id, prefix=b"w")
        # - Process witness block out of order
        self.subscribe_out_order_block(b"w" + sub_com_id, self.process_witness_block)
        # - Witness all updates on payment chain
        self.should_witness_subcom[sub_com_id] = self.settings.should_witness_block

    def receive_block_in_order(self, block: BamiBlock) -> None:
        if block.com_dot in self.state_db.applied_dots:
            raise Exception(
                "Block already applied?",
                block.com_dot,
                self.state_db.vals_cache,
                self.state_db.peer_mints,
                self.state_db.applied_dots,
            )
        chain_id = block.com_id
        dot = block.com_dot
        self.state_db.applied_dots.add(dot)

        # Check reachability for target block -> update risk
        for blk_dot in self.tracked_blocks[chain_id]:
            if self.dot_reachable(chain_id, blk_dot, dot):
                self.update_risk(chain_id, block.public_key, blk_dot[0])

        # Process blocks according to their type
        self.logger.debug(
            "Processing block %s, %s, %s", block.type, chain_id, block.hash
        )
        if block.type == MINT_TYPE:
            self.process_mint(block)
        elif block.type == SPEND_TYPE:
            self.process_spend(block)
        elif block.type == CONFIRM_TYPE:
            self.process_confirm(block)
        elif block.type == REJECT_TYPE:
            self.process_reject(block)
        elif block.type == WITNESS_TYPE:
            raise Exception("Witness block received, while shouldn't")
        # Witness block react on new block:
        if (
            self.should_witness_subcom.get(chain_id)
            and block.type != WITNESS_TYPE
            and self.should_witness_chain_point(
                chain_id, self.my_pub_key_bin, block.com_seq_num
            )
        ):
            self.schedule_witness_block(chain_id, block.com_seq_num)

    def process_witness_block(self, blk: BamiBlock) -> None:
        """Process witness block out of order"""
        # No block is processed out of order in this community
        self.logger.debug(
            "Processing block %s, %s, %s", blk.type, blk.com_dot, blk.com_id
        )
        if blk.type != WITNESS_TYPE:
            raise Exception("Received not witness block on witness sub-chain!")
        self.process_witness(blk)

    def should_store_store_update(self, chain_id: bytes, seq_num: int) -> bool:
        """Store the status of the chain at the seq_num for further witnessing or verification"""
        # Should depend on if witnessing? - or something different
        return True
        # return self.should_witness_chain_point(chain_id, self.my_pub_key_bin, seq_num)

    def should_witness_chain_point(
        self, chain_id: bytes, peer_id: bytes, seq_num: int
    ) -> bool:
        """
        Returns:
            True if peer should witness this chain at seq_num
        """
        # Based on random coin tossing?
        seed = chain_id + peer_id + bytes(seq_num)
        ran = Random(seed)

        # Every peer should witness every K blocks?
        # TODO: that should depend on the number of peers in the community - change that
        # + account for the fault tolerance
        if ran.random() < 1 / self.witness_delta:
            return True
        return False

    # -------------- Mint transaction ----------------------

    def verify_mint(
        self, chain_id: bytes, minter: bytes, mint_transaction: Dict
    ) -> None:
        """
        Verify that a mint transaction from the minter is valid:
            - minter is known and acceptable
            - mint is properly formatted
            - mint value is within the acceptable range
            - total value minted by the minter is limited
        Args:
            chain_id: chain identifier
            minter: id of the minter, e.g. public key
            mint_transaction: transaction as a dictionary
        Raises:
            InvalidMintException if not valid mint
        """
        # 1. Is minter known and acceptable?
        if not self.state_db.known_chain_minters(
            chain_id
        ) or minter not in self.state_db.known_chain_minters(chain_id):
            raise UnknownMinterException(
                "Got minting from unacceptable peer ", chain_id, minter
            )
        # 2. Mint is properly formatted
        if not mint_transaction.get(b"value"):
            raise InvalidTransactionFormatException(
                "Mint transaction badly formatted ", mint_transaction, chain_id, minter
            )
        # 3. Minting value within the range
        if not (
            Decimal(self.settings.mint_value_range[0], self.context)
            < mint_transaction[b"value"]
            < Decimal(self.settings.mint_value_range[1], self.context)
        ):
            raise InvalidMintRangeException(
                chain_id, minter, mint_transaction.get(b"value")
            )
        # 4. Total value is bounded
        if not (
            self.state_db.peer_mints[minter]
            + Decimal(mint_transaction.get(b"value"), self.context)
            < Decimal(self.settings.mint_max_value, self.context)
        ):
            raise UnboundedMintException(
                chain_id,
                minter,
                self.state_db.peer_mints[minter],
                mint_transaction.get(b"value"),
            )

    def mint(self, value: Decimal = None, chain_id: bytes = None) -> None:
        """
        Create a mint for own reputation: Reputation & Liveness at Stake
        """
        if not value:
            value = self.settings.initial_mint_value
        if not chain_id:
            # Community id is the same as the peer id
            chain_id = self.my_pub_key_bin
        # Mint transaction: value
        mint_tx = {b"value": float(value)}
        self.verify_mint(chain_id, self.my_pub_key_bin, mint_tx)
        block = self.create_signed_block(
            block_type=MINT_TYPE, transaction=encode_raw(mint_tx), com_id=chain_id
        )
        self.share_in_community(block, chain_id)

    def process_mint(self, mint_blk: BamiBlock) -> None:
        """Process received mint transaction"""
        minter = mint_blk.public_key
        mint_tx = decode_raw(mint_blk.transaction)
        chain_id = mint_blk.com_id
        mint_dot = mint_blk.com_dot
        prev_links = mint_blk.links
        self.verify_mint(chain_id, minter, mint_tx)

        seq_num = mint_dot[0]
        self.state_db.apply_mint(
            chain_id,
            mint_dot,
            prev_links,
            minter,
            Decimal(mint_tx.get(b"value"), self.context),
            self.should_store_store_update(chain_id, seq_num),
        )

    # ------ Spend transaction -----------
    def spend(
        self,
        chain_id: bytes,
        counter_party: bytes,
        value: Decimal,
        ignore_validation: bool = False,
    ) -> None:
        """
        Spend tokens in the chain to the counter_party.
        Args:
            chain_id: identity of the chain
            counter_party: identity of the counter-party
            value: Decimal value to transfer
            ignore_validation: if True, skip the balance check (otherwise an insufficient balance raises an exception)
        """
        bal = self.state_db.get_balance(self.my_pub_key_bin)
        if ignore_validation or bal - value >= 0:
            spend_tx = {
                b"value": float(value),
                b"to_peer": counter_party,
                b"prev_pairwise_link": self.state_db.get_last_pairwise_links(
                    self.my_pub_key_bin, counter_party
                ),
            }
            self.verify_spend(self.my_pub_key_bin, spend_tx)
            block = self.create_signed_block(
                block_type=SPEND_TYPE, transaction=encode_raw(spend_tx), com_id=chain_id
            )
            self.logger.info("Created spend block %s", block.com_dot)
            counter_peer = self.get_peer_by_key(counter_party, chain_id)
            if counter_peer:
                self.send_block(block, [counter_peer])
            self.share_in_community(block, chain_id)
        else:
            raise InsufficientBalanceException("Not enough balance for spend")

    def verify_spend(self, spender: bytes, spend_transaction: Dict) -> None:
        """Verify the spend transaction:
            - spend is formatted correctly
        Raises:
            InvalidTransactionFormat
        """
        # 1. Verify the spend format
        if (
            not spend_transaction.get(b"value")
            or not spend_transaction.get(b"to_peer")
            or not spend_transaction.get(b"prev_pairwise_link")
        ):
            raise InvalidTransactionFormatException(
                "Spend transaction badly formatted ", spender, spend_transaction
            )
        # 2. Verify the spend value in range
        if not (
            self.settings.spend_value_range[0]
            < spend_transaction.get(b"value")
            < self.settings.spend_value_range[1]
        ):
            raise InvalidSpendRangeException(
                "Spend value out of range", spender, spend_transaction.get(b"value")
            )

    def process_spend(self, spend_block: BamiBlock) -> None:
        # Store spend in the database
        spend_tx = decode_raw(spend_block.transaction)
        spender = spend_block.public_key
        self.verify_spend(spender, spend_tx)

        chain_id = spend_block.com_id
        spend_dot = spend_block.com_dot
        pers_links = spend_block.links

        prev_spend_links = spend_tx.get(b"prev_pairwise_link")
        value = Decimal(spend_tx.get(b"value"), self.context)
        to_peer = spend_tx.get(b"to_peer")
        seq_num = spend_dot[0]

        self.state_db.apply_spend(
            chain_id,
            prev_spend_links,
            pers_links,
            spend_dot,
            spender,
            to_peer,
            value,
            self.should_store_store_update(chain_id, seq_num),
        )

        # Is this block related to my peer?
        if to_peer == self.my_pub_key_bin:
            self.add_block_to_response_processing(spend_block)

    # ------------ Block Response processing ---------

    def add_block_to_response_processing(self, block: BamiBlock) -> None:
        self.tracked_blocks[block.com_id][block.com_dot] = block

        self.counter_signing_block_queue.put_nowait((block.com_seq_num, (0, block)))

    def process_counter_signing_block(
        self, block: BamiBlock, time_passed: float = None, num_block_passed: int = None,
    ) -> bool:
        """
        Process block that should be counter-signed and return True if the block should be delayed more.
        Args:
            block: Processed block
            time_passed: time passed since first added
            num_block_passed: number of blocks passed since first added
        Returns:
            Should add to queue again.
        """
        res = self.block_response(block, time_passed, num_block_passed)
        if res == BlockResponse.CONFIRM:
            self.confirm(
                block,
                extra_data={b"value": decode_raw(block.transaction).get(b"value")},
            )
            return False
        elif res == BlockResponse.REJECT:
            self.reject(block)
            return False
        return True

    async def evaluate_counter_signing_blocks(self, delta: float = None):
        while True:
            _delta = delta if delta else self.settings.block_sign_delta
            priority, block_info = await self.counter_signing_block_queue.get()
            process_time, block = block_info
            should_delay = self.process_counter_signing_block(block, process_time)
            self.logger.debug(
                "Processing counter signing block. Delayed: %s", should_delay
            )
            if should_delay:
                self.counter_signing_block_queue.put_nowait(
                    (priority, (process_time + _delta, block))
                )
                await sleep(_delta)
            else:
                self.tracked_blocks[block.com_id].pop(block.com_dot)
                await sleep(0.001)

    def block_response(
        self, block: BamiBlock, wait_time: float = None, wait_blocks: int = None
    ) -> BlockResponse:
        # Analyze the risk of accepting this block
        stat = self.state_db.get_closest_peers_status(block.com_id, block.com_seq_num)
        # If there is no information or chain is forked or
        peer_id = shorten(block.public_key)

        if not stat or not stat[1].get(peer_id):
            # Check that it is not infinite
            if (wait_time and wait_time > self.settings.max_wait_time) or (
                wait_blocks and wait_blocks > self.settings.max_wait_block
            ):
                return BlockResponse.REJECT
            return BlockResponse.DELAY
        if not stat[1][peer_id][1] or not stat[1][peer_id][0]:
            # If chain is forked or negative balance => reject
            return BlockResponse.REJECT

        # Verify the risk of missing some information:
        #  - There is diverse peers building upon the block

        # TODO: revisit that - number should depend on total number of peers in community.
        # 1. Diversity on the block building
        f = self.settings.diversity_confirm

        if len(self.peer_conf[(block.com_id, block.com_seq_num)]) >= f:
            return BlockResponse.CONFIRM
        else:
            return BlockResponse.DELAY

    def dot_reachable(self, chain_id: bytes, target_dot: Dot, block_dot: Dot):
        val = self.reachability_cache[(chain_id, target_dot)].get(block_dot)
        if val is not None:
            return val
        res = self.persistence.get_chain(chain_id).get_prev_links(block_dot)
        if target_dot in res:
            return True
        if max(res)[0] < target_dot[0]:
            return False
        else:
            # Need to take more steps
            for prev_dot in res:
                new_val = self.dot_reachable(chain_id, target_dot, prev_dot)
                if new_val:
                    self.reachability_cache[(chain_id, target_dot)][block_dot] = True
                    return True
            self.reachability_cache[(chain_id, target_dot)][block_dot] = False
            return False

    def update_risk(self, chain_id: bytes, conf_peer_id: bytes, target_seq_num: int):
        print("Risk update: ", shorten(conf_peer_id), target_seq_num)
        self.peer_conf[(chain_id, target_seq_num)][conf_peer_id] += 1

    # ----------- Witness transactions --------------

    def schedule_witness_block(
        self, chain_id: bytes, seq_num: int, delay: float = None
    ):
        # Schedule witness transaction
        name_prefix = str(hex_to_int(chain_id + bytes(seq_num)))
        if self.is_pending_task_active(name_prefix):
            self.replace_task(
                name_prefix,
                self.witness,
                chain_id,
                seq_num,
                delay=self.settings.witness_delta_time,
            )
        else:
            self.register_task(
                name_prefix,
                self.witness,
                chain_id,
                seq_num,
                delay=self.settings.witness_delta_time,
            )

    def witness_tx_well_formatted(self, witness_tx: Any) -> bool:
        return len(witness_tx) == 2 and witness_tx[0] > 0 and len(witness_tx[1]) > 0

    def build_witness_blob(self, chain_id: bytes, seq_num: int) -> Optional[bytes]:
        chain_state = self.state_db.get_closest_peers_status(chain_id, seq_num)
        if not chain_state:
            return None
        return encode_raw(chain_state)

    def apply_witness_tx(
        self, block: BamiBlock, witness_tx: Tuple[int, ChainState]
    ) -> None:
        state = witness_tx[1]
        state_hash = take_hash(state)
        seq_num = witness_tx[0]

        if not self.should_witness_chain_point(block.com_id, block.public_key, seq_num):
            # This is invalid witnessing - react
            raise InvalidWitnessTransactionException(
                "Received invalid witness transaction",
                block.com_id,
                block.public_key,
                seq_num,
            )
        self.state_db.add_witness_vote(
            block.com_id, seq_num, state_hash, block.public_key
        )
        self.state_db.add_chain_state(block.com_id, seq_num, state_hash, state)

        chain_id = block.com_id
        if self.tracked_blocks.get(chain_id):
            for block_dot, tracked_block in self.tracked_blocks[chain_id].items():
                if (
                    block_dot[0] <= seq_num
                    and state.get(shorten(tracked_block.public_key))
                    and state.get(shorten(tracked_block.public_key)) == (True, True)
                ):
                    self.update_risk(chain_id, block.public_key, block_dot[0])

    # ------ Confirm and reject transactions -------

    def apply_confirm_tx(self, block: BamiBlock, confirm_tx: Dict) -> None:
        claim_dot = block.com_dot
        chain_id = block.com_id
        claimer = block.public_key
        com_links = block.links
        seq_num = claim_dot[0]
        self.state_db.apply_confirm(
            chain_id,
            claimer,
            com_links,
            claim_dot,
            confirm_tx[b"initiator"],
            confirm_tx[b"dot"],
            Decimal(confirm_tx[b"value"], self.context),
            self.should_store_store_update(chain_id, seq_num),
        )

    def apply_reject_tx(self, block: BamiBlock, reject_tx: Dict) -> None:
        self.state_db.apply_reject(
            block.com_id,
            block.public_key,
            block.links,
            block.com_dot,
            reject_tx[b"initiator"],
            reject_tx[b"dot"],
            self.should_store_store_update(block.com_id, block.com_seq_num),
        )

    async def unload(self):
        if not self.block_sign_queue_task.done():
            self.block_sign_queue_task.cancel()
        await super().unload()
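
evaluate_counter_signing_blocks above illustrates a delayed re-queue pattern: queue entries are (priority, (time_passed, payload)) tuples, and entries that are not ready yet go back with a larger delay. A stripped-down, generic sketch of that loop (assuming priorities are unique so payloads are never compared):

import asyncio

async def evaluate(queue: asyncio.PriorityQueue, handle, delta: float = 0.5):
    # handle(payload, waited) returns True if the entry should be delayed again.
    while True:
        priority, (waited, payload) = await queue.get()
        if handle(payload, waited):
            queue.put_nowait((priority, (waited + delta, payload)))
            await asyncio.sleep(delta)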
Example #29
 def __init__(self, response_command_type: Type[Command]) -> None:
     self._waiting_peers = PriorityQueue()
     self._response_command_type = response_command_type
     self._peer_wrapper = SortableTask.orderable_by_func(self._ranked_peer)
Example #30
class BrokerHandler(BrokerHandlerSetup):
    """Broker Handler class."""

    __slots__ = "_handlers", "_records", "_retry", "_queue", "_consumers", "_consumer_concurrency"

    def __init__(
        self,
        records: int,
        handlers: dict[str, Optional[Callable]],
        retry: int,
        publisher: BrokerPublisher,
        consumer_concurrency: int = 15,
        **kwargs: Any,
    ):
        super().__init__(**kwargs)
        self._handlers = handlers
        self._records = records
        self._retry = retry

        self._queue = PriorityQueue(maxsize=self._records)
        self._consumers: list[Task] = list()
        self._consumer_concurrency = consumer_concurrency

        self._publisher = publisher

    @classmethod
    def _from_config(cls, config: MinosConfig, **kwargs) -> BrokerHandler:
        kwargs["handlers"] = cls._get_handlers(config, **kwargs)
        kwargs["publisher"] = cls._get_publisher(**kwargs)
        # noinspection PyProtectedMember
        return cls(**config.broker.queue._asdict(), **kwargs)

    @staticmethod
    def _get_handlers(
        config: MinosConfig, handlers: Optional[dict[str, Optional[Callable]]] = None, **kwargs
    ) -> dict[str, Callable[[BrokerRequest], Awaitable[Optional[BrokerResponse]]]]:
        if handlers is None:
            builder = EnrouteBuilder(*config.services, middleware=config.middleware)
            decorators = builder.get_broker_command_query_event(config=config, **kwargs)
            handlers = {decorator.topic: fn for decorator, fn in decorators.items()}
        return handlers

    # noinspection PyUnusedLocal
    @staticmethod
    @inject
    def _get_publisher(
        publisher: Optional[BrokerPublisher] = None,
        broker_publisher: BrokerPublisher = Provide["broker_publisher"],
        **kwargs,
    ) -> BrokerPublisher:
        if publisher is None:
            publisher = broker_publisher
        if publisher is None or isinstance(publisher, Provide):
            raise NotProvidedException(f"A {BrokerPublisher!r} object must be provided.")
        return publisher

    async def _setup(self) -> None:
        await super()._setup()
        await self._create_consumers()

    async def _destroy(self) -> None:
        await self._destroy_consumers()
        await super()._destroy()

    async def _create_consumers(self):
        while len(self._consumers) < self._consumer_concurrency:
            self._consumers.append(create_task(self._consume()))

    async def _destroy_consumers(self):
        for consumer in self._consumers:
            consumer.cancel()
        await gather(*self._consumers, return_exceptions=True)
        self._consumers = list()

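        # Hand queued-but-unprocessed entries back to the database so they are marked as not processed.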
        while not self._queue.empty():
            entry = self._queue.get_nowait()
            await self.submit_query(self._queries["update_not_processed"], (entry.id,))

    async def _consume(self) -> None:
        while True:
            await self._consume_one()

    async def _consume_one(self) -> None:
        entry = await self._queue.get()
        try:
            await self._dispatch_one(entry)
        finally:
            self._queue.task_done()

    @property
    def publisher(self) -> BrokerPublisher:
        """Get the publisher instance.

        :return: A ``BrokerPublisher`` instance.
        """
        return self._publisher

    @property
    def consumers(self) -> list[Task]:
        """Get the consumers.

        :return: A list of ``Task`` instances.
        """
        return self._consumers

    @property
    def handlers(self) -> dict[str, Optional[Callable]]:
        """Handlers getter.

        :return: A dictionary in which the keys are topics and the values are the corresponding handler functions.
        """
        return self._handlers

    @property
    def topics(self) -> KeysView[str]:
        """Get an iterable containing the topic names.

        :return: An ``Iterable`` of ``str``.
        """
        return self.handlers.keys()

    async def dispatch_forever(self, max_wait: Optional[float] = 60.0) -> NoReturn:
        """Dispatch the items in the consuming queue forever.

        :param max_wait: Maximum seconds to wait for notifications. If ``None``, the wait is unbounded.
        :return: This method does not return anything.
        """
        async with self.cursor() as cursor:
            await self._listen_entries(cursor)
            try:
                while True:
                    await self._wait_for_entries(cursor, max_wait)
                    await self.dispatch(cursor, background_mode=True)
            finally:
                await self._unlisten_entries(cursor)

    async def _listen_entries(self, cursor: Cursor):
        for topic in self.topics:
            # noinspection PyTypeChecker
            await cursor.execute(_LISTEN_QUERY.format(Identifier(topic)))

    async def _unlisten_entries(self, cursor: Cursor) -> None:
        for topic in self.topics:
            # noinspection PyTypeChecker
            await cursor.execute(_UNLISTEN_QUERY.format(Identifier(topic)))

    async def _wait_for_entries(self, cursor: Cursor, max_wait: Optional[float]) -> None:
        if await self._get_count(cursor):
            return

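        # Block on notifications, re-checking the pending count whenever the wait times out.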
        while True:
            try:
                return await wait_for(consume_queue(cursor.connection.notifies, self._records), max_wait)
            except TimeoutError:
                if await self._get_count(cursor):
                    return

    async def _get_count(self, cursor) -> int:
        if not len(self.topics):
            return 0
        await cursor.execute(_COUNT_NOT_PROCESSED_QUERY, (self._retry, tuple(self.topics)))
        count = (await cursor.fetchone())[0]
        return count

    async def dispatch(self, cursor: Optional[Cursor] = None, background_mode: bool = False) -> None:
        """Dispatch a batch of ``HandlerEntry`` instances from the database's queue.

        :param cursor: The cursor to interact with the database. If ``None`` is provided a new one is acquired.
        :param background_mode: If ``True`` the entries are dispatched in the background without waiting.
            Otherwise, the call waits until every entry has been processed.
        :return: This method does not return anything.
        """

        is_external_cursor = cursor is not None
        if not is_external_cursor:
            cursor = await self.cursor().__aenter__()

        async with cursor.begin():
            await cursor.execute(
                self._queries["select_not_processed"], (self._retry, tuple(self.topics), self._records)
            )
            result = await cursor.fetchall()

            if len(result):
                entries = self._build_entries(result)

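                # Flag the selected rows as in-flight before handing them to the consumers.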
                await cursor.execute(self._queries["mark_processing"], (tuple(e.id for e in entries),))

                for entry in entries:
                    await self._queue.put(entry)

        if not is_external_cursor:
            await cursor.__aexit__(None, None, None)

        if not background_mode:
            await self._queue.join()

    def _build_entries(self, rows: list[tuple]) -> list[BrokerHandlerEntry]:
        kwargs = {"callback_lookup": self.get_action}
        return [BrokerHandlerEntry(*row, **kwargs) for row in rows]

    async def _dispatch_one(self, entry: BrokerHandlerEntry) -> None:
        logger.debug(f"Dispatching '{entry!r}'...")
        try:
            await self.dispatch_one(entry)
        except (CancelledError, Exception) as exc:
            logger.warning(f"Raised an exception while dispatching {entry!r}: {exc!r}")
            entry.exception = exc
            if isinstance(exc, CancelledError):
                raise exc
        finally:
            query_id = "delete_processed" if entry.success else "update_not_processed"
            await self.submit_query(self._queries[query_id], (entry.id,))

    async def dispatch_one(self, entry: BrokerHandlerEntry) -> None:
        """Dispatch one row.

        :param entry: Entry to be dispatched.
        :return: This method does not return anything.
        """
        logger.info(f"Dispatching '{entry!s}'...")

        fn = self.get_callback(entry.callback)
        message = entry.data
        data, status, headers = await fn(message)

        if message.reply_topic is not None:
            await self.publisher.send(
                data,
                topic=message.reply_topic,
                identifier=message.identifier,
                status=status,
                user=message.user,
                headers=headers,
            )

    @staticmethod
    def get_callback(
        fn: Callable[[BrokerRequest], Union[Optional[BrokerResponse], Awaitable[Optional[BrokerResponse]]]]
    ) -> Callable[[BrokerMessage], Awaitable[tuple[Any, BrokerMessageStatus, dict[str, str]]]]:
        """Get the handler function to be used by the Broker Handler.

        :param fn: The action function.
        :return: A wrapper function around the given one that is compatible with the Broker Handler API.
        """

        @wraps(fn)
        async def _wrapper(raw: BrokerMessage) -> tuple[Any, BrokerMessageStatus, dict[str, str]]:
            request = BrokerRequest(raw)
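            # Expose the request user and headers to the handler via context variables; both are reset in the finally block.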
            user_token = REQUEST_USER_CONTEXT_VAR.set(request.user)
            headers_token = REQUEST_HEADERS_CONTEXT_VAR.set(raw.headers)

            try:
                response = fn(request)
                if isawaitable(response):
                    response = await response
                if isinstance(response, Response):
                    response = await response.content()
                return response, BrokerMessageStatus.SUCCESS, REQUEST_HEADERS_CONTEXT_VAR.get()
            except ResponseException as exc:
                logger.warning(f"Raised an application exception: {exc!s}")
                return repr(exc), BrokerMessageStatus.ERROR, REQUEST_HEADERS_CONTEXT_VAR.get()
            except Exception as exc:
                logger.exception(f"Raised a system exception: {exc!r}")
                return repr(exc), BrokerMessageStatus.SYSTEM_ERROR, REQUEST_HEADERS_CONTEXT_VAR.get()
            finally:
                REQUEST_USER_CONTEXT_VAR.reset(user_token)
                REQUEST_HEADERS_CONTEXT_VAR.reset(headers_token)

        return _wrapper

    def get_action(self, topic: str) -> Optional[Callable]:
        """Get handling function to be called.

        Gets the instance of the class and method to call.

        Args:
            topic: Kafka topic. Example: "TicketAdded"

        Raises:
            MinosNetworkException: the topic has no controller/action configured; please review the
                configuration file.
        """
        if topic not in self._handlers:
            raise MinosActionNotFoundException(
                f"topic {topic} have no controller/action configured, " f"please review th configuration file"
            )

        handler = self._handlers[topic]

        logger.debug(f"Loaded {handler!r} action!")
        return handler

    @cached_property
    def _queries(self) -> dict[str, str]:
        # noinspection PyTypeChecker
        return {
            "count_not_processed": _COUNT_NOT_PROCESSED_QUERY,
            "select_not_processed": _SELECT_NOT_PROCESSED_QUERY,
            "mark_processing": _MARK_PROCESSING_QUERY,
            "delete_processed": _DELETE_PROCESSED_QUERY,
            "update_not_processed": _UPDATE_NOT_PROCESSED_QUERY,
        }
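
The dispatch/consume split in BrokerHandler relies on PriorityQueue.task_done() and join() to know when a batch has been fully handled. A condensed, database-free sketch of that pattern (hypothetical Entry type, plain asyncio, not the Minos API):

import asyncio
from dataclasses import dataclass, field


@dataclass(order=True)
class Entry:
    priority: int
    payload: str = field(compare=False)


async def consume(queue: asyncio.PriorityQueue) -> None:
    # Counterpart of _consume()/_consume_one(): process entries forever,
    # always acknowledging them so join() can complete.
    while True:
        entry = await queue.get()
        try:
            print(f"handled {entry.payload}")
        finally:
            queue.task_done()


async def dispatch(queue: asyncio.PriorityQueue, background_mode: bool) -> None:
    # Counterpart of dispatch(): enqueue a batch and optionally wait for it.
    for i, payload in enumerate(("event-a", "event-b", "event-c")):
        await queue.put(Entry(i, payload))
    if not background_mode:
        await queue.join()


async def main() -> None:
    queue: asyncio.PriorityQueue = asyncio.PriorityQueue(maxsize=16)
    consumers = [asyncio.create_task(consume(queue)) for _ in range(3)]
    await dispatch(queue, background_mode=False)
    for task in consumers:
        task.cancel()
    await asyncio.gather(*consumers, return_exceptions=True)


asyncio.run(main())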