Code example #1
File: eventqueue.py Project: wynfred/presso
from asyncio import Lock, PriorityQueue, sleep, wait_for

# MAX_TIMEOUT and isRealtime() are defined elsewhere in the presso project.


class EventQueue:
    __queue = None  # class-level slot holding the singleton (see getInstance)

    def __init__(self):
        self.__locker = {}
        self.__queue = PriorityQueue()

    async def consume(self):
        while True:
            _, caller, data = await self.__queue.get()
            await wait_for(caller.sendData(data), MAX_TIMEOUT)
            if caller in self.__locker:
                self.__locker[caller].release()
                if not isRealtime():
                    # Wait for the lock to be locked
                    while not self.__locker[caller].locked():
                        await sleep(0.001)
                        if caller not in self.__locker:
                            break

    async def put(self, caller, data):
        if caller not in self.__locker:
            self.__locker[caller] = Lock()
        await self.__locker[caller].acquire()
        self.__queue.put_nowait((data[0], caller, data))

    def remove(self, caller):
        self.__locker.pop(caller)

    @staticmethod
    def getInstance():
        if not EventQueue.__queue:
            EventQueue.__queue = EventQueue()
        return EventQueue.__queue
Code example #2
File: peers.py Project: pamir-s/trinity
class WaitingPeers(Generic[TChainPeer]):
    """
    Peers waiting to perform some action. When getting a peer from this queue,
    prefer the peer with the best throughput for the given command.
    """
    _waiting_peers: 'PriorityQueue[SortableTask[TChainPeer]]'
    _response_command_type: Tuple[Type[CommandAPI[Any]], ...]

    def __init__(
        self,
        response_command_type: Union[Type[CommandAPI[Any]],
                                     Sequence[Type[CommandAPI[Any]]]],
        sort_key: Callable[[PerformanceAPI],
                           float] = _items_per_second) -> None:
        """
        :param sort_key: how should we sort the peers to get the fastest? A lower score ranks a peer higher.
        """
        self._waiting_peers = PriorityQueue()

        if isinstance(response_command_type, type):
            self._response_command_type = (response_command_type, )
        elif isinstance(response_command_type, collections.abc.Sequence):
            self._response_command_type = tuple(response_command_type)
        else:
            raise TypeError(f"Unsupported value: {response_command_type}")

        self._peer_wrapper = SortableTask.orderable_by_func(
            self._get_peer_rank)
        self._sort_key = sort_key

    def _get_peer_rank(self, peer: TChainPeer) -> float:
        scores = [
            self._sort_key(exchange.tracker)
            for exchange in peer.chain_api.exchanges if issubclass(
                exchange.get_response_cmd_type(), self._response_command_type)
        ]

        if len(scores) == 0:
            raise ValidationError(
                f"Could not find any exchanges on {peer} "
                f"with response {self._response_command_type!r}")

        # Typically there will only be one score, but we might want to match multiple commands.
        # To handle that case, we take the average of the scores:
        return sum(scores) / len(scores)

    def put_nowait(self, peer: TChainPeer) -> None:
        self._waiting_peers.put_nowait(self._peer_wrapper(peer))

    async def get_fastest(self) -> TChainPeer:
        wrapped_peer = await self._waiting_peers.get()
        peer = wrapped_peer.original

        # make sure the peer has not gone offline while waiting in the queue
        while not peer.manager.is_running:
            # if so, look for the next best peer
            wrapped_peer = await self._waiting_peers.get()
            peer = wrapped_peer.original

        return peer
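
The SortableTask wrapper exists because a PriorityQueue compares the queued items themselves, and peer objects are not orderable; SortableTask.orderable_by_func builds a wrapper ordered by the rank function. A minimal self-contained sketch of the same wrap-by-rank pattern (illustrative names, not trinity's API):

import asyncio
from functools import total_ordering


@total_ordering
class Ranked:
    """Wrap an arbitrary item so a PriorityQueue can order it by rank."""
    def __init__(self, rank: float, original):
        self.rank = rank
        self.original = original

    def __eq__(self, other):
        return self.rank == other.rank

    def __lt__(self, other):
        return self.rank < other.rank


async def demo():
    queue = asyncio.PriorityQueue()
    # Ranks are negated throughputs (as in code example #5 below), so the
    # fastest peer sorts first.
    queue.put_nowait(Ranked(-10.0, 'fast-peer'))
    queue.put_nowait(Ranked(-2.0, 'slow-peer'))
    print((await queue.get()).original)  # fast-peer

asyncio.run(demo())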
Code example #3
from collections import defaultdict
from queue import PriorityQueue  # assumed import: only non-blocking methods are used below


def djikstra(grid, start, end):
    # Dijkstra's shortest path over an unweighted grid (every step costs 1).
    distance = defaultdict(lambda: float("inf"))
    distance[start] = 0
    frontier = PriorityQueue()
    frontier.put_nowait((0, start))
    visited = set([])
    retrace = {}

    while not frontier.empty():
        (d, p) = frontier.get_nowait()
        if p in visited:
            continue

        visited.add(p)

        if p == end:
            break

        for n in reachable(grid, p):
            if distance[n] > d + 1:
                distance[n] = d + 1
                retrace[n] = p
            frontier.put_nowait((distance[n], n))

    # Backtrack from end to start to build the forward path
    # (assumes end is reachable; otherwise retrace[current] raises KeyError)
    current = end
    forward = {}
    while current != start:
        forward[retrace[current]] = current
        current = retrace[current]

    return ({p: distance[p] for p in visited}, forward)
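
reachable() is not shown in this snippet. A hedged usage sketch with one plausible 4-neighbour implementation over a set of open cells (an assumption, not the original project's code):

def reachable(grid, p):
    # Assumes grid is a set of open (x, y) cells.
    x, y = p
    neighbours = [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]
    return [n for n in neighbours if n in grid]

grid = {(0, 0), (1, 0), (2, 0), (2, 1), (2, 2)}
distances, forward = djikstra(grid, (0, 0), (2, 2))
print(distances[(2, 2)])  # 4: the path length from (0, 0)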
Code example #4
import asyncio
from asyncio import PriorityQueue


async def main():
    priority_queue = PriorityQueue()

    work_items = [
        WorkItem(3, 'Lowest priority'),
        WorkItem(2, 'Medium priority'),
        WorkItem(1, 'High priority')
    ]

    worker_task = asyncio.create_task(worker(priority_queue))

    for work in work_items:
        priority_queue.put_nowait(work)

    await asyncio.gather(priority_queue.join(), worker_task)
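
WorkItem and worker are not shown in this snippet; a plausible completion in the style of common asyncio tutorials (an assumption, not the original source):

from dataclasses import dataclass, field


@dataclass(order=True)
class WorkItem:
    priority: int
    data: str = field(compare=False)  # order only by priority


async def worker(queue: PriorityQueue):
    while True:
        work_item = await queue.get()
        print(f'Processing: {work_item.data}')
        queue.task_done()

Note that with a forever-looping worker, gather(priority_queue.join(), worker_task) finishes the join() once every item is processed but then waits on the worker indefinitely; a common variant awaits only join() and cancels the worker afterwards.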
Code example #5
File: chain.py Project: nsabharwal/py-evm
class WaitingPeers:
    """
    Peers waiting to perform some action. When getting a peer from this queue,
    prefer the peer with the best throughput for the given command.
    """
    _waiting_peers: 'PriorityQueue[SortableTask[ETHPeer]]'

    def __init__(self, response_command_type: Type[Command]) -> None:
        self._waiting_peers = PriorityQueue()
        self._response_command_type = response_command_type
        self._peer_wrapper = SortableTask.orderable_by_func(self._ranked_peer)

    def _ranked_peer(self, peer: ETHPeer) -> float:
        relevant_throughputs = [
            exchange.tracker.items_per_second_ema.value
            for exchange in peer.requests
            if exchange.response_cmd_type == self._response_command_type
        ]

        if len(relevant_throughputs) == 0:
            raise ValidationError(
                f"Could not find any exchanges on {peer} "
                f"with response {self._response_command_type!r}"
            )

        avg_throughput = sum(relevant_throughputs) / len(relevant_throughputs)

        # high-throughput peers should pop out of the queue first, so their rank is negated
        return -1 * avg_throughput

    def put_nowait(self, peer: ETHPeer) -> None:
        self._waiting_peers.put_nowait(self._peer_wrapper(peer))

    async def get_fastest(self) -> ETHPeer:
        wrapped_peer = await self._waiting_peers.get()
        peer = wrapped_peer.original

        # make sure the peer has not gone offline while waiting in the queue
        while not peer.is_operational:
            # if so, look for the next best peer
            wrapped_peer = await self._waiting_peers.get()
            peer = wrapped_peer.original

        return peer
Code example #6
def reverse_djikstra(grid, end):
    # Same relaxation as djikstra() above, run from `end` outward and without
    # an early exit: returns the distance to end for every reachable cell.
    distance = defaultdict(lambda: float("inf"))
    distance[end] = 0
    frontier = PriorityQueue()
    frontier.put_nowait((0, end))
    visited = set([])

    while not frontier.empty():
        (d, p) = frontier.get_nowait()

        if p in visited:
            continue
        else:
            visited.add(p)

        for n in reachable(grid, p):
            if distance[n] > d + 1:
                distance[n] = d + 1
            frontier.put_nowait((distance[n], n))
    return {p: distance[p] for p in visited}
Code example #7
File: eventqueue.py Project: ynzheng/presso
from asyncio import Lock, PriorityQueue, sleep, wait_for

# MAX_TIMEOUT, isRealtime(), LOG, and the Event class are defined elsewhere
# in the presso project.


class EventQueue:
    __queue = None  # class-level slot holding the singleton (see getInstance)

    def __init__(self):
        self.__locker = {}
        self.__queue = PriorityQueue()

    async def consume(self):
        while True:
            _, caller, evt = await self.__queue.get()
            await wait_for(caller.sendData(evt), MAX_TIMEOUT)
            if caller in self.__locker:
                self.__locker[caller].release()
                if not isRealtime():
                    # Wait for the lock to be locked
                    while not self.__locker[caller].locked():
                        await sleep(0.001)
                        if caller not in self.__locker:
                            break

    async def put(self, caller, evt):
        if not isinstance(evt, Event):
            LOG.critical("Expected event.Event but got %s" %
                         (str(evt.__class__)))
            return

        if caller not in self.__locker:
            self.__locker[caller] = Lock()
        await self.__locker[caller].acquire()
        self.__queue.put_nowait((evt.date, caller, evt))

    def remove(self, caller):
        self.__locker.pop(caller)

    @staticmethod
    def getInstance():
        if not EventQueue.__queue:
            EventQueue.__queue = EventQueue()
        return EventQueue.__queue
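
A hedged driver sketch of the put/consume handshake above: put() acquires the caller's lock before queueing, consume() releases it after delivery and, outside realtime mode, waits for the caller to re-acquire it, so each caller has exactly one event in flight and delivery follows evt.date order. EchoStrategy stands in for a presso strategy object, and Event is assumed to be constructible with a date:

import asyncio


class EchoStrategy:
    async def sendData(self, evt):
        print('delivered event dated', evt.date)


async def demo():
    queue = EventQueue.getInstance()
    consumer = asyncio.ensure_future(queue.consume())
    strategy = EchoStrategy()
    # Each put blocks on the caller's lock until consume() has delivered
    # the previous event, so producers are naturally paced.
    await queue.put(strategy, Event(date=1))
    await queue.put(strategy, Event(date=2))
    await asyncio.sleep(0.1)
    consumer.cancel()

asyncio.run(demo())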
Code example #8
File: datastructures.py Project: solversa/py-evm
class TaskQueue(Generic[TTask]):
    """
    TaskQueue keeps priority-order track of pending tasks, with a limit on number pending.

    A producer of tasks will insert pending tasks with await add(), which will not return until
    all tasks have been added to the queue.

    A task consumer calls await get() to retrieve tasks for processing. Tasks will be returned in
    priority order. If no tasks are pending, get()
    will pause until at least one is available. Only one consumer will have a task "checked out"
    from get() at a time.

    After tasks are successfully completed, the consumer will call complete() to remove them from
    the queue. The consumer doesn't need to complete all tasks, but any uncompleted tasks will be
    considered abandoned. Another consumer can pick them up at the next get() call.
    """

    # a function that determines the priority order (lower int is higher priority)
    _order_fn: FunctionProperty[Callable[[TTask], Any]]

    # batches of tasks that have been started but not completed
    _in_progress: Dict[int, Tuple[TTask, ...]]

    # all tasks that have been placed in the queue and have not been started
    _open_queue: 'PriorityQueue[Tuple[Any, TTask]]'

    # all tasks that have been placed in the queue and have not been completed
    _tasks: Set[TTask]

    def __init__(self,
                 maxsize: int = 0,
                 order_fn: Callable[[TTask], Any] = identity,
                 *,
                 loop: AbstractEventLoop = None) -> None:
        self._maxsize = maxsize
        self._full_lock = Lock(loop=loop)
        self._open_queue = PriorityQueue(maxsize, loop=loop)
        self._order_fn = order_fn
        self._id_generator = count()
        self._tasks = set()
        self._in_progress = {}

    async def add(self, tasks: Tuple[TTask, ...]) -> None:
        """
        add() will insert as many tasks as can be inserted until the queue fills up.
        Then it will pause until the queue is no longer full, and continue adding tasks.
        It will finally return when all tasks have been inserted.
        """
        if not isinstance(tasks, tuple):
            raise ValidationError(
                f"must pass a tuple of tasks to add(), but got {tasks!r}")

        already_pending = self._tasks.intersection(tasks)
        if already_pending:
            raise ValidationError(
                f"Duplicate tasks detected: {already_pending!r} are already present in the queue"
            )

        # make sure to insert the highest-priority items first, in case queue fills up
        remaining = tuple(
            sorted((self._order_fn(task), task) for task in tasks))

        while remaining:
            num_tasks = len(self._tasks)

            if self._maxsize <= 0:
                # no cap at all, immediately insert all tasks
                open_slots = len(remaining)
            elif num_tasks < self._maxsize:
                # there is room to add at least one more task
                open_slots = self._maxsize - num_tasks
            else:
                # wait until there is room in the queue
                await self._full_lock.acquire()

                # the current number of tasks has changed, can't reuse num_tasks
                num_tasks = len(self._tasks)
                open_slots = self._maxsize - num_tasks

            queueing, remaining = remaining[:open_slots], remaining[open_slots:]

            for task in queueing:
                # There will always be room in _open_queue until _maxsize is reached
                try:
                    self._open_queue.put_nowait(task)
                except QueueFull as exc:
                    task_idx = queueing.index(task)
                    qsize = self._open_queue.qsize()
                    raise QueueFull(
                        f'TaskQueue unsuccessful in adding task {task[1]!r} because qsize={qsize}, '
                        f'num_tasks={num_tasks}, maxsize={self._maxsize}, open_slots={open_slots}, '
                        f'num queueing={len(queueing)}, len(_tasks)={len(self._tasks)}, task_idx='
                        f'{task_idx}, queuing={queueing}, original msg: {exc}',
                    )

            unranked_queued = tuple(task for _rank, task in queueing)
            self._tasks.update(unranked_queued)

            if self._full_lock.locked() and len(self._tasks) < self._maxsize:
                self._full_lock.release()

    def get_nowait(self,
                   max_results: int = None) -> Tuple[int, Tuple[TTask, ...]]:
        """
        Get pending tasks. If no tasks are pending, raise an exception.

        :param max_results: return up to this many pending tasks. If None, return all pending tasks.
        :return: (batch_id, tasks to attempt)
        :raise ~asyncio.QueueFull: if no tasks are available
        """
        if self._open_queue.empty():
            raise QueueFull("No tasks are available to get")
        else:
            pending_tasks = self._get_nowait(max_results)

            # Generate a pending batch of tasks, so uncompleted tasks can be inferred
            next_id = next(self._id_generator)
            self._in_progress[next_id] = pending_tasks

            return (next_id, pending_tasks)

    async def get(self,
                  max_results: int = None) -> Tuple[int, Tuple[TTask, ...]]:
        """
        Get pending tasks. If no tasks are pending, wait until a task is added.

        :param max_results: return up to this many pending tasks. If None, return all pending tasks.
        :return: (batch_id, tasks to attempt)
        """
        if max_results is not None and max_results < 1:
            raise ValidationError(
                "Must request at least one task to process, not {max_results!r}"
            )

        # if the queue is empty, wait until at least one item is available
        queue = self._open_queue
        if queue.empty():
            _rank, first_task = await queue.get()
        else:
            _rank, first_task = queue.get_nowait()

        # In order to return from get() as soon as possible, never await again.
        # Instead, take only the tasks that are already available.
        if max_results is None:
            remaining_count = None
        else:
            remaining_count = max_results - 1
        remaining_tasks = self._get_nowait(remaining_count)

        # Combine the first and remaining tasks
        all_tasks = (first_task, ) + remaining_tasks

        # Generate a pending batch of tasks, so uncompleted tasks can be inferred
        next_id = next(self._id_generator)
        self._in_progress[next_id] = all_tasks

        return (next_id, all_tasks)

    def _get_nowait(self, max_results: int = None) -> Tuple[TTask, ...]:
        queue = self._open_queue

        # How many results do we want?
        available = queue.qsize()
        if max_results is None:
            num_tasks = available
        else:
            num_tasks = min((available, max_results))

        # Combine the remaining tasks with the first task we already pulled.
        ranked_tasks = tuple(queue.get_nowait() for _ in range(num_tasks))

        # strip out the rank value used internally for sorting in the priority queue
        return tuple(task for _rank, task in ranked_tasks)

    def complete(self, batch_id: int, completed: Tuple[TTask, ...]) -> None:
        if batch_id not in self._in_progress:
            raise ValidationError(
                f"batch id {batch_id} not recognized, with tasks {completed!r}"
            )

        attempted = self._in_progress.pop(batch_id)

        unrecognized_tasks = set(completed).difference(attempted)
        if unrecognized_tasks:
            self._in_progress[batch_id] = attempted
            raise ValidationError(
                f"cannot complete tasks {unrecognized_tasks!r} in this batch, only {attempted!r}"
            )

        incomplete = set(attempted).difference(completed)

        for task in incomplete:
            # These tasks are already counted in the total task count, so there will be room
            self._open_queue.put_nowait((self._order_fn(task), task))

        self._tasks.difference_update(completed)

        if self._full_lock.locked() and len(self._tasks) < self._maxsize:
            self._full_lock.release()

    def __contains__(self, task: TTask) -> bool:
        """Determine if a task has been added and not yet completed"""
        return task in self._tasks
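
A hedged usage sketch of the producer/consumer contract described in the docstring, using plain integers as tasks (the default order_fn is identity, so smaller numbers are higher priority). Note the class as scraped targets older asyncio, where Lock and PriorityQueue still accept a loop argument:

import asyncio


async def demo():
    queue = TaskQueue(maxsize=10)
    await queue.add((3, 1, 2))           # the producer blocks while the queue is full
    batch_id, tasks = await queue.get(max_results=2)
    print(tasks)                         # (1, 2): priority order
    queue.complete(batch_id, tasks)      # tasks left incomplete would be re-queued

asyncio.run(demo())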
Code example #9
File: community.py Project: devos50/noodle
class PaymentCommunity(BamiCommunity, metaclass=ABCMeta):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # self.transfer_queue = Queue()
        # self.transfer_queue_task = ensure_future(self.evaluate_transfer_queue())

        # Add state db
        if not kwargs.get("settings"):
            self._settings = PaymentSettings()
        self.state_db = PaymentState(self._settings.asset_precision)

        self.context = self.state_db.context

        self.reachability_cache = defaultdict(lambda: cachetools.LRUCache(100))

        # Dictionary chain_id: block_dot -> block
        self.tracked_blocks = defaultdict(lambda: {})
        self.peer_conf = defaultdict(lambda: defaultdict(int))
        self.should_witness_subcom = {}

        self.counter_signing_block_queue = PriorityQueue()
        self.block_sign_queue_task = ensure_future(
            self.evaluate_counter_signing_blocks()
        )

        self.witness_delta = kwargs.get("witness_delta")
        if not self.witness_delta:
            self.witness_delta = self.settings.witness_block_delta

    @property
    def settings(self) -> PaymentSettings:
        return super().settings

    def join_subcommunity_gossip(self, sub_com_id: bytes) -> None:
        # 0. Add master peer to the known minter group
        self.state_db.add_known_minters(sub_com_id, {sub_com_id})

        # 1. Main payment chain: spends and their confirmations
        # - Start gossip sync task periodically on the chain updates
        self.start_gossip_sync(sub_com_id)
        # - Process incoming blocks on the chain in order for payments
        self.subscribe_in_order_block(sub_com_id, self.receive_block_in_order)

        # 2. Witness chain:
        # - Gossip witness updates on the sub-chain
        self.start_gossip_sync(sub_com_id, prefix=b"w")
        # - Process witness block out of order
        self.subscribe_out_order_block(b"w" + sub_com_id, self.process_witness_block)
        # - Witness all updates on payment chain
        self.should_witness_subcom[sub_com_id] = self.settings.should_witness_block

    def receive_block_in_order(self, block: BamiBlock) -> None:
        if block.com_dot in self.state_db.applied_dots:
            raise Exception(
                "Block already applied?",
                block.com_dot,
                self.state_db.vals_cache,
                self.state_db.peer_mints,
                self.state_db.applied_dots,
            )
        chain_id = block.com_id
        dot = block.com_dot
        self.state_db.applied_dots.add(dot)

        # Check reachability for target block -> update risk
        for blk_dot in self.tracked_blocks[chain_id]:
            if self.dot_reachable(chain_id, blk_dot, dot):
                self.update_risk(chain_id, block.public_key, blk_dot[0])

        # Process blocks according to their type
        self.logger.debug(
            "Processing block %s, %s, %s", block.type, chain_id, block.hash
        )
        if block.type == MINT_TYPE:
            self.process_mint(block)
        elif block.type == SPEND_TYPE:
            self.process_spend(block)
        elif block.type == CONFIRM_TYPE:
            self.process_confirm(block)
        elif block.type == REJECT_TYPE:
            self.process_reject(block)
        elif block.type == WITNESS_TYPE:
            raise Exception("Witness block received, while shouldn't")
        # Witness block react on new block:
        if (
            self.should_witness_subcom.get(chain_id)
            and block.type != WITNESS_TYPE
            and self.should_witness_chain_point(
                chain_id, self.my_pub_key_bin, block.com_seq_num
            )
        ):
            self.schedule_witness_block(chain_id, block.com_seq_num)

    def process_witness_block(self, blk: BamiBlock) -> None:
        """Process witness block out of order"""
        # No block is processed out of order in this community
        self.logger.debug(
            "Processing block %s, %s, %s", blk.type, blk.com_dot, blk.com_id
        )
        if blk.type != WITNESS_TYPE:
            raise Exception("Received not witness block on witness sub-chain!")
        self.process_witness(blk)

    def should_store_store_update(self, chain_id: bytes, seq_num: int) -> bool:
        """Store the status of the chain at the seq_num for further witnessing or verification"""
        # Should depend on if witnessing? - or something different
        return True
        # return self.should_witness_chain_point(chain_id, self.my_pub_key_bin, seq_num)

    def should_witness_chain_point(
        self, chain_id: bytes, peer_id: bytes, seq_num: int
    ) -> bool:
        """
        Returns:
            True if peer should witness this chain at seq_num
        """
        # Based on random coin tossing?
        seed = chain_id + peer_id + bytes(seq_num)
        ran = Random(seed)

        # Every peer should witness every K blocks?
        # TODO: that should depend on the number of peers in the community - change that
        # + account for the fault tolerance
        if ran.random() < 1 / self.witness_delta:
            return True
        return False

    # -------------- Mint transaction ----------------------

    def verify_mint(
        self, chain_id: bytes, minter: bytes, mint_transaction: Dict
    ) -> None:
        """
        Verify that mint transaction from minter is valid:
            - minter is known and acceptable
            - mint if properly formatted
            - mint value is withing the acceptable range
            - total value minted by the minter is limited
        Args:
            chain_id: chain identifier
            minter: id of the minter, e.g. public key
            mint_transaction: transaction as a dictionary
        Raises:
            InvalidMintException if not valid mint
        """
        # 1. Is minter known and acceptable?
        if not self.state_db.known_chain_minters(
            chain_id
        ) or minter not in self.state_db.known_chain_minters(chain_id):
            raise UnknownMinterException(
                "Got minting from unacceptable peer ", chain_id, minter
            )
        # 2. Mint is properly formatted
        if not mint_transaction.get(b"value"):
            raise InvalidTransactionFormatException(
                "Mint transaction badly formatted ", mint_transaction, chain_id, minter
            )
        # 3. Minting value within the range
        if not (
            Decimal(self.settings.mint_value_range[0], self.context)
            < mint_transaction[b"value"]
            < Decimal(self.settings.mint_value_range[1], self.context)
        ):
            raise InvalidMintRangeException(
                chain_id, minter, mint_transaction.get(b"value")
            )
        # 4. Total value is bounded
        if not (
            self.state_db.peer_mints[minter]
            + Decimal(mint_transaction.get(b"value"), self.context)
            < Decimal(self.settings.mint_max_value, self.context)
        ):
            raise UnboundedMintException(
                chain_id,
                minter,
                self.state_db.peer_mints[minter],
                mint_transaction.get(b"value"),
            )

    def mint(self, value: Decimal = None, chain_id: bytes = None) -> None:
        """
        Create mint for own reputation: Reputation & Liveness  at Stake
        """
        if not value:
            value = self.settings.initial_mint_value
        if not chain_id:
            # Community id is the same as the peer id
            chain_id = self.my_pub_key_bin
        # Mint transaction: value
        mint_tx = {b"value": float(value)}
        self.verify_mint(chain_id, self.my_pub_key_bin, mint_tx)
        block = self.create_signed_block(
            block_type=MINT_TYPE, transaction=encode_raw(mint_tx), com_id=chain_id
        )
        self.share_in_community(block, chain_id)

    def process_mint(self, mint_blk: BamiBlock) -> None:
        """Process received mint transaction"""
        minter = mint_blk.public_key
        mint_tx = decode_raw(mint_blk.transaction)
        chain_id = mint_blk.com_id
        mint_dot = mint_blk.com_dot
        prev_links = mint_blk.links
        self.verify_mint(chain_id, minter, mint_tx)

        seq_num = mint_dot[0]
        self.state_db.apply_mint(
            chain_id,
            mint_dot,
            prev_links,
            minter,
            Decimal(mint_tx.get(b"value"), self.context),
            self.should_store_store_update(chain_id, seq_num),
        )

    # ------ Spend transaction -----------
    def spend(
        self,
        chain_id: bytes,
        counter_party: bytes,
        value: Decimal,
        ignore_validation: bool = False,
    ) -> None:
        """
        Spend tokens in the chain to the counter_party.
        Args:
            chain_id: identity of the chain
            counter_party: identity of the counter-party
            value: Decimal value to transfer
            ignore_validation: if True, skip the balance check (otherwise an insufficient balance raises an exception)
        """
        bal = self.state_db.get_balance(self.my_pub_key_bin)
        if ignore_validation or bal - value >= 0:
            spend_tx = {
                b"value": float(value),
                b"to_peer": counter_party,
                b"prev_pairwise_link": self.state_db.get_last_pairwise_links(
                    self.my_pub_key_bin, counter_party
                ),
            }
            self.verify_spend(self.my_pub_key_bin, spend_tx)
            block = self.create_signed_block(
                block_type=SPEND_TYPE, transaction=encode_raw(spend_tx), com_id=chain_id
            )
            self.logger.info("Created spend block %s", block.com_dot)
            counter_peer = self.get_peer_by_key(counter_party, chain_id)
            if counter_peer:
                self.send_block(block, [counter_peer])
            self.share_in_community(block, chain_id)
        else:
            raise InsufficientBalanceException("Not enough balance for spend")

    def verify_spend(self, spender: bytes, spend_transaction: Dict) -> None:
        """Verify the spend transaction:
            - spend formatted correctly
        Raises:
            InvalidTransactionFormat
        """
        # 1. Verify the spend format
        if (
            not spend_transaction.get(b"value")
            or not spend_transaction.get(b"to_peer")
            or not spend_transaction.get(b"prev_pairwise_link")
        ):
            raise InvalidTransactionFormatException(
                "Spend transaction badly formatted ", spender, spend_transaction
            )
        # 2. Verify the spend value in range
        if not (
            self.settings.spend_value_range[0]
            < spend_transaction.get(b"value")
            < self.settings.spend_value_range[1]
        ):
            raise InvalidSpendRangeException(
                "Spend value out of range", spender, spend_transaction.get(b"value")
            )

    def process_spend(self, spend_block: BamiBlock) -> None:
        # Store spend in the database
        spend_tx = decode_raw(spend_block.transaction)
        spender = spend_block.public_key
        self.verify_spend(spender, spend_tx)

        chain_id = spend_block.com_id
        spend_dot = spend_block.com_dot
        pers_links = spend_block.links

        prev_spend_links = spend_tx.get(b"prev_pairwise_link")
        value = Decimal(spend_tx.get(b"value"), self.context)
        to_peer = spend_tx.get(b"to_peer")
        seq_num = spend_dot[0]

        self.state_db.apply_spend(
            chain_id,
            prev_spend_links,
            pers_links,
            spend_dot,
            spender,
            to_peer,
            value,
            self.should_store_store_update(chain_id, seq_num),
        )

        # Is this block related to my peer?
        if to_peer == self.my_pub_key_bin:
            self.add_block_to_response_processing(spend_block)

    # ------------ Block Response processing ---------

    def add_block_to_response_processing(self, block: BamiBlock) -> None:
        self.tracked_blocks[block.com_id][block.com_dot] = block

        self.counter_signing_block_queue.put_nowait((block.com_seq_num, (0, block)))

    def process_counter_signing_block(
        self, block: BamiBlock, time_passed: float = None, num_block_passed: int = None,
    ) -> bool:
        """
        Process block that should be counter-signed and return True if the block should be delayed more.
        Args:
            block: Processed block
            time_passed: time passed since first added
            num_block_passed: number of blocks passed since first added
        Returns:
            Should add to queue again.
        """
        res = self.block_response(block, time_passed, num_block_passed)
        if res == BlockResponse.CONFIRM:
            self.confirm(
                block,
                extra_data={b"value": decode_raw(block.transaction).get(b"value")},
            )
            return False
        elif res == BlockResponse.REJECT:
            self.reject(block)
            return False
        return True

    async def evaluate_counter_signing_blocks(self, delta: float = None):
        while True:
            _delta = delta if delta else self.settings.block_sign_delta
            priority, block_info = await self.counter_signing_block_queue.get()
            process_time, block = block_info
            should_delay = self.process_counter_signing_block(block, process_time)
            self.logger.debug(
                "Processing counter signing block. Delayed: %s", should_delay
            )
            if should_delay:
                self.counter_signing_block_queue.put_nowait(
                    (priority, (process_time + _delta, block))
                )
                await sleep(_delta)
            else:
                self.tracked_blocks[block.com_id].pop(block.com_dot)
                await sleep(0.001)

    def block_response(
        self, block: BamiBlock, wait_time: float = None, wait_blocks: int = None
    ) -> BlockResponse:
        # Analyze the risk of accepting this block
        stat = self.state_db.get_closest_peers_status(block.com_id, block.com_seq_num)
        # If there is no information, or the chain is forked, delay or reject below
        peer_id = shorten(block.public_key)

        if not stat or not stat[1].get(peer_id):
            # Check that it is not infinite
            if (wait_time and wait_time > self.settings.max_wait_time) or (
                wait_blocks and wait_blocks > self.settings.max_wait_block
            ):
                return BlockResponse.REJECT
            return BlockResponse.DELAY
        if not stat[1][peer_id][1] or not stat[1][peer_id][0]:
            # If chain is forked or negative balance => reject
            return BlockResponse.REJECT

        # Verify the risk of missing some information:
        #  - There is diverse peers building upon the block

        # TODO: revisit that - number should depend on total number of peers in community.
        # 1. Diversity on the block building
        f = self.settings.diversity_confirm

        if len(self.peer_conf[(block.com_id, block.com_seq_num)]) >= f:
            return BlockResponse.CONFIRM
        else:
            return BlockResponse.DELAY

    def dot_reachable(self, chain_id: bytes, target_dot: Dot, block_dot: Dot):
        val = self.reachability_cache[(chain_id, target_dot)].get(block_dot)
        if val is not None:
            return val
        res = self.persistence.get_chain(chain_id).get_prev_links(block_dot)
        if target_dot in res:
            return True
        if max(res)[0] < target_dot[0]:
            return False
        else:
            # Need to take more steps
            for prev_dot in res:
                new_val = self.dot_reachable(chain_id, target_dot, prev_dot)
                if new_val:
                    self.reachability_cache[(chain_id, target_dot)][block_dot] = True
                    return True
            self.reachability_cache[(chain_id, target_dot)][block_dot] = False
            return False

    def update_risk(self, chain_id: bytes, conf_peer_id: bytes, target_seq_num: int):
        print("Risk update: ", shorten(conf_peer_id), target_seq_num)
        self.peer_conf[(chain_id, target_seq_num)][conf_peer_id] += 1

    # ----------- Witness transactions --------------

    def schedule_witness_block(
        self, chain_id: bytes, seq_num: int, delay: float = None
    ):
        # Schedule witness transaction
        name_prefix = str(hex_to_int(chain_id + bytes(seq_num)))
        if self.is_pending_task_active(name_prefix):
            self.replace_task(
                name_prefix,
                self.witness,
                chain_id,
                seq_num,
                delay=self.settings.witness_delta_time,
            )
        else:
            self.register_task(
                name_prefix,
                self.witness,
                chain_id,
                seq_num,
                delay=self.settings.witness_delta_time,
            )

    def witness_tx_well_formatted(self, witness_tx: Any) -> bool:
        return len(witness_tx) == 2 and witness_tx[0] > 0 and len(witness_tx[1]) > 0

    def build_witness_blob(self, chain_id: bytes, seq_num: int) -> Optional[bytes]:
        chain_state = self.state_db.get_closest_peers_status(chain_id, seq_num)
        if not chain_state:
            return None
        return encode_raw(chain_state)

    def apply_witness_tx(
        self, block: BamiBlock, witness_tx: Tuple[int, ChainState]
    ) -> None:
        state = witness_tx[1]
        state_hash = take_hash(state)
        seq_num = witness_tx[0]

        if not self.should_witness_chain_point(block.com_id, block.public_key, seq_num):
            # This is invalid witnessing - react
            raise InvalidWitnessTransactionException(
                "Received invalid witness transaction",
                block.com_id,
                block.public_key,
                seq_num,
            )
        self.state_db.add_witness_vote(
            block.com_id, seq_num, state_hash, block.public_key
        )
        self.state_db.add_chain_state(block.com_id, seq_num, state_hash, state)

        chain_id = block.com_id
        if self.tracked_blocks.get(chain_id):
            for block_dot, tracked_block in self.tracked_blocks[chain_id].items():
                if (
                    block_dot[0] <= seq_num
                    and state.get(shorten(tracked_block.public_key))
                    and state.get(shorten(tracked_block.public_key)) == (True, True)
                ):
                    self.update_risk(chain_id, block.public_key, block_dot[0])

    # ------ Confirm and reject transactions -------

    def apply_confirm_tx(self, block: BamiBlock, confirm_tx: Dict) -> None:
        claim_dot = block.com_dot
        chain_id = block.com_id
        claimer = block.public_key
        com_links = block.links
        seq_num = claim_dot[0]
        self.state_db.apply_confirm(
            chain_id,
            claimer,
            com_links,
            claim_dot,
            confirm_tx[b"initiator"],
            confirm_tx[b"dot"],
            Decimal(confirm_tx[b"value"], self.context),
            self.should_store_store_update(chain_id, seq_num),
        )

    def apply_reject_tx(self, block: BamiBlock, reject_tx: Dict) -> None:
        self.state_db.apply_reject(
            block.com_id,
            block.public_key,
            block.links,
            block.com_dot,
            reject_tx[b"initiator"],
            reject_tx[b"dot"],
            self.should_store_store_update(block.com_id, block.com_seq_num),
        )

    async def unload(self):
        if not self.block_sign_queue_task.done():
            self.block_sign_queue_task.cancel()
        await super().unload()
Code example #10
File: urlsmanager.py Project: szzend/zspider
class UrlsManager:
    """
    Tracks the run state of every url internally
    and stores the final results.
    """
    class _counter:
        """
        Custom class for internal counting;
        maintains a namedtuple [Counter].
        """
        def __new__(cls, fields):
            cls.fields = fields._fields_defaults
            return super().__new__(cls)

        def __init__(self, fields: Counter):
            # _dict holds and updates the actual counts
            f = fields._fields
            self._dict = dict(zip(f, [0] * len(f)))

        def __setattr__(self, name, value):
            """Called on every attribute assignment."""
            # Prevent direct reassignment of _dict
            if (name == '_dict' and hasattr(self, '_dict')
                    and isinstance(getattr(self, '_dict'), dict)):
                raise ValueError(f' Forbidden to modify attribute:[{name}]')
            # Beyond the initial _dict assignment, only count updates are
            # allowed; anything else raises, mimicking a namedtuple.
            if name == '_dict':
                super().__setattr__(name, value)
            elif name in self._dict:
                self._dict[name] = value
            else:
                raise ValueError(f' Got unexpected field names:[{name}]')

        def __getattribute__(self, name):
            """
            __getattribute__ is invoked on every attribute lookup (including
            special attributes), so the lookups below must go through super()
            or they would recurse infinitely.
            __getattr__, by contrast, is only called when lookup fails on the
            instance and its class.
            """
            # Beware of special attributes and the class's own attributes in
            # real-world use.
            if name in super().__getattribute__('_dict'):
                return super().__getattribute__('_dict')[name]
            else:
                return super().__getattribute__(name)

        def __delattr__(self, name):
            """Intercepts every deletion."""
            raise ValueError(f' Forbidden to delete attribute:[{name}]')

        def add(self, n: Counter):
            """
            Accumulate the counter by the given values.
            """
            for key in n._fields:
                v = getattr(n, key)
                if v:
                    self._dict[key] += v

        def update(self, n: Counter):
            v = None
            for key in n._fields:
                v = getattr(n, key)
                if v:
                    self._dict[key] = v

        @property
        def values(self):
            return Counter(**self._dict)

    def __init__(self, max_tried_times: int = 3):
        """
        Args: max_tried_times is the maximum number of attempts before a url
        is marked failed; must be an integer greater than 0.
        """
        self.max_tried_times = max_tried_times
        # The Queue below must be created only after the main event loop is
        # established: its constructor is Queue(maxsize=0, *, loop=None), and
        # without an explicit loop it may pick up an event loop different from
        # the main one, in which case join() raises a RuntimeError.
        self.__queue_todo = None  # holds the pending tasks
        self.__total_key_urls = defaultdict(
            int)  # deduplicates urls and counts attempts: key=url, value=times processed
        self.__done_urls = set()  # stores the completed urls
        self.__failed_urls = set()  # stores each failure record
        self.__working_urls = set()  # stores the urls currently being processed
        self.__discarded_urls = set()  # stores the discarded urls and discard counts
        self.__counter = self._counter(Counter)  # maintains the internal processing counter

    async def prepare(self) -> bool:
        """Initialization or resource setup whose timing must be controlled"""
        self.__queue_todo = PriorityQueue()
        return True

    async def put(self, url: str or URL) -> Counter:
        """
        Send one url to the UrlsManager.
        Args: url is a string.
        Returns: a Counter indicating how this url was handled.
        Internally a url is handled in one of two ways: put into the task queue, or discarded.
        """
        url = URL(url)
        put = discarded = 0  # passed to the _counter
        times = self.__total_key_urls[url]
        _todo = {url: rank
                 for rank, url in self.__queue_todo._queue}  # view the priority queue's contents as a dict
        # Discard the url if it has reached the retry limit or is already tracked
        if (times >= self.max_tried_times or url in itertools.chain(
                _todo, self.__working_urls, self.__done_urls)):
            self.__discarded_urls.add(url)
            discarded = 1
        else:
            self.__queue_todo.put_nowait((times, url))
            put = 1
        c = Counter(count=1, put=put, discarded=discarded)
        self.__counter.add(c)  # update the counter
        return c

    async def put_urls(self, urls: Iterable) -> Counter:
        """
        Send multiple urls to the UrlsManager.
        Args: urls is a list, tuple, or set.
        Returns: a Counter of how the urls were handled.
        """
        c = self._counter(Counter)
        for url in urls:
            c.add(await self.put(url))
        return c

    async def task_done(self, url: str or URL, is_OK=True) -> Counter:
        """
        Notify the UrlsManager that processing of this url has finished.
        Args: is_OK flags whether processing completed normally.
        Internally the url is marked done or failed, and removed from the working pool.
        """
        self.__working_urls.remove(url)
        self.__total_key_urls[url] = self.__total_key_urls[url] + 1
        if is_OK:
            self.__done_urls.add(url)
            c = Counter(done=1)
            self.__counter.add(c)
            self.__queue_todo.task_done()
            return c
        else:
            times = self.__total_key_urls[url]
            if times >= self.max_tried_times:
                self.__failed_urls.add(url)
                c = Counter(failed=1)
                self.__counter.add(c)
            else:
                c = await self.put(url)
            self.__queue_todo.task_done()
            return c

    async def get(self) -> str:
        """Fetch one url from the UrlsManager"""
        urlItem = await self.__queue_todo.get()  # the priority queue stores tuples
        self.__working_urls.add(urlItem[1])
        return urlItem[1]

    async def get_urls(self, qty: int) -> tuple:
        """Fetch multiple urls"""

    async def join(self) -> bool:
        """Block until every url in the UrlsManager has been taken out and processed"""
        await self.__queue_todo.join()
        return True

    async def get_todo(self):
        """Return the pending urls as a tuple"""
        _todo = self.__queue_todo._queue
        _todo = (url for v, url in _todo)
        return tuple(_todo)

    async def get_results(self):
        # count put discarded done failed todo working
        _todo = self.__queue_todo._queue
        _todo = (url for v, url in _todo)
        _todo_urls = set(_todo)
        results = namedtuple(
            'results',
            'key_urls discarded_urls done_urls failed_urls todo_urls working_urls'
        )
        results = results(self.__total_key_urls, self.__discarded_urls,
                          self.__done_urls, self.__failed_urls, _todo_urls,
                          self.__working_urls)
        return results

    async def get_count(self) -> Counter:
        todo = len(self.__queue_todo._queue)
        working = len(self.__working_urls)
        ikeys = len(self.__total_key_urls)
        isum = sum(self.__total_key_urls.values())
        imin = min(self.__total_key_urls.values())
        imax = max(self.__total_key_urls.values())
        c = Counter(todo=todo,
                    working=working,
                    keys=ikeys,
                    sum=isum,
                    min=imin,
                    max=imax)
        self.__counter.update(c)
        return self.__counter.values
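
A hedged driver sketch. Counter and URL are project imports the snippet does not show (as are itertools, defaultdict, Iterable, and asyncio's PriorityQueue); the stand-ins below assume Counter is a namedtuple whose fields all default to 0 and URL is yarl's, and they must be defined before the class, since its method signatures reference Counter:

import asyncio
from collections import namedtuple
from yarl import URL  # assumption: the project's URL type

# Assumption: the project's Counter fields, zeroed by default.
Counter = namedtuple(
    'Counter',
    'count put discarded done failed todo working keys sum min max',
    defaults=(0,) * 11)


async def demo():
    manager = UrlsManager(max_tried_times=3)
    await manager.prepare()  # the queue must be created inside the running loop
    print(await manager.put('http://example.com/a'))  # count=1, put=1
    url = await manager.get()
    print(await manager.task_done(url, is_OK=True))   # done=1

asyncio.run(demo())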
Code example #11
File: queues.py Project: hyq-python/hoopa
class MemoryQueue(BaseQueue):
    def __init__(self):
        # download queue: a priority queue
        self.waiting = None
        # in-progress items: key is the Request, value is the timestamp when it was taken out
        self.pending = {}
        # failure counts: key is the Request, value is the number of failures
        self.failure = {}
        self.module = None
        self.setting = None

    async def init(self, setting):
        """
        Initialize
        """
        self.setting = setting
        self.waiting = PriorityQueue()
        self.module = importlib.import_module(setting.SERIALIZATION)

    async def clean_scheduler(self, waiting=True, pending=True, failure=True, data=True):
        """
        Clear the queues
        """
        pass

    async def get(self, priority):
        """
        Fetch one request from the queue
        """
        if not self.waiting.empty():
            result = await self.waiting.get()
            self.pending[result[1]] = get_timestamp()
            return Request.unserialize(result[1], self.module)
        return None

    async def add(self, requests: typing.Union[Request, typing.List[Request]]):
        """
        Add one or more requests to the queue
        @param requests:
        """
        if isinstance(requests, Request):
            requests = [requests]

        count = 0
        # Check whether the request is in pending and, if so, whether the maximum time has passed
        for request in requests:
            str_request = request.serialize(self.module)
            pended_time = self.pending.get(str_request, 0)
            if time.time() - pended_time < self.setting["PENDING_THRESHOLD"]:
                continue

            count += 1
            self.waiting.put_nowait((-request.priority, str_request))
            if pended_time:
                self.pending.pop(str_request)
        return count

    async def set_result(self, request: Request, response: Response, task_request: Request):
        """
        Store the result
        @param request:
        @param response:
        @param task_request:
        """
        # On failure, if the failure limit has not been reached, return the request to waiting
        str_request = request.serialize(self.module)

        # If it is in the in-progress queue, remove it
        if str_request in self.pending:
            self.pending.pop(str_request)

        # On success
        if response.ok == 1:
            return True

        if response.ok == -1:
            self.failure[str_request] = response.status
            return False

        if str_request in self.failure:
            self.failure[str_request] += 1
            await self.add(request)
        else:
            self.failure[str_request] = 1
            await self.add(request)

    async def check_status(self, spider_ins, run_forever=False):
        if len(self.pending) == 0 and self.waiting.empty():
            spider_ins.run = False
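
Note the sign flip in add() above: a PriorityQueue pops its smallest item first, so storing -request.priority makes higher-priority requests come out first. A minimal illustration:

import asyncio


async def demo():
    waiting = asyncio.PriorityQueue()
    for priority, request in [(1, 'low'), (10, 'high'), (5, 'mid')]:
        waiting.put_nowait((-priority, request))
    while not waiting.empty():
        print(await waiting.get())  # (-10, 'high'), (-5, 'mid'), (-1, 'low')

asyncio.run(demo())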
Code example #12
class AsyncProxyBroker:
    def __init__(self,
                 check_url,
                 allowed_anonymity_levels=None,
                 qps_per_proxy=1,
                 max_consecutive_failures=5,
                 providers=PROVIDERS,
                 timeout=5):
        self._proxies = Queue()
        self._pending_providers = Queue()
        self._providers = providers

        self._verified_proxies = {}
        self._throttled_proxies = PriorityQueue()
        self._errors = {}

        self._check_url = check_url
        self._qps_per_proxy = qps_per_proxy
        self._max_consecutive_failures = max_consecutive_failures
        self._timeout = timeout

        self._ip = None
        self._ip_lock = Lock()

        if not allowed_anonymity_levels:
            self._allowed_anonymity_levels = ['Anonymous', 'Elite']
        else:
            self._allowed_anonymity_levels = allowed_anonymity_levels

    async def _get_real_ip(self):
        while not self._ip:
            async with self._ip_lock:
                if self._ip:
                    return self._ip

                try:
                    async with aiohttp.request(
                            url=random.choice(IP_HOSTS),
                            method='GET',
                            timeout=aiohttp.ClientTimeout(
                                total=self._timeout)) as response:
                        contents = await response.text()
                        ips = get_all_ip(contents)

                        if len(ips) == 1:
                            self._ip = ips.pop()
                            return self._ip
                except (UnicodeDecodeError, asyncio.TimeoutError,
                        aiohttp.ClientOSError, aiohttp.ClientResponseError,
                        aiohttp.ServerDisconnectedError):
                    pass

        return self._ip

    async def _get_anonymity_level(self, proxy_address):
        judge = random.choice(JUDGES)
        ip = await self._get_real_ip()

        try:
            async with aiohttp.request(url=judge,
                                       method='GET',
                                       proxy=proxy_address,
                                       timeout=aiohttp.ClientTimeout(
                                           total=self._timeout)) as response:
                contents = (await response.text()).lower()
                contained_ips = get_all_ip(contents)

                if ip in contained_ips:
                    return 'Transparent'
                elif 'via' in contents or 'proxy' in contents:
                    return 'Anonymous'
                else:
                    return 'Elite'
        except (UnicodeDecodeError, asyncio.TimeoutError,
                aiohttp.ClientOSError, aiohttp.ClientResponseError,
                aiohttp.ServerDisconnectedError):
            return 'None'

    def _populate_providers(self):
        for provider in self._providers:
            self._pending_providers.put_nowait(provider)

    async def _can_connect_to_test_url(self, proxy_address):
        try:
            async with aiohttp.request(url=self._check_url,
                                       method='GET',
                                       proxy=proxy_address,
                                       timeout=aiohttp.ClientTimeout(
                                           total=self._timeout)) as response:
                await response.text()
                return True
        except (UnicodeDecodeError, asyncio.TimeoutError,
                aiohttp.ClientOSError, aiohttp.ClientResponseError,
                aiohttp.ServerDisconnectedError):
            return False

    async def _populate_proxies(self):
        if self._pending_providers.empty():
            self._populate_providers()

        provider = self._pending_providers.get_nowait()
        proxies = await provider.get_proxies()

        for proxy in proxies:
            self._proxies.put_nowait(proxy)

        self._pending_providers.task_done()

    async def _try_verify_one_proxy(self):
        if self._proxies.empty():
            await self._populate_proxies()
            return

        (host, port, types) = self._proxies.get_nowait()
        proxy_address = 'http://%s:%s' % (host, port)

        if await self._get_anonymity_level(proxy_address) in self._allowed_anonymity_levels and \
                await self._can_connect_to_test_url(proxy_address):
            self._verified_proxies[proxy_address] = deque()
            self._errors[proxy_address] = 0

        self._proxies.task_done()

    @staticmethod
    def _flush_history(history):
        executions_removed = 0
        earliest_time = time.monotonic()

        while len(history) > 0:
            earliest_time = history.popleft()
            if time.monotonic() - earliest_time < 1:
                history.appendleft(earliest_time)
                break
            executions_removed += 1

        return executions_removed, earliest_time

    def _flush_throttled_proxies(self):
        while not self._throttled_proxies.empty():
            (_, proxy_url, history) = self._throttled_proxies.get_nowait()
            executions_removed, earliest_time = self._flush_history(history)

            if executions_removed == 0:
                self._throttled_proxies.put_nowait(
                    (earliest_time, proxy_url, history))
                self._throttled_proxies.task_done()
                return

            self._verified_proxies[proxy_url] = history
            self._throttled_proxies.task_done()

    def mark_successful(self, proxy_url):
        if proxy_url not in self._errors:
            return

        self._errors[proxy_url] = max(0, self._errors[proxy_url] - 1)

    def mark_failure(self, proxy_url):
        if proxy_url not in self._errors:
            return

        self._errors[proxy_url] += 1

    async def random_proxy(self):
        while True:
            self._flush_throttled_proxies()

            if not self._verified_proxies:
                await self._try_verify_one_proxy()

            while self._verified_proxies:
                proxy_url = random.choice(list(self._verified_proxies.keys()))

                if self._errors[proxy_url] >= self._max_consecutive_failures:
                    del self._errors[proxy_url]
                    del self._verified_proxies[proxy_url]
                    continue

                history = self._verified_proxies[proxy_url]

                _, earliest_time = self._flush_history(history)
                if len(history) < self._qps_per_proxy:
                    history.append(time.monotonic())
                    return proxy_url

                del self._verified_proxies[proxy_url]
                self._throttled_proxies.put_nowait(
                    (earliest_time, proxy_url, history))
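
A hedged sketch of the intended call pattern; check_url, PROVIDERS, JUDGES, IP_HOSTS, and get_all_ip belong to the surrounding module and are not shown:

import aiohttp


async def fetch_with_proxy(broker: AsyncProxyBroker, url: str) -> str:
    # random_proxy() verifies, rate-limits, and evicts proxies internally.
    proxy = await broker.random_proxy()
    try:
        async with aiohttp.request('GET', url, proxy=proxy) as response:
            body = await response.text()
        broker.mark_successful(proxy)  # decays the proxy's error count
        return body
    except aiohttp.ClientError:
        broker.mark_failure(proxy)     # enough failures evicts the proxy
        raise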
Code example #13
File: blockresponse.py Project: Tribler/bami
class BlockResponseMixin(StatedMixin, metaclass=ABCMeta):
    """
    Adding this mixin class to your overlays enables routines to respond to incoming blocks with another block.
    """
    def setup_mixin(self) -> None:
        # Dictionary chain_id: block_dot -> block
        self.tracked_blocks = defaultdict(lambda: {})
        self.block_sign_queue_task = ensure_future(
            self.evaluate_counter_signing_blocks())
        self.counter_signing_block_queue = PriorityQueue()

    def unload_mixin(self) -> None:
        if not self.block_sign_queue_task.done():
            self.block_sign_queue_task.cancel()

    @abstractmethod
    def block_response(self,
                       block: PlexusBlock,
                       wait_time: float = None,
                       wait_blocks: int = None) -> BlockResponse:
        """
        Respond to block BlockResponse: Reject, Confirm, Delay
        Args:
            block: to respond to
            wait_time: time that passed since first block process initiated
            wait_blocks: number of blocks passed since the block
        Returns:
            BlockResponse: Confirm, Reject or Delay
        """
        pass

    def confirm_tx_extra_data(self, block: PlexusBlock) -> Dict:
        """
        Return additional data that should be added to the confirm transaction.
        Args:
            block: The block that is about to be confirmed.

        Returns: A dictionary with values to add to the confirm transaction.
        """
        return {}

    def add_block_to_response_processing(self, block: PlexusBlock) -> None:
        self.counter_signing_block_queue.put_nowait(
            (block.com_seq_num, (0, block)))

    def process_counter_signing_block(
        self,
        block: PlexusBlock,
        time_passed: float = None,
        num_block_passed: int = None,
    ) -> bool:
        """
        Process block that should be counter-signed and return True if the block should be delayed more.
        Args:
            block: Processed block
            time_passed: time passed since first added
            num_block_passed: number of blocks passed since first added
        Returns:
            Should add to queue again.
        """
        res = self.block_response(block, time_passed, num_block_passed)
        if res == BlockResponse.CONFIRM:
            self.confirm(block, extra_data=self.confirm_tx_extra_data(block))
            return False
        elif res == BlockResponse.REJECT:
            self.reject(block)
            return False
        return True

    async def evaluate_counter_signing_blocks(self, delta: float = None):
        while True:
            _delta = delta if delta else self.settings.block_sign_delta
            priority, block_info = await self.counter_signing_block_queue.get()
            process_time, block = block_info
            should_delay = self.process_counter_signing_block(
                block, process_time)
            self.logger.debug("Processing counter signing block. Delayed: %s",
                              should_delay)
            if should_delay:
                self.counter_signing_block_queue.put_nowait(
                    (priority, (process_time + _delta, block)))
                await sleep(_delta)
            else:
                self.tracked_blocks[block.com_id].pop(block.com_dot, None)
                await sleep(0.001)

    def confirm(self, block: PlexusBlock, extra_data: Dict = None) -> None:
        """
        Confirm the transaction in an incoming block. Link will be in the transaction with block dot.
        Args:
            block: The PlexusBlock to confirm.
            extra_data: An optional dictionary with extra data that is appended to the confirmation.
        """
        self.logger.info("Confirming block %s", block)
        chain_id = block.com_id if block.com_id != EMPTY_PK else block.public_key
        dot = block.com_dot if block.com_id != EMPTY_PK else block.pers_dot
        confirm_tx = {b"initiator": block.public_key, b"dot": dot}
        if extra_data:
            confirm_tx.update(extra_data)
        block = self.create_signed_block(block_type=CONFIRM_TYPE,
                                         transaction=encode_raw(confirm_tx),
                                         com_id=chain_id)
        self.share_in_community(block, chain_id)

    def reject(self, block: PlexusBlock, extra_data: Dict = None) -> None:
        """
        Reject the transaction in an incoming block.

        Args:
            block: The PlexusBlock to reject.
            extra_data: Some additional data to append to the reject transaction, e.g., a reason.
        """
        chain_id = block.com_id if block.com_id != EMPTY_PK else block.public_key
        dot = block.com_dot if block.com_id != EMPTY_PK else block.pers_dot
        reject_tx = {b"initiator": block.public_key, b"dot": dot}
        if extra_data:
            reject_tx.update(extra_data)
        block = self.create_signed_block(block_type=REJECT_TYPE,
                                         transaction=encode_raw(reject_tx),
                                         com_id=chain_id)
        self.share_in_community(block, chain_id)

    def verify_confirm_tx(self, claimer: bytes, confirm_tx: Dict) -> None:
        # 1. verify claim format
        if not confirm_tx.get(b"initiator") or not confirm_tx.get(b"dot"):
            raise InvalidTransactionFormatException("Invalid confirmation ",
                                                    claimer, confirm_tx)

    def process_confirm(self, block: PlexusBlock) -> None:
        confirm_tx = decode_raw(block.transaction)
        self.verify_confirm_tx(block.public_key, confirm_tx)
        self.apply_confirm_tx(block, confirm_tx)

    @abstractmethod
    def apply_confirm_tx(self, block: PlexusBlock, confirm_tx: Dict) -> None:
        pass

    def verify_reject_tx(self, rejector: bytes, confirm_tx: Dict) -> None:
        # 1. verify reject format
        if not confirm_tx.get(b"initiator") or not confirm_tx.get(b"dot"):
            raise InvalidTransactionFormatException("Invalid reject ",
                                                    rejector, confirm_tx)

    def process_reject(self, block: PlexusBlock) -> None:
        reject_tx = decode_raw(block.transaction)
        self.verify_reject_tx(block.public_key, reject_tx)
        self.apply_reject_tx(block, reject_tx)

    @abstractmethod
    def apply_reject_tx(self, block: PlexusBlock, reject_tx: Dict) -> None:
        pass
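
block_response, apply_confirm_tx, and apply_reject_tx are abstract, so a concrete overlay supplies the policy. A hedged sketch with an invented deadline policy (not bami's actual logic):

class DeadlineResponseMixin(BlockResponseMixin):
    def block_response(self,
                       block: PlexusBlock,
                       wait_time: float = None,
                       wait_blocks: int = None) -> BlockResponse:
        # Invented policy: reject anything delayed more than ten seconds,
        # counter-sign everything else immediately.
        if wait_time and wait_time > 10.0:
            return BlockResponse.REJECT
        return BlockResponse.CONFIRM

    def apply_confirm_tx(self, block: PlexusBlock, confirm_tx: Dict) -> None:
        pass  # record the confirmation in the overlay's state

    def apply_reject_tx(self, block: PlexusBlock, reject_tx: Dict) -> None:
        pass  # record the rejection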