Example #1
 def __validate_block_headers(self,
                              block_header_list: List[MinorBlockHeader]):
     for i in range(len(block_header_list) - 1):
         header, prev = block_header_list[i:i + 2]  # type: MinorBlockHeader
         if header.height != prev.height + 1:
             return False
         if header.hash_prev_minor_block != prev.get_hash():
             return False
         try:
             # Note that PoSW may lower diff, so checks here are necessary but not sufficient
             # More checks happen during block addition
             shard_config = self.shard.env.quark_chain_config.shards[
                 header.branch.get_full_shard_id()]
             consensus_type = shard_config.CONSENSUS_TYPE
             diff = header.difficulty
             if shard_config.POSW_CONFIG.ENABLED:
                 diff //= shard_config.POSW_CONFIG.DIFF_DIVIDER
             validate_seal(
                 header,
                 consensus_type,
                 adjusted_diff=diff,
                 qkchash_with_rotation_stats=consensus_type
                 == ConsensusType.POW_QKCHASH
                 and self.shard.state._qkchashx_enabled(header),
             )
         except Exception as e:
             Logger.warning(
                 "[{}] got block with bad seal in sync: {}".format(
                     header.branch.to_str(), str(e)))
             return False
     return True
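
The PoSW branch above is the subtle part: when POSW_CONFIG.ENABLED is set, the seal is checked against header.difficulty divided by DIFF_DIVIDER, so a seal that would fail the full difficulty can still pass here. A worked sketch of that arithmetic with made-up numbers (the divider value is hypothetical, not the real config):

# Hypothetical numbers illustrating the adjusted-diff check in Example #1.
nominal_diff = 1_000_000                      # header.difficulty as announced
DIFF_DIVIDER = 1000                           # assumed divider, not the real config value
adjusted_diff = nominal_diff // DIFF_DIVIDER  # 1_000

# A seal meeting only adjusted_diff passes validate_seal here, which is why
# the comment calls the check necessary but not sufficient: the full PoSW
# stake check happens later, during block addition.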
Example #2
    def __init__(
        self,
        consensus_type: ConsensusType,
        create_block_async_func: Callable[[], Awaitable[Optional[Block]]],
        add_block_async_func: Callable[[Block], Awaitable[None]],
        get_mining_param_func: Callable[[], Dict[str, Any]],
        remote: bool = False,
    ):
        """Mining will happen on a subprocess managed by this class

        create_block_async_func: takes no argument, returns a block (either RootBlock or MinorBlock)
        add_block_async_func: takes a block and adds it to the chain
        get_mining_param_func: takes no argument, returns the mining-specific params
        """
        self.consensus_type = consensus_type

        self.create_block_async_func = create_block_async_func
        self.add_block_async_func = add_block_async_func
        self.get_mining_param_func = get_mining_param_func
        self.enabled = False
        self.process = None

        self.input_q = AioQueue()  # [(MiningWork, param dict)]
        self.output_q = AioQueue()  # [MiningResult]

        # header hash -> work
        self.work_map = {}  # type: Dict[bytes, Block]

        if not remote and consensus_type != ConsensusType.POW_SIMULATE:
            Logger.warning("Mining locally, could be slow and error-prone")
        # remote miner specific attributes
        self.remote = remote
        self.current_work = None  # type: Optional[Block]
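
For orientation, here is a hedged sketch of how a caller might satisfy this constructor. Only the four callback signatures come from the snippet; the class name Miner, the chain object, and the parameter dict contents are assumptions:

# Hypothetical wiring; "Miner" and "chain" are assumed names.
async def create_block():                 # Callable[[], Awaitable[Optional[Block]]]
    return await chain.create_block_to_mine()

async def add_block(block):               # Callable[[Block], Awaitable[None]]
    await chain.add_block(block)

def get_mining_params():                  # Callable[[], Dict[str, Any]]
    return {"target_block_time": 10}      # assumed param key

miner = Miner(
    ConsensusType.POW_SIMULATE,           # simulated PoW avoids the local-mining warning
    create_block,
    add_block,
    get_mining_params,
)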
Example #3
    async def __run_sync(self):
        if self.__has_block_hash(self.header.get_hash()):
            return

        # descending height
        block_header_chain = [self.header]

        # TODO: Stop if too many headers to revert
        while not self.__has_block_hash(
                block_header_chain[-1].hash_prev_minor_block):
            block_hash = block_header_chain[-1].hash_prev_minor_block
            height = block_header_chain[-1].height - 1

            if self.shard_state.header_tip.height - height > self.max_staleness:
                Logger.warning(
                    "[{}] abort syncing due to forking at very old block {} << {}"
                    .format(
                        self.header.branch.get_shard_id(),
                        height,
                        self.shard_state.header_tip.height,
                    ))
                return

            if not self.shard_state.db.contain_root_block_by_hash(
                    block_header_chain[-1].hash_prev_root_block):
                return
            Logger.info("[{}] downloading headers from {} {}".format(
                self.shard_state.branch.get_shard_id(), height,
                block_hash.hex()))
            block_header_list = await asyncio.wait_for(
                self.__download_block_headers(block_hash), TIMEOUT)
            Logger.info("[{}] downloaded {} headers from peer".format(
                self.shard_state.branch.get_shard_id(),
                len(block_header_list)))
            if not self.__validate_block_headers(block_header_list):
                # TODO: tag bad peer
                return self.shard_conn.close_with_error(
                    "Bad peer sending discontinuing block headers")
            for header in block_header_list:
                if self.__has_block_hash(header.get_hash()):
                    break
                block_header_chain.append(header)

        # ascending height
        block_header_chain.reverse()
        while len(block_header_chain) > 0:
            block_chain = await asyncio.wait_for(
                self.__download_blocks(block_header_chain[:100]), TIMEOUT)
            Logger.info("[{}] downloaded {} blocks from peer".format(
                self.shard_state.branch.get_shard_id(), len(block_chain)))
            check(len(block_chain) == len(block_header_chain[:100]))

            for block in block_chain:
                # Stop if the block depends on an unknown root block
                # TODO: move this check to early stage to avoid downloading unnecessary headers
                if not self.shard_state.db.contain_root_block_by_hash(
                        block.header.hash_prev_root_block):
                    return
                await self.shard.add_block(block)
                block_header_chain.pop(0)
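
Stripped of logging and error handling, the sync above is a two-phase loop: walk backwards from the announced header until a locally known ancestor is found, then replay forward in batches of 100 full blocks. A schematic sketch, where the parameters stand in for the private methods above:

# Schematic only; the parameters stand in for the class's private helpers.
async def two_phase_sync(tip_header, has_block, fetch_headers, fetch_blocks, add_block):
    chain = [tip_header]                            # descending height
    while not has_block(chain[-1].hash_prev_minor_block):
        chain.extend(await fetch_headers(chain[-1].hash_prev_minor_block))
    chain.reverse()                                 # ascending height
    while chain:
        for block in await fetch_blocks(chain[:100]):
            await add_block(block)
            chain.pop(0)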
Example #4
def update_genesis_alloc(cluster_config):
    """ Update ShardConfig.GENESIS.ALLOC """
    ALLOC_FILE_TEMPLATE = "alloc/{}.json"
    LOADTEST_FILE = "loadtest.json"

    if not cluster_config.GENESIS_DIR:
        return
    alloc_file_template = os.path.join(cluster_config.GENESIS_DIR,
                                       ALLOC_FILE_TEMPLATE)
    loadtest_file = os.path.join(cluster_config.GENESIS_DIR, LOADTEST_FILE)

    qkc_config = cluster_config.QUARKCHAIN

    allocation = {
        qkc_config.GENESIS_TOKEN: 1000000 * (10**18),
        "QETC": 2 * (10**8) * (10**18),
        "QFB": 3 * (10**8) * (10**18),
        "QAAPL": 4 * (10**8) * (10**18),
        "QTSLA": 5 * (10**8) * (10**18),
    }

    old_shards = copy.deepcopy(qkc_config.shards)
    try:
        for chain_id in range(qkc_config.CHAIN_SIZE):
            alloc_file = alloc_file_template.format(chain_id)
            with open(alloc_file, "r") as f:
                items = json.load(f)
            for item in items:
                address = Address.create_from(item["address"])
                full_shard_id = qkc_config.get_full_shard_id_by_full_shard_key(
                    address.full_shard_key)
                qkc_config.shards[full_shard_id].GENESIS.ALLOC[
                    item["address"]] = allocation

            Logger.info(
                "[{}] Imported {} genesis accounts into config from {}".format(
                    chain_id, len(items), alloc_file))
    except Exception as e:
        Logger.warning("Error importing genesis accounts from {}: {}".format(
            alloc_file, e))
        qkc_config.shards = old_shards
        Logger.warning("Cleared all partially imported genesis accounts!")

    # each account in loadtest file is funded on all the shards
    try:
        with open(loadtest_file, "r") as f:
            items = json.load(f)
            qkc_config.loadtest_accounts = items

        for item in items:
            address = Address.create_from(item["address"])
            for full_shard_id, shard_config in qkc_config.shards.items():
                shard_config.GENESIS.ALLOC[address.address_in_shard(
                    full_shard_id).serialize().hex()] = allocation

        Logger.info("Imported {} loadtest accounts from {}".format(
            len(items), loadtest_file))
    except Exception:
        Logger.info("No loadtest accounts imported into genesis alloc")
Example #5
    async def handle_new_block(self, block):
        """
        This is a fast path for block propagation. The block is broadcast to peers before being added to local state.
        0. if local shard is syncing, doesn't make sense to add, skip
        1. if block parent is not in local state/new block pool, discard (TODO: is this necessary?)
        2. if already in cache or in local state/new block pool, pass
        3. validate: check time, difficulty, POW
        4. add it to new minor block broadcast cache
        5. broadcast to all peers (minus peer that sent it, optional)
        6. add_block() to local state (then remove from cache)
           also, broadcast tip if tip is updated (so that peers can sync if they missed blocks, or are new)
        """
        if self.synchronizer.running:
            # TODO optional: queue the block if it came from broadcast so that once sync is over,
            # catch up immediately
            return

        if block.header.get_hash() in self.state.new_block_pool:
            return
        if self.state.db.contain_minor_block_by_hash(block.header.get_hash()):
            return

        if not self.state.db.contain_minor_block_by_hash(
                block.header.hash_prev_minor_block):
            if block.header.hash_prev_minor_block not in self.state.new_block_pool:
                return

        # Doing the full PoSW check requires the prev block to have been added to the
        # state, which could slow down block propagation.
        # TODO: this is a copy of the code in SyncTask.__validate_block_headers; extract it into a helper
        try:
            header = block.header
            # Note that PoSW may lower diff, so checks here are necessary but not sufficient
            # More checks happen during block addition
            shard_config = self.env.quark_chain_config.shards[
                header.branch.get_full_shard_id()]
            consensus_type = shard_config.CONSENSUS_TYPE
            diff = header.difficulty
            if shard_config.POSW_CONFIG.ENABLED:
                diff //= shard_config.POSW_CONFIG.DIFF_DIVIDER
            validate_seal(header, consensus_type, adjusted_diff=diff)
        except Exception as e:
            Logger.warning(
                "[{}] got block with bad seal in handle_new_block: {}".format(
                    header.branch.to_str(), str(e)))
            raise e

        if block.header.create_time > time_ms() // 1000 + 30:
            return

        self.state.new_block_pool[block.header.get_hash()] = block

        Logger.info("[{}/{}] got new block with height {}".format(
            block.header.branch.get_chain_id(),
            block.header.branch.get_shard_id(),
            block.header.height,
        ))
        self.broadcast_new_block(block)
        await self.add_block(block)
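
A small detail worth making explicit: time_ms() // 1000 converts the local clock to whole seconds, and the + 30 tolerates blocks timestamped up to 30 seconds in the future, absorbing modest clock skew between peers. The same check in isolation:

import time

ALLOWED_FUTURE_SECONDS = 30  # the literal 30 used in the snippet above

def too_far_in_future(create_time_sec):
    # Reject blocks stamped more than 30s ahead of the local clock.
    return create_time_sec > int(time.time()) + ALLOWED_FUTURE_SECONDS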
Example #6
def update_genesis_alloc(cluster_config):
    """ Update ShardConfig.GENESIS.ALLOC """
    ALLOC_FILE = "alloc.json"
    LOADTEST_FILE = "loadtest.json"

    if not cluster_config.GENESIS_DIR:
        return
    alloc_file = os.path.join(cluster_config.GENESIS_DIR, ALLOC_FILE)
    loadtest_file = os.path.join(cluster_config.GENESIS_DIR, LOADTEST_FILE)

    qkc_config = cluster_config.QUARKCHAIN

    # each account in alloc_file is only funded on the shard it belongs to
    try:
        with open(alloc_file, "r") as f:
            items = json.load(f)
            # address_hex -> key_hex for jsonrpc faucet drip
            qkc_config.alloc_accounts = dict()
        for item in items:
            qkc_config.alloc_accounts[item["address"]] = item["key"]
            address = Address.create_from(item["address"])
            shard = address.get_shard_id(qkc_config.SHARD_SIZE)
            qkc_config.SHARD_LIST[shard].GENESIS.ALLOC[item["address"]] = 1000000 * (
                10 ** 18
            )

        Logger.info(
            "Imported {} accounts from genesis alloc at {}".format(
                len(items), alloc_file
            )
        )
    except Exception as e:
        Logger.warning("Unable to load genesis alloc from {}: {}".format(alloc_file, e))

    # each account in loadtest file is funded on all the shards
    try:
        with open(loadtest_file, "r") as f:
            items = json.load(f)
            qkc_config.loadtest_accounts = items

        for item in items:
            address = Address.create_from(item["address"])
            for i, shard in enumerate(qkc_config.SHARD_LIST):
                shard.GENESIS.ALLOC[
                    address.address_in_shard(i).serialize().hex()
                ] = 1000 * (10 ** 18)

        Logger.info(
            "Imported {} loadtest accounts from {}".format(len(items), loadtest_file)
        )
    except Exception:
        Logger.info("No loadtest accounts imported into genesis alloc")
Example #7
    async def handle_new_block(self, block):
        """
        0. if local shard is syncing, doesn't make sense to add, skip
        1. if block parent is not in local state/new block pool, discard
        2. if already in cache or in local state/new block pool, pass
        3. validate: check time, difficulty, POW
        4. add it to new minor block broadcast cache
        5. broadcast to all peers (minus peer that sent it, optional)
        6. add_block() to local state (then remove from cache)
           also, broadcast tip if tip is updated (so that peers can sync if they missed blocks, or are new)
        """
        if self.synchronizer.running:
            # TODO optional: queue the block if it came from broadcast so that once sync is over, catch up immediately
            return

        if block.header.get_hash() in self.state.new_block_pool:
            return
        if self.state.db.contain_minor_block_by_hash(block.header.get_hash()):
            return

        if not self.state.db.contain_minor_block_by_hash(
                block.header.hash_prev_minor_block):
            if block.header.hash_prev_minor_block not in self.state.new_block_pool:
                return

        full_shard_id = block.header.branch.get_full_shard_id()
        consensus_type = self.env.quark_chain_config.shards[
            full_shard_id].CONSENSUS_TYPE
        try:
            validate_seal(block.header, consensus_type)
        except Exception as e:
            Logger.warning("[{}] Got block with bad seal: {}".format(
                full_shard_id, str(e)))
            return

        if block.header.create_time > time_ms() // 1000 + 30:
            return

        self.state.new_block_pool[block.header.get_hash()] = block

        Logger.info("[{}/{}] got new block with height {}".format(
            block.header.branch.get_chain_id(),
            block.header.branch.get_shard_id(),
            block.header.height,
        ))
        self.broadcast_new_block(block)
        await self.add_block(block)
Example #8
 def get_random_nodes(self, count: int) -> Iterator[Node]:
     if count > len(self):
         if time.monotonic() - self._initialized_at > 30:
             Logger.warning(
                 "Cannot get {} nodes as RoutingTable contains only {} nodes"
                 .format(count, len(self)))
         count = len(self)
     seen = []
     # This is a rather inefficient way of randomizing nodes from all buckets, but even if we
     # iterate over all nodes in the routing table, the time it takes would still be
     # insignificant compared to the time it takes for the network roundtrips when connecting
     # to nodes.
     while len(seen) < count:
         bucket = random.choice(self.buckets)
         if not bucket.nodes:
             continue
         node = random.choice(bucket.nodes)
         if node not in seen:
             yield node
             seen.append(node)
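
As the comment concedes, the rejection loop is a simple but inefficient sampler. Under the same assumption that each bucket exposes a nodes list, an eager alternative would flatten once and use random.sample; the trade-off is touching every bucket up front instead of probing lazily, and losing the generator's early-yield behavior:

import random

def get_random_nodes_flat(buckets, count):
    # Flatten all buckets once, then sample without replacement.
    all_nodes = [node for bucket in buckets for node in bucket.nodes]
    return random.sample(all_nodes, min(count, len(all_nodes)))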
Example #9
    def __init__(
        self,
        consensus_type: ConsensusType,
        create_block_async_func: Callable[..., Awaitable[Optional[Block]]],
        add_block_async_func: Callable[[Block], Awaitable[None]],
        get_mining_param_func: Callable[[], Dict[str, Any]],
        get_header_tip_func: Callable[[], Header],
        remote: bool = False,
        root_signer_private_key: Optional[KeyAPI.PrivateKey] = None,
    ):
        """Mining will happen on a subprocess managed by this class

        create_block_async_func: takes no argument, returns a block (either RootBlock or MinorBlock)
        add_block_async_func: takes a block and adds it to the chain
        get_mining_param_func: takes no argument, returns the mining-specific params
        """
        self.consensus_type = consensus_type

        self.create_block_async_func = create_block_async_func
        self.add_block_async_func = add_block_async_func
        self.get_mining_param_func = get_mining_param_func
        self.get_header_tip_func = get_header_tip_func
        self.enabled = False
        self.process = None

        self.input_q = AioQueue()  # [(MiningWork, param dict)]
        self.output_q = AioQueue()  # [MiningResult]

        # header hash -> block under work
        # max size (tx max 258 bytes, gas limit 12m) ~= ((12m / 21000) * 258) * 128 = 18mb
        self.work_map = LRUCache(maxsize=128)

        if not remote and consensus_type != ConsensusType.POW_SIMULATE:
            Logger.warning("Mining locally, could be slow and error-prone")
        # remote miner specific attributes
        self.remote = remote
        # coinbase address -> header hash
        # key can be None, meaning default coinbase address from local config
        self.current_works = LRUCache(128)
        self.root_signer_private_key = root_signer_private_key
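
The sizing comment on work_map is worth unpacking; spelling out the same arithmetic (every figure comes from the comment itself):

# Worked version of the estimate in the comment above.
max_tx_bytes = 258                          # per-tx upper bound
gas_limit = 12_000_000                      # block gas limit
min_gas_per_tx = 21_000                     # base tx cost
txs_per_block = gas_limit // min_gas_per_tx # ~571 txs
block_bytes = txs_per_block * max_tx_bytes  # ~147 KB per block under work
cache_bytes = block_bytes * 128             # ~18 MB across 128 cached blocks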
Example #10
    async def add_block_list_for_sync(self, block_list):
        """ Add blocks in batch to reduce RPCs. Will NOT broadcast to peers.

        Returns True if the blocks are successfully added, False on any error.
        Additionally, returns a list of coinbase_amount_map for each block.
        This function only adds blocks locally and propagates the xshard list to other shards.
        It does NOT notify master because the master should already have the minor header list,
        and will add them once this function returns successfully.
        """
        coinbase_amount_list = []
        if not block_list:
            return True, coinbase_amount_list

        existing_add_block_futures = []
        block_hash_to_x_shard_list = dict()
        uncommitted_block_header_list = []
        uncommitted_coinbase_amount_map_list = []
        for block in block_list:
            check(
                block.header.branch.get_full_shard_id() == self.full_shard_id)

            block_hash = block.header.get_hash()
            # Record the coinbase amount up front, assuming the block will be validated.
            coinbase_amount_list.append(block.header.coinbase_amount_map)

            commit_status, future = self.__get_block_commit_status_by_hash(
                block_hash)
            if commit_status == BLOCK_COMMITTED:
                # Skip processing the block if it is already committed
                Logger.warning(
                    "minor block to sync {} is already committed".format(
                        block_hash.hex()))
                continue
            elif commit_status == BLOCK_COMMITTING:
                # The block is being propagated to other slaves and the master;
                # make sure all the shards and the master got it before committing it
                Logger.info(
                    "[{}] {} is being added ... waiting for it to finish".
                    format(block.header.branch.to_str(), block.header.height))
                existing_add_block_futures.append(future)
                continue

            check(commit_status == BLOCK_UNCOMMITTED)
            # Validate and add the block
            try:
                xshard_list, coinbase_amount_map = self.state.add_block(
                    block, skip_if_too_old=False, force=True)
            except Exception:
                Logger.error_exception()
                return False, None

            prev_root_height = self.state.db.get_root_block_header_by_hash(
                block.header.hash_prev_root_block).height
            block_hash_to_x_shard_list[block_hash] = (xshard_list,
                                                      prev_root_height)
            self.add_block_futures[block_hash] = self.loop.create_future()
            uncommitted_block_header_list.append(block.header)
            uncommitted_coinbase_amount_map_list.append(
                block.header.coinbase_amount_map)

        await self.slave.batch_broadcast_xshard_tx_list(
            block_hash_to_x_shard_list, block_list[0].header.branch)
        check(
            len(uncommitted_coinbase_amount_map_list) == len(
                uncommitted_block_header_list))
        await self.slave.send_minor_block_header_list_to_master(
            uncommitted_block_header_list,
            uncommitted_coinbase_amount_map_list)

        # Commit all blocks and notify the remaining add-block operations
        for block_header in uncommitted_block_header_list:
            block_hash = block_header.get_hash()
            self.state.commit_by_hash(block_hash)
            Logger.debug("committed mblock {}".format(block_hash.hex()))

            self.add_block_futures[block_hash].set_result(None)
            del self.add_block_futures[block_hash]

        # Wait for the other add block operations
        await asyncio.gather(*existing_add_block_futures)

        return True, coinbase_amount_list
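
The commit coordination follows a standard asyncio pattern: one pending Future per in-flight block, late arrivals that see BLOCK_COMMITTING simply await the existing future, and the committer resolves it with set_result(None) when done. A minimal standalone sketch of the pattern, with illustrative names:

import asyncio

pending = {}  # block_hash -> asyncio.Future, mirroring add_block_futures above

async def commit_once(block_hash, do_commit):
    if block_hash in pending:            # another task is committing: wait for it
        await pending[block_hash]
        return
    pending[block_hash] = asyncio.get_running_loop().create_future()
    try:
        await do_commit(block_hash)      # the actual commit work
    finally:
        pending.pop(block_hash).set_result(None)  # wake every waiter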
Example #11
    async def handle_new_block(self, block):
        """
        This is a fast path for block propagation. The block is broadcast to peers before being added to local state.
        0. if local shard is syncing, doesn't make sense to add, skip
        1. if block parent is not in local state/new block pool, discard (TODO: is this necessary?)
        2. if already in cache or in local state/new block pool, pass
        3. validate: check time, difficulty, POW
        4. add it to new minor block broadcast cache
        5. broadcast to all peers (minus peer that sent it, optional)
        6. add_block() to local state (then remove from cache)
           also, broadcast tip if tip is updated (so that peers can sync if they missed blocks, or are new)
        """
        if self.synchronizer.running:
            # TODO optional: queue the block if it came from broadcast so that once sync is over,
            # catch up immediately
            return

        if block.header.get_hash() in self.state.new_block_header_pool:
            return
        if self.state.db.contain_minor_block_by_hash(block.header.get_hash()):
            return

        prev_hash, prev_header = block.header.hash_prev_minor_block, None
        if prev_hash in self.state.new_block_header_pool:
            prev_header = self.state.new_block_header_pool[prev_hash]
        else:
            prev_header = self.state.db.get_minor_block_header_by_hash(
                prev_hash)
        if prev_header is None:  # Missing prev
            return

        # Sanity check on timestamp and block height
        if (block.header.create_time >
                time_ms() // 1000 + ALLOWED_FUTURE_BLOCKS_TIME_BROADCAST):
            return
        # Ignore old blocks
        if (self.state.header_tip
                and self.state.header_tip.height - block.header.height >
                self.state.shard_config.max_stale_minor_block_height_diff):
            return

        # There is a race where the root block may not have been processed yet.
        # Ignore the block if its root block is not found;
        # otherwise validate_block() would fail and we would disconnect the peer.
        if (self.state.get_root_block_header_by_hash(
                block.header.hash_prev_root_block) is None):
            return

        try:
            self.state.validate_block(block)
        except Exception as e:
            Logger.warning("[{}] got bad block in handle_new_block: {}".format(
                block.header.branch.to_str(), str(e)))
            raise e

        self.state.new_block_header_pool[
            block.header.get_hash()] = block.header

        Logger.info("[{}/{}] got new block with height {}".format(
            block.header.branch.get_chain_id(),
            block.header.branch.get_shard_id(),
            block.header.height,
        ))

        self.broadcast_new_block(block)
        await self.add_block(block)
Example #12
    async def __run_sync(self, notify_sync: Callable):
        if self.__has_block_hash(self.header.get_hash()):
            return

        # descending height
        block_header_chain = [self.header]

        while not self.__has_block_hash(
                block_header_chain[-1].hash_prev_minor_block):
            block_hash = block_header_chain[-1].hash_prev_minor_block
            height = block_header_chain[-1].height - 1

            if self.shard_state.header_tip.height - height > self.max_staleness:
                Logger.warning(
                    "[{}] abort syncing due to forking at very old block {} << {}"
                    .format(
                        self.header.branch.to_str(),
                        height,
                        self.shard_state.header_tip.height,
                    ))
                return

            if not self.shard_state.db.contain_root_block_by_hash(
                    block_header_chain[-1].hash_prev_root_block):
                return
            Logger.info("[{}] downloading headers from {} {}".format(
                self.shard_state.branch.to_str(), height, block_hash.hex()))
            block_header_list = await asyncio.wait_for(
                self.__download_block_headers(block_hash), SYNC_TIMEOUT)
            Logger.info("[{}] downloaded {} headers from peer".format(
                self.shard_state.branch.to_str(), len(block_header_list)))
            if not self.__validate_block_headers(block_header_list):
                # TODO: tag bad peer
                return self.shard_conn.close_with_error(
                    "Bad peer sending discontinuing block headers")
            for header in block_header_list:
                if self.__has_block_hash(header.get_hash()):
                    break
                block_header_chain.append(header)

        # ascending height
        block_header_chain.reverse()
        while len(block_header_chain) > 0:
            block_chain = await asyncio.wait_for(
                self.__download_blocks(
                    block_header_chain[:MINOR_BLOCK_BATCH_SIZE]),
                SYNC_TIMEOUT,
            )
            Logger.info("[{}] downloaded {} blocks from peer".format(
                self.shard_state.branch.to_str(), len(block_chain)))
            if len(block_chain) != len(
                    block_header_chain[:MINOR_BLOCK_BATCH_SIZE]):
                # TODO: tag bad peer
                return self.shard_conn.close_with_error(
                    "Bad peer sending less than requested blocks")

            counter = 0
            for block in block_chain:
                # Stop if the block depends on an unknown root block
                # TODO: move this check to early stage to avoid downloading unnecessary headers
                if not self.shard_state.db.contain_root_block_by_hash(
                        block.header.hash_prev_root_block):
                    return
                await self.shard.add_block(block)
                if counter % 100 == 0:
                    sync_data = (block.header.height, block_header_chain[-1])
                    asyncio.ensure_future(notify_sync(sync_data))
                    counter = 0
                counter += 1
                block_header_chain.pop(0)
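
The notify_sync callback fires for roughly every 100th block added and receives a (current_height, furthest_header) tuple, so a consumer can report progress against the known target. A hypothetical receiver; only the tuple layout comes from the call site above:

# Hypothetical progress reporter; the log format is an assumption.
async def notify_sync(sync_data):
    current_height, best_header = sync_data
    Logger.info("synced {} of {}".format(current_height, best_header.height))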