Code example #1
 def add_peer(self, peer: PeerShardConnection):
     self.peers[peer.cluster_peer_id] = peer
     Logger.info(
         "[{}] connected to peer {}".format(
             Branch(self.full_shard_id).to_str(), peer.cluster_peer_id
         )
     )
Code example #2
 def on_wire_protocol_start(self, proto):
     Logger.info("NODE{} on_wire_protocol_start proto={}".format(
         self.config["node_num"], proto))
     active_peers = self.get_connected_peers()
     self.app.network.loop.call_soon_threadsafe(
         asyncio.ensure_future,
         self.app.network.refresh_connections(active_peers))
Code example #3
    def __init__(self, env, diff_calc=None):
        self.env = env
        self.root_config = env.quark_chain_config.ROOT
        if not diff_calc:
            cutoff = self.root_config.DIFFICULTY_ADJUSTMENT_CUTOFF_TIME
            diff_factor = self.root_config.DIFFICULTY_ADJUSTMENT_FACTOR
            min_diff = self.root_config.GENESIS.DIFFICULTY
            check(cutoff > 0 and diff_factor > 0 and min_diff > 0)
            diff_calc = EthDifficultyCalculator(cutoff=cutoff,
                                                diff_factor=diff_factor,
                                                minimum_diff=min_diff)
        self.diff_calc = diff_calc
        self.raw_db = env.db
        self.db = RootDb(
            self.raw_db,
            env.quark_chain_config,
            count_minor_blocks=env.cluster_config.ENABLE_TRANSACTION_HISTORY,
        )

        persisted_tip = self.db.get_tip_header()
        if persisted_tip:
            self.tip = persisted_tip
            Logger.info("Recovered root state with tip height {}".format(
                self.tip.height))
        else:
            self.__create_genesis_block()
            Logger.info("Created genesis root block")
Code example #4
    def __recover_from_db(self):
        """ Recover the best chain from local database.
        """
        Logger.info("Recovering root chain from local database...")

        if b"tipHash" not in self.db:
            return None

        r_hash = self.db.get(b"tipHash")
        r_block = RootBlock.deserialize(self.db.get(b"rblock_" + r_hash))
        if r_block.header.height <= 0:
            return None
        # use the parent of the tipHash block as the new tip
        # since it's guaranteed to have been accepted by all the shards
        # while shards might not have seen the block of tipHash
        r_hash = r_block.header.hash_prev_block
        r_block = RootBlock.deserialize(self.db.get(b"rblock_" + r_hash))
        self.tip_header = r_block.header  # type: RootBlockHeader

        while len(self.r_header_pool) < self.max_num_blocks_to_recover:
            self.r_header_pool[r_hash] = r_block.header
            for m_header in r_block.minor_block_header_list:
                mtokens = TokenBalanceMap.deserialize(
                    self.db.get(b"mheader_" + m_header.get_hash())).balance_map
                self.m_hash_dict[m_header.get_hash()] = mtokens

            if r_block.header.height <= 0:
                break

            r_hash = r_block.header.hash_prev_block
            r_block = RootBlock.deserialize(self.db.get(b"rblock_" + r_hash))
Code example #5
File: tx_generator.py Project: marksteve9494/qkc-tps
    async def __gen(self, num_tx, x_shard_percent, sample_tx: Transaction):
        Logger.info(
            "[{}] start generating {} transactions with {}% cross-shard".
            format(self.shard_id, num_tx, x_shard_percent))
        if num_tx <= 0:
            return
        start_time = time.time()
        tx_list = []
        total = 0
        sample_evm_tx = sample_tx.code.get_evm_transaction()
        for account in self.accounts:
            nonce = self.shard.state.get_transaction_count(
                account.address.recipient)
            tx = self.create_transaction(account, nonce, x_shard_percent,
                                         sample_evm_tx)
            if not tx:
                continue
            tx_list.append(tx)
            total += 1
            if len(tx_list) >= 600 or total >= num_tx:
                self.shard.add_tx_list(tx_list)
                tx_list = []
                await asyncio.sleep(
                    random.uniform(8, 12)
                )  # yield CPU so that other stuff won't be held for too long

            if total >= num_tx:
                break

        end_time = time.time()
        Logger.info("[{}] generated {} transactions in {:.2f} seconds".format(
            self.shard_id, total, end_time - start_time))
        self.running = False
Code example #6
    def close(self):
        for shard in self.shards.values():
            for peer_shard_conn in shard.peers.values():
                peer_shard_conn.get_forwarding_connection().close()

        Logger.info("Lost connection with master")
        return super().close()
Code example #7
File: slave.py Project: RufusDerrid/pyquarkchain
def main():
    env = parse_args()

    slave_server = SlaveServer(env)
    slave_server.start_and_loop()

    Logger.info("Slave server is shutdown")
Code example #8
    def recover_state(self, r_header, m_header):
        """ When recovering from local database, we can only guarantee the consistency of the best chain.
        Forking blocks can be in inconsistent state and thus should be pruned from the database
        so that they can be retried in the future.
        """
        r_hash = r_header.get_hash()
        while (len(self.r_header_pool) <
               self.env.quark_chain_config.ROOT.max_root_blocks_in_memory):
            block = RootBlock.deserialize(self.db.get(b"rblock_" + r_hash))
            self.r_minor_header_pool[
                r_hash] = self.__get_last_minor_block_in_root_block(block)
            self.r_header_pool[r_hash] = block.header
            if (block.header.height <=
                    self.env.quark_chain_config.get_genesis_root_height(
                        self.branch.get_shard_id())):
                break
            r_hash = block.header.hash_prev_block

        m_hash = m_header.get_hash()
        shard_config = self.env.quark_chain_config.SHARD_LIST[
            self.branch.get_shard_id()]
        while len(
                self.m_header_pool) < shard_config.max_minor_blocks_in_memory:
            block = MinorBlock.deserialize(self.db.get(b"mblock_" + m_hash))
            self.m_header_pool[m_hash] = block.header
            self.m_meta_pool[m_hash] = block.meta
            if block.header.height <= 0:
                break
            m_hash = block.header.hash_prev_minor_block

        Logger.info("[{}] recovered {} minor blocks and {} root blocks".format(
            self.branch.get_shard_id(),
            len(self.m_header_pool),
            len(self.r_header_pool),
        ))
Code example #9
    def __init__(self, env, diff_calc=None):
        self.env = env
        self.root_config = env.quark_chain_config.ROOT
        if not diff_calc:
            cutoff = self.root_config.DIFFICULTY_ADJUSTMENT_CUTOFF_TIME
            diff_factor = self.root_config.DIFFICULTY_ADJUSTMENT_FACTOR
            min_diff = self.root_config.GENESIS.DIFFICULTY
            check(cutoff > 0 and diff_factor > 0 and min_diff > 0)
            diff_calc = EthDifficultyCalculator(
                cutoff=cutoff, diff_factor=diff_factor, minimum_diff=min_diff
            )
        self.diff_calc = diff_calc
        self.raw_db = env.db
        self.db = RootDb(
            self.raw_db,
            env.quark_chain_config,
            count_minor_blocks=env.cluster_config.ENABLE_TRANSACTION_HISTORY,
        )
        # header hash -> [coinbase address] during previous blocks (ascending)
        self.coinbase_addr_cache = LRUCache(maxsize=128)

        self.tip = self.db.get_tip_header()  # type: RootBlockHeader
        if self.tip:
            Logger.info(
                "Recovered root state with tip height {}".format(self.tip.height)
            )
        else:
            self.tip = self.__create_genesis_block()
            Logger.info("Created genesis root block")
Code example #10
File: slave.py Project: quomap/pyquarkchain
    async def handle_sync_minor_block_list_request(self, req):
        """ Raises on error"""
        async def __download_blocks(block_hash_list):
            op, resp, rpc_id = await peer_shard_conn.write_rpc_request(
                CommandOp.GET_MINOR_BLOCK_LIST_REQUEST,
                GetMinorBlockListRequest(block_hash_list),
            )
            return resp.minor_block_list

        shard = self.shards.get(req.branch, None)
        if not shard:
            return SyncMinorBlockListResponse(error_code=errno.EBADMSG)
        peer_shard_conn = shard.peers.get(req.cluster_peer_id, None)
        if not peer_shard_conn:
            return SyncMinorBlockListResponse(error_code=errno.EBADMSG)

        BLOCK_BATCH_SIZE = 100
        block_hash_list = req.minor_block_hash_list
        # empty
        if not block_hash_list:
            return SyncMinorBlockListResponse(error_code=0)

        try:
            while len(block_hash_list) > 0:
                blocks_to_download = block_hash_list[:BLOCK_BATCH_SIZE]
                try:
                    block_chain = await asyncio.wait_for(
                        __download_blocks(blocks_to_download), TIMEOUT)
                except asyncio.TimeoutError as e:
                    Logger.info(
                        "[{}] sync request from master failed due to timeout".
                        format(req.branch.get_full_shard_id()))
                    raise e

                Logger.info(
                    "[{}] sync request from master, downloaded {} blocks ({} - {})"
                    .format(
                        req.branch.get_full_shard_id(),
                        len(block_chain),
                        block_chain[0].header.height,
                        block_chain[-1].header.height,
                    ))
                check(len(block_chain) == len(blocks_to_download))

                add_block_success = await self.slave_server.add_block_list_for_sync(
                    block_chain)
                if not add_block_success:
                    raise RuntimeError(
                        "Failed to add minor blocks for syncing root block")
                block_hash_list = block_hash_list[BLOCK_BATCH_SIZE:]

            branch = block_chain[0].header.branch
            shard = self.slave_server.shards.get(branch, None)
            check(shard is not None)
            return SyncMinorBlockListResponse(
                error_code=0, shard_stats=shard.state.get_shard_stats())
        except Exception:
            Logger.error_exception()
            return SyncMinorBlockListResponse(error_code=1)
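
The handler above pulls the requested hashes down in fixed-size batches (BLOCK_BATCH_SIZE) and applies a timeout to each batch so a stalled peer fails fast. A minimal standalone sketch of that batching pattern, with download_fn, batch_size and timeout as illustrative stand-ins for the RPC logic above:

import asyncio

async def download_in_batches(hash_list, download_fn, batch_size=100, timeout=30):
    # Request `batch_size` hashes at a time; asyncio.wait_for raises
    # asyncio.TimeoutError if any single batch takes longer than `timeout`.
    blocks = []
    while hash_list:
        batch, hash_list = hash_list[:batch_size], hash_list[batch_size:]
        blocks.extend(await asyncio.wait_for(download_fn(batch), timeout))
    return blocks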
Code example #11
File: shard.py Project: tim-yoshi/pyquarkchain
    async def handle_new_block(self, block):
        """
        This is a fast path for block propagation. The block is broadcasted to peers before being added to local state.
        0. if local shard is syncing, doesn't make sense to add, skip
        1. if block parent is not in local state/new block pool, discard (TODO: is this necessary?)
        2. if already in cache or in local state/new block pool, pass
        3. validate: check time, difficulty, POW
        4. add it to new minor block broadcast cache
        5. broadcast to all peers (minus peer that sent it, optional)
        6. add_block() to local state (then remove from cache)
           also, broadcast tip if tip is updated (so that peers can sync if they missed blocks, or are new)
        """
        if self.synchronizer.running:
            # TODO optional: queue the block if it came from broadcast so that
            # once sync is over, we can catch up immediately
            return

        if block.header.get_hash() in self.state.new_block_pool:
            return
        if self.state.db.contain_minor_block_by_hash(block.header.get_hash()):
            return

        if not self.state.db.contain_minor_block_by_hash(
                block.header.hash_prev_minor_block):
            if block.header.hash_prev_minor_block not in self.state.new_block_pool:
                return

        # Doing a full POSW check requires that the prev block has been added to the state,
        # which could slow down block propagation.
        # TODO: this is a copy of the code in SyncTask.__validate_block_headers; make it a helper
        try:
            header = block.header
            # Note that PoSW may lower diff, so checks here are necessary but not sufficient
            # More checks happen during block addition
            shard_config = self.env.quark_chain_config.shards[
                header.branch.get_full_shard_id()]
            consensus_type = shard_config.CONSENSUS_TYPE
            diff = header.difficulty
            if shard_config.POSW_CONFIG.ENABLED:
                diff //= shard_config.POSW_CONFIG.DIFF_DIVIDER
            validate_seal(header, consensus_type, adjusted_diff=diff)
        except Exception as e:
            Logger.warning(
                "[{}] got block with bad seal in handle_new_block: {}".format(
                    header.branch.to_str(), str(e)))
            raise e

        if block.header.create_time > time_ms() // 1000 + 30:
            return

        self.state.new_block_pool[block.header.get_hash()] = block

        Logger.info("[{}/{}] got new block with height {}".format(
            block.header.branch.get_chain_id(),
            block.header.branch.get_shard_id(),
            block.header.height,
        ))
        self.broadcast_new_block(block)
        await self.add_block(block)
Code example #12
    def add_block(self,
                  block,
                  write_db=True,
                  skip_if_too_old=True,
                  adjusted_diff: int = None):
        """ Add new block.
        return True if a longest block is added, False otherwise
        There are a couple of optimizations can be done here:
        - the root block could only contain minor block header hashes as long as the shards fully validate the headers
        - the header (or hashes) are un-ordered as long as they contains valid sub-chains from previous root block
        """

        if skip_if_too_old and (
                self.tip.height - block.header.height >
                self.root_config.MAX_STALE_ROOT_BLOCK_HEIGHT_DIFF):
            Logger.info("[R] drop old block {} << {}".format(
                block.header.height, self.tip.height))
            raise ValueError("block is too old {} << {}".format(
                block.header.height, self.tip.height))

        start_ms = time_ms()
        block_hash, last_minor_block_header_list = self.validate_block(
            block, adjusted_diff)

        if write_db:
            self.db.put_root_block(block, last_minor_block_header_list)

        tracking_data_str = block.tracking_data.decode("utf-8")
        if tracking_data_str != "":
            tracking_data = json.loads(tracking_data_str)
            sample = {
                "time": time_ms() // 1000,
                "shard": "R",
                "network": self.env.cluster_config.MONITORING.NETWORK_NAME,
                "cluster": self.env.cluster_config.MONITORING.CLUSTER_ID,
                "hash": block.header.get_hash().hex(),
                "height": block.header.height,
                "original_cluster": tracking_data["cluster"],
                "inception": tracking_data["inception"],
                "creation_latency_ms": tracking_data["creation_ms"],
                "add_block_latency_ms": time_ms() - start_ms,
                "mined": tracking_data.get("mined", 0),
                "propagation_latency_ms":
                start_ms - tracking_data.get("mined", 0),
                "num_tx": len(block.minor_block_header_list),
            }
            asyncio.ensure_future(
                self.env.cluster_config.kafka_logger.log_kafka_sample_async(
                    self.env.cluster_config.MONITORING.PROPAGATION_TOPIC,
                    sample))

        if self.tip.total_difficulty < block.header.total_difficulty:
            old_tip = self.tip
            self.tip = block.header
            # TODO: Atomicity during shutdown
            self.db.update_tip_hash(block_hash)
            self.__rewrite_block_index_to(old_tip, block)
            return True
        return False
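
The tip switch at the end implements a heaviest-chain rule: the new block replaces the current tip only when its header carries a strictly higher total difficulty. A one-line sketch of that predicate (names are illustrative):

def should_become_tip(current_tip_header, new_header):
    # Heaviest chain wins: switch only on strictly greater total difficulty.
    return new_header.total_difficulty > current_tip_header.total_difficulty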
Code example #13
File: p2p_manager.py Project: twelveze/pyquarkchain
 def close(self):
     if self.state == ConnectionState.ACTIVE:
         Logger.info("destroying proxy slave connections for {}".format(
             self.quark_peer.remote))
         self.master_server.destroy_peer_cluster_connections(
             self.cluster_peer_id)
     super(Connection, self).close()
     self.quark_peer.close()
Code example #14
File: slave.py Project: RufusDerrid/pyquarkchain
    def close(self):
        for shard in self.shards.values():
            for peer_shard_conn in shard.peers.values():
                peer_shard_conn.get_forwarding_connection().close()

        Logger.info("Lost connection with master. Shutting down slave ...")
        self.slave_server.shutdown()
        return super().close()
Code example #15
File: slave.py Project: quomap/pyquarkchain
def main():
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    env = parse_args()

    slave_server = SlaveServer(env)
    slave_server.start()
    slave_server.do_loop()

    Logger.info("Slave server is shutdown")
Code example #16
 def loop(self):
     while True:
         gevent.sleep(self.REFRESH_INTERVAL)
         Logger.info("p2p periodic refresh")
         active_peers = self.get_connected_peers()
         self.app.network.loop.call_soon_threadsafe(
             asyncio.ensure_future,
             self.app.network.refresh_connections(active_peers),
         )
Code example #17
 def start_server(self):
     coro = asyncio.start_server(self.new_peer,
                                 "0.0.0.0",
                                 self.port,
                                 loop=self.loop)
     self.server = self.loop.run_until_complete(coro)
     Logger.info("Self id {}".format(self.self_id.hex()))
     Logger.info("Listening on {} for p2p".format(
         self.server.sockets[0].getsockname()))
Code example #18
File: slave.py Project: RufusDerrid/pyquarkchain
 def start_mining(self, artificial_tx_config):
     self.artificial_tx_config = artificial_tx_config
     self.mining = True
     for branch, shard in self.shards.items():
         Logger.info(
             "[{}] start mining with target minor block time {} seconds".
             format(branch.get_shard_id(),
                    artificial_tx_config.target_minor_block_time))
         shard.miner.start()
Code example #19
 async def __start_server(self):
     """ Run the server until shutdown is called """
     self.server = await asyncio.start_server(
         self.__handle_new_connection,
         "0.0.0.0",
         self.env.slave_config.PORT,
         loop=self.loop,
     )
     Logger.info("Listening on {} for intra-cluster RPC".format(
         self.server.sockets[0].getsockname()))
Code example #20
def main():
    env, unknown_flags = parse_args()
    FLAGS(sys.argv[:1] + unknown_flags)
    if FLAGS["verbosity"].using_default_value:
        FLAGS.verbosity = 0  # INFO level

    slave_server = SlaveServer(env)
    slave_server.start_and_loop()

    Logger.info("Slave server is shutdown")
Code example #21
File: p2p_manager.py Project: twelveze/pyquarkchain
 async def do_sub_proto_handshake(self) -> None:
     """ overrides BasePeer.do_sub_proto_handshake()
     """
     self.secure_peer = SecurePeer(self)
     Logger.info("starting peer hello exchange")
     start_state = await self.secure_peer.start()
     if start_state:
         # returns None if successful
         raise HandshakeFailure(
             "hello message exchange failed: {}".format(start_state))
Code example #22
    async def handle_new_minor_block_header_list_command(self, _op, cmd, _rpc_id):
        # TODO: allow multiple headers if needed
        if len(cmd.minor_block_header_list) != 1:
            self.close_with_error("minor block header list must have only one header")
            return
        for m_header in cmd.minor_block_header_list:
            Logger.info(
                "[{}] received new header with height {}".format(
                    m_header.branch.get_shard_id(), m_header.height
                )
            )
            if m_header.branch != self.shard_state.branch:
                self.close_with_error("incorrect branch")
                return

        if self.best_root_block_header_observed:
            # check root header is not decreasing
            if (
                cmd.root_block_header.height
                < self.best_root_block_header_observed.height
            ):
                return self.close_with_error(
                    "best observed root header height is decreasing {} < {}".format(
                        cmd.root_block_header.height,
                        self.best_root_block_header_observed.height,
                    )
                )
            if (
                cmd.root_block_header.height
                == self.best_root_block_header_observed.height
            ):
                if cmd.root_block_header != self.best_root_block_header_observed:
                    return self.close_with_error(
                        "best observed root header changed with same height {}".format(
                            self.best_root_block_header_observed.height
                        )
                    )

                # check minor header is not decreasing
                if m_header.height < self.best_minor_block_header_observed.height:
                    return self.close_with_error(
                        "best observed minor header is decreasing {} < {}".format(
                            m_header.height,
                            self.best_minor_block_header_observed.height,
                        )
                    )

        self.best_root_block_header_observed = cmd.root_block_header
        self.best_minor_block_header_observed = m_header

        # Do not download if the new header is not higher than the current tip
        if self.shard_state.header_tip.height >= m_header.height:
            return

        self.shard.synchronizer.add_task(m_header, self)
Code example #23
File: shard.py Project: techamazed/pyquarkchain
    async def add_block(self, block):
        """ Returns true if block is successfully added. False on any error.
        called by 1. local miner (will not run if syncing) 2. SyncTask
        """
        old_tip = self.state.header_tip
        try:
            xshard_list = self.state.add_block(block)
        except Exception as e:
            Logger.error_exception()
            return False

        # only remove from the pool if the block was successfully added to the state;
        #   this may cache failed blocks but prevents them from being broadcast more than needed
        # TODO add ttl to blocks in new_block_pool
        self.state.new_block_pool.pop(block.header.get_hash(), None)
        # block has been added to local state, broadcast tip so that peers can sync if needed
        try:
            if old_tip != self.state.header_tip:
                self.broadcast_new_tip()
        except Exception:
            Logger.warning_every_sec("broadcast tip failure", 1)

        # the block already existed in the local shard state
        # but might not have been propagated to the other shards and the master,
        # so make sure all the shards and the master got it before returning
        if xshard_list is None:
            future = self.add_block_futures.get(block.header.get_hash(), None)
            if future:
                Logger.info(
                    "[{}] {} is being added ... waiting for it to finish".
                    format(block.header.branch.get_shard_id(),
                           block.header.height))
                await future
            return True

        self.add_block_futures[
            block.header.get_hash()] = self.loop.create_future()

        # Start mining new one before propagating inside cluster
        # The propagation should be done by the time the new block is mined
        self.miner.mine_new_block_async()
        prev_root_height = self.state.db.get_root_block_by_hash(
            block.header.hash_prev_root_block).header.height
        await self.slave.broadcast_xshard_tx_list(block, xshard_list,
                                                  prev_root_height)
        await self.slave.send_minor_block_header_to_master(
            block.header,
            len(block.tx_list),
            len(xshard_list),
            self.state.get_shard_stats(),
        )

        self.add_block_futures[block.header.get_hash()].set_result(None)
        del self.add_block_futures[block.header.get_hash()]
        return True
Code example #24
 def close_dead_peer(self):
     assert self.id is not None
     if self.id in self.network.active_peer_pool:
         del self.network.active_peer_pool[self.id]
     if self.cluster_peer_id in self.network.cluster_peer_pool:
         del self.network.cluster_peer_pool[self.cluster_peer_id]
     Logger.info("Peer {} ({}:{}) disconnected, remaining {}".format(
         self.id.hex(), self.ip, self.port,
         len(self.network.active_peer_pool)))
     self.master_server.destroy_peer_cluster_connections(
         self.cluster_peer_id)
     super().close()
Code example #25
    def close(self):
        if self.state == ConnectionState.ACTIVE:
            assert self.id is not None
            if self.id in self.network.active_peer_pool:
                del self.network.active_peer_pool[self.id]
            if self.cluster_peer_id in self.network.cluster_peer_pool:
                del self.network.cluster_peer_pool[self.cluster_peer_id]
            Logger.info("Peer {} disconnected, remaining {}".format(
                self.id.hex(), len(self.network.active_peer_pool)))
            self.master_server.destroy_peer_cluster_connections(
                self.cluster_peer_id)

        super().close()
Code example #26
 async def run() -> None:
     await loop.create_datagram_endpoint(
         lambda: discovery, local_addr=("0.0.0.0", args.listen_port)
     )
     try:
         await discovery.bootstrap()
         while True:
             Logger.info("Routing table size={}".format(len(discovery.routing)))
             await cancel_token.cancellable_wait(asyncio.sleep(5))
     except OperationCancelled:
         pass
     finally:
         await discovery.stop()
Code example #27
 def get_connected_peers(self):
     ps = [p for p in self.app.services.peermanager.peers if p]
     aps = [p for p in ps if not p.is_stopped]
     Logger.info("I am {} I have {} peers: {}".format(
         self.app.config["client_version_string"],
         len(aps),
         [
             p.remote_client_version
             if p.remote_client_version != "" else "Not Ready" for p in aps
         ],
     ))
     return [
         p.remote_client_version.decode("utf-8") for p in aps
         if p.remote_client_version != ""
     ]
Code example #28
    def __init__(self, env, diff_calc=None):
        self.env = env
        self.diff_calc = (diff_calc if diff_calc else EthDifficultyCalculator(
            cutoff=45, diff_factor=2048, minimum_diff=1000000))
        self.raw_db = env.db
        self.db = RootDb(self.raw_db,
                         env.quark_chain_config.ROOT.max_root_blocks_in_memory)

        persisted_tip = self.db.get_tip_header()
        if persisted_tip:
            self.tip = persisted_tip
            Logger.info("Recovered root state with tip height {}".format(
                self.tip.height))
        else:
            self.__create_genesis_block()
            Logger.info("Created genesis root block")
Code example #29
    async def handle_new_block(self, block):
        """
        0. if local shard is syncing, doesn't make sense to add, skip
        1. if block parent is not in local state/new block pool, discard
        2. if already in cache or in local state/new block pool, pass
        3. validate: check time, difficulty, POW
        4. add it to new minor block broadcast cache
        5. broadcast to all peers (minus peer that sent it, optional)
        6. add_block() to local state (then remove from cache)
             also, broadcast tip if tip is updated (so that peers can sync if they missed blocks, or are new)
        """
        if self.synchronizer.running:
            # TODO optional: queue the block if it came from broadcast so that once sync is over, we can catch up immediately
            return

        if block.header.get_hash() in self.state.new_block_pool:
            return
        if self.state.db.contain_minor_block_by_hash(block.header.get_hash()):
            return

        if not self.state.db.contain_minor_block_by_hash(
                block.header.hash_prev_minor_block):
            if block.header.hash_prev_minor_block not in self.state.new_block_pool:
                return

        full_shard_id = block.header.branch.get_full_shard_id()
        consensus_type = self.env.quark_chain_config.shards[
            full_shard_id].CONSENSUS_TYPE
        try:
            validate_seal(block.header, consensus_type)
        except Exception as e:
            Logger.warning("[{}] Got block with bad seal: {}".format(
                full_shard_id, str(e)))
            return

        if block.header.create_time > time_ms() // 1000 + 30:
            return

        self.state.new_block_pool[block.header.get_hash()] = block

        Logger.info("[{}/{}] got new block with height {}".format(
            block.header.branch.get_chain_id(),
            block.header.branch.get_shard_id(),
            block.header.height,
        ))
        self.broadcast_new_block(block)
        await self.add_block(block)
Code example #30
    def __recover_from_db(self):
        """ Recover the best chain from local database.
        """
        Logger.info("Recovering root chain from local database...")

        if b"tipHash" not in self.db:
            return None

        r_hash = self.db.get(b"tipHash")
        r_block = RootBlock.deserialize(self.db.get(b"rblock_" + r_hash))
        if r_block.header.height <= 0:
            return None
        # use the parent of the tipHash block as the new tip
        # since it's guaranteed to have been accepted by all the shards
        # while shards might not have seen the block of tipHash
        r_hash = r_block.header.hash_prev_block
        r_block = RootBlock.deserialize(self.db.get(b"rblock_" + r_hash))
        self.tip_header = r_block.header  # type: RootBlockHeader