def test_from_dict_raises_exception_for_wrong_type(dummy_block):
    """Block.from_dict must reject an ``id`` that is not str/bytes."""
    bad_data = deepcopy(dummy_block)
    # Corrupt the id field: a float is not an accepted id type.
    bad_data["id"] = float(bad_data["id"])

    with pytest.raises(TypeError) as excinfo:
        Block.from_dict(bad_data)

    expected_message = (
        "Attribute id (<class 'float'>) must be of type "
        "(<class 'str'>, <class 'bytes'>)"
    )
    assert str(excinfo.value) == expected_message
def test_from_dict_correctly_sets_data(dummy_block):
    """Block.from_dict must map every dict field onto the Block object."""
    block = Block.from_dict(dummy_block)

    # Scalar block attributes and their expected values, checked in one loop.
    expected_attributes = {
        "version": 0,
        "timestamp": 24760440,
        "height": 2243161,
        "previous_block_hex": b"2b324b8b33a85802",
        "previous_block": "3112633353705641986",
        "number_of_transactions": 2,
        "total_amount": 3890300,
        "total_fee": 70000000,
        "reward": 200000000,
        "payload_length": 224,
        "payload_hash": (
            "3784b953afcf936bdffd43fdf005b5732b49c1fc6b11e195c364c20b2eb06282"
        ),
        "generator_public_key": (
            "020f5df4d2bc736d12ce43af5b1663885a893fade7ee5e62b3cc59315a63e6a325"
        ),
        "block_signature": (
            "3045022100eee6c37b5e592e99811d588532726353592923f347c701d52912e6d583443e40022"
            "0277ffe38ad31e216ba0907c4738fed19b2071246b150c72c0a52bae4477ebe29"
        ),
        "id": "10977713934532967004",
        "id_hex": b"9858aca939b17a5c",
    }
    for attribute, expected_value in expected_attributes.items():
        assert getattr(block, attribute) == expected_value

    assert block.transactions is not None
    assert len(block.transactions) == 2

    # Transaction attribute name -> key in the source dict (camelCase).
    transaction_fields = [
        ("type", "type"),
        ("timestamp", "timestamp"),
        ("sender_public_key", "senderPublicKey"),
        ("fee", "fee"),
        ("amount", "amount"),
        ("asset", "asset"),
        ("vendor_field", "vendorField"),
    ]
    for transaction, expected in zip(block.transactions, dummy_block["transactions"]):
        # version/network are not part of the serialized transaction data.
        assert transaction.version is None
        assert transaction.network is None
        for attribute, source_key in transaction_fields:
            assert getattr(transaction, attribute) == expected[source_key]
def fetch_blocks_from_height(self, from_height):
    """Fetch blocks from a peer, starting at *from_height*.

    Requests fully serialized blocks (not just headers) via the
    ``p2p.peer.getBlocks`` endpoint and deserializes each one into a
    crypto ``Block`` object.
    """
    request_payload = {
        "lastBlockHeight": from_height,
        "serialized": True,
        "headersOnly": False,
    }
    raw_blocks = self._fetch("p2p.peer.getBlocks", request_payload)

    fetched = []
    for raw_block in raw_blocks:
        fetched.append(Block.from_dict(raw_block))
    return fetched
def _create_genesis_block():
    """Wipe the database and persist the configured genesis block.

    The genesis block from ``config.genesis_block`` is deserialized into a
    crypto block, converted to its DB representation and inserted, together
    with all of its transactions.
    """
    _clear_db()

    crypto_block = CryptoBlock.from_dict(config.genesis_block)

    # force_insert: this must be a fresh INSERT, never an UPDATE.
    Block.from_crypto(crypto_block).save(force_insert=True)
    for crypto_transaction in crypto_block.transactions:
        Transaction.from_crypto(crypto_transaction).save(force_insert=True)
async def post_block(self, data, ip):
    """Handle a block posted by a peer at address *ip*.

    Deserializes and verifies the block, then pushes it onto the process
    queue unless it is already processed, already queued, or claims a
    future slot.  Assumes ``data`` carries the block under the "block"
    key — TODO confirm against the peer protocol schema.
    """
    # TODO: Wrap everything in try except
    # TODO: Validate request data that it's correct block structure
    block_data = data["block"]
    # if not block_data:
    #     raise Exception(
    #         "There was no block in request to the /peer/blocks endpoint"
    #     )
    block = Block.from_dict(block_data)
    self.socket.log_info(
        "Received new block at height %s with %s transactions, from %s",
        block.height,
        block.number_of_transactions,
        ip,
    )
    # NOTE(review): verification failure currently only logs — processing
    # continues; the TODO below suggests it should abort instead.
    is_verified, errors = block.verify()
    if not is_verified:
        self.socket.log_error(errors)
        # TODO: raise Exception("Verification failed")
    last_block = self.db.get_last_block()
    # Skip blocks at or below our current height — already processed.
    if last_block.height >= block.height:
        self.socket.log_info(
            "Received block with height %s which was already processed. Our last "
            "block height %s. Skipping process queue.",
            block.height,
            last_block.height,
        )
        return
    # Avoid queueing the same block twice.
    if self.process_queue.block_exists(block):
        self.socket.log_info(
            "Received block with height %s is already in process queue.",
            block.height,
        )
        return
    # Compare the current slot against the slot the block was forged in;
    # both are computed relative to last_block.height — presumably so the
    # same milestone parameters apply to both. TODO confirm intent.
    current_slot = slots.get_slot_number(last_block.height, time.get_time())
    received_slot = slots.get_slot_number(last_block.height, block.timestamp)
    if current_slot >= received_slot and is_block_chained(last_block, block):
        # Put the block to process queue
        self.process_queue.push_block(block)
    else:
        self.socket.log_info(
            "Discarded block %s because it takes a future slot", block.height)
def start(self):
    """Starts the blockchain.

    Depending on the state of the blockchain it will decide what needs to
    be done in order to correctly start syncing: insert the genesis block
    into an empty database, verify (and if needed roll back) a corrupted
    database, clean up a half-applied round, rebuild wallets and finally
    sync the chain and start consuming the process queue.

    Any exception stops the blockchain and is re-raised to the caller.
    """
    logger.info("Starting the blockchain")
    apply_genesis_round = False
    try:
        block = self.database.get_last_block()

        # If block is not found in the db, insert a genesis block
        if not block:
            logger.info("No block found in the database")
            block = Block.from_dict(config.genesis_block)
            # The genesis block must match the configured network, otherwise
            # we would sync against the wrong chain.
            if block.payload_hash != config.network["nethash"]:
                logger.error(
                    "FATAL: The genesis block payload hash is different from "
                    "the configured nethash"
                )
                self.stop()
                return
            else:
                self.database.save_block(block)
                apply_genesis_round = True

        logger.info("Verifying database integrity")
        is_valid = False
        errors = None
        # Try up to 5 times: on each failure roll back one round and
        # re-verify. The for-else raises if all 5 attempts fail.
        for _ in range(5):
            is_valid, errors = self.database.verify_blockchain()
            if is_valid:
                break
            else:
                logger.error("Database is corrupted: {}".format(errors))
                milestone = config.get_milestone(block.height)
                previous_round = math.floor(
                    (block.height - 1) / milestone["activeDelegates"]
                )
                if previous_round <= 1:
                    raise Exception(
                        "FATAL: Database is corrupted: {}".format(errors)
                    )
                logger.info("Rolling back to round {}".format(previous_round))
                self.database.rollback_to_round(previous_round)
                logger.info("Rolled back to round {}".format(previous_round))
        else:
            raise Exception(
                "FATAL: After rolling back for 5 rounds, database is still "
                "corrupted: {}".format(errors)
            )
        logger.info("Verified database integrity")

        # if (stateStorage.networkStart) {
        #     await blockchain.database.buildWallets(block.data.height);
        #     await blockchain.database.saveWallets(true);
        #     await blockchain.database.applyRound(block.data.height);
        #     await blockchain.transactionPool.buildWallets();
        #     return blockchain.dispatch("STARTED");
        # }

        logger.info("Last block in database: %s", block.height)

        # if the node is shutdown between round, the round has already been applied
        # so we delete it to start a new, fresh round
        if is_new_round(block.height + 1):
            current_round, _, _ = calculate_round(block.height + 1)
            logger.info(
                "Start of new round detected %s. Removing it in order to correctly "
                "start the chain with new round.",
                current_round,
            )
            self.database.delete_round(current_round)

        # Rebuild wallets
        self.database.wallets.build()
        self.transaction_pool.build_wallets()

        if apply_genesis_round:
            self.database.apply_round(block.height)

        self.sync_chain()
        logger.info("Blockchain is synced!")

        # Blockchain was just synced, so remove all blocks from process queue
        # as it was just synced. We clear it only on the start of the chain, to
        # avoid processing old blocks. If we ever run sync while it's already
        # running, we don't want to run clear after sync as that might leave us
        # with missing blocks which will cause the blockchain to always sync back
        # rather than sync by accepting block from peers.
        self.process_queue.clear()
        self.consume_queue()
    except Exception:
        self.stop()
        # Log with traceback, then re-raise unchanged so callers still see
        # the original exception (bare `raise` preserves the traceback).
        logger.exception("Exception raised while starting the blockchain")
        raise