def backup_blocks(self, raw_blocks):
    '''Backup the raw blocks and flush.

    The blocks should be in order of decreasing height, starting at
    self.height.  A flush is performed once the blocks are backed up.
    '''
    self.db.assert_flushed(self.flush_data())
    assert self.height >= len(raw_blocks)
    genesis_activation = self.coin.GENESIS_ACTIVATION

    coin = self.coin
    for raw_block in raw_blocks:
        # Check and update self.tip
        block = coin.block(raw_block, self.height)
        header_hash = coin.header_hash(block.header)
        if header_hash != self.tip:
            raise ChainError(
                'backup block {} not tip {} at height {:,d}'.format(
                    hash_to_hex_str(header_hash),
                    hash_to_hex_str(self.tip), self.height))
        self.tip = coin.header_prevhash(block.header)
        is_unspendable = (is_unspendable_genesis
                          if self.height >= genesis_activation
                          else is_unspendable_legacy)
        self.backup_txs(block.transactions, is_unspendable)
        self.height -= 1
        self.db.tx_counts.pop()

    self.logger.info('backed up to height {:,d}'.format(self.height))
async def calc_reorg_range(self, count):
    '''Calculate the reorg range.'''
    def diff_pos(hashes1, hashes2):
        '''Returns the index of the first difference in the hash lists.
        If both lists match returns their length.'''
        for n, (hash1, hash2) in enumerate(zip(hashes1, hashes2)):
            if hash1 != hash2:
                return n
        return len(hashes1)

    if count is None:
        # A real reorg
        start = self.height - 1
        count = 1
        while start > 0:
            hashes = await self.db.fs_block_hashes(start, count)
            hex_hashes = [hash_to_hex_str(hash) for hash in hashes]
            d_hex_hashes = await self.daemon.block_hex_hashes(start, count)
            n = diff_pos(hex_hashes, d_hex_hashes)
            if n > 0:
                start += n
                break
            count = min(count * 2, start)
            start -= count

        count = (self.height - start) + 1
    else:
        start = (self.height - count) + 1

    return start, count
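# The loop above walks backwards, doubling `count` each pass, so the fork
# point is found with a number of daemon queries logarithmic in the reorg
# depth.  A standalone sketch of diff_pos on hypothetical hash lists
# (sample data only; no DB or daemon involved):
def _diff_pos_sketch(hashes1, hashes2):
    for n, (hash1, hash2) in enumerate(zip(hashes1, hashes2)):
        if hash1 != hash2:
            return n
    return len(hashes1)

ours = ['aa', 'bb', 'cc', 'dd']     # hypothetical local chain hashes
theirs = ['aa', 'bb', 'ee', 'ff']   # hypothetical daemon chain hashes
assert _diff_pos_sketch(ours, theirs) == 2   # first divergence
assert _diff_pos_sketch(ours, ours) == 4     # identical lists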
def read_utxo_state(self):
    state = self.utxo_db.get(b'state')
    if not state:
        self.db_height = -1
        self.db_tx_count = 0
        self.db_tip = b'\0' * 32
        self.db_version = max(self.DB_VERSIONS)
        self.utxo_flush_count = 0
        self.wall_time = 0
        self.first_sync = True
    else:
        state = ast.literal_eval(state.decode())
        if not isinstance(state, dict):
            raise self.DBError('failed reading state from DB')
        self.db_version = state['db_version']
        if self.db_version not in self.DB_VERSIONS:
            raise self.DBError('your UTXO DB version is {} but this '
                               'software only handles versions {}'
                               .format(self.db_version, self.DB_VERSIONS))
        # backwards compat
        genesis_hash = state['genesis']
        if isinstance(genesis_hash, bytes):
            genesis_hash = genesis_hash.decode()
        if genesis_hash != self.coin.GENESIS_HASH:
            raise self.DBError('DB genesis hash {} does not match coin {}'
                               .format(genesis_hash,
                                       self.coin.GENESIS_HASH))
        self.db_height = state['height']
        self.db_tx_count = state['tx_count']
        self.db_tip = state['tip']
        self.utxo_flush_count = state['utxo_flush_count']
        self.wall_time = state['wall_time']
        self.first_sync = state['first_sync']

    # These are our state as we move ahead of DB state
    self.fs_height = self.db_height
    self.fs_tx_count = self.db_tx_count
    self.last_flush_tx_count = self.fs_tx_count

    # Upgrade DB
    if self.db_version != max(self.DB_VERSIONS):
        self.upgrade_db()

    # Log some stats
    self.logger.info('UTXO DB version: {:d}'.format(self.db_version))
    self.logger.info('coin: {}'.format(self.coin.NAME))
    self.logger.info('network: {}'.format(self.coin.NET))
    self.logger.info('height: {:,d}'.format(self.db_height))
    self.logger.info('tip: {}'.format(hash_to_hex_str(self.db_tip)))
    self.logger.info('tx count: {:,d}'.format(self.db_tx_count))
    if self.utxo_db.for_sync:
        self.logger.info(f'flushing DB cache at {self.env.cache_MB:,d} MB')
    if self.first_sync:
        self.logger.info('sync time so far: {}'
                         .format(util.formatted_time(self.wall_time)))
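# The state record is parsed with ast.literal_eval, so the writer presumably
# stores the repr() of a plain Python dict.  A minimal round-trip sketch with
# a hypothetical, trimmed-down state dict (key set abbreviated):
import ast

state = {'db_version': 1, 'genesis': '00' * 32, 'height': -1}
blob = repr(state).encode()    # what a writer would presumably store
assert ast.literal_eval(blob.decode()) == state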
def test_transaction(transaction_details):
    coin, tx_info = transaction_details
    raw_tx = unhexlify(tx_info['hex'])
    tx, tx_hash = coin.DESERIALIZER(raw_tx, 0).read_tx_and_hash()
    assert tx_info['txid'] == hash_to_hex_str(tx_hash)

    vin = tx_info['vin']
    for i in range(len(vin)):
        assert vin[i]['txid'] == hash_to_hex_str(tx.inputs[i].prev_hash)
        assert vin[i]['vout'] == tx.inputs[i].prev_idx

    vout = tx_info['vout']
    for i in range(len(vout)):
        # value and pk_script
        assert vout[i]['value'] == tx.outputs[i].value
        spk = vout[i]['scriptPubKey']
        tx_pks = tx.outputs[i].pk_script
        assert spk['hex'] == tx_pks.hex()
        if "addresses" in spk:
            assert len(spk["addresses"]) == 1
            address = spk["addresses"][0]
        else:
            address = spk["address"]
        assert coin.address_to_hashX(address) == coin.hashX_from_script(tx_pks)

        if issubclass(coin, Namecoin):
            if "nameOp" not in spk or "name" not in spk["nameOp"]:
                assert coin.name_hashX_from_script(tx_pks) is None
            else:
                OP_NAME_UPDATE = OpCodes.OP_3
                normalized_name_op_script = bytearray()
                normalized_name_op_script.append(OP_NAME_UPDATE)
                normalized_name_op_script.extend(
                    Script.push_data(spk["nameOp"]["name"].encode("ascii")))
                normalized_name_op_script.extend(Script.push_data(bytes([])))
                normalized_name_op_script.append(OpCodes.OP_2DROP)
                normalized_name_op_script.append(OpCodes.OP_DROP)
                normalized_name_op_script.append(OpCodes.OP_RETURN)
                assert coin.name_hashX_from_script(tx_pks) == \
                    Coin.hashX_from_script(normalized_name_op_script)
def spend_utxo(self, tx_hash, tx_idx):
    '''Spend a UTXO and return the 33-byte value.

    If the UTXO is not in the cache it must be on disk.  We store all
    UTXOs so not finding one indicates a logic error or DB corruption.
    '''
    # Fast track: the UTXO is in the cache
    idx_packed = pack_le_uint32(tx_idx)
    cache_value = self.utxo_cache.pop(tx_hash + idx_packed, None)
    if cache_value:
        return cache_value

    # Spend it from the DB.
    # Key: b'h' + compressed_tx_hash + tx_idx + tx_num
    # Value: hashX
    prefix = b'h' + tx_hash[:4] + idx_packed
    candidates = {db_key: hashX for db_key, hashX
                  in self.db.utxo_db.iterator(prefix=prefix)}

    for hdb_key, hashX in candidates.items():
        tx_num_packed = hdb_key[-5:]
        if len(candidates) > 1:
            tx_num, = unpack_le_uint64(tx_num_packed + bytes(3))
            hash, _height = self.db.fs_tx_hash(tx_num)
            if hash != tx_hash:
                assert hash is not None  # Should always be found
                continue

        # Key: b'u' + address_hashX + tx_idx + tx_num
        # Value: the UTXO value as a 64-bit unsigned integer
        udb_key = b'u' + hashX + hdb_key[-9:]
        utxo_value_packed = self.db.utxo_db.get(udb_key)
        if utxo_value_packed:
            # Remove both entries for this UTXO
            self.db_deletes.append(hdb_key)
            self.db_deletes.append(udb_key)
            return hashX + tx_num_packed + utxo_value_packed

    raise ChainError('UTXO {} / {:,d} not found in "h" table'.format(
        hash_to_hex_str(tx_hash), tx_idx))
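# The 'h' and 'u' rows above share a (tx_idx, tx_num) suffix, which is what
# lets hdb_key[-9:] be reused to build udb_key.  A minimal packing sketch,
# assuming a 4-byte txid prefix, an 11-byte hashX and a 5-byte tx_num (all
# example values hypothetical; struct stands in for the codebase's pack
# helpers):
import struct

tx_hash = bytes(range(32))                     # hypothetical 32-byte txid
hashX = bytes(11)                              # hypothetical 11-byte hashX
idx_packed = struct.pack('<I', 1)              # tx_idx, 4-byte little-endian
tx_num_packed = struct.pack('<Q', 123456)[:5]  # tx_num truncated to 5 bytes

hdb_key = b'h' + tx_hash[:4] + idx_packed + tx_num_packed
udb_key = b'u' + hashX + hdb_key[-9:]          # same tx_idx + tx_num suffix

assert len(hdb_key) == 1 + 4 + 4 + 5
assert len(udb_key) == 1 + 11 + 4 + 5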
def read_tx(self):
    # Return a deserialized TX.
    version = self._get_version()
    if version != self.bitcoin_diamond_tx_version:
        return Tx(
            self._read_le_int32(),   # version
            self._read_inputs(),     # inputs
            self._read_outputs(),    # outputs
            self._read_le_uint32()   # locktime
        )
    else:
        return TxBitcoinDiamond(
            self._read_le_int32(),                   # version
            hash_to_hex_str(self._read_nbytes(32)),  # blockhash
            self._read_inputs(),                     # inputs
            self._read_outputs(),                    # outputs
            self._read_le_uint32()                   # locktime
        )
def _read_tx_parts(self):
    '''Return a (deserialized TX, tx_hash, vsize) tuple.'''
    start = self.cursor
    tx_version = self._get_version()
    if tx_version == self.bitcoin_diamond_tx_version:
        marker = self.binary[self.cursor + 4 + 32]
    else:
        marker = self.binary[self.cursor + 4]

    if marker:
        tx = super().read_tx()
        tx_hash = self.TX_HASH_FN(self.binary[start:self.cursor])
        return tx, tx_hash, self.binary_length

    # Ugh, this is nasty.
    version = self._read_le_int32()
    present_block_hash = None
    if version == self.bitcoin_diamond_tx_version:
        present_block_hash = hash_to_hex_str(self._read_nbytes(32))
    orig_ser = self.binary[start:self.cursor]

    marker = self._read_byte()
    flag = self._read_byte()

    start = self.cursor
    inputs = self._read_inputs()
    outputs = self._read_outputs()
    orig_ser += self.binary[start:self.cursor]

    base_size = self.cursor - start
    witness = self._read_witness(len(inputs))

    start = self.cursor
    locktime = self._read_le_uint32()
    orig_ser += self.binary[start:self.cursor]
    vsize = (3 * base_size + self.binary_length) // 4

    if present_block_hash is not None:
        return TxBitcoinDiamondSegWit(
            version, present_block_hash, marker, flag, inputs, outputs,
            witness, locktime), self.TX_HASH_FN(orig_ser), vsize
    else:
        return TxSegWit(version, marker, flag, inputs, outputs, witness,
                        locktime), self.TX_HASH_FN(orig_ser), vsize
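# The vsize computation above is a variant of the BIP 141 virtual-size
# rule: weight = 3 * base_size + total_size, vsize = ceil(weight / 4).
# A worked example with hypothetical sizes:
base_size = 200    # serialization without marker, flag and witness data
total_size = 300   # full serialization including witness data
weight = 3 * base_size + total_size   # 900 weight units
vsize = (weight + 3) // 4             # ceil(900 / 4) == 225 vbytes
assert vsize == 225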
async def _fetch_and_accept(self, hashes, all_hashes, touched):
    '''Fetch a list of mempool transactions.'''
    hex_hashes_iter = (hash_to_hex_str(hash) for hash in hashes)
    raw_txs = await self.api.raw_transactions(hex_hashes_iter)

    def deserialize_txs():    # This function is pure
        to_hashX = self.coin.hashX_from_script
        deserializer = self.coin.DESERIALIZER

        txs = {}
        for hash, raw_tx in zip(hashes, raw_txs):
            # The daemon may have evicted the tx from its
            # mempool or it may have gotten in a block
            if not raw_tx:
                continue
            tx, tx_size = deserializer(raw_tx).read_tx_and_vsize()
            # Convert the inputs and outputs into (hashX, value) pairs
            # Drop generation-like inputs from MemPoolTx.prevouts
            txin_pairs = tuple((txin.prev_hash, txin.prev_idx)
                               for txin in tx.inputs
                               if not txin.is_generation())
            txout_pairs = tuple((to_hashX(txout.pk_script), txout.value)
                                for txout in tx.outputs)
            txs[hash] = MemPoolTx(txin_pairs, None, txout_pairs,
                                  0, tx_size)
        return txs

    # Thread this potentially slow operation so as not to block
    tx_map = await run_in_thread(deserialize_txs)

    # Determine all prevouts not in the mempool, and fetch the
    # UTXO information from the database.  Failed prevout lookups
    # return None - concurrent database updates happen - which is
    # relied upon by _accept_transactions.  Ignore prevouts that are
    # generation-like.
    prevouts = tuple(prevout for tx in tx_map.values()
                     for prevout in tx.prevouts
                     if prevout[0] not in all_hashes)
    utxos = await self.api.lookup_utxos(prevouts)
    utxo_map = {prevout: utxo for prevout, utxo in zip(prevouts, utxos)}

    return self._accept_transactions(tx_map, utxo_map, touched)
async def reorg_chain(self, count=None):
    '''Handle a chain reorganisation.

    Count is the number of blocks to simulate a reorg, or None for
    a real reorg.'''
    if count is None:
        self.logger.info('chain reorg detected')
    else:
        self.logger.info(f'faking a reorg of {count:,d} blocks')
    await self.flush(True)

    async def get_raw_blocks(last_height, hex_hashes):
        heights = range(last_height, last_height - len(hex_hashes), -1)
        try:
            blocks = [self.db.read_raw_block(height) for height in heights]
            self.logger.info(f'read {len(blocks)} blocks from disk')
            return blocks
        except FileNotFoundError:
            return await self.daemon.raw_blocks(hex_hashes)

    def flush_backup():
        # self.touched can include other addresses which is
        # harmless, but remove None.
        self.touched.discard(None)
        self.db.flush_backup(self.flush_data(), self.touched)

    _start, last, hashes = await self.reorg_hashes(count)
    # Reverse and convert to hex strings.
    hashes = [hash_to_hex_str(hash) for hash in reversed(hashes)]
    for hex_hashes in chunks(hashes, 50):
        raw_blocks = await get_raw_blocks(last, hex_hashes)
        await self.run_in_thread_with_lock(self.backup_blocks, raw_blocks)
        await self.run_in_thread_with_lock(flush_backup)
        last -= len(raw_blocks)
    await self.prefetcher.reset_height(self.height)
    self.backed_up_event.set()
    self.backed_up_event.clear()
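# The loop above processes the reorged hashes in batches of 50.  A sketch
# of what the chunks helper it relies on presumably looks like (the real
# one lives in the codebase's util module; this standalone version is for
# illustration only):
def _chunks_sketch(items, size):
    '''Yield successive slices of items, each of the given size.'''
    for i in range(0, len(items), size):
        yield items[i:i + size]

assert list(_chunks_sketch(list(range(7)), 3)) == [[0, 1, 2], [3, 4, 5], [6]]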
def __str__(self):
    prev_hash = hash_to_hex_str(self.prev_hash)
    return ("Input({}, {:d}, tree={}, sequence={:d})"
            .format(prev_hash, self.prev_idx, self.tree, self.sequence))
def __str__(self):
    script = self.script.hex()
    prev_hash = hash_to_hex_str(self.prev_hash)
    return ("Input({}, {:d}, script={}, sequence={:d})"
            .format(prev_hash, self.prev_idx, script, self.sequence))
def prev_hex_hash(raw_block):
    return hash_to_hex_str(raw_block[4:36])
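# prev_hex_hash relies on the fixed block header layout: a 4-byte version
# followed by the 32-byte previous-block hash at offset 4.  A sketch
# against a synthetic 80-byte header (all field values hypothetical):
import struct

version = struct.pack('<i', 2)
prev_hash = bytes(range(32))         # hypothetical previous-block hash
merkle_root = bytes(32)
tail = struct.pack('<III', 0, 0, 0)  # time, bits, nonce placeholders
raw_block = version + prev_hash + merkle_root + tail  # header-only "block"

assert raw_block[4:36] == prev_hash
# hash_to_hex_str would then reverse these bytes and hex-encode them.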
def test_hash_to_hex_str():
    assert lib_hash.hash_to_hex_str(b'hash_to_str') == '7274735f6f745f68736168'
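# The expected string is simply the input bytes reversed and hex-encoded,
# which is Bitcoin's display convention for hashes.  The same expectation,
# checked with the stdlib alone:
assert b'hash_to_str'[::-1].hex() == '7274735f6f745f68736168'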