Example No. 1
    def backup_blocks(self, raw_blocks):
        """Backup the raw blocks and flush.

        The blocks should be in order of decreasing height, starting at
        self.height.  A flush is performed once the blocks are backed up.
        """
        self.db.assert_flushed(self.flush_data())
        assert self.height >= len(raw_blocks)

        coin = self.coin
        for raw_block in raw_blocks:
            # Check and update self.tip
            block = coin.block(raw_block, self.height)
            header_hash = coin.header_hash(block.header)
            if header_hash != self.tip:
                raise ChainError(
                    'backup block {} not tip {} at height {:,d}'.format(
                        hash_to_hex_str(header_hash),
                        hash_to_hex_str(self.tip), self.height))
            self.tip = coin.header_prevhash(block.header)
            self.backup_txs(block.transactions)
            self.height -= 1
            self.db.tx_counts.pop()

        self.logger.info(f'backed up to height {self.height:,d}')
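
The coin.header_hash and coin.header_prevhash calls above, and the hash_to_hex_str helper used throughout these examples, reduce to a few lines for Bitcoin-family coins. A minimal sketch, assuming a standard 80-byte header layout (the actual coin classes generalize this):

import hashlib
from binascii import hexlify

def header_hash(header: bytes) -> bytes:
    # Bitcoin-family coins hash headers with double SHA-256
    return hashlib.sha256(hashlib.sha256(header).digest()).digest()

def header_prevhash(header: bytes) -> bytes:
    # The previous block hash occupies bytes 4:36 of the header
    return header[4:36]

def hash_to_hex_str(x: bytes) -> str:
    # Hashes are displayed as byte-reversed hex, per Bitcoin convention
    return hexlify(x[::-1]).decode()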
Example No. 2
    def spend_utxo(self, tx_hash, tx_idx):
        """Spend a UTXO and return the 33-byte value.

        If the UTXO is not in the cache it must be on disk.  We store
        all UTXOs so not finding one indicates a logic error or DB
        corruption.
        """
        # Fast track: the UTXO is still in the cache
        idx_packed = pack('<H', tx_idx)
        cache_value = self.utxo_cache.pop(tx_hash + idx_packed, None)
        if cache_value:
            return cache_value

        # Spend it from the DB.

        # Key: b'h' + compressed_tx_hash + tx_idx + tx_num
        # Value: hashX
        prefix = b'h' + tx_hash[:4] + idx_packed
        candidates = {
            db_key: hashX
            for db_key, hashX in self.db.utxo_db.iterator(prefix=prefix)
        }
        for hdb_key, hashX in candidates.items():
            tx_num_packed = hdb_key[-4:]
            if len(candidates) > 1:
                # Multiple entries match the 4-byte compressed tx hash;
                # resolve the full transaction hash to pick the right one.
                tx_num, = unpack('<I', tx_num_packed)
                try:
                    hash, height = self.db.fs_tx_hash(tx_num)
                except IndexError:
                    self.logger.error(
                        "data integrity error for hashx history: %s missing tx #%s (%s:%s)",
                        hashX.hex(), tx_num, hash_to_hex_str(tx_hash), tx_idx)
                    continue
                if hash != tx_hash:
                    assert hash is not None  # Should always be found
                    continue

            # Key: b'u' + address_hashX + tx_idx + tx_num
            # Value: the UTXO value as a 64-bit unsigned integer
            udb_key = b'u' + hashX + hdb_key[-6:]
            utxo_value_packed = self.db.utxo_db.get(udb_key)
            if utxo_value_packed is None:
                self.logger.warning("%s:%s is not found in UTXO db for %s",
                                    hash_to_hex_str(tx_hash), tx_idx,
                                    hash_to_hex_str(hashX))
                raise ChainError(
                    f"{hash_to_hex_str(tx_hash)}:{tx_idx} is not found in UTXO db for {hash_to_hex_str(hashX)}"
                )
            # Remove both entries for this UTXO
            self.db_deletes.append(hdb_key)
            self.db_deletes.append(udb_key)
            return hashX + tx_num_packed + utxo_value_packed

        self.logger.error(
            f'UTXO {hash_to_hex_str(tx_hash)} / {tx_idx} not found in "h" table'
        )
        raise ChainError('UTXO {} / {:,d} not found in "h" table'.format(
            hash_to_hex_str(tx_hash), tx_idx))
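
The two key layouts documented in the comments can be sketched directly. Hypothetical helpers, assuming an 11-byte hashX as in ElectrumX-derived code; note the 'h' key compresses the tx hash to its first 4 bytes, which is why the loop above must disambiguate collisions:

from struct import pack

HASHX_LEN = 11  # assumption: ElectrumX-style truncated script hash

def h_key(tx_hash: bytes, tx_idx: int, tx_num: int) -> bytes:
    # b'h' + compressed_tx_hash (4) + tx_idx (LE uint16) + tx_num (LE uint32)
    return b'h' + tx_hash[:4] + pack('<H', tx_idx) + pack('<I', tx_num)

def u_key(hashX: bytes, tx_idx: int, tx_num: int) -> bytes:
    # b'u' + hashX + the same 6 trailing bytes as the matching 'h' key
    return b'u' + hashX + pack('<H', tx_idx) + pack('<I', tx_num)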
Example No. 3
    def electrum_header(cls, header, height):
        h = dict(zip(cls.HEADER_VALUES, cls.HEADER_UNPACK(header)))
        # Add the height, which is not present in the header itself
        h['block_height'] = height
        # Convert bytes to str
        h['prev_block_hash'] = hash_to_hex_str(h['prev_block_hash'])
        h['merkle_root'] = hash_to_hex_str(h['merkle_root'])
        return h
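
HEADER_VALUES and HEADER_UNPACK are not shown in the snippet; for a standard 80-byte Bitcoin header they would plausibly look like this (a sketch, not the exact class attributes):

from struct import Struct

HEADER_VALUES = ('version', 'prev_block_hash', 'merkle_root',
                 'timestamp', 'bits', 'nonce')
HEADER_UNPACK = Struct('<I32s32sIII').unpack  # 4 + 32 + 32 + 4 + 4 + 4 = 80 bytes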
Example No. 4
    def electrum_header(cls, header, height):
        version, = struct.unpack('<I', header[:4])
        timestamp, bits, nonce = struct.unpack('<III', header[100:112])
        return {
            'version': version,
            'prev_block_hash': hash_to_hex_str(header[4:36]),
            'merkle_root': hash_to_hex_str(header[36:68]),
            'claim_trie_root': hash_to_hex_str(header[68:100]),
            'timestamp': timestamp,
            'bits': bits,
            'nonce': nonce,
            'block_height': height,
        }
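
This variant appears to be the LBRY header, which inserts a 32-byte claim_trie_root after the merkle root, giving a 112-byte header. A quick sanity check of the offsets used above (illustration only, with dummy field values):

import struct

header = (struct.pack('<I', 1)        # version         [0:4]
          + bytes(32)                 # prev_block_hash [4:36]
          + bytes(32)                 # merkle_root     [36:68]
          + bytes(32)                 # claim_trie_root [68:100]
          + struct.pack('<III', 1600000000, 0x1d00ffff, 42))  # timestamp, bits, nonce
assert len(header) == 112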
Example No. 5
    async def calc_reorg_range(self, count):
        """Calculate the reorg range"""
        def diff_pos(hashes1, hashes2):
            """Returns the index of the first difference in the hash lists.
            If both lists match, returns their length."""
            for n, (hash1, hash2) in enumerate(zip(hashes1, hashes2)):
                if hash1 != hash2:
                    return n
            return len(hashes1)

        if count is None:
            # A real reorg
            start = self.height - 1
            count = 1
            while start > 0:
                hashes = await self.db.fs_block_hashes(start, count)
                hex_hashes = [hash_to_hex_str(hash) for hash in hashes]
                d_hex_hashes = await self.daemon.block_hex_hashes(start, count)
                n = diff_pos(hex_hashes, d_hex_hashes)
                if n > 0:
                    start += n
                    break
                count = min(count * 2, start)
                start -= count

            count = (self.height - start) + 1
        else:
            start = (self.height - count) + 1

        return start, count
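
The search doubles count as it walks back from the tip, so a fork N blocks deep is found after O(log N) batched comparisons. A self-contained sketch of the same loop, with plain lists standing in for the DB and daemon:

def diff_pos(hashes1, hashes2):
    # Index of the first difference; the common length if none
    for n, (h1, h2) in enumerate(zip(hashes1, hashes2)):
        if h1 != h2:
            return n
    return len(hashes1)

# Our chain diverged from the daemon's chain at height 6
ours   = ['a', 'b', 'c', 'd', 'e', 'f', 'x', 'y']
theirs = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']

height = len(ours) - 1          # 7, the local tip
start, count = height - 1, 1
while start > 0:
    n = diff_pos(ours[start:start + count], theirs[start:start + count])
    if n > 0:
        start += n
        break
    count = min(count * 2, start)
    start -= count
count = (height - start) + 1
assert (start, count) == (6, 2)  # back up the blocks at heights 6 and 7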
Example No. 6
    async def reorg_chain(self, count=None):
        """Handle a chain reorganisation.

        Count is the number of blocks to simulate a reorg, or None for
        a real reorg."""
        if count is None:
            self.logger.info('chain reorg detected')
        else:
            self.logger.info(f'faking a reorg of {count:,d} blocks')
        await self.flush(True)

        async def get_raw_blocks(last_height, hex_hashes):
            heights = range(last_height, last_height - len(hex_hashes), -1)
            try:
                blocks = [self.db.read_raw_block(height) for height in heights]
                self.logger.info(f'read {len(blocks)} blocks from disk')
                return blocks
            except FileNotFoundError:
                return await self.daemon.raw_blocks(hex_hashes)

        def flush_backup():
            # self.touched can include other addresses which is
            # harmless, but remove None.
            self.touched.discard(None)
            self.db.flush_backup(self.flush_data(), self.touched)

        start, last, hashes = await self.reorg_hashes(count)
        # Reverse and convert to hex strings.
        hashes = [hash_to_hex_str(hash) for hash in reversed(hashes)]
        for hex_hashes in chunks(hashes, 50):
            raw_blocks = await get_raw_blocks(last, hex_hashes)
            await self.run_in_thread_with_lock(self.backup_blocks, raw_blocks)
            await self.run_in_thread_with_lock(flush_backup)
            last -= len(raw_blocks)
        await self.prefetcher.reset_height(self.height)
Example No. 7
    def _compact_hashX(self, hashX, hist_map, hist_list, write_items,
                       keys_to_delete):
        """Compress history for a hashX.  hist_list is an ordered list of
        the histories to be compressed."""
        # History entries (tx numbers) are 4 bytes each.  Distribute
        # over rows of up to 50KB in size.  A fixed row size means
        # future compactions will not need to update the first N - 1
        # rows.
        max_row_size = self.max_hist_row_entries * 4
        full_hist = b''.join(hist_list)
        nrows = (len(full_hist) + max_row_size - 1) // max_row_size
        if nrows > 4:
            self.logger.info('hashX {} is large: {:,d} entries across '
                             '{:,d} rows'.format(hash_to_hex_str(hashX),
                                                 len(full_hist) // 4, nrows))

        # Find what history needs to be written, and what keys need to
        # be deleted.  Start by assuming all keys are to be deleted,
        # and then remove those that are the same on-disk as when
        # compacted.
        write_size = 0
        keys_to_delete.update(hist_map)
        for n, chunk in enumerate(util.chunks(full_hist, max_row_size)):
            key = hashX + pack_be_uint16(n)
            if hist_map.get(key) == chunk:
                keys_to_delete.remove(key)
            else:
                write_items.append((key, chunk))
                write_size += len(chunk)

        assert n + 1 == nrows
        self.comp_flush_count = max(self.comp_flush_count, n)

        return write_size
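
The row-splitting scheme is easy to see in isolation. A sketch with tiny rows (the real code uses up to ~12,500 four-byte entries, i.e. 50KB, per row):

from struct import pack

def chunks(items, size):
    # Yield consecutive slices of the given size
    for i in range(0, len(items), size):
        yield items[i:i + size]

hashX = bytes(11)                 # hypothetical 11-byte hashX
max_row_entries = 3               # tiny rows for illustration
full_hist = b''.join(pack('<I', tx_num) for tx_num in range(8))

rows = {hashX + pack('>H', n): chunk   # pack('>H', n) ~ pack_be_uint16(n)
        for n, chunk in enumerate(chunks(full_hist, max_row_entries * 4))}
assert [len(c) // 4 for c in rows.values()] == [3, 3, 2]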
Example No. 8
    async def tx_merkle(self, tx_num, tx_height):
        if tx_height == -1:
            return {'block_height': -1}
        tx_counts = self.tx_counts
        tx_pos = tx_num - tx_counts[tx_height - 1]

        def _update_block_txs_cache():
            block_txs = list(self.tx_db.iterator(
                start=TX_HASH_PREFIX + util.pack_be_uint64(tx_counts[tx_height - 1]),
                stop=None if tx_height + 1 == len(tx_counts)
                else TX_HASH_PREFIX + util.pack_be_uint64(tx_counts[tx_height]),
                include_key=False))
            # Blocks within 100 of the tip may still reorg; return them
            # directly instead of caching.
            if tx_height + 100 > self.db_height:
                return block_txs
            self._block_txs_cache[tx_height] = block_txs

        uncached = None
        if (tx_num, tx_height) in self._merkle_tx_cache:
            return self._merkle_tx_cache[(tx_num, tx_height)]
        if tx_height not in self._block_txs_cache:
            uncached = await asyncio.get_event_loop().run_in_executor(
                self.executor, _update_block_txs_cache)
        block_txs = self._block_txs_cache.get(tx_height, uncached)
        branch, root = self.merkle.branch_and_root(block_txs, tx_pos)
        merkle = {
            'block_height': tx_height,
            'merkle': [hash_to_hex_str(hash) for hash in branch],
            'pos': tx_pos
        }
        if tx_height + 100 < self.db_height:
            self._merkle_tx_cache[(tx_num, tx_height)] = merkle
        return merkle
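
merkle.branch_and_root is a library call; for intuition, here is a minimal sketch of what it computes, assuming Bitcoin-style double-SHA256 pairing with duplication of the last hash at odd levels (the real Merkle class is more general):

import hashlib

def double_sha256(x: bytes) -> bytes:
    return hashlib.sha256(hashlib.sha256(x).digest()).digest()

def branch_and_root(hashes, index):
    # Return the sibling path for the leaf at `index` and the merkle root
    hashes = list(hashes)
    branch = []
    while len(hashes) > 1:
        if len(hashes) & 1:
            hashes.append(hashes[-1])
        branch.append(hashes[index ^ 1])
        index >>= 1
        hashes = [double_sha256(hashes[i] + hashes[i + 1])
                  for i in range(0, len(hashes), 2)]
    return branch, hashes[0]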
Example No. 9
    def genesis_block(cls, block):
        """Check the Genesis block is the right one for this coin.

        Return the block less its unspendable coinbase.
        """
        header = cls.block_header(block, 0)
        header_hex_hash = hash_to_hex_str(cls.header_hash(header))
        if header_hex_hash != cls.GENESIS_HASH:
            raise CoinError(
                f'genesis block has hash {header_hex_hash} expected {cls.GENESIS_HASH}'
            )

        return header + bytes(1)
Example No. 10
    async def _fetch_and_accept(self, hashes, all_hashes, touched):
        """Fetch a list of mempool transactions."""
        hex_hashes_iter = (hash_to_hex_str(hash) for hash in hashes)
        raw_txs = await self.api.raw_transactions(hex_hashes_iter)

        def deserialize_txs():  # This function is pure
            to_hashX = self.coin.hashX_from_script
            deserializer = self.coin.DESERIALIZER

            txs = {}
            for hash, raw_tx in zip(hashes, raw_txs):
                # The daemon may have evicted the tx from its
                # mempool or it may have gotten in a block
                if not raw_tx:
                    continue
                tx, tx_size = deserializer(raw_tx).read_tx_and_vsize()
                # Convert the inputs and outputs into (hashX, value) pairs
                # Drop generation-like inputs from MemPoolTx.prevouts
                txin_pairs = tuple((txin.prev_hash, txin.prev_idx)
                                   for txin in tx.inputs
                                   if not txin.is_generation())
                txout_pairs = tuple((to_hashX(txout.pk_script), txout.value)
                                    for txout in tx.outputs)
                txs[hash] = MemPoolTx(txin_pairs, None, txout_pairs, 0,
                                      tx_size)
            return txs

        # Thread this potentially slow operation so as not to block
        tx_map = await asyncio.get_event_loop().run_in_executor(
            self.executor, deserialize_txs)

        # Determine all prevouts not in the mempool, and fetch the
        # UTXO information from the database.  Failed prevout lookups
        # return None (concurrent database updates can happen), which
        # _accept_transactions relies upon.  Ignore prevouts that are
        # generation-like.
        prevouts = tuple(prevout for tx in tx_map.values()
                         for prevout in tx.prevouts
                         if prevout[0] not in all_hashes)
        utxos = await self.api.lookup_utxos(prevouts)
        utxo_map = {prevout: utxo for prevout, utxo in zip(prevouts, utxos)}

        return self._accept_transactions(tx_map, utxo_map, touched)
Example No. 11
    def _fs_transactions(self, txids: Iterable[str]):
        unpack_be_uint64 = util.unpack_be_uint64
        tx_counts = self.tx_counts
        tx_db_get = self.tx_db.get
        tx_cache = self._tx_and_merkle_cache

        tx_infos = {}

        for tx_hash in txids:
            cached_tx = tx_cache.get(tx_hash)
            if cached_tx:
                tx, merkle = cached_tx
            else:
                tx_hash_bytes = bytes.fromhex(tx_hash)[::-1]
                tx_num = tx_db_get(TX_NUM_PREFIX + tx_hash_bytes)
                tx = None
                tx_height = -1
                if tx_num is not None:
                    tx_num = unpack_be_uint64(tx_num)
                    tx_height = bisect_right(tx_counts, tx_num)
                    if tx_height < self.db_height:
                        tx = tx_db_get(TX_PREFIX + tx_hash_bytes)
                if tx_height == -1:
                    merkle = {
                        'block_height': -1
                    }
                else:
                    tx_pos = tx_num - tx_counts[tx_height - 1]
                    branch, root = self.merkle.branch_and_root(
                        self.total_transactions[tx_counts[tx_height - 1]:tx_counts[tx_height]], tx_pos
                    )
                    merkle = {
                        'block_height': tx_height,
                        'merkle': [
                            hash_to_hex_str(hash)
                            for hash in branch
                        ],
                        'pos': tx_pos
                    }
                if tx_height + 10 < self.db_height:
                    tx_cache[tx_hash] = tx, merkle
            tx_infos[tx_hash] = (None if not tx else tx.hex(), merkle)
        return tx_infos
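
Both this method and tx_merkle above recover a transaction's height and in-block position from the cumulative tx_counts list with bisect_right. A worked example of the arithmetic:

from bisect import bisect_right

# tx_counts[h] = total transactions in blocks 0..h (cumulative)
tx_counts = [1, 3, 7, 12]    # blocks holding 1, 2, 4 and 5 txs

tx_num = 8                   # global transaction number
tx_height = bisect_right(tx_counts, tx_num)
tx_pos = tx_num - tx_counts[tx_height - 1]
assert (tx_height, tx_pos) == (3, 1)   # the second tx of block 3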
Example No. 12
    def __str__(self):
        script = self.script.hex()
        prev_hash = hash_to_hex_str(self.prev_hash)
        return (
            f"Input({prev_hash}, {self.prev_idx:d}, script={script}, sequence={self.sequence:d})"
        )
Example No. 13
    def __str__(self):
        prev_hash = hash_to_hex_str(self.prev_hash)
        return (
            f"Input({prev_hash}, {self.prev_idx:d}, tree={self.tree}, sequence={self.sequence:d})"
        )