コード例 #1
0
 def _update_block_txs_cache():
     """Read one block's tx hashes from the DB, caching them when the
     block is old enough, and return them (only on the uncached path)."""
     first_tx_num = tx_counts[tx_height - 1]
     start_key = TX_HASH_PREFIX + util.pack_be_uint64(first_tx_num)
     if tx_height + 1 == len(tx_counts):
         # Last known block: iterate to the end of the key space.
         stop_key = None
     else:
         stop_key = TX_HASH_PREFIX + util.pack_be_uint64(tx_counts[tx_height])
     block_txs = list(self.tx_db.iterator(
         start=start_key, stop=stop_key, include_key=False))
     # Blocks within 100 of the DB tip are returned without caching —
     # presumably because they may still be reorganised; confirm intent.
     if tx_height + 100 > self.db_height:
         return block_txs
     self._block_txs_cache[tx_height] = block_txs
     # NOTE(review): the caching path falls through and returns None —
     # verify callers read from the cache in that case.
コード例 #2
0
 def read_headers():
     """Return (raw concatenated headers, count of headers read) from disk.

     Reads at most `count` headers starting at `start_height`, clamped
     to what the DB actually holds."""
     disk_count = max(0, min(count, self.db_height + 1 - start_height))
     if not disk_count:
         return b'', 0
     start_key = HEADER_PREFIX + util.pack_be_uint64(start_height)
     stop_key = HEADER_PREFIX + util.pack_be_uint64(start_height + disk_count)
     raw_headers = self.headers_db.iterator(
         start=start_key, stop=stop_key, include_key=False)
     return b''.join(raw_headers), disk_count
コード例 #3
0
    def flush_fs(self, flush_data):
        """Write headers, tx counts and block tx hashes to the filesystem.

        The first height to write is self.fs_height + 1.  The FS
        metadata is all append-only, so in a crash we just pick up
        again from the height stored in the DB.
        """
        prior_tx_count = (self.tx_counts[self.fs_height]
                          if self.fs_height >= 0 else 0)
        # Sanity checks: the flush must extend the current FS state exactly,
        # and the concatenated hashes must account for every new tx (32
        # bytes per hash).
        assert len(flush_data.block_tx_hashes) == len(flush_data.headers)
        assert flush_data.height == self.fs_height + len(flush_data.headers)
        assert flush_data.tx_count == (self.tx_counts[-1]
                                       if self.tx_counts else 0)
        assert len(self.tx_counts) == flush_data.height + 1
        assert len(b''.join(flush_data.block_tx_hashes)
                   ) // 32 == flush_data.tx_count - prior_tx_count

        # Write the headers, tx counts, and tx hashes
        start_time = time.perf_counter()
        first_height = self.fs_height + 1
        tx_num = prior_tx_count

        for i, (header, tx_hashes) in enumerate(
                zip(flush_data.headers, flush_data.block_tx_hashes)):
            height = first_height + i
            height_key = util.pack_be_uint64(height)
            self.headers_db.put(HEADER_PREFIX + height_key, header)
            self.tx_count_db.put(
                TX_COUNT_PREFIX + height_key,
                util.pack_be_uint64(self.tx_counts[height]))
            # One 32-byte hash per tx, keyed by its global tx number.
            for offset in range(0, len(tx_hashes), 32):
                self.hashes_db.put(
                    TX_HASH_PREFIX + util.pack_be_uint64(tx_num),
                    tx_hashes[offset:offset + 32])
                tx_num += 1

        flush_data.block_tx_hashes.clear()
        self.fs_height = flush_data.height
        self.fs_tx_count = flush_data.tx_count
        flush_data.headers.clear()
        elapsed = time.perf_counter() - start_time
        self.logger.info(f'flushed filesystem data in {elapsed:.2f}s')
コード例 #4
0
    def fs_tx_hash(self, tx_num):
        """Return a pair (tx_hash, tx_height) for the given tx number.

        If the tx_height is not on disk, returns (None, tx_height)."""
        # bisect_right locates the first block whose cumulative tx count
        # exceeds tx_num, i.e. the block that contains this tx.
        tx_height = bisect_right(self.tx_counts, tx_num)
        if tx_height <= self.db_height:
            key = TX_HASH_PREFIX + util.pack_be_uint64(tx_num)
            return self.hashes_db.get(key), tx_height
        return None, tx_height
コード例 #5
0
 def read_history():
     """Collect (tx_hash, tx_height) pairs for hashX's history, stopping
     early once `limit` entries have been gathered (if limit is truthy)."""
     hashx_history = []
     for key, hist in self.history.db.iterator(prefix=hashX):
         # History values are packed arrays of unsigned 32-bit tx numbers.
         tx_nums = array.array('I')
         tx_nums.frombytes(hist)
         for tx_num in tx_nums:
             tx_height = bisect_right(self.tx_counts, tx_num)
             if tx_height <= self.db_height:
                 tx_hash = self.hashes_db.get(
                     TX_HASH_PREFIX + util.pack_be_uint64(tx_num))
             else:
                 # Height beyond what is on disk: no hash available.
                 tx_hash = None
             hashx_history.append((tx_hash, tx_height))
             if limit and len(hashx_history) >= limit:
                 return hashx_history
     return hashx_history
コード例 #6
0
    def flush_fs(self, flush_data):
        """Write headers, tx counts and block tx hashes to the filesystem.

        The first height to write is self.fs_height + 1.  The FS
        metadata is all append-only, so in a crash we just pick up
        again from the height stored in the DB.
        """
        prior_tx_count = (self.tx_counts[self.fs_height]
                          if self.fs_height >= 0 else 0)
        # Sanity checks: the flush must extend the current FS state exactly,
        # and the concatenated hashes must account for every new tx.
        assert len(flush_data.block_txs) == len(flush_data.headers)
        assert flush_data.height == self.fs_height + len(flush_data.headers)
        assert flush_data.tx_count == (self.tx_counts[-1] if self.tx_counts
                                       else 0)
        assert len(self.tx_counts) == flush_data.height + 1
        assert len(
            b''.join(hashes for hashes, _ in flush_data.block_txs)
        ) // 32 == flush_data.tx_count - prior_tx_count

        # Write the headers
        start_time = time.perf_counter()

        # Headers go in one batch; heights continue from fs_height + 1.
        with self.headers_db.write_batch() as batch:
            put = batch.put
            for h, header in enumerate(flush_data.headers,
                                       start=self.fs_height + 1):
                put(HEADER_PREFIX + util.pack_be_uint64(h), header)
                self.headers.append(header)
        flush_data.headers.clear()

        height = self.fs_height + 1
        tx_num = prior_tx_count

        with self.tx_db.write_batch() as batch:
            put = batch.put
            for block_hash, (tx_hashes, txs) in zip(flush_data.block_hashes,
                                                    flush_data.block_txs):
                height_key = util.pack_be_uint64(height)
                # The block hash is stored byte-reversed.
                put(BLOCK_HASH_PREFIX + height_key, block_hash[::-1])
                put(TX_COUNT_PREFIX + height_key,
                    util.pack_be_uint64(self.tx_counts[height]))
                height += 1
                # Three keyings per tx: num -> hash, hash -> num,
                # hash -> raw tx.
                for n, offset in enumerate(range(0, len(tx_hashes), 32)):
                    tx_hash = tx_hashes[offset:offset + 32]
                    tx_key = util.pack_be_uint64(tx_num)
                    put(TX_HASH_PREFIX + tx_key, tx_hash)
                    put(TX_NUM_PREFIX + tx_hash, tx_key)
                    put(TX_PREFIX + tx_hash, txs[n])
                    tx_num += 1

        flush_data.block_txs.clear()
        flush_data.block_hashes.clear()

        self.fs_height = flush_data.height
        self.fs_tx_count = flush_data.tx_count
        elapsed = time.perf_counter() - start_time
        self.logger.info(f'flushed filesystem data in {elapsed:.2f}s')