def __init__(self, env):
    self.logger = util.class_logger(__name__, self.__class__.__name__)
    self.env = env
    self.coin = env.coin

    self.logger.info(f'switching current directory to {env.db_dir}')
    os.chdir(env.db_dir)

    self.db_class = db_class(self.env.db_engine)
    self.history = History()
    self.utxo_db = None
    self.utxo_flush_count = 0
    self.fs_height = -1
    self.fs_tx_count = 0
    self.db_height = -1
    self.db_tx_count = 0
    self.db_tip = None
    self.tx_counts = None
    self.last_flush = time.time()
    self.last_flush_tx_count = 0
    self.wall_time = 0
    self.first_sync = True
    self.db_version = -1

    self.logger.info(f'using {self.env.db_engine} for DB backend')

    # Header merkle cache
    self.merkle = Merkle()
    self.header_mc = MerkleCache(self.merkle, self.fs_block_hashes)

    self.headers_file = util.LogicalFile('meta/headers', 2, 16000000)
    self.tx_counts_file = util.LogicalFile('meta/txcounts', 2, 2000000)
    self.hashes_file = util.LogicalFile('meta/hashes', 4, 16000000)

def __init__(self, env: 'Env'):
    self.logger = util.class_logger(__name__, self.__class__.__name__)
    self.env = env
    self.coin = env.coin

    # Setup block header size handlers
    if self.coin.STATIC_BLOCK_HEADERS:
        self.header_offset = self.coin.static_header_offset
        self.header_len = self.coin.static_header_len
    else:
        self.header_offset = self.dynamic_header_offset
        self.header_len = self.dynamic_header_len

    self.logger.info(f'switching current directory to {env.db_dir}')
    os.chdir(env.db_dir)

    self.db_class = db_class(self.env.db_engine)
    self.history = History()

    # Key: b'u' + address_hashX + txout_idx + tx_num
    # Value: the UTXO value as a 64-bit unsigned integer (in satoshis)
    # "at address, at outpoint, there is a UTXO of value v"
    # ---
    # Key: b'h' + compressed_tx_hash + txout_idx + tx_num
    # Value: hashX
    # "some outpoint created a UTXO at address"
    # ---
    # Key: b'U' + block_height
    # Value: byte-concat list of (hashX + tx_num + value_sats)
    # "undo data: list of UTXOs spent at block height"
    self.utxo_db = None

    self.utxo_flush_count = 0
    self.fs_height = -1
    self.fs_tx_count = 0
    self.db_height = -1
    self.db_tx_count = 0
    self.db_tip = None  # type: Optional[bytes]
    self.tx_counts = None
    self.last_flush = time.time()
    self.last_flush_tx_count = 0
    self.wall_time = 0
    self.first_sync = True
    self.db_version = -1

    self.logger.info(f'using {self.env.db_engine} for DB backend')

    # Header merkle cache
    self.merkle = Merkle()
    self.header_mc = MerkleCache(self.merkle, self.fs_block_hashes)

    # on-disk: raw block headers in chain order
    self.headers_file = util.LogicalFile('meta/headers', 2, 16000000)
    # on-disk: cumulative number of txs at the end of height N
    self.tx_counts_file = util.LogicalFile('meta/txcounts', 2, 2000000)
    # on-disk: 32 byte txids in chain order, allows (tx_num -> txid) map
    self.hashes_file = util.LogicalFile('meta/hashes', 4, 16000000)
    if not self.coin.STATIC_BLOCK_HEADERS:
        self.headers_offsets_file = util.LogicalFile(
            'meta/headers_offsets', 2, 16000000)

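# A minimal standalone sketch (not part of the class above) of how the b'u'
# key/value layout described in the comments could be packed and unpacked.
# The field widths used here - an 11-byte hashX, 4-byte txout_idx, 4-byte
# tx_num and an 8-byte value - are illustrative assumptions, not the exact
# on-disk widths.
import struct

HASHX_LEN = 11  # assumed width of address_hashX

def pack_utxo(hashX: bytes, txout_idx: int, tx_num: int, value_sats: int):
    # key: prefix byte + hashX + little-endian txout_idx and tx_num
    assert len(hashX) == HASHX_LEN
    key = b'u' + hashX + struct.pack('<II', txout_idx, tx_num)
    # value: the UTXO amount as a 64-bit unsigned integer, in satoshis
    value = struct.pack('<Q', value_sats)
    return key, value

def unpack_utxo(key: bytes, value: bytes):
    assert key[:1] == b'u'
    hashX = key[1:1 + HASHX_LEN]
    txout_idx, tx_num = struct.unpack('<II', key[1 + HASHX_LEN:])
    (value_sats,) = struct.unpack('<Q', value)
    return hashX, txout_idx, tx_num, value_sats

# round-trip check
k, v = pack_utxo(b'\x01' * HASHX_LEN, 0, 42, 5000)
assert unpack_utxo(k, v) == (b'\x01' * HASHX_LEN, 0, 42, 5000)
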
def db(tmpdir, request):
    cwd = os.getcwd()
    os.chdir(str(tmpdir))
    db = db_class(request.param)("db", False)
    yield db
    os.chdir(cwd)
    db.close()

def __init__(self, env):
    self.logger = util.class_logger(__name__, self.__class__.__name__)
    self.env = env
    self.coin = env.coin

    # Setup block header size handlers
    if self.coin.STATIC_BLOCK_HEADERS:
        self.header_offset = self.coin.static_header_offset
        self.header_len = self.coin.static_header_len
    else:
        self.header_offset = self.dynamic_header_offset
        self.header_len = self.dynamic_header_len

    self.logger.info(f'switching current directory to {env.db_dir}')
    os.chdir(env.db_dir)

    self.db_class = db_class(self.env.db_engine)
    self.history = History()
    self.utxo_db = None
    self.tx_counts = None
    self.last_flush = time.time()

    self.logger.info(f'using {self.env.db_engine} for DB backend')

    # Header merkle cache
    self.merkle = Merkle()
    self.header_mc = MerkleCache(self.merkle, self.fs_block_hashes)

    self.headers_file = util.LogicalFile('meta/headers', 2, 16000000)
    self.tx_counts_file = util.LogicalFile('meta/txcounts', 2, 2000000)
    self.hashes_file = util.LogicalFile('meta/hashes', 4, 16000000)
    if not self.coin.STATIC_BLOCK_HEADERS:
        self.headers_offsets_file = util.LogicalFile(
            'meta/headers_offsets', 2, 16000000)

def db(tmpdir, request):
    cwd = os.getcwd()
    os.chdir(str(tmpdir))
    # Compare with `==`, not `is`: identity checks against string literals
    # are unreliable.  pytest.skip() raises Skipped itself, so no explicit
    # `raise` is needed.
    if request.param == 'skip':
        pytest.skip()
    db = db_class(request.param)("db", False)
    yield db
    os.chdir(cwd)
    db.close()

def db(tmpdir, request):
    cwd = os.getcwd()
    os.chdir(str(tmpdir))
    # request.param may be a bare engine name, or a mark-wrapped value
    # whose .args tuple carries the real name.
    try:
        name = request.param.args[0]
    except AttributeError:
        name = request.param
    db = db_class(name)("db", False)
    yield db
    os.chdir(cwd)
    db.close()

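# Why the try/except above: a plain string param has no `.args`, while a
# mark-wrapped param (e.g. the older pytest.mark.skipif(...)('engine')
# idiom) exposes the real value via its .args tuple.  A minimal
# self-contained demonstration, using a stand-in object rather than a real
# mark; the engine names are illustrative:
from types import SimpleNamespace

plain = 'leveldb'                              # bare engine name
wrapped = SimpleNamespace(args=('rocksdb',))   # stand-in for a marked param

for p in (plain, wrapped):
    try:
        name = p.args[0]    # marked params carry the name in .args
    except AttributeError:
        name = p            # plain strings are used directly
    print(name)             # -> leveldb, then rocksdb
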
def __init__(self, env):
    self.logger = util.class_logger(__name__, self.__class__.__name__)
    self.env = env
    self.coin = env.coin

    # Setup block header size handlers
    if self.coin.STATIC_BLOCK_HEADERS:
        self.header_offset = self.coin.static_header_offset
        self.header_len = self.coin.static_header_len
    else:
        self.header_offset = self.dynamic_header_offset
        self.header_len = self.dynamic_header_len

    self.logger.info(f'switching current directory to {env.db_dir}')
    os.chdir(env.db_dir)

    self.db_class = db_class(self.env.db_engine)
    self.history = History()
    self.eventlog = Eventlog()
    # {blockHash => [hashY_topic, ]}, for reorg_chain
    self.unflushed_hashYs = defaultdict(set)
    self.hashY_db = None
    self.utxo_db = None
    self.utxo_flush_count = 0
    self.fs_height = -1
    self.fs_tx_count = 0
    self.db_height = -1
    self.db_tx_count = 0
    self.db_tip = None
    self.tx_counts = None
    self.last_flush = time.time()
    self.last_flush_tx_count = 0
    self.wall_time = 0
    self.first_sync = True
    self.db_version = -1

    self.logger.info(f'using {self.env.db_engine} for DB backend')

    # Header merkle cache
    self.merkle = Merkle()
    self.header_mc = MerkleCache(self.merkle, self.fs_block_hashes)

    self.headers_file = util.LogicalFile('meta/headers', 2, 16000000)
    self.tx_counts_file = util.LogicalFile('meta/txcounts', 2, 2000000)
    self.hashes_file = util.LogicalFile('meta/hashes', 4, 16000000)
    if not self.coin.STATIC_BLOCK_HEADERS:
        self.headers_offsets_file = util.LogicalFile(
            'meta/headers_offsets', 2, 16000000)

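# A small sketch of the unflushed_hashYs bookkeeping above: topics touched
# by each block are grouped per block hash, so a reorg can find everything
# that needs undoing for a given block.  The hashes and topics below are
# made-up illustrative values.
from collections import defaultdict

unflushed_hashYs = defaultdict(set)
unflushed_hashYs[b'blockhash1'].add(b'topicA')
unflushed_hashYs[b'blockhash1'].add(b'topicB')
unflushed_hashYs[b'blockhash2'].add(b'topicA')

# When blockhash2 is reorged away, only its own topics are revisited:
assert unflushed_hashYs[b'blockhash2'] == {b'topicA'}
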
def __init__(self, env):
    self.logger = logging.getLogger(__name__)\
        .getChild(self.__class__.__name__)
    self.env = env
    self.coin = env.coin

    # Setup block header size handlers
    if self.coin.STATIC_BLOCK_HEADERS:
        self.header_offset = self.coin.static_header_offset
        self.header_len = self.coin.static_header_len
    else:
        self.header_offset = self.dynamic_header_offset
        self.header_len = self.dynamic_header_len

    self.logger.info('switching current directory to {}'.format(
        env.db_dir))
    os.chdir(env.db_dir)

    self.db_class = db_class(self.env.db_engine)
    self.logger.info('using {} for DB backend'.format(self.env.db_engine))
    self.history = History()
    self.utxo_db = None
    self.open_dbs()
    self.logger.info('reorg limit is {:,d} blocks'.format(
        self.env.reorg_limit))

    self.headers_file = util.LogicalFile('meta/headers', 2, 16000000)
    self.tx_counts_file = util.LogicalFile('meta/txcounts', 2, 2000000)
    self.hashes_file = util.LogicalFile('meta/hashes', 4, 16000000)
    if not self.coin.STATIC_BLOCK_HEADERS:
        self.headers_offsets_file = util.LogicalFile(
            'meta/headers_offsets', 2, 16000000)
        # Write the offset of the genesis block
        if self.headers_offsets_file.read(0, 8) != b'\x00' * 8:
            self.headers_offsets_file.write(0, b'\x00' * 8)

    # tx_counts[N] has the cumulative number of txs at the end of
    # height N.  So tx_counts[0] is 1 - the genesis coinbase
    size = (self.db_height + 1) * 4
    tx_counts = self.tx_counts_file.read(0, size)
    assert len(tx_counts) == size
    self.tx_counts = array.array('I', tx_counts)
    if self.tx_counts:
        assert self.db_tx_count == self.tx_counts[-1]
    else:
        assert self.db_tx_count == 0

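# A minimal sketch of the tx_counts invariant documented above: tx_counts[N]
# is the cumulative tx count at the end of height N, so block N contains the
# txs numbered tx_counts[N-1] .. tx_counts[N]-1 (block 0 starts at 0).  The
# counts below are made-up illustrative values.
import array

tx_counts = array.array('I', [1, 3, 7])   # heights 0..2

def tx_range(height):
    first = tx_counts[height - 1] if height > 0 else 0
    return first, tx_counts[height]       # half-open [first, last)

assert tx_range(0) == (0, 1)   # genesis: just the coinbase
assert tx_range(2) == (3, 7)   # block 2 holds tx_nums 3, 4, 5, 6
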
def test_close(db):
    db.put(b"a", b"b")
    db.close()
    db = db_class(db.__class__.__name__)("db", False)
    assert db.get(b"a") == b"b"