async def test_merkle_cache_truncation():
    max_length = 33
    source = Source(max_length).hashes
    for length in range(max_length - 2, max_length + 1):
        for trunc_length in range(1, 20, 3):
            cache = MerkleCache(merkle, source)
            await cache.initialize(length)
            cache.truncate(trunc_length)
            assert cache.length <= trunc_length
            for cp_length in range(1, length + 1, 3):
                cp_hashes = await source(0, cp_length)
                # All possible indices
                for index in range(cp_length):
                    # Compare correct answer with cache
                    branch, root = merkle.branch_and_root(cp_hashes, index)
                    branch2, root2 = await cache.branch_and_root(
                        cp_length, index)
                    assert branch == branch2
                    assert root == root2

    # Truncation is a no-op if longer
    cache = MerkleCache(merkle, source)
    await cache.initialize(10)
    level = cache.level.copy()
    for length in range(10, 13):
        cache.truncate(length)
        assert cache.level == level
        assert cache.length == 10
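These tests rely on a module-level merkle = Merkle() instance and a Source helper that serves deterministic hashes, neither of which is shown on this page. A minimal sketch of an async-style fixture that would satisfy the Source(n).hashes usage above; the hashing scheme and import paths are assumptions, not the original fixture:

from electrumx.lib.hash import double_sha256
from electrumx.lib.merkle import Merkle, MerkleCache

merkle = Merkle()


class Source:
    '''Serves a fixed catalogue of deterministic 32-byte hashes.'''

    def __init__(self, length):
        # One hash per position; any deterministic scheme works for the tests.
        self._hashes = [double_sha256(str(n).encode()) for n in range(length)]

    async def hashes(self, index, count):
        # Return `count` hashes starting at `index`, like a block-hash source.
        return self._hashes[index:index + count]

The synchronous snippets further down (those calling cache.branch_and_root(...) without await) come from an older, synchronous version of the same API and would use a plain method instead.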
Example #2
def test_truncation_bad():
    cache = MerkleCache(merkle, Source(10), 10)
    with pytest.raises(TypeError):
        cache.truncate(1.0)
    for n in (-1, 0):
        with pytest.raises(ValueError):
            cache.truncate(n)
Example #3
    def __init__(self, env: 'Env'):
        self.logger = util.class_logger(__name__, self.__class__.__name__)
        self.env = env
        self.coin = env.coin

        # Setup block header size handlers
        if self.coin.STATIC_BLOCK_HEADERS:
            self.header_offset = self.coin.static_header_offset
            self.header_len = self.coin.static_header_len
        else:
            self.header_offset = self.dynamic_header_offset
            self.header_len = self.dynamic_header_len

        self.logger.info(f'switching current directory to {env.db_dir}')
        os.chdir(env.db_dir)

        self.db_class = db_class(self.env.db_engine)
        self.history = History()

        # Key: b'u' + address_hashX + txout_idx + tx_num
        # Value: the UTXO value as a 64-bit unsigned integer (in satoshis)
        # "at address, at outpoint, there is a UTXO of value v"
        # ---
        # Key: b'h' + compressed_tx_hash + txout_idx + tx_num
        # Value: hashX
        # "some outpoint created a UTXO at address"
        # ---
        # Key: b'U' + block_height
        # Value: byte-concat list of (hashX + tx_num + value_sats)
        # "undo data: list of UTXOs spent at block height"
        self.utxo_db = None

        self.utxo_flush_count = 0
        self.fs_height = -1
        self.fs_tx_count = 0
        self.db_height = -1
        self.db_tx_count = 0
        self.db_tip = None  # type: Optional[bytes]
        self.tx_counts = None
        self.last_flush = time.time()
        self.last_flush_tx_count = 0
        self.wall_time = 0
        self.first_sync = True
        self.db_version = -1

        self.logger.info(f'using {self.env.db_engine} for DB backend')

        # Header merkle cache
        self.merkle = Merkle()
        self.header_mc = MerkleCache(self.merkle, self.fs_block_hashes)

        # on-disk: raw block headers in chain order
        self.headers_file = util.LogicalFile('meta/headers', 2, 16000000)
        # on-disk: cumulative number of txs at the end of height N
        self.tx_counts_file = util.LogicalFile('meta/txcounts', 2, 2000000)
        # on-disk: 32 byte txids in chain order, allows (tx_num -> txid) map
        self.hashes_file = util.LogicalFile('meta/hashes', 4, 16000000)
        if not self.coin.STATIC_BLOCK_HEADERS:
            self.headers_offsets_file = util.LogicalFile(
                'meta/headers_offsets', 2, 16000000)
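The key/value comments in the __init__ above describe the UTXO index layout. A hypothetical packer for the b'u' entries, purely to illustrate that layout; the field widths used here (11-byte hashX, 4-byte little-endian txout_idx and tx_num, 8-byte value) are assumptions, since the real widths are defined elsewhere in the codebase:

import struct

HASHX_LEN = 11  # assumed width; the actual constant lives in the codebase


def pack_utxo_entry(hashX: bytes, txout_idx: int, tx_num: int, value_sats: int):
    '''Hypothetical b'u' key/value packer mirroring the comments above.'''
    assert len(hashX) == HASHX_LEN
    key = (b'u' + hashX
           + struct.pack('<I', txout_idx)
           + struct.pack('<I', tx_num))
    # Value: the UTXO value as a 64-bit unsigned integer (in satoshis)
    value = struct.pack('<Q', value_sats)
    return key, value

Putting hashX immediately after the prefix keeps all UTXOs of one address adjacent in the key space, so they can be enumerated with a single prefix scan.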
Example #4
    def __init__(self, env):
        self.logger = util.class_logger(__name__, self.__class__.__name__)
        self.env = env
        self.coin = env.coin

        # Setup block header size handlers
        if self.coin.STATIC_BLOCK_HEADERS:
            self.header_offset = self.coin.static_header_offset
            self.header_len = self.coin.static_header_len
        else:
            self.header_offset = self.dynamic_header_offset
            self.header_len = self.dynamic_header_len

        self.logger.info(f'switching current directory to {env.db_dir}')
        os.chdir(env.db_dir)

        self.db_class = db_class(self.env.db_engine)
        self.history = History()
        self.utxo_db = None
        self.tx_counts = None
        self.last_flush = time.time()

        self.logger.info(f'using {self.env.db_engine} for DB backend')

        # Header merkle cache
        self.merkle = Merkle()
        self.header_mc = MerkleCache(self.merkle, self.fs_block_hashes)

        self.headers_file = util.LogicalFile('meta/headers', 2, 16000000)
        self.tx_counts_file = util.LogicalFile('meta/txcounts', 2, 2000000)
        self.hashes_file = util.LogicalFile('meta/hashes', 4, 16000000)
        if not self.coin.STATIC_BLOCK_HEADERS:
            self.headers_offsets_file = util.LogicalFile(
                'meta/headers_offsets', 2, 16000000)
Example #5
    def __init__(self, env):
        self.logger = util.class_logger(__name__, self.__class__.__name__)
        self.env = env
        self.coin = env.coin

        self.logger.info(f'switching current directory to {env.db_dir}')
        os.chdir(env.db_dir)

        self.db_class = db_class(self.env.db_engine)
        self.history = History()
        self.utxo_db = None
        self.utxo_flush_count = 0
        self.fs_height = -1
        self.fs_tx_count = 0
        self.db_height = -1
        self.db_tx_count = 0
        self.db_tip = None
        self.tx_counts = None
        self.last_flush = time.time()
        self.last_flush_tx_count = 0
        self.wall_time = 0
        self.first_sync = True
        self.db_version = -1

        self.logger.info(f'using {self.env.db_engine} for DB backend')

        # Header merkle cache
        self.merkle = Merkle()
        self.header_mc = MerkleCache(self.merkle, self.fs_block_hashes)

        self.headers_file = util.LogicalFile('meta/headers', 2, 16000000)
        self.tx_counts_file = util.LogicalFile('meta/txcounts', 2, 2000000)
        self.hashes_file = util.LogicalFile('meta/hashes', 4, 16000000)
    async def catch_up_to_daemon(self):
        '''Process and index blocks until we catch up with the daemon.

        Returns once caught up.  Future blocks continue to be
        processed in a separate task.
        '''
        # Open the databases first.
        await self.open_for_sync()
        self._on_dbs_opened()
        # Get the prefetcher running
        self.tasks.create_task(self.prefetcher.main_loop())
        await self.prefetcher.reset_height(self.height)
        # Start our loop that processes blocks as they are fetched
        self.worker_task = self.tasks.create_task(self._process_queue())
        # Wait until caught up
        await self._caught_up_event.wait()
        # Flush everything but with first_sync->False state.
        first_sync = self.first_sync
        self.first_sync = False
        self.flush(True)
        if first_sync:
            self.logger.info(f'{electrumx.version} synced to '
                             f'height {self.height:,d}')
        # Reopen for serving
        await self.open_for_serving()

        # Populate the header merkle cache
        length = max(1, self.height - self.env.reorg_limit)
        self.header_mc = MerkleCache(self.merkle, HeaderSource(self), length)
        self.logger.info('populated header merkle cache')
async def test_truncation_bad():
    cache = MerkleCache(merkle, Source(10).hashes)
    await cache.initialize(10)
    with pytest.raises(TypeError):
        cache.truncate(1.0)
    for n in (-1, 0):
        with pytest.raises(ValueError):
            cache.truncate(n)
Example #8
def test_bad_extension():
    length = 5
    source = Source(length)
    cache = MerkleCache(merkle, source, length)
    level = cache.level.copy()
    with pytest.raises(AssertionError):
        cache.branch_and_root(8, 0)
    # The bad extension should not destroy the cache
    assert cache.level == level
    assert cache.length == length
Example #9
def test_merkle_cache_extension():
    source = Source(64)
    for length in range(14, 18):
        for cp_length in range(30, 36):
            cache = MerkleCache(merkle, source, length)
            cp_hashes = source.hashes(0, cp_length)
            # All possible indices
            for index in range(cp_length):
                # Compare correct answer with cache
                branch, root = merkle.branch_and_root(cp_hashes, index)
                branch2, root2 = cache.branch_and_root(cp_length, index)
                assert branch == branch2
                assert root == root2
Example #10
def time_it():
    source = Source(500000)
    import time
    cache = MerkleCache(merkle, source)
    cp_length = 492000
    cp_hashes = source.hashes(0, cp_length)
    brs2 = []
    t1 = time.time()
    for index in range(5, 400000, 500):
        brs2.append(cache.branch_and_root(cp_length, index))
    t2 = time.time()
    print(t2 - t1)
    assert False  # fail deliberately so pytest shows the timing output
Example #11
def test_merkle_cache_bad():
    length = 23
    source = Source(length)
    cache = MerkleCache(merkle, source, length)
    cache.branch_and_root(5, 3)
    with pytest.raises(TypeError):
        cache.branch_and_root(5.0, 3)
    with pytest.raises(TypeError):
        cache.branch_and_root(5, 3.0)
    with pytest.raises(ValueError):
        cache.branch_and_root(0, -1)
    with pytest.raises(ValueError):
        cache.branch_and_root(3, 3)
async def time_it():
    source = Source(500000).hashes
    cp_length = 492000
    import time
    cache = MerkleCache(merkle, source)
    await cache.initialize(cp_length)
    cp_hashes = await source(0, cp_length)
    brs2 = []
    t1 = time.monotonic()
    for index in range(5, 400000, 500):
        brs2.append(await cache.branch_and_root(cp_length, index))
    t2 = time.monotonic()
    print(t2 - t1)
    assert False  # fail deliberately so pytest shows the timing output
async def test_merkle_cache_bad():
    length = 23
    source = Source(length).hashes
    cache = MerkleCache(merkle, source)
    await cache.initialize(length)
    await cache.branch_and_root(5, 3)
    with pytest.raises(TypeError):
        await cache.branch_and_root(5.0, 3)
    with pytest.raises(TypeError):
        await cache.branch_and_root(5, 3.0)
    with pytest.raises(ValueError):
        await cache.branch_and_root(0, -1)
    with pytest.raises(ValueError):
        await cache.branch_and_root(3, 3)
Example #14
def test_merkle_cache():
    lengths = (*range(1, 18), 31, 32, 33, 57)
    source = Source(max(lengths))
    for length in lengths:
        cache = MerkleCache(merkle, source, length)
        # Simulate all possible checkpoints
        for cp_length in range(1, length + 1):
            cp_hashes = source.hashes(0, cp_length)
            # All possible indices
            for index in range(cp_length):
                # Compare correct answer with cache
                branch, root = merkle.branch_and_root(cp_hashes, index)
                branch2, root2 = cache.branch_and_root(cp_length, index)
                assert branch == branch2
                assert root == root2
Example #15
    def __init__(self, env):
        self.logger = util.class_logger(__name__, self.__class__.__name__)
        self.env = env
        self.coin = env.coin

        # Setup block header size handlers
        if self.coin.STATIC_BLOCK_HEADERS:
            self.header_offset = self.coin.static_header_offset
            self.header_len = self.coin.static_header_len
        else:
            self.header_offset = self.dynamic_header_offset
            self.header_len = self.dynamic_header_len

        self.logger.info(f'switching current directory to {env.db_dir}')
        os.chdir(env.db_dir)

        self.db_class = db_class(self.env.db_engine)
        self.history = History()
        self.eventlog = Eventlog()
        self.unflushed_hashYs = defaultdict(
            set)  # {blockHash => [hashY_topic, ]}, for reorg_chain
        self.hashY_db = None
        self.utxo_db = None
        self.utxo_flush_count = 0
        self.fs_height = -1
        self.fs_tx_count = 0
        self.db_height = -1
        self.db_tx_count = 0
        self.db_tip = None
        self.tx_counts = None
        self.last_flush = time.time()
        self.last_flush_tx_count = 0
        self.wall_time = 0
        self.first_sync = True
        self.db_version = -1

        self.logger.info(f'using {self.env.db_engine} for DB backend')

        # Header merkle cache
        self.merkle = Merkle()
        self.header_mc = MerkleCache(self.merkle, self.fs_block_hashes)

        self.headers_file = util.LogicalFile('meta/headers', 2, 16000000)
        self.tx_counts_file = util.LogicalFile('meta/txcounts', 2, 2000000)
        self.hashes_file = util.LogicalFile('meta/hashes', 4, 16000000)
        if not self.coin.STATIC_BLOCK_HEADERS:
            self.headers_offsets_file = util.LogicalFile(
                'meta/headers_offsets', 2, 16000000)
Example #16
    async def first_caught_up(self):
        '''Called when first caught up to daemon after starting.'''
        # Flush everything with updated first_sync->False state.
        self.first_sync = False
        await self.controller.run_in_executor(self.flush, True)
        if self.utxo_db.for_sync:
            self.logger.info(f'{electrumx.version} synced to '
                             f'height {self.height:,d}')
        self.open_dbs()
        self.logger.info(f'caught up to height {self.height:,d}')
        length = max(1, self.height - self.env.reorg_limit)
        self.header_mc = MerkleCache(self.merkle, HeaderSource(self), length)
        self.logger.info('populated header merkle cache')

        # Reorgs use header_mc so safest to set this after initializing it
        self.caught_up_event.set()
    async def _first_caught_up(self):
        self.logger.info(f'caught up to height {self.height}')
        # Flush everything but with first_sync->False state.
        first_sync = self.first_sync
        self.first_sync = False
        self.flush(True)
        if first_sync:
            self.logger.info(f'{electrumx.version} synced to '
                             f'height {self.height:,d}')
        # Initialise the notification framework
        await self.notifications.on_block(set(), set(), self.height)
        # Reopen for serving
        await self.open_for_serving()
        # Populate the header merkle cache
        length = max(1, self.height - self.env.reorg_limit)
        self.header_mc = MerkleCache(self.merkle, HeaderSource(self), length)
        self.logger.info('populated header merkle cache')