Example No. 1
async def mock_merkle_branch(height, tx_hashes, tx_pos, tsc_format):
    COST = 0.0008
    merkle = Merkle()
    branch, root = merkle.branch_and_root(tx_hashes, tx_pos, tsc_format=tsc_format)

    def converter(hash):
        # Pass the b"*" placeholder through as text; hex-encode real hashes.
        if hash == b"*":
            return hash.decode()
        else:
            return hash_to_hex_str(hash)

    branch = [converter(hash) for hash in branch]
    return branch, root, COST
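A quick sketch of how this mock might be driven, assuming mock_merkle_branch from Example No. 1 is in scope; double_sha256 and hash_to_hex_str come from electrumx.lib.hash, and the transaction hashes are fabricated for illustration:

import asyncio

from electrumx.lib.hash import double_sha256, hash_to_hex_str


async def demo():
    # Four fake transaction hashes; real ones would come from a block.
    tx_hashes = [double_sha256(bytes([n])) for n in range(4)]
    branch, root, cost = await mock_merkle_branch(
        height=0, tx_hashes=tx_hashes, tx_pos=2, tsc_format=False)
    print('root:', hash_to_hex_str(root))
    print('branch:', branch)  # two hex strings for four leaves

asyncio.run(demo())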
Example No. 2
    def __init__(self, env: 'Env'):
        self.logger = util.class_logger(__name__, self.__class__.__name__)
        self.env = env
        self.coin = env.coin

        # Setup block header size handlers
        if self.coin.STATIC_BLOCK_HEADERS:
            self.header_offset = self.coin.static_header_offset
            self.header_len = self.coin.static_header_len
        else:
            self.header_offset = self.dynamic_header_offset
            self.header_len = self.dynamic_header_len

        self.logger.info(f'switching current directory to {env.db_dir}')
        os.chdir(env.db_dir)

        self.db_class = db_class(self.env.db_engine)
        self.history = History()

        # Key: b'u' + address_hashX + txout_idx + tx_num
        # Value: the UTXO value as a 64-bit unsigned integer (in satoshis)
        # "at address, at outpoint, there is a UTXO of value v"
        # ---
        # Key: b'h' + compressed_tx_hash + txout_idx + tx_num
        # Value: hashX
        # "some outpoint created a UTXO at address"
        # ---
        # Key: b'U' + block_height
        # Value: byte-concat list of (hashX + tx_num + value_sats)
        # "undo data: list of UTXOs spent at block height"
        self.utxo_db = None

        self.utxo_flush_count = 0
        self.fs_height = -1
        self.fs_tx_count = 0
        self.db_height = -1
        self.db_tx_count = 0
        self.db_tip = None  # type: Optional[bytes]
        self.tx_counts = None
        self.last_flush = time.time()
        self.last_flush_tx_count = 0
        self.wall_time = 0
        self.first_sync = True
        self.db_version = -1

        self.logger.info(f'using {self.env.db_engine} for DB backend')

        # Header merkle cache
        self.merkle = Merkle()
        self.header_mc = MerkleCache(self.merkle, self.fs_block_hashes)

        # on-disk: raw block headers in chain order
        self.headers_file = util.LogicalFile('meta/headers', 2, 16000000)
        # on-disk: cumulative number of txs at the end of height N
        self.tx_counts_file = util.LogicalFile('meta/txcounts', 2, 2000000)
        # on-disk: 32 byte txids in chain order, allows (tx_num -> txid) map
        self.hashes_file = util.LogicalFile('meta/hashes', 4, 16000000)
        if not self.coin.STATIC_BLOCK_HEADERS:
            self.headers_offsets_file = util.LogicalFile(
                'meta/headers_offsets', 2, 16000000)
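The key layout documented in the comments above can be made concrete with struct. The field widths below (11-byte hashX, 4-byte little-endian output index, 5-byte transaction number, 8-byte little-endian value) follow ElectrumX's usual packing, but treat them as assumptions for this sketch:

from struct import pack, unpack

HASHX_LEN = 11  # assumed width of the truncated script hash (hashX)


def pack_utxo_key(hashX: bytes, txout_idx: int, tx_num: int) -> bytes:
    # b'u' + hashX + 4-byte LE output index + 5-byte LE tx number
    assert len(hashX) == HASHX_LEN
    return b'u' + hashX + pack('<I', txout_idx) + pack('<Q', tx_num)[:5]


def pack_utxo_value(value_sats: int) -> bytes:
    # the UTXO value as a 64-bit unsigned little-endian integer
    return pack('<Q', value_sats)


key = pack_utxo_key(b'\x00' * HASHX_LEN, txout_idx=1, tx_num=123456)
assert len(key) == 1 + 11 + 4 + 5
assert unpack('<Q', pack_utxo_value(50000))[0] == 50000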
Example No. 3
    def __init__(self, env):
        self.logger = util.class_logger(__name__, self.__class__.__name__)
        self.env = env
        self.coin = env.coin

        self.logger.info(f'switching current directory to {env.db_dir}')
        os.chdir(env.db_dir)

        self.db_class = db_class(self.env.db_engine)
        self.history = History()
        self.utxo_db = None
        self.utxo_flush_count = 0
        self.fs_height = -1
        self.fs_tx_count = 0
        self.db_height = -1
        self.db_tx_count = 0
        self.db_tip = None
        self.tx_counts = None
        self.last_flush = time.time()
        self.last_flush_tx_count = 0
        self.wall_time = 0
        self.first_sync = True
        self.db_version = -1

        self.logger.info(f'using {self.env.db_engine} for DB backend')

        # Header merkle cache
        self.merkle = Merkle()
        self.header_mc = MerkleCache(self.merkle, self.fs_block_hashes)

        self.headers_file = util.LogicalFile('meta/headers', 2, 16000000)
        self.tx_counts_file = util.LogicalFile('meta/txcounts', 2, 2000000)
        self.hashes_file = util.LogicalFile('meta/hashes', 4, 16000000)
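A hedged sketch of what these LogicalFile lines set up, assuming the constructor takes (filename prefix, digits in the numeric suffix, bytes per on-disk file) and that the class exposes read(start, size) and write(start, b) as in electrumx.lib.util:

import os
import tempfile

from electrumx.lib import util

os.chdir(tempfile.mkdtemp())
os.makedirs('meta', exist_ok=True)

# One logical byte stream split across meta/headers00, meta/headers01, ...
headers_file = util.LogicalFile('meta/headers', 2, 16000000)
headers_file.write(0, b'\x00' * 80)    # first 80-byte header
headers_file.write(80, b'\x01' * 80)   # second header, appended in order
assert headers_file.read(80, 80) == b'\x01' * 80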
Example No. 4
    def __init__(self, env):
        self.logger = util.class_logger(__name__, self.__class__.__name__)
        self.env = env
        self.coin = env.coin

        # Setup block header size handlers
        if self.coin.STATIC_BLOCK_HEADERS:
            self.header_offset = self.coin.static_header_offset
            self.header_len = self.coin.static_header_len
        else:
            self.header_offset = self.dynamic_header_offset
            self.header_len = self.dynamic_header_len

        self.logger.info(f'switching current directory to {env.db_dir}')
        os.chdir(env.db_dir)

        self.db_class = db_class(self.env.db_engine)
        self.history = History()
        self.utxo_db = None
        self.tx_counts = None
        self.last_flush = time.time()

        self.logger.info(f'using {self.env.db_engine} for DB backend')

        # Header merkle cache
        self.merkle = Merkle()
        self.header_mc = MerkleCache(self.merkle, self.fs_block_hashes)

        self.headers_file = util.LogicalFile('meta/headers', 2, 16000000)
        self.tx_counts_file = util.LogicalFile('meta/txcounts', 2, 2000000)
        self.hashes_file = util.LogicalFile('meta/hashes', 4, 16000000)
        if not self.coin.STATIC_BLOCK_HEADERS:
            self.headers_offsets_file = util.LogicalFile(
                'meta/headers_offsets', 2, 16000000)
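For coins with STATIC_BLOCK_HEADERS, the two handlers reduce to arithmetic on a fixed header size. A minimal sketch of what the static variants plausibly compute, using Bitcoin's 80-byte header for illustration:

BASIC_HEADER_SIZE = 80  # Bitcoin block header size in bytes


def static_header_offset(height: int) -> int:
    # Fixed-size headers: the file offset is just height * size.
    return height * BASIC_HEADER_SIZE


def static_header_len(height: int) -> int:
    return BASIC_HEADER_SIZE


assert static_header_offset(100) == 8000
assert static_header_len(100) == 80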
Example No. 5
    def __init__(self, env, tasks, daemon, notifications):
        super().__init__(env)

        self.tasks = tasks
        self.daemon = daemon
        self.notifications = notifications

        # Work queue
        self.queue = asyncio.Queue()
        self._caught_up_event = asyncio.Event()
        self.prefetcher = Prefetcher(daemon, env.coin, self.queue)

        # Meta
        self.cache_MB = env.cache_MB
        self.next_cache_check = 0
        self.last_flush = time.time()
        self.touched = set()

        # Header merkle cache
        self.merkle = Merkle()
        self.header_mc = None

        # Caches of unflushed items.
        self.headers = []
        self.tx_hashes = []
        self.undo_infos = []

        # UTXO cache
        self.utxo_cache = {}
        self.db_deletes = []

        # If the lock is successfully acquired, in-memory chain state
        # is consistent with self.height
        self.state_lock = asyncio.Lock()
        self.worker_task = None
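The queue, event, and lock wired up here implement a producer/consumer handoff between the Prefetcher and the block processor. A self-contained toy of that pattern (not ElectrumX's actual Prefetcher logic):

import asyncio


async def prefetcher(queue: asyncio.Queue):
    # Producer: fetch raw blocks ahead of the processor.
    for height in range(3):
        await queue.put(f'raw block {height}')
    await queue.put(None)  # sentinel: caught up


async def processor(queue: asyncio.Queue, caught_up: asyncio.Event):
    state_lock = asyncio.Lock()
    while True:
        block = await queue.get()
        if block is None:
            caught_up.set()
            break
        async with state_lock:  # chain state is consistent while held
            print('processing', block)


async def main():
    queue, caught_up = asyncio.Queue(), asyncio.Event()
    await asyncio.gather(prefetcher(queue), processor(queue, caught_up))
    assert caught_up.is_set()

asyncio.run(main())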
Example No. 6
    def __init__(self, env):
        self.logger = util.class_logger(__name__, self.__class__.__name__)
        self.env = env
        self.coin = env.coin

        # Setup block header size handlers
        if self.coin.STATIC_BLOCK_HEADERS:
            self.header_offset = self.coin.static_header_offset
            self.header_len = self.coin.static_header_len
        else:
            self.header_offset = self.dynamic_header_offset
            self.header_len = self.dynamic_header_len

        self.logger.info(f'switching current directory to {env.db_dir}')
        os.chdir(env.db_dir)

        self.db_class = db_class(self.env.db_engine)
        self.history = History()
        self.eventlog = Eventlog()
        self.unflushed_hashYs = defaultdict(
            set)  # {blockHash => [hashY_topic, ]}, for reorg_chain
        self.hashY_db = None
        self.utxo_db = None
        self.utxo_flush_count = 0
        self.fs_height = -1
        self.fs_tx_count = 0
        self.db_height = -1
        self.db_tx_count = 0
        self.db_tip = None
        self.tx_counts = None
        self.last_flush = time.time()
        self.last_flush_tx_count = 0
        self.wall_time = 0
        self.first_sync = True
        self.db_version = -1

        self.logger.info(f'using {self.env.db_engine} for DB backend')

        # Header merkle cache
        self.merkle = Merkle()
        self.header_mc = MerkleCache(self.merkle, self.fs_block_hashes)

        self.headers_file = util.LogicalFile('meta/headers', 2, 16000000)
        self.tx_counts_file = util.LogicalFile('meta/txcounts', 2, 2000000)
        self.hashes_file = util.LogicalFile('meta/hashes', 4, 16000000)
        if not self.coin.STATIC_BLOCK_HEADERS:
            self.headers_offsets_file = util.LogicalFile(
                'meta/headers_offsets', 2, 16000000)
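unflushed_hashYs keeps, per block hash, the set of event topics touched by that block, so a reorg can discard exactly the affected entries. A toy illustration of the bookkeeping (names hypothetical):

from collections import defaultdict

unflushed_hashYs = defaultdict(set)  # {blockHash => {hashY_topic, ...}}


def record_event(block_hash: bytes, hashY_topic: bytes):
    unflushed_hashYs[block_hash].add(hashY_topic)


def undo_block(block_hash: bytes):
    # On a reorg, drop everything recorded for the orphaned block.
    return unflushed_hashYs.pop(block_hash, set())


record_event(b'blk1', b'topic-a')
record_event(b'blk1', b'topic-b')
assert undo_block(b'blk1') == {b'topic-a', b'topic-b'}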
Example No. 7
    def __init__(self, env, tasks, daemon):
        super().__init__(env)

        # An incomplete compaction needs to be cancelled otherwise
        # restarting it will corrupt the history
        self.history.cancel_compaction()

        self.tasks = tasks
        self.daemon = daemon

        # These are our state as we move ahead of DB state
        self.fs_height = self.db_height
        self.fs_tx_count = self.db_tx_count
        self.height = self.db_height
        self.tip = self.db_tip
        self.tx_count = self.db_tx_count

        self.caught_up_event = asyncio.Event()
        self.task_queue = asyncio.Queue()

        # Meta
        self.cache_MB = env.cache_MB
        self.next_cache_check = 0
        self.last_flush = time.time()
        self.last_flush_tx_count = self.tx_count
        self.touched = set()
        self.callbacks = []

        # Header merkle cache
        self.merkle = Merkle()
        self.header_mc = None

        # Caches of unflushed items.
        self.headers = []
        self.tx_hashes = []
        self.undo_infos = []

        # UTXO cache
        self.utxo_cache = {}
        self.db_deletes = []

        self.prefetcher = Prefetcher(self)

        if self.utxo_db.for_sync:
            self.logger.info('flushing DB cache at {:,d} MB'
                             .format(self.cache_MB))
Example No. 8
    def __init__(self, env, daemon, notifications):
        super().__init__(env)

        self.daemon = daemon
        self.notifications = notifications

        self.blocks_event = asyncio.Event()
        self.prefetcher = Prefetcher(daemon, env.coin, self.blocks_event)

        # Meta
        self.cache_MB = env.cache_MB
        self.next_cache_check = 0
        self.last_flush = time.time()
        self.touched = set()
        self.reorg_count = 0

        # Header merkle cache
        self.merkle = Merkle()
        self.header_mc = None

        # Caches of unflushed items.
        self.headers = []
        self.tx_hashes = []
        self.undo_infos = []

        # UTXO cache
        self.utxo_cache = {}
        self.db_deletes = []

        # eventlog and hashY
        self.eventlog_touched = set()
        self.hashYs = defaultdict(
            set)  # {blockHash => [hashY, ]}, for reorg_chain

        # If the lock is successfully acquired, in-memory chain state
        # is consistent with self.height
        self.state_lock = asyncio.Lock()
Example No. 9
import os

import pytest

from electrumx.lib.merkle import Merkle, MerkleCache

merkle = Merkle()
hashes = [merkle.hash_func(bytes([x])) for x in range(8)]
roots = [
    b'\x14\x06\xe0X\x81\xe2\x996wf\xd3\x13\xe2l\x05VN\xc9\x1b\xf7!\xd3\x17&\xbdnF\xe6\x06\x89S\x9a',
    b'K\xbe\x83\xbc8\xeb\xe2\xbc\xc7R\r#A9\xdf\x1c\x0e\xb9\xff\xa5\x1f\x83\xea\xb1\xc5\x12\x9b[\x90kvU',
    b'\xe1)\xdf\xe0/V\x7f\xc6\x12\xd1&YmC@aD\xf4\nw\x18\x10\xacqCB\x1d-\xf3\xe5\xc1\xd0',
    b'\xe3/W\x01\xa0\x11Z+M\xc7/Rj\xf1aLY,\x19\xee\x95\xcf\xcb\x055\x96\x1e\x07g\xba\xf7\x8e',
    b'\xf4\x118I\xd6(\xf7\xc3\xbc\x91\xcc\x0f\xf7\x85\xa6\xae\xe3\xee#l\x1c\x91+(\xcc\t\xc4O\x9f\x97\xb7H',
    b'\xfb[\xb7\xe4\x82Y"\xea\xe8\xc2\xba\xec\x96\x0c\x8fR3\x84R"\x13Jj=\x84\x0e<\x12\x01\xafu\xed',
    b'}\xe6\\}W\xcd\xc7)q\xc9\xbe\xab\x94\xafj\xd4\xe9\x9f#?\xb6\xcc\xeb\xd2\xb4\xb1\x9f\x13i|\xa5M',
    b'o\x97(*\xb3G\xa2e\xae3\x83\xe1V\x9eb\xda\x8c\x19\xa6\x8c\xfag\r+az\x7f\xedGD\xbb\xfe'
]
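If, as the fixtures suggest, roots[n - 1] is the Merkle root of the first n leaf hashes, the two lists can be cross-checked directly; this extra test is a sketch, not part of the original file:

def test_roots_match_prefixes():
    # roots[n - 1] should be the root of the first n leaf hashes.
    for n in range(1, len(hashes) + 1):
        assert merkle.root(hashes[:n]) == roots[n - 1]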


def test_branch_length():
    assert merkle.branch_length(1) == 0
    assert merkle.branch_length(2) == 1
    for n in range(3, 5):
        assert merkle.branch_length(n) == 2
    for n in range(5, 9):
        assert merkle.branch_length(n) == 3


def test_branch_length_bad():
    with pytest.raises(TypeError):
        # Assumed completion of the truncated test: a non-integer
        # hash count should be rejected by branch_length.
        merkle.branch_length(1.0)