Example #1
def test_fix_cache_bitcoind_crash(block_processor):
    # A real BlockProcessor is required to test the blocking functionality, since the mock does not implement it
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    run_test_blocking_command_bitcoind_crash(
        block_processor.bitcoind_reachable,
        lambda: locator_cache.fix(block_processor.get_best_block_hash(),
                                  block_processor),
    )
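
The helper run_test_blocking_command_bitcoind_crash is part of the test utilities and is not shown here. A plausible sketch of the pattern it implements, assuming bitcoind_reachable is a threading.Event that blocking calls wait on (the behaviour below is an assumption, not the actual helper):

import threading

def run_blocking_command_bitcoind_crash_sketch(bitcoind_reachable, command):
    # Simulate bitcoind going down: blocking calls should now stall
    bitcoind_reachable.clear()

    thread = threading.Thread(target=command, daemon=True)
    thread.start()
    thread.join(timeout=1)
    assert thread.is_alive()  # the command is blocked waiting for bitcoind

    # Bring bitcoind "back up": the blocked command should complete
    bitcoind_reachable.set()
    thread.join(timeout=5)
    assert not thread.is_alive()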
Example #2
def test_locator_cache_is_full(block_processor):
    # Empty cache
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    for _ in range(locator_cache.cache_size):
        locator_cache.blocks[uuid4().hex] = 0
        assert not locator_cache.is_full()

    locator_cache.blocks[uuid4().hex] = 0
    assert locator_cache.is_full()
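
Judging by the assertions above, is_full only flips to True once the cache holds strictly more than cache_size blocks. A minimal sketch consistent with that behaviour (an assumption, not the actual implementation):

def is_full_sketch(locator_cache):
    # Strictly greater: inserting the (cache_size + 1)-th block is what makes
    # the cache "full" and signals that the oldest block can be evicted
    return len(locator_cache.blocks) > locator_cache.cache_size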
Example #3
def test_locator_cache_init(block_processor):
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    # Generate enough blocks so the cache can start full
    generate_blocks(2 * locator_cache.cache_size)

    locator_cache.init(block_processor.get_best_block_hash(), block_processor)
    assert len(locator_cache.blocks) == locator_cache.cache_size
    for block_hash in locator_cache.blocks:
        assert block_processor.get_block(block_hash)
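
A hedged sketch of what init appears to do, judging by the assertions: walk the chain backwards from the given tip via previousblockhash, collect each block's locator-to-txid map, and store the blocks oldest-first so the tip ends up last. The locator derivation (txid[:16], following Example #11) and the single-argument get_block returning a getblock-style dict (or None) are assumptions; some examples below pass an extra blocking flag to get_block.

def init_sketch(locator_cache, last_known_block_hash, block_processor):
    # Walk backwards from the tip, collecting up to cache_size blocks
    collected = []
    block_hash = last_known_block_hash
    for _ in range(locator_cache.cache_size):
        block = block_processor.get_block(block_hash)
        if not block:
            break
        locator_txid_map = {txid[:16]: txid for txid in block.get("tx", [])}
        collected.append((block_hash, locator_txid_map))
        block_hash = block.get("previousblockhash")
        if block_hash is None:
            break  # ran out of chain (fewer blocks than cache_size)

    # Store oldest-first so the newest block sits at the end of the dict
    for block_hash, locator_txid_map in reversed(collected):
        locator_cache.blocks[block_hash] = locator_txid_map
        locator_cache.cache.update(locator_txid_map)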
Example #4
def test_cache_get_txid():
    # Not much to test here, this is just a thin wrapper over dict.get
    locator = get_random_value_hex(16)
    txid = get_random_value_hex(32)

    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
    locator_cache.cache[locator] = txid

    assert locator_cache.get_txid(locator) == txid
    # A random (uncached) locator should return None
    assert locator_cache.get_txid(get_random_value_hex(16)) is None
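
As the comment says, there is little behind get_txid; a one-line sketch pinned down by the assertions (hypothetical):

def get_txid_sketch(locator_cache, locator):
    # A thin wrapper over dict.get: the cached txid, or None on a miss
    return locator_cache.cache.get(locator)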
Example #5
def test_locator_cache_init_not_enough_blocks(run_bitcoind, block_processor):
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
    # Make sure there are at least 3 blocks
    block_count = block_processor.get_block_count()
    if block_count < 3:
        generate_blocks_w_delay(3 - block_count)

    # Simulate a chain with only 3 blocks by starting from the block at height 2 (the third block, since genesis is at height 0)
    third_block_hash = bitcoin_cli(bitcoind_connect_params).getblockhash(2)
    locator_cache.init(third_block_hash, block_processor)
    assert len(locator_cache.blocks) == 3
    for block_hash in locator_cache.blocks:
        assert block_processor.get_block(block_hash)
Example #6
def test_locator_cache_is_full():
    # is_full should return whether the cache is full or not.
    # Create an empty cache
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    # Fill it one by one and check it is not full
    for _ in range(locator_cache.cache_size):
        locator_cache.blocks[uuid4().hex] = 0
        assert not locator_cache.is_full()

    # Add one more block and check again, it should be full now
    locator_cache.blocks[uuid4().hex] = 0
    assert locator_cache.is_full()
Example #7
def test_fix_cache(block_processor_mock, monkeypatch):
    # This tests how a reorg creates a new version of the cache
    # Let's start by setting up a full cache. We'll generate ``cache_size`` blocks to make sure it's full
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    # We'll need two additional blocks since we'll roll the chain back into the past
    blocks = dict()
    mock_generate_blocks(locator_cache.cache_size + 2, blocks, queue.Queue())
    best_block_hash = list(blocks.keys())[-1]

    # Mock the interaction with the BlockProcessor based on the mocked blocks
    monkeypatch.setattr(block_processor_mock, "get_block",
                        lambda x, blocking: blocks.get(x))
    monkeypatch.setattr(block_processor_mock, "get_block_count",
                        lambda: len(blocks))

    locator_cache.init(best_block_hash, block_processor_mock)
    assert len(locator_cache.blocks) == locator_cache.cache_size

    # Now let's fake a reorg of less than ``cache_size``. We'll go two blocks into the past.
    current_tip = best_block_hash
    current_tip_locators = locator_cache.blocks[current_tip]
    current_tip_parent = block_processor_mock.get_block(
        current_tip, False).get("previousblockhash")
    current_tip_parent_locators = locator_cache.blocks[current_tip_parent]
    fake_tip = block_processor_mock.get_block(current_tip_parent,
                                              False).get("previousblockhash")
    locator_cache.fix(fake_tip, block_processor_mock)

    # The last two blocks are not in the cache, nor are any of their locators
    assert current_tip not in locator_cache.blocks and current_tip_parent not in locator_cache.blocks
    for locator in current_tip_parent_locators + current_tip_locators:
        assert locator not in locator_cache.cache

    # The fake tip is the new tip, and two additional blocks are at the bottom
    assert fake_tip in locator_cache.blocks and list(
        locator_cache.blocks.keys())[-1] == fake_tip
    assert len(locator_cache.blocks) == locator_cache.cache_size

    # Test the same for a full cache reorg. We can simulate this by adding more blocks than the cache can fit and
    # triggering a fix. We'll use a new cache to compare against the old one
    old_cache_blocks = deepcopy(locator_cache.blocks)

    mock_generate_blocks(locator_cache.cache_size, blocks, queue.Queue())
    best_block_hash = list(blocks.keys())[-1]
    locator_cache.fix(best_block_hash, block_processor_mock)

    # None of the data from the old cache is in the new cache
    for block_hash, locators in old_cache_blocks.items():
        assert block_hash not in locator_cache.blocks
        for locator in locators:
            assert locator not in locator_cache.cache

    # The data in the new cache corresponds to the last ``cache_size`` blocks.
    block_count = block_processor_mock.get_block_count()
    for i in range(block_count, block_count - locator_cache.cache_size, -1):
        block_hash = list(blocks.keys())[i - 1]
        assert block_hash in locator_cache.blocks
        for locator in locator_cache.blocks[block_hash]:
            assert locator in locator_cache.cache
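
A hedged sketch of the fix behaviour these assertions pin down (the real implementation may well be smarter about reusing already-cached blocks): discard the current state and re-initialise from the new tip, so blocks still on the re-organised chain are re-cached while orphaned blocks and their locators disappear. It reuses init_sketch from Example #3 above:

def fix_sketch(locator_cache, new_tip_hash, block_processor):
    # Rebuild around the new tip: a shallow reorg keeps the surviving
    # ancestors (refilling the bottom of the cache), a deep one replaces
    # the whole cache
    locator_cache.blocks = {}
    locator_cache.cache = {}
    init_sketch(locator_cache, new_tip_hash, block_processor)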
Example #8
def test_locator_cache_init(block_processor_mock, monkeypatch):
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    # Generate enough blocks so the cache can start full
    blocks = dict()
    mock_generate_blocks(locator_cache.cache_size, blocks, queue.Queue())
    best_block_hash = list(blocks.keys())[-1]

    # Mock the interaction with the BlockProcessor based on the mocked blocks
    monkeypatch.setattr(block_processor_mock, "get_block",
                        lambda x, blocking: blocks.get(x))
    locator_cache.init(best_block_hash, block_processor_mock)

    assert len(locator_cache.blocks) == locator_cache.cache_size
    for block_hash in locator_cache.blocks:
        assert block_processor_mock.get_block(block_hash, blocking=False)
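
mock_generate_blocks also lives in the test utilities. A hypothetical sketch of what it could look like, given how the tests consume blocks (insertion-ordered, linked via previousblockhash, each with a tx list); the queue argument's exact role is an assumption:

import os

def mock_generate_blocks_sketch(n, blocks, block_queue):
    # Chain each new block to the last one generated (None for the first)
    prev_block_hash = list(blocks.keys())[-1] if blocks else None
    for _ in range(n):
        block_hash = os.urandom(32).hex()
        txs = [os.urandom(32).hex() for _ in range(5)]
        blocks[block_hash] = {"hash": block_hash, "previousblockhash": prev_block_hash, "tx": txs}
        block_queue.put(block_hash)  # announce the new block to any consumer
        prev_block_hash = block_hash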
Example #9
def test_locator_cache_init_not_enough_blocks(block_processor_mock,
                                              monkeypatch):
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    # Mock generating 3 blocks
    blocks = dict()
    mock_generate_blocks(3, blocks, queue.Queue())
    third_block_hash = list(blocks.keys())[2]

    # Mock the interaction with the BlockProcessor based on the mocked blocks
    monkeypatch.setattr(block_processor_mock, "get_block",
                        lambda x, blocking: blocks.get(x))
    locator_cache.init(third_block_hash, block_processor_mock)

    assert len(locator_cache.blocks) == 3
    for block_hash in locator_cache.blocks:
        assert block_processor_mock.get_block(block_hash, blocking=False)
Example #10
def test_update_cache():
    # update should add the new block's data to the cache. If the cache is full, the oldest block is dropped.
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    block_hash = get_random_value_hex(32)
    txs = [get_random_value_hex(32) for _ in range(10)]
    locator_txid_map = {compute_locator(txid): txid for txid in txs}

    # Cache is empty
    assert block_hash not in locator_cache.blocks
    for locator in locator_txid_map.keys():
        assert locator not in locator_cache.cache

    # The data has been added to the cache
    locator_cache.update(block_hash, locator_txid_map)
    assert block_hash in locator_cache.blocks
    for locator in locator_txid_map.keys():
        assert locator in locator_cache.cache
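
A hedged sketch of update matching these assertions: record the block's locator-to-txid map, merge it into the flat cache dict, and evict the oldest block once the cache overflows (eviction is sketched after Example #11 below). compute_locator is assumed to be a fixed-length txid prefix, as Example #11's txid[:16] suggests:

def update_sketch(locator_cache, block_hash, locator_txid_map):
    locator_cache.blocks[block_hash] = locator_txid_map
    locator_cache.cache.update(locator_txid_map)
    if locator_cache.is_full():
        # Drop the oldest block and its locators to make room
        locator_cache.remove_oldest_block()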
Example #11
def test_locator_remove_oldest_block(block_processor):
    # Empty cache
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    # Add some blocks to the cache
    for _ in range(locator_cache.cache_size):
        txid = get_random_value_hex(32)
        locator = txid[:16]
        locator_cache.blocks[get_random_value_hex(32)] = {locator: txid}
        locator_cache.cache[locator] = txid

    blocks_in_cache = locator_cache.blocks
    oldest_block_hash = list(blocks_in_cache.keys())[0]
    oldest_block_data = blocks_in_cache.get(oldest_block_hash)
    rest_of_blocks = list(blocks_in_cache.keys())[1:]
    locator_cache.remove_oldest_block()

    # Oldest block data is not in the cache
    assert oldest_block_hash not in locator_cache.blocks
    for locator in oldest_block_data:
        assert locator not in locator_cache.cache

    # The rest of data is in the cache
    assert set(rest_of_blocks).issubset(locator_cache.blocks)
    for block_hash in rest_of_blocks:
        for locator in locator_cache.blocks[block_hash]:
            assert locator in locator_cache.cache
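
A sketch of the eviction this test exercises, consistent with the assertions (an assumption, not the actual implementation): pop the first-inserted block and purge its locators from the flat map.

def remove_oldest_block_sketch(locator_cache):
    # Python dicts preserve insertion order, so the first key is the oldest
    oldest_block_hash = next(iter(locator_cache.blocks))
    oldest_locator_txid_map = locator_cache.blocks.pop(oldest_block_hash)
    for locator in oldest_locator_txid_map:
        locator_cache.cache.pop(locator, None)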
Example #12
def test_fix_cache(block_processor):
    # This tests how a reorg creates a new version of the cache
    # Let's start by setting up a full cache. We'll mine ``cache_size`` blocks to make sure it's full
    generate_blocks(config.get("LOCATOR_CACHE_SIZE"))

    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
    locator_cache.init(block_processor.get_best_block_hash(), block_processor)
    assert len(locator_cache.blocks) == locator_cache.cache_size

    # Now let's fake a reorg of less than ``cache_size``. We'll go two blocks into the past.
    current_tip = block_processor.get_best_block_hash()
    current_tip_locators = locator_cache.blocks[current_tip]
    current_tip_parent = block_processor.get_block(current_tip).get(
        "previousblockhash")
    current_tip_parent_locators = locator_cache.blocks[current_tip_parent]
    fake_tip = block_processor.get_block(current_tip_parent).get(
        "previousblockhash")
    locator_cache.fix(fake_tip, block_processor)

    # The last two blocks are not in the cache, nor are any of their locators
    assert current_tip not in locator_cache.blocks and current_tip_parent not in locator_cache.blocks
    for locator in current_tip_parent_locators + current_tip_locators:
        assert locator not in locator_cache.cache

    # The fake tip is the new tip, and two additional blocks are at the bottom
    assert fake_tip in locator_cache.blocks and list(
        locator_cache.blocks.keys())[-1] == fake_tip
    assert len(locator_cache.blocks) == locator_cache.cache_size

    # Test the same for a full cache reorg. We can simulate this by adding more blocks than the cache can fit and
    # triggering a fix. We'll use a new cache to compare against the old one
    old_cache_blocks = deepcopy(locator_cache.blocks)

    generate_blocks((config.get("LOCATOR_CACHE_SIZE") * 2))
    locator_cache.fix(block_processor.get_best_block_hash(), block_processor)

    # None of the data from the old cache is in the new cache
    for block_hash, locators in old_cache_blocks.items():
        assert block_hash not in locator_cache.blocks
        for locator in locators:
            assert locator not in locator_cache.cache

    # The data in the new cache corresponds to the last ``cache_size`` blocks.
    block_count = block_processor.get_block_count()
    for i in range(block_count, block_count - locator_cache.cache_size, -1):
        block_hash = bitcoin_cli.getblockhash(i)
        assert block_hash in locator_cache.blocks
        for locator in locator_cache.blocks[block_hash]:
            assert locator in locator_cache.cache
Example #13
def test_update_cache_full():
    # Updating a full cache should drop the oldest block to make room for the new one
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
    block_hashes = []
    big_map = {}

    # Fill the cache first
    for i in range(locator_cache.cache_size):
        block_hash = get_random_value_hex(32)
        txs = [get_random_value_hex(32) for _ in range(10)]
        locator_txid_map = {compute_locator(txid): txid for txid in txs}
        locator_cache.update(block_hash, locator_txid_map)

        if i == 0:
            first_block_hash = block_hash
            first_locator_txid_map = locator_txid_map
        else:
            block_hashes.append(block_hash)
            big_map.update(locator_txid_map)

    # The cache is now full.
    assert first_block_hash in locator_cache.blocks
    for locator in first_locator_txid_map.keys():
        assert locator in locator_cache.cache

    # Add one more
    block_hash = get_random_value_hex(32)
    txs = [get_random_value_hex(32) for _ in range(10)]
    locator_txid_map = {compute_locator(txid): txid for txid in txs}
    locator_cache.update(block_hash, locator_txid_map)

    # The first block is not there anymore, but the rest are there
    assert first_block_hash not in locator_cache.blocks
    for locator in first_locator_txid_map.keys():
        assert locator not in locator_cache.cache

    for block_hash in block_hashes:
        assert block_hash in locator_cache.blocks

    for locator in big_map.keys():
        assert locator in locator_cache.cache