Ejemplo n.º 1
0
def test_fix_cache(block_processor_mock, monkeypatch):
    # A reorg must rebuild the cache from the new chain tip. Start with a full
    # cache: mine ``cache_size`` blocks plus two extra so the chain can later
    # be rolled back into the past.
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    blocks = dict()
    mock_generate_blocks(locator_cache.cache_size + 2, blocks, queue.Queue())
    tip = list(blocks.keys())[-1]

    # Wire the BlockProcessor mock to serve the mocked chain
    monkeypatch.setattr(block_processor_mock, "get_block",
                        lambda x, blocking: blocks.get(x))
    monkeypatch.setattr(block_processor_mock, "get_block_count",
                        lambda: len(blocks))

    locator_cache.init(tip, block_processor_mock)
    assert len(locator_cache.blocks) == locator_cache.cache_size

    # Fake a reorg shallower than ``cache_size``: rewind two blocks
    tip_locators = locator_cache.blocks[tip]
    tip_parent = block_processor_mock.get_block(tip, False).get("previousblockhash")
    tip_parent_locators = locator_cache.blocks[tip_parent]
    fake_tip = block_processor_mock.get_block(tip_parent, False).get("previousblockhash")
    locator_cache.fix(fake_tip, block_processor_mock)

    # Neither rewound block, nor any of their locators, remains cached
    assert tip not in locator_cache.blocks and tip_parent not in locator_cache.blocks
    for locator in tip_parent_locators + tip_locators:
        assert locator not in locator_cache.cache

    # The fake tip is now the newest cached block and the cache is full again
    assert fake_tip in locator_cache.blocks
    assert list(locator_cache.blocks.keys())[-1] == fake_tip
    assert len(locator_cache.blocks) == locator_cache.cache_size

    # Repeat for a reorg deeper than the whole cache: add more blocks than the
    # cache can hold and trigger a fix, comparing against a snapshot of the
    # old cache contents.
    old_cache_blocks = deepcopy(locator_cache.blocks)

    mock_generate_blocks(locator_cache.cache_size, blocks, queue.Queue())
    new_tip = list(blocks.keys())[-1]
    locator_cache.fix(new_tip, block_processor_mock)

    # Nothing from the old cache survives the deep reorg
    for block_hash, locators in old_cache_blocks.items():
        assert block_hash not in locator_cache.blocks
        for locator in locators:
            assert locator not in locator_cache.cache

    # The rebuilt cache holds exactly the last ``cache_size`` blocks
    block_count = block_processor_mock.get_block_count()
    for height in range(block_count, block_count - locator_cache.cache_size, -1):
        block_hash = list(blocks.keys())[height - 1]
        assert block_hash in locator_cache.blocks
        for locator in locator_cache.blocks[block_hash]:
            assert locator in locator_cache.cache
Ejemplo n.º 2
0
def test_locator_cache_init(block_processor):
    # A freshly initialised cache should come up at full capacity, holding
    # only blocks the BlockProcessor can actually resolve.
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    # Mine enough blocks so the cache can be filled completely
    generate_blocks(2 * locator_cache.cache_size)

    locator_cache.init(block_processor.get_best_block_hash(), block_processor)
    assert len(locator_cache.blocks) == locator_cache.cache_size
    for block_hash in locator_cache.blocks:
        assert block_processor.get_block(block_hash)
Ejemplo n.º 3
0
def test_locator_cache_init_not_enough_blocks(run_bitcoind, block_processor):
    # If fewer blocks than ``cache_size`` are available, the cache should only
    # hold what actually exists.
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    # Make sure the chain has at least 3 blocks to work with
    missing = 3 - block_processor.get_block_count()
    if missing > 0:
        generate_blocks_w_delay(missing)

    # Init from the third block, simulating a chain of only 3 blocks
    third_block_hash = bitcoin_cli(bitcoind_connect_params).getblockhash(2)
    locator_cache.init(third_block_hash, block_processor)
    assert len(locator_cache.blocks) == 3
    for block_hash in locator_cache.blocks:
        assert block_processor.get_block(block_hash)
Ejemplo n.º 4
0
def test_fix_cache(block_processor):
    """Tests that ``LocatorCache.fix`` rebuilds the cache after a reorg.

    Covers both a shallow reorg (smaller than the cache) and a deep one
    (bigger than the whole cache), using a real ``BlockProcessor``.
    """
    # Start with a full cache. We'll mine ``cache_size`` blocks to be sure it's full
    generate_blocks(config.get("LOCATOR_CACHE_SIZE"))

    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
    locator_cache.init(block_processor.get_best_block_hash(), block_processor)
    assert len(locator_cache.blocks) == locator_cache.cache_size

    # Now let's fake a reorg of less than ``cache_size``. We'll go two blocks into the past.
    current_tip = block_processor.get_best_block_hash()
    current_tip_locators = locator_cache.blocks[current_tip]
    current_tip_parent = block_processor.get_block(current_tip).get(
        "previousblockhash")
    current_tip_parent_locators = locator_cache.blocks[current_tip_parent]
    fake_tip = block_processor.get_block(current_tip_parent).get(
        "previousblockhash")
    locator_cache.fix(fake_tip, block_processor)

    # The last two blocks are not in the cache nor are there any of their locators
    assert current_tip not in locator_cache.blocks and current_tip_parent not in locator_cache.blocks
    for locator in current_tip_parent_locators + current_tip_locators:
        assert locator not in locator_cache.cache

    # The fake tip is the new tip, and two additional blocks are at the bottom
    assert fake_tip in locator_cache.blocks and list(
        locator_cache.blocks.keys())[-1] == fake_tip
    assert len(locator_cache.blocks) == locator_cache.cache_size

    # Test the same for a full cache reorg. We can simulate this by adding more blocks than the cache can fit and
    # trigger a fix. We'll use a new cache snapshot to compare with the old.
    old_cache_blocks = deepcopy(locator_cache.blocks)

    generate_blocks((config.get("LOCATOR_CACHE_SIZE") * 2))
    locator_cache.fix(block_processor.get_best_block_hash(), block_processor)

    # None of the data from the old cache is in the new cache
    for block_hash, locators in old_cache_blocks.items():
        assert block_hash not in locator_cache.blocks
        for locator in locators:
            assert locator not in locator_cache.cache

    # The data in the new cache corresponds to the last ``cache_size`` blocks.
    block_count = block_processor.get_block_count()
    for i in range(block_count, block_count - locator_cache.cache_size, -1):
        # BUGFIX: ``bitcoin_cli`` is a factory that must be called with the
        # connection params to obtain a client (as done elsewhere in this
        # file); calling ``getblockhash`` on the bare function would raise
        # AttributeError.
        block_hash = bitcoin_cli(bitcoind_connect_params).getblockhash(i)
        assert block_hash in locator_cache.blocks
        for locator in locator_cache.blocks[block_hash]:
            assert locator in locator_cache.cache
Ejemplo n.º 5
0
def test_locator_cache_init(block_processor_mock, monkeypatch):
    # With enough mocked blocks available, the cache should start full and
    # every cached block must be resolvable through the BlockProcessor.
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    mocked_blocks = dict()
    mock_generate_blocks(locator_cache.cache_size, mocked_blocks, queue.Queue())
    tip = list(mocked_blocks.keys())[-1]

    # Serve the mocked blocks through the BlockProcessor mock
    monkeypatch.setattr(block_processor_mock, "get_block",
                        lambda x, blocking: mocked_blocks.get(x))
    locator_cache.init(tip, block_processor_mock)

    assert len(locator_cache.blocks) == locator_cache.cache_size
    for block_hash in locator_cache.blocks:
        assert block_processor_mock.get_block(block_hash, blocking=False)
Ejemplo n.º 6
0
def test_locator_cache_init_not_enough_blocks(block_processor_mock,
                                              monkeypatch):
    # When fewer blocks than ``cache_size`` exist, the cache should only hold
    # the blocks that are actually available.
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    # Mock a chain of just 3 blocks
    mocked_blocks = dict()
    mock_generate_blocks(3, mocked_blocks, queue.Queue())
    third_block_hash = list(mocked_blocks.keys())[2]

    # Serve the mocked blocks through the BlockProcessor mock
    monkeypatch.setattr(block_processor_mock, "get_block",
                        lambda x, blocking: mocked_blocks.get(x))
    locator_cache.init(third_block_hash, block_processor_mock)

    assert len(locator_cache.blocks) == 3
    for block_hash in locator_cache.blocks:
        assert block_processor_mock.get_block(block_hash, blocking=False)
Ejemplo n.º 7
0
def test_locator_cache_init_bitcoind_crash(block_processor):
    # Blocking behaviour needs a real BlockProcessor, since the mock does not
    # implement that functionality.
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    def init_cache():
        locator_cache.init(block_processor.get_best_block_hash(),
                           block_processor)

    run_test_blocking_command_bitcoind_crash(
        block_processor.bitcoind_reachable, init_cache)