def test_do_watch_cache_update(watcher):
    # Test that data is properly added to and removed from the cache

    for _ in range(10):
        blocks_in_cache = watcher.locator_cache.blocks
        oldest_block_hash = list(blocks_in_cache.keys())[0]
        oldest_block_data = blocks_in_cache.get(oldest_block_hash)
        rest_of_blocks = list(blocks_in_cache.keys())[1:]
        assert len(watcher.locator_cache.blocks) == watcher.locator_cache.cache_size

        generate_blocks_w_delay(1)

        # The oldest block is gone but the rest remain
        assert oldest_block_hash not in watcher.locator_cache.blocks
        assert set(rest_of_blocks).issubset(watcher.locator_cache.blocks.keys())

        # The locators of the oldest block are gone but the rest remain
        for locator in oldest_block_data:
            assert locator not in watcher.locator_cache.cache
        for block_hash in rest_of_blocks:
            for locator in watcher.locator_cache.blocks[block_hash]:
                assert locator in watcher.locator_cache.cache

        # The size of the cache is the same
        assert len(watcher.locator_cache.blocks) == watcher.locator_cache.cache_size
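

# A minimal sketch of the FIFO eviction the test above relies on, assuming
# LocatorCache keeps ``blocks`` as an insertion-ordered block_hash -> [locator]
# map and ``cache`` as a flat locator -> txid map. Hypothetical helper
# illustrating the assumed behaviour, not the real LocatorCache.update.
def _sketch_cache_update(blocks, cache, cache_size, block_hash, locator_txid_map):
    blocks[block_hash] = list(locator_txid_map.keys())
    cache.update(locator_txid_map)
    if len(blocks) > cache_size:
        # Evict the oldest block and every locator it contributed
        oldest_block_hash = next(iter(blocks))
        for locator in blocks.pop(oldest_block_hash):
            del cache[locator]

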
def test_locator_cache_init_not_enough_blocks(run_bitcoind, block_processor):
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
    # Make sure there are at least 3 blocks
    block_count = block_processor.get_block_count()
    if block_count < 3:
        generate_blocks_w_delay(3 - block_count)

    # Simulate that there are only 3 blocks by taking the block at height 2 as the tip
    third_block_hash = bitcoin_cli(bitcoind_connect_params).getblockhash(2)
    locator_cache.init(third_block_hash, block_processor)
    assert len(locator_cache.blocks) == 3
    for block_hash in locator_cache.blocks:
        assert block_processor.get_block(block_hash)
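

# A sketch of the early-stop behaviour the test above exercises: a hypothetical
# init that walks the chain backwards from the given tip and gives up when it
# hits the genesis block, so the cache may hold fewer than ``cache_size``
# entries. Assumed logic, not the real LocatorCache.init.
def _sketch_cache_init(tip_hash, block_processor, cache_size):
    block_hashes = []
    current = tip_hash
    while current is not None and len(block_hashes) < cache_size:
        block_hashes.append(current)
        # The genesis block has no "previousblockhash", which ends the walk
        current = block_processor.get_block(current).get("previousblockhash")
    # Oldest first, so the tip ends up as the last entry
    return list(reversed(block_hashes))

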
def test_fix_cache(block_processor):
    # This tests how a reorg will create a new version of the cache
    # Let's start by setting up a full cache. We'll mine ``cache_size`` blocks to be sure it's full
    generate_blocks_w_delay(config.get("LOCATOR_CACHE_SIZE"))

    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
    locator_cache.init(block_processor.get_best_block_hash(), block_processor)
    assert len(locator_cache.blocks) == locator_cache.cache_size

    # Now let's fake a reorg shallower than ``cache_size``. We'll go two blocks into the past.
    current_tip = block_processor.get_best_block_hash()
    current_tip_locators = locator_cache.blocks[current_tip]
    current_tip_parent = block_processor.get_block(current_tip).get("previousblockhash")
    current_tip_parent_locators = locator_cache.blocks[current_tip_parent]
    fake_tip = block_processor.get_block(current_tip_parent).get("previousblockhash")
    locator_cache.fix(fake_tip, block_processor)

    # The last two blocks are not in the cache, nor are any of their locators
    assert current_tip not in locator_cache.blocks and current_tip_parent not in locator_cache.blocks
    for locator in current_tip_parent_locators + current_tip_locators:
        assert locator not in locator_cache.cache

    # The fake tip is the new cache tip, and two older blocks have been backfilled at the bottom
    assert fake_tip in locator_cache.blocks and list(locator_cache.blocks.keys())[-1] == fake_tip
    assert len(locator_cache.blocks) == locator_cache.cache_size

    # Test the same for a reorg deeper than the whole cache. We can simulate this by mining more blocks than the cache
    # can fit and triggering a fix. We'll keep a copy of the old cache data to compare against
    old_cache_blocks = deepcopy(locator_cache.blocks)

    generate_blocks_w_delay(config.get("LOCATOR_CACHE_SIZE") * 2)
    locator_cache.fix(block_processor.get_best_block_hash(), block_processor)

    # None of the data from the old cache is in the new cache
    for block_hash, locators in old_cache_blocks.items():
        assert block_hash not in locator_cache.blocks
        for locator in locators:
            assert locator not in locator_cache.cache

    # The data in the new cache corresponds to the last ``cache_size`` blocks.
    block_count = block_processor.get_block_count()
    for i in range(block_count, block_count - locator_cache.cache_size, -1):
        block_hash = bitcoin_cli(bitcoind_connect_params).getblockhash(i - 1)
        assert block_hash in locator_cache.blocks
        for locator in locator_cache.blocks[block_hash]:
            assert locator in locator_cache.cache
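

# A sketch of the repair both scenarios above depend on, assuming ``fix``
# rebuilds the cache from the new tip and swaps the state in. Under that
# assumption a reorg deeper than ``cache_size`` shares no data with the old
# cache, while a shallow one keeps the still-valid ancestors. Hypothetical
# helper, not the real LocatorCache.fix.
def _sketch_cache_fix(locator_cache, new_tip, block_processor):
    fresh = LocatorCache(locator_cache.cache_size)
    fresh.init(new_tip, block_processor)
    # Drop everything belonging to the abandoned fork in one swap
    locator_cache.blocks = fresh.blocks
    locator_cache.cache = fresh.cache

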
def test_do_watch(watcher, temp_db_manager):
    watcher.db_manager = temp_db_manager

    # We will wipe all the previous data and add ``APPOINTMENTS`` appointments
    appointments, locator_uuid_map, dispute_txs = create_appointments(APPOINTMENTS)

    # Set the data into the Watcher and in the db
    watcher.locator_uuid_map = locator_uuid_map
    watcher.appointments = {}
    watcher.gatekeeper.registered_users = {}

    # Simulate a registration (the subscription times out in 10 blocks)
    user_id = get_random_value_hex(16)
    watcher.gatekeeper.registered_users[user_id] = UserInfo(
        available_slots=100, subscription_expiry=watcher.block_processor.get_block_count() + 10
    )

    # Add the appointments
    for uuid, appointment in appointments.items():
        watcher.appointments[uuid] = {"locator": appointment.locator, "user_id": user_id}
        # Assume the appointment only takes one slot
        watcher.gatekeeper.registered_users[user_id].appointments[uuid] = 1
        watcher.db_manager.store_watcher_appointment(uuid, appointment.to_dict())
        watcher.db_manager.create_append_locator_map(appointment.locator, uuid)

    do_watch_thread = Thread(target=watcher.do_watch, daemon=True)
    do_watch_thread.start()

    # Broadcast the first two
    for dispute_tx in dispute_txs[:2]:
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(dispute_tx)

    # After generating a block, the appointment count should have been reduced by 2 (two breaches)
    generate_blocks_w_delay(1)

    assert len(watcher.appointments) == APPOINTMENTS - 2

    # The rest of the appointments will time out once the subscription expires (9 more blocks) plus EXPIRY_DELTA blocks
    # Wait for an additional block to be safe
    generate_blocks_w_delay(10 + config.get("EXPIRY_DELTA"))
    assert len(watcher.appointments) == 0

    # Check that they are not in the Gatekeeper either; only the two that were passed on to the Responder should remain
    assert len(watcher.gatekeeper.registered_users[user_id].appointments) == 2
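

# The expiry arithmetic behind the block counts above, as a sketch: an
# appointment is kept for EXPIRY_DELTA blocks past its subscription expiry
# before the Watcher drops it. Hypothetical check mirroring the comments, not
# the Gatekeeper's actual implementation.
def _sketch_appointment_has_expired(block_height, subscription_expiry, expiry_delta):
    return block_height > subscription_expiry + expiry_delta

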
def test_get_completed_trackers(db_manager, gatekeeper, carrier, block_processor):
    responder = Responder(db_manager, gatekeeper, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue, block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    # A completed tracker is a tracker whose penalty transaction has been irrevocably resolved (i.e. has reached 100
    # confirmations)
    # We'll create 3 types of txs: irrevocably resolved, confirmed but not irrevocably resolved, and unconfirmed
    trackers_ir_resolved = {
        uuid4().hex: create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex()) for _ in range(10)
    }

    trackers_confirmed = {
        uuid4().hex: create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex()) for _ in range(10)
    }

    trackers_unconfirmed = {}
    for _ in range(10):
        tracker = create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
        responder.unconfirmed_txs.append(tracker.penalty_txid)
        trackers_unconfirmed[uuid4().hex] = tracker

    all_trackers = {}
    all_trackers.update(trackers_ir_resolved)
    all_trackers.update(trackers_confirmed)
    all_trackers.update(trackers_unconfirmed)

    # Let's add all to the Responder
    for uuid, tracker in all_trackers.items():
        responder.trackers[uuid] = tracker.get_summary()

    for tracker in trackers_ir_resolved.values():
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(tracker.penalty_rawtx)

    generate_block_w_delay()

    for tracker in trackers_confirmed.values():
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(tracker.penalty_rawtx)

    # ir_resolved have 100 confirmations and confirmed have 99
    generate_blocks_w_delay(99)

    # Let's check
    completed_trackers = responder.get_completed_trackers()
    ended_trackers_keys = list(trackers_ir_resolved.keys())
    assert set(completed_trackers) == set(ended_trackers_keys)

    # Generating 1 additional block should also complete the confirmed trackers
    generate_block_w_delay()

    completed_trackers = responder.get_completed_trackers()
    ended_trackers_keys.extend(list(trackers_confirmed.keys()))
    assert set(completed_trackers) == set(ended_trackers_keys)
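

# A sketch of the completion criterion tested above, assuming a tracker is
# completed once its penalty tx reaches 100 confirmations. ``get_confirmations``
# is a hypothetical stand-in for the bitcoind lookup the Responder performs.
def _sketch_get_completed_trackers(trackers, get_confirmations):
    return [uuid for uuid, tracker in trackers.items() if get_confirmations(tracker.penalty_txid) >= 100]

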
def test_do_watch(temp_db_manager, gatekeeper, carrier, block_processor):
    # Create a fresh responder to simplify the test
    responder = Responder(temp_db_manager, gatekeeper, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue, block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    trackers = [create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex()) for _ in range(20)]
    subscription_expiry = responder.block_processor.get_block_count() + 110

    # Let's set up the trackers first
    for tracker in trackers:
        uuid = uuid4().hex

        # Simulate user registration so trackers can properly expire
        responder.gatekeeper.registered_users[tracker.user_id] = UserInfo(
            available_slots=10, subscription_expiry=subscription_expiry
        )

        # Add data to the Responder
        responder.trackers[uuid] = tracker.get_summary()
        responder.tx_tracker_map[tracker.penalty_txid] = [uuid]
        responder.missed_confirmations[tracker.penalty_txid] = 0
        responder.unconfirmed_txs.append(tracker.penalty_txid)
        # Assuming the appointment only took a single slot
        responder.gatekeeper.registered_users[tracker.user_id].appointments[uuid] = 1

        # We also need to store the info in the db
        responder.db_manager.create_triggered_appointment_flag(uuid)
        responder.db_manager.store_responder_tracker(uuid, tracker.to_dict())

    # Let's start to watch
    Thread(target=responder.do_watch, daemon=True).start()

    # And broadcast some of the transactions
    broadcast_txs = []
    for tracker in trackers[:5]:
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(tracker.penalty_rawtx)
        broadcast_txs.append(tracker.penalty_txid)

    # Mine a block
    generate_block_w_delay()

    # The transactions we sent shouldn't be in the unconfirmed transaction list anymore
    assert set(broadcast_txs).isdisjoint(responder.unconfirmed_txs)

    # CONFIRMATIONS_BEFORE_RETRY + 1 blocks later, the responder should have rebroadcast the 15 remaining unconfirmed
    # txs and seen them confirm
    generate_blocks_w_delay(CONFIRMATIONS_BEFORE_RETRY + 1)
    assert len(responder.unconfirmed_txs) == 0
    assert len(responder.trackers) == 20

    # Generating 100 - CONFIRMATIONS_BEFORE_RETRY - 2 additional blocks should complete the first 5 trackers
    generate_blocks_w_delay(100 - CONFIRMATIONS_BEFORE_RETRY - 2)
    assert len(responder.unconfirmed_txs) == 0
    assert len(responder.trackers) == 15
    # Check they are not in the Gatekeeper either
    for tracker in trackers[:5]:
        assert len(responder.gatekeeper.registered_users[tracker.user_id].appointments) == 0

    # CONFIRMATIONS_BEFORE_RETRY additional blocks should complete the rest
    generate_blocks_w_delay(CONFIRMATIONS_BEFORE_RETRY)
    assert len(responder.unconfirmed_txs) == 0
    assert len(responder.trackers) == 0
    # Check they are not in the Gatekeeper either
    for tracker in trackers[5:]:
        assert len(responder.gatekeeper.registered_users[tracker.user_id].appointments) == 0
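

# A sketch of the retry bookkeeping the block counts above rely on, assuming a
# penalty tx that misses CONFIRMATIONS_BEFORE_RETRY confirmations in a row is
# rebroadcast and then confirms in the following block. Hypothetical helper,
# not Responder.do_watch itself.
def _sketch_track_missed_confirmations(unconfirmed_txs, missed_confirmations, rebroadcast):
    for txid in list(unconfirmed_txs):
        missed_confirmations[txid] += 1
        if missed_confirmations[txid] >= CONFIRMATIONS_BEFORE_RETRY:
            rebroadcast(txid)
            missed_confirmations[txid] = 0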