Example #1
def test_check_confirmations(db_manager, carrier, block_processor):
    responder = Responder(db_manager, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    # check_confirmations checks, given a list of transactions for a block, which of the known penalty transactions
    # have been confirmed. To test this we need to create a list of transactions and set the state of the responder
    txs = [get_random_value_hex(32) for _ in range(20)]

    # The responder has a list of unconfirmed transactions; let's make some of them the ones we've received
    responder.unconfirmed_txs = [get_random_value_hex(32) for _ in range(10)]
    txs_subset = random.sample(txs, k=10)
    responder.unconfirmed_txs.extend(txs_subset)

    # We also need to add them to the tx_tracker_map since they would be there in normal conditions
    responder.tx_tracker_map = {
        txid: TransactionTracker(txid[:LOCATOR_LEN_HEX], txid, None, None,
                                 None)
        for txid in responder.unconfirmed_txs
    }

    # Let's make sure that there are no txs with missed confirmations yet
    assert len(responder.missed_confirmations) == 0

    responder.check_confirmations(txs)

    # After checking confirmations, the txs in txs_subset should be confirmed (not part of unconfirmed_txs anymore)
    # and the rest should have a missed confirmation
    for tx in txs_subset:
        assert tx not in responder.unconfirmed_txs

    for tx in responder.unconfirmed_txs:
        assert responder.missed_confirmations[tx] == 1
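For orientation, the behaviour this test pins down can be pictured with the following sketch. It is only an assumed shape derived from the assertions above (attribute names taken from the test), not the actual Responder.check_confirmations implementation.

def check_confirmations_sketch(responder, txs):
    # Hedged sketch: penalty txs seen in the block are no longer unconfirmed...
    for txid in txs:
        if txid in responder.tx_tracker_map and txid in responder.unconfirmed_txs:
            responder.unconfirmed_txs.remove(txid)

    # ...and every tx still unconfirmed accumulates one more missed confirmation
    for txid in responder.unconfirmed_txs:
        responder.missed_confirmations[txid] = responder.missed_confirmations.get(txid, 0) + 1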
def test_monitor_chain_polling(db_manager, block_processor):
    # Try polling with the Watcher
    watcher_queue = Queue()
    chain_monitor = ChainMonitor(watcher_queue, Queue(), block_processor,
                                 bitcoind_feed_params)
    chain_monitor.best_tip = block_processor.get_best_block_hash()
    chain_monitor.polling_delta = 0.1

    # monitor_chain_polling runs until terminate is set
    polling_thread = Thread(target=chain_monitor.monitor_chain_polling,
                            daemon=True)
    polling_thread.start()

    # Check that nothing changes as long as a block is not generated
    for _ in range(5):
        assert chain_monitor.watcher_queue.empty()
        time.sleep(0.1)

    # And that it does if we generate a block
    generate_block()

    chain_monitor.watcher_queue.get()
    assert chain_monitor.watcher_queue.empty()

    chain_monitor.terminate = True
    polling_thread.join()
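The loop this test drives is not shown here; a hedged sketch of its assumed shape, based only on what the test observes (best_tip, polling_delta, terminate and the subscriber queues), could look like this:

def monitor_chain_polling_sketch(monitor):
    # Hedged sketch, not the actual ChainMonitor method
    while not monitor.terminate:
        current_tip = monitor.block_processor.get_best_block_hash()  # assumes the processor is kept on the instance
        # Only a previously unseen tip is pushed to the subscriber queues
        if monitor.update_state(current_tip):
            monitor.notify_subscribers(current_tip)
        time.sleep(monitor.polling_delta)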
Example #3
def responder(db_manager, carrier, block_processor):
    responder = Responder(db_manager, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    return responder
def test_notify_subscribers(block_processor):
    queue1 = Queue()
    queue2 = Queue()
    chain_monitor = ChainMonitor([queue1, queue2], block_processor, bitcoind_feed_params)

    # Queues should be empty to start with
    assert queue1.qsize() == 0
    assert queue2.qsize() == 0

    block1 = get_random_value_hex(32)
    block2 = get_random_value_hex(32)
    block3 = get_random_value_hex(32)

    # we add two elements to the internal queue before the thread is started
    chain_monitor.queue.put(block1)
    chain_monitor.queue.put(block2)

    assert queue1.qsize() == 0
    assert queue2.qsize() == 0

    notifying_thread = Thread(target=chain_monitor.notify_subscribers, daemon=True)
    notifying_thread.start()

    # the existing elements should be processed soon and in order for all queues
    for q in [queue1, queue2]:
        assert q.get(timeout=0.1) == block1
        assert q.get(timeout=0.1) == block2

    # Elements added after the thread has started should also be delivered to every subscriber queue
    chain_monitor.queue.put(block3)

    assert queue1.get(timeout=0.1) == block3
    assert queue2.get(timeout=0.1) == block3

    chain_monitor.terminate()
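The fan-out asserted above can be sketched as follows. This is an assumed shape (the END_MESSAGE handling matches a later terminate example), not the actual notify_subscribers code:

def notify_subscribers_sketch(monitor):
    # Hedged sketch, not the actual ChainMonitor method
    while True:
        block_hash = monitor.queue.get()  # blocks until the internal queue has an element
        for receiving_queue in monitor.receiving_queues:
            receiving_queue.put(block_hash)
        if block_hash == ChainMonitor.END_MESSAGE:
            break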
def test_monitor_chain_single_update(block_processor):
    # This test checks that if both threads try to add the same block to the queue, only the first one will make it
    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)

    chain_monitor.polling_delta = 2

    # We will create a block and wait for the polling thread. Then check the queues to see that the block hash has only
    # been added once.
    chain_monitor.monitor_chain()
    chain_monitor.activate()
    generate_blocks(1)

    assert len(chain_monitor.receiving_queues) == 2

    queue0_block = chain_monitor.receiving_queues[0].get()
    queue1_block = chain_monitor.receiving_queues[1].get()
    assert queue0_block == queue1_block
    assert chain_monitor.receiving_queues[0].empty()
    assert chain_monitor.receiving_queues[1].empty()

    # The delta for polling is 2 secs, so let's wait and see
    time.sleep(2)
    assert chain_monitor.receiving_queues[0].empty()
    assert chain_monitor.receiving_queues[1].empty()

    # We can also force an update and see that it won't go through
    assert chain_monitor.enqueue(queue0_block) is False

    chain_monitor.terminate()
    # The zmq thread needs a block generation to release from the recv method.
    generate_blocks(1)
def test_monitor_chain_wrong_status_raises(block_processor):
    # calling monitor_chain when not idle should raise
    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)

    for status in ChainMonitorStatus:
        if status != ChainMonitorStatus.IDLE:
            chain_monitor.status = status  # mock the status
            with pytest.raises(RuntimeError, match="can only be called in IDLE status"):
                chain_monitor.monitor_chain()
def test_activate_wrong_status_raises(block_processor):
    # calling activate when not listening should raise
    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)

    for status in ChainMonitorStatus:
        if status != ChainMonitorStatus.LISTENING:
            chain_monitor.status = status  # mock the status
            with pytest.raises(RuntimeError, match="can only be called in LISTENING status"):
                chain_monitor.activate()
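Both status tests rely on guards of roughly the following shape; this is a hedged sketch whose only firm part is the error wording matched by the match= patterns above:

def monitor_chain_sketch(monitor):
    # Hedged sketch of the IDLE guard
    if monitor.status != ChainMonitorStatus.IDLE:
        raise RuntimeError("monitor_chain can only be called in IDLE status")
    monitor.status = ChainMonitorStatus.LISTENING

def activate_sketch(monitor):
    # Hedged sketch of the LISTENING guard
    if monitor.status != ChainMonitorStatus.LISTENING:
        raise RuntimeError("activate can only be called in LISTENING status")
    monitor.status = ChainMonitorStatus.ACTIVE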
Example #8
def test_monitor_chain_wrong_status_raises(block_processor_mock, monkeypatch):
    # Calling monitor_chain when not idle should raise
    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor_mock, bitcoind_feed_params)

    for status in ChainMonitorStatus:
        if status != ChainMonitorStatus.IDLE:
            monkeypatch.setattr(chain_monitor, "status", status)
            with pytest.raises(RuntimeError, match="can only be called in IDLE status"):
                chain_monitor.monitor_chain()
def test_threads_stop_when_terminated(block_processor):
    # When status is "terminated", the methods running the threads should stop immediately

    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)
    chain_monitor.terminate()

    # If any of the functions does not exit immediately, the test will time out
    chain_monitor.monitor_chain_polling()
    chain_monitor.monitor_chain_zmq()
    chain_monitor.notify_subscribers()
Example #10
def test_enqueue(block_processor):
    # The state is updated after receiving a new block (and only if the block is not already known).
    # Let's start by adding some hashes to last_tips
    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)
    chain_monitor.last_tips = [get_random_value_hex(32) for _ in range(5)]

    # Now we can try to update the state with a hash that has already been seen and check that it gets rejected
    assert chain_monitor.enqueue(chain_monitor.last_tips[0]) is False

    # The state should be correctly updated with a new block hash, which should be added as the last element of last_tips
    another_block_hash = get_random_value_hex(32)
    assert chain_monitor.enqueue(another_block_hash) is True
    assert chain_monitor.last_tips[-1] == another_block_hash
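The two assertions pin down the enqueue contract; a minimal sketch consistent with them (and with the duplicate rejection seen in test_monitor_chain_single_update) would be:

def enqueue_sketch(monitor, block_hash):
    # Hedged sketch, not the actual ChainMonitor method
    if block_hash in monitor.last_tips:
        return False

    monitor.queue.put(block_hash)
    monitor.last_tips.append(block_hash)
    return True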
def test_notify_subscribers(block_processor):
    chain_monitor = ChainMonitor(Queue(), Queue(), block_processor,
                                 bitcoind_feed_params)
    # Subscribers are only notified as long as they are awake
    new_block = get_random_value_hex(32)

    # Queues should be empty to start with
    assert chain_monitor.watcher_queue.empty()
    assert chain_monitor.responder_queue.empty()

    chain_monitor.notify_subscribers(new_block)

    assert chain_monitor.watcher_queue.get() == new_block
    assert chain_monitor.responder_queue.get() == new_block
def test_monitor_chain_zmq(db_manager, block_processor):
    responder_queue = Queue()
    chain_monitor = ChainMonitor(Queue(), responder_queue, block_processor,
                                 bitcoind_feed_params)
    chain_monitor.best_tip = block_processor.get_best_block_hash()

    zmq_thread = Thread(target=chain_monitor.monitor_chain_zmq, daemon=True)
    zmq_thread.start()

    # Queues should start empty
    assert chain_monitor.responder_queue.empty()

    # And have a new block every time we generate one
    for _ in range(3):
        generate_block()
        chain_monitor.responder_queue.get()
        assert chain_monitor.responder_queue.empty()
def test_monitor_chain_single_update(db_manager, block_processor):
    # This test checks that if both threads try to add the same block to the queue, only the first one will make it
    chain_monitor = ChainMonitor(Queue(), Queue(), block_processor,
                                 bitcoind_feed_params)

    chain_monitor.best_tip = None
    chain_monitor.polling_delta = 2

    # We will create a block and wait for the polling thread. Then check the queues to see that the block hash has only
    # been added once.
    chain_monitor.monitor_chain()
    generate_block()

    watcher_block = chain_monitor.watcher_queue.get()
    responder_block = chain_monitor.responder_queue.get()
    assert watcher_block == responder_block
    assert chain_monitor.watcher_queue.empty()
    assert chain_monitor.responder_queue.empty()

    # The delta for polling is 2 secs, so let's wait and see
    time.sleep(2)
    assert chain_monitor.watcher_queue.empty()
    assert chain_monitor.responder_queue.empty()

    # We can also force an update and see that it won't go through
    assert chain_monitor.update_state(watcher_block) is False
Example #14
def test_rebroadcast(db_manager, carrier, block_processor):
    responder = Responder(db_manager, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    txs_to_rebroadcast = []

    # Rebroadcast calls add_response with retry=True. The tracker data is already in trackers.
    for i in range(20):
        uuid = uuid4().hex
        locator, dispute_txid, penalty_txid, penalty_rawtx, appointment_end = create_dummy_tracker_data(
            penalty_rawtx=create_dummy_transaction().hex())

        tracker = TransactionTracker(locator, dispute_txid, penalty_txid,
                                     penalty_rawtx, appointment_end)

        responder.trackers[uuid] = {
            "locator": locator,
            "penalty_txid": penalty_txid,
            "appointment_end": appointment_end,
        }

        # We need to add it to the db too
        responder.db_manager.create_triggered_appointment_flag(uuid)
        responder.db_manager.store_responder_tracker(uuid, tracker.to_json())

        responder.tx_tracker_map[penalty_txid] = [uuid]
        responder.unconfirmed_txs.append(penalty_txid)

        # Let's add some of the txs in the rebroadcast list
        if (i % 2) == 0:
            txs_to_rebroadcast.append(penalty_txid)

    # The block_hash passed to rebroadcast does not matter much for now. It will in the future, to deal with errors
    receipts = responder.rebroadcast(txs_to_rebroadcast)

    # All txs should have been delivered and their missed confirmation counters reset
    for txid, receipt in receipts:
        # Sanity check
        assert txid in txs_to_rebroadcast

        assert receipt.delivered is True
        assert responder.missed_confirmations[txid] == 0
Example #15
def responder(db_manager, gatekeeper, carrier, block_processor):
    responder = Responder(db_manager, gatekeeper, carrier, block_processor)
    chain_monitor = ChainMonitor([Queue(), responder.block_queue],
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()
    responder_thread = responder.awake()
    chain_monitor.activate()

    yield responder

    chain_monitor.terminate()
    responder_thread.join()
Example #16
def watcher(db_manager):
    block_processor = BlockProcessor(bitcoind_connect_params)
    carrier = Carrier(bitcoind_connect_params)

    responder = Responder(db_manager, carrier, block_processor)
    watcher = Watcher(
        db_manager,
        block_processor,
        responder,
        signing_key.to_der(),
        config.get("MAX_APPOINTMENTS"),
        config.get("EXPIRY_DELTA"),
    )

    chain_monitor = ChainMonitor(watcher.block_queue,
                                 watcher.responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    return watcher
Example #17
def watcher(db_manager, gatekeeper):
    block_processor = BlockProcessor(bitcoind_connect_params)
    carrier = Carrier(bitcoind_connect_params)

    responder = Responder(db_manager, gatekeeper, carrier, block_processor)
    watcher = Watcher(
        db_manager,
        gatekeeper,
        block_processor,
        responder,
        signing_key.to_der(),
        MAX_APPOINTMENTS,
        config.get("LOCATOR_CACHE_SIZE"),
    )

    chain_monitor = ChainMonitor(
        watcher.block_queue, watcher.responder.block_queue, block_processor, bitcoind_feed_params
    )
    chain_monitor.monitor_chain()

    return watcher
Example #18
def test_monitor_chain_zmq(block_processor):
    responder_queue = Queue()
    chain_monitor = ChainMonitor([Queue(), responder_queue], block_processor, bitcoind_feed_params)
    chain_monitor.last_tips = [block_processor.get_best_block_hash()]

    zmq_thread = Thread(target=chain_monitor.monitor_chain_zmq, daemon=True)
    zmq_thread.start()

    # the internal queue should start empty
    assert chain_monitor.queue.empty()

    # And have a new block every time we generate one
    for _ in range(3):
        generate_blocks(1)

        chain_monitor.queue.get()
        assert chain_monitor.queue.empty()

    chain_monitor.terminate()
    # The zmq thread needs a block generation to release from the recv method.
    generate_blocks(1)
Example #19
def test_monitor_chain_polling(block_processor_mock, monkeypatch):
    # Monkeypatch the BlockProcessor so the best tip remains unchanged
    fixed_tip = get_random_value_hex(32)
    monkeypatch.setattr(block_processor_mock, "get_best_block_hash", lambda blocking: fixed_tip)

    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor_mock, bitcoind_feed_params)
    chain_monitor.last_tips = [fixed_tip]
    chain_monitor.polling_delta = 0.1

    # monitor_chain_polling runs until not terminated
    polling_thread = Thread(target=chain_monitor.monitor_chain_polling, daemon=True)
    polling_thread.start()

    # Check that nothing changes as long as a block is not generated
    for _ in range(5):
        assert chain_monitor.queue.empty()
        time.sleep(0.1)

    # And that it does if we generate a block
    monkeypatch.setattr(block_processor_mock, "get_best_block_hash", lambda blocking: get_random_value_hex(32))
    time.sleep(0.1)

    chain_monitor.queue.get()
    assert chain_monitor.queue.empty()

    # Check that the bitcoind_reachable event is cleared if the connection is lost, and set once it's recovered
    monkeypatch.setattr(block_processor_mock, "get_best_block_hash", mock_connection_refused_return)
    time.sleep(0.5)
    assert not chain_monitor.bitcoind_reachable.is_set()
    monkeypatch.delattr(block_processor_mock, "get_best_block_hash")
    time.sleep(0.5)
    assert chain_monitor.bitcoind_reachable.is_set()

    chain_monitor.terminate()
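The reachability assertions at the end suggest each poll is wrapped in connection handling of roughly this shape (hedged sketch; it assumes mock_connection_refused_return raises ConnectionRefusedError and that blocking is an argument of get_best_block_hash, as the monkeypatched lambdas imply):

def poll_once_sketch(monitor):
    # Hedged sketch of a single polling iteration with connection handling
    try:
        current_tip = monitor.block_processor.get_best_block_hash(blocking=False)
        monitor.bitcoind_reachable.set()
        monitor.enqueue(current_tip)
    except ConnectionRefusedError:
        # bitcoind is unreachable; clear the event so other components can wait on it
        monitor.bitcoind_reachable.clear()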
Example #20
def run_api(db_manager, carrier, block_processor):
    sk, pk = generate_keypair()

    responder = Responder(db_manager, carrier, block_processor)
    watcher = Watcher(db_manager, block_processor, responder, sk.to_der(),
                      config.get("MAX_APPOINTMENTS"),
                      config.get("EXPIRY_DELTA"))

    chain_monitor = ChainMonitor(watcher.block_queue,
                                 watcher.responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    watcher.awake()
    chain_monitor.monitor_chain()

    api_thread = Thread(
        target=API(Inspector(block_processor, config.get("MIN_TO_SELF_DELAY")),
                   watcher).start)
    api_thread.daemon = True
    api_thread.start()

    # It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail)
    sleep(0.1)
Example #21
def test_init(block_processor_mock):
    # Not much to test here, just sanity checks to make sure nothing goes south in the future
    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor_mock, bitcoind_feed_params)

    assert chain_monitor.status == ChainMonitorStatus.IDLE
    assert isinstance(chain_monitor.last_tips, list) and len(chain_monitor.last_tips) == 0
    assert isinstance(chain_monitor.check_tip, Event)
    assert isinstance(chain_monitor.lock, Condition)
    assert isinstance(chain_monitor.zmqSubSocket, zmq.Socket)

    assert isinstance(chain_monitor.receiving_queues[0], Queue)
    assert isinstance(chain_monitor.receiving_queues[1], Queue)
Example #22
def test_terminate(block_processor):
    queue = Queue()
    chain_monitor = ChainMonitor([queue, Queue()], block_processor, bitcoind_feed_params)
    chain_monitor.polling_delta = 0.1

    chain_monitor.monitor_chain()
    chain_monitor.activate()

    chain_monitor.terminate()

    assert chain_monitor.status == ChainMonitorStatus.TERMINATED

    # generate a new block
    generate_blocks(1)
    time.sleep(0.11)  # wait longer than the polling_delta

    # there should be only the ChainMonitor.END_MESSAGE message in the receiving queue, as the new block was generated
    # after terminating
    assert queue.qsize() == 1
    assert queue.get() == ChainMonitor.END_MESSAGE
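The assertions above suggest a termination path of roughly this shape (hedged sketch, not the actual method):

def terminate_sketch(monitor):
    # Hedged sketch: flip the status so the threads exit, then push a final marker for subscribers
    monitor.status = ChainMonitorStatus.TERMINATED
    monitor.queue.put(ChainMonitor.END_MESSAGE)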
Example #23
def test_init(block_processor):
    # run_bitcoind is started here instead of later on to avoid race conditions while it initializes

    # Not much to test here, just sanity checks to make sure nothing goes south in the future
    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)

    assert chain_monitor.status == ChainMonitorStatus.IDLE
    assert isinstance(chain_monitor.last_tips, list) and len(chain_monitor.last_tips) == 0
    assert isinstance(chain_monitor.check_tip, Event)
    assert isinstance(chain_monitor.lock, Condition)
    assert isinstance(chain_monitor.zmqSubSocket, zmq.Socket)

    assert isinstance(chain_monitor.receiving_queues[0], Queue)
    assert isinstance(chain_monitor.receiving_queues[1], Queue)
Example #24
def test_monitor_chain_and_activate(block_processor):
    # In this test, we generate some blocks after `monitor_chain`, then `activate` and generate a few more blocks.
    # We verify that all the generated blocks are indeed sent to the queues in the right order.

    queue1 = Queue()
    queue2 = Queue()

    # We add some initial blocks to the receiving queues, to simulate a bootstrap with previous information
    pre_blocks = [get_random_value_hex(32) for _ in range(5)]
    for block in pre_blocks:
        queue1.put(block)
        queue2.put(block)

    # We don't activate the ChainMonitor but we start listening; therefore received blocks should accumulate in the
    # internal queue
    chain_monitor = ChainMonitor([queue1, queue2], block_processor, bitcoind_feed_params)
    chain_monitor.polling_delta = 0.1

    chain_monitor.monitor_chain()
    assert chain_monitor.status == ChainMonitorStatus.LISTENING

    # we generate some blocks while the monitor is listening but not active
    init_blocks = generate_blocks_with_delay(3, 0.15)

    time.sleep(0.11)  # higher than the polling interval

    chain_monitor.activate()

    # generate some more blocks after activating
    after_blocks = generate_blocks_with_delay(3, 0.15)

    # we now check that all the blocks are in the receiving queues in the correct order
    all_blocks = pre_blocks + init_blocks + after_blocks
    for block in all_blocks:
        assert queue1.get(timeout=0.1) == block
        assert queue2.get(timeout=0.1) == block

    chain_monitor.terminate()
    # The zmq thread needs a block generation to release from the recv method.
    generate_blocks(1)
Example #25
def test_terminate(block_processor_mock, monkeypatch):
    # Test that the ChainMonitor is stopped on a terminate signal
    queue = Queue()
    chain_monitor = ChainMonitor([queue, Queue()], block_processor_mock, bitcoind_feed_params)
    chain_monitor.polling_delta = 0.1

    # Activate the monitor
    chain_monitor.monitor_chain()
    chain_monitor.activate()

    # Ask it to terminate
    chain_monitor.terminate()
    assert chain_monitor.status == ChainMonitorStatus.TERMINATED

    # Mock the generation of a new block
    monkeypatch.setattr(block_processor_mock, "get_best_block_hash", lambda blocking: get_random_value_hex(32))
    time.sleep(0.11)  # wait longer than the polling_delta

    # there should be only the ChainMonitor.END_MESSAGE message in the receiving queue, as the new block was generated
    # after terminating
    assert queue.qsize() == 1
    assert queue.get() == ChainMonitor.END_MESSAGE
Example #26
def test_manage_subscription_expiry(gatekeeper):
    # Subscriptions expire at their expiry height, but the data is only deleted once outdated (expiry_delta blocks later)
    current_height = gatekeeper.block_processor.get_block_count()
    expiring_users = {
        get_random_value_hex(32):
        UserInfo(available_slots=10, subscription_expiry=current_height + 1)
        for _ in range(10)
    }
    gatekeeper.registered_users.update(expiring_users)

    # We will need a ChainMonitor instance for this so data can be fed to us
    bitcoind_feed_params = {
        k: v
        for k, v in config.items() if k.startswith("BTC_FEED")
    }
    chain_monitor = ChainMonitor([gatekeeper.block_queue],
                                 gatekeeper.block_processor,
                                 bitcoind_feed_params)
    chain_monitor.monitor_chain()
    chain_monitor.activate()

    # Users expire after this block. Check that they are currently not expired
    for user_id in expiring_users.keys():
        has_subscription_expired, _ = gatekeeper.has_subscription_expired(
            user_id)
        assert not has_subscription_expired

    # Generate a block and users must have expired
    generate_blocks_with_delay(1)
    for user_id in expiring_users.keys():
        has_subscription_expired, _ = gatekeeper.has_subscription_expired(
            user_id)
        assert has_subscription_expired

    # Users will remain in the registered_users dictionary until expiry_delta blocks later.
    generate_blocks_with_delay(gatekeeper.expiry_delta - 1)
    # Users will be deleted in the next block
    assert set(expiring_users).issubset(gatekeeper.registered_users)

    generate_blocks_with_delay(1)
    # Data has just been deleted but should still be present on the cache
    block_height_deletion = gatekeeper.block_processor.get_block_count()
    assert not set(expiring_users).issubset(gatekeeper.registered_users)
    for user_id, _ in expiring_users.items():
        assert not gatekeeper.user_db.load_user(user_id)
    assert gatekeeper.outdated_users_cache[block_height_deletion].keys() == expiring_users.keys()

    # After OUTDATED_USERS_CACHE_SIZE_BLOCKS blocks the data should not be there anymore (check one block before it is dropped and the block where it is dropped)
    generate_blocks_with_delay(OUTDATED_USERS_CACHE_SIZE_BLOCKS - 1)
    assert block_height_deletion in gatekeeper.outdated_users_cache
    generate_blocks_with_delay(1)
    assert block_height_deletion not in gatekeeper.outdated_users_cache
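For reference, the expiry check exercised above behaves, as far as these assertions show, like the following hedged sketch (not the actual Gatekeeper code; the second element of the returned tuple is not observed by the test):

def has_subscription_expired_sketch(gatekeeper, user_id):
    # Hedged sketch: a subscription counts as expired once the chain reaches its expiry height
    expiry = gatekeeper.registered_users[user_id].subscription_expiry
    current_height = gatekeeper.block_processor.get_block_count()
    return current_height >= expiry, expiry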
def test_init(run_bitcoind, block_processor):
    # run_bitcoind is started here instead of later on to avoid race conditions while it initializes

    # Not much to test here, just sanity checks to make sure nothing goes south in the future
    chain_monitor = ChainMonitor(Queue(), Queue(), block_processor,
                                 bitcoind_feed_params)

    assert chain_monitor.best_tip is None
    assert isinstance(chain_monitor.last_tips, list) and len(
        chain_monitor.last_tips) == 0
    assert chain_monitor.terminate is False
    assert isinstance(chain_monitor.check_tip, Event)
    assert isinstance(chain_monitor.lock, Condition)
    assert isinstance(chain_monitor.zmqSubSocket, zmq.Socket)

    # The Queues and asleep flags are initialized when attaching the corresponding subscriber
    assert isinstance(chain_monitor.watcher_queue, Queue)
    assert isinstance(chain_monitor.responder_queue, Queue)
Example #28
def test_activate(block_processor):
    # Not much to test here, this should launch two threads (one per monitor approach) and finish on terminate
    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()
    chain_monitor.activate()
    assert chain_monitor.status == ChainMonitorStatus.ACTIVE

    # last_tips is updated before starting the threads, so it should not be empty now.
    assert len(chain_monitor.last_tips) > 0

    # Blocks should be received
    for _ in range(5):
        generate_blocks(1)
        watcher_block = chain_monitor.receiving_queues[0].get()
        responder_block = chain_monitor.receiving_queues[1].get()
        assert watcher_block == responder_block
        assert chain_monitor.receiving_queues[0].empty()
        assert chain_monitor.receiving_queues[1].empty()

    chain_monitor.terminate()
    # The zmq thread needs a block generation to release from the recv method.
    generate_blocks(1)
def test_update_state(block_processor):
    # The state is updated after receiving a new block (and only if the block is not already known).
    # Let's start by setting a best_tip and a couple of old tips
    new_block_hash = get_random_value_hex(32)
    chain_monitor = ChainMonitor(Queue(), Queue(), block_processor,
                                 bitcoind_feed_params)
    chain_monitor.best_tip = new_block_hash
    chain_monitor.last_tips = [get_random_value_hex(32) for _ in range(5)]

    # Now we can try to update the state with an old best_tip and see how it doesn't work
    assert chain_monitor.update_state(chain_monitor.last_tips[0]) is False

    # Same should happen with the current tip
    assert chain_monitor.update_state(chain_monitor.best_tip) is False

    # The state should be correctly updated with a new block hash, the chain tip should change and the old tip should
    # have been added to the last_tips
    another_block_hash = get_random_value_hex(32)
    assert chain_monitor.update_state(another_block_hash) is True
    assert chain_monitor.best_tip == another_block_hash and new_block_hash == chain_monitor.last_tips[-1]
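These assertions fully determine the older update_state contract; a minimal sketch consistent with them:

def update_state_sketch(monitor, block_hash):
    # Hedged sketch, not the actual ChainMonitor method
    if block_hash == monitor.best_tip or block_hash in monitor.last_tips:
        return False

    monitor.last_tips.append(monitor.best_tip)
    monitor.best_tip = block_hash
    return True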
Example #30
def test_monitor_chain(block_processor):
    # We don't activate it but we start listening; therefore received blocks should accumulate in the internal queue
    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)
    chain_monitor.polling_delta = 0.1

    chain_monitor.monitor_chain()
    assert chain_monitor.status == ChainMonitorStatus.LISTENING

    # The tip is updated before starting the threads, so it should have been added to last_tips.
    assert len(chain_monitor.last_tips) > 0

    # Blocks should be received and added to the queue
    count = 0
    for _ in range(5):
        generate_blocks(1)
        count += 1
        time.sleep(0.11)  # higher than the polling interval
        assert chain_monitor.receiving_queues[0].empty()
        assert chain_monitor.receiving_queues[1].empty()
        assert chain_monitor.queue.qsize() == count

    chain_monitor.terminate()
    # The zmq thread needs a block generation to release from the recv method.
    generate_blocks(1)