def test_monitor_chain_polling(db_manager, block_processor):
    # Exercise the polling-based chain monitor feeding the Watcher queue.
    queue_for_watcher = Queue()
    monitor = ChainMonitor(queue_for_watcher, Queue(), block_processor,
                           bitcoind_feed_params)
    monitor.best_tip = block_processor.get_best_block_hash()
    monitor.polling_delta = 0.1

    # The polling loop keeps running until `terminate` is flipped.
    poller = Thread(target=monitor.monitor_chain_polling, daemon=True)
    poller.start()

    # With no new block mined, the queue must stay empty across several polls.
    checks = 0
    while checks < 5:
        assert monitor.watcher_queue.empty()
        time.sleep(0.1)
        checks += 1

    # Mining a block should push exactly one hash into the queue.
    generate_block()

    monitor.watcher_queue.get()
    assert monitor.watcher_queue.empty()

    monitor.terminate = True
    poller.join()
# ---- example 2 (extraction-artifact separator, neutralized) ----
def test_on_sync(run_bitcoind, responder, block_processor):
    # Being at most one block behind the tip still counts as synced.
    tip = block_processor.get_best_block_hash()
    assert responder.on_sync(tip) is True

    # Mining one block leaves `tip` a single block behind: still in sync.
    generate_block()
    assert responder.on_sync(tip) is True
# ---- example 3 (extraction-artifact separator, neutralized) ----
def test_request_appointment_responder(new_appt_data):
    # Same flow as the watcher test, but the dispute tx actually hits the network.
    locator = new_appt_data["appointment"]["locator"]
    dispute_tx = locator_dispute_tx_map[locator]
    bitcoin_cli(bitcoind_connect_params).sendrawtransaction(dispute_tx)

    response = add_appointment(new_appt_data)
    assert response.status_code == 200

    # Mining a block makes the watcher spot the dispute and hand it over.
    generate_block()

    response = requests.get(url=TEOS_API + "/get_appointment?locator=" +
                            locator)
    assert response.status_code == 200

    received_appointments = json.loads(response.content)
    statuses = [appt.pop("status") for appt in received_appointments]
    locators = [appt["locator"] for appt in received_appointments]

    # Exactly one appointment comes back, it matches our locator, and it is
    # flagged as already responded to.
    assert locator in locators and len(received_appointments) == 1
    assert all(status == "dispute_responded"
               for status in statuses) and len(statuses) == 1
def test_monitor_chain_single_update(db_manager, block_processor):
    # Both monitoring threads may see the same block; only the first delivery
    # of a given hash is allowed into the queues.
    monitor = ChainMonitor(Queue(), Queue(), block_processor,
                           bitcoind_feed_params)

    monitor.best_tip = None
    monitor.polling_delta = 2

    # Mine one block and let the two threads race on it: each queue must end
    # up with a single copy of the same hash.
    monitor.monitor_chain()
    generate_block()

    block_via_watcher = monitor.watcher_queue.get()
    block_via_responder = monitor.responder_queue.get()
    assert block_via_watcher == block_via_responder
    assert monitor.watcher_queue.empty()
    assert monitor.responder_queue.empty()

    # After a full polling cycle (2s) nothing new should show up.
    time.sleep(2)
    assert monitor.watcher_queue.empty()
    assert monitor.responder_queue.empty()

    # Re-feeding the already-known hash must be rejected.
    assert monitor.update_state(block_via_watcher) is False
def test_update_states_responder_misses_more(run_bitcoind, db_manager,
                                             gatekeeper, carrier,
                                             block_processor):
    # The Watcher has seen every block while the Responder missed the first.
    watcher = Watcher(
        db_manager=db_manager,
        gatekeeper=gatekeeper,
        block_processor=block_processor,
        responder=Responder(db_manager, gatekeeper, carrier, block_processor),
        sk_der=generate_keypair()[0].to_der(),
        max_appointments=config.get("MAX_APPOINTMENTS"),
        blocks_in_cache=config.get("LOCATOR_CACHE_SIZE"),
    )

    mined = []
    for _ in range(5):
        generate_block()
        mined.append(bitcoin_cli(bitcoind_connect_params).getbestblockhash())

    # update_states must leave both components at the common tip.
    watcher.awake()
    watcher.responder.awake()
    Builder.update_states(watcher, mined, mined[1:])

    assert db_manager.load_last_block_hash_watcher() == mined[-1]
    assert watcher.responder.last_known_block == mined[-1]
def test_monitor_chain_zmq(db_manager, block_processor):
    # Exercise the zmq-based chain monitor feeding the Responder queue.
    queue_for_responder = Queue()
    monitor = ChainMonitor(Queue(), queue_for_responder, block_processor,
                           bitcoind_feed_params)
    monitor.best_tip = block_processor.get_best_block_hash()

    listener = Thread(target=monitor.monitor_chain_zmq, daemon=True)
    listener.start()

    # Nothing should be queued before a block is mined.
    assert monitor.responder_queue.empty()

    # Each mined block must produce exactly one queue entry.
    mined = 0
    while mined < 3:
        generate_block()
        monitor.responder_queue.get()
        assert monitor.responder_queue.empty()
        mined += 1
def test_get_missed_blocks(block_processor):
    # Remember the current tip, then mine past it.
    starting_tip = block_processor.get_best_block_hash()

    mined_hashes = []
    for _ in range(5):
        generate_block()
        mined_hashes.append(block_processor.get_best_block_hash())

    # Everything mined after the starting tip counts as missed.
    assert block_processor.get_missed_blocks(starting_tip) == mined_hashes

    # Using the first mined block as the target drops it from the result...
    first_mined = mined_hashes[0]
    assert block_processor.get_missed_blocks(first_mined) != mined_hashes

    # ...and the result then matches the remainder exactly.
    assert block_processor.get_missed_blocks(first_mined) == mined_hashes[1:]
# ---- example 8 (extraction-artifact separator, neutralized) ----
def test_update_states_watcher_misses_more(db_manager, carrier,
                                           block_processor):
    # Mirror of the responder-misses case: the Responder has every block and
    # the Watcher is missing the first one.
    watcher = Watcher(
        db_manager=db_manager,
        block_processor=block_processor,
        responder=Responder(db_manager, carrier, block_processor),
        sk_der=None,
        max_appointments=config.get("MAX_APPOINTMENTS"),
        expiry_delta=config.get("EXPIRY_DELTA"),
    )

    mined = []
    for _ in range(5):
        generate_block()
        mined.append(bitcoin_cli(bitcoind_connect_params).getbestblockhash())

    # update_states must leave both components at the common tip.
    watcher.awake()
    watcher.responder.awake()
    Builder.update_states(watcher, mined[1:], mined)

    assert db_manager.load_last_block_hash_watcher() == mined[-1]
    assert db_manager.load_last_block_hash_responder() == mined[-1]
# ---- example 9 (extraction-artifact separator, neutralized) ----
def test_update_states_responder_misses_more(run_bitcoind, db_manager, carrier,
                                             block_processor):
    # NOTE(review): this name duplicates an earlier test in the file; pytest
    # will only collect the later definition. Kept as-is to avoid interface
    # changes — confirm which version belongs here.
    watcher = Watcher(
        db_manager=db_manager,
        block_processor=block_processor,
        responder=Responder(db_manager, carrier, block_processor),
        sk_der=None,
        max_appointments=config.get("MAX_APPOINTMENTS"),
        expiry_delta=config.get("EXPIRY_DELTA"),
    )

    mined = []
    for _ in range(5):
        generate_block()
        mined.append(bitcoin_cli(bitcoind_connect_params).getbestblockhash())

    # update_states must leave both components at the common tip.
    watcher.awake()
    watcher.responder.awake()
    Builder.update_states(watcher, mined, mined[1:])

    assert db_manager.load_last_block_hash_watcher() == mined[-1]
    assert watcher.responder.last_known_block == mined[-1]
def test_monitor_chain(db_manager, block_processor):
    # monitor_chain spins up both threads (zmq + polling); beyond block
    # delivery there is little to check other than clean termination.
    monitor = ChainMonitor(Queue(), Queue(), block_processor,
                           bitcoind_feed_params)

    monitor.best_tip = None
    monitor.monitor_chain()

    # best_tip is refreshed before the threads start, so it cannot stay None.
    assert monitor.best_tip is not None

    # Every mined block must land exactly once in each queue.
    for _ in range(5):
        generate_block()
        from_watcher = monitor.watcher_queue.get()
        from_responder = monitor.responder_queue.get()
        assert from_watcher == from_responder
        assert monitor.watcher_queue.empty()
        assert monitor.responder_queue.empty()

    # Flag termination; the zmq thread only notices once recv() returns,
    # which takes one more block.
    monitor.terminate = True
    generate_block()
# ---- example 11 (extraction-artifact separator, neutralized) ----
def test_update_states_watcher_misses_more(db_manager, gatekeeper, carrier,
                                           block_processor):
    # Mirror of the responder-misses case: the Responder has every block and
    # the Watcher is missing the first one.
    watcher = Watcher(
        db_manager=db_manager,
        gatekeeper=gatekeeper,
        block_processor=block_processor,
        responder=Responder(db_manager, gatekeeper, carrier, block_processor),
        sk_der=generate_keypair()[0].to_der(),
        max_appointments=config.get("MAX_APPOINTMENTS"),
        blocks_in_cache=config.get("LOCATOR_CACHE_SIZE"),
    )

    mined = []
    for _ in range(5):
        generate_block()
        mined.append(bitcoin_cli(bitcoind_connect_params).getbestblockhash())

    # update_states must leave both components at the common tip.
    watcher.awake()
    watcher.responder.awake()
    Builder.update_states(watcher, mined[1:], mined)

    assert db_manager.load_last_block_hash_watcher() == mined[-1]
    assert db_manager.load_last_block_hash_responder() == mined[-1]
# ---- example 12 (extraction-artifact separator, neutralized) ----
def test_do_watch(temp_db_manager, carrier, block_processor):
    # A brand-new responder keeps the test free of pre-existing state.
    responder = Responder(temp_db_manager, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    trackers = [
        create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
        for _ in range(20)
    ]

    # Register every tracker in memory and persist it to the db.
    for t in trackers:
        tracker_uuid = uuid4().hex

        responder.trackers[tracker_uuid] = {
            "locator": t.locator,
            "penalty_txid": t.penalty_txid,
            "appointment_end": t.appointment_end,
        }
        responder.tx_tracker_map[t.penalty_txid] = [tracker_uuid]
        responder.missed_confirmations[t.penalty_txid] = 0
        responder.unconfirmed_txs.append(t.penalty_txid)

        responder.db_manager.create_triggered_appointment_flag(tracker_uuid)
        responder.db_manager.store_responder_tracker(tracker_uuid, t.to_json())

    # Kick off the watching loop in the background.
    Thread(target=responder.do_watch, daemon=True).start()

    # Broadcast the first five penalty transactions.
    sent_txids = []
    for t in trackers[:5]:
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(t.penalty_rawtx)
        sent_txids.append(t.penalty_txid)

    # One confirmation...
    generate_block()

    # ...should remove the broadcast penalties from the unconfirmed list.
    assert not set(sent_txids).issubset(responder.unconfirmed_txs)

    # TODO: test that reorgs can be detected once data persistence is merged (new version of the simulator)

    # Five more confirmations complete those first five trackers.
    generate_blocks(5)

    assert not set(sent_txids).issubset(responder.tx_tracker_map)

    # Broadcast and fully confirm the remaining fifteen.
    sent_txids = []
    for t in trackers[5:]:
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(t.penalty_rawtx)
        sent_txids.append(t.penalty_txid)

    generate_blocks(6)

    assert len(responder.tx_tracker_map) == 0