Code example #1
def test_monitor_chain_single_update(block_processor):
    # This test checks that if both threads try to add the same block to the queue, only the first one makes it through
    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)

    chain_monitor.polling_delta = 2

    # We will create a block and wait for the polling thread. Then check the queues to see that the block hash has only
    # been added once.
    chain_monitor.monitor_chain()
    chain_monitor.activate()
    generate_blocks(1)

    assert len(chain_monitor.receiving_queues) == 2

    queue0_block = chain_monitor.receiving_queues[0].get()
    queue1_block = chain_monitor.receiving_queues[1].get()
    assert queue0_block == queue1_block
    assert chain_monitor.receiving_queues[0].empty()
    assert chain_monitor.receiving_queues[1].empty()

    # The delta for polling is 2 secs, so let's wait and see
    time.sleep(2)
    assert chain_monitor.receiving_queues[0].empty()
    assert chain_monitor.receiving_queues[1].empty()

    # We can also force an update and see that it won't go through
    assert chain_monitor.enqueue(queue0_block) is False

    chain_monitor.terminate()
    # The zmq thread needs a new block to be mined to release it from its blocking recv call.
    generate_blocks(1)
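
A note on the helpers: every example in this section leans on a generate_blocks test helper whose definition is not shown. A minimal sketch, assuming bitcoin_cli is an RPC proxy to a regtest bitcoind (the return value matters, since later examples capture the mined block hashes):

def generate_blocks(n):
    # Mine n regtest blocks to a throwaway wallet address and return their hashes.
    # generatetoaddress is the standard bitcoind RPC for this.
    address = bitcoin_cli.getnewaddress()
    return bitcoin_cli.generatetoaddress(n, address)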
Code example #2
def test_on_sync(responder, block_processor):
    # We're on sync if we're at most one block behind the tip
    chain_tip = block_processor.get_best_block_hash()
    assert responder.on_sync(chain_tip) is True

    generate_blocks(1)
    assert responder.on_sync(chain_tip) is True
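
The comment above fully pins down the semantics under test. A plausible sketch of on_sync (an assumption, not necessarily the tower's actual code), written in terms of the get_distance_to_tip method exercised in a later example:

def on_sync(self, block_hash):
    # We are on sync when the given block is at most one block behind the best tip
    return self.block_processor.get_distance_to_tip(block_hash) <= 1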
Code example #3
def test_locator_cache_init(block_processor):
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    # Generate enough blocks so the cache can start full
    generate_blocks(2 * locator_cache.cache_size)

    locator_cache.init(block_processor.get_best_block_hash(), block_processor)
    assert len(locator_cache.blocks) == locator_cache.cache_size
    for k, v in locator_cache.blocks.items():
        assert block_processor.get_block(k)
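
Given these assertions, a hedged sketch of what LocatorCache.init is expected to do: walk back from the given tip and cache the locators of the last cache_size blocks. compute_locator (a txid-to-locator derivation) and the blocks/cache attributes are assumptions inferred from how the tests use them:

def init(self, tip_hash, block_processor):
    # Collect up to cache_size blocks ending at the given tip
    block_hash, trail = tip_hash, []
    while block_hash and len(trail) < self.cache_size:
        block = block_processor.get_block(block_hash)
        if not block:
            break
        trail.append((block_hash, block.get("tx", [])))
        block_hash = block.get("previousblockhash")

    # Store them oldest first, so the tip ends up as the last key
    for bh, txids in reversed(trail):
        self.blocks[bh] = [compute_locator(txid) for txid in txids]
        for txid in txids:
            self.cache[compute_locator(txid)] = txid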
Code example #4
def test_get_distance_to_tip(block_processor):
    # get_distance_to_tip returns the number of blocks between a given block hash and the best chain tip
    target_distance = 5

    target_block = block_processor.get_best_block_hash()

    # Mine some blocks up to the target distance
    generate_blocks(target_distance)

    # Check if the distance is properly computed
    assert block_processor.get_distance_to_tip(target_block) == target_distance
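
A minimal sketch consistent with the assertion above, assuming get_block returns bitcoind's getblock data (which includes a height field):

def get_distance_to_tip(self, target_block_hash):
    # Distance is simply the height difference between the best tip and the target
    tip_height = self.get_block(self.get_best_block_hash()).get("height")
    target_height = self.get_block(target_block_hash).get("height")
    return tip_height - target_height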
Code example #5
def test_locator_cache_init_not_enough_blocks(block_processor):
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
    # Make sure there are at least 3 blocks
    block_count = block_processor.get_block_count()
    if block_count < 3:
        generate_blocks(3 - block_count)

    # Simulate that only three blocks exist by initializing from the block at height 2
    third_block_hash = bitcoin_cli.getblockhash(2)
    locator_cache.init(third_block_hash, block_processor)
    assert len(locator_cache.blocks) == 3
    for k, v in locator_cache.blocks.items():
        assert block_processor.get_block(k)
Code example #6
def test_fix_cache(block_processor):
    # This tests how a reorg will create a new version of the cache
    # Let's start by setting a full cache. We'll mine ``cache_size`` blocks to be sure it's full
    generate_blocks(config.get("LOCATOR_CACHE_SIZE"))

    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
    locator_cache.init(block_processor.get_best_block_hash(), block_processor)
    assert len(locator_cache.blocks) == locator_cache.cache_size

    # Now let's fake a reorg of less than ``cache_size``. We'll go two blocks into the past.
    current_tip = block_processor.get_best_block_hash()
    current_tip_locators = locator_cache.blocks[current_tip]
    current_tip_parent = block_processor.get_block(current_tip).get("previousblockhash")
    current_tip_parent_locators = locator_cache.blocks[current_tip_parent]
    fake_tip = block_processor.get_block(current_tip_parent).get("previousblockhash")
    locator_cache.fix(fake_tip, block_processor)

    # The last two blocks are no longer in the cache, nor are any of their locators
    assert current_tip not in locator_cache.blocks and current_tip_parent not in locator_cache.blocks
    for locator in current_tip_parent_locators + current_tip_locators:
        assert locator not in locator_cache.cache

    # The fake tip is the new tip, and two additional blocks are at the bottom
    assert fake_tip in locator_cache.blocks and list(locator_cache.blocks.keys())[-1] == fake_tip
    assert len(locator_cache.blocks) == locator_cache.cache_size

    # Test the same for a full-cache reorg. We can simulate this by adding more blocks than the cache can fit and
    # triggering a fix. We keep a copy of the old cache data to compare against.
    old_cache_blocks = deepcopy(locator_cache.blocks)

    generate_blocks((config.get("LOCATOR_CACHE_SIZE") * 2))
    locator_cache.fix(block_processor.get_best_block_hash(), block_processor)

    # None of the data from the old cache is in the new cache
    for block_hash, locators in old_cache_blocks.items():
        assert block_hash not in locator_cache.blocks
        for locator in locators:
            assert locator not in locator_cache.cache

    # The data in the new cache corresponds to the last ``cache_size`` blocks.
    block_count = block_processor.get_block_count()
    for i in range(block_count, block_count - locator_cache.cache_size, -1):
        block_hash = bitcoin_cli.getblockhash(i)
        assert block_hash in locator_cache.blocks
        for locator in locator_cache.blocks[block_hash]:
            assert locator in locator_cache.cache
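
The simplest behaviour consistent with both reorg scenarios above (partial and full) is a rebuild from the new tip, so the cache again holds exactly the last cache_size blocks of the best chain. A hedged sketch; the real fix may well work incrementally:

def fix(self, new_tip_hash, block_processor):
    # Rebuild a fresh cache from the new tip and swap it in
    fresh = LocatorCache(self.cache_size)
    fresh.init(new_tip_hash, block_processor)
    self.blocks, self.cache = fresh.blocks, fresh.cache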
Code example #7
def test_send_double_spending_transaction(carrier):
    # We can test what happens if the same transaction is sent twice
    tx = create_commitment_tx()
    txid = bitcoin_cli.decoderawtransaction(tx).get("txid")

    receipt = carrier.send_transaction(tx, txid)
    sent_txs.append(txid)

    # Mine a couple of blocks so the transaction confirms. Issued receipts are reset in the Responder on every block,
    # so we do the same here.
    generate_blocks(2)
    carrier.issued_receipts = {}

    # Try to send it again
    receipt2 = carrier.send_transaction(tx, txid)

    # The carrier should report delivered=True for both, but in the second case the transaction was already delivered
    # (either by the carrier itself or by someone else)
    assert receipt.delivered is True
    assert receipt2.delivered is True and receipt2.confirmations >= 1 and receipt2.reason == RPC_VERIFY_ALREADY_IN_CHAIN
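
The second send relies on bitcoind rejecting the resend with RPC error -27 (RPC_VERIFY_ALREADY_IN_CHAIN, "transaction already in block chain"). A hypothetical sketch of how send_transaction might map that error to a delivered receipt; Receipt and the surrounding structure are assumptions modelled on what the test asserts:

from collections import namedtuple

Receipt = namedtuple("Receipt", ["delivered", "confirmations", "reason"], defaults=(False, 0, None))
RPC_VERIFY_ALREADY_IN_CHAIN = -27  # bitcoind's actual error code for this case

def send_transaction(self, rawtx, txid):
    try:
        bitcoin_cli.sendrawtransaction(rawtx)
        return Receipt(delivered=True)
    except JSONRPCException as e:
        if e.error.get("code") == RPC_VERIFY_ALREADY_IN_CHAIN:
            # Already mined: report it as delivered along with its confirmation count
            confs = bitcoin_cli.getrawtransaction(txid, 1).get("confirmations", 0)
            return Receipt(delivered=True, confirmations=confs, reason=RPC_VERIFY_ALREADY_IN_CHAIN)
        return Receipt(delivered=False, reason=e.error.get("code"))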
Code example #8
def test_get_all_appointments(teosd, rpc_client):
    _, teos_id = teosd

    # Check that there are no appointments so far
    all_appointments = json.loads(rpc_client.get_all_appointments())
    watching = all_appointments.get("watcher_appointments")
    responding = all_appointments.get("responder_trackers")
    assert len(watching) == 0 and len(responding) == 0

    # Register a user
    teos_client.register(user_id, teos_id, teos_base_endpoint)

    # After that we can build an appointment and send it to the tower
    commitment_tx, commitment_txid, penalty_tx = create_txs()
    appointment_data = build_appointment_data(commitment_txid, penalty_tx)
    appointment = teos_client.create_appointment(appointment_data)
    add_appointment(teos_id, appointment)

    # There should now be one appointment in the watcher
    all_appointments = json.loads(rpc_client.get_all_appointments())
    watching = all_appointments.get("watcher_appointments")
    responding = all_appointments.get("responder_trackers")
    assert len(watching) == 1 and len(responding) == 0

    # Trigger a breach and check again; now the appointment should be in the responder
    generate_block_with_transactions(commitment_tx)
    sleep(1)

    all_appointments = json.loads(rpc_client.get_all_appointments())
    watching = all_appointments.get("watcher_appointments")
    responding = all_appointments.get("responder_trackers")
    assert len(watching) == 0 and len(responding) == 1

    # Now let's mine enough blocks for the appointment to reach its end (at least 100 + EXPIRY_DELTA - 1)
    generate_blocks(100 + config.get("EXPIRY_DELTA"))
    sleep(1)

    # Now the appointment should not be in the tower, back to 0
    all_appointments = json.loads(rpc_client.get_all_appointments())
    watching = all_appointments.get("watcher_appointments")
    responding = all_appointments.get("responder_trackers")
    assert len(watching) == 0 and len(responding) == 0
Code example #9
def test_terminate(block_processor):
    queue = Queue()
    chain_monitor = ChainMonitor([queue, Queue()], block_processor, bitcoind_feed_params)
    chain_monitor.polling_delta = 0.1

    chain_monitor.monitor_chain()
    chain_monitor.activate()

    chain_monitor.terminate()

    assert chain_monitor.status == ChainMonitorStatus.TERMINATED

    # generate a new block
    generate_blocks(1)
    time.sleep(0.11)  # wait longer than the polling_delta

    # there should be only the ChainMonitor.END_MESSAGE message in the receiving queue, as the new block was generated
    # after terminating
    assert queue.qsize() == 1
    assert queue.get() == ChainMonitor.END_MESSAGE
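
A hedged sketch of the shutdown path this test pins down: terminate flips the status (which makes the monitoring loops exit) and forwards a sentinel so queue consumers know no more blocks will arrive:

def terminate(self):
    self.status = ChainMonitorStatus.TERMINATED
    # Tell every consumer that no more blocks will be delivered
    for receiving_queue in self.receiving_queues:
        receiving_queue.put(ChainMonitor.END_MESSAGE)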
Code example #10
def test_monitor_chain_polling(block_processor):
    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)
    chain_monitor.last_tips = [block_processor.get_best_block_hash()]
    chain_monitor.polling_delta = 0.1

    # monitor_chain_polling runs until terminated
    polling_thread = Thread(target=chain_monitor.monitor_chain_polling, daemon=True)
    polling_thread.start()

    # Check that nothing changes as long as a block is not generated
    for _ in range(5):
        assert chain_monitor.queue.empty()
        time.sleep(0.1)

    # And that it does if we generate a block
    generate_blocks(1)

    chain_monitor.queue.get()
    assert chain_monitor.queue.empty()

    chain_monitor.terminate()
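
A minimal sketch of the polling loop this test exercises (an assumption consistent with the behaviour above): poll the best tip every polling_delta seconds and hand any unseen hash to the internal queue:

def monitor_chain_polling(self):
    while self.status != ChainMonitorStatus.TERMINATED:
        # Only enqueue tips we have not seen before
        current_tip = self.block_processor.get_best_block_hash()
        if current_tip and current_tip not in self.last_tips:
            self.enqueue(current_tip)
        time.sleep(self.polling_delta)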
Code example #11
def test_monitor_chain_zmq(block_processor):
    responder_queue = Queue()
    chain_monitor = ChainMonitor([Queue(), responder_queue], block_processor, bitcoind_feed_params)
    chain_monitor.last_tips = [block_processor.get_best_block_hash()]

    zmq_thread = Thread(target=chain_monitor.monitor_chain_zmq, daemon=True)
    zmq_thread.start()

    # The internal queue should start empty
    assert chain_monitor.queue.empty()

    # And have a new block every time we generate one
    for _ in range(3):
        generate_blocks(1)

        chain_monitor.queue.get()
        assert chain_monitor.queue.empty()

    chain_monitor.terminate()
    # The zmq thread needs a new block to be mined to release it from its blocking recv call.
    generate_blocks(1)
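
A hedged sketch of the zmq side, assuming bitcoind_feed_params holds the host and port of a bitcoind -zmqpubhashblock endpoint. recv_multipart blocks until a notification arrives, which is why these tests mine one extra block after terminate() to release the thread:

import zmq

def monitor_chain_zmq(self):
    context = zmq.Context()
    socket = context.socket(zmq.SUB)
    socket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
    socket.connect("tcp://{}:{}".format(self.bitcoind_feed_params["host"], self.bitcoind_feed_params["port"]))
    while self.status != ChainMonitorStatus.TERMINATED:
        # bitcoind publishes three frames: topic, payload (block hash), sequence
        _topic, block_hash, _seq = socket.recv_multipart()
        self.enqueue(block_hash.hex())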
Code example #12
def test_monitor_chain_and_activate(block_processor):
    # In this test we generate some blocks after `monitor_chain`, then call `activate` and generate a few more blocks.
    # We verify that all the generated blocks are sent to the queues in the right order.

    queue1 = Queue()
    queue2 = Queue()

    # We add some initial blocks to the receiving queues, to simulate a bootstrap with previous information
    pre_blocks = [get_random_value_hex(32) for _ in range(5)]
    for block in pre_blocks:
        queue1.put(block)
        queue2.put(block)

    # We don't activate the ChainMonitor but we start listening; therefore received blocks should accumulate in the
    # internal queue
    chain_monitor = ChainMonitor([queue1, queue2], block_processor, bitcoind_feed_params)
    chain_monitor.polling_delta = 0.1

    chain_monitor.monitor_chain()
    assert chain_monitor.status == ChainMonitorStatus.LISTENING

    # We generate some blocks while the monitor is listening but not active
    init_blocks = generate_blocks_with_delay(3, 0.15)

    time.sleep(0.11)  # higher than the polling interval

    chain_monitor.activate()

    # generate some more blocks after activating
    after_blocks = generate_blocks_with_delay(3, 0.15)

    # We now check that all the blocks are in the receiving queues in the correct order
    all_blocks = pre_blocks + init_blocks + after_blocks
    for block in all_blocks:
        assert queue1.get(timeout=0.1) == block
        assert queue2.get(timeout=0.1) == block

    chain_monitor.terminate()
    # The zmq thread needs a new block to be mined to release it from its blocking recv call.
    generate_blocks(1)
Code example #13
def test_activate(block_processor):
    # Not much to test here: this should launch two threads (one per monitoring approach) and finish on terminate
    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()
    chain_monitor.activate()
    assert chain_monitor.status == ChainMonitorStatus.ACTIVE

    # last_tips is updated before starting the threads, so it should not be empty now.
    assert len(chain_monitor.last_tips) > 0

    # Blocks should be received
    for _ in range(5):
        generate_blocks(1)
        watcher_block = chain_monitor.receiving_queues[0].get()
        responder_block = chain_monitor.receiving_queues[1].get()
        assert watcher_block == responder_block
        assert chain_monitor.receiving_queues[0].empty()
        assert chain_monitor.receiving_queues[1].empty()

    chain_monitor.terminate()
    # The zmq thread needs a new block to be mined to release it from its blocking recv call.
    generate_blocks(1)
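
A hypothetical sketch of the deduplication Code example #1 relies on: the polling and zmq threads may both see the same block, but only the first enqueue call gets it into the internal queue; repeats return False:

def enqueue(self, block_hash):
    # Reject blocks we have already queued (this is what example #1 asserts on)
    if block_hash in self.last_tips:
        return False
    self.queue.put(block_hash)
    self.last_tips.append(block_hash)
    return True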
Code example #14
def test_find_last_common_ancestor(block_processor):
    # find_last_common_ancestor finds the last common block between the best tip and a given block
    ancestor = block_processor.get_best_block_hash()
    blocks = generate_blocks(3)
    best_block_hash = blocks[-1]

    # Create a fork (invalidate the next block after the ancestor and mine 4 blocks on top)
    fork(blocks[0], 4)

    # The last common ancestor between the old best and the new best should be the "ancestor"
    last_common_ancestor, dropped_txs = block_processor.find_last_common_ancestor(best_block_hash)
    assert last_common_ancestor == ancestor
    assert len(dropped_txs) == 3
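
A hedged sketch of the fork helper used above: invalidating a block makes the node abandon everything built on top of it, and mining afterwards extends the competing branch. invalidateblock is a standard bitcoind regtest RPC:

def fork(block_hash, blocks_to_mine):
    # Abandon the chain from block_hash onwards, then mine a replacement branch
    bitcoin_cli.invalidateblock(block_hash)
    return generate_blocks(blocks_to_mine)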
Code example #15
def test_has_subscription_expired(gatekeeper):
    user_info = UserInfo(available_slots=1, subscription_expiry=gatekeeper.block_processor.get_block_count() + 1)
    user_id = get_random_value_hex(32)
    gatekeeper.registered_users[user_id] = user_info

    # Check that the subscription is still live
    has_subscription_expired, expiry = gatekeeper.has_subscription_expired(user_id)
    assert not has_subscription_expired

    # Generating 1 additional block will expire the subscription
    generate_blocks(1)
    has_subscription_expired, expiry = gatekeeper.has_subscription_expired(user_id)
    assert has_subscription_expired

    # Check it remains expired afterwards
    generate_blocks(1)
    has_subscription_expired, expiry = gatekeeper.has_subscription_expired(user_id)
    assert has_subscription_expired
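
A minimal sketch matching the assertions above (an assumption, not the gatekeeper's actual code): the subscription is expired once the chain reaches its expiry height, which is why mining a single block flips the result:

def has_subscription_expired(self, user_id):
    expiry = self.registered_users[user_id].subscription_expiry
    # Expired once the current height reaches the expiry height
    return self.block_processor.get_block_count() >= expiry, expiry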
Code example #16
def test_get_missed_blocks(block_processor):
    target_block = block_processor.get_best_block_hash()

    # Generate some blocks and store the hash in a list
    missed_blocks = generate_blocks(5)

    # Check what we've missed
    assert block_processor.get_missed_blocks(target_block) == missed_blocks

    # We can see how it does not work if we replace the target with the first element in the list
    block_tip = missed_blocks[0]
    assert block_processor.get_missed_blocks(block_tip) != missed_blocks

    # But it does again if we skip that block
    assert block_processor.get_missed_blocks(block_tip) == missed_blocks[1:]
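
A hedged sketch matching these assertions: walk back from the best tip to the given block and return the hashes in between, oldest first. The given block itself is excluded, which is why passing missed_blocks[0] drops it from the result:

def get_missed_blocks(self, last_known_block_hash):
    missed, current = [], self.get_best_block_hash()
    while current and current != last_known_block_hash:
        missed.append(current)
        current = self.get_block(current).get("previousblockhash")
    # We walked tip -> target, so reverse to get oldest first
    return missed[::-1]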
Code example #17
def test_monitor_chain(block_processor):
    # We don't activate it but we start listening; therefore received blocks should accumulate in the internal queue
    chain_monitor = ChainMonitor([Queue(), Queue()], block_processor, bitcoind_feed_params)
    chain_monitor.polling_delta = 0.1

    chain_monitor.monitor_chain()
    assert chain_monitor.status == ChainMonitorStatus.LISTENING

    # The tip is updated before starting the threads, so it should have been added to last_tips.
    assert len(chain_monitor.last_tips) > 0

    # Blocks should be received and added to the queue
    count = 0
    for _ in range(5):
        generate_blocks(1)
        count += 1
        time.sleep(0.11)  # higher than the polling interval
        assert chain_monitor.receiving_queues[0].empty()
        assert chain_monitor.receiving_queues[1].empty()
        assert chain_monitor.queue.qsize() == count

    chain_monitor.terminate()
    # The zmq thread needs a new block to be mined to release it from its blocking recv call.
    generate_blocks(1)
Code example #18
def test_manage_subscription_expiry_bitcoind_crash(gatekeeper_real_bp, monkeypatch):
    # Test that the data is not deleted until bitcoind comes back online
    current_height = gatekeeper_real_bp.block_processor.get_block_count()

    # Mock the user registration
    user_id = get_random_value_hex(32)
    user_info = UserInfo(available_slots=10, subscription_expiry=current_height + 1)
    monkeypatch.setitem(gatekeeper_real_bp.registered_users, user_id, user_info)

    # Since the gatekeeper is not currently hooked to any ChainMonitor, it won't be notified.
    block_id = generate_blocks(1)[0]

    # Now we can set wrong connection params and feed the block to simulate a bitcoind crash
    monkeypatch.setattr(gatekeeper_real_bp.block_processor, "btc_connect_params", wrong_bitcoind_connect_params)
    gatekeeper_real_bp.block_queue.put(block_id)
    time.sleep(1)

    # The gatekeeper's subscription manager thread should be blocked now. The thread cannot check if the subscription
    # has expired, and the query is blocking
    assert not gatekeeper_real_bp.block_processor.bitcoind_reachable.is_set()
    with pytest.raises(ConnectionRefusedError):
        gatekeeper_real_bp.has_subscription_expired(user_id)

    # Setting the event should unblock the thread and expire the subscription
    monkeypatch.setattr(gatekeeper_real_bp.block_processor, "btc_connect_params", bitcoind_connect_params)
    gatekeeper_real_bp.block_processor.bitcoind_reachable.set()
    time.sleep(1)

    has_subscription_expired, _ = gatekeeper_real_bp.has_subscription_expired(user_id)
    assert has_subscription_expired
Code example #19
def test_appointment_life_cycle(teosd):
    global appointments_in_watcher, appointments_in_responder, available_slots, subscription_expiry

    _, teos_id = teosd

    # First of all we need to register
    available_slots, subscription_expiry = teos_client.register(user_id, teos_id, teos_base_endpoint)

    # After that we can build an appointment and send it to the tower
    commitment_tx, commitment_txid, penalty_tx = create_txs()
    appointment_data = build_appointment_data(commitment_txid, penalty_tx)
    locator = compute_locator(commitment_txid)
    appointment = teos_client.create_appointment(appointment_data)
    add_appointment(teos_id, appointment)
    appointments_in_watcher += 1

    # Get the information from the tower to check that it matches
    appointment_info = get_appointment_info(teos_id, locator)
    assert appointment_info.get("status") == AppointmentStatus.BEING_WATCHED
    assert appointment_info.get("locator") == locator
    assert appointment_info.get("appointment") == appointment.to_dict()

    rpc_client = RPCClient(config.get("RPC_BIND"), config.get("RPC_PORT"))

    # Check also the get_all_appointments endpoint
    all_appointments = json.loads(rpc_client.get_all_appointments())
    watching = all_appointments.get("watcher_appointments")
    responding = all_appointments.get("responder_trackers")
    assert len(watching) == appointments_in_watcher and len(responding) == 0

    # Trigger a breach and check again
    generate_block_with_transactions(commitment_tx)
    appointment_info = get_appointment_info(teos_id, locator)
    assert appointment_info.get("status") == AppointmentStatus.DISPUTE_RESPONDED
    assert appointment_info.get("locator") == locator
    appointments_in_watcher -= 1
    appointments_in_responder += 1

    all_appointments = json.loads(rpc_client.get_all_appointments())
    watching = all_appointments.get("watcher_appointments")
    responding = all_appointments.get("responder_trackers")
    assert len(watching) == appointments_in_watcher and len(responding) == appointments_in_responder

    # It can also be checked by ensuring that the penalty transaction made it to the network
    penalty_tx_id = bitcoin_cli.decoderawtransaction(penalty_tx).get("txid")

    try:
        bitcoin_cli.getrawtransaction(penalty_tx_id)
    except JSONRPCException:
        # getrawtransaction raises if the transaction cannot be found
        assert False, "The penalty transaction was not found on the network"

    # Now let's mine some blocks so the appointment reaches its end. We need 100 + EXPIRY_DELTA - 1
    generate_blocks(100 + config.get("EXPIRY_DELTA") - 1)
    appointments_in_responder -= 1

    # The appointment is no longer in the tower
    with pytest.raises(TowerResponseError):
        get_appointment_info(teos_id, locator)

    assert get_subscription_info(teos_id).get("available_slots") == available_slots
Code example #20
def test_on_sync_fail(responder, block_processor):
    # This should fail if we're more than 1 block behind the tip
    chain_tip = block_processor.get_best_block_hash()
    generate_blocks(2)

    assert responder.on_sync(chain_tip) is False