def test_is_block_in_best_chain(block_processor):
    """The current tip belongs to the best chain until a longer fork orphans it."""
    tip_hash = block_processor.get_best_block_hash()
    tip = block_processor.get_block(tip_hash)

    assert block_processor.is_block_in_best_chain(tip_hash)

    # Fork from the tip's parent and outgrow the original chain, leaving the
    # old tip on a stale branch.
    fork(tip.get("previousblockhash"))
    generate_blocks(2)

    assert not block_processor.is_block_in_best_chain(tip_hash)
def test_locator_cache_init(block_processor):
    """The cache must start full, holding only block hashes bitcoind knows about."""
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    # Generate enough blocks so the cache can start full
    generate_blocks(2 * locator_cache.cache_size)

    locator_cache.init(block_processor.get_best_block_hash(), block_processor)
    assert len(locator_cache.blocks) == locator_cache.cache_size
    # Only the keys (block hashes) are checked; the stored values are not used,
    # so iterate the dict directly instead of over .items().
    for block_hash in locator_cache.blocks:
        assert block_processor.get_block(block_hash)
def test_get_distance_to_tip(block_processor):
    """The distance from a block to the tip equals the blocks mined after it."""
    expected_distance = 5

    # Remember the current tip before mining on top of it.
    starting_block = block_processor.get_best_block_hash()

    # Mine some blocks up to the target distance
    generate_blocks(expected_distance)

    # Check if the distance is properly computed
    assert block_processor.get_distance_to_tip(starting_block) == expected_distance
def test_find_last_common_ancestor(block_processor):
    """After a reorg, the fork point is reported as the last common ancestor."""
    ancestor = block_processor.get_best_block_hash()
    generate_blocks(3)
    best_block_hash = block_processor.get_best_block_hash()

    # Fork off the ancestor (forking creates a block if the mock is set by events)
    fork(ancestor)

    # Extend the fork so it overtakes the original branch and the best tip changes
    generate_blocks(5)

    # Walking back from the stale best tip must land on "ancestor" and drop the
    # three transactions mined on the abandoned branch.
    last_common_ancestor, dropped_txs = block_processor.find_last_common_ancestor(best_block_hash)

    assert last_common_ancestor == ancestor
    assert len(dropped_txs) == 3
# Example #5
def test_send_double_spending_transaction(carrier):
    """Sending the very same transaction twice is reported as delivered both times."""
    tx = create_dummy_transaction()
    txid = tx.tx_id.hex()

    first_receipt = carrier.send_transaction(tx.hex(), txid)
    sent_txs.append(txid)

    # Wait for a block to be mined. Issued receipts is reset from the Responder every block, so we should do it too.
    generate_blocks(2)
    carrier.issued_receipts = {}

    # Resend the exact same transaction.
    second_receipt = carrier.send_transaction(tx.hex(), txid)

    # Both sends are delivered; the second one is flagged as already in chain
    # (delivered either by himself or someone else) with at least one confirmation.
    assert first_receipt.delivered is True
    assert (second_receipt.delivered is True
            and second_receipt.confirmations >= 1
            and second_receipt.reason == RPC_VERIFY_ALREADY_IN_CHAIN)
# Example #6
def test_do_watch(watcher, temp_db_manager):
    """End-to-end check of Watcher.do_watch: triggered appointments are removed,
    and the remaining ones time out after EXPIRY_DELTA."""
    watcher.db_manager = temp_db_manager

    # Wipe any previous data and load APPOINTMENTS fresh appointments.
    appointments, locator_uuid_map, dispute_txs = create_appointments(APPOINTMENTS)

    # Load the data into the Watcher and persist it in the db.
    watcher.locator_uuid_map = locator_uuid_map
    watcher.appointments = {}

    for uuid, appointment in appointments.items():
        watcher.appointments[uuid] = {
            "locator": appointment.locator,
            "end_time": appointment.end_time
        }
        watcher.db_manager.store_watcher_appointment(uuid, appointment.to_json())
        watcher.db_manager.create_append_locator_map(appointment.locator, uuid)

    Thread(target=watcher.do_watch, daemon=True).start()

    # Trigger the first two appointments by broadcasting their disputes.
    for dispute_tx in dispute_txs[:2]:
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(dispute_tx)

    # After mining enough blocks the two triggered appointments must be gone.
    generate_blocks(START_TIME_OFFSET + END_TIME_OFFSET)
    assert len(watcher.appointments) == APPOINTMENTS - 2

    # The rest of appointments time out after the end (2) + EXPIRY_DELTA.
    # Wait for an additional block to be safe.
    generate_blocks(config.get("EXPIRY_DELTA") + START_TIME_OFFSET + END_TIME_OFFSET)
    assert len(watcher.appointments) == 0
# Example #7
def test_get_all_appointments_responder():
    """After every dispute is triggered, all appointments must show up as
    responder trackers and none must remain in the watcher."""
    # Compute the known locators once; they are needed both to pick the
    # disputes to broadcast and to check the API response below.
    locators = [appointment["locator"] for appointment in appointments]

    # Trigger all disputes
    for locator, dispute_tx in locator_dispute_tx_map.items():
        if locator in locators:
            bitcoin_cli(bitcoind_connect_params).sendrawtransaction(dispute_tx)

    # Confirm transactions
    generate_blocks(6)

    # Get all appointments
    r = requests.get(url=TEOS_API + "/get_all_appointments")
    received_appointments = json.loads(r.content)

    # Make sure there is no pending locator in the watcher. Only the values of
    # responder_trackers are needed, so skip the keys.
    responder_trackers = [
        v["locator"]
        for v in received_appointments["responder_trackers"].values()
    ]

    assert set(responder_trackers) == set(locators)
    assert len(received_appointments["watcher_appointments"]) == 0
def test_get_completed_trackers(db_manager, carrier, block_processor):
    """A tracker is complete once its appointment end is reached with enough
    confirmations (> MIN_CONFIRMATIONS) on its penalty transaction."""
    initial_height = bitcoin_cli(bitcoind_connect_params).getblockcount()

    responder = Responder(db_manager, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    # Three kinds of trackers are created:
    #   - end reached + enough confirmations (should complete)
    #   - end reached + not enough confirmations
    #   - end not reached yet
    trackers_end_conf = {
        uuid4().hex:
        create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
        for _ in range(10)
    }

    trackers_end_no_conf = {}
    for _ in range(10):
        tracker = create_dummy_tracker(
            penalty_rawtx=create_dummy_transaction().hex())
        # Flagging the penalty as unconfirmed prevents this tracker from completing.
        responder.unconfirmed_txs.append(tracker.penalty_txid)
        trackers_end_no_conf[uuid4().hex] = tracker

    trackers_no_end = {}
    for _ in range(10):
        tracker = create_dummy_tracker(
            penalty_rawtx=create_dummy_transaction().hex())
        # Push the end far enough so it is not reached on the first check.
        tracker.appointment_end += 10
        trackers_no_end[uuid4().hex] = tracker

    all_trackers = {}
    all_trackers.update(trackers_end_conf)
    all_trackers.update(trackers_end_no_conf)
    all_trackers.update(trackers_no_end)

    # Let's add all to the responder
    for uuid, tracker in all_trackers.items():
        responder.trackers[uuid] = {
            "locator": tracker.locator,
            "penalty_txid": tracker.penalty_txid,
            "appointment_end": tracker.appointment_end,
        }

    # Broadcast every penalty so it can start confirming. Only the tracker is
    # needed here, so iterate the values directly.
    for tracker in all_trackers.values():
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(
            tracker.penalty_rawtx)

    # The dummy appointments have an end_appointment time of current + 2, but trackers need at least 6 confs by default
    generate_blocks(6)

    # Only the first batch should be completed at this height.
    completed_trackers = responder.get_completed_trackers(initial_height + 6)
    completed_trackers_ids = list(completed_trackers.keys())
    ended_trackers_keys = list(trackers_end_conf.keys())
    assert set(completed_trackers_ids) == set(ended_trackers_keys)

    # Generating 6 additional blocks should also confirm trackers_no_end
    generate_blocks(6)

    completed_trackers = responder.get_completed_trackers(initial_height + 12)
    completed_trackers_ids = list(completed_trackers.keys())
    ended_trackers_keys.extend(list(trackers_no_end.keys()))

    assert set(completed_trackers_ids) == set(ended_trackers_keys)
def test_do_watch(temp_db_manager, carrier, block_processor):
    # End-to-end check of Responder.do_watch: broadcast penalties leave the
    # unconfirmed list once mined, and fully-confirmed trackers are removed.
    # Create a fresh responder to simplify the test
    responder = Responder(temp_db_manager, carrier, block_processor)
    chain_monitor = ChainMonitor(Queue(), responder.block_queue,
                                 block_processor, bitcoind_feed_params)
    chain_monitor.monitor_chain()

    trackers = [
        create_dummy_tracker(penalty_rawtx=create_dummy_transaction().hex())
        for _ in range(20)
    ]

    # Let's set up the trackers first: in-memory maps plus the initial
    # bookkeeping (no confirmations missed yet, penalty still unconfirmed).
    for tracker in trackers:
        uuid = uuid4().hex

        responder.trackers[uuid] = {
            "locator": tracker.locator,
            "penalty_txid": tracker.penalty_txid,
            "appointment_end": tracker.appointment_end,
        }
        responder.tx_tracker_map[tracker.penalty_txid] = [uuid]
        responder.missed_confirmations[tracker.penalty_txid] = 0
        responder.unconfirmed_txs.append(tracker.penalty_txid)

        # We also need to store the info in the db
        responder.db_manager.create_triggered_appointment_flag(uuid)
        responder.db_manager.store_responder_tracker(uuid, tracker.to_json())

    # Let's start to watch (daemon thread so the test process can exit freely)
    Thread(target=responder.do_watch, daemon=True).start()

    # And broadcast some of the transactions (first 5 penalties only)
    broadcast_txs = []
    for tracker in trackers[:5]:
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(
            tracker.penalty_rawtx)
        broadcast_txs.append(tracker.penalty_txid)

    # Mine a block so the broadcast penalties confirm
    generate_block()

    # The transactions we sent shouldn't be in the unconfirmed transaction list anymore
    assert not set(broadcast_txs).issubset(responder.unconfirmed_txs)

    # TODO: test that reorgs can be detected once data persistence is merged (new version of the simulator)

    # Generating 5 additional blocks should complete the 5 trackers
    generate_blocks(5)

    assert not set(broadcast_txs).issubset(responder.tx_tracker_map)

    # Do the rest (broadcast the remaining 15 penalties)
    broadcast_txs = []
    for tracker in trackers[5:]:
        bitcoin_cli(bitcoind_connect_params).sendrawtransaction(
            tracker.penalty_rawtx)
        broadcast_txs.append(tracker.penalty_txid)

    # Mine enough blocks to confirm and complete the remaining trackers
    generate_blocks(6)

    assert len(responder.tx_tracker_map) == 0
def test_on_sync_fail(responder, block_processor):
    """on_sync must reject a tip that is more than one block behind."""
    stale_tip = block_processor.get_best_block_hash()

    # Advance the chain by two blocks, leaving the recorded tip behind.
    generate_blocks(2)

    assert responder.on_sync(stale_tip) is False