def test_appointment_shutdown_teos_trigger_while_offline(teosd):
    teosd_process, teos_id = teosd

    # This tests data persistence. An appointment is sent to the tower and the tower is stopped. The appointment is
    # then triggered with the tower offline, and then the tower is brought back online.
    teos_pid = teosd_process.pid

    commitment_tx, commitment_txid, penalty_tx = create_txs()
    appointment_data = build_appointment_data(commitment_txid, penalty_tx)
    locator = compute_locator(commitment_txid)

    appointment = teos_client.create_appointment(appointment_data)
    add_appointment(teos_id, appointment)

    # Check that the appointment is in the Watcher
    appointment_info = get_appointment_info(teos_id, locator)
    assert appointment_info.get("status") == AppointmentStatus.BEING_WATCHED
    assert appointment_info.get("appointment") == appointment.to_dict()

    # Shutdown and trigger
    rpc_client = RPCClient(config.get("RPC_BIND"), config.get("RPC_PORT"))
    rpc_client.stop()
    teosd_process.join()
    generate_block_with_transactions(commitment_tx)

    # Restart
    teosd_process, _ = run_teosd()
    assert teos_pid != teosd_process.pid

    # The appointment should have been moved to the Responder
    appointment_info = get_appointment_info(teos_id, locator)
    assert appointment_info.get("status") == AppointmentStatus.DISPUTE_RESPONDED
def test_register(api, client, monkeypatch):
    # Tests registering a user within the tower

    # Monkeypatch the response from the InternalAPI so the user is accepted
    slots = config.get("SUBSCRIPTION_SLOTS")
    expiry = config.get("SUBSCRIPTION_DURATION")
    receipt = receipts.create_registration_receipt(user_id, slots, expiry)
    signature = Cryptographer.sign(receipt, teos_sk)
    response = RegisterResponse(
        user_id=user_id,
        available_slots=slots,
        subscription_expiry=expiry,
        subscription_signature=signature,
    )
    monkeypatch.setattr(api.stub, "register", lambda x: response)

    # Send the register request
    data = {"public_key": user_id}
    r = client.post(register_endpoint, json=data)

    # Check the reply
    assert r.status_code == HTTP_OK
    assert r.json.get("public_key") == user_id
    assert r.json.get("available_slots") == config.get("SUBSCRIPTION_SLOTS")
    assert r.json.get("subscription_expiry") == config.get("SUBSCRIPTION_DURATION")

    rpk = Cryptographer.recover_pk(receipt, r.json.get("subscription_signature"))
    assert Cryptographer.get_compressed_pk(rpk) == teos_id
def test_add_update_user(gatekeeper, monkeypatch):
    # add_update_user adds SUBSCRIPTION_SLOTS to a given user as long as the identifier is {02, 03}|32-byte hex str.
    # It also adds SUBSCRIPTION_DURATION + current_block_height to the user
    user_id = "02" + get_random_value_hex(32)
    init_height = 0

    # Mock the required BlockProcessor calls from the Gatekeeper
    monkeypatch.setattr(gatekeeper.block_processor, "get_block_count", lambda: init_height)

    for i in range(10):
        gatekeeper.add_update_user(user_id)
        user = gatekeeper.registered_users.get(user_id)
        assert user.available_slots == config.get("SUBSCRIPTION_SLOTS") * (i + 1)
        assert user.subscription_expiry == init_height + config.get("SUBSCRIPTION_DURATION")

    # The same can be checked for multiple users
    for _ in range(10):
        # The user identifier is changed every call
        user_id = "03" + get_random_value_hex(32)
        gatekeeper.add_update_user(user_id)
        user = gatekeeper.registered_users.get(user_id)
        assert user.available_slots == config.get("SUBSCRIPTION_SLOTS")
        assert user.subscription_expiry == init_height + config.get("SUBSCRIPTION_DURATION")
def test_add_update_user(gatekeeper):
    # add_update_user adds SUBSCRIPTION_SLOTS to a given user as long as the identifier is {02, 03}|32-byte hex str.
    # It also adds SUBSCRIPTION_DURATION + current_block_height to the user
    user_id = "02" + get_random_value_hex(32)

    for _ in range(10):
        user = gatekeeper.registered_users.get(user_id)
        current_slots = user.available_slots if user is not None else 0

        gatekeeper.add_update_user(user_id)
        user = gatekeeper.registered_users.get(user_id)
        assert user.available_slots == current_slots + config.get("SUBSCRIPTION_SLOTS")
        assert user.subscription_expiry == gatekeeper.block_processor.get_block_count() + config.get(
            "SUBSCRIPTION_DURATION"
        )

    # The same can be checked for multiple users
    for _ in range(10):
        # The user identifier is changed every call
        user_id = "03" + get_random_value_hex(32)
        gatekeeper.add_update_user(user_id)
        user = gatekeeper.registered_users.get(user_id)
        assert user.available_slots == config.get("SUBSCRIPTION_SLOTS")
        assert user.subscription_expiry == gatekeeper.block_processor.get_block_count() + config.get(
            "SUBSCRIPTION_DURATION"
        )
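# For reference, a minimal sketch of the bookkeeping the two tests above exercise. This is an
# illustration only (the helper name and internals are assumptions, not the actual Gatekeeper
# code): slots accumulate across calls, while the expiry is refreshed from the current height.
def _sketch_add_update_user(registered_users, user_id, slots, duration, current_height):
    user = registered_users.get(user_id)
    if user is None:
        # First registration: grant the base slots and set the expiry
        registered_users[user_id] = UserInfo(slots, current_height + duration)
    else:
        # Top-up: accumulate slots and refresh the expiry
        user.available_slots += slots
        user.subscription_expiry = current_height + duration
    return registered_users[user_id]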
def gatekeeper(user_db_manager, block_processor_mock):
    return Gatekeeper(
        user_db_manager,
        block_processor_mock,
        config.get("SUBSCRIPTION_SLOTS"),
        config.get("SUBSCRIPTION_DURATION"),
        config.get("EXPIRY_DELTA"),
    )
def test_add_update_appointment(gatekeeper, generate_dummy_appointment, monkeypatch):
    # add_update_appointment should decrease the slot count if a new appointment is added
    init_height = 0

    # Mock the required BlockProcessor calls from the Gatekeeper
    monkeypatch.setattr(gatekeeper.block_processor, "get_block_count", lambda: init_height)

    # Mock the user registration
    user_id = "02" + get_random_value_hex(32)
    user_info = UserInfo(config.get("SUBSCRIPTION_SLOTS"), init_height + 100)
    monkeypatch.setitem(gatekeeper.registered_users, user_id, user_info)

    # And now add a new appointment
    appointment = generate_dummy_appointment()
    appointment_uuid = get_random_value_hex(16)
    remaining_slots = gatekeeper.add_update_appointment(user_id, appointment_uuid, appointment)

    # This is a standard size appointment, so it should have reduced the slots by one
    assert appointment_uuid in gatekeeper.registered_users[user_id].appointments
    assert remaining_slots == config.get("SUBSCRIPTION_SLOTS") - 1

    # Updates can leave the count as is, decrease it, or increase it, depending on the appointment size (modulo
    # ENCRYPTED_BLOB_MAX_SIZE_HEX)

    # Appointments of the same size leave it as is
    appointment_same_size = generate_dummy_appointment()
    remaining_slots = gatekeeper.add_update_appointment(user_id, appointment_uuid, appointment)
    assert appointment_uuid in gatekeeper.registered_users[user_id].appointments
    assert remaining_slots == config.get("SUBSCRIPTION_SLOTS") - 1

    # Bigger appointments decrease it
    appointment_x2_size = appointment_same_size
    appointment_x2_size.encrypted_blob = "A" * (ENCRYPTED_BLOB_MAX_SIZE_HEX + 1)
    remaining_slots = gatekeeper.add_update_appointment(user_id, appointment_uuid, appointment_x2_size)
    assert appointment_uuid in gatekeeper.registered_users[user_id].appointments
    assert remaining_slots == config.get("SUBSCRIPTION_SLOTS") - 2

    # Smaller appointments increase it
    remaining_slots = gatekeeper.add_update_appointment(user_id, appointment_uuid, appointment)
    assert remaining_slots == config.get("SUBSCRIPTION_SLOTS") - 1

    # If the appointment needs more slots than there are free, it should fail
    gatekeeper.registered_users[user_id].available_slots = 1
    appointment_uuid = get_random_value_hex(16)
    with pytest.raises(NotEnoughSlots):
        gatekeeper.add_update_appointment(user_id, appointment_uuid, appointment_x2_size)
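# The slot arithmetic asserted above boils down to how many ENCRYPTED_BLOB_MAX_SIZE_HEX chunks
# the encrypted blob spans. A hedged sketch of that computation (the helper is an assumption
# about the internals, shown only to make the x1/x2 sizing explicit):
def _sketch_required_slots(appointment):
    # Ceiling division: one slot per ENCRYPTED_BLOB_MAX_SIZE_HEX hex chars, rounded up
    return -(-len(appointment.encrypted_blob) // ENCRYPTED_BLOB_MAX_SIZE_HEX)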
def gatekeeper_wrong_connection(user_db_manager, block_processor_wrong_connection):  # noqa: F811
    return Gatekeeper(
        user_db_manager,
        block_processor_wrong_connection,
        config.get("SUBSCRIPTION_SLOTS"),
        config.get("SUBSCRIPTION_DURATION"),
        config.get("EXPIRY_DELTA"),
    )
def test_get_outdated_trackers(responder, generate_dummy_tracker, monkeypatch):
    # Outdated trackers are those whose subscription has reached the expiry block and whose penalty has not been
    # confirmed. Confirmed trackers that have reached their expiry are kept until completed.

    # Create some trackers and add them to the corresponding user in the Gatekeeper
    outdated_unconfirmed_trackers = {}
    outdated_unconfirmed_trackers_next = {}
    outdated_confirmed_trackers = {}
    unconfirmed_txs = []
    for i in range(20):
        uuid = uuid4().hex
        dummy_tracker = generate_dummy_tracker()

        # Split them between unconfirmed expiring next block, unconfirmed expiring in two blocks, and confirmed
        if i % 3:
            outdated_unconfirmed_trackers[uuid] = dummy_tracker
            unconfirmed_txs.append(dummy_tracker.penalty_txid)
        elif i % 2:
            outdated_unconfirmed_trackers_next[uuid] = dummy_tracker
            unconfirmed_txs.append(dummy_tracker.penalty_txid)
        else:
            outdated_confirmed_trackers[uuid] = dummy_tracker

    # Get all the tracker summaries to add them to the Responder
    all_trackers_summary = {}
    all_trackers_summary.update(outdated_confirmed_trackers)
    all_trackers_summary.update(outdated_unconfirmed_trackers)
    all_trackers_summary.update(outdated_unconfirmed_trackers_next)
    for uuid, tracker in all_trackers_summary.items():
        all_trackers_summary[uuid] = tracker.get_summary()

    # Add the data to the Gatekeeper and the Responder
    init_block = 0
    monkeypatch.setattr(responder, "trackers", all_trackers_summary)
    monkeypatch.setattr(responder, "unconfirmed_txs", unconfirmed_txs)

    # Mock the expiry for this block, the next block, and two blocks from now (plus EXPIRY_DELTA)
    monkeypatch.setattr(
        responder.gatekeeper,
        "get_outdated_appointments",
        lambda x: []
        if x == init_block
        else outdated_unconfirmed_trackers
        if x == init_block + 1 + config.get("EXPIRY_DELTA")
        else outdated_unconfirmed_trackers_next,
    )

    # Currently nothing should be outdated
    assert responder.get_outdated_trackers(init_block) == []

    # One block (+ EXPIRY_DELTA) later, the first batch of unconfirmed trackers should be outdated
    assert responder.get_outdated_trackers(init_block + 1 + config.get("EXPIRY_DELTA")) == list(
        outdated_unconfirmed_trackers.keys()
    )

    # Two blocks (+ EXPIRY_DELTA) later, the second batch of unconfirmed trackers should be outdated
    assert responder.get_outdated_trackers(init_block + 2 + config.get("EXPIRY_DELTA")) == list(
        outdated_unconfirmed_trackers_next.keys()
    )
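# A hedged sketch of the filtering get_outdated_trackers is expected to perform (assumed
# internals; in particular, that each tracker summary exposes its penalty_txid):
def _sketch_get_outdated_trackers(trackers, unconfirmed_txs, outdated_appointments):
    # Only tracked appointments whose penalty is still unconfirmed are outdated; confirmed ones are kept
    return [uuid for uuid in outdated_appointments if uuid in trackers and trackers[uuid].penalty_txid in unconfirmed_txs]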
def internal_api(db_manager, gatekeeper, carrier, block_processor):
    responder = Responder(db_manager, gatekeeper, carrier, block_processor)
    watcher = Watcher(
        db_manager,
        gatekeeper,
        block_processor,
        responder,
        teos_sk,
        MAX_APPOINTMENTS,
        config.get("LOCATOR_CACHE_SIZE"),
    )
    watcher.last_known_block = block_processor.get_best_block_hash()
    i_api = InternalAPI(watcher, internal_api_endpoint, config.get("INTERNAL_API_WORKERS"), Event())
    i_api.rpc_server.start()

    yield i_api

    i_api.rpc_server.stop(None)
def test_init(gatekeeper):
    assert isinstance(gatekeeper.subscription_slots, int)
    assert gatekeeper.subscription_slots == config.get("SUBSCRIPTION_SLOTS")
    assert isinstance(gatekeeper.subscription_duration, int)
    assert gatekeeper.subscription_duration == config.get("SUBSCRIPTION_DURATION")
    assert isinstance(gatekeeper.expiry_delta, int)
    assert gatekeeper.expiry_delta == config.get("EXPIRY_DELTA")
    assert isinstance(gatekeeper.block_processor, BlockProcessor)
    assert isinstance(gatekeeper.user_db, UsersDBM)
    assert isinstance(gatekeeper.registered_users, dict)
    assert len(gatekeeper.registered_users) == 0
def test_add_update_appointment(gatekeeper, generate_dummy_appointment):
    # add_update_appointment should decrease the slot count if a new appointment is added

    # Let's add a new user
    sk, pk = generate_keypair()
    user_id = Cryptographer.get_compressed_pk(pk)
    gatekeeper.add_update_user(user_id)

    # And now add a new appointment
    appointment, _ = generate_dummy_appointment()
    appointment_uuid = get_random_value_hex(16)
    remaining_slots = gatekeeper.add_update_appointment(user_id, appointment_uuid, appointment)

    # This is a standard size appointment, so it should have reduced the slots by one
    assert appointment_uuid in gatekeeper.registered_users[user_id].appointments
    assert remaining_slots == config.get("SUBSCRIPTION_SLOTS") - 1

    # Updates can leave the count as is, decrease it, or increase it, depending on the appointment size (modulo
    # ENCRYPTED_BLOB_MAX_SIZE_HEX)

    # Appointments of the same size leave it as is
    appointment_same_size, _ = generate_dummy_appointment()
    remaining_slots = gatekeeper.add_update_appointment(user_id, appointment_uuid, appointment)
    assert appointment_uuid in gatekeeper.registered_users[user_id].appointments
    assert remaining_slots == config.get("SUBSCRIPTION_SLOTS") - 1

    # Bigger appointments decrease it
    appointment_x2_size = appointment_same_size
    appointment_x2_size.encrypted_blob = "A" * (ENCRYPTED_BLOB_MAX_SIZE_HEX + 1)
    remaining_slots = gatekeeper.add_update_appointment(user_id, appointment_uuid, appointment_x2_size)
    assert appointment_uuid in gatekeeper.registered_users[user_id].appointments
    assert remaining_slots == config.get("SUBSCRIPTION_SLOTS") - 2

    # Smaller appointments increase it
    remaining_slots = gatekeeper.add_update_appointment(user_id, appointment_uuid, appointment)
    assert remaining_slots == config.get("SUBSCRIPTION_SLOTS") - 1

    # If the appointment needs more slots than there are free, it should fail
    gatekeeper.registered_users[user_id].available_slots = 1
    appointment_uuid = get_random_value_hex(16)
    with pytest.raises(NotEnoughSlots):
        gatekeeper.add_update_appointment(user_id, appointment_uuid, appointment_x2_size)
def test_fix_cache(block_processor):
    # This tests how a reorg will create a new version of the cache
    # Let's start by setting a full cache. We'll mine ``cache_size`` blocks to be sure it's full
    generate_blocks(config.get("LOCATOR_CACHE_SIZE"))
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
    locator_cache.init(block_processor.get_best_block_hash(), block_processor)
    assert len(locator_cache.blocks) == locator_cache.cache_size

    # Now let's fake a reorg of less than ``cache_size``. We'll go two blocks into the past.
    current_tip = block_processor.get_best_block_hash()
    current_tip_locators = locator_cache.blocks[current_tip]
    current_tip_parent = block_processor.get_block(current_tip).get("previousblockhash")
    current_tip_parent_locators = locator_cache.blocks[current_tip_parent]
    fake_tip = block_processor.get_block(current_tip_parent).get("previousblockhash")
    locator_cache.fix(fake_tip, block_processor)

    # The last two blocks are not in the cache anymore, nor are any of their locators
    assert current_tip not in locator_cache.blocks and current_tip_parent not in locator_cache.blocks
    for locator in list(current_tip_parent_locators) + list(current_tip_locators):
        assert locator not in locator_cache.cache

    # The fake tip is the new tip, and two additional blocks are at the bottom
    assert fake_tip in locator_cache.blocks and list(locator_cache.blocks.keys())[-1] == fake_tip
    assert len(locator_cache.blocks) == locator_cache.cache_size

    # Test the same for a full cache reorg. We can simulate this by adding more blocks than the cache can fit and
    # triggering a fix. We'll use a new cache to compare with the old one
    old_cache_blocks = deepcopy(locator_cache.blocks)

    generate_blocks(config.get("LOCATOR_CACHE_SIZE") * 2)
    locator_cache.fix(block_processor.get_best_block_hash(), block_processor)

    # None of the data from the old cache is in the new cache
    for block_hash, locators in old_cache_blocks.items():
        assert block_hash not in locator_cache.blocks
        for locator in locators:
            assert locator not in locator_cache.cache

    # The data in the new cache corresponds to the last ``cache_size`` blocks
    block_count = block_processor.get_block_count()
    for i in range(block_count, block_count - locator_cache.cache_size, -1):
        block_hash = bitcoin_cli.getblockhash(i)
        assert block_hash in locator_cache.blocks
        for locator in locator_cache.blocks[block_hash]:
            assert locator in locator_cache.cache
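# One simple way to satisfy the assertions above, as a hedged sketch of ``fix`` (assumed
# internals, not the actual implementation): rebuild the cache from the new tip, the same way
# ``init`` does, so no data from the stale branch survives.
def _sketch_fix(locator_cache, new_tip, block_processor):
    locator_cache.blocks.clear()
    locator_cache.cache.clear()
    locator_cache.init(new_tip, block_processor)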
def test_locator_remove_oldest_block():
    # remove_oldest_block should drop the oldest block from the cache.

    # Create an empty cache
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    # Add some blocks to the cache
    for _ in range(locator_cache.cache_size):
        txid = get_random_value_hex(32)
        locator = txid[:16]
        locator_cache.blocks[get_random_value_hex(32)] = {locator: txid}
        locator_cache.cache[locator] = txid

    blocks_in_cache = locator_cache.blocks
    oldest_block_hash = list(blocks_in_cache.keys())[0]
    oldest_block_data = blocks_in_cache.get(oldest_block_hash)
    rest_of_blocks = list(blocks_in_cache.keys())[1:]

    # Remove the block
    locator_cache.remove_oldest_block()

    # The oldest block data is not in the cache anymore
    assert oldest_block_hash not in locator_cache.blocks
    for locator in oldest_block_data:
        assert locator not in locator_cache.cache

    # The rest of the data is in the cache
    assert set(rest_of_blocks).issubset(locator_cache.blocks)
    for block_hash in rest_of_blocks:
        for locator in locator_cache.blocks[block_hash]:
            assert locator in locator_cache.cache
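# A hedged sketch of ``remove_oldest_block``, assuming ``blocks`` is an insertion-ordered dict
# with the oldest block first (which is what the test above relies on):
def _sketch_remove_oldest_block(locator_cache):
    oldest_block_hash = next(iter(locator_cache.blocks))
    locators = locator_cache.blocks.pop(oldest_block_hash)
    for locator in locators:
        locator_cache.cache.pop(locator, None)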
def test_fix_cache(block_processor_mock, monkeypatch):
    # This tests how a reorg will create a new version of the cache
    # Let's start by setting up a full cache
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    # We'll need two additional blocks since we'll roll back the chain into the past
    blocks = dict()
    mock_generate_blocks(locator_cache.cache_size + 2, blocks, queue.Queue())
    best_block_hash = list(blocks.keys())[-1]

    # Mock the interaction with the BlockProcessor based on the mocked blocks
    monkeypatch.setattr(block_processor_mock, "get_block", lambda x, blocking: blocks.get(x))
    monkeypatch.setattr(block_processor_mock, "get_block_count", lambda: len(blocks))

    locator_cache.init(best_block_hash, block_processor_mock)
    assert len(locator_cache.blocks) == locator_cache.cache_size

    # Now let's fake a reorg of less than ``cache_size``. We'll go two blocks into the past.
    current_tip = best_block_hash
    current_tip_locators = locator_cache.blocks[current_tip]
    current_tip_parent = block_processor_mock.get_block(current_tip, False).get("previousblockhash")
    current_tip_parent_locators = locator_cache.blocks[current_tip_parent]
    fake_tip = block_processor_mock.get_block(current_tip_parent, False).get("previousblockhash")
    locator_cache.fix(fake_tip, block_processor_mock)

    # The last two blocks are not in the cache anymore, nor are any of their locators
    assert current_tip not in locator_cache.blocks and current_tip_parent not in locator_cache.blocks
    for locator in list(current_tip_parent_locators) + list(current_tip_locators):
        assert locator not in locator_cache.cache

    # The fake tip is the new tip, and two additional blocks are at the bottom
    assert fake_tip in locator_cache.blocks and list(locator_cache.blocks.keys())[-1] == fake_tip
    assert len(locator_cache.blocks) == locator_cache.cache_size

    # Test the same for a full cache reorg. We can simulate this by adding more blocks than the cache can fit and
    # triggering a fix. We'll use a new cache to compare with the old one
    old_cache_blocks = deepcopy(locator_cache.blocks)

    mock_generate_blocks(locator_cache.cache_size, blocks, queue.Queue())
    best_block_hash = list(blocks.keys())[-1]
    locator_cache.fix(best_block_hash, block_processor_mock)

    # None of the data from the old cache is in the new cache
    for block_hash, locators in old_cache_blocks.items():
        assert block_hash not in locator_cache.blocks
        for locator in locators:
            assert locator not in locator_cache.cache

    # The data in the new cache corresponds to the last ``cache_size`` blocks
    block_count = block_processor_mock.get_block_count()
    for i in range(block_count, block_count - locator_cache.cache_size, -1):
        block_hash = list(blocks.keys())[i - 1]
        assert block_hash in locator_cache.blocks
        for locator in locator_cache.blocks[block_hash]:
            assert locator in locator_cache.cache
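# ``mock_generate_blocks`` is a test helper used across the mocked cache tests; a hedged sketch
# of its assumed shape, chaining random block hashes via ``previousblockhash`` and pushing the
# new hashes onto a queue the way real block notifications would arrive:
def _sketch_mock_generate_blocks(n, blocks, block_queue):
    for _ in range(n):
        block_hash = get_random_value_hex(32)
        prev_block_hash = list(blocks.keys())[-1] if blocks else get_random_value_hex(32)
        blocks[block_hash] = {"hash": block_hash, "previousblockhash": prev_block_hash, "tx": []}
        block_queue.put(block_hash)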
def test_register(internal_api, client):
    # Tests registering a user within the tower
    current_height = internal_api.watcher.block_processor.get_block_count()

    data = {"public_key": user_id}
    r = client.post(register_endpoint, json=data)
    assert r.status_code == HTTP_OK
    assert r.json.get("public_key") == user_id
    assert r.json.get("available_slots") == config.get("SUBSCRIPTION_SLOTS")
    assert r.json.get("subscription_expiry") == current_height + config.get("SUBSCRIPTION_DURATION")

    slots = r.json.get("available_slots")
    expiry = r.json.get("subscription_expiry")
    subscription_receipt = receipts.create_registration_receipt(user_id, slots, expiry)
    rpk = Cryptographer.recover_pk(subscription_receipt, r.json.get("subscription_signature"))
    assert Cryptographer.get_compressed_pk(rpk) == teos_id
def test_fix_cache_bitcoind_crash(block_processor):
    # A real BlockProcessor is required to test the blocking functionality, since the mock does not implement it
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    run_test_blocking_command_bitcoind_crash(
        block_processor.bitcoind_reachable,
        lambda: locator_cache.fix(block_processor.get_best_block_hash(), block_processor),
    )
def test_locator_cache_is_full():
    # Empty cache
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    for _ in range(locator_cache.cache_size):
        locator_cache.blocks[uuid4().hex] = 0
        assert not locator_cache.is_full()

    locator_cache.blocks[uuid4().hex] = 0
    assert locator_cache.is_full()
def test_locator_cache_init(block_processor):
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    # Generate enough blocks so the cache can start full
    generate_blocks(2 * locator_cache.cache_size)

    locator_cache.init(block_processor.get_best_block_hash(), block_processor)
    assert len(locator_cache.blocks) == locator_cache.cache_size
    for block_hash in locator_cache.blocks:
        assert block_processor.get_block(block_hash)
def test_cache_get_txid():
    # Not much to test here, this is shadowing dict.get
    locator = get_random_value_hex(16)
    txid = get_random_value_hex(32)

    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
    locator_cache.cache[locator] = txid
    assert locator_cache.get_txid(locator) == txid

    # A random locator should return None
    assert locator_cache.get_txid(get_random_value_hex(16)) is None
def teosd(run_bitcoind):
    teosd_process, teos_id = run_teosd()

    yield teosd_process, teos_id

    # FIXME: This is not ideal, but for some reason stop raises a "socket is closed" error on the first try here.
    stopped = False
    while not stopped:
        try:
            rpc_client = RPCClient(config.get("RPC_BIND"), config.get("RPC_PORT"))
            rpc_client.stop()
            stopped = True
        except RpcError:
            pass

    teosd_process.join()
    shutil.rmtree(".teos")

    # FIXME: wait some time, otherwise it might fail when multiple e2e tests are run in the same session. Not sure why.
    sleep(1)
def internal_api(gatekeeper_mock, carrier_mock):
    db_manager = DBManagerMock()
    responder = Responder(db_manager, gatekeeper_mock, carrier_mock, gatekeeper_mock.block_processor)
    watcher = Watcher(
        db_manager,
        gatekeeper_mock,
        gatekeeper_mock.block_processor,
        responder,
        teos_sk,
        MAX_APPOINTMENTS,
        config.get("LOCATOR_CACHE_SIZE"),
    )
    i_api = InternalAPI(watcher, internal_api_endpoint, config.get("INTERNAL_API_WORKERS"), Event())
    i_api.rpc_server.start()

    yield i_api

    i_api.rpc_server.stop(None)
def watcher(dbm_mock, gatekeeper_mock, responder_mock, block_processor_mock):
    return Watcher(
        dbm_mock,
        gatekeeper_mock,
        block_processor_mock,
        responder_mock,
        signing_key,
        MAX_APPOINTMENTS,
        config.get("LOCATOR_CACHE_SIZE"),
    )
def run_teosd():
    sk_file_path = os.path.join(config.get("DATA_DIR"), "teos_sk.der")
    if not os.path.exists(sk_file_path):
        # Generating teos sk so we can return the teos_id
        teos_sk = Cryptographer.generate_key()
        Cryptographer.save_key_file(teos_sk.to_der(), "teos_sk", config.get("DATA_DIR"))
    else:
        teos_sk = Cryptographer.load_private_key_der(Cryptographer.load_key_file(sk_file_path))

    teos_id = Cryptographer.get_compressed_pk(teos_sk.public_key)

    # Change the default WSGI for Windows
    if os.name == "nt":
        config["WSGI"] = "waitress"

    teosd_process = Process(target=main, kwargs={"config": config})
    teosd_process.start()

    # Give it some time to bootstrap
    # TODO: we should do better synchronization using an Event
    sleep(3)

    return teosd_process, teos_id
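# The TODO above could be addressed with an Event-based handshake. A hedged sketch (``main``
# accepting a ``ready`` kwarg is hypothetical, not the current teosd signature):
def _sketch_run_teosd_synced():
    from multiprocessing import Event as ProcessEvent

    ready = ProcessEvent()
    teosd_process = Process(target=main, kwargs={"config": config, "ready": ready})
    teosd_process.start()

    # Block until teosd signals it has bootstrapped, instead of sleeping a fixed amount of time
    ready.wait(timeout=30)
    return teosd_process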
def test_locator_cache_init_not_enough_blocks(block_processor):
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    # Make sure there are at least 3 blocks
    block_count = block_processor.get_block_count()
    if block_count < 3:
        generate_blocks(3 - block_count)

    # Simulate there are only 3 blocks
    third_block_hash = bitcoin_cli.getblockhash(2)
    locator_cache.init(third_block_hash, block_processor)
    assert len(locator_cache.blocks) == 3
    for block_hash in locator_cache.blocks:
        assert block_processor.get_block(block_hash)
def test_locator_cache_is_full():
    # is_full should return whether the cache is full or not.

    # Create an empty cache
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    # Fill it one by one and check it is not full
    for _ in range(locator_cache.cache_size):
        locator_cache.blocks[uuid4().hex] = 0
        assert not locator_cache.is_full()

    # Add one more block and check again; it should be full now
    locator_cache.blocks[uuid4().hex] = 0
    assert locator_cache.is_full()
def test_register_top_up(internal_api, client):
    # Calling register more than once will give us SUBSCRIPTION_SLOTS * number_of_calls slots.
    # It will also refresh the expiry.
    tmp_sk, tmp_pk = generate_keypair()
    tmp_user_id = Cryptographer.get_compressed_pk(tmp_pk)
    current_height = internal_api.watcher.block_processor.get_block_count()

    data = {"public_key": tmp_user_id}

    for i in range(10):
        r = client.post(register_endpoint, json=data)
        slots = r.json.get("available_slots")
        expiry = r.json.get("subscription_expiry")
        assert r.status_code == HTTP_OK
        assert r.json.get("public_key") == tmp_user_id
        assert slots == config.get("SUBSCRIPTION_SLOTS") * (i + 1)
        assert expiry == current_height + config.get("SUBSCRIPTION_DURATION")

        subscription_receipt = receipts.create_registration_receipt(tmp_user_id, slots, expiry)
        rpk = Cryptographer.recover_pk(subscription_receipt, r.json.get("subscription_signature"))
        assert Cryptographer.get_compressed_pk(rpk) == teos_id
def test_locator_cache_init(block_processor_mock, monkeypatch):
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    # Generate enough blocks so the cache can start full
    blocks = dict()
    mock_generate_blocks(locator_cache.cache_size, blocks, queue.Queue())
    best_block_hash = list(blocks.keys())[-1]

    # Mock the interaction with the BlockProcessor based on the mocked blocks
    monkeypatch.setattr(block_processor_mock, "get_block", lambda x, blocking: blocks.get(x))

    locator_cache.init(best_block_hash, block_processor_mock)
    assert len(locator_cache.blocks) == locator_cache.cache_size
    for block_hash in locator_cache.blocks:
        assert block_processor_mock.get_block(block_hash, blocking=False)
def test_locator_cache_init_not_enough_blocks(block_processor_mock, monkeypatch):
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))

    # Mock generating 3 blocks
    blocks = dict()
    mock_generate_blocks(3, blocks, queue.Queue())
    third_block_hash = list(blocks.keys())[2]

    # Mock the interaction with the BlockProcessor based on the mocked blocks
    monkeypatch.setattr(block_processor_mock, "get_block", lambda x, blocking: blocks.get(x))

    locator_cache.init(third_block_hash, block_processor_mock)
    assert len(locator_cache.blocks) == 3
    for block_hash in locator_cache.blocks:
        assert block_processor_mock.get_block(block_hash, blocking=False)
def test_do_watch(watcher, temp_db_manager, generate_dummy_appointment):
    watcher.db_manager = temp_db_manager

    # We will wipe all the previous data and add 5 appointments
    appointments, locator_uuid_map, dispute_txs = create_appointments(generate_dummy_appointment, APPOINTMENTS)

    # Set the data into the Watcher and in the db
    watcher.locator_uuid_map = locator_uuid_map
    watcher.appointments = {}
    watcher.gatekeeper.registered_users = {}

    # Simulate a register (times out in 10 blocks)
    user_id = get_random_value_hex(16)
    watcher.gatekeeper.registered_users[user_id] = UserInfo(
        available_slots=100, subscription_expiry=watcher.block_processor.get_block_count() + 10
    )

    # Add the appointments
    for uuid, appointment in appointments.items():
        watcher.appointments[uuid] = {"locator": appointment.locator, "user_id": user_id}
        # Assume the appointment only takes one slot
        watcher.gatekeeper.registered_users[user_id].appointments[uuid] = 1
        watcher.db_manager.store_watcher_appointment(uuid, appointment.to_dict())
        watcher.db_manager.create_append_locator_map(appointment.locator, uuid)

    do_watch_thread = Thread(target=watcher.do_watch, daemon=True)
    do_watch_thread.start()

    # Broadcast the first two
    for dispute_tx in dispute_txs[:2]:
        bitcoin_cli.sendrawtransaction(dispute_tx)

    # After generating a block, the appointment count should have been reduced by 2 (two breaches)
    generate_blocks_with_delay(1)
    assert len(watcher.appointments) == APPOINTMENTS - 2

    # The rest of the appointments will time out once the subscription expires (9 more blocks) plus EXPIRY_DELTA
    generate_blocks_with_delay(9 + config.get("EXPIRY_DELTA"))
    assert len(watcher.appointments) == 0
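# ``create_appointments`` is a test helper; a hedged sketch of its assumed shape, matching how
# its return values are used above and assuming the fixture yields (appointment, dispute_tx)
# pairs as in the non-mocked gatekeeper tests:
def _sketch_create_appointments(generate_dummy_appointment, n):
    appointments, locator_uuid_map, dispute_txs = {}, {}, []
    for _ in range(n):
        appointment, dispute_tx = generate_dummy_appointment()
        uuid = uuid4().hex
        appointments[uuid] = appointment
        locator_uuid_map[appointment.locator] = [uuid]
        dispute_txs.append(dispute_tx)
    return appointments, locator_uuid_map, dispute_txs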
def test_update_cache():
    # update should add data about a new block to the cache. If the cache is full, the oldest block is dropped.
    locator_cache = LocatorCache(config.get("LOCATOR_CACHE_SIZE"))
    block_hash = get_random_value_hex(32)
    txs = [get_random_value_hex(32) for _ in range(10)]
    locator_txid_map = {compute_locator(txid): txid for txid in txs}

    # Cache is empty
    assert block_hash not in locator_cache.blocks
    for locator in locator_txid_map.keys():
        assert locator not in locator_cache.cache

    # The data has been added to the cache
    locator_cache.update(block_hash, locator_txid_map)
    assert block_hash in locator_cache.blocks
    for locator in locator_txid_map.keys():
        assert locator in locator_cache.cache
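# A hedged sketch of ``update`` (assumed internals): store the block's locator:txid map, merge
# it into the flat locator cache, and evict the oldest block once the cache outgrows
# ``cache_size`` (reusing the eviction sketched after test_locator_remove_oldest_block):
def _sketch_update(locator_cache, block_hash, locator_txid_map):
    locator_cache.blocks[block_hash] = locator_txid_map
    locator_cache.cache.update(locator_txid_map)
    if locator_cache.is_full():
        _sketch_remove_oldest_block(locator_cache)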