def test_add_contact_with_full_replacement_cache(self):
    """
    Ensures that if the replacement cache is full (length = k) then the
    oldest contact within the cache is replaced with the new contact that
    was just seen.
    """
    parent_node_id = hex((2 ** 512) + 1)[2:]
    routing_table = RoutingTable(parent_node_id)
    # The first 20 contacts fill the bucket, the next 20 fill the cache.
    for position in range(40):
        peer = PeerNode(PUBLIC_KEY, self.version,
                        'netstring://192.168.0.%d:9999/' % position, 0)
        peer.network_id = hex(position)
        routing_table.add_contact(peer)
    # Sanity check of the replacement cache.
    bucket = routing_table._buckets[0]
    cache_key = (bucket.range_min, bucket.range_max)
    self.assertEqual(20, len(routing_table._replacement_cache[cache_key]))
    self.assertEqual(hex(20),
                     routing_table._replacement_cache[cache_key][0]
                     .network_id)
    # A further contact should evict the oldest cached entry.
    new_contact = PeerNode(PUBLIC_KEY, self.version,
                           'netstring://192.168.0.20:9999/', 0)
    new_contact.network_id = hex(40)
    routing_table.add_contact(new_contact)
    cache = routing_table._replacement_cache[cache_key]
    self.assertEqual(20, len(cache))
    self.assertEqual(new_contact, cache[19])
    self.assertEqual(hex(21), cache[0].network_id)
def test_add_contact_with_existing_contact_in_replacement_cache(self):
    """
    Ensures that if the contact to be put in the replacement cache already
    exists in the replacement cache then it is bumped to the most recent
    position.
    """
    parent_node_id = hex((2 ** 512) + 1)[2:]
    routing_table = RoutingTable(parent_node_id)
    # 20 contacts fill the bucket, the next 20 fill the replacement cache.
    for position in range(40):
        peer = PeerNode(PUBLIC_KEY, self.version,
                        'netstring://192.168.0.%d:9999/' % position, 0)
        peer.network_id = hex(position)
        routing_table.add_contact(peer)
    bucket = routing_table._buckets[0]
    cache_key = (bucket.range_min, bucket.range_max)
    # Sanity check of the replacement cache.
    self.assertEqual(20, len(routing_table._replacement_cache[cache_key]))
    self.assertEqual(hex(20),
                     routing_table._replacement_cache[cache_key][0]
                     .network_id)
    # Re-add a contact whose id (hex(20)) is already cached: it should be
    # bumped to the most recent position rather than duplicated.
    duplicate = PeerNode(PUBLIC_KEY, self.version,
                         'netstring://192.168.0.41:9999/', 0)
    duplicate.network_id = hex(20)
    routing_table.add_contact(duplicate)
    cache = routing_table._replacement_cache[cache_key]
    self.assertEqual(20, len(cache))
    self.assertEqual(duplicate, cache[19])
    self.assertEqual(hex(21), cache[0].network_id)
def test_find_close_nodes_in_correct_order(self):
    """
    Ensures that the nearest nodes are returned in the correct order: from
    the node closest to the target key to the node furthest away.
    """
    routing_table = RoutingTable('deadbeef')
    # Spread 512 contacts across the keyspace (ids are powers of two).
    for i in range(512):
        peer = PeerNode(PUBLIC_KEY, self.version,
                        'netstring://192.168.0.%d:9999/' % i, 0)
        peer.network_id = hex(2 ** i)
        routing_table.add_contact(peer)
    target_key = hex(2 ** 256)
    result = routing_table.find_close_nodes(target_key)
    self.assertEqual(constants.K, len(result))

    def by_distance(node):
        return distance(node.network_id, target_key)

    # The result must already be sorted by distance to the target...
    self.assertEqual(sorted(result, key=by_distance), result)
    # ...running from the smallest distance to the largest.
    distances = [distance(node.network_id, target_key) for node in result]
    self.assertEqual(sorted(distances), distances)
def setUp(self):
    """
    Common vars.
    """
    # Fresh event loop per test so state never leaks between tests.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    self.event_loop = asyncio.get_event_loop()
    self.version = get_version()
    self.sender = mock.MagicMock()
    self.reply_port = 1908
    self.node = Node(PUBLIC_KEY, PRIVATE_KEY, self.event_loop, self.sender,
                     self.reply_port)
    self.target = TARGET
    self.seal = 'afakesealthatwillnotverify'
    node_list = []
    remote_node_list = []
    # Build 20 peers (ORDERED_HASHES[100:120]) plus their tuple
    # representations, as they would arrive from a remote node.
    for i in range(100, 120):
        uri = 'netstring://192.168.0.%d:9999/' % i
        contact = PeerNode(ORDERED_HASHES[i], self.version, uri, 0)
        node_list.append(contact)
        remote_node_list.append((ORDERED_HASHES[i], self.version, uri))
    self.nodes = tuple(sort_peer_nodes(node_list, self.target))
    self.remote_nodes = tuple(remote_node_list)

    def side_effect(*args, **kwargs):
        # Each faked send_find call yields a unique uuid and a pending
        # future, mimicking the real method's return shape.
        return (str(uuid.uuid4()), asyncio.Future())

    self.node.send_find = mock.MagicMock(side_effect=side_effect)
    self.contacts = []
    node_list = []
    # Populate the node's routing table with 20 known contacts
    # (ORDERED_HASHES[0:20]) and keep tuple copies in self.contacts.
    for i in range(20):
        uri = 'netstring://192.168.0.%d:%d/' % (i, self.reply_port)
        contact = PeerNode(ORDERED_HASHES[i], self.version, uri, 0)
        self.node.routing_table.add_contact(contact)
        self.contacts.append((ORDERED_HASHES[i], self.version, uri))
def test_eq_other_peer(self):
    """
    Ensure equality works between two PeerNode instances.
    """
    version = get_version()
    # Two peers built from identical arguments must compare equal.
    args = (PUBLIC_KEY, version, 'netstring://192.168.0.1:9999', 123)
    self.assertTrue(PeerNode(*args) == PeerNode(*args))
def test_add_contact_with_parent_node_id(self):
    """
    If the newly discovered contact is, in fact, this node then it's not
    added to the routing table.
    """
    parent_node_id = 'deadbeef'
    routing_table = RoutingTable(parent_node_id)
    # NOTE(review): the positional args look like an old
    # (key, ip, port, age) PeerNode signature rather than
    # (key, version, uri, age) — harmless here because network_id is
    # overwritten below, but worth confirming against PeerNode.__init__.
    contact = PeerNode(PUBLIC_KEY, '192.168.0.1', 9999, 0)
    contact.network_id = parent_node_id
    routing_table.add_contact(contact)
    self.assertEqual(0, len(routing_table._buckets[0]))
def test_get_contact(self):
    """
    Ensures that the correct contact is returned.
    """
    routing_table = RoutingTable('deadbeef')
    peer = PeerNode(PUBLIC_KEY, self.version,
                    'netstring://192.168.0.1:9999/', 0)
    peer.network_id = 'a'
    routing_table.add_contact(peer)
    self.assertEqual(peer, routing_table.get_contact('a'))
def test_dump(self):
    """
    Ensure the expected dictionary object is returned from a call to the
    instance's dump method (used for backing up the routing table).
    """
    uri = 'netstring://192.168.0.1:9999'
    version = get_version()
    result = PeerNode(PUBLIC_KEY, version, uri).dump()
    # Exactly three fields are persisted: public_key, version and uri.
    self.assertEqual(3, len(result))
    self.assertEqual(PUBLIC_KEY, result['public_key'])
    self.assertEqual(version, result['version'])
    self.assertEqual(uri, result['uri'])
def test_sort_peer_nodes_no_longer_than_k(self):
    """
    Ensure that no more than constants.K contacts are returned from the
    sort_peer_nodes function despite a longer list being passed in.
    """
    contacts = []
    # 512 contacts is far more than K; the ids are powers of two.
    for i in range(512):
        peer = PeerNode(str(i), self.version,
                        'netstring://192.168.0.%d:9999' % i, 0)
        peer.network_id = hex(2 ** i)
        contacts.append(peer)
    result = sort_peer_nodes(contacts, hex(2 ** 256))
    self.assertEqual(constants.K, len(result))
def test_add_contact_to_full_bucket(self):
    """
    Ensures that if one attempts to add a contact to a bucket whose size is
    greater than the constant K, then the BucketFull exception is raised.
    """
    bucket = Bucket(12345, 98765)
    # Fill the bucket to capacity (K contacts).
    for i in range(K):
        bucket.add_contact(PeerNode("%d" % i, "192.168.0.%d" % i,
                                    9999, 123))
    # One more contact must overflow.
    with self.assertRaises(BucketFull):
        overflow = PeerNode("12345", "192.168.0.2", 8888, 123)
        bucket.add_contact(overflow)
def test_find_close_nodes_single_bucket(self):
    """
    Ensures K number of closest nodes get returned.
    """
    routing_table = RoutingTable('deadbeef')
    # Fill up the bucket and replacement cache.
    for i in range(40):
        peer = PeerNode(PUBLIC_KEY, self.version,
                        'netstring://192.168.0.%d:9999/' % i, 0)
        peer.network_id = hex(i)
        routing_table.add_contact(peer)
    self.assertEqual(constants.K,
                     len(routing_table.find_close_nodes(hex(1))))
def test_find_close_nodes_fewer_than_K(self):
    """
    Ensures that all close nodes are returned if their number is < K.
    """
    routing_table = RoutingTable('deadbeef')
    # Only 10 contacts — fewer than K.
    for i in range(10):
        peer = PeerNode(PUBLIC_KEY, self.version,
                        'netstring://192.168.0.%d:9999/' % i, 0)
        peer.network_id = hex(i)
        routing_table.add_contact(peer)
    self.assertEqual(10, len(routing_table.find_close_nodes(hex(1))))
def test_sort_peer_nodes_no_longer_than_k(self):
    """
    Ensure that no more than constants.K contacts are returned from the
    sort_peer_nodes function despite a longer list being passed in.
    """
    # NOTE(review): this test appears identical to another test of the
    # same name in this file — if both live in the same TestCase class the
    # later definition shadows the earlier; confirm and de-duplicate.
    peers = []
    for i in range(512):
        peer = PeerNode(str(i), self.version,
                        'netstring://192.168.0.%d:9999' % i, 0)
        peer.network_id = hex(2 ** i)
        peers.append(peer)
    self.assertEqual(constants.K,
                     len(sort_peer_nodes(peers, hex(2 ** 256))))
def test_add_contact_simple(self):
    """
    Ensures that a newly discovered node in the network is added to the
    correct bucket in the routing table.
    """
    routing_table = RoutingTable('deadbeef')
    first = PeerNode(PUBLIC_KEY, '192.168.0.1', 9999, 0)
    first.network_id = hex(2)
    second = PeerNode(PUBLIC_KEY, '192.168.0.2', 9999, 0)
    second.network_id = hex(4)
    # Both ids fall in the single initial bucket.
    routing_table.add_contact(first)
    self.assertEqual(1, len(routing_table._buckets[0]))
    routing_table.add_contact(second)
    self.assertEqual(2, len(routing_table._buckets[0]))
def test_find_close_nodes_exclude_contact(self):
    """
    Ensure that nearest nodes are returned except for the specified
    excluded node.
    """
    routing_table = RoutingTable('deadbeef')
    peer = None
    for i in range(20):
        peer = PeerNode(PUBLIC_KEY, self.version,
                        'netstring://192.168.0.%d:9999/' % i, 0)
        peer.network_id = hex(i)
        routing_table.add_contact(peer)
    # Exclude the most recently added contact from the results.
    result = routing_table.find_close_nodes(hex(1),
                                            excluded_id=peer.network_id)
    self.assertEqual(constants.K - 1, len(result))
def test_get_contact(self):
    """
    Ensures it is possible to get a contact from the k-bucket with a valid
    id.
    """
    bucket = Bucket(12345, 98765)
    for i in range(K):
        peer = PeerNode(PUBLIC_KEY, "192.168.0.%d" % i, 9999, 123)
        peer.network_id = hex(i)
        bucket.add_contact(peer)
    # Every one of the K contacts must be retrievable by its id.
    for i in range(K):
        self.assertTrue(bucket.get_contact(hex(i)),
                        "Could not get contact with id %d" % i)
def test_find_close_nodes_multiple_buckets(self):
    """
    Ensures that nodes are returned from neighbouring k-buckets if the
    k-bucket containing the referenced ID doesn't contain K entries.
    """
    routing_table = RoutingTable('deadbeef')
    # Spread 512 contacts across the keyspace so that many buckets exist.
    for i in range(512):
        peer = PeerNode(PUBLIC_KEY, self.version,
                        'netstring://192.168.0.%d:9999/' % i, 0)
        peer.network_id = hex(2 ** i)
        routing_table.add_contact(peer)
    result = routing_table.find_close_nodes(hex(2 ** 256))
    self.assertEqual(constants.K, len(result))
def test_send_to_new_contact_successful_connection(self):
    """
    Send a message to a new contact causes a new connection to be made
    whose associated protocol object is cached for later use.
    """
    nc = NetstringConnector(self.event_loop)
    contact = PeerNode(PUBLIC_KEY, self.version,
                       'netstring://192.168.0.1:1908')
    msg = OK('uuid', 'recipient', 'sender', 9999, 'version', 'seal')
    # Fake protocol whose send_string calls can be counted and inspected.
    protocol = mock.MagicMock()
    protocol.send_string = mock.MagicMock()
    sender = Node(PUBLIC_KEY, PRIVATE_KEY, self.event_loop, nc, 1908)

    @asyncio.coroutine
    def faux_connect(protocol=protocol):
        # Mimics loop.create_connection's (transport, protocol) result.
        return ('foo', protocol)

    with mock.patch.object(self.event_loop, 'create_connection',
                           return_value=faux_connect()):
        result = nc.send(contact, msg, sender)
        self.event_loop.run_until_complete(result)
    # The message went out exactly once and the send resolved to True.
    self.assertEqual(1, protocol.send_string.call_count)
    self.assertTrue(result.done())
    self.assertEqual(True, result.result())
    # The new connection's protocol is cached for reuse, keyed on the
    # remote peer's network_id.
    self.assertIn(contact.network_id, nc._connections)
    self.assertEqual(nc._connections[contact.network_id], protocol)
    # What was actually sent is the JSON serialisation of msg.
    expected = to_dict(msg)
    actual = json.loads(protocol.send_string.call_args[0][0])
    self.assertEqual(expected, actual)
def test_remove_contact(self):
    """
    Ensures it is possible to remove a contact with a certain ID from the
    k-bucket.
    """
    bucket = Bucket(12345, 98765)
    identifiers = [hex(i) for i in range(K)]
    for i, network_id in enumerate(identifiers):
        peer = PeerNode(PUBLIC_KEY, "192.168.0.%d" % i, 9999, 123)
        peer.network_id = network_id
        bucket.add_contact(peer)
    # Remove every contact and check it is gone.
    for network_id in identifiers:
        bucket.remove_contact(network_id)
        self.assertFalse(network_id in bucket._contacts,
                         "Could not remove contact with id %s" % network_id)
def test_send_to_new_contact_failed_to_connect(self):
    """
    Sending a message to a new but unreachable contact results in the
    resulting deferred to be resolved with the expected exception.
    """
    nc = NetstringConnector(self.event_loop)
    contact = PeerNode(PUBLIC_KEY, self.version,
                       'netstring://192.168.0.1:1908')
    msg = OK('uuid', 'recipient', 'sender', 9999, 'version', 'seal')
    protocol = mock.MagicMock()

    def side_effect(*args, **kwargs):
        # Simulate the write to the remote peer blowing up.
        raise ValueError()

    protocol.send_string = mock.MagicMock(side_effect=side_effect)
    sender = Node(PUBLIC_KEY, PRIVATE_KEY, self.event_loop, nc, 1908)

    @asyncio.coroutine
    def faux_connect(protocol=protocol):
        # Mimics loop.create_connection's (transport, protocol) result.
        return ('foo', protocol)

    with mock.patch.object(self.event_loop, 'create_connection',
                           return_value=faux_connect()):
        result = nc.send(contact, msg, sender)
        with self.assertRaises(ValueError) as ex:
            self.event_loop.run_until_complete(result)
    self.assertEqual(1, protocol.send_string.call_count)
    self.assertTrue(result.done())
    # The future is resolved with the very exception that was raised.
    self.assertEqual(ex.exception, result.exception())
    # A failed connection must not be cached for reuse.
    self.assertNotIn(contact.network_id, nc._connections)
def test_add_contact_with_blacklisted_contact(self):
    """
    If the newly discovered contact is, in fact, already in the local
    node's blacklist then ensure it doesn't get re-added.
    """
    routing_table = RoutingTable('deadbeef')
    good_peer = PeerNode(PUBLIC_KEY, '192.168.0.1', 9999, 0)
    good_peer.network_id = hex(2)
    bad_peer = PeerNode(BAD_PUBLIC_KEY, '192.168.0.2', 9999, 0)
    bad_peer.network_id = hex(4)
    routing_table.blacklist(bad_peer)
    routing_table.add_contact(good_peer)
    self.assertEqual(1, len(routing_table._buckets[0]))
    # The blacklisted peer must be silently dropped.
    routing_table.add_contact(bad_peer)
    self.assertEqual(1, len(routing_table._buckets[0]))
def test_hash(self):
    """
    Ensure the hash for the object is correct.
    """
    peer = PeerNode(PUBLIC_KEY, get_version(),
                    'netstring://192.168.0.1:9999', 0)
    # A PeerNode hashes as the hash of the sha512 hex digest of its
    # public key (i.e. its network id).
    expected = hash(sha512(PUBLIC_KEY.encode('ascii')).hexdigest())
    self.assertEqual(expected, hash(peer))
def test_remove_contact(self):
    """
    Ensures it is possible to remove a contact with a certain ID from the
    k-bucket.
    """
    bucket = Bucket(12345, 98765)
    for i in range(K):
        peer = PeerNode(PUBLIC_KEY, "192.168.0.%d" % i, 9999, 123)
        peer.network_id = hex(i)
        bucket.add_contact(peer)
    for i in range(K):
        network_id = hex(i)
        bucket.remove_contact(network_id)
        self.assertFalse(
            network_id in bucket._contacts,
            "Could not remove contact with id %s" % network_id)
def test_remove_contact_with_unknown_contact(self):
    """
    Ensures that attempting to remove a non-existent contact results in no
    change.
    """
    routing_table = RoutingTable('deadbeef')
    peer = PeerNode(PUBLIC_KEY, self.version,
                    'netstring://192.168.0.1:9999/', 0)
    peer.network_id = 'a'
    routing_table.add_contact(peer)
    # Sanity check.
    self.assertEqual(1, len(routing_table._buckets[0]))
    # Removing an unknown id is a no-op that returns None.
    self.assertEqual(None, routing_table.remove_contact('b'))
    self.assertEqual(1, len(routing_table._buckets[0]))
    self.assertEqual(peer, routing_table._buckets[0]._contacts[0])
def test_remove_contact_with_cached_replacement(self):
    """
    Ensures that the removed contact is replaced by the most up-to-date
    contact in the affected k-bucket's cache.
    """
    parent_node_id = hex((2 ** 512) + 1)[2:]
    routing_table = RoutingTable(parent_node_id)
    bucket = routing_table._buckets[0]
    cache_key = (bucket.range_min, bucket.range_max)
    keeper = PeerNode(PUBLIC_KEY, self.version,
                      'netstring://192.168.0.1:9999/', 0)
    goner = PeerNode(BAD_PUBLIC_KEY, self.version,
                     'netstring://192.168.0.1:9999/', 0)
    routing_table.add_contact(keeper)
    routing_table.add_contact(goner)
    # Make the second contact eligible for eviction.
    goner.failed_RPCs = constants.ALLOWED_RPC_FAILS
    # Seed the replacement cache with a stand-in contact.
    replacement = PeerNode(PUBLIC_KEY + 'foo', self.version,
                           'netstring://192.168.0.1:9999/', 0)
    replacement.network_id = '3'
    routing_table._replacement_cache[cache_key] = [replacement, ]
    # Sanity check.
    self.assertEqual(2, len(routing_table._buckets[0]))
    self.assertEqual(1, len(routing_table._replacement_cache[cache_key]))
    routing_table.remove_contact(BAD_PUBLIC_KEY)
    # The evicted contact was replaced from the cache, which is now empty.
    self.assertEqual(2, len(routing_table._buckets[0]))
    self.assertEqual(keeper, routing_table._buckets[0]._contacts[0])
    self.assertEqual(replacement, routing_table._buckets[0]._contacts[1])
    self.assertEqual(0, len(routing_table._replacement_cache[cache_key]))
def test_ne(self):
    """
    Makes sure non-equality works between a string representation of an ID
    and a PeerNode object.
    """
    peer = PeerNode(PUBLIC_KEY, get_version(),
                    'netstring://192.168.0.1:9999', 123)
    self.assertTrue('54321' != peer)
def test_remove_contact_with_not_enough_RPC_but_forced(self):
    """
    Ensures that the contact is removed despite it's failedRPCs counter
    being less than constants.ALLOWED_RPC_FAILS because the 'forced' flag
    is used.
    """
    routing_table = RoutingTable('deadbeef')
    keeper = PeerNode(PUBLIC_KEY, self.version,
                      'netstring://192.168.0.1:9999/', 0)
    goner = PeerNode(BAD_PUBLIC_KEY, self.version,
                     'netstring://192.168.0.1:9999/', 0)
    routing_table.add_contact(keeper)
    routing_table.add_contact(goner)
    # Sanity check.
    self.assertEqual(2, len(routing_table._buckets[0]))
    # forced=True overrides the failed-RPC threshold check.
    routing_table.remove_contact(BAD_PUBLIC_KEY, forced=True)
    self.assertEqual(1, len(routing_table._buckets[0]))
def test_eq_wrong_type(self):
    """
    Ensure equality returns false if comparing a PeerNode with some other
    type of object.
    """
    peer = PeerNode(PUBLIC_KEY, get_version(),
                    'netstring://192.168.0.1:9999', 123)
    self.assertFalse(12345 == peer)
def test_len(self):
    """
    Ensures the number of nodes in the k-bucket is returned by __len__.
    """
    bucket = Bucket(12345, 98765)
    bucket.add_contact(PeerNode("12345", "192.168.0.2", 8888, 123))
    self.assertEqual(1, len(bucket))
def test_repr(self):
    """
    Ensure the repr for the object is something useful.
    """
    version = get_version()
    uri = 'netstring://192.168.0.1:9999'
    last_seen = 123
    peer = PeerNode(PUBLIC_KEY, version, uri, last_seen)
    network_id = sha512(PUBLIC_KEY.encode('ascii')).hexdigest()
    # repr is the str of the tuple of the peer's state; the trailing 0 is
    # the failed-RPC counter.
    expected = str((network_id, PUBLIC_KEY, version, uri, last_seen, 0))
    self.assertEqual(expected, repr(peer))
def test_bucket_index_out_of_range(self):
    """
    If the requested id is not within the range of the keyspace then a
    ValueError should be raised.
    """
    routing_table = RoutingTable('deadbeef')
    # Populate the routing table with contacts across the keyspace.
    for i in range(512):
        peer = PeerNode(PUBLIC_KEY, self.version,
                        'netstring://192.168.0.%d:9999/' % i, 0)
        peer.network_id = hex(2 ** i)
        routing_table.add_contact(peer)
    # An id below the keyspace minimum.
    with self.assertRaises(ValueError):
        routing_table.find_close_nodes('-1')
    # An id beyond the keyspace maximum.
    with self.assertRaises(ValueError):
        routing_table.find_close_nodes(hex(2 ** 512)[2:])
def test_eq(self):
    """
    Makes sure equality works between a string representation of an ID and
    a PeerNode object.
    """
    peer = PeerNode(PUBLIC_KEY, get_version(),
                    'netstring://192.168.0.1:9999', 123)
    # A peer compares equal to its network id string.
    network_id = sha512(PUBLIC_KEY.encode('ascii')).hexdigest()
    self.assertTrue(network_id == peer)
def test_remove_contact_with_not_enough_RPC_fails(self):
    """
    Ensures that the contact is not removed if it's failedRPCs counter is
    less than constants.ALLOWED_RPC_FAILS
    """
    routing_table = RoutingTable('deadbeef')
    first = PeerNode(PUBLIC_KEY, self.version,
                     'netstring://192.168.0.1:9999/', 0)
    first.network_id = 'a'
    second = PeerNode(PUBLIC_KEY, self.version,
                      'netstring://192.168.0.1:9999/', 0)
    second.network_id = 'b'
    routing_table.add_contact(first)
    routing_table.add_contact(second)
    # Sanity check.
    self.assertEqual(2, len(routing_table._buckets[0]))
    # Without forcing, a healthy contact stays put.
    routing_table.remove_contact('b')
    self.assertEqual(2, len(routing_table._buckets[0]))
def test_get_contact_does_not_exist(self):
    """
    Ensures that a ValueError is returned if the referenced contact does
    not exist in the routing table.
    """
    routing_table = RoutingTable('deadbeef')
    peer = PeerNode(PUBLIC_KEY, self.version,
                    'netstring://192.168.0.1:9999/', 0)
    routing_table.add_contact(peer)
    with self.assertRaises(ValueError):
        routing_table.get_contact('b')
def test_remove_contact(self):
    """
    Ensures that a contact is removed, given that it's failedRPCs counter
    exceeds or is equal to constants.ALLOWED_RPC_FAILS
    """
    routing_table = RoutingTable('deadbeef')
    keeper = PeerNode(PUBLIC_KEY, self.version,
                      'netstring://192.168.0.1:9999/', 0)
    goner = PeerNode(BAD_PUBLIC_KEY, self.version,
                     'netstring://192.168.0.1:9999/', 0)
    routing_table.add_contact(keeper)
    # goner will have enough failed RPCs to be eligible for removal.
    routing_table.add_contact(goner)
    goner.failed_RPCs = constants.ALLOWED_RPC_FAILS
    # Sanity check.
    self.assertEqual(2, len(routing_table._buckets[0]))
    routing_table.remove_contact(BAD_PUBLIC_KEY)
    self.assertEqual(1, len(routing_table._buckets[0]))
    self.assertEqual(keeper, routing_table._buckets[0]._contacts[0])
def test_add_contact_with_bucket_split(self):
    """
    Ensures that newly discovered nodes are added to the appropriate
    bucket given a bucket split.

    Fills the first bucket with 20 contacts, then adds one whose id sits
    just above the midpoint of the keyspace so the bucket must split.
    """
    parent_node_id = 'deadbeef'
    r = RoutingTable(parent_node_id)
    for i in range(20):
        uri = 'netstring://192.168.0.%d:9999/' % i
        contact = PeerNode(PUBLIC_KEY, self.version, uri, 0)
        contact.network_id = hex(i)
        r.add_contact(contact)
    # This id will be just over the max range for the bucket in position 0
    contact = PeerNode(PUBLIC_KEY, self.version,
                       'netstring://192.168.0.20:9999/', 0)
    # Bug fix: the original int(((2 ** 512) / 2) + 1) used float division,
    # and 2 ** 511 + 1 is not representable as a float, so the "+ 1" was
    # silently rounded away and the id was exactly 2 ** 511. Integer
    # (floor) division keeps full precision.
    large_id = (2 ** 512) // 2 + 1
    contact.network_id = hex(large_id)
    r.add_contact(contact)
    self.assertEqual(len(r._buckets), 2)
    self.assertEqual(len(r._buckets[0]), 20)
    self.assertEqual(len(r._buckets[1]), 1)
def test_add_existing_contact(self):
    """
    Ensures that if an existing contact is re-added to the kbucket it is
    simply moved to the end of the _contacts list (as specified in the
    original Kademlia paper) signifying that it is the most recently seen
    contact within this bucket.
    """
    bucket = Bucket(12345, 98765)
    first = PeerNode("1", "192.168.0.1", 9999, 123)
    bucket.add_contact(first)
    second = PeerNode("2", "192.168.0.2", 8888, 123)
    bucket.add_contact(second)
    # Re-adding an existing contact must not grow the bucket...
    bucket.add_contact(first)
    self.assertEqual(2, len(bucket._contacts),
                     "Too many contacts in the k-bucket.")
    # ...but it must be bumped to the most-recently-seen position.
    self.assertEqual(first, bucket._contacts[-1],
                     "The expected most recent contact is wrong.")
def test_blacklist_public_key(self):
    """
    Ensure that a contact is removed from the routing table and blacklist
    given a matching public_key.
    """
    parent_node_id = 'deadbeef'
    r = RoutingTable(parent_node_id)
    contact = PeerNode(PUBLIC_KEY, '192.168.0.1', 9999, 0)
    r.remove_contact = MagicMock()
    r._blacklist_public_key(PUBLIC_KEY)
    # Bug fix: the original called mock.called_once_with(...), which is
    # NOT an assertion — it merely creates a child mock attribute and
    # always "passes" silently. assert_called_once_with actually verifies
    # the expected call was made.
    # TODO(review): confirm the exact arguments _blacklist_public_key
    # passes to remove_contact — the original author expected
    # (contact, True).
    r.remove_contact.assert_called_once_with(contact, True)
    self.assertIn(contact.public_key, r._blacklist)
def test_add_new_contact(self):
    """
    Ensures that a new contact, when added to the kbucket is appended to
    the end of the _contacts list (as specified in the original Kademlia
    paper) signifying that it is the most recently seen contact within
    this bucket.
    """
    bucket = Bucket(12345, 98765)
    first = PeerNode(PUBLIC_KEY, "192.168.0.1", 9999, 123)
    first.network_id = hex(1)
    bucket.add_contact(first)
    self.assertEqual(1, len(bucket._contacts),
                     "Single contact not added to k-bucket.")
    second = PeerNode(PUBLIC_KEY, "192.168.0.2", 8888, 123)
    second.network_id = hex(2)
    bucket.add_contact(second)
    self.assertEqual(2, len(bucket._contacts),
                     "K-bucket's contact list not the expected length.")
    # The newest contact sits at the tail of the list.
    self.assertEqual(second, bucket._contacts[-1],
                     "K-bucket's most recent (last) contact wrong.")
def test_add_contact_with_bucket_full(self):
    """
    Checks if a bucket is full and a new contact within the full bucket's
    range is added then it gets put in the replacement cache.
    """
    parent_node_id = hex((2 ** 512) + 1)[2:]
    routing_table = RoutingTable(parent_node_id)
    # Fill up the bucket.
    for i in range(20):
        peer = PeerNode(PUBLIC_KEY, self.version,
                        'netstring://192.168.0.%d:9999/' % i, 0)
        peer.network_id = hex(i)
        routing_table.add_contact(peer)
    # One more contact lands in the replacement cache, not the bucket.
    extra = PeerNode(PUBLIC_KEY, self.version,
                     'netstring://192.168.0.20:9999/', 0)
    extra.network_id = hex(20)
    routing_table.add_contact(extra)
    bucket = routing_table._buckets[0]
    cache_key = (bucket.range_min, bucket.range_max)
    self.assertTrue(cache_key in routing_table._replacement_cache)
    self.assertEqual(20, len(bucket))
    self.assertEqual(extra, routing_table._replacement_cache[cache_key][0])
def test_sort_peer_nodes(self):
    """
    Ensures that the sort_peer_nodes function returns the list ordered in
    such a way that the contacts closest to the target key are at the head
    of the list.
    """
    peers = []
    for i in range(512):
        peer = PeerNode(str(i), self.version,
                        'netstring://192.168.0.%d:9999' % i, 0)
        peer.network_id = hex(2 ** i)
        peers.append(peer)
    target_key = hex(2 ** 256)
    result = sort_peer_nodes(peers, target_key)

    def by_distance(node):
        return distance(node.network_id, target_key)

    # The result must already be sorted by distance to the target...
    self.assertEqual(sorted(result, key=by_distance), result)
    # ...running from the closest contact to the furthest away.
    distances = [distance(node.network_id, target_key) for node in result]
    self.assertEqual(sorted(distances), distances)
def test_split_bucket(self):
    """
    Ensures that the correct bucket is split in two and that the contacts
    are found in the right place.
    """
    routing_table = RoutingTable('deadbeef')
    bucket = Bucket(0, 100)
    # Four contacts evenly spread across the bucket's [0, 100) range.
    specs = [('192.168.0.1', 9999, 20), ('192.168.0.2', 8888, 40),
             ('192.168.0.3', 8888, 60), ('192.168.0.4', 8888, 80)]
    contacts = []
    for ip, port, ident in specs:
        peer = PeerNode(PUBLIC_KEY, ip, port, 0)
        peer.network_id = hex(ident)
        bucket.add_contact(peer)
        contacts.append(peer)
    contact1, contact2, contact3, contact4 = contacts
    routing_table._buckets[0] = bucket
    # Sanity check.
    self.assertEqual(1, len(routing_table._buckets))
    routing_table._split_bucket(0)
    # Two buckets!
    self.assertEqual(2, len(routing_table._buckets))
    bucket1 = routing_table._buckets[0]
    bucket2 = routing_table._buckets[1]
    # Each half holds two contacts, in their original (oldest-first)
    # order.
    self.assertEqual(2, len(bucket1._contacts))
    self.assertEqual(2, len(bucket2._contacts))
    self.assertEqual(contact1, bucket1._contacts[0])
    self.assertEqual(contact2, bucket1._contacts[1])
    self.assertEqual(contact3, bucket2._contacts[0])
    self.assertEqual(contact4, bucket2._contacts[1])
    # Split the new bucket again, ensuring that only the target bucket is
    # modified.
    routing_table._split_bucket(1)
    self.assertEqual(3, len(routing_table._buckets))
    bucket3 = routing_table._buckets[2]
    # bucket1 remains un-changed.
    self.assertEqual(2, len(bucket1._contacts))
    # bucket2 only contains the lower half of its original contacts.
    self.assertEqual(1, len(bucket2._contacts))
    self.assertEqual(contact3, bucket2._contacts[0])
    # bucket3 now contains the upper half of the original contacts.
    self.assertEqual(1, len(bucket3._contacts))
    self.assertEqual(contact4, bucket3._contacts[0])
    # Split the bucket at position 0 and ensure the resulting buckets are
    # in the correct position with the correct content.
    routing_table._split_bucket(0)
    self.assertEqual(4, len(routing_table._buckets))
    bucket1, bucket2, bucket3, bucket4 = routing_table._buckets
    self.assertEqual(1, len(bucket1._contacts))
    self.assertEqual(contact1, bucket1._contacts[0])
    self.assertEqual(1, len(bucket2._contacts))
    self.assertEqual(contact2, bucket2._contacts[0])
    self.assertEqual(1, len(bucket3._contacts))
    self.assertEqual(contact3, bucket3._contacts[0])
    self.assertEqual(1, len(bucket4._contacts))
    self.assertEqual(contact4, bucket4._contacts[0])
def test_split_bucket_cache_update(self):
    """
    Ensures that if there are cached contacts for the split bucket then
    the two new buckets are topped up, the old cache is removed and two
    new caches created (one for each of the new buckets).
    """
    routing_table = RoutingTable('deadbeef')
    bucket = Bucket(0, 100)
    # Four contacts evenly spread across the bucket's [0, 100) range.
    specs = [('192.168.0.1', 9999, 20), ('192.168.0.2', 8888, 40),
             ('192.168.0.3', 8888, 60), ('192.168.0.4', 8888, 80)]
    contacts = []
    for ip, port, ident in specs:
        peer = PeerNode(PUBLIC_KEY, ip, port, 0)
        peer.network_id = hex(ident)
        bucket.add_contact(peer)
        contacts.append(peer)
    contact1, contact2, contact3, contact4 = contacts
    routing_table._buckets[0] = bucket
    # Seed the cache with one contact destined for each new half.
    cache_contact1 = PeerNode(PUBLIC_KEY, '192.168.0.5', 8888, 0)
    cache_contact1.network_id = hex(10)
    cache_contact2 = PeerNode(PUBLIC_KEY, '192.168.0.6', 8888, 0)
    cache_contact2.network_id = hex(70)
    routing_table._replacement_cache = {
        (0, 100): [cache_contact1, cache_contact2],
    }
    # Two buckets!
    routing_table._split_bucket(0)
    self.assertEqual(2, len(routing_table._buckets))
    bucket1 = routing_table._buckets[0]
    bucket2 = routing_table._buckets[1]
    # Each new bucket was topped up from the cache; the cached contacts
    # are appended after the original ones.
    self.assertEqual(3, len(bucket1._contacts))
    self.assertEqual(3, len(bucket2._contacts))
    self.assertEqual(contact1, bucket1._contacts[0])
    self.assertEqual(contact2, bucket1._contacts[1])
    self.assertEqual(cache_contact1, bucket1._contacts[2])
    self.assertEqual(contact3, bucket2._contacts[0])
    self.assertEqual(contact4, bucket2._contacts[1])
    self.assertEqual(cache_contact2, bucket2._contacts[2])
    # The old cache entry is replaced by one cache per new bucket.
    self.assertEqual(2, len(routing_table._replacement_cache))
    self.assertNotIn((0, 100), routing_table._replacement_cache)
    self.assertIn((0, 50), routing_table._replacement_cache)
    self.assertIn((50, 100), routing_table._replacement_cache)
def test_split_bucket_cache_too_full(self):
    """
    If the split occurs and there are too many contacts and cached
    contacts for a new bucket, the remainder or cached contacts are added
    to the cache for the new bucket.
    """
    routing_table = RoutingTable('deadbeef')
    bucket = Bucket(0, 100)

    def make_peer(ident):
        # All peers share address details; only the network id matters.
        peer = PeerNode(PUBLIC_KEY, '192.168.0.1', 9999, 0)
        peer.network_id = hex(ident)
        return peer

    # Ten contacts in each half of the bucket's range.
    low_contacts = [make_peer(i) for i in range(10)]
    high_contacts = [make_peer(i) for i in range(50, 60)]
    for peer in low_contacts + high_contacts:
        bucket.add_contact(peer)
    routing_table._buckets[0] = bucket
    # Twenty cached contacts per half — more than a split bucket can
    # absorb.
    low_cache = [make_peer(i) for i in range(10, 30)]
    high_cache = [make_peer(i) for i in range(60, 80)]
    routing_table._replacement_cache = {
        (0, 100): low_cache + high_cache,
    }
    # Two buckets!
    routing_table._split_bucket(0)
    self.assertEqual(2, len(routing_table._buckets))
    bucket1 = routing_table._buckets[0]
    bucket2 = routing_table._buckets[1]
    # Each new bucket is topped up to 20 contacts: the 10 originals
    # followed by the first 10 cached contacts for that half.
    self.assertEqual(20, len(bucket1._contacts))
    self.assertEqual(20, len(bucket2._contacts))
    for i in range(10):
        self.assertEqual(low_contacts[i], bucket1._contacts[i])
        self.assertEqual(low_cache[i], bucket1._contacts[i + 10])
        self.assertEqual(high_contacts[i], bucket2._contacts[i])
        self.assertEqual(high_cache[i], bucket2._contacts[i + 10])
    # The remaining 10 cached contacts per half stay in the new caches.
    self.assertEqual(2, len(routing_table._replacement_cache))
    self.assertNotIn((0, 100), routing_table._replacement_cache)
    self.assertIn((0, 50), routing_table._replacement_cache)
    self.assertIn((50, 100), routing_table._replacement_cache)
    self.assertEqual(10, len(routing_table._replacement_cache[(0, 50)]))
    self.assertEqual(10, len(routing_table._replacement_cache[(50, 100)]))
    for i in range(10):
        self.assertEqual(low_cache[i + 10],
                         routing_table._replacement_cache[(0, 50)][i])
        self.assertEqual(high_cache[i + 10],
                         routing_table._replacement_cache[(50, 100)][i])