def test_weighting_behaviour_full_usage(self):
    """
    Test that when all slots are used up, the allocation weighting goes
    to zero everywhere: with a usage of one in every slot, each
    (1 - usage) factor is zero, so no slot can be allocated.
    """
    usage_vector = np.full((5, 9), 1).astype(np.float32)
    weighting = AllocationAdressing.weighting(
        tf.convert_to_tensor(usage_vector))
    expected_weighting = np.full((5, 9), 0).astype(np.float32)
    self.assertAllEqual(weighting, expected_weighting)
def test_weighting_behaviour_minmax(self):
    """
    Test that we allocate based on the inverse of the usage vector.
    """
    usage_vector = tf.convert_to_tensor(
        np.random.uniform(0, 1, (8, 13)).astype(np.float32))
    weighting = AllocationAdressing.weighting(usage_vector)
    # we require that max usage gets the min weighting
    self.assertAllEqual(tf.math.argmax(usage_vector, axis=1),
                        tf.math.argmin(weighting, axis=1))
    # we require that min usage gets the max weighting
    self.assertAllEqual(tf.math.argmin(usage_vector, axis=1),
                        tf.math.argmax(weighting, axis=1))
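# A small worked example of the inverse relationship checked above, using
# the allocation formula exercised in test_weighting_calculation below
# (the values here are illustrative, not taken from the tests): for usage
# u = [0.9, 0.1, 0.5] the free list (ascending usage) is [1, 2, 0], so
#   a[1] = (1 - 0.1)             = 0.9
#   a[2] = (1 - 0.5) * 0.1       = 0.05
#   a[0] = (1 - 0.9) * 0.1 * 0.5 = 0.005
# The most-used slot (index 0) receives the smallest weighting and the
# least-used slot (index 1) receives the largest.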
def test_weighting_calculation(self):
    """
    Test that the vectorized implementation is correct and that the
    calculation forces the weighting to sum to (approximately) one.
    """
    usage_vector = np.random.uniform(0, 1, (3, 13)).astype(np.float32)
    weighting = AllocationAdressing.weighting(
        tf.convert_to_tensor(usage_vector))

    # Compute the expected weighting with an explicit (non-vectorized)
    # loop over the free list: slots are visited in order of ascending
    # usage, and each slot's weighting is (1 - usage) times the product
    # of the usages of all less-used slots.
    free_list = np.argsort(usage_vector, axis=1)
    expected_weighting = np.zeros((3, 13)).astype(np.float32)
    free_list_indices = [(x, y) for x in range(3) for y in range(13)]
    for b, j in free_list_indices:
        prod = np.prod(
            [usage_vector[b, free_list[b, i]] for i in range(j)])
        free_list_entry = free_list[b, j]
        expected_weighting[b, free_list_entry] = (
            1 - usage_vector[b, free_list_entry]) * prod

    self.assertEqual(weighting.shape, (3, 13))
    self.assertAllClose(weighting,
                        tf.convert_to_tensor(expected_weighting))
    self.assertAllClose(tf.reduce_sum(weighting, axis=1), np.ones(3),
                        rtol=1e-2)
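# For reference, the quantity these tests exercise is the allocation
# weighting a[phi[j]] = (1 - u[phi[j]]) * prod_{i<j} u[phi[i]], where u is
# the usage vector and phi is the free list (slot indices sorted by
# ascending usage). Below is a minimal vectorized sketch of such a
# weighting, assuming TensorFlow 2.x and the `tf` import already used by
# the tests above; it is an illustration of the technique only, not
# necessarily the actual AllocationAdressing.weighting implementation
# under test.
def _allocation_weighting_sketch(usage_vector):
    # Free list: slot indices ordered by ascending usage, per batch row.
    free_list = tf.argsort(usage_vector, axis=-1)
    sorted_usage = tf.gather(usage_vector, free_list, batch_dims=1)
    # Exclusive cumulative product gives prod_{i<j} u[phi[i]] at position j.
    cumprod_usage = tf.math.cumprod(sorted_usage, axis=-1, exclusive=True)
    sorted_weighting = (1 - sorted_usage) * cumprod_usage
    # Undo the sort so each weighting lands back in its original slot.
    inverse_permutation = tf.argsort(free_list, axis=-1)
    return tf.gather(sorted_weighting, inverse_permutation, batch_dims=1)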