Example #1
0
 def test_weighting_behaviour_full_usage(self):
     """
     When every memory slot is fully used, the allocation weighting must
     be zero for every slot (nothing is free to allocate).
     """
     fully_used = tf.convert_to_tensor(np.ones((5, 9), dtype=np.float32))
     weighting = AllocationAdressing.weighting(fully_used)
     all_zero = np.zeros((5, 9), dtype=np.float32)
     self.assertAllEqual(weighting, all_zero)
Example #2
0
    def test_weighting_behaviour_minmax(self):
        """
        Test that allocation weighting is ordered inversely to usage:
        the most-used slot receives the smallest weighting and the
        least-used slot receives the largest.
        """
        # Seed the generator so the test is reproducible; with continuous
        # uniform draws, ties in argmin/argmax are effectively impossible.
        rng = np.random.default_rng(seed=42)
        usage_vector = tf.convert_to_tensor(
            rng.uniform(0, 1, (8, 13)).astype(np.float32))
        weighting = AllocationAdressing.weighting(usage_vector)

        # we require that max usage gets the min weighting
        self.assertAllEqual(tf.math.argmax(usage_vector, axis=1),
                            tf.math.argmin(weighting, axis=1))
        # we require that min usage gets the max weighting
        self.assertAllEqual(tf.math.argmin(usage_vector, axis=1),
                            tf.math.argmax(weighting, axis=1))
Example #3
0
    def test_update_usage_vector(self):
        """
        Test update_usage_vector against a direct NumPy transcription of
        the DNC usage update:

            u_t = (u_{t-1} + w^w_{t-1} - u_{t-1} * w^w_{t-1}) * psi_t

        where psi_t (the retention vector) is the product over read heads
        of (1 - f_i * w^r_{i,t-1}).
        """
        free_gates = np.random.uniform(0, 1, (6, 5)).astype(np.float32)
        prev_read_weightings = DNCMemoryTests.softmax_sample((6, 3, 5))
        prev_write_weighting = DNCMemoryTests.softmax_sample((6, 3))
        prev_usage_vector = np.random.uniform(0, 1, (6, 3)).astype(np.float32)

        # np.prod, not the deprecated np.product (removed in NumPy 2.0).
        retention = np.prod(
            1 - np.expand_dims(free_gates, 1) * prev_read_weightings, axis=2)
        expected_usage_vector = (
            prev_usage_vector + prev_write_weighting -
            prev_usage_vector * prev_write_weighting) * retention

        # Convert every argument to a tensor, consistent with the other
        # AllocationAdressing tests in this file.
        new_usage_vector = AllocationAdressing.update_usage_vector(
            tf.convert_to_tensor(free_gates),
            tf.convert_to_tensor(prev_read_weightings),
            tf.convert_to_tensor(prev_write_weighting),
            tf.convert_to_tensor(prev_usage_vector),
        )

        self.assertEqual(new_usage_vector.shape, (6, 3))
        self.assertAllClose(new_usage_vector, expected_usage_vector)
Example #4
0
    def test_weighting_calculation(self):
        """
        Compare the vectorized weighting against a literal per-batch,
        per-slot reference implementation, and check that each row of
        the result sums (approximately) to one.
        """
        usage = np.random.uniform(0, 1, (3, 13)).astype(np.float32)
        actual = AllocationAdressing.weighting(tf.convert_to_tensor(usage))

        # Reference: walk slots in order of increasing usage (the "free
        # list"); each slot gets its unused fraction scaled by the
        # running product of the usages of all slots visited before it.
        order = np.argsort(usage, axis=1)
        reference = np.zeros((3, 13), dtype=np.float32)
        for b in range(3):
            running_prod = np.float32(1.0)
            for rank in range(13):
                slot = order[b, rank]
                reference[b, slot] = (1 - usage[b, slot]) * running_prod
                running_prod = running_prod * usage[b, slot]

        self.assertEqual(actual.shape, (3, 13))
        self.assertAllClose(actual, tf.convert_to_tensor(reference))
        self.assertAllClose(tf.reduce_sum(actual, axis=1), np.ones(3), 1e-2)