Code example #1
0
File: test_memory.py  Project: wyngjf/tf-DNC
    def test_construction(self):
        """A single Memory.write yields state tensors of the expected shapes.

        Dimensions (presumed from Memory(13, 9, 5) and the asserted shapes —
        confirm against Memory's signature): batch 3, 13 memory slots,
        word size 9, 5 read heads. Read-side interface fields are None
        because write() does not consume them.
        """
        interface = DNC.interface(
            read_keys=None,
            read_strengths=None,
            write_key=np.random.uniform(0, 1, (3, 9, 1)).astype(np.float32),
            write_strength=np.random.uniform(0, 1, (3, 1)).astype(np.float32),
            erase_vector=tf.convert_to_tensor(
                np.zeros((3, 9)).astype(np.float32)),
            write_vector=tf.convert_to_tensor(
                np.random.uniform(0, 1, (3, 9)).astype(np.float32)),
            free_gates=np.random.uniform(0, 1, (3, 5)).astype(np.float32),
            allocation_gate=np.random.uniform(0, 1, (3, 1)).astype(np.float32),
            write_gate=np.random.uniform(0, 1, (3, 1)).astype(np.float32),
            read_modes=None,
        )

        memory = Memory(13, 9, 5)
        memory_state = memory.get_initial_state(batch_size=3)
        # Unpack into `memory_matrix` (not `memory`) so the Memory instance
        # is not shadowed by its own output tensor.
        usage, write_weighting, memory_matrix, link_matrix, precedence = \
            memory.write(memory_state, interface)

        self.assertEqual(usage.shape, (3, 13))
        self.assertEqual(write_weighting.shape, (3, 13))
        self.assertEqual(memory_matrix.shape, (3, 13, 9))
        self.assertEqual(link_matrix.shape, (3, 13, 13))
        self.assertEqual(precedence.shape, (3, 13))
Code example #2
0
    def test_construction(self):
        """A single Memory.write yields state tensors of the expected shapes.

        TF1 graph-mode variant: builds the write op, then evaluates it in a
        test session. Dimensions (presumed from Memory(13, 9, 5) and the
        asserted shapes — confirm against Memory's signature): batch 3,
        13 memory slots, word size 9, 5 read heads.
        """
        interface = DNC.interface(
            read_keys=None,
            read_strengths=None,
            write_key=np.random.uniform(0, 1, (3, 9, 1)).astype(np.float32),
            write_strength=np.random.uniform(0, 1, (3, 1)).astype(np.float32),
            erase_vector=tf.convert_to_tensor(
                np.zeros((3, 9)).astype(np.float32)),
            write_vector=tf.convert_to_tensor(
                np.random.uniform(0, 1, (3, 9)).astype(np.float32)),
            free_gates=np.random.uniform(0, 1, (3, 5)).astype(np.float32),
            allocation_gate=np.random.uniform(0, 1, (3, 1)).astype(np.float32),
            write_gate=np.random.uniform(0, 1, (3, 1)).astype(np.float32),
            read_modes=None,
        )

        memory = Memory(13, 9, 5)
        memory_state = memory.initial_state(3)
        write_op = memory.write(memory_state, interface)
        init_op = tf.global_variables_initializer()

        with self.test_session() as session:
            init_op.run()
            # Unpack into `memory_matrix` (not `memory`) so the Memory
            # instance is not shadowed by its own output tensor.
            usage, write_weighting, memory_matrix, link_matrix, precedence = \
                session.run(write_op)

        self.assertEqual(usage.shape, (3, 13))
        self.assertEqual(write_weighting.shape, (3, 13))
        self.assertEqual(memory_matrix.shape, (3, 13, 9))
        self.assertEqual(link_matrix.shape, (3, 13, 13))
        self.assertEqual(precedence.shape, (3, 13))
Code example #3
0
File: test_memory.py  Project: wyngjf/tf-DNC
    def test_read_vectors_and_weightings(self):
        """Check the read weightings/vectors against a NumPy re-computation.

        Batch 5, 11 memory slots, word size 7, 3 read heads. Content and
        temporal-link addressing are taken as already correct; only the
        read-mode blending and the final read are verified here.
        """
        memory_state = Memory.state(
            memory_matrix=np.random.uniform(-1, 1,
                                            (5, 11, 7)).astype(np.float32),
            usage_vector=None,
            link_matrix=None,
            precedence_vector=None,
            write_weighting=None,
            read_weightings=DNCMemoryTests.softmax_sample((5, 11, 3), axis=1),
        )
        # Keep read_modes as a plain ndarray (converting only for the
        # interface) to work around a tf bug where einsum reductions do not
        # apply `convert_to_tensor` correctly:
        # https://github.com/tensorflow/tensorflow/issues/1409
        read_modes = DNCMemoryTests.softmax_sample((5, 3, 3), axis=1)
        iface = DNC.interface(
            read_keys=np.random.uniform(0, 1, (5, 7, 3)).astype(np.float32),
            read_strengths=np.random.uniform(0, 1, (5, 3)).astype(np.float32),
            write_key=None,
            write_strength=None,
            erase_vector=None,
            write_vector=None,
            free_gates=None,
            allocation_gate=None,
            write_gate=None,
            read_modes=tf.convert_to_tensor(read_modes),
        )

        # Reading consumes the link matrix produced by a preceding write.
        new_link_matrix = np.random.uniform(0, 1,
                                            (5, 11, 11)).astype(np.float32)

        # ContentAddressing and TemporalLinkAddressing are assumed correct.
        (lookup_weightings, forward_weighting, backward_weighting,
         updated_read_weightings, updated_read_vectors) = \
            self.get_addressing_weights(memory_state, iface, new_link_matrix)
        self.assertEqual(updated_read_weightings.shape, (5, 11, 3))
        self.assertEqual(updated_read_vectors.shape, (5, 7, 3))

        # Re-blend per head in NumPy; mode axis 1 is ordered
        # [backward, lookup, forward].
        expected_read_weightings = np.zeros((5, 11, 3)).astype(np.float32)
        for head in range(3):
            modes = read_modes[:, :, head]
            expected_read_weightings[:, :, head] = (
                modes[:, 0, np.newaxis] * backward_weighting[:, :, head]
                + modes[:, 1, np.newaxis] * lookup_weightings[:, :, head]
                + modes[:, 2, np.newaxis] * forward_weighting[:, :, head])
        expected_read_vectors = np.matmul(
            np.transpose(memory_state.memory_matrix, [0, 2, 1]),
            updated_read_weightings)

        self.assertAllClose(updated_read_weightings, expected_read_weightings)
        self.assertEqual(updated_read_weightings.shape, (5, 11, 3))
        self.assertAllClose(updated_read_vectors, expected_read_vectors)
Code example #4
0
    def test_read_vectors_and_weightings(self):
        """Check the read weightings/vectors against a NumPy re-computation.

        TF1 graph-mode variant. Batch 5, 11 memory slots, word size 7,
        3 read heads. Content and temporal-link addressing are taken as
        already correct; only the read-mode blending and the final read
        are verified here.
        """
        m = Memory.state(
            memory_matrix=np.random.uniform(-1, 1,
                                            (5, 11, 7)).astype(np.float32),
            usage_vector=None,
            link_matrix=None,
            precedence_vector=None,
            write_weighting=None,
            read_weightings=DNCMemoryTests.softmax_sample((5, 11, 3), axis=1),
        )
        i = DNC.interface(
            read_keys=np.random.uniform(0, 1, (5, 7, 3)).astype(np.float32),
            read_strengths=np.random.uniform(0, 1, (5, 3)).astype(np.float32),
            write_key=None,
            write_strength=None,
            erase_vector=None,
            write_vector=None,
            free_gates=None,
            allocation_gate=None,
            write_gate=None,
            read_modes=tf.convert_to_tensor(
                DNCMemoryTests.softmax_sample((5, 3, 3), axis=1)),
        )
        # Reading consumes the link matrix produced by a preceding write.
        new_link_matrix = np.random.uniform(0, 1,
                                            (5, 11, 11)).astype(np.float32)

        # ContentAddressing and TemporalLinkAddressing are assumed correct.
        op_ca = ContentAddressing.weighting(m.memory_matrix, i.read_keys,
                                            i.read_strengths)
        op_f, op_b = TemporalLinkAddressing.weightings(new_link_matrix,
                                                       m.read_weightings)
        read_op = Memory.read(m.memory_matrix, m.read_weightings,
                              new_link_matrix, i)
        with self.test_session() as session:
            lookup_weightings = session.run(op_ca)
            forward_weighting, backward_weighting = session.run([op_f, op_b])
            updated_read_weightings, updated_read_vectors = session.run(
                read_op)
            # FIX: the original opened a brand-new tf.Session() here, leaking
            # a session and re-evaluating the graph; reuse the already-open
            # test session instead. (The numpy round-trip works around the tf
            # einsum `convert_to_tensor` bug mentioned in the eager variant.)
            read_modes_numpy = session.run(i.read_modes)

        self.assertEqual(updated_read_weightings.shape, (5, 11, 3))
        self.assertEqual(updated_read_vectors.shape, (5, 7, 3))

        # Re-blend per head in NumPy; mode axis 1 is ordered
        # [backward, lookup, forward].
        expected_read_weightings = np.zeros((5, 11, 3)).astype(np.float32)
        for read_head in range(3):
            backward_weight = read_modes_numpy[:, 0, read_head, np.
                                               newaxis] * backward_weighting[:, :,
                                                                             read_head]
            lookup_weight = read_modes_numpy[:, 1, read_head, np.
                                             newaxis] * lookup_weightings[:, :,
                                                                          read_head]
            forward_weight = read_modes_numpy[:, 2, read_head, np.
                                              newaxis] * forward_weighting[:, :,
                                                                           read_head]
            expected_read_weightings[:, :,
                                     read_head] = backward_weight + lookup_weight + forward_weight
        expected_read_vectors = np.matmul(
            np.transpose(m.memory_matrix, [0, 2, 1]), updated_read_weightings)

        self.assertAllClose(updated_read_weightings, expected_read_weightings)
        self.assertEqual(updated_read_weightings.shape, (5, 11, 3))
        self.assertAllClose(updated_read_vectors, expected_read_vectors)