Example #1
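    # assumes the test module imports numpy as np, tensorflow as tf,
    # and the project's Memory class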
    def test_write(self):
        graph = tf.Graph()
        with graph.as_default():
            with tf.Session(graph=graph) as session:

                mem = Memory(4, 5, 2, 1)
                M, u, p, L, ww, rw, r = session.run(mem.init_memory())
                key = np.random.uniform(0, 1, (1, 5, 1)).astype(np.float32)
                strength = np.random.uniform(0, 1, (1, 1)).astype(np.float32)
                free_gates = np.random.uniform(0, 1, (1, 2)).astype(np.float32)
                write_gate = np.random.uniform(0, 1, (1, 1)).astype(np.float32)
                allocation_gate = np.random.uniform(0, 1,
                                                    (1, 1)).astype(np.float32)
                write_vector = np.random.uniform(0, 1,
                                                 (1, 5)).astype(np.float32)
                erase_vector = np.zeros((1, 5)).astype(np.float32)

                u_op, ww_op, M_op, L_op, p_op = mem.write(
                    M, u, rw, ww, p, L, key, strength, free_gates,
                    allocation_gate, write_gate, write_vector, erase_vector)
                session.run(tf.global_variables_initializer())
                u, ww, M, L, p = session.run([u_op, ww_op, M_op, L_op, p_op])

                self.assertEqual(u.shape, (1, 4))
                self.assertEqual(ww.shape, (1, 4))
                self.assertEqual(M.shape, (1, 4, 5))
                self.assertEqual(L.shape, (1, 4, 4))
                self.assertEqual(p.shape, (1, 4))
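
The read path can be exercised the same way. Below is a minimal sketch, assuming Memory.read takes the argument order used in Example #4 (memory matrix, previous read weightings, read keys, read strengths, link matrix, read modes), that each of the two read heads mixes three read modes, and that the outputs follow a (batch, words, heads) / (batch, word_size, heads) layout:

    def test_read(self):
        graph = tf.Graph()
        with graph.as_default():
            with tf.Session(graph=graph) as session:

                mem = Memory(4, 5, 2, 1)
                M, u, p, L, ww, rw, r = session.run(mem.init_memory())
                # one key and strength per read head, three modes per head
                read_keys = np.random.uniform(0, 1,
                                              (1, 5, 2)).astype(np.float32)
                read_strengths = np.random.uniform(0, 1,
                                                   (1, 2)).astype(np.float32)
                read_modes = np.random.uniform(0, 1,
                                               (1, 3, 2)).astype(np.float32)

                rw_op, r_op = mem.read(M, rw, read_keys, read_strengths, L,
                                       read_modes)
                session.run(tf.global_variables_initializer())
                rw, r = session.run([rw_op, r_op])

                self.assertEqual(rw.shape, (1, 4, 2))
                self.assertEqual(r.shape, (1, 5, 2))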
Example #2
    def test_construction(self):
        interface = DNC.interface(
            read_keys=None,
            read_strengths=None,
            write_key=np.random.uniform(0, 1, (3, 9, 1)).astype(np.float32),
            write_strength=np.random.uniform(0, 1, (3, 1)).astype(np.float32),
            erase_vector=tf.convert_to_tensor(
                np.zeros((3, 9)).astype(np.float32)),
            write_vector=tf.convert_to_tensor(
                np.random.uniform(0, 1, (3, 9)).astype(np.float32)),
            free_gates=np.random.uniform(0, 1, (3, 5)).astype(np.float32),
            allocation_gate=np.random.uniform(0, 1, (3, 1)).astype(np.float32),
            write_gate=np.random.uniform(0, 1, (3, 1)).astype(np.float32),
            read_modes=None,
        )

        memory = Memory(13, 9, 5)
        memory_state = memory.get_initial_state(batch_size=3)
        # bind the updated memory matrix to a new name so the Memory
        # object itself is not shadowed
        usage, write_weighting, memory_matrix, link_matrix, precedence = memory.write(
            memory_state, interface)

        self.assertEqual(usage.shape, (3, 13))
        self.assertEqual(write_weighting.shape, (3, 13))
        self.assertEqual(memory_matrix.shape, (3, 13, 9))
        self.assertEqual(link_matrix.shape, (3, 13, 13))
        self.assertEqual(precedence.shape, (3, 13))
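
DNC.interface here acts as a named container over the interface-vector fields. A compatible definition might look like this sketch (hypothetical; the project may declare it differently):

import collections

class DNC:
    # one field per interface vector emitted by the controller
    interface = collections.namedtuple('interface', [
        'read_keys', 'read_strengths', 'write_key', 'write_strength',
        'erase_vector', 'write_vector', 'free_gates', 'allocation_gate',
        'write_gate', 'read_modes',
    ])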
Example #3
    def test_construction(self):
        interface = DNC.interface(
            read_keys=None,
            read_strengths=None,
            write_key=np.random.uniform(0, 1, (3, 9, 1)).astype(np.float32),
            write_strength=np.random.uniform(0, 1, (3, 1)).astype(np.float32),
            erase_vector=tf.convert_to_tensor(
                np.zeros((3, 9)).astype(np.float32)),
            write_vector=tf.convert_to_tensor(
                np.random.uniform(0, 1, (3, 9)).astype(np.float32)),
            free_gates=np.random.uniform(0, 1, (3, 5)).astype(np.float32),
            allocation_gate=np.random.uniform(0, 1, (3, 1)).astype(np.float32),
            write_gate=np.random.uniform(0, 1, (3, 1)).astype(np.float32),
            read_modes=None,
        )

        memory = Memory(13, 9, 5)
        memory_state = memory.initial_state(3)
        write_op = memory.write(memory_state, interface)
        init_op = tf.global_variables_initializer()

        with self.test_session() as session:
            init_op.run()
            usage, write_weighting, memory_matrix, link_matrix, precedence = session.run(
                write_op)

        self.assertEqual(usage.shape, (3, 13))
        self.assertEqual(write_weighting.shape, (3, 13))
        self.assertEqual(memory_matrix.shape, (3, 13, 9))
        self.assertEqual(link_matrix.shape, (3, 13, 13))
        self.assertEqual(precedence.shape, (3, 13))
Example #4
File: dnc.py Project: revz345/dnc
# imports assumed from the project layout; exact module paths may differ
import os

import tensorflow as tf
from tensorflow.contrib.rnn import LSTMStateTuple

import dnc.utility
from dnc.memory import Memory


class DNC:
    def __init__(self,
                 controller_class,
                 input_size,
                 output_size,
                 max_sequence_length,
                 memory_words_num=256,
                 memory_word_size=64,
                 memory_read_heads=4,
                 batch_size=128):
        """
        constructs a complete DNC architecture as described in the DNC paper
        http://www.nature.com/nature/journal/vaop/ncurrent/full/nature20101.html

        Parameters:
        -----------
        controller_class: BaseController
            a concrete implementation of the BaseController class
        input_size: int
            the size of the input vector
        output_size: int
            the size of the output vector
        max_sequence_length: int
            the maximum length of an input sequence
        memory_words_num: int
            the number of words that can be stored in memory
        memory_word_size: int
            the size of an individual word in memory
        memory_read_heads: int
            the number of read heads in the memory
        batch_size: int
            the size of the data batch
        """

        self.input_size = input_size
        self.output_size = output_size
        self.max_sequence_length = max_sequence_length
        self.words_num = memory_words_num
        self.word_size = memory_word_size
        self.read_heads = memory_read_heads
        self.batch_size = batch_size

        self.memory = Memory(self.words_num, self.word_size, self.read_heads,
                             self.batch_size)
        self.controller = controller_class(self.input_size, self.output_size,
                                           self.read_heads, self.word_size,
                                           self.batch_size)

        # input data placeholders
        self.input_data = tf.placeholder(tf.float32,
                                         [None, None, input_size],
                                         name='input')
        self.target_output = tf.placeholder(tf.float32,
                                            [None, None, output_size],
                                            name='targets')
        self.sequence_length = tf.placeholder(tf.int32, name='sequence_length')

        self.build_graph()

    def _step_op(self, step, memory_state, controller_state=None):
        """
        performs a step operation on the input step data

        Parameters:
        ----------
        step: Tensor (batch_size, input_size)
        memory_state: Tuple
            a tuple of current memory parameters
        controller_state: Tuple
            the state of the controller if it's recurrent

        Returns: list
            the updated memory state tensors, the step output, the gate
            values, and the new controller state
        """

        last_read_vectors = memory_state[6]
        pre_output, interface, nn_state = None, None, None

        if self.controller.has_recurrent_nn:
            pre_output, interface, nn_state = self.controller.process_input(
                step, last_read_vectors, controller_state)
        else:
            pre_output, interface = self.controller.process_input(
                step, last_read_vectors)

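        # memory_state layout (matches init_memory): [0] memory matrix,
        # [1] usage vector, [2] precedence vector, [3] link matrix,
        # [4] write weighting, [5] read weightings, [6] read vectors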
        usage_vector, write_weighting, memory_matrix, link_matrix, precedence_vector = self.memory.write(
            memory_state[0], memory_state[1], memory_state[5], memory_state[4],
            memory_state[2], memory_state[3], interface['write_key'],
            interface['write_strength'], interface['free_gates'],
            interface['allocation_gate'], interface['write_gate'],
            interface['write_vector'], interface['erase_vector'])

        read_weightings, read_vectors = self.memory.read(
            memory_matrix,
            memory_state[5],
            interface['read_keys'],
            interface['read_strengths'],
            link_matrix,
            interface['read_modes'],
        )

        return [

            # report new memory state to be updated outside the condition branch
            memory_matrix,
            usage_vector,
            precedence_vector,
            link_matrix,
            write_weighting,
            read_weightings,
            read_vectors,
            self.controller.final_output(pre_output, read_vectors),
            interface['free_gates'],
            interface['allocation_gate'],
            interface['write_gate'],

            # report new state of RNN if exists
            nn_state[0] if nn_state is not None else tf.zeros(1),
            nn_state[1] if nn_state is not None else tf.zeros(1)
        ]

    def _loop_body(self, time, memory_state, outputs, free_gates,
                   allocation_gates, write_gates, read_weightings,
                   write_weightings, usage_vectors, controller_state):
        """
        the body of the DNC sequence processing loop

        Parameters:
        ----------
        time: Tensor
        memory_state: Tuple
        outputs: TensorArray
        free_gates: TensorArray
        allocation_gates: TensorArray
        write_gates: TensorArray
        read_weightings: TensorArray
        write_weightings: TensorArray
        usage_vectors: TensorArray
        controller_state: Tuple

        Returns: Tuple containing all updated arguments
        """

        step_input = self.unpacked_input_data.read(time)

        output_list = self._step_op(step_input, memory_state, controller_state)
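        # output_list layout (mirrors _step_op's return value):
        # [0] memory_matrix, [1] usage_vector, [2] precedence_vector,
        # [3] link_matrix, [4] write_weighting, [5] read_weightings,
        # [6] read_vectors, [7] output, [8] free_gates,
        # [9] allocation_gate, [10] write_gate, [11]/[12] new RNN state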

        # update memory parameters

        new_memory_state = tuple(output_list[0:7])
        new_controller_state = LSTMStateTuple(output_list[11], output_list[12])

        outputs = outputs.write(time, output_list[7])

        # collecting memory view for the current step
        free_gates = free_gates.write(time, output_list[8])
        allocation_gates = allocation_gates.write(time, output_list[9])
        write_gates = write_gates.write(time, output_list[10])
        read_weightings = read_weightings.write(time, output_list[5])
        write_weightings = write_weightings.write(time, output_list[4])
        usage_vectors = usage_vectors.write(time, output_list[1])

        return (time + 1, new_memory_state, outputs, free_gates,
                allocation_gates, write_gates, read_weightings,
                write_weightings, usage_vectors, new_controller_state)

    def build_graph(self):
        """
        builds the computational graph that performs a step-by-step evaluation
        of the input data batches
        """

        self.unpacked_input_data = dnc.utility.unpack_into_tensorarray(
            self.input_data, 1, self.sequence_length)

        outputs = tf.TensorArray(tf.float32,
                                 self.sequence_length,
                                 name='outputs')
        free_gates = tf.TensorArray(tf.float32,
                                    self.sequence_length,
                                    name='free_gates')
        allocation_gates = tf.TensorArray(tf.float32,
                                          self.sequence_length,
                                          name='allocation_gates')
        write_gates = tf.TensorArray(tf.float32,
                                     self.sequence_length,
                                     name='write_gates')
        read_weightings = tf.TensorArray(tf.float32,
                                         self.sequence_length,
                                         name='read_weightings')
        write_weightings = tf.TensorArray(tf.float32,
                                          self.sequence_length,
                                          name='write_weightings')
        usage_vectors = tf.TensorArray(tf.float32,
                                       self.sequence_length,
                                       name='usage_vectors')

        controller_state = self.controller.get_state(
        ) if self.controller.has_recurrent_nn else (tf.zeros(1), tf.zeros(1))
        memory_state = self.memory.init_memory()
        if not isinstance(controller_state, LSTMStateTuple):
            controller_state = LSTMStateTuple(controller_state[0],
                                              controller_state[1])
        final_results = None

        with tf.variable_scope("sequence_loop") as scope:
            # the loop counter always starts at step 0
            time = tf.constant(0, dtype=tf.int32, name='time')
            final_results = tf.while_loop(
                cond=lambda time, *_: time < self.sequence_length,
                body=self._loop_body,
                loop_vars=(time, memory_state, outputs, free_gates,
                           allocation_gates, write_gates, read_weightings,
                           write_weightings, usage_vectors, controller_state),
                parallel_iterations=32,
                swap_memory=False)

        dependencies = []
        if self.controller.has_recurrent_nn:
            dependencies.append(self.controller.update_state(final_results[9]))

        with tf.control_dependencies(dependencies):
            self.packed_output = dnc.utility.pack_into_tensor(final_results[2],
                                                              axis=1)
            self.packed_memory_view = {
                'free_gates':
                dnc.utility.pack_into_tensor(final_results[3], axis=1),
                'allocation_gates':
                dnc.utility.pack_into_tensor(final_results[4], axis=1),
                'write_gates':
                dnc.utility.pack_into_tensor(final_results[5], axis=1),
                'read_weightings':
                dnc.utility.pack_into_tensor(final_results[6], axis=1),
                'write_weightings':
                dnc.utility.pack_into_tensor(final_results[7], axis=1),
                'usage_vectors':
                dnc.utility.pack_into_tensor(final_results[8], axis=1)
            }

    def get_outputs(self):
        """
        returns the graph nodes for the output and memory view

        Returns: Tuple
            outputs: Tensor (batch_size, time_steps, output_size)
            memory_view: dict
        """
        return self.packed_output, self.packed_memory_view

    def save(self, session, ckpts_dir, name):
        """
        saves the current values of the model's parameters to a checkpoint

        Parameters:
        ----------
        session: tf.Session
            the tensorflow session to save
        ckpts_dir: string
            the path to the checkpoints directories
        name: string
            the name of the checkpoint subdirectory
        """
        checkpoint_dir = os.path.join(ckpts_dir, name)

        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)

        tf.train.Saver(tf.trainable_variables()).save(
            session, os.path.join(checkpoint_dir, 'model.ckpt'))

    def restore(self, session, ckpts_dir, name):
        """
        session: tf.Session
            the tensorflow session to restore into
        ckpts_dir: string
            the path to the checkpoints directories
        name: string
            the name of the checkpoint subdirectory
        """
        tf.train.Saver(tf.trainable_variables()).restore(
            session, os.path.join(ckpts_dir, name, 'model.ckpt'))
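
A minimal end-to-end sketch of driving the class above. FeedforwardController is a hypothetical BaseController implementation, the output is treated as logits for a sigmoid cross-entropy loss, and the feed keys follow the placeholders defined in __init__:

import numpy as np

graph = tf.Graph()
with graph.as_default():
    with tf.Session(graph=graph) as session:
        model = DNC(FeedforwardController, input_size=10, output_size=10,
                    max_sequence_length=20, memory_words_num=16,
                    memory_word_size=8, memory_read_heads=2, batch_size=4)
        output, memory_view = model.get_outputs()
        loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=output, labels=model.target_output))

        session.run(tf.global_variables_initializer())
        loss_value, view = session.run([loss, memory_view], feed_dict={
            model.input_data: np.random.rand(4, 20, 10).astype(np.float32),
            model.target_output:
                np.random.randint(0, 2, (4, 20, 10)).astype(np.float32),
            model.sequence_length: 20,
        })
        # view['write_weightings'] etc. hold the per-step memory traces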