Example #1
    def test_zero_output(self):
        """Tests the zero-output."""
        batch_size = 2
        decoder_out_size = 3
        shortlist_size = 7
        timesteps = 7
        location_size = timesteps
        state_size = 5
        output_size = shortlist_size + location_size

        states = tf.placeholder(dtype=tf.float32, shape=[None, None, None])
        batch_dim = tf.shape(states)[0]
        location_dim = tf.shape(states)[1]
        layer = layers.PointingSoftmaxOutput(shortlist_size=shortlist_size,
                                             decoder_out_size=decoder_out_size,
                                             state_size=state_size)
        zero_output = layer.zero_output(batch_dim, location_dim)

        data = np.ones((batch_size, location_size, state_size))
        exp_shape = (batch_size, output_size)
        exp_zero_output = np.zeros(exp_shape)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            act_zero_output = sess.run(zero_output, {states: data})
        self.assertAllEqual(exp_zero_output, act_zero_output)
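
Since `zero_output` only needs the dynamic batch and location dimensions, the test can feed any tensor whose leading dimensions match; the actual values (here, ones) are irrelevant. The shape arithmetic, worked out with the test's own values, is a plain-NumPy sanity sketch, not part of the layer's API:

    import numpy as np

    batch_size, shortlist_size, location_size = 2, 7, 7
    output_size = shortlist_size + location_size        # 7 + 7 = 14
    exp_zero_output = np.zeros((batch_size, output_size))
    assert exp_zero_output.shape == (2, 14)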
Example #2
    def _build_graph(self):
        trainable = self.mode == tf.contrib.learn.ModeKeys.TRAIN
        words = self.inputs.get(self.inputs.WORDS_KEY)
        slengths = self.inputs.get(self.inputs.SENTENCE_LENGTH_KEY)
        targets = self.inputs.get(self.inputs.FORMULA_KEY)
        flengths = self.inputs.get(self.inputs.FORMULA_LENGTH_KEY)
        with self._graph.as_default():  # pylint: disable=E1129
            with tf.variable_scope('Embedding'):  # pylint: disable=E1129
                # Keep the (potentially large) embedding matrix on the CPU.
                with tf.device('CPU:0'):
                    embedding_size = self._params['embedding_size']
                    vocabulary_size = self._params[self.INPUT_VOC_SIZE_PK]
                    embeddings = tf.get_variable(
                        'E', [vocabulary_size, embedding_size])
                    inputs = tf.nn.embedding_lookup(embeddings, words)

            batch_dim = utils.get_dimension(words, 0)
            with tf.variable_scope('Encoder'):  # pylint: disable=E1129
                encoder_params = self._params['encoder']
                encoder_cell_type = encoder_params['cell.type']
                encoder_cell_params = encoder_params['cell.params']
                encoder_cell = configurable.factory(encoder_cell_type,
                                                    self._mode,
                                                    encoder_cell_params, rnn)
                state = encoder_cell.zero_state(batch_dim, tf.float32)
                encoder_out, _ = tf.nn.dynamic_rnn(
                    cell=encoder_cell,
                    initial_state=state,
                    inputs=inputs,
                    sequence_length=slengths,
                    parallel_iterations=self._params['parallel_iterations'])

            with tf.variable_scope('Decoder'):  # pylint: disable=E1129
                decoder_params = self._params['decoder']
                decoder_cell_type = decoder_params['cell.type']
                decoder_cell_params = decoder_params['cell.params']
                decoder_cell = configurable.factory(decoder_cell_type,
                                                    self._mode,
                                                    decoder_cell_params, rnn)
                attention = layers.BahdanauAttention(
                    states=encoder_out,
                    inner_size=self._params['attention_size'],
                    trainable=trainable)
                location = layers.LocationSoftmax(attention=attention,
                                                  sequence_length=slengths)
                output = layers.PointingSoftmaxOutput(
                    shortlist_size=self._params[self.OUTPUT_VOC_SIZE_PK],
                    decoder_out_size=decoder_cell.output_size,
                    state_size=encoder_out.shape[-1].value,
                    trainable=trainable)

                # Teacher forcing: during training, the gold targets are fed
                # back to the decoder as one-hot vectors over the full output
                # space (shortlist plus pointing locations).
                self._decoder_inputs = None
                if trainable:
                    location_size = utils.get_dimension(words, 1)
                    output_size = self._params[
                        self.OUTPUT_VOC_SIZE_PK] + location_size
                    self._decoder_inputs = tf.one_hot(
                        targets,
                        output_size,
                        dtype=tf.float32,
                        name='decoder_training_input')

                ps_decoder = layers.PointingSoftmaxDecoder(
                    cell=decoder_cell,
                    location_softmax=location,
                    pointing_output=output,
                    input_size=self._params['feedback_size'],
                    decoder_inputs=self._decoder_inputs,
                    trainable=trainable)

                # During inference, decoding stops at the EOS symbol and the
                # output is padded to the target length; during training,
                # termination follows the gold formula lengths.
                eos = None if trainable else self.EOS_IDX
                pad_to = None if trainable else utils.get_dimension(targets, 1)
                helper = layers.TerminationHelper(lengths=flengths, EOS=eos)
                decoder = layers.DynamicDecoder(
                    decoder=ps_decoder,
                    helper=helper,
                    pad_to=pad_to,
                    parallel_iterations=self._params['parallel_iterations'],
                    swap_memory=False)

                self._predictions, _ = decoder.decode()
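
`_build_graph` pulls every hyper-parameter from `self._params`. The schema is not shown on this page, so the following dict is a hypothetical sketch that simply covers the keys the method reads; all values are illustrative assumptions, and the two vocabulary-size entries are keyed by class constants (`INPUT_VOC_SIZE_PK`, `OUTPUT_VOC_SIZE_PK`) whose string values are not visible here:

    # Hypothetical hyper-parameter dict; values are assumptions for
    # illustration only.
    params = {
        'embedding_size': 128,
        'attention_size': 64,
        'feedback_size': 32,
        'parallel_iterations': 10,
        'encoder': {'cell.type': 'GRUCell', 'cell.params': {'num_units': 256}},
        'decoder': {'cell.type': 'GRUCell', 'cell.params': {'num_units': 256}},
        # plus entries keyed by INPUT_VOC_SIZE_PK and OUTPUT_VOC_SIZE_PK
        # for the input vocabulary and output shortlist sizes.
    }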
Example #3
    def test_base(self):
        """Base test for the PointingSoftmaxOutput layer."""
        batch_size = 2
        decoder_out_size = 3
        state_size = 4
        shortlist_size = 7
        timesteps = 5
        location_size = timesteps
        output_size = shortlist_size + location_size

        decoder_out = tf.constant(
            [[1, 1, 1], [2, 2, 2]],
            dtype=tf.float32)  # [batch_size, decoder_out_size]
        location_softmax = tf.constant(
            [[0.1, 0.1, 0.1, 0.2, 0.5], [0.2, 0.1, 0.5, 0.1, 0.1]],
            dtype=tf.float32)  # [batch_size, location_size]
        attention_context = tf.constant(
            [[3, 3, 3, 3], [4, 4, 4, 4]],
            dtype=tf.float32)  # [batch_size, state_size]
        initializer = tf.constant_initializer(value=0.1)

        with tf.variable_scope('', initializer=initializer):  # pylint: disable=E1129
            layer = layers.PointingSoftmaxOutput(
                shortlist_size=shortlist_size,
                decoder_out_size=decoder_out_size,
                state_size=state_size)
            output = layer(decoder_out=decoder_out,
                           location_softmax=location_softmax,
                           attention_context=attention_context)

        # The expected output has shape [batch_size, output_size], where
        # output_size = shortlist_size + location_size.

        exp_output = np.asarray(
            [[
                0.11886, 0.11886, 0.11886, 0.11886, 0.11886, 0.11886, 0.11886,
                0.016798, 0.016798, 0.016798, 0.033596, 0.083991
            ],
             [
                 0.12984, 0.12984, 0.12984, 0.12984, 0.12984, 0.12984, 0.12984,
                 0.018225, 0.009112, 0.045561, 0.009112, 0.009112
             ]],
            dtype=np.float32)  # pylint: disable=I0011,E1101
        exp_output_shape = (batch_size, output_size)

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            act_output = sess.run(output)
        for row in act_output:
            self.assertAllClose(sum(row), 1)
        self.assertEqual(exp_output_shape, act_output.shape)
        self.assertAllClose(exp_output, act_output)
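
Each row of the layer's output is a single probability distribution over the shortlist tokens plus the input locations, which is why every row sums to 1 above. Below is a minimal NumPy sketch of that combination, assuming the pointer-softmax formulation in which a switch probability mixes the shortlist softmax with the location softmax; the `pointing_softmax` function is illustrative, not the layer's actual API:

    import numpy as np

    def pointing_softmax(shortlist_logits, location_probs, switch_prob):
        # Hypothetical sketch: softmax over the shortlist, then mix the two
        # distributions with a switch probability in [0, 1].
        exps = np.exp(shortlist_logits - shortlist_logits.max(axis=-1, keepdims=True))
        shortlist = exps / exps.sum(axis=-1, keepdims=True)
        return np.concatenate(
            [switch_prob * shortlist, (1.0 - switch_prob) * location_probs],
            axis=-1)

    logits = np.zeros((2, 7))             # uniform shortlist scores
    locations = np.full((2, 5), 0.2)      # uniform over 5 timesteps
    switch = np.full((2, 1), 0.6)
    out = pointing_softmax(logits, locations, switch)
    assert out.shape == (2, 12)           # shortlist_size + location_size
    assert np.allclose(out.sum(axis=-1), 1.0)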