Example #1
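All three snippets below are methods of the same TensorFlow 1.x model class (they assume import tensorflow as tf at module level) and together implement an ESIM-style sentence-matching network. This first block is the input encoder: the premise and hypothesis are embedded on the CPU and run through one weight-shared BiLSTM, producing the context representations a_bar and b_bar.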
    def _inputEncodingBlock(self, scope):
        # The embedding table is pinned to the CPU; only the looked-up
        # vectors move to the accelerator.
        with tf.device('/cpu:0'):
            self.Embedding = tf.get_variable(
                'Embedding', [self.n_vocab, self.embedding_size], tf.float32)
            self.embeded_left = tf.nn.embedding_lookup(self.Embedding,
                                                       self.premise)
            self.embeded_right = tf.nn.embedding_lookup(
                self.Embedding, self.hypothesis)
            print_shape('embeded_left', self.embeded_left)
            print_shape('embeded_right', self.embeded_right)

        with tf.variable_scope(scope):
            # Both sentences go through the same BiLSTM; the second call
            # reuses its variables via isReuse=True.
            outputsPremise, finalStatePremise = self._biLSTMBlock(
                self.embeded_left, self.hidden_size, 'biLSTM',
                self.premise_mask)
            outputsHypothesis, finalStateHypothesis = self._biLSTMBlock(
                self.embeded_right,
                self.hidden_size,
                'biLSTM',
                self.hypothesis_mask,
                isReuse=True)
            # Join the forward and backward outputs:
            # shape [batch, seq_len, 2 * hidden_size].
            a_bar = tf.concat(outputsPremise, axis=2)
            b_bar = tf.concat(outputsHypothesis, axis=2)
            print_shape('a_bar', a_bar)
            print_shape('b_bar', b_bar)
            return a_bar, b_bar
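Every example calls a _biLSTMBlock helper that is not shown on this page. Below is a minimal sketch of what it plausibly looks like, assuming the mask arguments carry per-example sequence lengths; the parameter names and defaults are guesses, not the original code:

    def _biLSTMBlock(self, inputs, numUnits, scope, seqLen=None, isReuse=None):
        # Hypothetical reconstruction: run a bidirectional LSTM over inputs.
        # isReuse=True shares weights with an earlier call under the same
        # scope; seqLen stops the RNN at each example's true length.
        with tf.variable_scope(scope, reuse=isReuse):
            fwCell = tf.nn.rnn_cell.LSTMCell(numUnits)
            bwCell = tf.nn.rnn_cell.LSTMCell(numUnits)
            # Returns ((output_fw, output_bw), (state_fw, state_bw)); the
            # callers concatenate the output pair along the feature axis.
            return tf.nn.bidirectional_dynamic_rnn(
                fwCell, bwCell, inputs,
                sequence_length=seqLen, dtype=tf.float32)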
Example #2
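The composition block: the enhanced local-inference features m_a and m_b are re-encoded by another shared BiLSTM, pooled over time with both average and max pooling, and the four pooled vectors are concatenated into a fixed-length vector for the final classifier.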
    def _compositionBlock(self, m_a, m_b, hiddenSize, scope):
        # One BiLSTM (reused for m_b) composes the enhanced features over
        # time; note that the scope argument is unused in this snippet.
        outputV_a, finalStateV_a = self._biLSTMBlock(m_a, hiddenSize, 'biLSTM')
        outputV_b, finalStateV_b = self._biLSTMBlock(m_b,
                                                     hiddenSize,
                                                     'biLSTM',
                                                     isReuse=True)
        v_a = tf.concat(outputV_a, axis=2)
        v_b = tf.concat(outputV_b, axis=2)

        print_shape('v_a', v_a)
        print_shape('v_b', v_b)

        # Pool over the time axis with both average and max pooling,
        # giving four fixed-size vectors of width 2 * hidden_size each.
        v_a_avg = tf.reduce_mean(v_a, axis=1)
        v_b_avg = tf.reduce_mean(v_b, axis=1)
        v_a_max = tf.reduce_max(v_a, axis=1)
        v_b_max = tf.reduce_max(v_b, axis=1)
        print_shape('v_a_avg', v_a_avg)
        print_shape('v_a_max', v_a_max)
        print_shape('v_b_avg', v_b_avg)
        print_shape('v_b_max', v_b_max)

        # Final fixed-length representation of the sentence pair.
        v = tf.concat([v_a_avg, v_a_max, v_b_avg, v_b_max], axis=1)
        print_shape('v', v)
        y_hat = self._feedForwardBlock(v, self.hidden_size, self.n_classes,
                                       'feed_forward')
        return y_hat
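_feedForwardBlock is not shown either. A plausible stand-in, assuming a single tanh hidden layer followed by a linear projection to class logits (the layer count, activation, and names are assumptions):

    def _feedForwardBlock(self, inputs, numUnits, nClasses, scope, isReuse=None):
        # Hypothetical reconstruction: a small MLP classifier head.
        # Softmax is assumed to be applied downstream, e.g. in the loss.
        with tf.variable_scope(scope, reuse=isReuse):
            hidden = tf.layers.dense(inputs, numUnits,
                                     activation=tf.nn.tanh, name='hidden')
            return tf.layers.dense(hidden, nClasses, name='logits')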
Example #3
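The local-inference block: dot-product attention soft-aligns each premise token with the hypothesis (and vice versa), and the aligned vectors are combined with elementwise differences and products into the enhanced features m_a and m_b.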
    def _localInferenceBlock(self, a_bar, b_bar, scope):
        with tf.variable_scope(scope):
            # Unnormalized alignment scores e_ij = a_bar_i . b_bar_j,
            # shape [batch, len_a, len_b].
            attentionWeights = tf.matmul(a_bar, tf.transpose(b_bar, [0, 2, 1]))
            print_shape('att_wei', attentionWeights)

            # Normalize over the last axis: across hypothesis tokens for a
            # and across premise tokens for b. perm=[0, 2, 1] keeps the
            # batch axis in place; a bare tf.transpose would reverse all
            # three axes and softmax over the batch dimension.
            attentionSoft_a = tf.nn.softmax(attentionWeights)
            attentionSoft_b = tf.nn.softmax(
                tf.transpose(attentionWeights, [0, 2, 1]))
            print_shape('att_soft_a', attentionSoft_a)
            print_shape('att_soft_b', attentionSoft_b)

            # Soft-aligned representations: a_hat attends over b_bar,
            # b_hat attends over a_bar.
            a_hat = tf.matmul(attentionSoft_a, b_bar)
            b_hat = tf.matmul(attentionSoft_b, a_bar)
            print_shape('a_hat', a_hat)
            print_shape('b_hat', b_hat)

            # Enhancement features: elementwise difference and product
            # between each representation and its aligned counterpart.
            a_diff = tf.subtract(a_bar, a_hat)
            a_mul = tf.multiply(a_bar, a_hat)
            print_shape('a_diff', a_diff)
            print_shape('a_mul', a_mul)

            b_diff = tf.subtract(b_bar, b_hat)
            b_mul = tf.multiply(b_bar, b_hat)
            print_shape('b_diff', b_diff)
            print_shape('b_mul', b_mul)

            # Concatenate along the feature axis:
            # m = [bar; hat; bar - hat; bar * hat], width 8 * hidden_size.
            m_a = tf.concat([a_bar, a_hat, a_diff, a_mul], axis=2)
            m_b = tf.concat([b_bar, b_hat, b_diff, b_mul], axis=2)
            print_shape('m_a', m_a)
            print_shape('m_b', m_b)
            return m_a, m_b
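Finally, the print_shape logging helper used throughout is also not shown; a one-line stand-in is enough to make the snippets runnable:

def print_shape(name, tensor):
    # Hypothetical debug helper: log a tensor's name and static shape.
    print('{}: {}'.format(name, tensor.get_shape()))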