    def build(self, data, sess):
        self.build_input(data, sess)
        input_dropout = tf.nn.dropout(self.input_sent, self.dropout_keep_prob)
        W1 = weight_variable('W1', [data.embedding_size, data.embedding_size])
        b1 = bias_variable('b1', [data.embedding_size])
        layer_1 = tf.nn.relu(tf.nn.xw_plus_b(input_dropout, W1, b1))

        W2 = weight_variable('W2', [data.embedding_size, len(data.classes)])
        b2 = bias_variable('b2', [len(data.classes)])
        prediction = tf.nn.xw_plus_b(layer_1, W2, b2)
        self.create_outputs(prediction)
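All of these examples assume TensorFlow 1.x and two helpers, weight_variable and bias_variable, that are defined elsewhere in the repository. A minimal sketch consistent with how they are called (a name and a shape); the choice of initializers is an assumption:

import tensorflow as tf

def weight_variable(name, shape):
    # Glorot/Xavier initialization is a common default for dense and conv
    # layers; the repository's actual initializer may differ.
    return tf.get_variable(name, shape=shape,
                           initializer=tf.glorot_uniform_initializer())

def bias_variable(name, shape):
    # Biases initialized to zero.
    return tf.get_variable(name, shape=shape,
                           initializer=tf.zeros_initializer())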
Example #2
    def build(self, data, sess):
        self.build_input(data, sess)
        W1 = weight_variable('W1', [data.embedding_size, len(data.classes)])
        b1 = bias_variable('b1', [len(data.classes)])

        input_dropout = tf.nn.dropout(self.input_sent, self.dropout_keep_prob)
        prediction = tf.nn.xw_plus_b(input_dropout, W1, b1)

        self.create_outputs(prediction)
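build_input and create_outputs are also defined outside these snippets. For the single-task classifiers above, a plausible TF 1.x sketch, assuming input_sent holds pre-computed sentence vectors and input_label holds one-hot labels; the attribute names match the usage above, everything else is an assumption:

    def build_input(self, data, sess):
        # Placeholders matching the attributes referenced in the examples.
        self.input_sent = tf.placeholder(
            tf.float32, [None, data.embedding_size], name='input_sent')
        self.input_label = tf.placeholder(
            tf.float32, [None, len(data.classes)], name='input_label')
        self.dropout_keep_prob = tf.placeholder(
            tf.float32, name='dropout_keep_prob')

    def create_outputs(self, prediction):
        # Softmax cross-entropy over the logits, mirroring the explicit
        # loss construction in the multi-task example below.
        self.loss_individual = tf.nn.softmax_cross_entropy_with_logits(
            labels=self.input_label, logits=prediction)
        self.loss = tf.reduce_mean(self.loss_individual)
        self.predict = tf.nn.softmax(prediction)
        tf.summary.scalar('Loss', self.loss)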
Example #3
    def build(self, data, sess):
        self.build_input(data, sess)

        self.W_conv1 = weight_variable('W_conv', [self.window_size, self.embedding_size, 1, self.n_filters])
        self.b_conv1 = bias_variable('b_conv', [self.n_filters])

        representation_sentence = tf.reduce_max(
            tf.nn.relu(
                self.cnn_representation_raw(
                    self.embeddings_sentence,
                    self.sentence_length
                )
            ),
            axis=1,
            keep_dims=False
        )

        representation_sentence = tf.nn.dropout(representation_sentence, self.dropout_keep_prob)

        # Two task-specific softmax heads share one CNN sentence representation.
        W2_task_A = weight_variable('W2_task_A', [self.n_filters, len(data.classes)])
        b2_task_A = bias_variable('b2_task_A', [len(data.classes)])
        W2_task_B = weight_variable('W2_task_B', [self.n_filters, len(data.classes)])
        b2_task_B = bias_variable('b2_task_B', [len(data.classes)])

        prediction_task_A = tf.nn.xw_plus_b(representation_sentence, W2_task_A, b2_task_A)
        prediction_task_B = tf.nn.xw_plus_b(representation_sentence, W2_task_B, b2_task_B)

        with tf.variable_scope("Loss_A") as scope:
            self.loss_individual_task_A = tf.nn.softmax_cross_entropy_with_logits(
                labels=self.input_label, logits=prediction_task_A
            )
            self.loss_task_A = tf.reduce_mean(self.loss_individual_task_A)

        with tf.variable_scope("Loss_B") as scope:
            self.loss_individual_task_B = tf.nn.softmax_cross_entropy_with_logits(
                labels=self.input_label, logits=prediction_task_B
            )
            self.loss_task_B = tf.reduce_mean(self.loss_individual_task_B)

        self.predict_task_A = tf.nn.softmax(prediction_task_A)
        self.predict_task_B = tf.nn.softmax(prediction_task_B)

        tf.summary.scalar('Loss-Task-A', self.loss_task_A)
        tf.summary.scalar('Loss-Task-B', self.loss_task_B)
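cnn_representation_raw is not shown in these examples. Given the filter shape [window_size, embedding_size, 1, n_filters] and the reduce_max over axis 1 above, it is presumably a single 2-D convolution over the token/embedding grid; a sketch under that assumption (the sentence_length argument is accepted but unused here):

    def cnn_representation_raw(self, embeddings, sentence_length):
        # embeddings: [batch, sentence_length, embedding_size].
        # Add a channel axis so conv2d sees [batch, height, width, channels].
        embeddings_expanded = tf.expand_dims(embeddings, -1)
        conv = tf.nn.conv2d(embeddings_expanded, self.W_conv1,
                            strides=[1, 1, 1, 1], padding='VALID')
        # The filter spans the full embedding width, so the width axis
        # collapses to 1: [batch, length - window_size + 1, 1, n_filters].
        return tf.squeeze(tf.nn.bias_add(conv, self.b_conv1), axis=2)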
Example #4
    def build(self, data, sess):
        self.build_input(data, sess)

        W1_src = weight_variable('W1_src',
                                 [data.embedding_size, data.embedding_size])
        b1_src = bias_variable('b1_src', [data.embedding_size])

        W1_target = weight_variable('W1_target',
                                    [data.embedding_size, data.embedding_size])
        b1_target = bias_variable('b1_target', [data.embedding_size])

        source_rep = tf.nn.tanh(
            tf.nn.xw_plus_b(self.input_source, W1_src, b1_src))
        translation_rep = tf.nn.tanh(
            tf.nn.xw_plus_b(self.input_translation, W1_target, b1_target))
        random_other_rep = tf.nn.tanh(
            tf.nn.xw_plus_b(self.input_random_other, W1_target, b1_target))

        self.create_outputs(source_rep, translation_rep, random_other_rep)
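Here create_outputs receives three representations rather than logits, so the objective differs from the classifiers. The repository's version is not shown; one common choice for this source/translation/random-other triplet is a max-margin ranking loss, sketched below with an assumed dot-product similarity and an assumed margin:

    def create_outputs(self, source_rep, translation_rep, random_other_rep):
        # Pull the source towards its translation and away from a random
        # sentence by at least `margin` (hypothetical hyperparameter).
        sim_pos = tf.reduce_sum(source_rep * translation_rep, axis=1)
        sim_neg = tf.reduce_sum(source_rep * random_other_rep, axis=1)
        margin = 0.5
        self.loss_individual = tf.maximum(0.0, margin - sim_pos + sim_neg)
        self.loss = tf.reduce_mean(self.loss_individual)
        tf.summary.scalar('Loss', self.loss)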
Example #5
    def build(self, data, sess):
        self.build_input(data, sess)

        self.W_conv1 = weight_variable(
            'W_conv',
            [self.window_size, self.embedding_size, 1, self.n_filters])
        self.b_conv1 = bias_variable('b_conv', [self.n_filters])

        representation_sentence = tf.reduce_max(
            tf.nn.relu(
                self.cnn_representation_raw(self.embeddings_sentence,
                                            self.sentence_length)),
            axis=1,
            keep_dims=False)

        representation_sentence = tf.nn.dropout(representation_sentence,
                                                self.dropout_keep_prob)

        W2 = weight_variable('W2', [self.n_filters, len(data.classes)])
        b2 = bias_variable('b2', [len(data.classes)])
        prediction = tf.nn.xw_plus_b(representation_sentence, W2, b2)
        self.create_outputs(prediction)
Example #6
    def build(self, data, sess):
        self.build_input(data, sess)

        # Average the word embeddings over the real (non-padding) tokens.
        n_tokens = tf.reduce_sum(
            non_zero_tokens(tf.to_float(self.input_sentence)),
            axis=1,
            keep_dims=True)
        avg = tf.reduce_sum(self.embeddings_sentence, axis=1,
                            keep_dims=False) / n_tokens

        # Integer division keeps the hidden-layer shape an int in Python 3.
        W2 = weight_variable('W2',
                             [self.embedding_size, self.embedding_size // 4])
        b2 = bias_variable('b2', [self.embedding_size // 4])
        prediction = tf.nn.relu(tf.nn.xw_plus_b(avg, W2, b2))
        prediction = tf.nn.dropout(prediction, self.dropout_keep_prob)

        W3 = weight_variable('W3',
                             [self.embedding_size // 4,
                              len(data.classes)])
        b3 = bias_variable('b3', [len(data.classes)])
        prediction = tf.nn.xw_plus_b(prediction, W3, b3)

        self.create_outputs(prediction)
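non_zero_tokens, used above to count the real tokens per sentence, is also defined elsewhere. Since the input is a padded id matrix, a simple mask works, under the assumption that id 0 is the padding token:

def non_zero_tokens(tokens):
    # tokens: [batch, sentence_length] float tensor of token ids.
    # Returns a same-shaped 0/1 mask marking non-padding (non-zero) tokens.
    return tf.to_float(tf.not_equal(tokens, 0.0))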