Example #1
0
    def add_prediction_op(self):
        """Build the prediction sub-graph: LSTM encoder + one hidden layer.

        Returns:
            pred: unscaled class scores (logits), shape [batch_size, n_classes].

        Side effects:
            Sets ``self.regularization`` to the L2 penalty on W and U
            (scaled by ``self.config.reg``).
        """
        x = self.add_embedding(
        )  # [batch_size, num_time_steps, embedding_size]

        cell = tf.nn.rnn_cell.BasicLSTMCell(self.config.rnn_hidden_size)
        # The embedding is batch-major ([batch, time, depth]) per the comment
        # above, so dynamic_rnn must use the default time_major=False. The
        # original passed time_major=True, under which outputs[:, -1, :]
        # would have selected the last *batch* element, not the last step.
        outputs, state = tf.nn.dynamic_rnn(cell,
                                           x,
                                           dtype=tf.float32)
        x = outputs[:, -1, :]  # final time step's hidden state per example

        xavier_initializer = xavier_weight_init()

        W = tf.Variable(
            xavier_initializer(
                [self.config.rnn_hidden_size, self.config.hidden_size_1]))
        b1 = tf.Variable(
            tf.zeros([self.config.hidden_size_1], dtype=tf.float32))
        U = tf.Variable(
            xavier_initializer(
                [self.config.hidden_size_1, self.config.n_classes]))
        b3 = tf.Variable(tf.zeros([self.config.n_classes], dtype=tf.float32))

        # NOTE(review): there is no nonlinearity between the two affine
        # layers, so they compose to a single linear map — confirm intended.
        layer_1 = tf.add(tf.matmul(x, W), b1)
        h_drop = tf.nn.dropout(layer_1, self.dropout_placeholder)
        pred = tf.matmul(h_drop, U) + b3

        self.regularization = self.config.reg * tf.nn.l2_loss(
            U) + self.config.reg * tf.nn.l2_loss(W)

        return pred
Example #2
0
    def add_prediction_op(self):
        """Two-hidden-layer (sigmoid) classifier over window features.

        Returns:
            Unscaled class scores (logits), shape [batch_size, n_classes].
        """
        features = self.add_embedding()
        init = xavier_weight_init()

        # Weights/biases are created in the same order as before so that the
        # graph's variable collection is built identically.
        in_dim = self.config.n_window_features * self.config.embed_size
        w_in = tf.Variable(init([in_dim, self.config.hidden_size_1]))
        bias_1 = tf.Variable(
            tf.zeros([self.config.hidden_size_1], dtype=tf.float32))
        w_out = tf.Variable(
            init([self.config.hidden_size_2, self.config.n_classes]))
        w_mid = tf.Variable(
            init([self.config.hidden_size_1, self.config.hidden_size_2]))
        bias_2 = tf.Variable(
            tf.zeros([self.config.hidden_size_2], dtype=tf.float32))
        bias_out = tf.Variable(
            tf.zeros([self.config.n_classes], dtype=tf.float32))

        hidden_1 = tf.nn.sigmoid(tf.matmul(features, w_in) + bias_1)
        hidden_2 = tf.nn.sigmoid(tf.matmul(hidden_1, w_mid) + bias_2)
        dropped = tf.nn.dropout(hidden_2, self.dropout_placeholder)

        return tf.matmul(dropped, w_out) + bias_out
Example #3
0
    def add_prediction_op(self):
        """Build an LSTM encoder followed by a two-hidden-layer ReLU classifier.

        Returns:
            pred: softmax over classes, shape [batch, n_classes].

        Side effects:
            Sets ``self.regularization`` to the L2 penalty on W1, W2, W3.
        """
        x = self.add_embedding()

        cell = tf.nn.rnn_cell.BasicLSTMCell(self.config.rnn_hidden_size)
        # NOTE(review): time_major=True tells dynamic_rnn that x is
        # [time, batch, depth], yet outputs[:, -1, :] below indexes outputs
        # as if it were batch-major ([batch, time, depth]) — one of the two
        # looks wrong. Confirm the layout add_embedding() actually produces.
        outputs, state = tf.nn.dynamic_rnn(cell, x, time_major=True, dtype=tf.float32)
        x = outputs[:,-1,:]

        xavier_initializer = xavier_weight_init()

        # Hidden layer 1: rnn_hidden_size -> hidden_size_1
        W1 = tf.Variable(xavier_initializer([self.config.rnn_hidden_size, self.config.hidden_size_1]))
        b1 = tf.Variable(tf.zeros([self.config.hidden_size_1], dtype=tf.float32))
        # Hidden layer 2: hidden_size_1 -> hidden_size_2
        W2 = tf.Variable(xavier_initializer([self.config.hidden_size_1, self.config.hidden_size_2]))
        b2 = tf.Variable(tf.zeros([self.config.hidden_size_2], dtype=tf.float32))
        # Output layer: hidden_size_2 -> n_classes
        W3 = tf.Variable(xavier_initializer([self.config.hidden_size_2, self.config.n_classes]))
        b3 = tf.Variable(tf.zeros([self.config.n_classes], dtype=tf.float32))

        layer_1 = tf.add(tf.matmul(x, W1), b1)
        layer_1 = tf.nn.relu(layer_1)
        layer_2 = tf.add(tf.matmul(layer_1, W2), b2)
        layer_2 = tf.nn.relu(layer_2)
        distrib = tf.add(tf.matmul(layer_2, W3), b3) # not actually the probability distrib yet because using softmax_cross_entropy
        # NOTE(review): a softmax IS applied here despite the comment above;
        # if the loss then feeds pred into softmax_cross_entropy, the softmax
        # is applied twice — verify against the loss op.
        pred    = tf.nn.softmax(distrib)

        self.regularization = self.config.reg*tf.nn.l2_loss(W1) + self.config.reg*tf.nn.l2_loss(W2) + self.config.reg*tf.nn.l2_loss(W3)

        return pred
    def add_prediction_op(self):
        """Two-hidden-layer ReLU network over the flattened feature embedding.

        Returns:
            Softmax of the final affine layer, shape [batch, n_classes].

        Side effects:
            Sets ``self.regularization`` to the scaled L2 penalty on the
            three weight matrices.
        """
        feats = self.add_embedding()
        init = xavier_weight_init()

        # Parameters are created in the original order to keep graph
        # construction equivalent.
        n_in = self.config.n_features * self.config.embed_size
        w1 = tf.Variable(init([n_in, self.config.hidden_size_1]))
        bias1 = tf.Variable(tf.zeros([self.config.hidden_size_1], dtype=tf.float32))
        w2 = tf.Variable(init([self.config.hidden_size_1, self.config.hidden_size_2]))
        bias2 = tf.Variable(tf.zeros([self.config.hidden_size_2], dtype=tf.float32))
        w3 = tf.Variable(init([self.config.hidden_size_2, self.config.n_classes]))
        bias3 = tf.Variable(tf.zeros([self.config.n_classes], dtype=tf.float32))

        hidden1 = tf.nn.relu(tf.matmul(feats, w1) + bias1)
        hidden2 = tf.nn.relu(tf.matmul(hidden1, w2) + bias2)
        # Final affine scores; not yet a probability distribution until the
        # softmax below (loss reportedly uses softmax_cross_entropy).
        scores = tf.matmul(hidden2, w3) + bias3
        pred = tf.nn.softmax(scores)

        reg = self.config.reg
        self.regularization = (reg * tf.nn.l2_loss(w1)
                               + reg * tf.nn.l2_loss(w2)
                               + reg * tf.nn.l2_loss(w3))

        return pred