Example #1
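All three snippets target TensorFlow 1.x and rely on two module-level names that are not shown here: FLAGS (typically tf.app.flags.FLAGS, which supplies the default arguments) and a load_w2v() helper that returns a word-to-id mapping together with a pretrained embedding matrix. Running any of them would also require import tensorflow as tf at the top of the module.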
    def __init__(self,
                 batch_size=FLAGS.batch_size,
                 learning_rate=FLAGS.learning_rate,
                 keep_prob=FLAGS.keep_prob,
                 l2_reg=FLAGS.l2_reg,
                 display_step=FLAGS.display_step,
                 training_iter=FLAGS.training_iter,
                 embedding_dim=FLAGS.embedding_dim,
                 n_class=FLAGS.n_class,
                 max_doc_len=FLAGS.max_doc_len,
                 train_file_path=FLAGS.train_file_path,
                 w2v_file=FLAGS.embedding_file_path,
                 test_index=FLAGS.test_index,
                 embedding_type=FLAGS.embedding_type,
                 scope='test'):
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.Keep_Prob = keep_prob  # stored value; the lowercase self.keep_prob below is the dropout placeholder
        self.l2_reg = l2_reg

        self.display_step = display_step
        self.training_iter = training_iter
        self.embedding_dim = embedding_dim
        self.n_class = n_class
        self.max_doc_len = max_doc_len

        self.train_file_path = train_file_path
        self.w2v_file = w2v_file
        self.test_index = test_index
        self.embedding_type = embedding_type
        self.scope = scope

        self.word_id_mapping, self.w2v = load_w2v(self.w2v_file,
                                                  self.embedding_dim)
        if embedding_type == 0:  # Pretrained and Untrainable
            self.word_embedding = tf.constant(self.w2v,
                                              dtype=tf.float32,
                                              name='word_embedding')
        elif embedding_type == 1:  # Pretrained and Trainable
            self.word_embedding = tf.Variable(self.w2v,
                                              dtype=tf.float32,
                                              name='word_embedding')
        elif embedding_type == 2:  # Random and Trainable
            self.word_embedding = tf.Variable(
                tf.random_uniform(
                    [len(self.word_id_mapping) + 1, self.embedding_dim],
                    -0.1, 0.1),
                name='word_embedding')

        with tf.name_scope('inputs'):
            self.x = tf.placeholder(tf.int32, [None, self.max_doc_len])
            self.y = tf.placeholder(tf.float32, [None, self.n_class])
            self.doc_len = tf.placeholder(tf.int32, None)
            self.keep_prob = tf.placeholder(tf.float32)

        def init_variable(shape):
            initial = tf.random_uniform(shape, -0.01, 0.01)
            return tf.Variable(initial)

        with tf.name_scope('weights'):
            self.weights = {
                'softmax': init_variable([100, self.n_class]),  # assumes 100-d document features
                'softmax1': init_variable([200, self.n_class]),  # assumes 200-d document features
            }

        with tf.name_scope('biases'):
            self.biases = {
                'softmax': init_variable([self.n_class]),
            }
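
A minimal sketch of how these tensors are typically used; the embedding lookup and the *_batch names below are illustrative assumptions, not code from the source (the encoder that turns embedded words into the 100-d features expected by self.weights['softmax'] is elided).

        # Assumed usage, continuing inside the class (a sketch, not the
        # author's code):
        word_vecs = tf.nn.embedding_lookup(self.word_embedding, self.x)
        word_vecs = tf.nn.dropout(word_vecs, self.keep_prob)

        # At run time every placeholder is supplied through feed_dict;
        # x_batch, y_batch and len_batch are hypothetical numpy arrays.
        feed = {
            self.x: x_batch,                 # [batch, max_doc_len] word ids
            self.y: y_batch,                 # [batch, n_class] one-hot labels
            self.doc_len: len_batch,         # true length of each document
            self.keep_prob: self.Keep_Prob,  # use 1.0 at test time
        }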

Example #2
    def __init__(self,
                 batch_size=FLAGS.batch_size,
                 learning_rate=FLAGS.learning_rate,
                 keep_prob1=FLAGS.keep_prob1,
                 keep_prob2=FLAGS.keep_prob2,
                 l2_reg=FLAGS.l2_reg,
                 n_hidden=FLAGS.n_hidden,
                 n_topic=FLAGS.n_topic,
                 display_step=FLAGS.display_step,
                 training_iter=FLAGS.training_iter,
                 n_doc_class=FLAGS.n_doc_class,
                 n_sentence_class=FLAGS.n_sentence_class,
                 max_sentence_len=FLAGS.max_sentence_len,
                 max_doc_len=FLAGS.max_doc_len,
                 train_file_path=FLAGS.train_file_path,
                 emoWord_file_path=FLAGS.emoWord_file_path,
                 w2v_file=FLAGS.embedding_file_path,
                 embedding_dim=FLAGS.embedding_dim,
                 test_index=FLAGS.test_index,
                 embedding_type=FLAGS.embedding_type,
                 alpha=FLAGS.alpha,
                 scope='test'):
        self.batch_size = batch_size
        self.learning_rate = learning_rate

        self.Keep_Prob1 = keep_prob1  # stored values; the placeholder assignments
        self.Keep_Prob2 = keep_prob2  # below would otherwise overwrite them

        self.l2_reg = l2_reg

        self.n_hidden = n_hidden
        self.n_topic = n_topic

        self.display_step = display_step
        self.training_iter = training_iter

        self.n_doc_class = n_doc_class
        self.n_sentence_class = n_sentence_class

        self.max_sentence_len = max_sentence_len
        self.max_doc_len = max_doc_len

        self.train_file_path = train_file_path
        self.emoWord_file_path = emoWord_file_path
        self.w2v_file = w2v_file
        self.embedding_dim = embedding_dim

        self.test_index = test_index
        self.embedding_type = embedding_type

        self.alpha = alpha

        self.scope = scope

        self.word_id_mapping, self.w2v = load_w2v(self.w2v_file,
                                                  self.emoWord_file_path,
                                                  self.embedding_dim)

        if embedding_type == 0:  # Pretrained and Untrainable
            self.word_embedding = tf.constant(self.w2v,
                                              dtype=tf.float32,
                                              name='word_embedding')
        elif embedding_type == 1:  # Pretrained and Trainable
            self.word_embedding = tf.Variable(self.w2v,
                                              dtype=tf.float32,
                                              name='word_embedding')
        elif embedding_type == 2:  # Random and Trainable
            self.word_embedding = tf.Variable(
                tf.random_uniform(
                    [len(self.word_id_mapping) + 1, self.embedding_dim],
                    -0.1, 0.1),
                name='word_embedding')

        # Tensors that must be fed at run time
        with tf.name_scope('inputs'):
            self.x = tf.placeholder(
                tf.int32, [None, self.max_doc_len, self.max_sentence_len])
            # self.y = tf.placeholder(tf.float32, [None, self.n_class])

            self.y_doc = tf.placeholder(tf.float32, [None, self.n_doc_class])
            self.y_sen = tf.placeholder(
                tf.float32, [None, self.max_doc_len, self.n_sentence_class])

            self.sen_len = tf.placeholder(tf.int32, [None, self.max_doc_len])
            self.doc_len = tf.placeholder(tf.int32, None)

            # self.topic = tf.placeholder(tf.float32, [None, self.max_doc_len, self.n_topic])
            # add aspect id
            self.aspect_id = tf.placeholder(tf.int32, [None, self.max_doc_len],
                                            name='aspect_id')

            self.keep_prob1 = tf.placeholder(tf.float32)
            self.keep_prob2 = tf.placeholder(tf.float32)

        def init_variable(shape):
            initial = tf.random_uniform(shape, -0.01, 0.01)
            return tf.Variable(initial)

        # All trainable model parameters

        with tf.name_scope('weights'):
            self.weights = {
                # Attention sentence
                # 'w_1': tf.Variable(tf.random_uniform([2 * self.n_hidden, 2 * self.n_hidden], -0.01, 0.01)),
                # 'u_1': tf.Variable(tf.random_uniform([2 * self.n_hidden, 1], -0.01, 0.01)),
                # 'w_1': tf.Variable(tf.random_uniform([2 * self.n_hidden + self.embedding_dim, 2 * self.n_hidden + self.embedding_dim], -0.01, 0.01)),
                # 'u_1': tf.Variable(tf.random_uniform([2* self.n_hidden + self.embedding_dim, 1], -0.01, 0.01)),
                # # Attention doc
                # 'w_2': tf.Variable(tf.random_uniform([2 * self.n_hidden, 2 * self.n_hidden], -0.01, 0.01)),
                # 'u_2': tf.Variable(tf.random_uniform([2 * self.n_hidden, 1], -0.01, 0.01)),

                # softmax sentence
                # 'softmax_sen': init_variable([self.n_hidden*2 + self.n_topic, self.n_sentence_class]),
                'softmax_sen':
                init_variable([2 * self.n_hidden, self.n_sentence_class]),
                # softmax doc
                # 'softmax_doc': init_variable([self.n_hidden*2, self.n_doc_class]),
            }

        with tf.name_scope('biases'):
            self.biases = {
                # Attention
                # 'w_1': tf.Variable(tf.random_uniform([2 * self.n_hidden], -0.01, 0.01)),
                # 'w_1': tf.Variable(tf.random_uniform([2 * self.n_hidden + self.embedding_dim], -0.01, 0.01)),
                # # 'w_2': tf.Variable(tf.random_uniform([2 * self.n_hidden], -0.01, 0.01)),
                'softmax_sen': init_variable([self.n_sentence_class]),
                # 'softmax_doc': init_variable([self.n_doc_class]),
            }
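
As a hedged illustration (an assumption based on the placeholder shapes, not the author's model code), hierarchical inputs like these are usually flattened so that a sentence-level encoder can process every sentence of every document in one batch:

        # Assumed usage sketch, continuing inside the class:
        word_vecs = tf.nn.embedding_lookup(self.word_embedding, self.x)
        # shape: [batch, max_doc_len, max_sentence_len, embedding_dim]
        word_vecs = tf.reshape(
            word_vecs, [-1, self.max_sentence_len, self.embedding_dim])
        sen_len_flat = tf.reshape(self.sen_len, [-1])
        # A bidirectional RNN over word_vecs with sequence_length set to
        # sen_len_flat would produce the 2 * n_hidden features per sentence
        # that self.weights['softmax_sen'] expects.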
Example #3
    def __init__(
        self,
        batch_size=FLAGS.batch_size,
        learning_rate=FLAGS.learning_rate,
        keep_prob1=FLAGS.keep_prob1,
        keep_prob2=FLAGS.keep_prob2,
        l2_reg=FLAGS.l2_reg,
        display_step=FLAGS.display_step,
        training_iter=FLAGS.training_iter,
        embedding_dim=FLAGS.embedding_dim,
        n_class=FLAGS.n_class,
        max_doc_len=FLAGS.max_doc_len,
        max_sentence_len=FLAGS.max_sentence_len,
        n_hidden=FLAGS.n_hidden,
        train_file_path=FLAGS.train_file_path,
        test_file_path=FLAGS.test_file_path,
        w2v_file=FLAGS.embedding_file_path,
        embedding_type=0,
        scope='sentence',
    ):
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.Keep_Prob1 = keep_prob1  # stored values; the lowercase
        self.Keep_Prob2 = keep_prob2  # placeholders are created below
        self.l2_reg = l2_reg

        self.display_step = display_step
        self.training_iter = training_iter
        self.embedding_dim = embedding_dim
        self.n_class = n_class
        self.max_doc_len = max_doc_len
        self.max_sentence_len = max_sentence_len
        self.n_hidden = n_hidden

        self.train_file_path = train_file_path
        self.test_file_path = test_file_path
        self.w2v_file = w2v_file
        self.scope = scope

        self.word_id_mapping, self.w2v = load_w2v(self.w2v_file,
                                                  self.embedding_dim)
        if embedding_type == 0:  # Pretrained and Untrainable
            self.word_embedding = tf.constant(self.w2v,
                                              dtype=tf.float32,
                                              name='word_embedding')
        elif embedding_type == 1:  # Pretrained and Trainable
            self.word_embedding = tf.Variable(self.w2v,
                                              dtype=tf.float32,
                                              name='word_embedding')
        elif embedding_type == 2:  # Random and Trainable
            self.word_embedding = tf.Variable(
                tf.random_uniform(
                    [len(self.word_id_mapping) + 1, self.embedding_dim],
                    -0.1, 0.1),
                name='word_embedding')

        with tf.name_scope('inputs'):
            self.x = tf.placeholder(
                tf.int32, [None, self.max_doc_len, self.max_sentence_len])
            self.y = tf.placeholder(tf.float32, [None, self.n_class])
            self.sen_len = tf.placeholder(tf.int32, [None, self.max_doc_len])
            self.doc_len = tf.placeholder(tf.int32, None)
            self.keep_prob1 = tf.placeholder(tf.float32)
            self.keep_prob2 = tf.placeholder(tf.float32)

        def init_variable(shape):
            initial = tf.random_uniform(shape, -0.01, 0.01)
            return tf.Variable(initial)

        with tf.name_scope('weights'):
            self.weights = {
                # conv2d filters: [n-gram height, width=embedding_dim,
                # in_channels, out_channels]
                'conv1':
                init_variable([3, self.embedding_dim, 1, self.n_hidden]),
                'conv2':
                init_variable([2, self.embedding_dim, 1, self.n_hidden]),
                'conv3':
                init_variable([1, self.embedding_dim, 1, self.n_hidden]),
                'gconv1':
                init_variable([3, self.embedding_dim, 1, self.n_hidden]),
                'gconv2':
                init_variable([2, self.embedding_dim, 1, self.n_hidden]),
                'gconv3':
                init_variable([1, self.embedding_dim, 1, self.n_hidden]),
                'softmax': init_variable([2 * self.n_hidden, self.n_class]),
            }
            # GRU cells, presumably for a bidirectional document-level RNN
            self.cell_fw = tf.nn.rnn_cell.GRUCell(self.n_hidden)
            self.cell_bw = tf.nn.rnn_cell.GRUCell(self.n_hidden)

        with tf.name_scope('biases'):
            self.biases = {
                'conv1': init_variable([self.n_hidden]),
                'conv2': init_variable([self.n_hidden]),
                'conv3': init_variable([self.n_hidden]),
                'gconv1': init_variable([self.n_hidden]),
                'gconv2': init_variable([self.n_hidden]),
                'gconv3': init_variable([self.n_hidden]),
                'softmax': init_variable([self.n_class]),
            }
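
The paired conv/gconv filters hint at a gated convolution over each sentence; the following is a sketch under that assumption (the gating form and the pooling are guesses for illustration, not the source's code):

        # Hedged sketch of applying one conv/gconv filter pair:
        sen = tf.nn.embedding_lookup(self.word_embedding, self.x)
        sen = tf.reshape(
            sen, [-1, self.max_sentence_len, self.embedding_dim, 1])
        conv = tf.nn.conv2d(sen, self.weights['conv1'],
                            strides=[1, 1, 1, 1], padding='VALID')
        gate = tf.nn.conv2d(sen, self.weights['gconv1'],
                            strides=[1, 1, 1, 1], padding='VALID')
        gated = tf.tanh(conv + self.biases['conv1']) * \
            tf.sigmoid(gate + self.biases['gconv1'])
        # Pool away the time and width axes: [batch * max_doc_len, n_hidden].
        pooled = tf.reduce_max(gated, axis=[1, 2])
        # self.cell_fw / self.cell_bw would then encode the sentence vectors
        # at document level, yielding the 2 * n_hidden features expected by
        # self.weights['softmax'].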