Example #1
def rnn_net(x):
    # Embed the 2-D id matrix into a 3-D tensor; the RNN expects 3-D input
    inputs = slim.embed_sequence(tf.cast(x, tf.int32), vocab_size=len(lex),
                                 embed_dim=n_layer_1)  # [None, len(train_dataset[0][0]), n_layer_1], 3-D
    # One fresh cell per layer: [cell] * 4 would reuse a single cell object,
    # sharing weights across layers (and raising an error in newer TF 1.x)
    cells = [tf.contrib.rnn.BasicLSTMCell(n_layer_1, state_is_tuple=True) for _ in range(4)]
    cell = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)  # stacked, multi-layer cell
    initial_state = cell.zero_state(batch_size, tf.float32)
    outputs, last_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state, scope='rnnlm')
    # outputs shape: [batch_size, len(train_dataset[0][0]), n_layer_1]

    # Classify from the output of the last time step
    results = slim.fully_connected(outputs[:, -1, :], n_output_layer, activation_fn=None)  # [batch, 2]

    return results
def rnn_net_1(x):
    # Two stacked LSTM layers; again, build a separate cell per layer
    cells = [tf.nn.rnn_cell.BasicLSTMCell(n_layer_1, state_is_tuple=True) for _ in range(2)]
    cell = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)
    initial_state = cell.zero_state(batch_size, tf.float32)
    inputs = slim.embed_sequence(
        tf.cast(x, tf.int32), vocab_size=len(lex),
        embed_dim=n_layer_1)  # [batch_size, -1, n_layer_1]
    outputs, last_state = tf.nn.dynamic_rnn(cell,
                                            inputs,
                                            initial_state=initial_state,
                                            scope='rnnlm')
    # outputs shape: [batch_size, -1, n_layer_1]
    # outputs = tf.reshape(outputs, [-1, n_layer_1])
    logits = slim.fully_connected(outputs[:, -1, :],
                                  n_output_layer,
                                  activation_fn=None)  # [-1, 2]

    return logits
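
Both functions return raw logits, so the caller still has to attach a loss and an optimizer. A minimal training sketch, assuming `lex`, `batch_size`, `n_layer_1`, `n_output_layer`, and a sequence length `seq_len` are defined as above (the placeholder names and the optimizer choice are illustrative):

X = tf.placeholder(tf.int32, [batch_size, seq_len])            # token ids; zero_state pins the batch size
Y = tf.placeholder(tf.float32, [batch_size, n_output_layer])   # one-hot labels

logits = rnn_net(X)
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=logits))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)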
Example #3
def cnn_net(x):
    inputs = slim.embed_sequence(tf.cast(x, tf.int32), vocab_size=len(lex),
                                 embed_dim=n_layer_1)  # [batch_size, len(train_dataset[0][0]), n_layer_1], 3-D

    # Three parallel 1-D convolutions with kernel sizes 3, 4 and 5 (text-CNN style)
    branch1 = tf.layers.conv1d(inputs, n_layer_1, 3, padding='valid', activation=tf.nn.relu,
                               kernel_regularizer=tf.nn.l2_loss)
    branch2 = tf.layers.conv1d(inputs, n_layer_1, 4, padding='valid', activation=tf.nn.relu,
                               kernel_regularizer=tf.nn.l2_loss)
    branch3 = tf.layers.conv1d(inputs, n_layer_1, 5, padding='valid', activation=tf.nn.relu,
                               kernel_regularizer=tf.nn.l2_loss)
    network = tf.concat([branch1, branch2, branch3], 1)  # [batch_size, -1, n_layer_1]

    network = tf.expand_dims(network, 2)  # [batch_size, -1, 1, n_layer_1]

    # Global max pooling over the time axis (and the size-1 dummy axis)
    network = tf.reduce_max(network, [1, 2])  # [batch_size, n_layer_1]

    results = slim.fully_connected(network, n_output_layer, activation_fn=None)  # [batch, 2]

    return results
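
Note that `kernel_regularizer=tf.nn.l2_loss` only registers each kernel's L2 penalty in the `tf.GraphKeys.REGULARIZATION_LOSSES` collection; `tf.layers` never adds it to the training loss for you. A sketch of collecting it, reusing the illustrative `X`/`Y` placeholders from the sketch after Example #1 (the weighting factor is an arbitrary choice):

logits = cnn_net(X)
data_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=logits))
reg_loss = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
total_loss = data_loss + 1e-4 * reg_loss   # apply the collected L2 penalty explicitly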
Example #4
def neural_network(vocabulary_size, embedding_size=128, num_filters=128):
    '''
    Four RNN variants: multi-layer + dynamic RNN, multi-layer + static RNN,
    plain dynamic RNN, and plain static RNN
    :param vocabulary_size: size of the vocabulary
    :param embedding_size: embedding dimension
    :param num_filters: number of hidden units
    :return: logits of shape [batch, num_classes]
    '''
    # embedding layer
    with tf.device('/cpu:0'), tf.name_scope("embedding"):
        # W = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
        # embedded_chars = tf.nn.embedding_lookup(W, X)  # [-1, 8, 128]

        # embedded_chars = tflearn.embedding(X, vocabulary_size, embedding_size)
        embedded_chars = slim.embed_sequence(
            X, vocabulary_size, embedding_size)  # X must be 2-D; result is [-1, 8, 128]
        # embedded_chars_expanded = tf.expand_dims(embedded_chars, -1)  # [-1, 8, 128, 1], 4-D for convolutions

        # Method 1: multi-layer RNN + dynamic RNN
        '''
        def make_cell():
            cell = rnn.BasicLSTMCell(num_units=num_filters)
            return rnn.DropoutWrapper(cell=cell, input_keep_prob=1.0, output_keep_prob=dropout_keep_prob)

        # One fresh cell per layer; [cell] * 4 would share weights across all four layers
        cells = rnn.MultiRNNCell([make_cell() for _ in range(4)], state_is_tuple=True)
        init_state = cells.zero_state(batch_size, dtype=tf.float32)
        outputs, final_state = tf.nn.dynamic_rnn(cells, embedded_chars, initial_state=init_state, time_major=False)

        # Conceptually, the stack behaves like four chained single-layer runs:
        #   outputs, final_state = tf.nn.dynamic_rnn(cell, embedded_chars, ...)
        #   outputs, final_state = tf.nn.dynamic_rnn(cell, outputs, ...)  # repeated two more times

        # outputs shape [batch, 8, 128]; outputs[:, -1, :] shape [batch, 128]
        results = tf.layers.dense(outputs[:, -1, :], num_classes, activation=None)
        '''

        # Method 2: multi-layer RNN + static RNN
        '''
        cells = rnn.MultiRNNCell([make_cell() for _ in range(4)], state_is_tuple=True)  # 4 stacked cells
        init_state = cells.zero_state(batch_size, dtype=tf.float32)

        inputs = tf.unstack(embedded_chars, 8, 1)  # split into 8 tensors of shape [-1, 128], one per time step
        outputs, final_state = tf.nn.static_rnn(cells, inputs, initial_state=init_state)
        # outputs: a list of 8 tensors, each [batch, 128]
        results = tf.layers.dense(outputs[-1], num_classes, activation=None)
        '''

        # Method 3: dynamic RNN
        '''
        cell = rnn.BasicLSTMCell(num_units=num_filters * 2)
        init_state = cell.zero_state(batch_size, dtype=tf.float32)
        outputs, final_state = tf.nn.dynamic_rnn(cell, embedded_chars, initial_state=init_state)

        # outputs shape [batch, 8, 256]; outputs[:, -1, :] shape [batch, 256]
        results = tf.layers.dense(outputs[:, -1, :], num_classes, activation=None)
        '''

        # Method 4: static RNN
        cell = rnn.BasicLSTMCell(num_units=num_filters * 2)
        init_state = cell.zero_state(batch_size, dtype=tf.float32)

        inputs = tf.unstack(embedded_chars, 8, 1)  # split into 8 tensors of shape [-1, 128], one per time step

        outputs, final_state = tf.nn.static_rnn(cell,
                                                inputs,
                                                initial_state=init_state)
        # outputs: a list of 8 tensors, each [batch, 256]

        results = tf.layers.dense(outputs[-1], num_classes, activation=None)
        return results
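
The practical difference between the two families above: `tf.nn.dynamic_rnn` consumes one batch-major 3-D tensor and iterates with `tf.while_loop`, while `tf.nn.static_rnn` unrolls a fixed number of steps into the graph and therefore needs the `tf.unstack(..., 8, 1)` split. A minimal side-by-side sketch (the placeholder shape and cell sizes are illustrative):

import tensorflow as tf
from tensorflow.contrib import rnn

embedded = tf.placeholder(tf.float32, [None, 8, 128])    # [batch, steps, features]

# Dynamic: 3-D tensor in, 3-D tensor out
out_dyn, _ = tf.nn.dynamic_rnn(rnn.BasicLSTMCell(128), embedded,
                               dtype=tf.float32, scope='dyn')
last_dyn = out_dyn[:, -1, :]                             # [batch, 128]

# Static: list of 2-D tensors in, list of 2-D tensors out
steps = tf.unstack(embedded, 8, axis=1)                  # 8 tensors of [batch, 128]
out_sta, _ = tf.nn.static_rnn(rnn.BasicLSTMCell(128), steps,
                              dtype=tf.float32, scope='sta')
last_sta = out_sta[-1]                                   # [batch, 128]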
Example #5
    return text


# We can recreate the text, minus punctuation and other symbols, by converting the token list back into words
# tokens_to_string(x_train_tokens[1])

batch_size = 64
# --------- rewritten with the layers API --------------#
from tensorflow.contrib import slim, rnn
x = tf.placeholder(tf.float32, [None, 544])  # token ids; cast to int32 before embedding below
Y = tf.placeholder(tf.float32, [None])

inputs = slim.embed_sequence(ids=tf.cast(x, tf.int32),
                             vocab_size=num_words,
                             embed_dim=8,
                             scope='layer_embedding')
cell1 = rnn.GRUCell(16, activation=tf.nn.relu)
cell2 = rnn.GRUCell(8, activation=tf.nn.relu)
cell3 = rnn.GRUCell(4, activation=tf.nn.relu)

cell = rnn.MultiRNNCell([cell1, cell2, cell3], state_is_tuple=True)
# zero_state fixes the batch dimension: every fed batch must contain exactly batch_size rows
initial_state = cell.zero_state(batch_size, tf.float32)

outputs, last_state = tf.nn.dynamic_rnn(cell,
                                        inputs,
                                        initial_state=initial_state,
                                        scope='rnnlm')
predict = slim.fully_connected(outputs[:, -1, :], 2,
                               activation_fn=None)  # [-1,2]
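
The snippet stops at the logits; a minimal completion of the objective, assuming `Y` holds integer class ids stored as floats (the learning rate is an arbitrary choice):

loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=tf.cast(Y, tf.int32), logits=predict))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)

correct = tf.equal(tf.argmax(predict, 1), tf.cast(Y, tf.int64))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))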