Example #1
import tensorflow as tf

# hiddendim, multicells, dropout, inputs, lengths, and the embeddings dict
# are assumed to be defined elsewhere in the surrounding project.


def make_cell():
    # Build a fresh cell for every layer: the old `[cell] * n` idiom reuses
    # one cell object, which shares weights across layers and raises a
    # variable-reuse error on TF >= 1.1.
    cell = tf.contrib.rnn.LSTMCell(
        hiddendim,
        state_is_tuple=True,
        initializer=tf.contrib.layers.xavier_initializer(seed=20160501))
    return tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=dropout)


# Three stacked LSTMs, one per prediction head.
lstm = {}
for i in range(3):
    lstm[i] = tf.contrib.rnn.MultiRNNCell(
        cells=[make_cell() for _ in range(multicells)], state_is_tuple=True)

# Prediction: one linear head per LSTM (source: 20 classes,
# reference: 20, direction: 9).
output_layer = {}
output_layer[0] = Layer.W(multicells * hiddendim, 20, 'Output-Sou')
output_layer[1] = Layer.W(multicells * hiddendim, 20, 'Output-Ref')
output_layer[2] = Layer.W(multicells * hiddendim, 9, 'Output-Dir')
output_bias = {}
output_bias[0] = Layer.b(20, 'OutputBias-Sou')
output_bias[1] = Layer.b(20, 'OutputBias-Ref')
output_bias[2] = Layer.b(9, 'OutputBias-Dir')

outputs = {}
fstate = {}
with tf.variable_scope("lstm0"):
    outputs[0], fstate[0] = tf.nn.dynamic_rnn(lstm[0],
                                              embeddings[0].lookup(inputs),
                                              sequence_length=lengths,
                                              dtype=tf.float32)
with tf.variable_scope("lstm1"):
Example #2
import tensorflow as tf

# maxlength, vocabsize, and labelspace are assumed to be defined elsewhere,
# as are the project's Layer and Embedding helpers.
batch_size = 128
hiddendim = 100
embeddingdim = 100
onehot = True
# One-hot inputs occupy vocabsize columns per token, embedded inputs
# embeddingdim columns.
inputdim = maxlength * vocabsize if onehot else maxlength * embeddingdim

# Define embeddings matrix
embeddings = Embedding(vocabsize, one_hot=onehot, embedding_size=embeddingdim)
# Input data.
dataset = tf.placeholder(tf.int32, shape=[batch_size, maxlength], name='Train')
labels = tf.placeholder(tf.float32,
                        shape=[batch_size, labelspace],
                        name='Label')
# Model
hidden_layer = Layer.W(inputdim, hiddendim, 'Hidden')
hidden_bias = Layer.b(hiddendim, 'HiddenBias')
# Prediction
output_layer = Layer.W(hiddendim, labelspace, 'Output')
output_bias = Layer.b(labelspace, 'OutputBias')

embedded = tf.reshape(embeddings.lookup(dataset), [batch_size, inputdim])
forward = tf.nn.relu(tf.matmul(embedded, hidden_layer) + hidden_bias)
dropout = tf.nn.dropout(forward, keep_prob=0.5)
logits = tf.matmul(dropout, output_layer) + output_bias

loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
train_op = tf.train.AdamOptimizer().minimize(loss)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
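# The accuracy tensor below is not in the source snippet but is the
# conventional companion to correct_prediction.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))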

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
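    # One hypothetical training step: batch_inputs ([batch_size, maxlength]
    # int token ids) and batch_labels ([batch_size, labelspace] one-hot rows)
    # are stand-ins, not defined in the source snippet.
    _, batch_loss = sess.run([train_op, loss],
                             feed_dict={dataset: batch_inputs,
                                        labels: batch_labels})
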
Example #3
import tensorflow as tf

# rep_dim, filters, hiddendim, and vocabsize are assumed to be defined
# elsewhere, as are the project's Layer and Embedding helpers.
# Five 3x3 VALID convolutions each shrink the spatial size by 2.
final_size = rep_dim - 5 * 2
## Weights and convolutions
W = {
    'cl1': Layer.convW([3, 3, 20, filters]),
    'cl2': Layer.convW([3, 3, filters, filters]),
    'cl3': Layer.convW([3, 3, filters, filters]),
    'cl4': Layer.convW([3, 3, filters, filters]),
    'cl5': Layer.convW([3, 3, filters, filters]),
    'out': Layer.W(final_size * final_size * filters + 2 * hiddendim,
                   rep_dim * rep_dim)
}

B = {
    'cb1': Layer.b(filters, init='Normal'),
    'cb2': Layer.b(filters, init='Normal'),
    'cb3': Layer.b(filters, init='Normal'),
    'cb4': Layer.b(filters, init='Normal'),
    'cb5': Layer.b(filters, init='Normal'),
    'out': Layer.b(rep_dim * rep_dim)
}
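
The final_size = rep_dim - 5 * 2 arithmetic implies five 3x3 convolutions
with VALID padding, and the extra 2 * hiddendim columns of W['out']
presumably come from the RNN features hinted at by the '# RNN' comment
below. A minimal sketch of chaining the weights, where images and
conv_block are hypothetical:

def conv_block(x, w, b):
    # A 3x3 VALID convolution trims 2 from each spatial dimension, so five
    # blocks take rep_dim down to final_size.
    return tf.nn.relu(
        tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='VALID') + b)

# images: [batch, rep_dim, rep_dim, 20], matching the 'cl1' filter shape.
h = conv_block(images, W['cl1'], B['cb1'])
for i in range(2, 6):
    h = conv_block(h, W['cl%d' % i], B['cb%d' % i])
flat = tf.reshape(h, [-1, final_size * final_size * filters])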

# Define embeddings matrix
embeddings = Embedding(vocabsize, one_hot=False, embedding_size=hiddendim)

# RNN