Code Example #1
import ReadFile as RF
import tensorflow as tf
import numpy as np

x_train, y_train, vocab_size, int_to_word, word_to_int = RF.GetData()

#set the hyperparameters for the neural network
EMBEDDING_DIM = 5   # size of the embedding (hidden) layer
eta = 0.1           # learning rate
epochs = 1000       # number of training epochs

#Make placeholders for x and y
x = tf.placeholder(tf.float32, shape=(None, vocab_size))
y_label = tf.placeholder(tf.float32, shape=(None, vocab_size))
#define the embedding layer
w1 = tf.Variable(tf.random_normal([vocab_size, EMBEDDING_DIM]))
b1 = tf.Variable(tf.random_normal([EMBEDDING_DIM]))
#define output for embedding layer
hidden_representation = tf.add(tf.matmul(x, w1), b1)
#define the output layer
w2 = tf.Variable(tf.random_normal([EMBEDDING_DIM, vocab_size]))
b2 = tf.Variable(tf.random_normal([vocab_size]))
#define output for final layer
prediction = tf.nn.softmax(tf.add(tf.matmul(hidden_representation, w2), b2))
#define the loss function
cross_entropy_loss = tf.reduce_mean(
    -tf.reduce_sum(y_label * tf.log(prediction), reduction_indices=[1]))
#define the train step
train_step = tf.train.GradientDescentOptimizer(eta).minimize(
    cross_entropy_loss)
#run session
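# A minimal sketch of the training loop, assuming x_train and y_train
# returned by RF.GetData() are one-hot NumPy arrays of shape
# (num_samples, vocab_size); adjust the feed_dict if your ReadFile
# module returns a different format.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(epochs):
        # feed the full training set each epoch (mini-batching omitted)
        _, loss_val = sess.run(
            [train_step, cross_entropy_loss],
            feed_dict={x: x_train, y_label: y_train})
        if epoch % 100 == 0:
            print('epoch', epoch, 'loss', loss_val)
    # the learned word vectors live in the embedding layer: one row of
    # w1 (plus the bias b1) per vocabulary word
    vectors = sess.run(w1 + b1)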