# --- Hyperparameters for a character-level RNN (TF1-style graph) ---
SEQLEN = 30            # unrolled sequence length per training sample
BATCHSIZE = 100        # sequences per minibatch
ALPHASIZE = txt.ALPHASIZE  # alphabet size, taken from the project's `txt` helper module
INTERNALSIZE = 512     # GRU internal state size per layer
NLAYERS = 3            # number of stacked GRU layers
learning_rate = 0.001  # fixed learning rate

# load data, either davinci, or the Python source of Tensorflow itself
shakedir = "davinci/*.txt"
# shakedir = "../tensorflow/**/*.py"
# NOTE(review): validation=False, so `valitext` is presumably empty here —
# confirm against txt.read_data_files' return contract.
codetext, valitext, bookranges = txt.read_data_files(shakedir,
                                                     validation=False)

# display some stats on the data
epoch_size = len(codetext) // (BATCHSIZE * SEQLEN)  # full minibatches per epoch
txt.print_data_stats(len(codetext), len(valitext), epoch_size)

#
# the model
#
# TF1 graph construction: placeholders are fed values at session run time.
lr = tf.placeholder(tf.float32, name='lr')  # learning rate
batchsize = tf.placeholder(tf.int32, name='batchsize')

# inputs: characters encoded as uint8 ids, then one-hot encoded
X = tf.placeholder(tf.uint8, [None, None], name='X')  # [ BATCHSIZE, SEQLEN ]
Xo = tf.one_hot(X, ALPHASIZE, 1.0, 0.0)  # [ BATCHSIZE, SEQLEN, ALPHASIZE ]
# expected outputs = same sequence shifted by 1 since we are trying to predict the next character
Y_ = tf.placeholder(tf.uint8, [None, None], name='Y_')  # [ BATCHSIZE, SEQLEN ]
Yo_ = tf.one_hot(Y_, ALPHASIZE, 1.0, 0.0)  # [ BATCHSIZE, SEQLEN, ALPHASIZE ]

# one GRU cell per layer; presumably stacked with MultiRNNCell later (not shown here)
cells = [rnn.GRUCell(INTERNALSIZE) for _ in range(NLAYERS)]
# Beispiel #2 (separator between scraped examples — not valid Python as plain text)
# 0
NUM_LAYERS = 3  # How many layers deep we are going

SET_LR = 0.001  # Small fixed learning rate
SET_PKEEP = 0.75  # Dropout keep-probability: keep 75% of neurons (drop 25%)

# Seed TF's graph-level random number generator for reproducible runs
tf.set_random_seed(0)

# Load our Star Wars Scripts.
# NOTE(review): BATCH_SIZE, SEQ_LEN, ALPHA_SIZE and NUM_OF_GRUS are used below
# but presumably defined earlier in the file — not visible in this chunk.
filedir = "StarWarsScripts/*.txt"
traintext, validtext, scriptranges = txt.read_data_files(filedir,
                                                         validation=True)

# Print out information about our data
size_of_epoch = len(traintext) // (BATCH_SIZE * SEQ_LEN)  # full minibatches per epoch
txt.print_data_stats(len(traintext), len(validtext), size_of_epoch)

# Create our TensorFlow Graph (TF1 style: placeholders fed at session run time).
batchsize = tf.placeholder(tf.int32, name='batchsize')
lr = tf.placeholder(tf.float32, name='lr')       # learning rate fed per step
pkeep = tf.placeholder(tf.float32, name='pkeep')  # dropout keep-probability
X = tf.placeholder(tf.uint8, [None, None], name='X')  # Input vector of character ids
Xo = tf.one_hot(
    X, ALPHA_SIZE, 1.0,
    0.0)  # One Hots create vector size ALPHA_SIZE, all set 0 except character
Y_ = tf.placeholder(tf.uint8, [None, None], name='Y_')  # Expected output tensor
Yo_ = tf.one_hot(Y_, ALPHA_SIZE, 1.0, 0.0)  # OneHot our output  also
# Recurrent input state: one flat vector holding all layers' GRU states
Hin = tf.placeholder(tf.float32, [None, NUM_OF_GRUS * NUM_LAYERS],
                     name='Hin')  # Recurrent input states
cells = [rnn.GRUCell(NUM_OF_GRUS)
         for _ in range(NUM_LAYERS)]  # Create all our GRU cells per layer