Example #1
def getInputData(batchsize):
    # Build the input pipeline over the training files and decode one batch
    # of text into per-character integer ids via the module-level dict1 map.
    readdata = ReadData()
    trainingFiles, testingFiles = readdata.filePathConstructor()
    features = readdata.input_pipeline(trainingFiles, batchsize)
    example_batch = tf.reshape(features, [-1])
    item = tf.string_split(example_batch, delimiter="").values.eval()
    return [dict1[alp.decode().lower()] for alp in item]
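
getInputData relies on a module-level dict1 character-to-index map that is not shown in this snippet. A minimal sketch of the mapping it appears to expect, mirroring the one built inside main() in Example #2:

import string

# Assumed module-level map: 'a'..'z' -> 1..26, space -> 0, ';' and '-' -> -1
# (mirrors the dict1 built in Example #2 below).
dict1 = {c: i + 1 for i, c in enumerate(string.ascii_lowercase)}
dict1[' '] = 0
dict1[';'] = -1
dict1['-'] = -1
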
Example #2
def main():
    readdata = ReadData()

    trainingFiles, testingFiles = readdata.filePathConstructor()
    features = readdata.input_pipeline(trainingFiles, batch_size)

    with tf.Session() as sess:
        # Create the graph, etc.
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        # Start populating the filename queue.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        # Map 'a'..'z' to 1..26; space and punctuation are handled below.
        dict1 = {
            value: key + 1
            for key, value in enumerate(string.ascii_lowercase)
        }
        dict1[' '] = 0
        dict1[';'] = -1
        dict1['-'] = -1
        vocab_size = len(dict1)
        for i in range(1):
            example_batch = tf.reshape(features, [-1])
            item = tf.string_split(example_batch, delimiter="").values.eval()
            chars = [dict1[alp.decode().lower()] for alp in list(item)]
            data_size = len(chars)
            print('Data has %d characters, %d unique.' %
                  (data_size, vocab_size))

            # # Hyper-parameters
            # hidden_size   = 100  # hidden layer's size
            # seq_length    = 25   # number of steps to unroll
            # learning_rate = 1e-1

            # inputs     = tf.placeholder(shape=[None, vocab_size], dtype=tf.float32, name="inputs")
            # targets    = tf.placeholder(shape=[None, vocab_size], dtype=tf.float32, name="targets")
            # init_state = tf.placeholder(shape=[1, hidden_size], dtype=tf.float32, name="state")

            # initializer = tf.random_normal_initializer(stddev=1.0)

            # with tf.variable_scope("RNN") as scope:
            #     hs_t = init_state
            #     ys = []
            #     for t,xs_t in enumerate(tf.split(inputs,seq_length,axis=0)):
            #         if t > 0:scope.reuse_variables()
            #         Wxh = tf.get_variable("Wxh", shape=[vocab_size, hidden_size], dtype=tf.float32, initializer=initializer)
            #         Whh = tf.get_variable("Whh", shape=[hidden_size, hidden_size], dtype=tf.float32, initializer=initializer)
            #         Why = tf.get_variable("Why", shape=[hidden_size, vocab_size], dtype=tf.float32, initializer=initializer)
            #         bh = tf.get_variable("bh", shape=[hidden_size], initializer=initializer)
            #         by = tf.get_variable("by", shape=[vocab_size], initializer=initializer)

            #         hs_t = tf.tanh(tf.matmul(xs_t,Wxh) + tf.matmul(hs_t,Whh) + bh)
            #         ys_t = tf.matmul(hs_t,Why) + by
            #         ys.append(ys_t)

            # h_prev = hs_t

            # output_softmax = tf.nn.softmax(ys[-1])

            # outputs = tf.concat(ys,axis=0)
            # loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=targets,logits=outputs))

            # #optimizer
            # minimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
            # grad_and_vars = minimizer.compute_gradients(loss)

            # pred = RNN(chars,weights,biases)
            # # Loss and optimizer
            # # cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
            # # optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(cost)

            # # # Model evaluation
            # # correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
            # # accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
            # # print(example_batch)

        coord.request_stop()
        coord.join(threads)
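
If the commented-out RNN graph above were enabled, the decoded character ids would still have to be one-hot encoded and fed through the inputs/targets/init_state placeholders. A minimal sketch of such a feed loop, assuming a hypothetical train_op = minimizer.apply_gradients(grad_and_vars) is added after compute_gradients and that the ids are first remapped to non-negative values (the source maps ';' and '-' to -1):

import numpy as np

def one_hot(ids, depth):
    # Turn a list of character ids into rows of a one-hot matrix.
    out = np.zeros((len(ids), depth), dtype=np.float32)
    out[np.arange(len(ids)), ids] = 1.0
    return out

h = np.zeros((1, hidden_size), dtype=np.float32)  # initial hidden state
for p in range(0, data_size - seq_length - 1, seq_length):
    xs = one_hot(chars[p:p + seq_length], vocab_size)          # input window
    ts = one_hot(chars[p + 1:p + seq_length + 1], vocab_size)  # next-character targets
    _, step_loss = sess.run([train_op, loss],
                            feed_dict={inputs: xs, targets: ts, init_state: h})
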
Example #3
# Y = predict
# # Cost & Optimizer
# loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels=Y))
# optimizer = tf.train.GradientDescentOptimizer(learning_rate=learningrate)
# train_op = optimizer.minimize(loss_op)

# # build accuracy
# correct_pred = tf.equal(tf.argmax(predict,1),tf.argmax(Y,1))
# accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initialize global variables
init = tf.global_variables_initializer()

readdata = ReadData()
trainingFiles, testingFiles = readdata.filePathConstructor()
features = readdata.input_pipeline(trainingFiles, batch_size)

# Start training
with tf.Session() as sess:
    # init session
    sess.run(init)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    # loop training steps
    for step in range(training_steps):
        # read the input data and decode it into character ids (dict1 as built in Example #2)
        example_batch = tf.reshape(features, [-1])
        item = tf.string_split(example_batch, delimiter="").values.eval()
        batch_x = [dict1[alp.decode().lower()] for alp in list(item)]
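
Note that each pass through the loop above adds fresh reshape/string_split ops to the graph before calling .eval(). A common refactor, sketched here under the same assumptions (dict1 and the actual training feed come from elsewhere, as in Example #2), is to build the decode op once and fetch it inside the loop:

# Build the decode op once so the graph stops growing inside the loop.
example_batch = tf.reshape(features, [-1])
split_chars = tf.string_split(example_batch, delimiter="").values

with tf.Session() as sess:
    sess.run(init)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    for step in range(training_steps):
        item = sess.run(split_chars)  # fetch one decoded batch of characters
        batch_x = [dict1[alp.decode().lower()] for alp in item]
        # feed batch_x into the training step here (not shown in the source)

    coord.request_stop()
    coord.join(threads)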