Example 1
0
cost = tf.add_n(tf.get_collection('losses'), name='total_loss')
lr = tf.placeholder(dtype=tf.float32)
optimizer = tf.train.RMSPropOptimizer(learning_rate=lr, epsilon=1e-6, centered=True).minimize(cost)

# Test model & calculate accuracy
cp = tf.cast(tf.argmax(nn, 1), tf.int32)
err = tf.reduce_mean(tf.cast(tf.not_equal(cp, y), dtype=tf.float32))
# Calculate accuracy
correct_prediction = tf.cast(tf.argmax(nn, 1), tf.int32)
accuracy = tf.reduce_mean(tf.cast(tf.not_equal(correct_prediction, y), dtype=tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
saver = tf.train.Saver()

# Xtrain, S1train, S2train, ytrain, Xtest, S1test, S2test, ytest = process_gridworld_data(input=config.input, imsize=config.imsize)
Xtrain, Strain, ytrain, Xtest, Stest,  ytest =  process_gridworld_data(input=config.input, imsize=config.imsize, statebatchsize=config.statebatchsize)

print "Xtrain shape: ", Xtrain.shape
print "Strain.shape: ", Strain.shape
print "ytrain.shape: ", ytrain.shape

# print Xtrain[0,:,:,0]
# print Xtrain[0,:,:,1]
# print Strain[0,:,:,0]

# Launch the graph
config_T = tf.ConfigProto()
config_T.gpu_options.allow_growth = True

# Launch the graph with the GPU-growth session config built above.
# NOTE(review): this excerpt is truncated — the body of `if config.log:`
# is cut off below, so the statement is incomplete as shown.
with tf.Session(config=config_T) as sess:
    if config.log:
Example 2
0
# Register the mean cross-entropy with the shared 'losses' collection and
# form the total cost as the sum of everything collected there.
tf.add_to_collection('losses', cross_entropy_mean)

cost = tf.add_n(tf.get_collection('losses'), name='total_loss')

# Centred RMSProp with a fixed epsilon; LR is defined elsewhere in this file.
rmsprop = tf.train.RMSPropOptimizer(learning_rate=LR, epsilon=1e-6,
                                    centered=True)
optimizer = rmsprop.minimize(cost)

# Error rate: fraction of samples whose arg-max prediction differs from y.
cp = tf.cast(tf.argmax(nn, 1), tf.int32)
mismatch = tf.not_equal(cp, y)
err = tf.reduce_mean(tf.cast(mismatch, dtype=tf.float32))

# Variable-initialisation op and checkpoint saver.
init = tf.global_variables_initializer()
saver = tf.train.Saver()

# Load the gridworld dataset split into train/test tensors.
(Xtrain, S1train, S2train, ytrain,
 Xtest, S1test, S2test, ytest) = process_gridworld_data(
    input=config.input, imsize=config.imsize)

learning_rate = 0.003  # 0.001
Have_trained = 0
TrA = []  # presumably per-epoch train stats — filled in the loop below
TeA = []  # presumably per-epoch test stats — filled in the loop below
# Launch the graph.  (Excerpt is truncated: the session body continues
# past the last line shown here.)
# NOTE(review): `Have_trained == True` is an un-idiomatic truthiness test
# (Have_trained is the int 0 above); `if Have_trained:` is the
# conventional form.
with tf.Session() as sess:
    if config.log:
        # One TensorBoard histogram per trainable variable.
        for var in tf.trainable_variables():
            tf.summary.histogram(var.op.name, var)
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(config.logdir, sess.graph)
    sess.run(init)
    # Optionally resume from the most recent checkpoint in ckpt/.
    if Have_trained == True:
        model_file = tf.train.latest_checkpoint('ckpt/')
        saver.restore(sess, model_file)
    logits=logits, labels=y_, name='cross_entropy')
# Mean cross-entropy over the batch, registered with the shared 'losses'
# collection; the total cost sums every registered loss term.
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')
tf.add_to_collection('losses', cross_entropy_mean)

cost = tf.add_n(tf.get_collection('losses'), name='total_loss')

# Centred RMSProp driven by the configured learning rate.
optimizer = tf.train.RMSPropOptimizer(
    learning_rate=config.lr,
    epsilon=1e-6,
    centered=True,
).minimize(cost)

# Error rate: fraction of arg-max predictions that disagree with labels y.
cp = tf.cast(tf.argmax(nn, 1), tf.int32)
err = tf.reduce_mean(tf.cast(tf.not_equal(cp, y), dtype=tf.float32))

# Variable-initialisation op and checkpoint saver.
init = tf.global_variables_initializer()
saver = tf.train.Saver()

# Load the gridworld train/test split.
(Xtrain, S1train, S2train, ytrain,
 Xtest, S1test, S2test, ytest) = process_gridworld_data(
    input=config.input, imsize=config.imsize)

# Launch the graph.  (Excerpt is truncated: the epoch loop continues past
# the last line shown here.)
with tf.Session() as sess:
    if config.log:
        # One TensorBoard histogram per trainable variable.
        for var in tf.trainable_variables():
            tf.summary.histogram(var.op.name, var)
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(config.logdir, sess.graph)
    sess.run(init)

    batch_size = config.batchsize
    # Table header for per-epoch progress; fmt_row is defined elsewhere.
    print(fmt_row(10, ["Epoch", "Train Cost", "Train Err", "Epoch Time"]))
    for epoch in range(int(config.epochs)):
        tstart = time.time()
        # Running sums for this epoch's average error and cost.
        avg_err, avg_cost = 0.0, 0.0