Example #1
import tensorflow as tf

from mnist import model

x = tf.placeholder("float", [None, 784])
config = tf.ConfigProto()
sess = tf.Session(config=config)


# Define both models in their own variable scopes.
with tf.variable_scope("regression"):
    y1, regression_variables = model.regression(x)

with tf.variable_scope("convolutional"):
    keep_prob = tf.placeholder("float")
    y2, convolutional_variables = model.convolutional(x, keep_prob)

# Initialize all variables first, then overwrite them with the trained weights
# if a checkpoint is found (restoring before this point would be undone by the
# initializer).
sess.run(tf.global_variables_initializer())

saver = tf.train.Saver(regression_variables)
regression_file = tf.train.latest_checkpoint("mnist/data/regression.ckpt")
if regression_file is not None:
    saver.restore(sess, regression_file)

saver = tf.train.Saver(convolutional_variables)
convolutional_file = tf.train.latest_checkpoint("mnist/data/convolutional.ckpt")
if convolutional_file is not None:
    saver.restore(sess, convolutional_file)
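
# NOTE: the mnist "model" module itself is not part of this example; the code
# above only assumes that each helper builds a network on top of x and returns
# (output_tensor, list_of_variables). A hypothetical sketch of model.regression
# under that assumption (not the project's actual file) could look like:
#
#     def regression(x):
#         W = tf.Variable(tf.zeros([784, 10]), name="W")
#         b = tf.Variable(tf.zeros([10]), name="b")
#         y = tf.nn.softmax(tf.matmul(x, W) + b)
#         return y, [W, b]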


def regression(input):
    return sess.run(y1, feed_dict={x: input}).flatten().tolist()


def convolutional(input):
    return sess.run(
        y2, feed_dict={x: input, keep_prob: 1.0}).flatten().tolist()
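
With the checkpoints restored, each helper maps a flattened 28x28 image to ten class probabilities. A minimal smoke test (the all-zeros input is only a stand-in for real pixel data) might look like:

if __name__ == "__main__":
    # Hypothetical check: one dummy image of shape [1, 784].
    dummy_image = [[0.0] * 784]
    print(regression(dummy_image))       # ten softmax probabilities
    print(convolutional(dummy_image))    # ten softmax probabilities

The separate training script below is the counterpart that trains the convolutional model and saves the checkpoint that the serving code above tries to restore.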
import os
from mnist import module
import tensorflow as tf
from mnist import input_data

data = input_data.read_data_sets('MNIST_data', one_hot=True)
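# read_data_sets downloads MNIST into MNIST_data/ on first use; with one_hot=True
# each label is a 10-element one-hot vector. Training batches are later drawn as,
# for example:
#     batch_xs, batch_ys = data.train.next_batch(100)   # images [100, 784], labels [100, 10]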
# Define the model
with tf.variable_scope("convolutional"):
    x = tf.placeholder(tf.float32, [None, 784], name="x")
    keep_prob = tf.placeholder(tf.float32)
    y, variables = module.convolutional(x, keep_prob)

# Training: loss, optimizer, and accuracy
y1 = tf.placeholder(dtype=tf.float32, shape=[None, 10], name='y1')
cross_entropy = -tf.reduce_sum(y1 * tf.log(y))
learning_rate = 1e-4
train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y1, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
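# NOTE: with this hand-rolled cross entropy, tf.log(y) becomes -inf (and the loss
# NaN) if any predicted probability is exactly 0. A common guard is to clip the
# probabilities before taking the log, e.g.:
#     cross_entropy = -tf.reduce_sum(y1 * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
# (tf.nn.softmax_cross_entropy_with_logits is the usual alternative, but it expects
# pre-softmax logits rather than the softmax output y.)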

saver = tf.train.Saver(variables)

with tf.Session() as sess:
    # TensorBoard: merge any defined summaries and write the graph to mnist_log/1.
    merged_summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter('mnist_log/1', sess.graph)
    summary_writer.add_graph(sess.graph)

    sess.run(tf.global_variables_initializer())

    # Resume training from a saved checkpoint, if one exists
    ckpt = tf.train.get_checkpoint_state(