# images_placeholder holds batches of images, each batch of size
# (batch size x IMAGE_PIXELS). Setting the batch dimension to "None" lets us
# pick the batch size at run time. The batch size used for training can be set
# via a command-line flag, but for testing we feed the whole test set as a
# single batch.
images_placeholder = tf.placeholder(tf.float32, shape=[None, IMAGE_PIXELS],
  name='images')
labels_placeholder = tf.placeholder(tf.int64, shape=[None], name='image-labels')
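# As a hedged illustration (not part of the original listing): at run time the
# placeholders are fed NumPy arrays via feed_dict. `images_batch` and
# `labels_batch` are hypothetical names for one batch of data.
#
# feed_dict = {
#     images_placeholder: images_batch,   # shape (batch_size, IMAGE_PIXELS)
#     labels_placeholder: labels_batch,   # shape (batch_size,)
# }
# sess.run(train_step, feed_dict=feed_dict)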

# Operation for the classifier's result
logits = two_layer_fc.inference(images_placeholder, IMAGE_PIXELS,
  FLAGS.hidden1, CLASSES, reg_constant=FLAGS.reg_constant)

# Operation for the loss function
loss = two_layer_fc.loss(logits, labels_placeholder)

# Operation for the training step
train_step = two_layer_fc.training(loss, FLAGS.learning_rate)

# Operation calculating the accuracy of our predictions
accuracy = two_layer_fc.evaluation(logits, labels_placeholder)
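# For orientation, a hedged sketch of what the two_layer_fc helpers above
# might compute internally; this is an assumption for illustration, not the
# module's actual code:
#
# def loss(logits, labels):
#     # sparse softmax cross-entropy over the raw class scores
#     return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
#         logits=logits, labels=labels))
#
# def training(loss, learning_rate):
#     # plain gradient descent on the loss
#     return tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
#
# def evaluation(logits, labels):
#     # fraction of examples whose highest-scoring class matches the label
#     correct = tf.equal(tf.argmax(logits, 1), labels)
#     return tf.reduce_mean(tf.cast(correct, tf.float32))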

# Operation merging all summary data for TensorBoard
summary = tf.summary.merge_all()

# Define saver to save model state at checkpoints.
# A Saver handles writing the model's variables to the file system and
# restoring them in later sessions; to save or restore, the only thing it
# needs to know is which graph and variables to use.
saver = tf.train.Saver()
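# Hedged usage sketch (the 'checkpoints/model' path and `step` variable are
# hypothetical examples, not from the original code):
#
# saver.save(sess, 'checkpoints/model', global_step=step)         # write a checkpoint
# saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))  # reload the latest
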
# Load CIFAR-10 data

data_sets = data_helpers.load_data()
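# data_sets is presumably a dict keyed by split, e.g. 'images_train' and
# 'labels_train' (the keys used further below).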

# Putting it all together: prepare the TensorFlow graph
# Input placeholders

images_placeholder = tf.placeholder(tf.float32, shape=[None, IMAGE_PIXELS],
  name='images')
labels_placeholder = tf.placeholder(tf.int64, shape=[None], name='image-labels')

# Operation for the classifier's result
logits = two_layer_fc.inference(images_placeholder, IMAGE_PIXELS,
  FLAGS.hidden1, CLASSES, reg_constant=FLAGS.reg_constant)
# Operation for calculating loss
loss = two_layer_fc.loss(logits, labels_placeholder)
# Operation for the training step
train_step = two_layer_fc.training(loss, FLAGS.learning_rate)
# Operation calculating the accuracy of our predictions
accuracy = two_layer_fc.evaluation(logits, labels_placeholder)
# Merge all summary data for TensorBoard into one operation
summary = tf.summary.merge_all()
saver = tf.train.Saver()

# Use tf.Session() to run the TensorFlow graph.

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    summary_writer = tf.summary.FileWriter(logdir, sess.graph)

    zipped_data = zip(data_sets['images_train'], data_sets['labels_train'])
    batches = data_helpers.gen_batch(list(zipped_data), FLAGS.batch_size,
                                     FLAGS.max_steps)
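
    # A hedged sketch of the training loop that typically follows (the exact
    # loop is not shown in this excerpt; the reporting interval of 100 steps
    # is an assumption):
    for step, batch in enumerate(batches):
        # Unzip one batch back into image and label arrays
        images_batch, labels_batch = zip(*batch)
        feed_dict = {
            images_placeholder: images_batch,
            labels_placeholder: labels_batch,
        }

        # Periodically report training accuracy and log TensorBoard summaries
        if step % 100 == 0:
            train_accuracy, summary_str = sess.run([accuracy, summary],
                                                   feed_dict=feed_dict)
            print('Step {:d}, training accuracy {:g}'.format(step, train_accuracy))
            summary_writer.add_summary(summary_str, step)

        # Run a single training step
        sess.run(train_step, feed_dict=feed_dict)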