Example #1
import numpy as np
import tensorflow as tf

def test(image_arr):
    with tf.Graph().as_default():
        # Build the graph from a placeholder so the restored model can be
        # run on any [64, 64, 3] array passed via feed_dict
        x = tf.placeholder(tf.float32, shape=[64, 64, 3])
        image = tf.image.per_image_standardization(x)
        image = tf.reshape(image, [1, 64, 64, 3])
        p = deep_CNN(image, 1, N_CLASSES)
        logits = tf.nn.softmax(p)
        saver = tf.train.Saver()
        sess = tf.Session()
        ckpt = tf.train.get_checkpoint_state(log_dir)
        if ckpt and ckpt.model_checkpoint_path:
            # Call saver.restore() to load the trained model weights
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('Loading success')
        prediction = sess.run(logits, feed_dict={x: image_arr})
        max_index = np.argmax(prediction)
        print('Predicted label:', max_index, lists[max_index])
        print('Prediction scores:', prediction)
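
For reference, a minimal way to drive this function, assuming log_dir, lists (the class-name list) and N_CLASSES are defined as in the surrounding project; the image path here is a placeholder:

from PIL import Image

img = Image.open('sample.jpg').convert('RGB').resize((64, 64))  # placeholder path
test(np.array(img, dtype=np.float32))  # feeds a [64, 64, 3] array into the placeholder
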
Example #2
import numpy as np
import tensorflow as tf

def test(image_arr):
    with tf.Graph().as_default():
        # Grayscale input this time: one 28x28 channel, 17 classes
        x = tf.placeholder(tf.float32, shape=[28, 28])
        image = tf.reshape(x, [1, 28, 28, 1])
        # image = tf.image.per_image_standardization(image)
        p = deep_CNN(image, 1, 17)
        logits = tf.nn.softmax(p)
        saver = tf.train.Saver()
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(log_dir)
        if ckpt and ckpt.model_checkpoint_path:
            # Call saver.restore() to load the trained model weights
            saver.restore(sess, ckpt.model_checkpoint_path)
        prediction = sess.run(logits, feed_dict={x: image_arr})
        max_index = np.argmax(prediction)
        return max_index
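
Both examples call deep_CNN(images, batch_size, n_classes) without defining it. Below is a minimal sketch with that signature: two conv/pool stages followed by a linear layer that returns raw logits (the callers apply the softmax). Filter counts and kernel sizes are illustrative assumptions, not the original architecture.

def deep_CNN(images, batch_size, n_classes):
    # conv1 + pool1 (16 filters; sizes are illustrative)
    with tf.variable_scope('conv1'):
        w = tf.get_variable('weights', [3, 3, images.shape[-1].value, 16],
                            initializer=tf.truncated_normal_initializer(stddev=0.1))
        b = tf.get_variable('biases', [16], initializer=tf.constant_initializer(0.1))
        conv1 = tf.nn.relu(tf.nn.conv2d(images, w, [1, 1, 1, 1], 'SAME') + b)
    pool1 = tf.nn.max_pool(conv1, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')

    # conv2 + pool2 (32 filters)
    with tf.variable_scope('conv2'):
        w = tf.get_variable('weights', [3, 3, 16, 32],
                            initializer=tf.truncated_normal_initializer(stddev=0.1))
        b = tf.get_variable('biases', [32], initializer=tf.constant_initializer(0.1))
        conv2 = tf.nn.relu(tf.nn.conv2d(pool1, w, [1, 1, 1, 1], 'SAME') + b)
    pool2 = tf.nn.max_pool(conv2, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')

    # flatten and project to n_classes raw logits
    flat = tf.reshape(pool2, [batch_size, -1])
    with tf.variable_scope('softmax_linear'):
        w = tf.get_variable('weights', [flat.shape[-1].value, n_classes],
                            initializer=tf.truncated_normal_initializer(stddev=0.1))
        b = tf.get_variable('biases', [n_classes], initializer=tf.constant_initializer(0.1))
        return tf.matmul(flat, w) + b
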
N_CLASSES = 17
IMG_W = 28  # resize images to this width; larger images train slower
IMG_H = 28
BATCH_SIZE = 40    # images per batch
CAPACITY = 20      # maximum queue capacity
MAX_STEP = 25000
learning_rate = 0.00001  # usually kept below 0.0001
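
get_file and get_batch come from elsewhere in the project and are not shown in this listing. Below is a sketch consistent with how they are called, assuming one subdirectory per class and grayscale JPEG inputs (both assumptions), using the TF1 queue-based input pipeline:

import os

def get_file(file_dir):
    # Walk one subdirectory per class; the folder index becomes the label
    # (this directory layout is an assumption, not taken from the listing)
    images, labels = [], []
    for idx, cls in enumerate(sorted(os.listdir(file_dir))):
        cls_dir = os.path.join(file_dir, cls)
        for name in os.listdir(cls_dir):
            images.append(os.path.join(cls_dir, name))
            labels.append(idx)
    return images, labels

def get_batch(image_list, label_list, img_w, img_h, batch_size, capacity):
    # TF1 queue-based pipeline: filename queue -> decode -> resize -> batch
    images = tf.convert_to_tensor(image_list, dtype=tf.string)
    labels = tf.convert_to_tensor(label_list, dtype=tf.int32)
    input_queue = tf.train.slice_input_producer([images, labels])
    label = input_queue[1]
    image = tf.image.decode_jpeg(tf.read_file(input_queue[0]), channels=1)
    image = tf.image.resize_image_with_crop_or_pad(image, img_h, img_w)
    image = tf.cast(image, tf.float32)
    image = tf.image.per_image_standardization(image)
    image_batch, label_batch = tf.train.batch([image, label],
                                              batch_size=batch_size,
                                              num_threads=4,
                                              capacity=capacity)
    return image_batch, tf.reshape(label_batch, [batch_size])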

# Build the input batches
train_dir = './DataSet'  # path to the training samples
logs_train_dir = './final'  # directory for logs and checkpoints
train, train_label = get_file(train_dir)

train_batch, train_label_batch = get_batch(train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
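
losses, trainning (the project's spelling) and evaluation are likewise external. Here are minimal TF1 versions consistent with the calls below; the choice of Adam as the optimizer is an assumption:

def losses(logits, labels):
    # Cross-entropy on raw logits; labels are integer class ids
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels)
    loss = tf.reduce_mean(cross_entropy)
    tf.summary.scalar('loss', loss)
    return loss

def trainning(loss, learning_rate):
    # One optimization step per call to sess.run(train_op)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    global_step = tf.Variable(0, trainable=False)
    return optimizer.minimize(loss, global_step=global_step)

def evaluation(logits, labels):
    # Fraction of batch examples whose top-1 prediction matches the label
    correct = tf.nn.in_top_k(logits, labels, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    tf.summary.scalar('accuracy', accuracy)
    return accuracy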

train_logits = deep_CNN(train_batch, BATCH_SIZE, N_CLASSES)
train_loss = losses(train_logits, train_label_batch)
train_op = trainning(train_loss, learning_rate)
train_acc = evaluation(train_logits, train_label_batch)

summary_op = tf.summary.merge_all()
sess = tf.Session()
train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()  # coordinator for the queue-runner threads
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
 
# Train batch by batch
try:
    # run MAX_STEP training steps, one batch per step