Example #1
import numpy as np
import tensorflow as tf

# LeNet5, model and get_one_image are project-local helpers used by this
# example; their definitions are not included in the snippet.


def build_model():
    global yan_chi, images, labels
    global acc, loss, c_loss
    global train_op, lr, global_step
    global x_train, y_train
    global x_test, y_test

    # Input placeholders: yan_chi is a scalar fed into LeNet5.get_op (its exact
    # role is defined by that helper); images/labels hold a batch of 24x24 RGB
    # images and their 10-class labels.
    yan_chi = tf.placeholder(tf.float32)
    images = tf.placeholder(tf.float32, [None, 24, 24, 3])
    labels = tf.placeholder(tf.float32, [None, 10])

    x_train, y_train = LeNet5.train_input()
    x_test, y_test = LeNet5.test_input()

    logits = LeNet5.inference(images)
    acc = LeNet5.get_acc(logits, labels)
    c_loss, loss = LeNet5.get_loss(logits, labels)
    train_op, lr, global_step = LeNet5.get_op(yan_chi, loss)
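
# build_model() only wires the graph into module-level globals. A minimal
# driver sketch is given below; it assumes LeNet5.train_input() returns
# queue-fed tensor batches and that yan_chi expects a plain float (e.g. a
# learning-rate/decay factor). Both are assumptions, not shown in the snippet.
def train_sketch(num_steps=1000):
    build_model()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            for step in range(num_steps):
                # Pull one batch from the input pipeline, then feed it to the placeholders.
                batch_x, batch_y = sess.run([x_train, y_train])
                _, batch_loss, batch_acc = sess.run(
                    [train_op, loss, acc],
                    feed_dict={images: batch_x, labels: batch_y, yan_chi: 1.0})
                if step % 100 == 0:
                    print('step %d  loss %.4f  acc %.4f' % (step, batch_loss, batch_acc))
        finally:
            coord.request_stop()
            coord.join(threads)
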
def evaluate_one_image():
    # Folder with the cat/dog test images I downloaded from Baidu.
    train = 'D:/workspace/python_dir/tersonflowdemo/dogvscat/dataset/data/test1/'
    image_array = get_one_image(train)

    with tf.Graph().as_default():
        BATCH_SIZE = 1  # only one image is evaluated, so the batch size is 1
        N_CLASSES = 2   # two output neurons: [1, 0] or [0, 1], the cat and dog probabilities

        # Feed the image into the model through a placeholder.
        x = tf.placeholder(tf.float32, shape=[208, 208, 3])

        # Convert the image to float32.
        image = tf.cast(x, tf.float32)
        # Do not standardize here: the training images were not standardized,
        # and adding this line cost me two days of debugging.
        # image = tf.image.per_image_standardization(image)
        # The image is a 3-D tensor [208, 208, 3]; reshape it into a 4-D
        # batch tensor [1, 208, 208, 3].
        image = tf.reshape(image, [1, 208, 208, 3])
        logit = model.inference(image, BATCH_SIZE, N_CLASSES)
        # inference() returns raw logits (no activation), so apply softmax here.
        logit = tf.nn.softmax(logit)

        # Directory where the trained model checkpoints are stored.
        logs_train_dir = 'D:/workspace/python_dir/tersonflowdemo/dogvscat/result/'
        # Define the saver used to restore the checkpoint.
        saver = tf.train.Saver()

        with tf.Session() as sess:

            print("从指定的路径中加载模型。。。。")
            # 将模型加载到sess
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('模型加载成功, 训练的步数为 %s' % global_step)
            else:
                print('模型加载失败,,,文件没有找到')
            # 将图片输入到模型计算
            prediction = sess.run(logit, feed_dict={x: image_array})
            # 获取输出结果中最大概率的索引
            max_index = np.argmax(prediction)
            print('猫的概率 %.6f' % prediction[:, 0])
            print('狗的概率 %.6f' % prediction[:, 1])
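
# evaluate_one_image() relies on a get_one_image() helper that is not part of
# this snippet. A minimal sketch is given below, assuming it simply picks a
# random file from the directory and resizes it to 208x208; the use of PIL and
# the random choice are assumptions, not the author's actual helper.
import os
import random
from PIL import Image

def get_one_image(img_dir):
    # Pick one random image file and return it as a 208x208x3 numpy array,
    # the shape evaluate_one_image() expects.
    files = [f for f in os.listdir(img_dir)
             if f.lower().endswith(('.jpg', '.jpeg', '.png'))]
    img_path = os.path.join(img_dir, random.choice(files))
    img = Image.open(img_path).convert('RGB').resize((208, 208))
    return np.array(img)
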
Example #3
import tensorflow as tf

# `model` and `test` are project-local modules (network definition and input
# pipeline). IMG_W, IMG_H, BATCH_SIZE, CAPACITY and N_CLASSES are assumed to
# be defined alongside them; they are not set in this snippet.
MAX_STEP = 8000
learning_rate = 0.0001  # keep the learning rate below 0.001

print("I'm OK")
train_dir = 'D:/workspace/python_dir/tersonflowdemo/dogvscat/dataset/data/train/'  # folder with the training images
logs_train_dir = 'D:/workspace/python_dir/tersonflowdemo/dogvscat/result/'  # folder where training results are saved

train, train_label = test.get_files(train_dir)

train_batch, train_label_batch = test.get_batch(train, train_label, IMG_W,
                                                IMG_H, BATCH_SIZE, CAPACITY)

# Define the training ops
sess = tf.Session()

train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)  # forward pass / network structure
train_loss = model.losses(train_logits, train_label_batch)  # loss used for backpropagation
train_op = model.trainning(train_loss, learning_rate)
train_acc = model.evaluation(train_logits, train_label_batch)
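
# The `model` module used above is not shown. A rough sketch of what its
# losses/trainning/evaluation helpers typically look like in this kind of
# tutorial (sparse softmax cross-entropy plus Adam) is given below; the real
# project code may differ.
def losses_sketch(logits, labels):
    # Mean cross-entropy between integer class labels and raw logits.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits)
    return tf.reduce_mean(cross_entropy, name='loss')

def trainning_sketch(loss, learning_rate):
    # One Adam optimization step that also advances a global-step counter.
    global_step = tf.Variable(0, name='global_step', trainable=False)
    return tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=global_step)

def evaluation_sketch(logits, labels):
    # Fraction of examples whose true label is the top-1 prediction.
    correct = tf.nn.in_top_k(logits, labels, 1)
    return tf.reduce_mean(tf.cast(correct, tf.float32), name='accuracy')
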

# train_label_batch = tf.one_hot(train_label_batch, 2, 1, 0)
# Definition of the test ops

summary_op = tf.summary.merge_all()

# Create a writer for the TensorBoard log files
train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
saver = tf.train.Saver()

sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
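
# The example stops right after creating the coordinator. A typical
# continuation of this TF1 queue-runner setup (start the queue threads, run
# the training loop, write summaries and checkpoints, then shut down) is
# sketched below; it is the usual pattern, not the author's missing code.
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
    for step in range(MAX_STEP):
        if coord.should_stop():
            break
        _, loss_value, acc_value = sess.run([train_op, train_loss, train_acc])
        if step % 50 == 0:
            print('step %d, loss = %.4f, accuracy = %.4f' % (step, loss_value, acc_value))
            summary_str = sess.run(summary_op)
            train_writer.add_summary(summary_str, step)
        if step % 2000 == 0 or (step + 1) == MAX_STEP:
            saver.save(sess, logs_train_dir + 'model.ckpt', global_step=step)
except tf.errors.OutOfRangeError:
    print('Training finished: input queue exhausted')
finally:
    coord.request_stop()
coord.join(threads)
sess.close()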