def inputs(eval_data, inner_data_dir, batch_size=FLAGS.batch_size):
    """Construct input for CIFAR evaluation using the Reader ops.

  Args:
    eval_data: bool, indicating if one should use the train or eval data set.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.

  Raises:
    ValueError: If no data_dir
  """
    # if not FLAGS.data_dir:
    #   raise ValueError('Please supply a data_dir')
    # data_dir = os.path.join(FLAGS.data_dir, inner_data_dir)

    images, labels = cifar10_input.inputs(eval_data=eval_data,
                                          data_dir=inner_data_dir,
                                          batch_size=batch_size)
    # print('images shape:',images.shape,'labels shape:',labels.shape)
    if FLAGS.use_fp16:
        images = tf.cast(images, tf.float16)
        labels = tf.cast(labels, tf.float16)
    return images, labels
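
A minimal usage sketch of the helper above (TF 1.x queue-runner pipeline; the path and batch size here are hypothetical, not from the original):

images, labels = inputs(eval_data=True,
                        inner_data_dir='./cifar10_data/cifar-10-batches-bin',
                        batch_size=128)
with tf.Session() as sess:
    # Queue runners must be started before sess.run can pull batches.
    tf.train.start_queue_runners(sess=sess)
    image_batch, label_batch = sess.run([images, labels])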
Example #2
def inputs(eval_data):
    if not FLAGS.data_dir:
        raise ValueError('Please supply a data_dir')
    data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
    images, labels = cifar10_input.inputs(eval_data, data_dir,
                                          FLAGS.batch_size)
    if FLAGS.use_fp16:
        images = tf.cast(images, tf.float16)
        labels = tf.cast(labels, tf.float16)
    return images, labels
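
These snippets rely on module-level FLAGS defined elsewhere; a minimal sketch of plausible definitions (TF 1.x flags API; the default values are assumptions inferred from usage above):

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
                           """Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_integer('batch_size', 128,
                            """Number of images to process in a batch.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
                            """Run the model in fp16.""")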
Example #3
def inputs(eval_data):
    """Construct input for CIFAR evaluation using the Reader ops.
  Args:
    eval_data: bool, indicating if one should use the train or eval data set.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
    images, labels = cifar10_input.inputs(eval_data=eval_data,
                                          batch_size=FLAGS.batch_size)
    if FLAGS.use_fp16:
        images = tf.cast(images, tf.float16)
        labels = tf.cast(labels, tf.float16)
    return images, labels
Example #4
'''
1. cnn + maxpool
2. cnn + maxpool
3. cnn + avgpool
4. softmax + crossentropy
5. Batch Normalization added after each convolution
'''
from cifar10 import cifar10_input
import tensorflow as tf
import numpy as np
from tensorflow.contrib.layers.python.layers import batch_norm

# get data
batch_size = 128
data_dir = "./cifar10"
images_train, labels_train = cifar10_input.inputs(eval_data=False,
                                                  data_dir=data_dir,
                                                  batch_size=batch_size)
images_test, labels_test = cifar10_input.inputs(eval_data=True,
                                                data_dir=data_dir,
                                                batch_size=batch_size)

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners()
image_batch, label_batch = sess.run([images_test, labels_test])

# print(image_batch[0])
# print(label_batch[0])
# pylab.imshow(image_batch[0])
# pylab.show()


def weight_variable(shape):
    # The snippet is cut off here; this body is the conventional
    # truncated-normal initializer completion.
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
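
Step 5 of the pipeline above calls for Batch Normalization after each convolution; a minimal sketch using the batch_norm imported earlier (the helper name and hyperparameters are illustrative, not from the original):

def conv_bn_relu(x, w, b, is_training):
    # conv -> batch norm -> relu, as described in step 5 of the docstring.
    conv = tf.nn.bias_add(
        tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME'), b)
    normed = batch_norm(conv, decay=0.9, updates_collections=None,
                        is_training=is_training)
    return tf.nn.relu(normed)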
Example #5
from cifar10 import cifar10_input, cifar10
import tensorflow as tf

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

batch_size = 100
# data_dir='D:\\学习笔记\\ai\\dataSets\\cifar10\\cifar-10-binary.tar'
data_dir = 'D:\\学习笔记\\ai\\dataSets\\cifar10\\cifar-10-batches-bin'
# data_dir='D:\\学习笔记\\ai\\dataSets\\cifar10'

cifar10.maybe_download_and_extract()

images_train, label_train = cifar10_input.distorted_inputs(
    data_dir=data_dir, batch_size=batch_size)
images_test, label_test = cifar10_input.inputs(eval_data=True,
                                               data_dir=data_dir,
                                               batch_size=batch_size)

X = tf.placeholder(shape=images_train.get_shape(), dtype=tf.float32)
y = tf.placeholder(shape=label_train.get_shape(), dtype=tf.int32)
w_cnn = tf.Variable(tf.truncated_normal(shape=(4, 4, 3, 24), stddev=0.1))
b_cnn = tf.Variable(tf.ones(shape=24))
cnn = tf.nn.conv2d(X, w_cnn, strides=(1, 1, 1, 1), padding='SAME', name='cnn')
cnn = tf.nn.bias_add(cnn, b_cnn)  # apply the bias (b_cnn was otherwise unused)
cnn = tf.reshape(cnn, shape=(batch_size, -1))
cnn_shape = cnn.get_shape()
relu_cnn = tf.nn.relu(cnn)

print('cnn shape', cnn_shape)
w_fc = tf.Variable(tf.truncated_normal(shape=(13824, 10), stddev=0.1))
b_fc = tf.Variable(tf.ones(shape=10))
fc = tf.matmul(relu_cnn, w_fc) + b_fc
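
The snippet stops at the logits; a hedged sketch of how the loss and training step could be wired up (the optimizer and learning rate are assumptions, not from the original):

# Hypothetical continuation: softmax cross-entropy on the fc logits.
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=fc))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)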
Example #6
def variable_with_weight_loss(shape, stddev, w1):
    # The head of this snippet was lost; the signature is reconstructed from
    # the call sites below. Creates a weight variable and, when w1 is given,
    # adds its L2 penalty to the 'losses' collection.
    var = tf.Variable(tf.truncated_normal(shape, stddev=stddev))
    if w1 is not None:
        weight_loss = tf.multiply(tf.nn.l2_loss(var), w1, name='weight_loss')
        tf.add_to_collection('losses', weight_loss)
    return var


maybe_download_and_extract()  # download and extract the dataset

# distorted_inputs produces the training data (features plus their labels)
# and applies data augmentation
images_train, labels_train = distorted_inputs(data_dir=data_dir,
                                              batch_size=batch_size)

# cifar10_input.inputs() generates the test data
image_test, labels_test = inputs(eval_data=True,
                                 data_dir=data_dir,
                                 batch_size=batch_size)

image_holder = tf.placeholder(tf.float32,
                              [batch_size, 24, 24, 3])  # 3 = RGB channels
label_holder = tf.placeholder(tf.int32, [batch_size])

# Create the convolutional layer; note how LRN (local response normalization) works
# 5x5 kernel, 3 color channels, 64 kernels
# ==============
# Convolutional layers extract features; fully connected layers combine and classify them

weight1 = variable_with_weight_loss(shape=[5, 5, 3, 64], stddev=5e-2, w1=0.0)
kernel1 = tf.nn.conv2d(image_holder, weight1, [1, 1, 1, 1], padding='SAME')
bias1 = tf.Variable(tf.constant(0.0, shape=[64]))
conv1 = tf.nn.relu(tf.nn.bias_add(kernel1, bias1))
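
The comment above calls out LRN; in the classic cifar10.py layout a max pool and local response normalization follow the first conv layer. A hedged sketch (hyperparameters follow the tutorial's usual defaults, not this snippet):

# Hypothetical continuation after conv1.
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                       padding='SAME')
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)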