def distorted_inputs():
    """Construct distorted input for CIFAR training using the Reader ops.

    Returns:
      images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
      labels: Labels. 1D tensor of [batch_size] size.

    Raises:
      ValueError: If no data_dir.
    """
    # Fail fast when the flag was not supplied.
    if not FLAGS.data_dir:
        raise ValueError('Please supply a data_dir')
    bin_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
    return cifar10_input.distorted_inputs(data_dir=bin_dir,
                                          batch_size=FLAGS.batch_size)
def distorted_inputs(data_dir, batch_size, use_fp16):
    """Construct distorted input for CIFAR training using the Reader ops.

    Args:
      data_dir: The directory for the data.
      batch_size: The batch size.
      use_fp16: Train the model using fp16.

    Returns:
      images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
      labels: Labels. 1D tensor of [batch_size] size.

    Raises:
      ValueError: If no data_dir.
    """
    if not data_dir:
        raise ValueError('Please supply a data_dir')
    bin_dir = os.path.join(data_dir, 'cifar-10-batches-bin')
    images, labels = cifar10_input.distorted_inputs(data_dir=bin_dir,
                                                    batch_size=batch_size)
    # Optional half-precision training: cast both tensors, mirroring the
    # TF CIFAR-10 tutorial (the loss function recasts labels as needed).
    if use_fp16:
        images, labels = tf.cast(images, tf.float16), tf.cast(labels, tf.float16)
    return images, labels
def variable_with_weight_loss(shape, stddev, w1):
    """Create a weight Variable with optional L2 weight decay.

    Args:
      shape: Shape of the variable, e.g. a conv kernel [h, w, in_ch, out_ch].
      stddev: Standard deviation for the truncated-normal initializer.
      w1: L2-loss multiplier; when not None, w1 * l2_loss(var) is added to
        the 'losses' collection so it can be summed into the total loss.

    Returns:
      The created tf.Variable.
    """
    var = tf.Variable(tf.truncated_normal(shape, stddev=stddev))
    if w1 is not None:
        weight_loss = tf.multiply(tf.nn.l2_loss(var), w1, name='weight_loss')
        tf.add_to_collection('losses', weight_loss)
    return var


# Download and extract the CIFAR-10 dataset.
cifar10.maybe_download_and_extract()

# NOTE: Windows-style relative path to the extracted binary batches.
cifar_dir = '.\\cifar10_data\\cifar-10-batches-bin'

# Training data, produced with distortion-based data augmentation.
images_train, labels_train = cifar10_input.distorted_inputs(
    data_dir=cifar_dir, batch_size=FLAGS.batch_size)

# Evaluation data (eval_data=True selects the test split).
images_test, labels_test = cifar10_input.inputs(eval_data=True,
                                                data_dir=cifar_dir,
                                                batch_size=FLAGS.batch_size)

# Input placeholders; distorted CIFAR images are cropped to 24x24x3.
x_input = tf.placeholder(tf.float32, [FLAGS.batch_size, 24, 24, 3])
y_input = tf.placeholder(tf.int32, [FLAGS.batch_size])

# First convolutional layer: 5x5 kernel, 3 input channels, 64 filters.
weight1 = variable_with_weight_loss(shape=[5, 5, 3, 64], stddev=5e-2, w1=0.0)
bias1 = tf.Variable(tf.constant(0.0, shape=[64]))
conv1 = tf.nn.conv2d(x_input, weight1, [1, 1, 1, 1], padding='SAME')
relu1 = tf.nn.relu(tf.nn.bias_add(conv1, bias1))
# BUG FIX: pool the activated output (relu1), not the raw convolution
# output; the original pooled conv1 and left relu1 unused.
# NOTE(review): the original max_pool statement was truncated in the
# source; completed with the standard CIFAR-10 pooling parameters
# (3x3 window, stride 2, SAME padding) -- confirm against the original.
pool1 = tf.nn.max_pool(relu1, ksize=[1, 3, 3, 1],
                       strides=[1, 2, 2, 1], padding='SAME')
max_steps = 3000    # number of training iterations
batch_size = 128
data_dir = '/tmp/cifar10_data/cifar-10-batches-bin'


def variable_with_weight_loss(shape, stddev, w1):
    """Create a weight Variable with optional L2 weight decay.

    Args:
      shape: Shape of the variable, e.g. a conv kernel [h, w, in_ch, out_ch].
      stddev: Standard deviation for the truncated-normal initializer.
      w1: Scales the variable's L2 loss; when not None the scaled loss is
        added to the 'losses' collection.

    Returns:
      The created tf.Variable.
    """
    # Truncated-normal initialization.
    var = tf.Variable(tf.truncated_normal(shape, stddev=stddev))
    if w1 is not None:
        weight_loss = tf.multiply(tf.nn.l2_loss(var), w1, name='weight_loss')
        tf.add_to_collection('losses', weight_loss)
    return var


# Download the dataset.
cifar10.maybe_download_and_extract()

# Training data, produced with distortion-based augmentation.
images_train, labels_train = cifar10_input.distorted_inputs(
    data_dir=data_dir, batch_size=batch_size)

# Test data.
images_test, labels_test = cifar10_input.inputs(eval_data=True,
                                                data_dir=data_dir,
                                                batch_size=batch_size)

# Placeholders; distorted CIFAR images are cropped to 24x24x3.
images_holder = tf.placeholder(tf.float32, [batch_size, 24, 24, 3])
label_holder = tf.placeholder(tf.int32, [batch_size])

# Conv layer 1: 5x5 kernel, 3 input channels, 64 filters.
weight1 = variable_with_weight_loss(shape=[5, 5, 3, 64], stddev=5e-2, w1=0.0)
kernel1 = tf.nn.conv2d(images_holder, weight1, [1, 1, 1, 1], padding='SAME')
bias1 = tf.Variable(tf.constant(0.0, shape=[64]))
conv1 = tf.nn.relu(tf.nn.bias_add(kernel1, bias1))
# NOTE(review): the original max_pool statement was truncated in the
# source after ksize; completed with the standard stride-2 SAME pooling
# used by the CIFAR-10 tutorial -- confirm against the original.
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1],
                       strides=[1, 2, 2, 1], padding='SAME')