Example #1
File: slvgg.py  Project: JayYip/GLN
def inputs(batch_size,
           eval_data='test_batch',
           data_dir='../../cifardataset/cifar-10-batches-bin'):
    """Construct input for CIFAR evaluation using the Reader ops.

  Args:
    eval_data: bool, indicating if one should use the train or eval data set.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.

  Raises:
    ValueError: If no data_dir
  """

    images, labels = cifar10_input.inputs(eval_data=eval_data,
                                          data_dir=data_dir,
                                          batch_size=batch_size)

    images = tf.image.resize_images(
        tf.cast(images, tf.float32),
        tf.convert_to_tensor([64, 64], dtype=tf.int32))
    labels = tf.one_hot(tf.cast(labels, tf.int32), depth=10, dtype=tf.int32)
    return (images, labels)
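A minimal usage sketch for an inputs() helper like the one above, assuming a TensorFlow 1.x queue-based input pipeline (the cifar10_input readers are built on filename/example queues): the returned tensors only produce batches once queue runners have been started inside a session.

import tensorflow as tf

images, labels = inputs(batch_size=128)  # tensors from Example #1, not numpy arrays yet

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        image_batch, label_batch = sess.run([images, labels])
        print(image_batch.shape, label_batch.shape)  # e.g. (128, 64, 64, 3) (128, 10)
    finally:
        coord.request_stop()
        coord.join(threads)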
Example #2
def inputs(eval_data):
    if not DATA_DIR:
        raise ValueError('Please supply a data_dir')
    data_dir = os.path.join(DATA_DIR, 'cifar-10-batches-bin')
    return cifar10_input.inputs(eval_data=eval_data,
                                data_dir=data_dir,
                                batch_size=BATCH_SIZE)
Example #3
def inputs(eval_data):
    """Construct input for CIFAR evaluation using the Reader ops.

  Args:
    eval_data: bool, indicating if one should use the train or eval data set.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.

  Raises:
    ValueError: If no data_dir
  """
    if not FLAGS.data_dir:
        raise ValueError('Please supply a data_dir')
    data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
    images, labels = cifar10_input.inputs(eval_data=eval_data,
                                          data_dir=data_dir,
                                          batch_size=FLAGS.batch_size)
    if FLAGS.use_fp16:
        images = tf.cast(images, tf.float16)
        labels = tf.cast(labels, tf.float16)
    else:
        images = tf.cast(images, tf.float32)
        labels = tf.cast(labels, tf.float32)
    return images, labels
Example #4
def inputs(eval_data):
  """Construct input for CIFAR evaluation using the Reader ops.
  Args:
    eval_data: bool, indicating if one should use the train or eval data set.
  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  Raises:
    ValueError: If no data_dir
  """
  if not FLAGS.data_dir:
    raise ValueError('Please supply a data_dir')
  data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
  return cifar10_input.inputs(eval_data=eval_data, data_dir=data_dir,
                              batch_size=FLAGS.batch_size)
Example #5
def inputs(eval_data):
  if not FLAGS.data_dir:
    raise ValueError('Please supply a data_dir')
  data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
  return cifar10_input.inputs(eval_data=eval_data, data_dir=data_dir,
                              batch_size=FLAGS.batch_size)
Example #6
# Weight-initialization function (attaches an L2 loss term, i.e. L2 regularization).
# L1 regularization: produces sparse features; most useless feature weights are driven to 0.
# L2 regularization: keeps feature weights from growing too large, so weights stay relatively even.
# Occam's razor principle (prefer the simpler model).
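# variable_with_weight_loss is used further down but not defined in this snippet;
# a minimal sketch of what it presumably looks like, following the common
# CIFAR-10 tutorial pattern (the exact signature here is an assumption):
def variable_with_weight_loss(shape, stddev, wl, name):
    # Weight tensor initialized from a truncated normal distribution.
    var = tf.Variable(tf.truncated_normal(shape, stddev=stddev), name=name)
    if wl is not None:
        # Attach an L2 penalty scaled by wl to the 'losses' collection so it
        # can later be summed into the total loss.
        weight_loss = tf.multiply(tf.nn.l2_loss(var), wl, name='weight_loss')
        tf.add_to_collection('losses', weight_loss)
    return var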

cifar10.maybe_download_and_extract()
# Download the data set and extract it
with tf.name_scope('Get'):
    images_train, labels_train = cifar10_input.distorted_inputs(data_dir=data_dir, batch_size=batch_size)
    '''
    distorted_inputs
    produces the data used for training (features, labels) and applies data
    augmentation (random horizontal flips, random crops, random brightness and
    contrast adjustments, per-image standardization); a sketch of this
    augmentation follows below.
    '''

    images_test, labels_test = cifar10_input.inputs(eval_data=True, data_dir=data_dir, batch_size=batch_size)
# inputs: generates the test data (only needs a 24x24 crop plus per-image standardization)
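# A rough sketch (not part of this project) of the per-image augmentation that
# distorted_inputs applies, assuming the standard TensorFlow 1.x CIFAR-10
# tutorial implementation; the function name is only illustrative:
def augment_for_training(image):
    # image: a decoded [32, 32, 3] CIFAR-10 image tensor
    image = tf.cast(image, tf.float32)
    image = tf.random_crop(image, [24, 24, 3])              # random 24x24 crop
    image = tf.image.random_flip_left_right(image)          # random horizontal flip
    image = tf.image.random_brightness(image, max_delta=63)
    image = tf.image.random_contrast(image, lower=0.2, upper=1.8)
    return tf.image.per_image_standardization(image)        # zero mean, unit variance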
with tf.name_scope('Inputs'):
    image_holder = tf.placeholder(tf.float32, [batch_size, 24, 24, 3], name='image_input')
    label_holder = tf.placeholder(tf.int32, [batch_size], name='label_input')
'''
Create placeholders for the input data (features + labels).
(batch_size is used later when defining the network structure, so the number of
samples per batch must be set in advance.)
Cropped image size: 24x24
Color channels: 3 (RGB)
'''
with tf.name_scope('Conv1'):
    weight1 = variable_with_weight_loss(shape=[5, 5, 3, 64], stddev=5e-2, wl=0.0, name='W1')
    with tf.name_scope('kernel1'):
        kernel1 = tf.nn.conv2d(image_holder, weight1, [1, 1, 1, 1], padding='SAME', name='kernel1')
    bias1 = tf.Variable(tf.constant(0.0, shape=[64]), name='b1')
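The snippet above stops right after defining bias1; a minimal sketch of how such a convolution block typically continues inside the same name scope (bias add, ReLU, max pooling, local response normalization), assuming the usual CIFAR-10 tutorial layout rather than this project's actual code:

    conv1 = tf.nn.relu(tf.nn.bias_add(kernel1, bias1), name='conv1')
    pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                           padding='SAME', name='pool1')
    norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')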
Example #7
def inputs(eval_data):
  if not DATA_DIR:
    raise ValueError('Please supply a data_dir')
  data_dir = os.path.join(DATA_DIR, 'cifar-10-batches-bin')
  return cifar10_input.inputs(eval_data=eval_data, data_dir=data_dir,
                              batch_size=BATCH_SIZE)