def testSimple(self):
    labels = [9, 3, 0]
    records = [self._record(labels[0], 0, 128, 255),
               self._record(labels[1], 255, 0, 1),
               self._record(labels[2], 254, 255, 0)]
    contents = b"".join([record for record, _ in records])
    expected = [expected for _, expected in records]
    filename = os.path.join(self.get_temp_dir(), "cifar")
    open(filename, "wb").write(contents)

    with self.test_session() as sess:
      q = tf.FIFOQueue(99, [tf.string], shapes=())
      q.enqueue([filename]).run()
      q.close().run()
      result = cifar10_input.read_cifar10(q)

      for i in range(3):
        key, label, uint8image = sess.run([
            result.key, result.label, result.uint8image])
        self.assertEqual("%s:%d" % (filename, i), tf.compat.as_text(key))
        self.assertEqual(labels[i], label)
        self.assertAllEqual(expected[i], uint8image)

      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run([result.key, result.uint8image])
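This test relies on a _record helper that builds one raw CIFAR-10 record: a single label byte followed by the 1024-byte red, green, and blue planes of a 32x32 image. It is defined in the test class shown later in this listing; reproduced here for reference:

def _record(self, label, red, green, blue):
    image_size = 32 * 32
    record = bytes(bytearray([label] + [red] * image_size +
                             [green] * image_size + [blue] * image_size))
    expected = [[[red, green, blue]] * 32] * 32
    return record, expected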
Example #2
def inputs_origin(data_dir):
  # There are five filenames in total, data_batch_1.bin through data_batch_5.bin;
  # all of them are training images.
  filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
               for i in xrange(1, 6)]
  # Check that each file exists.
  """
  if not tf.gfile.Exists(dest_directory):
     gfile.MakeDirs(dest_directory)
  is equivalent to:
  if not os.path.exists(dest_directory):
     os.makedirs(dest_directory)
  """

  for f in filenames:
    if not tf.gfile.Exists(f):
      raise ValueError('Failed to find file: ' + f)
  # Wrap the list of filenames into a TensorFlow queue.
  filename_queue = tf.train.string_input_producer(filenames)
  # cifar10_input.read_cifar10 is a pre-written function that reads files from
  # the queue; the uint8image attribute of the returned read_input is the
  # image Tensor.
  read_input = cifar10_input.read_cifar10(filename_queue)
  # Convert the image to floating point.
  reshaped_image = tf.cast(read_input.uint8image, tf.float32)
  # reshaped_image is the tensor of a single image: each call to
  # sess.run(reshaped_image) fetches one image.
  return reshaped_image
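A minimal consumption sketch for inputs_origin, assuming data_dir points at the unpacked CIFAR-10 binaries; the queue runners are required, or sess.run would block forever:

# Sketch: fetch a single image from the pipeline built by inputs_origin.
reshaped_image = inputs_origin('cifar10_data/cifar-10-batches-bin')
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    image = sess.run(reshaped_image)  # one (32, 32, 3) float32 image per run
    coord.request_stop()
    coord.join(threads)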
Example #3
def evaluate(timestamp, batch_size=BATCH_SIZE * 64, n_batch=10):
    '''
    :param timestamp: The model is restored from the folder named after this timestamp.
    :param batch_size: Number of examples per evaluation batch.
    :param n_batch: Number of batches over which to evaluate the model.
    :return: Mean accuracy over the evaluated batches.
    '''

    with tf.Session() as sess:
        vgg, ckpt_path = restore(sess, timestamp)
        if ckpt_path is None:
            print("Checkpoint not found")
            return

        test_batch_xs, test_batch_ys = cifar10_input.read_cifar10(
            is_train=False, batch_size=batch_size, shuffle=False)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess, coord)

        accs = np.zeros(n_batch)
        for step in range(n_batch):
            test_xs, test_ys = sess.run([test_batch_xs, test_batch_ys])
            acc = sess.run(vgg.accuracy,
                           feed_dict={
                               vgg.xs: test_xs,
                               vgg.ys: test_ys
                           })
            print("Accuracy : %.4f%%" % (acc * 100))
            accs[step] = acc
        coord.request_stop()
        coord.join(threads)
        return np.mean(accs)
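evaluate relies on a restore helper that this listing does not show. A plausible sketch, assuming it rebuilds the VGG model from the params.pkl hyperparameters that train (Example #28 below) pickles under ./tmp/<timestamp> and then restores the newest checkpoint from the same folder; VGG16(**params) is an assumption about what vgg.params holds:

import os
import pickle
import tensorflow as tf

def restore(sess, timestamp):
    # Hypothetical sketch: mirror the ./tmp/<timestamp> layout used by train().
    tmp_dir = os.path.join('./tmp', str(timestamp))
    with open(os.path.join(tmp_dir, 'params.pkl'), 'rb') as f:
        params = pickle.load(f)  # hyperparameters pickled by train()
    vgg = VGG16(**params)  # assumption: params holds the constructor kwargs
    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state(tmp_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        return vgg, ckpt.model_checkpoint_path
    return vgg, None  # evaluate() treats a None path as a missing checkpoint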
Example #4
  def testSimple(self):
    labels = [9, 3, 0]
    records = [self._record(labels[0], 0, 128, 255),
               self._record(labels[1], 255, 0, 1),
               self._record(labels[2], 254, 255, 0)]
    contents = b"".join([record for record, _ in records])
    expected = [expected for _, expected in records]
    filename = os.path.join(self.get_temp_dir(), "cifar")
    open(filename, "wb").write(contents)

    with self.test_session() as sess:
      q = tf.FIFOQueue(99, [tf.string], shapes=())
      q.enqueue([filename]).run()
      q.close().run()
      result = cifar10_input.read_cifar10(q)

      for i in range(3):
        key, label, uint8image = sess.run([
            result.key, result.label, result.uint8image])
        self.assertEqual("%s:%d" % (filename, i), tf.compat.as_text(key))
        self.assertEqual(labels[i], label)
        self.assertAllEqual(expected[i], uint8image)

      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run([result.key, result.uint8image])
Example #5
def distorted_inputs():
  """Construct distorted input for CIFAR training using the Reader ops.

  Raises:
    ValueError: if no data_dir

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  filenames = [os.path.join(FLAGS.data_dir, 'acacia-10-batches-bin',
                            'data_batch_%d.bin' % i)
               for i in xrange(0, 3)]
  for f in filenames:
    if not gfile.Exists(f):
      raise ValueError('Failed to find file: ' + f)

  # Create a queue that produces the filenames to read.
  filename_queue = tf.train.string_input_producer(filenames)

  # Read examples from files in the filename queue.
  read_input = cifar10_input.read_cifar10(filename_queue)
  reshaped_image = tf.cast(read_input.uint8image, tf.float32)

  height = IMAGE_SIZE
  width = IMAGE_SIZE

  # Image processing for training the network. Note the many random
  # distortions applied to the image.

  # Randomly crop a [height, width] section of the image.
  distorted_image = tf.image.random_crop(reshaped_image, [height, width])

  # Randomly flip the image horizontally.
  distorted_image = tf.image.random_flip_left_right(distorted_image)

  # Because these operations are not commutative, consider randomizing
  # the order in which they are applied.
  distorted_image = tf.image.random_brightness(distorted_image,
                                               max_delta=40)  # was 63
  distorted_image = tf.image.random_contrast(distorted_image,
                                             lower=0.6, upper=1.2)  # was 0.2 and 1.8

  # distorted_image = reshaped_image

  # Subtract off the mean and divide by the variance of the pixels.
  float_image = tf.image.per_image_whitening(distorted_image)

  # Ensure that the random shuffling has good mixing properties.
  min_fraction_of_examples_in_queue = MIN_FRACTION_OF_EXAMPLES_QUEUE # 0.4
  min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
                           min_fraction_of_examples_in_queue)
  print('Filling queue with %d CIFAR images before starting to train. '
        'This will take a few minutes.' % min_queue_examples)

  # Generate a batch of images and labels by building up a queue of examples.
  print "I have read", read_input.label, "for this batch"
  return _generate_image_and_label_batch(float_image, read_input.label,
                                         min_queue_examples)
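distorted_inputs hands off to _generate_image_and_label_batch, whose definition is not shown in this listing. A sketch modeled on the TensorFlow CIFAR-10 tutorial; BATCH_SIZE and the thread count here are assumptions, not values taken from this example:

import tensorflow as tf

BATCH_SIZE = 128  # assumption: the module-level batch size

def _generate_image_and_label_batch(image, label, min_queue_examples):
    # Use a shuffling queue so consecutive batches are well mixed.
    num_preprocess_threads = 16
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=BATCH_SIZE,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * BATCH_SIZE,
        min_after_dequeue=min_queue_examples)
    return images, tf.reshape(label_batch, [BATCH_SIZE])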
Example #7
def inputs(eval_data):
    """Construct input for CIFAR evaluation using the Reader ops.

  Args:
    eval_data: bool, indicating if one should use the train or eval data set.

  Raises:
    ValueError: if no data_dir

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
    if not FLAGS.data_dir:
        raise ValueError('Please supply a data_dir')

    if not eval_data:
        filenames = [
            os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin',
                         'data_batch_%d.bin' % i) for i in xrange(1, 6)
        ]
        num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
    else:
        filenames = [
            os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin',
                         'test_batch.bin')
        ]
        num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL

    for f in filenames:
        if not gfile.Exists(f):
            raise ValueError('Failed to find file: ' + f)

    # Create a queue that produces the filenames to read.
    filename_queue = tf.train.string_input_producer(filenames)

    # Read examples from files in the filename queue.
    read_input = cifar10_input.read_cifar10(filename_queue)
    reshaped_image = tf.cast(read_input.uint8image, tf.float32)

    height = IMAGE_SIZE
    width = IMAGE_SIZE

    # Image processing for evaluation.
    # Crop the central [height, width] of the image.
    resized_image = tf.image.resize_image_with_crop_or_pad(
        reshaped_image, width, height)

    # Subtract off the mean and divide by the variance of the pixels.
    float_image = tf.image.per_image_whitening(resized_image)

    # Ensure that the random shuffling has good mixing properties.
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(num_examples_per_epoch *
                             min_fraction_of_examples_in_queue)

    # Generate a batch of images and labels by building up a queue of examples.
    return _generate_image_and_label_batch(float_image, read_input.label,
                                           min_queue_examples)
Example #8
def train():
    
    my_global_step = tf.Variable(0, name='global_step', trainable=False)
    
    
    data_dir = './data/cifar-10-batches-bin/'
    log_dir = './logs/train/'
    
    images, labels = cifar10_input.read_cifar10(data_dir=data_dir,
                                                is_train=True,
                                                batch_size= BATCH_SIZE,
                                                shuffle=True)
    logits = cifar10_model.inference(images, BATCH_SIZE, n_classes=N_CLASSES)
    
    loss = cifar10_model.losses(logits, labels)
    
    
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)  # define the optimizer
    train_op = optimizer.minimize(loss, global_step=my_global_step)  # run one optimization step
    
    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()
    
    
    
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    
    summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
    
    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                    break
            _, loss_value = sess.run([train_op, loss])
               
            if step % 50 == 0:                 
                print ('Step: %d, loss: %.4f' % (step, loss_value))
                
            if step % 100 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)                
    
            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
                
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
        
    coord.join(threads)
    sess.close()
#%%
Example #9
def train():
    
    my_global_step = tf.Variable(0, name='global_step', trainable=False)
    
    
    data_dir = '/home/kevin/tensorflow/CIFAR10/data/cifar-10-batches-bin/'
    log_dir = '/home/kevin/tensorflow/CIFAR10/logs234/'
    
    images, labels = cifar10_input.read_cifar10(data_dir=data_dir,
                                                is_train=True,
                                                batch_size= BATCH_SIZE,
                                                shuffle=True)
    logits = inference(images)
    
    loss = losses(logits, labels)
    
    
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    train_op = optimizer.minimize(loss, global_step= my_global_step)
    
    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()
    
    
    
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    
    summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
    
    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                    break
            _, loss_value = sess.run([train_op, loss])
               
            if step % 50 == 0:                 
                print ('Step: %d, loss: %.4f' % (step, loss_value))
                
            if step % 100 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)                
    
            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
                
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
        
    coord.join(threads)
    sess.close()
Example #10
def inputs(eval_data):
  """Construct input for CIFAR evaluation using the Reader ops.

  Args:
    eval_data: bool, indicating if one should use the train or eval data set.

  Raises:
    ValueError: if no data_dir

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  if not FLAGS.data_dir:
    raise ValueError('Please supply a data_dir')

  if not eval_data:
    filenames = [os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin',
                              'data_batch_%d.bin' % i)
                 for i in xrange(1, 6)]
    num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
  else:
    filenames = [os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin',
                              'test_batch.bin')]
    num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL

  for f in filenames:
    if not gfile.Exists(f):
      raise ValueError('Failed to find file: ' + f)

  # Create a queue that produces the filenames to read.
  filename_queue = tf.train.string_input_producer(filenames)

  # Read examples from files in the filename queue.
  read_input = cifar10_input.read_cifar10(filename_queue)
  reshaped_image = tf.cast(read_input.uint8image, tf.float32)

  height = IMAGE_SIZE
  width = IMAGE_SIZE

  # Image processing for evaluation.
  # Crop the central [height, width] of the image.
  resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,
                                                         width, height)

  # Subtract off the mean and divide by the variance of the pixels.
  float_image = tf.image.per_image_whitening(resized_image)

  # Ensure that the random shuffling has good mixing properties.
  min_fraction_of_examples_in_queue = 0.4
  min_queue_examples = int(num_examples_per_epoch *
                           min_fraction_of_examples_in_queue)

  # Generate a batch of images and labels by building up a queue of examples.
  return _generate_image_and_label_batch(float_image, read_input.label,
                                         min_queue_examples)
Example #11
def inputs_origin(data_dir):
    filenames = [os.path.join(data_dir,'data_batch_%d.bin' % i) for i in range(1,6)]
    for f in filenames:
        if not tf.gfile.Exists(f):
            raise ValueError('Failed to find file: ' + f)
    filename_queue = tf.train.string_input_producer(filenames)
    read_input = cifar10_input.read_cifar10(filename_queue)
    reshaped_image = tf.cast(read_input.uint8image,tf.float32)

    return reshaped_image
Example #12
def evaluate():
    with tf.Graph().as_default():
        
        log_dir = './logs/train/'
        test_dir = './data/cifar-10-batches-bin/'
        n_test = 10000
        
        
        # reading test data
        images, labels = cifar10_input.read_cifar10(data_dir=test_dir,
                                                    is_train=False,
                                                    batch_size= BATCH_SIZE,
                                                    shuffle=False)

        logits = cifar10_model.inference(images, BATCH_SIZE, N_CLASSES)
        # Compare the true labels with the predictions.
        top_k_op = tf.nn.in_top_k(logits, labels, 1)
        saver = tf.train.Saver(tf.global_variables())
        
        with tf.Session() as sess:
            # Load the model checkpoint.
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(log_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')
                return
        
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess = sess, coord = coord)
            
            try:
                num_iter = int(math.ceil(n_test / BATCH_SIZE))  # ceil(10000 / 64)
                true_count = 0
                total_sample_count = num_iter * BATCH_SIZE  # number of examples evaluated
                step = 0

                while step < num_iter and not coord.should_stop():
                    predictions = sess.run([top_k_op])
                    true_count += np.sum(predictions)
                    step += 1
                    precision = true_count / total_sample_count
                print('precision = %.3f' % precision)
            except Exception as e:
                coord.request_stop(e)
            finally:
                coord.request_stop()
                coord.join(threads)
    
#%%
Example #13
def inputs_origin(data_dir):
    # Read training images: five filenames, data_batch_1.bin through data_batch_5.bin.
    filenames = [
        os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in range(1, 6)
    ]
    # Check that each file exists.
    for f in filenames:
        if not tf.gfile.Exists(f):
            raise ValueError('Failed to find file: ' + f)
    filename_queue = tf.train.string_input_producer(filenames)  # wrap into a queue
    read_input = cifar10_input.read_cifar10(filename_queue)  # read from the queue
    reshaped_image = tf.cast(read_input.uint8image, tf.float32)  # convert to float
    return reshaped_image  # the tensor of a single image
Example #14
def inputs_origin(data_dir):
    filenames = [
        os.path.join(data_dir, 'data_batch_%d' % i1) for i1 in range(1, 6)
    ]
    for f in filenames:
        if not tf.gfile.Exists(f):
            raise ValueError('Failed to find file: ' + f)
    filename_queue = tf.train.string_input_producer(filenames)
    # cifar10_input.read_cifar10 is a pre-written function that reads from the
    # queue; the uint8image attribute of the returned read_input is the image Tensor.
    read_input = cifar10_input.read_cifar10(filename_queue)
    read_image = tf.cast(read_input.uint8image, tf.float32)

    return read_image
Example #15
def evaluate():
    with tf.Graph().as_default():
        
        log_dir = '/home/kevin/tensorflow/CIFAR10/logs10000/'
        test_dir = '/home/kevin/tensorflow/CIFAR10/data/cifar-10-batches-bin/'
        n_test = 10000
        
        
        # reading test data
        images, labels = cifar10_input.read_cifar10(data_dir=test_dir,
                                                    is_train=False,
                                                    batch_size= BATCH_SIZE,
                                                    shuffle=False)

        logits = inference(images)
        top_k_op = tf.nn.in_top_k(logits, labels, 1)
        saver = tf.train.Saver(tf.global_variables())
        
        with tf.Session() as sess:
            
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(log_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')
                return
        
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess = sess, coord = coord)
            
            try:
                num_iter = int(math.ceil(n_test / BATCH_SIZE))
                true_count = 0
                total_sample_count = num_iter * BATCH_SIZE
                step = 0

                while step < num_iter and not coord.should_stop():
                    predictions = sess.run([top_k_op])
                    true_count += np.sum(predictions)
                    step += 1
                    precision = true_count / total_sample_count
                print('precision = %.3f' % precision)
            except Exception as e:
                coord.request_stop(e)
            finally:
                coord.request_stop()
                coord.join(threads)
Example #16
def inputs_origin(data_dir):
    # only training set
    filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in range(1, 6)]

    for f in filenames:
        if not tf.gfile.Exists(f):
            raise ValueError('Failed to find file: ' + f)
    # Wrap the list of filenames into a TensorFlow queue.
    filename_queue = tf.train.string_input_producer(filenames)
    # The uint8image attribute of the returned read_input is the image Tensor.
    read_input = cifar10_input.read_cifar10(filename_queue)
    # Convert the image to floating point.
    reshaped_image = tf.cast(read_input.uint8image, tf.float32)
    return reshaped_image
Example #17
def evaluate():
    with tf.Graph().as_default() as g:

        log_dir = '/home/caozhang/spyder_projects/cifar10_code/logs_no_one_hot/'
        test_dir = '/home/caozhang/spyder_projects/cifar10_code/cifar-10-batches-bin/'
        n_test = 10000

        # reading test data
        images, labels = cifar10_input.read_cifar10(data_dir=test_dir,
                                                    is_train=False,
                                                    batch_size=BATCH_SIZE,
                                                    is_shuffle=False)

        logits = inference(images)
        top_k_op = tf.nn.in_top_k(logits, labels, 1)
        saver = tf.train.Saver(tf.global_variables())

        with tf.Session() as sess:
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(log_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')
                return

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            try:
                num_iter = int(math.ceil(n_test / BATCH_SIZE))
                true_count = 0
                total_sample_count = num_iter * BATCH_SIZE
                step = 0

                while step < num_iter and not coord.should_stop():
                    predictions = sess.run([top_k_op])
                    true_count += np.sum(predictions)
                    step += 1
                    # In Python 2, / on ints truncates; in Python 3 it returns a float.
                    precision = true_count / total_sample_count
                print("precision: %.3f" % precision)
            except Exception as e:
                coord.request_stop(e)
            finally:
                coord.request_stop()
            coord.join(threads)
Example #18
def evaluate_running():
    with tf.Graph().as_default():

        # reading test data
        images, labels = cifar10_input.read_cifar10(data_path=test_data_path,
                                                    is_train=False,
                                                    batch_size=batch_size,
                                                    shuffle=False)

        model = LeNet5(images, num_classes)
        logits = model.logits
        labels = tf.cast(labels, tf.int64)
        top_k_op = tf.nn.in_top_k(logits, tf.argmax(labels, 1), 1)
        saver = tf.train.Saver(tf.global_variables())

        with tf.Session() as sess:

            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(checkpoint_path)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')
                return

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            try:
                num_iter = int(math.ceil(n_test / batch_size))
                true_count = 0
                total_sample_count = num_iter * batch_size
                step = 0

                while step < num_iter and not coord.should_stop():
                    predictions = sess.run([top_k_op])
                    true_count += np.sum(predictions)
                    step += 1
                    precision = true_count / total_sample_count * 100.0
                print('precision = %.2f%%' % precision)
            except Exception as e:
                coord.request_stop(e)
            finally:
                coord.request_stop()
                coord.join(threads)
Example #19
def generate_cnn_inputs(bin_fnm):
    """Construct input for CIFAR evaluation using the Reader ops.
	Args:
	bin_fnm: filename that is used for binary input into the network
	
	Returns:
	images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
	labels: Labels. 1D tensor of [batch_size] size.
    """
    with tf.name_scope('input'):
        # Create a queue that produces the filenames to read.
        #   filenames = ['C:/codes/stylar_hw/tensorflow_cifar10_tutorial/img_1.bin']
        complete_filename = [os.path.join(FLAGS.image_dir, bin_fnm)]
        filename_queue = tf.train.string_input_producer(complete_filename)

        # Read examples from files in the filename queue.
        read_input = read_cifar10(filename_queue)
        reshaped_image = tf.cast(read_input.uint8image, tf.float32)

        height = IMAGE_SIZE
        width = IMAGE_SIZE

        # Image processing for evaluation.
        # Crop the central [height, width] of the image.
        resized_image = tf.image.resize_image_with_crop_or_pad(
            reshaped_image, height, width)

        # Subtract off the mean and divide by the variance of the pixels.
        float_image = tf.image.per_image_standardization(resized_image)

        # Set the shapes of tensors.
        float_image.set_shape([height, width, 3])
        read_input.label.set_shape([1])

        images, label_batch = tf.train.batch([float_image, read_input.label],
                                             batch_size=1,
                                             num_threads=1,
                                             capacity=1)

        labels = tf.reshape(label_batch, [1])

        if FLAGS.use_fp16:
            images = tf.cast(images, tf.float16)
            labels = tf.cast(labels, tf.float16)
        return images, labels
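A brief usage sketch for generate_cnn_inputs; 'img_1.bin' mirrors the commented-out example path in the function body and is purely illustrative:

images, labels = generate_cnn_inputs('img_1.bin')
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    image_batch, label_batch = sess.run([images, labels])  # batch of one example
    coord.request_stop()
    coord.join(threads)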
Example #20
def inputs_origin(data_dir):
  # There are five filenames in total, data_batch_1.bin through data_batch_5.bin;
  # all of them are training images.
  filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
               for i in xrange(1, 6)]
  # Check that each file exists.
  for f in filenames:
    if not tf.gfile.Exists(f):
      raise ValueError('Failed to find file: ' + f)
  # Wrap the list of filenames into a TensorFlow queue.
  filename_queue = tf.train.string_input_producer(filenames)
  # cifar10_input.read_cifar10 is a pre-written function that reads files from
  # the queue; the uint8image attribute of the returned read_input is the
  # image Tensor.
  read_input = cifar10_input.read_cifar10(filename_queue)
  # Convert the image to floating point.
  reshaped_image = tf.cast(read_input.uint8image, tf.float32)
  # reshaped_image is the tensor of a single image: each call to
  # sess.run(reshaped_image) fetches one image.
  return reshaped_image
Example #21
def train():
    my_global_step = tf.Variable(0, name='global_step', trainable=False)

    data_dir = 'data/'
    log_dir = 'logs/'

    images, labels = cifar10_input.read_cifar10(data_dir=data_dir,
                                                is_train=True,
                                                batch_size=BATCH_SIZE,
                                                shuffle=True)

    logits = inference(images)
    loss = losses(logits, labels)

    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    train_op = optimizer.minimize(loss, global_step=my_global_step)

    saver = tf.train.Saver(tf.global_variables())

    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in range(MAX_STEP):
            if coord.should_stop():
                break
            _, loss_value = sess.run([train_op, loss])
            if step % 200 == 0:
                print('Step: %d, loss: %.4f' % (step, loss_value))
            if step % 2000 == 0:
                check_path = os.path.join(log_dir, 'model_ckpt')
                saver.save(sess, check_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Example #22
def evaluation():
    with tf.Graph().as_default():
        log_dir = 'logs/'
        test_dir = 'data/'
        n_test = 10000

        images, labels = cifar10_input.read_cifar10(data_dir=test_dir,
                                                    is_train=False,
                                                    batch_size=BATCH_SIZE,
                                                    shuffle=False)

        logits = inference(images)
        top_k_op = tf.nn.in_top_k(logits, labels, 1)
        saver = tf.train.Saver(tf.global_variables())

        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(log_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                print('No checkpoint file found')
                return

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            try:
                num_iter = int(math.ceil(n_test / BATCH_SIZE))
                true_count = 0
                total_sample_count = num_iter * BATCH_SIZE
                step = 0

                while step < num_iter and not coord.should_stop():
                    predictions = sess.run([top_k_op])
                    true_count += np.sum(predictions)
                    step += 1
                    precision = true_count / total_sample_count
                print('precision = %.3f' % precision)
            except Exception as e:
                coord.request_stop(e)
            finally:
                coord.request_stop()
                coord.join(threads)
Example #23
def inputs_origin(data_dir):
    # Five filenames in total, data_batch_1.bin through data_batch_5.bin.
    filenames = [
        os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in range(1, 6)
    ]
    # Check that each file exists.
    for f in filenames:
        if not tf.gfile.Exists(f):
            raise ValueError('Failed to find file: ' + f)

    # Wrap the filenames into a TensorFlow queue.
    filenames_queue = tf.train.string_input_producer(filenames)

    # cifar10_input.read_cifar10 is a pre-written function that reads from the
    # queue; the uint8image attribute of the returned read_input is the image Tensor.
    read_input = cifar10_input.read_cifar10(filenames_queue)

    # Convert the image to floating point.
    reshaped_image = tf.cast(read_input.uint8image, tf.float32)

    # reshaped_image is the tensor of a single image: each call to
    # sess.run(reshaped_image) fetches one image.
    return reshaped_image
Example #24
class CIFAR10InputTest(tf.test.TestCase):
    def _record(self, label, red, green, blue):
        image_size = 32 * 32
        record = bytes(
            bytearray([label] + [red] * image_size + [green] * image_size +
                      [blue] * image_size))
        expected = [[[red, green, blue]] * 32] * 32
        return record, expected

    def testSimple(self):
        labels = [9, 3, 0]
        records = [
            self._record(labels[0], 0, 128, 255),
            self._record(labels[1], 255, 0, 1),
            self._record(labels[2], 254, 255, 0)
        ]
        contents = b"".join([record for record, _ in records])
        expected = [expected for _, expected in records]
        filename = os.path.join(self.get_temp_dir(), "cifar")
        open(filename, "wb").write(contents)

        with self.test_session() as sess:
            q = tf.FIFOQueue(99, [tf.string], shapes=())
            q.enqueue([filename]).run()
            q.close().run()
            result = cifar10_input.read_cifar10(q)

            for i in range(3):
                key, label, uint8image = sess.run(
                    [result.key, result.label, result.uint8image])
                self.assertEqual("%s:%d" % (filename, i), tf.compat.as_text(key))
                self.assertEqual(labels[i], label)
                self.assertAllEqual(expected[i], uint8image)

            with self.assertRaises(tf.errors.OutOfRangeError):
                sess.run([result.key, result.uint8image])
Example #25
#%%
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import os
import cifar10_input

os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '7'

#%%
data_dir = '/data/minglin/data/cifar10_data/cifar-10-batches-bin/'
batch_size = 4

image_batch, label_batch = cifar10_input.read_cifar10(data_dir,
                                                      is_train=True,
                                                      batch_size=batch_size,
                                                      shuffle=True)

with tf.Session() as sess:
    i = 0  # run only one batch
    coord = tf.train.Coordinator()  # create the coordinator
    threads = tf.train.start_queue_runners(coord=coord)  # start the queue runners

    try:
        while not coord.should_stop() and i < 1:
            images, labels = sess.run([image_batch, label_batch])
            # Display each label and image in the batch.
            for j in np.arange(batch_size):
                print('labels: %d' % np.argmax(labels[j]))
                plt.imshow(images[j, :, :, :])
                plt.show()
            i += 1
    except tf.errors.OutOfRangeError:
        print('Done -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
Example #26
        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            coord.request_stop()

        coord.join(threads)


if __name__ == '__main__':
    with tf.Graph().as_default():

        with tf.device('/cpu:0'):
            train_batch, train_label_batch = cifar10_input.read_cifar10(
                data_path=data_path,
                is_train=True,
                batch_size=batch_size,
                shuffle=True)
            val_batch, val_label_batch = cifar10_input.read_cifar10(
                data_path=data_path,
                is_train=False,
                batch_size=batch_size,
                shuffle=False)

        xs = tf.placeholder(tf.float32, shape=[batch_size, img_h, img_w, 3])
        ys = tf.placeholder(tf.int32, shape=[batch_size, num_classes])
        keep_prob = tf.placeholder(tf.float32)

        model = VGG16(xs, num_classes, keep_prob)
        logits = model.logits
Example #27
                                 
def testSimple(self):
    labels = [9, 3, 0]
    records = [self._record(labels[0], 0, 128, 255),
               self._record(labels[1], 255, 0, 1),
               self._record(labels[2], 254, 255, 0)]
    contents = b"".join([record for record, _ in records])
    expected = [expected for _, expected in records]
    filename = os.path.join(self.get_temp_dir(), "cifar")
    open(filename, "wb").write(contents)

    with self.test_session() as sess:
        q = tf.FIFOQueue(99, [tf.string], shapes=())
        q.enqueue([filename]).run()
        q.close().run()
        result = cifar10_input.read_cifar10(q)

        for i in range(3):
            key, label, uint8image = sess.run([
                result.key, result.label, result.uint8image])
            self.assertEqual("%s:%d" % (filename, i), tf.compat.as_text(key))
            self.assertEqual(labels[i], label)
            self.assertAllEqual(expected[i], uint8image)

        with self.assertRaises(tf.errors.OutOfRangeError):
            sess.run([result.key, result.uint8image])


if __name__ == "__main__":
    tf.test.main()
Example #28
def train(vgg, timestamp=None):
    '''
    :param timestamp: Used as a label to differentiate experiments.

                      If None, or if no folder named after the timestamp exists,
                      a new folder named after the current timestamp is created
                      to hold logs and models (e.g. logs/1506496800/train).
                      Otherwise, the model is restored from the existing folder.
    '''

    log_dirs = './log/' + str(timestamp)
    tmp_dirs = './tmp/' + str(timestamp)
    if not os.path.exists(log_dirs):
        os.makedirs(log_dirs + '/train')
        os.makedirs(log_dirs + '/test')
        os.makedirs(tmp_dirs)

        for dir in [log_dirs, tmp_dirs]:
            with open(os.path.join(dir, 'params.pkl'), 'wb'
                      ) as f:  # Save hyperparameters of vgg models to retrieve
                pickle.dump(vgg.params, f)
            with open(os.path.join(dir, "spec.txt"), "w") as f:  # For logging
                f.write(vgg.desc)

    summary_op = tf.summary.merge_all()
    train_summary_writer = tf.summary.FileWriter(log_dirs + '/train')
    test_summary_writer = tf.summary.FileWriter(log_dirs + '/test')

    with tf.Session() as sess:
        train_batch_xs, train_batch_ys = cifar10_input.read_cifar10(
            is_train=True, batch_size=BATCH_SIZE, shuffle=True)
        test_batch_xs, test_batch_ys = cifar10_input.read_cifar10(
            is_train=False, batch_size=BATCH_SIZE * 4, shuffle=False)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())
        vgg.load_weight_with_skip(sess, 'vgg16_weights.npz',
                                  ['fc6', 'fc7', 'fc8'])

        for step in range(0, MAX_STEP + 1):
            if coord.should_stop():
                break

            xs, ys, = sess.run([train_batch_xs,
                                train_batch_ys])  # Multi-thread
            loss, acc, summaries, _ = vgg.update(sess, xs, ys, summary_op)
            if step % 50 == 0:
                print('Step: %d, loss: %.4f, accuracy: %.4f%%' %
                      (step, loss, acc * 100))
                train_summary_writer.add_summary(summaries, step)

            if step % 100 == 0:
                xs, ys, = sess.run([test_batch_xs, test_batch_ys])
                test_loss, test_acc, test_summaries = vgg.validate(
                    sess, xs, ys, summary_op)
                test_result = 'Step: %d, loss: %.4f, accuracy: %.4f%%' % (
                    step, test_loss, test_acc * 100)
                print('[Test]' + test_result)
                if step % 5000 == 0:
                    with open(
                            os.path.join(
                                log_dirs,
                                "result_{}.txt".format(int(test_acc * 100))),
                            "w") as f:
                        f.write(test_result)
                test_summary_writer.add_summary(test_summaries, step)

                if step != 0:
                    if not os.path.exists('./tmp'):
                        os.makedirs('./tmp')
                    saver_path = saver.save(sess,
                                            os.path.join(
                                                './tmp', str(timestamp),
                                                'vgg.skpt'),
                                            global_step=step)
                    print("Model saved in file: %s" % saver_path)

        coord.request_stop()
        coord.join(threads)
Example #29
# author: Jason Howe
import os
import tensorflow as tf
from six.moves import xrange
import cifar10_input

data_dir = 'F:/cifar-10-batches-bin'
filenames = [
    os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in xrange(1, 6)
]
for f in filenames:
    if not tf.gfile.Exists(f):
        raise ValueError('Failed to find file: ' + f)

# Create a queue that produces the filenames to read.
filename_queue = tf.train.string_input_producer(filenames)
result_original = cifar10_input.read_cifar10(filename_queue)
print(result_original.uint8image.get_shape())
print(filenames)
# with tf.Session() as sess:
#     print(sess.run(result_original.label[0]))
Example #30
import os
import tensorflow as tf
from six.moves import xrange
import cifar10_input
import matplotlib.pyplot as plt

data_dir = os.path.join('/mnt/nas/ntu-rgbd/other_Datasets/cifar10_data',
                        'cifar-10-batches-bin')
filenames = [
    os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in xrange(1, 6)
]
for f in filenames:
    if not tf.gfile.Exists(f):
        raise ValueError('Failed to find file: ' + f)
# Create a queue that produces the filenames to read.
filename_queue = tf.train.string_input_producer(filenames)
read_input = cifar10_input.read_cifar10(filename_queue)

# The queue runners must be started, or this run would block forever.
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    print(type(sess.run(read_input.uint8image)))  # <class 'numpy.ndarray'>
    coord.request_stop()
    coord.join(threads)
'''
# Create a queue that produces the filenames to read.
filename_queue = tf.train.string_input_producer(filenames)
read_input = cifar10_input.read_cifar10(filename_queue)

from PIL import Image
import numpy as np

im = Image.open('images.jpeg')
im = (np.array(im))

r = im[:,:,0].flatten()
g = im[:,:,1].flatten()
Example #31
def inputs(eval_data, data_dir, batch_size):
    """Construct input for CIFAR evaluation using the Reader ops.

    Args:
      eval_data: bool, indicating if one should use the train or eval data set.
      data_dir: Path to the CIFAR-10 data directory.
      batch_size: Number of images per batch.

    Returns:
      images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
      labels: Labels. 1D tensor of [batch_size] size.
    """
    import os
    num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
    filenames = []
    if GET_TrainFile_By_BIN:
        tempfilelist = os.listdir("/tmp/cifar10_data")
        for item in tempfilelist:
            if item.find(".bin") != -1:
                filenames.append(
                    os.path.join("/tmp/cifar10_data", item).replace("\\", "/"))
        filename_queue = tf.train.string_input_producer(
            filenames, name="filename_queue_hcq", shuffle=True)
        read_input = cifar10_input.read_cifar10(filename_queue)

    else:
        data_dir = trainPath
        if len(os.listdir(data_dir)) != NUM_CLASSES:
            raise Exception('Number of image class folders does not match NUM_CLASSES')
        filenames, calist = labeltools.getfilelist(data_dir)

        filename_queue = tf.train.string_input_producer(
            filenames, name="filename_queue_hcq", shuffle=True)
        read_input = read_from_imagenet(filename_queue, filenames)

    reshaped_image = tf.cast(read_input.uint8image, tf.float32)

    height = IMAGE_SIZE
    width = IMAGE_SIZE

    # Image processing for evaluation.
    # Crop the central [height, width] of the image.
    resized_image = tf.image.resize_image_with_crop_or_pad(
        reshaped_image, width, height)

    # Subtract off the mean and divide by the variance of the pixels.
    float_image = tf.image.per_image_standardization(resized_image)

    # Set the shapes of tensors.
    float_image.set_shape([height, width, 3])
    #read_input.label.set_shape([])

    # Ensure that the random shuffling has good mixing properties.
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(num_examples_per_epoch *
                             min_fraction_of_examples_in_queue)

    # Generate a batch of images and labels by building up a queue of examples.
    return _generate_image_and_label_batch(float_image,
                                           read_input.label,
                                           min_queue_examples,
                                           batch_size,
                                           shuffle=False)