Example #1
def ReadExamples(path):
    print("Reading", path)

    records = list(tf.python_io.tf_record_iterator(path, TF_RECORD_CONFIG))
    num_records = len(records)

    features = {
        'x': tf.FixedLenFeature([], tf.string),
        'pi': tf.FixedLenFeature([], tf.string),
        'outcome': tf.FixedLenFeature([], tf.float32),
        'n': tf.FixedLenFeature([], tf.int64, default_value=[-1]),
        'q': tf.FixedLenFeature([], tf.float32, default_value=[-1]),
        'c': tf.FixedLenFeature([], tf.int64, default_value=[-1]),
    }

    parsed = tf.parse_example(records, features)

    x = tf.decode_raw(parsed['x'], tf.uint8)
    x = tf.cast(x, tf.float32)
    x = tf.reshape(x, [num_records, go.N, go.N, -1])
    x = x.eval()

    pi = tf.decode_raw(parsed['pi'], tf.float32)
    pi = tf.reshape(pi, [num_records, go.N * go.N + 1])
    pi = pi.eval()

    outcome = parsed['outcome'].eval()
    n = parsed['n'].eval()
    q = parsed['q'].eval()
    c = parsed['c'].eval()

    return [ParsedExample(*args) for args in zip(x, pi, outcome, q, n, c)]
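
A minimal usage sketch, not part of the original source: ReadExamples calls .eval(), so it needs a TF 1.x default session; the file path below is hypothetical.

with tf.Session() as sess:
    examples = ReadExamples('data/selfplay.tfrecord')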
Example #2
    def init(self,
             batch_size,
             macrobatch=1,
             gpus_num=None,
             logbase='leelalogs'):
        self.batch_size = batch_size
        self.macrobatch = macrobatch
        self.logbase = logbase
        # Input batch placeholders
        self.planes = tf.placeholder(tf.string, name='in_planes')
        self.probs = tf.placeholder(tf.string, name='in_probs')
        self.winner = tf.placeholder(tf.string, name='in_winner')

        # Mini-batches come as raw packed strings. Decode them
        # into tensors to feed into the network.
        planes = tf.decode_raw(self.planes, tf.uint8)
        probs = tf.decode_raw(self.probs, tf.float32)
        winner = tf.decode_raw(self.winner, tf.float32)

        planes = tf.cast(planes, self.model_dtype)

        planes = tf.reshape(planes,
                            (batch_size, INPUT_CHANNELS, NUM_INTERSECTIONS))
        probs = tf.reshape(probs, (batch_size, POTENTIAL_MOVES))
        winner = tf.reshape(winner, (batch_size, 1))

        if gpus_num is None:
            gpus_num = self.gpus_num
        self.init_net(planes, probs, winner, gpus_num)
Example #3
def read_and_decode(filename_queue, label_type, shape):

    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'image_raw':
                                           tf.FixedLenFeature([], tf.string),
                                           'label_raw':
                                           tf.FixedLenFeature([], tf.string),
                                       })
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image = tf.cast(image, tf.float32)

    image = (image - 127.5) * (1. / 128.0)
    image.set_shape([shape * shape * 3])
    image = tf.reshape(image, [shape, shape, 3])
    label = tf.decode_raw(features['label_raw'], tf.float32)

    if label_type == 'cls':
        image = tf.image.random_flip_left_right(image)
        image = tf.image.random_flip_up_down(image)
        label.set_shape([2])
    elif label_type == 'bbx':
        label.set_shape([4])
    elif label_type == 'pts':
        label.set_shape([10])

    return image, label
Example #4
def batch_parse_tf_example(batch_size, layout, example_batch):
  """
    Args:
        batch_size: batch size
        layout: 'nchw' or 'nhwc'
        example_batch: a batch of tf.Example

    Returns:
        A tuple (feature_tensor, dict of output tensors)
  """
  planes = dual_net.get_features_planes()

  features = {
      'x': tf.FixedLenFeature([], tf.string),
      'pi': tf.FixedLenFeature([], tf.string),
      'outcome': tf.FixedLenFeature([], tf.float32),
  }
  parsed = tf.parse_example(example_batch, features)
  x = tf.decode_raw(parsed['x'], tf.uint8)
  x = tf.cast(x, tf.float32)

  if layout == 'nhwc':
    shape = [batch_size, go.N, go.N, planes]
  else:
    shape = [batch_size, planes, go.N, go.N]
  x = tf.reshape(x, shape)

  pi = tf.decode_raw(parsed['pi'], tf.float32)
  pi = tf.reshape(pi, [batch_size, go.N * go.N + 1])
  outcome = parsed['outcome']
  outcome.set_shape([batch_size])
  return x, {'pi_tensor': pi, 'value_tensor': outcome}
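
A minimal usage sketch, an assumption rather than part of the original: batch_parse_tf_example expects a batch of serialized tf.Example strings, e.g. from a batched tf.data pipeline; the file name and batch size are hypothetical.

dataset = tf.data.TFRecordDataset(['selfplay.tfrecord'])
dataset = dataset.batch(256, drop_remainder=True)
dataset = dataset.map(lambda batch: batch_parse_tf_example(256, 'nhwc', batch))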
Example #5
def read_record(data_path, batch_size=1, size=512):
	feature = {'image': tf.FixedLenFeature(shape=(), dtype=tf.string),
				'wall': tf.FixedLenFeature(shape=(), dtype=tf.string),
				'close': tf.FixedLenFeature(shape=(), dtype=tf.string),
				'room': tf.FixedLenFeature(shape=(), dtype=tf.string),
				'close_wall': tf.FixedLenFeature(shape=(), dtype=tf.string)}

	# Create a list of filenames and pass it to a queue
	filename_queue = tf.train.string_input_producer([data_path], num_epochs=None, shuffle=False, capacity=batch_size*128)
	
	# Define a reader and read the next record
	reader = tf.TFRecordReader()
	_, serialized_example = reader.read(filename_queue)

	# Decode the record read by the reader
	features = tf.parse_single_example(serialized_example, features=feature)

	# Convert the image data from string back to the numbers
	image = tf.decode_raw(features['image'], tf.uint8)
	wall = tf.decode_raw(features['wall'], tf.uint8)
	close = tf.decode_raw(features['close'], tf.uint8)
	room = tf.decode_raw(features['room'], tf.uint8)
	close_wall = tf.decode_raw(features['close_wall'], tf.uint8)

	# Cast data
	image = tf.cast(image, dtype=tf.float32)
	wall = tf.cast(wall, dtype=tf.float32)
	close = tf.cast(close, dtype=tf.float32)
	# room = tf.cast(room, dtype=tf.float32)
	close_wall = tf.cast(close_wall, dtype=tf.float32)

	# Reshape image data into the original shape
	image = tf.reshape(image, [size, size, 3])
	wall = tf.reshape(wall, [size, size, 1])
	close = tf.reshape(close, [size, size, 1])
	room = tf.reshape(room, [size, size])
	close_wall = tf.reshape(close_wall, [size, size, 1])


	# Any preprocessing here ...
	# normalize 
	image = tf.divide(image, tf.constant(255.0))
	wall = tf.divide(wall, tf.constant(255.0))
	close = tf.divide(close, tf.constant(255.0))
	close_wall = tf.divide(close_wall, tf.constant(255.0))

	# Generate one-hot room label
	room_one_hot = tf.one_hot(room, 9, axis=-1)

	# Creates batches by randomly shuffling tensors
	images, walls, closes, rooms, close_walls = tf.train.shuffle_batch([image, wall, close, room_one_hot, close_wall], 
						batch_size=batch_size, capacity=batch_size*128, num_threads=1, min_after_dequeue=batch_size*32)	

	# images, walls = tf.train.shuffle_batch([image, wall], 
						# batch_size=batch_size, capacity=batch_size*128, num_threads=1, min_after_dequeue=batch_size*32)	

	return {'images': images, 'walls': walls, 'closes': closes, 'rooms': rooms, 'close_walls': close_walls}
Example #6
def read_examples(path):
    records = list(tf.data.TFRecordDataset([path], 'ZLIB'))
    num_records = len(records)

    # n, q, c have default_values because they're optional.
    features = {
        'x': tf.io.FixedLenFeature([], tf.string),
        'pi': tf.io.FixedLenFeature([], tf.string),
        'outcome': tf.io.FixedLenFeature([], tf.float32),
        'n': tf.io.FixedLenFeature([], tf.int64, default_value=[-1]),
        'q': tf.io.FixedLenFeature([], tf.float32, default_value=[-1]),
        'c': tf.io.FixedLenFeature([], tf.int64, default_value=[-1]),
    }

    parsed = tf.io.parse_example(records, features)

    x = tf.decode_raw(parsed['x'], tf.uint8)
    x = tf.cast(x, tf.float32)
    pi = tf.decode_raw(parsed['pi'], tf.float32)

    if not (FLAGS.board_size and FLAGS.feature_layout
            and FLAGS.to_play_feature):
        try:
            FLAGS.board_size, FLAGS.feature_layout, FLAGS.to_play_feature = (
                guess_format(x, pi, num_records))
        except:
            print('Unable to guess feature format from examples, please set '
                  'the board_size, feature_layout and to_play_feature flags')
            sys.exit(1)

    # We must set the BOARD_SIZE environment variable before importing the go
    # module.
    os.environ['BOARD_SIZE'] = str(FLAGS.board_size)
    global go
    import go  # deferred import so BOARD_SIZE is set before the module loads

    if FLAGS.feature_layout == 'nhwc':
        x = tf.reshape(x, [num_records, go.N, go.N, -1])
    elif FLAGS.feature_layout == 'nchw':
        x = tf.reshape(x, [num_records, -1, go.N, go.N])
        x = tf.transpose(x, [0, 2, 3, 1])
    else:
        raise ValueError('Invalid feature_layout "%s"' % FLAGS.feature_layout)
    x = x.numpy()

    pi = tf.reshape(pi, [num_records, go.N * go.N + 1])
    pi = pi.numpy()

    outcome = parsed['outcome'].numpy()
    n = parsed['n'].numpy()
    q = parsed['q'].numpy()
    c = parsed['c'].numpy()

    return [ParsedExample(*args) for args in zip(x, pi, outcome, q, n, c)]
Example #7
def read_bd_rm_record(data_path, batch_size=1, size=512):
	feature = {'image': tf.FixedLenFeature(shape=(), dtype=tf.string),
				'boundary': tf.FixedLenFeature(shape=(), dtype=tf.string),
				'room': tf.FixedLenFeature(shape=(), dtype=tf.string),
				'door': tf.FixedLenFeature(shape=(), dtype=tf.string)}
	
	print("data_path=",data_path)

	# Create a list of filenames and pass it to a queue
	filename_queue = tf.train.string_input_producer([data_path], num_epochs=None, shuffle=False, capacity=batch_size*128)
	# filename_queue = tf.data.Dataset.from_tensor_slices([data_path]).shuffle(tf.shape([data_path], out_type=tf.int64)[0]).repeat(None)
	
	# Define a reader and read the next record
	reader = tf.TFRecordReader()
	_, serialized_example = reader.read(filename_queue)

	# Decode the record read by the reader
	features = tf.parse_single_example(serialized_example, features=feature)

	# Convert the image data from string back to the numbers
	image = tf.decode_raw(features['image'], tf.uint8)
	boundary = tf.decode_raw(features['boundary'], tf.uint8)
	room = tf.decode_raw(features['room'], tf.uint8)
	door = tf.decode_raw(features['door'], tf.uint8)

	# Cast data
	image = tf.cast(image, dtype=tf.float32)

	# Reshape image data into the original shape
	image = tf.reshape(image, [size, size, 3])
	boundary = tf.reshape(boundary, [size, size])
	room = tf.reshape(room, [size, size])
	door = tf.reshape(door, [size, size])

	# Any preprocessing here ...
	# normalize 
	image = tf.divide(image, tf.constant(255.0))

	# Generate one-hot boundary and room labels
	label_boundary = tf.one_hot(boundary, 3, axis=-1)
	label_room = tf.one_hot(room, 9, axis=-1)

	# Creates batches by randomly shuffling tensors
	images, label_boundaries, label_rooms, label_doors = tf.train.shuffle_batch([image, label_boundary, label_room, door], 
						batch_size=batch_size, capacity=batch_size*128, num_threads=1, min_after_dequeue=batch_size*32)	

	# images, walls = tf.train.shuffle_batch([image, wall], 
						# batch_size=batch_size, capacity=batch_size*128, num_threads=1, min_after_dequeue=batch_size*32)	

	return {'images': images, 'label_boundaries': label_boundaries, 'label_rooms': label_rooms, 'label_doors': label_doors}
Example #8
    def decode_record(record):
        example = tf.io.parse_single_example(record, features)
        image = tf.decode_raw(example["image_raw"], tf.uint8)
        image = tf.cast(image, tf.float32)
        image = tf.reshape(image, [28, 28, 1])

        return image, example["label"]
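
A minimal usage sketch, an assumption: decode_record is meant to be mapped over serialized records, with the `features` dict from its enclosing scope; the file name is hypothetical.

dataset = tf.data.TFRecordDataset(['mnist_train.tfrecord'])
dataset = dataset.map(decode_record).batch(64)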
Example #9
def _elmo_token_to_bytes(text, max_length, bos=_BOS, eos=_EOS, pad=_PAD):
    """ELMO-specific way of converting a word into a  byte seq.

  This mimics docqa/elmo/data.py, UnicodeCharsVocabulary.

  Args:
    text: tf.string tensor of shape []
    max_length: Maximum number of bytes per word. Defaults to 50.
    bos: begin-of-sentence token
    eos: end-of-sentence token
    pad: padding token

  Returns:
    A tf.int32 tensor of the byte encoded text.
  """
    byte_ids = tf.to_int32(tf.decode_raw(text, tf.uint8))

    # Special handling for bos and eos
    byte_ids = tf.cond(tf.equal(text, bos),
                       lambda: tf.constant([_BOS_CHAR_ID]), lambda: byte_ids)
    byte_ids = tf.cond(tf.equal(text, eos),
                       lambda: tf.constant([_EOS_CHAR_ID]), lambda: byte_ids)

    byte_ids = byte_ids[:max_length - 2]
    padding = tf.fill([max_length - tf.shape(byte_ids)[0] - 2], _PAD_CHAR_ID)
    byte_ids = tf.concat([[_BOW_CHAR_ID], byte_ids, [_EOW_CHAR_ID], padding],
                         axis=0)
    tf.logging.info(byte_ids)

    byte_ids = tf.reshape(byte_ids, [max_length])
    tf.logging.info(byte_ids.get_shape().as_list())
    return tf.cond(tf.equal(text,
                            pad), lambda: tf.zeros(max_length, dtype=tf.int32),
                   lambda: byte_ids + 1)
Example #10
    def decode_image(image):
        # Normalize from [0, 255] to [0.0, 1.0]
        image = tf.decode_raw(image, tf.uint8)
        image = tf.cast(image, tf.float32)

        image = tf.reshape(image, [784])
        return image / 255.0
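
A possible usage sketch, an assumption mirroring the raw MNIST IDX layout (a 16-byte header followed by 28*28-byte image records); the file name is hypothetical and decode_image is assumed to be in scope.

images = tf.data.FixedLengthRecordDataset(
    'train-images-idx3-ubyte', 28 * 28, header_bytes=16)
images = images.map(decode_image)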
Example #11
def read_cifar10(filename_queue):
    class CIFAR10Record(object):
        pass

    result = CIFAR10Record()

    label_bytes = 1
    result.height = 32
    result.width = 32
    result.depth = 3
    image_bytes = result.height * result.width * result.depth
    record_bytes = label_bytes + image_bytes

    reader = tf.FixedLengthRecordReader(record_bytes)
    result.key, value = reader.read(filename_queue)

    record_bytes = tf.decode_raw(value, tf.uint8)
    result.label = tf.cast(tf.strided_slice(record_bytes, [0], [label_bytes]),
                           tf.int32)
    depth_major = tf.reshape(
        tf.strided_slice(record_bytes, [label_bytes],
                         [label_bytes + image_bytes]),
        [result.depth, result.height, result.width])
    result.uint8image = tf.transpose(depth_major, [1, 2, 0])

    return result
Example #12
def read_and_decode2stand(tfrecords_file, batch_size):


    # make an input queue from the tfrecord file
    filename_queue = tf.train.string_input_producer([tfrecords_file])

    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    img_features = tf.parse_single_example(
        serialized_example,
        features={
            'label': tf.FixedLenFeature([], tf.int64),
            'image_raw': tf.FixedLenFeature([], tf.string),
        })
    image = tf.decode_raw(img_features['image_raw'], tf.uint8)

    image = tf.reshape(image, [H, W, Channels])
    image = tf.cast(image, tf.float32) * (1.0 / 255)
    image = tf.image.per_image_standardization(image)  # standardization

    # All the images of notMNIST are 200*150; change the image size if you use another dataset.
    label = tf.cast(img_features['label'], tf.int32)
    image_batch, label_batch = tf.train.batch([image, label],
                                              batch_size=batch_size,
                                              num_threads=64,
                                              capacity=2000)
    # Change to ONE-HOT
    label_batch = tf.one_hot(label_batch, depth=n_classes)
    label_batch = tf.cast(label_batch, dtype=tf.int32)
    label_batch = tf.reshape(label_batch, [batch_size, n_classes])
    print(label_batch)
    return image_batch, label_batch
Example #13
def to_tensor(serialized_example: tf.Tensor,
              shape: Iterable[int]) -> tf.Tensor:
    """
    Creates a deserialization operation for Example protobufs.
    
    Parameters
    ----------
    serialized_example: tf.Tensor
        Tensor containing serialized Example protobufs
    shape: list of int
        The shape of the feature matrices in the Example protobufs.

    Returns
    -------
    tf.Tensor
        The deserialized feature matrix
    """
    features = tf.parse_single_example(
        serialized_example,
        features={
            _DATA_RAW_KEY: tf.FixedLenFeature([], tf.string),
        })

    data = tf.decode_raw(features[_DATA_RAW_KEY], tf.float32)
    data = tf.reshape(data, shape)

    return data
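
A minimal usage sketch, not from the original; the file name and shape are hypothetical.

dataset = tf.data.TFRecordDataset(['features.tfrecord'])
dataset = dataset.map(lambda ex: to_tensor(ex, shape=[128, 64]))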
Example #14
    def parser(self, serialized_example, skip_preprocess=False):
        """Parses a single tf.Example into image and label tensors."""
        # Dimensions of the images in the CIFAR-10 dataset.
        # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of
        # the input format.
        features = tf.parse_single_example(serialized_example,
                                           features={
                                               'image':
                                               tf.FixedLenFeature([],
                                                                  tf.string),
                                               'label':
                                               tf.FixedLenFeature([],
                                                                  tf.int64),
                                           })
        image = tf.decode_raw(features['image'], tf.uint8)
        image.set_shape([3 * 32 * 32])

        # Reshape from [depth * height * width] to [depth, height, width].
        image = tf.cast(
            tf.transpose(tf.reshape(image, [3, 32, 32]), [1, 2, 0]),
            tf.float32)
        label = tf.cast(features['label'], tf.int32)
        if not skip_preprocess:
            # Custom preprocessing.
            image = self.preprocess(image)
            image = image / 128 - 1

        return image, label
Example #15
def _string_to_bytes(text, max_length):
  """Given string and length, convert to byte seq of at most max_length.

  This process mimics docqa/elmo's preprocessing:
  https://github.com/allenai/document-qa/blob/master/docqa/elmo/data.py

  Note that we make use of BOS_CHAR_ID and EOS_CHAR_ID in iterator_utils.py &
  our usage differs from docqa/elmo.

  Args:
    text: tf.string tensor of shape []
    max_length: max number of chars for each word.

  Returns:
    A tf.int32 tensor of the byte encoded text.
  """
  byte_ids = tf.to_int32(tf.decode_raw(text, tf.uint8))
  byte_ids = byte_ids[:max_length - 2]
  padding = tf.fill([max_length - tf.shape(byte_ids)[0] - 2], PAD_CHAR_ID)
  byte_ids = tf.concat(
      [[BOW_CHAR_ID], byte_ids, [EOW_CHAR_ID], padding], axis=0)
  tf.logging.info(byte_ids)

  byte_ids = tf.reshape(byte_ids, [max_length])
  tf.logging.info(byte_ids.get_shape().as_list())
  return byte_ids + 1
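
A minimal usage sketch, an assumption: since _string_to_bytes works on a scalar string, it can be applied per token with tf.map_fn; the token values and max_length are hypothetical.

tokens = tf.constant(['hello', 'world'])
char_ids = tf.map_fn(lambda t: _string_to_bytes(t, max_length=50),
                     tokens, dtype=tf.int32)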
Example #16
def _decode(example_proto):
    # Parse the input `tf.Example` proto using the feature description dict above.
    single_example = tf.parse_single_example(example_proto, features)
    for k in BYTE_FEATURES:
        single_example[k] = tf.squeeze(tf.decode_raw(single_example[k],
                                                     tf.uint8),
                                       axis=-1)
    return single_example
Example #17
    def train(self):
        # Input tensor
        inputs = tf.placeholder('float', [None, 224, 224, 3])
        # Output tensor
        outputs = tf.placeholder('float', [None, 224, 224, 3])
        # encoded is the image passed through the encoder, decoded is that result passed
        # through the decoder, and decoded_encoded is the reconstructed image passed
        # through the encoder again.
        encoded, decoded, decoded_encoded = self.encoder_decoder(inputs)
        # Pixel (content) loss between the reconstruction and the original (outputs, decoded)
        pixel_loss = tf.losses.mean_squared_error(decoded, outputs)
        # Feature (style) loss between the encoded original (encoded) and the encoded
        # reconstruction (decoded_encoded)
        feature_loss = tf.losses.mean_squared_error(decoded_encoded, encoded)
        # loss = pixel_loss + feature_loss
        loss = 0.5 * pixel_loss + 0.1 * feature_loss
        opt = tf.train.AdamOptimizer(0.0001).minimize(loss)
        # Location of the training set
        tfrecords_filename = self.tfrecord_path
        filename_queue = tf.train.string_input_producer([tfrecords_filename], num_epochs=100)

        reader = tf.TFRecordReader()
        _, serialized_example = reader.read(filename_queue)

        feature2 = {
            'image_raw': tf.FixedLenFeature([], tf.string)}
        features = tf.parse_single_example(serialized_example, features=feature2)
        image = tf.decode_raw(features['image_raw'], tf.uint8)
        image = tf.reshape(image, [224, 224, 3])
        images = tf.train.shuffle_batch([image], batch_size=self.batch_size, capacity=30, min_after_dequeue=10)
        
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            tf.global_variables_initializer().run()
            tf.local_variables_initializer().run()

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)

            saver = tf.train.Saver()

            for i in range(self.max_iterator):
                batch_x = sess.run(images)
                feed_dict = {inputs: batch_x, outputs: batch_x}

                _, p_loss, f_loss, reconstruct_imgs = sess.run(
                    [opt, pixel_loss, feature_loss, decoded], feed_dict=feed_dict)

                print('step %d |  pixel_loss is %f   | feature_loss is %f  |' % (i, p_loss, f_loss))

                if i % 5 == 0:
                    result_img = np.clip(reconstruct_imgs[0], 0, 255).astype(np.uint8)
                    imsave('result.jpg', result_img)

            saver.save(sess, self.checkpoint_path)
            coord.request_stop()
            coord.join(threads)
Example #18
    def decode_label(inputstream):
        inputstream = tf.decode_raw(inputstream, tf.uint8)
        _, label = tf.slice(inputstream, [1], [image_vec_length]), tf.slice(inputstream, [0], [1])
        # image = tf.cast(image, tf.float32)
        # image = tf.reshape(image, [32*32*3])
        # image = image/255
        label = tf.reshape(label, [])
        label = tf.to_int32(label)
        return label
Example #19
def decode(serialized_example):
    feature = {
        'image_raw': tf.FixedLenFeature([], tf.string),
        'label': tf.VarLenFeature(tf.int64)
    }
    features = tf.parse_single_example(serialized_example, features=feature)
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    label = features['label'].values
    return image, label
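
A minimal usage sketch, an assumption: decode is intended as a per-record map function; the file name is hypothetical.

dataset = tf.data.TFRecordDataset(['train.tfrecord']).map(decode)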
Example #20
  def _parse_example(self, example_proto):
    d = tf.parse_single_example(example_proto, self._feature_description)

    img = tf.decode_raw(d['image_raw'], tf.uint8)
    img = tf.reshape(img, self._img_shape)
    d['image'] = tf.to_float(img) / 255.
    del d['image_raw']

    return d
Example #21
def read_and_decode(filename):
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'label':
                                           tf.FixedLenFeature([], tf.string),
                                           'img_raw':
                                           tf.FixedLenFeature([], tf.string)
                                       })
    image = tf.decode_raw(features['img_raw'], tf.uint8)

    image = tf.reshape(image, [32, 20, 1])
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    l = tf.decode_raw(features['label'], tf.float64)
    label = tf.reshape(tf.cast(l, tf.float32), [2])
    return image, label
Example #22
def parse_tfrecord_tf(record):
    features = tf.parse_single_example(record,
                                       features={
                                           'shape':
                                           tf.FixedLenFeature([3], tf.int64),
                                           'data':
                                           tf.FixedLenFeature([], tf.string)
                                       })
    data = tf.decode_raw(features['data'], tf.uint8)
    return tf.reshape(data, features['shape'])
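
A minimal usage sketch, an assumption; the file name is hypothetical.

dataset = tf.data.TFRecordDataset(['images.tfrecords']).map(parse_tfrecord_tf)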
Example #23
def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'image_raw':
                                           tf.FixedLenFeature([], tf.string),
                                           'label_raw':
                                           tf.FixedLenFeature([], tf.string),
                                       })
    image = tf.decode_raw(features['image_raw'], tf.int16)
    image.set_shape([IMAGE_HEIGHT * IMAGE_WIDTH])
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    reshape_image = tf.reshape(image, [IMAGE_HEIGHT, IMAGE_WIDTH, 1])
    label = tf.decode_raw(features['label_raw'], tf.uint8)
    label.set_shape([CHARS_NUM * CLASSES_NUM])
    reshape_label = tf.reshape(label, [CHARS_NUM, CLASSES_NUM])
    return tf.cast(reshape_image, tf.float32), tf.cast(reshape_label,
                                                       tf.float32)
Example #24
def _decode(example_proto, features):
  # Parse the input `tf.Example` proto using a feature description dictionary.
  single_example = tf.parse_single_example(example_proto, features)
  for k in BYTE_FEATURES:
    single_example[k] = tf.squeeze(tf.decode_raw(single_example[k], tf.uint8),
                                   axis=-1)
  # To return masks in the canonical [entities, height, width, channels] format,
  # we need to transpose the tensor axes.
  single_example['mask'] = tf.transpose(single_example['mask'], [2, 0, 1, 3])
  return single_example
Example #25
def get_image_label_from_record(image_record, label_record):
    """Decodes the image and label information from one data record."""
    # Convert from tf.string to tf.uint8.
    image = tf.decode_raw(image_record, tf.uint8)
    # Convert from tf.uint8 to tf.float32.
    image = tf.cast(image, tf.float32)

    # Reshape image to correct shape.
    image = tf.reshape(image, [IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS])
    # Normalize from [0, 255] to [0.0, 1.0]
    image /= 255.0

    # Convert from tf.string to tf.uint8.
    label = tf.decode_raw(label_record, tf.uint8)
    # Convert from tf.uint8 to tf.int32.
    label = tf.to_int32(label)
    # Reshape label to correct shape.
    label = tf.reshape(label, [])  # label is a scalar
    return image, label
Example #26
def parse_tf_example(tf_example_string, params):
    """Parse the serialized tf.Example and decode it to the image tensor."""
    decoded_tensors = tf.parse_single_example(
        serialized=tf_example_string,
        features={'image/ct_image': tf.FixedLenFeature([], tf.string)})
    image = tf.decode_raw(decoded_tensors['image/ct_image'],
                          tf.as_dtype(tf.float32))
    image_size = params.input_image_size + [params.num_channels]
    image = tf.reshape(image, image_size)
    return image
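
A minimal usage sketch, an assumption: both the file name and a `params` object with input_image_size and num_channels are hypothetical here.

dataset = tf.data.TFRecordDataset(['ct_scans.tfrecord'])
dataset = dataset.map(lambda s: parse_tf_example(s, params))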
Example #27
def _read_and_decode(filename_queue, image_pixel=96, distort=0):
  """Read a norb tf record file."""
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)
  features = tf.parse_single_example(
      serialized_example,
      # Defaults are not specified since both keys are required.
      features={
          'image_raw': tf.FixedLenFeature([], tf.string),
          'label': tf.FixedLenFeature([], tf.int64),
          'height': tf.FixedLenFeature([], tf.int64),
          'width': tf.FixedLenFeature([], tf.int64),
          'depth': tf.FixedLenFeature([], tf.int64),
          'meta': tf.FixedLenFeature([4], tf.int64),
      })

  # Convert from a scalar string tensor (whose single string has
  # length image_pixels) to a uint8 tensor with shape
  # [image_pixels].
  image = tf.decode_raw(features['image_raw'], tf.uint8)
  height = tf.cast(features['height'], tf.int32)
  depth = tf.cast(features['depth'], tf.int32)
  image = tf.reshape(image, tf.stack([depth, height, height]))
  image = tf.transpose(image, [1, 2, 0])
  image = tf.cast(image, tf.float32)
  # NOTE: orig_images and new_dim below are only defined when image_pixel < 96
  # (i.e. 48 or 32), so callers are expected to request a resized image.
  if image_pixel < 96:
    print('image resizing to {}'.format(image_pixel))
    image = tf.image.resize_images(image, [image_pixel, image_pixel])
    orig_images = image

  if image_pixel == 48:
    new_dim = 32
  elif image_pixel == 32:
    new_dim = 22
  if distort == 1:
    image = tf.image.random_brightness(image, max_delta=63)
    image = tf.image.random_contrast(image, lower=0.2, upper=1.8)
    image = tf.random_crop(image, tf.stack([new_dim, new_dim, depth]))
    # 0.26179938779 is 15 degrees in radians
    image = tf.image.per_image_standardization(image)
    image_pixel = new_dim
  elif distort == 2:
    image = tf.image.resize_image_with_crop_or_pad(image, new_dim, new_dim)
    image = tf.image.per_image_standardization(image)
    image_pixel = new_dim
  else:
    image = image * (1.0 / 255.0)
    image = tf.div(
        tf.subtract(image, tf.reduce_min(image)),
        tf.subtract(tf.reduce_max(image), tf.reduce_min(image)))

  # Convert label from a scalar uint8 tensor to an int32 scalar.
  label = tf.cast(features['label'], tf.int32)

  return image, label, image_pixel, orig_images
Example #28
def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    key, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example, features={'vol_raw': tf.FixedLenFeature([], tf.string)})
    vol_str = tf.decode_raw(features['vol_raw'], tf.float32)
    volume = tf.reshape(vol_str, volume_shape)
    volume_batch = tf.train.shuffle_batch([volume], batch_size=batch_size, capacity=capacity, num_threads=num_threads,
                                          min_after_dequeue=min_after_dequeue)
    finalbatch = tf.expand_dims(volume_batch, -1)

    return finalbatch
Example #29
def _parse_function(example_proto):
  features = {"image": tf.FixedLenFeature((), tf.string, default_value=""),
      "label": tf.FixedLenFeature((), tf.int64, default_value=0)}
  parsed_features = tf.parse_single_example(example_proto, features)
  images = parsed_features["image"]
  images = tf.decode_raw(images, tf.uint8)
  # channel first
  images = tf.reshape(images, [3, 32, 32])
  images = tf.cast(images, tf.float32)
  images = (images - 127) / 128.0 * 4
  return images, parsed_features["label"]
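
A minimal usage sketch, an assumption; the file name and batch size are hypothetical.

dataset = tf.data.TFRecordDataset(['cifar_train.tfrecord'])
dataset = dataset.map(_parse_function).batch(128)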
Example #30
    def decode_image(inputstream):
        inputstream = tf.decode_raw(inputstream, tf.uint8)
        image, _ = tf.slice(inputstream, [1], [image_vec_length]), tf.slice(inputstream, [0], [1])
        if use_fp16:
            image = tf.cast(image, tf.float16)
        else:
            image = tf.cast(image, tf.float32)
        image = tf.reshape(image, [32*32*3])
        image = image / 255
        # label = tf.reshape(label, [])
        # label = tf.to_int32(label)
        return image